#!/usr/bin/env python
from pda.dataset import init_aggregate_and_appliance_dataset_figure
import slicedpy.feature_detectors as fd
from slicedpy.plot import plot_steady_states
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
SLIDING_MEANS_STEADY_STATES = False
RD_STEADY_STATES = True
TTESTS = False
SS_LINREGRESS = False
STD = False # spike then decay
STDP = True # spike then poly decay
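# Note: STDP needs two subplots, so it is counted twice in n_subplots below.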
subplots, chan = init_aggregate_and_appliance_dataset_figure(
start_date='2013/6/4', end_date='2013/6/5',
n_subplots=2 + np.array([TTESTS, SS_LINREGRESS, STD, STDP, STDP]).sum(),
date_format='%H:%M:%S', alpha=0.6)
subplot_i = 2
#####################
# SLIDING MEANS STEADY STATES
if SLIDING_MEANS_STEADY_STATES:
print("Sliding mean steady states...")
sliding_mean_steady_states = fd.sliding_mean_steady_states(chan.series,
max_range=15)
plot_steady_states(subplots[0], sliding_mean_steady_states,
offset=2, color='y', label='Sliding mean')
#####################
# RELATIVE DEVIATION STEADY STATES
if RD_STEADY_STATES:
print("Relative deviation steady states...")
relative_deviation_power_segments = fd.relative_deviation_power_sgmnts(chan.series)
plot_steady_states(subplots[0], relative_deviation_power_segments,
offset=-1, color='c', label='Relative deviation')
####################
subplots[0].legend()
##################
# T Test...
if TTESTS:
print("Calculating and plotting t-tests...")
for start, ss in relative_deviation_power_segments.iterrows():
end = ss['end']
p_value = fd.ttest_both_halves(chan.series, start=start, end=end)
subplots[subplot_i].plot([start, end], [p_value, p_value], color='r', linewidth=4)
subplots[subplot_i].set_title('p value for both halves of steady state from relative deviation')
subplots[subplot_i].set_ylabel('p value')
subplot_i += 1
##################
# Linear regression...
if SS_LINREGRESS:
print("Calculating and plotting linear regression...")
for start, ss in relative_deviation_power_segments.iterrows():
end = ss['end']
slope, r_value, p_value, stderr = fd.linregress(chan.series, start, end)
subplots[subplot_i].plot([start, end], [slope, slope], color='r', linewidth=4)
subplots[subplot_i].set_title('Slope from linear regression over relative deviation steady states')
subplots[subplot_i].set_ylabel('slope in watts/second')
subplot_i += 1
##################
# Spike then decay
if STD:
print("Calculating and plotting spike then decay...")
stds = fd.spike_then_decay(chan.series, mode='linear')
for start, std in stds.iterrows():
end = std['end']
subplots[subplot_i].plot([start], [std['slope']],
'o', markersize=6, color='r', linewidth=4)
subplots[subplot_i].set_title('Spike Then Decay')
subplots[subplot_i].set_ylabel('slope in watts/second')
subplots[subplot_i].grid()
subplot_i += 1
######################
#
# NEEDS 2 SUBPLOTS!
if STDP:
print("Calculating and plotting spike then poly decay...")
stds = fd.spike_then_decay(chan.series, mode='poly')
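# Fit each decay to the model y = c + m/x, where x is seconds since the spike
# (the same model named in the subplot title below).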
curve = lambda x, c, m: c + (m / x)
for start, std in stds.iterrows():
end = std['end']
X = chan.series.index[(chan.series.index >= start) &
(chan.series.index < end)]
X = mdates.date2num(X)
x = X * mdates.SEC_PER_DAY
subplots[subplot_i].plot([start], [std['slope']],
'o', markersize=6, color='r', linewidth=4)
subplots[subplot_i+1].plot(X,
curve((x-x[0])+1, std['intercept'],
std['slope']),
color='r')
subplots[subplot_i].set_title('Spike Then Poly Decay: y = c + m/x')
subplots[subplot_i].set_ylabel('m')
subplots[subplot_i].grid()
subplots[subplot_i+1].set_title('Reconstructed curve')
subplots[subplot_i+1].set_ylabel('watts')
subplots[subplot_i+1].grid()
subplot_i += 2
plt.show()
|
{"hexsha": "d36a30f37d37c84128503a93eb258496549ca6b4", "size": 4213, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/plot_decays.py", "max_stars_repo_name": "JackKelly/slicedpy", "max_stars_repo_head_hexsha": "c2fa7eb4c7b7374f8192a43d8e617b63c9e25e62", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2017-02-03T22:05:25.000Z", "max_stars_repo_stars_event_max_datetime": "2017-08-29T19:06:17.000Z", "max_issues_repo_path": "scripts/plot_decays.py", "max_issues_repo_name": "JackKelly/slicedpy", "max_issues_repo_head_hexsha": "c2fa7eb4c7b7374f8192a43d8e617b63c9e25e62", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/plot_decays.py", "max_forks_repo_name": "JackKelly/slicedpy", "max_forks_repo_head_hexsha": "c2fa7eb4c7b7374f8192a43d8e617b63c9e25e62", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.6347826087, "max_line_length": 102, "alphanum_fraction": 0.6266318538, "include": true, "reason": "import numpy", "num_tokens": 1075}
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import *
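# The doctests below exercise numba's support for the builtin zip();
# they are collected and run by numba.testing.testmod() at the bottom.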
@autojit
def zip1(L1, L2):
"""
>>> zip1(range(2), range(5, 8))
[(0, 5), (1, 6)]
"""
return list(zip(L1, L2))
@autojit
def zip2(L1, L2, L3):
"""
>>> zip2(range(2), range(5, 8), range(9, 13))
[(0, 5, 9), (1, 6, 10)]
"""
return list(zip(L1, L2, L3))
@autojit
def ziploop1(L1, L2):
"""
>>> ziploop1(range(2), range(5, 8))
0 5
1 6
"""
for i, j in zip(L1, L2):
print(i, j)
@autojit
def ziploop2(L1, L2, L3):
"""
>>> ziploop2(range(2), range(5, 8), range(9, 13))
0 5 9
1 6 10
"""
for i, j, k in zip(L1, L2, L3):
print(i, j, k)
if __name__ == '__main__':
import numba
numba.testing.testmod()
|
{"hexsha": "5839da7c461edc4566039a792272752b67edef07", "size": 811, "ext": "py", "lang": "Python", "max_stars_repo_path": "oldnumba/tests/builtins/test_builtin_zip.py", "max_stars_repo_name": "meawoppl/numba", "max_stars_repo_head_hexsha": "bb8df0aee99133c6d52465ae9f9df2a7996339f3", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2015-01-29T06:52:36.000Z", "max_stars_repo_stars_event_max_datetime": "2015-01-29T06:52:36.000Z", "max_issues_repo_path": "oldnumba/tests/builtins/test_builtin_zip.py", "max_issues_repo_name": "meawoppl/numba", "max_issues_repo_head_hexsha": "bb8df0aee99133c6d52465ae9f9df2a7996339f3", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "oldnumba/tests/builtins/test_builtin_zip.py", "max_forks_repo_name": "meawoppl/numba", "max_forks_repo_head_hexsha": "bb8df0aee99133c6d52465ae9f9df2a7996339f3", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.0222222222, "max_line_length": 64, "alphanum_fraction": 0.5055487053, "include": true, "reason": "import numba,from numba", "num_tokens": 326}
|
"""
The `mode` type parameter must be either `:inflate` or `:deflate`.
"""
mutable struct Source{mode,T<:BufferedInputStream}
input::T
zstream::ZStream
state::State
reset_on_end::Bool
end
# inflate source constructors
# ---------------------------
function InflateSource(input::T, raw::Bool, gzip::Bool,
reset_on_end::Bool) where T<:BufferedInputStream
return Source{:inflate,T}(
input,
init_inflate_zstream(raw, gzip),
initialized,
reset_on_end)
end
function InflateSource(input::BufferedInputStream, bufsize::Integer, raw::Bool,
gzip::Bool, reset_on_end::Bool)
return InflateSource(input, raw, gzip, reset_on_end)
end
function InflateSource(input::IO, bufsize::Integer, raw::Bool, gzip::Bool,
reset_on_end::Bool)
input_stream = BufferedInputStream(input, bufsize)
return InflateSource(input_stream, raw, gzip, reset_on_end)
end
function InflateSource(input::Vector{UInt8}, bufsize::Integer, raw::Bool,
gzip::Bool, reset_on_end::Bool)
return InflateSource(BufferedInputStream(input), raw, gzip, reset_on_end)
end
"""
ZlibInflateInputStream(input[; <keyword arguments>])
Construct a zlib inflate input stream to decompress gzip/zlib data.
# Arguments
* `input`: a byte vector, IO object, or BufferedInputStream containing compressed data to inflate.
* `bufsize::Integer=8192`: input and output buffer size.
* `raw::Bool=false`: if true, the input is raw compressed data, without zlib metadata.
* `gzip::Bool=true`: if true, data is gzip compressed; if false, zlib compressed.
* `reset_on_end::Bool=true`: on stream end, try to find the start of another stream.
"""
function ZlibInflateInputStream(input; bufsize::Integer=8192, raw::Bool=false,
gzip::Bool=true, reset_on_end::Bool=true)
return BufferedInputStream(
InflateSource(input, bufsize, raw, gzip, reset_on_end),
bufsize)
end
# deflate source constructors
# ---------------------------
function DeflateSource(
input::T, raw::Bool, gzip::Bool, level::Integer, mem_level::Integer,
strategy) where T<:BufferedInputStream
return Source{:deflate,T}(
input,
init_deflate_zstream(raw, gzip, level, mem_level, strategy),
initialized,
false)
end
function DeflateSource(input::BufferedInputStream, bufsize::Integer, raw::Bool,
gzip::Bool, level::Integer, mem_level::Integer, strategy)
return DeflateSource(input, raw, gzip, level, mem_level, strategy)
end
function DeflateSource(input::IO, bufsize::Integer, raw::Bool, gzip::Bool,
level::Integer,
mem_level::Integer, strategy)
input_stream = BufferedInputStream(input, bufsize)
return DeflateSource(input_stream, raw, gzip, level, mem_level, strategy)
end
function DeflateSource(input::Vector{UInt8}, bufsize::Integer, raw::Bool,
gzip::Bool, level::Integer, mem_level::Integer, strategy)
return DeflateSource(BufferedInputStream(input), raw, gzip, level, mem_level, strategy)
end
"""
ZlibDeflateInputStream(input[; <keyword arguments>])
Construct a zlib deflate input stream to compress data into gzip/zlib format.
# Arguments
* `input`: a byte vector, IO object, or BufferedInputStream containing data to compress.
* `bufsize::Integer=8192`: input and output buffer size.
* `raw::Bool=false`: if true, produce raw compressed data, without zlib metadata.
* `gzip::Bool=true`: if true, data is gzip compressed; if false, zlib compressed.
* `level::Integer=6`: compression level in 1-9.
* `mem_level::Integer=8`: memory to use for compression in 1-9.
* `strategy=Z_DEFAULT_STRATEGY`: compression strategy; see zlib documentation.
"""
function ZlibDeflateInputStream(input;
bufsize::Integer=8192,
raw::Bool=false,
gzip::Bool=true,
level::Integer=6,
mem_level::Integer=8,
strategy=Z_DEFAULT_STRATEGY)
return BufferedInputStream(
DeflateSource(input, bufsize, raw, gzip, level, mem_level, strategy),
bufsize)
end
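# A minimal usage sketch (assuming the rest of Libz is loaded): the two stream
# types above are inverses, so a byte vector round-trips through them:
#
#   data = Vector{UInt8}("round trip me")
#   compressed = read(ZlibDeflateInputStream(data))
#   @assert read(ZlibInflateInputStream(compressed)) == data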
"""
readbytes!(source, buffer, from, to)
Read bytes from the zlib stream to a buffer. Satisfies the BufferedStreams
source interface.
"""
function BufferedStreams.readbytes!(
source::Source{mode},
buffer::Vector{UInt8},
from::Int, to::Int) where mode
if source.state == finalized
return 0
elseif source.state == finished && source.reset_on_end
reset!(source)
end
@trans source (
initialized => inprogress,
inprogress => inprogress
)
fillbuffer!(source.input)
source.zstream.next_out = pointer(buffer, from)
source.zstream.avail_out = to - from + 1
_, n_out = process(
source,
mode == :deflate && eof(source.input) ? Z_FINISH : Z_NO_FLUSH)
return n_out
end
function process(source::Source{mode}, flush) where mode
@assert source.state == inprogress
# counter of processed input/output bytes
n_in = n_out = 0
input = source.input
zstream = source.zstream
#println("--- Source{", mode, "} ---")
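# Feed whatever input is currently buffered to zlib; if it is all consumed
# without reaching stream end, refill the buffer and jump back here via @goto.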
@label process
zstream.next_in = pointer(input)
zstream.avail_in = BufferedStreams.available_bytes(input)
old_avail_in = zstream.avail_in
old_avail_out = zstream.avail_out
if mode == :inflate
ret = inflate!(zstream, flush)
else
ret = deflate!(zstream, flush)
end
n_in += old_avail_in - zstream.avail_in
n_out += old_avail_out - zstream.avail_out
input.position += old_avail_in - zstream.avail_in
if ret == Z_OK
if zstream.avail_in == 0
if BufferedStreams.fillbuffer!(input) == 0
flush = Z_FINISH
end
@goto process
end
elseif ret == Z_STREAM_END
@trans source inprogress => finished
elseif ret == Z_BUF_ERROR
# could not consume more input or produce more output
elseif ret < 0
zerror(zstream, ret)
else
@assert false
end
return n_in, n_out
end
@inline function Base.eof(source::Source{mode}) where mode
if source.state == initialized ||
(mode == :inflate && source.state == finished && source.reset_on_end)
return eof(source.input)
end
return source.state == finished || source.state == finalized
end
function Base.close(source::Source{mode}) where mode
if source.state == finalized
isopen(source.input) && close(source.input)
return
end
if mode == :inflate
@zcheck end_inflate!(source.zstream)
else
@zcheck end_deflate!(source.zstream)
end
@trans source (
initialized => finalized,
inprogress => finalized,
finished => finalized
)
close(source.input)
return
end
function reset!(source::Source{mode}) where mode
if mode == :inflate
@zcheck reset_inflate!(source.zstream)
else
@zcheck reset_deflate!(source.zstream)
end
@trans source (
initialized => initialized,
inprogress => initialized,
finished => initialized,
finalized => initialized
)
return source
end
# For backwards compatibility with 0.2 releases.
InflateSource(input::BufferedInputStream, gzip::Bool, reset_on_end::Bool) =
InflateSource(input, false, gzip, reset_on_end)
InflateSource(input::BufferedInputStream, bufsize::Integer, gzip::Bool,
reset_on_end::Bool) =
InflateSource(input, bufsize, false, gzip, reset_on_end)
InflateSource(input::IO, bufsize::Integer, gzip::Bool, reset_on_end::Bool) =
InflateSource(input, bufsize, false, gzip, reset_on_end)
InflateSource(input::Vector{UInt8}, bufsize::Integer, gzip::Bool,
reset_on_end::Bool) =
InflateSource(input, bufsize, false, gzip, reset_on_end)
DeflateSource(input::BufferedInputStream, gzip::Bool, level::Integer,
mem_level::Integer, strategy) =
DeflateSource(input, false, gzip, level, mem_level, strategy)
DeflateSource(input::BufferedInputStream, bufsize::Integer, gzip::Bool,
level::Integer, mem_level::Integer, strategy) =
DeflateSource(input, bufsize, false, gzip, level, mem_level, strategy)
DeflateSource(input::IO, bufsize::Integer, gzip::Bool, level::Integer,
mem_level::Integer, strategy) =
DeflateSource(input, bufsize, false, gzip, level, mem_level, strategy)
DeflateSource(input::Vector{UInt8}, bufsize::Integer, gzip::Bool,
level::Integer, mem_level::Integer, strategy) =
DeflateSource(input, bufsize, false, gzip, level, mem_level, strategy)
|
{"hexsha": "a3790aab49845c3310aa47a345a31828c130aa5d", "size": 8833, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/source.jl", "max_stars_repo_name": "UnofficialJuliaMirror/Libz.jl-2ec943e9-cfe8-584d-b93d-64dcb6d567b7", "max_stars_repo_head_hexsha": "5b6e825f67bb72b528fa85a78fdd6ba0c1d5c724", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 28, "max_stars_repo_stars_event_min_datetime": "2015-09-04T07:53:31.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T17:26:32.000Z", "max_issues_repo_path": "src/source.jl", "max_issues_repo_name": "UnofficialJuliaMirror/Libz.jl-2ec943e9-cfe8-584d-b93d-64dcb6d567b7", "max_issues_repo_head_hexsha": "5b6e825f67bb72b528fa85a78fdd6ba0c1d5c724", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 63, "max_issues_repo_issues_event_min_datetime": "2015-09-04T16:41:09.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-16T15:24:02.000Z", "max_forks_repo_path": "src/source.jl", "max_forks_repo_name": "UnofficialJuliaMirror/Libz.jl-2ec943e9-cfe8-584d-b93d-64dcb6d567b7", "max_forks_repo_head_hexsha": "5b6e825f67bb72b528fa85a78fdd6ba0c1d5c724", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 27, "max_forks_repo_forks_event_min_datetime": "2015-10-17T05:23:12.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-01T12:28:07.000Z", "avg_line_length": 33.2067669173, "max_line_length": 98, "alphanum_fraction": 0.6561757047, "num_tokens": 2071}
|
import random
import csv
import numpy as np
import torch
import torch.utils.data as torchdata
from torchvision import transforms
import torchaudio
import librosa
from PIL import Image
from . import video_transforms as vtransforms
class BaseDataset(torchdata.Dataset):
def __init__(self, list_sample, opt, max_sample=-1, split='train'):
# params
self.num_frames = opt.num_frames
self.stride_frames = opt.stride_frames
self.frameRate = opt.frameRate
self.imgSize = opt.imgSize
self.audRate = opt.audRate
self.audLen = opt.audLen
self.audSec = 1. * self.audLen / self.audRate
self.binary_mask = opt.binary_mask
# STFT params
self.log_freq = opt.log_freq
self.stft_frame = opt.stft_frame
self.stft_hop = opt.stft_hop
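# spectrogram dimensions: HS frequency bins (stft_frame//2 + 1), WS time frames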
self.HS = opt.stft_frame // 2 + 1
self.WS = (self.audLen + 1) // self.stft_hop
self.split = split
self.seed = opt.seed
random.seed(self.seed)
# initialize video transform
self._init_vtransform()
# list_sample can be a python list or the path to a CSV file of samples
if isinstance(list_sample, str):
# self.list_sample = [x.rstrip() for x in open(list_sample, 'r')]
self.list_sample = []
for row in csv.reader(open(list_sample, 'r'), delimiter=','):
if len(row) < 2:
continue
self.list_sample.append(row)
elif isinstance(list_sample, list):
self.list_sample = list_sample
else:
raise TypeError('Error list_sample!')
if self.split == 'train':
self.list_sample *= opt.dup_trainset
random.shuffle(self.list_sample)
if max_sample > 0:
self.list_sample = self.list_sample[0:max_sample]
num_sample = len(self.list_sample)
assert num_sample > 0
print('# samples: {}'.format(num_sample))
def __len__(self):
return len(self.list_sample)
# video transform funcs
def _init_vtransform(self):
transform_list = []
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
if self.split == 'train':
transform_list.append(vtransforms.Resize(int(self.imgSize * 1.1), Image.BICUBIC))
transform_list.append(vtransforms.RandomCrop(self.imgSize))
transform_list.append(vtransforms.RandomHorizontalFlip())
else:
transform_list.append(vtransforms.Resize(self.imgSize, Image.BICUBIC))
transform_list.append(vtransforms.CenterCrop(self.imgSize))
transform_list.append(vtransforms.ToTensor())
transform_list.append(vtransforms.Normalize(mean, std))
transform_list.append(vtransforms.Stack())
self.vid_transform = transforms.Compose(transform_list)
# image transform funcs, deprecated
def _init_transform(self):
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
if self.split == 'train':
self.img_transform = transforms.Compose([
transforms.Scale(int(self.imgSize * 1.2)),
transforms.RandomCrop(self.imgSize),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean, std)])
else:
self.img_transform = transforms.Compose([
transforms.Scale(self.imgSize),
transforms.CenterCrop(self.imgSize),
transforms.ToTensor(),
transforms.Normalize(mean, std)])
def _load_frames(self, paths):
frames = []
for path in paths:
frames.append(self._load_frame(path))
frames = self.vid_transform(frames)
return frames
def _load_frame(self, path):
img = Image.open(path).convert('RGB')
return img
def _stft(self, audio):
spec = librosa.stft(
audio, n_fft=self.stft_frame, hop_length=self.stft_hop)
amp = np.abs(spec)
phase = np.angle(spec)
return torch.from_numpy(amp), torch.from_numpy(phase)
def _load_audio_file(self, path):
if path.endswith('.mp3'):
audio_raw, rate = torchaudio.load(path)
audio_raw = audio_raw.numpy().astype(np.float32)
print("audio_raw shape",audio_raw.shape)
# range to [-1, 1]
audio_raw *= (2.0**-31)
# convert to mono
if audio_raw.shape[0] == 2:
audio_raw = (audio_raw[0] + audio_raw[1]) / 2
else:
audio_raw = audio_raw[0]
else:
audio_raw, rate = librosa.load(path, sr=None, mono=True)
print("audio_raw shape",audio_raw.shape)
return audio_raw, rate
def _load_audio(self, path, center_timestamp, nearest_resample=False):
audio = np.zeros(self.audLen, dtype=np.float32)
# silent
if path.endswith('silent'):
return audio
# load audio
audio_raw, rate = self._load_audio_file(path)
print("audio_raw shape, rate",audio_raw.shape,rate)
# repeat if audio is too short
if audio_raw.shape[0] < rate * self.audSec:
n = int(rate * self.audSec / audio_raw.shape[0]) + 1
audio_raw = np.tile(audio_raw, n)
print("too short, need repeat")
# resample
if rate > self.audRate:
# print('resample {}->{}'.format(rate, self.audRate))
print("large rate, need resample")
if nearest_resample:
audio_raw = audio_raw[::rate//self.audRate]
else:
audio_raw = librosa.resample(audio_raw, rate, self.audRate)
# crop N seconds
print("after repeat and resample, the shape",audio_raw.shape)
len_raw = audio_raw.shape[0]
center = int(center_timestamp * self.audRate)
start = max(0, center - self.audLen // 2)
end = min(len_raw, center + self.audLen // 2)
print("len_raw, center, start, end",len_raw, center, start, end)
print("interval:", self.audLen//2-(center-start), self.audLen//2+(end-center))
audio[self.audLen//2-(center-start): self.audLen//2+(end-center)] = \
audio_raw[start:end]
# randomize volume
if self.split == 'train':
scale = random.random() + 0.5 # 0.5-1.5
audio *= scale
audio[audio > 1.] = 1.
audio[audio < -1.] = -1.
return audio
def _mix_n_and_stft(self, audios):
N = len(audios)
mags = [None for n in range(N)]
# mix
for n in range(N):
audios[n] /= N
audio_mix = np.asarray(audios).sum(axis=0)
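# each source was scaled by 1/N above, so the mixture stays in the original amplitude range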
# STFT
amp_mix, phase_mix = self._stft(audio_mix)
for n in range(N):
ampN, _ = self._stft(audios[n])
mags[n] = ampN.unsqueeze(0)
# to tensor
# audio_mix = torch.from_numpy(audio_mix)
for n in range(N):
audios[n] = torch.from_numpy(audios[n])
return amp_mix.unsqueeze(0), mags, phase_mix.unsqueeze(0)
def dummy_mix_data(self, N):
frames = [None for n in range(N)]
audios = [None for n in range(N)]
mags = [None for n in range(N)]
amp_mix = torch.zeros(1, self.HS, self.WS)
phase_mix = torch.zeros(1, self.HS, self.WS)
for n in range(N):
frames[n] = torch.zeros(
3, self.num_frames, self.imgSize, self.imgSize)
audios[n] = torch.zeros(self.audLen)
mags[n] = torch.zeros(1, self.HS, self.WS)
return amp_mix, mags, frames, audios, phase_mix
|
{"hexsha": "534b367b18668c80e5e893e58d214da608546231", "size": 7721, "ext": "py", "lang": "Python", "max_stars_repo_path": "dataset/base.py", "max_stars_repo_name": "TaoStarlit/Sound-of-Pixels", "max_stars_repo_head_hexsha": "06cd37a75836e22208f2e59bcc263b89938e065e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dataset/base.py", "max_issues_repo_name": "TaoStarlit/Sound-of-Pixels", "max_issues_repo_head_hexsha": "06cd37a75836e22208f2e59bcc263b89938e065e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dataset/base.py", "max_forks_repo_name": "TaoStarlit/Sound-of-Pixels", "max_forks_repo_head_hexsha": "06cd37a75836e22208f2e59bcc263b89938e065e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-01-18T09:07:01.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-18T09:07:01.000Z", "avg_line_length": 34.1637168142, "max_line_length": 93, "alphanum_fraction": 0.5830850926, "include": true, "reason": "import numpy", "num_tokens": 1866}
|
program kind
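! Check that REAL kinds 4, 8 and 16 occupy 4, 8 and 16 bytes, as reported by SIZEOF.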
REAL( KIND = 4 ) :: four
REAL( KIND = 8 ) :: eight
REAL( KIND = 16 ) :: sixteen
INTEGER :: i4, i8, i16
i4 = SIZEOF( four )
i8 = SIZEOF( eight )
i16 = SIZEOF( sixteen )
IF( i4 == 4 .AND. i8 == 8 .AND. i16 == 16 ) THEN
call EXIT( 0 )
ELSE
call EXIT( 1 )
END IF
end program
|
{"hexsha": "6475061db9e4c7778694528a3b2d6ae4e08fb1f7", "size": 332, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "validation_tests/llvm/f18/simple/kind.f90", "max_stars_repo_name": "brugger1/testsuite", "max_stars_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2019-02-12T18:20:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-09T19:46:19.000Z", "max_issues_repo_path": "validation_tests/llvm/f18/simple/kind.f90", "max_issues_repo_name": "brugger1/testsuite", "max_issues_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 24, "max_issues_repo_issues_event_min_datetime": "2020-08-31T22:05:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-21T18:30:03.000Z", "max_forks_repo_path": "validation_tests/llvm/f18/simple/kind.f90", "max_forks_repo_name": "brugger1/testsuite", "max_forks_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2020-08-31T21:59:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T22:06:46.000Z", "avg_line_length": 17.4736842105, "max_line_length": 50, "alphanum_fraction": 0.5120481928, "num_tokens": 131}
|
[STATEMENT]
lemma inv_end:
assumes "invariant ({}, B)"
shows "B = saturate"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. B = saturate
[PROOF STEP]
proof (intro set_eqI iffI, goal_cases lr rl)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>x. x \<in> B \<Longrightarrow> x \<in> saturate
2. \<And>x. x \<in> saturate \<Longrightarrow> x \<in> B
[PROOF STEP]
case (lr x)
[PROOF STATE]
proof (state)
this:
x \<in> B
goal (2 subgoals):
1. \<And>x. x \<in> B \<Longrightarrow> x \<in> saturate
2. \<And>x. x \<in> saturate \<Longrightarrow> x \<in> B
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
x \<in> B
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
x \<in> B
goal (1 subgoal):
1. x \<in> saturate
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
x \<in> B
invariant ({}, B)
goal (1 subgoal):
1. x \<in> saturate
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x \<in> saturate
goal (1 subgoal):
1. \<And>x. x \<in> saturate \<Longrightarrow> x \<in> B
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. x \<in> saturate \<Longrightarrow> x \<in> B
[PROOF STEP]
case (rl x)
[PROOF STATE]
proof (state)
this:
x \<in> saturate
goal (1 subgoal):
1. \<And>x. x \<in> saturate \<Longrightarrow> x \<in> B
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
x \<in> saturate
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
x \<in> saturate
goal (1 subgoal):
1. x \<in> B
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
x \<in> saturate
invariant ({}, B)
goal (1 subgoal):
1. x \<in> B
[PROOF STEP]
by (induct x rule: saturate.induct) fastforce
[PROOF STATE]
proof (state)
this:
x \<in> B
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 800, "file": "Regular_Tree_Relations_Horn_Setup_Horn_Inference", "length": 13}
|
/*
* The MIT License
*
* Copyright (c) 2012-2018 The University of Utah
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "ConvectiveInterpolationMethods.h"
#include <boost/bimap.hpp>
#include <stdexcept>
#include <sstream>
#include <algorithm>
namespace WasatchCore {
typedef boost::bimap<std::string,ConvInterpMethods> ConvInterpStringMap;
static ConvInterpStringMap validConvInterpStrings;
void set_conv_interp_string_map()
{
if( !validConvInterpStrings.empty() ) return;
typedef ConvInterpStringMap::left_value_type LVT;
validConvInterpStrings.left.insert( LVT("CENTRAL" , CENTRAL ) );
validConvInterpStrings.left.insert( LVT("UPWIND" , UPWIND ) );
validConvInterpStrings.left.insert( LVT("SUPERBEE", SUPERBEE) );
validConvInterpStrings.left.insert( LVT("CHARM" , CHARM ) );
validConvInterpStrings.left.insert( LVT("KOREN" , KOREN ) );
validConvInterpStrings.left.insert( LVT("MC" , MC ) );
validConvInterpStrings.left.insert( LVT("OSPRE" , OSPRE ) );
validConvInterpStrings.left.insert( LVT("SMART" , SMART ) );
validConvInterpStrings.left.insert( LVT("VANLEER" , VANLEER ) );
validConvInterpStrings.left.insert( LVT("HCUS" , HCUS ) );
validConvInterpStrings.left.insert( LVT("MINMOD" , MINMOD ) );
validConvInterpStrings.left.insert( LVT("HQUICK" , HQUICK ) );
}
//------------------------------------------------------------------
ConvInterpMethods get_conv_interp_method( std::string key )
{
set_conv_interp_string_map();
std::transform( key.begin(), key.end(), key.begin(), ::toupper );
ConvInterpStringMap::left_const_iterator ii = validConvInterpStrings.left.find(key);
if( ii == validConvInterpStrings.left.end() ){
std::ostringstream msg;
msg << __FILE__ << " : " << __LINE__ << std::endl
<< "No matching upwind method for '" << key << "'" << std::endl;
throw std::invalid_argument( msg.str() );
}
return ii->second;
}
std::string get_conv_interp_method( const ConvInterpMethods key )
{
set_conv_interp_string_map();
return validConvInterpStrings.right.find(key)->second;
}
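// A minimal usage sketch (hypothetical call site): the string lookup is
// case-insensitive and the two overloads are inverses:
//
//   ConvInterpMethods m = get_conv_interp_method( "superbee" ); // -> SUPERBEE
//   std::string s = get_conv_interp_method( m );                // -> "SUPERBEE"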
} // namespace WasatchCore
|
{"hexsha": "dab6cce6b96889b739622ca1c6437445535bc207", "size": 3174, "ext": "cc", "lang": "C++", "max_stars_repo_path": "src/CCA/Components/Wasatch/ConvectiveInterpolationMethods.cc", "max_stars_repo_name": "damu1000/Uintah", "max_stars_repo_head_hexsha": "0c768664c1fe0a80eff2bbbd9b837e27f281f0a5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2020-06-10T08:21:31.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-23T18:33:16.000Z", "max_issues_repo_path": "src/CCA/Components/Wasatch/ConvectiveInterpolationMethods.cc", "max_issues_repo_name": "damu1000/Uintah", "max_issues_repo_head_hexsha": "0c768664c1fe0a80eff2bbbd9b837e27f281f0a5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/CCA/Components/Wasatch/ConvectiveInterpolationMethods.cc", "max_forks_repo_name": "damu1000/Uintah", "max_forks_repo_head_hexsha": "0c768664c1fe0a80eff2bbbd9b837e27f281f0a5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2019-12-30T05:48:30.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-12T16:24:16.000Z", "avg_line_length": 41.2207792208, "max_line_length": 88, "alphanum_fraction": 0.6994328922, "num_tokens": 792}
|
#define PY_ARRAY_UNIQUE_SYMBOL pyhip_ARRAY_API
#include <hip.hpp>
#include <utility>
#include <numeric>
#include <algorithm>
#include "tools.hpp"
#include "wrap_helpers.hpp"
#include <boost/python/stl_iterator.hpp>
using namespace pyhip;
using boost::shared_ptr;
namespace
{
py::handle<>
HipError,
HipMemoryError,
HipLogicError,
HipRuntimeError,
HipLaunchError;
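// These exception handles are populated in BOOST_PYTHON_MODULE below
// via the DECLARE_EXC macro.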
void translate_hip_error(const pyhip::error &err)
{
if (err.code() == hipErrorLaunchFailure
|| err.code() == hipErrorLaunchOutOfResources
|| err.code() == hipErrorLaunchTimeOut
)
PyErr_SetString(HipLaunchError.get(), err.what());
else if (err.code() == hipErrorOutOfMemory)
PyErr_SetString(HipMemoryError.get(), err.what());
else if (err.code() == hipErrorNoDevice
|| err.code() == hipErrorNoBinaryForGpu
|| err.code() == hipErrorFileNotFound
|| err.code() == hipErrorNotReady
|| err.code() == hipErrorECCNotCorrectable
)
PyErr_SetString(HipRuntimeError.get(), err.what());
else if (err.code() == hipErrorUnknown)
PyErr_SetString(HipError.get(), err.what());
else
PyErr_SetString(HipLogicError.get(), err.what());
}
py::tuple hip_version()
{
return py::make_tuple(
HIP_VERSION_MAJOR,
HIP_VERSION_MINOR,
HIP_VERSION_PATCH);
};
class host_alloc_flags { };
class mem_host_register_flags { };
// {{{ "python-aware" wrappers
py::object device_get_attribute(device const &dev, hipDeviceAttribute_t attr)
{
if (attr == hipDeviceAttributeComputeMode)
return py::object(hipComputeMode(dev.get_attribute(attr)));
return py::object(dev.get_attribute(attr));
}
device_allocation *mem_alloc_wrap(size_t bytes)
{
return new device_allocation(pyhip::mem_alloc_gc(bytes));
}
class pointer_holder_base_wrap
: public pointer_holder_base,
public py::wrapper<pointer_holder_base>
{
public:
DEV_PTR get_pointer() const
{
return this->get_override("get_pointer")();
}
};
py::tuple mem_alloc_pitch_wrap(
size_t width, size_t height, unsigned int access_size)
{
std::auto_ptr<device_allocation> da;
Py_ssize_t pitch = mem_alloc_pitch(
da, width, height, access_size);
return py::make_tuple(
handle_from_new_ptr(da.release()), pitch);
}
// {{{ memory set
void py_memset_d8(DEV_PTR dst, unsigned char uc, size_t n )
{ PYHIP_CALL_GUARDED_THREADED(hipMemsetD8, (convert_devptr(dst), uc, n )); }
void py_memset_d16(DEV_PTR dst, unsigned short us, size_t n )
{ PYHIP_CALL_GUARDED_THREADED(hipMemsetD16, (convert_devptr(dst), us, n )); }
void py_memset_d32(DEV_PTR dst, unsigned int ui, size_t n )
{ PYHIP_CALL_GUARDED_THREADED(hipMemsetD32, (convert_devptr(dst), ui, n )); }
/* void py_memset_d2d8(DEV_PTR dst, size_t dst_pitch,
unsigned char uc, size_t width, size_t height )
{ PYHIP_CALL_GUARDED_THREADED(hipMemsetD2D8, (convert_devptr(dst), dst_pitch, uc, width, height)); }
void py_memset_d2d16(DEV_PTR dst, size_t dst_pitch,
unsigned short us, size_t width, size_t height )
{ PYHIP_CALL_GUARDED_THREADED(hipMemsetD2D16, (convert_devptr(dst), dst_pitch, us, width, height)); }
void py_memset_d2d32(DEV_PTR dst, size_t dst_pitch,
unsigned int ui, size_t width, size_t height )
{ PYHIP_CALL_GUARDED_THREADED(hipMemsetD2D32, (convert_devptr(dst), dst_pitch, ui, width, height)); }
*/
// }}}
// {{{ memory set async
void py_memset_d8_async(DEV_PTR dst, unsigned char uc, size_t n, py::object stream_py )
{
PYHIP_PARSE_STREAM_PY;
PYHIP_CALL_GUARDED_THREADED(hipMemsetD8Async, (convert_devptr(dst), uc, n, s_handle));
}
void py_memset_d16_async(DEV_PTR dst, unsigned short us, size_t n, py::object stream_py )
{
PYHIP_PARSE_STREAM_PY;
PYHIP_CALL_GUARDED_THREADED(hipMemsetD16Async, (convert_devptr(dst), us, n, s_handle));
}
void py_memset_d32_async(DEV_PTR dst, unsigned int ui, size_t n, py::object stream_py )
{
PYHIP_PARSE_STREAM_PY;
PYHIP_CALL_GUARDED_THREADED(hipMemsetD32Async, (convert_devptr(dst), ui, n, s_handle));
}
/*
void py_memset_d2d8_async(DEV_PTR dst, size_t dst_pitch,
unsigned char uc, size_t width, size_t height, py::object stream_py )
{
PYHIP_PARSE_STREAM_PY;
PYHIP_CALL_GUARDED_THREADED(hipMemsetD2D8Async, (convert_devptr(dst), dst_pitch, uc, width, height, s_handle));
}
void py_memset_d2d16_async(DEV_PTR dst, size_t dst_pitch,
unsigned short us, size_t width, size_t height, py::object stream_py )
{
PYHIP_PARSE_STREAM_PY;
PYHIP_CALL_GUARDED_THREADED(hipMemsetD2D16Async, (convert_devptr(dst), dst_pitch, us, width, height, s_handle));
}
void py_memset_d2d32_async(DEV_PTR dst, size_t dst_pitch,
unsigned int ui, size_t width, size_t height, py::object stream_py )
{
PYHIP_PARSE_STREAM_PY;
PYHIP_CALL_GUARDED_THREADED(hipMemsetD2D32Async, (convert_devptr(dst), dst_pitch, ui, width, height, s_handle));
}
*/
// }}}
// {{{ memory copies
void py_memcpy_htod(DEV_PTR dst, py::object src)
{
py_buffer_wrapper buf_wrapper;
buf_wrapper.get(src.ptr(), PyBUF_ANY_CONTIGUOUS);
PYHIP_CALL_GUARDED_THREADED(hipMemcpyHtoD,
(convert_devptr(dst), buf_wrapper.m_buf.buf, buf_wrapper.m_buf.len));
}
void py_memcpy_htod_async(DEV_PTR dst, py::object src, py::object stream_py)
{
py_buffer_wrapper buf_wrapper;
buf_wrapper.get(src.ptr(), PyBUF_ANY_CONTIGUOUS);
PYHIP_PARSE_STREAM_PY;
PYHIP_CALL_GUARDED_THREADED(hipMemcpyHtoDAsync,
(convert_devptr(dst), buf_wrapper.m_buf.buf, buf_wrapper.m_buf.len, s_handle));
}
void py_memcpy_dtoh(py::object dest, DEV_PTR src)
{
py_buffer_wrapper buf_wrapper;
buf_wrapper.get(dest.ptr(), PyBUF_ANY_CONTIGUOUS | PyBUF_WRITABLE);
PYHIP_CALL_GUARDED_THREADED(hipMemcpyDtoH,
(buf_wrapper.m_buf.buf, convert_devptr(src), buf_wrapper.m_buf.len));
}
void py_memcpy_dtoh_async(py::object dest, DEV_PTR src, py::object stream_py)
{
py_buffer_wrapper buf_wrapper;
buf_wrapper.get(dest.ptr(), PyBUF_ANY_CONTIGUOUS | PyBUF_WRITABLE);
PYHIP_PARSE_STREAM_PY;
PYHIP_CALL_GUARDED_THREADED(hipMemcpyDtoHAsync,
(buf_wrapper.m_buf.buf, convert_devptr(src), buf_wrapper.m_buf.len, s_handle));
}
// void py_memcpy_htoa(array const &ary, unsigned int index, py::object src)
// {
// py_buffer_wrapper buf_wrapper;
// buf_wrapper.get(src.ptr(), PyBUF_ANY_CONTIGUOUS);
// PYHIP_CALL_GUARDED_THREADED(hipMemcpyHtoA,
// (ary.handle(), index, buf_wrapper.m_buf.buf, buf_wrapper.m_buf.len));
// }
// void py_memcpy_atoh(py::object dest, array const &ary, unsigned int index)
// {
// py_buffer_wrapper buf_wrapper;
// buf_wrapper.get(dest.ptr(), PyBUF_ANY_CONTIGUOUS | PyBUF_WRITABLE);
// PYHIP_CALL_GUARDED_THREADED(hipMemcpyAtoH,
// (buf_wrapper.m_buf.buf, ary.handle(), index, buf_wrapper.m_buf.len));
// }
void py_memcpy_dtod(DEV_PTR dest, DEV_PTR src,
unsigned int byte_count)
{ PYHIP_CALL_GUARDED_THREADED(hipMemcpyDtoD, (convert_devptr(dest), convert_devptr(src), byte_count)); }
void py_memcpy_dtod_async(DEV_PTR dest, DEV_PTR src,
unsigned int byte_count, py::object stream_py)
{
PYHIP_PARSE_STREAM_PY;
PYHIP_CALL_GUARDED_THREADED(hipMemcpyDtoDAsync,
(convert_devptr(dest), convert_devptr(src), byte_count, s_handle));
}
module *module_from_buffer(py::object buffer, py::object py_options,
py::object message_handler)
{
const char *mod_buf;
PYHIP_BUFFER_SIZE_T len;
if (PyObject_AsCharBuffer(buffer.ptr(), &mod_buf, &len))
throw py::error_already_set();
hipModule_t mod;
// #if CUDAPP_CUDA_VERSION >= 2010
const size_t buf_size = 32768;
char info_buf[buf_size], error_buf[buf_size];
std::vector<hipJitOption> options;
std::vector<void *> option_values;
#define ADD_OPTION_PTR(KEY, PTR) \
{ \
options.push_back(KEY); \
option_values.push_back(PTR); \
}
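// ADD_OPTION_PTR accumulates matched (option, value) pairs that are
// handed to hipModuleLoadDataEx below.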
ADD_OPTION_PTR(hipJitOptionInfoLogBuffer, info_buf);
ADD_OPTION_PTR(hipJitOptionInfoLogBufferSizeBytes, (void *) buf_size);
ADD_OPTION_PTR(hipJitOptionErrorLogBuffer, error_buf);
ADD_OPTION_PTR(hipJitOptionErrorLogBufferSizeBytes, (void *) buf_size);
PYTHON_FOREACH(key_value, py_options)
ADD_OPTION_PTR(
py::extract<hipJitOption>(key_value[0]),
(void *) py::extract<intptr_t>(key_value[1])());
#undef ADD_OPTION_PTR
PYHIP_PRINT_CALL_TRACE("hipModuleLoadDataEx");
hipError_t cu_status_code;
cu_status_code = hipModuleLoadDataEx(&mod, mod_buf, (unsigned int) options.size(),
const_cast<hipJitOption *>(&*options.begin()),
const_cast<void **>(&*option_values.begin()));
size_t info_buf_size = size_t(option_values[1]);
size_t error_buf_size = size_t(option_values[3]);
if (message_handler != py::object())
message_handler(cu_status_code == hipSuccess,
std::string(info_buf, info_buf_size),
std::string(error_buf, error_buf_size));
if (cu_status_code != hipSuccess)
throw pyhip::error("hipModuleLoadDataEx", cu_status_code,
std::string(error_buf, error_buf_size).c_str());
// #else
// if (py::len(py_options))
// throw pycuda::error("module_from_buffer", CUDA_ERROR_INVALID_VALUE,
// "non-empty options argument only supported on CUDA 2.1 and newer");
// CUDAPP_CALL_GUARDED(cuModuleLoadData, (&mod, mod_buf));
// #endif
return new module(mod);
}
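// Python-side usage sketch (hypothetical call, using the bindings registered
// below; "my_kernel" is an illustrative name):
//
//   mod = _driver.module_from_buffer(code_bytes)
//   fn = mod.get_function("my_kernel")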
template <class T>
PyObject *mem_obj_to_long(T const &mo)
{
#if defined(_WIN32) && defined(_WIN64)
return PyLong_FromUnsignedLongLong((unsigned long long) mo);
#else
return PyLong_FromVoidPtr((hipDeviceptr_t) mo);
#endif
}
template <class Allocation>
py::handle<> numpy_empty(py::object shape, py::object dtype,
py::object order_py, unsigned par1)
{
PyArray_Descr *tp_descr;
if (PyArray_DescrConverter(dtype.ptr(), &tp_descr) != NPY_SUCCEED)
throw py::error_already_set();
py::extract<npy_intp> shape_as_int(shape);
std::vector<npy_intp> dims;
if (shape_as_int.check())
dims.push_back(shape_as_int());
else
std::copy(
py::stl_input_iterator<npy_intp>(shape),
py::stl_input_iterator<npy_intp>(),
back_inserter(dims));
std::auto_ptr<Allocation> alloc(
new Allocation(
tp_descr->elsize*pyhip::size_from_dims(dims.size(), &dims.front()),
par1)
);
NPY_ORDER order = PyArray_CORDER;
PyArray_OrderConverter(order_py.ptr(), &order);
int ary_flags = 0;
if (order == PyArray_FORTRANORDER)
ary_flags |= NPY_FARRAY;
else if (order == PyArray_CORDER)
ary_flags |= NPY_CARRAY;
else
throw pyhip::error("numpy_empty", hipErrorInvalidValue,
"unrecognized order specifier");
py::handle<> result = py::handle<>(PyArray_NewFromDescr(
&PyArray_Type, tp_descr,
int(dims.size()), &dims.front(), /*strides*/ NULL,
alloc->data(), ary_flags, /*obj*/NULL));
py::handle<> alloc_py(handle_from_new_ptr(alloc.release()));
PyArray_BASE(result.get()) = alloc_py.get();
Py_INCREF(alloc_py.get());
return result;
}
py::handle<> register_host_memory(py::object ary, unsigned flags)
{
if (!PyArray_Check(ary.ptr()))
throw pyhip::error("register_host_memory", hipErrorInvalidValue,
"ary argument is not a numpy array");
if (!PyArray_ISCONTIGUOUS(ary.ptr()))
throw pyhip::error("register_host_memory", hipErrorInvalidValue,
"ary argument is not contiguous");
std::auto_ptr<registered_host_memory> regmem(
new registered_host_memory(
PyArray_DATA(ary.ptr()), PyArray_NBYTES(ary.ptr()), flags, ary));
PyObject *new_array_ptr = PyArray_FromInterface(ary.ptr());
if (new_array_ptr == Py_NotImplemented)
throw pyhip::error("register_host_memory", hipErrorInvalidValue,
"ary argument does not expose array interface");
py::handle<> result(new_array_ptr);
py::handle<> regmem_py(handle_from_new_ptr(regmem.release()));
PyArray_BASE(result.get()) = regmem_py.get();
Py_INCREF(regmem_py.get());
return result;
}
}
void pyhip_expose_tools();
static bool import_numpy_helper()
{
import_array1(false);
return true;
}
BOOST_PYTHON_MODULE(_driver)
{
if (!import_numpy_helper())
throw py::error_already_set();
py::def("get_version", hip_version);
py::def("get_driver_version", pyhip::get_driver_version);
#define DECLARE_EXC(NAME, BASE) \
Hip##NAME = py::handle<>(PyErr_NewException("pyhip._driver." #NAME, BASE, NULL)); \
py::scope().attr(#NAME) = Hip##NAME;
{
DECLARE_EXC(Error, NULL);
DECLARE_EXC(MemoryError, HipError.get());
DECLARE_EXC(LogicError, HipError.get());
DECLARE_EXC(LaunchError, HipError.get());
DECLARE_EXC(RuntimeError, HipError.get());
py::register_exception_translator<pyhip::error>(translate_hip_error);
}
py::enum_<unsigned int>("ctx_flags")
.value("SCHED_AUTO", hipDeviceScheduleAuto)
.value("SCHED_SPIN", hipDeviceScheduleSpin)
.value("SCHED_YIELD", hipDeviceScheduleYield)
.value("SCHED_MASK", hipDeviceScheduleMask)
.value("BLOCKING_SYNC", hipDeviceScheduleBlockingSync)
.value("SCHED_BLOCKING_SYNC", hipDeviceScheduleBlockingSync)
.value("BLOCKING_SYNC", hipDeviceScheduleBlockingSync)
.value("SCHED_BLOCKING_SYNC", hipDeviceScheduleBlockingSync)
.value("MAP_HOST", hipDeviceMapHost)
.value("LMEM_RESIZE_TO_MAX", hipDeviceLmemResizeToMax)
;
/*
py::enum_<unsigned int>("event_flags")
.value("DEFAULT", hipEventDefault)
.value("BLOCKING_SYNC", hipEventBlockingSync)
.value("DISABLE_TIMING", hipEventDisableTiming)
.value("INTERPROCESS", hipEventInterprocess)
;
*/
py::enum_<hipArray_Format>("array_format")
.value("UNSIGNED_INT8", HIP_AD_FORMAT_UNSIGNED_INT8)
.value("UNSIGNED_INT16", HIP_AD_FORMAT_UNSIGNED_INT16)
.value("UNSIGNED_INT32", HIP_AD_FORMAT_UNSIGNED_INT32)
.value("SIGNED_INT8" , HIP_AD_FORMAT_SIGNED_INT8)
.value("SIGNED_INT16" , HIP_AD_FORMAT_SIGNED_INT16)
.value("SIGNED_INT32" , HIP_AD_FORMAT_SIGNED_INT32)
.value("HALF" , HIP_AD_FORMAT_HALF)
.value("FLOAT" , HIP_AD_FORMAT_FLOAT)
;
/*{
py::class_<array3d_flags> cls("array3d_flags", py::no_init);
// deprecated
cls.attr("ARRAY3D_LAYERED") = hipArrayLayered;
cls.attr("SURFACE_LDST") = hipArraySurfaceLoadStore;
cls.attr("CUBEMAP") = hipArrayCubemap;
cls.attr("TEXTURE_GATHER") = hipArrayTextureGather;
}*/
py::enum_<hipTextureAddressMode>("address_mode")
.value("WRAP", hipAddressModeWrap)
.value("CLAMP", hipAddressModeClamp)
.value("MIRROR", hipAddressModeMirror)
.value("BORDER", hipAddressModeBorder)
;
py::enum_<hipTextureFilterMode>("filter_mode")
.value("POINT", hipFilterModePoint)
.value("LINEAR", hipFilterModeLinear)
;
py::enum_<hipDeviceAttribute_t>("device_attribute")
.value("MAX_THREADS_PER_BLOCK", hipDeviceAttributeMaxThreadsPerBlock)
.value("MAX_BLOCK_DIM_X", hipDeviceAttributeMaxBlockDimX)
.value("MAX_BLOCK_DIM_Y", hipDeviceAttributeMaxBlockDimY)
.value("MAX_BLOCK_DIM_Z", hipDeviceAttributeMaxBlockDimZ)
.value("MAX_GRID_DIM_X", hipDeviceAttributeMaxGridDimX)
.value("MAX_GRID_DIM_Y", hipDeviceAttributeMaxGridDimY)
.value("MAX_GRID_DIM_Z", hipDeviceAttributeMaxGridDimZ)
.value("MAX_SHARED_MEMORY_PER_BLOCK", hipDeviceAttributeMaxSharedMemoryPerBlock)
.value("SHARED_MEMORY_PER_BLOCK", hipDeviceAttributeMaxSharedMemoryPerBlock)
.value("TOTAL_CONSTANT_MEMORY", hipDeviceAttributeTotalConstantMemory)
.value("WARP_SIZE", hipDeviceAttributeWarpSize)
.value("MAX_PITCH", hipDeviceAttributeMaxPitch)
.value("MAX_REGISTERS_PER_BLOCK", hipDeviceAttributeMaxRegistersPerBlock)
.value("REGISTERS_PER_BLOCK", hipDeviceAttributeMaxRegistersPerBlock)
.value("CLOCK_RATE", hipDeviceAttributeClockRate)
.value("TEXTURE_ALIGNMENT", hipDeviceAttributeTextureAlignment)
.value("MULTIPROCESSOR_COUNT", hipDeviceAttributeMultiprocessorCount)
.value("KERNEL_EXEC_TIMEOUT", hipDeviceAttributeKernelExecTimeout)
.value("INTEGRATED", hipDeviceAttributeIntegrated)
.value("CAN_MAP_HOST_MEMORY", hipDeviceAttributeCanMapHostMemory)
.value("COMPUTE_MODE", hipDeviceAttributeComputeMode)
.value("MAXIMUM_TEXTURE1D_WIDTH", hipDeviceAttributeMaxTexture1DWidth)
.value("MAXIMUM_TEXTURE2D_WIDTH", hipDeviceAttributeMaxTexture2DWidth)
.value("MAXIMUM_TEXTURE2D_HEIGHT", hipDeviceAttributeMaxTexture2DHeight)
.value("MAXIMUM_TEXTURE3D_WIDTH", hipDeviceAttributeMaxTexture3DWidth)
.value("MAXIMUM_TEXTURE3D_HEIGHT", hipDeviceAttributeMaxTexture3DHeight)
.value("MAXIMUM_TEXTURE3D_DEPTH", hipDeviceAttributeMaxTexture3DDepth)
.value("CONCURRENT_KERNELS", hipDeviceAttributeConcurrentKernels)
.value("ECC_ENABLED", hipDeviceAttributeEccEnabled)
.value("PCI_BUS_ID", hipDeviceAttributePciBusId)
.value("PCI_DEVICE_ID", hipDeviceAttributePciDeviceId)
.value("MEMORY_CLOCK_RATE", hipDeviceAttributeMemoryClockRate)
.value("GLOBAL_MEMORY_BUS_WIDTH", hipDeviceAttributeMemoryBusWidth)
.value("L2_CACHE_SIZE", hipDeviceAttributeL2CacheSize)
.value("MAX_THREADS_PER_MULTIPROCESSOR", hipDeviceAttributeMaxThreadsPerMultiProcessor)
.value("COMPUTE_CAPABILITY_MAJOR", hipDeviceAttributeComputeCapabilityMajor)
.value("COMPUTE_CAPABILITY_MINOR", hipDeviceAttributeComputeCapabilityMinor)
.value("MAX_SHARED_MEMORY_PER_MULTIPROCESSOR", hipDeviceAttributeMaxSharedMemoryPerMultiprocessor)
.value("MULTI_GPU_BOARD", hipDeviceAttributeIsMultiGpuBoard)
;
py::enum_<hipFuncCache_t>("func_cache")
.value("PREFER_NONE", hipFuncCachePreferNone)
.value("PREFER_SHARED", hipFuncCachePreferShared)
.value("PREFER_L1", hipFuncCachePreferL1)
.value("PREFER_EQUAL", hipFuncCachePreferEqual)
;
py::enum_<hipSharedMemConfig>("shared_config")
.value("DEFAULT_BANK_SIZE", hipSharedMemBankSizeDefault)
.value("FOUR_BYTE_BANK_SIZE", hipSharedMemBankSizeFourByte)
.value("EIGHT_BYTE_BANK_SIZE", hipSharedMemBankSizeEightByte)
;
py::enum_<hipFunction_attribute>("function_attribute")
.value("MAX_THREADS_PER_BLOCK", HIP_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK)
.value("SHARED_SIZE_BYTES", HIP_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES)
.value("CONST_SIZE_BYTES", HIP_FUNC_ATTRIBUTE_CONST_SIZE_BYTES)
.value("LOCAL_SIZE_BYTES", HIP_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES)
.value("NUM_REGS", HIP_FUNC_ATTRIBUTE_NUM_REGS)
.value("PTX_VERSION", HIP_FUNC_ATTRIBUTE_PTX_VERSION)
.value("BINARY_VERSION", HIP_FUNC_ATTRIBUTE_BINARY_VERSION)
.value("MAX", HIP_FUNC_ATTRIBUTE_MAX)
;
py::enum_<hipMemoryType>("memory_type")
.value("HOST", hipMemoryTypeHost)
.value("DEVICE", hipMemoryTypeDevice)
.value("ARRAY", hipMemoryTypeArray)
.value("UNIFIED", hipMemoryTypeUnified)
;
py::enum_<hipComputeMode>("compute_mode")
.value("DEFAULT", hipComputeModeDefault)
.value("EXCLUSIVE", hipComputeModeExclusive)
.value("PROHIBITED", hipComputeModeProhibited)
.value("EXCLUSIVE_PROCESS", hipComputeModeExclusiveProcess)
;
py::enum_<hipJitOption>("jit_option")
.value("MAX_REGISTERS", hipJitOptionMaxRegisters)
.value("THREADS_PER_BLOCK", hipJitOptionThreadsPerBlock)
.value("WALL_TIME", hipJitOptionWallTime)
.value("INFO_LOG_BUFFER", hipJitOptionInfoLogBuffer)
.value("INFO_LOG_BUFFER_SIZE_BYTES", hipJitOptionInfoLogBufferSizeBytes)
.value("ERROR_LOG_BUFFER", hipJitOptionErrorLogBuffer)
.value("ERROR_LOG_BUFFER_SIZE_BYTES", hipJitOptionErrorLogBufferSizeBytes)
.value("OPTIMIZATION_LEVEL", hipJitOptionOptimizationLevel)
.value("TARGET_FROM_CUCONTEXT", hipJitOptionTargetFromContext)
.value("TARGET", hipJitOptionTarget)
.value("FALLBACK_STRATEGY", hipJitOptionFallbackStrategy)
;
{
py::class_<host_alloc_flags> cls("host_alloc_flags", py::no_init);
cls.attr("PORTABLE") = hipHostMallocPortable;
cls.attr("DEVICEMAP") = hipHostMallocMapped;
cls.attr("WRITECOMBINED") = hipHostMallocWriteCombined;
}
{
py::class_<mem_host_register_flags> cls("mem_host_register_flags", py::no_init);
cls.attr("PORTABLE") = hipHostRegisterPortable;
cls.attr("DEVICEMAP") = hipHostRegisterMapped;
}
py::enum_<hipLimit_t>("limit")
.value("MALLOC_HEAP_SIZE", hipLimitMallocHeapSize)
;
py::def("init", init,
py::arg("flags")=0);
// {{{ device
{
typedef device cl;
py::class_<cl>("Device", py::no_init)
.def("__init__", py::make_constructor(make_device))
//.def("__init__", py::make_constructor(make_device_from_pci_bus_id))
.DEF_SIMPLE_METHOD(count)
.staticmethod("count")
.DEF_SIMPLE_METHOD(name)
.DEF_SIMPLE_METHOD(properties)
.DEF_SIMPLE_METHOD(pci_bus_id)
.DEF_SIMPLE_METHOD(compute_capability)
.DEF_SIMPLE_METHOD(total_memory)
.def("get_attribute", device_get_attribute)
.def(py::self == py::self)
.def(py::self != py::self)
.def("__hash__", &cl::hash)
.def("make_context", &cl::make_context,
(py::args("self"), py::args("flags")=0))
.def("retain_primary_context", &cl::retain_primary_context,
(py::args("self")))
;
}
// }}}
{
typedef hipDeviceProp_t cl;
py::class_<cl>("DeviceProps", py::no_init)
//.def("__init__", py::make_constructor(make_device_from_pci_bus_id))
.DEF_SIMPLE_RO_MEMBER_ANY(name)
.DEF_SIMPLE_RO_MEMBER_ANY(totalGlobalMem)
.DEF_SIMPLE_RO_MEMBER_ANY(sharedMemPerBlock)
.DEF_SIMPLE_RO_MEMBER_ANY(regsPerBlock)
.DEF_SIMPLE_RO_MEMBER_ANY(warpSize)
.DEF_SIMPLE_RO_MEMBER_ANY(maxThreadsPerBlock)
.DEF_SIMPLE_RO_MEMBER_ANY(maxThreadsDim)
.DEF_SIMPLE_RO_MEMBER_ANY(maxGridSize)
.DEF_SIMPLE_RO_MEMBER_ANY(clockRate)
.DEF_SIMPLE_RO_MEMBER_ANY(memoryClockRate)
.DEF_SIMPLE_RO_MEMBER_ANY(memoryBusWidth)
.DEF_SIMPLE_RO_MEMBER_ANY(totalConstMem)
.DEF_SIMPLE_RO_MEMBER_ANY(major)
.DEF_SIMPLE_RO_MEMBER_ANY(minor)
.DEF_SIMPLE_RO_MEMBER_ANY(multiProcessorCount)
.DEF_SIMPLE_RO_MEMBER_ANY(l2CacheSize)
.DEF_SIMPLE_RO_MEMBER_ANY(maxThreadsPerMultiProcessor)
.DEF_SIMPLE_RO_MEMBER_ANY(computeMode)
.DEF_SIMPLE_RO_MEMBER_ANY(clockInstructionRate)
.DEF_SIMPLE_RO_MEMBER_ANY(concurrentKernels)
.DEF_SIMPLE_RO_MEMBER_ANY(pciDomainID)
.DEF_SIMPLE_RO_MEMBER_ANY(pciBusID)
.DEF_SIMPLE_RO_MEMBER_ANY(pciDeviceID)
.DEF_SIMPLE_RO_MEMBER_ANY(maxSharedMemoryPerMultiProcessor)
.DEF_SIMPLE_RO_MEMBER_ANY(isMultiGpuBoard)
.DEF_SIMPLE_RO_MEMBER_ANY(canMapHostMemory)
.DEF_SIMPLE_RO_MEMBER_ANY(gcnArch)
.DEF_SIMPLE_RO_MEMBER_ANY(gcnArchName)
.DEF_SIMPLE_RO_MEMBER_ANY(integrated)
.DEF_SIMPLE_RO_MEMBER_ANY(cooperativeLaunch)
.DEF_SIMPLE_RO_MEMBER_ANY(cooperativeMultiDeviceLaunch)
.DEF_SIMPLE_RO_MEMBER_ANY(maxTexture1D)
.DEF_SIMPLE_RO_MEMBER_ANY(maxTexture2D)
.DEF_SIMPLE_RO_MEMBER_ANY(maxTexture3D)
.DEF_SIMPLE_RO_MEMBER_ANY(memPitch)
.DEF_SIMPLE_RO_MEMBER_ANY(textureAlignment)
.DEF_SIMPLE_RO_MEMBER_ANY(texturePitchAlignment)
.DEF_SIMPLE_RO_MEMBER_ANY(kernelExecTimeoutEnabled)
.DEF_SIMPLE_RO_MEMBER_ANY(ECCEnabled)
.DEF_SIMPLE_RO_MEMBER_ANY(tccDriver)
.DEF_SIMPLE_RO_MEMBER_ANY(cooperativeMultiDeviceUnmatchedFunc)
.DEF_SIMPLE_RO_MEMBER_ANY(cooperativeMultiDeviceUnmatchedGridDim)
.DEF_SIMPLE_RO_MEMBER_ANY(cooperativeMultiDeviceUnmatchedBlockDim)
.DEF_SIMPLE_RO_MEMBER_ANY(cooperativeMultiDeviceUnmatchedSharedMem)
.DEF_SIMPLE_RO_MEMBER_ANY(isLargeBar)
.DEF_SIMPLE_RO_MEMBER_ANY(asicRevision)
.DEF_SIMPLE_RO_MEMBER_ANY(managedMemory)
.DEF_SIMPLE_RO_MEMBER_ANY(directManagedMemAccessFromHost)
.DEF_SIMPLE_RO_MEMBER_ANY(concurrentManagedAccess)
.DEF_SIMPLE_RO_MEMBER_ANY(pageableMemoryAccess)
.DEF_SIMPLE_RO_MEMBER_ANY(pageableMemoryAccessUsesHostPageTables)
;
}
// {{{ context
{
typedef context cl;
py::class_<cl, shared_ptr<cl>, boost::noncopyable >("Context", py::no_init)
.def(py::self == py::self)
.def(py::self != py::self)
.def("__hash__", &cl::hash)
.def("attach", &cl::attach, (py::arg("flags")=0))
.staticmethod("attach")
.DEF_SIMPLE_METHOD(detach)
.def("push", context_push)
.DEF_SIMPLE_METHOD(pop)
.staticmethod("pop")
.DEF_SIMPLE_METHOD(get_device)
.staticmethod("get_device")
.DEF_SIMPLE_METHOD(synchronize)
.staticmethod("synchronize")
.def("get_current", (boost::shared_ptr<cl> (*)()) &cl::current_context)
.staticmethod("get_current")
// #if CUDAPP_CUDA_VERSION >= 3010
// .DEF_SIMPLE_METHOD(set_limit)
// .staticmethod("set_limit")
// .DEF_SIMPLE_METHOD(get_limit)
// .staticmethod("get_limit")
// #endif
.DEF_SIMPLE_METHOD(get_cache_config)
.staticmethod("get_cache_config")
.DEF_SIMPLE_METHOD(set_cache_config)
.staticmethod("set_cache_config")
.DEF_SIMPLE_METHOD(get_api_version)
.def("enable_peer_access", &cl::enable_peer_access,
(py::arg("peer"), py::arg("flags")=0))
.staticmethod("enable_peer_access")
.DEF_SIMPLE_METHOD(disable_peer_access)
.staticmethod("disable_peer_access")
.DEF_SIMPLE_METHOD(get_shared_config)
.staticmethod("get_shared_config")
.DEF_SIMPLE_METHOD(set_shared_config)
.staticmethod("set_shared_config")
.add_property("handle", &cl::handle_int)
;
}
// }}}
// {{{ stream
{
typedef stream cl;
py::class_<cl, boost::noncopyable, shared_ptr<cl> >
("Stream", py::init<unsigned int>(py::arg("flags")=0))
.DEF_SIMPLE_METHOD(synchronize)
.DEF_SIMPLE_METHOD(is_done)
.DEF_SIMPLE_METHOD(wait_for_event)
.add_property("handle", &cl::handle_int)
;
}
// }}}
// {{{ module
{
typedef module cl;
py::class_<cl, boost::noncopyable, shared_ptr<cl> >("Module", py::no_init)
.def("get_function", &cl::get_function, (py::args("self", "name")),
py::with_custodian_and_ward_postcall<0, 1>())
.def("get_global", &cl::get_global, (py::args("self", "name")))
// .def("get_texref", module_get_texref,
// (py::args("self", "name")),
// py::return_value_policy<py::manage_new_object>())
// #if CUDAPP_CUDA_VERSION >= 3010
// .def("get_surfref", module_get_surfref,
// (py::args("self", "name")),
// py::return_value_policy<py::manage_new_object>())
// #endif
;
}
py::def("module_from_file", module_from_file, (py::arg("filename")),
py::return_value_policy<py::manage_new_object>());
py::def("module_from_buffer", module_from_buffer,
(py::arg("buffer"),
py::arg("options")=py::list(),
py::arg("message_handler")=py::object()),
py::return_value_policy<py::manage_new_object>());
{
typedef function cl;
py::class_<cl>("Function", py::no_init)
.DEF_SIMPLE_METHOD(get_attribute)
.DEF_SIMPLE_METHOD(set_cache_config)
.def("_launch_kernel", &cl::launch_kernel)
;
}
{
typedef pointer_holder_base cl;
py::class_<pointer_holder_base_wrap, boost::noncopyable>(
"PointerHolderBase")
.def("get_pointer", py::pure_virtual(&cl::get_pointer))
.def("as_buffer", &cl::as_buffer,
(py::arg("size"), py::arg("offset")=0))
.def("__int__", &cl::operator DEV_PTR)
.def("__long__", mem_obj_to_long<cl>)
.def("__index__", mem_obj_to_long<cl>)
;
py::implicitly_convertible<pointer_holder_base, DEV_PTR>();
}
{
typedef device_allocation cl;
py::class_<cl, boost::noncopyable>("DeviceAllocation", py::no_init)
.def("__int__", &cl::operator DEV_PTR)
.def("__long__", mem_obj_to_long<cl>)
.def("__index__", mem_obj_to_long<cl>)
.def("as_buffer", &cl::as_buffer,
(py::arg("size"), py::arg("offset")=0))
.DEF_SIMPLE_METHOD(free)
;
py::implicitly_convertible<device_allocation, DEV_PTR>();
}
{
typedef host_pointer cl;
py::class_<cl, boost::noncopyable>("HostPointer", py::no_init)
.DEF_SIMPLE_METHOD(get_device_pointer)
;
}
{
typedef pagelocked_host_allocation cl;
py::class_<cl, boost::noncopyable, py::bases<host_pointer> > wrp(
"PagelockedHostAllocation", py::no_init);
wrp
.DEF_SIMPLE_METHOD(free)
.DEF_SIMPLE_METHOD(get_flags)
;
py::scope().attr("HostAllocation") = wrp;
}
{
typedef aligned_host_allocation cl;
py::class_<cl, boost::noncopyable, py::bases<host_pointer> > wrp(
"AlignedHostAllocation", py::no_init);
wrp
.DEF_SIMPLE_METHOD(free)
;
}
{
typedef managed_allocation cl;
py::class_<cl, boost::noncopyable, py::bases<device_allocation> > wrp(
"ManagedAllocation", py::no_init);
wrp
.DEF_SIMPLE_METHOD(get_device_pointer)
.def("attach", &cl::attach,
(py::arg("mem_flags"), py::arg("stream")=py::object()))
;
}
{
typedef registered_host_memory cl;
py::class_<cl, boost::noncopyable, py::bases<host_pointer> >(
"RegisteredHostMemory", py::no_init)
.def("unregister", &cl::free)
;
}
py::def("pagelocked_empty", numpy_empty<pagelocked_host_allocation>,
(py::arg("shape"), py::arg("dtype"), py::arg("order")="C",
py::arg("mem_flags")=0));
py::def("aligned_empty", numpy_empty<aligned_host_allocation>,
(py::arg("shape"), py::arg("dtype"),
py::arg("order")="C", py::arg("alignment")=4096));
py::def("managed_empty", numpy_empty<managed_allocation>,
(py::arg("shape"), py::arg("dtype"), py::arg("order")="C",
py::arg("mem_flags")=0));
py::def("register_host_memory", register_host_memory,
(py::arg("ary"), py::arg("flags")=0));
// }}}
DEF_SIMPLE_FUNCTION(mem_get_info);
py::def("mem_alloc", mem_alloc_wrap,
py::return_value_policy<py::manage_new_object>());
py::def("mem_alloc_pitch", mem_alloc_pitch_wrap,
py::args("width", "height", "access_size"));
DEF_SIMPLE_FUNCTION(mem_get_address_range);
// {{{ memset/memcpy
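  // Naming convention below: the d8/d16/d32 suffix is the element width in
  // bits; htod/dtoh/dtod abbreviate host-to-device, device-to-host and
  // device-to-device transfers respectively.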
py::def("memset_d8", py_memset_d8, py::args("dest", "data", "size"));
py::def("memset_d16", py_memset_d16, py::args("dest", "data", "size"));
py::def("memset_d32", py_memset_d32, py::args("dest", "data", "size"));
// py::def("memset_d2d8", py_memset_d2d8,
// py::args("dest", "pitch", "data", "width", "height"));
// py::def("memset_d2d16", py_memset_d2d16,
// py::args("dest", "pitch", "data", "width", "height"));
// py::def("memset_d2d32", py_memset_d2d32,
// py::args("dest", "pitch", "data", "width", "height"));
py::def("memset_d8_async", py_memset_d8_async,
(py::args("dest", "data", "size"), py::arg("stream")=py::object()));
py::def("memset_d16_async", py_memset_d16_async,
(py::args("dest", "data", "size"), py::arg("stream")=py::object()));
py::def("memset_d32_async", py_memset_d32_async,
(py::args("dest", "data", "size"), py::arg("stream")=py::object()));
// py::def("memset_d2d8_async", py_memset_d2d8_async,
// (py::args("dest", "pitch", "data", "width", "height"),
// py::arg("stream")=py::object()));
// py::def("memset_d2d16_async", py_memset_d2d16_async,
// (py::args("dest", "pitch", "data", "width", "height"),
// py::arg("stream")=py::object()));
// py::def("memset_d2d32_async", py_memset_d2d32_async,
// (py::args("dest", "pitch", "data", "width", "height"),
// py::arg("stream")=py::object()));
py::def("memcpy_htod", py_memcpy_htod,
(py::args("dest"), py::arg("src")));
py::def("memcpy_htod_async", py_memcpy_htod_async,
(py::args("dest"), py::arg("src"), py::arg("stream")=py::object()));
py::def("memcpy_dtoh", py_memcpy_dtoh,
(py::args("dest"), py::arg("src")));
py::def("memcpy_dtoh_async", py_memcpy_dtoh_async,
(py::args("dest"), py::arg("src"), py::arg("stream")=py::object()));
py::def("memcpy_dtod", py_memcpy_dtod, py::args("dest", "src", "size"));
py::def("memcpy_dtod_async", py_memcpy_dtod_async,
(py::args("dest", "src", "size"), py::arg("stream")=py::object()));
// #if CUDAPP_CUDA_VERSION >= 4000
// py::def("memcpy_peer", py_memcpy_peer,
// (py::args("dest", "src", "size"),
// py::arg("dest_context")=py::object(),
// py::arg("src_context")=py::object()));
// py::def("memcpy_peer_async", py_memcpy_peer_async,
// (py::args("dest", "src", "size"),
// py::arg("dest_context")=py::object(),
// py::arg("src_context")=py::object(),
// py::arg("stream")=py::object()));
// #endif
// DEF_SIMPLE_FUNCTION_WITH_ARGS(memcpy_dtoa,
// ("ary", "index", "src", "len"));
// DEF_SIMPLE_FUNCTION_WITH_ARGS(memcpy_atod,
// ("dest", "ary", "index", "len"));
// DEF_SIMPLE_FUNCTION_WITH_ARGS(py_memcpy_htoa,
// ("ary", "index", "src"));
// DEF_SIMPLE_FUNCTION_WITH_ARGS(py_memcpy_atoh,
// ("dest", "ary", "index"));
// DEF_SIMPLE_FUNCTION_WITH_ARGS(memcpy_atoa,
// ("dest", "dest_index", "src", "src_index", "len"));
// #if CUDAPP_CUDA_VERSION >= 4000
// #define WRAP_MEMCPY_2D_UNIFIED_SETTERS \
// .DEF_SIMPLE_METHOD(set_src_unified) \
// .DEF_SIMPLE_METHOD(set_dst_unified)
// #else
// #define WRAP_MEMCPY_2D_UNIFIED_SETTERS /* empty */
// #endif
// #define WRAP_MEMCPY_2D_PROPERTIES \
// .def_readwrite("src_x_in_bytes", &cl::srcXInBytes) \
// .def_readwrite("src_y", &cl::srcY) \
// .def_readwrite("src_memory_type", &cl::srcMemoryType) \
// .def_readwrite("src_device", &cl::srcDevice) \
// .def_readwrite("src_pitch", &cl::srcPitch) \
// \
// .DEF_SIMPLE_METHOD(set_src_host) \
// .DEF_SIMPLE_METHOD(set_src_array) \
// .DEF_SIMPLE_METHOD(set_src_device) \
// \
// .def_readwrite("dst_x_in_bytes", &cl::dstXInBytes) \
// .def_readwrite("dst_y", &cl::dstY) \
// .def_readwrite("dst_memory_type", &cl::dstMemoryType) \
// .def_readwrite("dst_device", &cl::dstDevice) \
// .def_readwrite("dst_pitch", &cl::dstPitch) \
// \
// .DEF_SIMPLE_METHOD(set_dst_host) \
// .DEF_SIMPLE_METHOD(set_dst_array) \
// .DEF_SIMPLE_METHOD(set_dst_device) \
// \
// .def_readwrite("width_in_bytes", &cl::WidthInBytes) \
// .def_readwrite("height", &cl::Height) \
// \
// WRAP_MEMCPY_2D_UNIFIED_SETTERS
// {
// typedef memcpy_2d cl;
// py::class_<cl>("Memcpy2D")
// WRAP_MEMCPY_2D_PROPERTIES
// .def("__call__", &cl::execute, py::args("self", "aligned"))
// .def("__call__", &cl::execute_async)
// ;
// }
// #if CUDAPP_CUDA_VERSION >= 2000
// #define WRAP_MEMCPY_3D_PROPERTIES \
// WRAP_MEMCPY_2D_PROPERTIES \
// .def_readwrite("src_z", &cl::srcZ) \
// .def_readwrite("src_lod", &cl::srcLOD) \
// .def_readwrite("src_height", &cl::srcHeight) \
// \
// .def_readwrite("dst_z", &cl::dstZ) \
// .def_readwrite("dst_lod", &cl::dstLOD) \
// .def_readwrite("dst_height", &cl::dstHeight) \
// \
// .def_readwrite("depth", &cl::Depth) \
// {
// typedef memcpy_3d cl;
// py::class_<cl>("Memcpy3D")
// WRAP_MEMCPY_3D_PROPERTIES
// .def("__call__", &cl::execute)
// .def("__call__", &cl::execute_async)
// ;
// }
// #endif
// #if CUDAPP_CUDA_VERSION >= 4000
// {
// typedef memcpy_3d_peer cl;
// py::class_<cl>("Memcpy3DPeer")
// WRAP_MEMCPY_3D_PROPERTIES
// .DEF_SIMPLE_METHOD(set_src_context)
// .DEF_SIMPLE_METHOD(set_dst_context)
// .def("__call__", &cl::execute)
// .def("__call__", &cl::execute_async)
// ;
// }
// #endif
// }}}
// {{{ event
{
typedef event cl;
py::class_<cl, boost::noncopyable>
("Event", py::init<py::optional<unsigned int> >(py::arg("flags")))
.def("record", &cl::record,
py::arg("stream")=py::object(), py::return_self<>())
.def("synchronize", &cl::synchronize, py::return_self<>())
.DEF_SIMPLE_METHOD(query)
.DEF_SIMPLE_METHOD(time_since)
.DEF_SIMPLE_METHOD(time_till)
// #if CUDAPP_CUDA_VERSION >= 4010 && PY_VERSION_HEX >= 0x02060000
// .DEF_SIMPLE_METHOD(ipc_handle)
// .def("from_ipc_handle", event_from_ipc_handle,
// py::return_value_policy<py::manage_new_object>())
// .staticmethod("from_ipc_handle")
// #endif
;
}
// }}}
// {{{ arrays
// {
// typedef CUDA_ARRAY_DESCRIPTOR cl;
// py::class_<cl>("ArrayDescriptor")
// .def_readwrite("width", &cl::Width)
// .def_readwrite("height", &cl::Height)
// .def_readwrite("format", &cl::Format)
// .def_readwrite("num_channels", &cl::NumChannels)
// ;
// }
// #if CUDAPP_CUDA_VERSION >= 2000
// {
// typedef CUDA_ARRAY3D_DESCRIPTOR cl;
// py::class_<cl>("ArrayDescriptor3D")
// .def_readwrite("width", &cl::Width)
// .def_readwrite("height", &cl::Height)
// .def_readwrite("depth", &cl::Depth)
// .def_readwrite("format", &cl::Format)
// .def_readwrite("num_channels", &cl::NumChannels)
// .def_readwrite("flags", &cl::Flags)
// ;
// }
// #endif
// {
// typedef array cl;
// py::class_<cl, shared_ptr<cl>, boost::noncopyable>
// ("Array", py::init<const CUDA_ARRAY_DESCRIPTOR &>())
// .DEF_SIMPLE_METHOD(free)
// .DEF_SIMPLE_METHOD(get_descriptor)
// #if CUDAPP_CUDA_VERSION >= 2000
// .def(py::init<const CUDA_ARRAY3D_DESCRIPTOR &>())
// .DEF_SIMPLE_METHOD(get_descriptor_3d)
// #endif
// .add_property("handle", &cl::handle_int)
// ;
// }
// // }}}
// // {{{ texture reference
// {
// typedef texture_reference cl;
// py::class_<cl, boost::noncopyable>("TextureReference")
// .DEF_SIMPLE_METHOD(set_array)
// .def("set_address", &cl::set_address,
// (py::arg("devptr"), py::arg("bytes"), py::arg("allow_offset")=false))
// #if CUDAPP_CUDA_VERSION >= 2020
// .DEF_SIMPLE_METHOD_WITH_ARGS(set_address_2d, ("devptr", "descr", "pitch"))
// #endif
// .DEF_SIMPLE_METHOD_WITH_ARGS(set_format, ("format", "num_components"))
// .DEF_SIMPLE_METHOD_WITH_ARGS(set_address_mode, ("dim", "am"))
// .DEF_SIMPLE_METHOD(set_filter_mode)
// .DEF_SIMPLE_METHOD(set_flags)
// .DEF_SIMPLE_METHOD(get_address)
// .def("get_array", &cl::get_array,
// py::return_value_policy<py::manage_new_object>())
// .DEF_SIMPLE_METHOD(get_address_mode)
// .DEF_SIMPLE_METHOD(get_filter_mode)
// #if CUDAPP_CUDA_VERSION >= 2000
// .DEF_SIMPLE_METHOD(get_format)
// #endif
// .DEF_SIMPLE_METHOD(get_flags)
// ;
// }
// // }}}
// // {{{ surface reference
// #if CUDAPP_CUDA_VERSION >= 3010
// {
// typedef surface_reference cl;
// py::class_<cl, boost::noncopyable>("SurfaceReference", py::no_init)
// .def("set_array", &cl::set_array,
// (py::arg("array"), py::arg("flags")=0))
// .def("get_array", &cl::get_array,
// py::return_value_policy<py::manage_new_object>())
// ;
// }
// #endif
// // }}}
// // {{{ profiler control
// #if CUDAPP_CUDA_VERSION >= 4000
// DEF_SIMPLE_FUNCTION(initialize_profiler);
// DEF_SIMPLE_FUNCTION(start_profiler);
// DEF_SIMPLE_FUNCTION(stop_profiler);
// #endif
// // }}}
// py::scope().attr("TRSA_OVERRIDE_FORMAT") = CU_TRSA_OVERRIDE_FORMAT;
// py::scope().attr("TRSF_READ_AS_INTEGER") = CU_TRSF_READ_AS_INTEGER;
// py::scope().attr("TRSF_NORMALIZED_COORDINATES") = CU_TRSF_NORMALIZED_COORDINATES;
// py::scope().attr("TR_DEFAULT") = CU_PARAM_TR_DEFAULT;
// DEF_SIMPLE_FUNCTION(have_gl_ext);
pyhip_expose_tools();
}
|
{"hexsha": "3a6cb09d29dd191f4f9d53fdab4e18f42a7218a7", "size": 44195, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/wrapper/wrap_hipdriv.cpp", "max_stars_repo_name": "ahmed-f-alrefaie/pyhip", "max_stars_repo_head_hexsha": "713280b65ca5a375cdf4d303330e4ec10df606e7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/wrapper/wrap_hipdriv.cpp", "max_issues_repo_name": "ahmed-f-alrefaie/pyhip", "max_issues_repo_head_hexsha": "713280b65ca5a375cdf4d303330e4ec10df606e7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/wrapper/wrap_hipdriv.cpp", "max_forks_repo_name": "ahmed-f-alrefaie/pyhip", "max_forks_repo_head_hexsha": "713280b65ca5a375cdf4d303330e4ec10df606e7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2862684251, "max_line_length": 125, "alphanum_fraction": 0.618916167, "num_tokens": 10977}
|
\section{Overview}
\label{s:overview}
We start with an overview of how \sys works by describing
how it can prove the equivalence of two functions: a
recursive function |sumTo| that adds up the numbers
from |1| to |n|, and a \emph{tail-recursive} variant
|sumToTR| that uses a helper |loop| with an accumulator
|acc| to compute the same result.
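Intuitively, the two agree because the helper satisfies the invariant
|loop n acc = acc + sumTo n| for every non-negative |n|, a fact that
follows by induction on |n|.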
\begin{figure*}[t!]
\centering
\begin{minipage}[t]{0.40\textwidth}
% \begin{minted}[linenos, firstnumber=1,breaklines=true]{ocaml}
\begin{ocode}
(* -- Recursive Summation -- *)
let rec sumTo n =
if n = 0
then 0
else n + sumTo (n - 1)
\end{ocode}
\end{minipage}
\hspace{0.14in}
\begin{minipage}[t]{0.49\textwidth}
% \begin{minted}[linenos, firstnumber=1,breaklines=true]{ocaml}
\begin{ocode}
(* -- Tail-Recursive Summation -- *)
let sumToTR n =
let rec loop n acc =
if n = 0
then acc
else loop (n - 1) (acc + n)
in
loop n 0
\end{ocode}
\end{minipage}
\caption{\textbf{(L)} Function that sums the numbers to $n$
and \textbf{(R)} Tail-recursive accumulator based variant.
}
\label{fig:example}
\end{figure*}
|
{"hexsha": "b81c3670fe170e5bf0c0539848b64dc5d7485a28", "size": 1097, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "papers/syn-refinements/overview.tex", "max_stars_repo_name": "qizhou92/icfp", "max_stars_repo_head_hexsha": "2f84c30e8f564f4bec2933a8b736ae58fd91821e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "papers/syn-refinements/overview.tex", "max_issues_repo_name": "qizhou92/icfp", "max_issues_repo_head_hexsha": "2f84c30e8f564f4bec2933a8b736ae58fd91821e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "papers/syn-refinements/overview.tex", "max_forks_repo_name": "qizhou92/icfp", "max_forks_repo_head_hexsha": "2f84c30e8f564f4bec2933a8b736ae58fd91821e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.3777777778, "max_line_length": 67, "alphanum_fraction": 0.6809480401, "num_tokens": 367}
|
[STATEMENT]
lemma exactly_result:
assumes "exactly x s = Inr (y, r)"
shows "\<exists> w. s = x @ w @ r \<and> y = x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>w. s = x @ w @ r \<and> y = x
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<exists>w. s = x @ w @ r \<and> y = x
[PROOF STEP]
{
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<exists>w. s = x @ w @ r \<and> y = x
[PROOF STEP]
fix a b
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<exists>w. s = x @ w @ r \<and> y = x
[PROOF STEP]
assume "exactly_aux a b x s = Inr (y,r)"
[PROOF STATE]
proof (state)
this:
exactly_aux a b x s = Inr (y, r)
goal (1 subgoal):
1. \<exists>w. s = x @ w @ r \<and> y = x
[PROOF STEP]
hence "\<exists> w. s = x @ w @ r \<and> y = a"
[PROOF STATE]
proof (prove)
using this:
exactly_aux a b x s = Inr (y, r)
goal (1 subgoal):
1. \<exists>w. s = x @ w @ r \<and> y = a
[PROOF STEP]
proof (induct x arbitrary: s)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>s. exactly_aux a b [] s = Inr (y, r) \<Longrightarrow> \<exists>w. s = [] @ w @ r \<and> y = a
2. \<And>aa x s. \<lbrakk>\<And>s. exactly_aux a b x s = Inr (y, r) \<Longrightarrow> \<exists>w. s = x @ w @ r \<and> y = a; exactly_aux a b (aa # x) s = Inr (y, r)\<rbrakk> \<Longrightarrow> \<exists>w. s = (aa # x) @ w @ r \<and> y = a
[PROOF STEP]
case Nil
[PROOF STATE]
proof (state)
this:
exactly_aux a b [] s = Inr (y, r)
goal (2 subgoals):
1. \<And>s. exactly_aux a b [] s = Inr (y, r) \<Longrightarrow> \<exists>w. s = [] @ w @ r \<and> y = a
2. \<And>aa x s. \<lbrakk>\<And>s. exactly_aux a b x s = Inr (y, r) \<Longrightarrow> \<exists>w. s = x @ w @ r \<and> y = a; exactly_aux a b (aa # x) s = Inr (y, r)\<rbrakk> \<Longrightarrow> \<exists>w. s = (aa # x) @ w @ r \<and> y = a
[PROOF STEP]
thus ?case
[PROOF STATE]
proof (prove)
using this:
exactly_aux a b [] s = Inr (y, r)
goal (1 subgoal):
1. \<exists>w. s = [] @ w @ r \<and> y = a
[PROOF STEP]
using trim[of s]
[PROOF STATE]
proof (prove)
using this:
exactly_aux a b [] s = Inr (y, r)
\<exists>w. s = w @ trim s
goal (1 subgoal):
1. \<exists>w. s = [] @ w @ r \<and> y = a
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<exists>w. s = [] @ w @ r \<and> y = a
goal (1 subgoal):
1. \<And>aa x s. \<lbrakk>\<And>s. exactly_aux a b x s = Inr (y, r) \<Longrightarrow> \<exists>w. s = x @ w @ r \<and> y = a; exactly_aux a b (aa # x) s = Inr (y, r)\<rbrakk> \<Longrightarrow> \<exists>w. s = (aa # x) @ w @ r \<and> y = a
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>aa x s. \<lbrakk>\<And>s. exactly_aux a b x s = Inr (y, r) \<Longrightarrow> \<exists>w. s = x @ w @ r \<and> y = a; exactly_aux a b (aa # x) s = Inr (y, r)\<rbrakk> \<Longrightarrow> \<exists>w. s = (aa # x) @ w @ r \<and> y = a
[PROOF STEP]
case (Cons c xs)
[PROOF STATE]
proof (state)
this:
exactly_aux a b xs ?s = Inr (y, r) \<Longrightarrow> \<exists>w. ?s = xs @ w @ r \<and> y = a
exactly_aux a b (c # xs) s = Inr (y, r)
goal (1 subgoal):
1. \<And>aa x s. \<lbrakk>\<And>s. exactly_aux a b x s = Inr (y, r) \<Longrightarrow> \<exists>w. s = x @ w @ r \<and> y = a; exactly_aux a b (aa # x) s = Inr (y, r)\<rbrakk> \<Longrightarrow> \<exists>w. s = (aa # x) @ w @ r \<and> y = a
[PROOF STEP]
note xs = this
[PROOF STATE]
proof (state)
this:
exactly_aux a b xs ?s = Inr (y, r) \<Longrightarrow> \<exists>w. ?s = xs @ w @ r \<and> y = a
exactly_aux a b (c # xs) s = Inr (y, r)
goal (1 subgoal):
1. \<And>aa x s. \<lbrakk>\<And>s. exactly_aux a b x s = Inr (y, r) \<Longrightarrow> \<exists>w. s = x @ w @ r \<and> y = a; exactly_aux a b (aa # x) s = Inr (y, r)\<rbrakk> \<Longrightarrow> \<exists>w. s = (aa # x) @ w @ r \<and> y = a
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>w. s = (c # xs) @ w @ r \<and> y = a
[PROOF STEP]
proof (cases s)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. s = [] \<Longrightarrow> \<exists>w. s = (c # xs) @ w @ r \<and> y = a
2. \<And>aa list. s = aa # list \<Longrightarrow> \<exists>w. s = (c # xs) @ w @ r \<and> y = a
[PROOF STEP]
case Nil
[PROOF STATE]
proof (state)
this:
s = []
goal (2 subgoals):
1. s = [] \<Longrightarrow> \<exists>w. s = (c # xs) @ w @ r \<and> y = a
2. \<And>aa list. s = aa # list \<Longrightarrow> \<exists>w. s = (c # xs) @ w @ r \<and> y = a
[PROOF STEP]
with xs
[PROOF STATE]
proof (chain)
picking this:
exactly_aux a b xs ?s = Inr (y, r) \<Longrightarrow> \<exists>w. ?s = xs @ w @ r \<and> y = a
exactly_aux a b (c # xs) s = Inr (y, r)
s = []
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
exactly_aux a b xs ?s = Inr (y, r) \<Longrightarrow> \<exists>w. ?s = xs @ w @ r \<and> y = a
exactly_aux a b (c # xs) s = Inr (y, r)
s = []
goal (1 subgoal):
1. \<exists>w. s = (c # xs) @ w @ r \<and> y = a
[PROOF STEP]
by (auto simp: err_expecting_def)
[PROOF STATE]
proof (state)
this:
\<exists>w. s = (c # xs) @ w @ r \<and> y = a
goal (1 subgoal):
1. \<And>aa list. s = aa # list \<Longrightarrow> \<exists>w. s = (c # xs) @ w @ r \<and> y = a
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>aa list. s = aa # list \<Longrightarrow> \<exists>w. s = (c # xs) @ w @ r \<and> y = a
[PROOF STEP]
case (Cons d ss)
[PROOF STATE]
proof (state)
this:
s = d # ss
goal (1 subgoal):
1. \<And>aa list. s = aa # list \<Longrightarrow> \<exists>w. s = (c # xs) @ w @ r \<and> y = a
[PROOF STEP]
note xs = xs[unfolded Cons]
[PROOF STATE]
proof (state)
this:
exactly_aux a b xs ?s = Inr (y, r) \<Longrightarrow> \<exists>w. ?s = xs @ w @ r \<and> y = a
exactly_aux a b (c # xs) (d # ss) = Inr (y, r)
goal (1 subgoal):
1. \<And>aa list. s = aa # list \<Longrightarrow> \<exists>w. s = (c # xs) @ w @ r \<and> y = a
[PROOF STEP]
from xs(2)
[PROOF STATE]
proof (chain)
picking this:
exactly_aux a b (c # xs) (d # ss) = Inr (y, r)
[PROOF STEP]
have "exactly_aux a b xs ss = Inr (y, r) \<and> c = d"
[PROOF STATE]
proof (prove)
using this:
exactly_aux a b (c # xs) (d # ss) = Inr (y, r)
goal (1 subgoal):
1. exactly_aux a b xs ss = Inr (y, r) \<and> c = d
[PROOF STEP]
by (cases "c = d") (auto simp: err_expecting_def)
[PROOF STATE]
proof (state)
this:
exactly_aux a b xs ss = Inr (y, r) \<and> c = d
goal (1 subgoal):
1. \<And>aa list. s = aa # list \<Longrightarrow> \<exists>w. s = (c # xs) @ w @ r \<and> y = a
[PROOF STEP]
hence res: "exactly_aux a b xs ss = Inr (y, r)" and c: "c = d"
[PROOF STATE]
proof (prove)
using this:
exactly_aux a b xs ss = Inr (y, r) \<and> c = d
goal (1 subgoal):
1. exactly_aux a b xs ss = Inr (y, r) &&& c = d
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
exactly_aux a b xs ss = Inr (y, r)
c = d
goal (1 subgoal):
1. \<And>aa list. s = aa # list \<Longrightarrow> \<exists>w. s = (c # xs) @ w @ r \<and> y = a
[PROOF STEP]
from xs(1)[OF res]
[PROOF STATE]
proof (chain)
picking this:
\<exists>w. ss = xs @ w @ r \<and> y = a
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<exists>w. ss = xs @ w @ r \<and> y = a
goal (1 subgoal):
1. \<exists>w. s = (c # xs) @ w @ r \<and> y = a
[PROOF STEP]
unfolding Cons c
[PROOF STATE]
proof (prove)
using this:
\<exists>w. ss = xs @ w @ r \<and> y = a
goal (1 subgoal):
1. \<exists>w. d # ss = (d # xs) @ w @ r \<and> y = a
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<exists>w. s = (c # xs) @ w @ r \<and> y = a
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<exists>w. s = (c # xs) @ w @ r \<and> y = a
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<exists>w. s = x @ w @ r \<and> y = a
goal (1 subgoal):
1. \<exists>w. s = x @ w @ r \<and> y = x
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
exactly_aux ?a2 ?b2 x s = Inr (y, r) \<Longrightarrow> \<exists>w. s = x @ w @ r \<and> y = ?a2
goal (1 subgoal):
1. \<exists>w. s = x @ w @ r \<and> y = x
[PROOF STEP]
from this[OF assms [unfolded exactly_def]]
[PROOF STATE]
proof (chain)
picking this:
\<exists>w. s = x @ w @ r \<and> y = x
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<exists>w. s = x @ w @ r \<and> y = x
goal (1 subgoal):
1. \<exists>w. s = x @ w @ r \<and> y = x
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
\<exists>w. s = x @ w @ r \<and> y = x
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 3794, "file": "Certification_Monads_Parser_Monad", "length": 38}
|
import streamlit as st
import pandas as pd
import numpy as np
import requests
import time
import matplotlib.pyplot as plt
import seaborn as sns
from io import BytesIO
def get_last_8_days_hourly_bitcoin_data():
"""Call Coincap API and request last 8 days of hourly Bitcoin USD data,
return DataFrame with 'date' and 'price' columns. Date column is in UTC.
Returns
-------
pd.DataFrame
Dataframe (columns: 'date', 'price' with correct types).
        Price is rounded to 2 decimal places. The first row contains the most
        recent price and the last row the price from 8 days ago (the frame is
        sorted by date in descending order).
"""
num_seconds_in_8_days = 60 * 60 * 24 * 8
num_milliseconds_in_8_days = num_seconds_in_8_days * 1000
now_ns = str(time.time_ns())
# Take first 13 digits for milliseconds
# Coincap API only accepts milliseconds
now_ms = int(now_ns[:13])
eight_days_ago = now_ms - num_milliseconds_in_8_days
# Get Bitcoin data for last 8 days
url = (
f"https://api.coincap.io/v2/assets/bitcoin/history?interval=h1"
f"&start={eight_days_ago}&end={now_ms}"
)
payload = {}
headers = {"Authorization": "Bearer bff099f6-aec1-4e2f-8cec-57f8eea14e27"}
response = requests.request("GET", url, headers=headers, data=payload)
response.raise_for_status()
json_data = response.json()
bitcoin_data = json_data["data"]
df = pd.DataFrame(bitcoin_data)
df = df.loc[:, ["date", "priceUsd"]]
df.rename(mapper={"priceUsd": "price"}, inplace=True, axis=1)
df["date"] = df["date"].apply(pd.to_datetime)
df["price"] = df["price"].apply(pd.to_numeric)
df["price"] = df["price"].round(2)
df.sort_values("date", ascending=False, ignore_index=True, inplace=True)
return df
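if __name__ == "__main__":
    # Illustrative manual check (assumes network access and that the
    # hard-coded API key above is still valid):
    df = get_last_8_days_hourly_bitcoin_data()
    print(df.head())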
|
{"hexsha": "3df93b1da1d4e8b6a7b2b42b5c1c65de365b285f", "size": 1772, "ext": "py", "lang": "Python", "max_stars_repo_path": "deploy/deploy_helpers.py", "max_stars_repo_name": "theadammurphy/bitcoin_price_predictor", "max_stars_repo_head_hexsha": "7d4cb3eb85f3800ac7d4f651881d5672457ae538", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "deploy/deploy_helpers.py", "max_issues_repo_name": "theadammurphy/bitcoin_price_predictor", "max_issues_repo_head_hexsha": "7d4cb3eb85f3800ac7d4f651881d5672457ae538", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "deploy/deploy_helpers.py", "max_forks_repo_name": "theadammurphy/bitcoin_price_predictor", "max_forks_repo_head_hexsha": "7d4cb3eb85f3800ac7d4f651881d5672457ae538", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.8148148148, "max_line_length": 78, "alphanum_fraction": 0.6817155756, "include": true, "reason": "import numpy", "num_tokens": 477}
|
#include <array>
#include <cstdint>
#include <map>
#include <ostream>
#include <string>
#include <tuple>
#include <vector>
#include <boost/format.hpp>
#include <zcpm/core/processor.hpp>
#include <zcpm/core/registers.hpp>
#include "writer.hpp"
namespace
{
const std::array<const char*, 8> ByteRegMask{ "B", "C", "D", "E", "H", "L", "(HL)", "A" };
const std::array<const char*, 4> WordRegMask{ "BC", "DE", "HL", "SP" };
const std::array<const char*, 4> WordRegMaskQq{ "BC", "DE", "HL", "AF" };
const std::array<const char*, 8> CondMask{ "NZ", "Z", "NC", "C", "PO", "PE", "P", "M" };
const std::array<const char*, 8> DDByteRegMask{ "B", "C", "D", "E", "IXH", "IXL", "(HL)", "A" };
const std::array<const char*, 8> FDByteRegMask{ "B", "C", "D", "E", "IYH", "IYL", "(HL)", "A" };
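    // Shift/rotate mnemonics selected by the fourth byte of a DD/FD CB sequence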
const std::map<uint8_t, const char*> DdFdCbLogicals = {
{ 0x06, "RLC" }, { 0x0E, "RRC" }, { 0x16, "RL" }, { 0x1E, "RR" },
{ 0x26, "SLA" }, { 0x2E, "SRA" }, { 0x36, "SLL" }, { 0x3E, "SRL" },
};
    // An 8-bit literal as a 2-digit hex string
auto byte(uint8_t x)
{
return (boost::format("%02X") % static_cast<unsigned int>(x)).str();
}
    // A 16-bit literal as a 4-digit hex string
auto word(uint8_t low, uint8_t high)
{
return (boost::format("%04X") % (high << 8 | low)).str();
}
std::string byte_array_to_string(const std::vector<uint8_t>& bytes)
{
std::string result;
for (const auto& b : bytes)
{
if (!result.empty())
{
result += " ";
}
result += byte(b);
}
return "[" + result + "]";
}
// Dereference NN from (string)
auto nn_string(uint8_t low, uint8_t high, const std::string& s)
{
return (boost::format("(%04X),%s") % (high << 8 | low) % s).str();
}
// Dereference (string) from NN
auto string_nn(uint8_t low, uint8_t high, const std::string& s)
{
return (boost::format("%s,(%04X)") % s % (high << 8 | low)).str();
}
// Dereference N from (string)
auto n_string(uint8_t n, const std::string& s)
{
return (boost::format("(%02X),%s") % static_cast<unsigned int>(n) % s).str();
}
// Dereference (string) from N
// auto string_n(uint8_t n, const std::string& s)
//{
// return (boost::format("%s,(%02X)") % s % static_cast<unsigned int>(n)).str();
//}
auto hl_ss(uint8_t ss)
{
return (boost::format("HL,%s") % WordRegMask[ss]).str();
}
auto byte_register(uint8_t r)
{
return ByteRegMask[r];
}
auto qq_word_register(uint8_t qq)
{
return WordRegMaskQq[qq];
}
    // r,n where r is a byte register and n is an 8-bit literal
auto r_n(uint8_t r, uint8_t n)
{
return (boost::format("%s,%02X") % ByteRegMask[r] % static_cast<unsigned int>(n)).str();
}
// r,r where r1 and r2 are both byte registers
auto r_r(uint8_t r1, uint8_t r2)
{
return (boost::format("%s,%s") % ByteRegMask[r1] % ByteRegMask[r2]).str();
}
// dd,nn where dd is a word register and nn is a 16-bit literal
auto dd_nn(uint8_t dd, uint8_t nn_low, uint8_t nn_high)
{
const uint16_t nn = (nn_high << 8) | nn_low;
return (boost::format("%s,%04X") % WordRegMask[dd] % nn).str();
}
// (nn),dd where dd is a word register and nn is a 16-bit literal
auto inn_dd(uint8_t dd, uint8_t nn_low, uint8_t nn_high)
{
const uint16_t nn = (nn_high << 8) | nn_low;
return (boost::format("(%04X),%s") % nn % WordRegMask[dd]).str();
}
// dd,(nn) where dd is a word register and nn is a 16-bit literal
auto dd_inn(uint8_t dd, uint8_t nn_low, uint8_t nn_high)
{
const uint16_t nn = (nn_high << 8) | nn_low;
return (boost::format("%s,(%04X)") % WordRegMask[dd] % nn).str();
}
// cc,pq where cc is a 3-bit condition and pq is a 16-bit literal
auto cc_pq(uint8_t cc, uint8_t pq_low, uint8_t pq_high)
{
const uint16_t pq = (pq_high << 8) | pq_low;
return (boost::format("%s,%04X") % CondMask[cc] % pq).str();
}
    // Relative target as a 4-digit hex value (PC+e)
auto offset(uint16_t pc, uint8_t e)
{
const int8_t ee = e;
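        // The displacement is relative to the address after the 2-byte
        // instruction, hence the +2.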
const uint16_t dest = pc + 2 + ee;
return (boost::format("%04X") % dest).str();
}
// cc,e where cc is a 2-bit condition and e is a relative jump, which
// we combine with pc for display purposes for compatibility with DebugZ
auto cc_offset(uint8_t cc, uint8_t e, uint16_t pc)
{
const int8_t ee = e;
const uint16_t dest = pc + 2 + ee;
return (boost::format("%s,%04X") % CondMask[cc] % dest).str();
}
// "r,(reg+d)" where r is a 8-bit register index and reg is an index register name and d is an offset
auto r_ind_offset(uint8_t r, const std::string& reg, uint8_t offset)
{
const int8_t o = offset;
return (boost::format("%s,(%s+%02X)") % ByteRegMask[r] % reg % static_cast<short>(o)).str();
}
// "(reg+d),r" where r is a 8-bit register index and reg is an index register name and d is an offset
auto ind_offset_r(uint8_t r, const std::string& reg, uint8_t offset)
{
const int8_t o = offset;
return (boost::format("(%s+%02X),%s") % reg % static_cast<short>(o) % ByteRegMask[r]).str();
}
// TODO: Collate string constants
std::tuple<size_t, std::string, std::string> disassemble_cb(uint8_t op2, uint8_t /*op3*/, uint8_t /*op4*/)
{
// First check for specific opcodes
switch (op2)
{
// TODO: none handled here yet
default:
// Fall through to bytefield checks below
break;
}
// Now check for bytefields
if ((op2 & 0xC0) == 0x80)
{
const uint16_t b = (op2 >> 3) & 0x07;
const uint8_t r = op2 & 0x07;
return { 2, "RES", (boost::format("%d,") % b).str() + ByteRegMask[r] };
}
if ((op2 & 0xC0) == 0xC0)
{
const uint16_t b = (op2 >> 3) & 0x07;
const uint8_t r = op2 & 0x07;
return { 2, "SET", (boost::format("%d,") % b).str() + ByteRegMask[r] };
}
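        // BIT b,r (CB 01bbbrrr); added for completeness, mirroring the
        // RES/SET patterns above
        if ((op2 & 0xC0) == 0x40)
        {
            const uint16_t b = (op2 >> 3) & 0x07;
            const uint8_t r = op2 & 0x07;
            return { 2, "BIT", (boost::format("%d,") % b).str() + ByteRegMask[r] };
        }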
if ((op2 & 0xF8) == 0x00)
{
const uint8_t r = op2 & 0x07;
return { 2, "RLC", ByteRegMask[r] };
}
if ((op2 & 0xF8) == 0x08)
{
const uint8_t r = op2 & 0x07;
return { 2, "RRC", ByteRegMask[r] };
}
if ((op2 & 0xF8) == 0x10)
{
const uint8_t r = op2 & 0x07;
return { 2, "RL", ByteRegMask[r] };
}
if ((op2 & 0xF8) == 0x18)
{
const uint8_t r = op2 & 0x07;
return { 2, "RR", ByteRegMask[r] };
}
if ((op2 & 0xF8) == 0x20)
{
const uint8_t r = op2 & 0x07;
return { 2, "SLA", ByteRegMask[r] };
}
if ((op2 & 0xF8) == 0x28)
{
const uint8_t r = op2 & 0x07;
return { 2, "SRA", ByteRegMask[r] };
}
if ((op2 & 0xF8) == 0x30)
{
const uint8_t r = op2 & 0x07;
return { 2, "SLL", ByteRegMask[r] };
}
if ((op2 & 0xF8) == 0x38)
{
const uint8_t r = op2 & 0x07;
return { 2, "SRL", ByteRegMask[r] };
}
// No match
return { 0, (boost::format("?? CB %02X") % static_cast<unsigned short>(op2)).str(), "" };
}
std::tuple<size_t, std::string, std::string> disassemble_ddfd(const std::string& xy,
uint8_t op1,
uint8_t op2,
uint8_t op3,
uint8_t op4)
{
// First check for specific opcodes
switch (op2)
{
case 0x09: return { 2, "ADD", xy + ",BC" };
case 0x19: return { 2, "ADD", xy + ",DE" };
case 0x21: return { 4, "LD", (boost::format("%S,%04X") % xy % ((op4 << 8) | op3)).str() };
case 0x22: return { 4, "LD", (boost::format("(%04X),%S") % ((op4 << 8) | op3) % xy).str() };
case 0x23: return { 2, "INC", xy };
case 0x24: return { 2, "INC", xy + "H" };
case 0x25: return { 2, "DEC", xy + "H" };
case 0x26: return { 3, "LD", (boost::format("%SH,%02X") % xy % static_cast<uint16_t>(op3)).str() };
case 0x29: return { 2, "ADD", xy + "," + xy };
case 0x2A: return { 4, "LD", (boost::format("%S,(%04X)") % xy % ((op4 << 8) | op3)).str() };
case 0x2B: return { 2, "DEC", xy };
case 0x2C: return { 2, "INC", xy + "L" };
case 0x2D: return { 2, "DEC", xy + "L" };
case 0x2E: return { 3, "LD", (boost::format("%SL,%02X") % xy % static_cast<uint16_t>(op3)).str() };
case 0x34: return { 3, "INC", (boost::format("(%S+%02X)") % xy % static_cast<uint16_t>(op3)).str() };
case 0x35: return { 3, "DEC", (boost::format("(%S+%02X)") % xy % static_cast<uint16_t>(op3)).str() };
case 0x36:
return {
4,
"LD",
(boost::format("(%S+%02X),%02X") % xy % static_cast<uint16_t>(op3) % static_cast<uint16_t>(op4)).str()
};
case 0x39: return { 2, "ADD", xy + ",SP" };
case 0x86: return { 3, "ADD", (boost::format("A,(%S+%02X)") % xy % static_cast<uint16_t>(op3)).str() };
case 0x96: return { 3, "SUB", (boost::format("A,(%S+%02X)") % xy % static_cast<uint16_t>(op3)).str() };
case 0xCB:
{
// TODO: move to a new method of its own? We'll see how well this scales...
if (DdFdCbLogicals.contains(op4))
{
return { 4,
DdFdCbLogicals.at(op4),
(boost::format("(%S+%02X)") % xy % static_cast<uint16_t>(op3)).str() };
}
else if ((op4 & 0xC0) == 0x80)
{
const auto b = (op4 >> 3) & 0x07;
return { 4, "RES", (boost::format("%d,(%S+%02X)") % b % xy % static_cast<uint16_t>(op3)).str() };
}
else if ((op4 & 0xC0) == 0x40)
{
const auto b = (op4 >> 3) & 0x07;
return { 4, "BIT", (boost::format("%d,(%S+%02X)") % b % xy % static_cast<uint16_t>(op3)).str() };
}
else if ((op4 & 0xC0) == 0xC0)
{
const auto b = (op4 >> 3) & 0x07;
return { 4, "SET", (boost::format("%d,(%S+%02X)") % b % xy % static_cast<uint16_t>(op3)).str() };
}
else
{
// Unimplemented sequence
const auto message = boost::format("Unimplemented %02X %02X %02X %02X") % static_cast<uint16_t>(op1) %
static_cast<uint16_t>(op2) % static_cast<uint16_t>(op3) %
static_cast<uint16_t>(op4);
throw std::logic_error(message.str());
}
}
case 0xE1: return { 2, "POP", xy };
case 0xE5: return { 2, "PUSH", xy };
default:
// Fall through to bytefield checks below
break;
}
// Now check for bytefields
if ((op2 & 0xC0) == 0x40)
{
// Refer 'Undocumented Z80', page 24
const auto p1 = (op2 >> 3) & 0x07;
const auto p2 = op2 & 0x07;
const auto table = (op1 == 0xDD) ? DDByteRegMask : FDByteRegMask;
if (p1 == 0x06)
{
const auto lhs = boost::format("(%S+%02X)") % xy % static_cast<uint16_t>(op3);
return { 3, "LD", lhs.str() + "," + std::string(ByteRegMask[p2]) };
}
else if (p2 == 0x06)
{
const auto rhs = boost::format("(%S+%02X)") % xy % static_cast<uint16_t>(op3);
return { 3, "LD", std::string(ByteRegMask[p1]) + "," + rhs.str() };
}
else
{
return { 2, "LD", std::string(table[p1]) + "," + std::string(table[p2]) };
}
}
if ((op4 & 0xC0) == 0x40)
{
const auto b = (op4 >> 3) & 0x07;
return { 4, "BIT", (boost::format("%X,(%S+%02X)") % xy % b % static_cast<uint16_t>(op3)).str() };
}
if ((op2 & 0xC7) == 0x46)
{
const uint8_t r = (op2 >> 3) & 0x07;
return { 3, "LD", r_ind_offset(r, xy, op3) };
}
if ((op2 & 0xF8) == 0x70)
{
const uint8_t r = op2 & 0x07;
return { 3, "LD", ind_offset_r(r, xy, op3) };
}
// Unimplemented sequence
const auto message = boost::format("Unimplemented %02X %02X %02X %02X") % static_cast<uint16_t>(op1) %
static_cast<uint16_t>(op2) % static_cast<uint16_t>(op3) % static_cast<uint16_t>(op4);
throw std::logic_error(message.str());
}
std::tuple<size_t, std::string, std::string> disassemble_ed(uint8_t op2, uint8_t op3, uint8_t op4)
{
// First check for specific opcodes
switch (op2)
{
case 0x44: return { 2, "NEG", "" };
case 0x67: return { 2, "RRD", "" };
case 0x6F: return { 2, "RLD", "" };
case 0xA0: return { 2, "LDI", "" };
case 0xA1: return { 2, "CPI", "" };
case 0xA8: return { 2, "LDD", "" };
case 0xA9: return { 2, "CPD", "" };
case 0xB0: return { 2, "LDIR", "" };
case 0xB1: return { 2, "CPIR", "" };
case 0xB8: return { 2, "LDDR", "" };
case 0xB9: return { 2, "CPDR", "" };
default:
// Fall through to bytefield checks below
break;
}
// Now check for bytefields
if ((op2 & 0xCF) == 0x42)
{
const uint8_t ss = (op2 >> 4) & 0x03;
return { 2, "SBC", "HL," + std::string(WordRegMask[ss]) };
}
if ((op2 & 0xCF) == 0x43)
{
const uint8_t dd = (op2 >> 4) & 0x03;
return { 4, "LD", inn_dd(dd, op3, op4) };
}
if ((op2 & 0xCF) == 0x4A)
{
const uint8_t ss = (op2 >> 4) & 0x03;
return { 2, "ADC", "HL," + std::string(WordRegMask[ss]) };
}
if ((op2 & 0xCF) == 0x4B)
{
const uint8_t dd = (op2 >> 4) & 0x03;
return { 4, "LD", dd_inn(dd, op3, op4) };
}
// No match
return { 0, (boost::format("?? ED %02X") % static_cast<unsigned short>(op2)).str(), "" };
}
// Given 4 bytes at the current PC (plus the PC), returns two human-readable "words" of disassembly,
// plus the count of bytes actually used (as each disassembled instruction can be 1-4 bytes)
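    // e.g. disassemble(0xC3, 0x00, 0x10, 0x00, pc) yields { 3, "JP", "1000" }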
std::tuple<size_t, std::string, std::string> disassemble(uint8_t op1,
uint8_t op2,
uint8_t op3,
uint8_t op4,
uint16_t pc)
{
// First check for specific opcodes
switch (op1)
{
case 0x00: return { 1, "NOP", "" };
case 0x02: return { 1, "LD", "(BC),A" }; // Special case, can't use the usual lookups
case 0x07: return { 1, "RLCA", "" };
case 0x0A: return { 1, "LD", "A,(BC)" }; // Special case, can't use the usual lookups
case 0x0E:
{
const uint8_t r = (op1 >> 3) & 0x07;
const uint8_t n = op2;
return { 2, "LD", r_n(r, n) };
}
case 0x0F: return { 1, "RRCA", "" };
case 0x10: return { 2, "DJNZ", offset(pc, op2) };
case 0x12: return { 1, "LD", "(DE),A" }; // Special case, can't use the usual lookups
case 0x17: return { 1, "RLA", "" };
case 0x18: return { 2, "JR", offset(pc, op2) };
case 0x1A: return { 1, "LD", "A,(DE)" }; // Special case, can't use the usual lookups
case 0x1F: return { 1, "RRA", "" };
case 0x22: return { 3, "LD", nn_string(op2, op3, "HL") };
case 0x27: return { 1, "DAA", "" };
case 0x2A: return { 3, "LD", string_nn(op2, op3, "HL") };
case 0x2F: return { 1, "CPL", "" };
case 0x32: return { 3, "LD", nn_string(op2, op3, "A") };
case 0x37: return { 1, "SCF", "" };
case 0x3A: return { 3, "LD", string_nn(op2, op3, "A") };
case 0x3F: return { 1, "CCF", "" };
case 0xC3: return { 3, "JP", word(op2, op3) };
case 0xC6: return { 2, "ADD", "A," + byte(op2) };
case 0x08: return { 1, "EX", "AF,AF'" };
case 0xC9: return { 1, "RET", "" };
case 0xCB: return disassemble_cb(op2, op3, op4);
case 0xCD: return { 3, "CALL", word(op2, op3) };
case 0xCE: return { 2, "ADC", "A," + byte(op2) };
case 0xD3: return { 2, "OUT", n_string(op2, "A") };
case 0xDD: return disassemble_ddfd("IX", op1, op2, op3, op4);
case 0xD6: return { 2, "SUB", byte(op2) };
case 0xD9: return { 1, "EXX", "" };
case 0xDE: return { 2, "SBC", "A," + byte(op2) };
case 0xE3: return { 1, "EX", "(SP),HL" };
case 0xE6: return { 2, "AND", byte(op2) };
case 0xEE: return { 2, "XOR", byte(op2) };
case 0xF3: return { 1, "DI", "" };
case 0xF6: return { 2, "OR", byte(op2) };
case 0xFB: return { 1, "EI", "" };
case 0xE9: return { 1, "JP", "(HL)" };
case 0xEB: return { 1, "EX", "DE,HL" };
case 0xED: return disassemble_ed(op2, op3, op4);
case 0xF9: return { 1, "LD", "SP,HL" };
case 0xFD: return disassemble_ddfd("IY", op1, op2, op3, op4);
case 0xFE: return { 2, "CP", byte(op2) };
default:
// Fall through to bytefield checks below
break;
}
// Byte field checks
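        // (The Z80 packs register operands into bit fields: bits 5-3 select
        // the destination register or condition, bits 2-0 the source.)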
if ((op1 & 0xC0) == 0x40)
{
const uint8_t r1 = (op1 >> 3) & 0x07;
const uint8_t r2 = op1 & 0x07;
return { 1, "LD", r_r(r1, r2) };
}
if ((op1 & 0xC7) == 0x04)
{
const uint8_t r = (op1 >> 3) & 0x07;
return { 1, "INC", ByteRegMask[r] };
}
if ((op1 & 0xC7) == 0x05)
{
const uint8_t r = (op1 >> 3) & 0x07;
return { 1, "DEC", ByteRegMask[r] };
}
if ((op1 & 0xC7) == 0x06)
{
const uint8_t r = (op1 >> 3) & 0x07;
return { 2, "LD", r_n(r, op2) };
}
if ((op1 & 0xC7) == 0xC0)
{
const uint8_t cc = (op1 >> 3) & 0x07;
return { 1, "RET", CondMask[cc] };
}
if ((op1 & 0xC7) == 0xC2)
{
const uint8_t cc = (op1 >> 3) & 0x07;
return { 3, "JP", cc_pq(cc, op2, op3) };
}
if ((op1 & 0xC7) == 0xC4)
{
const uint8_t cc = (op1 >> 3) & 0x07;
return { 3, "CALL", cc_pq(cc, op2, op3) };
}
if ((op1 & 0xC7) == 0xC7)
{
const uint8_t p = (op1 >> 3) & 0x07;
return { 1, "RST", byte(p << 3) };
}
if ((op1 & 0xCF) == 0x01)
{
const uint8_t dd = (op1 >> 4) & 0x03;
return { 3, "LD", dd_nn(dd, op2, op3) };
}
if ((op1 & 0xCF) == 0x03)
{
const uint8_t rr = (op1 >> 4) & 0x03;
return { 1, "INC", WordRegMask[rr] };
}
if ((op1 & 0xCF) == 0x09)
{
const uint8_t ss = (op1 >> 4) & 0x03;
return { 1, "ADD", hl_ss(ss) };
}
if ((op1 & 0xCF) == 0x0B)
{
const uint8_t rr = (op1 >> 4) & 0x03;
return { 1, "DEC", WordRegMask[rr] };
}
if ((op1 & 0xCF) == 0xC1)
{
const uint8_t qq = (op1 >> 4) & 0x03;
return { 1, "POP", qq_word_register(qq) };
}
if ((op1 & 0xCF) == 0xC5)
{
const uint8_t qq = (op1 >> 4) & 0x03;
return { 1, "PUSH", qq_word_register(qq) };
}
if ((op1 & 0xE7) == 0x20)
{
const uint8_t cc = (op1 >> 3) & 0x03;
return { 2, "JR", cc_offset(cc, op2, pc) };
}
if ((op1 & 0xF8) == 0x80)
{
const uint8_t r = op1 & 0x07;
return { 1, "ADD", "A," + std::string(ByteRegMask[r]) };
}
if ((op1 & 0xF8) == 0x88)
{
const uint8_t r = op1 & 0x07;
return { 1, "ADC", ByteRegMask[r] };
}
if ((op1 & 0xF8) == 0x90)
{
const uint8_t r = op1 & 0x07;
return { 1, "SUB", ByteRegMask[r] };
}
if ((op1 & 0xF8) == 0x98)
{
const uint8_t r = op1 & 0x07;
return { 1, "SBC", ByteRegMask[r] };
}
if ((op1 & 0xF8) == 0xA0)
{
const uint8_t r = op1 & 0x07;
return { 1, "AND", ByteRegMask[r] };
}
if ((op1 & 0xF8) == 0xA8)
{
const uint8_t r = op1 & 0x07;
return { 1, "XOR", byte_register(r) };
}
if ((op1 & 0xF8) == 0xB0)
{
const uint8_t r = op1 & 0x07;
return { 1, "OR", byte_register(r) };
}
if ((op1 & 0xF8) == 0xB8)
{
const uint8_t r = op1 & 0x07;
return { 1, "CP", ByteRegMask[r] };
}
// Unhandled instruction
return { 0,
(boost::format("?TODO(%02X,%02X,%02X)") % static_cast<unsigned short>(op1) %
static_cast<unsigned short>(op2) % static_cast<unsigned short>(op3))
.str(),
"" };
}
} // namespace
Writer::Writer(const zcpm::IDebuggable* p_debuggable, zcpm::IMemory& memory, std::ostream& os)
: m_pdebuggable(p_debuggable), m_memory(memory), m_os(os)
{
BOOST_ASSERT(p_debuggable);
}
void Writer::examine() const
{
m_memory.check_memory_accesses(false);
const auto registers = m_pdebuggable->get_registers();
const auto [op1, op2, op3, op4, skipped] = m_pdebuggable->get_opcodes_at(registers.PC, 0);
if (!skipped.empty())
{
display(registers, byte_array_to_string(skipped) + " SKIPPED", "");
}
const auto num_skipped = skipped.size();
try
{
const auto [_, s1, s2] = disassemble(op1, op2, op3, op4, registers.PC + num_skipped);
display(registers, s1, s2, num_skipped);
m_memory.check_memory_accesses(true);
}
catch (const std::logic_error& e)
{
// A failure to parse; display enough information to help the maintainer:
// Memory content around the offending area, aligned on 16's
dump((registers.PC - 16) & 0xFFF0, 64);
throw;
}
}
void Writer::list(int start, size_t instructions) const
{
const auto registers = m_pdebuggable->get_registers();
const uint16_t base = (start < 0) ? registers.PC : start;
size_t offset = 0;
for (size_t i = 0; i < instructions; ++i)
{
const auto [op1, op2, op3, op4, skipped] = m_pdebuggable->get_opcodes_at(base, offset);
if (!skipped.empty())
{
display(registers, byte_array_to_string(skipped) + " SKIPPED", "");
offset += skipped.size();
}
const auto num_skipped = skipped.size();
const auto [nbytes, s1, s2] = disassemble(op1, op2, op3, op4, registers.PC + num_skipped);
display(base + offset + num_skipped, s1, s2);
offset += nbytes + num_skipped;
}
}
void Writer::dump(int start, size_t bytes) const
{
if (bytes == 0)
{
return;
}
const auto registers = m_pdebuggable->get_registers();
const uint16_t base = (start < 0) ? registers.PC : start;
std::string hex_bytes, ascii_bytes;
for (size_t offset = 0; offset < bytes; ++offset)
{
if ((offset % 16) == 0)
{
m_os << boost::format("%04X:") % (base + offset);
}
const auto b = m_memory.read_byte(base + offset);
hex_bytes += (boost::format(" %02X") % static_cast<unsigned short>(b)).str();
ascii_bytes += std::string(1, ((b < 0x20) || (b > 0x7f)) ? '.' : b);
if (((offset + 1) % 16) == 0)
{
m_os << hex_bytes << ' ' << ascii_bytes << std::endl;
hex_bytes.clear();
ascii_bytes.clear();
}
}
if (!hex_bytes.empty() || !ascii_bytes.empty())
{
const auto nbytes = ascii_bytes.length();
const auto padding = std::string((16 - nbytes) * 3, ' ');
m_os << hex_bytes << padding << ' ' << ascii_bytes << std::endl;
}
}
void Writer::display(uint16_t address, const std::string& s1, const std::string& s2) const
{
m_os << boost::format("%04X %-5s%s") % address % s1 % s2 << std::endl;
}
void Writer::display(const zcpm::Registers& registers,
const std::string& s1,
const std::string& s2,
const uint16_t offset) const
{
m_os << boost::format("%s A=%02X B=%04X D=%04X H=%04X S=%04X P=%04X %-5s%s") %
flags_to_string(registers.AF & 0xFF) % static_cast<unsigned short>(registers.AF >> 8) % registers.BC %
registers.DE % registers.HL % registers.SP % ((registers.PC + offset) & 0xFFFF) % s1 % s2;
m_os << std::endl;
m_os << boost::format("%s '=%02X '=%04X '=%04X '=%04X X=%04X Y=%04X") % flags_to_string(registers.altAF & 0xFF) %
static_cast<unsigned short>(registers.altAF >> 8) % registers.altBC % registers.altDE %
registers.altHL % registers.IX % registers.IY
<< std::endl;
}
std::string Writer::flags_to_string(uint8_t f) const
{
std::string result("------");
if (f & zcpm::Processor::C_FLAG_MASK)
{
result[0] = 'C';
}
if (f & zcpm::Processor::N_FLAG_MASK)
{
result[1] = 'S';
}
if (f & zcpm::Processor::PV_FLAG_MASK)
{
result[2] = 'E';
}
if (f & zcpm::Processor::H_FLAG_MASK)
{
result[3] = 'F';
}
if (f & zcpm::Processor::Z_FLAG_MASK)
{
result[4] = 'Z';
}
if (f & zcpm::Processor::S_FLAG_MASK)
{
result[5] = 'M';
}
return result;
}
|
{"hexsha": "4f6e95cf25b8fc5ad8004040a9e39ab9a5342c3d", "size": 26486, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "debugger/writer.cpp", "max_stars_repo_name": "VictorRandomCode/zcpm", "max_stars_repo_head_hexsha": "c121396bceda11a605e995bd10f46b82e7df6ced", "max_stars_repo_licenses": ["Xnet", "Linux-OpenIB", "X11"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "debugger/writer.cpp", "max_issues_repo_name": "VictorRandomCode/zcpm", "max_issues_repo_head_hexsha": "c121396bceda11a605e995bd10f46b82e7df6ced", "max_issues_repo_licenses": ["Xnet", "Linux-OpenIB", "X11"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "debugger/writer.cpp", "max_forks_repo_name": "VictorRandomCode/zcpm", "max_forks_repo_head_hexsha": "c121396bceda11a605e995bd10f46b82e7df6ced", "max_forks_repo_licenses": ["Xnet", "Linux-OpenIB", "X11"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8403247632, "max_line_length": 118, "alphanum_fraction": 0.4779506154, "num_tokens": 8583}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import os
import time
import dgl.function as fn
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from matplotlib import pyplot as plt
from matplotlib.ticker import AutoMinorLocator, MultipleLocator
from ogb.nodeproppred import DglNodePropPredDataset, Evaluator
from gen_model import gen_model
from utils import (add_labels, adjust_learning_rate, compute_acc, compute_norm,
cross_entropy, loge_cross_entropy, loss_kd_only, plot,
save_checkpoint, seed)
os.chdir(os.path.dirname(os.path.abspath(__file__)))
device = None
in_feats, n_classes = None, None
def train(args, model, graph, labels, train_idx, val_idx, test_idx, optimizer, teacher_output, loss_fcn, evaluator, epoch=1):
model.train()
feat = graph.ndata["feat"]
if args.use_labels:
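        # Randomly hold out a fraction (mask_rate) of the training nodes:
        # their labels are appended to the input features, while the loss is
        # computed only on the remaining nodes (label-reuse trick).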
mask = torch.rand(train_idx.shape) < args.mask_rate
train_labels_idx = train_idx[mask]
train_pred_idx = train_idx[~mask]
feat = add_labels(feat, labels, train_labels_idx, n_classes, device)
else:
mask = torch.rand(train_idx.shape) < args.mask_rate
# We change mask to ~mask to match previous definition
train_pred_idx = train_idx[~mask]
optimizer.zero_grad()
pred = model(graph, feat)
if args.n_label_iters > 0 and args.use_labels:
unlabel_idx = torch.cat([train_pred_idx, val_idx, test_idx])
for _ in range(args.n_label_iters):
pred = pred.detach()
torch.cuda.empty_cache()
# unlabel_probs = F.softmax(pred[unlabel_idx], dim=-1)
# unlabel_preds = torch.argmax(unlabel_probs, dim=-1)
# confident_unlabel_idx = unlabel_idx[unlabel_probs.max(dim=-1)[0] > 0.7]
feat[unlabel_idx, -n_classes:] = F.softmax(pred[unlabel_idx], dim=-1)
pred = model(graph, feat)
loss = loss_fcn(pred[train_pred_idx], labels[train_pred_idx])
if args.mode == "student":
loss_kd = loss_kd_only(pred, teacher_output, args.temp)
loss = loss*(1-args.alpha) + loss_kd*args.alpha
loss.backward()
optimizer.step()
return compute_acc(pred[train_idx], labels[train_idx], evaluator), loss
@torch.no_grad()
def evaluate(args, model, graph, labels, train_idx, val_idx, test_idx, use_labels, loss_fcn, evaluator):
model.eval()
feat = graph.ndata["feat"]
if use_labels:
feat = add_labels(feat, labels, train_idx, n_classes, device)
pred = model(graph, feat)
if args.n_label_iters > 0 and args.use_labels:
unlabel_idx = torch.cat([val_idx, test_idx])
for _ in range(args.n_label_iters):
feat[unlabel_idx, -n_classes:] = F.softmax(pred[unlabel_idx], dim=-1)
pred = model(graph, feat)
train_loss = loss_fcn(pred[train_idx], labels[train_idx])
val_loss = loss_fcn(pred[val_idx], labels[val_idx])
test_loss = loss_fcn(pred[test_idx], labels[test_idx])
return (
compute_acc(pred[train_idx], labels[train_idx], evaluator),
compute_acc(pred[val_idx], labels[val_idx], evaluator),
compute_acc(pred[test_idx], labels[test_idx], evaluator),
train_loss,
val_loss,
test_loss,
pred
)
def run(args, graph, labels, train_idx, val_idx, test_idx, evaluator, n_running):
# define model and optimizer
model = gen_model(in_feats, n_classes, args)
model = model.to(device)
if not args.standard_loss:
loss_fcn = loge_cross_entropy
else:
loss_fcn = cross_entropy
optimizer = optim.RMSprop(model.parameters(), lr=args.lr, weight_decay=args.wd)
# training loop
total_time = 0
best_val_acc, best_test_acc, best_val_loss = 0, 0, float("inf")
accs, train_accs, val_accs, test_accs = [], [], [], []
losses, train_losses, val_losses, test_losses = [], [], [], []
    ### compute edge normalization factors only once
deg_sqrt, deg_isqrt = compute_norm(graph)
graph.srcdata.update({"src_norm": deg_isqrt})
graph.dstdata.update({"dst_norm": deg_isqrt})
graph.apply_edges(fn.u_mul_v("src_norm", "dst_norm", "gcn_norm"))
graph.srcdata.update({"src_norm": deg_isqrt})
graph.dstdata.update({"dst_norm": deg_sqrt})
graph.apply_edges(fn.u_mul_v("src_norm", "dst_norm", "gcn_norm_adjust"))
checkpoint_path = args.checkpoint_path
if args.mode == "student":
teacher_output = torch.load(os.path.join(checkpoint_path, f'best_pred_run{n_running}.pt')).cpu().cuda()
else:
teacher_output = None
for epoch in range(1, args.n_epochs + 1):
tic = time.time()
if args.adjust_lr:
adjust_learning_rate(optimizer, args.lr, epoch)
acc, loss = train(args, model, graph, labels, train_idx, val_idx, test_idx, optimizer, teacher_output, loss_fcn, evaluator, epoch=epoch)
train_acc, val_acc, test_acc, train_loss, val_loss, test_loss, pred = evaluate(
args, model, graph, labels, train_idx, val_idx, test_idx, args.use_labels, loss_fcn, evaluator
)
toc = time.time()
total_time += toc - tic
if val_loss < best_val_loss:
best_val_loss = val_loss
best_val_acc = val_acc
best_test_acc = test_acc
final_pred = pred
if args.mode == "teacher":
os.makedirs(checkpoint_path, exist_ok=True)
save_checkpoint(final_pred, n_running, checkpoint_path)
if epoch % args.log_every == 0:
print(f"Run: {n_running}/{args.n_runs}, Epoch: {epoch}/{args.n_epochs}", )
print(f"Time: {(total_time / epoch):.4f}, Loss: {loss.item():.4f}, Acc: {acc:.4f}")
print(f"Train/Val/Test loss: {train_loss:.4f}/{val_loss:.4f}/{test_loss:.4f}")
print(f"Train/Val/Test/Best val/Best test acc: {train_acc:.4f}/{val_acc:.4f}/{test_acc:.4f}/{best_val_acc:.4f}/{best_test_acc:.4f}")
for l, e in zip(
[accs, train_accs, val_accs, test_accs, losses, train_losses, val_losses, test_losses],
[acc, train_acc, val_acc, test_acc, loss.item(), train_loss, val_loss, test_loss],
):
l.append(e)
print("*" * 50)
print(f"Average epoch time: {total_time / args.n_epochs}, Test acc: {best_test_acc}")
if args.plot_curves:
plot(accs, train_accs, val_accs, test_accs,
losses, train_losses, val_losses, test_losses,
n_running, args.n_epochs)
if args.save_pred:
os.makedirs(args.output_path, exist_ok=True)
torch.save(F.softmax(final_pred, dim=1), os.path.join(args.output_path, f"{n_running - 1}.pt"))
return best_val_acc, best_test_acc
def count_parameters(args):
model = gen_model(in_feats, n_classes, args)
print([np.prod(p.size()) for p in model.parameters() if p.requires_grad])
return sum([np.prod(p.size()) for p in model.parameters() if p.requires_grad])
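# Example invocations (illustrative; all flags are defined in main() below):
#   python main.py --mode teacher --use-labels --n-label-iters 1 --save-pred
#   python main.py --mode student --alpha 0.5 --temp 1.0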
def main():
    global device, in_feats, n_classes
argparser = argparse.ArgumentParser("AGDN on OGBN-Arxiv", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
argparser.add_argument("--cpu", action="store_true", help="CPU mode. This option overrides --gpu.")
argparser.add_argument("--gpu", type=int, default=0, help="GPU device ID.")
argparser.add_argument("--root", type=str, default="../dataset")
argparser.add_argument("--model", type=str, default="gat-ha")
argparser.add_argument("--seed", type=int, default=0, help="initial random seed.")
argparser.add_argument("--mode", type=str, default="test")
argparser.add_argument("--alpha",type=float,default=0.5,help="ratio of kd loss")
argparser.add_argument("--temp",type=float,default=1.0,help="temperature of kd")
argparser.add_argument("--n-runs", type=int, default=10)
argparser.add_argument("--n-epochs", type=int, default=2000)
argparser.add_argument(
"--use-labels", action="store_true", help="Use labels in the training set as input features."
)
argparser.add_argument("--mask-rate", type=float, default=0.5, help="mask rate")
argparser.add_argument("--n-label-iters", type=int, default=0, help="number of label iterations")
argparser.add_argument("--no-attn-dst", action="store_true", help="Don't use attn_dst.")
argparser.add_argument("--norm", type=str, help="Choices of normalization methods. values=['none','sym','avg']", default='none')
argparser.add_argument("--lr", type=float, default=0.002)
argparser.add_argument("--n-layers", type=int, default=3)
argparser.add_argument("--K", type=int, default=3)
argparser.add_argument("--n-heads", type=int, default=1)
argparser.add_argument("--n-hidden", type=int, default=256)
argparser.add_argument("--dropout", type=float, default=0.5)
argparser.add_argument("--input_drop", type=float, default=0.0)
argparser.add_argument("--edge_drop", type=float, default=0.0)
argparser.add_argument("--attn_drop", type=float, default=0.05)
argparser.add_argument("--wd", type=float, default=0)
argparser.add_argument("--log-every", type=int, default=20)
argparser.add_argument("--plot-curves", action="store_true")
argparser.add_argument("--use-linear", action="store_true", help="only useful for gcn model")
argparser.add_argument("--checkpoint-path", type=str, default="../checkpoint/")
argparser.add_argument("--output-path", type=str, default="../output/")
argparser.add_argument("--save-pred", action="store_true", help="save final predictions")
argparser.add_argument("--adjust-lr", action="store_true", help="adjust learning rate in first 50 iterations")
argparser.add_argument("--standard-loss", action="store_true")
args = argparser.parse_args()
print(f"args: {args}")
assert args.mode in ["teacher", "student", "test"]
if args.cpu:
device = torch.device("cpu")
else:
device = torch.device("cuda:%d" % args.gpu)
# load data
data = DglNodePropPredDataset(name="ogbn-arxiv", root=args.root)
evaluator = Evaluator(name="ogbn-arxiv")
splitted_idx = data.get_idx_split()
train_idx, val_idx, test_idx = splitted_idx["train"], splitted_idx["valid"], splitted_idx["test"]
graph, labels = data[0]
# add reverse edges
srcs, dsts = graph.all_edges()
graph.add_edges(dsts, srcs)
# add self-loop
print(f"Total edges before adding self-loop {graph.number_of_edges()}")
graph = graph.remove_self_loop().add_self_loop()
print(f"Total edges after adding self-loop {graph.number_of_edges()}")
in_feats = graph.ndata["feat"].shape[1]
n_classes = (labels.max() + 1).item()
# graph.create_format_()
train_idx = train_idx.to(device)
val_idx = val_idx.to(device)
test_idx = test_idx.to(device)
labels = labels.to(device)
graph = graph.to(device)
# run
val_accs = []
test_accs = []
for i in range(1, args.n_runs + 1):
seed(i + args.seed)
val_acc, test_acc = run(args, graph, labels, train_idx, val_idx, test_idx, evaluator, i)
val_accs.append(val_acc)
test_accs.append(test_acc)
print(f"Runned {args.n_runs} times")
print(f"Val Accs: {val_accs}")
print(f"Test Accs: {test_accs}")
print(f"Average val accuracy: {np.mean(val_accs)} ± {np.std(val_accs)}")
print(f"Average test accuracy: {np.mean(test_accs)} ± {np.std(test_accs)}")
print(f"Number of params: {count_parameters(args)}")
if __name__ == "__main__":
main()
|
{"hexsha": "14ef27af8ac89902d656f56f7264a5f23c7cc272", "size": 11570, "ext": "py", "lang": "Python", "max_stars_repo_path": "ogbn-arxiv/src/main.py", "max_stars_repo_name": "skepsun/adaptive_graph_diffusion_convolution_networks", "max_stars_repo_head_hexsha": "a1b5e0d9c600a42a36d3c15cafdf36b7ccfb47c4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2021-03-10T13:27:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-04T08:06:16.000Z", "max_issues_repo_path": "ogbn-arxiv/src/main.py", "max_issues_repo_name": "skepsun/adaptive_graph_diffusion_convolution_networks", "max_issues_repo_head_hexsha": "a1b5e0d9c600a42a36d3c15cafdf36b7ccfb47c4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-10-11T04:39:13.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-20T08:11:20.000Z", "max_forks_repo_path": "ogbn-arxiv/src/main.py", "max_forks_repo_name": "skepsun/adaptive_graph_diffusion_convolution_networks", "max_forks_repo_head_hexsha": "a1b5e0d9c600a42a36d3c15cafdf36b7ccfb47c4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-09-26T06:50:11.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-18T08:14:54.000Z", "avg_line_length": 39.7594501718, "max_line_length": 144, "alphanum_fraction": 0.6667242869, "include": true, "reason": "import numpy", "num_tokens": 2937}
|
/**
@file sysid_actuator_gazebo.cpp
ROS node for collecting data for sysid to determine
an appropriate scaling for thrust and body angular
acceleration commands over ActuatorControl messages.
*/
#include <ros/ros.h>
#include <ros/console.h>
#include <mavros_msgs/ParamSet.h>
#include <mavros_msgs/CommandBool.h>
#include <mavros_msgs/State.h>
#include <mavros_msgs/ActuatorControl.h>
#include <mavros_msgs/RCOut.h>
#include <gazebo_msgs/LinkStates.h>
#include <string>
#include <iostream>
#include <algorithm>
#include <vector>
#include <Eigen/Dense>
#include <utils/rotation.hpp>
bool px4_connected; // PX4 connection exists
std::string px4_mode; // PX4 mode
void state_cb(const mavros_msgs::State::ConstPtr& msg) {
px4_connected = msg->connected;
px4_mode = msg->mode;
}
unsigned pwm;
void rcout_cb(const mavros_msgs::RCOut::ConstPtr& msg) {
pwm = msg->channels[0];
}
/**
  Extract the orientation of the control surfaces from Gazebo and
  compute their orientation relative to the body. Each quaternion
  from Gazebo is the rotation from the world frame to the link frame,
  so the body quaternion must be inverted before composing to obtain
  the relative rotation.
*/
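// Editor's sketch of the math in gz_links_cb below (assuming Rot::invert_quat
// conjugates the quaternion in place and Rot::compose_quats multiplies
// quaternions in the order given): with q_wb the world-to-body rotation and
// q_wl the world-to-link rotation reported by Gazebo, the body-to-link
// rotation is q_bl = q_wb^{-1} * q_wl, and the control surface deflection is
// the magnitude of the axis-angle vector of q_bl.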
std::vector<double> gz_ctrl(5, 0.0); // gz_ctrl = [al, ar, e, r, T]
Eigen::Vector4d ctrl_q; // quaternion for control surface
Eigen::Vector4d body_q; // quaternion for body
Eigen::Vector4d rel_q; // quaternion for relative orientation
Eigen::Vector3d axis;
double angle;
Eigen::Vector3d aa;
Eigen::Vector3d rel_om_wrld;
Eigen::Matrix3d R;
std::vector<int> ctrl_map = {4, 5, 6, 7}; // maps to gazebo message indices
void gz_links_cb(const gazebo_msgs::LinkStates::ConstPtr& msg) {
// Get body orientation relative to world
body_q(0) = msg->pose[1].orientation.w;
body_q(1) = msg->pose[1].orientation.x;
body_q(2) = msg->pose[1].orientation.y;
body_q(3) = msg->pose[1].orientation.z;
Rot::invert_quat(body_q); // invert to rotation from body to world
// Orientations of control surfaces
for (int i = 0; i < 4; ++i) {
// Get control surface orientation relative to world
ctrl_q(0) = msg->pose[ctrl_map[i]].orientation.w;
ctrl_q(1) = msg->pose[ctrl_map[i]].orientation.x;
ctrl_q(2) = msg->pose[ctrl_map[i]].orientation.y;
ctrl_q(3) = msg->pose[ctrl_map[i]].orientation.z;
// Compute relative orientation
Rot::compose_quats(body_q, ctrl_q, rel_q); // compose the rotations
Rot::quat_to_axis(rel_q, aa);
angle = aa.norm();
axis = aa.normalized();
gz_ctrl[i] = angle;
}
// Relative propeller spin rate: prop minus body angular velocity, expressed in world frame coordinates
rel_om_wrld(0) = msg->twist[3].angular.x - msg->twist[1].angular.x;
rel_om_wrld(1) = msg->twist[3].angular.y - msg->twist[1].angular.y;
rel_om_wrld(2) = msg->twist[3].angular.z - msg->twist[1].angular.z;
// Convert world to body frame coordinates
Rot::quat_to_R(body_q, R);
gz_ctrl[4] = R.row(0)*rel_om_wrld;
}
int main(int argc, char **argv) {
ros::init(argc, argv, "act_ctrl_sysid_node");
ros::NodeHandle nh;
// Get parameters
double STEP_SIZE = 0.1;
if (!nh.getParam("/sysid_actuator_ctrl/STEP_SIZE", STEP_SIZE)) {
ROS_INFO("Using default step size of 0.1");
}
double HOLD_TIME = 5.0;
if (!nh.getParam("/sysid_actuator_ctrl/HOLD_TIME", HOLD_TIME)) {
ROS_INFO("Using default hold time of 5 seconds");
}
HOLD_TIME = std::max(HOLD_TIME, 0.1);
// Define subscribers
ros::Subscriber state_sub = nh.subscribe<mavros_msgs::State>
("mavros/state", 10, state_cb);
ros::Subscriber rcout_sub = nh.subscribe<mavros_msgs::RCOut>
("mavros/rc/out", 10, rcout_cb);
ros::Subscriber gz_links_sub = nh.subscribe<gazebo_msgs::LinkStates>
("gazebo/link_states", 10, gz_links_cb);
// Define publishers
ros::Publisher act_cmd_pub = nh.advertise<mavros_msgs::ActuatorControl>
("mavros/actuator_control", 10);
// Define services
ros::ServiceClient arm_clt = nh.serviceClient<mavros_msgs::CommandBool>
("mavros/cmd/arming");
ros::ServiceClient param_clt = nh.serviceClient<mavros_msgs::ParamSet>
("mavros/param/set");
// Get from user which set of servos to check
char input = 'z';
std::string controls = "rpyt";
while (controls.find(input) == std::string::npos) {
std::cout << "Select input: roll (r), pitch (p), yaw (y), thrust (t): ";
std::cin >> input;
}
int c = controls.find(input);
char sign = '.';
std::string signs = "+-";
while (signs.find(sign) == std::string::npos) {
std::cout << "Select direction: positive (+), negative (-): ";
std::cin >> sign;
}
double s;
if (sign == '+') s = 1.0;
else s = -1.0;
// Define rate for the node
ros::Rate rate(5.0);
// Turn off auto-disarm before takeoff so sysid can be performed on the ground
mavros_msgs::ParamSet dsrm_off;
dsrm_off.request.param_id = "COM_DISARM_PRFLT";
dsrm_off.request.value.real = -1.;
param_clt.call(dsrm_off);
if (dsrm_off.response.success) ROS_INFO("Disabled auto-disarm before takeoff");
// Automatically arm the vehicle
mavros_msgs::CommandBool arm_cmd;
arm_cmd.request.value = true;
arm_clt.call(arm_cmd);
if (arm_cmd.response.success) ROS_INFO("Vehicle armed");
// Define the actuator control message
mavros_msgs::ActuatorControl act_cmd;
act_cmd.group_mix = mavros_msgs::ActuatorControl::PX4_MIX_FLIGHT_CONTROL;
while(ros::ok() && !px4_connected) {
ros::spinOnce();
rate.sleep();
}
int hold_steps = HOLD_TIME/0.1;
int inc_steps = 1.0/STEP_SIZE;
int h = 0;
int i = 0;
std::cout << "Set PX4 to offboard mode to start sequence" << std::endl;
while(ros::ok() && i <= inc_steps) {
ros::spinOnce();
std::cout << "[|al|, |ar|, |e|, |r|, |T|] = [" << gz_ctrl[0] << " " << gz_ctrl[1]
<< " " << gz_ctrl[2] << " " << gz_ctrl[3] << " " << gz_ctrl[4]
<< "] rad, rad/s" << std::endl;
act_cmd_pub.publish(act_cmd);
if (px4_mode == "OFFBOARD") {
// Switch the level every hold_steps number of steps
if (h == hold_steps) {
act_cmd.controls[c] += s*STEP_SIZE;
h = 0;
i++;
// Print the command
if (i <= inc_steps) {
std::cout << "act_cmd.controls[" << input << "] = "
<< act_cmd.controls[c] << std::endl;
}
}
h++;
}
rate.sleep(); // sleep for remaining time
}
act_cmd.controls[c] = 0.0;
act_cmd_pub.publish(act_cmd);
ROS_INFO("Done with open-loop inputs, shutting down node");
ros::shutdown();
}
|
{"hexsha": "d163aadc3a35c7951aff811d5a087b36d6d7985d", "size": 6922, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "scripts/sysid/sysid_actuator_gazebo.cpp", "max_stars_repo_name": "jlorenze/asl_fixedwing", "max_stars_repo_head_hexsha": "9cac7c8d31f5d1c9f7d059d4614d6b60f1a3fbef", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4.0, "max_stars_repo_stars_event_min_datetime": "2021-06-28T17:30:55.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T09:04:35.000Z", "max_issues_repo_path": "scripts/sysid/sysid_actuator_gazebo.cpp", "max_issues_repo_name": "jlorenze/asl_fixedwing", "max_issues_repo_head_hexsha": "9cac7c8d31f5d1c9f7d059d4614d6b60f1a3fbef", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2021-08-31T16:22:13.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-31T16:36:15.000Z", "max_forks_repo_path": "scripts/sysid/sysid_actuator_gazebo.cpp", "max_forks_repo_name": "jlorenze/asl_fixedwing", "max_forks_repo_head_hexsha": "9cac7c8d31f5d1c9f7d059d4614d6b60f1a3fbef", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.931372549, "max_line_length": 89, "alphanum_fraction": 0.6203409419, "num_tokens": 1918}
|
# encoding: utf-8
"""
This module contains chord evaluation functionality.
It provides the evaluation measures used for the MIREX ACE task, and
tries to follow [1]_ and [2]_ as closely as possible.
Notes
-----
This implementation tries to follow the references and their implementation
(e.g., https://github.com/jpauwels/MusOOEvaluator for [2]_). However, there
are some known (and possibly some unknown) differences. If you find one not
listed in the following, please file an issue:
- Detected chord segments are adjusted to fit the length of the annotations.
In particular, this means that, if necessary, filler segments of 'no chord'
are added at beginnings and ends. This can result in different segmentation
scores compared to the original implementation.
References
----------
.. [1] Christopher Harte, "Towards Automatic Extraction of Harmony Information
from Music Signals." Dissertation,
Department for Electronic Engineering, Queen Mary University of London,
2010.
.. [2] Johan Pauwels and Geoffroy Peeters.
"Evaluating Automatically Estimated Chord Sequences."
In Proceedings of ICASSP 2013, Vancouver, Canada, 2013.
"""
import numpy as np
from . import evaluation_io, EvaluationMixin
from ..io import load_chords
CHORD_DTYPE = [('root', np.int),
('bass', np.int),
('intervals', np.int, (12,))]
CHORD_ANN_DTYPE = [('start', np.float),
('end', np.float),
('chord', CHORD_DTYPE)]
NO_CHORD = (-1, -1, np.zeros(12, dtype=np.int))
UNKNOWN_CHORD = (-1, -1, np.ones(12, dtype=np.int) * -1)
def encode(chord_labels):
"""
Encodes chord labels to numeric interval representations.
Parameters
----------
chord_labels : numpy structured array
Chord segments in `madmom.io.SEGMENT_DTYPE` format
Returns
-------
encoded_chords : numpy structured array
Chords in `CHORD_ANN_DTYPE` format
"""
encoded_chords = np.zeros(len(chord_labels), dtype=CHORD_ANN_DTYPE)
encoded_chords['start'] = chord_labels['start']
encoded_chords['end'] = chord_labels['end']
encoded_chords['chord'] = chords(chord_labels['label'])
return encoded_chords
def chords(labels):
"""
Transform a list of chord labels into an array of internal numeric
representations.
Parameters
----------
labels : list
List of chord labels (str).
Returns
-------
chords : numpy.array
Structured array with columns 'root', 'bass', and 'intervals',
containing a numeric representation of chords (`CHORD_DTYPE`).
"""
crds = np.zeros(len(labels), dtype=CHORD_DTYPE)
cache = {}
for i, lbl in enumerate(labels):
cv = cache.get(lbl, None)
if cv is None:
cv = chord(lbl)
cache[lbl] = cv
crds[i] = cv
return crds
def chord(label):
"""
Transform a chord label into the internal numeric representation of
(root, bass, intervals array) as defined by `CHORD_DTYPE`.
Parameters
----------
label : str
Chord label.
Returns
-------
chord : tuple
Numeric representation of the chord: (root, bass, intervals array).
"""
if label == 'N':
return NO_CHORD
if label == 'X':
return UNKNOWN_CHORD
c_idx = label.find(':')
s_idx = label.find('/')
if c_idx == -1:
quality_str = 'maj'
if s_idx == -1:
root_str = label
bass_str = ''
else:
root_str = label[:s_idx]
bass_str = label[s_idx + 1:]
else:
root_str = label[:c_idx]
if s_idx == -1:
quality_str = label[c_idx + 1:]
bass_str = ''
else:
quality_str = label[c_idx + 1:s_idx]
bass_str = label[s_idx + 1:]
root = pitch(root_str)
bass = interval(bass_str) if bass_str else 0
ivs = chord_intervals(quality_str)
ivs[bass] = 1
return root, bass, ivs
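# Example (editor's sketch, not part of the original module): chord('C:maj/5')
# yields root 0 (C), bass 7 (the fifth) and the interval vector of a major
# triad, [1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0].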
_l = [0, 1, 1, 0, 1, 1, 1]
_chroma_id = (np.arange(len(_l) * 2) + 1) + np.array(_l + _l).cumsum() - 1
def modify(base_pitch, modifier):
"""
Modify a pitch class in integer representation by a given modifier string.
A modifier string can be any sequence of 'b' (one semitone down)
and '#' (one semitone up).
Parameters
----------
base_pitch : int
Pitch class as integer.
modifier : str
String of modifiers ('b' or '#').
Returns
-------
modified_pitch : int
Modified root note.
"""
for m in modifier:
if m == 'b':
base_pitch -= 1
elif m == '#':
base_pitch += 1
else:
raise ValueError('Unknown modifier: {}'.format(m))
return base_pitch
def pitch(pitch_str):
"""
Convert a string representation of a pitch class (consisting of root
note and modifiers) to an integer representation.
Parameters
----------
pitch_str : str
String representation of a pitch class.
Returns
-------
pitch : int
Integer representation of a pitch class.
"""
return modify(_chroma_id[(ord(pitch_str[0]) - ord('C')) % 7],
pitch_str[1:]) % 12
def interval(interval_str):
"""
Convert a string representation of a musical interval into a pitch class
(e.g. a minor seventh 'b7' into 10, because it is 10 semitones above its
base note).
Parameters
----------
interval_str : str
Musical interval.
Returns
-------
pitch_class : int
Number of semitones to base note of interval.
"""
for i, c in enumerate(interval_str):
if c.isdigit():
return modify(_chroma_id[int(interval_str[i:]) - 1],
interval_str[:i]) % 12
def interval_list(intervals_str, given_pitch_classes=None):
"""
Convert a list of intervals given as string to a binary pitch class
representation. For example, 'b3, 5' would become
[0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0].
Parameters
----------
intervals_str : str
List of intervals as comma-separated string (e.g. 'b3, 5').
given_pitch_classes : None or numpy array
If None, start with empty pitch class array, if numpy array of length
12, this array will be modified.
Returns
-------
pitch_classes : numpy array
Binary pitch class representation of intervals.
"""
if given_pitch_classes is None:
given_pitch_classes = np.zeros(12, dtype=np.int)
for int_def in intervals_str[1:-1].split(','):
int_def = int_def.strip()
if int_def[0] == '*':
given_pitch_classes[interval(int_def[1:])] = 0
else:
given_pitch_classes[interval(int_def)] = 1
return given_pitch_classes
# mapping of shorthand interval notations to the actual interval representation
_shorthands = {
'maj': interval_list('(1,3,5)'),
'min': interval_list('(1,b3,5)'),
'dim': interval_list('(1,b3,b5)'),
'aug': interval_list('(1,3,#5)'),
'maj7': interval_list('(1,3,5,7)'),
'min7': interval_list('(1,b3,5,b7)'),
'7': interval_list('(1,3,5,b7)'),
'5': interval_list('(1,5)'),
'1': interval_list('(1)'),
'dim7': interval_list('(1,b3,b5,bb7)'),
'hdim7': interval_list('(1,b3,b5,b7)'),
'minmaj7': interval_list('(1,b3,5,7)'),
'maj6': interval_list('(1,3,5,6)'),
'min6': interval_list('(1,b3,5,6)'),
'9': interval_list('(1,3,5,b7,9)'),
'maj9': interval_list('(1,3,5,7,9)'),
'min9': interval_list('(1,b3,5,b7,9)'),
'sus2': interval_list('(1,2,5)'),
'sus4': interval_list('(1,4,5)'),
'11': interval_list('(1,3,5,b7,9,11)'),
'min11': interval_list('(1,b3,5,b7,9,11)'),
'13': interval_list('(1,3,5,b7,13)'),
'maj13': interval_list('(1,3,5,7,13)'),
'min13': interval_list('(1,b3,5,b7,13)')
}
def chord_intervals(quality_str):
"""
Convert a chord quality string to a pitch class representation. For
example, 'maj' becomes [1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0].
Parameters
----------
quality_str : str
String defining the chord quality.
Returns
-------
pitch_classes : numpy array
Binary pitch class representation of chord quality.
"""
list_idx = quality_str.find('(')
if list_idx == -1:
return _shorthands[quality_str].copy()
if list_idx != 0:
ivs = _shorthands[quality_str[:list_idx]].copy()
else:
ivs = np.zeros(12, dtype=np.int)
return interval_list(quality_str[list_idx:], ivs)
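# Example (editor's sketch): chord_intervals('maj(9)') starts from the 'maj'
# shorthand and additionally sets the major ninth (pitch class 2), giving
# [1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0].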
def merge_chords(chords):
"""
Merge consecutive chord annotations if they represent the same chord.
Parameters
----------
chords : numpy structured array
Chord annotations to be merged, in `CHORD_ANN_DTYPE` format.
Returns
-------
merged_chords : numpy structured array
Merged chord annotations, in `CHORD_ANN_DTYPE` format.
"""
merged_starts = []
merged_ends = []
merged_chords = []
prev_chord = None
for start, end, chord in chords:
if chord != prev_chord:
prev_chord = chord
merged_starts.append(start)
merged_ends.append(end)
merged_chords.append(chord)
else:
# prolong the previous chord
merged_ends[-1] = end
crds = np.zeros(len(merged_chords), dtype=CHORD_ANN_DTYPE)
crds['start'] = merged_starts
crds['end'] = merged_ends
crds['chord'] = merged_chords
return crds
def evaluation_pairs(det_chords, ann_chords):
"""
Match detected with annotated chords and create paired label segments
for evaluation.
Parameters
----------
det_chords : numpy structured array
Chord detections with 'start' and 'end' fields.
ann_chords : numpy structured array
Chord annotations with 'start' and 'end' fields.
Returns
-------
annotations : numpy structured array
Annotated chords of evaluation segments.
detections : numpy structured array
Detected chords of evaluation segments.
durations : numpy array
Durations of evaluation segments.
"""
times = np.unique(np.hstack([ann_chords['start'], ann_chords['end'],
det_chords['start'], det_chords['end']]))
durations = times[1:] - times[:-1]
annotations = ann_chords['chord'][
np.searchsorted(ann_chords['start'], times[:-1], side='right') - 1]
detections = det_chords['chord'][
np.searchsorted(det_chords['start'], times[:-1], side='right') - 1]
return annotations, detections, durations
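# Worked example (editor's sketch): with annotated segments [0, 2) and [2, 4)
# and a single detection [0, 4), the union of boundaries is [0, 2, 4], giving
# two evaluation segments of duration 2; the detected chord is paired with a
# different annotated chord in each segment.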
def score_root(det_chords, ann_chords):
"""
Score similarity of chords based on only the root, i.e. returns a score of
1 if roots match, 0 otherwise.
Parameters
----------
det_chords : numpy structured array
Detected chords.
ann_chords : numpy structured array
Annotated chords.
Returns
-------
scores : numpy array
Similarity score for each chord.
"""
return (ann_chords['root'] == det_chords['root']).astype(np.float)
def score_exact(det_chords, ann_chords):
"""
Score similarity of chords. Returns 1 if all chord information (root,
bass, and intervals) match exactly.
Parameters
----------
det_chords : numpy structured array
Detected chords.
ann_chords : numpy structured array
Annotated chords.
Returns
-------
scores : numpy array
Similarity score for each chord.
"""
return ((ann_chords['root'] == det_chords['root']) &
(ann_chords['bass'] == det_chords['bass']) &
((ann_chords['intervals'] == det_chords['intervals']).all(axis=1))
).astype(np.float)
def reduce_to_triads(chords, keep_bass=False):
"""
Reduce chords to triads.
The function follows the reduction rules implemented in [1]_. If a chord
does not contain a third, major second or fourth, it is reduced to
a power chord. If it contains neither a third nor a fifth, it is
reduced to a single-note "chord".
Parameters
----------
chords : numpy structured array
Chords to be reduced.
keep_bass : bool
Indicates whether to keep the bass note or set it to 0.
Returns
-------
reduced_chords : numpy structured array
Chords reduced to triads.
References
----------
.. [1] Johan Pauwels and Geoffroy Peeters.
"Evaluating Automatically Estimated Chord Sequences."
In Proceedings of ICASSP 2013, Vancouver, Canada, 2013.
"""
unison = chords['intervals'][:, 0].astype(bool)
maj_sec = chords['intervals'][:, 2].astype(bool)
min_third = chords['intervals'][:, 3].astype(bool)
maj_third = chords['intervals'][:, 4].astype(bool)
perf_fourth = chords['intervals'][:, 5].astype(bool)
dim_fifth = chords['intervals'][:, 6].astype(bool)
perf_fifth = chords['intervals'][:, 7].astype(bool)
aug_fifth = chords['intervals'][:, 8].astype(bool)
no_chord = (chords['intervals'] == NO_CHORD[-1]).all(axis=1)
reduced_chords = chords.copy()
ivs = reduced_chords['intervals']
ivs[~no_chord] = interval_list('(1)')
ivs[unison & perf_fifth] = interval_list('(1,5)')
ivs[~perf_fourth & maj_sec] = _shorthands['sus2']
ivs[perf_fourth & ~maj_sec] = _shorthands['sus4']
ivs[min_third] = _shorthands['min']
ivs[min_third & aug_fifth & ~perf_fifth] = interval_list('(1,b3,#5)')
ivs[min_third & dim_fifth & ~perf_fifth] = _shorthands['dim']
ivs[maj_third] = _shorthands['maj']
ivs[maj_third & dim_fifth & ~perf_fifth] = interval_list('(1,3,b5)')
ivs[maj_third & aug_fifth & ~perf_fifth] = _shorthands['aug']
if not keep_bass:
reduced_chords['bass'] = 0
else:
# remove bass notes if they are not part of the intervals anymore
reduced_chords['bass'] *= ivs[range(len(reduced_chords)),
reduced_chords['bass']]
# keep -1 in bass for no chords
reduced_chords['bass'][no_chord] = -1
return reduced_chords
def reduce_to_tetrads(chords, keep_bass=False):
"""
Reduce chords to tetrads.
The function follows the reduction rules implemented in [1]_. If a chord
does not contain a third, major second or fourth, it is reduced to a power
chord. If it contains neither a third nor a fifth, it is reduced
to a single-note "chord".
Parameters
----------
chords : numpy structured array
Chords to be reduced.
keep_bass : bool
Indicates whether to keep the bass note or set it to 0.
Returns
-------
reduced_chords : numpy structured array
Chords reduced to tetrads.
References
----------
.. [1] Johan Pauwels and Geoffroy Peeters.
"Evaluating Automatically Estimated Chord Sequences."
In Proceedings of ICASSP 2013, Vancouver, Canada, 2013.
"""
unison = chords['intervals'][:, 0].astype(bool)
maj_sec = chords['intervals'][:, 2].astype(bool)
min_third = chords['intervals'][:, 3].astype(bool)
maj_third = chords['intervals'][:, 4].astype(bool)
perf_fourth = chords['intervals'][:, 5].astype(bool)
dim_fifth = chords['intervals'][:, 6].astype(bool)
perf_fifth = chords['intervals'][:, 7].astype(bool)
aug_fifth = chords['intervals'][:, 8].astype(bool)
maj_sixth = chords['intervals'][:, 9].astype(bool)
dim_seventh = maj_sixth
min_seventh = chords['intervals'][:, 10].astype(bool)
maj_seventh = chords['intervals'][:, 11].astype(bool)
no_chord = (chords['intervals'] == NO_CHORD[-1]).all(axis=1)
reduced_chords = chords.copy()
ivs = reduced_chords['intervals']
ivs[~no_chord] = interval_list('(1)')
ivs[unison & perf_fifth] = interval_list('(1,5)')
sus2 = ~perf_fourth & maj_sec
sus2_ivs = _shorthands['sus2']
ivs[sus2] = sus2_ivs
ivs[sus2 & maj_sixth] = interval_list('(6)', sus2_ivs.copy())
ivs[sus2 & maj_seventh] = interval_list('(7)', sus2_ivs.copy())
ivs[sus2 & min_seventh] = interval_list('(b7)', sus2_ivs.copy())
sus4 = perf_fourth & ~maj_sec
sus4_ivs = _shorthands['sus4']
ivs[sus4] = sus4_ivs
ivs[sus4 & maj_sixth] = interval_list('(6)', sus4_ivs.copy())
ivs[sus4 & maj_seventh] = interval_list('(7)', sus4_ivs.copy())
ivs[sus4 & min_seventh] = interval_list('(b7)', sus4_ivs.copy())
ivs[min_third] = _shorthands['min']
ivs[min_third & maj_sixth] = _shorthands['min6']
ivs[min_third & maj_seventh] = _shorthands['minmaj7']
ivs[min_third & min_seventh] = _shorthands['min7']
minaugfifth = min_third & ~perf_fifth & aug_fifth
ivs[minaugfifth] = interval_list('(1,b3,#5)')
ivs[minaugfifth & maj_seventh] = interval_list('(1,b3,#5,7)')
ivs[minaugfifth & min_seventh] = interval_list('(1,b3,#5,b7)')
mindimfifth = min_third & ~perf_fifth & dim_fifth
ivs[mindimfifth] = _shorthands['dim']
ivs[mindimfifth & dim_seventh] = _shorthands['dim7']
ivs[mindimfifth & min_seventh] = _shorthands['hdim7']
ivs[maj_third] = _shorthands['maj']
ivs[maj_third & maj_sixth] = _shorthands['maj6']
ivs[maj_third & maj_seventh] = _shorthands['maj7']
ivs[maj_third & min_seventh] = _shorthands['7']
majdimfifth = maj_third & ~perf_fifth & dim_fifth
ivs[majdimfifth] = interval_list('(1,3,b5)')
ivs[majdimfifth & maj_seventh] = interval_list('(1,3,b5,7)')
ivs[majdimfifth & min_seventh] = interval_list('(1,3,b5,b7)')
majaugfifth = maj_third & ~perf_fifth & aug_fifth
aug_ivs = _shorthands['aug']
ivs[majaugfifth] = _shorthands['aug']
ivs[majaugfifth & maj_seventh] = interval_list('(7)', aug_ivs.copy())
ivs[majaugfifth & min_seventh] = interval_list('(b7)', aug_ivs.copy())
if not keep_bass:
reduced_chords['bass'] = 0
else:
# remove bass notes if they are not part of the intervals anymore
reduced_chords['bass'] *= ivs[range(len(reduced_chords)),
reduced_chords['bass']]
# keep -1 in bass for no chords
reduced_chords['bass'][no_chord] = -1
return reduced_chords
def select_majmin(chords):
"""
Compute a mask that selects all major, minor, and
"no chords" with a 1, and all other chords with a 0.
Parameters
----------
chords : numpy structured array
Chords to compute the mask for.
Returns
-------
mask : numpy array (boolean)
Selection mask for major, minor, and "no chords".
"""
return ((chords['intervals'] == _shorthands['maj']).all(axis=1) |
(chords['intervals'] == _shorthands['min']).all(axis=1) |
(chords['intervals'] == NO_CHORD[-1]).all(axis=1))
def select_sevenths(chords):
"""
Compute a mask that selects all major, minor, seventh, and
"no chords" with a 1, and all other chords with a 0.
Parameters
----------
chords : numpy structured array
Chords to compute the mask for.
Returns
-------
mask : numpy array (boolean)
Selection mask for major, minor, seventh, and "no chords".
"""
return (select_majmin(chords) |
(chords['intervals'] == _shorthands['7']).all(axis=1) |
(chords['intervals'] == _shorthands['min7']).all(axis=1) |
(chords['intervals'] == _shorthands['maj7']).all(axis=1))
def adjust(det_chords, ann_chords):
"""
Adjust the length of detected chord segments to the annotation
length.
Discard detected chords that start after the annotation ended,
and shorten the last detection to fit the last annotation;
discard detected chords that end before the annotation begins,
and shorten the first detection to match the first annotation.
If the detections do not cover the whole annotation span, 'no chord'
filler segments are added at the beginning and/or end.
Parameters
----------
det_chords : numpy structured array
Detected chord segments.
ann_chords : numpy structured array
Annotated chord segments.
Returns
-------
det_chords : numpy structured array
Adjusted detected chord segments.
"""
det_start = det_chords[0]['start']
ann_start = ann_chords[0]['start']
if det_start > ann_start:
filler = np.array((ann_start, det_start, chord('N')),
dtype=CHORD_ANN_DTYPE)
det_chords = np.hstack([filler, det_chords])
elif det_start < ann_start:
det_chords = det_chords[det_chords['end'] > ann_start]
det_chords[0]['start'] = ann_start
det_end = det_chords[-1]['end']
ann_end = ann_chords[-1]['end']
if det_end < ann_end:
filler = np.array((det_end, ann_end, chord('N')),
dtype=CHORD_ANN_DTYPE)
det_chords = np.hstack([det_chords, filler])
elif det_end > ann_end:
det_chords = det_chords[det_chords['start'] < ann_end]
det_chords[-1]['end'] = ann_chords[-1]['end']
return det_chords
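# Worked example (editor's sketch): annotations covering [0, 10) and detections
# covering [5, 12) are adjusted by prepending a 'no chord' filler over [0, 5)
# and truncating the last detection to end at 10.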
def segmentation(ann_starts, ann_ends, det_starts, det_ends):
"""
Compute the normalized Hamming divergence between chord
segmentations as defined in [1]_ (Eqs. 8.37 and 8.38).
Parameters
----------
ann_starts : list or numpy array
Start times of annotated chord segments.
ann_ends : list or numpy array
End times of annotated chord segments.
det_starts : list or numpy array
Start times of detected chord segments.
det_ends : list or numpy array
End times of detected chord segments.
Returns
-------
distance : float
Normalised Hamming divergence between annotated and
detected chord segments.
References
----------
.. [1] Christopher Harte, "Towards Automatic Extraction of Harmony
Information from Music Signals." Dissertation,
Department for Electronic Engineering, Queen Mary University of
London, 2010.
"""
est_ts = np.unique(np.hstack([det_starts, det_ends]))
seg = 0.
for start, end in zip(ann_starts, ann_ends):
dur = end - start
seg_ts = np.hstack([
start, est_ts[(est_ts > start) & (est_ts < end)], end])
seg += dur - np.diff(seg_ts).max()
return seg / (ann_ends[-1] - ann_starts[0])
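# Worked example (editor's sketch): one annotated segment [0, 4) split by a
# detected boundary at 1 gives seg_ts = [0, 1, 4]; the longest overlap is 3,
# the contribution is 4 - 3 = 1, and normalizing by the total length 4 yields
# a divergence of 0.25.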
class ChordEvaluation(EvaluationMixin):
"""
Provide various chord evaluation scores.
Parameters
----------
detections : str
File containing chords detections.
annotations : str
File containing chord annotations.
name : str, optional
Name of the evaluation object (e.g., the name of the song).
"""
METRIC_NAMES = [
('root', 'Root'),
('majmin', 'MajMin'),
('majminbass', 'MajMinBass'),
('sevenths', 'Sevenths'),
('seventhsbass', 'SeventhsBass'),
('segmentation', 'Segmentation'),
('oversegmentation', 'OverSegmentation'),
('undersegmentation', 'UnderSegmentation'),
]
def __init__(self, detections, annotations, name=None, **kwargs):
self.name = name or ''
self.ann_chords = merge_chords(encode(annotations))
self.det_chords = merge_chords(adjust(encode(detections),
self.ann_chords))
self.annotations, self.detections, self.durations = evaluation_pairs(
self.det_chords, self.ann_chords)
self._underseg = None
self._overseg = None
@property
def length(self):
"""Length of annotations."""
return self.ann_chords['end'][-1] - self.ann_chords['start'][0]
@property
def root(self):
"""Fraction of correctly detected chord roots."""
return np.average(score_root(self.detections, self.annotations),
weights=self.durations)
@property
def majmin(self):
"""
Fraction of correctly detected chords that can be reduced to major
or minor triads (plus no-chord). Ignores the bass pitch class.
"""
det_triads = reduce_to_triads(self.detections)
ann_triads = reduce_to_triads(self.annotations)
majmin_sel = select_majmin(ann_triads)
return np.average(score_exact(det_triads, ann_triads),
weights=self.durations * majmin_sel)
@property
def majminbass(self):
"""
Fraction of correctly detected chords that can be reduced to major
or minor triads (plus no-chord). Considers the bass pitch class.
"""
det_triads = reduce_to_triads(self.detections, keep_bass=True)
ann_triads = reduce_to_triads(self.annotations, keep_bass=True)
majmin_sel = select_majmin(ann_triads)
return np.average(score_exact(det_triads, ann_triads),
weights=self.durations * majmin_sel)
@property
def sevenths(self):
"""
Fraction of correctly detected chords that can be reduced to a seventh
tetrad (plus no-chord). Ignores the bass pitch class.
"""
det_tetrads = reduce_to_tetrads(self.detections)
ann_tetrads = reduce_to_tetrads(self.annotations)
sevenths_sel = select_sevenths(ann_tetrads)
return np.average(score_exact(det_tetrads, ann_tetrads),
weights=self.durations * sevenths_sel)
@property
def seventhsbass(self):
"""
Fraction of correctly detected chords that can be reduced to a seventh
tetrad (plus no-chord). Considers the bass pitch class.
"""
det_tetrads = reduce_to_tetrads(self.detections, keep_bass=True)
ann_tetrads = reduce_to_tetrads(self.annotations, keep_bass=True)
sevenths_sel = select_sevenths(ann_tetrads)
return np.average(score_exact(det_tetrads, ann_tetrads),
weights=self.durations * sevenths_sel)
@property
def undersegmentation(self):
"""
Normalized Hamming divergence (directional) between annotations and
detections. Captures missed chord segments.
"""
if self._underseg is None:
self._underseg = 1 - segmentation(
self.det_chords['start'], self.det_chords['end'],
self.ann_chords['start'], self.ann_chords['end'],
)
return self._underseg
@property
def oversegmentation(self):
"""
Normalized Hamming divergence (directional) between detections and
annotations. Captures how fragmented the detected chord segments are.
"""
if self._overseg is None:
self._overseg = 1 - segmentation(
self.ann_chords['start'], self.ann_chords['end'],
self.det_chords['start'], self.det_chords['end'],
)
return self._overseg
@property
def segmentation(self):
"""Minimum of `oversegmentation` and `undersegmentation`."""
return min(self.undersegmentation, self.oversegmentation)
def tostring(self, **kwargs):
"""
Format the evaluation metrics as a human readable string.
Returns
-------
eval_string : str
Evaluation metrics formatted as a human readable string.
"""
ret = (
'{}\n'
' Root: {:5.2f} MajMin: {:5.2f} MajMinBass: {:5.2f} '
'Sevenths: {:5.2f} SeventhsBass: {:5.2f}\n'
' Seg: {:5.2f} UnderSeg: {:5.2f} OverSeg: {:5.2f}'.format(
self.name,
self.root * 100, self.majmin * 100, self.majminbass * 100,
self.sevenths * 100, self.seventhsbass * 100,
self.segmentation * 100, self.undersegmentation * 100,
self.oversegmentation * 100)
)
return ret
class ChordSumEvaluation(ChordEvaluation):
"""
Class for averaging chord evaluation scores, considering the lengths
of the pieces. For a detailed description of the available metrics,
refer to ChordEvaluation.
Parameters
----------
eval_objects : list
Evaluation objects.
name : str, optional
Name to be displayed.
"""
# pylint: disable=super-init-not-called
def __init__(self, eval_objects, name=None):
self.name = name or 'weighted mean for %d files' % len(eval_objects)
self.annotations = np.hstack([e.annotations for e in eval_objects])
self.detections = np.hstack([e.detections for e in eval_objects])
self.durations = np.hstack([e.durations for e in eval_objects])
un_segs = [e.undersegmentation for e in eval_objects]
over_segs = [e.oversegmentation for e in eval_objects]
segs = [e.segmentation for e in eval_objects]
lens = [e.length for e in eval_objects]
self._underseg = np.average(un_segs, weights=lens)
self._overseg = np.average(over_segs, weights=lens)
self._seg = np.average(segs, weights=lens)
self._length = sum(lens)
@property
def length(self):
"""Length of all evaluation objects."""
return self._length
@property
def segmentation(self):
return self._seg
class ChordMeanEvaluation(ChordEvaluation):
"""
Class for averaging chord evaluation scores, averaging piecewise (i.e.
ignoring the lengths of the pieces). For a detailed description of the
available metrics, refer to ChordEvaluation.
Parameters
----------
eval_objects : list
Evaluation objects.
name : str, optional
Name to be displayed.
"""
# pylint: disable=super-init-not-called
def __init__(self, eval_objects, name=None):
self.name = name or 'piecewise mean for %d files' % len(eval_objects)
self.eval_objects = eval_objects
@property
def length(self):
"""Number of evaluation objects."""
return len(self.eval_objects)
@property
def root(self):
return np.mean([e.root for e in self.eval_objects])
@property
def majmin(self):
return np.mean([e.majmin for e in self.eval_objects])
@property
def majminbass(self):
return np.mean([e.majminbass for e in self.eval_objects])
@property
def sevenths(self):
return np.mean([e.sevenths for e in self.eval_objects])
@property
def seventhsbass(self):
return np.mean([e.seventhsbass for e in self.eval_objects])
@property
def undersegmentation(self):
return np.mean([e.undersegmentation for e in self.eval_objects])
@property
def oversegmentation(self):
return np.mean([e.oversegmentation for e in self.eval_objects])
@property
def segmentation(self):
return np.mean([e.segmentation for e in self.eval_objects])
def add_parser(parser):
"""
Add a chord evaluation sub-parser to an existing parser.
Parameters
----------
parser : argparse parser instance
Existing argparse parser object.
Returns
-------
sub_parser : argparse sub-parser instance
Chord evaluation sub-parser.
"""
import argparse
# add chord evaluation sub-parser to the existing parser
p = parser.add_parser(
'chords', help='chord evaluation',
formatter_class=argparse.RawDescriptionHelpFormatter,
description='''
This program evaluates pairs of files containing the chord annotations and
predictions. Suffixes can be given to filter them from the list of files.
Each line represents a chord and must have the following format with values
being separated by whitespace (chord_label follows the syntax as defined
by Harte 2010):
`start_time end_time chord_label`
''')
# set defaults
p.set_defaults(eval=ChordEvaluation, sum_eval=ChordSumEvaluation,
mean_eval=ChordMeanEvaluation, load_fn=load_chords)
# file I/O
evaluation_io(p, ann_suffix='.chords', det_suffix='.chords.txt')
# return the sub-parser and evaluation argument group
return p
|
{"hexsha": "90fadfd6250c35127c876ead5da9eeb19509a614", "size": 32062, "ext": "py", "lang": "Python", "max_stars_repo_path": "venv/lib/python3.6/site-packages/madmom/evaluation/chords.py", "max_stars_repo_name": "metu-sparg/higrid", "max_stars_repo_head_hexsha": "ebee0f35ea1712a01f3fdbaae132127ce4833baf", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2019-04-27T01:19:45.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-21T03:31:01.000Z", "max_issues_repo_path": "venv/lib/python3.9/site-packages/madmom/evaluation/chords.py", "max_issues_repo_name": "nitin-hugar/ForestMIR", "max_issues_repo_head_hexsha": "6fc2731c0364c3fb89661df0c94346d51fb59d56", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-06-08T06:03:51.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-08T06:03:51.000Z", "max_forks_repo_path": "venv/lib/python3.9/site-packages/madmom/evaluation/chords.py", "max_forks_repo_name": "nitin-hugar/ForestMIR", "max_forks_repo_head_hexsha": "6fc2731c0364c3fb89661df0c94346d51fb59d56", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-04-27T01:19:47.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-20T15:15:19.000Z", "avg_line_length": 31.7445544554, "max_line_length": 79, "alphanum_fraction": 0.6229804753, "include": true, "reason": "import numpy", "num_tokens": 8225}
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for uisrnn.py."""
import os
import tempfile
import unittest
import numpy as np
import uisrnn
class TestUISRNN(unittest.TestCase):
"""Test the UISRNN class."""
def test_fit_concatenated_and_predict_single_label(self):
"""Train and test model while training dataset has single label.
Training dataset have already been concatenated.
"""
model_args, training_args, inference_args = uisrnn.parse_arguments()
model_args.rnn_depth = 1
model_args.rnn_hidden_size = 8
model_args.observation_dim = 16
training_args.learning_rate = 0.01
training_args.train_iteration = 50
inference_args.test_iteration = 1
# generate fake training dataset, assume already concatenated
train_sequence = np.random.rand(1000, model_args.observation_dim)
train_cluster_id = np.array(['A'] * 1000)
model = uisrnn.UISRNN(model_args)
# training
model.fit(train_sequence, train_cluster_id, training_args)
# testing, where dataset has less variation than training
test_sequence = np.random.rand(10, model_args.observation_dim) / 10.0
predicted_label = model.predict(test_sequence, inference_args)
self.assertListEqual([0] * 10, predicted_label)
# testing on two sequences
test_sequence1 = np.random.rand(10, model_args.observation_dim) / 10.0
test_sequence2 = np.random.rand(10, model_args.observation_dim) / 10.0
predicted_cluster_ids = model.predict(
[test_sequence1, test_sequence2], inference_args)
self.assertIsInstance(predicted_cluster_ids, list)
self.assertEqual(2, len(predicted_cluster_ids))
self.assertListEqual([0] * 10, predicted_cluster_ids[0])
self.assertListEqual([0] * 10, predicted_cluster_ids[1])
def test_fit_list_and_predict_single_label(self):
"""Train and test model while training dataset has single label.
Training dataset are not concatenated.
"""
model_args, training_args, inference_args = uisrnn.parse_arguments()
model_args.rnn_depth = 1
model_args.rnn_hidden_size = 8
model_args.observation_dim = 16
training_args.learning_rate = 0.01
training_args.train_iteration = 50
inference_args.test_iteration = 1
# generate fake training dataset, as a list
train_sequences = [
np.random.rand(100, model_args.observation_dim),
np.random.rand(200, model_args.observation_dim),
np.random.rand(300, model_args.observation_dim)]
train_cluster_ids = [
np.array(['A'] * 100),
np.array(['A'] * 200),
np.array(['A'] * 300), ]
model = uisrnn.UISRNN(model_args)
# training
model.fit(train_sequences, train_cluster_ids, training_args)
# testing, where dataset has less variation than training
test_sequence = np.random.rand(10, model_args.observation_dim) / 10.0
predicted_label = model.predict(test_sequence, inference_args)
self.assertListEqual([0] * 10, predicted_label)
def test_fit_with_wrong_dim(self):
"""Training dataset has wrong dimension."""
model_args, training_args, _ = uisrnn.parse_arguments()
model_args.rnn_depth = 1
model_args.rnn_hidden_size = 8
model_args.observation_dim = 16
training_args.learning_rate = 0.01
training_args.train_iteration = 5
# generate fake dataset
train_sequence = np.random.rand(1000, 18)
train_cluster_id = np.array(['A'] * 1000)
model = uisrnn.UISRNN(model_args)
# training
with self.assertRaises(ValueError):
model.fit(train_sequence, train_cluster_id, training_args)
def test_predict_with_wrong_dim(self):
"""Testing dataset has wrong dimension."""
model_args, training_args, inference_args = uisrnn.parse_arguments()
model_args.rnn_depth = 1
model_args.rnn_hidden_size = 8
model_args.observation_dim = 16
training_args.learning_rate = 0.01
training_args.train_iteration = 50
# generate fake dataset
train_sequence = np.random.rand(1000, model_args.observation_dim)
train_cluster_id = np.array(['A'] * 1000)
model = uisrnn.UISRNN(model_args)
# training
model.fit(train_sequence, train_cluster_id, training_args)
# testing
test_sequence = np.random.rand(10, 18)
with self.assertRaises(ValueError):
model.predict(test_sequence, inference_args)
def test_save_and_load(self):
"""Save model and load it."""
model_args, _, _ = uisrnn.parse_arguments()
model_args.observation_dim = 16
model_args.transition_bias = 0.5
model_args.sigma2 = 0.05
model = uisrnn.UISRNN(model_args)
# use a fresh directory instead of the insecure, deprecated tempfile.mktemp()
temp_file_path = os.path.join(tempfile.mkdtemp(), 'saved_model.uisrnn')
model.save(temp_file_path)
model.load(temp_file_path)
self.assertEqual(0.5, model.transition_bias)
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "7d83f743d58959574f0f773535ba00b4370bbeb3", "size": 5703, "ext": "py", "lang": "Python", "max_stars_repo_path": "uisrnn/tests/uisrnn_test.py", "max_stars_repo_name": "RoyalStorm/Speaker-Diarization", "max_stars_repo_head_hexsha": "1080449decb535d1eebe8064c2fcf9877da9655c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-05-22T11:58:26.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-22T11:58:26.000Z", "max_issues_repo_path": "uisrnn/tests/uisrnn_test.py", "max_issues_repo_name": "RoyalStorm/Speaker-Diarization", "max_issues_repo_head_hexsha": "1080449decb535d1eebe8064c2fcf9877da9655c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "uisrnn/tests/uisrnn_test.py", "max_forks_repo_name": "RoyalStorm/Speaker-Diarization", "max_forks_repo_head_hexsha": "1080449decb535d1eebe8064c2fcf9877da9655c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-05-09T18:55:51.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-09T18:55:51.000Z", "avg_line_length": 37.2745098039, "max_line_length": 78, "alphanum_fraction": 0.6777134841, "include": true, "reason": "import numpy", "num_tokens": 1297}
|
import numpy as np
from dbspy.core import base
from dbspy.core.analyze import _analyze as analyze
from dbspy.core.utils.indexing import search_nearest, index_nearest
from dbspy.core.utils.neighborhood import neighborhood
from dbspy.core.utils.variance import add_var, sum_var, divide_var
# define
class Conf(analyze.Conf):
w_modes = ('all', 'right', 'left')
def __init__(self, s_radius=0.9, w_radius=2.9, a_radius=4.1, w_mode: str = 'all'):
self.s_radius = s_radius
self.w_radius = w_radius
self.a_radius = a_radius
self.w_mode = w_mode
@staticmethod
def create_process(cluster_block):
return Process(cluster_block)
class Process(base.ElementProcess):
def __init__(self, cluster_block):
super().__init__(process_func, Conf(), cluster_block)
def process_func(sp_result_list, conf: Conf):
return tuple(
compute_sw(x, y, var, conf.s_radius, conf.w_radius, conf.a_radius, conf.w_mode)
for (x, y, var), _ in sp_result_list)
# utils
def compute_sw(x, y, var, s_radius, w_radius, a_radius, w_mode):
center_i = np.argmax(y)
center = x[center_i]
s_range_i = index_nearest(neighborhood(center, s_radius), x)
w_range_i = index_nearest(neighborhood(center, w_radius), x)
a_range_i = (0, len(x)) if a_radius is None \
else index_nearest(neighborhood(center, a_radius), x)
s, s_var = rate_var(y, var, *s_range_i)
w, w_var, w_range_i = compute_w(y, var, w_range_i, a_range_i, w_mode)
return (s, s_var, s_range_i), (w, w_var, w_range_i)
def compute_w(y, var, w_range_i, a_range_i, w_mode):
if w_mode == 'left':
wl_range_i = a_range_i[0], w_range_i[0]
wl, wl_var = rate_var(y, var, *wl_range_i)
return wl, wl_var, wl_range_i
elif w_mode == 'right':
wr_range_i = w_range_i[1], a_range_i[1]
wr, wr_var = rate_var(y, var, *wr_range_i)
return wr, wr_var, wr_range_i
elif w_mode == 'all':
wl, wl_var, wl_range_i = compute_w(y, var, w_range_i, a_range_i, 'left')
wr, wr_var, wr_range_i = compute_w(y, var, w_range_i, a_range_i, 'right')
w, w_var = add_var(wl, wl_var, wr, wr_var)
return w, w_var, (wl_range_i, wr_range_i)
else:
raise TypeError("Unsupported w_mode: " + w_mode)
def surround_nearest(ys, center_i, s):
nearest_radius_i, _ = search_nearest(
0, min(center_i, len(ys) - center_i) + 1, 1, s,
lambda radius_i: np.sum(ys[center_i - radius_i:center_i + radius_i]))
return nearest_radius_i
def rate_var(ys, ys_var, head, tail):
if head is None:
head = 0
if tail is None:
tail = len(ys)
if not (0 <= head <= tail <= len(ys)):
raise RuntimeError("Index out of bounds.")
a, a_var = sum_var(ys, ys_var)
s, s_var = sum_var(ys[head:tail], ys_var[head:tail])
return divide_var(s, s_var, a, a_var)
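# Editor's note (interpretation sketch, assuming the usual Doppler-broadening
# convention): with the spectrum centred on the 511 keV annihilation peak, the
# S parameter computed above is the fraction of total counts within s_radius
# of the peak, while the W parameter is the fraction in the wing window(s)
# between w_radius and a_radius on the left, right or both sides, depending
# on w_mode.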
|
{"hexsha": "ac8e0c3b05967be56bf735adce1195df104054b2", "size": 2929, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/dbspy/core/analyze/sw/_sw.py", "max_stars_repo_name": "ZhengKeli/PositronSpector", "max_stars_repo_head_hexsha": "be0281fe50fe634183b6f239f03b7140c1dc0b7f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-06-18T09:23:42.000Z", "max_stars_repo_stars_event_max_datetime": "2019-06-18T09:23:42.000Z", "max_issues_repo_path": "src/dbspy/core/analyze/sw/_sw.py", "max_issues_repo_name": "ZhengKeli/DBSpy", "max_issues_repo_head_hexsha": "be0281fe50fe634183b6f239f03b7140c1dc0b7f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/dbspy/core/analyze/sw/_sw.py", "max_forks_repo_name": "ZhengKeli/DBSpy", "max_forks_repo_head_hexsha": "be0281fe50fe634183b6f239f03b7140c1dc0b7f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.1868131868, "max_line_length": 87, "alphanum_fraction": 0.6555138272, "include": true, "reason": "import numpy", "num_tokens": 861}
|
# This code is used in the paper
# "Model-based exploration of the frontier of behaviours for deep learning system testing"
# by V. Riccio and P. Tonella
# https://doi.org/10.1145/3368089.3409730
import numpy as np
from random import randint
from typing import List, Tuple
from shapely.geometry import Point
import math
def catmull_rom_spline(p0, p1, p2, p3, num_points=20):
"""p0, p1, p2, and p3 should be (x,y) point pairs that define the Catmull-Rom spline.
num_points is the number of points to include in this curve segment."""
# Convert the points to numpy so that we can do array multiplication
p0, p1, p2, p3 = map(np.array, [p0, p1, p2, p3])
# Calculate t0 to t4
# For knot parametrization
alpha = 0.5
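# alpha = 0 gives the uniform Catmull-Rom spline, alpha = 0.5 the centripetal
# variant used here (which avoids cusps and self-intersections), and
# alpha = 1 the chordal variant.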
def tj(ti, p_i, p_j):
xi, yi = p_i
xj, yj = p_j
return (((xj - xi) ** 2 + (yj - yi) ** 2) ** 0.5) ** alpha + ti
# Knot sequence
t0 = 0
t1 = tj(t0, p0, p1)
t2 = tj(t1, p1, p2)
t3 = tj(t2, p2, p3)
# Only calculate points between p1 and p2
t = np.linspace(t1, t2, num_points)
# Reshape so that we can multiply by the points p0 to p3
# and get a point for each value of t.
t = t.reshape(len(t), 1)
a1 = (t1 - t) / (t1 - t0) * p0 + (t - t0) / (t1 - t0) * p1
a2 = (t2 - t) / (t2 - t1) * p1 + (t - t1) / (t2 - t1) * p2
a3 = (t3 - t) / (t3 - t2) * p2 + (t - t2) / (t3 - t2) * p3
b1 = (t2 - t) / (t2 - t0) * a1 + (t - t0) / (t2 - t0) * a2
b2 = (t3 - t) / (t3 - t1) * a2 + (t - t1) / (t3 - t1) * a3
c = (t2 - t) / (t2 - t1) * b1 + (t - t1) / (t2 - t1) * b2
return c
def catmull_rom_chain(points: List[tuple], num_spline_points=20) -> List:
"""Calculate Catmull-Rom for a chain of points and return the combined curve."""
# The curve cr will contain an array of (x, y) points.
cr = []
for i in range(len(points) - 3):
c = catmull_rom_spline(points[i], points[i + 1], points[i + 2], points[i + 3], num_spline_points)
if i > 0:
c = np.delete(c, [0], axis=0)
cr.extend(c)
return cr
def catmull_rom_2d(points: List[tuple], num_points=20) -> List[tuple]:
if len(points) < 4:
raise ValueError("points should have at least 4 points")
np_points_array = catmull_rom_chain(points, num_points)
return [(p[0], p[1]) for p in np_points_array]
def catmull_rom(points: List[tuple], num_spline_points=20) -> List[tuple]:
if len(points) < 4:
raise ValueError("points should have at least 4 points")
assert all(x[3] == points[0][3] for x in points)
np_point_array = catmull_rom_chain([(p[0], p[1]) for p in points], num_spline_points)
z0 = points[0][2]
width = points[0][3]
return [(p[0], p[1], z0, width) for p in np_point_array]
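# Minimal usage sketch (editor's addition; names are illustrative): smoothing
# four control points into a dense 2D polyline with catmull_rom_2d. With four
# control points only the segment between the 2nd and 3rd point is
# interpolated, so expect num_points (x, y) tuples.
def _example_catmull_rom_2d():
    control_points = [(0.0, 0.0), (1.0, 2.0), (3.0, 3.0), (4.0, 0.0)]
    return catmull_rom_2d(control_points, num_points=20)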
Tuple4F = Tuple[float, float, float, float]
Tuple2F = Tuple[float, float]
class ControlNodesGenerator:
"""Generate random roads given the configuration parameters"""
NUM_INITIAL_SEGMENTS_THRESHOLD = 2
NUM_UNDO_ATTEMPTS = 20
def __init__(self, num_control_nodes=15, max_angle=None, seg_length=None,
num_spline_nodes=None, initial_node=(10.0, 0.0, -28.0, 8.0)):
assert num_control_nodes > 1 and num_spline_nodes > 0
assert 0 <= max_angle <= 360
assert seg_length > 0
assert len(initial_node) == 4
self.num_control_nodes = num_control_nodes
self.num_spline_nodes = num_spline_nodes
self.initial_node = initial_node
self.max_angle = max_angle
self.seg_length = seg_length
def generate_control_nodes(self, num_control_nodes=None) -> List[Tuple4F]:
if not num_control_nodes:
num_control_nodes = self.num_control_nodes
nodes = [self._get_initial_control_node(), self.initial_node]
# +2 is added to reflect the two initial nodes that are necessary for catmull_rom
while len(nodes) < num_control_nodes + 2:
nodes.append(self._get_next_node(nodes[-2], nodes[-1], self._get_next_max_angle(len(nodes) - 2)))
return nodes
def generate(self, num_control_nodes=None):
control_nodes = self.generate_key_control_nodes(num_control_nodes)
return self.control_nodes_to_road(control_nodes)
def generate_key_control_nodes(self, num_control_nodes):
# The original validity check and retry loop were removed; the pipeline is responsible for validating generated roads.
control_nodes = self.generate_control_nodes(num_control_nodes=num_control_nodes)
control_nodes = control_nodes[2:]
return control_nodes
def control_nodes_to_road(self, control_nodes):
nodes = [self.initial_node] + control_nodes
sample_nodes = catmull_rom(nodes, self.num_spline_nodes)
road = [(node[0], node[1]) for node in sample_nodes]
return road
def _get_initial_point(self) -> Point:
return Point(self.initial_node[0], self.initial_node[1])
def _get_initial_control_node(self) -> Tuple4F:
x0, y0, z, width = self.initial_node
x, y = self._get_next_xy(x0, y0, 270)
return x, y, z, width
def _get_next_node(self, first_node, second_node: Tuple4F, max_angle) -> Tuple4F:
v = np.subtract(second_node, first_node)
start_angle = int(np.degrees(np.arctan2(v[1], v[0])))
angle = randint(start_angle - max_angle, start_angle + max_angle)
x0, y0, z0, width0 = second_node
x1, y1 = self._get_next_xy(x0, y0, angle)
return x1, y1, z0, width0
def _get_next_xy(self, x0: float, y0: float, angle: float) -> Tuple2F:
angle_rad = math.radians(angle)
return x0 + self.seg_length * math.cos(angle_rad), y0 + self.seg_length * math.sin(angle_rad)
def _get_next_max_angle(self, i: int, threshold=NUM_INITIAL_SEGMENTS_THRESHOLD) -> float:
if i < threshold or i == self.num_control_nodes - 1:
return 0
else:
return self.max_angle
|
{"hexsha": "ace17a4933cd4d41ec0226d29a12a3bbaac691fd", "size": 5937, "ext": "py", "lang": "Python", "max_stars_repo_path": "roadsearch/utils/catmull.py", "max_stars_repo_name": "ERATOMMSD/roadsearch", "max_stars_repo_head_hexsha": "e5b32b70835a51d56d10547720d90e34ade08564", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "roadsearch/utils/catmull.py", "max_issues_repo_name": "ERATOMMSD/roadsearch", "max_issues_repo_head_hexsha": "e5b32b70835a51d56d10547720d90e34ade08564", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "roadsearch/utils/catmull.py", "max_forks_repo_name": "ERATOMMSD/roadsearch", "max_forks_repo_head_hexsha": "e5b32b70835a51d56d10547720d90e34ade08564", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.8152866242, "max_line_length": 109, "alphanum_fraction": 0.6400538993, "include": true, "reason": "import numpy", "num_tokens": 1806}
|
# -*- coding: utf-8 -*-
"""
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
Parametrization by error (the ``eps`` stopping criterion) is still work in progress.
"""
from time import time
import numpy as np
from scipy import linalg
import matplotlib.pyplot as pl
def unsparse(v, idx, length):
"""Transform a vector-index pair to a dense representation"""
x = np.zeros(length)
x[idx] = v
return x
def orthogonal_mp(D, x, m, eps=None):
"""Orthogonal matching pursuit (OMP)
Solves [1] min || D * gamma - x ||_2 subject to || gamma ||_0 <= m
or [2] min || gamma ||_0 subject to || D * gamma - x || <= eps
Parameters
----------
D, array of shape n_features x n_components
x, vector of length n_features
m, integer <= n_features
eps, float (supersedes m)
"""
residual = x
idx = []
if eps is None:
stopping_condition = lambda: len(idx) == m
else:
stopping_condition = lambda: np.inner(residual, residual) <= eps
while not stopping_condition():
lam = np.abs(np.dot(residual, D)).argmax()
idx.append(lam)
gamma, _, _, _ = linalg.lstsq(D[:, idx], x)
residual = x - np.dot(D[:, idx], gamma)
return gamma, idx
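# Minimal usage sketch (editor's addition; names are illustrative, not part of
# the original script): with a unit-norm random dictionary and enough
# measurements, naive OMP is expected to recover a 3-sparse code exactly.
def _example_omp():
    rng = np.random.RandomState(0)
    D = rng.randn(64, 128)
    D /= np.sqrt((D ** 2).sum(axis=0))  # normalize atoms to unit norm
    gamma_true = np.zeros(128)
    gamma_true[[5, 40, 99]] = [1.0, -2.0, 0.5]
    x = np.dot(D, gamma_true)
    gamma, idx = orthogonal_mp(D, x, m=3)
    return unsparse(gamma, idx, 128)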
def cholesky_omp(D, x, m, eps=None):
if eps is None:
stopping_condition = lambda: it == m # len(idx) == m
else:
stopping_condition = lambda: np.inner(residual, residual) <= eps
alpha = np.dot(x, D)
#first step:
it = 1
lam = np.abs(np.dot(x, D)).argmax()
idx = [lam]
L = np.ones((1,1))
gamma = linalg.lstsq(D[:, idx], x)[0]
residual = x - np.dot(D[:, idx], gamma)
while not stopping_condition():
lam = np.abs(np.dot(residual, D)).argmax()
w = linalg.solve_triangular(L, np.dot(D[:, idx].T, D[:, lam]),
lower=True, unit_diagonal=True)
# should the diagonal be unit in theory? It crashes without it
L = np.r_[np.c_[L, np.zeros(len(L))],
np.atleast_2d(np.append(w, np.sqrt(1 - np.dot(w.T, w))))]
idx.append(lam)
it += 1
#gamma = linalg.solve(np.dot(L, L.T), alpha[idx], sym_pos=True)
# solving the full normal equations is unnecessary; two triangular solves suffice
Ltc = linalg.solve_triangular(L, alpha[idx], lower=True)
gamma = linalg.solve_triangular(L, Ltc, trans=1, lower=True)
residual = x - np.dot(D[:, idx], gamma)
return gamma, idx
def _batch_omp_step(G, alpha_0, m, eps_0=None, eps=None):
idx = []
L = np.ones((1, 1))
alpha = alpha_0
eps_curr = eps_0
delta = 0
it = 0
if eps is None:
stopping_condition = lambda: it == m
else:
stopping_condition = lambda: eps_curr <= eps
while not stopping_condition():
lam = np.abs(alpha).argmax()
if len(idx) > 0:
w = linalg.solve_triangular(L, G[idx, lam],
lower=True, unit_diagonal=True)
L = np.r_[np.c_[L, np.zeros(len(L))],
np.atleast_2d(np.append(w, np.sqrt(1 - np.inner(w, w))))]
idx.append(lam)
it += 1
Ltc = linalg.solve_triangular(L, alpha_0[idx], lower=True)
gamma = linalg.solve_triangular(L, Ltc, trans=1, lower=True)
beta = np.dot(G[:, idx], gamma)
alpha = alpha_0 - beta
if eps is not None:
eps_curr += delta
delta = np.inner(gamma, beta[idx])
eps_curr -= delta
return gamma, idx
def batch_omp(D, X, m, eps=None):
"""Precomputations for batch OMP"""
Alpha = np.dot(D.T, X)
# Eps = np.dot(X.T, X) -- would only be needed for error-based stopping
G = np.dot(D.T, D)
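# G = D^T D is the Gram matrix, computed once and shared across all signals;
# each column of Alpha is D^T x for one signal, which is all that
# _batch_omp_step needs -- it never touches D or x directly.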
func = lambda a: unsparse(*_batch_omp_step(G, a, m), length=D.shape[1])
V = np.apply_along_axis(func, axis=0, arr=Alpha)
return V
def generate_dict(n_features, n_components):
# generate random dictionary
D = np.random.randn(n_components, n_features)
D /= np.apply_along_axis(lambda x: np.sqrt(np.dot(x.T, x)), 0, D)
return D
def generate_data(D, sparsity):
n_features = D.shape[1]
# generate sparse signal
x = np.zeros(n_features)
indices = np.random.randint(0, n_features, sparsity)
x[indices] = np.random.normal(0, 5, sparsity)
return (indices, x), np.dot(D, x)
def bench_plot():
np.random.seed(42)
n_features, n_components = 512, 1024
print "generating dictionary..."
D = generate_dict(n_features, n_components)
sparsities = np.arange(50, 200, 15)
print "generating signals..."
Y = np.zeros((n_components, len(sparsities)))
X = np.zeros((n_features, len(sparsities)))
for i, sp in enumerate(sparsities):
(_, X[:, i]), Y[:, i] = generate_data(D, sp)
print "precomputing..."
G = np.dot(D.T, D)
A = np.dot(D.T, Y)
naive, cholesky, batch = [], [], []
naive_err, cholesky_err, batch_err = [], [], []
for i in range(len(sparsities)):
print("sparsity:", sparsities[i])
t0 = time()
x, idx = orthogonal_mp(D, Y[:, i], sparsities[i])
naive.append(time() - t0)
naive_err.append(linalg.norm(X[:, i] - unsparse(x, idx, n_features)))
t0 = time()
x, idx = cholesky_omp(D, Y[:, i], sparsities[i])
cholesky.append(time() - t0)
cholesky_err.append(linalg.norm(X[:, i] - unsparse(x, idx, n_features)))
t0 = time()
x, idx = _batch_omp_step(G, A[:, i], sparsities[i])
batch.append(time() - t0)
batch_err.append(linalg.norm(X[:, i] - unsparse(x, idx, n_features)))
pl.figure()
pl.subplot(1, 2, 1)
pl.xlabel('Sparsity level')
pl.ylabel('Time')
pl.plot(sparsities, naive, 'o-', label="Naive implementation")
pl.plot(sparsities, cholesky, 'o-', label="Cholesky update OMP")
pl.plot(sparsities, batch, 'o-', label="Batch OMP")
pl.legend()
pl.subplot(1, 2, 2)
pl.xlabel('Sparsity level')
pl.ylabel('Error')
pl.plot(sparsities, naive_err, 'o-', label="Naive implementation")
pl.plot(sparsities, batch_err, 'o-', label="Batch OMP")
pl.plot(sparsities, cholesky_err, 'o-', label="Cholesky update OMP")
pl.legend()
pl.show()
def plot_reconstruction():
# init
np.random.seed(42)
D = generate_dict(n_features=512, n_components=100)
sparsity = 17
(indices, x), y = generate_data(D, sparsity)
pl.subplot(3, 1, 1)
pl.title("Sparse signal")
pl.stem(indices, x[indices])
y_noise = y + np.random.normal(0, 0.05, y.shape)
#x_r, i_r = cholesky_omp(D, y, sparsity)
x_r, i_r = _batch_omp_step(np.dot(D.T, D), np.dot(D.T,y), sparsity)
pl.subplot(3, 1, 2)
pl.title("Recovered signal from noise-free measurements")
pl.stem(i_r, x_r)
#x_r, i_r = cholesky_omp(D, y_noise, sparsity)
x_r, i_r = _batch_omp_step(np.dot(D.T, D), np.dot(D.T,y_noise), sparsity)
pl.subplot(3, 1, 3)
pl.title("Recovered signal from noisy measurements")
pl.stem(i_r, x_r)
pl.show()
if __name__ == '__main__':
bench_plot()
plot_reconstruction()
|
{"hexsha": "7e1334a01d35eede71c0c9d30a26d3f7f5ef981e", "size": 7117, "ext": "py", "lang": "Python", "max_stars_repo_path": "hard-gists/996771/snippet.py", "max_stars_repo_name": "jjhenkel/dockerizeme", "max_stars_repo_head_hexsha": "eaa4fe5366f6b9adf74399eab01c712cacaeb279", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 21, "max_stars_repo_stars_event_min_datetime": "2019-07-08T08:26:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-24T23:53:25.000Z", "max_issues_repo_path": "hard-gists/996771/snippet.py", "max_issues_repo_name": "jjhenkel/dockerizeme", "max_issues_repo_head_hexsha": "eaa4fe5366f6b9adf74399eab01c712cacaeb279", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2019-06-15T14:47:47.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-26T05:02:56.000Z", "max_forks_repo_path": "hard-gists/996771/snippet.py", "max_forks_repo_name": "jjhenkel/dockerizeme", "max_forks_repo_head_hexsha": "eaa4fe5366f6b9adf74399eab01c712cacaeb279", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 17, "max_forks_repo_forks_event_min_datetime": "2019-05-16T03:50:34.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-14T14:35:12.000Z", "avg_line_length": 34.0526315789, "max_line_length": 80, "alphanum_fraction": 0.5759449206, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2066}
|
import json
import os
import time
import numpy as np
from .base import BaseExperiment, OUTPUT_DIRECTORY
import solvers
if not os.path.exists(OUTPUT_DIRECTORY + '/Q'):
os.makedirs(OUTPUT_DIRECTORY + '/Q')
if not os.path.exists(OUTPUT_DIRECTORY + '/Q/pkl'):
os.makedirs(OUTPUT_DIRECTORY + '/Q/pkl')
if not os.path.exists(OUTPUT_DIRECTORY + '/images/Q'):
os.makedirs(OUTPUT_DIRECTORY + '/images/Q')
class QLearnerExperiment(BaseExperiment):
def __init__(self, details, verbose=False):
self.max_episodes = 5000
super(QLearnerExperiment, self).__init__(details, verbose)
def convergence_check_fn(self, solver, step_count):
return solver.has_converged()
def perform(self):
# Q-Learner
self._details.env.reset()
map_desc = self._details.env.unwrapped.desc
grid_file_name = '{}/Q/{}_grid.csv'.format(OUTPUT_DIRECTORY, self._details.env_name)
with open(grid_file_name, 'w') as f:
f.write("params,time,steps,reward_mean,reward_median,reward_min,reward_max,reward_std\n")
alphas = [0.1, 0.5, 0.9]
q_inits = ['random', 0]
epsilons = [0.1, 0.3, 0.5]
epsilon_decays = [0.0001]
discount_factors = np.round(np.linspace(0, 0.9, num=10), 2)
dims = len(discount_factors) * len(alphas) * len(q_inits) * len(epsilons) * len(epsilon_decays)
self.log("Searching Q in {} dimensions".format(dims))
runs = 1
for alpha in alphas:
for q_init in q_inits:
for epsilon in epsilons:
for epsilon_decay in epsilon_decays:
for discount_factor in discount_factors:
t = time.perf_counter()
print("{}/{} Processing Q with alpha {}, q_init {}, epsilon {}, epsilon_decay {},"
" discount_factor {}".format(
runs, dims, alpha, q_init, epsilon, epsilon_decay, discount_factor
))
self.log("{}/{} Processing Q with alpha {}, q_init {}, epsilon {}, epsilon_decay {},"
" discount_factor {}".format(
runs, dims, alpha, q_init, epsilon, epsilon_decay, discount_factor
))
qs = solvers.QLearningSolver(self._details.env, self.max_episodes,
discount_factor=discount_factor,
alpha=alpha,
epsilon=epsilon, epsilon_decay=epsilon_decay,
q_init=q_init, verbose=self._verbose)
stats = self.run_solver_and_collect(qs, self.convergence_check_fn)
self.log("Took {} episodes".format(len(stats.steps)))
stats.to_csv('{}/Q/{}_{}_{}_{}_{}_{}.csv'.format(OUTPUT_DIRECTORY, self._details.env_name,
alpha, q_init, epsilon, epsilon_decay,
discount_factor))
print('Should have written data to .csv')
print('{}/Q/{}_{}_{}_{}_{}_{}.csv'.format(OUTPUT_DIRECTORY, self._details.env_name,
alpha, q_init, epsilon, epsilon_decay,
discount_factor))
stats.pickle_results('{}/Q/pkl/{}_{}_{}_{}_{}_{}_{}.pkl'.format(OUTPUT_DIRECTORY,
self._details.env_name,
alpha, q_init, epsilon,
epsilon_decay,
discount_factor,
'{}'), map_desc.shape,
step_size=self.max_episodes/20.0)
stats.plot_policies_on_map('{}/images/Q/{}_{}_{}_{}_{}_{}_{}.png'.format(OUTPUT_DIRECTORY,
self._details.env_name,
alpha, q_init, epsilon,
epsilon_decay,
discount_factor,
'{}_{}'),
map_desc, self._details.env.colors(),
self._details.env.directions(),
'Q-Learner', 'Episode', self._details,
step_size=self.max_episodes / 20.0,
only_last=True)
# We have extra stats about the episode we might want to look at later
episode_stats = qs.get_stats()
episode_stats.to_csv('{}/Q/{}_{}_{}_{}_{}_{}_episode.csv'.format(OUTPUT_DIRECTORY,
self._details.env_name,
alpha, q_init, epsilon,
epsilon_decay,
discount_factor))
optimal_policy_stats = self.run_policy_and_collect(qs, stats.optimal_policy)
self.log('{}'.format(optimal_policy_stats))
optimal_policy_stats.to_csv('{}/Q/{}_{}_{}_{}_{}_{}_optimal.csv'.format(OUTPUT_DIRECTORY,
self._details.env_name,
alpha, q_init, epsilon,
epsilon_decay,
discount_factor))
with open(grid_file_name, 'a') as f:
f.write('"{}",{},{},{},{},{},{},{}\n'.format(
json.dumps({
'alpha': alpha,
'q_init': q_init,
'epsilon': epsilon,
'epsilon_decay': epsilon_decay,
'discount_factor': discount_factor,
}).replace('"', '""'),
time.perf_counter() - t,
len(optimal_policy_stats.rewards),
optimal_policy_stats.reward_mean,
optimal_policy_stats.reward_median,
optimal_policy_stats.reward_min,
optimal_policy_stats.reward_max,
optimal_policy_stats.reward_std,
))
runs += 1
|
{"hexsha": "6aef0e71798a119026ef87dd71bbd5189cbc6010", "size": 8193, "ext": "py", "lang": "Python", "max_stars_repo_path": "assignment4/experiments/q_learner.py", "max_stars_repo_name": "jonhilgart22/CS-7641-assignments", "max_stars_repo_head_hexsha": "a69eca1f7a6f82f80674d98188d11910b0673c13", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "assignment4/experiments/q_learner.py", "max_issues_repo_name": "jonhilgart22/CS-7641-assignments", "max_issues_repo_head_hexsha": "a69eca1f7a6f82f80674d98188d11910b0673c13", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "assignment4/experiments/q_learner.py", "max_forks_repo_name": "jonhilgart22/CS-7641-assignments", "max_forks_repo_head_hexsha": "a69eca1f7a6f82f80674d98188d11910b0673c13", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 63.511627907, "max_line_length": 121, "alphanum_fraction": 0.3722690101, "include": true, "reason": "import numpy", "num_tokens": 1178}
|
//
// Copyright 2020 Mateusz Loskot <mateusz at loskot dot net>
//
// Distributed under the Boost Software License, Version 1.0
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//
#include <boost/gil/color_convert.hpp>
#include <boost/gil/gray.hpp>
#include <boost/gil/rgb.hpp>
#include <boost/gil/rgba.hpp>
#include <boost/gil/cmyk.hpp>
#include <boost/mp11.hpp>
#include <boost/core/lightweight_test.hpp>
#include <type_traits>
#include "test_utility_output_stream.hpp"
#include "core/pixel/test_fixture.hpp"
namespace gil = boost::gil;
namespace mp11 = boost::mp11;
#ifdef BOOST_GIL_TEST_DEBUG
#include <boost/core/demangle.hpp>
#include <iostream>
namespace {
template <class T>
std::string name() { return boost::core::demangle(typeid(T).name()); }
}
#endif
template <typename ColorSpaces>
struct test_roundtrip_convertible
{
template<typename Src, typename Dst>
void operator()(mp11::mp_list<Src, Dst> const&) const
{
#ifdef BOOST_GIL_TEST_DEBUG
std::cout << "test_lossless_roundtrip:\n"
<< "\tsrc: " << name<Src>() << "\n\tdst: " << name<Dst>() << std::endl;
#endif
using pixel_src_t = gil::pixel<std::uint8_t, gil::layout<Src>>;
using pixel_dst_t = gil::pixel<std::uint8_t, gil::layout<Dst>>;
pixel_src_t src1{};
pixel_dst_t dst1{};
gil::default_color_converter_impl<Src, Dst> convert_to;
convert_to(src1, dst1);
gil::default_color_converter_impl<Dst, Src> convert_from;
pixel_src_t src2{};
convert_from(dst1, src2);
BOOST_TEST_EQ(src1, src2);
}
static void run()
{
boost::mp11::mp_for_each
<
mp11::mp_product<mp11::mp_list, ColorSpaces, ColorSpaces>
>(test_roundtrip_convertible{});
}
};
template <typename ColorSpaces>
struct test_convertible
{
template<typename Src, typename Dst>
void operator()(mp11::mp_list<Src, Dst> const&) const
{
#ifdef BOOST_GIL_TEST_DEBUG
std::cout << "test_all:\n"
<< "\tsrc: " << name<Src>() << "\n\tdst: " << name<Dst>() << std::endl;
#endif
using pixel_src_t = gil::pixel<std::uint8_t, gil::layout<Src>>;
using pixel_dst_t = gil::pixel<std::uint8_t, gil::layout<Dst>>;
pixel_src_t src{};
pixel_dst_t dst{};
gil::default_color_converter_impl<Src, Dst> convert_to;
convert_to(src, dst);
}
static void run()
{
boost::mp11::mp_for_each
<
mp11::mp_product<mp11::mp_list, ColorSpaces, ColorSpaces>
>(test_convertible{});
}
};
int main()
{
test_convertible
<
mp11::mp_list<gil::cmyk_t, gil::gray_t, gil::rgb_t, gil::rgba_t>
>::run();
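// Round-trip convertibility is only asserted where it can be expected to
// hold: presumably conversions involving an alpha channel or cmyk are lossy
// in one direction, so below they are only exercised within a single color
// space, while gray and rgb are also checked against each other.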
test_roundtrip_convertible
<
mp11::mp_list<gil::gray_t, gil::rgb_t>
>::run();
test_roundtrip_convertible<mp11::mp_list<gil::cmyk_t>>::run();
test_roundtrip_convertible<mp11::mp_list<gil::gray_t>>::run();
test_roundtrip_convertible<mp11::mp_list<gil::rgba_t>>::run();
return ::boost::report_errors();
}
|
{"hexsha": "a2ea7fd9ef3413855b07c7c002c9cdbc68ba7cc4", "size": 3093, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "venv/boost_1_73_0/libs/gil/test/core/color/default_color_converter_impl.cpp", "max_stars_repo_name": "uosorio/heroku_face", "max_stars_repo_head_hexsha": "7d6465e71dba17a15d8edaef520adb2fcd09d91e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 153.0, "max_stars_repo_stars_event_min_datetime": "2015-02-03T06:03:54.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T15:06:34.000Z", "max_issues_repo_path": "test/core/color/default_color_converter_impl.cpp", "max_issues_repo_name": "DevanshSGit/gil", "max_issues_repo_head_hexsha": "2b1e4665d15bc5c58d657879ac52890611c7d77e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 429.0, "max_issues_repo_issues_event_min_datetime": "2015-03-22T09:49:04.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T08:32:08.000Z", "max_forks_repo_path": "Libs/boost_1_76_0/libs/gil/test/core/color/default_color_converter_impl.cpp", "max_forks_repo_name": "Antd23rus/S2DE", "max_forks_repo_head_hexsha": "47cc7151c2934cd8f0399a9856c1e54894571553", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 215.0, "max_forks_repo_forks_event_min_datetime": "2015-03-15T09:20:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T12:40:07.000Z", "avg_line_length": 27.3716814159, "max_line_length": 89, "alphanum_fraction": 0.6453281604, "num_tokens": 870}
|
From mathcomp
Require Import ssreflect ssrbool ssrnat eqtype seq ssrfun.
From fcsl
Require Import prelude pred pcm unionmap heap.
From HTT
Require Import stmod stsep stlog stlogR.
Set Implicit Arguments.
Unset Strict Implicit.
Unset Printing Implicit Defensive.
Definition llist (T : Type) := ptr.
Section LList.
Variable T : Type.
Notation llist := (llist T).
Fixpoint lseg (p q : ptr) (xs : seq T): Pred heap :=
if xs is x::xt then
[Pred h | exists r h',
h = p :-> x \+ (p .+ 1 :-> r \+ h') /\ h' \In lseg r q xt]
else [Pred h | p = q /\ h = Unit].
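(* [lseg p q xs] describes a heap holding a linked-list segment that starts
at pointer [p], ends at [q] and stores exactly the elements [xs]: each cell
keeps its value at [p] and its "next" pointer at [p .+ 1]. Taking [q = null]
yields the complete-list predicate [lseq] below. *)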
Lemma lseg_null xs q h :
valid h -> h \In lseg null q xs ->
[/\ q = null, xs = [::] & h = Unit].
Proof.
case: xs=>[|x xs] D /= H; first by case: H=><- ->.
case: H D=>r [h'][->] _.
by rewrite validPtUn.
Qed.
Definition lseq p := lseg p null.
Program Definition insert p x :
{xs}, STsep (lseq p xs, [vfun y => lseq y (x::xs)]) :=
Do (q <-- allocb p 2;
q ::= x;;
ret q).
Next Obligation.
apply: ghR=>i xs H _.
heval=>x1.
rewrite unitR -joinA.
heval.
Qed.
Lemma lseq_null xs h : valid h -> h \In lseq null xs -> xs = [::] /\ h = Unit.
Proof. by move=>D; case/(lseg_null D)=>_ ->. Qed.
Lemma lseq_pos xs p h :
p != null -> h \In lseq p xs ->
exists x, exists r, exists h',
[/\ xs = x :: behead xs,
p :-> x \+ (p .+ 1 :-> r \+ h') = h & h' \In lseq r (behead xs)].
Proof.
case: xs=>[|x xs] /= H [].
- move=>E.
by rewrite E eq_refl in H.
by move=>y [h'][->] H1; heval.
Qed.
Program Definition
remove p : {xs}, STsep (lseq p xs, [vfun y => lseq y (behead xs)]) :=
Do (if p == null then ret p
else pnext <-- !(p .+ 1);
dealloc p;;
dealloc p .+ 1;;
ret pnext).
Next Obligation.
apply: ghR=>i xs H V; case: ifP H=>H1.
- by rewrite (eqP H1); case/(lseq_null V)=>->->; heval.
case/(lseq_pos (negbT H1))=>x [q][h][->] <- /= H2.
by heval; rewrite 2!unitL.
Qed.
End LList.
(*******************************************************************)
(** * Exercises * *)
(*******************************************************************)
(**
---------------------------------------------------------------------
Exercise [Swapping two values]
---------------------------------------------------------------------
Implement in HTT a function that takes as arguments two pointers, [x]
and [y], which point to natural numbers, and swaps their
values. Reflect this effect in the function's specification and verify
it.
Hint: Instead of reading the value of a pointer into a variable [t]
using the [t <-- !p] notation, you might need to specify the _type_ of
the expected value explicitly by using the "de-sugared" version of the
command [t <-- read T p], where [T] is the expected type. This way,
the proof will be more straightforward.
*)
Program Definition swap (x y : ptr):
{(a b : nat)},
STsep (fun h => h = x :-> a \+ y :-> b,
[vfun (_: unit) h => h = x :-> b \+ y :-> a]) :=
Do (vx <-- read nat x;
vy <-- read nat y;
x ::= vy;;
y ::= vx).
Next Obligation.
by apply:ghR=>_ [a b]->/= _; heval.
Qed.
(**
---------------------------------------------------------------------
Exercise [Swapping two values without heval]
---------------------------------------------------------------------
Try to redo the previous exercise _without_ using the automation
provided by the [heval] tactic. The goal of this exercise is to
explore the library of HTT lemmas, mimicking the rules of the
separation logic. You can always display the whole list of the
available lemmas by running the command [Search _ (verify _ _ _)] and
then refine the query for specific programs (e.g., [read] or [write]).
*)
Program Definition swap' (x y : ptr):
{(a b : nat)},
STsep (fun h => h = x :-> a \+ y :-> b,
[vfun _ h => h = x :-> b \+ y :-> a]) :=
Do (vx <-- read nat x;
vy <-- read nat y;
x ::= vy;;
y ::= vx).
Next Obligation.
apply:ghR=>_ [a b]-> _.
apply: bnd_seq; apply: val_read => _.
apply: bnd_seq; apply: val_readR => _.
apply: bnd_seq; apply: val_write => _.
by apply: val_writeR.
Qed.
(**
---------------------------------------------------------------------
Exercise [Imperative procedure for Fibonacci numbers]
---------------------------------------------------------------------
The following program is pseudocode for an efficient imperative
implementation of the function [fib] that computes the [N]th
Fibonacci number.
fun fib (N : nat): nat = {
if N == 0 then ret 0
else if N == 1 then ret 1
else n <-- alloc 2;
x <-- alloc 1;
y <-- alloc 1;
res <--
(fix loop (_ : unit).
n' <-- !n;
y' <-- !y;
if n' == N then ret y'
else tmp <-- !x;
x ::= y';;
x' <-- !x;
y ::= x' + tmp;;
n ::= n' + 1;;
loop(tt))(tt).
dealloc n;;
dealloc x;;
dealloc y;;
ret res
}
Your task will be to prove its correctness with respect to the "pure"
function [fib_pure] (which you should define in plain Coq) as well as
the fact that it starts and ends in an empty heap.
Hint: What is the loop invariant of the recursive computation defined
by means of the [loop] function?
Hint: Try to decompose the reasoning into verification of several code
pieces as in the factorial example and then composing them together in
the "main" function.
*)
Fixpoint fib_pure n :=
if n is n'.+1 then
if n' is n''.+1 then fib_pure n' + fib_pure n'' else 1
else 0.
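(* [fib_pure] is the standard recurrence: [fib_pure 0 = 0], [fib_pure 1 = 1]
and [fib_pure n.+2 = fib_pure n.+1 + fib_pure n]; e.g. [fib_pure 5 = 5]. *)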
Definition fib_inv (n x y : ptr) (N : nat) h : Prop :=
exists n' x' y': nat,
[/\ h = n :-> n'.+1 \+ x :-> x' \+ y :-> y',
x' = fib_pure n' & y' = fib_pure (n'.+1)].
Definition fib_tp n x y N :=
unit ->
STsep (fib_inv n x y N,
[vfun (res : nat) h => fib_inv n x y N h /\ res = fib_pure N]).
Program Definition fib_acc (n x y : ptr) N: fib_tp n x y N :=
Fix (fun (loop : fib_tp n x y N) (_ : unit) =>
Do (n' <-- read nat n;
y' <-- !y;
if n' == N then ret y'
else tmp <-- !x;
x ::= y';;
x' <-- !x;
y ::= x' + tmp;;
n ::= n' + 1;;
loop tt)).
Next Obligation.
move=>h /=[n'][_][_][->{h}]->->.
heval; case X: (n'.+1 == N)=>//.
- by apply: val_ret=>_; move/eqP: X=><-/=.
heval; apply: val_doR=>//. (* This line takes a while due to automation. *)
move=>_.
exists (n'.+1), (fib_pure (n'.+1)), (fib_pure n'.+1.+1).
by rewrite addn1.
Qed.
Program Definition fib N :
STsep ([Pred h | h = Unit],
[vfun res h => res = fib_pure N /\ h = Unit]) :=
Do (
if N == 0 then ret 0
else if N == 1 then ret 1
else n <-- alloc 2;
x <-- alloc 1;
y <-- alloc 1;
res <-- fib_acc n x y N tt;
dealloc n;;
dealloc x;;
dealloc y;;
ret res).
Next Obligation.
move=>_ /= ->.
case N1: (N == 0)=>//; first by move/eqP: N1=>->; apply:val_ret.
case N2: (N == 1)=>//; first by move/eqP: N2=>->; apply:val_ret.
heval=>n; heval=>x; heval=>y; rewrite unitR joinC [x:->_ \+ _]joinC.
apply: bnd_seq=>/=.
apply: val_doR; last first=>//[res h|].
- case; case=>n'[_][_][->{h}]->->->_.
by heval; rewrite !unitR.
by exists 1, 1, 1.
Qed.
(**
---------------------------------------------------------------------
Exercise [Value-returning list beheading]
---------------------------------------------------------------------
Define and verify a function [remove_val] which is similar to [remove],
but, in addition to the "next" pointer, also returns the value stored at
the head of the list before removal. Use Coq's [option] type to
account for the possibility of an empty list in the result.
*)
Program Definition remove_val T p : {xs},
STsep (lseq p xs,
[vfun y h => lseq y.1 (behead xs) h /\
y.2 = (if xs is x::xs' then Some x else None)]) :=
Do (if p == null then ret (p, None)
else x <-- read T p;
pnext <-- !(p .+ 1);
dealloc p;;
dealloc p .+ 1;;
ret (pnext, Some x)).
Next Obligation.
apply: ghR=>i xs H V; case: ifP H=>H1.
- by rewrite (eqP H1); case/(lseq_null V)=>->->; heval.
case/(lseq_pos (negbT H1))=>x [q][h][->] <- /= H2.
by heval; rewrite 2!unitL.
Qed.
(**
---------------------------------------------------------------------
Exercise [Imperative in-place map]
---------------------------------------------------------------------
Define, specify and verify the imperative higher-order function
[list_map] that takes as arguments two types, [S] and [T], a pure
function [f : T -> S] and a head [p] of a single-linked list,
described by a predicate [lseq], and changes the list in place by
applying [f] to each of its elements, while preserving the list's
structure. The specification should reflect the fact that the new
"logical" contents of the single-linked list are an [f] map-image of
the old content.
Hint: The lemmas [lseq_null] and [lseq_pos], proved previously, might
be useful in the proof of the established specification.
Hint: A tail-recursive call can be verified via HTT's [val_do] lemma,
reminiscent of the rule %\Rule{App}%. However, the heap it operates
with should be "massaged" appropriately via PCM's lemmas [joinC] and
[joinA].
Hint: A boolean lemma [negbT] can be useful to switch between
different representations of inequality.
*)
Definition mapT T S (f : T -> S) : Type :=
forall p, {xs}, STsep (@lseq T p xs,
[vfun y h => y = tt /\ @lseq S p (map f xs) h]).
Program Definition list_map T S p (f : T -> S) :
{xs}, STsep (@lseq T p xs,
[vfun y h => y = tt /\ @lseq S p (map f xs) h]) :=
Fix (fun (loop : mapT f) (p : ptr) =>
Do (if p == null
then ret tt
else x <-- read T p;
next <-- (read ptr p .+ 1);
p ::= f x;;
loop next)) p.
Next Obligation.
apply: ghR=>h xs H V.
case X: (p == null).
- apply: val_ret=>_ /=; split=>//.
by move/eqP: X H=>-> /=; case/(lseq_null V)=>->->.
case/negbT /lseq_pos /(_ H): X=>x[next][h'][Z1] Z2 H1; subst h.
heval.
move/validR: V=> V1; apply: (gh_ex (behead xs)).
rewrite [_ \+ h']joinC joinC -joinA; apply: val_do=>//=.
case=>m[_]H2 V2; split=>//.
rewrite [_ \+ p :-> _]joinC joinC.
by rewrite Z1 /=; exists next, m; rewrite joinA.
Qed.
(**
---------------------------------------------------------------------
Exercise [In-place list reversal]
---------------------------------------------------------------------
Let us define the following auxiliary predicates, where [shape_rev]
splits the heap into two disjoint linked lists (by means of the
separating conjunction [#]).
*)
Definition shape_rev T p s := [Pred h | h \In @lseq T p.1 s.1 # @lseq T p.2 s.2].
(**
Then the in-place list reversal is implemented by means of the
recursive function [reverse] with a loop invariant expressed using the
type [revT].
*)
Definition revT T : Type :=
forall p, {ps}, STsep (@shape_rev T p ps, [vfun y => lseq y (rev ps.1 ++ ps.2)]).
Program Definition
reverse T p : {xs}, STsep (@lseq T p xs, [vfun y => lseq y (rev xs)]) :=
Do (let: reverse := Fix (fun (reverse : revT T) p =>
Do (if p.1 == null then ret p.2
else xnext <-- !p.1 .+ 1;
p.1 .+ 1 ::= p.2;;
reverse (xnext, p.1)))
in reverse (p, null)).
(**
You're invited to conduct the verification of [reverse], proving
that it satisfies the given specification.
Hint: It might be a good idea to make use of the previously proved
lemmas [lseq_null] and [lseq_pos].
Hint: Be careful with the logical values of variables passed to the
[gh_ex] lemma before verifying a recursive call of [reverse].
Hint: A verification goal to a function defined via [Fix] can be
reduced via the [val_doR] lemma or similar ones.
Hint: The [shape_rev] predicate is in fact an existential in disguise:
it can be proved by providing appropriate witnesses.
Hint: Lemmas [rev_cons], [cat_rcons] and [cats0] from the [seq]
library will be useful for establishing equalities between lists.
*)
Next Obligation.
apply:ghR=>i[xs1 xs2]; case=>h1[h2][->{i}]/=[H1 H2] V1.
case X: (p == null) H1=>H1.
- apply: val_ret=>/=_; move/eqP: X=>X; subst p.
move/validL: (V1)=>V3; case:(lseq_null V3 H1)=>Z1 Z2; subst xs1 h1=>/=.
by rewrite unitL.
case/negbT /(lseq_pos) /(_ H1): X=>x[next][h'][Exs]Z H3; subst h1.
heval; rewrite -!joinA -!(joinCA h'); apply: (gh_ex (behead xs1, x::xs2)).
apply: val_doR=>//=[V2|].
- exists h', (p :-> x \+ (p .+ 1 :-> p1 \+ h2)); split=>//; split=>//.
by exists p1, h2; rewrite !joinA.
by move=> z m H4 _; rewrite Exs rev_cons cat_rcons.
Qed.
Next Obligation.
apply: ghR=>i xs H V /=; apply: (gh_ex (xs, [::])).
apply: val_doR=>//=[_|]; first by exists i, Unit; rewrite unitR.
by rewrite cats0.
Qed.
|
{"author": "ilyasergey", "repo": "pnp", "sha": "dc32861434e072ed825ba1952cbb7acc4a3a4ce0", "save_path": "github-repos/coq/ilyasergey-pnp", "path": "github-repos/coq/ilyasergey-pnp/pnp-dc32861434e072ed825ba1952cbb7acc4a3a4ce0/solutions/HTT_solutions.v"}
|
from os import listdir
from os.path import isfile, join
import string
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
import nltk
import re
from nltk.tokenize import RegexpTokenizer
#download latest stopwords
nltk.download('stopwords')
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
################# Step 1 ##########################
original_path = "20_newsgroups"
#creating a list of folder names to make valid pathnames later
folders = [f for f in listdir(original_path)]
#creating a 2D list to store list of all files in different folders
files = []
for folder_name in folders:
my_path = join(original_path, folder_name)
files.append([f for f in listdir(my_path)])
#creating a list of pathnames of all the documents
#this would serve to split our dataset into train & test later without any bias
pathname_list = []
for fo in range(len(folders)):
for fi in files[fo]:
pathname_list.append(join(original_path, join(folders[fo], fi)))
#making an array containing the classes each of the documents belong to
Y = []
for folder_name in folders:
folder_path = join(original_path, folder_name)
num_of_files= len(listdir(folder_path))
for i in range(num_of_files):
Y.append(folder_name)
####### step-1 ends ###########
####### step 2 ########################
doc_train, doc_test, Y_train, Y_test = train_test_split(pathname_list, Y, random_state=0, test_size=0.5)
def flatten(list):
new_list = []
for i in list:
for j in i:
new_list.append(j)
return new_list
#function to convert a document into list of words
def doc_tokenize(path):
#load document as a list of lines (and close the file handle afterwards)
f = open(path, 'r')
text_lines = f.readlines()
f.close()
#initializing an array to hold all the words in a document
doc_words = []
tokenizer = RegexpTokenizer(r'\w+')
#traverse over all the lines and tokenize each one with the RegexpTokenizer
for line in text_lines:
doc_words.append(tokenizer.tokenize(line))
return doc_words
def hasNumbers(inputString):
return bool(re.search(r'\d', inputString))
############## step 2 ends ##############
######## step 3 #########################
list_of_words = []
for document in doc_train:
list_of_words.append(flatten(doc_tokenize(document)))
def preprocess_data(words_by_document):
#remove stop words
stp_removed_words = np.array([word for word in words_by_document if not word in stop_words])
#remove pure-digit tokens
dig_removed_words = np.array([word for word in stp_removed_words if not word.isdigit()])
#remove words of length 1
len1_removed_words = np.array([word for word in dig_removed_words if not len(word) == 1])
#remove words of length 2
len2_removed_words = np.array([word for word in len1_removed_words if len(word) > 2])
#remove empty strings
ntstr_removed_words = np.array([str for str in len2_removed_words if str])
#keep only alphanumeric words
alpnum_removed_words = np.array([word for word in ntstr_removed_words if word.isalnum()])
#remove words that have a digit in any of their characters
charnum_removed_words = np.array([word for word in alpnum_removed_words if not hasNumbers(word)])
#convert the preprocessed words to lowercase
preprocessed_words = np.array([word.lower() for word in charnum_removed_words])
return preprocessed_words
flatten_to1D_words = np.asarray(flatten(list_of_words))
list_of_words_train = preprocess_data(flatten_to1D_words)
Xtrain_by_each_doc = [list(preprocess_data(doc_arr)) for doc_arr in list_of_words]
def get_features(preprocessed_words, feature_count):
#get the word frequency or value count
word_counts = nltk.FreqDist(preprocessed_words)
#get the least common words - words that were not repeated at all, were present only once
least_repeated = []
for keyVal in word_counts.keys():
if(word_counts[keyVal] == 1):
least_repeated.append(keyVal)
#get the sorted most common words
unique_words = np.array(list(dict(word_counts.most_common()).keys()))
#remove the least common from unique words list
most_repeated_words = np.array([word for word in unique_words if not word in least_repeated])
#get the featured words based on feature selection count that is set
featured_words = most_repeated_words[0:feature_count]
return featured_words
#extract features from train data
feature_selection_count = 10000
features = get_features(list_of_words_train, feature_selection_count)
######## step 3 ends ##############################
#################### step 4 ##################
# build, for each training document, a dictionary mapping each word to its count in that document, across the whole train set
train_dict = {}
doc_num = 1
for doc_words in Xtrain_by_each_doc:
#print(doc_words)
np_doc_words = np.asarray(doc_words)
w, c = np.unique(np_doc_words, return_counts=True)
train_dict[doc_num] = {}
for i in range(len(w)):
train_dict[doc_num][w[i]] = c[i]
doc_num = doc_num + 1
#now we make a 2D array having the frequency of each word of our feature set in each individual document
X_train = []
for k in train_dict.keys():
row = []
for f in features:
if(f in train_dict[k].keys()):
#if word f is present in the dictionary of the document as a key, its value is copied
#this gives us the no. of occurrences
row.append(train_dict[k][f])
else:
#if not present, the no. of occurences is zero
row.append(0)
X_train.append(row)
#we convert X and Y into numpy arrays for the computations below
X_train = np.asarray(X_train)
Y_train = np.asarray(Y_train)
#do the same to get test_x
test_tokenize = []
for document in doc_test:
test_tokenize.append(flatten(doc_tokenize(document)))
Xtest_by_each_doc = [list(preprocess_data(doc_arr)) for doc_arr in test_tokenize]
test_dict = {}
doc_num = 1
for doc_words in Xtest_by_each_doc:
#print(doc_words)
np_doc_words = np.asarray(doc_words)
w, c = np.unique(np_doc_words, return_counts=True)
test_dict[doc_num] = {}
for i in range(len(w)):
test_dict[doc_num][w[i]] = c[i]
doc_num = doc_num + 1
#now we make a 2D array having the frequency of each word of our feature set in each individual document
X_test = []
for k in test_dict.keys():
row = []
for f in features:
if(f in test_dict[k].keys()):
#if word f is present in the dictionary of the document as a key, its value is copied
#this gives us the no. of occurrences
row.append(test_dict[k][f])
else:
#if not present, the no. of occurences is zero
row.append(0)
X_test.append(row)
X_test = np.asarray(X_test)
Y_test = np.asarray(Y_test)
#################### step 4 ends ##################
############### step 5 ###################
#function to create a training dictionary out of the text files of the training set, consisting of the frequency of
#words in our feature set (vocabulary) in each class or label of the 20 newsgroups
def fitTrainData(X_train, Y_train):
result = {}
classes, counts = np.unique(Y_train, return_counts=True)
for i in range(len(classes)):
curr_class = classes[i]
result["TOTAL_DATA"] = len(Y_train)
result[curr_class] = {}
X_tr_curr = X_train[Y_train == curr_class]
num_features = 10000
for j in range(num_features):
result[curr_class][features[j]] = X_tr_curr[:,j].sum()
result[curr_class]["TOTAL_COUNT"] = counts[i]
return result
#function for calculating the naive Bayesian log probability of each test document being in a particular class
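#in log space the score is log P(c) + sum_j log P(x_j | c); working with logs
#avoids floating-point underflow from multiplying many small probabilities,
#and the "+ 1" / "+ len(keys)" terms below are add-one (Laplace) smoothing so
#that words unseen in a class do not zero out its probability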
def find_class_probablity(dictionary_train, x, curr_class):
output = np.log(dictionary_train[curr_class]["TOTAL_COUNT"]) - np.log(dictionary_train["TOTAL_DATA"])
num_words = len(x)
for j in range(num_words):
if(x[j] in dictionary_train[curr_class].keys()):
xj = x[j]
count_curr_class_equal_xj = dictionary_train[curr_class][xj] + 1
count_curr_class = dictionary_train[curr_class]["TOTAL_COUNT"] + len(dictionary_train[curr_class].keys())
curr_xj_prob = np.log(count_curr_class_equal_xj) - np.log(count_curr_class)
output = output + curr_xj_prob
else:
continue
return output
#helper function for the predict() function that predicts the class or label for one test document at a time
def predict_each_class(dictionary_train, x):
classes = dictionary_train.keys()
best_p = -np.inf
best_class = -1
for curr_class in classes:
if(curr_class == "TOTAL_DATA"):
continue
p_curr_class = find_class_probablity(dictionary_train, x, curr_class)
if(p_curr_class > best_p):
best_p = p_curr_class
best_class = curr_class
return best_class
#predict function that predicts the class or label of test documents using train dictionary made using the fit() function
def predict(dictionary_train, X_test):
Y_pred = []
for x in X_test:
y_predicted = predict_each_class(dictionary_train, x)
Y_pred.append(y_predicted)
#print(Y_pred)
return Y_pred
trainData_dict = fitTrainData(X_train, Y_train)
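#rebuild X_test as the list of distinct words of each test document:
#the predictor below looks words up by name rather than using count vectors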
X_test = []
for key in test_dict.keys():
X_test.append(list(test_dict[key].keys()))
my_predictions = predict(trainData_dict, X_test)
my_predictions = np.asarray(my_predictions)
print(accuracy_score(Y_test, my_predictions))
print(classification_report(Y_test, my_predictions))
############## step 5 ends #######################
|
{"hexsha": "a0638da06fc2dacfa1f5d7d7f42bc107ede8dec5", "size": 10032, "ext": "py", "lang": "Python", "max_stars_repo_path": "MachineLearning/NaiveBayes_NewsGroup/NaiveBayes_NewsGroup.py", "max_stars_repo_name": "sindura93/SchoolProjects", "max_stars_repo_head_hexsha": "13cdca18c7d1711072373b50e25ad84ff124cfa5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-03-06T07:54:16.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-06T22:25:37.000Z", "max_issues_repo_path": "MachineLearning/NaiveBayes_NewsGroup/NaiveBayes_NewsGroup.py", "max_issues_repo_name": "sindura93/SchoolProjects", "max_issues_repo_head_hexsha": "13cdca18c7d1711072373b50e25ad84ff124cfa5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MachineLearning/NaiveBayes_NewsGroup/NaiveBayes_NewsGroup.py", "max_forks_repo_name": "sindura93/SchoolProjects", "max_forks_repo_head_hexsha": "13cdca18c7d1711072373b50e25ad84ff124cfa5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.7128027682, "max_line_length": 130, "alphanum_fraction": 0.6761363636, "include": true, "reason": "import numpy", "num_tokens": 2330}
|
import numpy as np
from collections import defaultdict
results = '/Users/tdmeeste/workspace/inferbeddings/logs/synth/synth_paper_closedform_aggregated.txt'
models_lst = ['DistMult', 'ComplEx']
clauses_lst = ['symm', 'impl', 'impl_inv']
confs_lst = ['0.0']
versions_lst = ['v0', 'v1', 'v2', 'v3', 'v4', 'v5', 'v6', 'v7', 'v8', 'v9']
disc_epochs_lst = ['10']
clause_weight_lst = ['1.0'] #['0.01','0.1','1.0','10.0', '100.0', '1000.0']
def string(s):
return {'TransE': r"\mdl{TransE}",
'DistMult' : r"\mdl{DistM.}",
'ComplEx' : r"\mdl{Compl.}",
'symm': r"\multirow{ 2}{*}{ $\begin{array} {l@{}} r(X_1, X_2) \\ \quad\Rightarrow r(X_2, X_1) \end{array}$ }",
'impl': r"\multirow{ 2}{*}{ $\begin{array} {l@{}} r(X_1, X_2) \\ \quad\Rightarrow s(X_1, X_2) \end{array}$ }",
'impl_inv' : r"\multirow{ 2}{*}{ $\begin{array} {l@{}} r(X_1, X_2) \\ \quad\Rightarrow s(X_2, X_1) \end{array}$ }"
}[s]
# ('symm','DistMult','cube'): "$ 0 $",
# ('symm', 'DistMult','sphere'): "$ 0 $",
# ('symm', 'ComplEx', 'cube'): "$ 2\sum_i \vert r_i^{\text{I}}\vert $",
# ('symm', 'ComplEx', 'sphere'): "$ \max_i \left\{\vert r_i^{\text{I}}\vert\sqrt{2} \right\} $",
# ('impl', 'DistMult', 'cube'): "$ \sum_i\max\{0,\delta_i\} $",
# ('impl', 'DistMult', 'sphere'): "$ \max_i\{\vert\delta_i\vert\} $",
# ('impl', 'ComplEx', 'cube'): "$ \sum_i\max\{0,\delta_i^{\text{R}}\} + \max\{\delta_i^{\text{R}},\vert\delta_i^{\text{I}}\vert\}$",
# ('impl', 'ComplEx', 'sphere'): "$ \max_i\left\{ \sqrt{{\delta_i^{\text{R}}}^2+{\delta_i^{\text{I}}}^2} \right\}$",
# ('impl_inv', 'DistMult', 'cube'): "$ $",
# ('impl_inv', 'DistMult', 'sphere'): "$ $",
# ('impl_inv', 'ComplEx', 'cube'): "$ $",
# ('impl_inv', 'ComplEx', 'sphere'): "$ $"
def id2clause(id):
if 'tag=impl_inv' in id:
return 'impl_inv'  # must be checked before 'impl', which is a substring
elif 'tag=impl' in id:
return 'impl'
for clause in ['symm', 'trans_single', 'trans_diff']:
if 'tag=%s'%clause in id:
return clause
return None
def id2model(id):
for model in models_lst:
if 'model=%s'%model in id:
return model
return None
def id2adv_init_ground(id):
if 'adv_init_ground=True' in id:
return True
elif 'adv_init_ground=False' in id:
return False
else:
return None
def id2conf(id):
for conf in confs_lst:
if '_c%s'%conf in id:
return conf
return None
def id2version(id):
for version in versions_lst:
if '_%s_use'%version in id:
return version
return None
def id2disc_epochs(id):
for disc_epoch in disc_epochs_lst:
if 'disc_epochs=%s_'%disc_epoch in id:
return disc_epoch
return None
def id2use_clauses(id):
return 'use_clauses=True' in id
def id2entity_space(id):
return 'unit_sphere' if 'unit-sphere' in id else 'unit_cube'
def id2clause_weight(id):
for clause_weight in clause_weight_lst:
if 'clause_weight=%s_'%clause_weight in id:
return clause_weight
ID2AUC = {}
with open(results) as rID:
for line in rID:
auc, id = line.strip().split('\t')
clause = id2clause(id)
model = id2model(id)
conf = id2conf(id)
disc_epochs = id2disc_epochs(id)
use_clauses = id2use_clauses(id)
clause_weight = id2clause_weight(id)
entity_space = id2entity_space(id)
version = id2version(id)
if not None in (clause, model, conf, disc_epochs, use_clauses, clause_weight, entity_space, version):
ID2AUC[(clause, model, conf, disc_epochs, use_clauses, clause_weight, entity_space, version)] = float(auc)
else:
print((clause, model, conf, disc_epochs, use_clauses, clause_weight, entity_space, version))
ID2AUC_versions = {}
for (clause, model, conf, disc_epochs, use_clauses, clause_weight, entity_space, version), auc in ID2AUC.items():
if not (clause, model, conf, disc_epochs, use_clauses, clause_weight, entity_space) in ID2AUC_versions:
ID2AUC_versions[(clause, model, conf, disc_epochs, use_clauses, clause_weight, entity_space)] = []
ID2AUC_versions[(clause, model, conf, disc_epochs, use_clauses, clause_weight, entity_space)].append(auc)
ID2MEAN = defaultdict(lambda: -1)
for k in ID2AUC_versions:
ID2MEAN[k] = np.mean(ID2AUC_versions[k])
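# Each configuration was run as ten versions (v0-v9, presumably different
# random seeds); ID2MEAN collapses those runs into a single mean PR-AUC per
# configuration, which is what the generated table reports.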
#construct table:
header = lambda title: r"""
\begin{table}[t!]
\centering
\caption{ """ \
+ title \
+ r""" }
\vspace{1em}
\resizebox{.7\columnwidth}{!}{%
\begin{tabular}{llcc}
\toprule
\multirow{ 2}{*}{Clauses} & \multirow{ 2}{*}{Model} & $\alpha=1$ & $\alpha=1$ \\
&& cube & sphere \\
\midrule
"""
footer = r"""
\bottomrule
\end{tabular}
}
\end{table}
"""
caption = r"PR-AUC results on synthetic datasets for adversarial training with closed form expressions."
for conf in confs_lst:
for clause_weight in clause_weight_lst:
def results_line(clause, model):
res = string(model) + " & "
res_STD_sphere = ID2MEAN[(clause, model, conf, '10', False, '1.0', 'unit_sphere')]
res_STD_cube = ID2MEAN[(clause, model, conf, '10', False, '1.0', 'unit_cube')]
res_CF_sphere = ID2MEAN[(clause, model, conf, '10', True, clause_weight, 'unit_sphere')]
res_CF_cube = ID2MEAN[(clause, model, conf, '10', True, clause_weight, 'unit_cube')]
#resu = [res_STD_sphere, res_STD_cube, res_CF_sphere, res_CF_cube]
resu = [res_CF_cube, res_CF_sphere]
resu = [np.round(1000*res)/10. for res in resu]
maxvalue = max(resu)
resu_str = ["\\textbf{%.1f}"%res if res == maxvalue else "%.1f"%res for res in resu]
res += " & ".join(resu_str)
return res + r" \\"
print(header(caption))
for clause in clauses_lst:
for model in models_lst:
show_clause = string(clause) if model == models_lst[0] else ""
line = show_clause + " & " + results_line(clause, model)
print(line)
if not clause == clauses_lst[-1]:
print(r"\midrule")
print(footer)
|
{"hexsha": "22231c63bc7c5c92ebd676f35bb159798c86c481", "size": 6208, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/synth/create_table_closed_form.py", "max_stars_repo_name": "issca/inferbeddings", "max_stars_repo_head_hexsha": "80492a7aebcdcac21e758514c8af403d77e8594a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 33, "max_stars_repo_stars_event_min_datetime": "2017-07-25T14:31:00.000Z", "max_stars_repo_stars_event_max_datetime": "2019-03-06T09:18:00.000Z", "max_issues_repo_path": "scripts/synth/create_table_closed_form.py", "max_issues_repo_name": "issca/inferbeddings", "max_issues_repo_head_hexsha": "80492a7aebcdcac21e758514c8af403d77e8594a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-08-22T13:49:30.000Z", "max_issues_repo_issues_event_max_datetime": "2017-08-22T13:49:30.000Z", "max_forks_repo_path": "scripts/synth/create_table_closed_form.py", "max_forks_repo_name": "issca/inferbeddings", "max_forks_repo_head_hexsha": "80492a7aebcdcac21e758514c8af403d77e8594a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2017-10-05T08:50:45.000Z", "max_forks_repo_forks_event_max_datetime": "2019-04-18T12:40:56.000Z", "avg_line_length": 32.8465608466, "max_line_length": 132, "alphanum_fraction": 0.5972938144, "include": true, "reason": "import numpy", "num_tokens": 1926}
|
\section{Introduction}
Introduction to the topic and a brief overview of the paper's structure.
|
{"hexsha": "a931b37e15c1616a5284cb86deb6f8a65881b453", "size": 82, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "paper/sections/intro.tex", "max_stars_repo_name": "0ortmann/tex-template", "max_stars_repo_head_hexsha": "808f156ee36c0a5a71661941c990db5d6b169e41", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-12-14T11:21:08.000Z", "max_stars_repo_stars_event_max_datetime": "2017-12-14T11:21:08.000Z", "max_issues_repo_path": "paper/sections/intro.tex", "max_issues_repo_name": "0ortmann/tex-template", "max_issues_repo_head_hexsha": "808f156ee36c0a5a71661941c990db5d6b169e41", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "paper/sections/intro.tex", "max_forks_repo_name": "0ortmann/tex-template", "max_forks_repo_head_hexsha": "808f156ee36c0a5a71661941c990db5d6b169e41", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.3333333333, "max_line_length": 58, "alphanum_fraction": 0.7926829268, "num_tokens": 17}
|
println("£")
println("\302\243"); # works if your terminal is utf-8
|
{"hexsha": "50965815a3a8abcbc81db9028fad67c2bb1556c3", "size": 68, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "lang/Julia/terminal-control-display-an-extended-character.jl", "max_stars_repo_name": "ethansaxenian/RosettaDecode", "max_stars_repo_head_hexsha": "8ea1a42a5f792280b50193ad47545d14ee371fb7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lang/Julia/terminal-control-display-an-extended-character.jl", "max_issues_repo_name": "ethansaxenian/RosettaDecode", "max_issues_repo_head_hexsha": "8ea1a42a5f792280b50193ad47545d14ee371fb7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lang/Julia/terminal-control-display-an-extended-character.jl", "max_forks_repo_name": "ethansaxenian/RosettaDecode", "max_forks_repo_head_hexsha": "8ea1a42a5f792280b50193ad47545d14ee371fb7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.6666666667, "max_line_length": 54, "alphanum_fraction": 0.6617647059, "num_tokens": 20}
|
#include <boost/fusion/container.hpp>
|
{"hexsha": "cdadc164910bade89d3c7c659a03e71f48dc331e", "size": 38, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_fusion_container.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_fusion_container.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_fusion_container.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 19.0, "max_line_length": 37, "alphanum_fraction": 0.7894736842, "num_tokens": 8}
|
# coding:utf-8
import os
import logging
import json
from collections import Counter, OrderedDict
from itertools import product
import copy
import numpy as np
from cotk.metric import MetricChain, BleuCorpusMetric
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from utils import debug, try_cache, cuda_init, Storage, padding_id
from utils.cotk_private.dataloader.predefined_language_generation import PredefinedStyleTransfer
from utils.cotk_private.metric.name_changer import NameChanger
from classifier import Classifier
from languagemodel import LanguageModel
from itertools import zip_longest
import torch
def zip_equal(*iterables):
sentinel = object()
for combo in zip_longest(*iterables, fillvalue=sentinel):
if sentinel in combo:
raise ValueError('Iterables have different lengths')
yield combo
def run(*argv):
import argparse
import time
from utils import Storage
parser = argparse.ArgumentParser(description='evaluation code')
args = Storage()
parser.add_argument('--dataid', type=str, default='../data/yelp')
parser.add_argument('--dev0', type=str, default=None)
parser.add_argument('--dev1', type=str, default=None)
parser.add_argument('--test0', type=str, default=None)
parser.add_argument('--test1', type=str, default=None)
parser.add_argument('--output', type=str, default="result.txt")
parser.add_argument('--clsrestore', type=str, default="cls_yelp_best")
parser.add_argument('--lmrestore', type=str, default="lm_yelp_best")
parser.add_argument('--allow_unk', action="store_true")
parser.add_argument('--cache', action='store_true',
help='Cache the dataloader')
parser.add_argument('--debug', action='store_true',
help='Enter debug mode (using ptvsd).')
parser.add_argument('--seed', type=int, default=0,
help='Specify random seed. Default: 0')
parser.add_argument('--name', type=str, default=None)
cargs = parser.parse_args(argv)
# Editing following arguments to bypass command line.
cuda_init(0, True)
args.dataid = cargs.dataid
args.dev0 = cargs.dev0
args.dev1 = cargs.dev1
args.test0 = cargs.test0
args.test1 = cargs.test1
args.debug = cargs.debug
args.cache = cargs.cache
args.seed = cargs.seed
args.clsrestore = cargs.clsrestore
args.output = cargs.output
args.allow_unk = cargs.allow_unk
args.lmrestore = cargs.lmrestore
if args.dev0 is None and args.dev1 is None and args.test0 is None and args.test1 is None:
args.test0 = f"./output/{cargs.name}/best.neg2pos.txt"
args.test1 = f"./output/{cargs.name}/best.pos2neg.txt"
import random
random.seed(cargs.seed)
import torch
torch.manual_seed(cargs.seed)
import numpy as np
np.random.seed(cargs.seed)
eval_main(args)
def eval_main(args):
logging.basicConfig(\
filename=0,\
level=logging.DEBUG,\
format='%(asctime)s %(filename)s[line:%(lineno)d] %(message)s',\
datefmt='%H:%M:%S')
if args.debug:
debug()
data_class = PredefinedStyleTransfer
data_arg = Storage()
data_arg.file_id = args.dataid
data_arg.max_sent_length = None
data_arg.fields = {
"train_0": OrderedDict([("sent", "SentenceDefault")]),
"train_1": OrderedDict([("sent", "SentenceDefault")]),
"dev_0": OrderedDict([("sent", "SentenceDefault")]),
"dev_1": OrderedDict([("sent", "SentenceDefault")]),
"test_0": OrderedDict([("sent", "SentenceDefault"), ("ref", "SessionDefault")]),
"test_1": OrderedDict([("sent", "SentenceDefault"), ("ref", "SessionDefault")]),
}
def load_dataset(data_arg):
dm = data_class(**data_arg)
return dm
if args.cache:
dm = try_cache(load_dataset, (data_arg,),
"./cache", "eval" + data_class.__name__)
else:
dm = load_dataset(data_arg)
import run_cls
cls_param = Storage()
cls_param.args = run_cls.run("--dryrun", "--restore", args.clsrestore, "--cuda", "--dataid", args.dataid)
cls_param.volatile = Storage()
cls_param.volatile.load_exclude_set = []
cls_param.volatile.restoreCallback = None
cls_param.volatile.dm = dm
classifier = Classifier(cls_param)
import run_lm
lm_param = Storage()
lm_param.args = run_lm.run("--dryrun", "--restore", args.lmrestore, "--dataid", args.dataid)
lm_param.volatile = Storage()
lm_param.volatile.load_exclude_set = []
lm_param.volatile.restoreCallback = None
lm_param.volatile.dm = dm
lm = LanguageModel(lm_param)
# read target file
def read_target(filename):
sent_str = []
sent = []
unk_num = 0
with open(filename, "r") as f:
for line in f:
sstr = line.strip().lower()
sent_str.append(sstr)
sent_id = dm.convert_sentence_to_ids(sstr)
if dm.unk_id in sent_id:
unk_num += 1
print("unk detected")
print(sstr)
print(dm.convert_ids_to_sentence(sent_id))
if not args.allow_unk:
raise RuntimeError("unk in generated file")
sent.append(sent_id)
print("unk percent:%.4f" % (unk_num / len(sent)))
return sent_str, sent
def calc(filename, field_key, domain, haveref=True):
sent_str, sent = read_target(filename)
# acc
predict_class = []
for chunk_str in [sent_str[i:i + 64] for i in range(0, len(sent_str), 64)]:
predict_class.extend(classifier.predict_str(chunk_str))
acc = (np.array(predict_class) == 1 - domain).astype(float).mean()
# ppl
losses_arr = []
token_nums_arr = []
for chunk_str in [sent_str[i:i + 64] for i in range(0, len(sent_str), 64)]:
losses, target_nums = lm.predict_str(chunk_str)
losses_arr.append(losses)
token_nums_arr.append(target_nums)
metric = MetricChain()
# metric.add_metric(NgramPerplexityMetric(dm, dm.get_all_batch(f"train_{1-domain}")['sent_allvocabs'], 4, gen_key="sent"))
if haveref:
metric.add_metric(BleuCorpusMetric(dm, 4, reference_num=4))
metric.add_metric(NameChanger(BleuCorpusMetric(dm, 4, reference_num=1, reference_allvocabs_key="sent_allvocabs"), "self"))
for data, chunk_sent in zip_equal(dm.get_batches(f"{field_key}_{domain}", 64, shuffle=False),\
[sent[i:i + 64] for i in range(0, len(sent), 64)]):
data["gen"] = padding_id(chunk_sent)[0].transpose(1, 0)
metric.forward(data)
mres = Storage(metric.close())
res = Storage()
res.acc = acc
res.self_bleu = mres.selfbleu
res.ppl = np.exp(np.sum(np.concatenate(losses_arr)) / np.sum(np.concatenate(token_nums_arr)))
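# res.ppl above is corpus-level perplexity: exp of the total LM loss over the
# total token count across all chunks. getmean below combines two scores
# (accuracy and a BLEU variant): g2 is their geometric mean and h2 their
# harmonic mean -- note that product * n / sum equals the harmonic mean only
# for n == 2 inputs, which is how it is called here.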
def getmean(*param):
summ = np.array(param) + 1e-10
g2 = np.exp(np.mean(np.log(summ)))
h2 = np.exp(np.sum(np.log(summ))) * len(summ) / np.sum(summ)
return g2, h2
res.self_g2, res.self_h2 = getmean(res.acc, res.self_bleu)
if haveref:
res.ref_bleu = mres.bleu
res.g2, res.h2 = getmean(res.acc, res.ref_bleu)
res.overall = res.g2
else:
res.overall = res.self_g2
return res
with open(args.output, "w") as g:
metric_names = ["acc", "self_bleu", "ref_bleu", "ppl", "self_g2", "self_h2", "g2", "h2", "overall"]
# print("\t".join(["domain"] + metric_names))
g.write("\t".join(["domain"] + metric_names) + "\n")
for set_name, domain in product(["dev", "test"], [0, 1]):
filepath = args[f"{set_name}{domain}"]
print(f"evaluating {filepath}...")
if filepath:
with torch.no_grad():
res = calc(filepath, set_name, domain)
output_value = [f"{set_name}{domain}"] + [(("%.3f" % res[key]) if key in res else "n/a") for key in metric_names]
# print("\t".join(output_value))
g.write("\t".join([x for x in output_value]) + "\n")
print(f"output to {args.output}")
with open(args.output) as g:
for line in g:
print(line.strip())
if __name__ == '__main__':
import sys
run(*sys.argv[1:])
|
{"hexsha": "212f963959ee523ae759732ed0b0b45a45627a03", "size": 7493, "ext": "py", "lang": "Python", "max_stars_repo_path": "eval/eval_yelp.py", "max_stars_repo_name": "thu-coai/NAST", "max_stars_repo_head_hexsha": "ef765d412f6e9a2ebdcc7d62c99ec2e883d0e17a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2021-06-04T07:31:46.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-11T03:51:44.000Z", "max_issues_repo_path": "eval/eval_yelp.py", "max_issues_repo_name": "thu-coai/NAST", "max_issues_repo_head_hexsha": "ef765d412f6e9a2ebdcc7d62c99ec2e883d0e17a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-09-13T16:33:05.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-26T09:20:54.000Z", "max_forks_repo_path": "eval/eval_yelp.py", "max_forks_repo_name": "thu-coai/NAST", "max_forks_repo_head_hexsha": "ef765d412f6e9a2ebdcc7d62c99ec2e883d0e17a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.885106383, "max_line_length": 124, "alphanum_fraction": 0.7030561858, "include": true, "reason": "import numpy", "num_tokens": 2101}
|
/*!
* Copyright (c) 2021 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for
* license information.
*/
#include "FreeForm2Result.h"
#include <boost/lexical_cast.hpp>
#include <iomanip>
#include <sstream>
#include "FreeForm2Assert.h"
#include "FreeForm2Tokenizer.h"
#include "ResultIteratorImpl.h"
#include "TypeImpl.h"
#include "TypeUtil.h"
using namespace FreeForm2;
using namespace boost;
FreeForm2::Result::~Result() {}
int FreeForm2::Result::Compare(const Result &p_other) const {
if (!GetType().GetImplementation().IsSameAs(
p_other.GetType().GetImplementation(), true)) {
std::ostringstream err;
err << "Mismatched compare between " << GetType() << " and "
<< p_other.GetType();
throw std::runtime_error(err.str());
}
switch (GetType().Primitive()) {
case Type::Bool: {
bool left = GetBool();
bool right = p_other.GetBool();
if (left == right) {
return 0;
} else if (right) {
return -1;
} else {
return 1;
}
break;
}
case Type::Int: {
Result::IntType left = GetInt();
Result::IntType right = p_other.GetInt();
if (left < right) {
return -1;
} else if (left > right) {
return 1;
} else {
return 0;
}
break;
}
case Type::UInt64: {
Result::UInt64Type left = GetUInt64();
Result::UInt64Type right = p_other.GetUInt64();
if (left < right) {
return -1;
} else if (left > right) {
return 1;
} else {
return 0;
}
break;
}
case Type::Int32: {
int left = GetInt32();
int right = p_other.GetInt32();
if (left < right) {
return -1;
} else if (left > right) {
return 1;
} else {
return 0;
}
break;
}
case Type::UInt32: {
unsigned int left = GetUInt32();
unsigned int right = p_other.GetUInt32();
if (left < right) {
return -1;
} else if (left > right) {
return 1;
} else {
return 0;
}
break;
}
case Type::Float: {
return CompareFloat(GetFloat(), p_other.GetFloat());
}
case Type::Array: {
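      // Arrays compare element-wise; when one array is a prefix of the
      // other, the shorter one compares less (lexicographic order).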
ResultIterator leftPos = BeginArray();
ResultIterator leftEnd = EndArray();
ResultIterator rightPos = p_other.BeginArray();
ResultIterator rightEnd = p_other.EndArray();
while (leftPos != leftEnd && rightPos != rightEnd) {
int cmp = leftPos->Compare(*rightPos);
if (cmp != 0) {
return cmp;
}
++leftPos;
++rightPos;
}
if (leftPos == leftEnd && rightPos == rightEnd) {
return 0;
} else if (leftPos == leftEnd) {
return -1;
} else {
return 1;
}
break;
}
default: {
std::ostringstream err;
err << "Comparison of unknown type '" << GetType() << "'";
throw std::runtime_error(err.str());
}
}
}
void FreeForm2::Result::Print(std::ostream &p_out) const {
switch (GetType().Primitive()) {
case Type::Bool: {
p_out << (GetBool() ? "true" : "false");
break;
}
case Type::Int: {
p_out << GetInt();
break;
}
case Type::UInt64: {
p_out << GetUInt64();
break;
}
case Type::Int32: {
p_out << GetInt32();
break;
}
case Type::UInt32: {
p_out << GetUInt32();
break;
}
case Type::Float: {
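      // 9 significant digits are enough to round-trip an IEEE-754
      // single-precision float through text.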
const std::streamsize savePrecision = p_out.precision();
p_out << std::setprecision(9) << GetFloat();
// Restore precision.
p_out << std::setprecision(savePrecision);
break;
}
case Type::Array: {
p_out << "[";
ResultIterator end = EndArray();
bool first = true;
for (ResultIterator iter = BeginArray(); iter != end; ++iter) {
p_out << (first ? "" : " ");
first = false;
iter->Print(p_out);
}
p_out << "]";
break;
}
default: {
std::ostringstream err;
err << "Printing unknown type '" << GetType() << "'";
throw std::runtime_error(err.str());
}
}
}
std::ostream &FreeForm2::operator<<(std::ostream &p_out,
const Result &p_result) {
p_result.Print(p_out);
return p_out;
}
int FreeForm2::Result::CompareFloat(FloatType p_left, FloatType p_right) {
// This value was chosen to be compatible with the old freeforms.
const Result::FloatType relativeError = 1E-6F;
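  // E.g. 1000000.0f and 1000000.5f compare equal here: the relative
  // difference (~5e-7) is within the 1e-6 tolerance.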
bool equal = false;
// Check for identical values (this is needed to compare infinity).
if (p_left == p_right) {
return 0;
}
// Check whether right operand is small.
if (p_right < relativeError && p_right > -relativeError) {
// Right is small, so they're equal iff left is small.
equal = (p_left < relativeError && p_left > -relativeError);
} else {
// Right isn't small, so check the difference between the two
// (related to right operand). They're equal iff the difference is
// small.
const Result::FloatType diff = (p_left - p_right) / p_right;
equal = (diff < relativeError && diff > -relativeError);
}
if (equal) {
return 0;
} else if (p_left < p_right) {
return -1;
} else {
return 1;
}
}
FreeForm2::ResultIterator::ResultIterator(
std::auto_ptr<ResultIteratorImpl> p_impl)
: m_impl(p_impl) {}
FreeForm2::ResultIterator::ResultIterator(const ResultIterator &p_other)
: m_impl(p_other.m_impl->Clone()) {}
FreeForm2::ResultIterator::~ResultIterator() {}
void FreeForm2::ResultIterator::increment() { return m_impl->increment(); }
void FreeForm2::ResultIterator::decrement() { return m_impl->decrement(); }
bool FreeForm2::ResultIterator::equal(const ResultIterator &p_other) const {
return m_impl->Position() == p_other.m_impl->Position() &&
m_impl->ElementSize() == p_other.m_impl->ElementSize();
}
const Result &FreeForm2::ResultIterator::dereference() const {
return m_impl->dereference();
}
void FreeForm2::ResultIterator::advance(std::ptrdiff_t p_distance) {
m_impl->advance(p_distance);
}
std::ptrdiff_t FreeForm2::ResultIterator::distance_to(
const ResultIterator &p_other) const {
FF2_ASSERT(m_impl->ElementSize() == p_other.m_impl->ElementSize());
return p_other.m_impl->Position().second - m_impl->Position().second;
}
|
{"hexsha": "59cffa6c795a366c4169a9b0cefca1372ba2e3ad", "size": 6445, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/DynamicRank.FreeForm.Library/libs/External/FreeForm2Result.cpp", "max_stars_repo_name": "ltxtech/lightgbm-transform", "max_stars_repo_head_hexsha": "ca3bdaae4e594c1bf74503c5ec151f2b794f855c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17.0, "max_stars_repo_stars_event_min_datetime": "2021-11-02T13:52:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-10T07:43:38.000Z", "max_issues_repo_path": "src/DynamicRank.FreeForm.Library/libs/External/FreeForm2Result.cpp", "max_issues_repo_name": "ltxtech/lightgbm-transform", "max_issues_repo_head_hexsha": "ca3bdaae4e594c1bf74503c5ec151f2b794f855c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2022-01-23T16:15:40.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-07T15:54:34.000Z", "max_forks_repo_path": "src/DynamicRank.FreeForm.Library/libs/External/FreeForm2Result.cpp", "max_forks_repo_name": "ltxtech/lightgbm-transform", "max_forks_repo_head_hexsha": "ca3bdaae4e594c1bf74503c5ec151f2b794f855c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2022-01-21T09:42:59.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-21T09:42:59.000Z", "avg_line_length": 24.138576779, "max_line_length": 76, "alphanum_fraction": 0.5869666408, "num_tokens": 1650}
|
!
! CalculiX - A 3-dimensional finite element program
! Copyright (C) 1998-2021 Guido Dhondt
!
! This program is free software; you can redistribute it and/or
! modify it under the terms of the GNU General Public License as
!     published by the Free Software Foundation (version 2);
!
!
! This program is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License
! along with this program; if not, write to the Free Software
! Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
!
subroutine e_c3d_v1rhs(co,nk,konl,lakonl,p1,p2,omx,bodyfx,
& nbody,ff,nelem,nmethod,rhcon,nrhcon,ielmat,ntmat_,vold,vcon,
& dtimef,mi,ttime,time,istep,shcon,nshcon,
& iturbulent,nelemface,sideface,nface,compressible,
& ipvar,var,ipvarf,varf,ithermal,ipface,nelemload,
& sideload,xload,nload,ifreesurface,depth,dgravity,cocon,
& ncocon,ipompc,nodempc,coefmpc,nmpc,ikmpc,ilmpc,iinc,
& theta1,bb,physcon,reltimef,xloadold)
!
!     computation of the velocity element matrix and rhs for the
!     element with the topology in konl: step 1 (correction *)
!
! ff: rhs
!
implicit none
!
character*1 sideface(*)
character*8 lakonl
character*20 sideload(*)
!
integer konl(8),ifaceq(8,6),nk,nbody,nelem,ithermal(*),mi(*),
& i,j,k,i1,i2,j1,nmethod,ii,jj,id,ipointer,idf,
& ig,kk,nrhcon(*),ielmat(mi(3),*),nshcon(*),ntmat_,nope,
& nopes,imat,iturbulent,compressible,ipface(*),nelemload(2,*),
& mint2d,mint3d,ifacet(6,4),ifacew(8,5),istep,nload,
& k1,nelemface(*),nface,ipvar(*),index,ipvarf(*),igl,
& ifreesurface,ncocon(2,*),ipompc(*),nodempc(3,*),nmpc,
& ikmpc(*),ilmpc(*),iinc,iscale,jltyp,nfield,iemchange,
& iflux,node
!
real*8 co(3,*),shp(4,8),dvi,p1(3),p2(3),x2d3,dep,dvel,
& bodyfx(3),ff(0:mi(2),8),bf(3),q(3),c1,c2,xsjmod,dtimef2,fric,
& rhcon(0:1,ntmat_,*),vel(3),div,shcon(0:3,ntmat_,*),cp,
& voldl(0:mi(2),8),xsj2(3),shp2(7,8),omcor,shps(8),ctuf,
& vold(0:mi(2),*),om,omx,const,xsj,temp,tt(3),areaj,enthalpy,
& vcon(nk,0:mi(2)),vconl(0:mi(2),8),rho,shpv(8),t(3,3),
& cvel(3),vkl(3,3),corio(3),xkin,umttot,xload(2,*),f1,f1m,
& xtuf,vort,y,f2,unt,a1,arg2,arg1,gamm,
& var(*),varf(*),tu,depth(*),dgravity,gamm1,
& beta,beta1,beta2,betas,bfv,c3,c4,cdktuf,ckin,cond,gamm2,
& cocon(0:6,ntmat_,*),coefmpc(*),press,skin,skin1,skin2,stuf,
& stuf1,stuf2,theta1,tuk,turbprandl,tut,umsk,umst,umt,xkappa,
& xtu,tvar(2),rhovel(3),dtem(3),dpress(3),aux(3),coords(3),
& bb(3,8),tv(3),pgauss(3),physcon(*),dxkin(3),dxtuf(3),
& dxsj2,field,reltimef,sinktemp,tvn,xloadold(2,*),tvnk,tvnt,
& xsjmodk,xsjmodt,tvk(3),tvt(3)
!
real*8 dtimef,ttime,time
!
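!     local face-node connectivity tables: ifaceq for the 6 faces of
!     8/20-node hexahedra, ifacet for the 4 faces of 4/10-node
!     tetrahedra and ifacew for the 5 faces of 6/15-node wedges
!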
ifaceq=reshape((/4,3,2,1,11,10,9,12,
& 5,6,7,8,13,14,15,16,
& 1,2,6,5,9,18,13,17,
& 2,3,7,6,10,19,14,18,
& 3,4,8,7,11,20,15,19,
& 4,1,5,8,12,17,16,20/),(/8,6/))
ifacet=reshape((/1,3,2,7,6,5,
& 1,2,4,5,9,8,
& 2,3,4,6,10,9,
& 1,4,3,8,10,7/),(/6,4/))
ifacew=reshape((/1,3,2,9,8,7,0,0,
& 4,5,6,10,11,12,0,0,
& 1,2,5,4,7,14,10,13,
& 2,3,6,5,8,15,11,14,
& 4,6,3,1,12,15,9,13/),(/8,5/))
!
tvar(1)=time
tvar(2)=ttime+dtimef
!
! parameter to express 2.d0/3.d0
!
x2d3=2.d0/3.d0
dtimef2=dtimef/2.d0
!
! turbulence constants (SST: iturbulent=4)
!
if(iturbulent.gt.0) then
a1=0.31d0
if(iturbulent.eq.4) then
skin1=0.85d0
else
skin1=0.5d0
endif
skin2=1.d0
stuf1=0.5d0
stuf2=0.856d0
beta1=0.075d0
beta2=0.0828d0
betas=0.09d0
xkappa= 0.41d0
!
gamm1=beta1/betas-stuf1*xkappa*xkappa/dsqrt(betas)
gamm2=beta2/betas-stuf2*xkappa*xkappa/dsqrt(betas)
!
xtu=10.d0*physcon(5)/physcon(8)
xtu=xtu*xtu
c3=betas*xtu*10.d0**(-3.5d0)
endif
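!
!     the constants above are the standard closure coefficients of
!     Menter's BSL/SST two-equation turbulence models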
!
imat=ielmat(1,nelem)
!
if(lakonl(4:4).eq.'4') then
nope=4
mint3d=1
elseif(lakonl(4:4).eq.'6') then
nope=6
mint3d=2
elseif(lakonl(4:5).eq.'8R') then
nope=8
mint3d=1
elseif(lakonl(4:4).eq.'8') then
nope=8
mint3d=8
endif
!
! initialisation for distributed forces
!
do i1=1,nope
do i2=0,mi(2)
ff(i2,i1)=0.d0
enddo
enddo
do i1=1,nope
do i2=1,3
bb(i2,i1)=0.d0
enddo
enddo
!
! temperature, velocity, conservative variables
! (rho*velocity and rho) and if turbulent
! rho*turbulence variables
!
do i1=1,nope
do i2=0,mi(2)
voldl(i2,i1)=vold(i2,konl(i1))
enddo
do i2=0,mi(2)
vconl(i2,i1)=vcon(konl(i1),i2)
enddo
c if(nelem.eq.1) then
c write(*,*) 'voldl(5..6) ',voldl(5,i1),voldl(6,i1)
c write(*,*) 'vconl(5..6) ',vconl(5,i1),vconl(6,i1)
c endif
enddo
!
! computation of the matrix: loop over the Gauss points
!
index=ipvar(nelem)
do kk=1,mint3d
!
! copying the shape functions, their derivatives and the
! Jacobian determinant from field var
!
do jj=1,nope
do ii=1,4
index=index+1
shp(ii,jj)=var(index)
enddo
enddo
index=index+1
xsj=var(index)
index=index+1
y=var(index)
!
xsjmod=dtimef*xsj
!
! the temperature temp
! the velocity vel(*)
! rho times the velocity rhovel(*)
! the temperature gradient dtem(*)
! the velocity gradient vkl(*,*)
! the pressure gradient dpress(*)
!
temp=0.d0
do j1=1,3
vel(j1)=0.d0
rhovel(j1)=0.d0
dtem(j1)=0.d0
do k1=1,3
vkl(j1,k1)=0.d0
enddo
dpress(j1)=0.d0
enddo
!
do i1=1,nope
temp=temp+shp(4,i1)*voldl(0,i1)
do j1=1,3
vel(j1)=vel(j1)+shp(4,i1)*voldl(j1,i1)
rhovel(j1)=rhovel(j1)+shp(4,i1)*vconl(j1,i1)
dtem(j1)=dtem(j1)+shp(j1,i1)*voldl(0,i1)
do k1=1,3
vkl(j1,k1)=vkl(j1,k1)+shp(k1,i1)*voldl(j1,i1)
enddo
dpress(j1)=dpress(j1)+shp(j1,i1)*voldl(4,i1)
enddo
enddo
!
! the divergence of the velocity div
!
if(compressible.eq.1) then
div=vkl(1,1)+vkl(2,2)+vkl(3,3)
else
div=0.d0
endif
!
! the divergence of the shape function times the velocity shpv(*)
! the convective enthalpy
! the convective velocity cvel(*)
!
shpv(1)=0.d0
enthalpy=0.d0
do j1=1,3
cvel(j1)=0.d0
enddo
!
do i1=1,nope
shpv(i1)=shp(1,i1)*vel(1)+shp(2,i1)*vel(2)+
& shp(3,i1)*vel(3)+shp(4,i1)*div
enthalpy=enthalpy+shpv(i1)*(vconl(0,i1)+voldl(4,i1))
do j1=1,3
cvel(j1)=cvel(j1)+shpv(i1)*vconl(j1,i1)
enddo
enddo
!
! creating auxiliary variable shps
!
do i1=1,nope
shps(i1)=xsjmod*(shp(4,i1)+dtimef2*shpv(i1))
enddo
!
! material data (dynamic viscosity)
!
call materialdata_dvifem(imat,ntmat_,temp,shcon,nshcon,dvi)
!
! determining the dissipative stress
!
do i1=1,3
do j1=i1,3
t(i1,j1)=vkl(i1,j1)+vkl(j1,i1)
enddo
if(compressible.eq.1) t(i1,i1)=t(i1,i1)-x2d3*div
enddo
!
! calculation of the density in case of body forces and/or
! turbulence
!
if((nbody.ne.0).or.(iturbulent.ne.0)) then
if(compressible.eq.1) then
!
! gas
!
rho=0.d0
do i1=1,nope
rho=rho+shp(4,i1)*vconl(4,i1)
enddo
else
!
! liquid
!
call materialdata_rho(rhcon,nrhcon,imat,rho,
& temp,ntmat_,ithermal)
endif
endif
!
! calculation of the turbulent kinetic energy, turbulence
! frequency and their spatial derivatives for gases and liquids
!
if(iturbulent.gt.0) then
xkin=0.d0
xtuf=0.d0
do i1=1,nope
xkin=xkin+shp(4,i1)*voldl(5,i1)
xtuf=xtuf+shp(4,i1)*voldl(6,i1)
enddo
!
! adding the turbulent stress
!
! factor F2
!
c1=dsqrt(xkin)/(0.09d0*xtuf*y)
c2=500.d0*dvi/(y*y*xtuf*rho)
!
! kinematic turbulent viscosity
!
if(iturbulent.eq.4) then
!
! vorticity
!
vort=dsqrt((vkl(3,2)-vkl(2,3))**2+
& (vkl(1,3)-vkl(3,1))**2+
& (vkl(2,1)-vkl(1,2))**2)
arg2=max(2.d0*c1,c2)
f2=dtanh(arg2*arg2)
unt=a1*xkin/max(a1*xtuf,vort*f2)
else
unt=xkin/xtuf
endif
!
! calculating the production (anisotropic part of
! the turbulent stress is, apart from the dynamic
! viscosity, identical to the viscous stress)
!
tu=t(1,1)*vkl(1,1)+t(1,2)*t(1,2)+t(1,3)*t(1,3)+
& t(2,2)*vkl(2,2)+t(2,3)*t(2,3)+t(3,3)*vkl(3,3)
!
! correction for compressible fluids
!
if(compressible.eq.1) then
tu=tu-x2d3*xkin*div/unt
endif
!
! calculating the turbulent stress
!
c umttot=dvi
umttot=dvi+unt*rho
do i1=1,3
do j1=i1,3
t(i1,j1)=umttot*t(i1,j1)
enddo
t(i1,i1)=t(i1,i1)-x2d3*rho*xkin
enddo
else
!
do i1=1,3
do j1=i1,3
t(i1,j1)=dvi*t(i1,j1)
enddo
enddo
endif
!
! 1a. rhs of the first part of the momentum equation
!
do jj=1,nope
!
! convective + diffusive
!
ff(1,jj)=ff(1,jj)-cvel(1)*shps(jj)-xsjmod*
& (shp(1,jj)*t(1,1)+shp(2,jj)*t(1,2)+shp(3,jj)*t(1,3))
ff(2,jj)=ff(2,jj)-cvel(2)*shps(jj)-xsjmod*
& (shp(1,jj)*t(1,2)+shp(2,jj)*t(2,2)+shp(3,jj)*t(2,3))
ff(3,jj)=ff(3,jj)-cvel(3)*shps(jj)-xsjmod*
& (shp(1,jj)*t(1,3)+shp(2,jj)*t(2,3)+shp(3,jj)*t(3,3))
enddo
!
! computation of contribution due to body forces
!
if(nbody.ne.0) then
!
! initialisation for the body forces
!
om=omx*rho
omcor=2.d0*rho*dsqrt(omx)
!
if(om.gt.0.d0) then
do i1=1,3
!
! computation of the global coordinates of the gauss
! point
!
q(i1)=0.d0
do j1=1,nope
q(i1)=q(i1)+shp(4,j1)*co(i1,konl(j1))
enddo
!
q(i1)=q(i1)-p1(i1)
enddo
const=q(1)*p2(1)+q(2)*p2(2)+q(3)*p2(3)
!
! Coriolis forces
!
omcor=2.d0*rho*dsqrt(omx)
corio(1)=vel(2)*p2(3)-vel(3)*p2(2)
corio(2)=vel(3)*p2(1)-vel(1)*p2(3)
corio(3)=vel(1)*p2(2)-vel(2)*p2(1)
endif
!
if(ifreesurface.eq.0) then
do ii=1,3
bf(ii)=bodyfx(ii)*rho
enddo
!
! inclusion of the centrifugal force into the body force
!
if(om.gt.0.d0) then
do i1=1,3
bf(i1)=bf(i1)+(q(i1)-const*p2(i1))*om+
& corio(i1)*omcor
enddo
endif
else
!
! shallow water calculation
! effect of varying depth;
! the effect of the centrifugal force on dgravity is neglected
!
dep=0.d0
do j1=1,3
bf(j1)=0.d0
enddo
!
do i1=1,nope
dep=dep+shp(4,i1)*depth(konl(i1))
do j1=1,3
bf(j1)=bf(j1)+shp(j1,i1)*depth(konl(i1))
enddo
enddo
do j1=1,3
bf(j1)=bf(j1)*(rho-dep)*dgravity
enddo
!
if(om.gt.0.d0) then
do i1=1,2
bf(i1)=bf(i1)+(q(i1)-const*p2(i1))*om+
& corio(i1)*omcor
enddo
endif
!
! bottom friction
!
c fric=0.02d0
fric=0.01d0
dvel=dsqrt(vel(1)*vel(1)+vel(2)*vel(2)+vel(3)*vel(3))
do j1=1,3
bf(j1)=bf(j1)-fric*dvel*vel(j1)/8.d0
enddo
endif
!
! storing the body force
!
bfv=bf(1)*vel(1)+bf(2)*vel(2)+bf(3)*vel(3)
!
! 1b. rhs of the first part of the momentum equation:
! body force contribution
!
do jj=1,nope
ff(1,jj)=ff(1,jj)+bf(1)*shps(jj)
ff(2,jj)=ff(2,jj)+bf(2)*shps(jj)
ff(3,jj)=ff(3,jj)+bf(3)*shps(jj)
enddo
endif
!
! 2. rhs of the mass equation
!
do j1=1,3
aux(j1)=xsjmod*(rhovel(j1)-dtimef*theta1*dpress(j1))
enddo
!
do jj=1,nope
ff(4,jj)=ff(4,jj)+
& shp(1,jj)*aux(1)+shp(2,jj)*aux(2)+shp(3,jj)*aux(3)
enddo
!
! 3. rhs of the second part of the momentum equation:
!
if(compressible.eq.1) then
!
! explicit compressible
!
do jj=1,nope
bb(1,jj)=bb(1,jj)-dpress(1)*shps(jj)
bb(2,jj)=bb(2,jj)-dpress(2)*shps(jj)
bb(3,jj)=bb(3,jj)-dpress(3)*shps(jj)
enddo
else
!
! implicit incompressible
!
do jj=1,nope
bb(1,jj)=bb(1,jj)-xsjmod*shp(4,jj)*dpress(1)
bb(2,jj)=bb(2,jj)-xsjmod*shp(4,jj)*dpress(2)
bb(3,jj)=bb(3,jj)-xsjmod*shp(4,jj)*dpress(3)
enddo
endif
!
! 4. rhs of the energy equation:
!
if(ithermal(1).gt.0) then
!
! viscous conductivity
!
call materialdata_cond(imat,ntmat_,temp,cocon,ncocon,cond)
!
! adding the turbulent conductivity
!
if(iturbulent.gt.0) then
call materialdata_cp(imat,ntmat_,temp,shcon,nshcon,cp)
turbprandl=0.9d0
cond=cond+cp*rho*unt/turbprandl
endif
!
! calculating the total dissipative stress x velocity
! (viscous + turbulent)
!
tv(1)=t(1,1)*vel(1)+t(1,2)*vel(2)+t(1,3)*vel(3)
tv(2)=t(1,2)*vel(1)+t(2,2)*vel(2)+t(2,3)*vel(3)
tv(3)=t(1,3)*vel(1)+t(2,3)*vel(2)+t(3,3)*vel(3)
!
! determining stress x velocity + conductivity x
! temperature gradient
!
do i1=1,3
tv(i1)=tv(i1)+cond*dtem(i1)
enddo
!
! determination of the rhs of the energy equations
!
do jj=1,nope
ff(0,jj)=ff(0,jj)-shps(jj)*enthalpy-xsjmod*
& (shp(1,jj)*tv(1)+shp(2,jj)*tv(2)+shp(3,jj)*tv(3))
enddo
!
! computation of contribution due to body forces
!
if(nbody.ne.0) then
do jj=1,nope
ff(0,jj)=ff(0,jj)+shps(jj)*bfv
enddo
endif
!
! distributed heat flux
!
if(nload.gt.0) then
call nident2(nelemload,nelem,nload,id)
areaj=xsj
do
if((id.eq.0).or.(nelemload(1,id).ne.nelem)) exit
if(sideload(id)(1:2).ne.'BF') then
id=id-1
cycle
endif
if(sideload(id)(3:4).eq.'NU') then
do j=1,3
pgauss(j)=0.d0
do i1=1,nope
pgauss(j)=pgauss(j)+
& shp(4,i1)*co(j,konl(i1))
enddo
enddo
jltyp=1
iscale=1
call dflux(xload(1,id),temp,istep,iinc,tvar,
& nelem,kk,pgauss,jltyp,temp,press,sideload(id),
& areaj,vold,co,lakonl,konl,ipompc,nodempc,coefmpc,
& nmpc,ikmpc,ilmpc,iscale,mi)
endif
do jj=1,nope
ff(0,jj)=ff(0,jj)+shps(jj)*xload(1,id)
enddo
exit
enddo
endif
endif
!
! 5. rhs of the turbulence equations:
!
if(iturbulent.gt.0) then
!
! convective turbulent kinetic energy: ckin
! convective turbulence frequency: ctuf
!
ckin=0.d0
ctuf=0.d0
do i1=1,nope
ckin=ckin+shpv(i1)*vconl(5,i1)
ctuf=ctuf+shpv(i1)*vconl(6,i1)
enddo
c if(nelem.eq.1) then
c write(*,*) 'ckin ',ckin
c write(*,*) 'ctuf ',ctuf
c endif
!
! gradient of k and omega
!
do j1=1,3
dxkin(j1)=0.d0
dxtuf(j1)=0.d0
enddo
do i1=1,nope
do j1=1,3
dxkin(j1)=dxkin(j1)+shp(j1,i1)*voldl(5,i1)
dxtuf(j1)=dxtuf(j1)+shp(j1,i1)*voldl(6,i1)
enddo
enddo
c if(nelem.eq.1) then
c write(*,*) 'dxkin ',(dxkin(j1),j1=1,3)
c write(*,*) 'dxtuf ',(dxtuf(j1),j1=1,3)
c endif
!
! auxiliary variable
!
c4=2.d0*rho*stuf2*
& (dxkin(1)*dxtuf(1)+dxkin(2)*dxtuf(2)+dxkin(3)*dxtuf(3))/
& xtuf
!
! dynamic turbulent viscosity
!
umt=unt*rho
!
! factor F1
!
if(iturbulent.eq.1) then
!
! k-epsilon model
!
f1=0.d0
elseif(iturbulent.eq.2) then
!
! k-omega model
!
f1=1.d0
else
!
! BSL/SST model
!
cdktuf=max(c4,1.d-20)
arg1=min(max(c1,c2),4.d0*rho*stuf2*xkin/(cdktuf*y*y))
f1=dtanh(arg1**4.d0)
endif
f1m=1.d0-f1
!
! interpolation of the constants
!
skin=f1*skin1+f1m*skin2
stuf=f1*stuf1+f1m*stuf2
beta=f1*beta1+f1m*beta2
gamm=f1*gamm1+f1m*gamm2
!
! source terms: productivity - dissipation
!
umsk=dvi+skin*umt
umst=dvi+stuf*umt
!
! production limiter active: P=unt*tu<=20*betas*k*omega
! Menter, F.R., "Zonal Two Equation k-omega Turbulence Models for
! Aerodynamic Flows," AIAA Paper 93-2906, July 1993.
!
tuk=rho*(unt*tu-betas*xtuf*xkin)
tut=rho*(gamm*tu-beta*xtuf*xtuf)+f1m*c4
!
! add controlled decay
! Spalart, P.R. and Rumsey, C.L., "Effective Inflow Conditions for
! Turbulence Models in Aerodynamic Calculations," AIAA Journal,
!     Vol. 45, No. 10, 2007, pp. 2544-2553.
!
tuk=tuk+c3*dvi
tut=tut+beta*xtu*rho
!
do i1=1,3
dxkin(i1)=dxkin(i1)*umsk
dxtuf(i1)=dxtuf(i1)*umst
enddo
!
! determination of rhs
!
do jj=1,nope
!
ff(5,jj)=ff(5,jj)-shps(jj)*(ckin-tuk)-xsjmod*
& (shp(1,jj)*dxkin(1)+shp(2,jj)*dxkin(2)
& +shp(3,jj)*dxkin(3))
ff(6,jj)=ff(6,jj)-shps(jj)*(ctuf-tut)-xsjmod*
& (shp(1,jj)*dxtuf(1)+shp(2,jj)*dxtuf(2)
& +shp(3,jj)*dxtuf(3))
enddo
endif
!
enddo
!
! area integrals
!
if(nface.ne.0) then
index=ipvarf(nelem)
c write(*,*) 'e_c3d_v1rhs nelem ipvarf(nelem)',nelem,index
!
! external boundaries
!
nopes=0
idf=ipface(nelem)
do
if((idf.eq.0).or.(nelemface(idf).ne.nelem)) exit
ig=ichar(sideface(idf)(1:1))-48
!
! check for distributed flux
! an adiabatic face must be declared as a face with
! distributed flux zero!
!
iflux=0
call nident2(nelemload,nelem,nload,id)
do
if((id.eq.0).or.(nelemload(1,id).ne.nelem)) exit
if((sideload(id)(1:1).ne.'F').and.
& (sideload(id)(1:1).ne.'R').and.
& (sideload(id)(1:1).ne.'S')) then
id=id-1
cycle
endif
igl=ichar(sideload(id)(2:2))-48
if(igl.ne.ig) then
id=id-1
cycle
endif
iflux=1
exit
enddo
!
if(nopes.eq.0) then
if(lakonl(4:4).eq.'4') then
nopes=3
mint2d=1
elseif(lakonl(4:4).eq.'6') then
mint2d=1
elseif(lakonl(4:5).eq.'8R') then
nopes=4
mint2d=1
elseif(lakonl(4:4).eq.'8') then
nopes=4
mint2d=4
endif
endif
!
if(lakonl(4:4).eq.'6') then
if(ig.le.2) then
nopes=3
else
nopes=4
endif
endif
!
c write(*,*) 'e_c3d_v1rhs ',index,4*nope+nopes+4
do i=1,mint2d
!
! facial shape functions
! local surface normal
!
do i1=1,nopes
index=index+1
shp2(4,i1)=varf(index)
enddo
do i1=1,3
index=index+1
xsj2(i1)=varf(index)
enddo
!
! derivative of the volumetric shape functions
! needed for the temperature, velocity gradients and
! gradients of k and omega (turbulence)
!
do i1=1,nope
do j1=1,4
index=index+1
shp(j1,i1)=varf(index)
enddo
enddo
index=index+1
y=varf(index)
!
! calculating of
! the temperature temp
! the velocity vel(*)
! rho times the velocity rhovel(*)
! the velocity gradient vkl
!
temp=0.d0
do j1=1,3
vel(j1)=0.d0
rhovel(j1)=0.d0
do k1=1,3
vkl(j1,k1)=0.d0
enddo
enddo
!
do i1=1,nope
temp=temp+shp(4,i1)*voldl(0,i1)
do j1=1,3
vel(j1)=vel(j1)+shp(4,i1)*voldl(j1,i1)
c write(*,*) 'e_c3d_v1rhs ',shp(4,i1)
rhovel(j1)=rhovel(j1)+shp(4,i1)*vconl(j1,i1)
do k1=1,3
vkl(j1,k1)=vkl(j1,k1)+shp(k1,i1)*voldl(j1,i1)
enddo
enddo
enddo
!
if(iflux.eq.0) then
!
! calculating of the temperature gradient dtem
! in the integration point
!
do j1=1,3
dtem(j1)=0.d0
enddo
do i1=1,nope
do j1=1,3
dtem(j1)=dtem(j1)+shp(j1,i1)*voldl(0,i1)
enddo
enddo
endif
!
if(compressible.eq.1) then
div=vkl(1,1)+vkl(2,2)+vkl(3,3)
else
div=0.d0
endif
!
! material data (dynamic viscosity)
!
call materialdata_dvifem(imat,ntmat_,temp,shcon,nshcon,
& dvi)
!
! determining the dissipative stress
!
do i1=1,3
do j1=i1,3
t(i1,j1)=vkl(i1,j1)+vkl(j1,i1)
enddo
if(compressible.eq.1) t(i1,i1)=t(i1,i1)-x2d3*div
enddo
!
! calculation of the density for gases
!
! calculation of the turbulent kinetic energy, turbulence
! frequency and their spatial derivatives for gases and liquids
!
if(iturbulent.gt.0) then
if(compressible.eq.1) then
!
! gas
!
rho=0.d0
do i1=1,nope
rho=rho+shp(4,i1)*vconl(4,i1)
enddo
else
!
! liquid
!
call materialdata_rho(rhcon,nrhcon,imat,rho,
& temp,ntmat_,ithermal)
!
!     calculation of k, omega and y
!
endif
xkin=0.d0
xtuf=0.d0
do i1=1,nope
xkin=xkin+shp(4,i1)*voldl(5,i1)
xtuf=xtuf+shp(4,i1)*voldl(6,i1)
enddo
!
! calculation of turbulent auxiliary variables
!
! factor F2
!
if(y.gt.0.d0) then
c1=dsqrt(xkin)/(0.09d0*xtuf*y)
c2=500.d0*dvi/(y*y*xtuf*rho)
endif
!
! kinematic and dynamic turbulent viscosity
!
if(iturbulent.eq.4) then
!
! vorticity
!
vort=dsqrt((vkl(3,2)-vkl(2,3))**2+
& (vkl(1,3)-vkl(3,1))**2+
& (vkl(2,1)-vkl(1,2))**2)
if(y.gt.0.d0) then
arg2=max(2.d0*c1,c2)
f2=dtanh(arg2*arg2)
else
f2=1.d0
endif
unt=a1*xkin/max(a1*xtuf,vort*f2)
else
unt=xkin/xtuf
endif
!
umttot=dvi+unt*rho
do i1=1,3
do j1=i1,3
t(i1,j1)=umttot*t(i1,j1)
enddo
t(i1,i1)=t(i1,i1)-x2d3*rho*xkin
enddo
else
!
do i1=1,3
do j1=i1,3
t(i1,j1)=dvi*t(i1,j1)
enddo
enddo
endif
!
! stress vector
!
tt(1)=(t(1,1)*xsj2(1)+t(1,2)*xsj2(2)+t(1,3)*xsj2(3))*
& dtimef
tt(2)=(t(1,2)*xsj2(1)+t(2,2)*xsj2(2)+t(2,3)*xsj2(3))*
& dtimef
tt(3)=(t(1,3)*xsj2(1)+t(2,3)*xsj2(2)+t(3,3)*xsj2(3))*
& dtimef
!
! stress x velocity
!
tv(1)=t(1,1)*vel(1)+t(1,2)*vel(2)+t(1,3)*vel(3)
tv(2)=t(1,2)*vel(1)+t(2,2)*vel(2)+t(2,3)*vel(3)
tv(3)=t(1,3)*vel(1)+t(2,3)*vel(2)+t(3,3)*vel(3)
!
! adding conductivity in case the flux is not given by a
! *DFLUX, *FILM or *RADIATE card
!
if(iflux.eq.0) then
do i1=1,3
tv(i1)=tv(i1)+cond*dtem(i1)
enddo
endif
!
tvn=tv(1)*xsj2(1)+tv(2)*xsj2(2)+tv(3)*xsj2(3)
!
! modifying tvn in case of a *DFLUX, *FILM or *RADIATE
! card
!
if(iflux.eq.1) then
dxsj2=dsqrt(xsj2(1)*xsj2(1)+xsj2(2)*xsj2(2)+
& xsj2(3)*xsj2(3))
areaj=dxsj2
sinktemp=xload(2,id)
!
! for nonuniform load: determine the coordinates of the
! point (transferred into the user subroutine)
!
if((sideload(id)(3:4).eq.'NU').or.
& (sideload(id)(5:6).eq.'NU')) then
if(nope.eq.8) then
do k=1,3
coords(k)=0.d0
do j=1,nopes
coords(k)=coords(k)+
& co(k,konl(ifaceq(j,ig)))*shp2(4,j)
enddo
enddo
elseif(nope.eq.4) then
do k=1,3
coords(k)=0.d0
do j=1,nopes
coords(k)=coords(k)+
& co(k,konl(ifacet(j,ig)))*shp2(4,j)
enddo
enddo
else
do k=1,3
coords(k)=0.d0
do j=1,nopes
coords(k)=coords(k)+
& co(k,konl(ifacew(j,ig)))*shp2(4,j)
enddo
enddo
endif
jltyp=ichar(sideload(id)(2:2))-48
jltyp=jltyp+10
if(sideload(id)(1:1).eq.'S') then
iscale=1
call dflux(xload(1,id),temp,istep,iinc,tvar,
& nelem,i,coords,jltyp,temp,press,
& sideload(id),areaj,vold,co,lakonl,konl,
& ipompc,nodempc,coefmpc,nmpc,ikmpc,ilmpc,
& iscale,mi)
if((nmethod.eq.1).and.(iscale.ne.0))
& xload(1,id)=xloadold(1,id)+
& (xload(1,id)-xloadold(1,id))*reltimef
elseif(sideload(id)(1:1).eq.'F') then
call film(xload(1,id),sinktemp,temp,istep,
& iinc,tvar,nelem,i,coords,jltyp,field,
& nfield,sideload(id),node,areaj,vold,mi)
if(nmethod.eq.1) xload(1,id)=xloadold(1,id)+
& (xload(1,id)-xloadold(1,id))*reltimef
elseif(sideload(id)(1:1).eq.'R') then
call radiate(xload(1,id),xload(2,id),temp,istep,
& iinc,tvar,nelem,i,coords,jltyp,field,
& nfield,sideload(id),node,areaj,vold,mi,
& iemchange)
if(nmethod.eq.1) xload(1,id)=xloadold(1,id)+
& (xload(1,id)-xloadold(1,id))*reltimef
endif
endif
!
if(sideload(id)(1:1).eq.'S') then
!
! flux INTO the face is positive (input deck convention)
! this is different from the convention in the theory
!
tvn=tvn+xload(1,id)*dxsj2
elseif(sideload(id)(1:1).eq.'F') then
tvn=tvn-xload(1,id)*(temp-sinktemp)*dxsj2
elseif(sideload(id)(1:1).eq.'R') then
tvn=tvn-physcon(2)*
& xload(1,id)*((temp-physcon(1))**4-
& (xload(2,id)-physcon(1))**4)*dxsj2
endif
endif
!
xsjmod=tvn*dtimef
!
if(iturbulent.gt.0) then
!
! calculation of the spatial derivatives of the turbulent kinetic energy
! and the turbulence frequency for gases and liquids
!
do j1=1,3
dxkin(j1)=0.d0
dxtuf(j1)=0.d0
enddo
do i1=1,nope
do j1=1,3
dxkin(j1)=dxkin(j1)+shp(j1,i1)*voldl(5,i1)
dxtuf(j1)=dxtuf(j1)+shp(j1,i1)*voldl(6,i1)
enddo
enddo
!
! auxiliary variable
!
c4=2.d0*rho*stuf2*
& (dxkin(1)*dxtuf(1)+dxkin(2)*dxtuf(2)+
& dxkin(3)*dxtuf(3))/xtuf
!
! dynamic turbulent viscosity
!
umt=unt*rho
!
! factor F1
!
if(iturbulent.eq.1) then
!
! k-epsilon model
!
f1=0.d0
elseif(iturbulent.eq.2) then
!
! k-omega model
!
f1=1.d0
else
!
! BSL/SST model
!
if(y.gt.0.d0) then
!
! finite distance from wall
!
cdktuf=max(c4,1.d-20)
arg1=
& min(max(c1,c2),4.d0*rho*stuf2*xkin/(cdktuf*y*y))
f1=dtanh(arg1**4.d0)
else
!
! wall
!
f1=1.d0
endif
endif
f1m=1.d0-f1
!
! interpolation of the constants
!
skin=f1*skin1+f1m*skin2
stuf=f1*stuf1+f1m*stuf2
!
! auxiliary quantities
!
umsk=dvi+skin*umt
umst=dvi+stuf*umt
!
!     determining the diffusive flux of the turbulent kinetic
!     energy and the turbulence frequency
!
do i1=1,3
               tvk(i1)=umsk*dxkin(i1)
               tvt(i1)=umst*dxtuf(i1)
enddo
!
tvnk=tvk(1)*xsj2(1)+tvk(2)*xsj2(2)+tvk(3)*xsj2(3)
tvnt=tvt(1)*xsj2(1)+tvt(2)*xsj2(2)+tvt(3)*xsj2(3)
!
xsjmodk=tvnk*dtimef
xsjmodt=tvnt*dtimef
endif
!
do k=1,nopes
if(nope.eq.8) then
ipointer=ifaceq(k,ig)
elseif(nope.eq.4) then
ipointer=ifacet(k,ig)
else
ipointer=ifacew(k,ig)
endif
!
! 1a. rhs of the first part of the momentum equation
!
ff(1,ipointer)=ff(1,ipointer)+shp2(4,k)*tt(1)
ff(2,ipointer)=ff(2,ipointer)+shp2(4,k)*tt(2)
ff(3,ipointer)=ff(3,ipointer)+shp2(4,k)*tt(3)
!
! 2. rhs of the mass equation
!
ff(4,ipointer)=ff(4,ipointer)-shp2(4,k)*
& (rhovel(1)*xsj2(1)+rhovel(2)*xsj2(2)+
& rhovel(3)*xsj2(3))*dtimef
!
! 4. rhs of the energy equation:
!
if(ithermal(1).gt.0) then
ff(0,ipointer)=ff(0,ipointer)+shp2(4,k)*xsjmod
endif
!
! 5. rhs of the turbulence equations:
!
if(iturbulent.gt.0) then
ff(5,ipointer)=ff(5,ipointer)+shp2(4,k)*xsjmodk
ff(6,ipointer)=ff(6,ipointer)+shp2(4,k)*xsjmodt
endif
enddo
enddo
!
idf=idf-1
enddo
endif
!
c if(nelem.eq.1) then
c write(*,*) 'e_c3d_v1rhs'
c do k=1,8
c write(*,*) nelem,k,(ff(j,k),j=5,6)
c enddo
c endif
return
end
|
{"hexsha": "9621e925c4ed8f218699c5656a9846aadfbcadc0", "size": 33480, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "ccx_prool/CalculiX/ccx_2.19/src/e_c3d_v1rhs.f", "max_stars_repo_name": "alleindrach/calculix-desktop", "max_stars_repo_head_hexsha": "2cb2c434b536eb668ff88bdf82538d22f4f0f711", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ccx_prool/CalculiX/ccx_2.19/src/e_c3d_v1rhs.f", "max_issues_repo_name": "alleindrach/calculix-desktop", "max_issues_repo_head_hexsha": "2cb2c434b536eb668ff88bdf82538d22f4f0f711", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ccx_prool/CalculiX/ccx_2.19/src/e_c3d_v1rhs.f", "max_forks_repo_name": "alleindrach/calculix-desktop", "max_forks_repo_head_hexsha": "2cb2c434b536eb668ff88bdf82538d22f4f0f711", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.0372940156, "max_line_length": 78, "alphanum_fraction": 0.4644265233, "num_tokens": 11764}
|
SUBROUTINE clawpack5_setaux_manifold(mbc,mx,my, &
xlower,ylower,dx,dy,maux,aux, &
xnormals,ynormals,edgelengths,area)
IMPLICIT NONE
INTEGER mx,my,mbc,maux
DOUBLE PRECISION xlower,ylower,dx,dy
DOUBLE PRECISION aux(maux,1-mbc:mx+mbc,1-mbc:my+mbc)
DOUBLE PRECISION area(-mbc:mx+mbc+1,-mbc:my+mbc+1)
DOUBLE PRECISION xnormals(-mbc:mx+mbc+2,-mbc:my+mbc+2,3)
DOUBLE PRECISION ynormals(-mbc:mx+mbc+2,-mbc:my+mbc+2,3)
DOUBLE PRECISION edgelengths(-mbc:mx+mbc+2,-mbc:my+mbc+2,2)
DOUBLE PRECISION rho,bulk,cc,zz
COMMON /cparam/ rho,bulk,cc,zz
INTEGER i,j
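  ! layout of the aux array (per cell):
  !   aux(1:2) x-face normal, aux(3) x-face edge length / dy
  !   aux(4:5) y-face normal, aux(6) y-face edge length / dx
  !   aux(7) cell area / (dx*dy), aux(8) sound speed cc, aux(9) impedance zz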
DO j = 1-mbc,my+mbc
DO i = 1-mbc,mx+mbc
aux(1,i,j) = xnormals(i,j,1)
aux(2,i,j) = xnormals(i,j,2)
aux(3,i,j) = edgelengths(i,j,1)/dy
aux(4,i,j) = ynormals(i,j,1)
aux(5,i,j) = ynormals(i,j,2)
aux(6,i,j) = edgelengths(i,j,2)/dx
aux(7,i,j) = area(i,j)/(dx*dy)
aux(8,i,j) = cc
aux(9,i,j) = zz
ENDDO
ENDDO
RETURN
END SUBROUTINE clawpack5_setaux_manifold
|
{"hexsha": "5a221a084c5ffd88bef7d5f53847692d07a467d1", "size": 1043, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "applications/clawpack/acoustics/2d/radial/user_5.0/setaux.f90", "max_stars_repo_name": "ECLAIRWaveS/ForestClaw", "max_stars_repo_head_hexsha": "0a18a563b8c91c55fb51b56034fe5d3928db37dd", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 34, "max_stars_repo_stars_event_min_datetime": "2017-09-26T13:39:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T08:56:23.000Z", "max_issues_repo_path": "applications/clawpack/acoustics/2d/radial/user_5.0/setaux.f90", "max_issues_repo_name": "ECLAIRWaveS/ForestClaw", "max_issues_repo_head_hexsha": "0a18a563b8c91c55fb51b56034fe5d3928db37dd", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 75, "max_issues_repo_issues_event_min_datetime": "2017-08-02T19:56:00.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T12:36:32.000Z", "max_forks_repo_path": "applications/clawpack/acoustics/2d/radial/user_5.0/setaux.f90", "max_forks_repo_name": "ECLAIRWaveS/ForestClaw", "max_forks_repo_head_hexsha": "0a18a563b8c91c55fb51b56034fe5d3928db37dd", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 23, "max_forks_repo_forks_event_min_datetime": "2018-02-21T00:10:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-14T19:08:36.000Z", "avg_line_length": 28.9722222222, "max_line_length": 62, "alphanum_fraction": 0.6164908917, "num_tokens": 454}
|
import unittest
import numpy as np
from tests import genImage
class TestHyImage(unittest.TestCase):
def test_image(self):
# create test image
image = genImage(dimx = 1464, dimy=401, nbands=10)
self.assertEqual(image.xdim(), 1464)
self.assertEqual(image.ydim(), 401)
self.assertEqual(image.band_count(), 10)
self.assertEqual(image.aspx(), 401 / 1464)
# todo - add test code for georeferencing code
# get_extent, set_projection, set_projection_EPSG, get_projection_EPSG, pix_to_world, world_to_pix
image.flip(axis='y')
image.data[10,10,:] = np.nan
image.fill_holes()
        self.assertTrue(np.isfinite(image.data).all())
image.blur()
# resize
nx,ny = int(1464/2), int(401/2)
image.resize(newdims=(nx, ny))
self.assertEqual(image.xdim(), nx)
self.assertEqual(image.ydim(), ny)
self.assertEqual(image.band_count(), 10)
# extract features
k, d = image.get_keypoints( band=0 )
src, dst = image.match_keypoints(k,k,d,d)
self.assertGreater(len(src), 0 ) # make sure there are some matches...
# masking
image.mask( np.sum(image.data,axis=2) > 0.75 )
        self.assertFalse(np.isfinite(image.data).all())
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "60db65fed59c77b8e73caacad69d240ae7ec016a", "size": 1369, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_hyimage.py", "max_stars_repo_name": "npucino/hylite", "max_stars_repo_head_hexsha": "dff1314a2a0c281fd2fc1a5ee03bdba3e0c49f28", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2020-11-19T12:53:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-21T21:02:02.000Z", "max_issues_repo_path": "tests/test_hyimage.py", "max_issues_repo_name": "npucino/hylite", "max_issues_repo_head_hexsha": "dff1314a2a0c281fd2fc1a5ee03bdba3e0c49f28", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-02-22T07:02:26.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-01T10:55:32.000Z", "max_forks_repo_path": "tests/test_hyimage.py", "max_forks_repo_name": "npucino/hylite", "max_forks_repo_head_hexsha": "dff1314a2a0c281fd2fc1a5ee03bdba3e0c49f28", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2021-01-12T09:46:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T12:36:33.000Z", "avg_line_length": 31.8372093023, "max_line_length": 106, "alphanum_fraction": 0.6216216216, "include": true, "reason": "import numpy", "num_tokens": 342}
|
// Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma
// de Barcelona (UAB).
//
// This work is licensed under the terms of the MIT license.
// For a copy, see <https://opensource.org/licenses/MIT>.
#include "carla/streaming/detail/tcp/Client.h"
#include "carla/Debug.h"
#include "carla/Logging.h"
#include "carla/Time.h"
#include <boost/asio/connect.hpp>
#include <boost/asio/read.hpp>
#include <boost/asio/write.hpp>
namespace carla {
namespace streaming {
namespace detail {
namespace tcp {
// ===========================================================================
// -- Decoder ----------------------------------------------------------------
// ===========================================================================
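// A Decoder reads one length-prefixed message in two steps: header()
// exposes a buffer for the fixed-size message length, after which
// body() allocates a Message of that size and exposes its payload
// buffer; pop() then releases the completed message.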
class Decoder {
public:
boost::asio::mutable_buffer header() {
return boost::asio::buffer(&_size, sizeof(_size));
}
boost::asio::mutable_buffer body() {
DEBUG_ASSERT(_size > 0u);
DEBUG_ASSERT(_message == nullptr);
_message = std::make_shared<Message>(_size);
return _message->buffer();
}
auto size() const {
return _size;
}
auto pop() {
return std::move(_message);
}
private:
message_size_type _size = 0u;
std::shared_ptr<Message> _message;
};
// ===========================================================================
// -- Client -----------------------------------------------------------------
// ===========================================================================
Client::Client(
boost::asio::io_service &io_service,
endpoint ep,
stream_id_type stream_id,
callback_function_type callback)
: _endpoint(std::move(ep)),
_stream_id(stream_id),
_callback(std::move(callback)),
_socket(io_service),
_strand(io_service),
_connection_timer(io_service) {
Connect();
}
Client::~Client() {
Stop();
}
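// Cancel the reconnect timer and close the socket on the strand so
// shutdown is serialized with any in-flight connect/read handlers.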
void Client::Stop() {
_connection_timer.cancel();
_strand.post([this]() {
_done = true;
if (_socket.is_open()) {
_socket.close();
}
});
}
void Client::Connect() {
_strand.post([this]() {
if (_done) {
return;
}
using boost::system::error_code;
if (_socket.is_open()) {
_socket.close();
}
auto handle_connect = [=](error_code ec) {
if (!ec) {
// Send the stream id to subscribe to the stream.
log_debug("streaming client: sending stream id", _stream_id);
boost::asio::async_write(
_socket,
boost::asio::buffer(&_stream_id, sizeof(_stream_id)),
_strand.wrap([=](error_code ec, size_t DEBUG_ONLY(bytes)) {
if (!ec) {
DEBUG_ASSERT_EQ(bytes, sizeof(_stream_id));
// If succeeded start reading data.
ReadData();
} else {
// Else try again.
log_debug("streaming client: failed to send stream id:", ec.message());
Connect();
}
}));
} else {
log_debug("streaming client: connection failed:", ec.message());
Reconnect();
}
};
log_debug("streaming client: connecting to", _endpoint);
_socket.async_connect(_endpoint, _strand.wrap(handle_connect));
});
}
void Client::Reconnect() {
_connection_timer.expires_from_now(time_duration::seconds(1u));
_connection_timer.async_wait([this](boost::system::error_code ec) {
if (!ec) {
Connect();
}
});
}
void Client::ReadData() {
_strand.post([this]() {
if (_done) {
return;
}
log_debug("streaming client: Client::ReadData");
auto encoder = std::make_shared<Decoder>();
auto handle_read_data = [=](boost::system::error_code ec, size_t DEBUG_ONLY(bytes)) {
DEBUG_ONLY(log_debug("streaming client: Client::ReadData.handle_read_data", bytes, "bytes"));
if (!ec) {
DEBUG_ASSERT_EQ(bytes, encoder->size());
DEBUG_ASSERT_NE(bytes, 0u);
        // Move the buffer to the callback function and start reading
        // the next piece of data.
log_debug("streaming client: success reading data, calling the callback");
_socket.get_io_service().post([this, encoder]() { _callback(encoder->pop()); });
ReadData();
} else {
// As usual, if anything fails start over from the very top.
log_debug("streaming client: failed to read data:", ec.message());
Connect();
}
};
auto handle_read_header = [=](boost::system::error_code ec, size_t DEBUG_ONLY(bytes)) {
DEBUG_ONLY(log_debug("streaming client: Client::ReadData.handle_read_header", bytes, "bytes"));
if (!ec && (encoder->size() > 0u)) {
DEBUG_ASSERT_EQ(bytes, sizeof(message_size_type));
        // Now that we know the size of the coming buffer, we can
        // allocate our buffer and start putting data into it.
boost::asio::async_read(
_socket,
encoder->body(),
_strand.wrap(handle_read_data));
} else {
log_debug("streaming client: failed to read header:", ec.message());
DEBUG_ONLY(log_debug("size = ", encoder->size()));
DEBUG_ONLY(log_debug("bytes = ", bytes));
Connect();
}
};
// Read the size of the buffer that is coming.
boost::asio::async_read(
_socket,
encoder->header(),
_strand.wrap(handle_read_header));
});
}
} // namespace tcp
} // namespace detail
} // namespace streaming
} // namespace carla
|
{"hexsha": "454acfecde6d484b3b71fbdd15288f19e91b19e9", "size": 5736, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "LibCarla/source/carla/streaming/detail/tcp/Client.cpp", "max_stars_repo_name": "edufford/carla", "max_stars_repo_head_hexsha": "427a77e895b6da40581ea73a6cec25420eb6b498", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-04-02T13:55:54.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-03T06:53:37.000Z", "max_issues_repo_path": "LibCarla/source/carla/streaming/detail/tcp/Client.cpp", "max_issues_repo_name": "edufford/carla", "max_issues_repo_head_hexsha": "427a77e895b6da40581ea73a6cec25420eb6b498", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5.0, "max_issues_repo_issues_event_min_datetime": "2018-05-14T20:31:57.000Z", "max_issues_repo_issues_event_max_datetime": "2018-09-01T15:40:37.000Z", "max_forks_repo_path": "LibCarla/source/carla/streaming/detail/tcp/Client.cpp", "max_forks_repo_name": "edufford/carla", "max_forks_repo_head_hexsha": "427a77e895b6da40581ea73a6cec25420eb6b498", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2018-06-13T23:18:09.000Z", "max_forks_repo_forks_event_max_datetime": "2018-12-08T14:31:32.000Z", "avg_line_length": 29.2653061224, "max_line_length": 103, "alphanum_fraction": 0.5373082287, "num_tokens": 1227}
|
\subsection{Torsion tensor}
|
{"hexsha": "bc3927d87407ea4583b44b09ef0eec450073edde", "size": 31, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "src/pug/theory/geometry/manifoldsRiemann/02-02-torsion.tex", "max_stars_repo_name": "adamdboult/nodeHomePage", "max_stars_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/pug/theory/geometry/manifoldsRiemann/02-02-torsion.tex", "max_issues_repo_name": "adamdboult/nodeHomePage", "max_issues_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2021-03-03T12:36:56.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-01T22:16:09.000Z", "max_forks_repo_path": "src/pug/theory/geometry/manifoldsRiemann/02-02-torsion.tex", "max_forks_repo_name": "adamdboult/nodeHomePage", "max_forks_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 6.2, "max_line_length": 27, "alphanum_fraction": 0.7419354839, "num_tokens": 8}
|
\documentclass[12pt,a4paper]{article}
\usepackage[a4paper,text={16.5cm,25.2cm},centering]{geometry}
\usepackage{lmodern}
\usepackage{amssymb,amsmath}
\usepackage{bm}
\usepackage{graphicx}
\usepackage{microtype}
\usepackage{hyperref}
\setlength{\parindent}{0pt}
\setlength{\parskip}{1.2ex}
\hypersetup
{ pdfauthor = { Marco Fasondini },
pdftitle={ foo },
colorlinks=TRUE,
linkcolor=black,
citecolor=blue,
urlcolor=blue
}
\usepackage{upquote}
\usepackage{listings}
\usepackage{xcolor}
\lstset{
basicstyle=\ttfamily\footnotesize,
upquote=true,
breaklines=true,
breakindent=0pt,
keepspaces=true,
showspaces=false,
columns=fullflexible,
showtabs=false,
showstringspaces=false,
escapeinside={(*@}{@*)},
extendedchars=true,
}
\newcommand{\HLJLt}[1]{#1}
\newcommand{\HLJLw}[1]{#1}
\newcommand{\HLJLe}[1]{#1}
\newcommand{\HLJLeB}[1]{#1}
\newcommand{\HLJLo}[1]{#1}
\newcommand{\HLJLk}[1]{\textcolor[RGB]{148,91,176}{\textbf{#1}}}
\newcommand{\HLJLkc}[1]{\textcolor[RGB]{59,151,46}{\textit{#1}}}
\newcommand{\HLJLkd}[1]{\textcolor[RGB]{214,102,97}{\textit{#1}}}
\newcommand{\HLJLkn}[1]{\textcolor[RGB]{148,91,176}{\textbf{#1}}}
\newcommand{\HLJLkp}[1]{\textcolor[RGB]{148,91,176}{\textbf{#1}}}
\newcommand{\HLJLkr}[1]{\textcolor[RGB]{148,91,176}{\textbf{#1}}}
\newcommand{\HLJLkt}[1]{\textcolor[RGB]{148,91,176}{\textbf{#1}}}
\newcommand{\HLJLn}[1]{#1}
\newcommand{\HLJLna}[1]{#1}
\newcommand{\HLJLnb}[1]{#1}
\newcommand{\HLJLnbp}[1]{#1}
\newcommand{\HLJLnc}[1]{#1}
\newcommand{\HLJLncB}[1]{#1}
\newcommand{\HLJLnd}[1]{\textcolor[RGB]{214,102,97}{#1}}
\newcommand{\HLJLne}[1]{#1}
\newcommand{\HLJLneB}[1]{#1}
\newcommand{\HLJLnf}[1]{\textcolor[RGB]{66,102,213}{#1}}
\newcommand{\HLJLnfm}[1]{\textcolor[RGB]{66,102,213}{#1}}
\newcommand{\HLJLnp}[1]{#1}
\newcommand{\HLJLnl}[1]{#1}
\newcommand{\HLJLnn}[1]{#1}
\newcommand{\HLJLno}[1]{#1}
\newcommand{\HLJLnt}[1]{#1}
\newcommand{\HLJLnv}[1]{#1}
\newcommand{\HLJLnvc}[1]{#1}
\newcommand{\HLJLnvg}[1]{#1}
\newcommand{\HLJLnvi}[1]{#1}
\newcommand{\HLJLnvm}[1]{#1}
\newcommand{\HLJLl}[1]{#1}
\newcommand{\HLJLld}[1]{\textcolor[RGB]{148,91,176}{\textit{#1}}}
\newcommand{\HLJLs}[1]{\textcolor[RGB]{201,61,57}{#1}}
\newcommand{\HLJLsa}[1]{\textcolor[RGB]{201,61,57}{#1}}
\newcommand{\HLJLsb}[1]{\textcolor[RGB]{201,61,57}{#1}}
\newcommand{\HLJLsc}[1]{\textcolor[RGB]{201,61,57}{#1}}
\newcommand{\HLJLsd}[1]{\textcolor[RGB]{201,61,57}{#1}}
\newcommand{\HLJLsdB}[1]{\textcolor[RGB]{201,61,57}{#1}}
\newcommand{\HLJLsdC}[1]{\textcolor[RGB]{201,61,57}{#1}}
\newcommand{\HLJLse}[1]{\textcolor[RGB]{59,151,46}{#1}}
\newcommand{\HLJLsh}[1]{\textcolor[RGB]{201,61,57}{#1}}
\newcommand{\HLJLsi}[1]{#1}
\newcommand{\HLJLso}[1]{\textcolor[RGB]{201,61,57}{#1}}
\newcommand{\HLJLsr}[1]{\textcolor[RGB]{201,61,57}{#1}}
\newcommand{\HLJLss}[1]{\textcolor[RGB]{201,61,57}{#1}}
\newcommand{\HLJLssB}[1]{\textcolor[RGB]{201,61,57}{#1}}
\newcommand{\HLJLnB}[1]{\textcolor[RGB]{59,151,46}{#1}}
\newcommand{\HLJLnbB}[1]{\textcolor[RGB]{59,151,46}{#1}}
\newcommand{\HLJLnfB}[1]{\textcolor[RGB]{59,151,46}{#1}}
\newcommand{\HLJLnh}[1]{\textcolor[RGB]{59,151,46}{#1}}
\newcommand{\HLJLni}[1]{\textcolor[RGB]{59,151,46}{#1}}
\newcommand{\HLJLnil}[1]{\textcolor[RGB]{59,151,46}{#1}}
\newcommand{\HLJLnoB}[1]{\textcolor[RGB]{59,151,46}{#1}}
\newcommand{\HLJLoB}[1]{\textcolor[RGB]{102,102,102}{\textbf{#1}}}
\newcommand{\HLJLow}[1]{\textcolor[RGB]{102,102,102}{\textbf{#1}}}
\newcommand{\HLJLp}[1]{#1}
\newcommand{\HLJLc}[1]{\textcolor[RGB]{153,153,119}{\textit{#1}}}
\newcommand{\HLJLch}[1]{\textcolor[RGB]{153,153,119}{\textit{#1}}}
\newcommand{\HLJLcm}[1]{\textcolor[RGB]{153,153,119}{\textit{#1}}}
\newcommand{\HLJLcp}[1]{\textcolor[RGB]{153,153,119}{\textit{#1}}}
\newcommand{\HLJLcpB}[1]{\textcolor[RGB]{153,153,119}{\textit{#1}}}
\newcommand{\HLJLcs}[1]{\textcolor[RGB]{153,153,119}{\textit{#1}}}
\newcommand{\HLJLcsB}[1]{\textcolor[RGB]{153,153,119}{\textit{#1}}}
\newcommand{\HLJLg}[1]{#1}
\newcommand{\HLJLgd}[1]{#1}
\newcommand{\HLJLge}[1]{#1}
\newcommand{\HLJLgeB}[1]{#1}
\newcommand{\HLJLgh}[1]{#1}
\newcommand{\HLJLgi}[1]{#1}
\newcommand{\HLJLgo}[1]{#1}
\newcommand{\HLJLgp}[1]{#1}
\newcommand{\HLJLgs}[1]{#1}
\newcommand{\HLJLgsB}[1]{#1}
\newcommand{\HLJLgt}[1]{#1}
\def\qqand{\qquad\hbox{and}\qquad}
\def\qqfor{\qquad\hbox{for}\qquad}
\def\qqas{\qquad\hbox{as}\qquad}
\def\half{ {1 \over 2} }
\def\D{ {\rm d} }
\def\I{ {\rm i} }
\def\E{ {\rm e} }
\def\C{ {\mathbb C} }
\def\R{ {\mathbb R} }
\def\H{ {\mathbb H} }
\def\Z{ {\mathbb Z} }
\def\CC{ {\cal C} }
\def\FF{ {\cal F} }
\def\HH{ {\cal H} }
\def\LL{ {\cal L} }
\def\vc#1{ {\mathbf #1} }
\def\bbC{ {\mathbb C} }
\def\fR{ f_{\rm R} }
\def\fL{ f_{\rm L} }
\def\qqqquad{\qquad\qquad}
\def\qqwhere{\qquad\hbox{where}\qquad}
\def\Res_#1{\underset{#1}{\rm Res}\,}
\def\sech{ {\rm sech}\, }
\def\acos{ {\rm acos}\, }
\def\asin{ {\rm asin}\, }
\def\atan{ {\rm atan}\, }
\def\Ei{ {\rm Ei}\, }
\def\upepsilon{\varepsilon}
\def\Xint#1{ \mathchoice
{\XXint\displaystyle\textstyle{#1} }%
{\XXint\textstyle\scriptstyle{#1} }%
{\XXint\scriptstyle\scriptscriptstyle{#1} }%
{\XXint\scriptscriptstyle\scriptscriptstyle{#1} }%
\!\int}
\def\XXint#1#2#3{ {\setbox0=\hbox{$#1{#2#3}{\int}$}
\vcenter{\hbox{$#2#3$}}\kern-.5\wd0} }
\def\ddashint{\Xint=}
\def\dashint{\Xint-}
% \def\dashint
\def\infdashint{\dashint_{-\infty}^\infty}
\def\addtab#1={#1\;&=}
\def\ccr{\\\addtab}
\def\ip<#1>{\left\langle{#1}\right\rangle}
\def\dx{\D x}
\def\dt{\D t}
\def\dz{\D z}
\def\ds{\D s}
\def\rR{ {\rm R} }
\def\rL{ {\rm L} }
\def\norm#1{\left\| #1 \right\|}
\def\pr(#1){\left({#1}\right)}
\def\br[#1]{\left[{#1}\right]}
\def\abs#1{\left|{#1}\right|}
\def\fpr(#1){\!\pr({#1})}
\def\sopmatrix#1{ \begin{pmatrix}#1\end{pmatrix} }
\def\endash{–}
\def\emdash{—}
\def\mdblksquare{\blacksquare}
\def\lgblksquare{\blacksquare}
\def\scre{\E}
\def\mapengine#1,#2.{\mapfunction{#1}\ifx\void#2\else\mapengine #2.\fi }
\def\map[#1]{\mapengine #1,\void.}
\def\mapenginesep_#1#2,#3.{\mapfunction{#2}\ifx\void#3\else#1\mapengine #3.\fi }
\def\mapsep_#1[#2]{\mapenginesep_{#1}#2,\void.}
\def\vcbr[#1]{\pr(#1)}
\def\bvect[#1,#2]{
{
\def\dots{\cdots}
\def\mapfunction##1{\ | \ ##1}
\sopmatrix{
\,#1\map[#2]\,
}
}
}
\def\vect[#1]{
{\def\dots{\ldots}
\vcbr[{#1}]
} }
\def\vectt[#1]{
{\def\dots{\ldots}
\vect[{#1}]^{\top}
} }
\def\Vectt[#1]{
{
\def\mapfunction##1{##1 \cr}
\def\dots{\vdots}
\begin{pmatrix}
\map[#1]
\end{pmatrix}
} }
\def\addtab#1={#1\;&=}
\def\ccr{\\\addtab}
\def\questionequals{= \!\!\!\!\!\!{\scriptstyle ? \atop }\,\,\,}
\begin{document}
\textbf{Applied Complex Analysis (2021)}
\section{Lecture 0: Running Julia code}
Most of the material in this module will be illustrated in the \href{https://julialang.org/}{Julia programming language}. Programming in Julia will not be examined but you might find it useful (and fun) for the project (see the module guide) or for computational experimentation with the material in this module.
\subsection{Instructions}
\begin{itemize}
\item[1. ] Download Julia at \href{https://julialang.org/downloads/}{https://julialang.org/downloads/} and install.
\item[2. ] Run Julia. This should open the Julia REPL.
\item[3. ] Install the required packages by typing (in the Julia REPL, with patience)
\end{itemize}
\begin{lstlisting}
(*@\HLJLk{import}@*) (*@\HLJLn{Pkg}@*)
(*@\HLJLk{using}@*) (*@\HLJLn{Pkg}@*)
(*@\HLJLn{Pkg}@*)(*@\HLJLoB{.}@*)(*@\HLJLnf{add}@*)(*@\HLJLp{(}@*)(*@\HLJLs{"{}ApproxFun"{}}@*)(*@\HLJLp{)}@*)
(*@\HLJLn{Pkg}@*)(*@\HLJLoB{.}@*)(*@\HLJLnf{add}@*)(*@\HLJLp{(}@*)(*@\HLJLs{"{}Plots"{}}@*)(*@\HLJLp{)}@*)
(*@\HLJLn{Pkg}@*)(*@\HLJLoB{.}@*)(*@\HLJLnf{add}@*)(*@\HLJLp{(}@*)(*@\HLJLs{"{}GR"{}}@*)(*@\HLJLp{)}@*)
(*@\HLJLn{Pkg}@*)(*@\HLJLoB{.}@*)(*@\HLJLnf{add}@*)(*@\HLJLp{(}@*)(*@\HLJLs{"{}Plotly"{}}@*)(*@\HLJLp{)}@*)
(*@\HLJLn{Pkg}@*)(*@\HLJLoB{.}@*)(*@\HLJLnf{add}@*)(*@\HLJLp{(}@*)(*@\HLJLs{"{}PlotlyJS"{}}@*)(*@\HLJLp{)}@*)
(*@\HLJLn{Pkg}@*)(*@\HLJLoB{.}@*)(*@\HLJLnf{add}@*)(*@\HLJLp{(}@*)(*@\HLJLs{"{}Interact"{}}@*)(*@\HLJLp{)}@*)
(*@\HLJLn{Pkg}@*)(*@\HLJLoB{.}@*)(*@\HLJLnf{add}@*)(*@\HLJLp{(}@*)(*@\HLJLs{"{}IJulia"{}}@*)(*@\HLJLp{)}@*)
(*@\HLJLn{Pkg}@*)(*@\HLJLoB{.}@*)(*@\HLJLnf{add}@*)(*@\HLJLp{(}@*)(*@\HLJLs{"{}DifferentialEquations"{}}@*)(*@\HLJLp{)}@*)
(*@\HLJLn{Pkg}@*)(*@\HLJLoB{.}@*)(*@\HLJLnf{add}@*)(*@\HLJLp{(}@*)(*@\HLJLs{"{}ComplexPhasePortrait"{}}@*)(*@\HLJLp{)}@*)
(*@\HLJLn{Pkg}@*)(*@\HLJLoB{.}@*)(*@\HLJLnf{add}@*)(*@\HLJLp{(}@*)(*@\HLJLs{"{}OscillatoryIntegrals"{}}@*)(*@\HLJLp{)}@*)
(*@\HLJLn{Pkg}@*)(*@\HLJLoB{.}@*)(*@\HLJLnf{add}@*)(*@\HLJLp{(}@*)(*@\HLJLs{"{}SpecialFunctions"{}}@*)(*@\HLJLp{)}@*)
\end{lstlisting}
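Equivalently, \texttt{Pkg.add} accepts a vector of package names, so the list above can be installed in a single call, for example:
\begin{verbatim}
Pkg.add(["ApproxFun", "Plots", "IJulia"])
\end{verbatim}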
\begin{itemize}
\item[4. ] Open a Jupyter notebook by typing:
\end{itemize}
\begin{verbatim}
using IJulia
notebook()
\end{verbatim}
The first time you run \texttt{notebook()}, it will prompt you for whether it should install Jupyter. Hit enter to have it use the Conda.jl package to install a minimal Python+Jupyter distribution (via Miniconda) that is private to Julia (not in your PATH). If you need more detailed information on opening a Jupyter notebook, see the \href{https://github.com/JuliaLang/IJulia.jl}{IJulia page}.
\begin{itemize}
\item[5. ] In the top right of the tab that has been opened in your browser, click on \texttt{New} and then click on your version of Julia, e.g., Julia 1.5.3
\item[6. ] Let's take the Julia code in the notes for Lecture 1 as an example. In the first cell of the Jupyter notebook, type
\end{itemize}
\begin{lstlisting}
(*@\HLJLk{using}@*) (*@\HLJLn{Plots}@*)(*@\HLJLp{,}@*) (*@\HLJLn{ComplexPhasePortrait}@*)(*@\HLJLp{,}@*) (*@\HLJLn{SpecialFunctions}@*)
(*@\HLJLnf{gr}@*)(*@\HLJLp{();}@*)
\end{lstlisting}
Note that \texttt{gr()} (which selects the GR plotting backend) only needs to be run the first time you type \texttt{using Plots}; that is why the command \texttt{gr()} does not appear in the lecture notes. To run the cell, press Ctrl+Enter. Wait patiently for the packages to precompile. Then type, in a new cell,
\begin{lstlisting}
(*@\HLJLn{f}@*) (*@\HLJLoB{=}@*) (*@\HLJLn{z}@*) (*@\HLJLoB{->}@*) (*@\HLJLnf{exp}@*)(*@\HLJLp{(}@*)(*@\HLJLn{z}@*)(*@\HLJLp{)}@*)
(*@\HLJLn{u}@*) (*@\HLJLoB{=}@*) (*@\HLJLn{z}@*) (*@\HLJLoB{->}@*) (*@\HLJLnf{real}@*)(*@\HLJLp{(}@*)(*@\HLJLnf{f}@*)(*@\HLJLp{(}@*)(*@\HLJLn{z}@*)(*@\HLJLp{))}@*)
(*@\HLJLn{v}@*) (*@\HLJLoB{=}@*) (*@\HLJLn{z}@*) (*@\HLJLoB{->}@*) (*@\HLJLnf{imag}@*)(*@\HLJLp{(}@*)(*@\HLJLnf{f}@*)(*@\HLJLp{(}@*)(*@\HLJLn{z}@*)(*@\HLJLp{))}@*)
(*@\HLJLcs{{\#}}@*) (*@\HLJLcs{set}@*) (*@\HLJLcs{up}@*) (*@\HLJLcs{plotting}@*) (*@\HLJLcs{grid}@*)
(*@\HLJLn{xx}@*) (*@\HLJLoB{=}@*) (*@\HLJLnf{range}@*)(*@\HLJLp{(}@*)(*@\HLJLoB{-}@*)(*@\HLJLni{2}@*) (*@\HLJLp{;}@*) (*@\HLJLn{stop}@*)(*@\HLJLoB{=}@*)(*@\HLJLni{2}@*)(*@\HLJLp{,}@*) (*@\HLJLn{length}@*)(*@\HLJLoB{=}@*)(*@\HLJLni{100}@*)(*@\HLJLp{)}@*)
(*@\HLJLn{yy}@*) (*@\HLJLoB{=}@*) (*@\HLJLnf{range}@*)(*@\HLJLp{(}@*)(*@\HLJLoB{-}@*)(*@\HLJLni{10}@*)(*@\HLJLp{;}@*) (*@\HLJLn{stop}@*)(*@\HLJLoB{=}@*)(*@\HLJLni{10}@*)(*@\HLJLp{,}@*) (*@\HLJLn{length}@*)(*@\HLJLoB{=}@*)(*@\HLJLni{100}@*)(*@\HLJLp{)}@*)
(*@\HLJLnf{plot}@*)(*@\HLJLp{(}@*)(*@\HLJLnf{surface}@*)(*@\HLJLp{(}@*)(*@\HLJLn{xx}@*)(*@\HLJLp{,}@*) (*@\HLJLn{yy}@*)(*@\HLJLp{,}@*) (*@\HLJLn{u}@*)(*@\HLJLoB{.}@*)(*@\HLJLp{(}@*)(*@\HLJLn{xx}@*)(*@\HLJLoB{{\textquotesingle}}@*) (*@\HLJLoB{.+}@*) (*@\HLJLn{im}@*)(*@\HLJLoB{.*}@*)(*@\HLJLn{yy}@*)(*@\HLJLp{);}@*) (*@\HLJLn{title}@*)(*@\HLJLoB{=}@*)(*@\HLJLs{"{}real"{}}@*)(*@\HLJLp{),}@*)
(*@\HLJLnf{surface}@*)(*@\HLJLp{(}@*)(*@\HLJLn{xx}@*)(*@\HLJLp{,}@*) (*@\HLJLn{yy}@*)(*@\HLJLp{,}@*) (*@\HLJLn{v}@*)(*@\HLJLoB{.}@*)(*@\HLJLp{(}@*)(*@\HLJLn{xx}@*)(*@\HLJLoB{{\textquotesingle}}@*) (*@\HLJLoB{.+}@*) (*@\HLJLn{im}@*)(*@\HLJLoB{.*}@*)(*@\HLJLn{yy}@*)(*@\HLJLp{);}@*) (*@\HLJLn{title}@*)(*@\HLJLoB{=}@*)(*@\HLJLs{"{}imag"{}}@*)(*@\HLJLp{))}@*)
\end{lstlisting}
\includegraphics[width=\linewidth]{figures/Lecture0_3_1.pdf}
You can also include text and LaTeX code in Jupyter notebooks. Alternatives to Jupyter notebooks include \href{https://junolab.org/}{Juno} and \href{https://github.com/fonsp/Pluto.jl}{Pluto}.
\end{document}
|
{"hexsha": "441e624f801cf49770e20b75508d063f4d696776", "size": 12174, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "output/Lecture0.tex", "max_stars_repo_name": "MarcoFasondini/M3M6AppliedComplexAnalysis", "max_stars_repo_head_hexsha": "a29356b8d42e5ce2ca764c440386d6524b8dcd31", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "output/Lecture0.tex", "max_issues_repo_name": "MarcoFasondini/M3M6AppliedComplexAnalysis", "max_issues_repo_head_hexsha": "a29356b8d42e5ce2ca764c440386d6524b8dcd31", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "output/Lecture0.tex", "max_forks_repo_name": "MarcoFasondini/M3M6AppliedComplexAnalysis", "max_forks_repo_head_hexsha": "a29356b8d42e5ce2ca764c440386d6524b8dcd31", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.3435582822, "max_line_length": 394, "alphanum_fraction": 0.5973385904, "num_tokens": 5465}
|
subroutine minmax2(m,n,i,j,k,l)
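!     min/max are the generic intrinsics; min0/max0 are their
!     integer-specific forms, so the calls below are equivalent
!     for the (implicitly) integer arguments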
i = min(m,n)
j = min0(m,n)
k = max(m,n)
l = max0(m,n)
print *, i, j, k, l
i = min(i, j, k, l, m, n)
print *, i
end
|
{"hexsha": "003165f643b9d891980c1aafef40120d56c50001", "size": 208, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "packages/PIPS/validation/Semantics/minmax2.f", "max_stars_repo_name": "DVSR1966/par4all", "max_stars_repo_head_hexsha": "86b33ca9da736e832b568c5637a2381f360f1996", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 51, "max_stars_repo_stars_event_min_datetime": "2015-01-31T01:51:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-18T02:01:50.000Z", "max_issues_repo_path": "packages/PIPS/validation/Semantics/minmax2.f", "max_issues_repo_name": "DVSR1966/par4all", "max_issues_repo_head_hexsha": "86b33ca9da736e832b568c5637a2381f360f1996", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2017-05-29T09:29:00.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-11T16:01:39.000Z", "max_forks_repo_path": "packages/PIPS/validation/Semantics/minmax2.f", "max_forks_repo_name": "DVSR1966/par4all", "max_forks_repo_head_hexsha": "86b33ca9da736e832b568c5637a2381f360f1996", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2015-03-26T08:05:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-18T02:01:51.000Z", "avg_line_length": 12.2352941176, "max_line_length": 37, "alphanum_fraction": 0.3701923077, "num_tokens": 77}
|
#!/usr/bin/env python
# Python 2.7.14
import argparse
import os
import pandas
import numpy
import matplotlib.pyplot
import matplotlib.dates
import datetime
fig_dir = 'fig'
table_dir = 'table'
class Pointing:
def __init__(self, data_path):
self.file_base, _ = os.path.splitext(os.path.basename(data_path))
self.data_raw = pandas.read_csv(data_path)
self.frequency = ''
self.use_array = ''
def read_data(self):
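        # Drop rows whose fit failed ('ERR'), coerce offsets to float, and split into AZ and EL scans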
data = self.data_raw.copy()
if not data.offset.dtype == 'float64':
data = data[data.offset != 'ERR']
data.offset = data.offset.astype('float64')
data.hpbw = data.hpbw.astype('float64')
        self.az = data[data.AZEL == 'AZ'].copy()  # .copy() so add_params can add columns without SettingWithCopyWarning
        self.el = data[data.AZEL == 'EL'].copy()
if (len(self.az) == 0) or (len(self.el) == 0):
print 'data are insufficient: {}'.format(self.file_base)
quit()
self.get_frequency(data)
def get_frequency(self, data):
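        # Infer the observing band from pos1 (~35 -> 22GHz, ~20 -> 43GHz, ~10 -> 86GHz); only 43GHz data are processed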
pos1 = float(data.pos1.iloc[0])
if 34. < pos1 < 36.:
f = '22GHz'
elif 19. < pos1 < 21.:
f = '43GHz'
elif 9. < pos1 < 11.:
f = '86GHz'
else:
f = '?'
if f != '43GHz':
print 'skip: frequency is {}'.format(f)
quit()
self.frequency = f
def add_params(self):
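        # Cumulative pointing corrections (auto + manual), offsets re-referenced to the final correction, and per-position S/N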
self.az['daz_all'] = self.az['qlookAutoDaz'] + self.az['manualDaz']
self.az['dd'] = self.az.daz_all - self.az.daz_all.iloc[-1]
self.az['offset2'] = self.az.offset + self.az.dd
self.el['del_all'] = self.el['qlookAutoDel'] + self.el['manualDel']
self.el['dd'] = self.el.del_all - self.el.del_all.iloc[-1]
self.el['offset2'] = self.el.offset + self.el.dd
self.az['SN1'] = self.az.IntegINT1 / self.az.rmsIntegInt1
self.el['SN1'] = self.el.IntegINT1 / self.el.rmsIntegInt1
self.az['SN2'] = self.az.IntegINT2 / self.az.rmsIntegInt2
self.el['SN2'] = self.el.IntegINT2 / self.el.rmsIntegInt2
self.az['SN3'] = self.az.IntegINT3 / self.az.rmsIntegInt3
self.el['SN3'] = self.el.IntegINT3 / self.el.rmsIntegInt3
def select_array(self):
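        # Keep only the receiver array whose AZ and EL scans give the largest summed S/N (SN2)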
array_list = list(self.az.ARRAY.drop_duplicates())
sn_sum = []
for a in array_list:
az_t = self.az[self.az.ARRAY == a]
el_t = self.el[self.el.ARRAY == a]
az_t = az_t.drop_duplicates(['DATE_OBS'], keep='last')
el_t = el_t.drop_duplicates(['DATE_OBS'], keep='last')
sn_sum.append(az_t.SN2.sum() + el_t.SN2.sum())
self.use_array = array_list[numpy.argmax(sn_sum)]
self.az2 = self.az[self.az.ARRAY == self.use_array]
self.el2 = self.el[self.el.ARRAY == self.use_array]
self.az2 = self.az2.drop_duplicates(['DATE_OBS'], keep='last')
self.el2 = self.el2.drop_duplicates(['DATE_OBS'], keep='last')
if (len(self.az2) == 0) or (len(self.el2) == 0):
print 'data are insufficient (2): {}'.format(self.file_base)
quit()
def calculate_offset_hpbw(self, scan):
        scan_data = getattr(self, scan + '2')
offset_mean = scan_data.offset2.mean()
hpbw_mean = scan_data.hpbw.mean()
if len(scan_data) == 1:
offset_std = 0.
hpbw_std = 0.
else:
offset_std = scan_data.offset2.std()
hpbw_std = scan_data.hpbw.std()
return offset_mean, offset_std, hpbw_mean, hpbw_std
def output_table(self):
pd = {}
offset_mean_az, offset_std_az, hpbw_mean_az, hpbw_std_az = self.calculate_offset_hpbw('az')
offset_mean_el, offset_std_el, hpbw_mean_el, hpbw_std_el = self.calculate_offset_hpbw('el')
pd['offset_mean_az'] = offset_mean_az
pd['offset_std_az'] = offset_std_az
pd['hpbw_mean_az'] = hpbw_mean_az
pd['hpbw_std_az'] = hpbw_std_az
pd['offset_mean_el'] = offset_mean_el
pd['offset_std_el'] = offset_std_el
pd['hpbw_mean_el'] = hpbw_mean_el
pd['hpbw_std_el'] = hpbw_std_el
data = pandas.concat([self.az2, self.el2])
pd['az'] = data.AZreal.mean()
pd['el'] = data.ELreal.mean()
pd['daz'] = self.az2.daz_all.iloc[-1]
pd['del'] = self.el2.del_all.iloc[-1]
pd['sn'] = numpy.mean([self.az2.SN2.iloc[-1], self.el2.SN2.iloc[-1]])
pd['temp'] = data.Temp.mean()
pd['ap'] = data.AirPress.mean()
pd['wv'] = data.WaterVapor.mean()
pd['ws'] = data.wind_sp.mean()
pd['ws_std'] = data.wind_sp.std()
pd['wd'] = data.wind_dir.mean()
pd['wd_std'] = data.wind_dir.std()
table_path = '{}/{}_params.txt'.format(table_dir, self.frequency)
fmt = '{offset_mean_az},{offset_std_az},{hpbw_mean_az},{hpbw_std_az},'
fmt += '{offset_mean_el},{offset_std_el},{hpbw_mean_el},{hpbw_std_el},'
fmt += '{az},{el},{daz},{del},{sn},'
fmt += '{temp},{ap},{wv},{ws},{ws_std},{wd},{wd_std}'
header = fmt.replace('{', '').replace('}', '')
if not os.path.exists(table_dir):
os.mkdir(table_dir)
if not os.path.exists(table_path):
with open(table_path, 'w') as f:
f.write(header + '\n')
with open(table_path, 'a') as f:
f.write(fmt.format(**pd) + '\n')
def plot_data(self):
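        # Layout: offsets (ax1/ax2), HPBW (ax3/ax4), S/N (ax5/ax6), wind speed (ax7/ax8), summary tables (ax9/ax10)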
matplotlib.rcParams['lines.linewidth'] = 1
matplotlib.rcParams['lines.marker'] = 'o'
matplotlib.rcParams['lines.markersize'] = 3
matplotlib.rcParams['font.family'] = 'Times New Roman'
matplotlib.rcParams['font.size'] = 12
matplotlib.rcParams['axes.grid'] = True
matplotlib.rcParams['grid.linestyle'] = ':'
matplotlib.rcParams['mathtext.fontset'] = 'cm'
fig = matplotlib.pyplot.figure(figsize=(10, 10))
ax1 = fig.add_subplot(321)
ax2 = fig.add_subplot(322)
ax3 = fig.add_subplot(323)
ax4 = fig.add_subplot(324)
az_tmp = self.az2
el_tmp = self.el2
ax1.plot(pandas.to_datetime(az_tmp.DATE_OBS), az_tmp.offset2)
ax1.plot(pandas.to_datetime(az_tmp.DATE_OBS), az_tmp.offset, ls='--')
ax1.plot(pandas.to_datetime(az_tmp.DATE_OBS), az_tmp.dd)
ax2.plot(pandas.to_datetime(el_tmp.DATE_OBS), el_tmp.offset2)
ax2.plot(pandas.to_datetime(el_tmp.DATE_OBS), el_tmp.offset, ls='--')
ax2.plot(pandas.to_datetime(el_tmp.DATE_OBS), el_tmp.dd)
ax3.plot(pandas.to_datetime(az_tmp.DATE_OBS), az_tmp.hpbw)
ax4.plot(pandas.to_datetime(el_tmp.DATE_OBS), el_tmp.hpbw)
ax5 = fig.add_subplot(6, 2, 9)
ax6 = fig.add_subplot(6, 2, 10)
ax5.plot(pandas.to_datetime(az_tmp.DATE_OBS), az_tmp.SN2)
ax5.plot(pandas.to_datetime(az_tmp.DATE_OBS), az_tmp.SN1)
ax5.plot(pandas.to_datetime(az_tmp.DATE_OBS), az_tmp.SN3)
ax6.plot(pandas.to_datetime(el_tmp.DATE_OBS), el_tmp.SN2, label='center')
ax6.plot(pandas.to_datetime(el_tmp.DATE_OBS), el_tmp.SN1, label='pos1')
ax6.plot(pandas.to_datetime(el_tmp.DATE_OBS), el_tmp.SN3, label='pos3')
ax6.legend(loc='lower right', ncol=3, fontsize=8)
if self.frequency == '22GHz':
min_hpbw = 50
max_hpbw = 100
elif self.frequency == '43GHz':
min_hpbw = 20
max_hpbw = 60
elif self.frequency == '86GHz':
min_hpbw = 10
max_hpbw = 30
else:
min_hpbw = 10
max_hpbw = 100
ax1.set_title('Azimuth scan', y=1.35)
ax1.set_ylabel("offset ($''$)")
ax1.set_ylim(-15, 15)
ax2.set_title('Elevation scan', y=1.35)
ax2.set_ylim(-15, 15)
ax3.set_ylabel("HPBW ($''$)")
ax3.set_ylim(min_hpbw, max_hpbw)
ax4.set_ylim(min_hpbw, max_hpbw)
ax5.set_ylabel('S/N')
object_name = az_tmp.OBJECT.iloc[0]
ax5.text(0, 0.8, object_name, transform=ax5.transAxes)
peak_az_id = az_tmp['peakTa*2'].idxmax()
peak_el_id = el_tmp['peakTa*2'].idxmax()
peak_az = az_tmp['peakTa*2'].loc[peak_az_id]
peak_el = el_tmp['peakTa*2'].loc[peak_el_id]
peak = max([peak_az, peak_el])
ax5.text(0, 0.65, 'max $T_\mathrm{A}^*$:' + '{:.1f} K'.format(peak), transform=ax5.transAxes)
if numpy.argmax([peak_az, peak_el]) == 0:
ax5.plot(pandas.to_datetime(az_tmp.DATE_OBS.loc[peak_az_id]), az_tmp.SN2.loc[peak_az_id], c='r')
elif numpy.argmax([peak_az, peak_el]) == 1:
ax6.plot(pandas.to_datetime(el_tmp.DATE_OBS.loc[peak_el_id]), el_tmp.SN2.loc[peak_el_id], c='r')
ax7 = fig.add_subplot(6, 2, 11)
ax7.plot(pandas.to_datetime(az_tmp.DATE_OBS), az_tmp.wind_sp)
ax7.set_ylabel('wind speed (km s$^{-1}$)')
ax7.set_ylim(0, 10)
ax8 = fig.add_subplot(6, 2, 12)
ax8.plot(pandas.to_datetime(el_tmp.DATE_OBS), el_tmp.wind_sp)
ax8.set_ylim(0, 10)
day = self.az.DATE_OBS.iloc[0].split()[0]
fig.suptitle(day)
dt_az = (pandas.to_datetime(az_tmp.DATE_OBS.iloc[-1]) - pandas.to_datetime(az_tmp.DATE_OBS.iloc[0])).seconds
dt_el = (pandas.to_datetime(el_tmp.DATE_OBS.iloc[-1]) - pandas.to_datetime(el_tmp.DATE_OBS.iloc[0])).seconds
if (dt_az == 0) or (dt_el == 0):
xlocater = matplotlib.dates.MinuteLocator(interval=1)
date_fmt = '%H:%M:%S'
xlim1_az = pandas.to_datetime(az_tmp.DATE_OBS.iloc[0]) - datetime.timedelta(minutes=1)
xlim2_az = pandas.to_datetime(az_tmp.DATE_OBS.iloc[-1]) + datetime.timedelta(minutes=1)
xlim1_el = pandas.to_datetime(el_tmp.DATE_OBS.iloc[0]) - datetime.timedelta(minutes=1)
xlim2_el = pandas.to_datetime(el_tmp.DATE_OBS.iloc[-1]) + datetime.timedelta(minutes=1)
            for ax_az, ax_el in [(ax1, ax2), (ax3, ax4), (ax5, ax6), (ax7, ax8)]:
                ax_az.set_xlim(xlim1_az, xlim2_az)
                ax_el.set_xlim(xlim1_el, xlim2_el)
elif dt_az > 1800:
xlocater = matplotlib.dates.MinuteLocator(interval=10)
date_fmt = '%H:%M'
elif dt_az > 900:
xlocater = matplotlib.dates.MinuteLocator(interval=5)
date_fmt = '%H:%M'
elif dt_az > 120:
xlocater = matplotlib.dates.MinuteLocator(interval=2)
date_fmt = '%H:%M'
else:
xlocater = matplotlib.dates.MinuteLocator(interval=2)
date_fmt = '%H:%M:%S'
        for ax in (ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8):
            ax.xaxis.set_major_locator(xlocater)
            ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter(date_fmt))
fig.text(0.05, 0.98, 'Frequency: {}'.format(self.frequency))
fig.text(0.05, 0.96, 'Array: {}'.format(self.use_array))
fig.text(0.8, 0.98, '(dAZ, dEL) = ({:+.2f}, {:+.2f})'.format(self.az2.daz_all.iloc[-1], self.el2.del_all.iloc[-1]))
ddaz2, time_ddaz2 = self.get_dd('az')
ddel2, time_ddel2 = self.get_dd('el')
text_yoffset_az = ax1.get_ylim()[1]
for t in range(len(ddaz2)):
ax1.text(pandas.Timestamp(time_ddaz2.iloc[t]), text_yoffset_az, '{:+.1f}'.format(ddaz2[t]), horizontalalignment='center', fontsize=9)
ax1.axvline(pandas.Timestamp(time_ddaz2.iloc[t]), c='k', marker='')
text_yoffset_el = ax2.get_ylim()[1]
for t in range(len(ddel2)):
ax2.text(pandas.Timestamp(time_ddel2.iloc[t]), text_yoffset_el, '{:+.1f}'.format(ddel2[t]), horizontalalignment='center', fontsize=9)
ax2.axvline(pandas.Timestamp(time_ddel2.iloc[t]), c='k', marker='')
ax9 = fig.add_axes([0.12, 0.89, 0.35, 0.07])
ax10 = fig.add_axes([0.54, 0.89, 0.35, 0.07])
self.plot_table('az', ax9)
self.plot_table('el', ax10)
self.fig = fig
def save_figure(self):
if not os.path.exists(fig_dir):
os.mkdir(fig_dir)
if not os.path.exists(os.path.join(fig_dir, self.frequency)):
os.mkdir(os.path.join(fig_dir, self.frequency))
print 'saved: {}'.format(self.file_base)
save_file = '{}/{}/{}.png'.format(fig_dir, self.frequency, self.file_base)
self.fig.savefig(save_file)
self.fig.clf()
@staticmethod
def show_figure():
matplotlib.pyplot.show()
def show_figure_gui(self):
self.fig.show()
def get_dd(self, scan):
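        # Return the nonzero jumps in the cumulative correction and the times at which they occurred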
        scan_data = getattr(self, scan + '2')
dd = numpy.array(scan_data['d{}_all'.format(scan)])[1:] \
- numpy.array(scan_data['d{}_all'.format(scan)])[:-1]
dd = numpy.insert(dd, 0, 0.)
dd2 = dd[numpy.where(dd != 0.)[0]]
time_dd2 = scan_data['DATE_OBS'].iloc[numpy.where(dd != 0.)[0]]
return dd2, time_dd2
def plot_table(self, scan, ax):
offset_mean, offset_std, hpbw_mean, hpbw_std = self.calculate_offset_hpbw(scan)
ax.patch.set_alpha(0)
ax.axis('off')
ax.axhline(0.1, c='k', marker='', linewidth=0.4)
ax.axhline(0.4, c='k', marker='', linewidth=0.4)
ax.axhline(0.7, c='k', marker='', linewidth=0.4)
ax.axvline(0.2, ymin=0.1, c='k', marker='', linewidth=0.4)
ax.axvline(0.5, ymin=0.1, c='k', marker='', linewidth=0.4)
ax.text(0.05, 0.45, 'offset')
ax.text(0.05, 0.15, 'HPBW')
ax.text(0.28, 0.75, 'Average')
ax.text(0.55, 0.75, 'Standard Deviation')
ax.text(0.3, 0.45, "{:+.1f}$''$".format(offset_mean))
ax.text(0.7, 0.45, "{:.1f}$''$".format(offset_std))
ax.text(0.3, 0.15, "{:+.1f}$''$".format(hpbw_mean))
ax.text(0.7, 0.15, "{:.1f}$''$".format(hpbw_std))
if __name__ == '__main__':
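    # Example invocation (hypothetical file name): python qlp_plot.py pointing_log.csv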
parser = argparse.ArgumentParser()
parser.add_argument('file_path')
args = parser.parse_args()
qlp = Pointing(args.file_path)
qlp.read_data()
qlp.add_params()
qlp.select_array()
# qlp.output_table()
qlp.plot_data()
# qlp.save_figure()
qlp.show_figure()
|
{"hexsha": "ba8c78a029782866d6ada689b3edb1e95c5ce1af", "size": 14018, "ext": "py", "lang": "Python", "max_stars_repo_path": "qlp_plot.py", "max_stars_repo_name": "mmatsuo0/qlp", "max_stars_repo_head_hexsha": "b280dc2d97ebc731e2bb14ec25a1afc736cd5a29", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "qlp_plot.py", "max_issues_repo_name": "mmatsuo0/qlp", "max_issues_repo_head_hexsha": "b280dc2d97ebc731e2bb14ec25a1afc736cd5a29", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "qlp_plot.py", "max_forks_repo_name": "mmatsuo0/qlp", "max_forks_repo_head_hexsha": "b280dc2d97ebc731e2bb14ec25a1afc736cd5a29", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.2294117647, "max_line_length": 145, "alphanum_fraction": 0.5873876445, "include": true, "reason": "import numpy", "num_tokens": 4131}
|
import logging, urllib2, time, json, os, math, copy
import pymongo, bson
import numpy as np
import StringIO, gzip
import csv
import pandas as pd
from astropy.io import fits
from astropy import wcs, coordinates as coord, units as u
from astropy.cosmology import Planck13 as cosmo
from scipy.optimize import brentq, curve_fit, leastsq
from scipy.interpolate import interp1d
from scipy.integrate import simps
from scipy import stats
from skgof import ad_test
from sklearn.metrics import r2_score
from consensus import rgz_path, data_path, db, version
from processing import *
import contour_path_object as cpo
completed_file = '%s/bending_completed%s.txt' % (rgz_path, version)
# For internal use
from pprint import pprint
import itertools
from sklearn.decomposition import PCA
from corner import corner
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.ticker import FormatStrFormatter
plt.ion()
matplotlib.rc('text', usetex=True)
matplotlib.rcParams.update({'font.size': 14})
# Connect to Mongo database
version = '_bending'
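# NB: 'version' is reassigned below, so the derived collections are read without the '_bending' suffix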
subjects = db['radio_subjects']
consensus = db['consensus{}'.format(version)]
catalog = db['catalog{}'.format(version)]
whl = db['WHL15']
rm_m = db['redmapper_members']
rm_c = db['redmapper_clusters']
amf = db['AMFDR9']
version = ''
bent_sources = db['bent_sources{}'.format(version)]
bending_15 = db['bending_15{}'.format(version)]
bending_control = db['bending_control{}'.format(version)]
sdss_sample = db['sdss_sample']
xmatch = db['sdss_whl_xmatch']
distant_sources = db['distant_sources']
# Final sample cuts
total_cuts = {'RGZ.size_arcmin':{'$lte':1.5}, 'RGZ.overedge':0, 'RGZ.radio_consensus':{'$gte':0.65}, 'using_peaks.bending_corrected':{'$lte':135.}, 'best.redshift':{'$gte':0.02, '$lte':0.8}, 'RGZ.duplicate':0}
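# i.e. compact (<= 1.5'), on-image, high-consensus (>= 65%) sources with corrected bending <= 135 deg, 0.02 <= z <= 0.8, and no duplicates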
# Get dictionary for finding the path to FITS files and WCS headers
with open('%s/first_fits.txt' % rgz_path) as f:
lines = f.readlines()
pathdict = {}
for l in lines:
spl = l.split(' ')
pathdict[spl[1].strip()] = '%s/rgz/raw_images/RGZ-full.%i/FIRST-IMGS/%s.fits' % (data_path, int(spl[0]), spl[1].strip())
### Functions ###
def get_data(subject):
'''
Returns the radio contours belonging to a single subject field
'''
link = subject['location']['contours'] # Gets url as Unicode string
# Use local file if available
jsonfile = link.split("/")[-1]
jsonfile_path = "{0}/rgz/contours/{1}".format(data_path,jsonfile)
if os.path.exists(jsonfile_path):
with open(jsonfile_path,'r') as jf:
data = json.load(jf)
# Otherwise, read from web
else:
# Reform weblink to point to the direct S3 URL, which will work even with older SSLv3
link_s3 = "http://zooniverse-static.s3.amazonaws.com/"+link.split('http://')[-1]
tryCount = 0
while(True): # In case of error, wait 10 sec and try again; give up after 5 tries
tryCount += 1
try:
compressed = urllib2.urlopen(str(link_s3)).read() #reads contents of url to str
break
except (urllib2.URLError, urllib2.HTTPError) as e:
if tryCount>5:
                    output('Unable to connect to Amazon Web Services; giving up after 5 tries', logging.exception)
                    raise
logging.exception(e)
time.sleep(10)
tempfile = StringIO.StringIO(compressed) # Temporarily stores contents as file (emptied after unzipping)
uncompressed = gzip.GzipFile(fileobj=tempfile, mode='r').read() # Unzips contents to str
data = json.loads(uncompressed) # Loads JSON object
return data
def get_contours(w, ir_pos, peak_pos, data, peak_count):
'''
Returns a list of Path objects corresponding to each outer contour in the data, in RA and dec coordinates
Removes outer layers until there are two components and a disjoint IR
Removes any contours that aren't in this source
'''
assert (peak_count in [2,3]), 'Not a valid morphology'
# Assemble the contour trees
contour_trees = []
for contour in data['contours']:
tree = cpo.Node(w, contour=contour)
contour_trees.append(tree)
# Remove each contour that doesn't contain a peak from this source
contains_peak = []
for ix, tree in enumerate(contour_trees):
if any(tree.contains(peak_pos)):
contains_peak.append(ix)
contour_trees[:] = [tree for ix, tree in enumerate(contour_trees) if ix in contains_peak]
# Combine the entire source into a single tree
value_at_inf = {'arr':[{'x':-1,'y':-1}, {'x':w._naxis1+1,'y':-1}, {'x':w._naxis1+1,'y':w._naxis2+1}, {'x':-1,'y':w._naxis2+1}, {'x':-1,'y':-1}], 'k':-1}
source_tree = cpo.Node(w)
source_tree.insert(cpo.Node(w, value=value_at_inf))
source_tree.children = contour_trees
# Remove the BCG source if it's a triple
if peak_count == 3:
source_tree.remove_triple_center(ir_pos, peak_pos)
# Increase the contour level until the IR position is outside all the contours
roots = []
source_tree.get_equal_disjoint(ir_pos, roots)
source_tree.children = roots
return source_tree
def get_pos_angle(w, ir, method, method_data):
'''
Determines the position angle between the IR position and the given comparison object
Method:
Contour: from the IR position to the most distant part of the radio contour (data is contour_list)
Peak: from the IR position to the peak of each component (data is source['radio']['peaks'])
'''
if method == 'contour':
contour_sky = coord.SkyCoord(w.wcs_pix2world(method_data.vertices,1), unit=(u.deg,u.deg), frame='icrs')
separation = ir.separation(contour_sky)
pos_angle = ir.position_angle(contour_sky)[separation==np.max(separation)][0]
elif method == 'peak':
pos_angle = ir.position_angle(coord.SkyCoord(method_data['ra'], method_data['dec'], unit=(u.deg,u.deg), frame='icrs'))
return pos_angle
def get_angles(w, ir, method, method_data):
'''
Determines the opening angle of the radio tail and the position angle of the angle bisector
Method:
Contour: from the IR position to the most distant part of the radio contour (data is contour_list)
Peak: from the IR position to the peak of each component (data is source['radio']['peaks'])
'''
assert (method in ['contour', 'peak']), 'Not a valid method'
pos_angle_0 = get_pos_angle(w, ir, method, method_data[0])
pos_angle_1 = get_pos_angle(w, ir, method, method_data[1])
opening_angle = np.abs(pos_angle_1-pos_angle_0).wrap_at(2*np.pi*u.rad)
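    # A straight source subtends 180 deg; the bending angle is the deviation from straight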
bending_angle = coord.Angle(np.abs(np.pi*u.rad - opening_angle))
bisector = (pos_angle_1+pos_angle_0)/2.
if np.abs(bisector-pos_angle_0) > np.pi/2*u.rad:
bisector += np.pi*u.rad
bending_angles = {'pos_angle_0':pos_angle_0, 'pos_angle_1':pos_angle_1, 'bending_angle':bending_angle, 'bisector':bisector.wrap_at(2*np.pi*u.rad)}
return bending_angles
def get_global_peaks(w, peak_pos, peaks, contour_tree):
'''
Determines the position of the global maximum for each component in the contour
'''
global_peaks = []
for child in contour_tree.children:
global_peak = {'flux':0}
for peak in [peaks[ix] for ix, elem in enumerate(child.contains(peak_pos)) if elem]:
if peak['flux'] > global_peak['flux']:
global_peak = peak
if global_peak['flux'] > 0:
global_peaks.append(global_peak)
return global_peaks
def curve_intersect(fun1, fun2, xmin, xmax):
'''
Finds the intersection of two curves, bounded in [xmin, xmax]
Returns an array of x values
'''
diff = lambda x: fun1(x)-fun2(x)
x_range = np.linspace(xmin, xmax, 100)
m_sign = np.sign(diff(x_range)).astype(int)
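    # A sign change between consecutive samples brackets a crossing of the two curves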
roots = x_range[np.where(m_sign[1:] - m_sign[:-1] != 0)[0] + 1]
# If they don't cross, return None
if len(roots) == 0:
return np.array([])
    # If they cross exactly once, find the one global solution
elif len(roots) == 1:
return np.array([brentq(diff, xmin, xmax)])
# If they cross multiple times, find the local solution between each root
else:
limits = np.concatenate(([xmin], roots, [xmax]))
intersections = np.empty(len(limits)-2)
for ix in range(len(intersections)):
intersections[ix] = brentq(diff, limits[ix], limits[ix+1])
return intersections
def get_colinear_separation(w, ir, peak, contour):
'''
Finds the distance from the host to the edge of the contour, passing through the peak
'''
ir_pos = w.wcs_world2pix(np.array([[ir.ra.deg,ir.dec.deg]]), 1)[0]
peak_pos = w.wcs_world2pix(np.array([[peak['ra'], peak['dec']]]), 1)[0]
# Extrapolate the line connecting the peak to the IR position
slope = (peak_pos[1]-ir_pos[1])/(peak_pos[0]-ir_pos[0])
extrap_pos = ir_pos + w._naxis1*np.array([1.,slope])
extrap_neg = ir_pos - w._naxis1*np.array([1.,slope])
# Split the contours into well-behaved functions
# Roll the array until the first index is the minimum value
x, y = contour.vertices.T
xmin_loc = np.where(x==min(x))[0][0]
x_rot = np.append(np.roll(x[:-1], len(x)-xmin_loc-1), min(x))
y_rot = np.append(np.roll(y[:-1], len(x)-xmin_loc-1), y[xmin_loc])
# Find where the contour doubles back on itself along the x-axis
m_sign = np.sign(x_rot[1:]-x_rot[:-1])
roots = np.where(m_sign[1:] - m_sign[:-1] != 0)[0] + 1
limits = np.concatenate(([0], roots, [len(x_rot)-1]))
# Split the contours at the double-back positions
domains = []
ranges = []
for ix in range(len(limits)-1):
domains.append(x_rot[limits[ix]:limits[ix+1]+1])
ranges.append(y_rot[limits[ix]:limits[ix+1]+1])
# Interpolate the contour segments
c_interps = []
for x_seg, y_seg in zip(domains, ranges):
c_interp = interp1d(x_seg, y_seg, 'linear')
c_interps.append(c_interp)
if peak_pos[0] > ir_pos[0]:
tail = np.vstack((extrap_neg, ir_pos, peak_pos, extrap_pos))
else:
tail = np.vstack((extrap_pos, ir_pos, peak_pos, extrap_neg))
tail_interp = interp1d(tail.T[0], tail.T[1], 'linear')
# Find the intersections of the contours and tail
x_intersects, y_intersects = [], []
for ix, c_interp in enumerate(c_interps):
x_intersect = curve_intersect(tail_interp, c_interp, domains[ix][0], domains[ix][-1])
y_intersect = c_interp(x_intersect)
x_intersects.append(x_intersect)
y_intersects.append(y_intersect)
intersects = np.vstack((np.hstack(x_intersects), np.hstack(y_intersects))).T
# Return the maximum separation between host and edge
intersects_sky = coord.SkyCoord(w.wcs_pix2world(intersects,1), unit=(u.deg,u.deg), frame='icrs')
return max(ir.separation(intersects_sky))
def get_tail_lengths(w, ir, method, contour_list, peaks=None):
'''
Determines angular separation between the IR position and the given comparison object
Method:
Contour: from the IR position to the most distant part of the radio contour
Peak: from the IR position to the peak of the component
'''
tail_lengths = []
if method == 'contour':
for contour in contour_list:
contour_sky = coord.SkyCoord(w.wcs_pix2world(contour.vertices,1), unit=(u.deg,u.deg), frame='icrs')
separation = ir.separation(contour_sky)
tail_lengths.append(np.max(separation))
elif method == 'peak':
assert (peaks is not None), 'No radio peaks provided'
for contour, peak in zip(contour_list, peaks):
tail_lengths.append(get_colinear_separation(w, ir, peak, contour))
return tail_lengths
def peak_edge_ratio(w, ir, peaks, tails):
'''
Calculate the ratio of the distance to the peak and to the edge of each tail (measured on the sky)
'''
ratios = []
for peak, tail in zip(peaks, tails):
peak_pos = coord.SkyCoord(peak['ra'], peak['dec'], unit=(u.deg,u.deg), frame=('icrs'))
ratios.append(ir.separation(peak_pos).deg/tail.deg)
return ratios
def get_z(source):
'''
Returns the best redshift value and uncertainty for the source (only if SDSS)
'''
if 'SDSS' in source and 'spec_redshift' in source['SDSS']:
return source['SDSS']['spec_redshift'], source['SDSS']['spec_redshift_err']
elif 'SDSS' in source and 'photo_redshift' in source['SDSS']:
return source['SDSS']['photo_redshift'], source['SDSS']['photo_redshift_err']
else:
return 0, 0
# elif 'AllWISE' in source and 'photo_redshift' in source['AllWISE']:
# return source['AllWISE']['photo_redshift'], 0
def get_whl(ir, z, z_err, transverse, dz):
'''
Find the corresponding galaxy cluster in the WHL15 catalog
If multiple clusters match, choose the one with least angular separation
'''
# If the galaxy is too close, physical separations become too great on the sky
# Restrict redshifts to at least 0.01
if z < 0.01:
return None
# Maximum separation
max_sep = float(transverse * u.Mpc / cosmo.angular_diameter_distance(z) * u.rad / u.deg)
best_sep = np.inf
cluster = None
for temp_c in whl.find({'RAdeg':{'$gt':ir.ra.deg-max_sep, '$lt':ir.ra.deg+max_sep}, 'DEdeg':{'$gt':ir.dec.deg-max_sep, '$lt':ir.dec.deg+max_sep}, \
'$or':[{'zspec':{'$gt':z-dz, '$lt':z+dz}}, {'zphot':{'$gt':z-dz, '$lt':z+dz}}]}):
current_sep = ir.separation( coord.SkyCoord(temp_c['RAdeg'], temp_c['DEdeg'], unit=(u.deg,u.deg), frame='icrs') )
if (current_sep < best_sep) and (current_sep < max_sep*u.deg):
best_sep = current_sep
cluster = temp_c
return cluster
def get_redmapper(objID, ir, z, z_err, transverse, dz):
'''
Find the corresponding galaxy cluster in the redMaPPer catalog
First check against member catalog, then check radially
If multiple clusters match, choose the one with least angular separation
'''
# If the galaxy is too close, physical separations become too great on the sky
# Restrict redshifts to at least 0.01
if z < 0.01:
return None
member = rm_m.find_one({'ObjID':objID})
if member is not None:
return rm_c.find_one({'ID':member['ID']})
# Maximum separation
max_sep = float(transverse * u.Mpc / cosmo.angular_diameter_distance(z) * u.rad / u.deg)
best_sep = np.inf
cluster = None
for temp_c in rm_c.find({'RAdeg':{'$gt':ir.ra.deg-max_sep, '$lt':ir.ra.deg+max_sep}, 'DEdeg':{'$gt':ir.dec.deg-max_sep, '$lt':ir.dec.deg+max_sep}, \
'$or':[{'zspec':{'$gt':z-dz, '$lt':z+dz}}, {'zlambda':{'$gt':z-dz, '$lt':z+dz}}]}):
current_sep = ir.separation( coord.SkyCoord(temp_c['RAdeg'], temp_c['DEdeg'], unit=(u.deg,u.deg), frame='icrs') )
if (current_sep < best_sep) and (current_sep < max_sep*u.deg):
best_sep = current_sep
cluster = temp_c
return cluster
def get_amf(ir, z, z_err, transverse, dz):
'''
    Find the corresponding galaxy cluster in the AMF DR9 catalog
If multiple clusters match, choose the one with least angular separation
'''
# If the galaxy is too close, physical separations become too great on the sky
# Restrict redshifts to at least 0.01
if z < 0.01:
return None
# Maximum separation
max_sep = float(transverse * u.Mpc / cosmo.angular_diameter_distance(z) * u.rad / u.deg)
best_sep = np.inf
cluster = None
for temp_c in amf.find({'ra':{'$gt':ir.ra.deg-max_sep, '$lt':ir.ra.deg+max_sep}, 'dec':{'$gt':ir.dec.deg-max_sep, '$lt':ir.dec.deg+max_sep}, \
'z':{'$gt':z-dz, '$lt':z+dz}}):
current_sep = ir.separation( coord.SkyCoord(temp_c['ra'], temp_c['dec'], unit=(u.deg,u.deg), frame='icrs') )
if (current_sep < best_sep) and (current_sep < max_sep*u.deg):
best_sep = current_sep
cluster = temp_c
return cluster
def get_bending(source, peak_count):
'''
Calculate all the bending parameters that don't depend on the cluster
'''
assert (peak_count in [2, 3]), 'Not a valid morphology'
subject = subjects.find_one({'zooniverse_id':source['zooniverse_id']})
# Get pixel-to-WCS conversion
fid = subject['metadata']['source']
fits_loc = pathdict[fid]
w = wcs.WCS(fits.getheader(fits_loc, 0))
# Get the location of the source
ir = coord.SkyCoord(source['SDSS']['ra'], source['SDSS']['dec'], unit=(u.deg,u.deg), frame='icrs') if 'SDSS' in source else coord.SkyCoord(source['AllWISE']['ra'], source['AllWISE']['dec'], unit=(u.deg,u.deg), frame='icrs')
ir_pos = w.wcs_world2pix(np.array([[ir.ra.deg,ir.dec.deg]]), 1)
z, z_err = get_z(source)
peaks = source['radio']['peaks']
peak_pos = w.wcs_world2pix(np.array([ [peak['ra'],peak['dec']] for peak in peaks ]), 1)
# Get image parameters for this source
data = get_data(subject)
contour_tree = get_contours(w, ir_pos, peak_pos, data, peak_count)
peaks = get_global_peaks(w, peak_pos, peaks, contour_tree)
if len(peaks) != 2:
output("%s didn't have 2 tails" % source['zooniverse_id'])
return
contour_list = [child.path for child in contour_tree.children if any(child.contains(peak_pos))]
# Using the 'contour' method
# bending_angles = get_angles(w, ir, 'contour', contour_list)
# tail_lengths_apparent = get_tail_lengths(w, ir, 'contour', contour_list)
# tail_lengths_physical = []
# for tail in tail_lengths_apparent:
# tail_lengths_physical.append(cosmo.angular_diameter_distance(z) * tail.to(u.rad)/u.rad)
# ratios = peak_edge_ratio(w, ir, peaks, tail_lengths_apparent)
# asymmetry = ratios[1]/ratios[0]
# using_contour = {'tail_deg_0':tail_lengths_apparent[0], 'tail_deg_1':tail_lengths_apparent[1], 'size_deg':sum(tail_lengths_apparent), 'tail_kpc_0':float(tail_lengths_physical[0]/u.kpc), 'tail_kpc_1':float(tail_lengths_physical[1]/u.kpc), 'size_kpc':float(sum(tail_lengths_physical)/u.kpc), 'ratio_0':ratios[0], 'ratio_1':ratios[1], 'asymmetry':max(asymmetry,1./asymmetry)}
# using_contour.update(bending_angles)
# for key in using_contour.keys():
# if type(using_contour[key]) is coord.angles.Angle:
# using_contour[key] = using_contour[key].deg
# Using the 'peak' method
bending_angles = get_angles(w, ir, 'peak', peaks)
# tail_lengths_apparent = get_tail_lengths(w, ir, 'peak', contour_list, peaks)
# tail_lengths_physical = []
# for tail in tail_lengths_apparent:
# tail_lengths_physical.append(cosmo.angular_diameter_distance(z) * tail.to(u.rad)/u.rad)
# ratios = peak_edge_ratio(w, ir, peaks, tail_lengths_apparent)
# asymmetry = ratios[1]/ratios[0]
# using_peaks = {'tail_deg_0':tail_lengths_apparent[0], 'tail_deg_1':tail_lengths_apparent[1], 'size_deg':sum(tail_lengths_apparent), 'tail_kpc_0':float(tail_lengths_physical[0]/u.kpc), 'tail_kpc_1':float(tail_lengths_physical[1]/u.kpc), 'size_kpc':float(sum(tail_lengths_physical)/u.kpc), 'ratio_0':ratios[0], 'ratio_1':ratios[1], 'asymmetry':max(asymmetry,1./asymmetry)}
# using_peaks.update(bending_angles)
# for key in using_peaks.keys():
# if type(using_peaks[key]) is coord.angles.Angle:
# using_peaks[key] = using_peaks[key].deg
morphology = 'double' if peak_count == 2 else 'triple'
rgz = {'RGZ_id':source['catalog_id'], 'zooniverse_id':source['zooniverse_id'], 'ir_consensus':source['consensus']['ir_level'], 'radio_consensus':source['consensus']['radio_level'], 'peaks':peaks, 'components':source['radio']['components'], 'morphology':morphology, 'size_arcmin':source['radio']['max_angular_extent']/60., 'size_kpc':float((cosmo.angular_diameter_distance(z)*source['radio']['max_angular_extent']*np.pi/180.)/u.kpc)}
# entry = {'RGZ':rgz, 'using_contour':using_contour, 'using_peaks':using_peaks}
entry = {'RGZ':rgz, 'using_peaks':{}}
if 'SDSS' in source:
entry['SDSS'] = source['SDSS']
if 'AllWISE' in source:
entry['AllWISE'] = source['AllWISE']
best_ra = entry['SDSS']['ra'] if 'SDSS' in entry else entry['AllWISE']['ra']
best_dec = entry['SDSS']['dec'] if 'SDSS' in entry else entry['AllWISE']['dec']
entry['best'] = {'ra':best_ra, 'dec':best_dec, 'redshift':z}
return entry
def make_bent_sources():
'''
Generate a collection of radio sources with bending parameters that don't depend on the cluster
Once generated, various matching schemes to the clusters can be tried efficiently
'''
# Determine which sources have already been processed
completed = []
if os.path.exists(completed_file):
with open(completed_file, 'r') as f:
lines = f.readlines()
for line in lines:
completed.append(int(line))
z_range = [0.01, 0.8]
# 'ignore_bending':False
double_args = {'$and': [{'overedge':0, 'catalog_id':{'$nin':completed}}, \
{'$or': [{'radio.number_peaks':2, 'radio.number_components':1}, \
{'radio.number_components':2}]}, \
{'$or': [{'SDSS.photo_redshift':{'$gte':z_range[0], '$lt':z_range[1]}}, \
{'SDSS.spec_redshift':{'$gte':z_range[0], '$lt':z_range[1]}}, \
{'AllWISE.photo_redshift':{'$gte':z_range[0], '$lt':z_range[1]}}] }]}
triple_args = {'$and': [{'overedge':0, 'catalog_id':{'$nin':completed}}, \
{'$or': [{'radio.number_peaks':3, 'radio.number_components':{'$in':[1,2]}}, \
{'radio.number_components':3}]}, \
{'$or': [{'SDSS.photo_redshift':{'$gte':z_range[0], '$lt':z_range[1]}}, \
{'SDSS.spec_redshift':{'$gte':z_range[0], '$lt':z_range[1]}}, \
{'AllWISE.photo_redshift':{'$gte':z_range[0], '$lt':z_range[1]}}] }]}
# Find the bending parameters for each source that matches
for args,peak_count,morphology in zip([double_args,triple_args], [2,3], ['double','triple']):
count = bent_sources.find({'RGZ.morphology':morphology}).count()
with open(completed_file, 'a') as f:
for source in catalog.find(args).batch_size(50):
entry = get_bending(source, peak_count)
if entry is not None:
count += 1
output('%i %s' % (count, source['zooniverse_id']))
bent_sources.insert(entry)
print >> f, source['catalog_id']
def get_cluster_match(source):
'''
Given a source from RGZ, match it to a cluster and calculate the redshift-dependent bending parameters
'''
ir = coord.SkyCoord(source['SDSS']['ra'], source['SDSS']['dec'], unit=(u.deg,u.deg), frame='icrs') if 'SDSS' in source else \
coord.SkyCoord(source['AllWISE']['ra'], source['AllWISE']['dec'], unit=(u.deg,u.deg), frame='icrs')
z, z_err = get_z(source)
# Match to cluster catalogs
cluster_w = get_whl(ir, z, z_err, 15, 0.04*(1+z))
whl_prop = {}
if cluster_w is not None:
c_pos = coord.SkyCoord(cluster_w['RAdeg'], cluster_w['DEdeg'], unit=(u.deg,u.deg), frame='icrs')
c_sep_arc = c_pos.separation(ir)
c_sep_mpc = float(cosmo.angular_diameter_distance(cluster_w['zspec'] if 'zspec' in cluster_w else cluster_w['zphot'])/u.Mpc * c_sep_arc.to(u.rad)/u.rad)
c_pos_angle = c_pos.position_angle(ir)
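        # Classify by scaled separation: < 0.01 r500 -> 'BCG', >= 1.5 r500 -> 'outer', otherwise 'inner'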
if c_sep_mpc/cluster_w['r500'] < 0.01:
pop = 'BCG'
elif c_sep_mpc/cluster_w['r500'] >= 1.5:
pop = 'outer'
else:
pop = 'inner'
whl_prop = {'ra':c_pos.ra.deg, 'dec':c_pos.dec.deg, 'separation_deg':c_sep_arc.deg, 'separation_Mpc':c_sep_mpc, 'position_angle':c_pos_angle.wrap_at(2*np.pi*u.rad).deg, 'r/r500':c_sep_mpc/cluster_w['r500'], 'population':pop, 'zbest':cluster_w['zspec'] if 'zspec' in cluster_w else cluster_w['zphot']}
for key in ['_id', 'N500', 'N500sp', 'RL*500', 'name', 'r500', 'zphot', 'zspec', 'M500']:
if key in cluster_w:
whl_prop[key] = cluster_w[key]
# Only continue if a cluster was matched
if cluster_w is None: #and cluster_r is None and cluster_a is None:
output("%s didn't match to a cluster" % source['RGZ']['zooniverse_id'])
return
else:
z = cluster_w['zspec'] if 'zspec' in cluster_w else cluster_w['zphot']
# Using the 'contour' method
# tail_lengths_apparent = [source['using_contour']['tail_deg_0']*u.deg, source['using_contour']['tail_deg_1']*u.deg]
# tail_lengths_physical = []
# for tail in tail_lengths_apparent:
# tail_lengths_physical.append(cosmo.angular_diameter_distance(z) * tail.to(u.rad)/u.rad)
# using_contour = {'tail_kpc_0':float(tail_lengths_physical[0]/u.kpc), 'tail_kpc_1':float(tail_lengths_physical[1]/u.kpc), 'size_kpc':float(sum(tail_lengths_physical)/u.kpc)}
# source['using_contour'].update(using_contour)
# Using the 'peak' method
# tail_lengths_apparent = [source['using_peaks']['tail_deg_0']*u.deg, source['using_peaks']['tail_deg_1']*u.deg]
# tail_lengths_physical = []
# for tail in tail_lengths_apparent:
# tail_lengths_physical.append(cosmo.angular_diameter_distance(z) * tail.to(u.rad)/u.rad)
# using_peaks = {'tail_kpc_0':float(tail_lengths_physical[0]/u.kpc), 'tail_kpc_1':float(tail_lengths_physical[1]/u.kpc), 'size_kpc':float(sum(tail_lengths_physical)/u.kpc)}
# source['using_peaks'].update(using_peaks)
# Calculate the orientation angle
# for cluster,prop in zip([cluster_w,cluster_r,cluster_a], [whl_prop,rm_prop,amf_prop]):
# if cluster is not None:
cluster, prop = cluster_w, whl_prop
# for method,using in zip(['contour','peaks'], [source['using_contour'],source['using_peaks']]):
for method,using in zip(['peaks'], [source['using_peaks']]):
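        # Orientation: angle between the cluster-to-galaxy direction and the tail bisector, folded into [0, 180] and then [0, 90]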
orientation = coord.angles.Angle(prop['position_angle'] - using['bisector'], unit=u.deg).wrap_at(360.*u.deg).deg
if orientation > 180.:
orientation = 360. - orientation
prop['orientation_%s' % method] = orientation
if orientation > 90.:
orientation = 180. - orientation
prop['orientation_folded'] = orientation
# Compile all results
if cluster_w is not None:
source['WHL'] = whl_prop
# if cluster_r is not None:
# source['redMaPPer'] = rm_prop
# if cluster_a is not None:
# source['AMFDR9'] = amf_prop
return source
def make_catalog():
'''
Find the sources matching the given search parameters and morphology and run the processing pipeline
'''
# Determine which sources have already been processed
completed = []
if os.path.exists(completed_file):
with open(completed_file, 'r') as f:
lines = f.readlines()
for line in lines:
completed.append(int(line))
# Find the bending and cluster results for each source that matches
for peak_count,morphology in zip([2,3], ['double','triple']):
count = bending_15.find({'RGZ.morphology':morphology}).count()
with open(completed_file, 'a') as f:
for source in bent_sources.find({'RGZ.RGZ_id': {'$nin':completed}, 'RGZ.morphology':morphology}).batch_size(50):
entry = get_cluster_match(source)
if entry is not None:
count += 1
output('%i %s' % (count, source['RGZ']['zooniverse_id']))
bending_15.insert(entry)
print >> f, source['RGZ']['RGZ_id']
# Apply the bending correction
bending_correct(plot=False, update=True, methods=['using_peaks'])
def random_control():
'''
    Generate a control sample by randomizing the positions of all the sources meeting the radio morphology requirements
'''
# Get the original location values
ras1, ras2, decs1, decs2 = [], [], [], []
for source in bent_sources.find():
ra = source['SDSS']['ra'] if 'SDSS' in source else source['AllWISE']['ra']
dec = source['SDSS']['dec'] if 'SDSS' in source else source['AllWISE']['dec']
if 90 < ra < 290:
ras1.append(ra)
decs1.append(dec)
else:
ras2.append(ra)
decs2.append(dec)
# Shuffle the location values
np.random.shuffle(ras1)
np.random.shuffle(ras2)
np.random.shuffle(decs1)
np.random.shuffle(decs2)
loc = np.vstack([ np.append(ras1,ras2), np.append(decs1,decs2) ]).T
np.random.shuffle(loc)
# Find the bending and cluster results for each source that matches
for ix, source in enumerate(bent_sources.find().batch_size(50)):
# Assign the randomized values
if 'SDSS' in source:
source['SDSS'].update({'ra':loc[ix][0], 'dec':loc[ix][1]})
else:
source['SDSS'] = {'ra':loc[ix][0], 'dec':loc[ix][1]}
entry = get_cluster_match(source)
if entry is not None:
output('%i %s' % (ix, source['RGZ']['zooniverse_id']))
bending_control.insert(entry)
def output(string, fn=logging.info):
'''
Print a string to screen and the logfile
'''
fn(string)
print string
def to_file(filename, coll, params={}):
'''
Print the bending collection to a csv file for analysis
'''
rgz_keys = ['RGZ_id', 'RGZ_name', 'zooniverse_id', 'first_id', 'morphology', 'radio_consensus', 'ir_consensus', 'size_arcmin', 'size_kpc', 'solid_angle', 'overedge']
best_keys = ['ra', 'ra_err', 'dec', 'dec_err', 'redshift', 'redshift_err']
sdss_keys = ['ra', 'dec', 'objID', 'photo_redshift', 'photo_redshift_err', 'spec_redshift', 'spec_redshift_err', 'u', 'u_err', 'g', 'g_err', 'r', 'r_err', 'i', 'i_err', 'z', 'z_err', 'morphological_class', 'spectral_class', 'number_matches']
wise_keys = ['ra', 'dec', 'designation', 'photo_redshift', 'w1mpro', 'w1sigmpro', 'w1snr', 'w2mpro', 'w2sigmpro', 'w2snr', 'w3mpro', 'w3sigmpro', 'w3snr', 'w4mpro', 'w4sigmpro', 'w4snr']
whl_keys = ['name', 'ra', 'dec', 'zphot', 'zspec', 'N500', 'N500sp', 'RL*500', 'M500', 'r500', 'separation_deg', 'separation_Mpc', 'r/r500', 'population', 'position_angle', 'orientation_contour', 'orientation_peaks', 'orientation_folded', 'P', 'P500', 'grad_P', 'alignment']
rm_keys = ['name', 'ra', 'dec', 'zlambda', 'zspec', 'S', 'lambda', 'separation_deg', 'separation_Mpc', 'position_angle', 'orientation_contour', 'orientation_peaks']
amf_keys = ['AMF_id', 'ra', 'dec', 'z', 'r200', 'richness', 'core_radius', 'concentration', 'likelihood', 'separation_deg', 'separation_Mpc', 'position_angle', 'orientation_contour', 'orientation_peaks']
bending_keys = ['pos_angle_0', 'pos_angle_1', 'bending_angle', 'bending_corrected', 'bending_err', 'bending_excess', 'bisector', 'asymmetry'] #, 'tail_deg_0', 'tail_deg_1', 'size_deg', 'tail_kpc_0', 'tail_kpc_1', 'size_kpc', 'ratio_1', 'ratio_0']
all_keys = [rgz_keys, best_keys, sdss_keys, whl_keys, bending_keys, bending_keys]
dict_names = ['RGZ', 'best', 'SDSS', 'WHL', 'using_contour', 'using_peaks']
success = 0
with open(filename, 'w') as f:
header = 'final_sample'
for superkey, key_list in zip(dict_names, all_keys):
for key in key_list:
header += ',%s.%s' % (str(superkey), str(key))
print >> f, header
for entry in coll.find(params):
try:
row = str(entry['final_sample'])
for superkey, key_list in zip(dict_names, all_keys):
for key in key_list:
if superkey in entry and key in entry[superkey]:
if type(entry[superkey][key]) is long or type(entry[superkey][key]) is bson.int64.Int64:
row += ",'%s'" % str(entry[superkey][key])
else:
row += ',%s' % str(entry[superkey][key])
else:
row += ',-99'
print >> f, row
success += 1
except BaseException as e:
output(e, logging.exception)
output('%i/%i successfully printed to %s' % (success, coll.find(params).count(), filename))
def plot_running(x_param, y_param, coll=bending_15, morph=None, pop=None, bin_by=None, bin_count=0, logx=False, logy=False, square=False, bent_cut=0, align=None, combined=False, title=True, dz_cut=None):
'''
Plot the running 1st, 2nd, and 3rd quartiles averaged over window data points
x_param, y_param, and (optional) bin_by need to be 'category.key' to search for coll[category][key]
morph can be 'double' or 'triple' to select only that morphology
    pop can be 'BCG', 'inner', 'outer', 'separate', or 'non-BCG' to select only that population
When selecting 'non-BCG', combined=True will combine the 'inner' and 'outer' sources before smoothing
align can be 'radial' or 'tangential' to select only that alignment
The axes can be plotted on a log scale by specifying logx and logy to True or False
Also select square=True when plotting log-log plots
Data can be binned with bin_by and bin_count
bent_cut applies a minimum corrected bending angle
'''
assert morph in [None, 'double', 'triple'], "morph must be 'double' or 'triple'"
assert pop in [None, 'BCG', 'inner', 'outer', 'separate', 'non-BCG'], "pop must be 'BCG', 'inner', 'outer', 'separate', or 'non-BCG'"
assert align in [None, 'radial', 'tangential'], "align must be 'radial' or 'tangential'"
if bin_by is not None:
assert type(bin_count) is int and bin_count>0, 'bin_count must be positive int'
# Prepare parameters for search
    # Deep copy so the '$exists'/'$gte' edits below don't mutate the module-level total_cuts
    params = copy.deepcopy(total_cuts)
x_param_list = x_param.split('.')
y_param_list = y_param.split('.')
for param in [x_param, y_param]:
if param in params:
params[param]['$exists'] = True
else:
params[param] = {'$exists':True}
if morph is not None:
params['RGZ.morphology'] = morph
if align is not None:
params['WHL.alignment'] = align
if pop == 'separate':
pop_list = ['inner', 'outer', 'BCG']
elif pop == 'non-BCG':
if combined:
pop_list = [{'$ne':'BCG'}]
else:
pop_list = ['inner', 'outer']
else:
pop_list = [pop]
params['using_peaks.bending_corrected']['$gte'] = bent_cut
if dz_cut is not None:
params['WHL.dz'] = {'$gte':-1.*np.abs(dz_cut), '$lte':np.abs(dz_cut)}
# Open the plotting window
fig, ax = plt.subplots()
box = ax.get_position()
# Plot trends for each population of interest
needs_labels = True
windows = set()
if bin_by is not None:
plot_params = {'RGZ.size_arcmin': {'name':"Size (')", 'fmt':'%.2f\n - %.2f', 'width':0.91}, \
'best.redshift': {'name':'Redshift', 'fmt':'%.2f\n - %.2f', 'width':0.89}, \
'WHL.M500': {'name':'Mass', 'fmt':'%.1f\n - %.1f', 'width':0.91}}
bin_by_list = bin_by.split('.')
bins = np.arange(bin_count+1) * 100. / bin_count
vals = []
for i in coll.find(params):
vals.append(i[bin_by_list[0]][bin_by_list[1]])
samples = np.percentile(vals, bins)
ax.plot(0, 0, c='w', label=plot_params[bin_by]['name'])
for pop2 in pop_list:
if pop2 is not None:
params['WHL.population'] = pop2
for i in range(len(samples)-1):
params[bin_by] = {'$gte':samples[i], '$lt':samples[i+1]}
window_size, run_x_50, run_y_50 = get_trends(params, x_param, y_param, coll, True, pop!='BCG')
windows.add(window_size)
if logx:
run_x_50 = np.log10(run_x_50)
if logy:
run_y_50 = np.log10(run_y_50)
if needs_labels:
ax.plot(run_x_50, run_y_50, label=plot_params[bin_by]['fmt'] % (samples[i], samples[i+1]), color='C%i'%i)
ax.set_position([box.x0, box.y0, box.width * plot_params[bin_by]['width'], box.height])
else:
ax.plot(run_x_50, run_y_50, color='C%i'%i)
needs_labels = False
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
else:
for pop2 in pop_list:
if pop2 is not None:
params['WHL.population'] = pop2
window_size, run_x_50, run_y_25, run_y_50, run_y_75 = get_trends(params, x_param, y_param, coll, False, pop!='BCG')
windows.add(window_size)
if logx:
run_x_50 = np.log10(run_x_50)
if logy:
run_y_25, run_y_50, run_y_75 = np.log10(run_y_25), np.log10(run_y_50), np.log10(run_y_75)
if needs_labels:
ax.plot(run_x_50, run_y_50, label='50\%', color='C0')
ax.fill_between(run_x_50, run_y_25, run_y_75, color='C0', alpha=.5)
needs_labels = False
else:
ax.plot(run_x_50, run_y_50, color='C0')
ax.fill_between(run_x_50, run_y_25, run_y_75, color='C0', alpha=.5)
# Make the plot pretty
ax.set_xlabel(get_label(x_param, logx))
ax.set_ylabel(get_label(y_param, logy))
    windows_txt = str(min(windows))
    if len(windows) > 1:
        windows_txt += '-%i' % max(windows)
titletxt = '%s%s%ssources (window size: %s)' % (morph+' ' if type(morph) is str else '', align+' ' if type(align) is str else '', pop+' ' if type(pop) is str and pop!='separate' else '', windows_txt)
if title:
ax.set_title('All '+titletxt if titletxt[0:7]=='sources' else titletxt[0].upper()+titletxt[1:])
#ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
if square:
ax.set_aspect('equal', adjustable='box')
if y_param == 'using_peaks.bending_corrected':
params['WHL.population'] = 'outer'
bend = []
for i in coll.find(params):
bend.append(i['using_peaks']['bending_corrected'])
if logy:
ax.axhline(np.log10(np.median(bend)), ls=':', c='k')
else:
ax.axhline(np.median(bend), ls=':', c='k')
elif y_param == 'using_peaks.bending_excess':
ax.axhline(0, ls=':', c='k')
plt.tight_layout()
def get_trends(params, x_param, y_param, coll, binned, combine_bcg=True):
x_param_list = x_param.split('.')
y_param_list = y_param.split('.')
x, y = [], []
for i in coll.find(params).sort(x_param, 1):
x.append(i[x_param_list[0]][x_param_list[1]])
y.append(i[y_param_list[0]][y_param_list[1]])
x = np.array(x)
y = np.array(y)
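    # Running-percentile window: 10% of the sample, capped at 100 points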
window_size = min(len(x)/10, 100)
if 'WHL.population' in params and params['WHL.population'] == 'BCG' and combine_bcg:
run_x_50 = [0.01, 0.011]
run_y_25 = 2*[np.percentile(y, 25)]
run_y_50 = 2*[np.percentile(y, 50)]
run_y_75 = 2*[np.percentile(y, 75)]
else:
run_x_50 = np.array([np.percentile(x[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
run_y_25 = np.array([np.percentile(y[ix:ix+window_size], 25) for ix in np.arange(len(y)-window_size+1)])
run_y_50 = np.array([np.percentile(y[ix:ix+window_size], 50) for ix in np.arange(len(y)-window_size+1)])
run_y_75 = np.array([np.percentile(y[ix:ix+window_size], 75) for ix in np.arange(len(y)-window_size+1)])
if binned:
return window_size, run_x_50, run_y_50
else:
return window_size, run_x_50, run_y_25, run_y_50, run_y_75
def get_label(param, log=False):
if param == 'WHL.r/r500':
label = 'Separation ($r_{500}$)'
elif param == 'WHL.M500':
label = 'Cluster mass ($10^{14}~M_\odot$)'
elif param == 'WHL.P':
label = 'ICM pressure (keV cm$^{-3}$)'
elif 'bending_angle' in param:
label = 'Bending angle (deg)'
elif 'bending_corrected' in param:
label = 'Corrected bending angle (deg)'
elif 'bending_excess' in param:
label = 'Excess bending angle (deg)'
elif 'asymmetry' in param:
label = 'Asymmetry'
elif param == 'RGZ.size_arcmin':
        label = 'Size (arcmin)'
elif param == 'RGZ.size_kpc':
label = 'Size (kpc)'
elif param == 'WHL.grad_P':
label = 'Pressure gradient (keV cm$^{-3}$ kpc$^{-1}$)'
elif param == 'RGZ.luminosity':
label = 'Radio luminosity (W Hz$^{-1}$)'
elif param == 'best.redshift':
label = '$z$'
else:
label = param.replace('_', '\_')
if log:
label = '$\log_{10}$ (' + label.replace('(', '[').replace(')', ']') + ')'
return label
def get_params(param_list, coll=bending_15, morph=None, pop=None):
'''
    Returns an array of data containing the values of the specified parameters for all sources in the sample
'''
assert morph in [None, 'double', 'triple'], "morph must be 'double' or 'triple'"
assert pop in [None, 'BCG', 'inner', 'outer'], "pop must be 'BCG', 'inner', or 'outer'"
params = total_cuts.copy()
if morph is not None:
params['RGZ.morphology'] = morph
if pop is not None:
params['WHL.population'] = pop
data = np.zeros([len(param_list), coll.find(params).count()])
for jx, gal in enumerate(coll.find(params)):
for ix, param in enumerate(param_list):
datum = gal[param[0]][param[1]]
if datum=='double':
datum = 2
elif datum=='triple':
datum = 3
elif datum=='BCG':
datum = 0
elif datum=='inner':
datum = 1
elif datum=='outer':
datum = 2
data[ix,jx] = datum
return data
def custom_exp(x, a, b):
return a*np.exp(b*x)
def bending_correct(coll=bending_15, window_size=100, plot=False, update=False, methods=None, comp_err=False):
'''
Apply a pre-determined correction for the angular size dependence to the bending angle
'''
if methods is None:
methods = ['using_peaks', 'using_contour']
elif type(methods) is str:
methods = [methods]
# Repeat for both morphologies and angle-measuring methods separately
for morph in ['double', 'triple']:
for method in methods:
# Print progress
print morph, method
# Collect data from outer region
sizes, angles, separations = [], [], []
params = total_cuts.copy()
del params['using_peaks.bending_corrected']
params['RGZ.morphology'] = morph
params[method+'.bending_angle'] = total_cuts['using_peaks.bending_corrected']
params['WHL.population'] = 'outer'
for gal in bending_15.find(params).sort('RGZ.size_arcmin', 1):
sizes.append(gal['RGZ']['size_arcmin'])
angles.append(gal[method]['bending_angle'])
separations.append(gal['WHL']['r/r500'])
sizes = np.array(sizes)
angles = np.array(angles)
separations = np.array(separations)
# Find running trend
sizes_running = np.array([np.median(sizes[ix:ix+window_size]) for ix in np.arange(len(sizes)-window_size+1)])
angles_running = np.array([np.median(angles[ix:ix+window_size]) for ix in np.arange(len(angles)-window_size+1)])
# Find best fit
med_size = np.median(sizes)
popt, pcov = curve_fit(custom_exp, sizes_running-med_size, angles_running)
perr = np.sqrt(np.diagonal(pcov))
angles_best = custom_exp(sizes-med_size, *popt)
angles_best_running = np.array([np.median(angles_best[ix:ix+window_size]) for ix in np.arange(len(angles_best)-window_size+1)])
# Save fits for comparison
if method == 'using_peaks':
if morph == 'double':
d_x = sizes_running
d_y = angles_running
d_popt = popt
d_err = perr
d_x0 = med_size
else:
t_x = sizes_running
t_y = angles_running
t_popt = popt
t_err = perr
t_x0 = med_size
# Plot fits
if plot:
fig, (ax1, ax2) = plt.subplots(2, sharex=True, gridspec_kw={'height_ratios':[3,1]})
ax1.plot(sizes_running, angles_running, label='Running median')
ax1.plot(sizes_running, angles_best_running, ls='--', label='$%.1fe^{%+.1f(x-%.2f)}$\n$R^2 = %.3f$' % (popt[0], popt[1], med_size, r2_score(angles_running, angles_best_running)))
ax1.legend(loc='upper right')
ax1.set_ylabel('Bending angle (deg)')#('$\\mathrm{%s.bending_angle}$' % method).replace('_', '\_'))
#ax1.set_title('Best fit %s sources' % morph)
ax2.plot(sizes_running, angles_running-angles_best_running, label='residual')
ax2.axhline(0, color='k', lw=1)
ax2.set_xlabel('Size (arcmin)')
ax2.set_ylabel('Residual')
ax1.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
ax2.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
fig.tight_layout()
plt.subplots_adjust(hspace=0.05)
# Insert to Mongo
if update:
for gal in coll.find():
best_angle = custom_exp(gal['RGZ']['size_arcmin']-med_size, *popt)
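                    # custom_exp(0) = a, so a source at the median size keeps its measured angle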
corr_angle = popt[0] * gal[method]['bending_angle'] / best_angle
coll.update({'_id':gal['_id']}, {'$set': {method+'.bending_corrected':corr_angle}})
get_bending_excess()
# Plot comparison of fits
if plot:
fig, ax = plt.subplots(1)
ax.plot(d_x, d_y, label='Double sources')
d_rms = np.sqrt(sum((d_y-custom_exp(d_x-d_x0, *d_popt))**2)/len(d_x))
ax.fill_between(d_x, custom_exp(d_x-d_x0, *d_popt)+d_rms, custom_exp(d_x-d_x0, *d_popt)-d_rms, color='C0', alpha=.6)#, label='Best fit doubles')
ax.plot(t_x, t_y, ls='--', label='Triple sources')
t_rms = np.sqrt(sum((t_y-custom_exp(t_x-t_x0, *t_popt))**2)/len(t_x))
ax.fill_between(t_x, custom_exp(t_x-t_x0, *t_popt)+t_rms, custom_exp(t_x-t_x0, *t_popt)-t_rms, color='C1', alpha=.6)#, label='Best fit triples')
ax.legend(loc='upper right')
ax.set_xlabel('Size (arcmin)')
ax.set_ylabel('Bending angle (deg)')
fig.tight_layout()
def pca_analysis(param_space, coll=bending_15, morph=None, pop=None):
assert param_space in ['bending', 'WHL']
assert morph in [None, 'double', 'triple'], "morph must be 'double' or 'triple'"
assert pop in [None, 'BCG', 'inner', 'outer', 'non-BCG'], "pop must be 'BCG', 'inner', 'outer', or 'non-BCG'"
if param_space == 'bending':
param_list = np.array([['using_peaks', 'bending_angle'], ['RGZ', 'size_arcmin'], ['RGZ', 'size_kpc'], ['WHL', 'r/r500'], ['WHL', 'M500'], ['WHL', 'orientation_folded'], ['best', 'redshift']])
names = np.array(['log(bending)', 'size_arcmin', 'size_kpc', 'r/r500', 'log(M500)', 'orientation', 'redshift'])
else:
param_list = np.array([['WHL', 'RL*500'], ['WHL', 'r500'], ['WHL', 'M500'], ['WHL', 'zbest']])
names = np.array(['log(RL*500)', 'r500', 'log(M500)', 'redshift'])
# Get data
if pop == 'non-BCG':
data1 = get_params(param_list, coll=coll, morph=morph, pop='inner')
data2 = get_params(param_list, coll=coll, morph=morph, pop='outer')
data = np.hstack([data1, data2.T[data2[3]<=10].T])
else:
data = get_params(param_list, coll=coll, morph=morph, pop=pop)
# Plot bending, mass and separation on log scales
if param_space == 'bending':
for i in [0, 4]:
data[i] = np.log10(data[i])
if pop is None:
data[3] = np.log10(data[3])
else:
for i in [0, 2]:
data[i] = np.log10(data[i])
# Normalize the data
mean = np.mean(data, 1).reshape(-1,1)*np.ones(data.shape)
std = np.std(data, 1).reshape(-1,1)*np.ones(data.shape)
data_normed = (data-mean)/std
# Run PCA
pca = PCA()
output = pca.fit_transform(data_normed.T).T
# Plot the features and PCA outputs
if param_space == 'bending' and pop is None:
names[3] = 'log(r/r500)'
corner(data.T, labels=names)
corner(output.T, labels=['PC%i'%i for i in range(len(names))])
# What the PCs are comprised of, printed as html table
output = np.hstack([names.reshape(-1,1), pca.components_])
    print '<table style="width: 100%;" border="0" width="1887" height="127"><tbody>'
for ix, row in enumerate(output.T):
if ix == 0:
out_str = '<tr><td>PC# (contribution to variance)</td><td>'
else:
out_str = '<tr><td>PC%i (%.3f)</td><td>' % (ix-1, pca.explained_variance_ratio_[ix-1])
for val in row:
try:
fval = float(val)
out_str += '%.2f</td><td>' % fval
except ValueError:
out_str += val + '</td><td>'
print out_str[:-4] + '</tr>'
print '</tbody></table>'
def make_corner_plot():
# Get data
param_list = np.array([['using_peaks', 'bending_angle'], ['RGZ', 'size_arcmin'], ['WHL', 'r/r500'], ['WHL', 'M500'], ['WHL', 'orientation_folded'], ['best', 'redshift']])
data = get_params(param_list, coll=bending_15)
# Discard BCGs
#data = data[:, data[5]!=0][:5]
# Check what outlier cuts need to be made
    labels = np.array(['Bending (deg)', 'Size (arcmin)', 'Separation\n(r/r500)', 'Cluster mass\n(10^14 M_sun)', 'Orientation\n(deg)', 'Redshift'])
corner(data.T, labels=labels)
# Apply readability cuts
bending_cut = data[0]<60
separation_cut = data[2]<10
mass_cut = data[3]<24
data = data[:, np.logical_and.reduce((bending_cut, separation_cut, mass_cut))]
corner(data.T, labels=labels)
plt.tight_layout()
def sample_numbers():
param_list = [{'RGZ.morphology':'double', 'RGZ.duplicate':0, 'best.redshift':{'$exists':True}}, \
{'RGZ.morphology':'double', 'RGZ.duplicate':0, 'best.redshift':{'$exists':True}}, \
{'RGZ.morphology':'double', 'RGZ.duplicate':0, 'RGZ.radio_consensus':{'$gte':.65}, 'RGZ.size_arcmin':{'$lte':1.5}, 'RGZ.overedge':0, 'using_peaks.bending_angle':{'$lte':135.}, 'best.redshift':{'$gte':0.02, '$lte':0.8}}, \
{'RGZ.morphology':'double', 'RGZ.duplicate':0, 'RGZ.radio_consensus':{'$gte':.65}, 'RGZ.size_arcmin':{'$lte':1.5}, 'RGZ.overedge':0, 'using_peaks.bending_angle':{'$lte':135.}, 'best.redshift':{'$gte':0.02, '$lte':0.8}}, \
{'RGZ.morphology':'double', 'RGZ.duplicate':0, 'RGZ.radio_consensus':{'$gte':.65}, 'RGZ.size_arcmin':{'$lte':1.5}, 'RGZ.overedge':0, 'using_peaks.bending_corrected':{'$lte':135.}, 'best.redshift':{'$gte':0.02, '$lte':0.8}}, \
{'RGZ.morphology':'double', 'RGZ.duplicate':0, 'RGZ.radio_consensus':{'$gte':.65}, 'RGZ.size_arcmin':{'$lte':1.5}, 'RGZ.overedge':0, 'using_peaks.bending_corrected':{'$lte':135.}, 'best.redshift':{'$gte':0.02, '$lte':0.8}}]
coll_list = [bent_sources, bending_15, bent_sources, bending_15, bent_sources, bending_15]
label_list = ['No cuts', 'Matched', 'Initial cuts (all)', 'Initial cuts (matched)', 'Final (all)', 'Final (matched)']
for params, coll, label in zip(param_list, coll_list, label_list):
double = coll.find(params).count()
params['RGZ.morphology'] = 'triple'
triple = coll.find(params).count()
print label, '(double, triple, total):', double, triple, double+triple
def contamination():
z = []
dz = []
with open('/home/garon/Documents/RGZdata/bending/dz_whl_with_z.csv', 'r') as f:
r = csv.reader(f)
r.next()
for row in r:
z.append(float(row[0]))
dz.append(float(row[1]))
z = np.array(z)
dz = np.array(dz)
dz_norm = dz/(1+z)
mask = np.abs(dz_norm)<0.04
comp = 0.982 # Completeness fraction using 0.04 cut
cont = 0.215 # Contamination fraction using 0.04 cut
f, ax = plt.subplots(1)
n, bins, patches = ax.hist(dz_norm[np.abs(dz_norm)<=.1], 30)
ax.axvline(0.04, c='r', lw=2, label=r'$\Delta z$ threshold')
ax.axvline(-0.04, c='r', lw=2)
ax.axhline(np.median(n[(np.abs(bins)>0.04)[:-1]]), c='k', lw=2, ls=':', label='Background')
#ax.plot(0, 0, c='w', label='Completeness: %.3f\nContamination: %.3f' % (comp, cont))
ax.legend()
ax.set_xlim(-0.1, 0.1)
ax.set_xlabel('$\Delta z$')
ax.set_ylabel('Count')
#plt.title('Full RGZ-WH15 cross-match')
plt.tight_layout()
sep, n, bins = r500_hist(bending_15, total_cuts, 20)
mean_cont = cont * sum(n) / (np.pi*np.square(bins[-1])) # Number of contaminating sources per square r500
bin_area = np.pi * np.array( [np.square(bins[0])] + [np.square(bins[i+1])-np.square(bins[i]) for i in np.arange(len(bins)-1)] )
cont_per_bin = mean_cont * bin_area
fractional_cont = cont_per_bin / n
fc = interp1d(bins, fractional_cont, kind='slinear')
print '%.3f%% at x=%.2f' % (100*fc(1.5), 1.5) # fc returns a fraction; convert to percent
#for i in np.logspace(-1, 2, 20):
# xx = brentq(lambda x: fc(x)-i/100., bins[0], bins[-1])
# print '%.1f%% at x=%.2f' % (i,xx)
'''f, ax = plt.subplots(1)
density = n/bin_area
err = np.sqrt(n)/bin_area
ax.errorbar(bins, density, yerr=err, fmt='o', ms=4, label='Source density')
logx = np.log10(bins[1:-3])
logy = np.log10(density[1:-3])
logyerr = err[1:-3] / density[1:-3]
fitfunc = lambda p, x: p[0]*x + p[1]
errfunc = lambda p, x, y, err: (y - fitfunc(p, x)) / err
out = leastsq(errfunc, [-1,1], args=(logx, logy, logyerr), full_output=1)
index, amp = out[0]
index_err = np.sqrt(out[1][1][1])
amp_err = np.sqrt(out[1][0][0])*amp
ax.plot(bins, pow(10, index*np.log10(bins)+amp), c='k', label='$\sim(r/r_{500})^{%.2f\pm%.2f}$'%(index,index_err))
ax.set_xscale('log')
ax.set_yscale('log')
ax.legend(bbox_to_anchor=(1, 1))
ax.set_xlabel('Separation ($r_{500}$)')
ax.set_ylabel('Count / area of annulus')
#ax.set_title('Count density vs. separation')
ax.set_aspect('equal', adjustable='box')
plt.tight_layout()'''
f, (ax1, ax2) = plt.subplots(2, sharex=True)
ax1.plot(np.log10(bins), np.log10(n), label='Observed')
ax1.plot(np.log10(bins), np.log10(cont_per_bin), ls='--', label='Contamination')
ax1.legend(loc='lower right')
ax1.set_ylim(-2.24, 3.24)
ax1.set_yticks(np.arange(-2,4))
ax1.set_ylabel('$\log_{10}$ (Count)')
#ax1.set_title('Contamination')
ax2.plot(np.log10(bins), np.log10(fractional_cont), c='C2', label='Contamination\nfraction')
ax2.legend(loc='lower right')
ax2.axhline(0, ls=':', c='k')
ax2.set_xlabel(get_label('WHL.r/r500', True))
ax2.set_ylabel('$\log_{10}$ (Fraction)')
plt.tight_layout()
def r500_hist(coll, params, bin_count=20):
fig, ax = plt.subplots(1)
sep = []
for i in coll.find(params):
sep.append(i['WHL']['r/r500'])
sep = np.array(sep)
min_sep = min(np.log10(sep))
sep = np.clip(sep, .01, None) # Combine everything less than 0.01 into one bin
n, bins, patches = ax.hist(np.log10(sep), bins=bin_count)
bins0 = bins[0]
bins[0] = min_sep
n, bins, patches = ax.hist(sep, bins=pow(10,bins))
ax.set_xscale('log')
bins[0] = pow(10,bins0)
return sep, n, (bins[:-1]+bins[1:])/2.
def orientation(bent_cutoff=None, folded=True, r_min=0.01, r_max=10):
if bent_cutoff is None:
bent_cutoff = get_bent_cut()
print 'Bending excess cutoff: %.2f' % bent_cutoff
sep = []
bend = []
ori = []
for i in bending_15.find(total_cuts):
sep.append(i['WHL']['r/r500'])
bend.append(i['using_peaks']['bending_excess'])
if folded:
ori.append(i['WHL']['orientation_folded'])
else:
ori.append(i['WHL']['orientation_peaks'])
sep = np.array(sep)
bend = np.array(bend)
ori = np.array(ori)
inner = np.logical_and(0.01<sep, sep<1.5)
outer = np.logical_and(1.5<sep, sep<10)
seps = np.vstack([inner, outer])
print sum(inner), 'inner sources,', sum(outer), 'outer sources'
straight = bend<bent_cutoff
bent = bend>=bent_cutoff
bends = np.vstack([bent, straight])
print sum(straight), 'straight sources,', sum(bent), 'bent sources'
if folded:
max_ori = 90.
else:
max_ori = 180.
f, ax = plt.subplots(1)
#ax.set_title('Orientation distribution ($%.2f<r/r_{500}<%.2f$; $\Delta\\theta<%.0f$ deg)' % (r_min, r_max, excess_cutoff))
data = ori[np.logical_and(straight, np.logical_and(sep>r_min, sep<r_max))]
ad = ad_test(data, stats.uniform(0,max_ori))
print ad.pvalue, z_score(ad.pvalue), len(data)
ax.hist(data, bins=6, fill=False, hatch='//', label='$n=%i;~p=%.3f$'%(len(data), ad.pvalue) )
ax.set_ylim(0, 358)
ax.legend(loc='upper center')
ax.set_ylabel('Count')
ax.set_xlabel('Orientation angle (deg)')
plt.tight_layout()
f, ax = plt.subplots(1)
#ax.set_title('Orientation distribution ($%.2f<r/r_{500}<%.2f$; $\Delta\\theta>%.0f$ deg)' % (r_min, r_max, excess_cutoff))
data = ori[np.logical_and(bent, np.logical_and(sep>r_min, sep<r_max))]
if folded:
ad = ad_test(data, stats.uniform(0,max_ori))
print ad.pvalue, z_score(ad.pvalue), len(data)
else:
towards = data[data<90]
away = data[data>90]
sig = stats.anderson_ksamp([towards, max_ori-away]).significance_level
print 'Symmetric: p=%.4f' % sig
n, bins, _ = ax.hist(data, bins=6, fill=False, hatch='//', label='$n=%i;~p=%.3f$'%(len(data), ad.pvalue if folded else sig) )
ax.set_ylim(0, 75)
ax.legend(loc='upper center')
ax.set_ylabel('Count')
ax.set_xlabel('Orientation angle (deg)')
plt.tight_layout()
unfolded, sep = [], []
params = total_cuts.copy()
params['using_peaks.bending_excess'] = {'$gte': bent_cutoff}
params['WHL.r/r500'] = {'$gte': r_min, '$lte': r_max}
for source in bending_15.find(params):
unfolded.append(source['using_peaks']['bisector'] - source['WHL']['position_angle'])
sep.append(source['WHL']['r/r500'])
sep = np.array(sep)
unfolded = np.array(unfolded)
tangential = np.sin(unfolded*np.pi/180)
radial = np.cos(unfolded*np.pi/180)
beta = 1 - np.var(tangential) / np.var(radial)
print 'beta = %.2f +- %.2f' % (beta, beta*np.sqrt(2./(len(unfolded)-1)))
return None
y, yerr = [], []
for i in np.arange(int(max_ori/2.)):
a = sum(data<i)
b = sum(data>max_ori-i)
y.append(a-b)
yerr.append(np.sqrt(a+b))
f, ax = plt.subplots(1)
ax.errorbar(np.arange(int(max_ori/2.)), y, yerr=yerr)
ax.axhline(0)
ax.set_xlabel('Orientation cut')
ax.set_ylabel('Count')
ax.set_title('Excess of inward-moving sources')
mask = np.logical_and(bent, np.logical_and(sep>0.01, sep<15))
n = sum(mask)
bin_count = 6
bins = int(1.*n/bin_count) * np.arange(bin_count+1)
bins[-1] = n # end the final slice at the last element rather than excluding it
sep_sort = np.sort(sep[mask])
ori_sort = ori[mask][np.argsort(sep[mask])]
x, diff, err = [], [], []
for i,j in zip(bins[:-1],bins[1:]):
inward = sum(ori_sort[i:j]<30)
outward = sum(ori_sort[i:j]>60)
x.append(np.median(sep_sort[i:j]))
diff.append(inward-outward)
err.append(np.sqrt(inward+outward))
f, ax = plt.subplots(1)
ax.errorbar(x, diff, err)
ax.axhline(0, c='k', ls='dotted')
ax.set_xscale('log')
ax.set_xlabel('Separation ($r_{500}$)')
ax.set_ylabel('Count')
ax.set_title('Excess of radially-moving sources')
def orientation_test():
excess_cutoff = get_bent_cut()
r_min, r_max = 0.01, 10
sep, bend, folded, unfolded = [], [], [], []
for i in bending_15.find(total_cuts):
sep.append(i['WHL']['r/r500'])
bend.append(i['using_peaks']['bending_excess'])
folded.append(i['WHL']['orientation_folded'])
unfolded.append(i['WHL']['orientation_peaks'])
sep = np.array(sep)
bend = np.array(bend)
folded = np.array(folded)[np.logical_and(bend>excess_cutoff, np.logical_and(sep>r_min, sep<r_max))]
unfolded = np.array(unfolded)[np.logical_and(bend>excess_cutoff, np.logical_and(sep>r_min, sep<r_max))]
n_f, bins_f, _ = plt.hist(folded, bins=6, alpha=.8, normed=True, fill=False, edgecolor='r', label='Folded')
plt.figure()
n_u, bins_u, _ = plt.hist(unfolded, bins=12, alpha=.8, normed=True, fill=False, hatch='//', label='Unfolded')
# model 1: count goes to 0 at 180 deg
class quad(stats.rv_continuous):
def _argcheck(self, a, b, c):
return np.isfinite(a) and np.isfinite(b) and np.isfinite(c)
def _pdf(self, x, a, b, c):
x0 = 180.
norm = a*x0**3/3. + b*x0**2/2. + c*x0
if type(x) is float:
return max(a*x**2 + b*x + c, 0) / norm
elif type(x) is np.ndarray:
return np.max([a*x**2 + b*x + c, np.zeros(len(x))], axis=0) / norm
else:
raise TypeError('Got %s instead' % str(type(x)))
a, b, c = n_f[0], n_f[-1]/2., 0
popt = np.polyfit([0,90,180], [a,b,c], 2)
case1 = quad(a=0, b=180, shapes='a, b, c')(*popt)
ad1 = ad_test(unfolded, case1)
print 'Model 1: p=%.2g (%.2f sigma)' % (ad1.pvalue, z_score(ad1.pvalue))
# model 2: count plateaus at 90 deg
class piecewise_lin(stats.rv_continuous):
def _pdf(self, x, a, b, c):
norm = 45.*a + 135.*c
m = (c-a) / 90.
if type(x) is float:
return (c + m*min([x-90, 0])) / norm
elif type(x) is np.ndarray:
return (c + m*np.min([x-90, np.zeros(len(x))], axis=0)) / norm
else:
raise TypeError('Got %s instead' % str(type(x)))
a, b, c = n_f[0]-n_f[-1]/2., n_f[-1]/2., n_f[-1]/2.
case2 = piecewise_lin(a=0, b=180, shapes='a, b, c')(a, b, c)
ad2 = ad_test(unfolded, case2)
print 'Model 2: p=%.2g (%.2f sigma)' % (ad2.pvalue, z_score(ad2.pvalue))
x = np.arange(181)
plt.plot(x, case1.pdf(x), c='C0', label='Model 1')
plt.plot(x, case2.pdf(x), c='C1', ls=':', label='Model 2')
plt.legend()
plt.ylabel('Normalized count')
plt.xlabel('Orientation angle (deg)')
plt.tight_layout()
def size_dependence():
params0, params1 = total_cuts.copy(), total_cuts.copy()
params0['WHL.r/r500'] = {'$gte':0.01, '$lt':1.5}
params1['WHL.r/r500'] = {'$gte':1.5, '$lt':10.}
# Get trends
window_size, run_x_50, run_y_25, size0, run_y_75 = get_trends(params0, 'WHL.r/r500', 'RGZ.size_arcmin', bending_15, False)
sep0 = (run_x_50-run_x_50[0])/(run_x_50[-1]-run_x_50[0])
window_size, run_x_50, run_y_25, size1, run_y_75 = get_trends(params1, 'WHL.r/r500', 'RGZ.size_arcmin', bending_15, False)
sep1 = (run_x_50-run_x_50[0])/(run_x_50[-1]-run_x_50[0])
# Downsample size1 to the same length as size0 (mask computed but currently unused; the AD test below compares the raw values instead)
mask = (np.linspace(0,1,len(size0)) * (len(size1)-1)).astype(int)
# Get values
size0, size1 = [], []
for i in bending_15.find(params0):
size0.append(i['RGZ']['size_arcmin'])
for i in bending_15.find(params1):
size1.append(i['RGZ']['size_arcmin'])
# AD test the values
print 'Different separations:', stats.anderson_ksamp([size0, size1])
# Repeat for masses
params0, params1 = total_cuts.copy(), total_cuts.copy()
params0['WHL.M500'] = {'$lte':10}
params1['WHL.M500'] = {'$gte':15}
size0, size1 = [], []
for i in bending_15.find(params0):
size0.append(i['RGZ']['size_arcmin'])
for i in bending_15.find(params1):
size1.append(i['RGZ']['size_arcmin'])
# AD test the values
print 'Different masses:', stats.anderson_ksamp([size0, size1])
def trend_tests():
# Get trends for mass bins
params0, params1 = total_cuts.copy(), total_cuts.copy()
params0['WHL.r/r500'] = {'$lte':7.8}
params1['WHL.r/r500'] = {'$gte':7.8}
window_size, run_x_50, run_y_25, size0, run_y_75 = get_trends(params0, 'WHL.r/r500', 'RGZ.size_arcmin', bending_15, False)
sep0 = (run_x_50-run_x_50[0])/(run_x_50[-1]-run_x_50[0])
window_size, run_x_50, run_y_25, size1, run_y_75 = get_trends(params1, 'WHL.r/r500', 'RGZ.size_arcmin', bending_15, False)
sep1 = (run_x_50-run_x_50[0])/(run_x_50[-1]-run_x_50[0])
print stats.mannwhitneyu(size0, size1, alternative='two-sided')
def get_errs(get_errs=False):
first_err = np.sqrt(0.3**2+0.02**2) # https://arxiv.org/pdf/1501.01555.pdf
for source in bending_15.find().batch_size(100):
# Positional errors
if get_errs:
if 'SDSS' in source:
sql = 'select raerr, decerr from photoprimary where objid=%i' % source['SDSS']['objID']
df = SDSS_select(sql)
ra_err = df['raerr'][0]
dec_err = df['decerr'][0]
else:
ir_pos = coord.SkyCoord(source['AllWISE']['ra'], source['AllWISE']['dec'], unit=(u.deg,u.deg), frame='icrs')
table = Irsa.query_region(ir_pos, catalog='allwise_p3as_psd', radius=1.*u.arcsec)
ra_err = table['sigra'][0]
dec_err = table['sigdec'][0]
else:
ra_err = source['best']['ra_err']
dec_err = source['best']['dec_err']
pos_err = np.sqrt(ra_err**2 + dec_err**2 + first_err**2)
# Morphology errors
area = 0
for comp in source['RGZ']['components']:
area += comp['solid_angle']
# Total errors
size = 60.* source['RGZ']['size_arcmin']
frac_pos_err = pos_err / size / 4. # fractional pos error
morph_err = area / size**2 * 180. / np.pi # total morph error in deg
frac_morph_err = morph_err / source['using_peaks']['bending_angle']
total_err = np.sqrt(frac_pos_err**2 + frac_morph_err**2)
bend_err = total_err * source['using_peaks']['bending_angle']
bending_15.update({'_id':source['_id']}, {'$set': {'best.ra_err':ra_err, 'best.dec_err':dec_err, 'best.frac_positional_err':frac_pos_err, 'RGZ.solid_angle':area, 'RGZ.frac_morphology_err':frac_morph_err, 'using_peaks.bending_frac_err':total_err, 'using_peaks.bending_err':bend_err}})
window, size, area_25, area_50, area_75 = get_trends(total_cuts, 'RGZ.size_arcmin', 'RGZ.solid_angle', bending_15, False)
m, b = np.polyfit(size, area_50, 1)
y = m*size+b
#plt.plot(size, area_50, label='Running median')
#plt.plot(size, y, label='%.0fx%+.0f\nR^2=%.3f' % (m, b, r2_score(area_50,y)))
#plt.legend()
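# Hedged worked example (illustrative numbers, not catalog values) of the error
# budget in get_errs: the fractional positional and morphological errors add in
# quadrature, and the total fraction scales the measured bending angle.
def _demo_bending_error():
    import numpy as np
    pos_err, size, bend = 0.4, 30., 20.   # arcsec, arcsec, deg (assumed)
    area = 120.                           # total component solid angle in arcsec^2 (assumed)
    frac_pos_err = pos_err / size / 4.
    frac_morph_err = (area / size**2 * 180. / np.pi) / bend
    total_err = np.sqrt(frac_pos_err**2 + frac_morph_err**2)
    print 'bending error: %.1f deg (%.1f%% fractional)' % (total_err*bend, 100.*total_err)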
def rmsd(params=total_cuts.copy(), x_param='RGZ.size_arcmin', y_param='bending_angle', plot=True, coll=bending_15):
x_param_list = x_param.split('.')
y0_param_list = ['using_peaks', y_param]
y1_param_list = ['using_contour', y_param]
x, y0, y1 = [], [], []
for i in coll.find(params).sort(x_param, 1):
x.append(i[x_param_list[0]][x_param_list[1]])
y0.append(i[y0_param_list[0]][y0_param_list[1]])
y1.append(i[y1_param_list[0]][y1_param_list[1]])
x = np.array(x)
y0 = np.array(y0)
y1 = np.array(y1)
window_size = min(len(x)/10, 100)
if 'WHL.population' in params and params['WHL.population'] == 'BCG':
run_x = [0.01, 0.011]
run_y0 = 2*[np.percentile(y0, 50)]
run_y1 = 2*[np.percentile(y1, 50)]
run_rmsd = 2*[np.sqrt(sum((y0-y1)**2)/len(x))]
else:
run_x = np.array([np.percentile(x[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
run_y0 = np.array([np.percentile(y0[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
run_y1 = np.array([np.percentile(y1[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
run_rmsd = np.array([np.sqrt(sum((y0[ix:ix+window_size]-y1[ix:ix+window_size])**2)/window_size) for ix in np.arange(len(x)-window_size+1)])
if plot:
fig, (ax1, ax2) = plt.subplots(2, sharex=True)
ax1.plot(run_x, run_y0, label='Peak method')
ax1.plot(run_x, run_y1, label='Contour method')
ax1.legend(loc='best')
ax1.set_ylabel(get_label(y_param))
#ax1.set_title('%s comparison' % y_param)
ax2.plot(run_x, run_rmsd)
ax2.set_xlabel(get_label(x_param))
ax2.set_ylabel('RMS difference')
fig.tight_layout()
fig, ax = plt.subplots(1)
ax.plot(run_x, run_y0, label='Bending angle (peak method)')
#a, b = np.polyfit(run_x[run_x<1.05], run_rmsd[run_x<1.05], 1)
#ax.plot(run_x, a*run_x+b, label='rms difference linear fit')
ax.plot(run_x, run_rmsd, label='RMS difference', ls='--')
ax.legend(loc='best')
ax.set_xlabel(get_label(x_param))
ax.set_ylabel('Angle (deg)')
#ax.set_title('Bending error comparison')
fig.tight_layout()
return window_size, run_x, run_y0, run_y1, run_rmsd
def rmsd_debug():
params = total_cuts.copy()
params['using_peaks.bending_angle'] = params['using_peaks.bending_corrected']
del params['using_peaks.bending_corrected']
x_param = 'RGZ.size_arcmin'
y_param = 'bending_angle'
x_param_list = x_param.split('.')
y0_param_list = ['using_peaks', y_param]
y1_param_list = ['using_contour', y_param]
x, y0, y1, zid = [], [], [], []
for i in bending_15.find(params).sort(x_param, 1):
x.append(i[x_param_list[0]][x_param_list[1]])
y0.append(i[y0_param_list[0]][y0_param_list[1]])
y1.append(i[y1_param_list[0]][y1_param_list[1]])
zid.append(i['RGZ']['zooniverse_id'])
x = np.array(x)
y0 = np.array(y0)
y1 = np.array(y1)
window_size = min(len(x)/10, 100)
print 'Original sample:', len(x)
run_x = np.array([np.percentile(x[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
run_y0 = np.array([np.percentile(y0[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
run_y1 = np.array([np.percentile(y1[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
run_rmsd = np.array([np.sqrt(sum((y0[ix:ix+window_size]-y1[ix:ix+window_size])**2)/window_size) for ix in np.arange(len(x)-window_size+1)])
fig, ax = plt.subplots(1)
plt.scatter(x, y0, s=1, label='using peaks')
plt.scatter(x, y1, s=1, label='using contour')
plt.plot(run_x, run_rmsd, c='k', label='rmsd')
plt.xlabel(get_label(x_param))
plt.ylabel(get_label('%s.%s' % tuple(y0_param_list)))
'''outlier = np.logical_and(np.logical_and(x>1.17, x<1.19), np.logical_and(y1>105, y1<109))
x = x[np.logical_not(outlier)]
y1 = y1[np.logical_not(outlier)]
print np.where(outlier, zid, False)[outlier][0]
run_x = np.array([np.percentile(x[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
run_y0 = np.array([np.percentile(y0[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
run_y1 = np.array([np.percentile(y1[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
run_rmsd = np.array([np.sqrt(sum((y0[ix:ix+window_size]-y1[ix:ix+window_size])**2)/window_size) for ix in np.arange(len(x)-window_size+1)])
plt.plot(run_x, run_rmsd, c='g', label='outlier\nremoved')
ell = matplotlib.patches.Ellipse(xy=(1.18,107), width=0.06, height=13, fill=False, color='r', label='outlier')
ax.add_artist(ell)'''
logdy = np.log10(np.abs(y0-y1))
mask = logdy < 3*np.std(logdy)
outliers = np.logical_not(mask)
plt.scatter(x[outliers], y0[outliers], c='g', label='outliers', s=1)
plt.scatter(x[outliers], y1[outliers], c='g', s=1)
x = x[mask]
y0 = y0[mask]
y1 = y1[mask]
print 'Outliers:', sum(outliers)
run_x = np.array([np.percentile(x[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
run_y0 = np.array([np.percentile(y0[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
run_y1 = np.array([np.percentile(y1[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
run_rmsd = np.array([np.sqrt(sum((y0[ix:ix+window_size]-y1[ix:ix+window_size])**2)/window_size) for ix in np.arange(len(x)-window_size+1)])
plt.plot(run_x, run_rmsd, c='g', label='$<3\sigma$')
plt.legend()
def is_outlier(points, thresh=3.5):
"""
Returns a boolean array with True if points are outliers and False
otherwise.
Parameters:
-----------
points : An numobservations by numdimensions array of observations
thresh : The modified z-score to use as a threshold. Observations with
a modified z-score (based on the median absolute deviation) greater
than this value will be classified as outliers.
Returns:
--------
mask : A numobservations-length boolean array.
References:
----------
Boris Iglewicz and David Hoaglin (1993), "Volume 16: How to Detect and
Handle Outliers", The ASQC Basic References in Quality Control:
Statistical Techniques, Edward F. Mykytka, Ph.D., Editor.
"""
if len(points.shape) == 1:
points = points[:,None]
median = np.median(points, axis=0)
diff = np.sum((points - median)**2, axis=-1)
diff = np.sqrt(diff)
med_abs_deviation = np.median(diff)
modified_z_score = 0.6745 * diff / med_abs_deviation
return modified_z_score > thresh
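# Usage sketch for is_outlier: with a Gaussian sample plus two planted extreme
# points, the modified z-score should flag (roughly) only the planted points.
def _demo_is_outlier():
    import numpy as np
    rng = np.random.RandomState(0)
    points = np.concatenate([rng.normal(0, 1, 500), [25., -30.]])
    mask = is_outlier(points, thresh=3.5)
    print '%i of %i points flagged' % (mask.sum(), len(points))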
def z_score(p_vals, tail='two-sided'):
assert tail in ['one-sided', 'two-sided'], 'tail must be one-sided or two-sided'
if tail == 'one-sided':
return stats.norm.ppf(1-p_vals)
elif tail == 'two-sided':
return stats.norm.ppf(1-p_vals/2.)
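# Sanity check for z_score: a two-sided p = 0.05 corresponds to ~1.96 sigma and
# a one-sided p = 0.05 to ~1.64 sigma.
def _demo_z_score():
    print 'two-sided p=0.05 -> %.2f sigma' % z_score(0.05)               # ~1.96
    print 'one-sided p=0.05 -> %.2f sigma' % z_score(0.05, 'one-sided')  # ~1.64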
def mass_comp(region):
params = total_cuts.copy()
params['WHL.population'] = region
mass = []
bend = []
for i in bending_15.find(params):
mass.append(i['WHL']['M500'])
bend.append(i['using_peaks']['bending_excess'])
mass = np.array(mass)
bend = np.array(bend)
rho = stats.spearmanr(mass, bend)
print 'p = ', rho.pvalue
print z_score(rho.pvalue), 'sigma'
w, run_mass, run_bend = get_trends(params, 'WHL.M500', 'using_peaks.bending_excess', bending_15, True, False)
popt = np.polyfit(run_mass, run_bend, 1)
lower, upper = 5, 20
print 'Rise of %.2f deg between %i and %i x 10^14 M_sun' % (popt[0]*(upper-lower), lower, upper)
return None
high = mass>12
low = mass<12
print sum(high), 'high mass sources,', sum(low), 'low mass sources'
print 'bending difference:', np.median(bend[high])-np.median(bend[low])
ad = stats.anderson_ksamp([bend[high], bend[low]])
print 'p =', ad.significance_level
print z_score(ad.significance_level), 'sigma'
plt.hist(bend[high], normed=True, alpha=.8, bins=11*(np.arange(13)-1), label='$M_{500} > 1.2\\times 10^{15}~M_\\odot$')
plt.hist(bend[low], normed=True, alpha=.8, bins=11*(np.arange(13)-1), label='$M_{500} < 1.2\\times 10^{15}~M_\\odot$')
plt.plot(0, 0, color='w', label='$p=%f$'%ad.significance_level)
plt.legend()
plt.xlabel('Excess bending angle (deg)')
plt.ylabel('Normalized count')
def mass_pop_comp():
bent_cut = get_bent_cut()
bent = []
straight = []
for i in bending_15.find(total_cuts):
if i['using_peaks']['bending_excess']>bent_cut:
bent.append(i['WHL']['M500'])
else:
straight.append(i['WHL']['M500'])
ad = stats.anderson_ksamp([bent, straight])
print 'p =', ad.significance_level
print z_score(ad.significance_level), 'sigma'
def bcg_comp():
sep = []
bend = []
for i in bending_15.find(total_cuts.copy()):
sep.append(i['WHL']['r/r500'])
bend.append(i['using_peaks']['bending_excess'])
sep = np.array(sep)
bend = np.array(bend)
bcg = sep<0.01
outer = np.logical_and(sep>1.5, sep<10)
print sum(bcg), 'BCGs,', sum(outer), 'outer sources'
ad = stats.anderson_ksamp([bend[bcg], bend[outer]])
print 'p =', ad.significance_level
print z_score(ad.significance_level), 'sigma'
plt.hist(bend[bcg], normed=True, alpha=.8, bins=10*np.arange(15), label='$r/r_{500} < 0.01$')
plt.hist(bend[outer], normed=True, alpha=.8, bins=10*np.arange(15), label='$1.5 < r/r_{500} < 10$')
plt.plot(0, 0, color='w', label='p=%.5f'%ad.significance_level)
plt.legend()
plt.xlabel('Excess bending angle (deg)')
plt.ylabel('Normalized count')
def asym_comp(align='radial', update_cut=None):
if update_cut is not None:
get_asymmetry(update_cut)
params = total_cuts.copy()
params['WHL.alignment'] = align
params['WHL.population'] = 'inner'
print 'Alignment:', align
sep = []
asym = []
p = []
grad_p = []
for i in bending_15.find(params):
sep.append(i['WHL']['r/r500'])
asym.append(i['using_contour']['asymmetry'])
p.append(i['WHL']['P'])
grad_p.append(i['WHL']['grad_P'])
sep = np.array(sep)
asym = np.array(asym)
p = np.array(p)
grad_p = np.array(grad_p)
print 'n:', len(sep)
popt, pcov = curve_fit(lambda x, a, b: a*x+b, sep, asym)
perr = np.sqrt(np.diag(pcov))
print 'slope:', popt[0], '+-', perr[0]
s = stats.spearmanr(sep, asym)
print 'r/r500:', s.pvalue, z_score(s.pvalue)
s = stats.spearmanr(p, asym)
#s = stats.spearmanr(p[p>5e-4], asym[p>5e-4])
print 'P:', s.pvalue, z_score(s.pvalue)
s = stats.spearmanr(grad_p, asym)
print 'grad_P:', s.pvalue, z_score(s.pvalue)
def excess_comp():
params = total_cuts.copy()
params['WHL.population'] = 'inner'
sep = []
bend = []
for i in bending_15.find(params):
sep.append(i['WHL']['r/r500'])
bend.append(i['using_peaks']['bending_excess'])
sep = np.array(sep)
bend = np.array(bend)
s = stats.spearmanr(sep, bend)
print s, z_score(s.pvalue)
def bend_corr_comp():
params = total_cuts.copy()
params['WHL.population'] = 'outer'
morph = []
bend = []
size = []
for source in bending_15.find(params):
morph.append(2 if source['RGZ']['morphology']=='double' else 3)
bend.append(source['using_peaks']['bending_angle'])
size.append(source['RGZ']['size_arcmin'])
morph = np.array(morph)
bend = np.array(bend)
size = np.array(size)
ad = stats.anderson_ksamp([bend[morph==2], bend[morph==3]])
print 'p =', ad.significance_level
print z_score(ad.significance_level), 'sigma'
plt.hist(bend[morph==2], normed=True, alpha=0.8, label='double', bins=20)
plt.hist(bend[morph==3], normed=True, alpha=0.8, label='triple', bins=20)
plt.xlabel(get_label('using_peaks.bending_angle'))
plt.ylabel('Normalized count')
plt.legend(loc='upper right')
min_size = max(min(size[morph==2]), min(size[morph==3]))
max_size = min(max(size[morph==2]), max(size[morph==3]))
x = np.random.uniform(min_size, max_size, 1000)
f1 = lambda x: 9.7*np.exp(-1.5*(x-0.54))
f2 = lambda x: 7.6*np.exp(-1.3*(x-0.77))
def excess(bending, baseline):
diff = bending**2 - baseline**2
return np.sign(diff) * np.sqrt(np.abs(diff))
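# Worked example for excess(): the baseline is subtracted in quadrature and the
# sign is kept, so bending below the baseline comes out negative.
def _demo_excess():
    print excess(25., 20.)   # sqrt(25^2 - 20^2) = 15.0
    print excess(15., 20.)   # -sqrt(20^2 - 15^2) ~ -13.2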
def get_median_bend(params=total_cuts.copy()):
params['WHL.population'] = 'outer'
bend = []
for i in bending_15.find(params):
bend.append(i['using_peaks']['bending_corrected'])
return np.median(bend)
def get_bending_excess(params=total_cuts.copy()):
median_bend = get_median_bend(params)
for i in bending_15.find(params):
bend = i['using_peaks']['bending_corrected']
bending_15.update({'_id':i['_id']}, {'$set':{'using_peaks.bending_excess':excess(bend, median_bend)}})
for i in bent_sources.find(params):
bend = i['using_peaks']['bending_corrected']
bent_sources.update({'_id':i['_id']}, {'$set':{'using_peaks.bending_excess':excess(bend, median_bend)}})
def get_asymmetry(cut, coll=bending_15, params=total_cuts.copy()):
for method in ['using_peaks', 'using_contour']:
print method
if 'WHL' in coll.find_one(params):
for i in coll.find(params):
angle_c = coord.Angle(i['WHL']['position_angle']*u.deg)
angle_0 = coord.Angle(i[method]['pos_angle_0']*u.deg)
angle_1 = coord.Angle(i[method]['pos_angle_1']*u.deg)
diff_0 = min( (angle_c-angle_0).wrap_at('360d'), (angle_0-angle_c).wrap_at('360d') )
diff_1 = min( (angle_c-angle_1).wrap_at('360d'), (angle_1-angle_c).wrap_at('360d') )
if diff_0 < diff_1:
inner = i[method]['tail_deg_0']
outer = i[method]['tail_deg_1']
else:
inner = i[method]['tail_deg_1']
outer = i[method]['tail_deg_0']
if (diff_0.degree<cut and diff_1.degree>180-cut) or (diff_1.degree<cut and diff_0.degree>180-cut):
alignment = 'radial'
elif cut<diff_0.degree and diff_0.degree<180-cut and cut<diff_1.degree and diff_1.degree<180-cut:
alignment = 'tangential'
else:
alignment = 'other'
if method == 'using_contour':
coll.update({'_id':i['_id']}, {'$set':{method+'.asymmetry':inner/outer, 'WHL.alignment':alignment}})
else:
coll.update({'_id':i['_id']}, {'$set':{method+'.asymmetry':inner/outer}})
else:
for i in coll.find(params):
if np.random.randint(0,2):
inner = i[method]['tail_deg_0']
outer = i[method]['tail_deg_1']
else:
inner = i[method]['tail_deg_1']
outer = i[method]['tail_deg_0']
coll.update({'_id':i['_id']}, {'$set':{method+'.asymmetry':inner/outer}})
def pressure_calc():
'''Calculates the pressure (in keV/cm^3) using Arnaud et al. 2010'''
h70 = float(cosmo.H(0) / (70.*u.km/u.Mpc/u.s) )
alphaP = 1./0.561 - 5./3.
P0 = 8.403*pow(h70,-1.5)
c500, gamma, alpha, beta = 1.177, 0.3081, 1.0510, 5.4905
h = lambda z: float(cosmo.H(z) / cosmo.H(0))
P500 = lambda z, M500: 1.65e-3 * pow(h(z),8./3.) * pow(M500*h70/3.,2./3.) * pow(h70,2.)
pp = lambda x: P0 * pow(c500*x,-1*gamma) * pow(1+pow(c500*x,alpha),(gamma-beta)/alpha)
alphaP_prime = lambda x: 0.10 - (alphaP+0.10) * pow(2*x,3) / (1+pow(2*x,3))
P = lambda x, z, M500: P500(z,M500) * pp(x) * pow(M500*h70/3.,alphaP+alphaP_prime(x))
for source in bending_15.find():
x = source['WHL']['r/r500']
z = source['WHL']['zbest']
M500 = source['WHL']['M500']
theta0 = np.abs(source['using_peaks']['pos_angle_0'] - source['WHL']['position_angle'])
theta1 = np.abs(source['using_peaks']['pos_angle_1'] - source['WHL']['position_angle'])
if theta0>90:
theta0 = 180 - theta0
if theta1>90:
theta1 = 180 - theta1
dx0 = source['using_contour']['tail_kpc_0'] * np.cos(np.radians(theta0)) / 1000. / source['WHL']['r500'] # angles are in degrees; convert before np.cos
dx1 = source['using_contour']['tail_kpc_1'] * np.cos(np.radians(theta1)) / 1000. / source['WHL']['r500']
dP = np.abs( (P(np.abs(x+dx0),z,M500) - P(np.abs(x+dx1),z,M500)) / (dx0 - dx1) )
bending_15.update({'_id':source['_id']}, {'$set':{'WHL.P500':P500(z,M500), 'WHL.P':P(x,z,M500), 'WHL.grad_P':dP}})
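# Minimal sketch of the dimensionless GNFW pressure shape used in pressure_calc
# (Arnaud et al. 2010). The h70 factor in P0 is omitted for illustration; the
# point is just that pp(x) falls steeply with x = r/r500.
def _demo_gnfw_shape():
    c500, gamma, alpha, beta = 1.177, 0.3081, 1.0510, 5.4905
    P0 = 8.403
    pp = lambda x: P0 * pow(c500*x, -gamma) * pow(1 + pow(c500*x, alpha), (gamma-beta)/alpha)
    for x in [0.1, 0.5, 1.0, 2.0]:
        print 'x=%.1f  pp=%.3g' % (x, pp(x))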
def get_fits(params, outd):
import shutil
if os.path.exists(outd):
count = len(os.listdir(outd))
cont = raw_input('%s exists and contains %i files; continue? (y/n) ' % (outd,count))
if cont.lower() == 'n':
return
else:
print 'Initializing', outd
os.makedirs(outd)
with open('%s/first_fits.txt' % rgz_path) as f:
lines = f.readlines()
pathdict = {}
for l in lines:
spl = l.split(' ')
pathdict[spl[1].strip()] = '%s/rgz/raw_images/RGZ-full.%i/FIRST-IMGS/%s.fits' % (data_path, int(spl[0]), spl[1].strip())
for source in bending_15.find(params).sort('WHL.r/r500', -1).limit(55):
print source['WHL']['r/r500']
zid = source['RGZ']['zooniverse_id']
fid = catalog.find_one({'zooniverse_id':zid})['first_id']
fits_loc = pathdict[fid]
shutil.copy(fits_loc, outd+fid+'.fits')
def make_sdss_sample():
if not sdss_sample.count():
sdss_sample.create_index('bestObjID', unique=True)
else:
print '%i entries already in catalog' % sdss_sample.count()
params = total_cuts.copy()
params['best.ra'] = {'$gt':100, '$lt':275}
zs, ras, decs = [], [], []
for source in bending_15.find(params):
zs.append(source['best']['redshift'])
ras.append(source['best']['ra'])
decs.append(source['best']['dec'])
zs = np.array(zs)
ras = np.array(ras)
decs = np.array(decs)
for x in [zs, ras, decs]:
x.sort()
zs_cdf = np.arange(len(zs))/float(len(zs)-1)
zs_cdf_inv = interp1d(zs_cdf, zs)
ras_cdf = np.arange(len(ras))/float(len(ras)-1)
ras_cdf_inv = interp1d(ras_cdf, ras)
decs_cdf = np.arange(len(decs))/float(len(decs)-1)
decs_cdf_inv = interp1d(decs_cdf, decs)
for i in range(20000):
probs = np.random.uniform(0,1,3)
z = zs_cdf_inv(probs[0])
ra = ras_cdf_inv(probs[1])
dec = decs_cdf_inv(probs[2])
query = '''select top 1 s.bestObjID, s.ra, s.dec, s.z, s.zErr,
g.cModelMag_u, g.cModelMag_g, g.cModelMag_r, g.cModelMag_i, g.cModelMag_z
from SpecObj as s
join GalaxyTag as g on s.bestObjID=g.objID
where s.class="GALAXY" and s.zWarning=0 and (s.z between %f and %f) and (s.ra between %f and %f) and
(s.dec between %f and %f)''' % (0.99*z, 1.01*z, 0.99*ra, 1.01*ra, 0.99*dec, 1.01*dec)
df = SDSS_select(query)
if len(df):
entry = {}
for key in df:
entry[key] = df[key][0]
try:
sdss_sample.insert(entry)
except pymongo.errors.DuplicateKeyError as e:
print e
pass
print '%i/%i' % (sdss_sample.count(), i+1)
def sdss_patch():
for s in sdss_sample.find({'cModelMag_u':{'$exists':False}}).batch_size(50):
query = '''select cModelMag_u, cModelMag_g, cModelMag_r, cModelMag_i, cModelMag_z
from GalaxyTag as g
where objID=%s''' % s['bestObjID']
df = SDSS_select(query)
update = {}
try:
for key in df:
update[key] = df[key][0]
sdss_sample.update({'_id':s['_id']}, {'$set':update})
except IndexError:
pass
for s in sdss_sample.find():
xmatch.update({'SDSS._id':s['_id']}, {'$set':{'SDSS':s}})
def plot_sdss_sample():
params = total_cuts.copy()
params['best.ra'] = {'$gt':100, '$lt':275}
params['SDSS.i'] = {'$exists':True}
zs, ras, decs, mags = [], [], [], []
for source in bending_15.find(params):
zs.append(source['best']['redshift'])
ras.append(source['best']['ra'])
decs.append(source['best']['dec'])
mags.append(source['SDSS']['i'])
z_r, ra_r, dec_r, mag_r = [], [], [], []
for source in sdss_sample.find({'cModelMag_i':{'$exists':True}}):
z_r.append(source['z'])
ra_r.append(source['ra'])
dec_r.append(source['dec'])
mag_r.append(source['cModelMag_i'])
fig, ax = plt.subplots(1)
ax.hist(zs, bins=15, alpha=.8, normed=True, label='Bending sample')
ax.hist(z_r, bins=15, alpha=.8, normed=True, label='SDSS sample')
ax.legend()
ax.set_xlabel('z')
ax.set_ylabel('Normalized count')
ax.set_title('Redshift distribution (northern region)')
fig, ax = plt.subplots(1)
ax.hist(mags, bins=15, alpha=.8, normed=True, label='Bending sample')
ax.hist(mag_r, bins=15, alpha=.8, normed=True, label='SDSS sample')
ax.legend()
ax.set_xlabel('i-band (mag)')
ax.set_ylabel('Normalized count')
ax.set_title('i-band magnitude distribution (northern region)')
fig, ax = plt.subplots(1)
ax.scatter(ras, decs, s=1, alpha=.8, label='Bending sample')
ax.scatter(ra_r, dec_r, s=1, alpha=.8, label='SDSS sample')
ax.legend()
ax.set_xlabel('RA (deg)')
ax.set_ylabel('Dec (deg)')
ax.set_title('Skymap of sources (northern region)')
def sdss_xmatch():
count = 0
for source in sdss_sample.find().batch_size(50):
count += 1
ir = coord.SkyCoord(source['ra'], source['dec'], unit=(u.deg,u.deg), frame='icrs')
cluster_w = get_whl(ir, source['z'], source['zErr'], 15, 0.04*(1+source['z']))
if cluster_w is not None:
whl_prop = {}
c_pos = coord.SkyCoord(cluster_w['RAdeg'], cluster_w['DEdeg'], unit=(u.deg,u.deg), frame='icrs')
c_sep_arc = c_pos.separation(ir)
zbest = cluster_w['zspec'] if 'zspec' in cluster_w else cluster_w['zphot']
c_sep_mpc = float(cosmo.angular_diameter_distance(zbest)/u.Mpc * c_sep_arc.to(u.rad)/u.rad)
c_pos_angle = c_pos.position_angle(ir)
r = c_sep_mpc/cluster_w['r500']
if r < 0.01:
pop = 'BCG'
elif r >= 1.5:
pop = 'outer'
else:
pop = 'inner'
whl_prop = {'ra':c_pos.ra.deg, 'dec':c_pos.dec.deg, 'separation_deg':c_sep_arc.deg, 'separation_Mpc':c_sep_mpc, 'r/r500':r, 'population':pop, 'zbest':zbest}
for key in ['_id', 'N500', 'N500sp', 'RL*500', 'name', 'r500', 'zphot', 'zspec', 'M500']:
if key in cluster_w:
whl_prop[key] = cluster_w[key]
entry = {'SDSS':source, 'WHL':whl_prop}
print '%i/%i' % (xmatch.count(), count)
xmatch.insert(entry)
def sdss_density():
params = total_cuts.copy()
#params['SDSS.spec_redshift'] = {'$exists':True}
f1, (ax1, ax2) = plt.subplots(1, 2, sharey=True, subplot_kw=dict(adjustable='datalim', aspect='equal'))
ax = f1.add_subplot(111, frameon=False)
ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
f2, ax0 = plt.subplots(1)
sep, n, bins = r500_hist(bending_15, params, 20)
bin_area = np.pi * np.array( [np.square(bins[0])] + [np.square(bins[i+1])-np.square(bins[i]) for i in np.arange(len(bins)-1)] )
density = n/bin_area
err = np.sqrt(n)/bin_area
ax1.errorbar(np.log10(bins), np.log10(density), yerr=np.vstack((np.log10(density+err) - np.log10(density), np.log10(density) - np.log10(density-err))), fmt='o', ms=4, label='RGZ')
logx = np.log10(bins[1:-3])
logy = np.log10(density[1:-3])
logyerr = err[1:-3] / density[1:-3]
if sum(np.isnan(logyerr)):
logx = logx[np.logical_not(np.isnan(logyerr))]
logy = logy[np.logical_not(np.isnan(logyerr))]
logyerr = logyerr[np.logical_not(np.isnan(logyerr))]
fitfunc = lambda p, x: p[0]*x + p[1]
errfunc = lambda p, x, y, err: (y - fitfunc(p, x)) / err
out = leastsq(errfunc, [-1,1], args=(logx, logy, logyerr), full_output=1)
index, amp = out[0]
index_err = np.sqrt(out[1][1][1])
amp_err = np.sqrt(out[1][0][0])*amp
ax1.plot(np.log10(bins), index*np.log10(bins)+amp, c='k', label='$\\alpha = $\n$ %.2f\pm%.2f$'%(index,index_err))
err_max = np.maximum( (index+index_err)*np.log10(bins)+(amp+amp_err), (index-index_err)*np.log10(bins)+(amp+amp_err) )
err_min = np.minimum( (index+index_err)*np.log10(bins)+(amp-amp_err), (index-index_err)*np.log10(bins)+(amp-amp_err) )
#ax1.fill_between(np.log10(bins), err_min, err_max, color='k', lw=0, alpha=.5, label='$\\alpha = $\n$ %.2f\pm%.2f$'%(index,index_err))
ax1.legend(loc='upper right')
ax0.errorbar(np.log10(bins), np.log10(density), yerr=np.vstack((np.log10(density+err) - np.log10(density), np.log10(density) - np.log10(density-err))), fmt='o', ms=4, label='RGZ')
ax0.plot(np.log10(bins), index*np.log10(bins)+amp, c='k', label='$\\alpha = $\n$ %.2f\pm%.2f$'%(index,index_err))
r_bcgs = n[0]
r_cluster = sum(n[bins<10])
r_bcg_ratio = 1.*r_bcgs/r_cluster
r_bcg_err = r_bcg_ratio * (1./np.sqrt(r_bcgs) + 1./np.sqrt(r_cluster))
print 'RGZ BCGs: %i/%i (%.1f pm %.1f)%%' % (r_bcgs, r_cluster, 100*r_bcg_ratio, 100*r_bcg_err)
r_within_r500 = sum(n[bins<1][1:])
r_cluster_no_bcgs = sum(n[bins<10][1:])
r_within_ratio = 1.*r_within_r500/r_cluster_no_bcgs
r_within_err = r_within_ratio * (1./np.sqrt(r_within_r500) + 1./np.sqrt(r_cluster_no_bcgs))
print 'RGZ r500: %i/%i (%.1f pm %.1f)%%' % (r_within_r500, r_cluster_no_bcgs, 100*r_within_ratio, 100*r_within_err)
sep, n, bins = r500_hist(xmatch, {}, 20)
bin_area = np.pi * np.array( [np.square(bins[0])] + [np.square(bins[i+1])-np.square(bins[i]) for i in np.arange(len(bins)-1)] )
density = n/bin_area
err = np.sqrt(n)/bin_area
ax2.errorbar(np.log10(bins), np.log10(density), yerr=np.vstack((np.log10(density+err) - np.log10(density), np.log10(density) - np.log10(density-err))), fmt='o', ms=4, label='SDSS')
logx = np.log10(bins[1:-3])
logy = np.log10(density[1:-3])
logyerr = err[1:-3] / density[1:-3]
out = leastsq(errfunc, [-1,1], args=(logx, logy, logyerr), full_output=1)
index, amp = out[0]
index_err = np.sqrt(out[1][1][1])
amp_err = np.sqrt(out[1][0][0])*amp
ax2.plot(np.log10(bins), index*np.log10(bins)+amp, c='k', label='$\\alpha = $\n$ %.2f\pm%.2f$'%(index,index_err))
err_max = np.maximum( (index+index_err)*np.log10(bins)+(amp+amp_err), (index-index_err)*np.log10(bins)+(amp+amp_err) )
err_min = np.minimum( (index+index_err)*np.log10(bins)+(amp-amp_err), (index-index_err)*np.log10(bins)+(amp-amp_err) )
#ax2.fill_between(np.log10(bins), err_min, err_max, color='k', lw=0, alpha=.5, label='$\\alpha = $\n$ %.2f\pm%.2f$'%(index,index_err))
ax2.legend(loc='upper right')
ax0.errorbar(np.log10(bins), np.log10(density), yerr=np.vstack((np.log10(density+err) - np.log10(density), np.log10(density) - np.log10(density-err))), fmt='o', ms=4, label='SDSS')
ax0.plot(np.log10(bins), index*np.log10(bins)+amp, c='k', ls='--', label='$\\alpha = $\n$ %.2f\pm%.2f$'%(index,index_err))
ax0.legend(loc='upper right')
s_bcgs = n[0]
s_cluster = sum(n[bins<10])
s_bcg_ratio = 1.*s_bcgs/s_cluster
s_bcg_err = s_bcg_ratio * (1./np.sqrt(s_bcgs) + 1./np.sqrt(s_cluster))
print 'SDSS BCGs: %i/%i (%.1f pm %.1f)%%' % (s_bcgs, s_cluster, 100*s_bcg_ratio, 100*s_bcg_err)
s_within_r500 = sum(n[bins<1][1:])
s_cluster_no_bcgs = sum(n[bins<10][1:])
s_within_ratio = 1.*s_within_r500/s_cluster_no_bcgs
s_within_err = s_within_ratio * (1./np.sqrt(s_within_r500) + 1./np.sqrt(s_cluster_no_bcgs))
print 'SDSS r500: %i/%i (%.1f pm %.1f)%%' % (s_within_r500, s_cluster_no_bcgs, 100*s_within_ratio, 100*s_within_err)
ax.set_xlabel(get_label('WHL.r/r500', True))
ax1.set_ylabel('$\\log_{10}$ (Surface density of galaxies [$r_{500}^{-2}$])')
#ax.set_title('Surface density vs. separation')
f1.tight_layout()
ax0.set_xlabel(get_label('WHL.r/r500', True))
ax0.set_ylabel('$\\log_{10}$ (Surface density of galaxies [$r_{500}^{-2}$])')
#ax0.set_title('Surface density vs. separation')
f2.tight_layout()
r_s_bcgs = r_bcg_ratio / s_bcg_ratio
r_s_bcgs_err = r_s_bcgs * (r_bcg_err/r_bcg_ratio + s_bcg_err/s_bcg_ratio)
r_s_within = r_within_ratio / s_within_ratio
r_s_within_err = r_s_within * (r_within_err/r_within_ratio + s_within_err/s_within_ratio)
print 'BCG excess: %.3f pm %.3f\nr500 excess: %.3f pm %.3f' % (r_s_bcgs, r_s_bcgs_err, r_s_within, r_s_within_err)
def fractional_bent():
params = total_cuts.copy() # note: dict.copy() is shallow, so the line below also tightens the shared 'best.redshift' sub-dict in total_cuts itself
params['best.redshift']['$lte'] = 0.6
sep, bend = [], []
for source in bending_15.find(total_cuts.copy()):
sep.append(source['WHL']['r/r500'])
bend.append(source['using_peaks']['bending_excess'])
sep_field, bend_field = [], []
for source in bent_sources.find(total_cuts.copy()):
if bending_15.find_one({'RGZ.RGZ_id':source['RGZ']['RGZ_id']}) is None:
sep_field.append(99)
bend_field.append(source['using_peaks']['bending_excess'])
sep = np.array(sep+sep_field)
bend = np.array(bend+bend_field)
straight = bend<0
marginal = np.logical_and(0<bend, bend<5)
bent = bend>5
inner = sep<1.5 # np.logical_and(sep>0.01, sep<1.5)
cluster = sep<10
num, denom = sum(sep<1.5), len(sep)
print 'All: %.2f +- %.2f' % (1.*num/denom, 1.*num/denom * (1./np.sqrt(num) + 1./np.sqrt(denom)))
num, denom = sum(np.logical_and(sep<1.5, bend>get_bent_cut())), sum(bend>get_bent_cut())
print 'Highly bent: %.2f +- %.2f' % (1.*num/denom, 1.*num/denom * (1./np.sqrt(num) + 1./np.sqrt(denom)))
num, denom = sum(np.logical_and(sep<1.5, bend>50)), sum(bend>50)
print 'Max: %.2f +- %.2f' % (1.*num/denom, 1.*num/denom * (1./np.sqrt(num) + 1./np.sqrt(denom)))
fig, ax = plt.subplots(1)
n_all, bins, _ = ax.hist(np.log10(bend[bent]), bins=15, label='All sources')
n_all = np.insert(n_all, 0, [sum(straight), sum(marginal)])
n_inner, _, _ = ax.hist(np.log10(bend[np.logical_and(bent,inner)]), bins=bins, label='Sources within 1.5 r500')
n_inner = np.insert(n_inner, 0, [sum(np.logical_and(straight,inner)), sum(np.logical_and(marginal,inner))])
ax.set_xlabel('log(Bending excess [deg])')
ax.set_ylabel('Count')
#ax.set_title('Distributions of bending excess')
ax.legend()
plt.tight_layout()
fig, ax = plt.subplots(1)
lin_bins = np.insert(pow(10,(bins[:-1]+bins[1:])/2.), 0, [0, np.median(bend[marginal])])
n_inner = np.array([ sum(bend[inner]>i) for i in lin_bins ], dtype=float)
n_all = np.array([ sum(bend>i) for i in lin_bins ], dtype=float)
ratio = n_inner/n_all
err = ratio * np.sqrt(1/n_inner + 1/n_all)
plt.errorbar(lin_bins, ratio, yerr=err)
ax.set_xlabel('Excess bending angle (deg)')
ax.set_ylabel('Fraction within $1.5~r_{500}$')
#ax.set_title('Fraction of sources in BCG/inner regions of clusters')
plt.tight_layout()
def histedges_equalN(x, nbin):
npt = len(x)
return np.interp(np.linspace(0, npt, nbin + 1), np.arange(npt), np.sort(x))
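# Usage sketch for histedges_equalN: it returns nbin+1 edges such that each bin
# holds approximately the same number of points, even for skewed data.
def _demo_histedges_equalN():
    import numpy as np
    rng = np.random.RandomState(0)
    x = rng.lognormal(size=1000)
    edges = histedges_equalN(x, 4)
    n, _ = np.histogram(x, bins=edges)
    print 'edges:', np.round(edges, 2), 'counts per bin:', n   # ~250 each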
def import_env_csv1():
params = total_cuts.copy()
count = 0
env = {}
dirname = '/home/garon/Documents/RGZdata/bending/'
for filename in ['distantstraight_env.csv', 'distantbent_env.csv']:
with open(dirname+filename, 'r') as f:
r = csv.reader(f)
r.next()
for objID, ra, dec, z, n_objID, n_ra, n_dec, n_z, z_type, cModelMag_r, fracDeV_r, dist in r:
ra, dec, z = float(ra), float(dec), float(z)
params['best.ra'] = {'$gte':ra-1e-3, '$lte':ra+1e-3}
params['best.dec'] = {'$gte':dec-1e-3, '$lte':dec+1e-3}
params['best.redshift'] = {'$gte':z-1e-3, '$lte':z+1e-3}
source = bending_15.find_one(params)
if source is not None:
objID = source['SDSS']['objID']
if objID not in env:
env[objID] = {'objID':objID, 'ra':ra, 'dec':dec, 'z':z, 'bending_excess':source['using_peaks']['bending_excess'], 'neighbors':{}, 'r/r500':source['WHL']['r/r500']}
if np.abs(z-float(n_z))/(1+z) <= 0.04:
ADD = float(cosmo.angular_diameter_distance(z)/u.Mpc)
env[objID]['neighbors'][n_objID] = {'ra':float(n_ra), 'dec':float(n_dec), 'z':float(n_z), 'z_type':z_type, 'cModelMag_r':float(cModelMag_r), 'fracDeV_r':float(fracDeV_r), 'dist_deg':float(dist), 'dist_Mpc':float(dist)*np.pi/180*ADD}
if len(env)>count:
count += 1
print count
db.drop_collection('distant_sources')
distant_sources.create_index('objID', unique=True)
for key in env:
distant_sources.insert(env[key])
def import_env_csv2():
count = 0
env = {}
dirname = '/home/garon/Documents/RGZdata/bending/'
for filename in ['more_source_envPhotoz.csv']:
with open(dirname+filename, 'r') as f:
r = csv.reader(f)
r.next()
for objID, ra, dec, z, bending, r500, n_objID, n_ra, n_dec, n_z, z_type, cModelMag_r, fracDeV_r, dist in r:
ra, dec, z, bending, r500 = float(ra), float(dec), float(z), float(bending), float(r500)
if objID not in env:
env[objID] = {'objID':objID, 'ra':ra, 'dec':dec, 'z':z, 'bending_excess':bending, 'r/r500':r500, 'neighbors':{}}
if objID != n_objID and np.abs(z-float(n_z))/(1+z) <= 0.04:
ADD = float(cosmo.angular_diameter_distance(z)/u.Mpc)
env[objID]['neighbors'][n_objID] = {'ra':float(n_ra), 'dec':float(n_dec), 'z':float(n_z), 'z_type':z_type, 'cModelMag_r':float(cModelMag_r), 'fracDeV_r':float(fracDeV_r), 'dist_deg':float(dist), 'dist_Mpc':float(dist)*np.pi/180*ADD}
if len(env)>count:
count += 1
print count
#db.drop_collection('distant_sources')
#distant_sources.create_index('objID', unique=True)
for key in env:
distant_sources.insert(env[key])
def import_env():
import_env_csv1()
import_env_csv2()
for source in distant_sources.find({'r/r500':{'$exists':False}}):
s2 = bending_15.find_one({'SDSS.objID':source['objID']})
distant_sources.update({'_id':source['_id']}, {'$set':{'r/r500':s2['WHL']['r/r500']}})
def find_more_sources():
bent_cut = get_bent_cut()
with open('more_sources.csv', 'w') as f:
print >> f, 'objID,ra,dec,z,bending,r/r500'
count = distant_sources.find({'bending_excess':{'$gt':bent_cut}}).count()
for source in bending_15.find(total_cuts):
if source['using_peaks']['bending_excess']>30 and source['WHL']['r/r500']>6 and not distant_sources.find({'objID':source['SDSS']['objID']}).count():
print >> f, '%s,%s,%s,%s,%s,%s' % (source['SDSS']['objID'], source['best']['ra'], source['best']['dec'], source['best']['redshift'], source['using_peaks']['bending_excess'], source['WHL']['r/r500'])
count += 1
if count>=150:
break
print count, 'bent'
with open('more_sources.csv', 'a') as f:
count = distant_sources.find({'bending_excess':{'$lt':bent_cut}}).count()
for source in bending_15.find(total_cuts):
if source['using_peaks']['bending_excess']<10 and 6<source['WHL']['r/r500']<15 and not distant_sources.find({'objID':source['SDSS']['objID']}).count():
print >> f, '%s,%s,%s,%s,%s,%s' % (source['SDSS']['objID'], source['best']['ra'], source['best']['dec'], source['best']['redshift'], source['using_peaks']['bending_excess'], source['WHL']['r/r500'])
count += 1
if count>=150:
break
print count, 'straight'
def distant_env(bin_count=8, inner_cut=0.05, outer_cut=0.35):
bent_cut = get_bent_cut()
dist_bent, dist_straight, sep = [], [], []
z_bent, z_straight = [], []
for source in distant_sources.find():
if source['bending_excess']>bent_cut:
for neighbor in source['neighbors']:
dist_bent.append(source['neighbors'][neighbor]['dist_Mpc'])
z_bent.append(source['z'])
else:
for neighbor in source['neighbors']:
dist_straight.append(source['neighbors'][neighbor]['dist_Mpc'])
z_straight.append(source['z'])
sep.append(source['r/r500'])
dist_bent = np.array(dist_bent)
dist_bent = dist_bent[dist_bent>inner_cut]
dist_straight = np.array(dist_straight)
dist_straight = dist_straight[dist_straight>inner_cut]
for dist in [dist_bent, dist_straight]:
dist.sort()
n_bent = distant_sources.find({'bending_excess':{'$gt':bent_cut}}).count()
n_straight = distant_sources.find({'bending_excess':{'$lt':bent_cut}}).count()
density_bent = stats.rankdata(dist_bent) / (np.pi*dist_bent**2) / n_bent
density_straight = stats.rankdata(dist_straight) / (np.pi*dist_straight**2) / n_straight
plt.figure()
plt.plot(dist_bent, density_bent, label='Highly bent sources')
plt.plot(dist_straight, density_straight, ls='--', label='Less bent sources')
plt.legend()
plt.xlabel('Projected distance from radio galaxy (Mpc)')
plt.ylabel('Companion density (Mpc$^{-2}$)')
plt.tight_layout()
inner, outer = 2., 3.
mask = np.logical_and(inner<dist_bent, dist_bent<outer)
bg_count_bent = sum(mask)
bg_bent = bg_count_bent / np.pi / (outer**2-inner**2)# / n_bent
mask = np.logical_and(2<dist_straight, dist_straight<3)
bg_count_straight = sum(mask)
bg_straight = bg_count_straight / np.pi / (outer**2-inner**2)# / n_straight
plt.figure()
per_bin_bent, bins, _ = plt.hist(dist_bent, bins=np.linspace(inner_cut, 2, bin_count+1))
bin_bg_bent = bg_bent*np.pi*(bins[1:]**2-bins[:-1]**2)
frac_bent = per_bin_bent / bin_bg_bent
frac_err_bent = frac_bent * (1./np.sqrt(per_bin_bent) + 1./np.sqrt(bg_bent))
per_bin_straight, _, _ = plt.hist(dist_straight, bins=bins)
bin_bg_straight = bg_straight*np.pi*(bins[1:]**2-bins[:-1]**2)
frac_straight = per_bin_straight / bin_bg_straight
frac_err_straight = frac_straight * (1./np.sqrt(per_bin_straight) + 1./np.sqrt(bg_straight))
plt.figure()
plt.errorbar(bins[1:], frac_bent, frac_err_bent, label='Highly bent sources')
plt.errorbar(bins[1:]-0.01, frac_straight, frac_err_straight, ls='--', label='Less bent sources')
plt.legend(loc='upper right')
plt.xlabel('Projected distance from radio galaxy (Mpc)')
plt.ylabel('Normalized surface density')
plt.tight_layout()
# 2D statistics
area = np.pi * (outer_cut**2-inner_cut**2)
cum_bent = sum(dist_bent<outer_cut)
cum_excess_bent = cum_bent - bg_bent*area
err_bent = np.sqrt(cum_bent+bg_bent*area**2)
frac_bent = (cum_excess_bent/area) / bg_bent
frac_excess_bent = frac_bent - 1.
frac_err_bent = frac_bent * (err_bent/cum_excess_bent + 1./np.sqrt(bg_count_bent))
cum_straight = sum(dist_straight<outer_cut)
cum_excess_straight = cum_straight - bg_straight*area
err_straight = np.sqrt(cum_straight+bg_straight*area**2)
frac_straight = (cum_excess_straight/area) / bg_straight
frac_excess_straight = frac_straight - 1.
frac_err_straight = frac_straight * (err_straight/cum_excess_straight + 1./np.sqrt(bg_count_straight))
print 'Surface density between %i to %i kpc' % (int(inner_cut*1000), int(outer_cut*1000))
print 'Bent: %.3f +- %.3f (%.2f sigma)' % (frac_bent, frac_err_bent, frac_bent / frac_err_bent)
print 'Straight: %.3f +- %.3f (%.2f sigma)' % (frac_straight, frac_err_straight, frac_straight / frac_err_straight)
print 'Difference: %.3f +- %.3f (%.2f sigma)' % (frac_bent-frac_straight, np.sqrt(frac_err_bent**2 + frac_err_straight**2), (frac_bent-frac_straight) / np.sqrt(frac_err_bent**2 + frac_err_straight**2))
# 3D statistics
vol_sph = 4./3. * np.pi * (outer_cut**3-inner_cut**3)
vol_cyl = np.pi * outer * (outer_cut**2-inner_cut**2)
vol_bg = np.pi * outer * (outer**2-inner**2)
bg_bent_vol = bg_count_bent / vol_bg
cum_bent = sum(dist_bent<outer_cut)
cum_excess_bent = cum_bent - bg_bent_vol*vol_cyl
err_bent = np.sqrt(cum_bent + bg_bent_vol*vol_cyl**2)
frac_bent = (cum_excess_bent/vol_sph) / bg_bent_vol
frac_excess_bent = frac_bent - 1.
frac_err_bent = frac_bent * (err_bent/cum_excess_bent + 1./np.sqrt(bg_count_bent))
bg_straight_vol = bg_count_straight / vol_bg
cum_straight = sum(dist_straight<outer_cut)
cum_excess_straight = cum_straight - bg_straight_vol*vol_cyl
err_straight = np.sqrt(cum_straight + bg_straight_vol*vol_cyl**2)
frac_straight = (cum_excess_straight/vol_sph) / bg_straight_vol
frac_excess_straight = frac_straight - 1.
frac_err_straight = frac_straight * (err_straight/cum_excess_straight + 1./np.sqrt(bg_count_straight))
print '\nVolume density between %i to %i kpc' % (int(inner_cut*1000), int(outer_cut*1000))
print 'Bent: %.3f +- %.3f (%.2f sigma)' % (frac_bent, frac_err_bent, frac_bent / frac_err_bent)
print 'Straight: %.3f +- %.3f (%.2f sigma)' % (frac_straight, frac_err_straight, frac_straight / frac_err_straight)
print 'Difference: %.3f +- %.3f (%.2f sigma)' % (frac_bent-frac_straight, np.sqrt(frac_err_bent**2 + frac_err_straight**2), (frac_bent-frac_straight) / np.sqrt(frac_err_bent**2 + frac_err_straight**2))
med_sep = np.median(sep)
rho_bg = 500. / med_sep**2
print '%.1f, %.1f rho_crit around highly bent and less bent sources' % (frac_bent*rho_bg, frac_straight*rho_bg)
return None
step = inner_cut
x = np.arange(step, 1, step)+step
cum_excess_bent, cum_excess_straight = [], []
frac_excess_bent, frac_excess_straight = [], []
err_bent, err_straight = [], []
frac_err_bent, frac_err_straight = [], []
for i in x:
cum_bent = sum(dist_bent<i)
cum_excess_bent.append(cum_bent - bg_bent*np.pi*i**2)
err_bent.append(np.sqrt(cum_bent+bg_bent*(np.pi*i**2)**2))
frac_bent = cum_bent/(bg_bent*np.pi*i**2)
frac_excess_bent.append(frac_bent - 1.)
frac_err_bent.append(frac_bent * (1./np.sqrt(cum_bent) + 1./np.sqrt(bg_bent)))
cum_straight = sum(dist_straight<i)
cum_excess_straight.append(cum_straight - bg_straight*np.pi*i**2)
err_straight.append(np.sqrt(cum_straight+bg_straight*(np.pi*i**2)**2))
frac_straight = cum_straight/(bg_straight*np.pi*i**2)
frac_excess_straight.append(frac_straight - 1.)
frac_err_straight.append(frac_straight * (1./np.sqrt(cum_straight) + 1./np.sqrt(bg_straight)))
# normalize
cum_excess_bent_norm = np.array(cum_excess_bent)/n_bent
err_bent_norm = cum_excess_bent_norm * (np.array(err_bent)/np.array(cum_excess_bent) + 1./np.sqrt(n_bent))
cum_excess_straight_norm = np.array(cum_excess_straight)/n_straight
err_straight_norm = cum_excess_straight_norm * (np.array(err_straight)/np.array(cum_excess_straight) + 1./np.sqrt(n_straight))
# cumulative excess plot
plt.figure()
plt.errorbar(x, cum_excess_bent_norm, yerr=err_bent_norm, label='Bent sources')
plt.errorbar(x+0.15*step, cum_excess_straight_norm, yerr=err_straight_norm, ls='--', label='Straight sources')
#plt.axhline(0, ls=':', c='k')
plt.legend(loc='upper left')
plt.xlabel('Radius around radio galaxy (Mpc)')
plt.ylabel('Cumulative excess density (Mpc$^{-2}$)')
plt.tight_layout()
# fractional excess plot
plt.figure()
plt.errorbar(x, frac_excess_bent, yerr=frac_err_bent, label='Bent sources')
plt.errorbar(x+0.15*step, frac_excess_straight, yerr=frac_err_straight, ls='--', label='Straight sources')
#plt.axhline(0, ls=':', c='k')
plt.legend(loc='upper right')
plt.xlabel('Radius around radio galaxy (Mpc)')
plt.ylabel('Fractional excess density')
plt.tight_layout()
return None
def sig(i, j):
frac_b = cum_bent / bg_bent
frac_s = cum_straight / bg_straight
dfrac_b = frac_b * (1/np.sqrt(cum_bent) + 1/np.sqrt(bg_bent)) / np.sqrt(1.*i/n_bent)
dfrac_s = frac_s * (1/np.sqrt(cum_straight) + 1/np.sqrt(bg_straight)) / np.sqrt(1.*j/n_straight)
return (frac_b - frac_s) / max(dfrac_b, dfrac_s)
# statistical excess plot
plt.figure()
plt.plot(x, cum_excess_bent_norm/err_bent_norm, label='Bent sources')
plt.plot(x+0.15*step, cum_excess_straight_norm/err_straight_norm, ls='--', label='Straight sources')
plt.legend(loc='upper right')
plt.xlabel('Radius around RG (Mpc)')
plt.ylabel('Standard deviations above background')
plt.tight_layout()
# statistical difference plot
plt.figure()
plt.plot(x, (cum_excess_bent_norm-cum_excess_straight_norm)/np.max([err_bent_norm,err_straight_norm], 0))
plt.xlabel('Radius around RG (Mpc)')
plt.ylabel('Standard deviations difference')
plt.tight_layout()
# distribution of bending vs density
bent_companions, straight_companions = [], []
for source in distant_sources.find():
count = 0
for neighbor in source['neighbors']:
if 0.05 < source['neighbors'][neighbor]['dist_Mpc'] < 0.25:
count += 1
if source['bending_excess']>bent_cut:
bent_companions.append(count)
else:
straight_companions.append(count)
plt.figure()
plt.hist(bent_companions, bins=range(13), alpha=.8, normed=True, label='Bent sources')
plt.hist(straight_companions, bins=range(13), alpha=.8, normed=True, label='Straight sources')
plt.scatter(6, .1, c='w', alpha=0, label='$p=%.3f$'%stats.anderson_ksamp([bent_companions, straight_companions]).significance_level)
plt.xlabel('Number of companions between 50 and 250 kpc')
plt.ylabel('Normalized count')
plt.legend()
plt.tight_layout()
# bending vs density
bending, neighbors = [], []
for source in distant_sources.find():
bending.append(source['bending_excess'])
count = 0
for neighbor in source['neighbors']:
if 0.05 < source['neighbors'][neighbor]['dist_Mpc'] < 0.25:
count += 1
neighbors.append(count)
bending = np.array(bending)
neighbors = np.array(neighbors)
order = bending.argsort()
bg = (bg_bent+bg_straight)/(n_bent+n_straight)*np.pi*(0.25**2-0.05**2)
plt.figure()
n, bins, patches = plt.hist(bending)
plt.figure()
cut = 1.5
plt.hist(bending[neighbors/bg>cut], bins=bins, alpha=.8, normed=True, label='Density $>%.1f\\times$background\n$n=%i$'%(cut,sum(neighbors/bg>cut)))
plt.hist(bending[neighbors/bg<cut], bins=bins, alpha=.8, normed=True, label='Density $<%.1f\\times$background\n$n=%i$'%(cut,sum(neighbors/bg<cut)))
plt.xlabel('Excess bending angle (deg)')
plt.ylabel('Normalized count')
plt.legend()
plt.tight_layout()
def cluster_spotting(compress=False):
n_bent = np.array([22, 53, 81, 101, 112, 121, 113, 162, 146, 207, 208, 210, 244, 228, 270, 275, 317, 271, 330, 328, 301, 326, 354, 378, 394, 414, 419, 435, 444, 529])
n_straight = np.array([35, 51, 61, 94, 100, 114, 117, 154, 136, 169, 167, 216, 188, 234, 241, 255, 267, 282, 304, 309, 357, 335, 335, 366, 368, 411, 433, 484, 463, 516]) * 90./96. # normalized
base_step_size = 0.1 # Mpc
base_step_count = 30
if compress:
step_size = 2.*base_step_size
step_count = base_step_count/2.
n_bent = n_bent[::2] + n_bent[1::2]
n_straight = n_straight[::2] + n_straight[1::2]
else:
step_size = base_step_size
step_count = base_step_count
bg_bent = 254
bg_straight = 241
sep = step_size*(np.arange(step_count)+1)
excess_bent = n_bent - bg_bent * np.pi * (sep**2-(sep-step_size)**2)
excess_straight = n_straight - bg_straight * np.pi * (sep**2-(sep-step_size)**2)
cum_excess_bent = np.cumsum(excess_bent/90.)
cum_excess_straight = np.cumsum(excess_straight/90.)
err_bent = np.sqrt(n_bent + bg_bent)
err_straight = np.sqrt(n_straight + bg_straight)
cum_err_bent = np.sqrt(np.cumsum(err_bent/90.))
cum_err_straight = np.sqrt(np.cumsum(err_straight/90.))
print 'Bent excess within 2 Mpc: %.2f\pm%.2f\nStraight excess within 2 Mpc: %.2f\pm%.2f' % (cum_excess_bent[sep==2], cum_err_bent[sep==2], cum_excess_straight[sep==2], cum_err_straight[sep==2])
diff = cum_excess_bent[sep==2] - cum_excess_straight[sep==2]
diff_err = np.sqrt(cum_err_bent[sep==2] + cum_err_straight[sep==2])
    sig_level = diff / diff_err # significance of the bent-straight difference in sigma
print 'Difference within 2 Mpc: %.2f\pm%.2f' % (diff, diff_err)
fig, ax = plt.subplots(1)
ax.errorbar(sep, cum_excess_bent, yerr=cum_err_bent, label='Bent sources')
ax.errorbar(sep+0.2*base_step_size, cum_excess_straight, yerr=cum_err_straight, ls='--', label='Straight sources')
ax.legend(loc='upper left')
ax.set_xlabel('Distance from radio source (Mpc)')
ax.set_ylabel('Cumulative excess per radio source')
ax.set_title('Companions per Mpc$^2$ around non-cluster radio sources')
def bending_limit():
h = 1. # jet width in kpc
p_min = np.mean([0.9, 0.6, 1.4, 1.7, 0.4, 0.6, 1.4])*0.0062415 # minimum synchrotron pressure in keV cm^-3
for source in bending_15.find():
size = source['RGZ']['size_kpc']
p_ram = source['WHL']['P']
theta = np.arcsin(0.5 * size/h * p_ram/p_min)
bending_15.update({'_id':source['_id']}, {'$set':{'WHL.bending_max':theta*180./np.pi if not np.isnan(theta) else 90.}})
def get_bent_cut():
bend = []
for i in bending_15.find(total_cuts):
bend.append(i['using_peaks']['bending_excess'])
bend = np.array(bend)
sigma = 0.682689492137
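    # Worked example (illustrative): 100*(1+sigma)/2 ~= 84.1, so the returned
    # cut is the ~84th percentile of the excess-bending distribution, i.e. its
    # +1-sigma upper boundary.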
return np.percentile(bend, 100*(1+sigma)/2)
def remove_AllWISE(drop=False):
count = bending_15.count({'SDSS':{'$exists':True}})
if drop:
print bending_15.update({}, {'$unset':{'best':None}}, multi=True)
for source in bending_15.find({'best':{'$exists':False}}).batch_size(500):
z, z_err = get_z(source)
if z:
sql = 'select raerr, decerr from photoprimary where objid=%i' % source['SDSS']['objID']
df = SDSS_select(sql)
ra_err = df['raerr'][0]
dec_err = df['decerr'][0]
best = {'redshift':z, 'redshift_err':z_err, 'ra':source['SDSS']['ra'], 'ra_err':ra_err, 'dec':source['SDSS']['dec'], 'dec_err':dec_err}
bending_15.update({'_id':source['_id']}, {'$set':{'best':best}})
print '%i/%i' % (bending_15.find({'best':{'$exists':True}}).count(), count)
def sep_hist():
params = total_cuts.copy()
params['using_peaks.bending_angle'] = params['using_peaks.bending_corrected']
del params['using_peaks.bending_corrected']
sep = []
for source in bending_15.find(total_cuts):
sep.append(source['WHL']['r/r500'])
sep = np.array(sep)
plt.figure()
n, bins, _ = plt.hist(np.log10(sep), bins=20, fill=False, hatch='//')
area = np.pi * (pow(10, bins)[1:]**2 - pow(10, bins)[:-1]**2)
outer = np.percentile(sep, 95)
density = sum(sep<outer)/(np.pi*outer**2)
x = (pow(10, bins)[1:]+pow(10, bins)[:-1])/2.
y = density*area
popt = np.polyfit(x, y, 2)
newx = np.logspace(np.log10(min(sep)), np.log10(outer), 100)
plt.plot(np.log10(newx), popt[0]*newx**2 + popt[1]*newx + popt[2], label='Assuming uniform \ndistribution on the sky', c='b', ls=':')
plt.ylim(ymax=1300)
#plt.figure()
#plt.hist(sep, bins=pow(10,bins), fill=False, hatch='//')
#plt.xscale('log')
plt.xlabel(get_label('WHL.r/r500', True))
plt.ylabel('Count')
plt.legend(loc='upper left')
plt.tight_layout()
def density_ratio(x1, x2):
    c500, gamma, alpha, beta = 1.177, 0.3081, 1.0510, 5.4905 # universal GNFW pressure-profile parameters (Arnaud et al. 2010)
rho1 = (c500*x1)**gamma * (1 + (c500*x1)**alpha)**(1.*(beta-gamma)/alpha)
rho2 = (c500*x2)**gamma * (1 + (c500*x2)**alpha)**(1.*(beta-gamma)/alpha)
return rho2 / rho1
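# Hedged sanity check (not part of the original analysis; assumes the
# parameters above are the Arnaud et al. 2010 GNFW pressure-profile fit, so
# pressure falls with radius):
def density_ratio_demo():
    ratio = density_ratio(0.5, 1.0)  # P(0.5*r500) / P(r500)
    assert ratio > 1  # numerically ~6.3 for these parameters
    return ratio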
def v500():
# orbital velocity at r500
    c = np.sqrt(1.327e25) # km^1.5 s^-1; sqrt(G * 1e14 M_sun), assuming M500 is given in units of 1e14 M_sun
m500, r500 = [], []
for source in bending_15.find(total_cuts):
m500.append(source['WHL']['M500'])
r500.append(source['WHL']['r500'])
m500 = np.array(m500)
r500 = np.array(r500)
v = c * np.sqrt(m500/r500/3.086e19) # km s^-1
return np.median(v)
def scatter():
bending, mass, sep, pop = [], [], [], []
for source in bending_15.find(total_cuts):
bending.append(source['using_peaks']['bending_corrected'])
mass.append(source['WHL']['M500'])
sep.append(source['WHL']['r/r500'])
pop.append(source['WHL']['population'])
bending = np.array(bending)
mass = np.array(mass)
sep = np.array(sep)
pop = np.array(pop)
plt.scatter(mass[pop=='outer'], bending[pop=='outer'], s=1, label='outer region')
plt.scatter(mass[pop=='BCG'], bending[pop=='BCG'], s=1, label='BCGs')
plt.scatter(mass[pop=='inner'], bending[pop=='inner'], s=1, label='inner region')
plt.axhline(np.median(bending), ls=':', c='k')
plt.legend()
plt.xlabel(get_label('WHL.M500'))
plt.ylabel(get_label('using_peaks.bending_corrected'))
plt.tight_layout()
def get_phot_matches():
z_r, z_w, cid = [], [], []
for source in bending_15.find(total_cuts):
z_r.append(source['best']['redshift'])
z_w.append(source['WHL']['zbest'])
cid.append(source['RGZ']['RGZ_id'])
z_r = np.array(z_r)
z_w = np.array(z_w)
cid = np.array(cid)
bad_cid = list(cid[np.abs(z_w - z_r) > .04*(1+z_r)])
return bad_cid
def flag_dups(coll=bending_15):
coll.update({}, {'$set':{'RGZ.duplicate':0}}, multi=True)
cid, name = [], []
for source in coll.find(total_cuts):
name.append(source['RGZ']['RGZ_name'])
for n in set(name):
name.remove(n)
for source in coll.find({'RGZ.RGZ_name':{'$in':name}}):
cid.append(source['RGZ']['RGZ_id'])
for source in coll.find({'RGZ.RGZ_id':{'$in':cid}}):
if source['RGZ']['RGZ_name'] in name:
coll.update({'RGZ.RGZ_id':source['RGZ']['RGZ_id']}, {'$set':{'RGZ.duplicate':1}})
name.remove(source['RGZ']['RGZ_name'])
def print_supplement(filename='/home/garon/Documents/RGZdata/bending/data_supplement.csv'):
bad_cid = get_phot_matches()
cids = []
with open(filename[:-4] + '_table1' + filename[-4:], 'w') as f1:
with open(filename[:-4] + '_sample1' + filename[-4:], 'w') as g1:
for source in bending_15.find(total_cuts).sort('RGZ.RGZ_name', 1):
cids.append(source['RGZ']['RGZ_id'])
morph = 2 if source['RGZ']['morphology'] == 'double' else 3
ztype = 's' if 'spec_redshift' in source['SDSS'] else 'p'
align = 'r' if source['WHL']['alignment'] == 'radial' else ('t' if source['WHL']['alignment'] == 'tangential' else 'o')
if source['RGZ']['RGZ_id'] in bad_cid:
w_z = source['WHL']['zphot']
w_ztype = 'p'
else:
w_z = source['WHL']['zbest']
w_ztype = 's' if 'zspec' in source['WHL'] else 'p'
datastr = '%s,%s,%i,%.3f,%.5f,%.5f,%.4f,%.4f,%s,%.1f,%.1f,%.1f,%.2f,%s,%.5f,%.5f,%.4f,%s,%.2f,%.2f,%.2f,%.2f,%.1f,%s' % (source['RGZ']['RGZ_name'][3:], source['RGZ']['zooniverse_id'], morph, source['RGZ']['size_arcmin'], source['best']['ra'], source['best']['dec'], source['best']['redshift'], source['best']['redshift_err'], ztype, source['using_peaks']['bending_angle'], source['using_peaks']['bending_corrected'], source['using_peaks']['bending_excess'], source['using_contour']['asymmetry'], source['WHL']['name'], source['WHL']['ra'], source['WHL']['dec'], w_z, w_ztype, source['WHL']['r/r500'], source['WHL']['r500'], source['WHL']['M500'], np.log10(source['WHL']['P']), source['WHL']['orientation_peaks'], align)
print >> f1, datastr
print >> g1, to_tex(datastr)
with open(filename[:-4] + '_table2' + filename[-4:], 'w') as f2:
with open(filename[:-4] + '_sample2' + filename[-4:], 'w') as g2:
params = total_cuts.copy()
params['RGZ.RGZ_id'] = {'$nin':cids}
for source in bent_sources.find(params).sort('RGZ.RGZ_name', 1):
morph = 2 if source['RGZ']['morphology'] == 'double' else 3
ztype = 's' if 'spec_redshift' in source['SDSS'] else 'p'
datastr = '%s,%s,%i,%.3f,%.5f,%.5f,%.4f,%.4f,%s,%.1f,%.1f,%.1f,%.2f' % (source['RGZ']['RGZ_name'][3:], source['RGZ']['zooniverse_id'], morph, source['RGZ']['size_arcmin'], source['best']['ra'], source['best']['dec'], source['best']['redshift'], source['best']['redshift_err'], ztype, source['using_peaks']['bending_angle'], source['using_peaks']['bending_corrected'], source['using_peaks']['bending_excess'], source['using_contour']['asymmetry'])
print >> f2, datastr
print >> g2, to_tex(datastr)
def to_tex(s):
keylist = [(',',' & '), ('& s','& $s$'), ('& p','& $p$'), ('& r','& $r$'), ('& t','& $t$'), ('& o','& $o$')]
for key in keylist:
s = s.replace(*key)
return s + ' \\\\'
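# Worked example (illustrative): to_tex('a,b,s') returns 'a & b & $s$ \\' --
# commas become column separators, single-letter flags are set in math mode,
# and a LaTeX row terminator is appended.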
def make_all_figs():
contamination()
sep_hist()
sdss_density()
rmsd()
bending_correct(plot=True, methods='using_peaks')
plot_running('WHL.r/r500', 'using_peaks.bending_excess', logx=True, pop='separate', title=False)
plot_running('WHL.M500', 'using_peaks.bending_excess', pop='inner', title=False)
plot_running('WHL.M500', 'using_peaks.bending_excess', pop='BCG', title=False)
plot_running('WHL.P', 'using_peaks.bending_excess', logx=True, pop='non-BCG', combined=True, title=False)
orientation()
orientation_test()
fractional_bent()
distant_env()
def update_SDSS(coll=catalog, start=0):
for entry in coll.find({'SDSS':{'$exists':False}, 'consensus.ir_ra':{'$exists':True}, 'catalog_id':{'$gte':start}}).sort('catalog_id', 1).batch_size(20):
print entry['catalog_id']
sdss_match = getSDSS(entry)
if sdss_match is not None:
z = sdss_match['spec_redshift'] if 'spec_redshift' in sdss_match else (sdss_match['photo_redshift'] if 'photo_redshift' in sdss_match else 0)
if z > 0:
radio = entry['radio']
radio_nested = {'radio':radio}
physical = getPhysical(z, radio_nested)
radio.update(physical)
print catalog.update({'_id':entry['_id']}, {'$set':{'radio':radio, 'SDSS':sdss_match}})
else:
print catalog.update({'_id':entry['_id']}, {'$set':{'SDSS':sdss_match}})
def vienna(data_in=False, data_out=False):
infile = '/home/garon/Downloads/vienna.csv'
outfile = '/home/garon/Downloads/vienna_matches.csv'
if data_in:
ix, ra, dec, z = [], [], [], []
with open(infile, 'r') as f:
r = csv.reader(f)
            r.next() # skip the header row
for row in r:
ix.append(row[0])
ra.append(row[1])
dec.append(row[2])
z.append(row[3])
ix = np.array(ix, dtype=int)
ra = np.array(ra, dtype=float)
dec = np.array(dec, dtype=float)
z = np.array(z, dtype=float)
if data_out:
with open(outfile, 'w') as f:
print >> f, 'source#,radeg,decdeg,z,sep_mpc,sep_r500'
            for i in range(len(ix)):
loc = coord.SkyCoord(ra[i], dec[i], unit=(u.deg,u.deg))
w = get_whl(loc, z[i], 0, 15, 0.04*(1+z[i]))
if w is not None:
loc2 = coord.SkyCoord(w['RAdeg'], w['DEdeg'], unit=(u.deg,u.deg))
sep_deg = loc2.separation(loc)
sep_mpc = float(cosmo.angular_diameter_distance(w['zspec'] if 'zspec' in w else w['zphot'])/u.Mpc * sep_deg.to(u.rad)/u.rad)
sep_r500 = sep_mpc / w['r500']
else:
sep_mpc, sep_r500 = 99., 99.
print >> f, '%i,%f,%f,%f,%f,%f' % (ix[i], ra[i], dec[i], z[i], sep_mpc, sep_r500)
def plot_annotations(source, peak_count=None):
entry = catalog.find_one({'catalog_id':source['RGZ']['RGZ_id']})
subject = subjects.find_one({'zooniverse_id':entry['zooniverse_id']})
fid = subject['metadata']['source']
fits_loc = pathdict[fid]
w = wcs.WCS(fits.getheader(fits_loc, 0))
ir = coord.SkyCoord(source['SDSS']['ra'], source['SDSS']['dec'], unit=(u.deg,u.deg), frame='icrs') if 'SDSS' in source else coord.SkyCoord(source['AllWISE']['ra'], source['AllWISE']['dec'], unit=(u.deg,u.deg), frame='icrs')
ir_pos = w.wcs_world2pix(np.array([[ir.ra.deg,ir.dec.deg]]), 1)
z, z_err = get_z(source)
peaks = entry['radio']['peaks']
peak_pos = w.wcs_world2pix(np.array([ [peak['ra'],peak['dec']] for peak in peaks ]), 1)
data = get_data(subject)
if peak_count is None:
if len(entry['radio']['peaks'])==2:
peak_count = 2
elif len(entry['radio']['peaks'])==3 or len(entry['radio']['components'])==3:
peak_count = 3
else:
print 'Not enough info for peak counting'
return
contour_tree = get_contours(w, ir_pos, peak_pos, data, peak_count)
contour_tree.print_contours()
plt.scatter(*ir_pos.T, marker='x')
x = [peak_pos.T[0][0], ir_pos[0][0], peak_pos.T[0][1]]
y = [peak_pos.T[-1][0], ir_pos[0][-1], peak_pos.T[-1][1]]
plt.plot(x, y)
for i in data['contours']:
for j in i:
print j['level']
def scale_region_file(infile):
scale_factor = 500./132
outfile = infile.split('.')
outfile[-2] += '_scaled'
outfile = '.'.join(outfile)
with open(infile, 'r') as f:
lines = f.readlines()
with open(outfile, 'w') as f:
for line in lines:
if line[:7]=='polygon':
scaled = []
for val in line[8:-2].split(','):
scaled.append(scale_factor*float(val))
print >> f, 'polygon(%s)' % ','.join(np.array(scaled, dtype=str))
else:
print >> f, line[:-1]
def update_dz():
for source in bending_15.find({'best':{'$exists':True}}):
z1 = source['best']['redshift']
z2 = source['WHL']['zbest']
dz = (z1-z2)/(1.+z1)
bending_15.update({'_id':source['_id']}, {'$set':{'WHL.dz':dz}})
def correlation(x_param, y_param, pop=None, bin_by=None, bin_count=0):
'''Significance of correlation between two parameters'''
assert pop in [None, 'BCG', 'inner', 'outer', 'separate'], "pop must be 'BCG', 'inner', 'outer', or 'separate'"
if bin_by is not None:
assert type(bin_count) is int and bin_count>0, 'bin_count must be positive int'
params = total_cuts.copy()
    if pop == 'separate':
for pop2 in ['BCG', 'inner', 'outer']:
correlation(x_param, y_param, pop2, bin_by, bin_count)
return
elif pop in ['BCG', 'inner', 'outer']:
params['WHL.population'] = pop
if bin_by is not None:
bin_by_list = bin_by.split('.')
bins = np.arange(bin_count+1) * 100. / bin_count
vals = []
for i in bending_15.find(params):
vals.append(i[bin_by_list[0]][bin_by_list[1]])
samples = np.percentile(vals, bins)
print 'Pop: %s, binned by %s' % (pop, bin_by)
for i in range(len(samples)-1):
params[bin_by] = {'$gte':samples[i], '$lt':samples[i+1]}
_, x, _, y, _ = get_trends(params, x_param, y_param, bending_15, False, False)
rho = stats.spearmanr(x,y)
print ' Bin: %f-%f, corr: %f, sigma: %f' % (samples[i], samples[i+1], rho.correlation, z_score(rho.pvalue))
else:
_, x, _, y, _ = get_trends(params, x_param, y_param, bending_15, False, False)
rho = stats.spearmanr(x,y)
print 'Pop: %s, corr: %f, sigma: %f' % (pop, rho.correlation, z_score(rho.pvalue))
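# Hedged usage sketch (arguments borrowed from make_all_figs, illustrative):
# correlation('WHL.M500', 'using_peaks.bending_excess', pop='separate',
#             bin_by='WHL.r/r500', bin_count=4)
# prints the Spearman rho and its significance for each population in
# quartile bins of cluster-centric radius.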
def size_comp(bin_count=1):
pop, z, size = [], [], []
for source in bending_15.find(total_cuts):
pop.append(source['WHL']['population'])
z.append(source['best']['redshift'])
size.append(source['RGZ']['size_kpc'])
pop = np.array(pop)
z = np.array(z)
size = np.array(size)
bins = np.arange(bin_count+1) * 100. / bin_count
samples = np.percentile(z, bins)
for i in range(len(samples)-1):
mask = np.logical_and(z>=samples[i], z<samples[i+1])
inner = np.logical_and(mask, pop=='inner')
outer = np.logical_and(mask, pop=='outer')
p = stats.anderson_ksamp([size[inner], size[outer]]).significance_level
print 'z = %.2f-%.2f' % (samples[i], samples[i+1])
print ' Inner: %.0f +- %.0f kpc (n=%i)' % (np.mean(size[inner]), np.std(size[inner]), len(size[inner]))
print ' Outer: %.0f +- %.0f kpc (n=%i)' % (np.mean(size[outer]), np.std(size[outer]), len(size[outer]))
print ' Same pop: %.2f sigma' % z_score(p)
def get_headtails():
params = total_cuts.copy()
params['RGZ.morphology'] = 'double'
near, far = [], []
count = 0
tot = bending_15.find(params).count()
for source in bending_15.find(params).batch_size(100):
count += 1
print '%i/%i' % (count, tot)
ir = coord.SkyCoord(source['best']['ra'], source['best']['dec'], unit=(u.deg,u.deg), frame='icrs')
rad0 = coord.SkyCoord(source['RGZ']['peaks'][0]['ra'], source['RGZ']['peaks'][0]['dec'], unit=(u.deg,u.deg), frame='icrs')
rad1 = coord.SkyCoord(source['RGZ']['peaks'][1]['ra'], source['RGZ']['peaks'][1]['dec'], unit=(u.deg,u.deg), frame='icrs')
sep0 = ir.separation(rad0).arcsec
sep1 = ir.separation(rad1).arcsec
near.append(min(sep0, sep1))
far.append(max(sep0, sep1))
near = np.array(near)
far = np.array(far)
return near, far
def rotation_matrix(axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians. https://stackoverflow.com/a/6802723
"""
axis = np.asarray(axis)
axis = axis / math.sqrt(np.dot(axis, axis))
a = math.cos(theta / 2.0)
b, c, d = -axis * math.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
[2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
[2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])
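# Hedged sanity check (not part of the original analysis): a pi/2
# counterclockwise rotation about the x-axis should map the unit z-vector
# onto -y.
def rotation_matrix_demo():
    v = np.dot(rotation_matrix([1, 0, 0], np.pi / 2), [0, 0, 1])
    assert np.allclose(v, [0, -1, 0])
    return v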
def projection_effects(theta=None, plot=False):
if theta is None:
theta = np.random.rand() * 90.
M = np.array([[-1, 0, 0], [0, 0, 0], [np.cos(theta*np.pi/180), np.sin(theta*np.pi/180), 0]])
proj = []
for i in np.arange(0, 360, 2)*np.pi/180:
rotx = rotation_matrix([1,0,0], i)
for j in np.arange(0, 360, 2)*np.pi/180:
roty = rotation_matrix([0,1,0], j)
Mprime = np.matmul(np.matmul(M, rotx), roty)
thetaprime = math.atan(Mprime[2,1] / Mprime[2,0])
proj.append(thetaprime)
proj = np.array(proj)
if plot:
plt.figure()
plt.hist(np.abs(proj), 180)
plt.axvline(theta*np.pi/180, color='k', label='True $\\theta$')
        plt.axvline(np.median(np.abs(proj)), color='r', label='Median observed $\\theta$')
plt.legend()
plt.xlabel('Observed bending angle (rad)')
plt.ylabel('Count')
plt.tight_layout()
return theta, np.median(np.abs(proj))
def total_projection(plot=True):
theta = []
true = np.arange(0, 180, 2)
for i in true:
theta.append(projection_effects(i)[1])
observed = np.array(theta)*180/np.pi
if plot:
plt.figure()
plt.plot(true[:45], true[:45], label='True $\\theta$')
plt.plot(true, observed, label='Median observed $\\theta$')
plt.legend()
plt.xlabel('True bending angle (deg)')
plt.ylabel('Median observed bending angle (deg)')
plt.tight_layout()
plt.figure()
plt.plot(true, observed-true, label='Observed-true')
plt.fill_between(true, -1*get_median_bend(), get_median_bend(), alpha=.5, label='Uncertainty')
plt.legend(loc='lower left')
plt.xlabel('True bending angle (deg)')
plt.ylabel('Error (deg)')
plt.tight_layout()
return true, observed
if __name__ == '__main__':
logging.basicConfig(filename='%s/bending.log' % rgz_path, level=logging.DEBUG, format='%(asctime)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
logging.captureWarnings(True)
logging.info('Bending run from command line')
# Run options
calculate_bending = True
match_to_clusters = True
control = False
# Generate the collection of bent sources
if calculate_bending:
done = False
while not done:
try:
if not bent_sources.count():
bent_sources.create_index('RGZ.RGZ_id', unique=True)
output('Processing sources from RGZ')
make_bent_sources()
output('%i double sources processed' % bent_sources.find({'RGZ.morphology':'double'}).count())
output('%i triple sources processed' % bent_sources.find({'RGZ.morphology':'triple'}).count())
done = True
except pymongo.errors.CursorNotFound as c:
time.sleep(10)
output('Cursor timed out; starting again.')
except BaseException as e:
logging.exception(e)
raise
with open(completed_file, 'w'): pass
# Match the sources in bent_sources to the cluster catalogs
if match_to_clusters:
done = False
while not done:
try:
if not bending_15.count():
bending_15.create_index('RGZ.RGZ_id', unique=True)
output('Matching sources to WHL')
make_catalog()
output('%i double sources matched to WHL' % bending_15.find({'RGZ.morphology':'double'}).count())
output('%i triple sources matched to WHL' % bending_15.find({'RGZ.morphology':'triple'}).count())
to_file('%s/csv/bending_catalog_15.csv' % rgz_path, bending_15)
done = True
except pymongo.errors.CursorNotFound as c:
time.sleep(10)
output('Cursor timed out; starting again.')
except BaseException as e:
logging.exception(e)
raise
# Generate a control sample by shuffling the positions of the sources
if control:
try:
if not bending_control.count():
bending_control.create_index('RGZ.RGZ_id', unique=True)
output('Generating control sources')
random_control()
output('%i double sources matched to clusters' % bending_control.find({'RGZ.morphology':'double'}).count())
                output('%i triple sources matched to clusters' % bending_control.find({'RGZ.morphology':'triple'}).count())
to_file('%s/csv/bending_control_15.csv' % rgz_path, bending_control)
except BaseException as e:
logging.exception(e)
raise
|
{"hexsha": "25088cc76d03bd607e6d63a5b1b6f8bdfe8710b3", "size": 126005, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/bending_analysis.py", "max_stars_repo_name": "willettk/rgz-analysis", "max_stars_repo_head_hexsha": "11c34b1b2d0eb8b9c1c71757e6e2f771c169e993", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2016-02-23T01:24:38.000Z", "max_stars_repo_stars_event_max_datetime": "2017-07-09T03:34:29.000Z", "max_issues_repo_path": "python/bending_analysis.py", "max_issues_repo_name": "willettk/rgz-analysis", "max_issues_repo_head_hexsha": "11c34b1b2d0eb8b9c1c71757e6e2f771c169e993", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 30, "max_issues_repo_issues_event_min_datetime": "2015-02-24T03:05:54.000Z", "max_issues_repo_issues_event_max_datetime": "2016-06-27T19:36:36.000Z", "max_forks_repo_path": "python/bending_analysis.py", "max_forks_repo_name": "willettk/rgz-analysis", "max_forks_repo_head_hexsha": "11c34b1b2d0eb8b9c1c71757e6e2f771c169e993", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2016-03-02T02:45:51.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-23T02:01:34.000Z", "avg_line_length": 40.3732777956, "max_line_length": 723, "alphanum_fraction": 0.6723145907, "include": true, "reason": "import numpy,from scipy,from astropy", "num_tokens": 40823}
|
from abc import ABC, abstractmethod
from fastquant import get_stock_data
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
from scipy.ndimage.filters import gaussian_filter
class Model(ABC):
# params should be a dict of your parameters that you want to pass to the model
# name should be a string (used for saving results)
# params dict *must* include {'name':name}
def __init__(self, params):
self.model = None
self.name = params['name']
# wrapper model function for collecting fastquant data
def get_data(self, ticker, start_date, end_date):
return get_stock_data(ticker, start_date, end_date)
# generate closing prices from fractional change predictions
def gen_prices(self, preds):
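        # Worked example (illustrative): with a last training close of 100 and
        # predicted fractional changes [0.01, -0.02], the first reconstructed
        # close is 0.01*100 + 100 = 101; every later step applies the
        # prediction to the *actual* previous test close (one-step-ahead).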
pred_close = []
closes = self.test_data['close'].values
opens = self.test_data['open'].values[1:]
for i,pred in enumerate(preds):
if i == 0:
pred_close.append(pred*self.train_data['close'].values[-1]+self.train_data['close'].values[-1])
else:
pred_close.append(pred*closes[i-1]+closes[i-1])
truth = self.test_data['close'].values[1:]
return pred_close, truth.flatten()
# plotting function for price prediction
def plot_prices(self, preds, title, folder):
pred_close, truth = self.gen_prices(preds)
fig, ax = plt.subplots(figsize=(15,5))
ax.set_title(title)
time = range(len(preds))
ax.plot(time,truth,color='tab:blue',marker='s',markersize=2,linestyle='-',linewidth=1,label='actual')
ax.plot(time,pred_close,color='tab:red',marker='s',markersize=2,linestyle='-',linewidth=1,label='preds')
ax.set_xlabel('time')
ax.set_ylabel('stock price ($)')
ax.set_xticks(np.arange(0,len(preds)+10,10))
ax.set_xlim(0,len(preds)+10)
ax.xaxis.grid(True,ls='--')
ax.yaxis.grid(True,ls='--')
ax.legend()
plt.savefig(f'../imgs/{folder}/{title}')
# plotting function for fractional change
def plot_change(self, preds, actual, title, folder):
fig, ax = plt.subplots(figsize=(15,5))
ax.set_title(title)
time = range(len(preds))
ax.plot(time,actual,color='tab:blue',marker='s',markersize=2,linestyle='-',linewidth=1,label='actual')
ax.plot(time,preds,color='tab:red',marker='s',markersize=2,linestyle='-',linewidth=1,label='preds')
ax.set_xlabel('time')
ax.set_ylabel('stock price ($)')
ax.set_xticks(np.arange(0,len(preds)+10,10))
ax.set_xlim(0,len(preds)+10)
ax.xaxis.grid(True,ls='--')
ax.yaxis.grid(True,ls='--')
ax.legend()
plt.savefig(f'../imgs/{folder}/{title}')
# function to get error of the model based on preds and true values
def mean_abs_percent_error(self, y_pred, y_true):
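        # Worked example (illustrative): for y_pred=[1.1, 1.9, 3.2] and
        # y_true=[1, 2, 3] the relative errors are 10%, 5% and ~6.67%,
        # so the returned MAPE is ~7.22.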
return (1.0)/(len(y_pred))*((np.abs(y_pred-y_true)/np.abs(y_true))*100).sum()
# percent change for data
def data_prep(self, data):
return data['close'].pct_change().iloc[1:].values
# training function for the model, create the model, train it, and store in self.model
def train(self, train_data):
# save for later
self.train_data = train_data
# prep the data / pre process
self.train_obs = self.data_prep(train_data)
self.train_obs = gaussian_filter(self.train_obs, sigma=self.sigma)
        # build each x as the sliding window (O_i, ..., O_{i+d-1})
        # y is the next observation, O_{i+d}
x_train, y_train = [],[]
for i in range(self.d, len(self.train_obs)):
x_train.append(self.train_obs[i-self.d:i])
y_train.append(self.train_obs[i])
x_train,y_train = np.array(x_train),np.array(y_train)
y_train = np.reshape(y_train, (*y_train.shape,1))
self.model.fit(x_train, y_train)
# prediction function for the model, return the preds and y_true given the test data
def predict(self, test_data):
# add last row of training data to testing data
last = self.train_data.iloc[-1].to_dict()
row = pd.DataFrame(last, index=[0])
row['dt'] = None
self.test_data = test_data.reset_index()
self.test_data = pd.concat([row,self.test_data], ignore_index=True)
# convert the testing data and smooth
test_obs = self.data_prep(self.test_data)
test_labels = test_obs.copy()
test_obs = gaussian_filter(test_obs, sigma=self.sigma)
# add the last training observations to test observations for full test set predicting
test_obs = np.concatenate((self.train_obs[-self.d:], test_obs), axis=0)
        # build each x as the sliding window (O_i, ..., O_{i+d-1})
        # y is the next observation, O_{i+d}
x_test, y_test = [],[]
index = 0
for i in range(self.d, len(test_obs)):
x_test.append(test_obs[i-self.d:i])
y_test.append(test_labels[index])
index += 1
x_test,y_test = np.array(x_test),np.array(y_test)
y_test = np.reshape(y_test, (*y_test.shape,1))
# predict testing data
preds = self.model.predict(x_test)
return x_test,preds,y_test
# generate the model and store in self.model
@abstractmethod
def gen_model(self):
pass
|
{"hexsha": "babf032a8e004b5102a6e592d515822c327d0f2d", "size": 5341, "ext": "py", "lang": "Python", "max_stars_repo_path": "frac_change_forecasting/regressors/model.py", "max_stars_repo_name": "rlavelle/stock-forecasting", "max_stars_repo_head_hexsha": "732df75e9802e9c2ce24ae305565df96a649d760", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "frac_change_forecasting/regressors/model.py", "max_issues_repo_name": "rlavelle/stock-forecasting", "max_issues_repo_head_hexsha": "732df75e9802e9c2ce24ae305565df96a649d760", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "frac_change_forecasting/regressors/model.py", "max_forks_repo_name": "rlavelle/stock-forecasting", "max_forks_repo_head_hexsha": "732df75e9802e9c2ce24ae305565df96a649d760", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.562962963, "max_line_length": 112, "alphanum_fraction": 0.6287212133, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1302}
|
#!/path/to/your/python3/interpreter
import sys
from os import stat
from PIL import Image
from numpy import (uint8,diag,asarray,array,zeros)
from numpy.linalg import svd
from time import perf_counter
def image_compressor(image_path,output,rank):
with Image.open(image_path) as image:
print('Compressing the given image...')
t0 = perf_counter()
r,g,b = image.split() # splitting the red, green, blue byte arrays
r = asarray(r)
g = asarray(g)
b = asarray(b)
#---SVD decompositions follow---#
Ur,sr,Vrh = svd(r,full_matrices=False)
Ug,sg,Vgh = svd(g,full_matrices=False)
Ub,sb,Vbh = svd(b,full_matrices=False)
#---Computing the low rank approximations for the given rank---#
red = (Ur[:,:rank]@diag(sr)[:rank,:rank]@Vrh[:rank,:]).astype(uint8)
green = (Ug[:,:rank]@diag(sg)[:rank,:rank]@Vgh[:rank,:]).astype(uint8)
blue = (Ub[:,:rank]@diag(sb)[:rank,:rank]@Vbh[:rank,:]).astype(uint8)
im_red = Image.fromarray(red,mode=None)
im_green = Image.fromarray(green,mode=None)
im_blue = Image.fromarray(blue,mode=None)
im_new = Image.merge('RGB',(im_red,im_green,im_blue))
t1 = perf_counter()
print(f'Compression complete!\n The process took {round((t1-t0),3)} seconds.')
output_name = f'{rank}{output}'
im_new.save(output_name)
return output_name
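# Hedged sketch (not part of the original tool): by the Eckart-Young theorem,
# the Frobenius error of a rank-k SVD truncation equals the root of the sum of
# the discarded squared singular values; quick numerical check below.
def low_rank_error_demo(rank=5):
    from numpy.random import rand
    from numpy.linalg import norm
    A = rand(64, 64)
    U,s,Vh = svd(A, full_matrices=False)
    A_k = U[:,:rank]@diag(s)[:rank,:rank]@Vh[:rank,:]
    assert abs(norm(A - A_k) - (s[rank:]**2).sum()**0.5) < 1e-8
    return norm(A - A_k)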
if __name__ == '__main__':
if len(sys.argv)!=4:
sys.exit(f'Usage :{sys.argv[0]} <image_path> <output_path> <low_rank>')
else:
original_size= stat(sys.argv[1]).st_size
compressed=image_compressor(sys.argv[1],sys.argv[2],int(sys.argv[3]))
compressed_size=stat(compressed).st_size
print(f'original_size = {original_size*1e-3} kilobytes.\ncompressed_size = {compressed_size*1e-3} kilobytes.')
print(f'Compression rate = {round(100 - (compressed_size/original_size)*100,2)}%')
|
{"hexsha": "ca9dcd93702afe58cb741672f87ad71e0ba3f29f", "size": 1955, "ext": "py", "lang": "Python", "max_stars_repo_path": "svd_compression.py", "max_stars_repo_name": "grafdim/SVD-Image-Compressor", "max_stars_repo_head_hexsha": "2310a869a45a48b350003275e1f71b38abbe0c9d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "svd_compression.py", "max_issues_repo_name": "grafdim/SVD-Image-Compressor", "max_issues_repo_head_hexsha": "2310a869a45a48b350003275e1f71b38abbe0c9d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "svd_compression.py", "max_forks_repo_name": "grafdim/SVD-Image-Compressor", "max_forks_repo_head_hexsha": "2310a869a45a48b350003275e1f71b38abbe0c9d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.4444444444, "max_line_length": 118, "alphanum_fraction": 0.642455243, "include": true, "reason": "from numpy", "num_tokens": 540}
|
# -*- coding: utf-8 -*-
'''
John Farmer
1. a. Done.
b. The second array is the frequency bins. The FFT algorithm I used arranged the bins in a different order, so I used a different freq. array in my plots.
I checked the normalization by verifying Parseval's theorem. I found that a normalization factor of 1/N was needed in the frequency domain, which is what I expected from the definitions of power in lecture.
2. The Fourier transform of a real function has negative frequencies that are the complex conjugate of its positive ones; for this problem I work with just the positive frequencies for clarity.
a. Done.
b. Done.
c. As expected, they overlap the most where there is zero offset. There are some edge effects visible along the other endpoints.
3. Again, I will work with the positive part of the function, but to preserve the power I need to be a bit careful.
Every entry *except* zero and the Nyquist frequency has a counterpart in the negative-frequency domain. To accurately calculate the power, I need to take this into account. The easiest way is by doubling every entry of the FFT
except the endpoints (Nyquist frequency and zero).
a. Done.
b. The RMS of the timestream gives the RMS energy. Again, compute this as in 2b, but this time we have to be a little bit careful: there is a factor of 2 from the overall normalization of using the 'real' fft.
c. Done.
'''
import random
import math
import matplotlib.pyplot as plt
import numpy as np
import pickle
import scipy.stats as stat
import scipy.special as sp
import scipy.signal as sig
import statistics as st
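# Hedged numerical check of the Parseval normalization discussed in the header
# comment (illustrative, standalone): for numpy's unnormalized FFT,
# sum|x(t)|^2 == sum|X(f)|^2 / N.
_parseval_x=np.random.normal(0,1,1024)
assert np.allclose(np.sum(np.power(_parseval_x,2)), np.sum(np.power(np.absolute(np.fft.fft(_parseval_x)),2))/1024)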
thelinspace=np.linspace(0,1023,1024)
deltaT=1/100
freqspace=np.fft.fftfreq(1024, deltaT)
LinearData=deltaT*thelinspace
SineData=np.sin(2*math.pi*thelinspace/(1024*deltaT))
freqarr=np.zeros(1024)
for i in range(0,1024):
freqarr[i]=(i-1024/2+1)/(1024*deltaT)
plt.plot(thelinspace, SineData)
plt.xlabel("t")
plt.ylabel("f(t)")
plt.title("Sine noise")
plt.savefig("1a_Sinenoise.png")
plt.clf()
SineDataFFT=np.fft.fft(SineData)
plt.plot(freqarr, SineDataFFT.real)
plt.xlabel("f")
plt.ylabel("F(f(t))")
plt.title("Sine wave FFT")
plt.savefig("1b_SineFFT.png")
plt.clf()
print("Sums:")
print(np.sum(np.power(np.absolute(SineDataFFT),2)/(1024)))
print(np.sum(np.power(SineData,2)))
#How to check normalization? Maybe calculate power.
#Part B
gaussfreqspace=np.fft.rfftfreq(1024,1)
gaussnoise=np.random.normal(0,1,1024)
plt.plot(thelinspace, gaussnoise)
plt.xlabel("t")
plt.ylabel("Signal")
plt.title("Gaussian time-domain noise")
plt.savefig("2a_Gausnoise.png")
plt.clf()
#The Nyquist frequency is half the sampling frequency, i.e. the highest frequency on the rFFT grid; take the frequency array and find its maximum.
gaussnoisefft=np.fft.rfft(gaussnoise)
gaussnoisefftcopy=list(gaussnoisefft)
maxfreqarg=np.argmax(gaussfreqspace)
maxfreq=gaussfreqspace[maxfreqarg]
plt.plot(gaussfreqspace, gaussnoisefft.real)
plt.xlabel("f")
plt.ylabel("F(f(t))")
plt.title("Gaussian noise FFT")
plt.savefig("2b_gaussnosie_fft.png")
plt.clf()
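#Apply a low-pass Butterworth filter in the frequency domain: transmittance
#1/(1+(f/fc)^(2n)) with order n=4 and cutoff fc = maxfreq/10.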
for i,entry in enumerate(gaussfreqspace):
gaussnoisefft[i]=gaussnoisefft[i]*1/(1+(10*entry/maxfreq)**(2*4))
plt.plot(gaussfreqspace, 1/(1+(10*gaussfreqspace/maxfreq)**(2*4)))
plt.xlabel("Frequency")
plt.ylabel("Filter transmittance")
plt.title("Butterworth filter transmittance")
plt.savefig("2b_filt_trans.png")
plt.clf()
plt.plot(gaussfreqspace, np.absolute(gaussnoisefft))
plt.xlabel("f")
plt.ylabel("F(f(t))")
plt.title("Gaussian noise FFT, Butterworth filtered")
plt.savefig("2b_gaussnosie_fftfilt.png")
plt.clf()
filteredgaussnoise=np.fft.irfft(gaussnoisefft)
plt.plot(thelinspace, filteredgaussnoise)
plt.xlabel("t")
plt.ylabel("f(t)")
plt.title("Gaussian noise, Butterworth filtered")
plt.savefig("2b_gaussnoise_filt.png")
plt.clf()
autocorr=np.correlate(filteredgaussnoise, filteredgaussnoise, 'full')
plt.plot(np.arange(-1023, 1024), autocorr)
plt.ylabel("Autocorrelation")
plt.xlabel("Offset")
plt.title("Filtered autocorrelation")
plt.savefig("2c_autocorr.png")
plt.clf()
#Part C:
for i in range(0,len(gaussnoisefft)):
if i != 0 and i != len(gaussnoisefft)-1:
gaussnoisefft[i]=gaussnoisefft[i]*2
psd=(1/(1*1024))*np.power(np.absolute(gaussnoisefft),2) # periodogram normalization 1/(fs*N) with fs = 1 Hz
lsd = np.sqrt(psd)
plt.plot(gaussfreqspace,lsd)
plt.xlabel("Frequency")
plt.ylabel("LSD")
plt.title("3a: LSD")
plt.savefig("3a_LSD")
plt.clf()
#RMS of the signal is the square root of its mean power
rms=math.sqrt(np.mean(np.power(np.absolute(filteredgaussnoise),2)))
print(rms)
print(math.sqrt(np.sum(np.absolute(psd))/(1024*2)))
plt.plot(gaussfreqspace, 10*np.log10(psd/50))
plt.xlabel("Frequency")
plt.ylabel("PSD")
plt.title("3c: PSD")
plt.savefig("3C_PSD")
plt.clf()
|
{"hexsha": "de5c3ea0f0656416ecae3f21645233a6c4222739", "size": 4902, "ext": "py", "lang": "Python", "max_stars_repo_path": "hw6/hw6_farmer.py", "max_stars_repo_name": "farmerjm/PHYS38600", "max_stars_repo_head_hexsha": "1fe861360307efd09b3eed38d5502a3f97cc9686", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "hw6/hw6_farmer.py", "max_issues_repo_name": "farmerjm/PHYS38600", "max_issues_repo_head_hexsha": "1fe861360307efd09b3eed38d5502a3f97cc9686", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hw6/hw6_farmer.py", "max_forks_repo_name": "farmerjm/PHYS38600", "max_forks_repo_head_hexsha": "1fe861360307efd09b3eed38d5502a3f97cc9686", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.8522727273, "max_line_length": 231, "alphanum_fraction": 0.7133822929, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1321}
|
# -*- coding: utf-8 -*-
# CCP in Tomographic Imaging (CCPi) Core Imaging Library (CIL).
# Copyright 2017 UKRI-STFC
# Copyright 2017 University of Manchester
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
from numbers import Number
import functools
from ccpi.framework import DataContainer
from ccpi.utilities import NUM_THREADS
#from ccpi.framework import AcquisitionData, ImageData
#from ccpi.optimisation.operators import Operator, LinearOperator
class BlockDataContainer(object):
'''Class to hold DataContainers as column vector
Provides basic algebra between BlockDataContainer's, DataContainer's and
subclasses and Numbers
1) algebra between `BlockDataContainer`s will be element-wise, only if
the shape of the 2 `BlockDataContainer`s is the same, otherwise it
will fail
    2) algebra between `BlockDataContainer`s and `list` or `numpy array` will
       work as long as the number of `rows` matches the number of elements of
       the arrays, independently of whether the `BlockDataContainer` is nested
    3) algebra between a `BlockDataContainer` and one `DataContainer` is
       possible. It requires all the `DataContainer`s in the block to be
       compatible with the `DataContainer` we want to do algebra with. Should
       we require that the `DataContainer` is the same type, like `ImageData`
       or `AcquisitionData`?
4) algebra between `BlockDataContainer` and a `Number` is possible and it
will be done with each element of the `BlockDataContainer` even if nested
A = [ [B,C] , D]
A * 3 = [ 3 * [B,C] , 3* D] = [ [ 3*B, 3*C] , 3*D ]
'''
ADD = 'add'
SUBTRACT = 'subtract'
MULTIPLY = 'multiply'
DIVIDE = 'divide'
POWER = 'power'
AXPBY = 'axpby'
__array_priority__ = 1
__container_priority__ = 2
def __init__(self, *args, **kwargs):
''''''
self.containers = args
self.index = 0
self.geometry = None
#if len(set([i.shape for i in self.containers])):
# self.geometry = self.containers[0].geometry
shape = kwargs.get('shape', None)
if shape is None:
shape = (len(args),1)
# shape = (len(args),1)
self.shape = shape
n_elements = functools.reduce(lambda x,y: x*y, shape, 1)
if len(args) != n_elements:
raise ValueError(
'Dimension and size do not match: expected {} got {}'
.format(n_elements, len(args)))
def __iter__(self):
'''BlockDataContainer is Iterable'''
return self
def next(self):
'''python2 backwards compatibility'''
return self.__next__()
def __next__(self):
try:
out = self[self.index]
except IndexError as ie:
raise StopIteration()
self.index+=1
return out
def is_compatible(self, other):
'''basic check if the size of the 2 objects fit'''
if isinstance(other, Number):
return True
elif isinstance(other, (list, numpy.ndarray)) :
for ot in other:
if not isinstance(ot, (Number,\
numpy.int, numpy.int8, numpy.int16, numpy.int32, numpy.int64,\
numpy.float, numpy.float16, numpy.float32, numpy.float64, \
numpy.complex)):
raise ValueError('List/ numpy array can only contain numbers {}'\
.format(type(ot)))
return len(self.containers) == len(other)
elif isinstance(other, BlockDataContainer):
return len(self.containers) == len(other.containers)
else:
# this should work for other as DataContainers and children
ret = True
for i, el in enumerate(self.containers):
if isinstance(el, BlockDataContainer):
a = el.is_compatible(other)
else:
a = el.shape == other.shape
ret = ret and a
# probably will raise
return ret
def get_item(self, row):
if row > self.shape[0]:
raise ValueError('Requested row {} > max {}'.format(row, self.shape[0]))
return self.containers[row]
def __getitem__(self, row):
return self.get_item(row)
def add(self, other, *args, **kwargs):
'''Algebra: add method of BlockDataContainer with number/DataContainer or BlockDataContainer
        :param: other (number, DataContainer or subclasses, or BlockDataContainer)
        :param: out (optional): provides a placeholder for the result.
'''
out = kwargs.get('out', None)
if out is not None:
self.binary_operations(BlockDataContainer.ADD, other, *args, **kwargs)
else:
return self.binary_operations(BlockDataContainer.ADD, other, *args, **kwargs)
def subtract(self, other, *args, **kwargs):
'''Algebra: subtract method of BlockDataContainer with number/DataContainer or BlockDataContainer
        :param: other (number, DataContainer or subclasses, or BlockDataContainer)
        :param: out (optional): provides a placeholder for the result.
'''
out = kwargs.get('out', None)
if out is not None:
self.binary_operations(BlockDataContainer.SUBTRACT, other, *args, **kwargs)
else:
return self.binary_operations(BlockDataContainer.SUBTRACT, other, *args, **kwargs)
def multiply(self, other, *args, **kwargs):
'''Algebra: multiply method of BlockDataContainer with number/DataContainer or BlockDataContainer
        :param: other (number, DataContainer or subclasses, or BlockDataContainer)
        :param: out (optional): provides a placeholder for the result.
'''
out = kwargs.get('out', None)
if out is not None:
self.binary_operations(BlockDataContainer.MULTIPLY, other, *args, **kwargs)
else:
return self.binary_operations(BlockDataContainer.MULTIPLY, other, *args, **kwargs)
def divide(self, other, *args, **kwargs):
'''Algebra: divide method of BlockDataContainer with number/DataContainer or BlockDataContainer
        :param: other (number, DataContainer or subclasses, or BlockDataContainer)
        :param: out (optional): provides a placeholder for the result.
'''
out = kwargs.get('out', None)
if out is not None:
self.binary_operations(BlockDataContainer.DIVIDE, other, *args, **kwargs)
else:
return self.binary_operations(BlockDataContainer.DIVIDE, other, *args, **kwargs)
def axpby(self, a, b, y, out, dtype=numpy.float32, num_threads = NUM_THREADS):
r'''performs axpby element-wise on the BlockDataContainer containers
        Does the operation :math:`a*x + b*y` and stores the result in out, where x is self
:param a: scalar
:param b: scalar
:param y: compatible (Block)DataContainer
:param out: (Block)DataContainer to store the result
:param dtype: optional, data type of the DataContainers
'''
if out is None:
raise ValueError("out container cannot be None")
        kwargs = {'a':a, 'b':b, 'out':out, 'dtype': dtype, 'num_threads': num_threads}
self.binary_operations(BlockDataContainer.AXPBY, y, **kwargs)
def binary_operations(self, operation, other, *args, **kwargs):
        '''Algebra: generic method for algebraic operations between a BlockDataContainer and a number/DataContainer or BlockDataContainer
        Provides commutativity with DataContainer and subclasses, i.e. this
        class's reverse algebraic methods take precedence over the direct
        algebraic methods of DataContainer and subclasses.
This method is not to be used directly
'''
if not self.is_compatible(other):
raise ValueError('Incompatible for operation {}'.format(operation))
out = kwargs.get('out', None)
if isinstance(other, Number):
# try to do algebra with one DataContainer. Will raise error if not compatible
kw = kwargs.copy()
res = []
for i,el in enumerate(self.containers):
if operation == BlockDataContainer.ADD:
op = el.add
elif operation == BlockDataContainer.SUBTRACT:
op = el.subtract
elif operation == BlockDataContainer.MULTIPLY:
op = el.multiply
elif operation == BlockDataContainer.DIVIDE:
op = el.divide
elif operation == BlockDataContainer.POWER:
op = el.power
else:
raise ValueError('Unsupported operation', operation)
if out is not None:
kw['out'] = out.get_item(i)
op(other, *args, **kw)
else:
res.append(op(other, *args, **kw))
if out is not None:
return
else:
return type(self)(*res, shape=self.shape)
elif isinstance(other, (list, numpy.ndarray, BlockDataContainer)):
# try to do algebra with one DataContainer. Will raise error if not compatible
kw = kwargs.copy()
res = []
if isinstance(other, BlockDataContainer):
the_other = other.containers
else:
the_other = other
for i,zel in enumerate(zip ( self.containers, the_other) ):
el = zel[0]
ot = zel[1]
if operation == BlockDataContainer.ADD:
op = el.add
elif operation == BlockDataContainer.SUBTRACT:
op = el.subtract
elif operation == BlockDataContainer.MULTIPLY:
op = el.multiply
elif operation == BlockDataContainer.DIVIDE:
op = el.divide
elif operation == BlockDataContainer.POWER:
op = el.power
elif operation == BlockDataContainer.AXPBY:
if not isinstance(other, BlockDataContainer):
raise ValueError("{} cannot handle {}".format(operation, type(other)))
op = el.axpby
else:
raise ValueError('Unsupported operation', operation)
if out is not None:
kw['out'] = out.get_item(i)
if operation == BlockDataContainer.AXPBY:
kw['y'] = ot
el.axpby(kw['a'], kw['b'], kw['y'], kw['out'], dtype=kw['dtype'], num_threads=kw['num_threads'])
else:
op(ot, *args, **kw)
else:
res.append(op(ot, *args, **kw))
if out is not None:
return
else:
return type(self)(*res, shape=self.shape)
else:
# try to do algebra with one DataContainer. Will raise error if not compatible
kw = kwargs.copy()
if operation != BlockDataContainer.AXPBY:
# remove keyworded argument related to AXPBY
for k in ['a','b','y', 'num_threads', 'dtype']:
if k in kw.keys():
kw.pop(k)
res = []
for i,el in enumerate(self.containers):
if operation == BlockDataContainer.ADD:
op = el.add
elif operation == BlockDataContainer.SUBTRACT:
op = el.subtract
elif operation == BlockDataContainer.MULTIPLY:
op = el.multiply
elif operation == BlockDataContainer.DIVIDE:
op = el.divide
elif operation == BlockDataContainer.POWER:
op = el.power
elif operation == BlockDataContainer.AXPBY:
# As out cannot be None, it is safe to continue the
# for loop after the call to axpby
kw['out'] = out.get_item(i)
el.axpby(kw['a'], kw['b'], other, kw['out'], kw['dtype'], kw['num_threads'])
continue
else:
raise ValueError('Unsupported operation', operation)
if out is not None:
kw['out'] = out.get_item(i)
op(other, *args, **kw)
else:
res.append(op(other, *args, **kw))
if out is not None:
return
else:
return type(self)(*res, shape=self.shape)
def power(self, other, *args, **kwargs):
if not self.is_compatible(other):
raise ValueError('Incompatible for power')
out = kwargs.get('out', None)
if isinstance(other, Number):
return type(self)(*[ el.power(other, *args, **kwargs) for el in self.containers], shape=self.shape)
elif isinstance(other, list) or isinstance(other, numpy.ndarray):
return type(self)(*[ el.power(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape)
return type(self)(*[ el.power(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], shape=self.shape)
def maximum(self,other, *args, **kwargs):
if not self.is_compatible(other):
raise ValueError('Incompatible for maximum')
out = kwargs.get('out', None)
if isinstance(other, Number):
return type(self)(*[ el.maximum(other, *args, **kwargs) for el in self.containers], shape=self.shape)
elif isinstance(other, list) or isinstance(other, numpy.ndarray):
return type(self)(*[ el.maximum(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape)
return type(self)(*[ el.maximum(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], shape=self.shape)
def minimum(self,other, *args, **kwargs):
if not self.is_compatible(other):
raise ValueError('Incompatible for maximum')
out = kwargs.get('out', None)
if isinstance(other, Number):
return type(self)(*[ el.minimum(other, *args, **kwargs) for el in self.containers], shape=self.shape)
elif isinstance(other, list) or isinstance(other, numpy.ndarray):
return type(self)(*[ el.minimum(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape)
return type(self)(*[ el.minimum(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], shape=self.shape)
## unary operations
def abs(self, *args, **kwargs):
return type(self)(*[ el.abs(*args, **kwargs) for el in self.containers], shape=self.shape)
def sign(self, *args, **kwargs):
return type(self)(*[ el.sign(*args, **kwargs) for el in self.containers], shape=self.shape)
def sqrt(self, *args, **kwargs):
return type(self)(*[ el.sqrt(*args, **kwargs) for el in self.containers], shape=self.shape)
def conjugate(self, out=None):
return type(self)(*[el.conjugate() for el in self.containers], shape=self.shape)
## reductions
def sum(self, *args, **kwargs):
return numpy.sum([ el.sum(*args, **kwargs) for el in self.containers])
def squared_norm(self):
y = numpy.asarray([el.squared_norm() for el in self.containers])
return y.sum()
def norm(self):
return numpy.sqrt(self.squared_norm())
def pnorm(self, p=2):
if p==1:
return sum(self.abs())
elif p==2:
tmp = functools.reduce(lambda a,b: a + b*b, self.containers, self.get_item(0) * 0 ).sqrt()
return tmp
else:
            raise ValueError('Not implemented')
def copy(self):
'''alias of clone'''
return self.clone()
def clone(self):
return type(self)(*[el.copy() for el in self.containers], shape=self.shape)
def fill(self, other):
if isinstance (other, BlockDataContainer):
if not self.is_compatible(other):
raise ValueError('Incompatible containers')
for el,ot in zip(self.containers, other.containers):
el.fill(ot)
else:
            raise ValueError('Cannot fill with object provided {}'.format(type(other)))
def __add__(self, other):
return self.add( other )
# __radd__
def __sub__(self, other):
return self.subtract( other )
# __rsub__
def __mul__(self, other):
return self.multiply(other)
# __rmul__
def __div__(self, other):
return self.divide(other)
# __rdiv__
def __truediv__(self, other):
return self.divide(other)
def __pow__(self, other):
return self.power(other)
# reverse operand
def __radd__(self, other):
'''Reverse addition
        to make sure that this method is called rather than the __add__ of a numpy array
the class constant __array_priority__ must be set > 0
https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__
'''
return self + other
# __radd__
def __rsub__(self, other):
'''Reverse subtraction
        to make sure that this method is called rather than the __sub__ of a numpy array
the class constant __array_priority__ must be set > 0
https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__
'''
return (-1 * self) + other
# __rsub__
def __rmul__(self, other):
'''Reverse multiplication
to make sure that this method is called rather than the __mul__ of a numpy array
the class constant __array_priority__ must be set > 0
https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__
'''
return self * other
# __rmul__
def __rdiv__(self, other):
'''Reverse division
        to make sure that this method is called rather than the __div__ of a numpy array
the class constant __array_priority__ must be set > 0
https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__
'''
return pow(self / other, -1)
# __rdiv__
def __rtruediv__(self, other):
'''Reverse truedivision
        to make sure that this method is called rather than the __truediv__ of a numpy array
the class constant __array_priority__ must be set > 0
https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__
'''
return self.__rdiv__(other)
def __rpow__(self, other):
'''Reverse power
        to make sure that this method is called rather than the __pow__ of a numpy array
the class constant __array_priority__ must be set > 0
https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__
'''
return other.power(self)
def __iadd__(self, other):
'''Inline addition'''
if isinstance (other, BlockDataContainer):
for el,ot in zip(self.containers, other.containers):
el += ot
elif isinstance(other, Number):
for el in self.containers:
el += other
elif isinstance(other, list) or isinstance(other, numpy.ndarray):
if not self.is_compatible(other):
raise ValueError('Incompatible for __iadd__')
for el,ot in zip(self.containers, other):
el += ot
return self
# __iadd__
def __isub__(self, other):
'''Inline subtraction'''
if isinstance (other, BlockDataContainer):
for el,ot in zip(self.containers, other.containers):
el -= ot
elif isinstance(other, Number):
for el in self.containers:
el -= other
elif isinstance(other, list) or isinstance(other, numpy.ndarray):
if not self.is_compatible(other):
raise ValueError('Incompatible for __isub__')
for el,ot in zip(self.containers, other):
el -= ot
return self
# __isub__
def __imul__(self, other):
'''Inline multiplication'''
if isinstance (other, BlockDataContainer):
for el,ot in zip(self.containers, other.containers):
el *= ot
elif isinstance(other, Number):
for el in self.containers:
el *= other
elif isinstance(other, list) or isinstance(other, numpy.ndarray):
if not self.is_compatible(other):
raise ValueError('Incompatible for __imul__')
for el,ot in zip(self.containers, other):
el *= ot
return self
# __imul__
def __idiv__(self, other):
'''Inline division'''
if isinstance (other, BlockDataContainer):
for el,ot in zip(self.containers, other.containers):
el /= ot
elif isinstance(other, Number):
for el in self.containers:
el /= other
elif isinstance(other, list) or isinstance(other, numpy.ndarray):
if not self.is_compatible(other):
raise ValueError('Incompatible for __idiv__')
for el,ot in zip(self.containers, other):
el /= ot
return self
# __rdiv__
def __itruediv__(self, other):
'''Inline truedivision'''
return self.__idiv__(other)
def dot(self, other):
#
tmp = [ self.containers[i].dot(other.containers[i]) for i in range(self.shape[0])]
return sum(tmp)
if __name__ == '__main__':
from ccpi.framework import ImageGeometry, BlockGeometry
import numpy
N, M = 2, 3
ig = ImageGeometry(N, M)
ig1 = ImageGeometry(2*N, 4*M)
BG = BlockGeometry(ig, ig1)
U = BG.allocate('random_int')
V = BG.allocate('random_int')
print(U.geometry)
print(len(set([i.shape for i in U.containers]))==1)
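    # Hedged demo (not in the original tests): block algebra is element-wise
    # between compatible BlockDataContainers, and scalars broadcast to every
    # container.
    W = (U + V) * 3
    numpy.testing.assert_array_equal(
        W[0].as_array(), 3 * (U[0].as_array() + V[0].as_array()))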
# print ("test sum BDC " )
# w = U[0].as_array() + U[1].as_array()
# w1 = sum(U).as_array()
# numpy.testing.assert_array_equal(w, w1)
#
# print ("test sum BDC " )
# z = numpy.sqrt(U[0].as_array()**2 + U[1].as_array()**2)
# z1 = sum(U**2).sqrt().as_array()
# numpy.testing.assert_array_equal(z, z1)
#
# z2 = U.pnorm(2)
#
# zzz = U.dot(V)
|
{"hexsha": "310132a80544b8ae83c66dbd63f2e778ad228038", "size": 23960, "ext": "py", "lang": "Python", "max_stars_repo_path": "Wrappers/Python/ccpi/framework/BlockDataContainer.py", "max_stars_repo_name": "rijobro/CCPi-Framework", "max_stars_repo_head_hexsha": "ff08216d4e6fef84659b43155c5c52484b1dc543", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Wrappers/Python/ccpi/framework/BlockDataContainer.py", "max_issues_repo_name": "rijobro/CCPi-Framework", "max_issues_repo_head_hexsha": "ff08216d4e6fef84659b43155c5c52484b1dc543", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Wrappers/Python/ccpi/framework/BlockDataContainer.py", "max_forks_repo_name": "rijobro/CCPi-Framework", "max_forks_repo_head_hexsha": "ff08216d4e6fef84659b43155c5c52484b1dc543", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.0977701544, "max_line_length": 131, "alphanum_fraction": 0.5803005008, "include": true, "reason": "import numpy", "num_tokens": 5350}
|
import cv2
import numpy as np
import operator
import keras
import solve_sudoku
import pytesseract
def preprocess_img(image, dilate_single_digit):
# convert to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Blur
blur_image = cv2.GaussianBlur(gray, (3, 3), 0)
show_image(blur_image, "blur")
# Threshold, adaptive to be light independant
thresh = cv2.adaptiveThreshold(
blur_image, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 2)
show_image(thresh, "thresh")
# Bitwise_not
thresh_inv = cv2.bitwise_not(thresh)
show_image(thresh_inv, "bitwise_not")
if dilate_single_digit:
kernel = np.ones((1, 1))
dilated = cv2.dilate(thresh_inv, kernel)
show_image(dilated, "dilate")
return thresh_inv
# Dilate to fill in "holes"
kernel = np.ones((2, 2))
dilated = cv2.dilate(thresh_inv, kernel)
show_image(dilated, "dilate")
return dilated
def find_corners(image):
"""
    Find the corners of the sudoku by finding the largest contour
"""
contours, h = cv2.findContours(
image.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # Find contours
contours = sorted(contours, key=cv2.contourArea,
reverse=True) # Sort by area, descending
polygon = contours[0] # Largest polygon
bottom_right, _ = max(
enumerate([pt[0][0] + pt[0][1] for pt in polygon]), key=operator.itemgetter(1))
top_left, _ = min(enumerate([pt[0][0] + pt[0][1]
for pt in polygon]), key=operator.itemgetter(1))
bottom_left, _ = min(
enumerate([pt[0][0] - pt[0][1] for pt in polygon]), key=operator.itemgetter(1))
top_right, _ = max(enumerate([pt[0][0] - pt[0][1]
for pt in polygon]), key=operator.itemgetter(1))
# Return an array of all 4 points using the indices
# Each point is in its own array of one coordinate
return [polygon[top_left][0], polygon[top_right][0], polygon[bottom_right][0], polygon[bottom_left][0]]
def display_points(in_img, points, radius=5, colour=(0, 0, 255)):
"""Draws circular points on an image."""
img = in_img.copy()
# Dynamically change to a colour image if necessary
if len(colour) == 3:
if len(img.shape) == 2:
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
elif img.shape[2] == 1:
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
for point in points:
img = cv2.circle(img, tuple(int(x) for x in point), radius, colour, -1)
# show_image(img, "corners")
return img
def show_image(img, title):
"""Shows an image until any key is pressed"""
cv2.imshow(title, img) # Display the image
# Wait for any key to be pressed (with the image window active)
def distance_between(p1, p2):
"""Returns the scalar distance between two points"""
a = p2[0] - p1[0]
b = p2[1] - p1[1]
return np.sqrt((a ** 2) + (b ** 2))
def crop_and_warp(img, crop_rect):
"""Crops and warps a rectangular section from an image into a square of similar size."""
# A rectangle described by top left, top right, bottom right and bottom left points
top_left, top_right, bottom_right, bottom_left = crop_rect[
0], crop_rect[1], crop_rect[2], crop_rect[3]
rect = np.array([top_left, top_right, bottom_right,
bottom_left], dtype='float32')
# Find the longest side in the rectangle
side = max([
distance_between(bottom_right, top_right),
distance_between(top_left, bottom_left),
distance_between(bottom_right, bottom_left),
distance_between(top_left, top_right)
])
# Create a square with side of the calculated length, this is the new perspective we want to warp to
square = np.array([[0, 0], [side - 1, 0], [side - 1, side - 1],
[0, side - 1]], dtype='float32')
# Gets the transformation matrix for skewing the image to fit a square by comparing the 4 before and after points
m = cv2.getPerspectiveTransform(rect, square)
# Performs the transformation on the original image and returns
return cv2.warpPerspective(img, m, (int(side), int(side)))
def image_size(img):
"""
    Gets the size of an image, e.g. (350, 350)
"""
return tuple(img.shape[1:: -1])
def infer_grid(img):
"""Infers 81 cell grid from a square image."""
squares = []
side = img.shape[: 1]
side = side[0] / 9
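    # side is now the cell size in pixels: one ninth of the square image side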
for i in range(9):
for j in range(9):
p1 = (i * side, j * side) # Top left corner of a bounding box
# Bottom right corner of bounding box
p2 = ((i + 1) * side, (j + 1) * side)
squares.append((p1, p2))
for square in squares:
img = cv2.rectangle(img, tuple(int(x) for x in square[0]), tuple(
int(x) for x in square[1]), (0, 255, 0))
show_image(img, "grid")
return squares
def cut_from_rect(img, rect):
"""Cuts a rectangle from an image using the top left and bottom right points."""
return img[int(rect[0][1]): int(rect[1][1]), int(rect[0][0]): int(rect[1][0])]
def scale_and_centre(img, size, margin=0, background=0):
"""Rescales and centre an image onto a new background square."""
h, w = img.shape[: 2]
def centre_pad(length):
"""Handles centering for a given length that may be odd or even."""
if length % 2 == 0:
side1 = int((size - length) / 2)
side2 = side1
else:
side1 = int((size - length) / 2)
side2 = side1 + 1
return side1, side2
def scale(r, x):
return int(r * x)
if h > w:
t_pad = int(margin / 2)
b_pad = t_pad
ratio = (size - margin) / h
w, h = scale(ratio, w), scale(ratio, h)
l_pad, r_pad = centre_pad(w)
else:
l_pad = int(margin / 2)
r_pad = l_pad
ratio = (size - margin) / w
w, h = scale(ratio, w), scale(ratio, h)
t_pad, b_pad = centre_pad(h)
img = cv2.resize(img, (w, h))
img = cv2.copyMakeBorder(img, t_pad, b_pad, l_pad,
r_pad, cv2.BORDER_CONSTANT, None, background)
return cv2.resize(img, (size, size))
def find_largest_feature(img, scan_tl=None, scan_br=None):
"""
    Uses the `floodFill` function to find the biggest connected pixel
    structure in the image and the bounding box of the area it filled.
"""
img = img.copy()
height, width = img.shape[:2]
max_area = 0
seed_point = (None, None)
if scan_tl is None:
scan_tl = [0, 0]
if scan_br is None:
scan_br = [width, height]
# Loop through the image
for x in range(scan_tl[0], scan_br[0]):
for y in range(scan_tl[1], scan_br[1]):
# Only fill light or white squares
if img.item(y, x) == 255 and x < width and y < height:
area = cv2.floodFill(img, None, (x, y), 64)
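                # floodFill's first return element is the number of pixels
                # filled; painting each blob grey (64) prevents refilling it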
if area[0] > max_area: # Gets the maximum bound area which should be the grid
max_area = area[0]
seed_point = (x, y)
    # Colour everything grey (compensates for features outside of our middle scanning range)
for x in range(width):
for y in range(height):
if img.item(y, x) == 255 and x < width and y < height:
cv2.floodFill(img, None, (x, y), 64)
# A Mask that is 2 pixels bigger than the image for padding
mask = np.zeros((height + 2, width + 2), np.uint8)
# fill the main feature
if all([p is not None for p in seed_point]):
cv2.floodFill(img, mask, seed_point, 255)
top, bottom, left, right = height, 0, width, 0
for x in range(width):
for y in range(height):
if img.item(y, x) == 64: # Fill with black anything that isn't the main feature
cv2.floodFill(img, mask, (x, y), 0)
# Find the bounding parameters
if img.item(y, x) == 255:
top = y if y < top else top
bottom = y if y > bottom else bottom
left = x if x < left else left
right = x if x > right else right
bbox = [[left, top], [right, bottom]]
return img, np.array(bbox, dtype='float32'), seed_point
def extract_digit(img, rect, size):
"""Extracts a digit (if one exists) from a Sudoku square."""
digit = cut_from_rect(img, rect) # Get the digit box from the whole square
height, width = digit.shape[:2]
margin = int(np.mean([height, width]) / 2.5)
_, bbox, seed = find_largest_feature(
digit, [margin, margin], [width - margin, height - margin])
digit = cut_from_rect(digit, bbox)
# Scale and pad the digit so that it fits a square of the digit size we're using for machine learning
width = bbox[1][0] - bbox[0][0]
height = bbox[1][1] - bbox[0][1]
# Ignore any small bounding boxes
if width > 0 and height > 0 and (width * height) > 100 and len(digit) > 0:
return scale_and_centre(digit, size, 4)
else:
return np.zeros((size, size), np.uint8)
def get_digits(img, squares, size):
"""Extracts digits from their cells and builds an array"""
digits = []
img = preprocess_img(img.copy(), dilate_single_digit=True)
show_image(img, "test")
for square in squares:
digits.append(extract_digit(img, square, size))
return digits
def find_sudoku(img):
"""
    Entry point that finds the sudoku digits for the model to interpret
"""
process_img = preprocess_img(img, dilate_single_digit=False)
corners = find_corners(process_img)
    display_points(img, corners)  # use the function argument, not the module-level image
    cropped_warped = crop_and_warp(img, corners)
# show_image(cropped_warped, "warp")
digit_squares = infer_grid(cropped_warped)
t_digits = get_digits(cropped_warped, digit_squares, 28)
return t_digits, cropped_warped, digit_squares
def modify_input_for_model(images):
"""
    Modifies the images to the input shape used by the model
"""
images = np.asarray(images, dtype=np.float32)
images = images.reshape(images.shape[0], 28, 28, 1)
images = images.astype('float32')
images /= 255
return images
def find_digits_from_images(images):
"""
Uses the CNN model to predict images
"""
t_digits = predict_with_model(images)
t_digits = correct_for_empty(images, t_digits)
return t_digits
def predict_with_model(images):
modified_images = modify_input_for_model(images)
model = keras.models.load_model("models/mnist_model")
preds = model.predict_classes(modified_images)
    # The model is bad at telling 1s and 7s apart, so re-check 7s with OCR
custom_config = r'--oem 1 --psm 10 outputbase digits'
for i in range(len(preds)):
if preds[i] == 7:
try:
d = pytesseract.image_to_string(
images[i], config=custom_config)
if d.isdigit():
                    preds[i] = int(d)  # cast the OCR string to int
except Exception:
print("OCR fail")
return preds
def correct_for_empty(images, t_digits):
"""
    If a cell contains too few white pixels (under 10% of the cell), mark it as 0 (no number contained)
"""
for i in range(len(images)):
num_white_px = np.sum(images[i] == 255)
if num_white_px / (28*28) < 0.1:
t_digits[i] = 0
return t_digits
def write_solution(t_image, t_grid, t_solved_grid, boxes):
    print(t_solved_grid)
print(boxes[0])
boxes = np.reshape(boxes, (9, 9, 4), "F")
print(boxes[0][0][0])
for i in range(9):
for j in range(9):
if not t_grid[i][j]:
org = (int(boxes[i][j][0])+13, int(boxes[i][j][3])-13)
cv2.putText(
t_image, str(t_solved_grid[i][j]), org, cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, cv2.LINE_AA)
show_image(t_image, "solve")
if __name__ == "__main__":
IMG_PATH = "imgs/sudoku7.jpg" # Image to use - .jpg or .png
sudoku = cv2.imread(IMG_PATH, 1)
digits_images, cropped_warped, squares = find_sudoku(sudoku)
digits = find_digits_from_images(digits_images)
grid = np.reshape(digits, (9, 9), "F")
solved_grid, found = solve_sudoku.solve(grid.copy())
if found:
write_solution(cropped_warped, grid, solved_grid, squares)
else:
print("Failed to find solution")
cv2.waitKey(0)
cv2.destroyAllWindows() # Close all windows
|
{"hexsha": "a6deb99a73f37866159ebf963aa9b551d5cac079", "size": 12459, "ext": "py", "lang": "Python", "max_stars_repo_path": "read_sudoku.py", "max_stars_repo_name": "carlostling/computervision-sudoku-solver", "max_stars_repo_head_hexsha": "0bd3c9aba05a49113ad82b2e93e53a55ed08dc34", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-06-11T10:55:14.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-11T10:55:14.000Z", "max_issues_repo_path": "read_sudoku.py", "max_issues_repo_name": "carlostling/computervision-sudoku-solver", "max_issues_repo_head_hexsha": "0bd3c9aba05a49113ad82b2e93e53a55ed08dc34", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2020-11-13T18:54:31.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:34:45.000Z", "max_forks_repo_path": "read_sudoku.py", "max_forks_repo_name": "carlostling/computervision-sudoku-solver", "max_forks_repo_head_hexsha": "0bd3c9aba05a49113ad82b2e93e53a55ed08dc34", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.9482288828, "max_line_length": 117, "alphanum_fraction": 0.6132113332, "include": true, "reason": "import numpy", "num_tokens": 3387}
|
using MinAtar
using Test
@testset "MinAtar.jl" begin
# Write your own tests here.
env = MinAtarEnv("space_invaders")
end
|
{"hexsha": "1d81bf18aedacbc78a6dd5b183b40e16f7ed21d4", "size": 130, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "mkschleg/MinAtar.jl", "max_stars_repo_head_hexsha": "97bdf79992e74fb44a6acab6515388f6d6fa9214", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-02-24T18:41:31.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-24T18:41:31.000Z", "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "mkschleg/MinAtar.jl", "max_issues_repo_head_hexsha": "97bdf79992e74fb44a6acab6515388f6d6fa9214", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-02-14T06:10:30.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-10T22:28:25.000Z", "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "mkschleg/MinAtar.jl", "max_forks_repo_head_hexsha": "97bdf79992e74fb44a6acab6515388f6d6fa9214", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 16.25, "max_line_length": 38, "alphanum_fraction": 0.7153846154, "num_tokens": 41}
|
import os
import cv2
import numpy as np
from utils.error_utils import SUCCESS
from utils.recognition_definitions import *
UPOL = 1
CASIA_1 = 2
MMU = 3
UBIRIS = 4
UPOL_STR = "UPOL"
CASIA_1_STR = "CASIA 1"
MMU_STR = "MMU"
UBIRIS_STR = "UBIRIS"
UPOL_PATH = "./databases/upol/"
CASIA_1_PATH = "./databases/casia1/"
MMU_PATH = "./databases/mmu/"
UBIRIS_PATH = "./databases/ubiris/"
IMAGES_PATH = "images/"
MASKS_PATH = "masks/"
CODES_PATH = "codes/"
STD_RADII = 32
STD_ANGLES = 128
gabor_prefix = "gab"
log_gabor_prefix = "log"
zern_circ_prefix = "zcp"
zern_annu_prefix = "zap"
fourier_prefix = "fou"
mask_prefix = "msk"
mask_ext = "npy"
code_ext = "npy"
def load_code(img_name, db_type, encoding_method, use_mask, alg):
# getting database root path
base_path = get_base_path(db_type)
    # if already computed, then load it from disk
encoding_prefix = get_proper_prefix(encoding_method)
img_without_ext = img_name[0:len(img_name) - 4]
code_name = "%s_%s.%s" % (encoding_prefix, img_without_ext, code_ext)
code_path = base_path + CODES_PATH + code_name
code_mask_name = "%s_%s_%s.%s" % (encoding_prefix, img_without_ext, mask_prefix, code_ext)
code_mask_path = base_path + CODES_PATH + code_mask_name
    # if the code and its mask exist, then load them
if os.access(code_path, os.F_OK) and os.access(code_mask_path, os.F_OK):
return np.load(code_path), np.load(code_mask_path)
# ---------------------------------------------------------------------------
# loading the image
img_path = base_path + IMAGES_PATH + img_name
    # IMREAD_UNCHANGED replaces the OpenCV 2 constant CV_LOAD_IMAGE_UNCHANGED,
    # which was removed in OpenCV 3
    img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
# creating proper mask
if use_mask:
mask_name = "%s.%s" % (img_without_ext, mask_ext)
mask_path = base_path + MASKS_PATH + mask_name
img_mask = np.load(mask_path)
else:
img_mask = np.ones(img.shape, np.uint8)
    # encoding the image
result, code, mask = alg.encode(img, img_mask)
if result != SUCCESS:
return None, None
# saving computed code
np.save(code_path, code)
np.save(code_mask_path, mask)
return code, mask
def get_base_path(db_type):
if db_type == UPOL:
return UPOL_PATH
elif db_type == CASIA_1:
return CASIA_1_PATH
elif db_type == MMU:
return MMU_PATH
elif db_type == UBIRIS:
return UBIRIS_PATH
else:
return None
def get_proper_prefix(encoding_method):
if encoding_method == GABOR_FILTERS_ENCODING:
return gabor_prefix
elif encoding_method == LOG_GABOR_ENCODING:
return log_gabor_prefix
elif encoding_method == ZCP_ENCODING:
return zern_circ_prefix
elif encoding_method == ZAP_ENCODING:
return zern_annu_prefix
elif encoding_method == FOURIER_ENCODING:
return fourier_prefix
else:
return None
def get_image_class(image_name, db_type):
if db_type == UPOL:
return image_name[0:4] # four first characters
elif db_type == CASIA_1:
        return image_name[0:3]  # first three characters
elif db_type == MMU:
return image_name[0:len(image_name) - 5] # removing last 5 characters
elif db_type == UBIRIS:
return image_name[0: len(image_name) - 6]
else:
return None
def compute_far_percent(false_accepted, total):
return float(false_accepted) / total * 100
def compute_frr_percent(false_rejected, total):
return float(false_rejected) / total * 100
def compute_accuracy(accepted, total):
return float(accepted) / total * 100
def compute_eer():
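    # Equal error rate: the operating point where FAR == FRR.
    # Left unimplemented in the original code.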
pass
|
{"hexsha": "5860d49593d6279bf4a739a80fcf00094f9cb7e1", "size": 3611, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/testing_utils.py", "max_stars_repo_name": "ryuzakyl/yapir", "max_stars_repo_head_hexsha": "e0a3b6f5799fbc84295004d849106f707739106f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2018-06-23T08:55:40.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-06T01:05:31.000Z", "max_issues_repo_path": "utils/testing_utils.py", "max_issues_repo_name": "ryuzakyl/yapir", "max_issues_repo_head_hexsha": "e0a3b6f5799fbc84295004d849106f707739106f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-12-05T15:32:25.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-05T15:32:25.000Z", "max_forks_repo_path": "utils/testing_utils.py", "max_forks_repo_name": "ryuzakyl/yapir", "max_forks_repo_head_hexsha": "e0a3b6f5799fbc84295004d849106f707739106f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2018-07-11T04:20:39.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-11T09:57:36.000Z", "avg_line_length": 23.4480519481, "max_line_length": 94, "alphanum_fraction": 0.6687898089, "include": true, "reason": "import numpy", "num_tokens": 977}
|
import numpy as np
from cachpy import cachpy
base_path = 'pickles/lda/'
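# Fisher LDA in brief: with between-class scatter S_b and within-class
# scatter S, the projection directions are the leading eigenvectors of
# S^{-1} S_b. The cached helpers below compute each piece in turn.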
@cachpy(base_path + 'sb_matrix.pickle')
def calculate_sb_matrix(mean_vectors, overall_mean, classes_matrices):
# noinspection PyPep8Naming
S_b = 0
for idx, m_v in enumerate(mean_vectors):
diff = m_v - overall_mean
outer = diff.dot(diff.T)
S_b += (classes_matrices[idx].shape[0] * outer)
return np.asmatrix(S_b)
@cachpy(base_path + 's_matrix.pickle')
def calculate_s_matrix(centered_class_matrices):
# noinspection PyPep8Naming
S = 0
for ccm in centered_class_matrices:
S += ccm.T.dot(ccm)
return np.asmatrix(S)
# noinspection PyPep8Naming
@cachpy(base_path + 's_inv_sb_matrix.pickle')
def calculate_s_inv_sb_matrix(S, S_b):
return np.linalg.inv(S).dot(S_b)
@cachpy(base_path + 'eigen_values_vectors.pickle')
def calculate_eigen_values_vectors(in_matrix):
return np.linalg.eigh(in_matrix)
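# CAUTION: np.linalg.eigh assumes a symmetric/Hermitian input, but
# S^{-1} S_b is not symmetric in general, so np.linalg.eig may be the
# safer choice here; kept as-is to preserve the original behaviour.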
# noinspection PyPep8Naming
def lda(data_matrix, classes_matrices):
mean_vectors = [np.mean(class_matrix[:, :-1], axis=0).T for class_matrix in classes_matrices]
overall_mean = np.mean(data_matrix, axis=0).T
print('calculated means')
S_b = calculate_sb_matrix(mean_vectors, overall_mean, classes_matrices)
print('calculated Sb matrix')
centered_class_matrices = [(class_matrix[:, :-1] - mean_vectors[i].T) for i, class_matrix in
enumerate(classes_matrices)]
print('calculated centered matrices')
del mean_vectors, overall_mean
S = calculate_s_matrix(centered_class_matrices)
print('calculated S matrix')
del centered_class_matrices
S_inv_Sb = calculate_s_inv_sb_matrix(S, S_b)
print('calculated S_inv_Sb')
del S, S_b
eigen_values, eigen_vectors = calculate_eigen_values_vectors(S_inv_Sb)
del S_inv_Sb
print('calculated eigen values and eigen vectors')
idx = eigen_values.argsort()[::-1]
eigen_vectors = eigen_vectors[:, idx]
print('sorted eigen values and vectors')
return np.asmatrix(eigen_vectors[:, :38])
|
{"hexsha": "9b5f8d36b92598eb7ac4898cacea8be767f74e12", "size": 2076, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/lda.py", "max_stars_repo_name": "amrufathy/face-ification", "max_stars_repo_head_hexsha": "409b851df1777dd987450ff5bf767d594f9dc8a8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-11-02T15:40:23.000Z", "max_stars_repo_stars_event_max_datetime": "2017-11-02T15:40:23.000Z", "max_issues_repo_path": "lib/lda.py", "max_issues_repo_name": "amrufathy/face-ification", "max_issues_repo_head_hexsha": "409b851df1777dd987450ff5bf767d594f9dc8a8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/lda.py", "max_forks_repo_name": "amrufathy/face-ification", "max_forks_repo_head_hexsha": "409b851df1777dd987450ff5bf767d594f9dc8a8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.8333333333, "max_line_length": 97, "alphanum_fraction": 0.7153179191, "include": true, "reason": "import numpy", "num_tokens": 521}
|
__doc__ = "a module housing posterior functions used in our inference of anisotropies"
__author__ = "reed.essick@ligo.org"
#-------------------------------------------------
import healpy as hp
import numpy as np
### non-standard libraries
from gpr_isotropy import likelihood
from gpr_isotropy.utils import DEFAULT_NUM_SAMPLES
#-------------------------------------------------
class Posterior(object):
"""
general class for a posterior
extensions may skip certain steps because they can do things analytically
"""
_likelihood_function = likelihood.Likelihood ### does this mean I can't pickle this?
def __init__(self, maps, exposure, kernel, rprior):
self._likelihood = self._likelihood_function(maps, exposure)
self._kernel = kernel
        self._rprior = rprior  # stored under the name the properties and __call__ expect
@property
def likelihood(self):
return self._likelihood
@property
def kernel(self):
return self._kernel
@property
def rprior(self):
return self._rprior
def __call__(self, Ro, eps):
return self._likelihood(Ro, eps) + self._kernel(eps) + self._rprior(Ro)
class Ro_Eps(Posterior):
_likelihood_function = likelihood.Ro_Eps
def eps_fisher(self, Ro):
"""
fisher information matrix for eps|Ro a posteriori (includes effect of kernel)
"""
return self._likelihood.eps_fisher(Ro) + self._kernel._icov
def eps_cov(self, Ro):
"""
covariance matrix for eps|Ro a posteriori (includes effect of kernel)
"""
return np.linalg.inv(self.eps_fisher(Ro))
def eps_mean(self, Ro):
"""
mean for eps|Ro a posteriori (includes effect of kernel)
"""
igamma = np.linalg.inv(self.eps_fisher(Ro))
return np.sum(igamma*Ro*self._likelihood._sum, axis=1)
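    # The three methods above are the standard conjugate-Gaussian identities
    # for eps | Ro: posterior precision = likelihood Fisher + kernel inverse
    # covariance, and posterior mean = covariance times the likelihood's
    # linear term in eps.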
class Ro(Posterior):
"""
log prob(Ro|maps, exposure, kernel, Rprior)
"""
_likelihood_function = likelihood.Ro
def __call__(self, Ro):
return self.likelihood(Ro, self._kernel) + self._rprior(Ro)
#class Eps(Ro_Eps):
# """
# log prob(eps|maps, exposure, kernel, Rprior)
#
# importance sampling to marginalize over Ro
# if Ro_samples is not supplied, we'll sample some from loglike_Ro
# """
# _likelihood_function = likelihood.Eps
#
# def __init__(self, maps, exposure, kernel, rprior, num_samples=DEFAULT_NUM_SAMPLES):
# Posterior.__init__(self, *args)
# self.sample(num_samples=num_samples)
#
# def sample(self, num_samples=DEFAULT_NUM_SAMPLES):
# self._rsamples = []
# raise NotImplementedError('sample from p(Ro|...) to get Ro samples used in numerical marginaliation in __call__')
#
# def __call__(self, eps):
# return self._likelihood(eps, Ro_samples) + self._kernel(eps)
|
{"hexsha": "29e2d4fd54b55a8ba8a0ee6529fc271a00bb6351", "size": 2787, "ext": "py", "lang": "Python", "max_stars_repo_path": "gpr_isotropy/posterior.py", "max_stars_repo_name": "reedessick/gpr-isotropy", "max_stars_repo_head_hexsha": "95fdb58328e3b3ff7d3a974f408afa2e0169c57a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gpr_isotropy/posterior.py", "max_issues_repo_name": "reedessick/gpr-isotropy", "max_issues_repo_head_hexsha": "95fdb58328e3b3ff7d3a974f408afa2e0169c57a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gpr_isotropy/posterior.py", "max_forks_repo_name": "reedessick/gpr-isotropy", "max_forks_repo_head_hexsha": "95fdb58328e3b3ff7d3a974f408afa2e0169c57a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.2934782609, "max_line_length": 122, "alphanum_fraction": 0.6476498027, "include": true, "reason": "import numpy", "num_tokens": 681}
|
[STATEMENT]
lemma is_sup_binary: "is_sup x y (\<Squnion>{x, y})"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. is_sup x y (\<Squnion>{x, y})
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. is_sup x y (\<Squnion>{x, y})
[PROOF STEP]
have "is_Sup {x, y} (\<Squnion>{x, y})"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. is_Sup {x, y} (\<Squnion>{x, y})
[PROOF STEP]
..
[PROOF STATE]
proof (state)
this:
is_Sup {x, y} (\<Squnion>{x, y})
goal (1 subgoal):
1. is_sup x y (\<Squnion>{x, y})
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
is_Sup {x, y} (\<Squnion>{x, y})
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
is_Sup {x, y} (\<Squnion>{x, y})
goal (1 subgoal):
1. is_sup x y (\<Squnion>{x, y})
[PROOF STEP]
by (simp only: is_Sup_binary)
[PROOF STATE]
proof (state)
this:
is_sup x y (\<Squnion>{x, y})
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 459, "file": null, "length": 7}
|
import numpy as np
from tomoxtal.utils import cctbx_tools
from tomoxtal.utils import phases as phases_utils
from tomoxtal.pipeline import MergeCrystals
class TestMergeCrystals:
def setup_class(self):
"""
Prepare a few simulated datasets.
"""
args = {'pdb_path':'/sdf/home/a/apeck/tomoxtal/examples/input/193l.pdb', 'resolution':6.0, 'size':250}
# generate structure factors and retrieve associated cell information
sf = cctbx_tools.reference_sf(args['pdb_path'], args['resolution'], expand_to_p1=True)
sf_data = cctbx_tools.reformat_sf(sf)
sg_symbol, sg_no, self.cell, cs = cctbx_tools.unit_cell_info(args['pdb_path'])
# add random phase shifts
hklIp1, hklIp2, hklIp3 = sf_data.copy(), sf_data.copy(), sf_data.copy()
hklIp2[:,-1], self.shifts2 = phases_utils.add_random_phase_shift(sf_data[:,:3], sf_data[:,-1])
hklIp3[:,-1], self.shifts3 = phases_utils.add_random_phase_shift(sf_data[:,:3], sf_data[:,-1])
        # retain a random subset of Millers from each dataset; rebinding the
        # loop variable would not modify the originals, so collect the
        # subsets explicitly
        subsets = []
        for data in [hklIp1, hklIp2, hklIp3]:
            keep_idx = np.unique(np.random.randint(0, high=data.shape[0], size=args['size']))
            subsets.append(data[keep_idx])
        self.data1, self.data2, self.data3 = subsets
fshifts_list = np.random.uniform(size=(4,3))
self.fshifts_list = np.vstack((fshifts_list, 1-self.shifts2, 1-self.shifts3))
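        # fshifts_list mixes four random decoy shifts with the true inverse
        # shifts (1 - shifts2 and 1 - shifts3), so the common-origin search in
        # the tests below should recover the latter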
def test_common_origin_search(self):
"""
Test that the correct common phase origin is found.
"""
mc = MergeCrystals()
mc.add_crystal(self.data1, self.cell)
fs, score = mc.merge_phases(self.data2, self.cell, fshifts_list=self.fshifts_list)
assert np.allclose(fs, 1-self.shifts2)
def test_add_crystal(self):
"""
        Testing that phase values and intensity values match, which requires both that
1) the correct common origin has been found and 2) that the intensities and
phases have been properly assembled.
"""
mc = MergeCrystals()
mc.add_crystal(self.data1, self.cell)
mc.add_crystal(self.data2, self.cell, fshifts_list=self.fshifts_list)
assert np.isclose(np.sum(phases_utils.wrap_phases(np.abs(np.diff(mc.phases_all,axis=1)))), 0, atol=1e-06)
assert np.isclose(np.sum(np.abs(np.diff(mc.I_all,axis=1))), 0, atol=1e-06)
mc.add_crystal(self.data3, self.cell, fshifts_list=self.fshifts_list)
assert np.isclose(np.sum(phases_utils.wrap_phases(np.abs(np.diff(mc.phases_all,axis=1)))), 0, atol=1e-06)
assert np.isclose(np.sum(np.abs(np.diff(mc.I_all,axis=1))), 0, atol=1e-06)
|
{"hexsha": "a94cd6fc8da9a0814c572fccc7e5b166858e9d33", "size": 2709, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_merge_crystals.py", "max_stars_repo_name": "apeck12/tomoxtal", "max_stars_repo_head_hexsha": "d2b3407708da2a35ecf061fb62ba397d837b980c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_merge_crystals.py", "max_issues_repo_name": "apeck12/tomoxtal", "max_issues_repo_head_hexsha": "d2b3407708da2a35ecf061fb62ba397d837b980c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_merge_crystals.py", "max_forks_repo_name": "apeck12/tomoxtal", "max_forks_repo_head_hexsha": "d2b3407708da2a35ecf061fb62ba397d837b980c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-22T18:30:30.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-22T18:30:30.000Z", "avg_line_length": 47.5263157895, "max_line_length": 113, "alphanum_fraction": 0.6555924695, "include": true, "reason": "import numpy", "num_tokens": 733}
|
#
# Copyright 2020 Joshua Maglione
#
# Distributed under MIT License
#
from functools import reduce  # reduce is no longer a builtin on Python 3
from rationalPoints import _guess_polynomial
from globalVars import _DEFAULT_VERBOSE as _verbose
# A useful function for multiple lines
_cat_with_space = lambda x, y: x + "\n" + y
# Get the name of the atlas, which is the last folder of the directory.
def _get_atlas_name(A):
direc = A.directory
b = direc.rindex("/")
if "/" in direc[:b]:
a = direc[:b].rindex("/")
else:
a = -1
return direc[a + 1:b]
# The preamble to the tex document containing all the stuff above
# "\begin{document}."
def _preamble(title="", author=""):
lines = [
"\\documentclass[a4paper]{article}\n",
"\\usepackage{enumerate}",
"\\usepackage{hyperref}",
"\\hypersetup{",
"\tcolorlinks=true,",
"\tlinkcolor=blue,",
"\tfilecolor=blue,",
"\turlcolor=blue,",
"\tcitecolor=blue,",
"}",
"\\usepackage{amsmath}",
"\\usepackage{amsthm}",
"\\usepackage{amssymb}",
"\\usepackage[margin=2cm]{geometry}",
"\\usepackage{mathpazo}",
"\\usepackage{url}",
"\\usepackage[labelformat=simple]{subcaption}",
"\\usepackage{tikz}",
"\\usepackage{pgf}",
"\\usepackage{longtable}",
"\\usepackage{multirow}",
"\\usepackage{graphicx}\n",
"\\allowdisplaybreaks\n",
"\\title{%s}" % (title),
"\\author{%s}" % (author),
"\\date{\\today}"
]
return reduce(_cat_with_space, lines)
# The introduction of the tex document.
def _intro(direc):
sing_zeta = "\\textsf{SingularZeta}"
direc_str = "\\texttt{%s}" % (direc)
Fp = "$\\mathbb{F}_p$"
intro = """
This is a report generated by %s concerning the chart data in the
directory %s. We report all the computations we undertake in computing
the cone integral associated to %s. While this report is being
developed, we provide the table of varieties for which we need to count
the number of %s-rational points. The rows with no entry under %s-points
cannot be done automatically with the current implementation of %s.
Special attention should be given to the ones with no %s-points if such
examples arise.
""" % (
sing_zeta,
direc_str,
direc_str,
Fp,
Fp,
sing_zeta,
Fp
)
return intro.replace(" ", "")
# The introduction of the tex document.
def _intro_integral(direc):
sing_zeta = "\\textsf{SingularZeta}"
direc_str = "\\texttt{%s}" % (direc)
intro = """
This is a report generated by %s concerning the integral data in the
directory %s. We report all the computations we undertake in computing
the cone integral associated to %s. This report should be read as a kind
of ``printout.'' We write down all of the details as if solving this by
hand. We start with the main integral, and then we move to the charts
that correspond to leaves in the blow-up tree of %s. From each of these
charts, we traverse the vertices of the intersection poset of the
divisors. For each vertex, we write down the simplified integral. The
goal is that all of these integrals are written down correctly and are
monomial.
""" % (
sing_zeta,
direc_str,
direc_str,
direc_str
)
return intro.replace(" ", "")
# Given a set of integers, return a latex compatible string for a set of
# integers.
def _set_to_latex(S):
content = reduce(lambda x, y: x + str(y) + ", ", S, "")
return "$\\{" + content[:-2] + "\\}$"
# Format a polynomial to a latex compatible string.
def _format_poly(f):
from sage.all import latex
return "$%s$" % (latex(f))
# Convert the dictionary output by pRationalPorints into latex output.
def _poly_data_to_latex(P):
from sage.all import latex
system = P["simplified_system"]
gens = P["simplified_ring"].gens()
def _format_system(S):
sys_str = map(lambda f: latex(f), S)
poly_sys = reduce(lambda x, y: x + y + ",\\; ", sys_str, "$")
return poly_sys[:-4] + "$"
if len(system) <= 1:
return _format_poly(system[0]), len(gens)
else:
return _format_system(system), len(gens)
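# Note: _poly_data_to_latex returns a 2-tuple (latex string, number of
# generators), which callers splice directly into a two-slot "%s & %s" format.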
# A function that returns the header for the F_p table of non-polynomial point
# counts.
def _Fp_nonpoly_header(exists=False):
Fp = "$\\mathbb{F}_p$"
sing_zeta = "\\textsf{SingularZeta}"
header = """
\\subsection{Varieties with non-polynomial %s-point counts}\n
""" % (Fp)
if exists:
header += """
We separate the table of varieties with %s-point count not obviously given by a polynomial. It is possible these varieties are given by a (uniform) polynomial, but %s could not guess this.
""" % (Fp, sing_zeta)
else:
header += """
We guess that all varieties have an %s-point count that is given by a (uniform) polynomial, so we do not have a table for this section.
""" % (Fp)
return header.replace(" ", "")
# A function that returns the header for the F_p table concerning the guesses.
def _Fp_guess_header(exists=False):
Fp = "$\\mathbb{F}_p$"
header = """
\\subsection{Varieties with estimated %s-point counts}\n
""" % (Fp)
if exists:
header += """
We include the table of varieties which we could not explicitly determine the polynomials for the number of %s-points. However, we explicitly computed the counts for an overfit set of primes, so we expect these polynomials to be correct for all but finitely many primes.
""" % (Fp)
else:
header += """
We were not able to guess any of the %s-point counts for the varieties in this atlas.
""" % (Fp)
return header.replace(" ", "")
# A function that returns the header for the entire F_p table.
def _Fp_table_header():
Fp = "$\\mathbb{F}_p$"
header = """
\\subsection{The %s-point counts for all varieties}
We write all the varieties for all the monomial cone integrals in one
table.
""" % (Fp)
return header.replace(" ", "")
# A function that returns the header for the entire F_p table.
def _unique_Fp_table_header():
Fp = "$\\mathbb{F}_p$"
header = """
\\subsection{The unique %s-point counts for all varieties}
By unique points, we mean the points only contained in a variety and,
thus, not contained in any other variety. We write all the varieties
for all the monomial cone integrals in
one table.
""" % (Fp)
return header.replace(" ", "")
# A function to build the F_p-table of the varieties with non-polynomial
# Fp-point counts as a latex compatible string.
def _build_nonpoly_Fp_table(chrts):
table_top = [
"\\begin{center}",
"\t\\begin{longtable}{|c|c|p{6cm}|c|c|}",
"\t\t\\hline",
"\t\t\\textbf{Chart} & " +
"\\textbf{Vertex} & " +
"\\textbf{Variety} & " +
"\\textbf{Dim.}\\\\ \\hline \\hline"
]
table_end = [
"\t\\end{longtable}",
"\\end{center}"
]
# The main function we apply to all charts.
def extraction(C):
ID = C._id
V = C.intLat.vertices
Fp = C.intLat.pRationalPoints()
data = zip(V, Fp)
def get_info(X):
info = "\t\t%s & %s & " % (ID, _set_to_latex(X[0]))
info += "%s & %s" % (_poly_data_to_latex(X[1][1]))
if not "C" in str(X[1][0]):
return ""
info += " \\\\ \\hline"
return info
        chart_section = list(filter(lambda l: l != "", map(get_info, data)))
if len(chart_section) == 0:
return [""]
chart_section[-1] = chart_section[-1] + " \\hline"
return chart_section
# Get all the chart data from extraction, and flatten it down.
table_main = reduce(lambda x, y: x + y, map(extraction, chrts), [])
table = table_top + table_main + table_end
return reduce(_cat_with_space, table)
# A function to build the F_p-table of the varieties where we guessed the
# Fp-point count with polynomials.
def _build_estimate_Fp_table(chrts):
table_top = [
"\\begin{center}",
"\t\\begin{longtable}{|c|c|p{6cm}|c|c|c|}",
"\t\t\\hline",
"\t\t\\textbf{Chart} & " +
"\\textbf{Vertex} & " +
"\\textbf{Variety} & " +
"\\textbf{Dim.} & " +
"\\textbf{Guess}\\\\ \\hline \\hline"
]
table_end = [
"\t\\end{longtable}",
"\\end{center}"
]
# The main function we apply to all charts.
def extraction(C):
ID = C._id
V = C.intLat.vertices
Fp = C.intLat.pRationalPoints()
data = zip(V, Fp)
def get_info(X):
info = "\t\t%s & %s & " % (ID, _set_to_latex(X[0]))
info += "%s & %s & " % (_poly_data_to_latex(X[1][1]))
if "X" in str(X[1][0]):
info += "{\\footnotesize " + _format_poly(X[1][0]) + "}"
else:
return ""
info += " \\\\ \\hline"
return info
        chart_section = list(filter(lambda l: l != "", map(get_info, data)))
if len(chart_section) == 0:
return [""]
chart_section[-1] = chart_section[-1] + " \\hline"
return chart_section
# Get all the chart data from extraction, and flatten it down.
table_main = reduce(lambda x, y: x + y, map(extraction, chrts), [])
table = table_top + table_main + table_end
return reduce(_cat_with_space, table)
# A function to build the entire F_p-table as a latex compatible string.
def _build_Fp_table(A):
Fp = "$\\mathbb{F}_p$"
table_top = [
"\\begin{center}",
"\t\\begin{longtable}{|c|c|p{6cm}|c|c|c|}",
"\t\t\\hline",
"\t\t\\textbf{Chart} & " +
"\\textbf{Vertex} & " +
"\\textbf{Variety} & " +
"\\textbf{Dim.}\\ & " +
"%s-\\textbf{points}\\\\ \\hline \\hline" % (Fp)
]
table_end = [
"\t\\end{longtable}",
"\\end{center}"
]
# The main function we apply to all charts.
def extraction(C):
ID = C._id
V = C.intLat.vertices
Fp = C.intLat.pRationalPoints()
data = zip(V, Fp)
def get_info(X):
info = "\t\t%s & %s & " % (ID, _set_to_latex(X[0]))
info += "%s & %s & " % (_poly_data_to_latex(X[1][1]))
if not "C" in str(X[1][0]):
info += "{\\footnotesize " + _format_poly(X[1][0]) + "}"
else:
# is_poly, f = _guess_polynomial(X[1][1]["simplified_ring"], X[1][1]["simplified_system"])
# if is_poly:
# info += "{\\footnotesize " + _format_poly(f) + "}"
# else:
info += "{\\tiny NOT POLYNOMIAL}"
info += " \\\\ \\hline"
return info
        chart_section = list(map(get_info, data))
chart_section[-1] = chart_section[-1] + " \\hline"
return chart_section
# Get all the chart data from extraction, and flatten it down.
table_main = reduce(lambda x, y: x + y, map(extraction, A.charts), [])
# Put everything together and return a string.
table = table_top + table_main + table_end
return reduce(_cat_with_space, table)
# A function to build the entire unique F_p-table as a latex compatible string.
def _build_unique_Fp_table(A):
Fp = "$\\mathbb{F}_p$"
table_top = [
"\\begin{center}",
"\t\\begin{longtable}{|c|c|p{6cm}|c|c|c|}",
"\t\t\\hline",
"\t\t\\textbf{Chart} & " +
"\\textbf{Vertex} & " +
"\\textbf{Variety} & " +
"\\textbf{Dim.}\\ & " +
"%s-\\textbf{points}\\\\ \\hline \\hline" % (Fp)
]
table_end = [
"\t\\end{longtable}",
"\\end{center}"
]
# The main function we apply to all charts.
def extraction(C):
ID = C._id
V = C.intLat.vertices
Fp = C.intLat.pRationalPoints()
Fp = list(map(lambda x: list(x), Fp))
for i in range(len(Fp)):
Fp[i][0] = C.intLat._vertexToPoints[i]
data = zip(V, Fp)
def get_info(X):
info = "\t\t%s & %s & " % (ID, _set_to_latex(X[0]))
info += "%s & %s & " % (_poly_data_to_latex(X[1][1]))
if not "C" in str(X[1][0]):
info += "{\\footnotesize " + _format_poly(X[1][0]) + "}"
else:
# is_poly, f = _guess_polynomial(X[1][1]["simplified_ring"], X[1][1]["simplified_system"])
# if is_poly:
# info += "{\\footnotesize " + _format_poly(f) + "}"
# else:
info += "{\\tiny NOT POLYNOMIAL}"
info += " \\\\ \\hline"
return info
        chart_section = list(map(get_info, data))
chart_section[-1] = chart_section[-1] + " \\hline"
return chart_section
# Get all the chart data from extraction, and flatten it down.
table_main = reduce(lambda x, y: x + y, map(extraction, A.charts), [])
# Put everything together and return a string.
table = table_top + table_main + table_end
return reduce(_cat_with_space, table)
def _cone_cond(C):
from sage.all import latex
first = "\\begin{align*}\n"
last = "\\end{align*}\n"
def one_line(T):
return "v_p(%s) &\leq v_p(%s) \\\\\n" % (latex(T[0]), latex(T[1]))
lines = reduce(lambda x, y: x + y, map(one_line, C), "").replace("\\left", "").replace("\\right", "")
return first + lines + last
def _main_int(A):
from sage.all import latex, gens
I = A.integrand
para = """
The main integral we aim to solve is:
\\begin{equation}
%s \\int_{S} %s \,|\mathrm{d}X|,
\\end{equation}
where $S$ is the subset of $\\mathbb{Z}_p^{%s}$ such that
%s
""" % (latex(I.pFactor().simplify()), I.InsideLatex(), len(gens(A.root.AmbientSpace())), _cone_cond(A.root.cone))
def chart_to_verts(x):
try:
return len(x.intLat.vertices)
except:
return 0
add_up = lambda x, y: x + y
Nverts = reduce(add_up, map(chart_to_verts, A.charts))
next_para = """
We use %s charts, %s of which are leaves. There are a total of %s
integrals to solve.
""" % (A.number_of_charts, len(A.charts), Nverts)
return (para + next_para).replace(" ", "")
def _birationalmap_data(O, N):
from sage.all import latex
first_line = "\\begin{align*}\n"
last_line = "\\end{align*}\n"
V = list(zip(O, N))
n = len(V) // 2 + len(V) % 2
V1 = V[:n]
V2 = V[n:]
mid = ""
for i in range(n):
if i < len(V2):
mid += "%s &\\mapsto %s & %s &\\mapsto %s %s \n" % (latex(V1[i][0]), latex(V1[i][1]), latex(V2[i][0]), latex(V2[i][1]), "\\\\"*(i != n - 1))
else:
mid += "%s &\\mapsto %s & & \n" % (latex(V1[i][0]), latex(V1[i][1]))
return first_line + mid + last_line
def _chart_data(C):
from sage.all import latex
A = C.atlas
old_vars = A.root.birationalMap
new_vars = C.birationalMap
intro = "We apply the following substitution to the initial variables:\n"
biratmap = _birationalmap_data(old_vars, new_vars)
jac = "This substitution yields a Jacobian factor equal to \n\\begin{align*}\n\\left|%s\\right|.\n\\end{align*}\n" % (latex(C.jacDet))
f = reduce(lambda x, y: x + y + ",", map(lambda z: latex(z), C.focus), "")
focus = "The focus is generated by the following:\n\\begin{align*}\n%s.\n\\end{align*}\n" % (f[:-1])
I = C.Integrand()
integral = "The integral simplifies to \n\\begin{equation}\n%s \\int_{S} %s \,|\mathrm{d}X|,\n\\end{equation}\n" % (latex(I.pFactor().simplify()), I.InsideLatex())
cone = "with cone conditions given by:\n" + _cone_cond(C.cone)
return intro + biratmap + jac + focus + integral + cone
def _subchart_section(C):
from sage.all import latex
Subs = C.Subcharts()
P = C.intLat
verts = P.vertices
prod = reduce(lambda x, y: x*y, P.divisors, 1)
subs = ""
for i in range(len(verts)):
subs += "\n\\subsection{Vertex $%s$}\n\n" % (latex(verts[i]).replace("\\left", "").replace("\\right", ""))
if len(verts[i]) < len(P.divisors):
subs += "On this subchart, we assume the following divisors are units:\n"
subs += "\\begin{align*}\n"
subs += reduce(lambda x, y: x + y + " && ", map(lambda j: latex(P.divisors[j]), [j for j in range(len(P.divisors)) if not j in verts[i]]), "")[:-3] + ",\n"
subs += "\\end{align*}\n"
else:
subs += "None of the divisors are considered to be units, "
subs += "and the following divisors are divisible by $p$:\n"
subs += "\\begin{align*}\n"
subs += reduce(lambda x, y: x + y + " && ", map(lambda j: latex(P.divisors[j]), verts[i]), "")[:-3] + ".\n"
subs += "\\end{align*}\n"
if len(verts[i]) > 0:
subs += "We make the following substitutions:\n\\begin{align*}\n"
for j in range(len(verts[i])):
subs += "%s &= pz_{%s}, &" % (latex(P.divisors[sorted(list(verts[i]))[j]]), j + 1)
subs = subs[:-3] + ".\n\\end{align*}\n"
subs += "The number of points in $\\mathbb{F}_p^{%d}$ contained in this subchart and no others is equal to\n$$\n%s.\n$$\n\n" % (len(prod.variables()), latex(P._vertexToPoints[i]))
subs += _chart_data(Subs[i])
return subs
def _chart_section(C, direc=""):
from sage.all import latex
sect_title = "\\section{Chart %s}\n\n" % (C._id)
if C.intLat:
P = C.intLat
if len(P.poset) > 0:
P_plot = P.poset.plot()
p_name = "img/Poset" + C._id + ".png"
P_plot.save(direc + p_name)
pos = "The intersection poset for this chart looks like\n\n\\begin{center}\n\\includegraphics[scale=0.5]{%s}\n\\end{center}\n" % (p_name)
pos += "The vertices of the above poset are labeled by sets of integers. The integers correspond to the following divisors: \n\\begin{enumerate}\n"
pos += reduce(lambda x, y: x + y, map(lambda d: "\\item[%s:] $%s$\n" % (P.divisors.index(d), latex(d)), P.divisors), "")
pos += "\\end{enumerate}\n"
if not P.poset.is_isomorphic(P.DivisorPoset()):
p_name_new = "img/Poset" + C._id + "_new.png"
P_plot_new = P.DivisorPoset().plot()
P_plot_new.save(direc + p_name_new)
pos += "\nThe above poset seems to be \\textbf{incorrect}. Using the same labels, the poset should be as follows\n\n\\begin{center}\n\\includegraphics[scale=0.5]{%s}\n\\end{center}\n" % (p_name_new)
subs = _subchart_section(C)
else:
pos = "The intersection poset is trivial since the integral is already monomial, and so there are no further subdivisions of this chart.\n"
subs = ""
else:
pos = "This chart does not have any data for its intersection poset.\n"
subs = ""
return sect_title + _chart_data(C) + pos + subs
# ==============================================================================
# Main function
# ==============================================================================
# The following is a function that outputs a tex file. The tex file provides
# information concerning the polynomials associated to the atlas A for which we
# cannot automatically determine the number of F_p-rational points.
def RationalReport(A, file=""):
# Take care of input
if not isinstance(file, str):
raise TypeError("Expected 'file' to be a string.")
# Make sure the file string is formatted correctly.
atlas_name = _get_atlas_name(A)
if file == "":
file = atlas_name + "_RationalReport.tex"
if not ".tex" in file:
file_name = file + ".tex"
else:
file_name = file
atlas_name_latex = atlas_name.replace("_", "\\_")
title = "Rational report for %s" % (atlas_name_latex)
with open(file_name, 'w') as tex_file:
tex_file.write(_preamble(title=title))
tex_file.write("\n\n\\begin{document}")
tex_file.write("\n\n\\maketitle")
tex_file.write("\n\\tableofcontents\n\n")
tex_file.write("\n\n\\section{Introduction}\n\n")
tex_file.write(_intro(atlas_name_latex))
with open(file_name, 'a') as tex_file:
tex_file.write("\n\n\\section{Counting $\\mathbb{F}_p$-points}")
# Determine the F_p-rational points of the atlas.
# These are stored with the intersection lattices.
# If these were previously computed, then nothing happens here.
    # (explicit loop rather than map(): on Python 3, map is lazy and the
    # side effects would never run)
    for C in A.charts:
        C.intLat.pRationalPoints(user_input=False)
# Get the charts with a vertex with non-polynomial point count.
def nonpoly(C):
Fp_counts = C.intLat.pRationalPoints()
counts = list(map(lambda X: X[0], Fp_counts))
        return list(filter(lambda x: "C" in str(x), counts)) != []
nonpoly_chrts = list(filter(nonpoly, A.charts))
if _verbose >= 1:
print("Guessing polynomial Fp-point counts.")
    # A function we apply to the nonpoly charts. If it guesses a poly it will
    # replace the non-guess with a polynomial in X.
    def guess_chart(C):
        Fp_points = C.intLat.pRationalPoints()
        def guess_func(data):
            if "C" in str(data[0]):
                is_poly, f = _guess_polynomial(data[1]["simplified_ring"],
                    data[1]["simplified_system"])
                if is_poly:
                    return tuple([f, data[1]])
            return data
        C.intLat.p_points = list(map(guess_func, Fp_points))
        return C
    checked_chrts = list(map(guess_chart, nonpoly_chrts))
    # Assignments to flag variables inside guess_func would only create
    # locals, so determine the flags from the updated counts instead: a
    # successful guess leaves a polynomial in X, while a remaining "C"
    # marks a non-polynomial count.
    all_counts = [str(d[0]) for C in checked_chrts
                  for d in C.intLat.pRationalPoints()]
    nonpoly_exists = any("C" in c for c in all_counts)
    guess_exists = any("X" in c for c in all_counts)
# Build the non-polynomial table
nonpoly_table = _build_nonpoly_Fp_table(checked_chrts)
# Build the estimate table
estimate_table = _build_estimate_Fp_table(checked_chrts)
# Build the entire table
table = _build_Fp_table(A)
# Build the entire unique table
unique_table = _build_unique_Fp_table(A)
with open(file_name, 'a') as tex_file:
tex_file.write("\n\n" + _Fp_nonpoly_header(exists=nonpoly_exists))
tex_file.write("\n\n" + nonpoly_table + "\n\n")
tex_file.write("\n\n" + _Fp_guess_header(exists=guess_exists))
tex_file.write("\n\n" + estimate_table + "\n\n")
tex_file.write("\n\n" + _Fp_table_header())
tex_file.write("\n\n" + table + "\n\n")
tex_file.write("\n\n" + _unique_Fp_table_header())
tex_file.write("\n\n" + unique_table + "\n\n")
with open(file_name, 'a') as tex_file:
tex_file.write("\n\n\\end{document}")
return
# The following is a function that outputs a tex file. The tex file provides
# information concerning the integrals associated to the atlas A.
def IntegralReport(A, direc=""):
# Make sure the file string is formatted correctly.
atlas_name = _get_atlas_name(A)
file_name = direc + atlas_name + "_IntReport.tex"
atlas_name_latex = atlas_name.replace("_", "\\_")
title = "Integral report for %s" % (atlas_name_latex)
with open(file_name, 'w') as tex_file:
tex_file.write(_preamble(title=title))
tex_file.write("\n\n\\begin{document}")
tex_file.write("\n\n\\maketitle")
tex_file.write("\n\\tableofcontents\n\n")
tex_file.write("\n\n\\section{Introduction}\n\n")
tex_file.write(_intro_integral(atlas_name_latex))
with open(file_name, 'a') as tex_file:
tex_file.write("\n\n\\section{Main integral}\n\n")
tex_file.write(_main_int(A))
with open(file_name, 'a') as tex_file:
for C in A.charts:
tex_file.write(_chart_section(C, direc=direc))
with open(file_name, 'a') as tex_file:
tex_file.write("\n\n\\end{document}")
return
|
{"hexsha": "9f40c5127149d394a01d3ac0257bec78cf8ab683", "size": 24365, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/atlasReport.py", "max_stars_repo_name": "joshmaglione/SingularZeta", "max_stars_repo_head_hexsha": "5ff9167cec8233c575fa421e4c99b95f06eb90d0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-11-21T17:37:09.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-21T17:37:09.000Z", "max_issues_repo_path": "src/atlasReport.py", "max_issues_repo_name": "joshmaglione/SingularZeta", "max_issues_repo_head_hexsha": "5ff9167cec8233c575fa421e4c99b95f06eb90d0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-05-21T07:22:08.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-21T07:22:08.000Z", "max_forks_repo_path": "src/atlasReport.py", "max_forks_repo_name": "joshmaglione/SingularZeta", "max_forks_repo_head_hexsha": "5ff9167cec8233c575fa421e4c99b95f06eb90d0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.2351046699, "max_line_length": 282, "alphanum_fraction": 0.5711471373, "include": true, "reason": "from sage", "num_tokens": 6677}
|
using RomanNumerals
using Test
using Random
const MT = MersenneTwister
using RomanNumerals: InvalidRomanNumeral
@testset "Construction" begin
@test RomanNumeral("I") == RomanNumeral(1)
@test RomanNumeral("V") == RomanNumeral(5)
@test RomanNumeral("X") == RomanNumeral(10)
@test RomanNumeral("L") == RomanNumeral(50)
@test RomanNumeral("C") == RomanNumeral(100)
@test RomanNumeral("D") == RomanNumeral(500)
@test RomanNumeral("M") == RomanNumeral(1000)
@test RomanNumeral("II") == RomanNumeral(2)
@test RomanNumeral("VII") == RomanNumeral(7)
@test RomanNumeral("XXX") == RomanNumeral(30)
@test RomanNumeral("LXIV") == RomanNumeral(64)
@test RomanNumeral("CCL") == RomanNumeral(250)
@test RomanNumeral("CD") == RomanNumeral(400)
@test RomanNumeral("M") == RomanNumeral(1000)
@test rn"I" == RomanNumeral(1)
@test rn"V" == RomanNumeral(5)
@test rn"X" == RomanNumeral(10)
@test rn"L" == RomanNumeral(50)
@test rn"C" == RomanNumeral(100)
@test rn"D" == RomanNumeral(500)
@test rn"M" == RomanNumeral(1000)
@test rn"MMMCDLIII" == RomanNumeral(3453)
@test rn"MCDXLIX" == RomanNumeral(1449)
@test rn"MMMCMXCIV" == RomanNumeral(3994)
@test rn"CCCCXXXXIIII" == rn"CDXLIV" == RomanNumeral(444)
@test_throws InvalidRomanNumeral RomanNumeral("Y")
@test_throws InvalidRomanNumeral RomanNumeral("XY")
end
@testset "Consts" begin
@test I == RomanNumeral(1)
@test V == RomanNumeral(5)
@test X == RomanNumeral(10)
@test L == RomanNumeral(50)
@test C == RomanNumeral(100)
@test D == RomanNumeral(500)
@test M == RomanNumeral(1000)
end
@testset "Conversion" begin
@testset "Integer" begin
@test Int(rn"I") == 1
@test Int8(rn"V") == 5
@test Int16(rn"X") == 10
@test Int32(rn"L") == 50
@test Int64(rn"C") == 100
@test Int16(rn"D") == 500
@test Int32(rn"M") == 1000
end
@testset "String" begin
@test string(rn"I") == "I"
@test string(rn"V") == "V"
@test string(rn"X") == "X"
@test string(rn"L") == "L"
@test string(rn"C") == "C"
@test string(rn"D") == "D"
@test string(rn"M") == "M"
end
end
@testset "Arithmetic" begin
@testset "Multiplication/Division" begin
@test rn"II" == 2I == RomanNumeral(2)
@test rn"III" == 3I == RomanNumeral(3)
@test rn"IV" == 4I == RomanNumeral(4)
@test rn"IX" == 9I == RomanNumeral(9)
@test rn"XX" == 2X == RomanNumeral(20)
@test rn"CCC" == 3C == RomanNumeral(300)
@test rn"MMMM" == 4M == RomanNumeral(4000)
@test I ÷ I == RomanNumeral(1)
@test X ÷ V == RomanNumeral(2)
@test L ÷ X == V
@test C ÷ L == 2I
@test C ÷ V == 2I * X
@test M ÷ L == X + X
end
@testset "Addition/Subtraction" begin
@test I + I == RomanNumeral(2)
@test I + I + I == RomanNumeral(3)
@test V - I == RomanNumeral(4)
@test X - I == RomanNumeral(9)
@test X + X == RomanNumeral(20)
@test D - 2C == RomanNumeral(300)
@test M + M + M + M == RomanNumeral(4000)
@test V - 2 == RomanNumeral(3)
@test 5 - rn"II" == RomanNumeral(3)
end
end
@testset "Random" begin
@test rand(MT(1), RomanNumeral) == rn"MCXCIX"
@test rand(MT(1), RomanNumeral, 3) == RomanNumeral.(["MCXCIX", "MCLV", "CCCLX"])
@test rand(MT(2), RomanNumeral, 3) ==
RomanNumeral.(["DCCCXCV", "DCCXXXVI", "MDCCXXXVIII"])
@test rand(RomanNumeral) isa RomanNumeral{Int}
for T = [Int8, Int16, Int32, Int64, Int128]
@test rand(RomanNumeral{T}) isa RomanNumeral{T}
end
end
|
{"hexsha": "5c0e578acca3b1e3c3e4e9bc0ac6345013511a91", "size": 3702, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "harryscholes/RomanNumerals.jl", "max_stars_repo_head_hexsha": "ee9d38cdae7d12a9afa48c6a7595e23d776de4c6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-03-26T10:32:29.000Z", "max_stars_repo_stars_event_max_datetime": "2019-03-26T10:32:29.000Z", "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "harryscholes/RomanNumerals.jl", "max_issues_repo_head_hexsha": "ee9d38cdae7d12a9afa48c6a7595e23d776de4c6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2019-04-10T14:52:42.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-26T19:44:25.000Z", "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "harryscholes/RomanNumerals.jl", "max_forks_repo_head_hexsha": "ee9d38cdae7d12a9afa48c6a7595e23d776de4c6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.4736842105, "max_line_length": 84, "alphanum_fraction": 0.5815775257, "num_tokens": 1305}
|
using StructJuMP, JuMP
using StructJuMPSolverInterface
include("select_solver.jl")
#############
# A sample model
#############
scen = 1
m = StructuredModel(num_scenarios=scen)
@variable(m, x[1:4])
# @variable(m, -100<=x[1:4]<=100)
@NLobjective(m, Min, 1*x[1] + 3*x[3] + 4*x[4] )
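# x and the objective above form the first stage; each scenario block below
# adds second-stage variables y1 and constraints coupling x and y1.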
for i in 1:scen
bl = StructuredModel(parent=m)
# @variable(bl, -200<=y1[1:3]<=200)
@variable(bl, y1[1:3])
@NLconstraint(bl, -100<= 0.1*x[1] + 0.2*x[2] <= 100)
@NLconstraint(bl, -101<= 1.1*x[2] + 1.2*x[3] <= 101)
@NLconstraint(bl, -102<= 2.1*x[3] + 2.2*x[4] <= 102)
# @NLconstraint(bl, 9.1*x[1] + 9.2*x[3] -109 == 0)
# @NLconstraint(bl, 8.1*x[2] + 8.2*x[3] -108 == 0)
# @NLconstraint(bl, 7.1*x[3] + 7.2*x[4] -107 == 0)
@NLconstraint(bl, 1*x[3] + 2*x[4] == 107 )
@NLconstraint(bl, -201<= 10.1*x[1] + 10.2*x[2] - 10.3*y1[1] + 10.4*y1[2] <= 201)
@NLconstraint(bl, -202<= 11.1*x[2] + 11.2*x[3] - 11.3*y1[1] + 11.4*y1[2] + 11.5*y1[3] <= 202)
@NLconstraint(bl, -203<= 12.1*x[3] + 12.2*x[4] + 12.4*y1[2] - 12.5*y1[3] <= 203)
@NLobjective(bl, Min, 12*y1[2] + 13*y1[3])
end
structJuMPSolve(m)
getVarValue(m)
|
{"hexsha": "3befbb5502a4b9bc0eb393242e9d3ea541c87960", "size": 1400, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/pips/parmodel_eqieq_lp.jl", "max_stars_repo_name": "matbesancon/StructJuMP.jl", "max_stars_repo_head_hexsha": "0bcbdd33cbf2d881067ede924f79ea6d1d0b1a2d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/pips/parmodel_eqieq_lp.jl", "max_issues_repo_name": "matbesancon/StructJuMP.jl", "max_issues_repo_head_hexsha": "0bcbdd33cbf2d881067ede924f79ea6d1d0b1a2d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/pips/parmodel_eqieq_lp.jl", "max_forks_repo_name": "matbesancon/StructJuMP.jl", "max_forks_repo_head_hexsha": "0bcbdd33cbf2d881067ede924f79ea6d1d0b1a2d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-10-11T02:43:15.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-11T00:57:29.000Z", "avg_line_length": 36.8421052632, "max_line_length": 124, "alphanum_fraction": 0.4414285714, "num_tokens": 572}
|
"""
Examples of using nsdcode.
"""
import os
import numpy as np
import nibabel as nib
import matplotlib.pyplot as plt
from nsdcode.nsd_mapdata import NSDmapdata
from nsdcode.nsd_datalocation import nsd_datalocation
from nsdcode.nsd_output import nsd_write_fs
from nsdcode.utils import makeimagestack
# Map T1 anatomical to EPI space
# Here we map the 0.8-mm T1 to the 1-mm EPI space using cubic interpolation.
# The resulting T1 volume might be useful for viewing volume-based
# fMRI results against the anatomy.
subjix = 1
base_path = os.path.join('/path', 'to', 'NSD')
# initiate NSDmapdata
nsd = NSDmapdata(base_path)
nsd_dir = nsd_datalocation(base_path=base_path)
nsd_betas = nsd_datalocation(base_path=base_path, dir0='betas')
sourcedata = f'{nsd_dir}/ppdata/subj{subjix:02d}/anat/T1_0pt8_masked.nii.gz'
sourcespace = 'anat0pt8'
targetspace = 'func1pt0'
interpmethod = 'cubic'
targetdata = nsd.fit(
subjix,
sourcespace,
targetspace,
sourcedata,
interptype=interpmethod,
badval=0,
outputfile=f'test-{sourcespace}-{targetspace}-{interpmethod}.nii.gz')
# show the resulting transform
plt.imshow(makeimagestack(targetdata))
plt.show()
"""
# Test case for comparing the MATLAB and Python outputs.
import seaborn as sns
import pandas as pd
# testA.nii.gz was generated with the code above in example_nsdmapdata.m
matlab_img = nib.load(
'testA.nii.gz').get_data()
python_img = nib.load(
f'test-{sourcespace}-{targetspace}-{interpmethod}.nii.gz').get_data()
d = {'matlab': matlab_img.ravel(),
'python':python_img.ravel()}
df = pd.DataFrame(data=d)
ax = sns.scatterplot(x="matlab", y="python", data=df)
"""
# Let's test going from func1pt8mm to anat0pt8, but for a 4D NIfTI.
sourcedata = \
f'{nsd_betas}/ppdata/subj{subjix:02d}/func1pt8mm/' + \
'betas_fithrf_GLMdenoise_RR/betas_session01.nii.gz'
sourcespace = 'func1pt8'
targetspace = 'anat0pt8'
interpmethod = 'cubic'
nsd.fit(
subjix,
sourcespace,
targetspace,
sourcedata,
interptype='cubic',
badval=0,
outputfile='test4D.nii.gz')
# To confirm correctness, compare the output test4D.nii.gz against the
# anatomy in ppdata/subj01/anat/T1_0pt8_masked.nii.gz (both are in the
# 0.8-mm anatomical space).
interpmethod = 'nearest'
targetdata = nsd.fit(
subjix,
sourcespace,
targetspace,
sourcedata,
interptype=interpmethod,
badval=0,
outputfile=f'test-{sourcespace}-{targetspace}-{interpmethod}.nii.gz')
# show the resulting transform
plt.imshow(makeimagestack(targetdata))
interpmethod = 'linear'
targetdata = nsd.fit(
subjix,
sourcespace,
targetspace,
sourcedata,
interptype=interpmethod,
badval=0,
outputfile=f'test-{sourcespace}-{targetspace}-{interpmethod}.nii.gz')
# show the resulting transform
plt.imshow(makeimagestack(targetdata))
# test going from func1pt8 to anat0pt8
sourcespace = 'func1pt8'
sourcedata = \
f'{nsd_betas}/ppdata/subj{subjix:02d}/{sourcespace}mm/' + \
'betas_fithrf_GLMdenoise_RR/meanbeta_session01.nii.gz'
targetspace = 'anat0pt8'
interpmethod = 'cubic'
targetdata = nsd.fit(
subjix,
sourcespace,
targetspace,
sourcedata,
interptype=interpmethod,
badval=0,
outputfile=f'test-{sourcespace}-{targetspace}-{interpmethod}.nii.gz')
# show the resulting transform
plt.imshow(makeimagestack(targetdata.astype(np.float32)/300), vmin=-5, vmax=5., cmap='RdBu_r')
interpmethod = 'nearest'
targetdata = nsd.fit(
subjix,
sourcespace,
targetspace,
sourcedata,
interptype=interpmethod,
badval=0,
outputfile=f'test-{sourcespace}-{targetspace}-{interpmethod}.nii.gz')
# show the resulting transform
plt.imshow(makeimagestack(targetdata.astype(np.float32)/300), vmin=-5, vmax=5., cmap='RdBu_r')
interpmethod = 'linear'
targetdata = nsd.fit(
subjix,
sourcespace,
targetspace,
sourcedata,
interptype=interpmethod,
badval=0,
outputfile=f'test-{sourcespace}-{targetspace}-{interpmethod}.nii.gz')
# show the resulting transform
plt.imshow(makeimagestack(targetdata.astype(np.float32)/300), vmin=-5, vmax=5., cmap='RdBu_r')
# Test mapping the mean EPI volume from func1pt8 to anat0pt8.
sourcespace = 'func1pt8'
sourcedata = f'{nsd_dir}/ppdata/subj{subjix:02d}/{sourcespace}mm/mean.nii.gz'
targetspace = 'anat0pt8'
interpmethod = 'cubic'
targetdata = nsd.fit(
subjix,
sourcespace,
targetspace,
sourcedata,
interptype=interpmethod,
badval=0,
outputfile=f'test-{sourcespace}-{targetspace}-{interpmethod}.nii.gz')
# show the resulting transform
plt.imshow(makeimagestack(targetdata), cmap='RdBu_r')
## Map EPI results to MNI space
# Here we take the variance explained (R2) value obtained for the "betas_fithrf_GLMdenoise_RR"
# GLM model in the first NSD session in the high-resolution 1-mm functional preparation,
# and map this to MNI space (which has 1-mm resolution).
sourcespace = 'func1pt0'
sourcedata = f'{nsd_betas}/ppdata/subj{subjix:02d}/func1mm/betas_fithrf_GLMdenoise_RR/R2_session01.nii.gz'
targetspace = 'MNI'
targetdata = nsd.fit(
subjix,
sourcespace,
targetspace,
sourcedata,
interptype='cubic',
badval=0,
outputfile=f'test-{sourcespace}-{targetspace}-{interpmethod}.nii.gz')
plt.imshow(makeimagestack(targetdata))
# For comparison, we repeat the same operation but for the low-resolution
# 1.8-mm functional preparation.
sourcedata = f'{nsd_betas}/ppdata/subj{subjix:02d}/func1pt8mm/betas_fithrf_GLMdenoise_RR/R2_session01.nii.gz'
sourcespace = 'func1pt8'
targetspace = 'MNI'
interpmethod = 'cubic'
nsd.fit(
subjix,
sourcespace,
targetspace,
sourcedata,
interptype=interpmethod,
badval=0,
outputfile=f'test-{sourcespace}-{targetspace}-{interpmethod}.nii.gz')
# To assess the results, compare the following:
# templates/MNI152_T1_1mm.nii.gz
# test-func1pt0-MNI-cubic.nii.gz
# test-func1pt8-MNI-cubic.nii.gz
# Notice that the high- vs. low-resolution functional preparation makes a difference.
# To confirm sanity of the transformations, we can repeat the transformations for the
# mean EPI volume in the two different functional preparations.
sourcedata = f'{nsd_dir}/ppdata/subj{subjix:02d}/func1mm/mean_session01.nii.gz'
sourcespace = 'func1pt0'
targetspace = 'MNI'
interpmethod = 'cubic'
nsd.fit(
subjix,
sourcespace,
targetspace,
sourcedata,
interptype=interpmethod,
badval=0,
outputfile='testC_1mm.nii.gz')
sourcedata = f'{nsd_dir}/ppdata/subj{subjix:02d}/func1pt8mm/mean_session01.nii.gz'
nsd.fit(
subjix,
'func1pt8',
'MNI',
sourcedata,
'cubic',
badval=0,
outputfile='testC_1pt8mm.nii.gz')
# Compare the two outputs against each other:
# testC_1mm.nii.gz
# testC_1pt8mm.nii.gz
# Notice that the two mean EPI volumes are spatially consistent but differ in
# the level of spatial detail.
## Map EPI results to surface space
# Here we take the same variance explained (R2) value described above
# and map it to the mid-gray native subject surface in the left hemisphere.
# This mapping is accomplished using a cubic interpolation of the data
# at each surface vertex location.
fsdir = os.path.join(nsd_datalocation(base_path=base_path), 'freesurfer', f'subj{subjix:02d}')
sourcedata = f'{nsd_betas}/ppdata/subj{subjix:02d}/func1mm/betas_fithrf_GLMdenoise_RR/R2_session01.nii.gz'
nsd.fit(
subjix,
'func1pt0',
'lh.layerB2',
sourcedata,
'cubic',
badval=0,
outputfile='lh.testD_layerB2.mgz',
outputclass=None,
fsdir=fsdir)
# Let's repeat the above test, going from 1.8 mm to the vertices.
sourcedata = f'{nsd_betas}/ppdata/subj{subjix:02d}/func1pt8mm/betas_fithrf_GLMdenoise_RR/R2_session01.nii.gz'
sourcespace = 'func1pt8'
targetspace = 'lh.layerB1'
interpmethod = 'cubic'
nsd.fit(
subjix,
sourcespace,
targetspace,
sourcedata,
interptype='cubic',
badval=0,
outputfile='lh.testD_layerB1.mgz',
outputclass=None,
fsdir=fsdir)
# Let's repeat the same operation but sample the data onto the other two surfaces.
# "layerB1", "layerB2", and "layerB3" correspond to 25%, 50%, and 75%
# of the distance from the pial to the white-matter surfaces, respectively.
# Point sourcedata back at the 1-mm betas so it matches the 'func1pt0'
# source space used in the next two calls.
sourcedata = f'{nsd_betas}/ppdata/subj{subjix:02d}/func1mm/betas_fithrf_GLMdenoise_RR/R2_session01.nii.gz'
nsd.fit(
subjix,
'func1pt0',
'lh.layerB1',
sourcedata,
'cubic',
badval=0,
outputfile='lh.testD_layerB1.mgz',
outputclass=None,
fsdir=fsdir)
nsd.fit(
subjix,
'func1pt0',
'lh.layerB3',
sourcedata,
'cubic',
badval=0,
outputfile='lh.testD_layerB3.mgz',
outputclass=None,
fsdir=fsdir)
# To assess the results, compare the following on the lh.inflated surface:
# lh.testD_layerB1.mgz
# lh.testD_layerB2.mgz
# lh.testD_layerB3.mgz
# Notice that the results depend substantially on the surface onto which
# the data are sampled.
# We can map multiple datasets in one call to nsd.fit(). In the following
# example, the file "R2run_session01.nii.gz" contains 12 different R2 values,
# one for each of the 12 runs conducted in the first NSD session. Each volume
# is independently mapped onto the lh.layerB2 surface, and the multiple
# surface-based outputs are saved into a single .mgz file.
sourcedata = f'{nsd_betas}/ppdata/subj{subjix:02d}/' + \
'func1mm/betas_fithrf_GLMdenoise_RR/R2run_session01.nii.gz'
nsd.fit(
subjix,
'func1pt0',
'lh.layerB2',
sourcedata,
'cubic',
badval=0,
outputfile='lh.testE.mgz',
outputclass=None,
fsdir=fsdir)
# We can also perform the mapping without writing a file to disk.
# Instead, we obtain the results directly in our workspace.
data = nsd.fit(
subjix,
'func1pt0',
'lh.layerB2',
sourcedata,
'cubic',
badval=0)
plt.plot(np.median(data, axis=0))
plt.xlabel('Run number')
plt.ylabel('Median R2')
##
# Map native subject surface results to fsaverage
# Here we repeat the mapping for variance explained (R2) for
# the three cortical depths, accruing results in the workspace.
subjix = 1
sourcedata = \
f'{nsd_betas}/ppdata/subj{subjix:02d}/func1mm/' + \
'betas_fithrf_GLMdenoise_RR/R2_session01.nii.gz'
data = []
for p in range(3):
data.append(
nsd.fit(
subjix,
'func1pt0',
f'lh.layerB{p+1}',
sourcedata,
'cubic',
badval=0
)
)
data = np.vstack(data)
# Now we average results across the three cortical depths and use
# nearest-neighbor interpolation to bring the result to fsaverage.
fsdir = os.path.join(nsd_datalocation(base_path=base_path), 'freesurfer', 'fsaverage')
nsd.fit(
subjix,
'lh.white',
'fsaverage',
np.mean(data, axis=0),
interptype=None,
badval=0,
outputfile='lh.testF.mgz',
fsdir=fsdir)
# Assess the results by inspecting on fsaverage's lh.inflated surface:
# lh.testF.mgz
# and comparing this to the native subject's lh.inflated surface:
# lh.testD_layerB2.mgz
# Inspect alignment of subjects to fsaverage
# Here we load each subject's native curvature and map it to fsaverage.
data = []
for subjix in range(1,9):
a1 = nib.load(
f'{nsd_dir}/freesurfer/subj{subjix:02d}/surf/lh.curvature.mgz').get_fdata().squeeze()
data.append(
nsd.fit(
subjix,
'lh.white',
'fsaverage',
a1,
badval=0
)
)
data = np.asarray(data)
# Write out the results to an .mgz file.
fsdir = os.path.join(nsd_dir, 'freesurfer', 'fsaverage')
nsd_write_fs(
data,
'lh.testG.mgz',
fsdir)
# Inspect on fsaverage's lh.inflated surface:
# lh.testG.mgz
# Confirm that the subjects are reasonably well aligned.
# test case for polar angle data
subjix = 1
sourcedata = f'{nsd_dir}/freesurfer/subj{subjix:02d}/label/lh.prfangle.mgz'
sourcespace = 'lh.white'
targetspace = 'fsaverage'
nsd.fit(
subjix,
sourcespace,
targetspace,
sourcedata,
interptype='cubic',
badval=0,
outputfile='lh.testH.mgz',
fsdir=fsdir)
# Map surface-oriented results to volume space.
# Here we take the Kastner2015 atlas (as prepared in the native subject surface's
# space), associate it with the vertices of the three cortical depth surfaces,
# and use a winner-take-all approach to convert these surface data to a 0.8-mm
# volume.
# Notice that this demonstrates the ability to aggregate data across left
# and right hemispheres before converting to a volume.
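# A toy sketch of the winner-take-all idea (illustration only, not the actual
# nsdcode implementation): each voxel receives the label that occurs most
# often across the depth surfaces that map into it.
votes = np.array([[1, 2, 2, -1],   # hypothetical labels from surface 1
                  [1, 2, 3, -1],   # ... surface 2
                  [1, 1, 2, -1]])  # ... surface 3 (one column per voxel)
wta = [np.bincount(col[col >= 0]).argmax() if (col >= 0).any() else -1
       for col in votes.T]
assert wta == [1, 2, 2, -1]  # modal label per voxel; -1 (badval) where empty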
subjix = 1
sourcedata = np.r_[
np.tile(
f'{nsd_dir}/freesurfer/subj{subjix:02d}/label/lh.Kastner2015.mgz', 3),
np.tile(
f'{nsd_dir}/freesurfer/subj{subjix:02d}/label/rh.Kastner2015.mgz', 3)
].tolist()
sourcespace = [
'lh.layerB1',
'lh.layerB2',
'lh.layerB3',
'rh.layerB1',
'rh.layerB2',
'rh.layerB3'
]
targetspace = 'anat0pt8'
nsd.fit(
subjix,
sourcespace,
targetspace,
sourcedata,
interptype='surfacewta',
badval=-1,
outputfile='testH.nii.gz')
# Inspect the results by comparing the following:
# ppdata/subj01/anat/T1_0pt8_masked.nii.gz
# testH.nii.gz
# Now that we have the atlas in the subject's anatomical space, we can
# create a version that is in the subject's functional space.
sourcespace = 'anat0pt8'
targetspace = 'func1pt0'
sourcedata = 'testH.nii.gz'
nsd.fit(
subjix,
sourcespace,
targetspace,
sourcedata,
interptype='wta',
badval=-1,
outputfile='testI.nii.gz')
# Inspect the results by comparing the following:
# ppdata/subj01/func1mm/mean.nii.gz
# testI.nii.gz
|
{"hexsha": "8e925afb8dfcd5b2e8ab759b0a3027adc01a5e28", "size": 13422, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/examples_nsdmapdata.py", "max_stars_repo_name": "kjamison/nsdcode", "max_stars_repo_head_hexsha": "3f34e17bf4ee2492910bffebf104a2cb7a8fa823", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-12-24T00:23:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T02:22:15.000Z", "max_issues_repo_path": "examples/examples_nsdmapdata.py", "max_issues_repo_name": "kjamison/nsdcode", "max_issues_repo_head_hexsha": "3f34e17bf4ee2492910bffebf104a2cb7a8fa823", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 15, "max_issues_repo_issues_event_min_datetime": "2019-11-05T16:32:50.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-08T13:51:49.000Z", "max_forks_repo_path": "examples/examples_nsdmapdata.py", "max_forks_repo_name": "cvnlab/nsdcode", "max_forks_repo_head_hexsha": "3f34e17bf4ee2492910bffebf104a2cb7a8fa823", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-12-18T12:41:59.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-10T04:18:32.000Z", "avg_line_length": 27.3918367347, "max_line_length": 111, "alphanum_fraction": 0.7082402027, "include": true, "reason": "import numpy", "num_tokens": 4019}
|
# Copyright (c) 2017 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import sys
import os
import shutil
import tarfile
import inspect
import traceback
import numpy as np
import tensorflow as tf
if sys.version_info[0] < 3:
from Queue import Queue
else:
from queue import Queue
from tensorflow.python.ops import gen_math_ops as tf_ops
from tensorflow.contrib.layers.python.layers import layers as tf_layers
from tensorflow.python.layers import utils
RedText = "\x1b[31m"
ResetStyle = "\x1b[0m"
def tf_version_greater(major, minor):
    # Returns True when tf.__version__ is at least (major, minor).
    version = tf.__version__
    i = version.index('.')
    j = version.index('.', i+1)
    return (int(version[:i]), int(version[i+1:j])) >= (major, minor)
def print_error(msg, stack):
sys.stderr.write("%sError: %s%s\n" % (RedText, msg, ResetStyle))
if stack is not None:
traceback.print_list(stack)
def print_warning(msg):
sys.stderr.write("%sWarning: %s%s\n" % (RedText, msg, ResetStyle))
def undecorate(decorated, orig_name=None):
if orig_name is None:
orig_name = decorated.__name__
if not hasattr(decorated, "__closure__") or not decorated.__closure__:
return decorated
for obj in (c.cell_contents for c in decorated.__closure__):
if hasattr(obj, "__name__") and obj.__name__ == orig_name:
return obj
if hasattr(obj, "__closure__") and obj.__closure__:
found = undecorate(obj, orig_name)
if found:
return found
return None
class InvocationTrace:
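    # Installed via sys.settrace (see trace_invocations below); records a
    # (func, args, results, stack) tuple for every call to a traced function.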
def __init__(self, functions, handler):
self.frame = None
self.invocations = list()
self.handler = handler
self.func_names = set()
self.qualified = {}
for func in functions:
undecorated = undecorate(func)
self.func_names.add(undecorated.__name__)
self.qualified[undecorated.__module__ + '.' + undecorated.__name__] = func
def __call__(self, frame, event, result):
func_name = frame.f_code.co_name
if func_name == '__init__':
result = frame.f_locals.get('self')
if result is not None:
func_name = result.__class__.__name__
if func_name not in self.func_names:
return
mod = inspect.getmodule(frame)
if mod is None:
return
func_name = mod.__name__ + '.' + func_name
if event == 'call':
if self.frame is None:
func = self.qualified.get(func_name)
if func is not None:
arg_values = inspect.getargvalues(frame)
self.frame = frame
self.func = func
self.args = {key: value for (key,value) in arg_values.locals.items() if key in arg_values.args}
elif event == 'return':
if self.frame == frame and result is not None:
if isinstance(result, (list, dict)):
result = result.copy()
results = result if isinstance(result, tuple) else (result, )
stack = traceback.extract_stack(frame.f_back)
self.invocations.append((self.func, self.args, results, stack))
if self.handler:
self.handler(self.func, self.args, results, stack)
self.frame = None
return self
class TF2NNEFConverter:
def __init__(self, producers, exporters, reader, output_path):
self.producers = producers
self.exporters = exporters
self.reader = reader
        self.consumers_map = {}
self.tensor_names = {}
self.tensor_counts = {}
self.activations = []
self.output_path = output_path
self.fused = set()
for invocation in producers.values():
args = invocation[1]
for arg in args.values():
if isinstance(arg, (tf.Tensor, tf.Variable)):
                    self.consumers_map.setdefault(arg, []).append(invocation)
def producer(self, tensor):
return self.producers.get(tensor)
def consumers(self, tensor):
        return self.consumers_map.get(tensor)
def consumer(self, tensor):
        consumers = self.consumers_map.get(tensor)
return consumers[0] if consumers is not None and len(consumers) == 1 else None
def exporter(self, func):
item = self.exporters.get(func)
return item[0] if isinstance(item, tuple) else item
def make_fused(self, tensor):
self.fused.add(tensor)
def is_fused(self, tensor):
return tensor in self.fused
def make_constant(self, tf_tensor, nnef_value):
self.tensor_names[tf_tensor] = nnef_value
def make_tensor(self, tf_tensor, nnef_name, indexed=True):
name = self.tensor_names.get(tf_tensor)
if name is not None:
return name
if indexed:
count = self.tensor_counts.get(nnef_name, 1)
self.tensor_counts[nnef_name] = count + 1
indexed_name = nnef_name + str(count)
else:
indexed_name = nnef_name
self.tensor_names[tf_tensor.value() if isinstance(tf_tensor, tf.Variable) else tf_tensor] = indexed_name
self.activations.append((tf_tensor, indexed_name))
return indexed_name
def make_passthrough_tensor(self, tf_tensor_in, tf_tensor_out):
variable = isinstance(tf_tensor_out, tf.Variable)
self.tensor_names[tf_tensor_out.value() if variable else tf_tensor_out] = self.nnef_tensor(tf_tensor_in)
def nnef_tensor(self, tf_tensor):
if isinstance(tf_tensor, (float, int)):
return str(float(tf_tensor))
elif isinstance(tf_tensor, tf.Variable):
return self.tensor_names[tf_tensor.value()]
else:
return self.tensor_names[tf_tensor]
def nnef_op(self, func):
item = self.exporters.get(func)
return item[1] if isinstance(item, tuple) else None
@staticmethod
def nnef_shape(shape, stack=None, is_filter=False, is_broadcast=False):
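        # Reorders TF shapes to NNEF layout: activations NHWC -> NCHW
        # ([N, H, W, C] -> [N, C, H, W]); filters HWIO -> OIHW
        # ([H, W, Cin, Cout] -> [Cout, Cin, H, W]); rank-1 broadcast
        # vectors become [1, C].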
if isinstance(shape, tf.Tensor):
shape = tf.contrib.util.constant_value(shape)
if shape is None:
print_error('cannot handle dynamic tensor shape', stack)
return []
if isinstance(shape, tf.TensorShape):
shape = shape.as_list()
if not isinstance(shape, list):
shape = list(shape)
shape = [s.value if isinstance(s, tf.Dimension) else int(s) for s in shape]
if len(shape) == 0:
return []
elif len(shape) == 1:
return [1, shape[0]] if is_broadcast else [shape[0]]
elif len(shape) == 2:
return shape
else:
if is_filter:
return [shape[-1], shape[-2]] + shape[:-2]
else:
return [shape[0], shape[-1]] + shape[1:-1]
@staticmethod
def nnef_axis(axis, rank):
if axis < 0:
axis = rank + axis
if rank == 1:
return 1
elif rank == 2:
return axis
else:
if axis == 0:
return 0
elif axis == rank - 1:
return 1
else:
return axis + 1
@staticmethod
def nnef_axes(axis, rank):
if isinstance(axis, (list, tuple)):
return [TF2NNEFConverter.nnef_axis(a, rank) for a in axis]
else:
return [TF2NNEFConverter.nnef_axis(axis, rank)]
@staticmethod
def nnef_bool(value):
if value is None:
value = False
return 'true' if value else 'false'
@staticmethod
def nnef_array(value, rank):
if isinstance(value, list):
return value
elif isinstance(value, tuple):
return list(value)
else:
return [value] * rank
@staticmethod
def nnef_padding(padding, rank):
return [] if padding.upper() == 'SAME' else [(0, 0)] * rank
@staticmethod
def nnef_padding_ex(padding, input_sizes, filter_sizes, strides):
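        # Reproduces TF's SAME/VALID padding arithmetic as explicit
        # (front, back) pairs. Worked example with input=7, filter=3, stride=2:
        # SAME:  output = ceil(7/2) = 4, pad_total = (4-1)*2 + 3 - 7 = 2 -> (1, 1)
        # VALID: output = ceil((7-3+1)/2) = 3, pad_total = (3-1)*2 + 3 - 7 = 0 -> (0, 0)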
def same_padding(input_size, filter_size, stride):
output_size = int(np.ceil(float(input_size) / float(stride)))
pad_total = (output_size - 1) * stride + filter_size - input_size
if pad_total >= 0:
pad_front = pad_total // 2
pad_back = pad_total - pad_front
return (pad_front, pad_back)
else:
return (0, pad_total)
def valid_padding(input_size, filter_size, stride):
output_size = int(np.ceil(float(input_size - filter_size + 1) / float(stride)))
pad_total = (output_size - 1) * stride + filter_size - input_size
return (0, pad_total)
return [same_padding(input_size, filter_size, stride) if padding.upper() == 'SAME' else valid_padding(input_size, filter_size, stride)
for (input_size, filter_size, stride) in zip(input_sizes, filter_sizes, strides)]
@staticmethod
def nnef_tensor_shuffle_dims(tensor, is_filter, is_broadcast):
rank = tensor.ndim
if rank == 0:
return tensor
elif rank == 1:
return np.expand_dims(tensor, axis=0) if is_broadcast else tensor
elif rank == 2:
return tensor
else:
axes = [rank-1, rank-2] + list(range(rank-2)) if is_filter else [0, rank-1] + list(range(1,rank-1))
return np.transpose(tensor, axes)
@staticmethod
def nnef_ids(ids):
return "[" + ", ".join(map(str, ids)) + "]"
@staticmethod
def dilated_size(size, dilation):
return [(s - 1) * d + 1 for (s, d) in zip(size, dilation)]
def propagate_padding(self, input, padding, border, spatial, stack):
producer = self.producer(input)
if producer is not None and producer[0] == tf.pad:
if len(padding) == 0:
print_error("only 'VALID' padding is accepted after an explicit 'pad' operation", stack)
args = producer[1]
border = args['mode'].lower()
if border == 'symmetric':
border = 'reflect-even'
paddings = args['paddings']
paddings = paddings[1:-1] if spatial else [paddings[0], paddings[-1]] + paddings[1:-1]
padding = [tuple(p) for p in paddings]
return padding, border
def propagate_space_to_batch(self, input, dilation, padding):
producer = self.producer(input)
if producer is not None and (producer[0] == tf.space_to_batch_nd or producer[0] == tf.space_to_batch):
args = producer[1]
input = args['input']
dilation = args['block_shape'].tolist()
padding = 'SAME' if args['paddings'].any() else 'VALID'
return input, dilation, padding
def propagate_batch_to_space(self, output):
consumer = self.consumer(output)
if consumer is not None and (consumer[0] == tf.batch_to_space_nd or consumer[0] == tf.batch_to_space):
results = consumer[2]
return results[0]
return output
def is_binary_op(self, func):
return self.exporter(func) == export_binary
def is_broadcast(self, tensor):
shape = tensor.shape
if isinstance(shape, tf.TensorShape):
shape = shape.dims
if len(shape) != 1:
return False
consumers = self.consumers(tensor)
if consumers is None:
return False
for invocation in consumers:
func, args = invocation[:2]
if self.is_binary_op(func):
x = args['x']
other = args['y'] if tensor == x else x
elif func == tf.nn.bias_add:
other = args['value']
elif func == tf.nn.batch_normalization or func == tf.nn.fused_batch_norm:
other = args['x']
else:
return False
if isinstance(other, tf.Variable):
other = other.value()
if not isinstance(other, tf.Tensor):
return False
if other.shape[-1] != tensor.shape[0]:
return False
return True
def is_filter(self, tensor):
consumers = self.consumers(tensor)
if consumers is not None:
for invocation in consumers:
func, args = invocation[:2]
if func in [tf.nn.conv1d, tf.nn.atrous_conv2d, tf.nn.atrous_conv2d_transpose] \
and args['filters'] == tensor:
return True
elif func in [tf.nn.conv2d, tf.nn.conv3d, tf.nn.convolution,
tf.nn.conv2d_transpose, tf.nn.conv3d_transpose,
tf.nn.depthwise_conv2d, tf.nn.depthwise_conv2d_native,
tf.nn.depthwise_conv2d_native_backprop_input] \
and args['filter'] == tensor:
return True
elif func == tf.nn.separable_conv2d:
if args['depthwise_filter'] == tensor or args['pointwise_filter'] == tensor:
return True
return False
def is_depthwise(self, tensor):
consumers = self.consumers(tensor)
if consumers is not None:
for invocation in consumers:
func, args = invocation[:2]
if func in [tf.nn.depthwise_conv2d, tf.nn.depthwise_conv2d_native,
tf.nn.depthwise_conv2d_native_backprop_input] \
and args['filter'] == tensor:
return True
elif func == tf.nn.separable_conv2d:
if args['depthwise_filter'] == tensor:
return True
return False
def export_skip(func, args, results, stack, converter):
return None
def export_passthrough(func, args, results, stack, converter):
arg = converter.exporters.get(func)[1]
converter.make_passthrough_tensor(args[arg], results[0])
return None
def export_placeholder(func, args, results, stack, converter):
result = results[0]
shape = converter.nnef_shape(args['shape'], stack=stack, is_broadcast=converter.is_broadcast(result))
name = args['name']
if name is not None:
output = converter.make_tensor(result, name, indexed=False)
else:
output = converter.make_tensor(result, 'input')
return "{} = external(shape = {})".format(output, shape)
def export_variable(func, args, results, stack, converter):
name = args['name']
if name is None or name == '':
print_error("non-empty 'name' argument must be provided for {}".
format("tf.Variable()" if func == tf.Variable else "tf.get_variable()"), stack)
return None
if func == tf.get_variable:
initializer = args.get('initializer')
if isinstance(initializer, np.ndarray):
shape = initializer.shape
elif isinstance(initializer, (int, float)):
shape = [1,1]
else:
shape = args['shape']
else:
shape = tf.convert_to_tensor(args['initial_value']).shape
result = results[0]
is_filter = converter.is_filter(result)
is_depthwise = converter.is_depthwise(result)
is_broadcast = converter.is_broadcast(result)
shape = converter.nnef_shape(shape, stack=stack, is_filter=is_filter, is_broadcast=is_broadcast)
pos = name.rfind('/')
output = converter.make_tensor(result, name[pos + 1:] if pos != -1 else name)
if is_filter and is_depthwise:
shape[0] *= shape[1]
shape[1] = 1
if converter.reader:
key = result.name[:-2]
if converter.reader.has_tensor(key):
tensor = converter.reader.get_tensor(key)
if is_filter and is_depthwise:
tensor = np.reshape(tensor, newshape = tensor.shape[:-2] + (1, tensor.shape[-2] * tensor.shape[-1]))
filename = converter.output_path + '/' + name + '.dat'
write_nnef_tensor(filename, tensor, is_filter=is_filter, is_broadcast=is_broadcast)
else:
print_error("variable '{}' not found in checkpoint".format(key), stack)
return "{} = variable(shape = {}, label = '{}')".format(output, shape, name)
def export_constant(func, args, results, stack, converter):
shape = args['shape']
value = args['value']
result = results[0]
singular = True
if shape is not None:
for s in shape:
if s != 1:
singular = False
if not isinstance(value, (np.ndarray, list, tuple)) and singular:
converter.make_constant(results[0], float(value))
return None
if not isinstance(value, np.ndarray):
value = np.array(value, dtype=np.float32)
if value.size == 1 and singular:
converter.make_constant(results[0], value.flatten()[0])
return None
if shape is None:
shape = list(value.shape)
is_broadcast = converter.is_broadcast(result)
shape = converter.nnef_shape(shape, stack=stack, is_broadcast=is_broadcast)
value = converter.nnef_tensor_shuffle_dims(value, is_filter=False, is_broadcast=is_broadcast).flatten().tolist()
output = converter.make_tensor(results[0], 'const')
return '{} = constant(shape = {}, value = {})'.format(output, shape, value)
def export_conv(func, args, results, stack, converter):
kernel = args['filter']
if isinstance(kernel, tf.Variable):
kernel = kernel.value()
value = args.get('input')
if value is None:
value = args['value']
input = converter.nnef_tensor(value)
filter = converter.nnef_tensor(kernel)
size = kernel.shape.as_list()[:-2]
strides = list(args['strides'])[1:-1]
rate = args.get('rate', args.get('dilation_rate'))
rate = list(rate) if rate else [1] * len(size)
filter_sizes = converter.dilated_size(size, rate)
padding = args['padding']
border = 'constant'
value, rate, padding = converter.propagate_space_to_batch(value, rate, padding)
result = converter.propagate_batch_to_space(results[0])
bias = 0.0
consumers = converter.consumers(result)
if consumers is not None and len(consumers) == 1:
invocation = consumers[0]
_func, _args, _res = invocation[:3]
if _func == tf.nn.bias_add and _args["value"] == result:
bias = converter.nnef_tensor(_args["bias"])
result = _res[0]
converter.make_fused(result)
elif _func in [tf.add, tf_ops.add]:
if _args["x"] == result:
bias = converter.nnef_tensor(_args["y"])
elif _args["y"] == result:
bias = converter.nnef_tensor(_args["x"])
result = _res[0]
converter.make_fused(result)
output_shape = args.get('output_shape')
if output_shape is not None:
if isinstance(output_shape, tf.Tensor):
output_shape = tf.contrib.util.constant_value(output_shape)
if output_shape is None:
output_shape = result.get_shape()
if output_shape is not None:
output_shape = output_shape.as_list()
if None in output_shape:
output_shape = None
if output_shape is None:
print_warning("dynamic 'output_shape' cannot be evaluated, reverting to default")
value_shape = value.shape.as_list()[1:-1]
input_shape = output_shape[1:-1] if output_shape is not None else \
[utils.deconv_output_length(value_shape[i], filter_sizes[i], padding.lower(), strides[i]) for i in range(len(value_shape))]
padding = converter.nnef_padding_ex(padding, input_shape, filter_sizes, strides)
else:
padding = converter.nnef_padding(padding, len(size))
padding, border = converter.propagate_padding(value, padding, border, spatial=True, stack=stack)
op = converter.nnef_op(func)
output = converter.make_tensor(result, 'conv' if op == 'planewise_conv' else op)
return "{} = {}({}, {}, {}, padding = {}, border = '{}', stride = {}, dilation = {})" \
.format(output, op, input, filter, bias, padding, border, strides, rate)
def export_convolution(func, args, results, stack, converter):
args['strides'] = [1] + list(args['strides']) + [1]
return export_conv(func, args, results, stack, converter)
def export_separable_conv(func, args, results, stack, converter):
value = args['input']
depth_kernel = args['depthwise_filter']
point_kernel = args['pointwise_filter']
if isinstance(depth_kernel, tf.Variable):
depth_kernel = depth_kernel.value()
if isinstance(point_kernel, tf.Variable):
point_kernel = point_kernel.value()
input = converter.nnef_tensor(value)
depth_filter = converter.nnef_tensor(depth_kernel)
point_filter = converter.nnef_tensor(point_kernel)
size = depth_kernel.shape.as_list()[:-2]
strides = converter.nnef_array(args['strides'][1:-1], 2)
rate = converter.nnef_array(args['rate'], 2)
padding = converter.nnef_padding(args['padding'], len(size))
border = 'constant'
padding, border = converter.propagate_padding(value, padding, border, spatial=True, stack=stack)
output = converter.make_tensor(results[0], 'conv')
return "{} = separable_conv({}, plane_filter = {}, point_filter = {}, padding = {}, border = '{}', stride = {}, dilation = {})" \
.format(output, input, depth_filter, point_filter, padding, border, strides, rate)
def export_pool(func, args, results, stack, converter):
value = args['value']
input = converter.nnef_tensor(value)
size = converter.nnef_shape(args['ksize'], stack=stack)
strides = converter.nnef_shape(args['strides'], stack=stack)
padding = converter.nnef_padding(args['padding'], len(size))
border = 'ignore'
padding, border = converter.propagate_padding(value, padding, border, spatial=False, stack=stack)
op = converter.nnef_op(func)
output = converter.make_tensor(results[0], 'pool')
return "{} = {}({}, size = {}, padding = {}, border = '{}', stride = {})".format(output, op, input, size, padding, border, strides)
def export_activation(func, args, results, stack, converter):
x = converter.nnef_tensor(args['features'])
op = converter.nnef_op(func)
output = converter.make_tensor(results[0], op)
return '{} = {}({})'.format(output, op, x)
def export_unary(func, args, results, stack, converter):
x = converter.nnef_tensor(args['x'])
op = converter.nnef_op(func)
output = converter.make_tensor(results[0], op)
return '{} = {}({})'.format(output, op, x)
def export_binary(func, args, results, stack, converter):
x = converter.nnef_tensor(args['x'])
y = converter.nnef_tensor(args['y'])
op = converter.nnef_op(func)
output = converter.make_tensor(results[0], op)
return '{} = {}({}, {})'.format(output, op, x, y)
def export_squared_diff(func, args, results, stack, converter):
x = converter.nnef_tensor(args['x'])
y = converter.nnef_tensor(args['y'])
output = converter.make_tensor(results[0], 'diff')
return '{} = sqr({} - {})'.format(output, x, y)
def export_where(func, args, results, stack, converter):
c = converter.nnef_tensor(args['condition'])
x = converter.nnef_tensor(args['x'])
y = converter.nnef_tensor(args['y'])
if x is None or y is None:
print_error("arguments must not be None in tf.where() operation", stack)
return None
output = converter.make_tensor(results[0], 'select')
return '{} = select({}, {}, {})'.format(output, c, x, y)
def export_reduce(func, args, results, stack, converter):
tensor = args['input_tensor']
input = converter.nnef_tensor(tensor)
rank = len(tensor.shape.as_list())
axis = args['axis']
axes = sorted(converter.nnef_axes(axis, rank)) if axis else list(range(rank))
op = converter.nnef_op(func)
output = converter.make_tensor(results[0], 'reduce')
return '{} = {}({}, axes = {})'.format(output, op, input, axes)
def export_lrn(func, args, results, stack, converter):
input = converter.nnef_tensor(args['input'])
depth_radius = args['depth_radius']
depth_size = 2 * depth_radius + 1
bias = float(args['bias'])
alpha = float(args['alpha'] * depth_size)
beta = float(args['beta'])
size = [1, depth_size, 1, 1]
output = converter.make_tensor(results[0], 'norm')
return '{} = local_response_normalization({}, size = {}, alpha = {}, beta = {}, bias = {})'\
.format(output, input, size, alpha, beta, bias)
def export_batch_normalization(func, args, results, stack, converter):
input = converter.nnef_tensor(args['x'])
mean = converter.nnef_tensor(args['mean'])
variance = converter.nnef_tensor(args['variance'])
offset = converter.nnef_tensor(args['offset']) if args.get('offset') is not None else float(0)
scale = converter.nnef_tensor(args['scale']) if args.get('scale') is not None else float(1)
epsilon = float(args.get('variance_epsilon', args.get('epsilon')))
output = converter.make_tensor(results[0], 'norm')
return '{} = batch_normalization({}, mean = {}, variance = {}, offset = {}, scale = {}, epsilon = {})'\
.format(output, input, mean, variance, offset, scale, epsilon)
def export_l2_normalization(func, args, results, stack, converter):
input = converter.nnef_tensor(args['x'])
axes = sorted(converter.nnef_axes(args['dim']))
epsilon = float(args.get('epsilon'))
output = converter.make_tensor(results[0], 'norm')
return "{} = l2_normalization({}, axes = {}, bias = {})".format(output, input, axes, epsilon)
def export_matmul(func, args, results, stack, converter):
A = converter.nnef_tensor(args['a'])
B = converter.nnef_tensor(args['b'])
trA = converter.nnef_bool(args['transpose_a'])
trB = converter.nnef_bool(args['transpose_b'])
output = converter.make_tensor(results[0], 'matmul')
return '{} = matmul({}, {}, trA = {}, trB = {})'.format(output, A, B, trA, trB)
def export_assign(func, args, results, stack, converter):
ref = converter.nnef_tensor(args['ref'])
value = converter.nnef_tensor(args['value'])
output = converter.make_tensor(results[0], 'assign')
return '{} = update({}, {})'.format(output, ref, value)
def export_add_n(func, args, results, stack, converter):
inputs = args['inputs']
value = converter.nnef_ids([converter.nnef_tensor(input) for input in inputs])
output = converter.make_tensor(results[0], 'add')
return '{} = add_n({})'.format(output, value)
def export_bias_add(func, args, results, stack, converter):
input = converter.nnef_tensor(args['value'])
bias = converter.nnef_tensor(args['bias'])
output = converter.make_tensor(results[0], 'add')
return '{} = add({}, {})'.format(output, input, bias)
def export_concat(func, args, results, stack, converter):
values = args['values']
rank = values[0].shape.ndims
axis = converter.nnef_axis(args['axis'], rank)
parts = converter.nnef_ids([converter.nnef_tensor(value) for value in values])
output = converter.make_tensor(results[0], 'concat')
return '{} = concat({}, axis = {})'.format(output, parts, axis)
def export_split(func, args, results, stack, converter):
value = args['value']
whole = converter.nnef_tensor(value)
num_or_sizes = args['num_or_size_splits']
ratios = num_or_sizes if isinstance(num_or_sizes, list) else [1] * num_or_sizes
rank = value.shape.ndims
axis = converter.nnef_axis(args['axis'], rank)
output = converter.nnef_ids([converter.make_tensor(result, 'split') for result in results[0]])
return '{} = split({}, axis = {}, ratios = {})'.format(output, whole, axis, ratios)
def export_softmax(func, args, results, stack, converter):
logits = args['logits']
rank = len(logits.shape.as_list())
axis = sorted(converter.nnef_axes(args.get('dim', -1), rank))
parts = converter.nnef_tensor(logits)
output = converter.make_tensor(results[0], 'softmax')
return '{} = softmax({}, axes = {})'.format(output, parts, axis)
def export_moments(func, args, results, stack, converter):
value = args['x']
input = converter.nnef_tensor(value)
rank = value.shape.ndims
axes = sorted(converter.nnef_axes(args['axes'], rank))
mean = converter.make_tensor(results[0], 'mean')
variance = converter.make_tensor(results[1], 'variance')
return "{}, {} = moments({}, axes = {})".format(mean, variance, input, axes)
def export_reshape(func, args, results, stack, converter):
input = converter.nnef_tensor(args['tensor'])
shape = converter.nnef_shape(args['shape'], stack=stack)
output = converter.make_tensor(results[0], 'reshape')
return '{} = reshape({}, shape = {})'.format(output, input, shape)
def export_flatten(func, args, results, stack, converter):
value = args['inputs']
input = converter.nnef_tensor(value)
output = converter.make_tensor(results[0], 'reshape')
return '{} = reshape({}, shape = [0, -1])'.format(output, input)
def export_expand_dims(func, args, results, stack, converter):
value = args['input']
rank = value.shape.ndims
input = converter.nnef_tensor(value)
axis = args['axis']
if axis is None:
axis = rank
shape = value.shape.as_list()
shape.insert(axis, 1)
shape = converter.nnef_shape(shape)
output = converter.make_tensor(results[0], 'reshape')
return '{} = reshape({}, shape = {})'.format(output, input, shape)
def export_squeeze(func, args, results, stack, converter):
value = args['input']
input = converter.nnef_tensor(value)
axis = args['axis']
if axis is not None:
axis = sorted(axis)
else:
shape = value.shape.as_list()
axis = [i for i in range(len(shape)) if shape[i] == 1]
rank = value.shape.ndims
if axis == list(range(rank - 1)) or axis == list(range(1,rank - 1)):
converter.make_passthrough_tensor(value, results[0])
return None
axes = converter.nnef_axes(axis, rank)
shape = ''
for a in range(0,rank):
if a not in axes:
if len(shape) != 0:
shape += ', '
shape += 'shape_of({})[{}]'.format(input, a)
output = converter.make_tensor(results[0], 'reshape')
return '{} = reshape({}, shape = [{}])'.format(output, input, shape)
def export_transpose(func, args, results, stack, converter):
value = args['a']
input = converter.nnef_tensor(value)
rank = value.shape.ndims
perm = args['perm']
if perm is None:
perm = list(reversed(range(rank)))
perm = converter.nnef_axes(perm, rank)
p = list(perm)
for i in range(len(perm)):
perm[converter.nnef_axis(i, rank)] = p[i]
output = converter.make_tensor(results[0], 'trans')
return '{} = transpose({}, perm = {})'.format(output, input, perm)
def export_resize_images(func, args, results, stack, converter):
value = args['images']
input = converter.nnef_tensor(value)
size = args['size']
method = args['method']
aligned = args['align_corners']
if isinstance(size, tf.Tensor):
print_error('cannot handle dynamic target size in tf.image.resize()', stack)
return None
input_size = [s.value if isinstance(s,tf.Dimension) else int(s) for s in value.shape[1:-1]]
size = [s.value if isinstance(s, tf.Dimension) else int(s) for s in size]
if size[0] == input_size[0] and size[1] == input_size[1]:
converter.make_passthrough_tensor(value, results[0])
return None
if (size[0] > input_size[0] and size[1] < input_size[1]) or (size[0] < input_size[0] and size[1] > input_size[1]):
print_error("resize must be up or down-sampling", stack)
return None
if size[0] > input_size[0]:
if size[0] % input_size[0] or size[1] % input_size[1]:
print_error('only integer factor resize allowed', stack)
return None
factor = [size[0] // input_size[0], size[1] // input_size[1]]
output = converter.make_tensor(results[0], 'upsample')
if method == tf.image.ResizeMethod.BILINEAR:
return "{} = multilinear_upsample({}, factor = {}, method = '{}', border = 'replicate')"\
.format(output, input, factor, 'aligned' if aligned else 'asymmetric')
elif method == tf.image.ResizeMethod.NEAREST_NEIGHBOR:
return "{} = nearest_upsample({}, factor = {})".format(output, input, factor)
else:
print_error("unsupported upsample method '{}'".format(method), stack)
return None
else:
if input_size[0] % size[0] or input_size[1] % size[1]:
print_error('only integer factor resize allowed', stack)
return None
factor = [input_size[0] // size[0], input_size[1] // size[1]]
output = converter.make_tensor(results[0], 'downsample')
if method == tf.image.ResizeMethod.AREA:
return "{} = area_downsample({}, factor = {})".format(output, input, factor)
elif method == tf.image.ResizeMethod.NEAREST_NEIGHBOR:
return "{} = nearest_downsample({}, factor = {})".format(output, input, factor)
else:
print_error("unsupported downsample method '{}'".format(method), stack)
return None
def export_resize_bilinear(func, args, results, stack, converter):
args['method'] = tf.image.ResizeMethod.BILINEAR
return export_resize_images(func, args, results, stack, converter)
def export_resize_bicubic(func, args, results, stack, converter):
args['method'] = tf.image.ResizeMethod.BICUBIC
return export_resize_images(func, args, results, stack, converter)
def export_resize_nearest(func, args, results, stack, converter):
args['method'] = tf.image.ResizeMethod.NEAREST_NEIGHBOR
return export_resize_images(func, args, results, stack, converter)
def export_resize_area(func, args, results, stack, converter):
args['method'] = tf.image.ResizeMethod.AREA
return export_resize_images(func, args, results, stack, converter)
def export_space_to_batch(func, args, results, stack, converter):
input = converter.nnef_tensor(args['input'])
block_shape = args['block_shape']
paddings = args['paddings']
output = converter.make_tensor(results[0], 'space2batch')
return "{} = space2batch({}, block_shape = {}, paddings = {})".format(output, input, block_shape, paddings)
def export_batch_to_space(func, args, results, stack, converter):
input = converter.nnef_tensor(args['input'])
block_shape = args['block_shape']
output = converter.make_tensor(results[0], 'batch2space')
return "{} = batch2space({}, block_shape = {})".format(output, input, block_shape)
DefaultExporters =\
{
tf.Variable: (export_variable, 'variable'),
tf.get_variable: (export_variable, 'variable'),
tf.placeholder: (export_placeholder, 'external'),
tf.constant: (export_constant, 'constant'),
tf.identity: (export_passthrough, 'input'),
tf.concat: (export_concat, 'concat'),
tf.split: (export_split, 'split'),
tf.reshape: (export_reshape, 'reshape'),
tf.squeeze: (export_squeeze, 'reshape'),
tf.expand_dims: (export_expand_dims, 'reshape'),
tf.transpose: (export_transpose, 'transpose'),
tf.stop_gradient: (export_passthrough, 'input'),
tf.cast: (export_passthrough, 'x'),
tf.pad: (export_passthrough, 'tensor'),
tf.add: (export_binary, 'add'),
tf.subtract: (export_binary, 'sub'),
tf.multiply: (export_binary, 'mul'),
tf.divide: (export_binary, 'div'),
tf.pow: (export_binary, 'pow'),
tf.squared_difference: (export_squared_diff, 'sqr'),
tf.logical_and: (export_binary, 'and'),
tf.logical_or: (export_binary, 'or'),
tf.negative: (export_unary, 'neg'),
tf.logical_not: (export_unary, 'not'),
tf.abs: (export_unary, 'abs'),
tf.sign: (export_unary, 'sign'),
tf.exp: (export_unary, 'exp'),
tf.log: (export_unary, 'log'),
tf.sqrt: (export_unary, 'sqrt'),
tf.rsqrt: (export_unary, 'rsqrt'),
tf.square: (export_unary, 'sqr'),
tf.floor: (export_unary, 'floor'),
tf.ceil: (export_unary, 'ceil'),
tf.round: (export_unary, 'round'),
tf.where: (export_where, 'select'),
tf.greater: (export_binary, 'gt'),
tf.greater_equal: (export_binary, 'ge'),
tf.less: (export_binary, 'lt'),
tf.less_equal: (export_binary, 'le'),
tf.equal: (export_binary, 'eq'),
tf.not_equal: (export_binary, 'ne'),
tf.minimum: (export_binary, 'min'),
tf.maximum: (export_binary, 'max'),
tf.assign: (export_assign, 'update'),
tf_ops.add: (export_binary, 'add'),
tf_ops.sub: (export_binary, 'sub'),
tf_ops.mul: (export_binary, 'mul'),
tf_ops.div: (export_binary, 'div'),
tf_ops.real_div: (export_binary, 'div'),
tf_ops._pow: (export_binary, 'pow'),
tf_ops.logical_and: (export_binary, 'and'),
tf_ops.logical_or: (export_binary, 'or'),
tf_ops.neg: (export_unary, 'neg'),
tf_ops.reciprocal: (export_unary, 'rcp'),
tf_ops.logical_not: (export_unary, 'not'),
tf_ops._abs: (export_unary, 'abs'),
tf_ops.sign: (export_unary, 'sign'),
tf_ops.exp: (export_unary, 'exp'),
tf_ops.log: (export_unary, 'log'),
tf_ops.square: (export_unary, 'sqr'),
tf_ops.floor: (export_unary, 'floor'),
tf_ops.ceil: (export_unary, 'ceil'),
tf_ops.round: (export_unary, 'round'),
tf_ops.greater: (export_binary, 'gt'),
tf_ops.greater_equal: (export_binary, 'ge'),
tf_ops.less: (export_binary, 'lt'),
tf_ops.less_equal: (export_binary, 'le'),
tf_ops.equal: (export_binary, 'eq'),
tf_ops.not_equal: (export_binary, 'ne'),
tf_ops.sqrt: (export_unary, 'sqrt'),
tf_ops.rsqrt: (export_unary, 'rsqrt'),
tf.sigmoid: (export_unary, 'sigmoid'),
tf.tanh: (export_unary, 'tanh'),
tf.reduce_sum: (export_reduce, 'sum_reduce'),
tf.reduce_mean: (export_reduce, 'mean_reduce'),
tf.reduce_max: (export_reduce, 'max_reduce'),
tf.matmul: (export_matmul, 'matmul'),
tf.add_n: (export_add_n, 'add_n'),
tf.nn.sigmoid: (export_unary, 'sigmoid'),
tf.nn.tanh: (export_unary, 'tanh'),
tf.nn.elu: (export_activation, 'elu'),
tf.nn.relu: (export_activation, 'relu'),
tf.nn.softsign: (export_activation, 'softsign'),
tf.nn.softplus: (export_activation, 'softplus'),
tf.nn.conv1d: (export_conv, 'conv'),
tf.nn.conv2d: (export_conv, 'conv'),
tf.nn.conv3d: (export_conv, 'conv'),
tf.nn.convolution: (export_convolution, 'conv'),
tf.nn.conv2d_transpose: (export_conv, 'deconv'),
tf.nn.conv3d_transpose: (export_conv, 'deconv'),
tf.nn.depthwise_conv2d: (export_conv, 'planewise_conv'),
tf.nn.depthwise_conv2d_native: (export_conv, 'planewise_conv'),
tf.nn.separable_conv2d: (export_separable_conv, 'conv'),
tf.nn.max_pool: (export_pool, 'max_pool'),
tf.nn.max_pool_with_argmax: (export_pool, 'max_pool_with_indices'),
tf.nn.avg_pool: (export_pool, 'avg_pool'),
tf.nn.dropout: (export_passthrough, 'x'),
tf.nn.bias_add: (export_bias_add, 'add'),
tf.nn.lrn: (export_lrn, 'local_response_normalization'),
tf.nn.local_response_normalization: (export_lrn, 'local_response_normalization'),
tf.nn.batch_normalization: (export_batch_normalization, 'batch_normalization'),
tf.nn.fused_batch_norm: (export_batch_normalization, 'batch_normalization'),
tf.nn.l2_normalize: (export_l2_normalization, 'l2_normalization'),
tf.nn.softmax: (export_softmax, 'softmax'),
tf.nn.moments: (export_moments, 'moments'),
tf.image.resize_images: export_resize_images,
tf.image.resize_bilinear: export_resize_bilinear,
tf.image.resize_nearest_neighbor: export_resize_nearest,
tf.image.resize_bicubic: export_resize_bicubic,
tf.image.resize_area: export_resize_area,
tf.space_to_batch: (export_passthrough, 'input'),
tf.space_to_batch_nd: (export_passthrough, 'input'),
tf.batch_to_space: export_skip,
tf.batch_to_space_nd: export_skip,
tf_layers.softmax: (export_softmax, 'softmax'),
tf_layers.flatten: (export_flatten, 'reshape'),
}
if tf_version_greater(1,3):
DefaultExporters.update(
{
tf.sinh: (export_unary, 'sinh'),
tf.cosh: (export_unary, 'cosh')
})
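# Drop-in replacement for tf.nn.dynamic_rnn that unrolls the time loop into
# ordinary ops (split/squeeze/where/concat) so that the tracer can observe and
# export each step; export_network() installs it when unroll_rnn=True.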
def unrolled_rnn(cell, inputs, sequence_length=None, initial_state=None, dtype=tf.float32, scope=None):
if sequence_length is None:
sequence_length = tf.constant(shape=[inputs.shape[0]], value=[float(inputs.shape[1])], dtype=tf.float32)
split_inputs = tf.split(inputs, axis=1, num_or_size_splits=inputs.shape[1])
if initial_state is not None:
c, h = initial_state
else:
c = tf.zeros(shape=[inputs.shape[0], inputs.shape[2]], dtype=dtype)
h = tf.zeros(shape=[inputs.shape[0], inputs.shape[2]], dtype=dtype)
_c = c
_h = h
output_list = []
with tf.variable_scope(scope or "rnn"):
for index, input in enumerate(split_inputs):
output, (c, h) = cell(tf.squeeze(input, axis=[1]), (c, h))
output_list.append(output)
condition = tf.equal(sequence_length, index + 1)
_c = tf.where(condition, c, _c)
_h = tf.where(condition, h, _h)
outputs = tf.concat(output_list, axis=1)
return outputs, (_c, _h)
def trace_invocations(func, functions, handler=None):
systrace = sys.gettrace()
trace = InvocationTrace(functions, handler)
sys.settrace(trace)
results = func()
sys.settrace(systrace)
if isinstance(results, (tf.Tensor, tf.Variable)):
outputs = { 'output': results }
elif isinstance(results, (list, tuple)):
outputs = {}
for i, result in enumerate(results):
if isinstance(result, (tf.Tensor, tf.Variable)):
outputs['output' + str(i+1)] = result
    elif isinstance(results, dict):
        outputs = {}
        for name, result in results.items():
            if isinstance(result, (tf.Tensor, tf.Variable)):
                outputs[name] = result
    else:
        outputs = {}  # guard against net_func returning nothing usable
    return trace.invocations, outputs
def enumerate_dependencies(dependencies, targets, exclusions):
q = Queue()
s = set()
def insert(tensor,func,stack):
if tensor not in s:
if isinstance(tensor, tf.Variable):
tensor = tensor.value()
q.put((tensor,func,stack))
s.add(tensor)
for target in targets:
insert(target,None,None)
while not q.empty():
tensor, func, stack = q.get()
invocation = dependencies.get(tensor)
if invocation is None:
if func:
op = func.__module__ + '.' + func.__name__
print_error("tensor '{}' used by operation {} is not the result of any exported operation".format(tensor.name, op), stack)
else:
print_error("output tensor '{}' is not the result of any exported operation".format(tensor.name), stack)
continue
func, args, results, stack = invocation
exc = exclusions.get(func)
for key, arg in args.items():
if exc is not None and key in exc:
continue
if isinstance(arg, (list, tuple)):
for a in arg:
if isinstance(a, (tf.Tensor, tf.Variable)):
insert(a, func, stack)
elif isinstance(arg, (tf.Tensor, tf.Variable)):
insert(arg, func, stack)
return s
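# The helpers below emit the NNEF binary tensor header in this layout:
#   2 bytes magic (0x4E 0xEF), 2 bytes version (major, minor)
#   uint32 total header length
#   uint32 rank, then rank x uint32 extents
#   uint8 quantized flag, uint8 bits per item,
#   uint16 quantization-string length, then the string itself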
def write_nnef_version(file, major, minor):
np.asarray([0x4E, 0xEF], dtype=np.uint8).tofile(file)
np.asarray([major,minor], dtype=np.uint8).tofile(file)
def write_nnef_hdrlen(file, rank, quantization=''):
length = 4 + 4 + (rank + 1) * 4 + 4 + len(quantization)
np.asarray([length], dtype=np.uint32).tofile(file)
def write_nnef_tensor_shape(file, shape):
np.asarray([len(shape)], dtype=np.uint32).tofile(file)
np.asarray(shape, dtype=np.uint32).tofile(file)
def write_nnef_tensor_dtype(file, bits, quantization=''):
    quantized = 1 if quantization != '' else 0
    np.asarray([quantized, bits], dtype=np.uint8).tofile(file)
    np.asarray([len(quantization)], dtype=np.uint16).tofile(file)
    if quantization:
        # the file is opened in binary mode, so the string must be encoded
        file.write(quantization.encode('ascii'))
def write_nnef_tensor(filename, tensor, is_filter, is_broadcast):
directory = os.path.dirname(filename)
if not os.path.exists(directory):
os.makedirs(directory)
with open(filename, "w") as file:
tensor = TF2NNEFConverter.nnef_tensor_shuffle_dims(tensor, is_filter, is_broadcast)
write_nnef_version(file, major=1, minor=0)
write_nnef_hdrlen(file, len(tensor.shape))
write_nnef_tensor_shape(file, tensor.shape)
write_nnef_tensor_dtype(file, bits=32)
tensor.astype(np.float32).tofile(file)
def export_network(net_func, checkpoint=None, custom_exporters={}, custom_fragments='', output_path=None, compress=False, verbose=True, unroll_rnn=True):
if unroll_rnn:
dynamic_rnn_func = tf.nn.dynamic_rnn
tf.nn.dynamic_rnn = unrolled_rnn
exporters = DefaultExporters
if custom_exporters:
exporters.update(custom_exporters)
if output_path is None and checkpoint is not None:
output_path = os.path.splitext(checkpoint)[0] + '-nnef'
if checkpoint is not None and not os.path.exists(output_path):
os.makedirs(output_path)
def trace_handler(func, args, results, stack):
if func == tf.Variable or func == tf.get_variable:
name = args['name']
if name is not None:
scope = tf.get_variable_scope().name
args['name'] = scope + '/' + name if len(scope) != 0 else name
if verbose:
sys.stdout.write("Tracing invocations...")
sys.stdout.flush()
invocations, outputs = trace_invocations(net_func, exporters.keys(), trace_handler)
if verbose:
sys.stdout.write(" done\n")
sys.stdout.write("Tracing dependencies...")
sys.stdout.flush()
dependencies = {}
for invocation in invocations:
results = invocation[2]
for result in results:
if isinstance(result, (list, tuple)):
for tensor in result:
tensor = tensor.value() if isinstance(tensor, tf.Variable) else tensor
if tensor not in dependencies:
dependencies[tensor] = invocation
else:
tensor = result.value() if isinstance(result, tf.Variable) else result
if tensor not in dependencies:
dependencies[tensor] = invocation
exclusions =\
{
tf.nn.conv2d_transpose: ['output_shape'],
tf.nn.conv3d_transpose: ['output_shape']
}
accessible = enumerate_dependencies(dependencies, outputs.values(), exclusions)
if verbose:
sys.stdout.write(" done\n")
reader = tf.contrib.framework.load_checkpoint(checkpoint) if checkpoint is not None else None
converter = TF2NNEFConverter(dependencies, exporters, reader, output_path)
def has_accessible_result(results):
for result in results:
if isinstance(result, (list, tuple)):
for r in result:
if (r.value() if isinstance(r, tf.Variable) else r) in accessible:
return True
elif (result.value() if isinstance(result, tf.Variable) else result) in accessible:
return True
return False
def all_results_fused(results):
for result in results:
if isinstance(result, (list, tuple)):
for r in result:
if not converter.is_fused(r.value() if isinstance(r, tf.Variable) else r):
return False
elif not converter.is_fused(result.value() if isinstance(result, tf.Variable) else result):
return False
return True
if verbose:
sys.stdout.write("Exporting invocations...")
if checkpoint is None:
sys.stdout.write('\n')
sys.stdout.flush()
params = []
operations = []
returns = []
for name, tensor in sorted(outputs.items()):
returns.append(converter.make_tensor(tensor, name, indexed=False))
for invocation in invocations:
func, args, results, stack = invocation
if has_accessible_result(results) and not all_results_fused(results):
item = exporters.get(func)
if item is not None:
exporter = item[0] if isinstance(item, tuple) else item
text = exporter(func, args, results, stack, converter)
if text is not None:
operations.append(text)
if func == tf.placeholder:
params.append(text[:text.find(' ')])
file = open(output_path + '/graph.nnef', 'w') if checkpoint is not None else sys.stdout
file.write('version 1.0\n\n')
if len(custom_fragments):
file.write(custom_fragments + '\n')
file.write('graph ')
file.write(net_func.__name__)
file.write('( ')
for i in range(len(params)):
if i > 0:
file.write(', ')
file.write(params[i])
file.write(' )')
file.write(' -> ')
file.write('( ')
for i in range(len(returns)):
if i > 0:
file.write(', ')
file.write(returns[i])
file.write(' )\n')
file.write('{\n')
for line in operations:
file.write('\t' + line + ';\n')
file.write('}\n')
if verbose and file != sys.stdout:
sys.stdout.write(" done\n")
    if compress and file != sys.stdout:
        if verbose:
            sys.stdout.write("Compressing files...")
        # close the graph file so its contents are flushed before archiving
        file.close()
        filename = output_path + '.tgz'
        tar = tarfile.open(filename, 'w:gz')
        for fname in os.listdir(output_path):
            tar.add(output_path + '/' + fname, fname)
        tar.close()
        shutil.rmtree(output_path)
        if verbose:
            sys.stdout.write(" done\n")
if unroll_rnn:
tf.nn.dynamic_rnn = dynamic_rnn_func
return converter
def export_activations(converter, checkpoint, feed_dict, output_path=None, evaluate_count_per_iter=25, verbose=True):
path = output_path if output_path is not None else os.path.splitext(checkpoint)[0] + '-activations'
if not os.path.exists(path):
os.makedirs(path)
if verbose:
sys.stdout.write('Evaluating activations..\n')
sys.stdout.flush()
activations = converter.activations
with tf.Session() as sess:
saver = tf.train.Saver()
graph = tf.get_default_graph()
        total = 0
        for k, v in activations:
            if graph.is_fetchable(k) and isinstance(k, tf.Tensor):
                total += 1
        # 'index' walks the activation list ('next' renamed to avoid shadowing the built-in)
        index = 0
        evaluated = 0
        while index < len(activations):
            tensors = {}
            while index < len(activations) and len(tensors) < evaluate_count_per_iter:
                k, v = activations[index]
                if graph.is_fetchable(k) and isinstance(k, tf.Tensor):
                    tensors[v] = k
                    evaluated += 1
                index += 1
saver.restore(sess, checkpoint)
values = sess.run(tensors, feed_dict)
if verbose:
sys.stdout.write("Evaluated {}/{}\n".format(evaluated, total))
sys.stdout.flush()
for k, v in values.items():
filename = path + '/' + k + '.dat'
write_nnef_tensor(filename, v, is_filter=False, is_broadcast=converter.is_broadcast(tensors[k]))
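# --- Hypothetical usage sketch (not part of the original module) ---
# The converter returned by the export routine above can be fed to
# export_activations() to dump every traced tensor as an NNEF .dat file.
# The checkpoint path, placeholder and batch below are illustrative only:
#   converter = ...  # result of the export routine defined above
#   export_activations(converter, checkpoint='model.ckpt',
#                      feed_dict={input_placeholder: batch},
#                      evaluate_count_per_iter=25)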
|
{"hexsha": "6dba4ecf9df8b917131dad5bcd0356a37b65271b", "size": 52508, "ext": "py", "lang": "Python", "max_stars_repo_path": "tutorial_exercises/vgg16-nnef-openvx/tf2nnef.py", "max_stars_repo_name": "relrotciv/openvx_tutorial", "max_stars_repo_head_hexsha": "0e08191776e5c8012f72f63137dc370534e673ba", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 220, "max_stars_repo_stars_event_min_datetime": "2016-03-20T00:48:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T09:46:21.000Z", "max_issues_repo_path": "tutorial_exercises/vgg16-nnef-openvx/tf2nnef.py", "max_issues_repo_name": "relrotciv/openvx_tutorial", "max_issues_repo_head_hexsha": "0e08191776e5c8012f72f63137dc370534e673ba", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 28, "max_issues_repo_issues_event_min_datetime": "2016-06-16T19:17:41.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-16T16:19:18.000Z", "max_forks_repo_path": "tutorial_exercises/vgg16-nnef-openvx/tf2nnef.py", "max_forks_repo_name": "relrotciv/openvx_tutorial", "max_forks_repo_head_hexsha": "0e08191776e5c8012f72f63137dc370534e673ba", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 84, "max_forks_repo_forks_event_min_datetime": "2016-03-24T01:13:07.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-22T04:37:03.000Z", "avg_line_length": 35.5023664638, "max_line_length": 153, "alphanum_fraction": 0.6215052944, "include": true, "reason": "import numpy", "num_tokens": 12484}
|
[STATEMENT]
lemma generalized_sfwSomeD: "generalized_sfw fw p = Some (r,d) \<Longrightarrow> (r,d) \<in> set fw \<and> simple_matches r p"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. generalized_sfw fw p = Some (r, d) \<Longrightarrow> (r, d) \<in> set fw \<and> simple_matches r p
[PROOF STEP]
unfolding generalized_sfw_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. find (\<lambda>(m, a). simple_matches m p) fw = Some (r, d) \<Longrightarrow> (r, d) \<in> set fw \<and> simple_matches r p
[PROOF STEP]
by(induction fw) (simp split: if_split_asm)+
|
{"llama_tokens": 225, "file": "Simple_Firewall_Generic_SimpleFw", "length": 2}
|
using EzXML
# Harvest arXiv metadata via OAI-PMH, following resumption tokens
# until the full result set has been paged through.
function getxml(; from="", until="")
    baseuri = "http://export.arxiv.org/oai2?verb=ListRecords"
    uri = "$baseuri&metadataPrefix=arXiv"
    if !isempty(from)
        @assert !isempty(until)
        uri = "$uri&from=$from&until=$until"
    end
    while true
        xml = readxml(download(uri))
        # An absent resumptionToken marks the last page of results.
        # (Parsed records are not stored here; only the token is used to page forward.)
        nodes = find(xml, "//resumptionToken")
        isempty(nodes) && break
        token = nodecontent(nodes[1])
        uri = "$baseuri&resumptionToken=$token"
    end
end
# Parse an OAI-PMH response, stripping the namespace attributes that
# the XML parser would otherwise require to be declared.
function readxml(path::String)
    lines = open(readlines, path)
    @assert startswith(lines[2], "<OAI-PMH")
    lines[2] = "<OAI-PMH>"
    for i = 1:length(lines)
        startswith(lines[i], " <arXiv") && (lines[i] = " <arXiv>")
    end
    parsexml(join(lines, "\n"))
end
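# Hypothetical usage sketch (dates are illustrative only):
#   getxml(from="2017-01-01", until="2017-01-07")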
|
{"hexsha": "7e61c70cee25fd2bb0d9f22f4d2c057e28d7d317", "size": 765, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/download.jl", "max_stars_repo_name": "hshindo/ArXivTools.jl", "max_stars_repo_head_hexsha": "c5c7d4e0b44291a07691e760e4d8a2c39b9dd127", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/download.jl", "max_issues_repo_name": "hshindo/ArXivTools.jl", "max_issues_repo_head_hexsha": "c5c7d4e0b44291a07691e760e4d8a2c39b9dd127", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/download.jl", "max_forks_repo_name": "hshindo/ArXivTools.jl", "max_forks_repo_head_hexsha": "c5c7d4e0b44291a07691e760e4d8a2c39b9dd127", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.3793103448, "max_line_length": 65, "alphanum_fraction": 0.5947712418, "num_tokens": 224}
|
#include "modelvisu.h"
#ifndef GLM_ENABLE_EXPERIMENTAL
#define GLM_ENABLE_EXPERIMENTAL
#endif
#include <glm/gtx/euler_angles.hpp>
#include "constants.h"
#include "recenter.h"
#include <iostream>
// Eigen stuff
#include <Eigen/Dense>
#include <Eigen/Eigenvalues>
#include "barycenter.h"
ModelVisu::ModelVisu( QWidget *parent )
: QOpenGLWidget( parent )
{
int major = 3;
int minor = 2;
QSurfaceFormat format;
format.setDepthBufferSize( 24 );
format.setStencilBufferSize( 8 );
format.setVersion( major, minor );
format.setProfile( QSurfaceFormat::CoreProfile );
setFormat( format );
create();
}
void ModelVisu::initializeGL()
{
glewExperimental = GL_TRUE;
GLenum initGlew{glewInit()};
if ( initGlew != GLEW_OK )
{
throw std::runtime_error(
reinterpret_cast<const char *>( glewGetErrorString( initGlew ) ) );
}
simpleShader_.setVertexShader( "shader/color.vert" );
simpleShader_.setFragmentShader( "shader/color.frag" );
try
{
simpleShader_.Load();
}
catch ( std::exception const &e )
{
std::cerr << e.what() << "\n";
}
catch ( std::string const &errorStr )
{
std::cerr << errorStr << "\n";
}
catch ( ... )
{
std::cerr << "unknown exception\n";
}
glClearColor( .0f, 0.f, 0.f, .0f );
computeCloudAndPca_();
#if 0
ellipsoid.setScale( glm::vec3{2.f, 1.f, 3.f} );
ellipsoid.setCenter( glm::vec3{-5.f, 0.f, 0.f} );
ellipsoid.setOrientation( transformation_.rotate( glm::vec3{0.f, 1.f, 0.f}, M_PI / 6.f ) );
cloud_ = ellipsoid.computeTransform();
cloud_.generateCloud();
#endif
look_ = transformation_.lookAt( glm::vec3{0.f, -10.f, 0.f}, glm::vec3{0.f, 0.f, 0.f},
glm::vec3{0.f, 0.f, 1.f} );
#if 0
cloudNode_ = std::make_shared<MeshNode<Nuage>>( cloud_ );
cloud_.refreshBuffer();
cloudNode_->updateVertexBuffer();
pca.computePca( std::cbegin( cloud_ ), std::cend( cloud_ ) );
std::cout << "Eigen values are: \n";
auto const &eigenValues = pca.eigenValues();
std::cout << eigenValues << "\n";
std::cout << "Eigen vectors are: \n";
auto const &eigenVector = pca.eigenVectors();
std::cout << eigenVector << "\n";
#endif
}
void ModelVisu::setCenterX( double x )
{
center_[ dirX ] = static_cast<float>( x );
}
void ModelVisu::setCenterY( double y )
{
center_[ dirY ] = static_cast<float>( y );
}
void ModelVisu::setCenterZ( double z )
{
center_[ dirZ ] = static_cast<float>( z );
}
void ModelVisu::setScaleX( double a )
{
    // scale is twice the input value (the input is presumably a semi-axis);
    // cast explicitly for consistency with the other setters
    scale_[ dirX ] = 2.f * static_cast<float>( a );
}
void ModelVisu::setScaleY( double b )
{
    scale_[ dirY ] = 2.f * static_cast<float>( b );
}
void ModelVisu::setScaleZ( double c )
{
    scale_[ dirZ ] = 2.f * static_cast<float>( c );
}
void ModelVisu::setRotateAxisX( double x )
{
rotateAxis_[ dirX ] = static_cast<float>( x );
}
void ModelVisu::setRotateAxisY( double y )
{
rotateAxis_[ dirY ] = static_cast<float>( y );
}
void ModelVisu::setRotateAxisZ( double z )
{
rotateAxis_[ dirZ ] = static_cast<float>( z );
}
void ModelVisu::setRotateAngle( double theta )
{
rotateAngle_ = static_cast<float>( theta );
}
void ModelVisu::setNpoints( int nPoint )
{
nPoints_ = nPoint;
}
void ModelVisu::setDmin( double dmin )
{
dmin_ = static_cast<float>( dmin );
}
void ModelVisu::computeCloud()
{
makeCurrent();
computeCloudAndPca_();
doneCurrent();
update();
}
void ModelVisu::setCamPos1X( double x )
{
pos1_[ dirX ] = static_cast<float>( x );
}
void ModelVisu::setCamPos1Y( double y )
{
pos1_[ dirY ] = static_cast<float>( y );
}
void ModelVisu::setCamPos1Z( double z )
{
pos1_[ dirZ ] = static_cast<float>( z );
}
void ModelVisu::setCamTarget1X( double x )
{
target1_[ dirX ] = static_cast<float>( x );
}
void ModelVisu::setCamTarget1Y( double y )
{
target1_[ dirY ] = static_cast<float>( y );
}
void ModelVisu::setCamTarget1Z( double z )
{
target1_[ dirZ ] = static_cast<float>( z );
}
void ModelVisu::setCamUp1X( double x )
{
up1_[ dirX ] = static_cast<float>( x );
}
void ModelVisu::setCamUp1Y( double y )
{
up1_[ dirY ] = static_cast<float>( y );
}
void ModelVisu::setCamUp1Z( double z )
{
up1_[ dirZ ] = static_cast<float>( z );
}
void ModelVisu::computeCam1()
{
look1_ = transformation_.lookAt( pos1_, target1_, up1_ );
}
void ModelVisu::showCam1()
{
look_ = look1_;
update();
}
void ModelVisu::computeCloudAndPca_()
{
ellipsoid.setScale( scale_ );
ellipsoid.setCenter( center_ );
ellipsoid.setOrientation(
transformation_.rotate( glm::normalize( rotateAxis_ ), rotateAngle_ ) );
ellipsoid.computeCloud( nPoints_, dmin_ );
cloud_ = ellipsoid.computeTransform();
cloud_.generateCloud();
cloudNode_ = std::make_shared<MeshNode<Nuage>>( cloud_ );
cloud_.refreshBuffer();
cloudNode_->updateVertexBuffer();
pca.computePca( std::cbegin( cloud_ ), std::cend( cloud_ ) );
std::cout << "Eigen values are: \n";
auto const &eigenValues = pca.eigenValues();
std::cout << eigenValues << "\n";
std::cout << "Eigen vectors are: \n";
auto const &eigenVector = pca.eigenVectors();
std::cout << eigenVector << "\n";
}
void ModelVisu::resizeGL( int width, int height )
{
    // Rebuild the projection matrix for the new aspect ratio.
    float near = 0.01f;
    float far = 100.f;
    float fov = 70.f;
    projection_
        = transformation_.perspective( fov, static_cast<float>( width ) / height, near, far );
}
void ModelVisu::paintGL()
{
    // Clear the buffers and draw the point cloud with the color shader.
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
glEnable( GL_DEPTH_TEST );
simpleShader_.Enable();
glm::mat4 mvp = projection_ * look_;
auto mvpLoc = simpleShader_.GetUniformLocation( "MVP" );
glUniformMatrix4fv( mvpLoc, 1, GL_FALSE, glm::value_ptr( mvp ) );
cloudNode_->drawPoints();
simpleShader_.Disable();
}
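// --- Hypothetical companion shader sketch (not part of this file) ---
// shader/color.vert is loaded above but not shown here; a minimal vertex
// shader compatible with the "MVP" uniform set in paintGL() could look like:
//   #version 150
//   uniform mat4 MVP;
//   in vec3 position;
//   void main() { gl_Position = MVP * vec4( position, 1.0 ); }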
|
{"hexsha": "7f57434df4b79bd1563a937c10a22dea64142916", "size": 5889, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/gui/modelvisu.cpp", "max_stars_repo_name": "fossabot/datura", "max_stars_repo_head_hexsha": "d8a09c4d5ae13a6984a5a8e89c69ecb8a6023037", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/gui/modelvisu.cpp", "max_issues_repo_name": "fossabot/datura", "max_issues_repo_head_hexsha": "d8a09c4d5ae13a6984a5a8e89c69ecb8a6023037", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2019-02-12T13:12:00.000Z", "max_issues_repo_issues_event_max_datetime": "2019-02-12T13:12:00.000Z", "max_forks_repo_path": "src/gui/modelvisu.cpp", "max_forks_repo_name": "fossabot/datura", "max_forks_repo_head_hexsha": "d8a09c4d5ae13a6984a5a8e89c69ecb8a6023037", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2019-02-12T13:10:06.000Z", "max_forks_repo_forks_event_max_datetime": "2019-02-12T13:10:06.000Z", "avg_line_length": 20.4479166667, "max_line_length": 95, "alphanum_fraction": 0.6303277297, "num_tokens": 1729}
|
import os
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('TkAgg')  # select the backend before pyplot is imported
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from collections import OrderedDict
from tensorflow.python.keras.models import load_model
from pkg_resources import resource_filename
from transomaly.prepare_input import PrepareInputArrays
from transomaly.loss_functions import mean_squared_error, chisquare_loss, mean_squared_error_over_error
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
COLPB = {'g': 'tab:green', 'r': 'tab:red'}
MARKPB = {'g': 'o', 'r': 's', 'z': 'd'}
ALPHAPB = {'g': 0.3, 'r': 1., 'z': 1}
CLASS_COLOR = {'SNIa-norm': 'tab:green', 'SNIbc': 'tab:orange', 'SNII': 'tab:blue', 'SNIIn': 'blue',
'SNIa-91bg': 'tab:red', 'SNIa-x': 'bisque', 'point-Ia': 'tab:brown', 'Kilonova': '#aaffc3',
'SLSN-I': 'tab:olive', 'PISN': 'tab:cyan', 'ILOT': '#FF1493', 'CART': 'navy', 'TDE': 'tab:pink',
'AGN': 'tab:purple'}
# npred = 7
# model_filepath_onepoint_infuture = "/Users/danmuth/OneDrive - University of Cambridge/PycharmProjects/transomaly/plots/model__ci()_ns1_c(1,)/keras_model_epochs300_onepoint_pred7timesteps_infuture_normalised_predict_last49_timesteps_nodropout_100lstmneurons/keras_model_epochs300_onepoint_pred7timesteps_infuture_normalised_predict_last49_timesteps_nodropout_100lstmneurons.hdf5"
model_filepath = "/Users/danmuth/OneDrive - University of Cambridge/PycharmProjects/transomaly/plots/model__ci()_ns1_c(1,)/keras_model_epochs500_pred2timesteps_normalised_predict_last49_timesteps_nodropout_100lstmneurons/keras_model_epochs500_pred2timesteps_normalised_predict_last49_timesteps_nodropout_100lstmneurons.hdf5"
model = load_model(model_filepath, custom_objects={'loss': mean_squared_error()})
passbands = ('g','r')
contextual_info = ()
X = np.array([np.array([[0.09158034, 0.07176773],
[0.09008677, 0.07137485],
[0.08917016, 0.0727186 ],
[0.09029362, 0.07340094],
[0.08999084, 0.07224263],
[0.08900606, 0.07197019],
[0.08739904, 0.07095805],
[0.08682939, 0.07071227],
[0.08567506, 0.07031706],
[0.08386102, 0.07021409],
[0.0836062 , 0.0698832 ],
[0.08332578, 0.07050169],
[0.08397429, 0.06937913],
[0.0860095 , 0.0716971 ],
[0.09076107, 0.07780995],
[0.09871331, 0.08934081],
[0.11663096, 0.11012718],
[0.15700709, 0.15327507],
[0.23417453, 0.22872161],
[0.35862168, 0.33963544],
[0.51792838, 0.48456903],
[0.68616083, 0.64401235],
[0.81866526, 0.79203408],
[0.90768435, 0.89154676],
[0.93726188, 0.94250969],
[0.92312219, 0.95238174],
[0.85896026, 0.93542825],
[0.74499556, 0.87812468],
[0.62114839, 0.78786493],
[0.50529031, 0.6932507 ],
[0.40645818, 0.6036984 ],
[0.32993884, 0.52479434],
[0.27163636, 0.45807863],
[0.22780797, 0.40140124],
[0.19817062, 0.35339631],
[0.17674938, 0.31370617],
[0.16142444, 0.27973599],
[0.14923072, 0.25118097],
[0.13910386, 0.22674725],
[0.13288632, 0.20717057],
[0.12727512, 0.191151 ],
[0.12253798, 0.17674126],
[0.11748478, 0.16368262],
[0.11428479, 0.15247652],
[0.110435 , 0.14285344],
[0.10726069, 0.13410306],
[0.10317767, 0.12546451],
[0.10019489, 0.11729226],
[0.09654251, 0.11025106]])])
y = np.array([np.array([[0.09008677, 0.07137485],
[0.08917016, 0.0727186 ],
[0.09029362, 0.07340094],
[0.08999084, 0.07224263],
[0.08900606, 0.07197019],
[0.08739904, 0.07095805],
[0.08682939, 0.07071227],
[0.08567506, 0.07031706],
[0.08386102, 0.07021409],
[0.0836062 , 0.0698832 ],
[0.08332578, 0.07050169],
[0.08397429, 0.06937913],
[0.0860095 , 0.0716971 ],
[0.09076107, 0.07780995],
[0.09871331, 0.08934081],
[0.11663096, 0.11012718],
[0.15700709, 0.15327507],
[0.23417453, 0.22872161],
[0.35862168, 0.33963544],
[0.51792838, 0.48456903],
[0.68616083, 0.64401235],
[0.81866526, 0.79203408],
[0.90768435, 0.89154676],
[0.93726188, 0.94250969],
[0.92312219, 0.95238174],
[0.85896026, 0.93542825],
[0.74499556, 0.87812468],
[0.62114839, 0.78786493],
[0.50529031, 0.6932507 ],
[0.40645818, 0.6036984 ],
[0.32993884, 0.52479434],
[0.27163636, 0.45807863],
[0.22780797, 0.40140124],
[0.19817062, 0.35339631],
[0.17674938, 0.31370617],
[0.16142444, 0.27973599],
[0.14923072, 0.25118097],
[0.13910386, 0.22674725],
[0.13288632, 0.20717057],
[0.12727512, 0.191151 ],
[0.12253798, 0.17674126],
[0.11748478, 0.16368262],
[0.11428479, 0.15247652],
[0.110435 , 0.14285344],
[0.10726069, 0.13410306],
[0.10317767, 0.12546451],
[0.10019489, 0.11729226],
[0.09654251, 0.11025106],
[0.09630747, 0.10437187]])])
timesX = np.array([np.array([-70., -67., -64., -61., -58., -55., -52., -49., -46., -43., -40.,
-37., -34., -31., -28., -25., -22., -19., -16., -13., -10., -7.,
-4., -1., 2., 5., 8., 11., 14., 17., 20., 23., 26.,
29., 32., 35., 38., 41., 44., 47., 50., 53., 56., 59.,
62., 65., 68., 71., 74., 77.])])
objids = np.array(['median_Ia'])
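# Note: the X, y and timesX arrays below intentionally overwrite the
# 'median_Ia' example above with a second test object ('1_6222236').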
X = np.array([np.array([[0.00448964, 0. ],
[0. , 0.02469979],
[0.00713832, 0.0510595 ],
[0.02991802, 0.08449011],
[0.05011106, 0.09195332],
[0.05702283, 0.07957474],
[0.05513788, 0.05611958],
[0.04902122, 0.0294538 ],
[0.04288751, 0.00993177],
[0.04127183, 0.01818543],
[0.05353569, 0.04212558],
[0.06324364, 0.06195425],
[0.05741975, 0.07571231],
[0.03920042, 0.08415738],
[0.02232076, 0.08873083],
[0.01912765, 0.08509039],
[0.03159054, 0.07017496],
[0.04909139, 0.05580469],
[0.06360378, 0.05487265],
[0.08042658, 0.08175915],
[0.14612581, 0.15545786],
[0.32463802, 0.29112003],
[0.5616513 , 0.4847463 ],
[0.78721409, 0.70134517],
[0.94289831, 0.88820695],
[1. , 0.98685417],
[0.9617599 , 1. ],
[0.84395464, 0.95538324],
[0.70269411, 0.87417796],
[0.56370832, 0.77777756],
[0.43422203, 0.6788543 ],
[0.31954263, 0.58431753],
[0.22572829, 0.5003608 ],
[0.15997532, 0.43230079],
[0.13003542, 0.3766077 ],
[0.11756719, 0.32962512],
[0.1001643 , 0.29821954],
[0.08371692, 0.28562479],
[0.07356015, 0.27645137],
[0.06571214, 0.2572178 ],
[0.05947788, 0.23700196],
[0.05453582, 0.21703187],
[0.05062684, 0.19788037],
[0.04754242, 0.17990707],
[0.04511494, 0.16331596],
[0.04320991, 0.14819947],
[0.04171951, 0.1345717 ],
[0.04055748, 0.12239351],
[0.03965491, 0.11159111]])])
y = np.array([np.array([[0. , 0.02469979],
[0.00713832, 0.0510595 ],
[0.02991802, 0.08449011],
[0.05011106, 0.09195332],
[0.05702283, 0.07957474],
[0.05513788, 0.05611958],
[0.04902122, 0.0294538 ],
[0.04288751, 0.00993177],
[0.04127183, 0.01818543],
[0.05353569, 0.04212558],
[0.06324364, 0.06195425],
[0.05741975, 0.07571231],
[0.03920042, 0.08415738],
[0.02232076, 0.08873083],
[0.01912765, 0.08509039],
[0.03159054, 0.07017496],
[0.04909139, 0.05580469],
[0.06360378, 0.05487265],
[0.08042658, 0.08175915],
[0.14612581, 0.15545786],
[0.32463802, 0.29112003],
[0.5616513 , 0.4847463 ],
[0.78721409, 0.70134517],
[0.94289831, 0.88820695],
[1. , 0.98685417],
[0.9617599 , 1. ],
[0.84395464, 0.95538324],
[0.70269411, 0.87417796],
[0.56370832, 0.77777756],
[0.43422203, 0.6788543 ],
[0.31954263, 0.58431753],
[0.22572829, 0.5003608 ],
[0.15997532, 0.43230079],
[0.13003542, 0.3766077 ],
[0.11756719, 0.32962512],
[0.1001643 , 0.29821954],
[0.08371692, 0.28562479],
[0.07356015, 0.27645137],
[0.06571214, 0.2572178 ],
[0.05947788, 0.23700196],
[0.05453582, 0.21703187],
[0.05062684, 0.19788037],
[0.04754242, 0.17990707],
[0.04511494, 0.16331596],
[0.04320991, 0.14819947],
[0.04171951, 0.1345717 ],
[0.04055748, 0.12239351],
[0.03965491, 0.11159111],
[0.03895684, 0.10206975]])])
timesX = np.array([np.array([-70., -67., -64., -61., -58., -55., -52., -49., -46., -43., -40.,
-37., -34., -31., -28., -25., -22., -19., -16., -13., -10., -7.,
-4., -1., 2., 5., 8., 11., 14., 17., 20., 23., 26.,
29., 32., 35., 38., 41., 44., 47., 50., 53., 56., 59.,
62., 65., 68., 71., 74., 77.])])
objids = np.array(['1_6222236'])
# X[0,:,1][25:] = 0
# X[0,:,0][25:] = 0
# y[0,:,1][24:] = 0
# y[0,:,0][24:] = 0
# X[0,:,2] = 0.04
npred = 2
tidx = -npred+1 if npred != 1 else None
y_pred = model.predict(X[:,:tidx,:2])
idx = 0
sidx = 0
for p in range(50-npred):
# y_pred_correct_format = np.zeros(y.shape)
# y_pred_correct_format[:, :, 0] = np.copy(y_predict[:, p, :49])
# y_pred_correct_format[:, :, 1] = np.copy(y_predict[:, p, :])
fig, (ax1) = plt.subplots(nrows=1, ncols=1)
for pbidx, pb in enumerate(passbands):
plotlabeltest = "ytest:{}".format(pb)
plotlabelpred = "ypred:{}".format(pb)
marker = None # MARKPB[pb] if s == 0 else None
ax1.plot(timesX[sidx][1:], y[sidx][:, pbidx], c=COLPB[pb], lw=1,
label=plotlabeltest, marker=None, markersize=10, alpha=1, linestyle='-')
ax1.plot(timesX[sidx][1:tidx], y_pred[sidx][:, pbidx+2], c=COLPB[pb], lw=1,
label=plotlabelpred, marker=None, markersize=10, alpha=1, linestyle=':')
# ax1.plot(timesX[sidx][(1+p):(1+p+npred)], y_pred[sidx][p][pbidx::2], c=COLPB[pb], lw=1,
# label=plotlabelpred, marker=None, markersize=10, alpha=1, linestyle=':')
ax1.axvspan(timesX[sidx][1+p], timesX[sidx][-1], alpha=0.2, color='grey')
# pbmask = lc['passband'] == pb
# sortedidx = np.argsort(lc[pbmask]['time'].data)
# time = lc[pbmask]['time'].data[sortedidx]
# flux = lc[pbmask]['flux'].data[sortedidx]
# fluxerr = lc[pbmask]['fluxErr'].data[sortedidx]
# ax1.errorbar(time, flux, yerr=fluxerr,
# fmt=".", capsize=0, color=COLPB[pb], label='_nolegend_')
ax1.set_ylabel("Relative flux")
ax1.set_xlabel("Time since trigger [days]")
ax1.legend()
    figdir = 'interesting plots examining what RNN has learned/predict_npred_timesteps_ateachstep/'
    os.makedirs(figdir, exist_ok=True)  # ensure the output directory exists before saving
    plt.savefig(figdir + f'predict-{npred}-timesteps-{objids[0]}_ateachstep_{p}.png')
# plt.show()
|
{"hexsha": "aed2e03907d2feb0e60a11d7939e84c63632febd", "size": 11229, "ext": "py", "lang": "Python", "max_stars_repo_path": "transomaly/predict_several_timesteps_at_each_timestep.py", "max_stars_repo_name": "daniel-muthukrishna/transomaly", "max_stars_repo_head_hexsha": "5ecccd958a11b9c13100190116a8e6ff5fff1fae", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "transomaly/predict_several_timesteps_at_each_timestep.py", "max_issues_repo_name": "daniel-muthukrishna/transomaly", "max_issues_repo_head_hexsha": "5ecccd958a11b9c13100190116a8e6ff5fff1fae", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "transomaly/predict_several_timesteps_at_each_timestep.py", "max_forks_repo_name": "daniel-muthukrishna/transomaly", "max_forks_repo_head_hexsha": "5ecccd958a11b9c13100190116a8e6ff5fff1fae", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.5551839465, "max_line_length": 380, "alphanum_fraction": 0.5638970523, "include": true, "reason": "import numpy", "num_tokens": 4560}
|
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from PyQt5.QtWidgets import *
import numpy as np
import cv2
import math
class Paint_CV:
def __init__(self):
pass
def Filter(self, image, flag, Ksize=None, depth=None, colspace=None, contrast=None, sharpen=None, bitLevel=None, customFilter=None):
if flag==3: #Gaussian Blur
image = cv2.GaussianBlur(image, (Ksize, Ksize), 0)
elif flag==4: #Median Blur
image = cv2.medianBlur(image, Ksize)
elif flag==5: #Average Blur
image = cv2.blur(image, (Ksize, Ksize))
elif flag==6: #Box Filter
image = cv2.boxFilter(image, 0, (Ksize, Ksize))
elif flag==7: #Bilateral Filter
image = cv2.bilateralFilter(image, depth, colspace, colspace)
        elif flag==8: #Contrast
image = cv2.addWeighted(image, contrast, np.zeros(image.shape, image.dtype), 0,0)
        elif flag==9: #Sharpen
kernel = np.ones((Ksize, Ksize), np.float32) * (-1)
kernel[math.floor(Ksize / 2), math.floor(Ksize / 2)] = sharpen
image = cv2.filter2D(image, -1, kernel)
elif flag==10: #Emboss
filter = np.array([[0,1,0],[0,0,0],[0,-1,0]])
image = cv2.filter2D(image, -1, filter)
image += 128
elif flag==11: #Sepia
filter = np.array([[0.272, 0.534, 0.131], [0.349, 0.686, 0.168], [0.393, 0.769, 0.189]])
image = cv2.transform(image, filter)
elif flag==12: #Mexican
filter = np.array([[0,0,-1,0,0],[0,-1,-2,-1,0],[-1,-2,16,-2,-1],[0,-1,-2,-1,0],[0,0,-1,0,0]])
image = cv2.filter2D(image, -1, filter)
        elif flag==13: #Bit-plane slicing
gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
r, c = gray_img.shape
x = np.zeros((r, c, 8), dtype=np.uint8)
x[:, :, bitLevel] = 2 ** bitLevel
r = np.zeros((r, c, 8), dtype=np.uint8)
r[:, :, bitLevel] = cv2.bitwise_and(gray_img, x[:, :, bitLevel])
mask = r[:, :, bitLevel] > 0
r[mask] = 255
img = r[:, :, bitLevel]
image = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        elif flag==14: #Custom kernel
filter = np.array(customFilter)
image = cv2.filter2D(image, -1, filter)
return image
def Histogram(self, image, type, flag):
if flag==1: #Equalize
img_yuv = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)
img_yuv[:,:,0] = cv2.equalizeHist(img_yuv[:,:,0])
image = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)
        elif flag==2: #CLAHE (adaptive histogram equalization)
img_lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
img_lab[:,:,0] = clahe.apply(img_lab[:,:,0])
image = cv2.cvtColor(img_lab, cv2.COLOR_LAB2BGR)
image = self.ConvertColor(type, image)
return image
def CropImage(self, image, coords):
return image[min(coords[1], coords[3]):max(coords[1], coords[3])+1, min(coords[0], coords[2]):max(coords[0], coords[2])+1]
def SaveImage(self, filename, image):
return cv2.imwrite(filename, image)
def LoadImage(self, filepath):
return cv2.imread(filepath)
def ResizeImage(self, image, dim):
return cv2.resize(image, (dim[0], dim[1]))
def ConvertColor(self, type, image):
if type==0:
return image
elif type==1:
return cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
elif type==2:
return cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
elif type==3:
return cv2.cvtColor(image, cv2.COLOR_RGB2HSV)[:,:,0]
elif type==4:
return cv2.cvtColor(image, cv2.COLOR_RGB2HSV)[:,:,1]
elif type==5:
return cv2.cvtColor(image, cv2.COLOR_RGB2HSV)[:,:,2]
elif type==6:
return cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
elif type==7:
return cv2.cvtColor(image, cv2.COLOR_RGB2HLS)[:,:,1]
elif type == 8:
return cv2.cvtColor(image, cv2.COLOR_RGB2LAB)
elif type == 9:
return cv2.cvtColor(image, cv2.COLOR_RGB2LUV)
elif type == 10:
return cv2.cvtColor(image, cv2.COLOR_RGB2YCrCb)
elif type == 11:
return cv2.cvtColor(image, cv2.COLOR_RGB2XYZ)
def OverlayImage(self, image, background, coords):
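        # Clip the overlay against the background bounds before blitting it in place.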
top, bottom, left, right = coords[1], coords[1] + image.shape[0], coords[0], coords[0] + image.shape[1]
if left>background.shape[1] or right<0 or top>background.shape[0] or bottom<0:
return background
if left<0:
image = self.CropImage(image, (abs(left), 0, image.shape[1], image.shape[0]))
left = 0
if right>background.shape[1]:
image = self.CropImage(image, (0, 0, image.shape[1]-(right-background.shape[1])-1, image.shape[0]))
right = background.shape[1]
if top<0:
image = self.CropImage(image, (0, abs(top), image.shape[1], image.shape[0]))
top = 0
if bottom>background.shape[0]:
image = self.CropImage(image, (0, 0, image.shape[1], image.shape[0]-(bottom-background.shape[0])-1))
bottom = background.shape[0]
background[top:bottom, left:right] = image
return background
def RotateImage(self, image, coords, index):
if index < 4:
ang = 0
lst = []
center = (coords[0] + image.shape[1] / 2, coords[1] + image.shape[0] / 2)
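            # ang = (rotation angle in degrees, number of 90-degree turns for np.rot90)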
if index == 1:
ang = -90, 3
elif index == 2:
ang = 90, 1
elif index == 3:
ang = 180, 2
M = cv2.getRotationMatrix2D(center, ang[0], 1)
            corners = np.array([[coords[0], coords[1], 1], [coords[0] + image.shape[1], coords[1], 1], [coords[0] + image.shape[1], coords[1] + image.shape[0], 1], [coords[0], coords[1] + image.shape[0], 1]])
            for corner in corners:
                lst.append(np.array(np.round(M.dot(corner), 0)).astype(int).tolist())
            coords = (min(lst)[0], min(lst)[1], max(lst)[0], max(lst)[1])
image = np.rot90(image, ang[1]).copy()
else:
if index == 4:
image = cv2.flip(image, 0) # flip vertical
elif index == 5:
image = cv2.flip(image, 1) # flip horizontal
return image, coords
    def drawPrimitive(self, image, coords, type, color=(255,255,255), thick=None):  # type: 1=dotted box, 2=dotted line, 3=line, 4=circle, 5=rectangle, 6=triangle, 7=diamond, 8=filled triangle, 9=filled diamond
if type == 1:
color = (150,150,150)
width = 5
thick = 1
LR, UD, dst = self.calcRegion(coords)
if sum(dst) == 0:
return
gap = dst[0] / width
for i in range(math.ceil(gap / 2)):
cv2.line(image, (coords[0] + width * 2 * LR * i, coords[1]), (coords[0] + width * 2 * LR * i + width * LR, coords[1]), color, thick, cv2.LINE_AA)
cv2.line(image, (coords[2] + width * 2 * LR * i * -1, coords[3]), (coords[2] + width * 2 * LR * i * -1 + width * LR * -1, coords[3]), color, thick, cv2.LINE_AA)
gap = dst[1] / width
for i in range(math.ceil(gap / 2)):
cv2.line(image, (coords[0], coords[1] + width * 2 * UD * i), (coords[0], coords[1] + width * 2 * UD * i + width * UD), color, thick, cv2.LINE_AA)
cv2.line(image, (coords[2], coords[3] + width * 2 * UD * i * -1), (coords[2], coords[3] + width * 2 * UD * i * -1 + width * UD * -1), color, thick, cv2.LINE_AA)
elif type==2:
color = (150, 150, 150)
width = 2
LR, UD, dst = self.calcRegion(coords)
gap = dst[0] / width
for i in range(math.ceil(gap / 2)):
cv2.line(image, (coords[0] + width * 2 * LR * i, coords[1]), (coords[0] + width * 2 * LR * i + width * LR, coords[1]), color, 1, cv2.LINE_AA)
gap = dst[1] / width
for i in range(math.ceil(gap / 2)):
cv2.line(image, (coords[0], coords[1] + width * 2 * UD * i), (coords[0], coords[1] + width * 2 * UD * i + width * UD), color, 1, cv2.LINE_AA)
elif type == 3:
cv2.line(image, (coords[0], coords[1]), (coords[2], coords[3]), color, thick, cv2.LINE_AA)
elif type == 5:
cv2.rectangle(image, (coords[0], coords[1]), (coords[2], coords[3]), color, thick, cv2.LINE_AA)
elif type == 4:
center, radius = self.recalc_Center_Radius(coords)
cv2.circle(image, center, max(radius), color, thick, cv2.LINE_AA)
elif type == 6:
cv2.polylines(image, [self.Triangle(coords)], True, color, thick, cv2.LINE_AA)
elif type == 8:
cv2.fillPoly(image, [self.Triangle(coords)], color)
elif type == 7:
cv2.polylines(image, [self.Diamond(coords)], True, color, thick, cv2.LINE_AA)
elif type == 9:
cv2.fillPoly(image, [self.Diamond(coords)], color)
def drawText(self, image, text, coords, fontstyle, scale, color, thick):
font = None
if fontstyle == 0:
font = cv2.FONT_HERSHEY_COMPLEX
elif fontstyle == 1:
font = cv2.FONT_HERSHEY_COMPLEX_SMALL
elif fontstyle == 2:
font = cv2.FONT_HERSHEY_DUPLEX
elif fontstyle == 3:
font = cv2.FONT_HERSHEY_PLAIN
elif fontstyle == 4:
font = cv2.FONT_HERSHEY_SCRIPT_COMPLEX
elif fontstyle == 5:
font = cv2.FONT_HERSHEY_SCRIPT_SIMPLEX
elif fontstyle == 6:
font = cv2.FONT_HERSHEY_TRIPLEX
elif fontstyle == 7:
font = cv2.FONT_ITALIC
cv2.putText(image, text, coords, font, scale, color, thick)
def recalc_Center_Radius(self, coords):
LR, UD, dst = self.calcRegion(coords)
radius = [dst[0]//2, dst[1]//2]
center = (int(coords[0]+radius[0]*LR), int(coords[1]+radius[1]*UD))
return center, radius
def Triangle(self, coords):
center, radius = self.recalc_Center_Radius(coords)
c = [center[0], center[1]-radius[1]]
b = [center[0] +radius[0], center[1]+radius[1]]
a = [center[0] -radius[0], center[1]+radius[1]]
return np.array([a,b,c], np.int32)
def Diamond(self, coords):
center, radius = self.recalc_Center_Radius(coords)
return np.array([[center[0]-radius[0], center[1]], [center[0], center[1]-radius[1]], [center[0]+radius[0], center[1]], [center[0], center[1]+radius[1]]], np.int32)
def ReLocateCoords(self, coords):
LR, UD, dst = self.calcRegion(coords)
if LR==-1:
coords[0] -= dst[0]
coords[2] += dst[0]
if UD == -1:
coords[1] -= dst[1]
coords[3] += dst[1]
return coords
def calcRegion(self, coords):
LR = UD = 0
dst = [0, 0]
x1 = coords[0]
y1 = coords[1]
x2 = coords[2]
y2 = coords[3]
if x2 < x1:
LR = -1
dst[0] = x1 - x2
elif x2 > x1:
LR = 1
dst[0] = x2 - x1
if y2 < y1:
UD = -1
dst[1] = y1 - y2
elif y2 > y1:
UD = 1
dst[1] = y2 - y1
return LR, UD, dst
def Color_picker(self, color):
image = np.zeros((300, 300, 3), np.uint8)
image[:] = color
self.drawPrimitive(image, (int(300*.01), int(300*.01), int(300*.99), int(300*.99)), 5, (0,0,0), 10)
self.drawPrimitive(image, (int(300*.1), int(300*.1), int(300*.9), int(300*.9)), 5, (255,255,255), 20)
self.SaveImage("TP_assets/color.png", image)
class HistogramPlot(QWidget):
def __init__(self, *args, **kwargs):
QWidget.__init__(self, *args, **kwargs)
self.setLayout(QVBoxLayout())
self.canvas = PlotCanvas()
self.layout().addWidget(self.canvas)
def Plot(self, image):
self.canvas.plot(image)
class PlotCanvas(FigureCanvas):
def __init__(self):
fig = Figure(figsize=(4, 4), dpi=72)
FigureCanvas.__init__(self, fig)
self.axes = fig.add_subplot(111)
self.axes.axis('off')
FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
def plot(self, image):
self.axes.clear()
self.axes.hist(image.ravel(), 256, [0, 256], color='black')
if len(image.shape)==3:
color = ('b', 'g', 'r')
for i, col in enumerate(color):
histr = cv2.calcHist([image], [i], None, [256], [0, 256])
self.axes.plot(histr, color=col)
self.axes.set_ylim(ymin=0)
self.axes.set_xlim(xmin=0, xmax=256)
self.axes.set_position([0, 0, 1, 1])
self.draw()
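# --- Hypothetical usage sketch (not part of the original file) ---
# Assumes PyQt5 is installed and 'sample.png' exists on disk:
#   app = QApplication([])
#   widget = HistogramPlot()
#   widget.Plot(Paint_CV().LoadImage('sample.png'))
#   widget.show()
#   app.exec_()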
|
{"hexsha": "7fd3d050b37453d71e81d1e5aeda0bb3904a7d20", "size": 13257, "ext": "py", "lang": "Python", "max_stars_repo_path": "ToothPaint_CV.py", "max_stars_repo_name": "JunHong-1998/OpenCV-ToothPaint2-Digital-Image-Editor", "max_stars_repo_head_hexsha": "30b0c902f41aca43e98c09b7af479016760075bc", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ToothPaint_CV.py", "max_issues_repo_name": "JunHong-1998/OpenCV-ToothPaint2-Digital-Image-Editor", "max_issues_repo_head_hexsha": "30b0c902f41aca43e98c09b7af479016760075bc", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ToothPaint_CV.py", "max_forks_repo_name": "JunHong-1998/OpenCV-ToothPaint2-Digital-Image-Editor", "max_forks_repo_head_hexsha": "30b0c902f41aca43e98c09b7af479016760075bc", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.3377926421, "max_line_length": 209, "alphanum_fraction": 0.5302858867, "include": true, "reason": "import numpy", "num_tokens": 3777}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # S_OutDetectFPdependence [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_OutDetectFPdependence&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=ExerMVEOutlier).
# ## Prepare the environment
# +
import os
import os.path as path
import sys
sys.path.append(path.abspath('../../functions-legacy'))
import numpy as np
from numpy import arange, array, zeros, ceil, log, exp, tile, r_, linspace
from numpy import sum as npsum
from scipy.io import loadmat
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure, plot, bar, legend, xlim, ylim, scatter, ylabel, \
xlabel, xticks, yticks
import matplotlib.dates as mdates
plt.style.use('seaborn')
from CONFIG import GLOBAL_DB, TEMPORARY_DB
from ARPM_utils import save_plot, struct_to_dict, date_mtop
from FPmeancov import FPmeancov
from PlotTwoDimEllipsoid import PlotTwoDimEllipsoid
from Price2AdjustedPrice import Price2AdjustedPrice
from GarchResiduals import GarchResiduals
from BlowSpinFP import BlowSpinFP
from ColorCodedFP import ColorCodedFP
from RemoveFarthestOutlierFP import RemoveFarthestOutlierFP
from FarthestOutlier import FarthestOutlier
# -
# ## Upload the database
# +
try:
db = loadmat(os.path.join(GLOBAL_DB, 'db_Stocks'), squeeze_me=True)
except FileNotFoundError:
db = loadmat(os.path.join(TEMPORARY_DB, 'db_Stocks'), squeeze_me=True)
StocksSPX = struct_to_dict(db['StocksSPX'])
# -
# ## Compute the dividend-adjusted returns of two stocks
# +
n_ = 2
t_ = 500
_, x_1 = Price2AdjustedPrice(StocksSPX.Date.reshape(1,-1), StocksSPX.Prices[[25],:], StocksSPX.Dividends[25]) # Cisco Systems Inc returns
_, x_2 = Price2AdjustedPrice(StocksSPX.Date.reshape(1,-1), StocksSPX.Prices[[5],:], StocksSPX.Dividends[5]) # General Electric returns
date = StocksSPX.Date[1:]
x_1 = x_1[[0],-t_:]
x_2 = x_2[[0],-t_:]
date = date[-t_:]
# -
# ## Compute the invariants using GARCH(1,1) fit
epsi = GarchResiduals(r_[x_1,x_2])
# ## Compute the Flexible Probability profiles using Blow-Spin method
b = 1 # number of blows
s = 2 # number of spins
p, _ = BlowSpinFP(epsi, b, s, [1, 1], .8)
q_ = b + s
# ## Remove the worst historical outliers from the dataset to guarantee clarity in static figures
# +
for k in range(int(ceil(t_ / 15))):
epsi, p, date = RemoveFarthestOutlierFP(epsi, p, date)
p=p / tile(npsum(p, 1,keepdims=True), (1, p.shape[1])) # normalize the FP profiles
ens = exp(npsum(-p * log(p), 1,keepdims=True)) # compute the effective number of scenarios
# -
# ## Detect the worst outlier for each FP profile then compute HFP mean and covariance
t_tilde = zeros(q_,dtype=int)
mu_out = zeros((n_, q_))
sigma2_out = zeros((n_, n_, q_))
for q in range(q_):
    t_tilde[q] = FarthestOutlier(epsi, p[[q],:])  # time index of the worst outlier for this FP profile
# compute historical mean and covariance of the dataset without outlier
epsi_temp = np.delete(epsi,t_tilde[q], axis=1)
p_temp = np.delete(p[[q],:],t_tilde[q], axis=1)
[mu_out[:, [q]], sigma2_out[:,:, q]] = FPmeancov(epsi_temp, p_temp / npsum(p_temp))
# ## Generate static figures showing how the detected outlier changes along with the FP profile considered
# +
greyrange = arange(0.1,0.91,0.01)
date_dt = array([date_mtop(i) for i in date])
myFmt = mdates.DateFormatter('%d-%b-%Y')
t_new = len(date_dt)
epslim1 = [min(epsi[0]) - .3, max(epsi[0])+.3]
epslim2 = [min(epsi[1]) - .3, max(epsi[1])+.3]
for q in range(q_):
f = figure()
# Scatter plot of observations, outlier and HFP-ellipsoid
plt.subplot2grid((4,1),(0,0),rowspan=3)
[CM, C] = ColorCodedFP(p[[q],:], None, None, greyrange, 0, 1, [0.6, 0.1])
# colormap(CM)
obs = scatter(epsi[0], epsi[1], 8, c=C, marker='.',cmap=CM)
shobs = plot(-1000, 1000, color='k',marker='.',markersize=8,linestyle='none')
xlim(epslim1)
ylim(epslim2)
out = scatter(epsi[0, t_tilde[q]], epsi[1, t_tilde[q]], 50, 'r','o',lw=2)
shout = plot(-1000, 1000, markersize= 6, color='r',marker='o',lw=2,linestyle='none')
ell = PlotTwoDimEllipsoid(mu_out[:, [q]], sigma2_out[:,:, q], 1, None, None, 'r', 2)
xlabel('$\epsilon_1$')
ylabel('$\epsilon_2$')
plt.grid(True)
leg = legend(['historical observations','worst outlier','HFP ellipsoid'])
# Flexible Probability profile
plt.subplot(4,1,4)
b = bar(date_dt, p[q, :],width=date_dt[1].toordinal()-date_dt[0].toordinal(), facecolor=[.6, .6, .6], edgecolor=[.6, .6, .6])
d = linspace(0,t_new-1,3,dtype=int)
xlim([min(date_dt), max(date_dt)])
xticks(date_dt[d])
plt.gca().xaxis.set_major_formatter(myFmt)
ylim([0, max(p[q,:])])
yticks([])
ylabel('FP')
ensT = 'Effective Num.Scenarios = % 3.0f'%ens[q]
plt.tight_layout();
plt.text(date_dt[-1], max(p[q,:])+max(p[q, :]) / 10, ensT, color = 'k',horizontalalignment='right',verticalalignment='bottom')
# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])
|
{"hexsha": "a3a4e87c197e0d9b78db94b4a37b78716bde23ff", "size": 5390, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/sources/S_OutDetectFPdependence.py", "max_stars_repo_name": "dpopadic/arpmRes", "max_stars_repo_head_hexsha": "ddcc4de713b46e3e9dcb77cc08c502ce4df54f76", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-04-10T13:24:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T08:20:42.000Z", "max_issues_repo_path": "scripts/sources/S_OutDetectFPdependence.py", "max_issues_repo_name": "dpopadic/arpmRes", "max_issues_repo_head_hexsha": "ddcc4de713b46e3e9dcb77cc08c502ce4df54f76", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/sources/S_OutDetectFPdependence.py", "max_forks_repo_name": "dpopadic/arpmRes", "max_forks_repo_head_hexsha": "ddcc4de713b46e3e9dcb77cc08c502ce4df54f76", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2019-08-13T22:02:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-09T17:49:12.000Z", "avg_line_length": 34.1139240506, "max_line_length": 217, "alphanum_fraction": 0.6820037106, "include": true, "reason": "import numpy,from numpy,from scipy", "num_tokens": 1715}
|
from scipy import stats
import matplotlib.pyplot as plt
import numpy as np
import jax.scipy.stats as jstats
from jax import grad
def main():
# various beta distribution shapes
x_vals = np.linspace(0.0, 1.0, 100)
plt.plot(x_vals, stats.beta.pdf(x_vals, a=0.5, b=0.5), label=f'a={0.5}, b={0.5}')
plt.plot(x_vals, stats.beta.pdf(x_vals, a=1.0, b=1.0), label=f'a={1.0}, b={1.0}')
plt.plot(x_vals, stats.beta.pdf(x_vals, a=10.0, b=10.0), label=f'a={10.0}, b={10.0}')
plt.plot(x_vals, stats.beta.pdf(x_vals, a=5.0, b=20.0), label=f'a={5.0}, b={20.0}')
plt.plot(x_vals, stats.beta.pdf(x_vals, a=20.0, b=5.0), label=f'a={20.0}, b={5.0}')
plt.ylabel('Beta PDF')
plt.xlabel('Action')
plt.legend()
plt.show()
# action distribution
plt.plot(x_vals, stats.beta.pdf(x_vals, a=2.0, b=2.0), label=f'a={2.0}, b={2.0}')
plt.axvline(0.1, color='red')
plt.legend()
plt.xlabel('Action')
plt.ylabel('Beta PDF')
plt.show()
# beta distribution at x=0.1 for varying values of a
a_vals = np.linspace(0.1, 2.0, 100)
pdf_at_x = [stats.beta.pdf(x=0.1, a=a, b=2.0) for a in a_vals]
plt.plot(a_vals, pdf_at_x)
plt.xlabel('a')
plt.ylabel('Beta PDF @ x=0.1, b=2.0')
plt.xlim((0.1, 2.0))
plt.show()
# modified beta distribution to increase the density at x=0.1
plt.plot(x_vals, stats.beta.pdf(x_vals, a=2.0, b=2.0), label=f'a={2.0}, b={2.0}')
plt.plot(x_vals, stats.beta.pdf(x_vals, a=0.6, b=2.0), label=f'a={0.6}, b={2.0}')
plt.axvline(0.1, color='red')
plt.legend()
plt.xlabel('Action')
plt.ylabel('Beta PDF')
plt.show()
# 1. Define the function for which we want a gradient.
def jax_beta_pdf(
x: float,
a: float,
b: float
):
return jstats.beta.pdf(x=x, a=a, b=b, loc=0.0, scale=1.0)
# 2. Ask JAX for the gradient with respect to the second argument (shape parameter a).
jax_beta_pdf_grad = grad(jax_beta_pdf, argnums=1)
# 3. Calculate the gradient that we want.
print(f'{jax_beta_pdf_grad(0.1, 2.0, b=2.0)}')
if __name__ == '__main__':
main()
|
{"hexsha": "d1907c6be743f6fc87bda4e9ec5e12a4bfb20130", "size": 2135, "ext": "py", "lang": "Python", "max_stars_repo_path": "docs/case_studies/mountain-car-continuous-figs/beta-dist.py", "max_stars_repo_name": "MatthewGerber/rl", "max_stars_repo_head_hexsha": "c323524be2a541b43b420a3da58e4675521b594f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "docs/case_studies/mountain-car-continuous-figs/beta-dist.py", "max_issues_repo_name": "MatthewGerber/rl", "max_issues_repo_head_hexsha": "c323524be2a541b43b420a3da58e4675521b594f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/case_studies/mountain-car-continuous-figs/beta-dist.py", "max_forks_repo_name": "MatthewGerber/rl", "max_forks_repo_head_hexsha": "c323524be2a541b43b420a3da58e4675521b594f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.8461538462, "max_line_length": 90, "alphanum_fraction": 0.6032786885, "include": true, "reason": "import numpy,from scipy,import jax,from jax", "num_tokens": 748}
|
import numpy as np # Import numpy library
# Integers:
i = 10 # integer
print(type(i)) # Print out the data type of i
print(" ")
a_i = np.zeros(i, dtype=int)  # declare an array of ints; otherwise elements default to float
print(type(a_i))              # <class 'numpy.ndarray'>
print(type(a_i[0]))           # a numpy integer (int64 on most platforms)
print(" ")
# Floats:
x = 119.0 #Floating point number
print(type(x)) #Print out the data type of x
print(" ")
y = 1.19e2 #floating 119 in sci. notation
print(type(y)) #print out the data type of y
print(" ")
z = np.zeros(i, dtype=float)  # declare an array of floats
print(type(z))                # <class 'numpy.ndarray'>
print(type(z[0]))             # <class 'numpy.float64'>
|
{"hexsha": "339f149c47a5fff07e205e4262e8d9197047a71a", "size": 748, "ext": "py", "lang": "Python", "max_stars_repo_path": "data_types.py", "max_stars_repo_name": "spausanc/astr-119-hw-1", "max_stars_repo_head_hexsha": "f2e17dbea70f0eebdd3555718285cafce2ac3cf4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "data_types.py", "max_issues_repo_name": "spausanc/astr-119-hw-1", "max_issues_repo_head_hexsha": "f2e17dbea70f0eebdd3555718285cafce2ac3cf4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-10-25T23:42:12.000Z", "max_issues_repo_issues_event_max_datetime": "2018-10-25T23:42:12.000Z", "max_forks_repo_path": "data_types.py", "max_forks_repo_name": "spausanc/astr-119-hw-2", "max_forks_repo_head_hexsha": "f2e17dbea70f0eebdd3555718285cafce2ac3cf4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-10-18T01:53:25.000Z", "max_forks_repo_forks_event_max_datetime": "2018-10-18T01:53:25.000Z", "avg_line_length": 27.7037037037, "max_line_length": 83, "alphanum_fraction": 0.6002673797, "include": true, "reason": "import numpy", "num_tokens": 213}
|
import os
import sys
import sympy
from sympy.galgebra.GA import MV, ZERO, ONE, HALF
from sympy import collect, symbols
def F(x, n, nbar):
"""
Conformal Mapping Function
"""
Fx = HALF*((x*x)*n + 2*x - nbar)
return(Fx)
if __name__ == '__main__':
    pass  # this file appears to be only a test header; the body is appended elsewhere
|
{"hexsha": "41b3bb5c672cebf6b950e023ad510f815fffcc24", "size": 287, "ext": "py", "lang": "Python", "max_stars_repo_path": "doc/src/modules/galgebra/GA/headerGAtest.py", "max_stars_repo_name": "eriknw/sympy", "max_stars_repo_head_hexsha": "b7544e2bb74c011f6098a7e886fd77f41776c2c4", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2015-01-14T06:55:33.000Z", "max_stars_repo_stars_event_max_datetime": "2018-08-11T14:43:52.000Z", "max_issues_repo_path": "doc/src/modules/galgebra/GA/headerGAtest.py", "max_issues_repo_name": "pbeltran/sympy-1", "max_issues_repo_head_hexsha": "94f92b36731c2bebe6de1037c063c2a258a8a399", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-02-19T04:56:04.000Z", "max_issues_repo_issues_event_max_datetime": "2018-02-19T04:56:04.000Z", "max_forks_repo_path": "doc/src/modules/galgebra/GA/headerGAtest.py", "max_forks_repo_name": "pbeltran/sympy-1", "max_forks_repo_head_hexsha": "94f92b36731c2bebe6de1037c063c2a258a8a399", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2016-04-24T14:39:22.000Z", "max_forks_repo_forks_event_max_datetime": "2016-04-24T14:39:22.000Z", "avg_line_length": 17.9375, "max_line_length": 49, "alphanum_fraction": 0.5923344948, "include": true, "reason": "import sympy,from sympy", "num_tokens": 86}
|
[STATEMENT]
lemma ipurge_fail_aux_t_intro_2:
"\<lbrakk>ipurge_fail_aux_t_inv_2 I D U xs X Y; ipurge_fail_aux_t_form Y\<rbrakk> \<Longrightarrow>
snd (ipurge_fail_aux_t_out Y) = ipurge_ref_aux I D U xs X"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>ipurge_fail_aux_t_inv_2 I D U xs X Y; ipurge_fail_aux_t_form Y\<rbrakk> \<Longrightarrow> snd (ipurge_fail_aux_t_out Y) = ipurge_ref_aux I D U xs X
[PROOF STEP]
proof (simp add: ipurge_fail_aux_t_inv_2_def ipurge_fail_aux_t_form_def
ipurge_fail_aux_t_out_def)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<lbrakk>if ListOp Y = None then List Y = xs \<and> Set Y = X else ipurge_ref_aux_less I D U (List Y) (Set Y) = ipurge_ref_aux I D U xs X; case ListOp Y of None \<Rightarrow> False | Some ys \<Rightarrow> List Y = []\<rbrakk> \<Longrightarrow> Set Y = ipurge_ref_aux I D U xs X
[PROOF STEP]
qed (simp add: ipurge_ref_aux_less_def split: option.split_asm)
|
{"llama_tokens": 412, "file": "Noninterference_Sequential_Composition_Propaedeutics", "length": 2}
|
#! /usr/bin/python
import random,argparse,sys,subprocess,os
parser = argparse.ArgumentParser()
import numpy as np
random.seed(0)
import time
input_file_ls = ["data/maze/grid10.txt","data/maze/grid20.txt","data/maze/grid30.txt","data/maze/grid40.txt","data/maze/grid50.txt","data/maze/grid60.txt","data/maze/grid70.txt","data/maze/grid80.txt","data/maze/grid90.txt","data/maze/grid100.txt"]
#input_file_ls = ["data/maze/grid30.txt"]
class MazeVerifyOutput:
def __init__(self,algo):
counter = 1
for in_file in input_file_ls:
start = time.time()
print("\n\ntest instance",counter,"-"*100)
counter+=1
cmd_encode = "python","encoder.py","--grid",in_file
cmd_planner = "python","planner.py","--mdp","mdpFile","--algorithm",algo
cmd_decode = "python","decoder.py","--grid",in_file,"--value_policy","value_and_policy_file"
print("Executing..."," ".join(cmd_encode))
mdpFile = subprocess.check_output(cmd_encode,universal_newlines=True)
fw = open("mdpFile",'w');fw.write(mdpFile);fw.close()
print("Executing..."," ".join(cmd_planner))
value_and_policy_file = subprocess.check_output(cmd_planner,universal_newlines=True)
fw = open("value_and_policy_file",'w');fw.write(value_and_policy_file);fw.close()
print("Executing..."," ".join(cmd_decode))
shortestPath = subprocess.check_output(cmd_decode,universal_newlines=True)
#fr = open(in_file.replace("grid","solution"),'r')
#shortestPath = fr.read()
#fr.close()
mistakeFlag = self.traversePath(shortestPath,in_file)
if not mistakeFlag:
print("ALL CHECKS PASSED! \nChecking the correctness of you solution...")
self.verifyOutput(shortestPath,in_file)
print("Time taken:", time.time() - start)
def traversePath(self,path,in_file):
mistakeFlag = False
gridData = np.loadtxt(in_file,delimiter=" ",dtype=int)
#print(gridData)
path_ls = path.split()
#print(path_ls)
startIndex = np.where(gridData==2)
x = startIndex[0][0]
y = startIndex[1][0]
direction_dict = {'N':[-1,0], 'E':[0,1],'W':[0,-1],'S':[1,0]}
direction_ls = ['N','E','W','S']
for i in path_ls:
#Check1: Direction check
if not i in direction_ls:
mistakeFlag = True
print("\n","*"*10,"Mistake:Invalid direction printed:",i)
break
x+=direction_dict[i][0]
y+=direction_dict[i][1]
#Check2: Traverse check
if gridData[x][y]==1:
print("\n","*"*10,"Mistake:Wall ahead. Unable to traverse your path","*"*10)
mistakeFlag = True
break
        #Check3: check whether we reached the end state or not
endIndex = np.where(gridData==3)
reachedFlag = False
for i in range(len(endIndex[0])):
if (x== endIndex[0][i] and y==endIndex[1][i]):
reachedFlag = True
if not reachedFlag:
print("\n","*"*10,"Mistake: Invalid path","*"*10)
mistakeFlag = True
return mistakeFlag
def verifyOutput(self,shortestPath,in_file):
sol_file = in_file.replace("grid","solution")
fr = open(sol_file,'r');base = fr.read();fr.close()
base = base.split()
est = shortestPath.split()
direction_ls = ['N','E','W','S']
        mistakeFlag = False
        if not mistakeFlag:
            if len(base) < len(est):
                print("Your path is not the shortest path")
            elif len(base) == len(est):
                print("OK. You have printed the correct shortest path")
            else:
                print("Your path is shorter than the known shortest path! This should not happen")
                print("base path:", base)
                print("your path:", est)
if __name__ == "__main__":
parser.add_argument("--algorithm",type=str,default="hpi")
args = parser.parse_args()
algo = MazeVerifyOutput(args.algorithm)
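# Hypothetical invocation (algorithm name must match what planner.py accepts):
#   python MazeVerifyOutput.py --algorithm hpi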
|
{"hexsha": "c0148cb20e418fe0b6cfb9aec0bd8cc004c61b75", "size": 4513, "ext": "py", "lang": "Python", "max_stars_repo_path": "assignment2/MazeVerifyOutput.py", "max_stars_repo_name": "cybershiptrooper/CS747-assignments", "max_stars_repo_head_hexsha": "5b4b2bce8321b8fc48e578615034bb16df3ca88e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "assignment2/MazeVerifyOutput.py", "max_issues_repo_name": "cybershiptrooper/CS747-assignments", "max_issues_repo_head_hexsha": "5b4b2bce8321b8fc48e578615034bb16df3ca88e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "assignment2/MazeVerifyOutput.py", "max_forks_repo_name": "cybershiptrooper/CS747-assignments", "max_forks_repo_head_hexsha": "5b4b2bce8321b8fc48e578615034bb16df3ca88e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.5354330709, "max_line_length": 248, "alphanum_fraction": 0.5360070906, "include": true, "reason": "import numpy", "num_tokens": 1023}
|
import numpy as np
from sklearn.linear_model import LogisticRegression
from .model import User
from .twitter import BASILICA
def predict_user(user1_name, user2_name, tweet_text):
"""
Determine and return which user is more likeley to say a given TWEEN
ex__ run: predict('austen, "e;on", 'lambda school')
Returns 1 (corresponding to first user passed in) or 0 (second)
"""
    # get the first and second user objects
    # (roughly equivalent to a SELECT * query filtered by name)
user1 = User.query.filter(User.name == user1_name).one()
user2 = User.query.filter(User.name == user2_name).one()
user1_embeddings = np.array([tweet.embedding for tweet in user1.tweets])
user2_embeddings = np.array([tweet.embedding for tweet in user2.tweets])
embeddings = np.vstack([user1_embeddings, user2_embeddings])
labels = np.concatenate([np.ones(len(user1.tweets)),
np.zeros(len(user2.tweets))])
log_reg = LogisticRegression().fit(embeddings, labels)
    # we do the data science and make a prediction
tweet_embedding = BASILICA.embed_sentence(tweet_text, model='twitter')
return log_reg.predict(np.array(tweet_embedding).reshape(1, -1))
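# --- Hypothetical usage sketch (assumes both users already exist in the DB) ---
#   prediction = predict_user('austen', 'elonmusk', 'lambda school')
#   # 1.0 -> more like the first user, 0.0 -> more like the second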
|
{"hexsha": "b15d8042308cc0eb1451b7297fa207225d9ffb99", "size": 1176, "ext": "py", "lang": "Python", "max_stars_repo_path": "twitoff/predict.py", "max_stars_repo_name": "JonRivera/TwitOff", "max_stars_repo_head_hexsha": "69bb121139e8a76ffba62d51cb0ef4c215c45167", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "twitoff/predict.py", "max_issues_repo_name": "JonRivera/TwitOff", "max_issues_repo_head_hexsha": "69bb121139e8a76ffba62d51cb0ef4c215c45167", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "twitoff/predict.py", "max_forks_repo_name": "JonRivera/TwitOff", "max_forks_repo_head_hexsha": "69bb121139e8a76ffba62d51cb0ef4c215c45167", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.2307692308, "max_line_length": 76, "alphanum_fraction": 0.7108843537, "include": true, "reason": "import numpy", "num_tokens": 282}
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 25 16:24:34 2017
@author: C Winkler
"""
import pandas as pd
import numpy as np
from cycler import cycler
import matplotlib.pyplot as plt
import glob, os
from scipy.signal import argrelextrema
from statsmodels.nonparametric.smoothers_lowess import lowess
plt.rc('axes', prop_cycle=(cycler('color', ['b','k','k', 'r', 'k', 'k'])))
#print(os.listdir())
columns = ['Patient','Emax_BL', 'Emin_BL', 'Emax_dobu', 'Emin_dobu']
df = pd.DataFrame(columns=columns)
df['Patient'] = pd.Series(os.listdir())
def read_and_plot_data():
pv = pd.read_excel("conductance_measurement.xls", header = 1)
plot_pvloop(pv)
def plot_pvloop(pv):
pv=pv.iloc[1:]
pv.columns=["beat", "time", "pressure", "volume", "d(pressure)/dt"]
pv["elastance"] = pv["pressure"]/pv["volume"]
#m = max(pv["elastance"])
maxima = max(pv["elastance"].values)
minima = min(pv["elastance"].values)
print(maxima)
## Filter
filtered_pressure = lowess(pv["pressure"].values, pv["time"].values, is_sorted=True, frac=0.025, it=0)
filtered_volume = lowess(pv["volume"].values, pv["time"].values, is_sorted=True, frac=0.025, it=0)
#filtered_pv = lowess(pv["pressure"].values, pv["volume"].values, is_sorted=True, frac=0.025, it=0)
## Maxima and minima of filtered signal
maxima_possible = [filtered_volume[:,1][i] for i in argrelextrema(filtered_volume[:,1], np.greater)]
maxima_volume = [i for i in maxima_possible[0] if i > np.mean(pv["volume"].values)]
maxima_possible = [filtered_pressure[:,1][i] for i in argrelextrema(filtered_pressure[:,1], np.greater)]
maxima_pressure = [i for i in maxima_possible[0] if i > np.mean(pv["pressure"].values)]
#print(minima)
beats = str(len(maxima_pressure)) + " beats"
    # ESPVR: end-systolic pressure-volume relation (hard-coded linear fit)
    v = np.linspace(-20, max(pv["volume"].values))
    y = 1.047619048*(v + 18.57)
plt.plot(v, y, color = "k")
# PV loops
plt.plot(pv["volume"], pv["pressure"], color = "r", label = "PV - Loop (BL) " + beats)
# plt.xlim([0, 60])
# plt.ylim([-10, 70])
plt.grid()
plt.xlabel(r"Volume [ml]")
plt.ylabel(r"Pressure [mmHg]")
plt.legend()
plt.savefig("espvr.png", dpi = 500)
plt.close()
try:
    read_and_plot_data()
except Exception as e:
    print("error:", e)
|
{"hexsha": "db20bd7f9b0720e498e066ae96d26fa442f7937f", "size": 2368, "ext": "py", "lang": "Python", "max_stars_repo_path": "2_espvr/espvr.py", "max_stars_repo_name": "xi2pi/elastance-function", "max_stars_repo_head_hexsha": "ac3422b55a1958fe0ce579a2b49a977545159ccd", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-05-10T23:31:19.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-08T08:22:47.000Z", "max_issues_repo_path": "2_espvr/espvr.py", "max_issues_repo_name": "xi2pi/elastance-function", "max_issues_repo_head_hexsha": "ac3422b55a1958fe0ce579a2b49a977545159ccd", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "2_espvr/espvr.py", "max_forks_repo_name": "xi2pi/elastance-function", "max_forks_repo_head_hexsha": "ac3422b55a1958fe0ce579a2b49a977545159ccd", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.3111111111, "max_line_length": 108, "alphanum_fraction": 0.6245777027, "include": true, "reason": "import numpy,from scipy,from statsmodels", "num_tokens": 726}
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 12 21:06:37 2018
@author: user
"""
# %% libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#%% Information
data = pd.read_csv("oasis_cross-sectional.csv")
data.info()
data.head()
data.describe()
#%% Fill NaN values with the column means so that no samples are dropped
data["Delay"].fillna(value = 20.55000 ,inplace = True)
data["Educ"].fillna(value = 3.178723 ,inplace = True)
data["SES"].fillna(value = 2.490741,inplace = True)
data["MMSE"].fillna(value = 27.06383,inplace = True)
data["CDR"].fillna(value = 0.285106,inplace = True)
data['Gender'] = data['M/F']
data.drop(["ID","Hand", "M/F"],axis=1,inplace = True)
# %% Encode the target and build the feature matrix
data.Gender = [1 if each == "M" else 0 for each in data.Gender]
y = data.Gender.values
x_data = data.drop(["Gender"],axis=1)
# %% normalization
# (x - min(x))/(max(x)-min(x))
x = ((x_data - np.min(x_data)) / (np.max(x_data) - np.min(x_data))).values
# %% train test split
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size = 0.2,random_state=42)
x_train = x_train.T
x_test = x_test.T
y_train = y_train.T
y_test = y_test.T
print("x_train: ",x_train.shape)
print("x_test: ",x_test.shape)
print("y_train: ",y_train.shape)
print("y_test: ",y_test.shape)
# %% parameter initialize and sigmoid function
# dimension = number of input features
def initialize_weights_and_bias(dimension):
w = np.full((dimension,1),0.01)
b = 0.0
return w,b
# w,b = initialize_weights_and_bias(30)
def sigmoid(z):
y_head = 1/(1+ np.exp(-z))
return y_head
# print(sigmoid(0))
# %%
def forward_backward_propagation(w,b,x_train,y_train):
# forward propagation
z = np.dot(w.T,x_train) + b
y_head = sigmoid(z)
loss = -y_train*np.log(y_head)-(1-y_train)*np.log(1-y_head)
cost = (np.sum(loss))/x_train.shape[1] # x_train.shape[1] is for scaling
# backward propagation
derivative_weight = (np.dot(x_train,((y_head-y_train).T)))/x_train.shape[1] # x_train.shape[1] is for scaling
derivative_bias = np.sum(y_head-y_train)/x_train.shape[1] # x_train.shape[1] is for scaling
gradients = {"derivative_weight": derivative_weight, "derivative_bias": derivative_bias}
return cost,gradients
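# Note (derivation, for reference): with the cross-entropy loss
#   L = -(y*log(y_head) + (1-y)*log(1-y_head))  and  y_head = sigmoid(w.T x + b),
# the gradients simplify to dL/dw = x (y_head - y).T / m and
# dL/db = sum(y_head - y) / m, which is what the function above computes.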
#%% Updating(learning) parameters
def update(w, b, x_train, y_train, learning_rate,number_of_iterarion):
cost_list = []
cost_list2 = []
index = []
# updating(learning) parameters is number_of_iterarion times
for i in range(number_of_iterarion):
# make forward and backward propagation and find cost and gradients
cost,gradients = forward_backward_propagation(w,b,x_train,y_train)
cost_list.append(cost)
# lets update
w = w - learning_rate * gradients["derivative_weight"]
b = b - learning_rate * gradients["derivative_bias"]
if i % 10 == 0:
cost_list2.append(cost)
index.append(i)
print ("Cost after iteration %i: %f" %(i, cost))
# we update(learn) parameters weights and bias
parameters = {"weight": w,"bias": b}
plt.plot(index,cost_list2)
plt.xticks(index,rotation='vertical')
plt.xlabel("Number of Iterarion")
plt.ylabel("Cost")
plt.show()
return parameters, gradients, cost_list
#%% # prediction
def predict(w,b,x_test):
# x_test is a input for forward propagation
z = sigmoid(np.dot(w.T,x_test)+b)
Y_prediction = np.zeros((1,x_test.shape[1]))
    # z holds the sigmoid probabilities:
    # predict class 1 if the probability is above 0.5, else class 0
for i in range(z.shape[1]):
if z[0,i]<= 0.5:
Y_prediction[0,i] = 0
else:
Y_prediction[0,i] = 1
return Y_prediction
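# Equivalent vectorised form of the thresholding loop above (same result):
#   Y_prediction = (z > 0.5).astype(float)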
# %% logistic_regression
def logistic_regression(x_train, y_train, x_test, y_test, learning_rate , num_iterations):
# initialize
    dimension = x_train.shape[0]  # number of input features
w,b = initialize_weights_and_bias(dimension)
# do not change learning rate
parameters, gradients, cost_list = update(w, b, x_train, y_train, learning_rate,num_iterations)
y_prediction_test = predict(parameters["weight"],parameters["bias"],x_test)
# Print test Errors
print("test accuracy: {} %".format(100 - np.mean(np.abs(y_prediction_test - y_test)) * 100))
logistic_regression(x_train, y_train, x_test, y_test,learning_rate = 1, num_iterations = 300)
#%% sklearn with LR
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit(x_train.T,y_train.T)
print("Test Accuracy {}".format(lr.score(x_test.T,y_test.T)))
|
{"hexsha": "07d35ef163f96b0bce7bec3f9fb81d0644571434", "size": 5057, "ext": "py", "lang": "Python", "max_stars_repo_path": "alzheimers_machine_learning.py", "max_stars_repo_name": "tolgakurtulus/Machine-Learning-For-Alzehimers-Gender", "max_stars_repo_head_hexsha": "c29079849c3e97e614a7d6b0c1f0d5912cc6b30e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "alzheimers_machine_learning.py", "max_issues_repo_name": "tolgakurtulus/Machine-Learning-For-Alzehimers-Gender", "max_issues_repo_head_hexsha": "c29079849c3e97e614a7d6b0c1f0d5912cc6b30e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "alzheimers_machine_learning.py", "max_forks_repo_name": "tolgakurtulus/Machine-Learning-For-Alzehimers-Gender", "max_forks_repo_head_hexsha": "c29079849c3e97e614a7d6b0c1f0d5912cc6b30e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.5191489362, "max_line_length": 115, "alphanum_fraction": 0.6282380858, "include": true, "reason": "import numpy", "num_tokens": 1333}
|
import theano
import theano.tensor as T
import lasagne
from lasagne import init
from lasagne import nonlinearities
from .common import get_common_nonlinearity
__all__ = [
'RestrictedDenseLayer',
'rdense'
]
class RestrictedDenseLayer(lasagne.layers.DenseLayer):
def __init__(self, incoming, num_units, W=init.GlorotUniform(),
b=init.Constant(0.), nonlinearity=nonlinearities.LeakyRectify(0.1),
num_leading_axes=1,
normalization='global', eps=1.0e-6,
**kwargs):
super(RestrictedDenseLayer, self).__init__(
incoming, num_units, W, b, nonlinearity, num_leading_axes, **kwargs
)
self.normalization = normalization
self.eps = eps
if normalization not in ['unit', 'global']:
raise ValueError("normalization must be one of ['unit', 'global']")
def restricted_dense_kernel(self):
return self.W
def restricted_kernel(self):
return self.W
def get_output_for(self, input, **kwargs):
num_leading_axes = self.num_leading_axes
if num_leading_axes < 0:
num_leading_axes += input.ndim
if input.ndim > num_leading_axes + 1:
# flatten trailing axes (into (n+1)-tensor for num_leading_axes=n)
input = input.flatten(num_leading_axes + 1)
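        # Normalise the kernel before the affine transform:
        #   'global' rescales the whole matrix by its Frobenius norm,
        #   'unit'   rescales each output column by its own L2 norm.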
normalized_W = None
if self.normalization == 'global':
W_norm = T.sqrt(T.sum(self.W ** 2) + self.eps)
normalized_W = self.W / W_norm
elif self.normalization == 'unit':
W_norm = T.sqrt(T.sum(self.W ** 2, axis=(0, )) + self.eps)
normalized_W = self.W / W_norm[None, :]
activation = T.dot(input, normalized_W)
if self.b is not None:
activation = activation + self.b
return self.nonlinearity(activation)
rdense = lambda num_units, f=None, normalization='global': lambda incoming: RestrictedDenseLayer(
incoming=incoming,
num_units=num_units,
nonlinearity=get_common_nonlinearity(f),
normalization=normalization
)
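# Minimal usage sketch (an assumption based on the signature above: passing
# f=None to get_common_nonlinearity is taken to yield a default nonlinearity):
#   layer = rdense(128)(incoming_layer)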
|
{"hexsha": "5040c033c65cb7c5b4ff95ad38666b04f9542816", "size": 1943, "ext": "py", "lang": "Python", "max_stars_repo_path": "craynn/layers/rdense.py", "max_stars_repo_name": "maxim-borisyak/craynn", "max_stars_repo_head_hexsha": "fceabd33f5969033fb3605f894778c42c42f3e08", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "craynn/layers/rdense.py", "max_issues_repo_name": "maxim-borisyak/craynn", "max_issues_repo_head_hexsha": "fceabd33f5969033fb3605f894778c42c42f3e08", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "craynn/layers/rdense.py", "max_forks_repo_name": "maxim-borisyak/craynn", "max_forks_repo_head_hexsha": "fceabd33f5969033fb3605f894778c42c42f3e08", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.8923076923, "max_line_length": 97, "alphanum_fraction": 0.6881111683, "include": true, "reason": "import theano", "num_tokens": 501}
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'PHM.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
import os
import sys
import h5py
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from PyQt5 import QtCore, QtWidgets
from matplotlib.figure import Figure
from tfdata import get_dataset
from tftest import InferModel
from tftrain import trainModel
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
class Ui_TrainTestSelect(object):
def setupUi(self, TrainTestSelect):
TrainTestSelect.setObjectName("TrainTestSelect")
TrainTestSelect.resize(735, 710)
TrainTestSelect.setLayoutDirection(QtCore.Qt.LeftToRight)
self.groupBox = QtWidgets.QGroupBox(TrainTestSelect)
self.groupBox.setGeometry(QtCore.QRect(400, 30, 231, 231))
self.groupBox.setObjectName("groupBox")
self.BatchSize = QtWidgets.QLabel(self.groupBox)
self.BatchSize.setGeometry(QtCore.QRect(10, 30, 71, 31))
self.BatchSize.setObjectName("BatchSize")
self.Epochs = QtWidgets.QLabel(self.groupBox)
self.Epochs.setGeometry(QtCore.QRect(10, 70, 71, 31))
self.Epochs.setObjectName("Epochs")
self.LR = QtWidgets.QLabel(self.groupBox)
self.LR.setGeometry(QtCore.QRect(10, 110, 101, 31))
self.LR.setObjectName("LR")
self.Dropout = QtWidgets.QLabel(self.groupBox)
self.Dropout.setGeometry(QtCore.QRect(10, 150, 101, 31))
self.Dropout.setObjectName("Dropout")
self.BatchSizeHolder = QtWidgets.QTextEdit(self.groupBox)
self.BatchSizeHolder.setGeometry(QtCore.QRect(110, 30, 71, 31))
self.BatchSizeHolder.setObjectName("BatchSizeHolder")
self.EpochsHolder = QtWidgets.QTextEdit(self.groupBox)
self.EpochsHolder.setGeometry(QtCore.QRect(110, 70, 71, 31))
self.EpochsHolder.setObjectName("EpochsHolder")
self.LRHolder = QtWidgets.QTextEdit(self.groupBox)
self.LRHolder.setGeometry(QtCore.QRect(110, 110, 71, 31))
self.LRHolder.setObjectName("LRHolder")
self.DropoutHolder = QtWidgets.QTextEdit(self.groupBox)
self.DropoutHolder.setGeometry(QtCore.QRect(110, 150, 71, 31))
self.DropoutHolder.setObjectName("DropoutHolder")
self.groupBox_2 = QtWidgets.QGroupBox(TrainTestSelect)
self.groupBox_2.setGeometry(QtCore.QRect(40, 30, 271, 231))
self.groupBox_2.setObjectName("groupBox_2")
self.AugmentPhysics = QtWidgets.QCheckBox(self.groupBox_2)
self.AugmentPhysics.setGeometry(QtCore.QRect(60, 30, 161, 21))
self.AugmentPhysics.setLayoutDirection(QtCore.Qt.RightToLeft)
self.AugmentPhysics.setTristate(False)
self.AugmentPhysics.setObjectName("AugmentPhysics")
self.radioButton = QtWidgets.QRadioButton(self.groupBox_2)
self.radioButton.setGeometry(QtCore.QRect(60, 130, 111, 23))
self.radioButton.setLayoutDirection(QtCore.Qt.LeftToRight)
self.radioButton.setObjectName("radioButton")
self.radioButton_2 = QtWidgets.QRadioButton(self.groupBox_2)
self.radioButton_2.setGeometry(QtCore.QRect(60, 160, 111, 23))
self.radioButton_2.setLayoutDirection(QtCore.Qt.LeftToRight)
self.radioButton_2.setChecked(True)
self.radioButton_2.setObjectName("radioButton_2")
self.radioButton_3 = QtWidgets.QRadioButton(self.groupBox_2)
self.radioButton_3.setGeometry(QtCore.QRect(60, 190, 111, 23))
self.radioButton_3.setLayoutDirection(QtCore.Qt.LeftToRight)
self.radioButton_3.setObjectName("radioButton_3")
self.AugmentPhysics_2 = QtWidgets.QCheckBox(self.groupBox_2)
self.AugmentPhysics_2.setGeometry(QtCore.QRect(60, 60, 161, 21))
self.AugmentPhysics_2.setLayoutDirection(QtCore.Qt.RightToLeft)
self.AugmentPhysics_2.setTristate(False)
self.AugmentPhysics_2.setObjectName("AugmentPhysics_2")
self.AugmentPhysics_3 = QtWidgets.QCheckBox(self.groupBox_2)
self.AugmentPhysics_3.setGeometry(QtCore.QRect(60, 90, 161, 21))
self.AugmentPhysics_3.setLayoutDirection(QtCore.Qt.RightToLeft)
self.AugmentPhysics_3.setTristate(False)
self.AugmentPhysics_3.setObjectName("AugmentPhysics_3")
self.pushButton_3 = QtWidgets.QPushButton(TrainTestSelect)
self.pushButton_3.setGeometry(QtCore.QRect(520, 320, 111, 31))
self.pushButton_3.setObjectName("pushButton_3")
self.pushButton_3.clicked.connect(self.InferClicked)
self.pushButton_4 = QtWidgets.QPushButton(TrainTestSelect)
self.pushButton_4.setGeometry(QtCore.QRect(520, 360, 111, 31))
self.pushButton_4.setObjectName("pushButton_4")
self.pushButton_4.clicked.connect(self.TrainTestEvalClicked)
self.pushButton_5 = QtWidgets.QPushButton(TrainTestSelect)
self.pushButton_5.setGeometry(QtCore.QRect(520, 440, 111, 31))
self.pushButton_5.setObjectName("pushButton_5")
self.pushButton_5.clicked.connect(self.LoadRealTimeClicked)
self.pushButton_6 = QtWidgets.QPushButton(TrainTestSelect)
self.pushButton_6.setGeometry(QtCore.QRect(200, 270, 111, 31))
self.pushButton_6.setObjectName("pushButton_6")
self.pushButton_6.clicked.connect(self.LoadLotClicked)
self.pushButton_7 = QtWidgets.QPushButton(TrainTestSelect)
self.pushButton_7.setGeometry(QtCore.QRect(520, 270, 111, 31))
self.pushButton_7.setObjectName("pushButton_7")
self.pushButton_7.clicked.connect(self.TrainInsightsClicked)
self.pushButton_8 = QtWidgets.QPushButton(TrainTestSelect)
self.pushButton_8.setGeometry(QtCore.QRect(520, 400, 111, 31))
self.pushButton_8.setObjectName("pushButton_8")
self.pushButton_8.clicked.connect(self.DeploymentClicked)
self.pushButton_9 = QtWidgets.QPushButton(TrainTestSelect)
self.pushButton_9.setGeometry(QtCore.QRect(520, 480, 111, 31))
self.pushButton_9.setObjectName("pushButton_9")
self.pushButton_9.clicked.connect(self.ExitClicked)
self.tableWidget = QtWidgets.QTableWidget(TrainTestSelect)
self.tableWidget.setGeometry(QtCore.QRect(40, 320, 471, 360))
self.tableWidget.setTextElideMode(QtCore.Qt.ElideMiddle)
self.tableWidget.setRowCount(0)
self.tableWidget.setColumnCount(0)
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.verticalHeader().setVisible(False)
self.tableWidget.verticalHeader().setHighlightSections(False)
self.tableWidget.setRowCount(0)
self.tableWidget.setColumnCount(5)
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.horizontalHeader().setDefaultSectionSize(90)
self.tableWidget.horizontalHeader().setMinimumSectionSize(57)
self.tableWidget.horizontalHeader().setStretchLastSection(True)
self.tableWidget.verticalHeader().setVisible(False)
self.tableWidget.verticalHeader().setHighlightSections(False)
self.tableWidget.verticalHeader().setStretchLastSection(True)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(4, item)
self.pushButton = QtWidgets.QPushButton(TrainTestSelect)
self.pushButton.setGeometry(QtCore.QRect(400, 270, 111, 31))
self.pushButton.setObjectName("pushButton")
self.pushButton.clicked.connect(self.TrainClicked)
self.retranslateUi(TrainTestSelect)
QtCore.QMetaObject.connectSlotsByName(TrainTestSelect)
def retranslateUi(self, TrainTestSelect):
_translate = QtCore.QCoreApplication.translate
TrainTestSelect.setWindowTitle(_translate("TrainTestSelect", "NASA-Turbojet-PHM"))
self.groupBox.setTitle(_translate("TrainTestSelect", "Training Hyperparameters"))
self.BatchSize.setText(_translate("TrainTestSelect", "Batch Size"))
self.Epochs.setText(_translate("TrainTestSelect", "Epochs"))
self.LR.setText(_translate("TrainTestSelect", "Learning Rate"))
self.Dropout.setText(_translate("TrainTestSelect", "Dropout"))
self.BatchSizeHolder.setHtml(_translate("TrainTestSelect",
"<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">1024</p></body></html>"))
self.EpochsHolder.setHtml(_translate("TrainTestSelect",
"<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">60</p></body></html>"))
self.LRHolder.setHtml(_translate("TrainTestSelect",
"<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">0.0001</p></body></html>"))
self.DropoutHolder.setHtml(_translate("TrainTestSelect",
"<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">0.2</p></body></html>"))
self.groupBox_2.setTitle(_translate("TrainTestSelect", "Training Configuration"))
self.AugmentPhysics.setText(_translate("TrainTestSelect", "Augment Physics"))
self.radioButton.setText(_translate("TrainTestSelect", "BaseCNN "))
self.radioButton_2.setText(_translate("TrainTestSelect", "CNN-LSTM "))
self.radioButton_3.setText(_translate("TrainTestSelect", "CNN-2LSTM "))
self.AugmentPhysics_2.setText(_translate("TrainTestSelect", "Batch Normalisation"))
        self.AugmentPhysics_3.setText(_translate("TrainTestSelect", "Resume Training"))
self.pushButton_3.setText(_translate("TrainTestSelect", "Infer"))
self.pushButton_4.setText(_translate("TrainTestSelect", "Train-Test Eval"))
self.pushButton_5.setText(_translate("TrainTestSelect", "Load RealTime"))
self.pushButton_6.setText(_translate("TrainTestSelect", "Load Data"))
self.pushButton_7.setText(_translate("TrainTestSelect", "Train Insights"))
self.pushButton_8.setText(_translate("TrainTestSelect", "Deployment"))
self.pushButton_9.setText(_translate("TrainTestSelect", "Exit"))
self.pushButton.setText(_translate("TrainTestSelect", "Train"))
        item = self.tableWidget.horizontalHeaderItem(0)
        item.setText(_translate("TrainTestSelect", "Unit"))
        item = self.tableWidget.horizontalHeaderItem(1)
        item.setText(_translate("TrainTestSelect", "IsTrain"))
        item = self.tableWidget.horizontalHeaderItem(2)
        item.setText(_translate("TrainTestSelect", "RMSE"))
        item = self.tableWidget.horizontalHeaderItem(3)
        item.setText(_translate("TrainTestSelect", "s x 10^6"))
        item = self.tableWidget.horizontalHeaderItem(4)
        item.setText(_translate("TrainTestSelect", "Deploy Cycle"))
def LoadLotClicked(self):
if self.AugmentPhysics.checkState():
augmentPhy = 1
fpath = "./data/DS02-KalmanNew.h5"
self.attrs = 49
else:
augmentPhy = 0
fpath = "./data/N-CMAPSS_DS02-006.h5"
self.attrs = 35
self.fpath = fpath
with h5py.File(fpath, 'r') as hdf:
A_dev = np.array(hdf.get("A_dev"))
A_test = np.array(hdf.get("A_test"))
self.y_dev = np.array(hdf.get("Y_dev"))
self.y_test = np.array(hdf.get("Y_test"))
unit_devarray = np.array(A_dev[:, 0], dtype=np.int32)
unit_testarray = np.array(A_test[:, 0], dtype=np.int32)
self.unit_devarray = unit_devarray
self.unit_testarray = unit_testarray
self.unit_devunique = list(np.unique(unit_devarray))
self.unit_testunique = list(np.unique(unit_testarray))
print(self.unit_testunique)
print(self.unit_devunique)
self.tftrain_ds = get_dataset(fpath, [], augmentPhy, 1)
self.tftest_ds = get_dataset(fpath, [], augmentPhy, 0)
self.updateTable()
def LoadRealTimeClicked(self):
suffix = "test"
fpath = "./data/N-CMAPSS_DS02-006.h5"
with h5py.File(fpath, 'r') as hdf:
W_in = np.array(hdf.get("W_" + suffix))
X_s_in = np.array(hdf.get("X_s_" + suffix))
X_v_in = np.array(hdf.get("X_v_" + suffix))
T_in = np.array(hdf.get("T_" + suffix))
fig, ax = plt.subplots(4, 4)
fig.suptitle("Real Time Sensor Measurements", fontsize=20)
fig.text(0.5, 0.04, 'Time (seconds)', ha='center', fontsize=20)
fig.text(0.04, 0.5, 'Sensor readings (units)', va='center', rotation='vertical', fontsize=20)
ypred_test = self.ypred_test
for t in range(10000):
W_in_curr = np.array(W_in[t:t + 50, :])
X_s_in_curr = np.array(X_s_in[t:t + 50, :])
X_v_in_curr = np.array(X_v_in[t:t + 50, :])
T_in_curr = np.array(T_in[t:t + 50])
Ypred_in = np.array(ypred_test[t:t + 50])
ax[0, 0].set_title("Altitude (ft)")
ax[0, 0].plot(W_in_curr[:, 0])
ax[0, 0].set_xticklabels([])
ax[0, 1].set_title("Mach Number (-)")
ax[0, 1].plot(W_in_curr[:, 1])
ax[0, 1].set_xticklabels([])
ax[0, 2].set_title("Throttle-Resolver Angle (%)")
ax[0, 2].plot(W_in_curr[:, 2])
ax[0, 2].set_xticklabels([])
ax[0, 3].set_title("Temp. Fan Inlet (R)")
ax[0, 3].plot(W_in_curr[:, 3])
ax[0, 3].set_xticklabels([])
ax[1, 0].set_title("Fuel Flow (pps)")
ax[1, 0].plot(X_s_in_curr[:, 0])
ax[1, 0].set_xticklabels([])
ax[1, 1].set_title("Fan Speed (rpm)")
ax[1, 1].plot(X_s_in_curr[:, 1])
ax[1, 1].set_xticklabels([])
ax[1, 2].set_title("Core Speed (rpm)")
ax[1, 2].plot(X_s_in_curr[:, 2])
ax[1, 2].set_xticklabels([])
ax[1, 3].set_title("LPC outlet Temp (R)")
ax[1, 3].plot(X_s_in_curr[:, 3])
ax[1, 3].set_xticklabels([])
ax[2, 0].set_title("HPC outlet Temp (R)")
ax[2, 0].plot(X_s_in_curr[:, 4])
ax[2, 0].set_xticklabels([])
ax[2, 1].set_title("HPT outlet Temp (R)")
ax[2, 1].plot(X_s_in_curr[:, 5])
ax[2, 1].set_xticklabels([])
ax[2, 2].set_title("LPT outlet Temp (R)")
ax[2, 2].plot(X_s_in_curr[:, 6])
ax[2, 2].set_xticklabels([])
ax[2, 3].set_title("Bypass-duct Pressure (psia)")
ax[2, 3].plot(X_s_in_curr[:, 7])
ax[2, 3].set_xticklabels([])
ax[3, 0].set_title("Fan outlet Pressure (psia)")
ax[3, 0].plot(X_s_in_curr[:, 8])
ax[3, 1].set_title("LPC outlet Pressure (psia)")
ax[3, 1].plot(X_s_in_curr[:, 9])
ax[3, 2].set_title("HPC outlet Pressure (psia)")
ax[3, 2].plot(X_s_in_curr[:, 10])
ax[3, 3].set_title("RUL (cycles) current = " + "{0:.2f}".format(Ypred_in[-1]))
if (Ypred_in[0] > 15):
ax[3, 3].plot(Ypred_in, "g")
else:
ax[3, 3].plot(Ypred_in, "r")
plt.pause(0.00001)
# mng = plt.get_current_fig_manager()
# mng.window.showMaximized()
for i in range(4):
for j in range(4):
ax[i, j].cla()
plt.show()
return
def TrainInsightsClicked(self):
fig = Figure()
df_in = pd.read_csv("./losses/lossTrend.csv")
plt.title("Loss Trend For Training", fontsize=20)
plt.plot(list(df_in["Epochs"]), list(df_in["Loss"]))
plt.xlabel("Epochs", fontsize=20)
plt.ylabel("Loss", fontsize=20)
plt.show()
def __rmse__(self, pred, true):
return np.sqrt(np.mean((pred - true) ** 2))
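    # NASA PHM scoring function (as used with the N-CMAPSS benchmark):
    # early predictions (pred < true) get the milder exp(|err|/13) penalty,
    # late predictions the harsher exp(|err|/10); the sum is reported in
    # units of 10^6, matching the "s x 10^6" table column.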
def __nasafn__(self, pred, true):
sum_in = 0
for i in range(len(pred)):
if pred[i] < true[i]:
sum_in += np.exp((1 / 13) * (np.abs(pred[i] - true[i])))
else:
sum_in += np.exp((1 / 10) * (np.abs(pred[i] - true[i])))
return sum_in / (10 ** 6)
def TrainTestEvalClicked(self):
units = self.unit_devunique + self.unit_testunique
arrys = list(self.unit_devarray) + list(self.unit_testarray)
indexes = [arrys.index(x) - i * 50 for i, x in
enumerate(list(set(self.unit_devarray)) + list(set(self.unit_testarray)))]
indexes = indexes + [len(arrys) - len(units) * 50]
print(indexes)
self.indexes = indexes
c = 0
ypred_train = np.loadtxt("./output/y_predtrain.out")
ypred_test = np.loadtxt("./output/y_predtest.out")
ytrue_train = np.loadtxt("./output/y_truetrain.out")
ytrue_test = np.loadtxt("./output/y_truetest.out")
ytrue = list(ytrue_train) + list(ytrue_test)
ypred = list(ypred_train) + list(ypred_test)
self.ytrue_test = list(ytrue_test)
self.ypred_test = list(ypred_test)
# ytrue = list(self.y_dev) + list(self.y_test)
rmse_train = self.__rmse__(ypred_train, ytrue_train)
        nasa_train = self.__nasafn__(ypred_train, ytrue_train)
rmse_test = self.__rmse__(ypred_test, ytrue_test)
nasa_test = self.__nasafn__(ypred_test, ytrue_test)
fig, ax = plt.subplots(3, 3)
fig.suptitle("Unit Wise RUL Analysis", fontsize=20)
fig.text(0.5, 0.04, 'Time (seconds)', ha='center', fontsize=20)
fig.text(0.04, 0.5, 'RUL (cycles)', va='center', rotation='vertical', fontsize=20)
rmses = []
nasas = []
for i in range(3):
for j in range(3):
if c > 5:
title = "Unit " + str(units[c]) + " (Test) "
else:
title = "Unit " + str(units[c]) + " (Train) "
ax[i, j].set_title(title)
ax[i, j].plot(ytrue[indexes[c]:indexes[c + 1]], label="True", c="Green")
ax[i, j].plot(ypred[indexes[c]:indexes[c + 1]], label="Predicted", c="Blue")
ax[i, j].legend()
ax[i, j].ticklabel_format(scilimits=(0, 5), useOffset=True)
rmses.append(self.__rmse__(np.array(ypred[indexes[c]:indexes[c + 1]]),
np.array(ytrue[indexes[c]:indexes[c + 1]])))
nasas.append(self.__nasafn__(np.array(ypred[indexes[c]:indexes[c + 1]]),
np.array(ytrue[indexes[c]:indexes[c + 1]])))
c += 1
self.rmse_final = list(rmses[:3]) + [rmse_test] + list(rmses[3:]) + [rmse_train]
self.nasa_final = list(nasas[:3]) + [nasa_test] + list(nasas[3:]) + [nasa_train]
for i in range(len(units) + 2):
self.tableWidget.setItem(i, 2, QtWidgets.QTableWidgetItem("{0:.2f}".format(self.rmse_final[i])))
self.tableWidget.setItem(i, 3, QtWidgets.QTableWidgetItem("{0:.2f}".format(self.nasa_final[i])))
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
X = pd.DataFrame()
X["RMSE"] = self.rmse_final
X["Nasa"] = self.nasa_final
X.to_csv("./losses/Evals.csv", index=False)
def DeploymentClicked(self):
units = self.unit_testunique
arrys = list(self.unit_testarray)
indexes = [arrys.index(x) - i * 50 for i, x in enumerate(list(set(self.unit_testarray)))]
indexes.append(len(arrys) - len(units) * 50)
fig, ax = plt.subplots(3, 1)
fig.suptitle("Deployment Analysis of Test Units", fontsize=20)
fig.text(0.5, 0.04, 'Cycles', ha='center', fontsize=20)
fig.text(0.04, 0.5, 'RUL Deviation (cycles)', va='center', rotation='vertical', fontsize=20)
ypred_test = np.loadtxt("./output/y_predtest.out")
ytrue_test = np.loadtxt("./output/y_truetest.out")
self.ypred_test = list(ypred_test)
c = 0
deploy_cycle = []
for i in range(3):
true_in = ytrue_test[indexes[c]:indexes[c + 1]]
pred_in = ypred_test[indexes[c]:indexes[c + 1]]
inds = [list(true_in).index(x) for x in ((set(list(true_in))))]
inds.append(len(true_in))
uniques = list(set(true_in))[::-1]
ycap = []
y = []
# t = 0
err = []
err_max = []
err_min = []
for j in uniques:
arr_p = pred_in[true_in == j]
y_max = np.max(arr_p)
y_min = np.min(arr_p)
ycap_in = np.mean(arr_p)
y_in = j
err.append(y_in - ycap_in)
err_max.append(y_in - y_max)
err_min.append(y_in - y_min)
y.append(y_in)
ycap.append(ycap_in)
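            # Deployment cycle: scanning backwards through the cycles, take the
            # last cycle where the mean RUL error exceeds 8 cycles; the unit is
            # treated as deployable from that cycle onwards (dashed lines below).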
cycle_in = 0
for k in range(len(err) - 1):
if np.abs(err[-k - 1]) > 8:
cycle_in = len(y) - k
break
deploy_cycle.append(cycle_in)
ax[i].set_title("Unit " + str(units[c]))
ax[i].plot(err, 'bo-')
ax[i].plot(err_max, 'g-')
ax[i].plot(err_min, 'g-')
ax[i].axhline(y=8, color='r', linestyle='--')
ax[i].axhline(y=-8, color='r', linestyle='--')
ax[i].axvline(x=cycle_in, color='black', linestyle='--')
c += 1
for p, d in enumerate(deploy_cycle):
self.tableWidget.setItem(p, 4, QtWidgets.QTableWidgetItem(str(d)))
self.tableWidget.setItem(len(deploy_cycle), 4,
QtWidgets.QTableWidgetItem("{0:.2f}".format(np.mean(deploy_cycle))))
# mng = plt.get_current_fig_manager()
# mng.window.showMaximized()
plt.show()
    def ExitClicked(self):
        sys.exit()
def InferClicked(self):
if self.radioButton.isChecked():
Architecture = "BaseCNN"
elif self.radioButton_2.isChecked():
Architecture = "CNN-LSTM"
elif self.radioButton_3.isChecked():
Architecture = "CNN-2LSTM"
InferModel(self.tftest_ds, Architecture + str(self.attrs), 0)
InferModel(self.tftrain_ds, Architecture + str(self.attrs), 1)
def updateTable(self):
self.tableWidget.setColumnCount(5)
self.tableWidget.setRowCount(len(self.unit_devunique) + len(self.unit_testunique) + 2)
row_in = 0
for i in range(len(self.unit_testunique)):
self.tableWidget.setItem(row_in, 0, QtWidgets.QTableWidgetItem(str(self.unit_testunique[i])))
chkBoxItem = QtWidgets.QTableWidgetItem()
chkBoxItem.setFlags(QtCore.Qt.ItemIsEnabled)
chkBoxItem.setCheckState(QtCore.Qt.Unchecked)
self.tableWidget.setItem(row_in, 1, chkBoxItem)
row_in += 1
self.tableWidget.setItem(row_in, 0, QtWidgets.QTableWidgetItem("TestUnits"))
self.tableWidget.setItem(row_in, 1, QtWidgets.QTableWidgetItem("---"))
row_in += 1
for i in range(len(self.unit_devunique)):
self.tableWidget.setItem(row_in, 0, QtWidgets.QTableWidgetItem(str(self.unit_devunique[i])))
chkBoxItem = QtWidgets.QTableWidgetItem()
chkBoxItem.setFlags(QtCore.Qt.ItemIsEnabled)
chkBoxItem.setCheckState(QtCore.Qt.Checked)
self.tableWidget.setItem(row_in, 1, chkBoxItem)
row_in += 1
self.tableWidget.setItem(row_in, 0, QtWidgets.QTableWidgetItem("TrainUnits"))
self.tableWidget.setItem(row_in, 1, QtWidgets.QTableWidgetItem("---"))
def TrainClicked(self):
if self.radioButton.isChecked():
Architecture = "BaseCNN"
elif self.radioButton_2.isChecked():
Architecture = "CNN-LSTM"
elif self.radioButton_3.isChecked():
Architecture = "CNN-2LSTM"
if self.AugmentPhysics_3.checkState():
resume_train = 1
else:
resume_train = 0
self.arch = Architecture
params = {"Epochs": int(self.EpochsHolder.toPlainText()), "BatchSize": int(self.BatchSizeHolder.toPlainText()),
"LearningRate": float(self.LRHolder.toPlainText()),
"Dropout": float(self.DropoutHolder.toPlainText()), "Architecture": Architecture,
"BatchNorm": self.AugmentPhysics_2.checkState(), "Attrs": self.attrs, "ResumeTraining": resume_train}
print(params)
trainModel(self.tftrain_ds, params)
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
TrainTestSelect = QtWidgets.QDialog()
ui = Ui_TrainTestSelect()
ui.setupUi(TrainTestSelect)
TrainTestSelect.show()
sys.exit(app.exec_())
|
{"hexsha": "43e5211ba7ea9f86a1499a9e735c86e9b742a4a4", "size": 27633, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/TF-gui/PHM.py", "max_stars_repo_name": "jeetsagar/turbojet", "max_stars_repo_head_hexsha": "9b17edde0a7e01d0fa320261fbc2734ce53577d2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/TF-gui/PHM.py", "max_issues_repo_name": "jeetsagar/turbojet", "max_issues_repo_head_hexsha": "9b17edde0a7e01d0fa320261fbc2734ce53577d2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/TF-gui/PHM.py", "max_forks_repo_name": "jeetsagar/turbojet", "max_forks_repo_head_hexsha": "9b17edde0a7e01d0fa320261fbc2734ce53577d2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-05-20T05:47:59.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-24T07:44:37.000Z", "avg_line_length": 43.7231012658, "max_line_length": 195, "alphanum_fraction": 0.6023956863, "include": true, "reason": "import numpy", "num_tokens": 6845}
|
/**
* @file cpp_ptr.cpp
* @author Maximilian Harr <maximilian.harr@daimler.com>
* @date 21.11.2016
*
* @brief Investigation of smart pointers.
* Smart pointers mimic "normal" pointers (by means of operator overloading),
* but furthermore provide additional memory management features (deletion, ...).
* They are intended to reduce bugs caused by misuse of pointers.
* https://en.wikipedia.org/wiki/Smart_pointer
* https://msdn.microsoft.com/de-de/library/hh279676.aspx
*
* Coding Standard:
* wiki.ros.org/CppStyleGuide
* https://google.github.io/styleguide/cppguide.html
*
*
* @bug
*
*
* @todo
*
*
*/
// PRAGMA
// SYSTEM INCLUDES
#include <iostream> /* Defines the standard input/output stream objects */
#include <cstdlib> /* Defines several general purpose functions */
#include <memory> /* Defines smart pointers (unique_ptr, shared_ptr, ...) */
#include <boost/scoped_ptr.hpp>
// PROJECT INCLUDES
// LOCAL INCLUDES
// FORWARD REFERENCES
// FUNCTION PROTOTYPES
using namespace std;
class myData{
public:
int x,y;
myData(){};
};
//// MAIN //////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char* argv[])
{
// RAW POINTER
  /* This is a standard C++ pointer which has to be explicitly deleted,
   * unlike e.g. a stack-allocated class instance, which is destroyed automatically */
int* r_ptr0 = new int (5); // raw pointer (has to be deleted)
*r_ptr0 = 5;
delete r_ptr0;
// SMART POINTERS
/* See below: unique_prt, shared_ptr, weak_ptr, scoped_ptr
* When to use smart pointers: https://stackoverflow.com/questions/6675651/when-should-i-use-raw-pointers-over-smart-pointers
*/
// UNIQUE POINTER (introduced in C++11)
/* Properties:
* - prevents copying of its contained pointer
* - std::move can transfer ownership to another unique_ptr
* - Cannot be copied (copy constructor and assignment operators explicitly deleted)
* - ONLY ONE OWNER PER UNIQUE_PTR is allowed
*/
/* Create unique int-pointer */
unique_ptr<int> ptr0 = make_unique<int>();
  ptr0 = make_unique<int>(4); // reassign to a new int holding 4 (the old int is freed); *ptr0 = 4; modifies in place
int a1 = 1;
int *p1, *p2;
/* Store address of int a1 in pointer p1 works. Can be copied in pointer p2 */
p1 = &a1;
p2 = p1;
/* The address of ptr0 cannot be copied > error. But the data can be copied in int a1 */
// p2 = ptr0; // compile error
a1 = *ptr0;
printf("Value of c1: %d \n",*p2);
printf("Value of ptr0: %d \n", *ptr0);
/* Ownership of contained pointer (ptr0) can be moved to another unique_ptr (ptr1) */
unique_ptr<int> ptr1 = std::move(ptr0);
printf("Value of ptr1: %d \n", *ptr1);
/* unique_ptr can also be used for a class or array */
unique_ptr<myData> my_array = make_unique<myData>();
unique_ptr<int[]> my_int = make_unique<int[]>(5);
for (unsigned int iter_i = 1; iter_i < 5; iter_i++){
my_int[iter_i] = iter_i;
}
printf("Int-value %d \n",my_int[3]);
  /* Release ownership of ptr1 (ptr0 is empty after the std::move above).
   * unique_ptr gives up ownership of its raw pointer, which therefore WILL NOT be deleted
   * automatically; get() returns the raw pointer without giving up ownership */
  int* p = ptr1.release();
  delete p; // deletion required as ownership has been released to the raw pointer
// SHARED POINTER
/* Properties:
* - counting ownership of its contained pointer
* - will be destroyed when all copies of the shared_ptr have been destroyed
*/
int* val = new int (5); // necessary to use new to ensure that shared_ptr can free memory and is not done automatically
std::shared_ptr<int> s_ptr_do_not_do_this( val ); // operator new allocates dynamic memory ( http://www.cplusplus.com/doc/tutorial/dynamic/ )
std::shared_ptr<int> s_ptr_do_not_do_this2( val );
s_ptr_do_not_do_this.reset(); // s_ptr = nullptr; does the same
// now s_ptr_do_not_do_this2 is deleted !
// USE MAKE_SHARED ! [ https://www.youtube.com/watch?v=qUDAkDvoLas&spfreload=1 ]
  /* Note: A raw pointer (as val above) should not be created first and then linked to a
   * shared_ptr, as another shared_ptr may delete the object. Faster and safer is: */
std::shared_ptr<int> s_ptr0 = make_shared<int>(5);
std::shared_ptr<int> s_ptr1 = s_ptr0; //Both now own the memory.
*s_ptr0 = 10;
std::cout << *s_ptr1 << std::endl;
/* Caution: Not initializing with new results in double free or corruption error
* Since shared_ptr frees memory but p also frees memory. Initialize with NEW !
* int p[5];
* std::shared_ptr<int> a (p);
*/
/* Using custom deleters for shared_ptr */
shared_ptr<int> s_ptr2 (new int[3]); // s_ptr2 points to int[0], so int[1] and int[2] will NOT be deleted!
shared_ptr<int> s_ptr3 (new int[3],
[](int* p){delete[] p;}); // lambda function destructor of shared_ptr
/* Get raw pointer of shared_ptr */
int* raw_ptr = s_ptr2.get(); // ONLY do this if you know what you are doing ! unique_ptr has release() instead of get()
// WEAK POINTER
/* Properties:
* - copy of a shared_ptr
* - existence / destruction of weak_ptr have no effect on the shared_ptr
* - After copies of shared_ptr have been destroyed, all weak_ptr copies become empty.
*/
std::weak_ptr<int> w_ptr = s_ptr0; // s_ptr0 still owns the memory
  /* The weak pointer expires once all shared_ptr instances owning the value are reset */
s_ptr0.reset();
std::cout << "weak_pointer: " << std::endl << w_ptr.expired() << std::endl;
s_ptr1.reset();
std::cout << "weak_pointer: " << std::endl << w_ptr.expired() << std::endl;
// BOOST SCOPED POINTER (https://stackoverflow.com/questions/106508/what-is-a-smart-pointer-and-when-should-i-use-one)
/* Properties:
   * - Cannot be copied (copying would prevent it from being deleted correctly)
* - Pass by reference to other functions is possible
*/
int* bs_val = new int (111);
{
boost::scoped_ptr<int> bs_ptr(bs_val);
std::cout << "bs_val: " << *bs_val << std::endl;
  } /* boost::scoped_ptr goes out of scope --
     * the int pointed to by bs_val is destroyed */
// std::cout << "bs_val: " << *bs_val << std::endl; // undefined behavior, bs_value is 0 or random number
return 0;
}
//// FUNCTION DEFINITIONS //////////////////////////////////////////////////////////////////////////
|
{"hexsha": "258bcdef124a38c0b3badd3dacfdcb17e1735d9a", "size": 6286, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "cpp/cpp_stdlib_boost/src/cpp_ptr.cpp", "max_stars_repo_name": "maximilianharr/code_snippets", "max_stars_repo_head_hexsha": "8b271e6fa9174e24200e88be59e417abd5f2f59a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cpp/cpp_stdlib_boost/src/cpp_ptr.cpp", "max_issues_repo_name": "maximilianharr/code_snippets", "max_issues_repo_head_hexsha": "8b271e6fa9174e24200e88be59e417abd5f2f59a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cpp/cpp_stdlib_boost/src/cpp_ptr.cpp", "max_forks_repo_name": "maximilianharr/code_snippets", "max_forks_repo_head_hexsha": "8b271e6fa9174e24200e88be59e417abd5f2f59a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.7159090909, "max_line_length": 143, "alphanum_fraction": 0.6597200127, "num_tokens": 1656}
|
from src.utils.fft.fft import fft
from src.utils.fft.ifft import ifft
from tools import mirror, halve
import numpy as np
# Calculates the spectral derivative from x
def compute_spectral_derivative(x, dt, mirroring=True):
"""
x (DataFrame): State measurements
dt (Float): Sampling period
"""
    x = np.array(x)  # accept DataFrames and array-likes uniformly
if mirroring:
x = mirror(x)
omega, x_hat = fft(x, dt)
dxdt_hat = 1j * omega * x_hat # Fourier image of the derivative of x
dxdt = np.real(ifft(dxdt_hat))
if mirroring:
dxdt = halve(dxdt)
return dxdt
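# Minimal usage sketch (hypothetical signal; assumes fft(x, dt) returns
# (omega, x_hat) and ifft inverts it, as used above):
if __name__ == "__main__":
    dt = 1e-3
    t = np.arange(0, 1, dt)
    x = np.sin(2 * np.pi * 5 * t)
    dxdt = compute_spectral_derivative(x, dt)
    # away from the boundaries this should approximate 10*pi*cos(2*pi*5*t)
    print(np.max(np.abs(dxdt)))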
|
{"hexsha": "152e220279229708ff50df95a3ab02c8b2c06b02", "size": 632, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/utils/differentiation/spectral_derivative.py", "max_stars_repo_name": "BystrickyK/SINDy", "max_stars_repo_head_hexsha": "f5b887d230079ffd60eacfe0221b47d1c288342e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-12T18:22:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-12T18:22:54.000Z", "max_issues_repo_path": "src/utils/differentiation/spectral_derivative.py", "max_issues_repo_name": "BystrickyK/SINDy", "max_issues_repo_head_hexsha": "f5b887d230079ffd60eacfe0221b47d1c288342e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2021-03-03T14:34:41.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-13T14:03:08.000Z", "max_forks_repo_path": "src/utils/differentiation/spectral_derivative.py", "max_forks_repo_name": "BystrickyK/SINDy", "max_forks_repo_head_hexsha": "f5b887d230079ffd60eacfe0221b47d1c288342e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.7931034483, "max_line_length": 73, "alphanum_fraction": 0.6598101266, "include": true, "reason": "import numpy", "num_tokens": 169}
|
import os
import sys
import time
import math
import threading
from ctypes import *
from typing import List
import cv2
import numpy as np
import xir
import vart
from utils import preprocess_one_image_fn
from resnet_thread import ResNetThread
global THREAD_NUM
THREAD_NUM = 1
def get_child_subgraph_dpu(graph: "Graph") -> List["Subgraph"]:
""""Obtain DPU subgrah."""
assert graph is not None, "'graph' should not be None."
root_subgraph = graph.get_root_subgraph()
assert (
root_subgraph is not None
), "Failed to get root subgraph of input Graph object."
if root_subgraph.is_leaf:
return []
child_subgraphs = root_subgraph.toposort_child_subgraph()
assert child_subgraphs is not None and len(child_subgraphs) > 0
return [
cs
for cs in child_subgraphs
if cs.has_attr("device") and cs.get_attr("device").upper() == "DPU"
]
def main(argv):
    global THREAD_NUM
    image_dir = "./images/"
    # default model; can be overridden by the second CLI argument
    model_file = "/usr/share/vitis_ai_library/models/resnet50/resnet50.xmodel"
    if len(argv) >= 2:
        THREAD_NUM = int(argv[1])
    if len(argv) >= 3:
        model_file = argv[2]
# deserialized to the Graph object.
g = xir.Graph.deserialize(model_file)
# Get the subgraph that run in dpu
subgraphs = get_child_subgraph_dpu(g)
assert len(subgraphs) == 1 # only one DPU kernel
# Create DPU runner
all_dpu_runners = []
for i in range(int(THREAD_NUM)):
all_dpu_runners.append(vart.Runner.create_runner(subgraphs[0], "run"))
# input data
list_images = os.listdir(image_dir)
images = list(map(preprocess_one_image_fn,
[os.path.join(image_dir, image) for image in list_images]))
"""cnt variable
The cnt variable is used to control the number of times a single-thread DPU runs.
Users can modify the value according to actual needs. It is not recommended to use
too small number when there are few input images, for example:
1. If users can only provide very few images, e.g. only 1 image, they should set
a relatively large number such as 360 to measure the average performance;
2. If users provide a huge dataset, e.g. 50000 images in the directory, they can
use the variable to control the test time, and no need to run the whole dataset.
"""
cnt = 360
threads = []
time_start = time.time()
for i in range(THREAD_NUM):
threads.append(ResNetThread(
all_dpu_runners[i], images, cnt, f"thread_{i}"))
threads[i].start()
for thread in threads:
thread.join()
del all_dpu_runners
time_end = time.time()
time_total = time_end - time_start
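    # Throughput: each of the THREAD_NUM threads processes `cnt` frames,
    # so FPS = cnt * THREAD_NUM / elapsed wall-clock time.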
total_frames = cnt * THREAD_NUM
fps = float(total_frames / time_total)
print(f"FPS={fps:.2f}, \
total frames={total_frames:.2f}, \
time={time_total:.6f} seconds")
if __name__ == "__main__":
if len(sys.argv) != 3:
thread_num = 1
argv = []
if len(sys.argv) == 2:
thread_num = sys.argv[1]
argv = sys.argv
print("usage : python3 main.py <thread_number> <resnet50_xmodel_file>")
print(f"use case: \
\n\tthread_number={thread_num}, \
\n\tresnet50_xmodel_file=/usr/share/vitis_ai_library/models/resnet50/resnet50.xmodel")
main(argv)
else:
print(f"use case: \
\n\tthread_number={sys.argv[1]}, \
\n\tresnet50_xmodel_file={sys.argv[2]}")
main(sys.argv)
|
{"hexsha": "7e68195b768b639822012e0e845cbbe0e73a7128", "size": 3579, "ext": "py", "lang": "Python", "max_stars_repo_path": "AIoT/Vitis-AI/VART/example/resnet50_py/main.py", "max_stars_repo_name": "kaka-lin/ML-Notes", "max_stars_repo_head_hexsha": "047b88d59346b2ec719b1b3e2fcd605e1ccfaf91", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "AIoT/Vitis-AI/VART/example/resnet50_py/main.py", "max_issues_repo_name": "kaka-lin/ML-Notes", "max_issues_repo_head_hexsha": "047b88d59346b2ec719b1b3e2fcd605e1ccfaf91", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "AIoT/Vitis-AI/VART/example/resnet50_py/main.py", "max_forks_repo_name": "kaka-lin/ML-Notes", "max_forks_repo_head_hexsha": "047b88d59346b2ec719b1b3e2fcd605e1ccfaf91", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.0975609756, "max_line_length": 106, "alphanum_fraction": 0.6415199776, "include": true, "reason": "import numpy", "num_tokens": 907}
|
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Contains building blocks for normalization layers."""
import collections
from jax import lax
from jax import random
from jax.example_libraries import stax
import jax.numpy as np
from oryx.core import kwargs_util
from oryx.experimental.nn import base
__all__ = [
'BatchNorm',
]
class BatchNormParams(collections.namedtuple(
'BatchNormParams', ('beta', 'gamma'))):
pass
class BatchNormState(collections.namedtuple(
'BatchNormState',
('moving_mean', 'moving_var'))):
pass
class BatchNormInfo(collections.namedtuple(
'BatchNormInfo',
('axis', 'epsilon', 'center', 'scale', 'decay', 'shape'))):
pass
class BatchNorm(base.Layer):
"""Layer for Batch Normalization."""
@classmethod
def initialize(cls, key, in_spec, axis=(0, 1), momentum=0.99,
epsilon=1e-5, center=True, scale=True,
beta_init=stax.zeros, gamma_init=stax.ones):
in_shape = in_spec.shape
axis = (axis,) if np.isscalar(axis) else axis
decay = 1.0 - momentum
shape = tuple(d for i, d in enumerate(in_shape) if i not in axis)
moving_shape = tuple(1 if i in axis else d for i, d in enumerate(in_shape))
k1, k2, k3, k4 = random.split(key, 4)
beta = base.create_parameter(k1, shape, init=beta_init) if center else ()
gamma = base.create_parameter(k2, shape, init=gamma_init) if scale else ()
moving_mean = base.create_parameter(k3, moving_shape, init=stax.zeros)
moving_var = base.create_parameter(k4, moving_shape, init=stax.ones)
params = BatchNormParams(beta, gamma)
info = BatchNormInfo(axis, epsilon, center, scale, decay, in_shape)
state = BatchNormState(moving_mean, moving_var)
return base.LayerParams(params, info, state)
@classmethod
def spec(cls, in_spec, axis=(0, 1), momentum=0.99,
epsilon=1e-5, center=True, scale=True,
beta_init=stax.zeros, gamma_init=stax.ones):
return in_spec
def _call_and_update_batched(self, *args, has_rng=False, **kwargs):
if has_rng:
rng, args = args[0], args[1:]
kwargs = dict(kwargs, rng=rng)
call_kwargs = kwargs_util.filter_kwargs(self._call_batched, kwargs)
update_kwargs = kwargs_util.filter_kwargs(self._update_batched, kwargs)
layer = self.replace(state=lax.stop_gradient(self.state))
return (layer._call_batched(*args, **call_kwargs), # pylint: disable=protected-access
layer._update_batched(*args, **update_kwargs)) # pylint: disable=protected-access
def _call(self, x, training=True):
if len(x.shape) != len(self.info.shape):
raise ValueError('Need to `jax.vmap` in order to batch')
if training:
# BatchNorm on a single example while training=True is a no-op
# The tracer will pass through this and hand off to _call_batched
return x
return self._call_batched(x[np.newaxis], training=False)[0]
def _call_batched(self, x, training=True):
params, info, state = self.params, self.info, self.state
beta, gamma = params.beta, params.gamma
axis = (0,) + tuple(a + 1 for a in info.axis)
epsilon, center, scale = info.epsilon, info.center, info.scale
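    # `ed` is a broadcast index: np.newaxis (None) on the normalised axes,
    # full slices elsewhere, so beta/gamma broadcast correctly against x.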
ed = tuple(None if i in axis else slice(None) for i in range(np.ndim(x)))
if center:
beta = beta[ed]
if scale:
gamma = gamma[ed]
if training:
mean = np.mean(x, axis, keepdims=True)
var = np.mean(x**2, axis, keepdims=True) - mean**2
else:
mean, var = state.moving_mean, state.moving_var
z = (x - mean) / np.sqrt(var + epsilon)
if center and scale:
output = gamma * z + beta
elif center:
output = z + beta
elif scale:
output = gamma * z
else:
output = z
return output
def _update(self, x):
return self._update_axis(x, self.info.axis)
def _update_batched(self, x):
axis = self.info.axis
axis_diff = np.ndim(x) - len(self.info.shape)
axis = tuple(range(axis_diff)) + tuple(a + axis_diff for a in axis)
return self._update_axis(x, axis)
def _update_axis(self, x, axis):
info, state = self.info, self.state
decay = info.decay
mean = np.mean(x, axis, keepdims=True)
var = np.mean(x**2, axis, keepdims=True) - mean**2
mean, var = mean[0], var[0]
moving_mean, moving_var = state.moving_mean, state.moving_var
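    # Exponential moving average: new = old - (old - batch) * decay,
    # i.e. (1 - decay) * old + decay * batch, with decay = 1 - momentum.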
moving_mean -= (moving_mean - mean) * decay
moving_var -= (moving_var - var) * decay
new_state = BatchNormState(moving_mean, moving_var)
return self.replace(state=new_state)
|
{"hexsha": "640aeb7eada9945fd631e8ad6e015939b0ec3eb1", "size": 5146, "ext": "py", "lang": "Python", "max_stars_repo_path": "spinoffs/oryx/oryx/experimental/nn/normalization.py", "max_stars_repo_name": "jakee417/probability-1", "max_stars_repo_head_hexsha": "ae7117f37ac441bc7a888167ea23e5e620c5bcde", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3670, "max_stars_repo_stars_event_min_datetime": "2018-02-14T03:29:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T01:19:52.000Z", "max_issues_repo_path": "spinoffs/oryx/oryx/experimental/nn/normalization.py", "max_issues_repo_name": "jakee417/probability-1", "max_issues_repo_head_hexsha": "ae7117f37ac441bc7a888167ea23e5e620c5bcde", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1395, "max_issues_repo_issues_event_min_datetime": "2018-02-24T02:28:49.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T16:12:06.000Z", "max_forks_repo_path": "spinoffs/oryx/oryx/experimental/nn/normalization.py", "max_forks_repo_name": "jakee417/probability-1", "max_forks_repo_head_hexsha": "ae7117f37ac441bc7a888167ea23e5e620c5bcde", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1135, "max_forks_repo_forks_event_min_datetime": "2018-02-14T01:51:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T02:24:11.000Z", "avg_line_length": 35.7361111111, "max_line_length": 94, "alphanum_fraction": 0.6737271667, "include": true, "reason": "import jax,from jax", "num_tokens": 1337}
|
/* LOOT
A load order optimisation tool for
Morrowind, Oblivion, Skyrim, Skyrim Special Edition, Skyrim VR,
Fallout 3, Fallout: New Vegas, Fallout 4 and Fallout 4 VR.
Copyright (C) 2014 WrinklyNinja
This file is part of LOOT.
LOOT is free software: you can redistribute
it and/or modify it under the terms of the GNU General Public License
as published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
LOOT is distributed in the hope that it will
be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with LOOT. If not, see
<https://www.gnu.org/licenses/>.
*/
#include "gui/state/loot_state.h"
#include <unordered_set>
#ifdef _WIN32
#ifndef UNICODE
#define UNICODE
#endif
#ifndef _UNICODE
#define _UNICODE
#endif
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#endif
#include <spdlog/sinks/basic_file_sink.h>
#include <boost/algorithm/string.hpp>
#include <boost/format.hpp>
#include <boost/locale.hpp>
#include "gui/helpers.h"
#include "gui/state/game/game_detection_error.h"
#include "gui/state/logging.h"
#include "gui/state/loot_paths.h"
#include "gui/version.h"
#include "loot/api.h"
using boost::format;
using boost::locale::translate;
using std::exception;
using std::locale;
using std::lock_guard;
using std::mutex;
using std::string;
using std::vector;
namespace fs = std::filesystem;
namespace loot {
void apiLogCallback(LogLevel level, const char* message) {
auto logger = getLogger();
if (!logger) {
return;
}
switch (level) {
case LogLevel::trace:
logger->trace(message);
break;
case LogLevel::debug:
logger->debug(message);
break;
case LogLevel::info:
logger->info(message);
break;
case LogLevel::warning:
logger->warn(message);
break;
case LogLevel::error:
logger->error(message);
break;
case LogLevel::fatal:
logger->critical(message);
break;
default:
logger->trace(message);
break;
}
}
LootState::LootState(const std::filesystem::path& lootAppPath,
const std::filesystem::path& lootDataPath) :
LootPaths(lootAppPath, lootDataPath) {}
void LootState::init(const std::string& cmdLineGame, bool autoSort) {
if (autoSort && cmdLineGame.empty()) {
initErrors_.push_back(translate(
"Error: --auto-sort was passed but no --game parameter was provided."));
} else {
setAutoSort(autoSort);
}
// Do some preliminary locale / UTF-8 support setup here, in case the settings
// file reading requires it.
// Boost.Locale initialisation: Specify location of language dictionaries.
boost::locale::generator gen;
gen.add_messages_path(LootPaths::getL10nPath().u8string());
gen.add_messages_domain("loot");
// Boost.Locale initialisation: Generate and imbue locales.
locale::global(gen("en.UTF-8"));
// Check if the LOOT local app data folder exists, and create it if not.
if (!fs::exists(LootPaths::getLootDataPath())) {
try {
fs::create_directory(LootPaths::getLootDataPath());
} catch (exception& e) {
initErrors_.push_back(
(format(
translate("Error: Could not create LOOT settings file. %1%")) %
e.what())
.str());
}
}
// Initialise logging.
fs::remove(LootPaths::getLogPath());
setLogPath(LootPaths::getLogPath());
SetLoggingCallback(apiLogCallback);
// Load settings.
if (fs::exists(LootPaths::getSettingsPath())) {
try {
LootSettings::load(LootPaths::getSettingsPath(), LootPaths::getLootDataPath());
} catch (exception& e) {
initErrors_.push_back(
(format(translate("Error: Settings parsing failed. %1%")) % e.what())
.str());
}
}
// Apply debug logging settings.
enableDebugLogging(isDebugLoggingEnabled());
// Log some useful info.
auto logger = getLogger();
if (logger) {
logger->info(
"LOOT Version: {}+{}", gui::Version::string(), gui::Version::revision);
logger->info("LOOT API Version: {}+{}",
LootVersion::GetVersionString(),
LootVersion::revision);
}
#ifdef _WIN32
// Check if LOOT is being run through Mod Organiser.
bool runFromMO = GetModuleHandle(ToWinWide("hook.dll").c_str()) != NULL;
if (runFromMO && logger) {
logger->info("LOOT is being run through Mod Organiser.");
}
#endif
// Now that settings have been loaded, set the locale again to handle
// translations.
if (getLanguage() != MessageContent::defaultLanguage) {
if (logger) {
logger->debug("Initialising language settings.");
logger->debug("Selected language: {}", getLanguage());
}
// Boost.Locale initialisation: Generate and imbue locales.
locale::global(gen(getLanguage() + ".UTF-8"));
}
// Detect games & select startup game
//-----------------------------------
// Detect installed games.
if (logger) {
logger->debug("Detecting installed games.");
}
LoadInstalledGames(getGameSettings(), LootPaths::getLootDataPath());
try {
SetInitialGame(cmdLineGame);
if (logger) {
logger->debug("Game selected is {}", GetCurrentGame().Name());
}
} catch (std::exception& e) {
if (logger) {
logger->error("Game-specific settings could not be initialised: {}",
e.what());
}
initErrors_.push_back(
(format(translate(
"Error: Game-specific settings could not be initialised. %1%")) %
e.what())
.str());
}
}
const std::vector<std::string>& LootState::getInitErrors() const {
return initErrors_;
}
void LootState::save(const std::filesystem::path& file) {
try {
storeLastGame(GetCurrentGame().FolderName());
} catch (std::runtime_error& e) {
auto logger = getLogger();
if (logger) {
logger->error("Couldn't set last game: {}", e.what());
}
}
updateLastVersion();
LootSettings::save(file);
}
std::optional<std::filesystem::path> LootState::FindGamePath(const GameSettings& gameSettings) const {
return gameSettings.FindGamePath();
}
void LootState::InitialiseGameData(gui::Game& game) {
game.Init();
}
void LootState::SetInitialGame(std::string preferredGame) {
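  // Precedence: the game given on the command line, then the game set in the
  // settings, then the last game used, then the first installed game found.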
if (preferredGame.empty()) {
// Get preferred game from settings.
if (getGame() != "auto")
preferredGame = getGame();
else if (getLastGame() != "auto")
preferredGame = getLastGame();
}
if (!preferredGame.empty()) {
SetCurrentGame(preferredGame);
return;
}
auto firstInstalledGame = GetFirstInstalledGameFolderName();
if (!firstInstalledGame.has_value()) {
// No games installed, throw an exception.
throw GameDetectionError("None of the supported games were detected.");
}
SetCurrentGame(firstInstalledGame.value());
}
void LootState::storeGameSettings(std::vector<GameSettings> gameSettings) {
lock_guard<mutex> guard(mutex_);
gameSettings = LoadInstalledGames(gameSettings, LootPaths::getLootDataPath());
LootSettings::storeGameSettings(gameSettings);
}
}
|
{"hexsha": "b05d61ab13fd7b83b02c5adb2aca1071efffa183", "size": 7332, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "examples/shared/src/gui/state/loot_state.cpp", "max_stars_repo_name": "haifengkao/HugeLoot", "max_stars_repo_head_hexsha": "ef30c828d4fffc55a1fdedebaea8cb843f4e32cb", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/shared/src/gui/state/loot_state.cpp", "max_issues_repo_name": "haifengkao/HugeLoot", "max_issues_repo_head_hexsha": "ef30c828d4fffc55a1fdedebaea8cb843f4e32cb", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/shared/src/gui/state/loot_state.cpp", "max_forks_repo_name": "haifengkao/HugeLoot", "max_forks_repo_head_hexsha": "ef30c828d4fffc55a1fdedebaea8cb843f4e32cb", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.7727272727, "max_line_length": 102, "alphanum_fraction": 0.663938898, "num_tokens": 1773}
|
import numpy as np
def sum_product_p(uscores, bscores, umargs, bmargs):
"""Apply the sum-product algorithm on a chain
:param uscores: array T*K, (unary) scores on individual nodes
:param bscores: array (T-1)*K*K, (binary) scores on the edges
:return: log-marginals on nodes, log-marginals on edges, log-partition
"""
def logsumexp(arr, axis=None):
themax = np.max(arr, axis=axis, keepdims=True)
out = np.sum(np.exp(arr - themax), axis=axis)
out = themax.flatten() + np.log(out)
return out
    # The messages are tracked in the log domain instead of directly;
    # this is more stable numerically.
length, nb_class = uscores.shape
if length == 1:
log_partition = logsumexp(uscores[0])
umargs[0] = uscores[0] - log_partition
        # A single-node chain has no edges, so there are no binary marginals.
        return
bm = np.zeros([length - 1, nb_class]) # backward_messages
fm = np.zeros([length - 1, nb_class]) # forward_messages
# backward pass
bm[-1] = logsumexp(bscores[-1] + uscores[-1], axis=-1)
for t in range(length - 3, -1, -1):
bm[t] = logsumexp(bscores[t] + uscores[t + 1] + bm[t + 1], axis=-1)
# we compute the log-partition and include it in the forward messages
log_partition = logsumexp(bm[0] + uscores[0])
# forward pass
fm[0] = logsumexp(bscores[0].T + uscores[0] - log_partition, axis=-1)
for t in range(1, length - 1):
fm[t] = logsumexp(bscores[t].T + uscores[t] + fm[t - 1], axis=-1)
# unary marginals
# umargs = np.empty([length, nb_class])
umargs[0] = uscores[0] + bm[0] - log_partition
umargs[-1] = fm[-1] + uscores[-1]
for t in range(1, length - 1):
umargs[t] = fm[t - 1] + uscores[t] + bm[t]
# binary marginals
# bmargs = np.empty([length - 1, nb_class, nb_class])
if length == 2:
bmargs[0] = uscores[0, :, np.newaxis] + bscores[0] + uscores[1] - log_partition
else:
bmargs[0] = uscores[0, :, np.newaxis] + bscores[0] + uscores[1] + bm[1] - log_partition
bmargs[-1] = fm[-2, :, np.newaxis] + uscores[-2, :, np.newaxis] + bscores[-1] + uscores[-1]
for t in range(1, length - 2):
bmargs[t] = fm[t - 1, :, np.newaxis] + uscores[t, :, np.newaxis] + bscores[t] + \
uscores[t + 1] + bm[t + 1]
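# --- Illustrative usage sketch; not part of the original module. ---
# umargs and bmargs are preallocated buffers that the routine fills in place;
# exponentiating the node log-marginals should give rows that sum to one.
def _demo_sum_product():
    rng = np.random.RandomState(0)
    T, K = 5, 3
    uscores = rng.randn(T, K)
    bscores = rng.randn(T - 1, K, K)
    umargs = np.empty((T, K))
    bmargs = np.empty((T - 1, K, K))
    sum_product_p(uscores, bscores, umargs, bmargs)
    assert np.allclose(np.exp(umargs).sum(axis=1), 1.0)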
def viterbi_p(score, trans_score, path):
"""First-order Viterbi algorithm.
Parameters
----------
score : array, shape = (n_samples, n_states)
Scores per sample/class combination; in a linear model, X * w.T.
May be overwritten.
    trans_score : array, shape = (n_states, n_states)
        Scores per transition combination, indexed as trans_score[from, to].
    path : array, shape = (n_samples,), dtype = intc
        Output array; receives the decoded most-likely state sequence.
References
----------
L. R. Rabiner (1989). A tutorial on hidden Markov models and selected
applications in speech recognition. Proc. IEEE 77(2):257-286.
"""
n_samples, n_states = score.shape[0], score.shape[1]
backp = np.zeros((n_samples, n_states), dtype=np.intc)
# Forward recursion. score is reused as the DP table.
for i in range(1, n_samples):
for k in range(n_states):
maxind = 0
maxval = -np.inf
for j in range(n_states):
candidate = score[i - 1, j] + score[i, k] + trans_score[j, k]
if candidate > maxval:
maxind = j
maxval = candidate
score[i, k] = maxval
backp[i, k] = maxind
# Path backtracking
# path = np.empty(n_samples, dtype=np.intc)
themax = score[n_samples - 1, 0]
# path[n_samples - 1] = score[n_samples - 1, :].argmax()
path[n_samples - 1] = 0
# compute the argmax
for i in range(n_states):
if themax < score[n_samples - 1, i]:
path[n_samples - 1] = i
themax = score[n_samples - 1, i]
for i in range(n_samples - 2, -1, -1):
path[i] = backp[i + 1, path[i + 1]]
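# --- Illustrative usage sketch; not part of the original module. ---
# `score` doubles as the DP table and is overwritten; `path` receives the
# decoded state sequence in place, so both must be writable arrays.
def _demo_viterbi():
    rng = np.random.RandomState(0)
    n_samples, n_states = 6, 3
    score = rng.randn(n_samples, n_states)
    trans_score = rng.randn(n_states, n_states)
    path = np.empty(n_samples, dtype=np.intc)
    viterbi_p(score, trans_score, path)
    assert all(0 <= s < n_states for s in path)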
|
{"hexsha": "c2de769f95247bfc8965bbfc3f4537849880d78c", "size": 4139, "ext": "py", "lang": "Python", "max_stars_repo_path": "struntho/inference/sum_product_chain.py", "max_stars_repo_name": "alexnowakvila/maxminloss", "max_stars_repo_head_hexsha": "15c45da5b8c4c214ba2aa596931aff998e3f1c92", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-07-28T12:13:50.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-06T10:35:10.000Z", "max_issues_repo_path": "struntho/inference/sum_product_chain.py", "max_issues_repo_name": "alexnowakvila/maxminloss", "max_issues_repo_head_hexsha": "15c45da5b8c4c214ba2aa596931aff998e3f1c92", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-07-12T15:10:19.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-12T15:10:19.000Z", "max_forks_repo_path": "struntho/inference/sum_product_chain.py", "max_forks_repo_name": "alexnowakvila/maxminloss", "max_forks_repo_head_hexsha": "15c45da5b8c4c214ba2aa596931aff998e3f1c92", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-10-05T16:48:13.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-04T13:59:24.000Z", "avg_line_length": 36.6283185841, "max_line_length": 99, "alphanum_fraction": 0.5750181203, "include": true, "reason": "import numpy", "num_tokens": 1258}
|
# -*- coding: utf-8 -*-
__author__ = "Konstantin Klementiev"
__date__ = "1 Oct 2015"
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage
def plot_NOM_2D(fname):
xL, yL, zL = np.loadtxt(fname+'.dat', unpack=True)
nX = (yL == yL[0]).sum()
nY = (xL == xL[0]).sum()
x = xL[:nX]
y = yL[::nX]
print(nX, nY)
z = zL.reshape((nY, nX))
zmax = abs(z).max()
print(z.shape)
# print(zmax)
fig = plt.figure(figsize=(16, 8))
rect_2D = [0.1, 0.1, 0.72, 0.6]
rect_1Dx = [0.1, 0.72, 0.72, 0.26]
rect_1Dy = [0.83, 0.1, 0.13, 0.6]
extent = [x[0], x[-1], y[0], y[-1]]
ax2D = plt.axes(rect_2D)
ax2D.set_xlabel('x (mm)')
ax2D.set_ylabel('y (mm)')
ax2D.imshow(
z, aspect='auto', cmap='jet', extent=extent,
# interpolation='nearest',
interpolation='none',
origin='lower', figure=fig)
ax1Dx = plt.axes(rect_1Dx, sharex=ax2D)
ax1Dy = plt.axes(rect_1Dy, sharey=ax2D)
ax1Dx.set_ylabel('h (nm)')
ax1Dy.set_xlabel('h (nm)')
plt.setp(ax1Dx.get_xticklabels() + ax1Dy.get_yticklabels(),
visible=False)
# ax1Dx.plot(x, x*0, 'gray')
kl, = ax1Dx.plot(x, z.sum(axis=0)/nY, 'k')
ax1Dx.plot(x, z[0, :], 'r')
ax1Dx.plot(x, z[nY//2, :], 'g')
ax1Dx.plot(x, z[nY-1, :], 'b')
ax1Dx.legend([kl], ['average over y'], loc='upper left', frameon=False)
# ax1Dy.plot(y*0, y, 'gray')
ax1Dy.plot(z.sum(axis=1)/nX, y, 'k')
ax1Dy.plot(z[:, 0], y, 'y')
ax1Dy.plot(z[:, nX//2], y, 'c')
ax1Dy.plot(z[:, nX-1], y, 'm')
ax2D.set_xlim(extent[0], extent[1])
ax2D.set_ylim(extent[2], extent[3])
ax1Dx.set_ylim(-zmax, zmax)
ax1Dy.set_xlim(-zmax, zmax)
ax2D.annotate('', (0, 0), (-0.03, 0), size=10,
xycoords="axes fraction",
arrowprops=dict(alpha=1, fc='r', ec='r', headwidth=10,
frac=0.4))
ax2D.annotate('', (0, 0.5), (-0.03, 0.5), size=10,
xycoords="axes fraction",
arrowprops=dict(alpha=1, fc='g', ec='g', headwidth=10,
frac=0.4))
ax2D.annotate('', (0, 1), (-0.03, 1), size=10,
xycoords="axes fraction",
arrowprops=dict(alpha=1, fc='b', ec='b', headwidth=10,
frac=0.4))
ax2D.annotate('', (0, 0), (0, -0.06), size=10,
xycoords="axes fraction",
arrowprops=dict(alpha=1, fc='y', ec='y', headwidth=10,
frac=0.4))
ax2D.annotate('', (0.5, 0), (0.5, -0.06), size=10,
xycoords="axes fraction",
arrowprops=dict(alpha=1, fc='c', ec='c', headwidth=10,
frac=0.4))
ax2D.annotate('', (1, 0), (1, -0.06), size=10,
xycoords="axes fraction",
arrowprops=dict(alpha=1, fc='m', ec='m', headwidth=10,
frac=0.4))
b, a = np.gradient(z)
dx = x[1] - x[0]
dy = y[1] - y[0]
a /= dx
b /= dy
rmsA = ((a**2).sum() / (nX * nY))**0.5
rmsB = ((b**2).sum() / (nX * nY))**0.5
aveZ = z.sum() / (nX * nY)
fig.text(0.91, 0.95,
u'rms slope errors:\ndz/dx = {0:.2f} µrad\n'
u'dz/dy = {1:.2f} µrad\n\n'
u'mean figure error:\n<z> = {2:.2f} pm\n'
.format(rmsA, rmsB, aveZ*1e3),
transform=fig.transFigure, size=12, color='r', ha='center',
va='top')
    # This is the way the surface is used in ray-tracing/wave-propagation:
    # the height and the directions are spline-interpolated (the spline
    # coefficients are pre-calculated) and then the piece-wise spline
    # polynomials are used to reconstruct the height and the two directions
    # at arbitrary (x, y) points.
splineZ = ndimage.spline_filter(z.T)
splineA = ndimage.spline_filter(a.T)
splineB = ndimage.spline_filter(b.T)
    nrays = 1000  # must be an int: used as the size argument of np.random.uniform
xnew = np.random.uniform(x[0], x[-1], nrays)
ynew = np.random.uniform(y[0], y[-1], nrays)
coords = np.array([(xnew-x[0]) / (x[-1]-x[0]) * (nX-1),
(ynew-y[0]) / (y[-1]-y[0]) * (nY-1)])
znew = ndimage.map_coordinates(splineZ, coords, prefilter=True)
anew = ndimage.map_coordinates(splineA, coords, prefilter=True)
bnew = ndimage.map_coordinates(splineB, coords, prefilter=True)
ax2D.scatter(xnew, ynew, c=znew, marker='o', color='gray', s=50,
cmap='jet')
ax2D.quiver(xnew, ynew, -anew, -bnew, edgecolor='gray', color='gray',
# headaxislength=5,
scale=200, lw=0.2)
fig.savefig(fname+'.png')
plt.show()
def plot_NOM_3D(fname):
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
xL, yL, zL = np.loadtxt(fname+'.dat', unpack=True)
nX = (yL == yL[0]).sum()
nY = (xL == xL[0]).sum()
x = xL.reshape((nY, nX))
y = yL.reshape((nY, nX))
z = zL.reshape((nY, nX))
x1D = xL[:nX]
y1D = yL[::nX]
# z += z[::-1, :]
zmax = abs(z).max()
fig = plt.figure()
ax = fig.gca(projection='3d')
surf = ax.plot_surface(x, y, z, rstride=1, cstride=1, cmap=cm.coolwarm,
linewidth=0, antialiased=False, alpha=0.5)
ax.set_zlim(-zmax, zmax)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
fig.colorbar(surf, shrink=0.5, aspect=5)
splineZ = ndimage.spline_filter(z.T)
    nrays = 1000  # must be an int: used as the size argument of np.random.uniform
xnew = np.random.uniform(x1D[0], x1D[-1], nrays)
ynew = np.random.uniform(y1D[0], y1D[-1], nrays)
coords = np.array([(xnew-x1D[0]) / (x1D[-1]-x1D[0]) * (nX-1),
(ynew-y1D[0]) / (y1D[-1]-y1D[0]) * (nY-1)])
znew = ndimage.map_coordinates(splineZ, coords, prefilter=True)
ax.scatter(xnew, ynew, znew, c=znew, marker='o', color='gray', s=50,
cmap=cm.coolwarm)
fig.savefig(fname+'_3d.png')
plt.show()
if __name__ == '__main__':
fname = 'mock_surface'
plot_NOM_2D(fname)
# plot_NOM_3D(fname)
|
{"hexsha": "c4fb74084e7d2299ee994e7017e3deeae468633a", "size": 6163, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/raycing/read_NOM_maps.py", "max_stars_repo_name": "adinatan/xrt", "max_stars_repo_head_hexsha": "75b884c0cba7e1aac15b30f2d0d803597328a208", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 71, "max_stars_repo_stars_event_min_datetime": "2016-07-04T06:40:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T10:03:07.000Z", "max_issues_repo_path": "tests/raycing/read_NOM_maps.py", "max_issues_repo_name": "adinatan/xrt", "max_issues_repo_head_hexsha": "75b884c0cba7e1aac15b30f2d0d803597328a208", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 99, "max_issues_repo_issues_event_min_datetime": "2016-07-10T16:39:05.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T07:50:10.000Z", "max_forks_repo_path": "tests/raycing/read_NOM_maps.py", "max_forks_repo_name": "adinatan/xrt", "max_forks_repo_head_hexsha": "75b884c0cba7e1aac15b30f2d0d803597328a208", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 30, "max_forks_repo_forks_event_min_datetime": "2016-07-08T17:12:30.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T06:54:21.000Z", "avg_line_length": 35.2171428571, "max_line_length": 79, "alphanum_fraction": 0.5365893234, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2161}
|
module MechGluecode
using Requires
export value, ODE_DEFAULT_NORM, UNITLESS_ABS2, Unitfu, norm
function __init__()
@require MechanicalUnits = "e6be9192-89dc-11e9-36e6-5dbcb28f419e" begin
@require Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" begin
@info "Plots => using MechGluePlots"
@eval using MechGluePlots
end
@require Interpolations = "a98d9a8b-a2ab-59e6-89dd-64a1c18fca59" begin
@info "Interpolations => using MechGlueInterpolations"
@eval using MechGlueInterpolations
end
@require ModelingToolkit = "961ee093-0014-501f-94e3-6117800e7a78" begin
#@info "ModelingToolkit => using MechGlueModelingToolkit"
#@eval using MechGlueModelingToolkit
end
@require RecursiveArrayTools = "731186ca-8d62-57ce-b412-fbd966d074cd" begin
# @info "RecursiveArrayTools => using MechGlueRecursiveArrayTools"
# @eval using MechGlueDiffEqBase
# @info "RecursiveArrayTools => using MechGlueRecursiveArrayTools"
# @eval using MechGlueRecursiveArrayTools
end
@require DiffEqBase = "2b5f629d-d688-5b77-993f-72d75c75574e" begin
@info "DiffEqBase => using MechGlueDiffEqBase"
@eval using MechGlueDiffEqBase
end
end
@info "MechGluecode init"
end
end
|
{"hexsha": "43819f0c49777cca7d12347fce949c1c747c74db", "size": 1363, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/MechGluecode.jl", "max_stars_repo_name": "hustf/MechGluecode.jl", "max_stars_repo_head_hexsha": "e631d09f13428c70d46d2a3b903e1c6936b30c43", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/MechGluecode.jl", "max_issues_repo_name": "hustf/MechGluecode.jl", "max_issues_repo_head_hexsha": "e631d09f13428c70d46d2a3b903e1c6936b30c43", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/MechGluecode.jl", "max_forks_repo_name": "hustf/MechGluecode.jl", "max_forks_repo_head_hexsha": "e631d09f13428c70d46d2a3b903e1c6936b30c43", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.303030303, "max_line_length": 83, "alphanum_fraction": 0.677916361, "num_tokens": 428}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatibility boilerplate
import os
import pytest
import numpy as np
from matrixprofile.algorithms.stomp import stomp
def test_stomp_window_size_less_than_4():
ts = np.array([1, 2, 3, 4, 5, 6, 7, 8])
w = 2
with pytest.raises(ValueError) as excinfo:
stomp(ts, w)
assert 'window size must be at least 4.' in str(excinfo.value)
def test_stomp_window_size_too_small():
ts = np.array([1, 2, 3, 4, 5, 6, 7, 8])
w = 8
with pytest.raises(ValueError) as excinfo:
stomp(ts, w)
assert 'Time series is too short' in str(excinfo.value)
def test_stomp_small_series_self_join_single_threaded():
ts = np.array([0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0])
w = 4
desired = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])
desired_pi = np.array([4, 5, 6, 7, 0, 1, 2, 3, 0])
desired_lmp = np.array([np.inf, np.inf, np.inf, 4, 2.82842712, 0, 0, 0, 0])
desired_lpi = np.array([0, 0, 0, 1, 1, 1, 2, 3, 4])
desired_rmp = np.array([0, 0, 0, 0, 0, 2.82842712, np.inf, np.inf, np.inf])
desired_rpi = np.array([4, 5, 6, 7, 8, 8, 0, 0, 0])
profile = stomp(ts, w, n_jobs=1)
np.testing.assert_almost_equal(profile['mp'], desired)
np.testing.assert_almost_equal(profile['pi'], desired_pi)
np.testing.assert_almost_equal(profile['lmp'], desired_lmp)
np.testing.assert_almost_equal(profile['lpi'], desired_lpi)
np.testing.assert_almost_equal(profile['rmp'], desired_rmp)
np.testing.assert_almost_equal(profile['rpi'], desired_rpi)
def test_stomp_small_series_self_join_multi_threaded():
ts = np.array([0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0])
w = 4
desired = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])
desired_pi = np.array([4, 5, 6, 7, 0, 1, 2, 3, 0])
desired_lmp = np.array([np.inf, np.inf, np.inf, 4, 2.82842712, 0, 0, 0, 0])
desired_lpi = np.array([0, 0, 0, 1, 1, 1, 2, 3, 4])
desired_rmp = np.array([0, 0, 0, 0, 0, 2.82842712, np.inf, np.inf, np.inf])
desired_rpi = np.array([4, 5, 6, 7, 8, 8, 0, 0, 0])
profile = stomp(ts, w, n_jobs=-1)
np.testing.assert_almost_equal(profile['mp'], desired)
np.testing.assert_almost_equal(profile['pi'], desired_pi)
np.testing.assert_almost_equal(profile['lmp'], desired_lmp)
np.testing.assert_almost_equal(profile['lpi'], desired_lpi)
np.testing.assert_almost_equal(profile['rmp'], desired_rmp)
np.testing.assert_almost_equal(profile['rpi'], desired_rpi)
|
{"hexsha": "24abe958f075c7152dee35a73299f3c67f437846", "size": 2692, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_stomp.py", "max_stars_repo_name": "KSaiRahul21/matrixprofile", "max_stars_repo_head_hexsha": "d8250e30d90ed0453bb7c35bb34ab0c04ae7b334", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_stomp.py", "max_issues_repo_name": "KSaiRahul21/matrixprofile", "max_issues_repo_head_hexsha": "d8250e30d90ed0453bb7c35bb34ab0c04ae7b334", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_stomp.py", "max_forks_repo_name": "KSaiRahul21/matrixprofile", "max_forks_repo_head_hexsha": "d8250e30d90ed0453bb7c35bb34ab0c04ae7b334", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-04-10T19:15:17.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-10T19:15:17.000Z", "avg_line_length": 32.4337349398, "max_line_length": 79, "alphanum_fraction": 0.6500742942, "include": true, "reason": "import numpy", "num_tokens": 985}
|
"""analyze.py: Runs the FF-trEFM Analysis for a set of given files."""
__author__ = "Rajiv Giridharagopal"
__copyright__ = "Copyright 2019, Ginger Lab"
__maintainer__ = "Rajiv Giridharagopal"
__email__ = "rgiri@uw.edu"
__status__ = "Development"
import os
import sys
import time
import multiprocessing
import logging
import argparse as ap
import numpy as np
import ffta.line as line
from .pixel_utils import load
from .pixel_utils import badpixels
# Plotting imports
import matplotlib as mpl
# mpl.use('WxAgg')
from matplotlib import pyplot as plt
from matplotlib import gridspec as gs
def process_line(args):
"""
Wrapper function for line class, used in parallel processing.
    :param args: tuple (signal_file, params, n_pixels) for a single line of data
    :type args: tuple
    :returns: tuple (tfp, shift)
        WHERE
        numpy.ndarray tfp is the time-to-first-peak value for each pixel in the line
        numpy.ndarray shift is the shift value for each pixel in the line
"""
signal_file, params, n_pixels = args
signal_array = load.signal(signal_file)
line_inst = line.Line(signal_array, params, n_pixels)
tfp, shift, _ = line_inst.analyze()
return tfp, shift
def main(argv=None):
"""
Main function of the executable file.
    :param argv: command-line arguments; defaults to sys.argv[1:] when None
    :type argv: list of str
"""
logging.basicConfig(filename='error.log', level=logging.INFO)
# Get the CPU count to display in help.
cpu_count = multiprocessing.cpu_count()
if argv is None:
argv = sys.argv[1:]
# Parse arguments from the command line, and print out help.
parser = ap.ArgumentParser(description='Analysis software for FF-trEFM')
parser.add_argument('path', nargs='?', default=os.getcwd(),
help='path to directory')
    parser.add_argument('-p', help='parallel computing option should be '
'followed by the number of CPUs.', type=int,
choices=range(2, cpu_count + 1))
parser.add_argument('-v', action='version',
version='FFtr-EFM 2.0 Release Candidate')
args = parser.parse_args(argv)
# Scan the path for .ibw and .cfg files.
path = args.path
filelist = os.listdir(path)
data_files = [os.path.join(path, name)
for name in filelist if name[-3:] == 'ibw']
config_file = [os.path.join(path, name)
for name in filelist if name[-3:] == 'cfg'][0]
# Load parameters from .cfg file.
n_pixels, parameters = load.configuration(config_file)
print('Recombination: ', parameters['recombination'])
if 'phase_fitting' in parameters:
print('Phase fitting: ', parameters['phase_fitting'])
print('ROI: ', parameters['roi'])
if not args.p:
# Initialize arrays.
tfp = np.zeros((len(data_files), n_pixels))
shift = np.zeros((len(data_files), n_pixels))
# Initialize plotting.
plt.ion()
fig = plt.figure(figsize=(12, 6), tight_layout=True)
grid = gs.GridSpec(1, 2)
tfp_ax = plt.subplot(grid[0, 0])
shift_ax = plt.subplot(grid[0, 1])
plt.setp(tfp_ax.get_xticklabels(), visible=False)
plt.setp(tfp_ax.get_yticklabels(), visible=False)
plt.setp(shift_ax.get_xticklabels(), visible=False)
plt.setp(shift_ax.get_yticklabels(), visible=False)
tfp_ax.set_title('tFP Image')
shift_ax.set_title('Shift Image')
kwargs = {'origin': 'lower', 'aspect': 'equal'}
tfp_image = tfp_ax.imshow(tfp * 1e6, cmap='afmhot', **kwargs)
shift_image = shift_ax.imshow(shift, cmap='cubehelix', **kwargs)
text = plt.figtext(0.4, 0.1, '')
plt.show()
# Load every file in the file list one by one.
for i, data_file in enumerate(data_files):
signal_array = load.signal(data_file)
line_inst = line.Line(signal_array, parameters, n_pixels)
tfp[i, :], shift[i, :], _ = line_inst.analyze()
# line_inst = line.Line(signal_array, parameters, n_pixels,fitphase=True)
# tfpphase[i, :], _, _ = line_inst.analyze()
tfp_image = tfp_ax.imshow(tfp * 1e6, cmap='inferno', **kwargs)
shift_image = shift_ax.imshow(shift, cmap='cubehelix', **kwargs)
tfp_sc = tfp[tfp.nonzero()] * 1e6
tfp_image.set_clim(vmin=tfp_sc.min(), vmax=tfp_sc.max())
shift_sc = shift[shift.nonzero()]
shift_image.set_clim(vmin=shift_sc.min(), vmax=shift_sc.max())
tfpmean = 1e6 * tfp[i, :].mean()
tfpstd = 1e6 * tfp[i, :].std()
string = ("Line {0:.0f}, average tFP (us) ="
" {1:.2f} +/- {2:.2f}".format(i + 1, tfpmean, tfpstd))
text.remove()
text = plt.figtext(0.35, 0.1, string)
plt.draw()
plt.pause(0.0001)
del line_inst # Delete the instance to open up memory.
elif args.p:
        print('Starting parallel processing, using '
              '{0:1d} CPUs.'.format(args.p))
start_time = time.time() # Keep when it's started.
# Create a pool of workers.
pool = multiprocessing.Pool(processes=args.p)
# Create the iterable and map onto the function.
n_files = len(data_files)
iterable = zip(data_files, [parameters] * n_files,
[n_pixels] * n_files)
result = pool.map(process_line, iterable)
# Do not forget to close spawned processes.
pool.close()
pool.join()
# Unzip the result.
tfp_list, shift_list = zip(*result)
# Initialize arrays.
tfp = np.zeros((n_files, n_pixels))
shift = np.zeros((n_files, n_pixels))
# Convert list of arrays to 2D array.
for i in range(n_files):
tfp[i, :] = tfp_list[i]
shift[i, :] = shift_list[i]
elapsed_time = time.time() - start_time
print('It took {0:.1f} seconds.'.format(elapsed_time))
# Filter bad pixels
tfp_fixed, _ = badpixels.fix_array(tfp, threshold=2)
tfp_fixed = np.array(tfp_fixed)
# Save csv files.
os.chdir(path)
np.savetxt('tfp.csv', np.fliplr(tfp).T, delimiter=',')
np.savetxt('shift.csv', np.fliplr(shift).T, delimiter=',')
np.savetxt('tfp_fixed.csv', np.fliplr(tfp_fixed).T, delimiter=',')
return
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
{"hexsha": "9991cbe97dd49127289d4e8e36658ae42dceda6e", "size": 6327, "ext": "py", "lang": "Python", "max_stars_repo_path": "ffta/_legacy_functions/analyze.py", "max_stars_repo_name": "GingerLabUW/FFTA", "max_stars_repo_head_hexsha": "576591d6ba23731c26f7dfa90591e94795f1b288", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-05-05T17:36:44.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-05T19:37:00.000Z", "max_issues_repo_path": "ffta/_legacy_functions/analyze.py", "max_issues_repo_name": "GingerLabUW/FFTA", "max_issues_repo_head_hexsha": "576591d6ba23731c26f7dfa90591e94795f1b288", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-05-08T05:41:18.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-13T01:44:30.000Z", "max_forks_repo_path": "ffta/_legacy_functions/analyze.py", "max_forks_repo_name": "GingerLabUW/FFTA", "max_forks_repo_head_hexsha": "576591d6ba23731c26f7dfa90591e94795f1b288", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-05-05T19:20:56.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-19T20:05:57.000Z", "avg_line_length": 31.0147058824, "max_line_length": 96, "alphanum_fraction": 0.6009167062, "include": true, "reason": "import numpy", "num_tokens": 1609}
|
module POMDPPolicies
using LinearAlgebra
using Random
using StatsBase # for Weights
using SparseArrays # for sparse vectors in alpha_vector.jl
using Parameters
using Distributions # For logpdf extension in playback policy
using Printf
using POMDPs
import POMDPs: action, value, solve, updater
using BeliefUpdaters
using POMDPModelTools
using Base.Iterators # for take
"""
actionvalues(p::Policy, s)
returns the values of each action at state s in a vector
"""
function actionvalues end
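# Illustrative sketch (not part of this package): a concrete policy type
# implements this interface; `MyTabularPolicy` and its fields are hypothetical.
#
#     POMDPPolicies.actionvalues(p::MyTabularPolicy, s) =
#         p.value_table[stateindex(p.mdp, s), :]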
export
actionvalues
export
AlphaVectorPolicy,
alphavectors,
alphapairs
include("alpha_vector.jl")
export
FunctionPolicy,
FunctionSolver
include("function.jl")
export
RandomPolicy,
RandomSolver
include("random.jl")
export
VectorPolicy,
VectorSolver,
ValuePolicy
include("vector.jl")
export
StochasticPolicy,
UniformRandomPolicy,
CategoricalTabularPolicy
include("stochastic.jl")
export LinearDecaySchedule,
EpsGreedyPolicy,
SoftmaxPolicy,
ExplorationPolicy,
loginfo
include("exploration_policies.jl")
export
PolicyWrapper,
payload
include("utility_wrapper.jl")
export
showpolicy
include("pretty_printing.jl")
export
PlaybackPolicy
include("playback.jl")
export
BlindPolicySolver
include("blind_policy.jl")
end
|
{"hexsha": "e798f8994467f4165fa14a9b565f5b6d9dca52a1", "size": 1332, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/POMDPPolicies.jl", "max_stars_repo_name": "Wu-Chenyang/POMDPPolicies.jl", "max_stars_repo_head_hexsha": "51a83b8193adac8a92afb83aa0e1abe987593f55", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/POMDPPolicies.jl", "max_issues_repo_name": "Wu-Chenyang/POMDPPolicies.jl", "max_issues_repo_head_hexsha": "51a83b8193adac8a92afb83aa0e1abe987593f55", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/POMDPPolicies.jl", "max_forks_repo_name": "Wu-Chenyang/POMDPPolicies.jl", "max_forks_repo_head_hexsha": "51a83b8193adac8a92afb83aa0e1abe987593f55", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 14.4782608696, "max_line_length": 62, "alphanum_fraction": 0.7492492492, "num_tokens": 330}
|
% !TEX root = ../main.tex
% = = = = = = = = = = = = = = = = = = = %
% Introduction %
% = = = = = = = = = = = = = = = = = = = %
\let\clearpage\relax
\chapter{Introduction}
\section{Figures}
\subsection{Single Figure}
\begin{figure}[!htp]
\centering
\includegraphics[scale=0.5]{example-image-a}
\caption{Figure-A.}
\label{fig:a}
\end{figure}
\subsection{Subfigures}
\begin{figure}[!htp]
\begin{minipage}{0.48\textwidth}
\centering
% include first image
\includegraphics[scale=0.5]{example-image-a}
\caption{Subfigure-A.}
\label{fig:sub_a}
\end{minipage}\hfill
\begin{minipage}{0.48\textwidth}
\centering
% include second image
\includegraphics[scale=0.5]{example-image-b}
\caption{Subfigure-B.}
\label{fig:sub_b}
\end{minipage}
\end{figure}
\section{Table}
\begin{table}[!htp]
\begin{center}
\caption{Table-1.}
\label{tab:tab-1}
\begin{tabular}{cc}
\toprule
Item & Value \\
\midrule
A & 1,000 \\
B & 2,000 \\
C & 3,000 \\
\bottomrule
\end{tabular}
\end{center}
\end{table}
\section{Citation}
Single document with available page number~\quickcite{kopka1995guide}.
Single document without available page number~\quickcitenopage{kottwitz2015latex}.
Two documents at the same time~\quickcitetwo{lamport1985i1}{kopka1995guide}.
\section{Source Code}
\subsection{Pseudocode}
\begin{algorithm}
\caption{Algorithm 1.}
\label{alg:alg-1}
\KwIn{integers $a,b$}
\KwOut{integers $c$}
\If {$a > b$} {
$c = a - b$ \;
\tcp{This is a comment.}
}
\Else {
$c = a + b$ \;
}
\end{algorithm}
\subsection{Source Code}
\begin{minted}[mathescape,
linenos,
numbersep=5pt,
gobble=2,
frame=lines,
framesep=1mm]{python}
def my_function():
print("Hello from a function")
\end{minted}
|
{"hexsha": "a342d63d3270e749a6e9972bf6467835847b77d5", "size": 2098, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "sections/2-intro.tex", "max_stars_repo_name": "Mars-tin/SJTUThesis", "max_stars_repo_head_hexsha": "189d447e0f43a9774727cc70655ce8821cd7215e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2021-07-28T03:00:41.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-21T20:49:58.000Z", "max_issues_repo_path": "sections/2-intro.tex", "max_issues_repo_name": "Mars-tin/SJTUThesis", "max_issues_repo_head_hexsha": "189d447e0f43a9774727cc70655ce8821cd7215e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sections/2-intro.tex", "max_forks_repo_name": "Mars-tin/SJTUThesis", "max_forks_repo_head_hexsha": "189d447e0f43a9774727cc70655ce8821cd7215e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-07-30T11:20:01.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-04T18:02:04.000Z", "avg_line_length": 22.5591397849, "max_line_length": 82, "alphanum_fraction": 0.5448045758, "num_tokens": 658}
|
# -*- coding: utf-8 -*-
#
#
# Created by: PyQt5 UI code generator 5.12.3
#
#FIB needle rotation calculator v0.1
#Written by Tao Ma, taoma@umich.edu
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
import matplotlib
matplotlib.use('Qt5Agg')
#from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
#from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import numpy as np
import matplotlib.pyplot as plt
import math
from matplotlib.widgets import Slider, Button
class Ui_geom_cal(QtWidgets.QMainWindow):
def __init__(self):
super(Ui_geom_cal,self).__init__()
self.setupUi(self)
self.retranslateUi(self)
def setupUi(self, GeomCal):
GeomCal.setObjectName("GeomCal")
GeomCal.resize(800, 233)
self.label = QtWidgets.QLabel(GeomCal)
self.label.setGeometry(QtCore.QRect(20, 10, 131, 21))
font = QtGui.QFont()
font.setPointSize(10)
self.label.setFont(font)
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(GeomCal)
self.label_2.setGeometry(QtCore.QRect(20, 40, 141, 20))
self.label_2.setObjectName("label_2")
self.lineEdit = QtWidgets.QLineEdit(GeomCal)
self.lineEdit.setGeometry(QtCore.QRect(170, 40, 31, 20))
self.lineEdit.setObjectName("lineEdit")
self.label_3 = QtWidgets.QLabel(GeomCal)
self.label_3.setGeometry(QtCore.QRect(290, 40, 451, 20))
self.label_3.setObjectName("label_3")
self.label_4 = QtWidgets.QLabel(GeomCal)
self.label_4.setGeometry(QtCore.QRect(220, 40, 55, 20))
self.label_4.setObjectName("label_4")
self.label_5 = QtWidgets.QLabel(GeomCal)
self.label_5.setGeometry(QtCore.QRect(20, 70, 140, 16))
self.label_5.setObjectName("label_5")
self.lineEdit_2 = QtWidgets.QLineEdit(GeomCal)
self.lineEdit_2.setGeometry(QtCore.QRect(170, 70, 31, 20))
self.lineEdit_2.setObjectName("lineEdit_2")
self.label_6 = QtWidgets.QLabel(GeomCal)
self.label_6.setGeometry(QtCore.QRect(220, 70, 55, 20))
self.label_6.setObjectName("label_6")
self.label_7 = QtWidgets.QLabel(GeomCal)
self.label_7.setGeometry(QtCore.QRect(290, 70, 451, 20))
self.label_7.setObjectName("label_7")
self.label_8 = QtWidgets.QLabel(GeomCal)
self.label_8.setGeometry(QtCore.QRect(20, 100, 131, 16))
self.label_8.setObjectName("label_8")
self.lineEdit_3 = QtWidgets.QLineEdit(GeomCal)
self.lineEdit_3.setGeometry(QtCore.QRect(170, 100, 31, 20))
self.lineEdit_3.setObjectName("lineEdit_3")
self.label_9 = QtWidgets.QLabel(GeomCal)
self.label_9.setGeometry(QtCore.QRect(220, 100, 55, 20))
self.label_9.setObjectName("label_9")
self.label_10 = QtWidgets.QLabel(GeomCal)
self.label_10.setGeometry(QtCore.QRect(290, 100, 500, 20))
self.label_10.setObjectName("label_10")
self.label_11 = QtWidgets.QLabel(GeomCal)
self.label_11.setGeometry(QtCore.QRect(20, 130, 131, 16))
self.label_11.setObjectName("label_11")
self.lineEdit_4 = QtWidgets.QLineEdit(GeomCal)
self.lineEdit_4.setGeometry(QtCore.QRect(170, 130, 31, 20))
self.lineEdit_4.setObjectName("lineEdit_4")
self.label_12 = QtWidgets.QLabel(GeomCal)
self.label_12.setGeometry(QtCore.QRect(220, 130, 55, 20))
self.label_12.setObjectName("label_12")
self.label_13 = QtWidgets.QLabel(GeomCal)
self.label_13.setGeometry(QtCore.QRect(290, 130, 510, 20))
self.label_13.setObjectName("label_13")
self.pushButton_1 = QtWidgets.QPushButton(GeomCal)
self.pushButton_1.setGeometry(QtCore.QRect(20, 160, 131, 41))
self.pushButton_1.setObjectName("pushButton_2")
self.pushButton_2 = QtWidgets.QPushButton(GeomCal)
self.pushButton_2.setGeometry(QtCore.QRect(450, 160, 131, 41))
self.pushButton_2.setObjectName("pushButton_3")
self.label_14 = QtWidgets.QLabel(GeomCal)
self.label_14.setGeometry(QtCore.QRect(170, 160, 261, 41))
self.label_14.setObjectName("label_14")
self.label_15 = QtWidgets.QLabel(GeomCal)
self.label_15.setGeometry(QtCore.QRect(600, 160, 141, 41))
self.label_15.setObjectName("label_15")
self.label_19 = QtWidgets.QLabel(GeomCal)
self.label_19.setGeometry(QtCore.QRect(160, 210, 510, 16))
self.label_19.setObjectName("label_19")
self.retranslateUi(GeomCal)
QtCore.QMetaObject.connectSlotsByName(GeomCal)
#=======Connect all the functions=============================================
self.pushButton_1.clicked.connect(self.view_geom)
self.pushButton_2.clicked.connect(self.geom_free_rotation)
def retranslateUi(self, GeomCal):
_translate = QtCore.QCoreApplication.translate
GeomCal.setWindowTitle(_translate("geom_cal", "FIB Geometry Calculator v0.1"))
self.label.setText(_translate("geom_cal", "Geometry setting"))
self.label_2.setText(_translate("geom_cal", "Ion-beam offset angle"))
self.lineEdit.setText(_translate("geom_cal", "52"))
self.label_3.setText(_translate("geom_cal", "The angle between the ion beam and e-beam, usually 52 on TFS FIBs."))
self.label_4.setText(_translate("geom_cal", "Degrees"))
self.label_5.setText(_translate("geom_cal", "Needle insertion angle"))
self.lineEdit_2.setText(_translate("geom_cal", "45"))
self.label_6.setText(_translate("geom_cal", "Degrees"))
self.label_7.setText(_translate("geom_cal", "TFS Helios G4/PFIB: 45; TFS Helios G3/650 or older models: 50."))
self.label_8.setText(_translate("geom_cal", "Sample tilt"))
self.lineEdit_3.setText(_translate("geom_cal", "0"))
self.label_9.setText(_translate("geom_cal", "Degrees"))
self.label_10.setText(_translate("geom_cal", "Equal to the stage tilt when the lamella is welded and lifted out by the easyLift."))
self.label_11.setText(_translate("geom_cal", "Needle rotation angle"))
self.lineEdit_4.setText(_translate("geom_cal", "0"))
self.label_12.setText(_translate("geom_cal", "Degrees"))
self.label_13.setText(_translate("geom_cal", "Rotate the needle to see the geometry. Positive number rotates counterclockwise."))
self.pushButton_1.setText(_translate("geom_cal", "View"))
self.pushButton_2.setText(_translate("geom_cal", "Free rotation"))
self.label_14.setText(_translate("geom_cal", "View the geometry with the above settings."))
self.label_15.setText(_translate("geom_cal", "Rotate the needle live!"))
self.label_19.setText(_translate("geom_cal", "FIB geometry calculator v0.1 Released: 12/9/2021 by Dr. Tao Ma taoma@umich.edu"))
#=================== Define button functions =================================
def view_geom(self):
#Set up the original geometry
N_theta = float(self.lineEdit_2.text()) #Needle insertion angle in degree. PFIB: 45; Helios 650/G3: 50
N_theta_r = math.radians(N_theta)
I_ang = float(self.lineEdit.text()) #Ion-beam offset angle from the e-beam
        S_x, S_y = 10, 5 #A rectangular TEM lamella, 10 x 5 in arb. units
Tilt = float(self.lineEdit_3.text())
#Sample tilt relative to the y-z plane, equivalent to the stage tilt
Tilt_r = math.radians(Tilt)
#Calculate the coordinates of the sample
A = (-S_y * math.sin(Tilt_r), 0, -S_y * math.cos(Tilt_r))
B = (-S_y * math.sin(Tilt_r), S_x, -S_y * math.cos(Tilt_r))
C = (0, S_x, 0)
l = 10 #Needle length in arb. unit
N_A = (0, 0, 0)#Needle point A at the origin
N_B = (0, -l*math.cos(N_theta_r), l*math.sin(N_theta_r))#Needle point B
N_a = float(self.lineEdit_4.text()) #Needle rotation angle
#Apply rotation for A, B, and C
A1 = coo_rot(A, N_a, N_theta)
B1 = coo_rot(B, N_a, N_theta)
C1 = coo_rot(C, N_a, N_theta)
title = 'Geometry after rotating the needle by {} degrees'.format(N_a)
plot_view(A1, B1, C1, N_A, N_B, title, I_ang)
plt.show()
def geom_free_rotation(self):
#Set up the original geometry
N_theta = float(self.lineEdit_2.text()) #Needle insertion angle in degree. PFIB: 45; Helios 650/G3: 50
N_theta_r = math.radians(N_theta)
I_ang = float(self.lineEdit.text()) #Ion-beam offset angle from the e-beam
        S_x, S_y = 10, 5 #A rectangular TEM lamella, 10 x 5 in arb. units
Tilt = float(self.lineEdit_3.text())
#Sample tilt relative to the y-z plane, equivalent to the stage tilt
Tilt_r = math.radians(Tilt)
#Calculate the coordinates of the sample
A = (-S_y * math.sin(Tilt_r), 0, -S_y * math.cos(Tilt_r))
B = (-S_y * math.sin(Tilt_r), S_x, -S_y * math.cos(Tilt_r))
C = (0, S_x, 0)
l = 10 #Needle length in arb. unit
N_A = (0, 0, 0)#Needle point A at the origin
N_B = (0, -l*math.cos(N_theta_r), l*math.sin(N_theta_r))#Needle point B
#Add a slider for easy viewing
# Define initial parameters
N_ini = 0
# Create the figure and the line that we will manipulate
title = 'Free needle rotation with the slider'
fig4, ax1, ax2, ax3, ax4, ax5, line1, line2, line3, line4, line5 = plot_view(A, B, C, N_A, N_B, title, I_ang)
# adjust the main plot to make room for the sliders
plt.subplots_adjust(bottom=0.25)
        # Make a horizontal slider to control the needle rotation angle.
ax_ang = plt.axes([0.25, 0.1, 0.65, 0.03])
ang_slider = Slider(
ax=ax_ang,
label='Rotation (degree)',
valmin=0,
valmax=360,
valinit=N_ini,
)
# The function to be called anytime a slider's value changes
def update(val):
A1 = coo_rot(A, ang_slider.val, N_theta)
B1 = coo_rot(B, ang_slider.val, N_theta)
C1 = coo_rot(C, ang_slider.val, N_theta)
x1, y1, z1 = zip((0,0,0), A1, B1, C1, (0,0,0))
line1.set_data_3d(x1, y1, z1)
line2.set_data_3d(x1, y1, z1)
line3.set_data_3d(x1, y1, z1)
line4.set_data_3d(x1, y1, z1)
line5.set_data_3d(x1, y1, z1)
fig4.canvas.draw_idle()
# register the update function with the slider
ang_slider.on_changed(update)
# Create a `matplotlib.widgets.Button` to reset the sliders to initial values.
resetax = plt.axes([0.8, 0.025, 0.1, 0.04])
button = Button(resetax, 'Reset', hovercolor='0.975')
def reset(event):
ang_slider.reset()
button.on_clicked(reset)
resetax._button = button #Create a dummy reference to hold the button variable
plt.show()
#==================== Modules and helper functions ===================================
#Define a coordinate calculation function for needle rotation
def coo_rot(A, n, N_theta): #A: Coordinate (x,y,z); n: Needle rotation angle in degrees (counterclockwise)
#Define rotation matrix for needle rotation
#First, rotate the needle clockwise by 90-N_theta to align the z axis
r1 = math.radians(90 - N_theta) #Rotation angle for this operation
    R1 = [[1, 0, 0],[0, math.cos(-r1), -math.sin(-r1)],[0,math.sin(-r1),math.cos(-r1)]] #Rotation matrix: rotate counterclockwise by -r1 about the x axis
#Then, rotate along the z axis counterclockwise by a desired angle (Needle rotation angle)
r2 = math.radians(n)
R2 = [[math.cos(r2), -math.sin(r2), 0], [math.sin(r2), math.cos(r2), 0], [0, 0, 1]]
#Finally, rotate the needle counterclockwise by 90-N_theta back to the original direction
R3 = [[1, 0, 0],[0, math.cos(r1), -math.sin(r1)],[0, math.sin(r1), math.cos(r1)]]
#Apply the rotation
A1 = np.dot(R1,A)
A2 = np.dot(R2,A1)
A3 = np.dot(R3,A2)
return A3
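# Illustrative sanity check (not part of the original tool): a full 360-degree
# rotation maps any point back onto itself, up to floating-point error:
#     np.allclose(coo_rot((1.0, 2.0, 3.0), 360, 45), (1.0, 2.0, 3.0))  # True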
#Define a function for easy plotting
def plot_view(A, B, C, N_A, N_B, title, I_ang):
fig = plt.figure(dpi=150) #Create 2 x 3 subplots
ax1 = fig.add_subplot(2, 3, 1, projection='3d', proj_type='ortho')
fig.suptitle(title)
x, y, z = zip(N_A, A, B, C, N_A)
#Geometry view
ax1.view_init(azim=10, elev=20)
ax1.set_box_aspect([1,1,1])
ax1.plot3D((N_A[0],N_B[0]),(N_A[1],N_B[1]),(N_A[2],N_B[2]),'b')
line1, = ax1.plot3D(x, y, z, 'r')
ax1.set_xlim(-8,8)
ax1.set_ylim(-3,13)
ax1.set_zlim(-11,5)
ax1.set_title('Geometry')
ax1.set_xticklabels([])
ax1.set_yticklabels([])
ax1.set_zticklabels([])
#Front view
ax2 = fig.add_subplot(2,3,2, projection='3d', proj_type='ortho')
line2, = ax2.plot3D(x, y, z, 'r')
ax2.view_init(azim=0, elev=0)
ax2.set_box_aspect([1,1,1])
ax2.plot3D((N_A[0],N_B[0]),(N_A[1],N_B[1]),(N_A[2],N_B[2]),'b')
ax2.set_xlim(-8,8)
ax2.set_ylim(-3,13)
ax2.set_zlim(-11,5)
ax2.set_title('Front View')
ax2.set_xticklabels([])
ax2.set_yticklabels([])
ax2.set_zticklabels([])
#Side view
ax3 = fig.add_subplot(2,3,3, projection='3d', proj_type='ortho')
line3, = ax3.plot3D(x, y, z, 'r')
ax3.view_init(azim=-90, elev=0)
ax3.set_box_aspect([1,1,1])
ax3.plot3D((N_A[0],N_B[0]),(N_A[1],N_B[1]),(N_A[2],N_B[2]),'b')
ax3.set_xlim(-8,8)
ax3.set_ylim(-3,13)
ax3.set_zlim(-11,5)
ax3.set_title('Side View')
ax3.set_xticklabels([])
ax3.set_yticklabels([])
ax3.set_zticklabels([])
#E-beam view
ax4 = fig.add_subplot(2,3,4, projection='3d', proj_type='ortho')
line4, = ax4.plot3D(x, y, z, 'r')
ax4.view_init(azim=0, elev=-90)
ax4.set_box_aspect([1,1,1])
ax4.plot3D((N_A[0],N_B[0]),(N_A[1],N_B[1]),(N_A[2],N_B[2]),'b')
ax4.set_xlim(-8,8)
ax4.set_ylim(-3,13)
ax4.set_zlim(5,-11)
ax4.set_title('E-beam View')
ax4.set_xticklabels([])
ax4.set_yticklabels([])
ax4.set_zticklabels([])
#Ion-beam View
ax5 = fig.add_subplot(2,3,5, projection='3d', proj_type='ortho')
line5, = ax5.plot3D(x, y, z, 'r')
ax5.view_init(azim=0, elev=-(90-I_ang))
ax5.set_box_aspect([1,1,1])
ax5.plot3D((N_A[0],N_B[0]),(N_A[1],N_B[1]),(N_A[2],N_B[2]),'b')
ax5.set_xlim(-8,8)
ax5.set_ylim(-3,13)
ax5.set_zlim(5,-11)
ax5.set_title('Ion-beam View')
ax5.set_xticklabels([])
ax5.set_yticklabels([])
ax5.set_zticklabels([])
return fig, ax1, ax2, ax3, ax4, ax5, line1, line2, line3, line4, line5
#====Application entry==================================
def main():
import sys
app = QtWidgets.QApplication(sys.argv)
GeomCal = QtWidgets.QWidget()
ui = Ui_geom_cal()
ui.setupUi(GeomCal)
GeomCal.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
|
{"hexsha": "d943988fc9125968cd565d276578ef839e607a47", "size": 15375, "ext": "py", "lang": "Python", "max_stars_repo_path": "Main/FIB_geom.py", "max_stars_repo_name": "matao1984/FIB-geom-calculator", "max_stars_repo_head_hexsha": "993de21a745d5398968bb5b7bebb3df884b60b9e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Main/FIB_geom.py", "max_issues_repo_name": "matao1984/FIB-geom-calculator", "max_issues_repo_head_hexsha": "993de21a745d5398968bb5b7bebb3df884b60b9e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Main/FIB_geom.py", "max_forks_repo_name": "matao1984/FIB-geom-calculator", "max_forks_repo_head_hexsha": "993de21a745d5398968bb5b7bebb3df884b60b9e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.5652173913, "max_line_length": 140, "alphanum_fraction": 0.6189918699, "include": true, "reason": "import numpy", "num_tokens": 4424}
|
#tbeg = time()
using LinearAlgebra
BLAS.set_num_threads(4)
using Flux, Statistics # Flux.Data.MNIST
using Flux: onehotbatch, onecold, crossentropy, throttle
using Base.Iterators: repeated, partition
using BSON, HDF5, JLD, Random
using MLDataUtils
using NPZ
include("genDataScripts.jl")
n_sites = 100
periods = [2,3,4,5,6,7,8,9,10,11]
all_signals = gen_signals(n_sites, periods)
neu = 100
#trainingSizes = [50, 100, 250, 500, 1000, 2000, 4000]
trainSizes = [50, 100, 250, 500, 1000, 2000, 4000]
sigmasAll = collect(0.05:0.05:4)
indice = parse(Int64, ARGS[1])
#σv = round(σvt*0.05, digits=3)
#σv = 2.5 # 1.0
grid = []
for ts in trainSizes, σv in sigmasAll
push!(grid, [ts, σv])
end
tssize = Int64(grid[indice][1])
trainingSizes = [tssize]
σv = grid[indice][2]
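# Illustrative note: the loop above enumerates σ fastest, so with 7 training
# sizes and 80 σ values (560 grid entries) ARGS[1] == "1" selects
# (ts = 50, σ = 0.05) and ARGS[1] == "81" selects (ts = 100, σ = 0.05).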
iteraciones = 50
# HHG DATA
x_data9k = npzread("./data/data_simulation2_optimized_9669.npy")
labels9k = npzread("./data/labels_simulation2_optimized_9669.npy")
ycold9k = Int64.(labels9k[:,1,1])
nsamples = size(x_data9k)[1]
npts = 100
data_log9k = zeros(npts, nsamples)
for i in 1:nsamples
input_log = log10.(x_data9k[i,:,2])
input_log = input_log .+ minimum(input_log)*(-1)
#input_log = input_log/maximum(input_log)
data_log9k[:,i] .= input_log
end
accuracy_tcold(x,y,model) = mean(onecold(model(x)) .== y)
function signs100k(σv)
Random.seed!(1234)
signals_mas_ruido = zeros(npts,nsamples)
for i in 1:nsamples
signals_mas_ruido[:,i] .= signal_plus_noise(data_log9k[:,i], σv, true)
end
for _ in 1:9
signals_mas_ruidox = zeros(npts, nsamples)
for i in 1:nsamples
signals_mas_ruidox[:,i] .= signal_plus_noise(data_log9k[:,i], σv, true)
end
signals_mas_ruido = hcat(signals_mas_ruido, signals_mas_ruidox)
end
signals_mas_ruido
end
ycold9kx = repeat(ycold9k, 10)
# Synthetic data
function synthData(σv)
all_signals = gen_signals(n_sites, periods)
Random.seed!(1234)
num_test = 10000
x_test, y_test = gen_data_set(all_signals, σv, num_test,true)
xtest_temp, ytest = reshape(x_test, (100, num_test, 10)), reshape(y_test, (num_test, 10))
x_test = reshape(xtest_temp, (n_sites, 10*num_test))
labels_test = reshape(ytest, (10*num_test))
x_test, labels_test
end
Random.seed!(1234)
t_size = 1_000_000 # 4_000_000 old
num_train = Int64(round.(t_size *0.8/10)) # number of samples for each class,
#this just works for 10 classes
num_val = Int64(round.(t_size *0.2/10))
num_test = 1000
x_test, y_test = gen_data_set(all_signals, σv, num_test,true)
x_train, y_train = gen_data_set(all_signals, σv, num_train,true)
x_vali, y_vali = gen_data_set(all_signals, σv, num_val,true)
x_test = reshape(x_test, (100, num_test, 10))
y_test = reshape(y_test, (num_test, 10))
x_train = reshape(x_train, (100, num_train, 10))
y_train = reshape(y_train, (num_train, 10))
x_vali = reshape(x_vali, (100, num_val, 10))
y_vali = reshape(y_vali, (num_val, 10))
xtest_temp = x_test[:,1:num_test,:]
ytest_temp = y_test[1:num_test,:]
x_test = reshape(xtest_temp, (n_sites, 10*num_test))
labels_test = reshape(ytest_temp, (10*num_test))
X_test = x_test # |> gpu
Y_test = onehotbatch(labels_test, 1:10); #|> gpu
for t_size in trainingSizes
tmtrain = time()
num_train = Int64(round.(t_size *0.8/10)) #number of samples for each class,
#this just works for 10 classes
num_val = Int64(round.(t_size *0.2/10))
xtrain_temp = x_train[:,1:num_train,:]
ytrain_temp = y_train[1:num_train,:]
x_data = reshape(xtrain_temp, (n_sites, 10*num_train))
labels = reshape(ytrain_temp, (10*num_train))
xvali_temp = x_vali[:,1:num_val,:]
yvali_temp = y_vali[1:num_val,:]
x_val = reshape(xvali_temp, (n_sites, 10*num_val))
labels_val = reshape(yvali_temp, (10*num_val))
# Data preparation
X = x_data # |> gpu
Y = onehotbatch(labels, 1:10); #|> gpu
#dataset = [(X, Y)]
X_val = x_val # |> gpu
Y_val = onehotbatch(labels_val, 1:10); #|> gpu
#mini-batch
batch_size = 256 #512 is ok too, similar performance at test accuracy
mb_idxs = partition(1:size(X)[2], batch_size)
#train_set = [(X[:,p], Y[:,p]) for p in mb_idxs]
for itera in 51:iteraciones+50
# saving training history
epochs = 1000
t_loss = fill(NaN, epochs)
v_loss = fill(NaN, epochs)
va_acc = fill(NaN, epochs)
#saving best models
dict_models = Dict()
#dict_models["model_iter"] = NaN
dict_models["iter_epoch_num"] = 0
#saving best model
best_val = 5.0
last_improvement = 0
patience=50
Random.seed!(10*itera)
model = Chain(
Dense(n_sites, neu, Flux.relu),
Dense(neu, 10),
Flux.softmax) # |> gpu
opt = ADAM()
loss(x, y) = crossentropy(model(x), y)
global accuracyT(x, y) = mean(onecold(model(x)) .== onecold(y))
for epoch_indx in 1:epochs
xs, ys = shuffleobs((X, Y))
train_set = [(xs[:,p], ys[:,p]) for p in mb_idxs]
Flux.train!(loss, params(model), train_set, opt)
validation_loss = Tracker.data(loss(X_val, Y_val))
t_loss[epoch_indx] = Tracker.data(loss(X, Y))
v_loss[epoch_indx] = validation_loss
va_acc[epoch_indx] = accuracyT(X_val, Y_val)
# If this is the best val_loss we've seen so far, save the model out
if validation_loss <= best_val
#@info(" -> New best val_loss! Saving model out to model_iter$(r).bson")
last_improvement = epoch_indx
dict_models["iter_epoch_num"] = last_improvement
best_val = validation_loss
end
if epoch_indx - last_improvement >= patience
#@info(" -> Early-exiting iteration $(r) and epoch $(epoch_indx): no more patience")
break
end
end
test_acc = accuracyT(X_test, Y_test)
BSON.@save "modelsN$(neu)ESP_ts$(t_size)_σ$(σv)_ite$(itera).bson" model t_loss v_loss va_acc test_acc
error_TS_ruido = zeros(length(sigmasAll))
for (i,σvd) in enumerate(sigmasAll)
signals_mas_ruido = signs100k(σvd)
error_TS_ruido[i] = (1.0 .- accuracy_tcold(signals_mas_ruido, ycold9kx, model))
end
npzwrite("error_hhg_$(t_size)_1n_sig_$(σv)_ite$(itera).npy", error_TS_ruido)
error_TS_ruidox = zeros(length(sigmasAll))
for (i,σvd) in enumerate(sigmasAll)
x_test, labels_test = synthData(σvd)
error_TS_ruidox[i] = (1.0 .- accuracy_tcold(x_test, labels_test, model))
end
npzwrite("error_GtoG100k_$(t_size)_1n_sig_$(σv)_ite$(itera).npy", error_TS_ruidox)
end
end
#println(time() - tbeg)
|
{"hexsha": "1ef2c866f4ff654db23b273e313e2582e2ba4c84", "size": 6757, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/mainJob.jl", "max_stars_repo_name": "lazarusA/noisySignals", "max_stars_repo_head_hexsha": "ebbf36c2b92f0ca5351eabc00746c89d111fb70d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/mainJob.jl", "max_issues_repo_name": "lazarusA/noisySignals", "max_issues_repo_head_hexsha": "ebbf36c2b92f0ca5351eabc00746c89d111fb70d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/mainJob.jl", "max_forks_repo_name": "lazarusA/noisySignals", "max_forks_repo_head_hexsha": "ebbf36c2b92f0ca5351eabc00746c89d111fb70d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6169154229, "max_line_length": 109, "alphanum_fraction": 0.6488086429, "num_tokens": 2124}
|
# https://github.com/marcharper/python-ternary
import ternary
import random
import matplotlib.pyplot as plt
import matplotlib.tri as tri
import numpy as np
def random_points(num_points=25, scale=40):
points = []
for i in range(num_points):
x = random.randint(1, scale)
y = random.randint(0, scale - x)
z = scale - x - y
points.append((x,y,z))
return points
def generate_random_heatmap_data(scale=5):
from ternary.helpers import simplex_iterator
d = dict()
for (i,j,k) in simplex_iterator(scale):
d[(i,j)] = random.random()
return d
def generate_data(function,n):
x = list(np.linspace(0.0, 1.0, num=n))
y = list(np.linspace(0.0, 1.0, num=n))
d = dict()
for i in range(0,len(x)):
for j in range(0,len(y)):
k = 1.0 - x[i] - y[j]
inputs = [x[i],y[j],k]
d[(i,j)] = function(inputs)
return d
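# Illustrative usage (the `strength` function below is hypothetical): render a
# composition-dependent property over the simplex.
#     def strength(p):  # p = [sand, clay, straw] fractions
#         return p[0] * p[1]
#     heatmap_data(generate_data(strength, 101), scale=100, title="Sand x Clay")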
def heatmap(function,scale=100,title="Title"):
figure, tax = ternary.figure(scale=scale)
figure.set_size_inches(10, 8)
tax.heatmapf(function, boundary=True, style="triangular")
tax.boundary(linewidth=2.0)
tax.set_title(title)
tax.ticks(axis='lbr', linewidth=1, multiple=5)
fontsize = 20
tax.left_axis_label("Straw", fontsize=fontsize)
tax.right_axis_label("Clay", fontsize=fontsize)
tax.bottom_axis_label("Sand", fontsize=fontsize)
tax.clear_matplotlib_ticks()
tax.show()
def heatmap_data(data,scale=100,title="Title"):
figure, tax = ternary.figure(scale=scale)
figure.set_size_inches(10, 8)
tax.heatmap(data, scale=scale, style="triangular")
tax.boundary(linewidth=2.0)
tax.set_title(title)
tax.ticks(axis='lbr', linewidth=1, multiple=5)
fontsize = 20
tax.left_axis_label("Straw", fontsize=fontsize)
tax.right_axis_label("Clay", fontsize=fontsize)
tax.bottom_axis_label("Sand", fontsize=fontsize)
tax.clear_matplotlib_ticks()
tax.show()
def scatter(points=None):
    ### Scatter Plot
scale = 100
figure, tax = ternary.figure(scale=scale)
ax = tax.get_axes()
tax.set_title("Scatter Plot", fontsize=20)
tax.boundary(linewidth=2.0)
tax.gridlines(multiple=5, color="blue")
# point = (bottomAxis,rightAxis,leftAxis)
p1 = (10,60,30)
    p2 = (70, 0, 30)
tax.line(p1, p2, linewidth=3., marker='s', color='green', linestyle=":")
tax.left_parallel_line(80, linewidth=2., color='red', linestyle="--")
    # Plot a few different styles with a legend; generate random points only if
    # the caller did not supply any
    if points is None:
        points = random_points(30, scale=scale)
tax.scatter(points, marker='s', color='red', label="Experimental Results")
#tax.legend()
tax.ticks(axis='lbr', linewidth=1, multiple=5)
fontsize = 20
tax.left_axis_label("Straw", fontsize=fontsize)
tax.right_axis_label("Clay", fontsize=fontsize)
tax.bottom_axis_label("Sand", fontsize=fontsize)
#plt.ylim(ymin=-20)
# Remove default Matplotlib Axes
tax.clear_matplotlib_ticks()
tax.show()
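# Hedged usage sketch (not part of the original script): exercises the helpers
# above; assumes python-ternary is installed and an interactive matplotlib
# backend is available.
if __name__ == "__main__":
    heatmap(lambda p: p[0], scale=40, title="Bottom-axis fraction")
    scatter(random_points(30, scale=100))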
|
{"hexsha": "eccac528adc5fe5dcfd2511a27cb896436579eb2", "size": 2975, "ext": "py", "lang": "Python", "max_stars_repo_path": "TernaryPlots.py", "max_stars_repo_name": "Wright4TheJob/CobModelGPR", "max_stars_repo_head_hexsha": "714c8d85d91817bd1abb560359afe4abda116996", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "TernaryPlots.py", "max_issues_repo_name": "Wright4TheJob/CobModelGPR", "max_issues_repo_head_hexsha": "714c8d85d91817bd1abb560359afe4abda116996", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "TernaryPlots.py", "max_forks_repo_name": "Wright4TheJob/CobModelGPR", "max_forks_repo_head_hexsha": "714c8d85d91817bd1abb560359afe4abda116996", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.9892473118, "max_line_length": 78, "alphanum_fraction": 0.6578151261, "include": true, "reason": "import numpy", "num_tokens": 830}
|
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
np.random.seed(0)
def convert_str_columns_to_float(df):
    # strip the surrounding bracket characters, then cast to float
    for col in ['expected_outcome_st_treatment', 'expected_outcome_st_no_treatment']:
        df[col] = df[col].str[1:-1]
        df[col] = df[col].astype(np.float64)
    return df
def tokenize_documents(documents, max_df0=0.8, min_df0=0.01, print_vocabulary=False, outfolder=None, output_vocabulary_fname='vocabulary.dat'):
    '''
    From a list of documents' raw text, build a DxV count matrix.
    D: number of docs
    V: size of the vocabulary, i.e. number of unique terms found in the whole set of docs
    '''
    from nltk.corpus import stopwords
    stop = stopwords.words('english')
    count_vect = CountVectorizer(stop_words=stop, max_df=max_df0, min_df=min_df0)
    corpus = count_vect.fit_transform(documents)
    vocabulary = count_vect.get_feature_names()
    return corpus, vocabulary, count_vect
def assign_split(df, num_splits=10, col_to_add='split'):
df[col_to_add] = np.random.randint(0, num_splits, size=df.shape[0])
return df
def filter_imbalanced_terms(df, term_counts, imbalance=0.1, key='post_index'):
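    # return indices of terms whose prevalence differs between the treatment==1
    # and treatment==0 groups by at least `imbalance`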
t_indices = []
n_terms = term_counts.shape[1]
for t in range(n_terms):
ind_occur = np.nonzero(term_counts[:,t])[0]
subset = df[df[key].isin(ind_occur)]
if subset.shape[0] != 0:
prop_men = subset[subset.treatment==1].shape[0]/subset.shape[0]
prop_women = subset[subset.treatment==0].shape[0]/subset.shape[0]
if abs(prop_women-prop_men)>=imbalance:
t_indices.append(t)
return t_indices
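# Hedged usage sketch (illustrative, not from the original module); assumes the
# NLTK stopword corpus has been downloaded.
# docs = ["a tiny example document", "another tiny example document"]
# corpus, vocabulary, vectorizer = tokenize_documents(docs, max_df0=1.0, min_df0=1)
# print(vocabulary)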
|
{"hexsha": "abbf84b283c5fc28ebcd22dbfd65cfc29c62e928", "size": 1740, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/result_processing/helpers.py", "max_stars_repo_name": "dveni/causal-text-embeddings", "max_stars_repo_head_hexsha": "82104f3fb6fd540cf98cb4ca0fd5b5d1fb5f757a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 114, "max_stars_repo_stars_event_min_datetime": "2019-05-31T03:54:05.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T06:37:27.000Z", "max_issues_repo_path": "src/result_processing/helpers.py", "max_issues_repo_name": "dveni/causal-text-embeddings", "max_issues_repo_head_hexsha": "82104f3fb6fd540cf98cb4ca0fd5b5d1fb5f757a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2019-08-12T01:35:22.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-23T17:32:46.000Z", "max_forks_repo_path": "src/result_processing/helpers.py", "max_forks_repo_name": "dveni/causal-text-embeddings", "max_forks_repo_head_hexsha": "82104f3fb6fd540cf98cb4ca0fd5b5d1fb5f757a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 20, "max_forks_repo_forks_event_min_datetime": "2019-06-03T05:33:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-04T19:34:41.000Z", "avg_line_length": 34.1176470588, "max_line_length": 139, "alphanum_fraction": 0.7683908046, "include": true, "reason": "import numpy", "num_tokens": 467}
|
effective_particles(pf) = effective_particles(expweights(pf))
effective_particles(we::AbstractVector) = 1/sum(abs2, we)
function shouldresample(pf::AbstractParticleFilter)
resample_threshold(pf) == 1 && (return true)
th = num_particles(pf)*resample_threshold(pf)
ne = effective_particles(pf)
return ne < th
end
resample(pf::AbstractParticleFilter, M::Int=num_particles(pf)) = resample(resampling_strategy(pf), expweights(pf), state(pf).j, state(pf).bins, M)
resample(T::Type{<:ResamplingStrategy}, s::PFstate, M::Int=num_particles(s)) = resample(T, s.we, s.j, s.bins, M)
resample(T::Type{<:ResamplingStrategy}, we::AbstractVector, M::Int=length(we)) = resample(T, we, zeros(Int,M), zeros(length(we)), M)
resample(we::AbstractArray) = resample(ResampleSystematic,we)
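# Systematic resampling: build the cumulative-weight vector `bins`, then sweep
# M evenly spaced points (sharing a single random offset r) across it and
# return, for each point, the index of the bin it falls into.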
function resample(::Type{ResampleSystematic}, we, j, bins, M = length(we))
N = length(we)
bins[1] = we[1]
for i = 2:N
bins[i] = bins[i-1] + we[i]
end
r = rand()*bins[end]/N
s = r:(1/M):(bins[N]+r) # Added r in the end to ensure correct length (r < 1/N)
bo = 1
for i = 1:M
@inbounds for b = bo:N
if s[i] < bins[b]
j[i] = b
bo = b
break
end
end
end
return j
end
# """
# There is probably lots of room for improvement here. All bins need not be formed in the beginning.
# One only has to keep 1 values, the current upper limit, no array needed.
# """
"""
draw_one_categorical(pf,w)
Note: this function expects log-weights.
"""
function draw_one_categorical(pf,w)
bins = state(pf).bins
logsumexp!(w,bins)
for i = 2:length(w)
bins[i] += bins[i-1]
end
    @assert bins[end] ≈ 1 "All expweights 0"
s = rand()*bins[end]
# ind = findfirst(x->bins[x]>=s, 1:length(bins))
midpoint = length(bins)÷2
if s < bins[midpoint]
for b = 1:midpoint
if s <= bins[b]
return b
end
end
else
for b = midpoint:length(bins)
if s <= bins[b]
return b
end
end
end
length(bins)
end
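# Hedged usage sketch (not part of the original file): systematic resampling of
# a normalized weight vector; `we` should sum to 1 so the evenly spaced sweep
# covers every bin.
# we = fill(0.25, 4)
# j = resample(ResampleSystematic, we)   # four ancestor indices in 1:4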
|
{"hexsha": "e1264644ab015119e5277e163463062414db02c4", "size": 2145, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/resample.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/LowLevelParticleFilters.jl-d9d29d28-c116-5dba-9239-57a5fe23875b", "max_stars_repo_head_hexsha": "b4077d2f27520e32a755a1ecebc2a54c5e0e2bfa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 51, "max_stars_repo_stars_event_min_datetime": "2019-01-03T14:43:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-22T10:18:01.000Z", "max_issues_repo_path": "src/resample.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/LowLevelParticleFilters.jl-d9d29d28-c116-5dba-9239-57a5fe23875b", "max_issues_repo_head_hexsha": "b4077d2f27520e32a755a1ecebc2a54c5e0e2bfa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 39, "max_issues_repo_issues_event_min_datetime": "2019-04-04T11:36:50.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-10T15:28:33.000Z", "max_forks_repo_path": "src/resample.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/LowLevelParticleFilters.jl-d9d29d28-c116-5dba-9239-57a5fe23875b", "max_forks_repo_head_hexsha": "b4077d2f27520e32a755a1ecebc2a54c5e0e2bfa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2019-05-14T18:54:18.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-09T22:44:52.000Z", "avg_line_length": 29.7916666667, "max_line_length": 146, "alphanum_fraction": 0.5944055944, "num_tokens": 629}
|
[STATEMENT]
lemma D_imp_CR: assumes "\<forall>P. (peak ars P \<longrightarrow> (\<exists> \<sigma>' \<tau>'. DD ars r (fst P,snd P,\<sigma>',\<tau>')))" shows "CR (unlabel ars)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. CR (unlabel ars)
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a b c. \<lbrakk>a \<in> UNIV; (a, b) \<in> (unlabel ars)\<^sup>*; (a, c) \<in> (unlabel ars)\<^sup>*\<rbrakk> \<Longrightarrow> (b, c) \<in> (unlabel ars)\<^sup>\<down>
[PROOF STEP]
fix a b c
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a b c. \<lbrakk>a \<in> UNIV; (a, b) \<in> (unlabel ars)\<^sup>*; (a, c) \<in> (unlabel ars)\<^sup>*\<rbrakk> \<Longrightarrow> (b, c) \<in> (unlabel ars)\<^sup>\<down>
[PROOF STEP]
assume A: "(a,b) \<in> (unlabel ars)^*" and B: "(a,c) \<in> (unlabel ars)^*"
[PROOF STATE]
proof (state)
this:
(a, b) \<in> (unlabel ars)\<^sup>*
(a, c) \<in> (unlabel ars)\<^sup>*
goal (1 subgoal):
1. \<And>a b c. \<lbrakk>a \<in> UNIV; (a, b) \<in> (unlabel ars)\<^sup>*; (a, c) \<in> (unlabel ars)\<^sup>*\<rbrakk> \<Longrightarrow> (b, c) \<in> (unlabel ars)\<^sup>\<down>
[PROOF STEP]
show "(b,c) \<in> (unlabel ars)\<^sup>\<down>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (b, c) \<in> (unlabel ars)\<^sup>\<down>
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (b, c) \<in> (unlabel ars)\<^sup>\<down>
[PROOF STEP]
obtain ss1 ss2 where " peak ars (ss1,ss2)" and b: "lst ss1 = b" and c: "lst ss2 = c"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>ss1 ss2. \<lbrakk>peak ars (ss1, ss2); lst ss1 = b; lst ss2 = c\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
unfolding peak_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>ss1 ss2. \<lbrakk>let (\<tau>, \<sigma>) = (ss1, ss2) in {\<tau>, \<sigma>} \<subseteq> seq ars \<and> fst \<tau> = fst \<sigma>; lst ss1 = b; lst ss2 = c\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using A B
[PROOF STATE]
proof (prove)
using this:
(a, b) \<in> (unlabel ars)\<^sup>*
(a, c) \<in> (unlabel ars)\<^sup>*
goal (1 subgoal):
1. (\<And>ss1 ss2. \<lbrakk>let (\<tau>, \<sigma>) = (ss1, ss2) in {\<tau>, \<sigma>} \<subseteq> seq ars \<and> fst \<tau> = fst \<sigma>; lst ss1 = b; lst ss2 = c\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
unfolding seq_vs_steps
[PROOF STATE]
proof (prove)
using this:
\<exists>ss. fst ss = a \<and> lst ss = b \<and> ss \<in> seq ars
\<exists>ss. fst ss = a \<and> lst ss = c \<and> ss \<in> seq ars
goal (1 subgoal):
1. (\<And>ss1 ss2. \<lbrakk>let (\<tau>, \<sigma>) = (ss1, ss2) in {\<tau>, \<sigma>} \<subseteq> seq ars \<and> fst \<tau> = fst \<sigma>; lst ss1 = b; lst ss2 = c\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
peak ars (ss1, ss2)
lst ss1 = b
lst ss2 = c
goal (1 subgoal):
1. (b, c) \<in> (unlabel ars)\<^sup>\<down>
[PROOF STEP]
from this
[PROOF STATE]
proof (chain)
picking this:
peak ars (ss1, ss2)
lst ss1 = b
lst ss2 = c
[PROOF STEP]
obtain ss3 ss4 where dia: "diagram ars (ss1,ss2,ss3,ss4)"
[PROOF STATE]
proof (prove)
using this:
peak ars (ss1, ss2)
lst ss1 = b
lst ss2 = c
goal (1 subgoal):
1. (\<And>ss3 ss4. diagram ars (ss1, ss2, ss3, ss4) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using assms(1)
[PROOF STATE]
proof (prove)
using this:
peak ars (ss1, ss2)
lst ss1 = b
lst ss2 = c
\<forall>P. peak ars P \<longrightarrow> (\<exists>\<sigma>' \<tau>'. DD ars r (fst P, snd P, \<sigma>', \<tau>'))
goal (1 subgoal):
1. (\<And>ss3 ss4. diagram ars (ss1, ss2, ss3, ss4) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
unfolding DD_def
[PROOF STATE]
proof (prove)
using this:
peak ars (ss1, ss2)
lst ss1 = b
lst ss2 = c
\<forall>P. peak ars P \<longrightarrow> (\<exists>\<sigma>' \<tau>'. diagram ars (fst P, snd P, \<sigma>', \<tau>') \<and> D2 r (fst P, snd P, \<sigma>', \<tau>'))
goal (1 subgoal):
1. (\<And>ss3 ss4. diagram ars (ss1, ss2, ss3, ss4) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
apply auto
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<And>a b aa ba. diagram ars (ss1, ss2, (a, b), aa, ba) \<Longrightarrow> thesis; peak ars (ss1, ss2); \<forall>a b aa ba. peak ars ((a, b), aa, ba) \<longrightarrow> (\<exists>ab bb ac bc. diagram ars ((a, b), (aa, ba), (ab, bb), ac, bc) \<and> D2 r ((a, b), (aa, ba), (ab, bb), ac, bc)); b = lst ss1; c = lst ss2\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
using surjective_pairing
[PROOF STATE]
proof (prove)
using this:
?t = (fst ?t, snd ?t)
goal (1 subgoal):
1. \<lbrakk>\<And>a b aa ba. diagram ars (ss1, ss2, (a, b), aa, ba) \<Longrightarrow> thesis; peak ars (ss1, ss2); \<forall>a b aa ba. peak ars ((a, b), aa, ba) \<longrightarrow> (\<exists>ab bb ac bc. diagram ars ((a, b), (aa, ba), (ab, bb), ac, bc) \<and> D2 r ((a, b), (aa, ba), (ab, bb), ac, bc)); b = lst ss1; c = lst ss2\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
by metis
[PROOF STATE]
proof (state)
this:
diagram ars (ss1, ss2, ss3, ss4)
goal (1 subgoal):
1. (b, c) \<in> (unlabel ars)\<^sup>\<down>
[PROOF STEP]
from dia
[PROOF STATE]
proof (chain)
picking this:
diagram ars (ss1, ss2, ss3, ss4)
[PROOF STEP]
obtain d where ss3: "ss3 \<in> seq ars" and ss4: "ss4 \<in> seq ars"
and ss3_1: "fst ss3 = b" and ss3_2: "lst ss3 = d" and ss4_1: "fst ss4 = c" and ss4_2:"lst ss4 = d"
[PROOF STATE]
proof (prove)
using this:
diagram ars (ss1, ss2, ss3, ss4)
goal (1 subgoal):
1. (\<And>d. \<lbrakk>ss3 \<in> seq ars; ss4 \<in> seq ars; fst ss3 = b; lst ss3 = d; fst ss4 = c; lst ss4 = d\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using b c
[PROOF STATE]
proof (prove)
using this:
diagram ars (ss1, ss2, ss3, ss4)
lst ss1 = b
lst ss2 = c
goal (1 subgoal):
1. (\<And>d. \<lbrakk>ss3 \<in> seq ars; ss4 \<in> seq ars; fst ss3 = b; lst ss3 = d; fst ss4 = c; lst ss4 = d\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
unfolding diagram_def
[PROOF STATE]
proof (prove)
using this:
let (\<tau>, \<sigma>, \<sigma>', \<tau>') = (ss1, ss2, ss3, ss4) in {\<sigma>, \<tau>, \<sigma>', \<tau>'} \<subseteq> seq ars \<and> fst \<sigma> = fst \<tau> \<and> lst \<sigma> = fst \<tau>' \<and> lst \<tau> = fst \<sigma>' \<and> lst \<sigma>' = lst \<tau>'
lst ss1 = b
lst ss2 = c
goal (1 subgoal):
1. (\<And>d. \<lbrakk>ss3 \<in> seq ars; ss4 \<in> seq ars; fst ss3 = b; lst ss3 = d; fst ss4 = c; lst ss4 = d\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
ss3 \<in> seq ars
ss4 \<in> seq ars
fst ss3 = b
lst ss3 = d
fst ss4 = c
lst ss4 = d
goal (1 subgoal):
1. (b, c) \<in> (unlabel ars)\<^sup>\<down>
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (b, c) \<in> (unlabel ars)\<^sup>\<down>
[PROOF STEP]
using seq_imp_steps[OF ss3 ss3_1 ss3_2] seq_imp_steps[OF ss4 ss4_1 ss4_2]
[PROOF STATE]
proof (prove)
using this:
(b, d) \<in> (unlabel ars)\<^sup>*
(c, d) \<in> (unlabel ars)\<^sup>*
goal (1 subgoal):
1. (b, c) \<in> (unlabel ars)\<^sup>\<down>
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(b, c) \<in> (unlabel ars)\<^sup>\<down>
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
(b, c) \<in> (unlabel ars)\<^sup>\<down>
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 3357, "file": "Decreasing-Diagrams_Decreasing_Diagrams", "length": 27}
|
"""
Implement wrapper that uses pseudo relevance feedback to expand the initial query with additional terms
"""
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from preprocessing import Corpus
from retrieval_algorithms import RetrievalAlgorithm
from .identity import identity
class PRFWrapper(RetrievalAlgorithm):
def __init__(self,
retrieval_algorithm: RetrievalAlgorithm,
num_relevant_docs: int,
num_expansion_terms: int,
                 expansion_weight: float,
max_ngram=2):
self.retrieval_algorithm = retrieval_algorithm
self.num_relevant_docs = num_relevant_docs
self.num_expansion_terms = num_expansion_terms
self.expansion_weight = expansion_weight
self.vectorizer = TfidfVectorizer(
analyzer="word",
tokenizer=identity,
preprocessor=identity,
ngram_range=(1, max_ngram),
min_df=2
)
self.ids = None
self.vectorized_corpus = None
self.id_to_word = None
def prepare(self, corpus: Corpus):
self.ids = pd.Series(corpus.ids, name="id")
self.retrieval_algorithm.prepare(corpus)
self.vectorized_corpus = self.vectorizer.fit_transform(corpus.data)
self.id_to_word = np.array(self.vectorizer.get_feature_names())
def get_ranking(self, query: str) -> pd.DataFrame:
ranked_documents = self.retrieval_algorithm.get_ranking(query)
if len(ranked_documents) == 0:
return ranked_documents
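        # pseudo-relevance feedback: sum the tf-idf weights of each term over
        # the top-ranked documents and keep the highest-weighted terms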
word_counts = self.vectorized_corpus[
ranked_documents.index[:self.num_relevant_docs], :].sum(
axis=0).getA1()
top_words = np.argsort(word_counts)[::-1][:self.num_expansion_terms]
top_words_count = word_counts[top_words]
expansion_terms = self.id_to_word[top_words]
ranked_documents_expanded = self.retrieval_algorithm.get_ranking(
expansion_terms, top_words_count / top_words_count.sum())
joined_documents = pd.merge(ranked_documents, ranked_documents_expanded, on="id",
how="outer")
joined_documents = joined_documents.fillna(0)
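        # score_x (original ranking) is weighted by expansion_weight; score_y
        # (expanded-query ranking) by 1 - expansion_weight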
score_x = self.expansion_weight * joined_documents["score_x"]
score_y = (1 - self.expansion_weight) * joined_documents["score_y"]
joined_documents["score"] = (score_x + score_y)
joined_documents.sort_values(by="score", ascending=False, inplace=True)
return joined_documents[["id", "score"]]
def get_expansion_terms(self, query):
ranked_documents = self.retrieval_algorithm.get_ranking(query)
if len(ranked_documents) == 0:
return ranked_documents
word_counts = self.vectorized_corpus[
ranked_documents.index[:self.num_relevant_docs], :].sum(
axis=0).getA1()
top_words = np.argsort(word_counts)[::-1][:self.num_expansion_terms]
top_words_count = word_counts[top_words]
expansion_terms = self.id_to_word[top_words]
return dict([term for term in zip(expansion_terms, top_words_count) if
term[0] not in query.split(" ")])
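# Hedged usage sketch (illustrative): wrap a base retrieval algorithm with PRF.
# `SomeBaseAlgorithm` and `corpus` stand in for project-specific objects.
# prf = PRFWrapper(SomeBaseAlgorithm(), num_relevant_docs=10,
#                  num_expansion_terms=5, expansion_weight=0.7)
# prf.prepare(corpus)
# ranking = prf.get_ranking("neural information retrieval")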
|
{"hexsha": "f35dd8f5c17d69dbdf552eb9ba0e800405db9a07", "size": 3264, "ext": "py", "lang": "Python", "max_stars_repo_path": "paper_retrieval/retrieval_algorithms/prf_wrapper.py", "max_stars_repo_name": "JNKielmann/Master-Thesis", "max_stars_repo_head_hexsha": "47475d15a2d63e11320405cc60b0e49ccda2c468", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "paper_retrieval/retrieval_algorithms/prf_wrapper.py", "max_issues_repo_name": "JNKielmann/Master-Thesis", "max_issues_repo_head_hexsha": "47475d15a2d63e11320405cc60b0e49ccda2c468", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-05-17T11:20:51.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-17T11:20:51.000Z", "max_forks_repo_path": "paper_retrieval/retrieval_algorithms/prf_wrapper.py", "max_forks_repo_name": "JNKielmann/Master-Thesis", "max_forks_repo_head_hexsha": "47475d15a2d63e11320405cc60b0e49ccda2c468", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-02-17T21:24:55.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-17T21:24:55.000Z", "avg_line_length": 43.52, "max_line_length": 103, "alphanum_fraction": 0.6645220588, "include": true, "reason": "import numpy", "num_tokens": 682}
|
import numpy as np
import pkg_resources
DTYPE = np.float64
ICA_THRESHOLD_CONST = 0.005
PATH_TO_SESSION = "MNINonLinear/Results"
SESSION_IDS = [('1', 'LR'), ('1', 'RL'), ('2', 'LR'), ('2', 'RL')]
PATH_TO_SESSIONS = "MNINonLinear/Results"
SESSION_NAME_TEMPLATE = "rfMRI_REST%s_%s/rfMRI_REST%s_%s_Atlas_MSMAll_hp2000_clean.dtseries.nii"
SESSION_DIRS = [SESSION_NAME_TEMPLATE % (num, side, num, side) for num, side in SESSION_IDS]
DEFAULT_ICA_BOTH_RESULT_PATH = pkg_resources.resource_filename(__name__, "resources/ica_both_lowdim.dtseries.nii")
DEFAULT_ICA_SEPERATED_RESULT_PATH = pkg_resources.resource_filename(__name__, "resources/ica_LR_MATCHED.dtseries.nii")
DEFAULT_STRUCTURE_ICA_RESULT_PATH = pkg_resources.resource_filename(__name__, "resources/SC_clusters.dtseries.nii")
EXAMPLE_FILE_PATH = pkg_resources.resource_filename(__name__, "resources/example.dtseries.nii")
|
{"hexsha": "c4d51a9c65f7ca2a694cecca60cc8e811e2e12c4", "size": 877, "ext": "py", "lang": "Python", "max_stars_repo_path": "neuralocalize/utils/constants.py", "max_stars_repo_name": "kessido/Neuroscience-seminar", "max_stars_repo_head_hexsha": "09c137638e49a12f9389fb37ed1a810be3cbb7be", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-04-18T12:59:37.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-07T10:46:17.000Z", "max_issues_repo_path": "neuralocalize/utils/constants.py", "max_issues_repo_name": "kessido/Neuroscience-seminar", "max_issues_repo_head_hexsha": "09c137638e49a12f9389fb37ed1a810be3cbb7be", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "neuralocalize/utils/constants.py", "max_forks_repo_name": "kessido/Neuroscience-seminar", "max_forks_repo_head_hexsha": "09c137638e49a12f9389fb37ed1a810be3cbb7be", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-10-07T10:38:40.000Z", "max_forks_repo_forks_event_max_datetime": "2019-04-18T12:55:29.000Z", "avg_line_length": 48.7222222222, "max_line_length": 118, "alphanum_fraction": 0.8061573546, "include": true, "reason": "import numpy", "num_tokens": 228}
|
Require Import floyd.proofauto.
Require Import sha.sha.
Require Import sha.SHA256.
Require Import sha.spec_sha.
Require Import sha.sha_lemmas.
Require Import sha.bdo_lemmas.
Local Open Scope logic.
Definition block_data_order_loop2 :=
nth 1 (loops (fn_body f_sha256_block_data_order)) Sskip.
Fixpoint Xarray' (b: list int) (i k: nat) : list int :=
match k with
| O => nil
| S k' => W (nthi b) (Z.of_nat i - 16 + (16-(Z.of_nat k)- Z.of_nat i) mod 16) ::
Xarray' b i k'
end.
Definition Xarray (b: list int) (i: nat) := Xarray' b i 16.
Lemma Xarray_simpl:
forall b, length b = 16%nat -> Xarray b 16 = b.
Proof.
intros.
assert (forall n, (n<=16)%nat -> Xarray' b 16 n = skipn (16-n) b);
[ | apply H0; auto ].
induction n; intros.
clear H0. rewrite skipn_short by omega. reflexivity.
unfold Xarray'; fold Xarray'.
rewrite IHn by omega. clear IHn.
change (Z.of_nat 16) with 16%Z.
assert (H1: firstn 1 (skipn (16 - S n) b) =
W (nthi b) (16 - 16 + (Z.of_nat (16 - S n) - 16) mod 16) :: nil). {
unfold firstn.
destruct (skipn (16 - S n) b) eqn:?.
pose proof (skipn_length b (16 - S n)).
rewrite Heql in H1.
simpl length in H1.
omega.
f_equal.
pose proof (nth_skipn 0 (16 - S n)%nat b Int.zero).
rewrite Heql in H1.
unfold nth at 1 in H1.
subst.
rewrite Z.sub_diag. rewrite Z.add_0_l.
rewrite plus_0_l.
rewrite Zminus_mod.
rewrite Z.mod_same by omega. rewrite Z.sub_0_r.
rewrite Z.mod_mod by omega.
assert (0 <= (Z.of_nat (16 - S n))mod 16 < 16)%Z by (apply Z.mod_pos_bound; omega).
rewrite W_equation.
rewrite if_true by omega.
rewrite Z.mod_small.
unfold nthi.
rewrite Nat2Z.id.
reflexivity.
split; try omega.
change (Z.of_nat (16 - S n) < Z.of_nat 16)%Z.
apply Nat2Z.inj_lt.
omega.
}
assert (H2 := skipn_skipn 1 (16 - S n) b).
replace (16 - S n + 1)%nat with (16 - n)%nat in H2 by omega.
rewrite <- H2.
rewrite <- (firstn_skipn 1 (skipn (16 - S n) b)) at 2.
rewrite H1.
unfold app.
rewrite Nat2Z.inj_sub by omega.
reflexivity.
Qed.
Lemma length_Xarray:
forall b i, length (Xarray b i) = 16%nat.
Proof.
intros. reflexivity.
Qed.
Lemma nth_Xarray:
forall b i k,
(0 <= k < 16)%Z ->
nthi (Xarray b i) k = W (nthi b) (Z.of_nat i - 16 + (k- Z.of_nat i) mod 16)%Z .
Proof.
intros.
unfold nthi at 1.
remember (Z.to_nat k) as k'.
rewrite <- (Nat2Z.id k') in Heqk'.
apply Z2Nat.inj in Heqk'; try omega.
subst k.
assert (k'<16)%nat by omega.
clear H.
do 16 (destruct k'; try reflexivity).
elimtype False; omega.
Qed.
Lemma extract_from_b:
forall b i n,
length b = 16%nat ->
(16 <= i < 64) ->
(0 <= n < 16) ->
nthi (Xarray b (Z.to_nat i)) ((i + n) mod 16) = W (nthi b) (i - 16 + n).
Proof.
intros.
rewrite nth_Xarray by (apply Z.mod_pos_bound; omega).
f_equal.
f_equal.
rewrite Z2Nat.id by omega. auto.
rewrite Z2Nat.id by omega.
rewrite Zminus_mod.
rewrite Zmod_mod.
rewrite Zplus_mod.
rewrite <- Zminus_mod.
rewrite (Zmod_small n) by omega.
replace (i mod 16 + n - i) with (i mod 16 - i + n) by omega.
rewrite Zplus_mod.
rewrite Zminus_mod.
rewrite Zmod_mod.
rewrite Z.sub_diag.
rewrite (Zmod_small 0) by omega.
rewrite Z.add_0_l.
repeat rewrite Zmod_mod.
apply Zmod_small; omega.
Qed.
Global Opaque Xarray.
Lemma Xarray_update:
forall i b,
Zlength b = LBLOCKz ->
16 <= i < 64 ->
upd_Znth (i mod 16) (map Vint (Xarray b (Z.to_nat i)))
(Vint (W (nthi b) i))
= map Vint (Xarray b (Z.to_nat i+1)).
Proof.
intros.
unfold upd_Znth.
rewrite !sublist_map.
rewrite <- !map_cons, <- !map_app.
f_equal.
repeat match type of H0 with
| (64 <= _ < _)%Z => elimtype False; omega
| (?A <= _ < _)%Z =>
assert (H9: i=A \/ (A+1 <= i < 64)%Z) by omega;
clear H0; destruct H9 as [H0|H0];
[subst i; reflexivity
| simpl in H0 ]
end.
Qed.
Lemma W_unfold:
forall i b,
16 <= i < 64 ->
W (nthi b) (i) =
Int.add (W (nthi b) (i - 16 + 0))
(Int.add
(Int.add (sigma_0 (W (nthi b) (i - 16 + 1)))
(sigma_1 (W (nthi b) (i - 16 + 14))))
(W (nthi b) (i - 16 + 9))).
Proof.
intros.
rewrite W_equation.
rewrite if_false by omega.
rewrite Z.add_0_r;
rewrite (Int.add_commut (W (nthi b) (i - 16)));
repeat rewrite <- Int.add_assoc; f_equal;
rewrite Int.add_commut; repeat rewrite Int.add_assoc; f_equal;
[do 2 f_equal; omega | ];
f_equal; [do 2 f_equal; omega | f_equal; omega].
Qed.
Lemma sha256_block_data_order_loop2_proof:
forall (Espec : OracleKind)
(b: list int) ctx (regs: list int) kv Xv
(Hregs: length regs = 8%nat),
Zlength b = LBLOCKz ->
semax (initialized _i Delta_loop1)
(PROP ()
LOCAL (temp _ctx ctx; temp _i (Vint (Int.repr 16));
temp _a (Vint (nthi (Round regs (nthi b) (LBLOCKz-1)) 0));
temp _b (Vint (nthi (Round regs (nthi b) (LBLOCKz-1)) 1));
temp _c (Vint (nthi (Round regs (nthi b) (LBLOCKz-1)) 2));
temp _d (Vint (nthi (Round regs (nthi b) (LBLOCKz-1)) 3));
temp _e (Vint (nthi (Round regs (nthi b) (LBLOCKz-1)) 4));
temp _f (Vint (nthi (Round regs (nthi b) (LBLOCKz-1)) 5));
temp _g (Vint (nthi (Round regs (nthi b) (LBLOCKz-1)) 6));
temp _h (Vint (nthi (Round regs (nthi b) (LBLOCKz-1)) 7));
gvar _K256 kv; lvar _X (tarray tuint LBLOCKz) Xv)
SEP ( K_vector kv;
data_at Tsh (tarray tuint LBLOCKz) (map Vint b) Xv))
block_data_order_loop2
(normal_ret_assert
(PROP ()
LOCAL(temp _ctx ctx;
temp _a (Vint (nthi (Round regs (nthi b) 63) 0));
temp _b (Vint (nthi (Round regs (nthi b) 63) 1));
temp _c (Vint (nthi (Round regs (nthi b) 63) 2));
temp _d (Vint (nthi (Round regs (nthi b) 63) 3));
temp _e (Vint (nthi (Round regs (nthi b) 63) 4));
temp _f (Vint (nthi (Round regs (nthi b) 63) 5));
temp _g (Vint (nthi (Round regs (nthi b) 63) 6));
temp _h (Vint (nthi (Round regs (nthi b) 63) 7));
gvar _K256 kv; lvar _X (tarray tuint LBLOCKz) Xv)
SEP (K_vector kv;
data_at_ Tsh (tarray tuint LBLOCKz) Xv))).
Proof.
intros.
unfold Delta_loop1.
unfold block_data_order_loop2; simpl nth.
rewrite semax_skip_seq.
match goal with
| |- context [Ssequence ?s1 (Sloop (Ssequence (Sifthenelse ?e Sskip Sbreak) ?s2) ?s3)] =>
fold (Sfor s1 e s2 s3)
end.
abbreviate_semax.
change 16%nat with LBLOCK.
forward_for_simple_bound 64%Z
(EX i:Z,
PROP ((16 <= i)%Z )
LOCAL (temp _ctx ctx;
temp _a (Vint (nthi (Round regs (nthi b) (i - 1)) 0));
temp _b (Vint (nthi (Round regs (nthi b) (i - 1)) 1));
temp _c (Vint (nthi (Round regs (nthi b) (i - 1)) 2));
temp _d (Vint (nthi (Round regs (nthi b) (i - 1)) 3));
temp _e (Vint (nthi (Round regs (nthi b) (i - 1)) 4));
temp _f (Vint (nthi (Round regs (nthi b) (i - 1)) 5));
temp _g (Vint (nthi (Round regs (nthi b) (i - 1)) 6));
temp _h (Vint (nthi (Round regs (nthi b) (i - 1)) 7));
lvar _X (tarray tuint LBLOCKz) Xv;
gvar _K256 kv)
SEP (K_vector kv;
data_at Tsh (tarray tuint LBLOCKz) (map Vint (Xarray b (Z.to_nat i))) Xv)).
*
forward. (* skip; *)
Exists 16.
entailer!.
rewrite Xarray_simpl; auto.
apply Zlength_length in H; auto.
*
Intros.
destruct H0 as [_ H2].
assert (H0: LBLOCKz <= i < 64) by (change LBLOCKz with 16%Z; omega).
clear H2 H1.
assert (H': length b = 16%nat) by (apply Zlength_length in H; auto).
assert (LBE := LBLOCK_zeq).
change LBLOCKz with 16%Z in H0.
change (tarray tuint LBLOCKz) with (tarray tuint 16).
change LBLOCKz with 16%Z in H.
forward. (*s0 = X[(i+1)&0x0f]; *)
autorewrite with sublist. rewrite Zland_15.
forward. (* s0 = sigma0(s0); *)
rewrite extract_from_b by auto; rewrite Int.and_mone; rewrite <- sigma_0_eq.
forward. (* s1 = X[(i+14)&0x0f]; *)
autorewrite with sublist. rewrite Zland_15.
forward. (* s1 = sigma1(s1); *)
rewrite extract_from_b by auto; rewrite Int.and_mone; rewrite <- sigma_1_eq.
forward. (* T1 = X[i&0xf]; *)
autorewrite with sublist. rewrite Zland_15.
replace (nthi (Xarray b (Z.to_nat i)) (i mod 16))
with (W (nthi b) (i - 16 + 0))
by (replace (i mod 16) with ((i + 0) mod 16)
by (rewrite Z.add_0_r; auto);
rewrite extract_from_b; try omega; auto).
forward. (* t = X[(i+9)&0xf]; *)
autorewrite with sublist. rewrite Zland_15.
rewrite extract_from_b by (try assumption; try omega).
forward. (* T1 += s0 + s1 + t; *)
pattern (i-16) at 1; rewrite <- (Z.add_0_r (i - 16)).
rewrite <- (W_unfold i b) by auto.
forward. (* X[i&0xf] = T1; *)
rewrite Zland_15.
rewrite Xarray_update by assumption.
unfold K_vector.
change CBLOCKz with 64%Z.
assert (LEN: Zlength K256 = 64%Z) by reflexivity.
forward. (* Ki=K256[i]; *)
autorewrite with sublist.
rename b into bb.
assert (Hregs' := length_Round _ (nthi bb) (i-1) Hregs).
remember (Round regs (nthi bb) (i - 1)) as regs' eqn:H1.
(destruct regs' as [ | a [ | b [ | c [ | d [ | e [ | f [ | g [ | h [ | ]]]]]]]]];
try now inversion Hregs'); [ clear Hregs' ].
change (nthi [a;b;c;d;e;f;g;h]) with (fun t => nth (Z.to_nat t) [a;b;c;d;e;f;g;h] Int.zero);
cbv beta; simpl nth.
forward. (* T1 += h + Sigma1(e) + Ch(e,f,g) + Ki; *)
rewrite <- Sigma_1_eq, <- Ch_eq.
forward. (* T2 = Sigma0(a) + Maj(a,b,c); *)
rewrite <- Sigma_0_eq, <- Maj_eq.
repeat forward.
rewrite Z.add_simpl_r.
rewrite Z2Nat.inj_add by omega.
entailer!.
clear - H H0 H1.
rewrite Round_equation.
forget (W (nthi bb) i) as Wbbi.
rewrite if_false by omega.
rewrite <- H1; clear H1.
unfold rnd_function, nthi; simpl.
repeat split; try reflexivity.
+
repable_signed.
+
f_equal.
rewrite <- Int.add_assoc; symmetry; rewrite <- Int.add_assoc.
f_equal. f_equal.
rewrite Int.add_commut. rewrite !Int.add_assoc. reflexivity.
+
f_equal. f_equal. rewrite Int.add_commut. f_equal.
*
entailer!.
Qed.
|
{"author": "rbowden91", "repo": "cs260r-fp", "sha": "a1593bdcd91b5aa2e4977e67cbf0c34bc8fa561e", "save_path": "github-repos/coq/rbowden91-cs260r-fp", "path": "github-repos/coq/rbowden91-cs260r-fp/cs260r-fp-a1593bdcd91b5aa2e4977e67cbf0c34bc8fa561e/seplog/VST/sha/verif_sha_bdo7.v"}
|
#
# Mosaic.py -- Mosaic plugin for Ginga reference viewer
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import math
import time
import numpy
import os.path
import threading
from ginga import AstroImage
from ginga.util import mosaic
from ginga.util import wcs, iqcalc, dp
from ginga import GingaPlugin
from ginga.gw import Widgets
try:
import astropy.io.fits as pyfits
have_pyfits = True
except ImportError:
have_pyfits = False
class Mosaic(GingaPlugin.LocalPlugin):
def __init__(self, fv, fitsimage):
# superclass defines some variables for us, like logger
super(Mosaic, self).__init__(fv, fitsimage)
self.mosaic_count = 0
self.img_mosaic = None
self.bg_ref = 0.0
self.ev_intr = threading.Event()
self.lock = threading.RLock()
self.read_elapsed = 0.0
self.process_elapsed = 0.0
self.ingest_count = 0
# holds processed images to be inserted into mosaic image
self.images = []
self.total_files = 0
self.dc = self.fv.getDrawClasses()
canvas = self.dc.DrawingCanvas()
canvas.enable_draw(False)
canvas.add_callback('drag-drop', self.drop_cb)
canvas.setSurface(fitsimage)
#canvas.ui_setActive(True)
self.canvas = canvas
self.layertag = 'mosaic-canvas'
# Load plugin preferences
prefs = self.fv.get_preferences()
self.settings = prefs.createCategory('plugin_Mosaic')
self.settings.setDefaults(annotate_images=False, fov_deg=0.2,
match_bg=False, trim_px=0,
merge=False, num_threads=4,
drop_creates_new_mosaic=False,
mosaic_hdus=False, skew_limit=0.1,
allow_expand=True, expand_pad_deg=0.01,
max_center_deg_delta=2.0,
make_thumbs=True, reuse_image=False)
self.settings.load(onError='silent')
# channel where mosaic should appear (default=ours)
self.mosaic_chname = self.chname
# hook to allow special processing before inlining
self.preprocess = lambda x: x
self.gui_up = False
def build_gui(self, container):
top = Widgets.VBox()
top.set_border_width(4)
vbox, sw, orientation = Widgets.get_oriented_box(container)
vbox.set_border_width(4)
vbox.set_spacing(2)
self.msgFont = self.fv.getFont("sansFont", 12)
tw = Widgets.TextArea(wrap=True, editable=False)
tw.set_font(self.msgFont)
self.tw = tw
fr = Widgets.Expander("Instructions")
fr.set_widget(tw)
vbox.add_widget(fr, stretch=0)
fr = Widgets.Frame("Mosaic")
captions = [
("FOV (deg):", 'label', 'Fov', 'llabel', 'set_fov', 'entry'),
("New Mosaic", 'button', "Allow expansion", 'checkbutton'),
("Label images", 'checkbutton', "Match bg", 'checkbutton'),
("Trim Pixels:", 'label', 'Trim Px', 'llabel',
'trim_pixels', 'entry'),
("Num Threads:", 'label', 'Num Threads', 'llabel',
'set_num_threads', 'entry'),
("Merge data", 'checkbutton', "Drop new",
'checkbutton'),
("Mosaic HDUs", 'checkbutton'),
]
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
fov_deg = self.settings.get('fov_deg', 1.0)
b.fov.set_text(str(fov_deg))
#b.set_fov.set_length(8)
b.set_fov.set_text(str(fov_deg))
b.set_fov.add_callback('activated', self.set_fov_cb)
b.set_fov.set_tooltip("Set size of mosaic FOV (deg)")
b.allow_expansion.set_tooltip("Allow image to expand the FOV")
allow_expand = self.settings.get('allow_expand', True)
b.allow_expansion.set_state(allow_expand)
b.allow_expansion.add_callback('activated', self.allow_expand_cb)
b.new_mosaic.add_callback('activated', lambda w: self.new_mosaic_cb())
labelem = self.settings.get('annotate_images', False)
b.label_images.set_state(labelem)
b.label_images.set_tooltip("Label tiles with their names (only if allow_expand=False)")
b.label_images.add_callback('activated', self.annotate_cb)
trim_px = self.settings.get('trim_px', 0)
match_bg = self.settings.get('match_bg', False)
b.match_bg.set_tooltip("Try to match background levels")
b.match_bg.set_state(match_bg)
b.match_bg.add_callback('activated', self.match_bg_cb)
b.trim_pixels.set_tooltip("Set number of pixels to trim from each edge")
b.trim_px.set_text(str(trim_px))
b.trim_pixels.add_callback('activated', self.trim_pixels_cb)
#b.trim_pixels.set_length(8)
b.trim_pixels.set_text(str(trim_px))
num_threads = self.settings.get('num_threads', 4)
b.num_threads.set_text(str(num_threads))
#b.set_num_threads.set_length(8)
b.set_num_threads.set_text(str(num_threads))
b.set_num_threads.set_tooltip("Number of threads to use for mosaicing")
b.set_num_threads.add_callback('activated', self.set_num_threads_cb)
merge = self.settings.get('merge', False)
b.merge_data.set_tooltip("Merge data instead of overlay")
b.merge_data.set_state(merge)
b.merge_data.add_callback('activated', self.merge_cb)
drop_new = self.settings.get('drop_creates_new_mosaic', False)
b.drop_new.set_tooltip("Dropping files on image starts a new mosaic")
b.drop_new.set_state(drop_new)
b.drop_new.add_callback('activated', self.drop_new_cb)
mosaic_hdus = self.settings.get('mosaic_hdus', False)
b.mosaic_hdus.set_tooltip("Mosaic data HDUs in each file")
b.mosaic_hdus.set_state(mosaic_hdus)
b.mosaic_hdus.add_callback('activated', self.mosaic_hdus_cb)
fr.set_widget(w)
vbox.add_widget(fr, stretch=0)
vbox2 = Widgets.VBox()
# Mosaic evaluation status
hbox = Widgets.HBox()
hbox.set_spacing(4)
hbox.set_border_width(4)
label = Widgets.Label()
self.w.eval_status = label
hbox.add_widget(self.w.eval_status, stretch=0)
hbox.add_widget(Widgets.Label(''), stretch=1)
vbox2.add_widget(hbox, stretch=0)
# Mosaic evaluation progress bar and stop button
hbox = Widgets.HBox()
hbox.set_spacing(4)
hbox.set_border_width(4)
btn = Widgets.Button("Stop")
btn.add_callback('activated', lambda w: self.eval_intr())
btn.set_enabled(False)
self.w.btn_intr_eval = btn
hbox.add_widget(btn, stretch=0)
self.w.eval_pgs = Widgets.ProgressBar()
hbox.add_widget(self.w.eval_pgs, stretch=1)
vbox2.add_widget(hbox, stretch=0)
vbox2.add_widget(Widgets.Label(''), stretch=1)
vbox.add_widget(vbox2, stretch=1)
self.w.vbox = Widgets.VBox()
vbox.add_widget(self.w.vbox, stretch=0)
spacer = Widgets.Label('')
vbox.add_widget(spacer, stretch=1)
top.add_widget(sw, stretch=1)
btns = Widgets.HBox()
btns.set_spacing(3)
btn = Widgets.Button("Close")
btn.add_callback('activated', lambda w: self.close())
btns.add_widget(btn, stretch=0)
btns.add_widget(Widgets.Label(''), stretch=1)
top.add_widget(btns, stretch=0)
container.add_widget(top, stretch=1)
self.gui_up = True
def set_preprocess(self, fn):
if fn is None:
fn = lambda x: x
self.preprocess = fn
def prepare_mosaic(self, image, fov_deg, name=None):
"""Prepare a new (blank) mosaic image based on the pointing of
the parameter image
"""
header = image.get_header()
ra_deg, dec_deg = header['CRVAL1'], header['CRVAL2']
data_np = image.get_data()
#dtype = data_np.dtype
dtype = None
self.bg_ref = iqcalc.get_median(data_np)
# TODO: handle skew (differing rotation for each axis)?
skew_limit = self.settings.get('skew_limit', 0.1)
(rot_deg, cdelt1, cdelt2) = wcs.get_rotation_and_scale(header,
skew_threshold=skew_limit)
self.logger.debug("image0 rot=%f cdelt1=%f cdelt2=%f" % (
rot_deg, cdelt1, cdelt2))
# TODO: handle differing pixel scale for each axis?
px_scale = math.fabs(cdelt1)
cdbase = [numpy.sign(cdelt1), numpy.sign(cdelt2)]
reuse_image = self.settings.get('reuse_image', False)
if (not reuse_image) or (self.img_mosaic is None):
self.logger.debug("creating blank image to hold mosaic")
self.fv.gui_do(self._prepare_mosaic1, "Creating blank image...")
# GC old mosaic
self.img_mosaic = None
img_mosaic = dp.create_blank_image(ra_deg, dec_deg,
fov_deg, px_scale,
rot_deg,
cdbase=cdbase,
logger=self.logger,
pfx='mosaic',
dtype=dtype)
if name is not None:
img_mosaic.set(name=name)
imname = img_mosaic.get('name', image.get('name', "NoName"))
# avoid making a thumbnail of this if seed image is also that way
nothumb = not self.settings.get('make_thumbs', False)
if nothumb:
img_mosaic.set(nothumb=True)
else:
# image is not on disk, set indication for other plugins
img_mosaic.set(path=None)
# TODO: fill in interesting/select object headers from seed image
self.img_mosaic = img_mosaic
self.fv.gui_call(self.fv.add_image, imname, img_mosaic,
chname=self.mosaic_chname)
else:
# <-- reuse image (faster)
self.logger.debug("Reusing previous mosaic image")
self.fv.gui_do(self._prepare_mosaic1, "Reusing previous mosaic image...")
img_mosaic = dp.recycle_image(self.img_mosaic,
ra_deg, dec_deg,
fov_deg, px_scale,
rot_deg,
cdbase=cdbase,
logger=self.logger,
pfx='mosaic')
header = img_mosaic.get_header()
(rot, cdelt1, cdelt2) = wcs.get_rotation_and_scale(header,
skew_threshold=skew_limit)
self.logger.debug("mosaic rot=%f cdelt1=%f cdelt2=%f" % (
rot, cdelt1, cdelt2))
return img_mosaic
def _prepare_mosaic1(self, msg):
self.canvas.deleteAllObjects()
self.update_status(msg)
def _inline(self, images):
self.fv.assert_gui_thread()
# Get optional parameters
trim_px = self.settings.get('trim_px', 0)
match_bg = self.settings.get('match_bg', False)
merge = self.settings.get('merge', False)
allow_expand = self.settings.get('allow_expand', True)
expand_pad_deg = self.settings.get('expand_pad_deg', 0.010)
annotate = self.settings.get('annotate_images', False)
bg_ref = None
if match_bg:
bg_ref = self.bg_ref
time_intr1 = time.time()
# Add description for ChangeHistory
iminfo = self.chinfo.get_image_info(self.img_mosaic.get('name'))
iminfo.reason_modified = 'Added {0}'.format(
','.join([im.get('name') for im in images]))
loc = self.img_mosaic.mosaic_inline(images,
bg_ref=bg_ref,
trim_px=trim_px,
merge=merge,
allow_expand=allow_expand,
expand_pad_deg=expand_pad_deg,
suppress_callback=False)
(xlo, ylo, xhi, yhi) = loc
        # annotate the ingested region with the names of the images placed there?
        if annotate and (not allow_expand):
            imname = ','.join([im.get('name', 'NoName') for im in images])
            x, y = (xlo + xhi) // 2, (ylo + yhi) // 2
            self.canvas.add(self.dc.Text(x, y, imname, color='red'),
                            redraw=False)
time_intr2 = time.time()
self.process_elapsed += time_intr2 - time_intr1
# special hack for GUI responsiveness during entire ingestion
# process
#self.fv.update_pending(timeout=0.0)
def close(self):
self.img_mosaic = None
self.fv.stop_local_plugin(self.chname, str(self))
self.gui_up = False
return True
def instructions(self):
self.tw.set_text("""Set the FOV and drag files onto the window.""")
def start(self):
self.instructions()
# insert layer if it is not already
p_canvas = self.fitsimage.get_canvas()
try:
obj = p_canvas.getObjectByTag(self.layertag)
except KeyError:
# Add canvas layer
p_canvas.add(self.canvas, tag=self.layertag)
self.resume()
def stop(self):
self.canvas.ui_setActive(False)
p_canvas = self.fitsimage.get_canvas()
try:
p_canvas.deleteObjectByTag(self.layertag)
        except Exception:
pass
# dereference potentially large mosaic image
self.img_mosaic = None
self.fv.showStatus("")
def pause(self):
# comment this to NOT disable the UI for this plugin
# when it loses focus
#self.canvas.ui_setActive(False)
pass
def resume(self):
self.canvas.ui_setActive(True)
def new_mosaic_cb(self):
self.img_mosaic = None
self.fitsimage.onscreen_message("Drag new files...",
delay=2.0)
def drop_cb(self, canvas, paths, *args):
self.logger.info("files dropped: %s" % str(paths))
new_mosaic = self.settings.get('drop_creates_new_mosaic', False)
self.fv.nongui_do(self.fv.error_wrap, self.mosaic, paths,
new_mosaic=new_mosaic)
return True
def annotate_cb(self, widget, tf):
self.settings.set(annotate_images=tf)
def allow_expand_cb(self, widget, tf):
self.settings.set(allow_expand=tf)
def ingest_one(self, image):
with self.lock:
self.images.append(image)
self.ingest_count += 1
count = self.ingest_count
self.update_progress(float(count)/self.total_files)
if count == self.total_files:
self.end_progress()
self.update_status("Inserting into mosaic...")
images, self.images = self.images, []
self.fv.gui_do(self._inline, images)
total_elapsed = time.time() - self.start_time
msg = "Done. Total=%.2f Process=%.2f (sec)" % (
total_elapsed, self.process_elapsed)
self.update_status(msg)
def mosaic_some(self, paths, image_loader=None):
if image_loader is None:
image_loader = self.fv.load_image
for url in paths:
if self.ev_intr.isSet():
break
            mosaic_hdus = self.settings.get('mosaic_hdus', False)
            if mosaic_hdus:
                if not have_pyfits:
                    raise ImportError("astropy.io.fits is required when "
                                      "mosaic_hdus=True")
                self.logger.debug("mosaicing hdus")
# User wants us to mosaic HDUs
# TODO: do this in a different thread?
with pyfits.open(url, 'readonly') as in_f:
i = 0
for hdu in in_f:
i += 1
# TODO: I think we need a little more rigorous test
# than just whether the data section is empty
if hdu.data is None:
continue
self.logger.debug("ingesting hdu #%d" % (i))
image = AstroImage.AstroImage(logger=self.logger)
image.load_hdu(hdu)
image.set(name='hdu%d' % (i))
image = self.preprocess(image)
self.ingest_one(image)
else:
image = image_loader(url)
image = self.preprocess(image)
self.ingest_one(image)
def mosaic(self, paths, new_mosaic=False, name=None, image_loader=None):
if image_loader is None:
image_loader = self.fv.load_image
# NOTE: this runs in a non-gui thread
self.fv.assert_nongui_thread()
# Initialize progress bar
self.total_files = len(paths)
if self.total_files == 0:
return
self.ingest_count = 0
self.images = []
self.ev_intr.clear()
self.process_elapsed = 0.0
self.init_progress()
self.start_time = time.time()
image = image_loader(paths[0])
time_intr1 = time.time()
fov_deg = self.settings.get('fov_deg', 0.2)
max_center_deg_delta = self.settings.get('max_center_deg_delta', None)
# If there is no current mosaic then prepare a new one
if new_mosaic or (self.img_mosaic is None):
self.prepare_mosaic(image, fov_deg, name=name)
elif max_center_deg_delta is not None:
# get our center position
ctr_x, ctr_y = self.img_mosaic.get_center()
ra1_deg, dec1_deg = self.img_mosaic.pixtoradec(ctr_x, ctr_y)
# get new image's center position
ctr_x, ctr_y = image.get_center()
ra2_deg, dec2_deg = image.pixtoradec(ctr_x, ctr_y)
# distance between our center and new image's center
dist = wcs.deltaStarsRaDecDeg(ra1_deg, dec1_deg,
ra2_deg, dec2_deg)
# if distance is greater than trip setting, start a new mosaic
if dist > max_center_deg_delta:
self.prepare_mosaic(image, fov_deg, name=name)
self.update_status("Loading images...")
#self.fv.gui_call(self.fv.error_wrap, self.ingest_one, image)
#self.update_progress(float(self.ingest_count)/self.total_files)
time_intr2 = time.time()
self.process_elapsed += time_intr2 - time_intr1
num_threads = self.settings.get('num_threads', 4)
groups = dp.split_n(paths, num_threads)
self.logger.info("len groups=%d" % (len(groups)))
for group in groups:
self.fv.nongui_do(self.mosaic_some, group,
image_loader=image_loader)
return self.img_mosaic
def set_fov_cb(self, w):
fov_deg = float(w.get_text())
self.settings.set(fov_deg=fov_deg)
self.w.fov.set_text(str(fov_deg))
def trim_pixels_cb(self, w):
trim_px = int(w.get_text())
self.w.trim_px.set_text(str(trim_px))
self.settings.set(trim_px=trim_px)
def match_bg_cb(self, w, tf):
self.settings.set(match_bg=tf)
def merge_cb(self, w, tf):
self.settings.set(merge=tf)
def drop_new_cb(self, w, tf):
self.settings.set(drop_creates_new_mosaic=tf)
def mosaic_hdus_cb(self, w, tf):
self.settings.set(mosaic_hdus=tf)
def set_num_threads_cb(self, w):
num_threads = int(w.get_text())
self.w.num_threads.set_text(str(num_threads))
self.settings.set(num_threads=num_threads)
def update_status(self, text):
if self.gui_up:
self.fv.gui_do(self.w.eval_status.set_text, text)
def init_progress(self):
def _foo():
self.w.btn_intr_eval.set_enabled(True)
self.w.eval_pgs.set_value(0.0)
if self.gui_up:
self.fv.gui_do(_foo)
def update_progress(self, pct):
if self.gui_up:
self.fv.gui_do(self.w.eval_pgs.set_value, pct)
def end_progress(self):
if self.gui_up:
self.fv.gui_do(self.w.btn_intr_eval.set_enabled, False)
def eval_intr(self):
self.ev_intr.set()
def __str__(self):
return 'mosaic'
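# Hedged usage note (not part of the original plugin): in the Ginga reference
# viewer this local plugin is started from a channel's Operation menu; once the
# GUI is up, files can be dropped on the window or passed programmatically to
# the plugin's mosaic(paths) method.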
#END
|
{"hexsha": "c7e5ff3ff620c9b6c96d824b7ec5e69b64a045ad", "size": 20700, "ext": "py", "lang": "Python", "max_stars_repo_path": "ginga/misc/plugins/Mosaic.py", "max_stars_repo_name": "Cadair/ginga", "max_stars_repo_head_hexsha": "5afdd8824f27c7ae7d8d82b5013b0ff0068bd8b8", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ginga/misc/plugins/Mosaic.py", "max_issues_repo_name": "Cadair/ginga", "max_issues_repo_head_hexsha": "5afdd8824f27c7ae7d8d82b5013b0ff0068bd8b8", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ginga/misc/plugins/Mosaic.py", "max_forks_repo_name": "Cadair/ginga", "max_forks_repo_head_hexsha": "5afdd8824f27c7ae7d8d82b5013b0ff0068bd8b8", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.3157894737, "max_line_length": 95, "alphanum_fraction": 0.5779710145, "include": true, "reason": "import numpy,import astropy", "num_tokens": 4602}
|
import numpy as np
import math
from scipy.special import hyp2f1
################################################################
################ Define some helper functions ##################
################################################################
def lennard_jones(r , sigma , epsilon , LJ_form = 'standard'):
"""
Calculate a Lennard-Jones potential.
Inputs:
r: numpy array. Contains distances between interacting particles in units of m.
sigma: float or int. Distance at which potential energy is zero in units of m.
epsilon: float or int. 'Dispersion energy' (or depth of potential well) for the potential in units of J.
LJ_form: string. Specifies whether the user wants the 'standard' 6-12 L.J potential or the modified version.
Outputs:
V: numpy array. This contains the generate potential.
F: numpy array. This contains the force corresponding to the generated potential.
"""
if LJ_form == 'standard':
# Generate the standard '6-12' Lennard-Jones potential.
V = 4 * epsilon * ( np.divide(sigma , r) ** 12 - np.divide(sigma , r) ** 6 )
        F = 24 * epsilon / sigma * ( 2 * np.divide(sigma , r) ** 13 - np.divide(sigma , r) ** 7 )
elif LJ_form == 'modified':
# Generate a modified version of the '6-12' Lennard-Jones potential.
V = - epsilon * ( 2 * np.divide(sigma , r) ** 6 - np.divide(sigma , r) ** 12 )
F = - 12 * epsilon / sigma * ( np.divide(sigma , r) ** 7 - np.divide(sigma , r) ** 13 )
    else:
        raise ValueError("LJ_form must be 'standard' or 'modified'.")
    return V, F
def Force_from_Potential(r , V):
"""
Calculates the force from a given potential using the 'negative gradient' technique from Physics I.
Inputs:
r: numpy array. Distance from the potential's origin in units of m.
V: numpy array. Potential values for corresponding distances in r.
Outputs:
The output is the force corresponding to the input potential in units of N. Length of output vector is one less than the input vectors.
Note: This function uses the strict 'difference quotient' method of determining the derivative for consistency with other published numeric methods.
Another option (shown below in comment form) would be to use Numpy's built-in gradient function. This would result in an output vector whose
length is equal to that of the input vectors.
"""
#return -np.gradient(V , r)
return - np.diff(V) / np.diff(r)
def frequency_shift(z_ltp , f_0 , k , A , E_bond , sigma , potential_type = 'Lennard-Jones-Modified'):
"""
Calculates simulated frequency shift data to model an FM-AFM experiment.
Inputs:
        z_ltp: numpy array. Contains distances of closest approach of an AFM tip to a sample in units of m.
f_0: float or int. Resonance frequency of unloaded cantilever. Should be in units of Hz.
k: float or int. Effective spring constant of cantilever. Units of N/m.
A: float or int. The oscillation amplitude of the AFM cantilever during experiment. Should be in units of m.
E_bond: float or int. 'Dispersion energy' (or depth of potential well) for the potential in units of J.
sigma: float or int. Distance at which potential energy is zero in units of m.
potential_type: string. Specifies what type of potential the frequency shift data should be generated for.
Outputs:
The output is the simulated frequency shift in units of Hz as a numpy array.
Note: For more information regarding the calculations performed by this function, see the following journal (as well as additional articles cited therein):
J. Welker, E. Illek and F. Giessibl
"Analysis of force-deconvolution methods in frequency-modulation atomic force microscopy"
Beilstein Journal of Nanotechnology, 2012, 3, 238–248.
DOI: https://doi.org/10.3762/bjnano.3.27
"""
if potential_type == 'Lennard-Jones-Modified':
# Simulate the data based on the modified '6-12' Lennard-Jones potential.
prefactor = - 12 * f_0 * E_bond / (k * A * sigma)
argument = np.divide(- 2 * A , z_ltp)
term1 = np.multiply( np.divide(sigma , z_ltp)**7 , hyp2f1(7 , 0.5 , 1 , argument) - hyp2f1(7 , 1.5 , 2 , argument) )
term2 = np.multiply( np.divide(sigma , z_ltp)**13 , hyp2f1(13 , 0.5 , 1 , argument) - hyp2f1(13 , 1.5 , 2 , argument) )
freq_shift = prefactor * (term1 - term2)
    elif potential_type == 'Morse':
        print('Warning: This feature has not been implemented yet.')
        freq_shift = math.nan
    else:
        raise ValueError("potential_type must be 'Lennard-Jones-Modified' or 'Morse'.")
    return freq_shift
################################################################
############ Define Sader-Jarvis Method Function ###############
################################################################
def saderF(z , Delta_f , A , k , f_0):
"""
Performs force recovery on frequency-modulated AFM data using the Sader-Jarvis method.
Inputs:
z: numpy array. Contains tip height data. Should be in units of m.
Delta_f: numpy array. Contains frequency shift data. Should be in units of Hz.
A: float or int. The oscillation amplitude of the AFM cantilever during experiment. Should be in units of m.
k: float or int. Effective spring constant of cantilever. Units of N/m.
f_0: float or int. Resonance frequency of unloaded cantilever. Should be in units of Hz.
Outputs:
z: numpy array. Truncated version of the input height data. Included as convenience for plotting recovered force data. Has units of m.
F_recovered: numpy array. Recovered force in units of N.
Note: This function was adapted from MATLAB code written by the authors of the following journal article. The original MATLAB code can be found in the supplementary info section of the journal article on the publisher's webpage.
Source Journal Article:
J. Welker, E. Illek and F. Giessibl
"Analysis of force-deconvolution methods in frequency-modulation atomic force microscopy"
Beilstein Journal of Nanotechnology, 2012, 3, 238–248.
DOI: https://doi.org/10.3762/bjnano.3.27
Theory Reference:
J. E. Sader and S. P. Jarvis
"Accurate formulas for interaction force and energy in frequency modulation force spectroscopy"
Applied Physics Letters, 84, 1801-1803 (2004).
DOI: https://doi.org/10.1063/1.1667267
Other Reference:
J. E. Sader and S. P. Jarvis
Mathematica notebook for implementation of formulas
http://www.ampc.ms.unimelb.edu.au/afm/bibliography.html#FMAFM.
"""
# Calculate spatial derivative of frequency shift. This code uses the difference quotient method of calculating the derivative rather than Numpy's gradient method.
derivative = np.diff(Delta_f) / np.diff(z)
# The input vectors need to have their length adjusted to match the derivative vector, since the difference quotient method outputs a derivative vector which is one element shorter than the inputs.
z = z[:len(derivative)]
Delta_f = Delta_f[:len(derivative)]
# Calculate prefactor
prefactor = 2 * k / f_0
# Initialize a vector to store the recovered force values in.
F_recovered = np.zeros(len(z) - 1)
# Calculate the recovered force for each data point. j is the index of the z value under consideration for any particular iteration of the loop.
for j in range(0 , len(z) - 1): # Iterate over the whole (shortened) z vector except the last element. Skip the last element because this serves as the upper limit of the integral ("infinity"), which we won't use in a numeric integration.
# Define t as the z range to be integrated over. Ranges from the lowest z value (i.e. the jth) under consideration, which is treated as the distance of closest approach.
t = z[j+1:] # Skip the first z value (corresponding to z_j) to avoid the pole at t = z_j.
# Pick out the portions of vectors whose z values correspond to those in t.
Delta_f_j = Delta_f[j+1:]
derivative_j = derivative[j+1:]
# Calculate the integrand.
g_j1 = (1 + np.divide(math.sqrt(A) , 8 * math.sqrt(math.pi) * np.sqrt(t - z[j]))) * Delta_f_j
g_j2 = np.divide(A**(3/2) , math.sqrt(2) * np.sqrt(t - z[j])) * derivative_j
g_j = g_j1 - g_j2
# Perform numeric integration using the trapezoidal rule
integral = np.trapz(g_j, t - z[j] )
# Calculate correction factor term-by-term
corr1 = Delta_f[j] * (z[j+1] - z[j])
corr2 = 2 * math.sqrt(A) / ( 8 * math.sqrt(math.pi)) * Delta_f[j] * math.sqrt(z[j+1] - z[j])
corr3 = 2 * A**(3/2) / math.sqrt(2) * math.sqrt(z[j+1] - z[j]) * (Delta_f[j+1] - Delta_f[j]) / (z[j+1] - z[j])
# Calculate the recovered force for the jth data point.
F_recovered[j] = prefactor * (corr1 + corr2 + corr3 + integral)
return z[:-1] , F_recovered
|
{"hexsha": "3de3e2c254e2ad1600ecbddf4fd6e6a5aadef3a7", "size": 8876, "ext": "py", "lang": "Python", "max_stars_repo_path": "nanoscopy/afm/AFMForceRecovery.py", "max_stars_repo_name": "darianSmalley/NanoscoPy", "max_stars_repo_head_hexsha": "dfb6784f5ad3f439765bfb0fb67d9cde5aec87d5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nanoscopy/afm/AFMForceRecovery.py", "max_issues_repo_name": "darianSmalley/NanoscoPy", "max_issues_repo_head_hexsha": "dfb6784f5ad3f439765bfb0fb67d9cde5aec87d5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nanoscopy/afm/AFMForceRecovery.py", "max_forks_repo_name": "darianSmalley/NanoscoPy", "max_forks_repo_head_hexsha": "dfb6784f5ad3f439765bfb0fb67d9cde5aec87d5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 54.7901234568, "max_line_length": 242, "alphanum_fraction": 0.653785489, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2231}
|
module TestBasisPursuit
using Test
using LinearAlgebra
using SparseArrays
using CompressedSensing: bp, bp_candes, bp_ard, bpd, bpd_candes, bpd_ard, sparse_data, perturb
n, m = 32, 48
k = 3
A, x, b = sparse_data(n = n, m = m, k = k, rescaled = true)
δ = 1e-2
y = perturb(b, δ/2)
@testset "Basis Pursuit" begin
# equality constrained l1 minimization
xl = bp(A, b)
@test xl.nzind == x.nzind
xc = bp_candes(A, b)
@test xc.nzind == x.nzind
xard = bp_ard(A, b)
@test xard.nzind == x.nzind
end
@testset "Basis Pursuit Denoising" begin
xl = bpd(A, y, δ)
droptol!(xl, 1e-2) # sometimes has spurious coefficients above perturbation level
@test xl.nzind == x.nzind
xc = bpd_candes(A, y, δ, maxiter = 3)
droptol!(xc, 1e-6)
@test xc.nzind == x.nzind
xard = bpd_ard(A, y, δ, maxiter = 16)
droptol!(xard, 1e-6)
@test xard.nzind == x.nzind
end
using CompressedSensing: ista, fista
@testset "ISTA" begin
λ = δ/10
xista = ista(A, y, λ, maxiter = 1024, stepsize = 1e-1)
# droptol!(xista, δ)
# @test xista.nzind == x.nzind
# @test xista.nzind ⊆ x.nzind
@test norm(A*xista - y) < δ
# TODO: FISTA (a hedged test sketch follows after this testset)
end
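# A hedged sketch of the missing FISTA test; it assumes fista shares ista's
# signature, which this file does not confirm.
@testset "FISTA" begin
λ = δ/10
xfista = fista(A, y, λ, maxiter = 1024, stepsize = 1e-1)
@test norm(A*xfista - y) < δ
end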
end
|
{"hexsha": "338a52f95749129cb2cd41d15df58dfbc01420dc", "size": 1184, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/basispursuit.jl", "max_stars_repo_name": "SebastianAment/CompressedSensing.jl", "max_stars_repo_head_hexsha": "ea296503966c4c50ba943bed1f9bb8168683bb2b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2021-12-13T11:51:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-19T12:41:15.000Z", "max_issues_repo_path": "test/basispursuit.jl", "max_issues_repo_name": "SebastianAment/CompressedSensing.jl", "max_issues_repo_head_hexsha": "ea296503966c4c50ba943bed1f9bb8168683bb2b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-12-11T23:57:59.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T20:54:22.000Z", "max_forks_repo_path": "test/basispursuit.jl", "max_forks_repo_name": "SebastianAment/CompressedSensing.jl", "max_forks_repo_head_hexsha": "ea296503966c4c50ba943bed1f9bb8168683bb2b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.2156862745, "max_line_length": 94, "alphanum_fraction": 0.6300675676, "num_tokens": 463}
|
from matplotlib import pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
import os
from pandas.tseries.offsets import MonthEnd
from qcmr.parse.utils import get_fiscal_months
from .. import utils
from ..style import default_style, palette
__all__ = [
"monthly_actuals_this_quarter",
"historical_accuracy_of_revenue_projection",
"historical_accuracy_of_spending_projection",
"historical_annual_cash_flows",
"fund_balance_revisions",
"annual_TRAN",
"historical_monthly_cash_flows",
]
def historical_monthly_cash_flows(report, filename):
"""
"""
from qcmr.cash import get_GF_revenues, get_GF_spending
def add_date(df):
year = df["fiscal_year"].where(df["month"] < 7, df["fiscal_year"] - 1)
df["Date"] = df["month"].astype(str).str.cat(year.astype(str), sep="/")
df["Date"] = pd.to_datetime(df["Date"]) + MonthEnd(1)
return df
# Revenues
R = get_GF_revenues().query("quarter == 4")[
["Total Cash Receipts", "fiscal_year", "month"]
]
R = add_date(R)[["Date", "Total Cash Receipts"]].rename(
columns={"Total Cash Receipts": "Revenue"}
)
# Spending
S = get_GF_spending().query("quarter == 4")[
["Total Disbursements", "fiscal_year", "month"]
]
S = add_date(S)[["Date", "Total Disbursements"]].rename(
columns={"Total Disbursements": "Expenditures"}
)
data = pd.merge(R, S, on="Date")
xticks = data["Date"].dt.year[(data["Date"].dt.month == 7)]
dates = data["Date"].dt.strftime("%m/%y")
with plt.style.context(default_style):
# Initialize the figure/axes
fig, axs = plt.subplots(
nrows=2,
ncols=1,
figsize=(6, 5),
gridspec_kw=dict(top=0.88, left=0.13, bottom=0.15, right=0.99, hspace=1.0),
)
for i, col in enumerate(["Revenue", "Expenditures"]):
ax = axs[i]
# spending
if i == 1:
label = "Expenditures"
ax.plot(dates, data[col].values, color=palette["love-park-red"], lw=3)
else: # revenue
label = "Revenues"
ax.plot(dates, data[col].values, color=palette["black"], lw=3)
# format grid
sns.despine(left=True, ax=ax)
ax.grid(b=False, axis="x")
# x label
ax.text(
-0.13,
1.15,
"Monthly General Fund\nCash " + label,
weight="bold",
fontsize=11,
transform=ax.transAxes,
)
ax.set_xticks(xticks.index.tolist())
ax.set_xticklabels(xticks.values + 1, fontsize=11, rotation=90)
ax.set_ylim(0, 1050)
ax.set_yticks([0, 200, 400, 600, 800, 1000])
ax.set_yticklabels(
[utils.format_currency(x, "{:,.0f}M") for x in ax.get_yticks()],
fontsize=11,
)
ax.set_xlabel("Fiscal Year", fontsize=12, weight="bold")
ax.grid(True)
fig.savefig(filename, dpi=300)
def annual_TRAN(report, filename):
"""
Plot a bar graph showing the total amount of TRAN taken out
each fiscal year.
Parameters
----------
report : CashReport
the cash report object
filename : str
the file name to save the file to
"""
from qcmr.cash import get_GF_balance_sheet
# Load the data
df = get_GF_balance_sheet()
df = (
df.query("quarter==4")
.groupby("fiscal_year")["TRAN"]
.apply(lambda x: sum(abs(x)))
// 2
)
df = (
df.reset_index()
.assign(fiscal_year=lambda df: df.fiscal_year.astype(int))
.rename(columns={"fiscal_year": "Fiscal Year"})
)
with plt.style.context(default_style):
# Initialize the figure/axes
fig, ax = plt.subplots(
figsize=(5, 3),
gridspec_kw=dict(top=0.8, left=0.13, bottom=0.25, right=0.98),
)
# bar plot
sns.barplot(
ax=ax,
x=df["Fiscal Year"],
y=df["TRAN"],
color=palette["dark-ben-franklin"],
saturation=1.0,
)
# Add the y-axis label
ax.text(
-0.13,
1.12,
"Short-term General Fund\nBorrowing Amounts",
weight="bold",
fontsize=12,
transform=ax.transAxes,
)
# Format
ax.set_ylim(0, 410)
plt.setp(ax.get_xticklabels(), fontsize=11, rotation=90)
ax.set_yticks([0, 100, 200, 300, 400])
ax.set_yticklabels(["$%.0fM" % x for x in ax.get_yticks()], fontsize=11)
ax.set_xlabel("Fiscal Year", fontsize=12, weight="bold")
ax.set_ylabel("")
ax.xaxis.labelpad = 7
plt.savefig(filename, dpi=300)
def fund_balance_revisions(report, filename):
"""
Plot a scatter chart with estimated uncertainty showing the relationship between
the modified accrual and cash General Fund balances.
Parameters
----------
report : CashReport
the cash report object
filename : str
the file name to save the file to
"""
CURRENT_FY = report.year
CURRENT_Q = report.quarter
this_year = f"FY{str(CURRENT_FY)[2:]}"
last_year = f"FY{str(CURRENT_FY - 1)[2:]}"
# load the data
df = report.fund_balance_revisions()
with plt.style.context(default_style):
# Initialize the figure/axes
fig, ax = plt.subplots(
figsize=(6, 4),
gridspec_kw=dict(top=0.85, left=0.03, bottom=0.12, right=0.85),
)
# Plot the historical data points
X = df.dropna()
color = palette["black"]
fmt = {
"c": "white",
"zorder": 11,
"marker": "o",
"edgecolors": color,
"linewidth": 2,
}
min_year = f"FY{str(X['Fiscal Year'].astype(int).min())[2:]}"
max_year = f"FY{str(X['Fiscal Year'].astype(int).max())[2:]}"
label = f"Annual Historical Data\nfrom {min_year} to {max_year}"
ax.scatter(X["Q4 Cash Balance"], X["Q1 Actual"], label=label, **fmt)
# Plot the uncertainty
ax.fill_between(
df["Q4 Cash Balance"],
df["Q1 Actual (Lower)"],
df["Q1 Actual (Upper)"],
color=palette["light-ben-franklin"],
alpha=0.5,
zorder=10,
label="Estimated Uncertainty",
)
ax.plot(
df["Q4 Cash Balance"],
df["Q1 Actual (Upper)"],
color=palette["light-ben-franklin"],
lw=3,
zorder=10,
)
ax.plot(
df["Q4 Cash Balance"],
df["Q1 Actual (Lower)"],
color=palette["light-ben-franklin"],
lw=3,
zorder=10,
)
# Vertical line for this year
df_this_year = df.query(f"`Fiscal Year` == {report.year}")
ax.axvline(
x=df_this_year["Q4 Cash Balance"].squeeze(),
lw=2,
color=palette["love-park-red"],
zorder=11,
)
# Mark upper/lower estimates for this year
labels = ["Lower estimate", "Upper estimate"]
offsets = [(10, -20), (-30, 15)]
has = ["left", "right"]
vas = ["top", "bottom"]
for i, col in enumerate(["Q1 Actual (Lower)", "Q1 Actual (Upper)"]):
color = palette["love-park-red"]
x = df_this_year["Q4 Cash Balance"]
y = df_this_year[col]
ax.scatter(
x,
y,
color="white",
zorder=12,
marker="o",
edgecolors=color,
linewidth=2,
)
ax.annotate(
labels[i] + f"\nfor {this_year}: " + "$%.0fM" % y,
xy=(x, y),
xycoords="data",
xytext=offsets[i],
textcoords="offset points",
ha=has[i],
va=vas[i],
fontsize=10,
zorder=12,
weight="bold",
arrowprops=dict(
arrowstyle="->", color="k", lw=2, connectionstyle="arc3,rad=0.1"
),
bbox=dict(facecolor="white", pad=0),
)
# Format
ax.set_xlim(-395, 1100)
ax.set_yticklabels(
[
utils.format_currency(x, "{:,.0f}M", plus_sign=True)
for x in ax.get_yticks()
]
)
ax.set_xticklabels(
[
utils.format_currency(x, "{:,.0f}M", plus_sign=False)
for x in ax.get_xticks()
]
)
plt.setp(ax.get_yticklabels(), ha="left")
# Axis labels
ax.set_xlabel("Q4 Cash Balance", weight="bold", fontsize=11)
ax.text(
-0.02,
1.06,
"Final Modified Accrual\nFund Balance",
fontsize=11,
transform=ax.transAxes,
ha="left",
va="bottom",
weight="bold",
)
ax.text(
df_this_year["Q4 Cash Balance"].squeeze(),
370.0,
f"{this_year} Cash Balance",
fontsize=10,
ha="right",
va="top",
weight="bold",
rotation=90,
bbox=dict(facecolor="white"),
)
# Add arrow for last year
df_last_year = df.query(f"`Fiscal Year` == {report.year-1}")
x1 = df_last_year["Q4 Cash Balance"].squeeze()
y1 = df_last_year["Q1 Actual"].squeeze()
x2, y2 = 550, 550
ax.annotate(
last_year,
xy=(x1, y1),
xycoords="data",
xytext=(x2, y2),
textcoords="data",
ha="left",
va="bottom",
zorder=12,
fontsize=10,
weight="bold",
arrowprops=dict(
arrowstyle="->", color="k", lw=2, connectionstyle="arc3,rad=-0.3"
),
bbox=dict(facecolor="white", pad=0),
)
# Make the y=0,x=0 grid lines darker
ax.axhline(y=0, lw=1, color="#2a3135", zorder=1)
ax.axvline(x=0, lw=1, color="#2a3135", zorder=1)
# Add the legend
ax.legend(
ncol=2,
fontsize=10,
bbox_transform=fig.transFigure,
loc="lower right",
bbox_to_anchor=(1.01, 0.87),
)
plt.savefig(filename, dpi=300)
def historical_annual_cash_flows(report, filename):
"""
Plot the historical annual revenue and spending cash flows.
Parameters
----------
report : CashReport
the cash report object
filename : str
the file name to save the file to
"""
# load the data
data = (
report.annual_general_fund_totals()
.query("Name in ['Total Disbursements', 'Total Cash Receipts']")
.pivot(index="Fiscal Year", values="Total", columns="Name")
.rename(
columns={
"Total Cash Receipts": "Revenue",
"Total Disbursements": "Expenditures",
}
)
)
with plt.style.context(default_style):
# Initialize
fig, ax = plt.subplots(
figsize=(6, 3.75), gridspec_kw=dict(left=0.05, bottom=0.20, top=0.82)
)
# revenue and spending
y1 = data["Revenue"]
y2 = data["Expenditures"]
# revenue
color = palette["black"]
ax.plot(y1.index, y1.values, color=color, lw=3, label="Revenues")
fmt = {
"c": "white",
"zorder": 10,
"marker": "o",
"edgecolors": color,
"linewidth": 2,
}
ax.scatter(y1.index, y1.values, **fmt)
# spending
color = palette["love-park-red"]
ax.plot(y2.index, y2.values, color=color, lw=3, label="Expenditures")
fmt = {
"c": "white",
"zorder": 10,
"marker": "o",
"edgecolors": color,
"linewidth": 2,
}
ax.scatter(y2.index, y2.values, **fmt)
# in the black
ax.fill_between(
y1.index,
y1,
y2,
where=y1 > y2,
color=palette["medium-gray"],
interpolate=True,
label="Fund Balance Increases",
)
# in the red
ax.fill_between(
y1.index,
y1,
y2,
where=y1 < y2,
color=palette["light-red"],
interpolate=True,
label="Fund Balance Decreases",
)
# format grid
sns.despine(left=True, bottom=True, ax=ax)
# ylabel and y limits
ax.text(
0.005,
0.99,
"Annual General Fund\nCash Flows",
weight="bold",
fontsize=14,
ha="left",
va="top",
transform=fig.transFigure,
)
# Format y-axis
ax.set_ylim(3.45e3, 5.05e3)
ax.set_yticklabels(["$%.1fB" % (x / 1e3) for x in ax.get_yticks()], fontsize=14)
plt.setp(ax.get_yticklabels(), ha="left")
# Format x-axis
ax.set_xticks(y1.index.tolist())
plt.setp(ax.get_xticklabels(), fontsize=14, rotation=90)
ax.set_xlabel("Fiscal Year", fontsize=14, weight="bold")
ax.set_xlim(left=2005)
# Add a legend
leg = plt.legend(
ncol=2,
loc="upper right",
frameon=False,
fontsize=11.5,
bbox_to_anchor=(1.01, 0.97),
bbox_transform=fig.transFigure,
)
# Save
plt.savefig(filename, dpi=300)
def monthly_actuals_this_quarter(report, filename):
"""
Plot the monthly revenue/spending actuals for this quarter.
This is a two panel chart with line charts showing the monthly totals
for the total cash receipts and total disbursements from the General Fund.
Parameters
----------
report : CashReport
the cash report object
filename : str
the file name to save the file to
"""
CURRENT_FY = report.year
CURRENT_Q = report.quarter
this_year = f"FY{str(CURRENT_FY)[2:]}"
last_year = f"FY{str(CURRENT_FY - 1)[2:]}"
quarter_months = [
month.capitalize()
for month in get_fiscal_months()[(CURRENT_Q - 1) * 3 : CURRENT_Q * 3]
]
# load the data
data = report.compare_to_last_year()
sel = data["Name"].isin(["Total Disbursements", "Total Cash Receipts"])
sel &= data["Month"].isin(quarter_months)
data = data.loc[sel]
with plt.style.context(default_style):
# loop over tags
tags = ["Revenue", "Spending"]
labels = ["Cash Revenue", "Cash Spending"]
for i, tag in enumerate(tags):
# initialize the figure/axes
grid_kws = dict(
top=0.8,
left=0.12,
right=0.9,
bottom=0.12,
width_ratios=[3, 1],
wspace=0.3,
)
fig, axs = plt.subplots(
ncols=2, nrows=1, figsize=(5, 3), gridspec_kw=grid_kws
)
# select this type of data
df = data.query(f"Kind == '{tag}'")
ax = axs[0]
# plot last year
color = palette["medium-gray"]
ax.plot(
df["Month"],
df[last_year],
label=last_year,
color=color,
marker="o",
markerfacecolor="white",
markeredgecolor=color,
markeredgewidth=1.5,
markersize=6,
clip_on=False,
lw=2.25,
zorder=10,
)
# plot this year
if tag == "Revenue":
color = "black"
else:
color = palette["love-park-red"]
ax.plot(
df["Month"],
df[this_year],
label=this_year,
color=color,
marker="o",
markerfacecolor="white",
markeredgecolor=color,
markeredgewidth=1.5,
markersize=6,
lw=2.25,
zorder=10,
clip_on=False,
)
# format
ax.set_xticks(df["Month"].tolist())
plt.setp(ax.get_xticklabels(), fontsize=14)
ax.set_yticklabels(
[utils.format_currency(x, "{:,.0f}M") for x in ax.get_yticks()],
fontsize=11,
)
# add a legend
leg = ax.legend(
ncol=2,
loc="lower right",
bbox_to_anchor=(1.05, 0.95),
frameon=False,
bbox_transform=ax.transAxes,
fontsize=12,
)
# add the text
ax = axs[1]
ax.axis("off")
total = df[this_year].sum()
diff = df[this_year].sum() - df[last_year].sum()
growth = diff / df[last_year].sum() * 100
ax.text(
0.5,
0.95,
f"Total Q{report.quarter} {tag}",
ha="center",
va="center",
fontsize=14,
weight="bold",
transform=ax.transAxes,
)
ax.text(
0.5,
0.7,
this_year + "\n" + "${:,.1f}M".format(total),
ha="center",
va="center",
fontsize=14,
)
color = palette["love-park-red"] if diff < 0 else palette["phanatic-green"]
ax.text(
0.5,
0.4,
"${:,.1f}M".format(abs(diff)),
color=color,
fontsize=14,
va="center",
ha="center",
)
if diff < 0:
t = f"less than {last_year}"
else:
t = f"more than {last_year}"
ax.text(0.5, 0.3, t, fontsize=14, va="center", ha="center", color=color)
ax.text(
0.5,
0.2,
"(+%.1f%%)" % growth,
ha="center",
color=color,
fontsize=14,
va="top",
)
# add a title
fig.text(
0.5,
0.94,
f"General Fund Cash {tag} in {this_year} Q{CURRENT_Q} vs. {last_year} Q{CURRENT_Q}",
weight="bold",
fontsize=14,
ha="center",
)
basename, ext = os.path.splitext(filename)
plt.savefig(f"{basename}_{tag}{ext}", dpi=300)
def historical_accuracy_of_revenue_projection(report, filename):
"""
Plot the historical accuracy of the current quarter's projection
for the annual revenue total.
This is a line chart which shows the projected year-over-year change
from the current quarter as well as the actual year-over-year change.
Parameters
----------
report : CashReport
the cash report object
filename : str
the file name to save the file to
"""
_historical_projection_accuracy(report, filename, "revenue")
def historical_accuracy_of_spending_projection(report, filename):
"""
Plot the historical accuracy of the current quarter's projection
for the annual spending total.
This is a line chart which shows the projected year-over-year change
from the current quarter as well as the actual year-over-year change.
Parameters
----------
report : CashReport
the cash report object
filename : str
the file name to save the file to
"""
_historical_projection_accuracy(report, filename, "spending")
def _historical_projection_accuracy(report, filename, kind):
"""
Internal function to plot the historical accuracy of the
current quarter's projection for the annual revenue/spending total.
"""
kind = kind.lower()
assert kind in ["revenue", "spending"]
# load the data
df = report.actual_vs_projected_changes()
with plt.style.context(default_style):
bottom = 0.28 if report.quarter == 1 else 0.24
# initialize the figure
grid_kws = dict(left=0.18, right=0.95, top=0.825, bottom=bottom, hspace=0.95)
fig, ax = plt.subplots(figsize=(4, 2.7), gridspec_kw=grid_kws)
if kind == "revenue":
name = "Total Cash Receipts"
label = "Year-over-Year Change in\nGeneral Fund Revenue"
else:
name = "Total Disbursements"
label = "Year-over-Year Change in\nGeneral Fund Expenditures"
# trim the data
data = df.loc[(df["Name"] == name)]
# plot projected change
color = palette["dark-gray"]
ax.plot(
data["Fiscal Year"],
data["Projected Change"],
label=f"Q{report.quarter} Projection",
color=color,
marker="o",
markerfacecolor="white",
markeredgecolor=color,
markeredgewidth=1.5,
markersize=5,
lw=2,
linestyle="dashed",
zorder=2,
)
# plot actual change
color = palette["electric-blue"]
ax.plot(
data["Fiscal Year"],
data["Actual Change"],
label="Actual Change",
color=color,
marker="o",
markerfacecolor="white",
markeredgecolor=color,
markeredgewidth=1.5,
markersize=5,
lw=2,
zorder=10,
)
# add zero line
ax.axhline(y=0, c=palette["medium-gray"], lw=2, zorder=1)
# Format x-axis
ax.set_xticks(data["Fiscal Year"].tolist())
ax.set_xlabel("Fiscal Year", fontsize=11, weight="bold")
plt.setp(ax.get_xticklabels(), fontsize=11, rotation=90)
# Format y-axis
if kind == "revenue":
ylims = (-400, 400)
else:
ylims = (-400, 600)
PAD = 25
ax.set_ylim(ylims[0] - PAD, ylims[1] + PAD)
ax.set_yticks(np.arange(ylims[0], ylims[1] + 1, 200))
ax.set_yticklabels(
[utils.format_currency(x, "{:,.0f}M") for x in ax.get_yticks()], fontsize=11
)
# Add a y-axis label
ax.text(
0.005,
0.99,
label,
weight="bold",
fontsize=10,
transform=fig.transFigure,
ha="left",
va="top",
)
# Add a legend
ax.legend(
ncol=1,
loc="upper right",
bbox_to_anchor=(1.0, 1.02),
frameon=False,
bbox_transform=fig.transFigure,
fontsize=9,
)
if report.quarter == 1:
caption = "Note: Q1 projection for Fiscal Year 2015 now shown due to the budgeted sale of Philadelphia Gas Works that did not occur."
fig.text(
0.0, 0.005, caption, ha="left", va="bottom", color="#666666", fontsize=5
)
# save
plt.savefig(filename, dpi=300)
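# A hedged usage sketch (hypothetical: the CashReport constructor and its
# arguments are not shown in this module):
#
# from qcmr.cash import CashReport            # assumed import path
# report = CashReport(year=2021, quarter=2)   # assumed signature
# monthly_actuals_this_quarter(report, "monthly_actuals.png")
# historical_annual_cash_flows(report, "annual_cash_flows.png")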
|
{"hexsha": "f404e843d15a68e4a1e5f05b51417b60e4649362", "size": 23581, "ext": "py", "lang": "Python", "max_stars_repo_path": "cash_viz/general_fund/core.py", "max_stars_repo_name": "PhiladelphiaController/cash_viz", "max_stars_repo_head_hexsha": "2124d1d7859fc20e2a7f79754697fd5a41b2ee47", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cash_viz/general_fund/core.py", "max_issues_repo_name": "PhiladelphiaController/cash_viz", "max_issues_repo_head_hexsha": "2124d1d7859fc20e2a7f79754697fd5a41b2ee47", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cash_viz/general_fund/core.py", "max_forks_repo_name": "PhiladelphiaController/cash_viz", "max_forks_repo_head_hexsha": "2124d1d7859fc20e2a7f79754697fd5a41b2ee47", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.112345679, "max_line_length": 145, "alphanum_fraction": 0.4986641788, "include": true, "reason": "import numpy", "num_tokens": 5740}
|
import matplotlib.pyplot as plt
def plot_pies(df, drop_columns=None): # kwargs for subplots call?
if drop_columns:
columns = df.columns.drop(drop_columns)
else:
columns = df.columns
rows = 4
plt.subplots(len(columns)//rows+1, rows, figsize=(16,8))
for idx, col in enumerate(columns):
plt.subplot(len(columns)//rows+1, rows, idx+1)
plt.title(f'{col}')
counts = df[col].value_counts()
plt.pie(counts, labels=counts.index, autopct='%1.1f%%',) #makes the MARITAL_STATUS_CODE column disappear..?
plt.tight_layout()
plt.show()
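# A minimal usage sketch (hypothetical DataFrame; the column names are
# invented for illustration):
#
# import pandas as pd
# df = pd.DataFrame({
#     "GENDER_CODE": ["M", "F", "F", "M"],
#     "MARITAL_STATUS_CODE": ["S", "M", "S", "S"],
#     "customer_id": [1, 2, 3, 4],
# })
# plot_pies(df, drop_columns=["customer_id"])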
|
{"hexsha": "f5d56ea1fb54654df903e47210f9ed4019734275", "size": 678, "ext": "py", "lang": "Python", "max_stars_repo_path": "dtcj/plot_pies.py", "max_stars_repo_name": "np1919/DTCJ", "max_stars_repo_head_hexsha": "766d67dc73a0bc5ea59972c52cb9c7db81c43daf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dtcj/plot_pies.py", "max_issues_repo_name": "np1919/DTCJ", "max_issues_repo_head_hexsha": "766d67dc73a0bc5ea59972c52cb9c7db81c43daf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dtcj/plot_pies.py", "max_forks_repo_name": "np1919/DTCJ", "max_forks_repo_head_hexsha": "766d67dc73a0bc5ea59972c52cb9c7db81c43daf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.0769230769, "max_line_length": 115, "alphanum_fraction": 0.6548672566, "include": true, "reason": "import numpy", "num_tokens": 172}
|
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
from rapids_triton import Client
from rapids_triton.testing import get_random_seed, arrays_close
TOTAL_SAMPLES = 8192
def valid_shm_modes():
modes = [None]
if os.environ.get('CPU_ONLY', '0') == '0':
modes.append('cuda')
return modes
@pytest.fixture(scope='session')
def client():
client = Client()
client.wait_for_server(60)
return client
@pytest.fixture
def model_inputs():
np.random.seed(get_random_seed())
return {
input_name:
np.random.rand(TOTAL_SAMPLES, 1).astype('float32')
for input_name in ('input__0',)
}
@pytest.fixture
def model_output_sizes():
return {'output__0': TOTAL_SAMPLES * np.dtype('float32').itemsize}
def get_ground_truth(inputs):
return {'output__0': inputs['input__0']}
@pytest.mark.parametrize("model_name", ['identity'])
@pytest.mark.parametrize("shared_mem", valid_shm_modes())
def test_model(client, model_name, shared_mem, model_inputs, model_output_sizes):
result = client.predict(
model_name, model_inputs, model_output_sizes, shared_mem=shared_mem
)
ground_truth = get_ground_truth(model_inputs)
for output_name in sorted(ground_truth.keys()):
arrays_close(
result[output_name],
ground_truth[output_name],
atol=1e-5,
assert_close=True
)
|
{"hexsha": "9379ff501ac1942dd32834ae720c90e37097b877", "size": 1974, "ext": "py", "lang": "Python", "max_stars_repo_path": "qa/L0_e2e/test_model.py", "max_stars_repo_name": "divyegala/rapids-triton", "max_stars_repo_head_hexsha": "8ff2a8dbad029e9379d9e7808d868924c4b60590", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-23T23:38:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-23T23:38:40.000Z", "max_issues_repo_path": "qa/L0_e2e/test_model.py", "max_issues_repo_name": "divyegala/rapids-triton", "max_issues_repo_head_hexsha": "8ff2a8dbad029e9379d9e7808d868924c4b60590", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2021-09-20T21:23:27.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T22:53:30.000Z", "max_forks_repo_path": "qa/L0_e2e/test_model.py", "max_forks_repo_name": "divyegala/rapids-triton", "max_forks_repo_head_hexsha": "8ff2a8dbad029e9379d9e7808d868924c4b60590", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-01-27T20:58:07.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-09T23:07:41.000Z", "avg_line_length": 28.2, "max_line_length": 81, "alphanum_fraction": 0.7092198582, "include": true, "reason": "import numpy", "num_tokens": 451}
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import matplotlib
matplotlib.rcParams.update({'font.size': 20})
type_colors = { 'transient_ON': 'green', 'transient_OFF': 'magenta', 'transient_ON_OFF': 'cyan' }
map_df = pd.read_csv('../build/ll2_inputs_from_LGN.csv', sep=' ')
gids = np.array(list(set(map_df['index'].values)))
filters_data_fname = '../../6-LGN_firing_rates_and_spikes/LGN_spike_trains/LGN2_visual_space_positions_and_cell_types.dat'
'''
# Find cells with a relatively large number of ON_OFF inputs.
ON_OFF_3 = []
for gid in gids:
if (gid >= 8500):
break
if (gid % 100 == 0):
print('Processing gid %d.' % (gid))
tmp_df = map_df.loc[map_df['index'] == gid]
N_ON_OFF = len(tmp_df.loc[tmp_df['src_type'] == 'transient_ON_OFF'].index)
if (N_ON_OFF >= 3):
ON_OFF_3.append(gid)
print(len(ON_OFF_3))
'''
# Plot positions of the input LGN filters (in the visual space) for the selected cell.
filters_data = pd.read_csv(filters_data_fname, sep=' ')
filters_data.columns = ['LGN_type', 'x', 'y', 'x_offset', 'y_offset', 'sigma_c', 'sigma_s', 'r0', 'scaling_factor', 'k_alpha']
for cell_gid in [107]: #[107, 1232, 2102, 4621, 4731, 7192]: #ON_OFF_3: #[300, 690]: #xrange(0, 8500, 10): #Cells with 5 ON_OFF inputs: [622, 653, 7252]
tmp_df = map_df.loc[map_df['index'] == cell_gid]
print('N_LGN_filters =', len(tmp_df.index))
# Print out the parameters of LGN source filters associated with the cell_gid.
src_gids = tmp_df['src_gid'].values
print(filters_data.loc[src_gids]) # Use src_gids as row labels in the data frame; gids are not saved in the original file, but entries are saved in order of gids.
src_types = list(set(tmp_df['src_type'].values))
for type in src_types:
x = tmp_df[tmp_df['src_type'] == type]['src_vis_x'].values
y = tmp_df[tmp_df['src_type'] == type]['src_vis_y'].values
plt.scatter(x, y, s=500, c=type_colors[type], label=type)
plt.gca().set_aspect('equal')
plt.legend()
plt.title('Cell gid %d' % (cell_gid))
plt.savefig('plot_LGN_vis_space_positions/ll2_LGN_src.eps', format='eps')
plt.show()
|
{"hexsha": "73680e1338e774056e6be5eb1e15bffc35d045cd", "size": 2161, "ext": "py", "lang": "Python", "max_stars_repo_path": "analysis_codes_v2/plot_LGN_vis_space_positions.py", "max_stars_repo_name": "zqwei/LIF_Vis_model", "max_stars_repo_head_hexsha": "16f651ac827ba5f0feb40a0e619e600f1251d009", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "analysis_codes_v2/plot_LGN_vis_space_positions.py", "max_issues_repo_name": "zqwei/LIF_Vis_model", "max_issues_repo_head_hexsha": "16f651ac827ba5f0feb40a0e619e600f1251d009", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "analysis_codes_v2/plot_LGN_vis_space_positions.py", "max_forks_repo_name": "zqwei/LIF_Vis_model", "max_forks_repo_head_hexsha": "16f651ac827ba5f0feb40a0e619e600f1251d009", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.5576923077, "max_line_length": 161, "alphanum_fraction": 0.6820916242, "include": true, "reason": "import numpy", "num_tokens": 641}
|
#include <kazen/camera.h>
#include <kazen/rfilter.h>
#include <kazen/warp.h>
#include <Eigen/Geometry>
NAMESPACE_BEGIN(kazen)
/**
* \brief Perspective camera (pinhole model)
*
* This class implements a simple perspective camera model. It uses an
* infinitesimally small aperture, creating an infinite depth of field.
*/
class PerspectiveCamera : public Camera {
public:
PerspectiveCamera(const PropertyList &propList) {
/* Width and height in pixels. Default: 720p */
m_outputSize.x() = propList.getInteger("width", 1280);
m_outputSize.y() = propList.getInteger("height", 720);
m_invOutputSize = m_outputSize.cast<float>().cwiseInverse();
/* Specifies an optional camera-to-world transformation. Default: none */
m_cameraToWorld = propList.getTransform("toWorld", Transform());
/* Horizontal field of view in degrees */
m_fov = propList.getFloat("fov", 30.0f);
/* Near and far clipping planes in world-space units */
m_nearClip = propList.getFloat("nearClip", 1e-4f);
m_farClip = propList.getFloat("farClip", 1e4f);
m_rfilter = NULL;
}
void activate() {
float aspect = m_outputSize.x() / (float) m_outputSize.y();
/* Project vectors in camera space onto a plane at z=1:
*
* xProj = cot * x / z
* yProj = cot * y / z
* zProj = (far * (z - near)) / (z * (far-near))
* The cotangent factor ensures that the field of view is
* mapped to the interval [-1, 1].
*/
float recip = 1.0f / (m_farClip - m_nearClip),
cot = 1.0f / std::tan(math::degToRad(m_fov / 2.0f));
Eigen::Matrix4f perspective;
perspective <<
cot, 0, 0, 0,
0, cot, 0, 0,
0, 0, m_farClip * recip, -m_nearClip * m_farClip * recip,
0, 0, 1, 0;
/**
* Translation and scaling to shift the clip coordinates into the
* range from zero to one. Also takes the aspect ratio into account.
*/
m_sampleToCamera = Transform(
Eigen::DiagonalMatrix<float, 3>(Vector3f(-0.5f, -0.5f * aspect, 1.0f)) *
Eigen::Translation<float, 3>(-1.0f, -1.0f/aspect, 0.0f) * perspective).inverse();
/* If no reconstruction filter was assigned, instantiate a Gaussian filter */
if (!m_rfilter)
m_rfilter = static_cast<ReconstructionFilter *>(
ObjectFactory::createInstance("gaussian", PropertyList()));
}
Color3f sampleRay(Ray3f &ray,
const Point2f &samplePosition,
const Point2f &apertureSample) const {
/* Compute the corresponding position on the
near plane (in local camera space) */
Point3f nearP = m_sampleToCamera * Point3f(
samplePosition.x() * m_invOutputSize.x(),
samplePosition.y() * m_invOutputSize.y(), 0.0f);
/* Turn into a normalized ray direction, and
adjust the ray interval accordingly */
Vector3f d = nearP.normalized();
float invZ = 1.0f / d.z();
ray.o = m_cameraToWorld * Point3f(0, 0, 0);
ray.d = m_cameraToWorld * d;
ray.mint = m_nearClip * invZ;
ray.maxt = m_farClip * invZ;
ray.update();
return Color3f(1.0f);
}
void addChild(Object *obj) {
switch (obj->getClassType()) {
case EReconstructionFilter:
if (m_rfilter)
throw Exception("Camera: tried to register multiple reconstruction filters!");
m_rfilter = static_cast<ReconstructionFilter *>(obj);
break;
default:
throw Exception("Camera::addChild(<{}>) is not supported!",
classTypeName(obj->getClassType()));
}
}
/// Return a human-readable summary
std::string toString() const {
return fmt::format(
"PerspectiveCamera[\n"
" cameraToWorld = {},\n"
" outputSize = {},\n"
" fov = {},\n"
" clip = [{}, {}],\n"
" rfilter = {}\n"
"]",
string::indent(m_cameraToWorld.toString(), 18),
m_outputSize.toString(),
m_fov,
m_nearClip, m_farClip,
string::indent(m_rfilter->toString())
);
}
private:
Vector2f m_invOutputSize;
Transform m_sampleToCamera;
Transform m_cameraToWorld;
float m_fov;
float m_nearClip;
float m_farClip;
};
class ThinlensCamera : public Camera {
public:
ThinlensCamera(const PropertyList &propList) {
/* Width and height in pixels. Default: 720p */
m_outputSize.x() = propList.getInteger("width", 1280);
m_outputSize.y() = propList.getInteger("height", 720);
m_apertureRadius = propList.getFloat("apertureRadius", 1.0);
m_focusDistance = propList.getFloat("focusDistance", 0.0);
m_invOutputSize = m_outputSize.cast<float>().cwiseInverse();
/* Specifies an optional camera-to-world transformation. Default: none */
m_cameraToWorld = propList.getTransform("toWorld", Transform());
/* Horizontal field of view in degrees */
m_fov = propList.getFloat("fov", 30.0f);
/* Near and far clipping planes in world-space units */
m_nearClip = propList.getFloat("nearClip", 1e-4f);
m_farClip = propList.getFloat("farClip", 1e4f);
m_rfilter = NULL;
}
void activate() {
float aspect = m_outputSize.x() / (float) m_outputSize.y();
/* Project vectors in camera space onto a plane at z=1:
*
* xProj = cot * x / z
* yProj = cot * y / z
* zProj = (far * (z - near)) / (z * (far-near))
* The cotangent factor ensures that the field of view is
* mapped to the interval [-1, 1].
*/
float recip = 1.0f / (m_farClip - m_nearClip),
cot = 1.0f / std::tan(math::degToRad(m_fov / 2.0f));
Eigen::Matrix4f perspective;
perspective <<
cot, 0, 0, 0,
0, cot, 0, 0,
0, 0, m_farClip * recip, -m_nearClip * m_farClip * recip,
0, 0, 1, 0;
/**
* Translation and scaling to shift the clip coordinates into the
* range from zero to one. Also takes the aspect ratio into account.
*/
m_sampleToCamera = Transform(
Eigen::DiagonalMatrix<float, 3>(Vector3f(-0.5f, -0.5f * aspect, 1.0f)) *
Eigen::Translation<float, 3>(-1.0f, -1.0f/aspect, 0.0f) * perspective).inverse();
/* If no reconstruction filter was assigned, instantiate a Gaussian filter */
if (!m_rfilter)
m_rfilter = static_cast<ReconstructionFilter *>(
ObjectFactory::createInstance("gaussian", PropertyList()));
}
Color3f sampleRay(Ray3f &ray,
const Point2f &samplePosition,
const Point2f &apertureSample) const {
Point2f tmp = Warp::squareToUniformDisk(apertureSample) * m_apertureRadius;
/* Compute the corresponding position on the
near plane (in local camera space) */
Point3f nearP = m_sampleToCamera * Point3f(
samplePosition.x() * m_invOutputSize.x(),
samplePosition.y() * m_invOutputSize.y(), 0.0f);
/* Aperture position */
Point3f apertureP(tmp.x(), tmp.y(), 0.0);
/* Sampled position on the focal plane */
Point3f focusP = nearP * (m_focusDistance / nearP.z());
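/* (Added note: points on the plane z = m_focusDistance are imaged sharply,
while points off that plane are blurred with a circle of confusion that
grows with m_apertureRadius; this is the standard thin-lens behaviour.) */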
/* Turn into a normalized ray direction, and
adjust the ray interval accordingly */
Vector3f d = focusP - apertureP;
d.normalize();
float invZ = 1.0f / d.z();
ray.o = m_cameraToWorld * apertureP;
ray.d = m_cameraToWorld * d;
ray.mint = m_nearClip * invZ;
ray.maxt = m_farClip * invZ;
ray.update();
return Color3f(1.0f);
}
void addChild(Object *obj) {
switch (obj->getClassType()) {
case EReconstructionFilter:
if (m_rfilter)
throw Exception("Camera: tried to register multiple reconstruction filters!");
m_rfilter = static_cast<ReconstructionFilter *>(obj);
break;
default:
throw Exception("Camera::addChild(<%s>) is not supported!",
classTypeName(obj->getClassType()));
}
}
/// Return a human-readable summary
std::string toString() const {
return fmt::format(
"ThinlensCamera[\n"
" cameraToWorld = %s,\n"
" outputSize = %s,\n"
" fov = %f,\n"
" clip = [%f, %f],\n"
" apertureRadius = %f,\n"
" focusDistance = %f,\n"
" rfilter = %s\n"
"]",
string::indent(m_cameraToWorld.toString(), 18),
m_outputSize.toString(),
m_fov,
m_nearClip,
m_farClip,
m_apertureRadius,
m_focusDistance,
string::indent(m_rfilter->toString())
);
}
private:
Vector2f m_invOutputSize;
Transform m_sampleToCamera;
Transform m_cameraToWorld;
float m_fov;
float m_nearClip;
float m_farClip;
float m_apertureRadius;
float m_focusDistance;
};
KAZEN_REGISTER_CLASS(PerspectiveCamera, "perspective");
KAZEN_REGISTER_CLASS(ThinlensCamera, "thinlens");
NAMESPACE_END(kazen)
|
{"hexsha": "c8c7caacb46e2b6141c47d31c4b9a000cff15209", "size": 9587, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/kazen/camera.cpp", "max_stars_repo_name": "ZhongLingXiao/nano-kazen", "max_stars_repo_head_hexsha": "0f4311b6cfe1d964af4e49263e8cc9b089d53e2e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2022-01-17T01:57:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-17T01:57:59.000Z", "max_issues_repo_path": "src/kazen/camera.cpp", "max_issues_repo_name": "ZhongLingXiao/nano-kazen", "max_issues_repo_head_hexsha": "0f4311b6cfe1d964af4e49263e8cc9b089d53e2e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2021-12-15T06:37:05.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-28T10:44:45.000Z", "max_forks_repo_path": "src/kazen/camera.cpp", "max_forks_repo_name": "ZhongLingXiao/nano-kazen", "max_forks_repo_head_hexsha": "0f4311b6cfe1d964af4e49263e8cc9b089d53e2e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.8618181818, "max_line_length": 98, "alphanum_fraction": 0.57035569, "num_tokens": 2425}
|
# coding: utf-8
from __future__ import print_function
import scipy.io
import tensorflow as tf
from numpy import *
import os
import sys
from pylab import *
import numpy as np
import matplotlib
import PIL
from PIL import ImageFile
from PIL import Image
ImageFile.LOAD_TRUNCATED_IMAGES = True
import matplotlib.pyplot as plt
import shutil
net_data = load("bvlc_alexnet.npy").item()
DROPOUT = 0.5
LEARNING_RATE = 0.01
VALIDATION_SIZE = 0
TRAINING_ITERATIONS = 100000
WEIGHT_DECAY = 0.0005
NB_EPOCH = 200
batch_size = 50
num_classes = 50
label_id = 0
train_dir = list() # list directory of train images
train_labels = list() # list label of train images
train_dir_tmp = list() # it is used for shuffling train data
train_labels_tmp = list()
svm_dir = list() # list directory of svm images
svm_labels = list() # list label of svm images
test_dir = list() # list directory of test images
test_labels = list() # list label of test images
species_dict = dict() # dictionary for species id. It map real species id to new neat id ([0, 1, 2, ..., 49])
opt = sys.argv[1] #option
organ = sys.argv[2] # leaf, flower, entire or branch
if (opt == '--organ'):
if (organ not in ['leaf', 'flower', 'entire', 'branch']):
sys.exit('Plant argument must be leaf, flower, entire or branch')
else:
sys.exit('option ' + opt + ' does not exist, please choose: --organ')
data_folder = '../plant_data/' + organ
file_id = '50_' + organ + '_pretrained'
flag_train = False
for sub_dir_flower in os.listdir(data_folder + '/Training/'):
for img in os.listdir(data_folder + '/Training/' + sub_dir_flower):
try:
pic = Image.open(os.path.join(data_folder + '/Training/' + sub_dir_flower, img))
except IOError:
continue
if (pic.format != 'JPEG'):
continue
# rgb_pic = pic.convert('RGB')
# rgb_pic.save(os.path.join(data_folder + '/Training/' + sub_dir_flower, img))
train_dir_tmp.append(os.path.join(data_folder + '/Training/' + sub_dir_flower, img))
train_labels_tmp.append(label_id)
species_dict[sub_dir_flower] = label_id
label_id = label_id + 1
print(species_dict)
index_shuf = list(range(len(train_dir_tmp)))
shuffle(index_shuf)
for i in index_shuf:
train_dir.append(train_dir_tmp[i])
train_labels.append(train_labels_tmp[i])
for sub_dir_flower in os.listdir(data_folder + '/SvmInput/'):
for img in os.listdir(data_folder + '/SvmInput/' + sub_dir_flower):
pic = Image.open(os.path.join(data_folder + '/SvmInput/' + sub_dir_flower, img))
if (pic.format != 'JPEG'):
print((os.path.join(data_folder + '/SvmInput/' + sub_dir_flower, img)))
# rgb_pic = pic.convert('RGB')
# rgb_pic.save(os.path.join(data_folder + '/SvmInput/' + sub_dir_flower, img))
else:
svm_dir.append(os.path.join(data_folder + '/SvmInput/' + sub_dir_flower, img))
svm_labels.append(species_dict[sub_dir_flower])
for sub_dir_flower in os.listdir(data_folder + '/Testing/'):
for img in os.listdir(data_folder + '/Testing/' + sub_dir_flower):
pic = Image.open(os.path.join(data_folder + '/Testing/' + sub_dir_flower, img))
if (pic.format != 'JPEG'):
print((os.path.join(data_folder + '/Testing/' + sub_dir_flower, img)))
# rgb_pic = pic.convert('RGB')
# rgb_pic.save(os.path.join(data_folder + '/Testing/' + sub_dir_flower, img))
else:
test_dir.append(os.path.join(data_folder + '/Testing/' + sub_dir_flower, img))
test_labels.append(species_dict[sub_dir_flower])
# Un-comment this block if you want to use an available svm and test directory
'''
svm_dir = list() # list directory of svm images
svm_labels = list() # list label of svm images
test_dir = list() # list directory of test images
test_labels = list() # list label of test images
f = open(organ + '_test', 'r')
for line in f:
test_dir.append(line.split(';')[0])
test_labels.append(species_dict[line.strip().split(';')[1]])
f.close()
f = open(organ + '_svm', 'r')
for line in f:
svm_dir.append(line.split(';')[0])
svm_labels.append(species_dict[line.strip().split(';')[1]])
f.close()
'''
print(len(train_dir))
print(len(test_dir))
print(len(svm_dir))
def print_activations(t):
'''
print a tensor shape
:param t: is a tensor
'''
print(t.op.name, ' ', t.get_shape().as_list())
def dense_to_one_hot(labels_dense, num_classes):
'''
make the one-hot matrix for label list
input: a list of label id. For example [1, 2, 3]
output: a one-hot maxtrix. [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
Note: this could be done inside the TensorFlow graph, but doing it here,
before feeding the model, speeds up training considerably.
:param labels_dense: a 1-D array of label ids
:param num_classes: the total number of classes
'''
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
def read_images_from_disk(input_queue):
'''
This function is used for reading images to tensors vector in tensorflow.
Input: a (directory, label) queue of some images
Output: a tensor vector for each image; a label list for each image
:param input_queue: (directory, label) queue
'''
label = input_queue[1]
file_contents = tf.read_file(input_queue[0])
example = tf.image.decode_jpeg(file_contents, channels=3) # read image with jpeg extension
return example, label
# weight initialization
def weight_variable(shape, name):
'''
Initialize a weight variable for the CNN model.
Input: shape and name of the variable
Output: a tf.Variable with truncated-normal initialization
:param shape: the shape of the expected variable
:param name: the name of the expected variable
'''
initial = tf.truncated_normal(shape, stddev=0.01, name=name)
return tf.Variable(initial)
def bias_variable(shape, name):
initial = tf.constant(0.1, shape=shape, name=name)
return tf.Variable(initial)
# convolution
def conv(input, kernel, biases, k_h, k_w, c_o, s_h, s_w, padding="VALID", group=1):
'''From https://github.com/ethereon/caffe-tensorflow
'''
c_i = input.get_shape()[-1]
assert c_i % group == 0
assert c_o % group == 0
convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)
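# Note (added): with group > 1 the input and kernel are split along the
# channel axis and convolved independently, mirroring AlexNet's original
# two-GPU layout; tf.split here uses the pre-TF-1.0 argument order
# (axis, num_splits, value).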
if group == 1:
conv = convolve(input, kernel)
else:
input_groups = tf.split(3, group, input)
kernel_groups = tf.split(3, group, kernel)
output_groups = [convolve(i, k) for i, k in zip(input_groups, kernel_groups)]
conv = tf.concat(3, output_groups)
return tf.reshape(tf.nn.bias_add(conv, biases), [-1] + conv.get_shape().as_list()[1:])
def conv2d(x, W, stride_h, stride_w, padding='SAME'):
return tf.nn.conv2d(x, W, strides=[1, stride_h, stride_w, 1], padding=padding)
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def max_pool_3x3(x):
return tf.nn.max_pool(x, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
def max_pool_4x4(x):
return tf.nn.max_pool(x, ksize=[1, 4, 4, 1], strides=[1, 4, 4, 1], padding='SAME')
num_examples = len(train_dir)
train_accuracies = list()
train_costs = list()
validation_accuracies = list()
x_range = list()
# CNN modeling
graph = tf.Graph()
with graph.as_default():
y_test = np.asarray(test_labels)
y_valid = np.asarray(svm_labels)
y_train = np.asarray(train_labels)
y_test = dense_to_one_hot(y_test, num_classes)
y_valid = dense_to_one_hot(y_valid, num_classes)
y_train = dense_to_one_hot(y_train, num_classes)
x_test = test_dir
x_valid = svm_dir
x_train = train_dir
input_queue_test = tf.train.slice_input_producer([x_test, y_test],
num_epochs=None,
shuffle=False)
x_test, y_test = read_images_from_disk(input_queue_test)
x_test = tf.image.resize_images(x_test, [227, 227], method=1)
# x_test = tf.image.per_image_whitening(x_test)
x_test, y_test = tf.train.batch([x_test, y_test], batch_size=len(test_dir), allow_smaller_final_batch=True)
input_queue_valid = tf.train.slice_input_producer([x_valid, y_valid],
num_epochs=None,
shuffle=False)
x_valid, y_valid = read_images_from_disk(input_queue_valid)
x_valid = tf.image.resize_images(x_valid, [227, 227], method=1)
x_valid, y_valid = tf.train.batch([x_valid, y_valid], batch_size=batch_size, allow_smaller_final_batch=True)
input_queue_train = tf.train.slice_input_producer([x_train, y_train],
num_epochs=None,
shuffle=True)
x_train, y_train = read_images_from_disk(input_queue_train)
x_train = tf.image.resize_images(x_train, [227, 227], method=1)
# x_train_rot = tf.image.rot90(x_train, k=1)
# x_train = tf.image.per_image_whitening(x_train)
x_train, y_train = tf.train.shuffle_batch([x_train, y_train], batch_size=batch_size, num_threads=4,
capacity=5000, min_after_dequeue=1000,
allow_smaller_final_batch=True)
x = tf.placeholder('float', shape=[None, 227, 227, 3])
y_ = tf.placeholder('float', shape=[None, num_classes])
x_testdata = tf.placeholder('float', shape=[None, 227, 227, 3])
y_testdata = tf.placeholder('float', shape=[None, num_classes])
conv1W = tf.Variable(net_data["conv1"][0])
conv1b = tf.Variable(net_data["conv1"][1])
conv2W = tf.Variable(net_data["conv2"][0])
conv2b = tf.Variable(net_data["conv2"][1])
conv3W = tf.Variable(net_data["conv3"][0])
conv3b = tf.Variable(net_data["conv3"][1])
conv4W = tf.Variable(net_data["conv4"][0])
conv4b = tf.Variable(net_data["conv4"][1])
conv5W = tf.Variable(net_data["conv5"][0])
conv5b = tf.Variable(net_data["conv5"][1])
fc6W = tf.Variable(net_data["fc6"][0])
fc6b = tf.Variable(net_data["fc6"][1])
fc7W = tf.Variable(net_data["fc7"][0])
fc7b = tf.Variable(net_data["fc7"][1])
fc8W = weight_variable([4096, num_classes], 'W_fc8')
fc8b = bias_variable([num_classes], 'b_fc8')
keep_prob = tf.placeholder('float')
def model(x):
# conv1
# conv(11, 11, 96, 4, 4, padding='VALID', name='conv1')
k_h = 11
k_w = 11
c_o = 96
s_h = 4
s_w = 4
conv1_in = conv(x, conv1W, conv1b, k_h, k_w, c_o, s_h, s_w, padding="SAME", group=1)
conv1 = tf.nn.relu(conv1_in)
# lrn1
# lrn(2, 2e-05, 0.75, name='norm1')
radius = 5
alpha = 0.0001
beta = 0.75
bias = 1.0
lrn1 = tf.nn.local_response_normalization(conv1,
depth_radius=radius,
alpha=alpha,
beta=beta,
bias=bias)
# maxpool1
# max_pool(3, 3, 2, 2, padding='VALID', name='pool1')
k_h = 3
k_w = 3
s_h = 2
s_w = 2
padding = 'VALID'
maxpool1 = tf.nn.max_pool(lrn1, ksize=[1, k_h, k_w, 1], strides=[1, s_h, s_w, 1], padding=padding)
# conv2
# conv(5, 5, 256, 1, 1, group=2, name='conv2')
k_h = 5
k_w = 5
c_o = 256
s_h = 1
s_w = 1
group = 2
conv2_in = conv(maxpool1, conv2W, conv2b, k_h, k_w, c_o, s_h, s_w, padding="SAME", group=group)
conv2 = tf.nn.relu(conv2_in)
# lrn2
# lrn(2, 2e-05, 0.75, name='norm2')
radius = 5
alpha = 0.0001
beta = 0.75
bias = 1.0
lrn2 = tf.nn.local_response_normalization(conv2,
depth_radius=radius,
alpha=alpha,
beta=beta,
bias=bias)
# maxpool2
# max_pool(3, 3, 2, 2, padding='VALID', name='pool2')
k_h = 3
k_w = 3
s_h = 2
s_w = 2
padding = 'VALID'
maxpool2 = tf.nn.max_pool(lrn2, ksize=[1, k_h, k_w, 1], strides=[1, s_h, s_w, 1], padding=padding)
# conv3
# conv(3, 3, 384, 1, 1, name='conv3')
k_h = 3
k_w = 3
c_o = 384
s_h = 1
s_w = 1
group = 1
conv3_in = conv(maxpool2, conv3W, conv3b, k_h, k_w, c_o, s_h, s_w, padding="SAME", group=group)
conv3 = tf.nn.relu(conv3_in)
# conv4
# conv(3, 3, 384, 1, 1, group=2, name='conv4')
k_h = 3
k_w = 3
c_o = 384
s_h = 1
s_w = 1
group = 2
conv4_in = conv(conv3, conv4W, conv4b, k_h, k_w, c_o, s_h, s_w, padding="SAME", group=group)
conv4 = tf.nn.relu(conv4_in)
# conv5
# conv(3, 3, 256, 1, 1, group=2, name='conv5')
k_h = 3
k_w = 3
c_o = 256
s_h = 1
s_w = 1
group = 2
conv5_in = conv(conv4, conv5W, conv5b, k_h, k_w, c_o, s_h, s_w, padding="SAME", group=group)
conv5 = tf.nn.relu(conv5_in)
# maxpool5
# max_pool(3, 3, 2, 2, padding='VALID', name='pool5')
k_h = 3
k_w = 3
s_h = 2
s_w = 2
padding = 'VALID'
maxpool5 = tf.nn.max_pool(conv5, ksize=[1, k_h, k_w, 1], strides=[1, s_h, s_w, 1], padding=padding)
# fc6
# fc(4096, name='fc6')
fc6 = tf.nn.relu_layer(tf.reshape(maxpool5, [-1, int(prod(maxpool5.get_shape()[1:]))]), fc6W, fc6b)
fc6_drop = tf.nn.dropout(fc6, keep_prob)
# fc7
# fc(4096, name='fc7')
fc7 = tf.nn.relu_layer(fc6_drop, fc7W, fc7b)
fc7_drop = tf.nn.dropout(fc7, keep_prob)
# fc8
# fc(1000, relu=False, name='fc8')
fc8 = (tf.nn.xw_plus_b(fc7_drop, fc8W, fc8b))
# prob
# softmax(name='prob'))
# prob = tf.nn.softmax(fc8)
return fc8
logits = model(x)
# Choose softmax or sigmoid cross entropy
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, y_))
regularizers = tf.nn.l2_loss(conv1W) + tf.nn.l2_loss(conv1b) + \
tf.nn.l2_loss(conv2W) + tf.nn.l2_loss(conv2b) + \
tf.nn.l2_loss(conv3W) + tf.nn.l2_loss(conv3b) + \
tf.nn.l2_loss(conv4W) + tf.nn.l2_loss(conv4b) + \
tf.nn.l2_loss(conv5W) + tf.nn.l2_loss(conv5b) + \
tf.nn.l2_loss(fc6W) + tf.nn.l2_loss(fc6b) + \
tf.nn.l2_loss(fc7W) + tf.nn.l2_loss(fc7b) + \
tf.nn.l2_loss(fc8W) + tf.nn.l2_loss(fc8b)
loss = tf.reduce_mean(cross_entropy + WEIGHT_DECAY * regularizers)
# optimisation function
global_step = tf.Variable(0)
learning_rate = tf.train.exponential_decay(LEARNING_RATE, global_step, 1000, 0.65, staircase=True)
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
correct_prediction = tf.equal(tf.argmax(tf.nn.softmax(logits), 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
saver = tf.train.Saver()
with tf.device('/cpu:0'):
logits_test = model(x_testdata)
prediction_vector_score = tf.nn.softmax(logits_test)
prediction_test = tf.argmax(prediction_vector_score, 1)
accuracy_test = tf.reduce_mean(tf.cast(tf.equal(prediction_test, tf.argmax(y_testdata, 1)), 'float'))
top_5_correct_prediction = tf.nn.in_top_k(tf.nn.softmax(logits_test), tf.argmax(y_testdata, 1), k=5)
top_5_accuracy = tf.reduce_mean(tf.cast(top_5_correct_prediction, 'float'))
iter_per_epoch = len(train_dir) / batch_size + 1
# Make a session for training CNN model
print('Training ...')
with tf.Session(graph=graph, config=tf.ConfigProto(log_device_placement=True)) as sess:
sess.run(tf.global_variables_initializer())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
saver.restore(sess, '../ckpt_alexnet/alex_model_' + file_id + '.ckpt')
if (flag_train):
for i in range(TRAINING_ITERATIONS):
xtrain, ytrain = sess.run([x_train, y_train])
_, train_accuracy, cost = sess.run([train_step, accuracy, loss], feed_dict={x: xtrain, y_: ytrain, keep_prob: 0.5})
print('training_accuracy => %.3f, cost value => %.5f for step %d, learning_rate => %.5f' % (
train_accuracy, cost, i, learning_rate.eval()))
if i % iter_per_epoch == 0:
train_accuracies.append(train_accuracy)
train_costs.append(cost)
if i // iter_per_epoch > NB_EPOCH:
break
plt.plot(train_accuracies)
axes = plt.gca()
axes.set_ylim([0, 1.2])
plt.title(file_id + ' batch train accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.savefig('../chart/accuracy_' + file_id + '.png')
plt.close()
plt.plot(train_costs)
plt.title(file_id + ' batch train loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.savefig('../chart/loss_' + file_id + '.png')
plt.close()
saver.save(sess, '../ckpt_alexnet/alex_model_' + file_id + '.ckpt')
xtest, ytest = sess.run([x_test, y_test])
test_accuracy, test_prediction, test_vector_score, test_vector_logits, test_top_5_accuracy = sess.run(
[accuracy_test, prediction_test, prediction_vector_score, logits_test, top_5_accuracy],
feed_dict={x_testdata: xtest,
y_testdata: ytest,
keep_prob: 1.0})
del xtest
fs = open('../result/test_result_' + file_id + '.txt', 'w')
fl = open('../result/test_result_' + file_id + '_logits.txt', 'w')
for it in range(len(test_dir)):
for itt in range(len(test_vector_score[it])):
fs.write(test_dir[it] + ' ' + str(test_labels[it]) + ' ' + str(itt + 1) + ' ' + str(
test_vector_score[it][itt]))
fs.write('\n')
fl.write(test_dir[it] + ' ' + str(test_labels[it]) + ' ' + str(itt + 1) + ' ' + str(
test_vector_logits[it][itt]))
fl.write('\n')
fs.close()
fl.close()
fs = open('../result/svm_result_' + file_id + '.txt', 'w')
fl = open('../result/svm_result_' + file_id + '_logits.txt', 'w')
for it in range(len(svm_dir) // batch_size):
xvalid, yvalid = sess.run([x_valid, y_valid])
valid_accuracy, valid_prediction, valid_vector_score, valid_vector_logits = sess.run(
[accuracy_test, prediction_test, prediction_vector_score, logits_test],
feed_dict={x_testdata: xvalid,
y_testdata: yvalid,
keep_prob: 1.0})
for k in range(batch_size):
for itt in range(len(valid_vector_score[k])):
fs.write(svm_dir[it * batch_size + k] + ' ' + str(svm_labels[it * batch_size + k]) + ' ' + \
str(itt + 1) + ' ' + str(valid_vector_score[k][itt]))
fs.write('\n')
fl.write(svm_dir[it * batch_size + k] + ' ' + str(svm_labels[it * batch_size + k]) + ' ' + \
str(itt + 1) + ' ' + str(valid_vector_logits[k][itt]))
fl.write('\n')
fs.close()
fl.close()
print('test_accuracy => %.3f' % test_accuracy)
print('top 5 test_accuracy => %.3f' % test_top_5_accuracy)
coord.request_stop()
coord.join(threads)
sess.close()
|
{"hexsha": "114d4696439f4bfc13faca156556517970676bca", "size": 19940, "ext": "py", "lang": "Python", "max_stars_repo_path": "alexnet/alexnet_50_species.py", "max_stars_repo_name": "peace195/latefusion", "max_stars_repo_head_hexsha": "bc2b6a06613a9d979bb95538b62334471c1b008c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2017-10-15T15:55:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-21T16:31:20.000Z", "max_issues_repo_path": "alexnet/alexnet_50_species.py", "max_issues_repo_name": "peace195/latefusion", "max_issues_repo_head_hexsha": "bc2b6a06613a9d979bb95538b62334471c1b008c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "alexnet/alexnet_50_species.py", "max_forks_repo_name": "peace195/latefusion", "max_forks_repo_head_hexsha": "bc2b6a06613a9d979bb95538b62334471c1b008c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-03-01T08:13:31.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-08T05:43:47.000Z", "avg_line_length": 37.34082397, "max_line_length": 128, "alphanum_fraction": 0.5982948847, "include": true, "reason": "import numpy,from numpy,import scipy", "num_tokens": 5537}
|