| text (string, 0–1.25M chars) | meta (string, 47–1.89k chars) |
|---|---|
import Lean
syntax (name := test) "test%" ident : command
open Lean.Elab
open Lean.Elab.Command
@[commandElab test] def elabTest : CommandElab := fun stx => do
let id ← resolveGlobalConstNoOverloadWithInfo stx[1]
liftTermElabM none do
IO.println (repr (← Lean.Meta.Match.getEquationsFor id))
return ()
def f (x : List Nat) : Nat :=
match x with
| [] => 1
| [a] => 2
| _ => 3
test% f.match_1
#check @f.match_1
#check @f.match_1.splitter
theorem ex (x : List Nat) : f x > 0 := by
simp [f]
split <;> decide
test% Std.RBNode.balance1.match_1
#check @Std.RBNode.balance1.match_1.splitter
|
{"author": "Kha", "repo": "lean4-nightly", "sha": "b4c92de57090e6c47b29d3575df53d86fce52752", "save_path": "github-repos/lean/Kha-lean4-nightly", "path": "github-repos/lean/Kha-lean4-nightly/lean4-nightly-b4c92de57090e6c47b29d3575df53d86fce52752/tests/lean/run/matchEqs.lean"}
|
#! /usr/bin/env python
import sys
import numpy as np
from matplotlib import pyplot as plt
import ogr
import gdal
import geolib
import extract_profile
#Input DEM
dem_fn = sys.argv[1]
dem_ds = gdal.Open(dem_fn)
#Load the DEM band as an array for elevation sampling below
dem = dem_ds.GetRasterBand(1).ReadAsArray()
dem_srs = geolib.get_srs(dem_ds)
#TLS location
tls_srs = geolib.wgs_srs
tls_coord = (0,0,0)
tls_height = 2.0
tls_vfov = (-30.0, 30.0)
tls_range = (5.0, 6000.0)
fig = plt.figure()
#Plot center point
plt.plot(0, tls_coord[2], marker='o', color='b')
dl = 100.0
geom_list = []
d_az = 90.0
az_range = (0.0, 360.0)
#az_list = (0.0, 90.0, 180.0, 270.0)
#d_az and d_ele are steps in degrees, so use arange rather than linspace
az_list = np.arange(*az_range, d_az)
d_ele = 1.0
ele_list = np.arange(*tls_vfov, d_ele)
for az in az_list:
mx = (tls_coord[0], tls_range[1]*np.sin(np.deg2rad(az)))
my = (tls_coord[1], tls_range[1]*np.cos(np.deg2rad(az)))
line = ogr.Geometry(ogr.wkbLineString)
    geom_wkt = 'LINESTRING({0})'.format(', '.join(['{0} {1}'.format(*a) for a in zip(mx,my)]))
geom = ogr.CreateGeometryFromWkt(geom_wkt)
if not tls_srs.IsSame(dem_srs):
geolib.geom_transform(geom, dem_srs)
geom.AssignSpatialReference(dem_srs)
#Generate points in map coordinates for geom
l, mX, mY = geolib.line2pts(geom,dl)
z = extract_profile.getZ(dem, dem_ds.GetGeoTransform(), mX, mY)
plt.plot(l, z, color='k')
d_min_idx = 1
d_out = []
z_out = []
    for ele in ele_list:
        for n, d in enumerate(l[d_min_idx:]):
            #Height of the sight line at horizontal distance d for this elevation angle
            z_ele = d*np.tan(np.deg2rad(ele))
            z_d = z[d_min_idx+n]
            if z_d >= z_ele:
                #Terrain reaches the sight line: record intersection, start next ray here
                d_min_idx += n
                d_out.append(d)
                z_out.append(z_d)
                break
plt.plot(d_out, z_out, color='r')
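
#A minimal, self-contained sketch of the visibility test above (not part of the
#original script): a terrain sample at distance d is visible to a ray at
#elevation ele when its height reaches the sight line d*tan(ele). The helper
#name and the toy profile are illustrative only.
def first_blocked_sample(profile_d, profile_z, ele_deg):
    """Return (d, z) of the first sample at or above the sight line, else None."""
    slope = np.tan(np.deg2rad(ele_deg))
    for d, z in zip(profile_d, profile_z):
        if z >= d * slope:
            return d, z
    return None

#Example: flat profile with a 10 m bump starting at 110 m, ray at 5 degrees
#first_blocked_sample(np.arange(10, 210, 10.0), np.r_[np.zeros(10), 10*np.ones(10)], 5.0)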
|
{"hexsha": "1b62be35ef8af26e49d96558c7848e1769329b1b", "size": 1743, "ext": "py", "lang": "Python", "max_stars_repo_path": "tls_planner.py", "max_stars_repo_name": "dshean/tls_tools", "max_stars_repo_head_hexsha": "1bfdb1ea80a8106e797deed046a5cc35d685beb7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tls_planner.py", "max_issues_repo_name": "dshean/tls_tools", "max_issues_repo_head_hexsha": "1bfdb1ea80a8106e797deed046a5cc35d685beb7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tls_planner.py", "max_forks_repo_name": "dshean/tls_tools", "max_forks_repo_head_hexsha": "1bfdb1ea80a8106e797deed046a5cc35d685beb7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-05-31T05:41:10.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-31T05:41:10.000Z", "avg_line_length": 23.8767123288, "max_line_length": 90, "alphanum_fraction": 0.5892139989, "include": true, "reason": "import numpy", "num_tokens": 565}
|
#
# Copyright 2021 Johannes Hörmann
# 2020-2021 Lars Pastewka
#
# ### MIT license
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import logging
from math import pi, sqrt
_sqrt3 = sqrt(3)
import numpy as np
from gi.repository import GLib, GObject, Gdk, Gtk
from ..models.simple_graph import GraphLayout
from ..utils.query import dump_single_line_query_text
from .graph_popover import DtoolGraphPopover
logger = logging.getLogger(__name__)
def circle(context, x, y):
context.arc(x, y, 0.5, 0, 2 * pi)
context.close_path()
def square(context, x, y):
context.rectangle(x - 0.4, y - 0.4, 0.8, 0.8)
def triangle(context, x, y):
height = 0.5*_sqrt3
context.move_to(x, y - 0.5*height)
context.line_to(x + 0.5, y + 0.5*height)
context.line_to(x - 0.5, y + 0.5*height)
context.close_path()
class DtoolGraphWidget(Gtk.DrawingArea):
__gtype_name__ = 'DtoolGraphWidget'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._timer = None
self._graph = None
self._layout = None
self._search_by_uuid = None
self._popover = DtoolGraphPopover(on_show_clicked=self.on_show_clicked)
self._popover.set_relative_to(self)
self.connect('realize', self.on_realize)
self.connect('draw', self.on_draw)
self.connect('motion-notify-event', self.on_motion_notify)
self.set_events(Gdk.EventMask.POINTER_MOTION_MASK)
@property
def search_by_uuid(self):
return self._search_by_uuid
@search_by_uuid.setter
def search_by_uuid(self, func):
self._search_by_uuid = func
@property
def graph(self):
return self._graph
@graph.setter
def graph(self, graph):
self._graph = graph
self._graph.set_vertex_properties('state', np.zeros(self._graph.nb_vertices, dtype=bool))
self._layout = GraphLayout(self._graph)
if self._timer is None:
self._timer = GObject.timeout_add(10, self.on_timeout, self)
def __del__(self):
if self._timer is not None:
GObject.source_remove(self._timer)
def _cairo_scale(self, area, context):
w, h = area.get_allocated_width(), area.get_allocated_height()
positions = self._layout.positions
min_x = np.min(positions[:, 0]) - 1
max_x = np.max(positions[:, 0]) + 1
min_y = np.min(positions[:, 1]) - 1
max_y = np.max(positions[:, 1]) + 1
s = min(w / (max_x - min_x), h / (max_y - min_y))
context.scale(s, s)
context.translate((w / s - min_x - max_x) / 2,
(h / s - min_y - max_y) / 2)
def on_realize(self, area):
pass
def on_draw(self, area, context):
if self._graph is None or self._layout is None:
return
context.set_source_rgb(1, 1, 1)
context.paint()
# Set scale transformation
self._cairo_scale(area, context)
# Get positions from layouter
positions = self._layout.positions
kind = self._graph.get_vertex_properties('kind')
state = self._graph.get_vertex_properties('state')
# Draw vertices
root_color = Gdk.color_parse('lightgreen')
does_not_exist_color = Gdk.color_parse('red')
dependency_color = Gdk.color_parse('lightblue')
for i, ((x, y), k, s) in enumerate(zip(positions, kind, state)):
if k == 'root':
context.set_source_rgb(*root_color.to_floats())
square(context, x, y)
elif k == 'does-not-exist':
context.set_source_rgb(*does_not_exist_color.to_floats())
triangle(context, x, y)
else:
context.set_source_rgb(*dependency_color.to_floats())
circle(context, x, y)
if s:
context.fill_preserve()
context.set_source_rgb(0, 0, 0)
context.set_line_width(0.1)
context.stroke()
else:
context.fill()
# Draw edges
context.set_source_rgb(0, 0, 0)
context.set_line_width(0.1)
for i, j in self._graph.edges:
# Start and end position of arrow
i_pos = positions[i].copy()
j_pos = positions[j].copy()
# Adjust to radius of circle
ij = i_pos - j_pos
normal = ij / np.linalg.norm(ij)
perpendicular = np.array([normal[1], -normal[0]])
i_pos -= 0.5 * normal
j_pos += 0.5 * normal
# Draw line
context.move_to(*(i_pos - 0.05 * normal))
context.line_to(*(j_pos + 0.1 * normal))
context.stroke()
# Draw arrow head
context.move_to(*i_pos)
context.line_to(*(i_pos - 0.2 * normal - 0.2 * perpendicular))
context.line_to(*(i_pos - 0.2 * normal + 0.2 * perpendicular))
context.fill()
context.close_path()
def on_motion_notify(self, area, event):
context = area.get_window().cairo_create()
self._cairo_scale(area, context)
positions = self._layout.positions
state = np.array(self._graph.get_vertex_properties('state'))
uuids = np.array(self._graph.get_vertex_properties('uuid'))
names = np.array(self._graph.get_vertex_properties('name'))
cursor_pos = np.array(context.device_to_user(event.x, event.y))
dist_sq = np.sum((positions - cursor_pos) ** 2, axis=1)
new_state = dist_sq < 0.25
if np.any(new_state != state):
state = new_state
self._graph.set_vertex_properties('state', state)
self.queue_draw()
if np.any(state):
# Show popover
positions = self._layout.positions
x, y = positions[state][0]
rect = Gdk.Rectangle()
rect.x, rect.y = context.user_to_device(x, y + 0.5)
self._popover.set_pointing_to(rect)
self._current_uuid = uuids[state][0]
self._popover.uuid = self._current_uuid
self._popover.name = names[state][0]
self._popover.show()
if not np.any(state):
# Hide popover if no node is active
self._popover.hide()
def on_show_clicked(self, user_data):
self._popover.hide()
        # Use an action to invoke the search; if no matching action exists, nothing happens.
search_text = dump_single_line_query_text({"uuid": self._current_uuid})
self.get_action_group("win").activate_action('search-select-show', GLib.Variant.new_string(search_text))
def on_timeout(self, user_data):
try:
self._layout.iterate()
except Exception as e:
logger.error(str(e))
self.queue_draw()
return True
GObject.type_register(DtoolGraphWidget)
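
# A standalone sketch of the fit-and-center mapping used in _cairo_scale above
# (illustrative, not part of the widget): choose the uniform scale s that fits
# the padded layout bounding box into a w-by-h area, then the translation that
# centers it. The helper name fit_transform is an assumption.
def fit_transform(positions, w, h, pad=1.0):
    min_x, min_y = np.min(positions, axis=0) - pad
    max_x, max_y = np.max(positions, axis=0) + pad
    s = min(w / (max_x - min_x), h / (max_y - min_y))
    # cairo equivalent: context.scale(s, s); context.translate(tx, ty)
    tx = (w / s - min_x - max_x) / 2
    ty = (h / s - min_y - max_y) / 2
    return s, tx, ty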
|
{"hexsha": "13515c271987da1bcfeae12fba0ee766ba8b7aa6", "size": 7984, "ext": "py", "lang": "Python", "max_stars_repo_path": "dtool_lookup_gui/widgets/graph_widget.py", "max_stars_repo_name": "IMTEK-Simulation/dtool-lookup-gui", "max_stars_repo_head_hexsha": "60e0824b2e883d756e57d933e4657645dfa7c6d4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2021-07-04T19:44:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-15T10:05:24.000Z", "max_issues_repo_path": "dtool_lookup_gui/widgets/graph_widget.py", "max_issues_repo_name": "IMTEK-Simulation/dtool-lookup-gui", "max_issues_repo_head_hexsha": "60e0824b2e883d756e57d933e4657645dfa7c6d4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 47, "max_issues_repo_issues_event_min_datetime": "2020-10-07T10:08:01.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-17T15:30:56.000Z", "max_forks_repo_path": "dtool_lookup_gui/widgets/graph_widget.py", "max_forks_repo_name": "IMTEK-Simulation/dtool-lookup-gui", "max_forks_repo_head_hexsha": "60e0824b2e883d756e57d933e4657645dfa7c6d4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-01-29T19:07:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-29T19:07:17.000Z", "avg_line_length": 34.7130434783, "max_line_length": 112, "alphanum_fraction": 0.6179859719, "include": true, "reason": "import numpy", "num_tokens": 1897}
|
import os
import argparse
import json
import numpy as np
from torch import device
from torch.cuda import is_available
if is_available():
DEVICE = device("cuda")
else:
DEVICE = device("cpu")
from multiml import logger
def main(opts):
logger.set_level(opts.loglevel)
global DEVICE
from utils import load_config
from run_utils import get_multi_loss, set_seed
config = load_config(opts.config)
verbose = 1
if opts.seed is not None:
config['seed'] = opts.seed
if opts.gpu_index is not None and DEVICE == device('cuda'):
DEVICE = device(f'cuda:{opts.gpu_index}')
if opts.data_path is not None:
config['dataset']['params']['data_path'] = opts.data_path
if opts.event is not None:
config['dataset']['params']['max_events'] = int(opts.event)
if opts.clip_value is not None :
config['ASNG']['clip'] = opts.clip_value
if opts.alpha is not None :
config['ASNG']['alpha'] = opts.alpha
if opts.lam is not None :
config['ASNG']['lam'] = opts.lam
if opts.delta is not None :
config['ASNG']['delta'] = opts.delta
if opts.epochs is not None :
config['ASNG']['epochs'] = opts.epochs
set_seed(config.seed)
if opts.do_pretrain :
jobid = 'pretrain_' + opts.jobid
else :
jobid = 'no_train_' + opts.jobid
save_dir = f'output/{os.path.basename(__file__)[:-3]}_{opts.event}evt_weight{opts.weight}_{jobid}'
use_multi_loss, loss_weights = get_multi_loss(opts.weight)
from run_utils import preprocessing
saver, storegate, task_scheduler, metric = preprocessing(
save_dir=save_dir,
config=config,
device=DEVICE,
tau4vec_tasks=['conv2D', 'MLP', 'SF'],
higgsId_tasks=['lstm', 'mlp', 'mass'],
)
# Time measurements
from timer import timer
timer_reg = {}
phases = ['test'] if opts.load_weights else ['train', 'valid', 'test']
# Agent
logger.info(f'lambda / alpha / delta is {config.ASNG.lam} / {config.ASNG.alpha} / {config.ASNG.delta}')
from multiml.agent.pytorch import PytorchASNGNASAgent
with timer(timer_reg, "initialize"):
from my_tasks import mapping_truth_corr
config['ASNG']['connectiontask_args']['phases'] = phases
config['ASNG']['connectiontask_args']['variable_mapping'] = mapping_truth_corr
config['ASNG']['connectiontask_args']['device'] = DEVICE
config['ASNG']['connectiontask_args']['loss_weights'] = loss_weights
agent = PytorchASNGNASAgent(
verbose = verbose,
num_epochs = config.ASNG.epochs,
max_patience = config.ASNG.patience,
batch_size = config.ASNG.batch_size,
asng_args = config.ASNG.asng_args,
optimizer = config.ASNG.optimizer.name,
optimizer_args = config.ASNG.optimizer.params,
scheduler = config.ASNG.scheduler,
# BaseAgent
saver=saver,
storegate=storegate,
task_scheduler=task_scheduler,
metric=metric,
# EnsembleAgent
# ConnectionSimpleAgent
freeze_model_weights=False,
do_pretraining = opts.do_pretrain,
connectiontask_args= config.ASNG.connectiontask_args,
)
with timer(timer_reg, "execute"):
agent.execute()
with timer(timer_reg, "finalize"):
agent.finalize()
results = agent.results_json
results['walltime'] = timer_reg['execute'][1]
results['timer_reg'] = timer_reg
results['seed'] = opts.seed
results['nevents'] = opts.event*2
def print_dict(key, val) :
if type(val) is dict :
for k, v in val.items():
print_dict( f'{key} {k}', v)
else :
logger.info(f'{key: <30} : {val}')
for key, val in results.items() :
print_dict(key, val)
with open(f'{saver.save_dir}/result.run_connection_asngnas_{opts.event}evt_weight{opts.weight}.json', 'w') as fo :
json.dump([results], fo, indent=2)
if not opts.load_weights:
with open(f"{saver.save_dir}/timer.pkl", 'wb') as f:
import pickle
pickle.dump(timer_reg, f)
### post processing
variables = []
from my_tasks import corr_tau_4vec
variables.extend(corr_tau_4vec)
variables.extend(['probability'])
for phase in phases :
# dump prediction
storegate.set_data_id("")
y_pred = np.array( storegate.get_data(phase = phase, var_names = variables ) )
os.makedirs(f'{saver.save_dir}/pred/{phase}', exist_ok = True )
for i, v in enumerate(variables):
np.save(f'{saver.save_dir}/pred/{phase}/{v}', y_pred[i])
if __name__ == '__main__':
from distutils.util import strtobool
    parser = argparse.ArgumentParser( description = 'This is a script to run ASNG-NAS connection training' )
parser.add_argument('-c', '--config', action = 'store', dest = 'config', required = True, type = str, default = None, help = 'text path for config file(yaml)')
parser.add_argument('-s', '--seed', action = 'store', dest = 'seed', required = False, type = int, default = None, help = 'seed integer')
parser.add_argument('-g', '--gpu_index', action = 'store', dest = 'gpu_index', required = False, type = int, default = None, help = 'gpu index')
parser.add_argument('-ev', '--event', action = 'store', dest = 'event', required = False, type = int, default = None, help = 'number of event ')
parser.add_argument('-ep', '--epochs', action = 'store', dest = 'epochs', required = False, type = int, default = None, help = 'number of epochs ')
parser.add_argument('-cl', '--clip_value', action = 'store', dest = 'clip_value', required = False, type = int, default = None, help = 'clip value of grad ')
    parser.add_argument('-d', '--delta', action = 'store', dest = 'delta', required = False, type = float, default = None, help = 'delta of ASNG ')
parser.add_argument('-a', '--alpha', action = 'store', dest = 'alpha', required = False, type = float, default = None, help = 'alpha of ASNG ')
parser.add_argument('-w', '--weight', action = 'store', dest = 'weight', required = False, type = float, default = None, help = 'weight of task1 ')
parser.add_argument('-l', '--lam', action = 'store', dest = 'lam', required = False, type = int, default = None, help = 'lambda value ')
parser.add_argument("-j", '--jobid', action = 'store', dest = 'jobid', required = True, type = str, default = 'default', help = 'job id ')
parser.add_argument('-dp', '--data_path', action = 'store', dest = 'data_path', required = False, type = str, default = None, help = 'data path')
parser.add_argument('-p', '--do_pretrain', action = 'store', dest = 'do_pretrain', required = False, type = strtobool, help = 'do pretraining')
parser.add_argument('-lw', '--load_weights', action = 'store', dest = 'load_weights', required = False, type = bool, help = 'load weight')
    parser.add_argument('-ll', '--loglevel', action = 'store', dest = 'loglevel', required = False, choices = ('DEBUG', 'INFO', 'WARN', 'ERROR', 'DISABLED'), default = 'INFO', help = 'msg level to use. Valid choices are DEBUG, INFO, WARN, ERROR, DISABLED; default is INFO')
# parser.add_argument('-p', '--property', action = 'store', dest = 'properties', required = False, nargs = '*' )
opts = parser.parse_args()
main( opts )
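
# A minimal sketch of the `timer` context manager imported from timer.py above.
# The real implementation is not shown in this file; this version is an
# assumption consistent with the timer_reg['execute'][1] walltime lookup,
# which expects [start, elapsed] per key.
import time
from contextlib import contextmanager

@contextmanager
def _timer_sketch(registry, key):
    start = time.time()
    registry[key] = [start, None]
    try:
        yield
    finally:
        # record elapsed wall time alongside the start timestamp
        registry[key][1] = time.time() - start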
|
{"hexsha": "3bf66d953c6f3fc96549411bf3d3611db4c64af2", "size": 7871, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/pytorch/run_multi_connection_asngnas.py", "max_stars_repo_name": "UTokyo-ICEPP/multiml_htautau", "max_stars_repo_head_hexsha": "5f926c2291a55f57419aa0130d07e2a793fc7353", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/pytorch/run_multi_connection_asngnas.py", "max_issues_repo_name": "UTokyo-ICEPP/multiml_htautau", "max_issues_repo_head_hexsha": "5f926c2291a55f57419aa0130d07e2a793fc7353", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/pytorch/run_multi_connection_asngnas.py", "max_forks_repo_name": "UTokyo-ICEPP/multiml_htautau", "max_forks_repo_head_hexsha": "5f926c2291a55f57419aa0130d07e2a793fc7353", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.8670212766, "max_line_length": 253, "alphanum_fraction": 0.5915385593, "include": true, "reason": "import numpy", "num_tokens": 1946}
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Implementation of the tracker described in the paper
% "MEEM: Robust Tracking via Multiple Experts using Entropy Minimization",
% Jianming Zhang, Shugao Ma, Stan Sclaroff, ECCV, 2014
%
% Copyright (C) 2014 Jianming Zhang
%
% This program is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% This program is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with this program. If not, see <http://www.gnu.org/licenses/>.
%
% If you have problems about this software, please contact: jmzhang@bu.edu
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function expertsDo(I_vf,lambda,sigma)
global sampler;
global svm_tracker;
global experts;
global config
roi_reg = sampler.roi; roi_reg(3:4) = sampler.roi(3:4)-sampler.roi(1:2);
feature_map = imresize(I_vf,config.ratio,'nearest'); %
ratio_x = size(I_vf,2)/size(feature_map,2);
ratio_y = size(I_vf,1)/size(feature_map,1);
% tmp_mask = zeros(sampler.template_size(1:2));
% tmp_mask(1:2:end,1:2:end) = 1;
% tmp_mask = repmat(tmp_mask,[1,1,size(I_vf,3)]);
patterns = im2colstep(feature_map,[sampler.template_size(1:2), size(I_vf,3)],[1, 1, size(I_vf,3)]);
% patterns = patterns(tmp_mask(:)>0,:); % columnwise
x_sz = size(feature_map,2)-sampler.template_size(2)+1;
y_sz = size(feature_map,1)-sampler.template_size(1)+1;
[X Y] = meshgrid(1:x_sz,1:y_sz);
temp = repmat(svm_tracker.output,[numel(X),1]);
temp(:,1) = (X(:)-1)*ratio_x + sampler.roi(1);
temp(:,2) = (Y(:)-1)*ratio_y + sampler.roi(2);
state = temp;
%% select expert
label_prior = fspecial('gaussian',[y_sz,x_sz],sigma);
label_prior_neg = ones(size(label_prior))/numel(label_prior);
% compute log likelihood and entropy
n = numel(experts);
score_temp = zeros(n,1);
rect_temp = zeros(n,4);
if config.debug
loglik_vec=[];
ent_vec=[];
figure(3)
end
kernel_size = sampler.template_size(1:2)*0.5;%half template size;
rad = 0.5*min(sampler.template_size(1:2));
mask_temp = zeros(y_sz,x_sz);
idx_temp = [];
svm_scores = [];
svm_score = {};
svm_density = {};
peaks_collection = {};
peaks = zeros(n,2);
peaks_pool = [];
[X Y] = meshgrid(1:round(rad):x_sz,1:round(rad):y_sz);
for i = 1:n
% find the highest peak
svm_score{i} = -(experts{i}.w*patterns+experts{i}.Bias);
svm_density{i} = normcdf(svm_score{i},0,1).*label_prior(:)';
[val idx] = max(svm_density{i});
best_rect = state(idx,:);
rect_temp(i,:) = best_rect;
svm_scores(i) = svm_score{i}(idx);
idx_temp(i) = idx;
[r c] = ind2sub(size(mask_temp),idx);
peaks(i,:) = [r c];
% find the possible peaks
density_map = reshape(svm_density{i},y_sz,[]);
density_map = (density_map - min(density_map(:)))/(max(density_map(:)) - min(density_map(:)));
mm = (imdilate(density_map,strel('square',round(rad))) == density_map) & density_map > 0.9;
[rn cn] = ind2sub(size(mask_temp),find(mm));
peaks_pool = cat(1,peaks_pool,[rn cn]);
peaks_collection{i} = [rn cn];
% mask_temp(r,c) = 1;
end
peaks_orig = peaks;
% merge peaks
peaks = mergePeaks(peaks,rad);
peaks_pool = mergePeaks(peaks_pool,rad);
mask_temp(sub2ind(size(mask_temp),round(peaks(:,1)),round(peaks(:,2)))) = 1;
%%
for i = 1:n
dis = pdist2(peaks_pool,peaks_collection{i});
[rr cc] = ind2sub([size(peaks_pool,1),size(peaks_collection{i},1)],find(dis < rad));
[C,ia,ic] = unique(cc);
peaks_temp = peaks_pool;
peaks_temp(rr(ia),:) = peaks_collection{i}(cc(ia),:);
mask = zeros(size(mask_temp));
mask(sub2ind(size(mask_temp),round(peaks_temp(:,1)),round(peaks_temp(:,2)))) = 1;
mask = mask>0;
[loglik ent] = getLogLikelihoodEntropy(svm_score{i}(mask(:)),label_prior(mask(:)),label_prior_neg(mask(:)));
if config.debug
loglik_vec(end+1) = loglik;
ent_vec(end+1) = ent;
subplot(2,4,i)
imagesc(reshape(svm_score{i},y_sz,[]));
colorbar
subplot(2,4,i+4)
imagesc(reshape(mask,y_sz,[]))
end
experts{i}.score(end+1) = loglik - lambda*ent;
score_temp(i) = sum(experts{i}.score(max(end+1-config.entropy_score_winsize,1):end));
end
%%
svm_tracker.best_expert_idx = numel(score_temp);
if numel(score_temp) >= 2 && config.use_experts
[val idx] = max(score_temp(1:end-1));
if score_temp(idx) > score_temp(end) && size(peaks,1) > 1%svm_scores(idx) > config.svm_thresh
% recover previous version
% output = svm_tracker.output;
% experts{end}.snapshot = svm_tracker;
experts{end}.score = experts{idx}.score;
svm_tracker = experts{idx}.snapshot;
% svm_tracker.output = rect_temp(idx,:);
svm_tracker.best_expert_idx = idx;
% experts([idx end]) = experts([end idx]);
end
end
svm_tracker.output = rect_temp(svm_tracker.best_expert_idx,:);
svm_tracker.confidence = svm_scores(svm_tracker.best_expert_idx);
svm_tracker.output_exp = rect_temp(end,:);
svm_tracker.confidence_exp = svm_scores(end);
% svm_tracker.w = experts{svm_tracker.best_expert_idx}.w;
% svm_tracker.Bias = experts{svm_tracker.best_expert_idx}.Bias;
if config.debug
for i = 1:n
subplot(2,4,i)
if i == svm_tracker.best_expert_idx
color = [1 0 0];
else
color = [1 1 1];
end
text(0,1,num2str(experts{i}.score(end)),'BackgroundColor',color);
text(15,1,num2str(score_temp(i)),'BackgroundColor',color);
text(0,3,num2str(loglik_vec(i)),'BackgroundColor',color);
text(15,3,num2str(ent_vec(i)),'BackgroundColor',color);
end
figure(2)
imagesc(mask_temp)
figure(1)
end
%% update training sample
% approximately 200 training samples
step = round(sqrt((y_sz*x_sz)/120));
mask_temp = zeros(y_sz,x_sz);
mask_temp(1:step:end,1:step:end) = 1;
mask_temp = mask_temp > 0;
sampler.patterns_dt = patterns(:,mask_temp(:))';
sampler.state_dt = state(mask_temp(:),:);
sampler.costs = 1 - getIOU(sampler.state_dt,svm_tracker.output);
if min(sampler.costs)~=0
sampler.state_dt = [sampler.state_dt; rect_temp(svm_tracker.best_expert_idx,:)];
sampler.patterns_dt = [sampler.patterns_dt; patterns(:,idx_temp(svm_tracker.best_expert_idx))'];
sampler.costs = [sampler.costs;0];
end
% better localization and add the predicted state and pattern
% output = svm_tracker.output;
% output(1:2) = output(1:2) - sampler.roi(1:2) + 1;
% [shift pattern_loc] = localize(I_vf,...
% -reshape(svm_tracker.w,config.template_sz(1),config.template_sz(2),[]),...
% output,5);
% svm_tracker.output(1:2) = svm_tracker.output(1:2) + shift;
end
function merged_peaks = mergePeaks(peaks, rad)
dis_mat = pdist2(peaks,peaks) + diag(inf*ones(size(peaks,1),1));
while min(dis_mat(:)) < rad && size(peaks,1) > 1
[val idx] = min(dis_mat(:));
[id1 id2] = ind2sub(size(dis_mat),idx);
merged_peak = 0.5*(peaks(id1,:) + peaks(id2,:));
peaks([id1 id2],:) = [];
peaks = [peaks;merged_peak];
dis_mat = pdist2(peaks,peaks) + diag(inf*ones(size(peaks,1),1));
end
merged_peaks = peaks;
end
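
For reference, a Python sketch of the mergePeaks logic above (not part of the
MEEM sources): repeatedly merge the closest pair of peaks into their midpoint
until every pairwise distance is at least rad.

import numpy as np
from scipy.spatial.distance import pdist, squareform

def merge_peaks(peaks, rad):
    peaks = np.asarray(peaks, dtype=float)
    while len(peaks) > 1:
        # pairwise distances, with inf on the diagonal as in the MATLAB code
        dis = squareform(pdist(peaks)) + np.diag(np.full(len(peaks), np.inf))
        i, j = np.unravel_index(np.argmin(dis), dis.shape)
        if dis[i, j] >= rad:
            break
        merged = 0.5 * (peaks[i] + peaks[j])
        peaks = np.vstack([np.delete(peaks, [i, j], axis=0), merged])
    return peaks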
|
{"author": "flyers", "repo": "drone-tracking", "sha": "c42e1833acfb858ac8f4ec69fa04ab02ac4c19ad", "save_path": "github-repos/MATLAB/flyers-drone-tracking", "path": "github-repos/MATLAB/flyers-drone-tracking/drone-tracking-c42e1833acfb858ac8f4ec69fa04ab02ac4c19ad/trackers/MEEM/expert_ensemble/expertsDo.m"}
|
# maintained by rajivak@utexas.edu
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
# from mpi4py import MPI
import sys
import math
##############################################################################################################################
# function to select criticisms
# ARGS:
# K: Kernel matrix
# selectedprotos: prototypes already selected
# m : number of criticisms to be selected
# reg: regularizer type.
# is_K_sparse: True means K is a pre-computed csc sparse matrix; False means it is a dense matrix.
# RETURNS: indices selected as criticisms
##############################################################################################################################
def select_criticism_regularized(K, selectedprotos, m, reg='logdet', is_K_sparse=True):
n = np.shape(K)[0]
if reg in ['None','logdet','iterative']:
pass
else:
print("wrong regularizer :" + reg)
exit(1)
options = dict()
selected = np.array([], dtype=int)
candidates2 = np.setdiff1d(range(n), selectedprotos)
inverse_of_prev_selected = None # should be a matrix
if is_K_sparse:
colsum = np.array(K.sum(0)).ravel()/n
else:
colsum = np.sum(K, axis=0)/n
for i in range(m):
maxx = -sys.float_info.max
argmax = -1
candidates = np.setdiff1d(candidates2, selected)
s1array = colsum[candidates]
temp = K[selectedprotos, :][:, candidates]
if is_K_sparse:
s2array = temp.sum(0)
else:
s2array = np.sum(temp, axis=0)
s2array = s2array / (len(selectedprotos))
s1array = np.abs(s1array - s2array)
if reg == 'logdet':
if inverse_of_prev_selected is not None: # first call has been made already
temp = K[selected, :][:, candidates]
if is_K_sparse:
temp2 = temp.transpose().dot(inverse_of_prev_selected)
regularizer = temp.transpose().multiply(temp2)
regcolsum = regularizer.sum(1).ravel()# np.sum(regularizer, axis=0)
regularizer = np.abs(K.diagonal()[candidates] - regcolsum)
else:
# hadamard product
temp2 = np.array(np.dot(inverse_of_prev_selected, temp))
regularizer = temp2 * temp
regcolsum = np.sum(regularizer, axis=0)
regularizer = np.log(np.abs(np.diagonal(K)[candidates] - regcolsum))
s1array = s1array + regularizer
else:
if is_K_sparse:
s1array = s1array - np.log(np.abs(K.diagonal()[candidates]))
else:
s1array = s1array - np.log(np.abs(np.diagonal(K)[candidates]))
argmax = candidates[np.argmax(s1array)]
maxx = np.max(s1array)
selected = np.append(selected, argmax)
if reg == 'logdet':
KK = K[selected,:][:,selected]
if is_K_sparse:
KK = KK.todense()
inverse_of_prev_selected = np.linalg.inv(KK) # shortcut
if reg == 'iterative':
selectedprotos = np.append(selectedprotos, argmax)
return selected
##############################################################################################################################
# Function to choose m of all rows by MMD as per the kernel matrix
# ARGS:
# K : kernel matrix
# candidate_indices : array of potential choices for selections, returned values are chosen from these indices
# m: number of selections to be made
# is_K_sparse: True means K is a pre-computed csc sparse matrix; False means it is a dense matrix.
# RETURNS: subset of candidate_indices which are selected as prototypes
##############################################################################################################################
def greedy_select_protos(K, candidate_indices, m, is_K_sparse=False):
if len(candidate_indices) != np.shape(K)[0]:
K = K[:,candidate_indices][candidate_indices,:]
n = len(candidate_indices)
# colsum = np.array(K.sum(0)).ravel() # same as rowsum
if is_K_sparse:
colsum = 2*np.array(K.sum(0)).ravel() / n
else:
colsum = 2*np.sum(K, axis=0) / n
selected = np.array([], dtype=int)
value = np.array([])
for i in range(m):
maxx = -sys.float_info.max
argmax = -1
candidates = np.setdiff1d(range(n), selected)
s1array = colsum[candidates]
if len(selected) > 0:
temp = K[selected, :][:, candidates]
if is_K_sparse:
# s2array = temp.sum(0) *2
s2array = temp.sum(0) * 2 + K.diagonal()[candidates]
else:
s2array = np.sum(temp, axis=0) *2 + np.diagonal(K)[candidates]
s2array = s2array/(len(selected) + 1)
s1array = s1array - s2array
else:
if is_K_sparse:
s1array = s1array - (np.abs(K.diagonal()[candidates]))
else:
s1array = s1array - (np.abs(np.diagonal(K)[candidates]))
argmax = candidates[np.argmax(s1array)]
# print("max %f" %np.max(s1array))
selected = np.append(selected, argmax)
# value = np.append(value,maxx)
KK = K[selected, :][:, selected]
if is_K_sparse:
KK = KK.todense()
inverse_of_prev_selected = np.linalg.inv(KK) # shortcut
return candidate_indices[selected]
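
# Minimal usage sketch (not in the original module): toy 1-D data, a dense RBF
# kernel, 3 prototypes, then 2 criticisms with the logdet regularizer. The
# bandwidth 0.1 is an arbitrary illustrative choice.
if __name__ == '__main__':
    X = np.linspace(0, 1, 20).reshape(-1, 1)
    K = np.exp(-((X - X.T) ** 2) / 0.1)
    protos = greedy_select_protos(K, np.arange(K.shape[0]), 3, is_K_sparse=False)
    crits = select_criticism_regularized(K, protos, 2, reg='logdet', is_K_sparse=False)
    print("prototypes:", protos, "criticisms:", crits)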
|
{"hexsha": "33dba23ed516bf17018a5d8a26ee5e8070d824aa", "size": 5607, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/mmd/MMD-critic/mmd.py", "max_stars_repo_name": "sthagen/christophM-interpretable-ml-book", "max_stars_repo_head_hexsha": "d8b82b8e6ab82c78d95de784a601e71025621ab2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4164, "max_stars_repo_stars_event_min_datetime": "2017-12-03T19:28:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T17:31:51.000Z", "max_issues_repo_path": "scripts/mmd/MMD-critic/mmd.py", "max_issues_repo_name": "sthagen/christophM-interpretable-ml-book", "max_issues_repo_head_hexsha": "d8b82b8e6ab82c78d95de784a601e71025621ab2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 257, "max_issues_repo_issues_event_min_datetime": "2017-12-04T07:19:36.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T06:00:07.000Z", "max_forks_repo_path": "scripts/mmd/MMD-critic/mmd.py", "max_forks_repo_name": "sthagen/christophM-interpretable-ml-book", "max_forks_repo_head_hexsha": "d8b82b8e6ab82c78d95de784a601e71025621ab2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 915, "max_forks_repo_forks_event_min_datetime": "2017-12-03T16:54:55.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T06:52:34.000Z", "avg_line_length": 36.4090909091, "max_line_length": 126, "alphanum_fraction": 0.5391474942, "include": true, "reason": "import numpy", "num_tokens": 1289}
|
! test with processor lattice.
program cc
integer n, i, j
parameter (n=100)
real a(n,n)
!hpf$ dynamic a
!hpf$ template t(n,n)
!hpf$ processors p(2,2)
!hpf$ align with t:: a
!hpf$ distribute t(cyclic,cyclic) onto p
a(1,1) = 1.0
!hpf$ realign a(i,j) with t(j,i)
print *, a(1,1)
end
|
{"hexsha": "20b556a9c58c78c689ce16213023bc3385a3f90d", "size": 326, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "packages/PIPS/validation/Hpfc/cc.f", "max_stars_repo_name": "DVSR1966/par4all", "max_stars_repo_head_hexsha": "86b33ca9da736e832b568c5637a2381f360f1996", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 51, "max_stars_repo_stars_event_min_datetime": "2015-01-31T01:51:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-18T02:01:50.000Z", "max_issues_repo_path": "packages/PIPS/validation/Hpfc/cc.f", "max_issues_repo_name": "DVSR1966/par4all", "max_issues_repo_head_hexsha": "86b33ca9da736e832b568c5637a2381f360f1996", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2017-05-29T09:29:00.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-11T16:01:39.000Z", "max_forks_repo_path": "packages/PIPS/validation/Hpfc/cc.f", "max_forks_repo_name": "DVSR1966/par4all", "max_forks_repo_head_hexsha": "86b33ca9da736e832b568c5637a2381f360f1996", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2015-03-26T08:05:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-18T02:01:51.000Z", "avg_line_length": 17.1578947368, "max_line_length": 40, "alphanum_fraction": 0.5674846626, "num_tokens": 122}
|
#=
Copyright (c) 2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
=#
import CompilerTools.AstWalker
export constant_fold
binops = Set([:+; :-; :*; :/])
function constant_folder(node, symbol_table, top_level_number, is_top_level, read)
if isa(node, Expr)
if node.head == :(=)
rhs = AstWalker.AstWalk(node.args[2], constant_folder, symbol_table)
if isa(rhs, Number)
symbol_table[node.args[1]] = rhs
end
return node
elseif node.head == :call
if in(node.args[1], binops) && length(node.args)==3
node.args[2] = AstWalker.AstWalk(node.args[2], constant_folder, symbol_table)[1]
node.args[3] = AstWalker.AstWalk(node.args[3], constant_folder, symbol_table)[1]
if isa(node.args[2], Number) && isa(node.args[3], Number)
return eval(node)
end
end
end
elseif isa(node, Symbol)
if haskey(symbol_table, node)
return symbol_table[node]
end
elseif isa(node, Number)
return node
end
return CompilerTools.AstWalker.ASTWALK_RECURSE
end
function constant_fold(fn)
symbol_table = Dict{Symbol, Number}()
fn = AstWalker.AstWalk(fn, constant_folder, symbol_table)
return fn
end
#=
macro constant_fold(fn)
symbol_table = Dict{Symbol, Number}()
AstWalker.AstWalk(fn, constant_folder, symbol_table)
println(symbol_table)
println(fn)
return esc(fn)
end
@constant_fold function test(z)
a = 3
b = 4
c = a - b
d = z + c
return d
end
=#
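
For comparison, the same bottom-up folding idea in Python (illustrative, not
part of this package), using ast.NodeTransformer. Unlike the Julia version it
only folds literal operands and does not track a symbol table.

import ast
import operator

# Map AST operator nodes to functions, mirroring the binops set above.
_OPS = {ast.Add: operator.add, ast.Sub: operator.sub,
        ast.Mult: operator.mul, ast.Div: operator.truediv}

class ConstantFolder(ast.NodeTransformer):
    def visit_BinOp(self, node):
        self.generic_visit(node)  # fold children first (bottom-up)
        if (type(node.op) in _OPS
                and isinstance(node.left, ast.Constant)
                and isinstance(node.right, ast.Constant)
                and isinstance(node.left.value, (int, float))
                and isinstance(node.right.value, (int, float))):
            value = _OPS[type(node.op)](node.left.value, node.right.value)
            return ast.copy_location(ast.Constant(value), node)
        return node

tree = ast.parse("d = z + (3 - 4)")
print(ast.unparse(ConstantFolder().visit(tree)))  # d = z + -1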
|
{"hexsha": "fba023882bff31613e72fbc755dd640cff77dec6", "size": 2692, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/constant_fold.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/CompilerTools.jl-98f049d2-a028-5a73-bd4d-a8c50ff59ab5", "max_stars_repo_head_hexsha": "ee7f80e5dc8c6b6dfefc1b8b5b037ea39f5e4450", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 32, "max_stars_repo_stars_event_min_datetime": "2015-10-20T23:54:36.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-10T00:30:00.000Z", "max_issues_repo_path": "src/constant_fold.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/CompilerTools.jl-98f049d2-a028-5a73-bd4d-a8c50ff59ab5", "max_issues_repo_head_hexsha": "ee7f80e5dc8c6b6dfefc1b8b5b037ea39f5e4450", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2016-03-03T19:33:38.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-09T08:28:02.000Z", "max_forks_repo_path": "src/constant_fold.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/CompilerTools.jl-98f049d2-a028-5a73-bd4d-a8c50ff59ab5", "max_forks_repo_head_hexsha": "ee7f80e5dc8c6b6dfefc1b8b5b037ea39f5e4450", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2015-10-21T07:54:31.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-01T18:16:34.000Z", "avg_line_length": 33.2345679012, "max_line_length": 87, "alphanum_fraction": 0.7377414562, "num_tokens": 650}
|
program conditionalStatements
implicit none
integer a, b, c, result;
print *, "Enter value of a :";
read *, a;
print *, "Enter value of b :";
read *, b;
print *, "Enter value of c :";
read *, c;
if( a < b) then
if(a < c) then
result = a;
else
result = c;
endif
else
if(b > c) then
result = b;
else
result = c;
endif
endif
print *, "Greatest number is : ", result;
end program conditionalStatements
|
{"hexsha": "82e4df3c4960d055200f390165ba20a5f480a73b", "size": 570, "ext": "f95", "lang": "FORTRAN", "max_stars_repo_path": "III-sem/NumericalMethod/FortranProgram/Practice/if-else.f95", "max_stars_repo_name": "ASHD27/JMI-MCA", "max_stars_repo_head_hexsha": "61995cd2c8306b089a9b40d49d9716043d1145db", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-03-18T16:27:33.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-07T12:39:32.000Z", "max_issues_repo_path": "III-sem/NumericalMethod/FortranProgram/Practice/if-else.f95", "max_issues_repo_name": "ASHD27/JMI-MCA", "max_issues_repo_head_hexsha": "61995cd2c8306b089a9b40d49d9716043d1145db", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "III-sem/NumericalMethod/FortranProgram/Practice/if-else.f95", "max_forks_repo_name": "ASHD27/JMI-MCA", "max_forks_repo_head_hexsha": "61995cd2c8306b089a9b40d49d9716043d1145db", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2019-11-11T06:49:10.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-07T12:41:20.000Z", "avg_line_length": 21.1111111111, "max_line_length": 46, "alphanum_fraction": 0.4596491228, "num_tokens": 145}
|
#! Demonstrates the usage of parameterized test fixtures.
#:include 'fytest.fypp'
#:block TEST_SUITE('parameterized2')
use mymath
implicit none
type :: fact_calc_t
integer :: val
integer :: expresult
end type fact_calc_t
#! This will contain the parameters of the tests, once TEST_SUITE_INIT() has been executed.
type(fact_calc_t), allocatable :: factcalcs(:)
#:contains
#! Initializes global test suite variables
#:block TEST_SUITE_INIT()
integer :: fd
integer :: icalc, ncalc
open(newunit=fd, file="factcalcs.dat", action="read", form="formatted")
read(fd, *) ncalc
allocate(factcalcs(ncalc))
do icalc = 1, ncalc
read(fd, *) factcalcs(icalc)%val, factcalcs(icalc)%expresult
end do
#:endblock
#! Parameterized test, iterator runs over a given array. The array must be either a constant
#! array, or must be initialized in the TEST_SUITE_INIT() routine.
#:block TEST_FIXTURE('special', ITERATORS=[('factcalc', 'factcalcs')], RENDERER='render')
type(fact_calc_t) :: factcalc
#:contains
#! Tests can access the fixture scope
#:block TEST('testval')
@:ASSERT(factorial(factcalc%val) == factcalc%expresult)
#:endblock
#! We define a renderer to show the number used in a given fixture
#! A renderer must have no arguments and return a string containing a human
#! readable representation of the fixture.
function render() result(str)
character(:), allocatable :: str
character(10) :: buffer
write(buffer, "(A,I0)") 'curval=', factcalc%val
str = trim(buffer)
end function render
#:endblock TEST_FIXTURE
#:endblock TEST_SUITE
#:block TEST_DRIVER()
#:endblock TEST_DRIVER
|
{"hexsha": "5b4a581585f0f399d0ee6e894ae6b476e33d356b", "size": 1726, "ext": "fpp", "lang": "FORTRAN", "max_stars_repo_path": "examples/serial/test/test_parameterized2.fpp", "max_stars_repo_name": "aradi/fytest", "max_stars_repo_head_hexsha": "9133d5dab5b582161f4fb4c4b127d7f97133e3e7", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2019-01-13T23:34:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-19T11:25:29.000Z", "max_issues_repo_path": "examples/serial/test/test_parameterized2.fpp", "max_issues_repo_name": "aradi/fyunit", "max_issues_repo_head_hexsha": "9133d5dab5b582161f4fb4c4b127d7f97133e3e7", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-07-23T15:59:07.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-23T15:59:07.000Z", "max_forks_repo_path": "examples/serial/test/test_parameterized2.fpp", "max_forks_repo_name": "aradi/fyunit", "max_forks_repo_head_hexsha": "9133d5dab5b582161f4fb4c4b127d7f97133e3e7", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-02T18:31:41.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-02T18:31:41.000Z", "avg_line_length": 24.3098591549, "max_line_length": 94, "alphanum_fraction": 0.6853997683, "num_tokens": 461}
|
"""
This file contains some utility functions for using ImageNet dataset and L-OBS
Author: Chen Shangyu (schen025@e.ntu.edu.sg)
"""
import os
import sys
import time
import math
import torch
import torch.nn as nn
import torch.nn.init as init
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torch.optim as optim
import collections
from datetime import datetime
import numpy as np
import tensorflow as tf
import torch.nn.functional as F
import pickle
use_cuda = torch.cuda.is_available()
def get_error(theta_B, hessian, theta_0):
"""
Calculate \delta \theta^T H \delta \theta
:param theta_B:
:param hessian:
:param theta_0:
:param alpha:
:param sigma:
:return:
"""
delta = theta_B - theta_0
error = np.trace(np.dot(np.dot(delta.T, hessian), delta))
return error
def unfold_kernel(kernel):
"""
In pytorch format, kernel is stored as [out_channel, in_channel, height, width]
    Unfold kernel into a 2-dimensional weights matrix: [in_channel * height * width, out_channel]
:param kernel: numpy ndarray
:return:
"""
k_shape = kernel.shape
weight = np.zeros([k_shape[1] * k_shape[2] * k_shape[3], k_shape[0]])
for i in range(k_shape[0]):
weight[:, i] = np.reshape(kernel[i, :, :, :], [-1])
return weight
def fold_weights(weights, kernel_shape):
"""
    In pytorch format, kernel is stored as [out_channel, in_channel, height, width]
    Fold weights into a 4-dimensional tensor of shape [out_channel, in_channel, height, width]
:param weights:
:param kernel_shape:
:return:
"""
kernel = np.zeros(shape=kernel_shape)
for i in range(kernel_shape[0]):
kernel[i,:,:,:] = weights[:, i].reshape([kernel_shape[1], kernel_shape[2], kernel_shape[3]])
return kernel
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def validate(model, val_loader, val_record, train_record, n_batch_used = 100, use_cuda = True):
monitor_freq = int(n_batch_used / 5)
# batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
criterion = nn.CrossEntropyLoss()
# switch to evaluate mode
model.eval()
# end = time.time()
for i, (input, target) in enumerate(val_loader):
if use_cuda:
target = target.cuda()
input = input.cuda()
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
if i % monitor_freq == 0:
print('Test: [{0}/{1}]\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), loss=losses,
top1=top1, top5=top5))
if i == n_batch_used:
break
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
if val_record != None:
val_record.write('Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}\n'
.format(top1=top1, top5=top5))
if train_record != None:
train_record.write('Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}\n'
.format(top1=top1, top5=top5))
model.train()
return top1.avg, top5.avg
def adjust_mean_var(net, train_loader, train_file, n_batch_used = 500, use_cuda = True):
monitor_freq = int(n_batch_used / 5)
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
criterion = nn.CrossEntropyLoss()
net.train()
# end = time.time()
for i, (input, target) in enumerate(train_loader):
if use_cuda:
target = target.cuda()
input = input.cuda()
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = net(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
if (i) % monitor_freq == 0:
print('Train: [{0}/{1}]\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, n_batch_used, loss=losses,
top1=top1, top5=top5))
if train_file != None:
train_file.write('[%d/%d] Loss: %f, Prec@1: %f, Prec@5: %f\n' %\
(i, n_batch_used, losses.avg, top1.avg, top5.avg))
if (i) == n_batch_used:
break
def create_prune_graph(input_dimension, output_dimension):
pruned_weight_holder = tf.placeholder(tf.float32, shape=None)
hessian_inv_diag_holder = tf.placeholder(tf.float32, shape=None)
hessian_inv_holder = tf.placeholder(tf.float32, shape=[input_dimension, input_dimension])
prune_row_idx_holder = tf.placeholder(tf.float32, shape=None)
mask_holder = tf.placeholder(tf.float32, shape=[input_dimension, output_dimension])
wb_holder = tf.placeholder(tf.float32, shape=[input_dimension, output_dimension])
selection_q = tf.one_hot(indices = prune_row_idx_holder, depth = input_dimension)
get_sparse_wb_op = -pruned_weight_holder / (hessian_inv_diag_holder + 10e-6) \
* tf.matmul(a = hessian_inv_holder, b = selection_q) + wb_holder
return pruned_weight_holder, hessian_inv_diag_holder, hessian_inv_holder, prune_row_idx_holder,\
mask_holder, wb_holder, get_sparse_wb_op
def create_sparse_mul_graph(input_dimension, output_dimension):
"""
This function perform element-wise multiplication between weights matrix and mask matrix
by tensorflow backend to speed up
args:
input_dimension: first dimension of weights (mask) matrix
output_dimension: second dimension of weights (mask) matrix
Output:
mask_holder: tf holder for mask matrix
wb_holder: tf holder for weight matrix
get_sparse_wb_op: tf op for generating sparse wb
"""
mask_holder = tf.placeholder(tf.float32, shape=[input_dimension, output_dimension])
wb_holder = tf.placeholder(tf.float32, shape=[input_dimension, output_dimension])
get_sparse_wb_op = tf.multiply(wb_holder, mask_holder)
return mask_holder, wb_holder, get_sparse_wb_op
def generate_layer_list(param):
pass
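
# Quick self-check (not in the original file): unfold_kernel and fold_weights
# are inverses, so a random conv kernel should round-trip exactly.
if __name__ == '__main__':
    k = np.random.randn(8, 3, 5, 5)  # [out_channel, in_channel, height, width]
    w = unfold_kernel(k)             # [in_channel * height * width, out_channel]
    assert w.shape == (3 * 5 * 5, 8)
    assert np.allclose(fold_weights(w, k.shape), k)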
|
{"hexsha": "cf6fabaa6eabb0a4a608dbdbd8384fec7b09f6c1", "size": 7059, "ext": "py", "lang": "Python", "max_stars_repo_path": "PyTorch/ImageNet/utils.py", "max_stars_repo_name": "csyhhu/L-OBS", "max_stars_repo_head_hexsha": "346e67977955f34b10b0461ab4d60ef8d35dc145", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 73, "max_stars_repo_stars_event_min_datetime": "2017-10-22T21:13:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-28T09:00:27.000Z", "max_issues_repo_path": "PyTorch/ImageNet/utils.py", "max_issues_repo_name": "csyhhu/L-OBS", "max_issues_repo_head_hexsha": "346e67977955f34b10b0461ab4d60ef8d35dc145", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2018-01-27T02:36:01.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-28T02:12:06.000Z", "max_forks_repo_path": "PyTorch/ImageNet/utils.py", "max_forks_repo_name": "csyhhu/L-OBS", "max_forks_repo_head_hexsha": "346e67977955f34b10b0461ab4d60ef8d35dc145", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 18, "max_forks_repo_forks_event_min_datetime": "2017-10-20T17:02:49.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-09T02:07:50.000Z", "avg_line_length": 27.7913385827, "max_line_length": 97, "alphanum_fraction": 0.7071823204, "include": true, "reason": "import numpy", "num_tokens": 2010}
|
# flake8: noqa
import pytest
import embedding.training_process
import numpy
import tempfile
from pathlib import Path
import unittest
import ai_training as ait
pytestmark = pytest.mark.asyncio
async def mock_w2v_call(payload, endpoint='words'):
if endpoint == "words":
return {'vectors': {"word1": [1.1, 1.2, 1.3], "word2": [0.1, 0.2, 0.3]}}
elif endpoint == "unk_words":
return {'unk_words': ["word1", "word2"]}
return {}
async def get_from_er_server(relative_url, params=None):
if relative_url == "ner":
return [{
'category': 'sys.places',
'value': 'London',
'start': 0,
'end': 6
}, {
'category': 'sys.date',
'value': 'today',
'start': 10,
'end': 17
}]
elif relative_url == "tokenize":
return ['be', 'here', 'now']
else:
return {}
@pytest.fixture
async def mocked_train(mocker, loop):
training = embedding.training_process.EmbedTrainingProcessWorker(
None, "no_aiohttp_session")
mocker.patch.object(
training.w2v_client, "w2v_call", new=mock_w2v_call)
mocker.patch.object(
training.entity_wrapper,
"get_from_er_server",
new=get_from_er_server)
training.entity_wrapper.train_entities_q = [
[{
'category': 'sys.places',
'value': 'london',
'start': 0,
'end': 6
}, {
'category': 'sys.date',
'value': 'today',
'start': 10,
'end': 17
}],
[{
'category': 'sys.places',
'value': 'paris',
'start': 0,
'end': 5
}, {
'category': 'sys.person',
'value': 'fred bloggs',
'start': 8,
'end': 18
}]]
training.entity_wrapper.train_entities_a = [
[{
'category': 'sys.places',
'value': 'london',
'start': 0,
'end': 6
}, {
'category': 'sys.date',
'value': 'today',
'start': 10,
'end': 17
}],
[{
'category': 'sys.places',
'value': 'paris',
'start': 0,
'end': 5
}, {
'category': 'sys.person',
'value': 'fred bloggs',
'start': 8,
'end': 18
}]]
training.entity_wrapper.train_labels = ["You said London today",
"You said Paris Fred Bloggs"]
return training
def test_mocks_ok(mocked_train):
pass
async def test_train_get_vectors(mocked_train):
train_data = ["this is mocked", "by the function above"]
vectors = await mocked_train.get_vectors(train_data)
word1vec = vectors["word1"]
word2vec = vectors["word2"]
assert type(word1vec) is numpy.ndarray
assert type(word2vec) is numpy.ndarray
async def test_er_entities(mocked_train):
question = "this is a dummy question that will be mocked out"
entities = await mocked_train.entity_wrapper.extract_entities(question)
assert len(entities) == 2
assert entities[0]['value'] == 'london'
assert entities[1]['value'] == 'today'
async def test_er_tokenize(mocked_train):
question = "this is a dummy question that will be mocked out"
tokens = await mocked_train.entity_wrapper.tokenize(question)
assert len(tokens) == 3
assert tokens[0] == 'be'
assert tokens[1] == 'here'
async def test_er_match_entities_none(mocked_train):
question = "this question has no matching entities"
entities = await get_from_er_server("ner")
matched_label = mocked_train.entity_wrapper.match_entities(
question, entities)
assert len(matched_label) == 0
async def test_er_match_entities_1(mocked_train):
question = "this question matches London"
entities = await get_from_er_server("ner")
matched_label = mocked_train.entity_wrapper.match_entities(
question, entities)
assert len(matched_label) == 1
assert matched_label[0][1] == "You said London today"
async def test_er_match_entities_2(mocked_train):
question = "this question matches Bloggs Fred"
entities = await get_from_er_server("ner")
matched_label = mocked_train.entity_wrapper.match_entities(
question, entities)
assert len(matched_label) == 1
assert matched_label[0][1] == "You said Paris Fred Bloggs"
async def test_train_success(mocked_train, mocker):
DUMMY_AIID = "123456"
DUMMY_TRAINING_DATA = """
hi
hihi"""
# mock out the maths/save functions so we can UT train()
mocker.patch("embedding.text_classifier_class.EmbeddingComparison.fit")
mocker.patch("embedding.text_classifier_class.EmbeddingComparison.save_model")
mocker.patch("shutil.move")
with tempfile.TemporaryDirectory() as tempdir:
ai_path = Path(tempdir)
train_file = ai_path / ait.AI_TRAINING_STANDARD_FILE_NAME
with train_file.open("w") as file_handle:
file_handle.write(DUMMY_TRAINING_DATA)
msg = ait.training_process.TrainingMessage(ai_path, DUMMY_AIID, 0)
topic = None
await mocked_train.train(msg, topic, None)
class MockCallback:
pass
async def dummy_async():
pass
async def test_train_success_with_callback(mocked_train, mocker):
DUMMY_AIID = "123456"
DUMMY_TRAINING_DATA = """
hi
hihi"""
# mock out the maths/save functions so we can UT train()
mocker.patch("embedding.text_classifier_class.EmbeddingComparison.fit")
mocker.patch("embedding.text_classifier_class.EmbeddingComparison.save_model")
mocker.patch("shutil.move")
callback = MockCallback()
mocker.patch.object(callback, "wait_to_save", create=True, new=dummy_async)
mocker.patch.object(callback, "report_progress", create=True)
mocker.patch.object(callback, "check_for_cancel", create=True)
with tempfile.TemporaryDirectory() as tempdir:
ai_path = Path(tempdir)
train_file = ai_path / ait.AI_TRAINING_STANDARD_FILE_NAME
with train_file.open("w") as file_handle:
file_handle.write(DUMMY_TRAINING_DATA)
msg = ait.training_process.TrainingMessage(ai_path, DUMMY_AIID, 0)
topic = None
await mocked_train.train(msg, topic, callback)
|
{"hexsha": "3c7cd95cc149af3c8b1a2108a6a2a912d3f88be8", "size": 6387, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/embedding/tests/test_embedding_training.py", "max_stars_repo_name": "hutomadotAI/qamatcher", "max_stars_repo_head_hexsha": "0ece9bc354aea0c104cce7f3f372aa8e83a7601b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2019-07-01T17:48:47.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-29T21:00:50.000Z", "max_issues_repo_path": "src/embedding/tests/test_embedding_training.py", "max_issues_repo_name": "hutomadotAI/qamatcher", "max_issues_repo_head_hexsha": "0ece9bc354aea0c104cce7f3f372aa8e83a7601b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/embedding/tests/test_embedding_training.py", "max_forks_repo_name": "hutomadotAI/qamatcher", "max_forks_repo_head_hexsha": "0ece9bc354aea0c104cce7f3f372aa8e83a7601b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-09-17T08:08:45.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-17T08:08:45.000Z", "avg_line_length": 29.8457943925, "max_line_length": 82, "alphanum_fraction": 0.6189134179, "include": true, "reason": "import numpy", "num_tokens": 1507}
|
#! /usr/bin/python3
# -*- coding: utf-8 -*-
import numpy as np
import sys
from matplotlib import pyplot as plt
""" Change Layout Default Settings"""
def setRcParams():
params = {'axes.facecolor' : 'white',
'axes.labelsize' : 'x-large', # xx-large
'axes.titlesize' : 'x-large', # xx-large
'axes.titlepad' : '12',
'axes.formatter.limits' : '-4, 4',
'xtick.labelsize' : 'large', # xx-large
#'xtick.direction' : 'out',
'xtick.major.size' : '7',
'xtick.minor.size' : '4',
'xtick.major.width' : '1.6',
'xtick.minor.width' : '1.2',
'xtick.major.pad' : '7',
'xtick.minor.pad' : '6.8',
'ytick.major.size' : '7',
'ytick.minor.size' : '4',
'ytick.major.width' : '1.6',
'ytick.minor.width' : '1.2',
'ytick.major.pad' : '7',
'ytick.minor.pad' : '6.8',
#'ytick.direction' : 'out',
'ytick.labelsize' : 'xx-large',
'legend.fontsize' : 'x-large',
'image.cmap' : 'jet',
'savefig.dpi' : '300',
'savefig.transparent' : 'False'}
plt.rcParams.update(params)
def colorGen():
colors = ['g', 'red', 'y', 'b']
num = 0
while True:
yield colors[num]
num = (num+1) % len(colors)
def readCycleCount(fname):
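    """Parse a cycle-count results file.

    Format inferred from the parsing below (an assumption, not a documented
    spec): lines starting with '#' are comments; each configuration is one
    header line of ten space-separated integers, followed by
    (maxArr - minArr) data lines of integers holding the per-array counts
    and min/mean/max cycle numbers."""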
workers = []
cycleMin = []
cycleMean = []
cycleMax = []
f = open(fname, 'r')
while True:
line = f.readline()
if not line:
break
if line[0] == '#':
continue
chunkSize, maxChunksPerAlloc, mallocMC, \
waR, tsR, gs, bs, arrCnt, minArr, maxArr = [int(s) for s in line.split(' ')]
workers.append(gs*bs)
cmin, csum, cmax = 2e18, 0, 0
for i in range(minArr, maxArr):
line = f.readline()
n = [int(s) for s in line.split(' ')]
cnt = n[1]
cmin = min(n[4], cmin) if cnt > 0 else cmin
csum, cmax = csum + cnt*n[5], max(n[6], cmax)
cycleMin.append(cmin)
cycleMean.append(csum/tsR)
cycleMax.append(cmax)
f.close()
return workers, cycleMin, cycleMean, cycleMax
def visualizeCycleCount(fnameList, legList, title, showLegend=True):
setRcParams()
bx = 10
fig = plt.figure("mC", figsize=(8, 6))
ax = fig.add_subplot(111)
# legList = ['new', 'MC1', 'MC2', 'MC3']
for fname, color, leg in zip(fnameList, colorGen(), legList):
print (fname)
workers, cmin, cmean, cmax = readCycleCount(fname)
ax.loglog(workers, cmin, ls='--', basex=bx, color=color, lw=0.5)
ax.loglog(workers, cmean, basex=bx, color=color, label=leg)
ax.loglog(workers, cmax, ls='--', basex=bx, color=color, lw=0.5)
ax.set_xlabel("# workers")
ax.set_ylabel("# cycles")
ax.set_title(title)
if showLegend:
ax.legend()
plt.show()
if __name__ == "__main__":
if len(sys.argv) > 2 and len(sys.argv) % 2 == 0:
visualizeCycleCount(sys.argv[2::2], sys.argv[3::2], sys.argv[1])
elif len(sys.argv) == 3:
visualizeCycleCount([sys.argv[2]], ['0'], sys.argv[1], showLegend=False)
else:
print ("Shows a single diagram of needed cycles to allocate memory")
print ("First param: diagram title")
print ("Every next two [optional params]: fname legendName")
print ("If only one file name is given the legend name can be omitted")
|
{"hexsha": "fae47b9aad467b49c40c33a5a15d839b30738e2d", "size": 3339, "ext": "py", "lang": "Python", "max_stars_repo_path": "performanceTests/results/visualize.py", "max_stars_repo_name": "formelfritz/mallocMC", "max_stars_repo_head_hexsha": "d1dba808abf63de39db56587d65b9f497f4e7a41", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "performanceTests/results/visualize.py", "max_issues_repo_name": "formelfritz/mallocMC", "max_issues_repo_head_hexsha": "d1dba808abf63de39db56587d65b9f497f4e7a41", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "performanceTests/results/visualize.py", "max_forks_repo_name": "formelfritz/mallocMC", "max_forks_repo_head_hexsha": "d1dba808abf63de39db56587d65b9f497f4e7a41", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.5, "max_line_length": 84, "alphanum_fraction": 0.5630428272, "include": true, "reason": "import numpy", "num_tokens": 1034}
|
#! /usr/bin/python
from tools import *
import numpy
import random
import math
import time
import sys
from solvers import dynamicKnapsack
'''
Parametric optimizers for 0/1 knapsack -
* Simulated annealer
* Random Search
'''
# Simulated annealer - optimizes solutions to 0/1 knapsack. The climber can
# get close to (or reach) the optimum given enough iterations and a
# suitable cooling factor.
# Returns the list of best knapsack values observed at each evaluation interval
def simulatedAnnealer(values,weights,limit,intervals,bound):
# Starting temperature
temp = 1000
# Cooling interval
interval = 250
# Cooling factor
coolFactor = 0.99
# Initial individual
pack = generatePack(values,weights,limit)
g = []
for x in range(len(pack)):
g.append(pack[x])
# Evaluations of fitness function performed
evals = 0
# Historical Fitness values
histFit = []
# The highest fitness observed so far
topFitness = fitness(pack,values)
while (True):
for i in range(interval):
rand = random.randint(0,len(values)-1)
if (g[rand] == 0):
if (weight(g,weights) + weights[rand] <= limit):
g[rand] = 1
f = fitness(g,values)
if (f > topFitness):
topFitness = f
else:
prob = math.exp(-values[rand]/temp)
if (random.uniform(0,1) <= prob):
g[rand] = 0
evals += 1
if (evals in intervals):
histFit.append(topFitness)
if (evals >= bound):
return histFit
temp *= coolFactor
# Randomly search the state space of the 0/1 knapsack
# Returns the list of best knapsack values observed at each evaluation interval
def randomSearch(values,weights,limit,intervals,bound):
topFitness = -1
# Historical Fitness values
histFit = []
for i in range(1,bound+1):
g = generatePack(values,weights,limit)
f = fitness(g,values)
if (f > topFitness):
topFitness = f
if (i in intervals):
histFit.append(topFitness)
return histFit
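# Minimal usage sketch (assumes the helpers imported from `tools` above --
# generatePack, fitness, weight -- behave as used in the optimizers; the
# instance values here are made up for illustration):
if __name__ == "__main__":
    random.seed(0)
    values = [random.randint(1, 100) for _ in range(50)]
    weights = [random.randint(1, 100) for _ in range(50)]
    limit = 500
    intervals = [250, 500, 1000]
    bound = 1000
    print("Annealer best-so-far:", simulatedAnnealer(values, weights, limit, intervals, bound))
    print("Random best-so-far:  ", randomSearch(values, weights, limit, intervals, bound))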
|
{"hexsha": "7bb8e949bb8fff29d92d8a55ec1a8e41330dfdd9", "size": 1787, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/parametric.py", "max_stars_repo_name": "lbenning/Knapsack", "max_stars_repo_head_hexsha": "1b06409bafc04210837b984fb638804794faada6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2015-07-26T19:59:25.000Z", "max_stars_repo_stars_event_max_datetime": "2018-12-13T15:39:44.000Z", "max_issues_repo_path": "src/parametric.py", "max_issues_repo_name": "lbenning/Knapsack", "max_issues_repo_head_hexsha": "1b06409bafc04210837b984fb638804794faada6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/parametric.py", "max_forks_repo_name": "lbenning/Knapsack", "max_forks_repo_head_hexsha": "1b06409bafc04210837b984fb638804794faada6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.1486486486, "max_line_length": 60, "alphanum_fraction": 0.6983771684, "include": true, "reason": "import numpy", "num_tokens": 507}
|
import tensorflow as tf
import numpy as np
EPSILON = 1e-6
class BN_Conv(object):
def __init__(self, var_scope, is_training, filter_width, in_channels, out_channels, dilation = 1):
self.var_scope = var_scope
self.filter_width = filter_width
self.in_channels = in_channels
self.out_channels = out_channels
self.dilation = dilation
self.is_training = is_training
self.decay = 0.9999
with tf.variable_scope(self.var_scope):
self.batch_mean_ema = tf.get_variable("mean_ema", [1, self.out_channels], tf.float32, tf.constant_initializer(0.0), trainable = False)
self.batch_var_ema = tf.get_variable("var_ema", [1, self.out_channels], tf.float32, tf.constant_initializer(1.0), trainable = False) #we need to implement this for validation/test.
self.filters = tf.get_variable("bn_filter", [self.filter_width, self.in_channels, self.out_channels], tf.float32, tf.random_normal_initializer(0.0, 0.05)) #tf.contrib.layers.variance_scaling_initializer())
self.gamma = tf.get_variable("bn_gamma", [1, self.out_channels], tf.float32, tf.constant_initializer(1.0))
self.beta = tf.get_variable("bn_beta", [1, self.out_channels], tf.float32, tf.constant_initializer(0.0))
def activated_on(self, x, strides = [1], padding = "SAME"):
with tf.variable_scope(self.var_scope):
pre_act = tf.nn.convolution(input = x, filter = self.filters, padding = padding, strides = strides, dilation_rate = [self.dilation])
if self.is_training:
batch_mean, batch_var = tf.nn.moments(pre_act, [0, 1])
update_mean = tf.assign(self.batch_mean_ema, self.batch_mean_ema * self.decay + batch_mean * (1. - self.decay))
update_var = tf.assign(self.batch_var_ema, self.batch_var_ema * self.decay + batch_var * (1. - self.decay))
with tf.control_dependencies([update_mean, update_var]):
transformed = (pre_act - batch_mean) / tf.sqrt(batch_var + EPSILON)
out = self.gamma * transformed + self.beta
return out
else:
transformed = (pre_act - self.batch_mean_ema) / tf.sqrt(self.batch_var_ema + EPSILON)
out = self.gamma * transformed + self.beta
return out
class BN_Conv_1x1(object):
def __init__(self, var_scope, is_training, in_channels, out_channels):
self.var_scope = var_scope
self.in_channels = in_channels
self.out_channels = out_channels
self.is_training = is_training
self.decay = 0.9999
with tf.variable_scope(self.var_scope):
self.batch_mean_ema = tf.get_variable("mean_ema", [1, self.out_channels], tf.float32, tf.constant_initializer(0.0), trainable = False)
self.batch_var_ema = tf.get_variable("var_ema", [1, self.out_channels], tf.float32, tf.constant_initializer(1.0), trainable = False) #we need to implement this for validation/test.
self.W = tf.get_variable("bn_1x1_filter", [1, self.in_channels, self.out_channels], tf.float32, tf.random_normal_initializer(0.0, 0.05)) #tf.contrib.layers.variance_scaling_initializer())
self.gamma = tf.get_variable("bn_1x1_gamma", [1, self.out_channels], tf.float32, tf.constant_initializer(1.0))
self.beta = tf.get_variable("bn_1x1_beta", [1, self.out_channels], tf.float32, tf.constant_initializer(0.0))
def activated_on(self, x, stride = 1, padding = "SAME"):
with tf.variable_scope(self.var_scope):
pre_act_x = tf.nn.conv1d(x, self.W, stride = stride, padding = padding)
if self.is_training:
batch_mean, batch_var = tf.nn.moments(pre_act_x, [0, 1])
update_mean = tf.assign(self.batch_mean_ema, self.batch_mean_ema * self.decay + batch_mean * (1. - self.decay))
update_var = tf.assign(self.batch_var_ema, self.batch_var_ema * self.decay + batch_var * (1. - self.decay))
with tf.control_dependencies([update_mean, update_var]):
transformed = (pre_act_x - batch_mean) / tf.sqrt(batch_var + EPSILON)
out = self.gamma * transformed + self.beta
return out
else: #then we're in validation/test and we want to use
transformed = (pre_act_x - self.batch_mean_ema) / tf.sqrt(self.batch_var_ema + EPSILON)
out = self.gamma * transformed + self.beta
return out
class BN_Deconv_1D(object):
def __init__(self, prefix, is_training, in_width, out_width, in_channels, out_channels, stride, batch_size):
self.batch_size = batch_size
self.in_width = in_width
self.in_channels = in_channels
self.out_width = out_width
self.out_channels = out_channels
self.stride = stride
self.var_scope = "deconv_%s"
self.is_training = is_training
self.decay = 0.9999
with tf.variable_scope(self.var_scope):
self.batch_mean_ema = tf.get_variable("mean_ema", [self.out_channels], tf.float32, tf.constant_initializer(0.0), trainable = False)
self.batch_var_ema = tf.get_variable("var_ema", [self.out_channels], tf.float32, tf.constant_initializer(1.0), trainable = False) #we need to implement this for validation/test.
self.filter = tf.get_variable("deconv_filter", [1, out_width // in_width, out_channels, in_channels], tf.float32, tf.random_normal_initializer(0.0, 0.05)) #tf.contrib.layers.variance_scaling_initializer())
self.gamma = tf.get_variable("gamma", [out_channels], tf.float32, tf.constant_initializer(1.0))
self.beta = tf.get_variable("beta", [out_channels], tf.float32, tf.constant_initializer(0.0))
def activated_on(self, x, r_max = None, d_max = None):
#x = tf.reshape(x, [self.batch_size, 1, self.in_width, self.in_channels])
with tf.variable_scope(self.var_scope):
x = tf.expand_dims(x, [1]) #adds a height dimension.
pre_act = tf.nn.conv2d_transpose(x, self.filter, output_shape = [self.batch_size, 1, self.out_width, self.out_channels], strides=self.stride)
pre_act = tf.squeeze(pre_act, [1])
if self.is_training:
batch_mean, batch_var = tf.nn.moments(pre_act, [0, 1])
update_mean = tf.assign(self.batch_mean_ema, self.batch_mean_ema * self.decay + batch_mean * (1. - self.decay))
update_var = tf.assign(self.batch_var_ema, self.batch_var_ema * self.decay + batch_var * (1. - self.decay))
with tf.control_dependencies([update_mean, update_var]):
transformed = (pre_act - batch_mean) / tf.sqrt(batch_var + EPSILON)
out = self.gamma * transformed + self.beta
return out
else:
transformed = (pre_act - self.batch_mean_ema) / tf.sqrt(self.batch_var_ema + EPSILON)
out = self.gamma * transformed + self.beta
return out
class BN_Dense(object):
"""Batch_Norm_Dense :: inherits object
Simple dense matrix multiplication layer with batch normalization."""
def __init__(self, var_scope, is_training, in_width, out_width):
"""__init__(self, var_scope, in_width, out_width):
var_scope -> names for variables. Good for restoration of variables after training/organization.
in_width/out_width -> width of input/hidden layers for the dense operation."""
self.var_scope = var_scope
self.in_width = in_width
self.out_width = out_width
self.is_training = is_training
self.decay = 0.9999
with tf.variable_scope(self.var_scope):
self.batch_mean_ema = tf.get_variable("mean_ema", [1, self.out_width], tf.float32, tf.constant_initializer(0.0), trainable = False)
self.batch_var_ema = tf.get_variable("var_ema", [1, self.out_width], tf.float32, tf.constant_initializer(1.0), trainable = False) #we need to implement this for validation/test.
self.W = tf.get_variable("bn_dense_W", [self.in_width, self.out_width], tf.float32, tf.random_normal_initializer(0.0, 0.05)) #tf.contrib.layers.variance_scaling_initializer())
self.gamma = tf.get_variable("bn_dense_gamma", [1, self.out_width], tf.float32, tf.constant_initializer(1.0))
self.beta = tf.get_variable("bn_dense_beta", [1, self.out_width], tf.float32, tf.constant_initializer(0.0))
def activated_on(self, x, init=False):
with tf.variable_scope(self.var_scope):
pre_act = tf.matmul(x, self.W)
if self.is_training:
batch_mean, batch_var = tf.nn.moments(pre_act, [0])
update_mean = tf.assign(self.batch_mean_ema, self.batch_mean_ema * self.decay + batch_mean * (1. - self.decay))
update_var = tf.assign(self.batch_var_ema, self.batch_var_ema * self.decay + batch_var * (1. - self.decay))
with tf.control_dependencies([update_mean, update_var]):
transformed = (pre_act - batch_mean) / tf.sqrt(batch_var + EPSILON)
normalized = self.gamma * transformed + self.beta
return normalized
else:
transformed = (pre_act - self.batch_mean_ema) / tf.sqrt(self.batch_var_ema + EPSILON)
return self.gamma * transformed + self.beta
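# Minimal smoke test -- a sketch, not part of the original API. It assumes
# the TF1-style graph/session workflow implied by the tf.get_variable calls
# above; the widths and batch size are hypothetical.
if __name__ == "__main__":
    x = tf.placeholder(tf.float32, [None, 128])
    dense = BN_Dense("bn_dense_demo", is_training=True, in_width=128, out_width=64)
    h = tf.nn.relu(dense.activated_on(x))
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Running h also triggers the EMA update ops via control_dependencies.
        out = sess.run(h, feed_dict={x: np.random.randn(32, 128).astype(np.float32)})
        print(out.shape)  # expected: (32, 64)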
|
{"hexsha": "ed1c014c81f500fd0da0501b2680997a244452fe", "size": 8427, "ext": "py", "lang": "Python", "max_stars_repo_path": "BN_layers.py", "max_stars_repo_name": "wanglabcumc/VariationalHomologEncoder", "max_stars_repo_head_hexsha": "b2ae5244bd651042fbe29e1a3769c07122c8c145", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "BN_layers.py", "max_issues_repo_name": "wanglabcumc/VariationalHomologEncoder", "max_issues_repo_head_hexsha": "b2ae5244bd651042fbe29e1a3769c07122c8c145", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "BN_layers.py", "max_forks_repo_name": "wanglabcumc/VariationalHomologEncoder", "max_forks_repo_head_hexsha": "b2ae5244bd651042fbe29e1a3769c07122c8c145", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 52.66875, "max_line_length": 208, "alphanum_fraction": 0.7350183933, "include": true, "reason": "import numpy", "num_tokens": 2263}
|
from couplib.myreportservice import *
from couplib.constants import *
from configuration import *
from math import *
import numpy as np
#-------------------------------------------------------------------------------
class AtomInterface():
"""Interface class for the ionfromation about atoms (primarely read from PDB file)"""
def __init__(self, x, y, z, ElSym, AtomPDBID = ATOM_PDB_ID_NONE, AtomName = ATOM_PDB_NAME_NONE, AltLoc = ATOM_PDB_ALT_LOC_NONE, ResID = ATOM_PDB_RES_ID_NONE, ResName=ATOM_PDB_RES_NAME_NONE, ChainID=ATOM_PDB_CHAIN_ID_NONE):
"""Initialize with the PDB properties"""
#Cartesian coordinates of the atom in Angstroms
self.x = x
self.y = y
self.z = z
self.ElSym = ElSym #Element symbol from the PDB file
self.AtomPDBID = AtomPDBID #Atom PDB ID (unique)
self.AtomName = AtomName #Atom PDB name
self.AltLoc = AltLoc #Alternate location indicator
self.ResID = ResID # Atom PDB residue ID (same for all atoms of given fragment)
self.ResName = ResName #Atom PDB residue name (same for all atoms of given fragment)
self.ChainID = ChainID
return
def MyPrint(self, ID = -1, ALabelLen = STR_LEN_ALABEL, Round_XYZ = INT_ROUND, XYZ_Len = STR_LEN_FLOAT):
"""Print atom"""
if (ID == -1 ):
ID = self.AtomPDBID
print("{} {} {} {}".format( str(self.ElSym+str(ID)+str(self.AltLoc)).ljust(ALabelLen), str(round(self.x,Round_XYZ)).ljust(XYZ_Len),
str(round(self.y,Round_XYZ)).ljust(XYZ_Len), str(round(self.z,Round_XYZ)).ljust(XYZ_Len)))
#-------------------------------------------------------------------------------
class OriginInterface():
"""Interface class for the information about origins of fragments"""
def __init__(self, x, y, z):
#Cartesian coordinates of the atom in Angstroms
self.x = x
self.y = y
self.z = z
return
def GetNPArray(self):
"""Return NumPy Array"""
return np.asarray([self.x, self.y, self.z])
def MyPrint(self):
"""Print origin"""
print("({} {} {})".format(str(round(self.x,INT_ROUND)).ljust(STR_LEN_FLOAT),
str(round(self.y,INT_ROUND)).ljust(STR_LEN_FLOAT),
str(round(self.z,INT_ROUND)).ljust(STR_LEN_FLOAT)))
return
#-------------------------------------------------------------------------------
class QDVibInterface():
"""Interface class for the information about quamtum dynamics paramerters of fragments"""
def __init__(self, ExStID, VibModeID, VibModeID_Internal, Vib_cm1, ElVibCoupl_cm1, Vib_Decay_ps1):
self.ExStID = ExStID
self.VibModeID = VibModeID
self.VibModeID_Internal = VibModeID_Internal
self.Vib_cm1 = Vib_cm1
self.ElVibCoupl_cm1 = ElVibCoupl_cm1
self.Vib_Decay_ps1 = Vib_Decay_ps1
return
def MyPrint(self):
"""Print quantum dynamics parameters"""
print("{} {} {} {} {}".format(str(self.ExStID).ljust(STR_LEN_FLOAT),
str(self.VibModeID).ljust(STR_LEN_FLOAT),
str(round(self.Vib_cm1,INT_ROUND)).ljust(STR_LEN_FLOAT),
str(round(self.ElVibCoupl_cm1,INT_ROUND)).ljust(STR_LEN_FLOAT),
str(round(self.Vib_Decay_ps1,INT_ROUND)).ljust(STR_LEN_FLOAT)))
return
#-------------------------------------------------------------------------------
class ExciteStateInterface():
"""Interface class for the excitate state information"""
def __init__(self, ExStID, Abs_cm1, x, y, z, Ems_cm1, El_Deph_Rate_ps1, Epsilon_M1cm1,Phi_D,FlLifetime_s, FlLifetime_sb_s):
"""Initialize with the excited state properties"""
self.ExStID = ExStID #Excited state id (1-based index, read from an external file)
self.Abs_cm1 = Abs_cm1 #Absorption maximum, cm-1
if (Abs_cm1 != 0 ):
self.Abs_nm = CM1_NM/Abs_cm1 #Absorption maximum, nm
else:
self.Abs_nm = 0.0
#Transition dipole moment components (x,y,z) a.u.
self.x = x
self.y = y
self.z = z
self.Ems_cm1 = Ems_cm1 #Emission maximum, cm-1
if (Ems_cm1 != 0 ):
self.Ems_nm = CM1_NM/Ems_cm1 #Emission maximum, nm
else:
self.Ems_nm = 0.0
self.El_Deph_Rate_ps1 = El_Deph_Rate_ps1
self.Epsilon_M1cm1 = Epsilon_M1cm1 #Extinction coefficient
self.Phi_D = Phi_D #Fluorescence quantum yield
self.FlLifetime_s = FlLifetime_s #fluorescence lifetime from input in s
self.FlLifetime_sb_s = FlLifetime_sb_s #Strickler-Berg fluorescence lifetime in s
self.Abs_Spec = [] #Absorption spectrum from input
self.Ems_Spec = [] #Emission spectrum from input
self.Abs_Spec_nm = [] #Absorption spectrum from input
self.Ems_Spec_nm = [] #Emission spectrum from input
self.Abs_Int_Lim_Low_nm = 0.0 #Lower integration limit of absorption spec. in nm
self.Abs_Int_Lim_Up_nm = 0.0 #Upper integration limit of absorption spec. in nm
self.Ems_Int_Lim_Low_nm = 0.0 #Lower integration limit of emission spec. in nm
self.Ems_Int_Lim_Up_nm = 0.0 #Upper integration limit of emission spec. in nm
self.warning = "Warning: zero transition dipole moment!"
return
#-------------------------------------------------------------------------------
def MyPrint(self,JobType=""):
"""Print excited state properites (does not print quantum dynamics parameters e.g. el. dephasing rate)"""
Norm = sqrt(self.x**2+self.y**2+self.z**2)
NormD = Norm*TDM_auToDebye
Format_CM1 = 2
Format_nm = 1
INTX_LEN = 5
#Special print if calculation of lifetimes only
if ( JobType == CFG_MET_ARX_LFT):
print("{}\t{}\t{}\t{}\t{}".format(
str(self.ExStID).ljust(INTX_LEN),
str(round(self.Abs_nm,Format_nm)).ljust(STR_LEN_FLOAT),
str(round(self.Ems_nm,Format_nm)).ljust(STR_LEN_FLOAT),
str(round(self.Epsilon_M1cm1,Format_CM1)).ljust(STR_LEN_FLOAT),
str(round(self.Phi_D,Format_CM1)).ljust(STR_LEN_FLOAT)))
else:
print("{} {} ({} {} {}) {} {} {}".format( str(self.ExStID).ljust(INTX_LEN),
str(round(self.Abs_nm,Format_nm)).ljust(STR_LEN_FLOAT),
str(round(self.x,INT_ROUND)).ljust(STR_LEN_FLOAT),
str(round(self.y,INT_ROUND)).ljust(STR_LEN_FLOAT),
str(round(self.z,INT_ROUND)).ljust(STR_LEN_FLOAT),
str(round(Norm,INT_ROUND)).ljust(STR_LEN_FLOAT),
str(round(NormD,INT_ROUND)).ljust(STR_LEN_FLOAT),
str(round(self.Ems_nm,Format_nm)).ljust(STR_LEN_FLOAT)), end=' ')
if ( Norm == 0.0):
print(self.warning)
else:
print()
return
def MyTDMPrint(self):
"""Print excited state transition dipole momment"""
Norm = sqrt(self.x**2+self.y**2+self.z**2)
print("{} ({} {} {}) {}".format( str(self.ExStID).ljust(3),
str(round(self.x,INT_ROUND)).ljust(STR_LEN_FLOAT),
str(round(self.y,INT_ROUND)).ljust(STR_LEN_FLOAT),
str(round(self.z,INT_ROUND)).ljust(STR_LEN_FLOAT), round(Norm,INT_ROUND)), end=' ')
return
#-------------------------------------------------------------------------------
class CouplingInterface():
"""Interface class for the excitate state information"""
def __init__(self, State1 = 0, State2 = 0, R = 0, AMuAMuD = 0, OriFact = 0, OriPercent = 0, K = 0, Coupl = 0, ElScreening = 0, ScreenedCoupl = 0):
"""Initialize with the coupling properties"""
#Excited states under consideration
self.State1 = State1
self.State2 = State2
self.R = R #interfragment distance in angstroms, A
self.OriFact = OriFact # Orientation factor (-2...2) unitless
self.OriPercent = OriPercent # Normalized orientation percent
self.AMuAMuD = AMuAMuD # Product of absolute values of transition dipole moments in a.u.^2
self.K = K # Distance-independent factor (product of absolute values of transition dipole moments in a.u.^2 and the orientation factor)
self.Coupl = Coupl #Forster coupling in Hartrees
self.ElScreening = ElScreening #Electrostatic screening
self.ScreenedCoupl = ScreenedCoupl #Screened coupling in Hartrees
return
#-------------------------------------------------------------------------------
class ResonanceInterface():
"""Interface class for the information about detected resonances (matching excitation energies of fragments) """
def __init__(self, Ecm1_1, Ecm1_2, Diff, Overlap, Flag):
"""Initialize with the resonance properties"""
#Excitation energies of states under consideration in cm-1
self.Ecm1_1 = Ecm1_1
self.Ecm1_2 = Ecm1_2
self.Diff = Diff #Difference of excitation energies in cm-1
self.Flag = Flag # true = resonance, false = no resonance
return
#-------------------------------------------------------------------------------
class OverlapInterface():
"""Interface class for the information about overlap of spectra """
def __init__(self, Overlap_M1cm1nm4 = 0.0, Flag = True):
"""Initialize with overlaps"""
self.Overlap_M1cm1nm4 = Overlap_M1cm1nm4 #Spectral overlap
self.Flag = Flag #true = overlaps are available
return
#-------------------------------------------------------------------------------
class RateInterface():
"""Interface class for Forster rates """
def __init__(self, Rate_s1 = 0.0, BoltzmannFactor = 0.0, AlphaCorrection=0.0):
"""Initialize with rates"""
self.Rate_s1 = Rate_s1 #Rate in s-1
self.BoltzmannFactor = BoltzmannFactor
self.AlphaCorrection = AlphaCorrection
return
|
{"hexsha": "9c3f589d0bc8ae11c54cb56221ffbf854333066e", "size": 9047, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/interfaces.py", "max_stars_repo_name": "DKosenkov/PyFREC", "max_stars_repo_head_hexsha": "a578f649b1309f1e23412e3695cce2b9e48fda3d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/interfaces.py", "max_issues_repo_name": "DKosenkov/PyFREC", "max_issues_repo_head_hexsha": "a578f649b1309f1e23412e3695cce2b9e48fda3d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/interfaces.py", "max_forks_repo_name": "DKosenkov/PyFREC", "max_forks_repo_head_hexsha": "a578f649b1309f1e23412e3695cce2b9e48fda3d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.235, "max_line_length": 223, "alphanum_fraction": 0.6533657566, "include": true, "reason": "import numpy", "num_tokens": 2526}
|
import numpy as np
def multiclass_accuracy(prediction, ground_truth):
"""
Computes accuracy for multiclass classification
Arguments:
prediction, np array of int (num_samples) - model predictions
ground_truth, np array of int (num_samples) - true labels
Returns:
accuracy - ratio of accurate predictions to total samples
"""
#accuracy = np.mean(prediction == ground_truth)
count = len(prediction)
bool_idx = np.equal(prediction, ground_truth)
accuracy = np.sum(bool_idx) / count
return accuracy
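# Quick self-check with illustrative values (4 of 5 labels match):
if __name__ == "__main__":
    prediction = np.array([0, 1, 2, 2, 1])
    ground_truth = np.array([0, 1, 1, 2, 1])
    print(multiclass_accuracy(prediction, ground_truth))  # 0.8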
|
{"hexsha": "6add0c161a5ae57b20e87863390c4964d2fdb403", "size": 551, "ext": "py", "lang": "Python", "max_stars_repo_path": "assignments/assignment2/metrics.py", "max_stars_repo_name": "DenisYarullin/dlcourse_ai", "max_stars_repo_head_hexsha": "3e29c0c0ae59479424a14c391dca0948682d7fa5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "assignments/assignment2/metrics.py", "max_issues_repo_name": "DenisYarullin/dlcourse_ai", "max_issues_repo_head_hexsha": "3e29c0c0ae59479424a14c391dca0948682d7fa5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "assignments/assignment2/metrics.py", "max_forks_repo_name": "DenisYarullin/dlcourse_ai", "max_forks_repo_head_hexsha": "3e29c0c0ae59479424a14c391dca0948682d7fa5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.0454545455, "max_line_length": 65, "alphanum_fraction": 0.7078039927, "include": true, "reason": "import numpy", "num_tokens": 117}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# nnutil2 - Tensorflow utilities for training neural networks
# Copyright (c) 2019, Abdó Roig-Maranges <abdo.roig@gmail.com>
#
# This file is part of 'nnutil2'.
#
# This file may be modified and distributed under the terms of the 3-clause BSD
# license. See the LICENSE file for details.
import unittest
import numpy as np
import tensorflow as tf
import nnutil2 as nnu
class DataMerge(tf.test.TestCase):
def test_dataset_merge_1(self):
tf.random.set_seed(42)
ds1 = tf.data.Dataset.from_tensors({
'a': tf.constant(1, dtype=tf.int32)
})
ds2 = tf.data.Dataset.from_tensors({
'b': tf.constant(2, dtype=tf.int32),
'c': tf.constant(3, dtype=tf.int32)
})
ds = nnu.data.Merge([ds1, ds2])
with self.cached_session() as sess:
it1 = iter(ds1)
feature1 = sess.run(next(it1))
self.assertEqual({'a': 1}, feature1)
it = iter(ds)
feature = sess.run(next(it))
self.assertEqual({'a': 1, 'b': 2, 'c': 3}, feature)
if __name__ == '__main__':
tf.test.main()
|
{"hexsha": "c56f071f33f99f5b560e8d112d59df536553c39a", "size": 1167, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/data_merge.py", "max_stars_repo_name": "aroig/nnutil2", "max_stars_repo_head_hexsha": "1fc77df351d4eee1166688e25a94287a5cfa27c4", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/data_merge.py", "max_issues_repo_name": "aroig/nnutil2", "max_issues_repo_head_hexsha": "1fc77df351d4eee1166688e25a94287a5cfa27c4", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-11-13T18:33:29.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-25T15:55:57.000Z", "max_forks_repo_path": "tests/data_merge.py", "max_forks_repo_name": "aroig/nnutil2", "max_forks_repo_head_hexsha": "1fc77df351d4eee1166688e25a94287a5cfa27c4", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.9333333333, "max_line_length": 79, "alphanum_fraction": 0.5989717224, "include": true, "reason": "import numpy", "num_tokens": 308}
|
[STATEMENT]
lemma RSubmodule_RSpan_single :
assumes "m \<in> M"
shows "RSubmodule (RSpan [m])"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. RSubmodule (RSpan [m])
[PROOF STEP]
proof (rule RSubmoduleI)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. VecGroup.Subgroup (RSpan [m])
2. \<And>r n. \<lbrakk>r \<in> R; n \<in> RSpan [m]\<rbrakk> \<Longrightarrow> r \<cdot> n \<in> RSpan [m]
[PROOF STEP]
from assms
[PROOF STATE]
proof (chain)
picking this:
m \<in> M
[PROOF STEP]
show "Subgroup (RSpan [m])"
[PROOF STATE]
proof (prove)
using this:
m \<in> M
goal (1 subgoal):
1. VecGroup.Subgroup (RSpan [m])
[PROOF STEP]
using Group_RSpan_single RSpan_closed[of "[m]"]
[PROOF STATE]
proof (prove)
using this:
m \<in> M
?m \<in> M \<Longrightarrow> Group (RSpan [?m])
set [m] \<subseteq> M \<Longrightarrow> RSpan [m] \<subseteq> M
goal (1 subgoal):
1. VecGroup.Subgroup (RSpan [m])
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
VecGroup.Subgroup (RSpan [m])
goal (1 subgoal):
1. \<And>r n. \<lbrakk>r \<in> R; n \<in> RSpan [m]\<rbrakk> \<Longrightarrow> r \<cdot> n \<in> RSpan [m]
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>r n. \<lbrakk>r \<in> R; n \<in> RSpan [m]\<rbrakk> \<Longrightarrow> r \<cdot> n \<in> RSpan [m]
[PROOF STEP]
fix r n
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>r n. \<lbrakk>r \<in> R; n \<in> RSpan [m]\<rbrakk> \<Longrightarrow> r \<cdot> n \<in> RSpan [m]
[PROOF STEP]
assume rn: "r \<in> R" "n \<in> RSpan [m]"
[PROOF STATE]
proof (state)
this:
r \<in> R
n \<in> RSpan [m]
goal (1 subgoal):
1. \<And>r n. \<lbrakk>r \<in> R; n \<in> RSpan [m]\<rbrakk> \<Longrightarrow> r \<cdot> n \<in> RSpan [m]
[PROOF STEP]
from rn(2)
[PROOF STATE]
proof (chain)
picking this:
n \<in> RSpan [m]
[PROOF STEP]
obtain s where s: "s \<in> R" "n = s \<cdot> m"
[PROOF STATE]
proof (prove)
using this:
n \<in> RSpan [m]
goal (1 subgoal):
1. (\<And>s. \<lbrakk>s \<in> R; n = s \<cdot> m\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using RSpan_single
[PROOF STATE]
proof (prove)
using this:
n \<in> RSpan [m]
RSpan [?m] = {r \<cdot> ?m |r. r \<in> R}
goal (1 subgoal):
1. (\<And>s. \<lbrakk>s \<in> R; n = s \<cdot> m\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by fast
[PROOF STATE]
proof (state)
this:
s \<in> R
n = s \<cdot> m
goal (1 subgoal):
1. \<And>r n. \<lbrakk>r \<in> R; n \<in> RSpan [m]\<rbrakk> \<Longrightarrow> r \<cdot> n \<in> RSpan [m]
[PROOF STEP]
with assms rn(1)
[PROOF STATE]
proof (chain)
picking this:
m \<in> M
r \<in> R
s \<in> R
n = s \<cdot> m
[PROOF STEP]
have "r * s \<in> R" "r \<cdot> n = (r * s) \<cdot> m"
[PROOF STATE]
proof (prove)
using this:
m \<in> M
r \<in> R
s \<in> R
n = s \<cdot> m
goal (1 subgoal):
1. r * s \<in> R &&& r \<cdot> n = (r * s) \<cdot> m
[PROOF STEP]
using mult_closed
[PROOF STATE]
proof (prove)
using this:
m \<in> M
r \<in> R
s \<in> R
n = s \<cdot> m
\<lbrakk>?r \<in> R; ?s \<in> R\<rbrakk> \<Longrightarrow> ?r * ?s \<in> R
goal (1 subgoal):
1. r * s \<in> R &&& r \<cdot> n = (r * s) \<cdot> m
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
r * s \<in> R
r \<cdot> n = (r * s) \<cdot> m
goal (1 subgoal):
1. \<And>r n. \<lbrakk>r \<in> R; n \<in> RSpan [m]\<rbrakk> \<Longrightarrow> r \<cdot> n \<in> RSpan [m]
[PROOF STEP]
thus "r \<cdot> n \<in> RSpan [m]"
[PROOF STATE]
proof (prove)
using this:
r * s \<in> R
r \<cdot> n = (r * s) \<cdot> m
goal (1 subgoal):
1. r \<cdot> n \<in> RSpan [m]
[PROOF STEP]
using RSpan_single
[PROOF STATE]
proof (prove)
using this:
r * s \<in> R
r \<cdot> n = (r * s) \<cdot> m
RSpan [?m] = {r \<cdot> ?m |r. r \<in> R}
goal (1 subgoal):
1. r \<cdot> n \<in> RSpan [m]
[PROOF STEP]
by fast
[PROOF STATE]
proof (state)
this:
r \<cdot> n \<in> RSpan [m]
goal:
No subgoals!
[PROOF STEP]
qed
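(* Informal summary of the key step: any n ∈ RSpan [m] has the form s ⋅ m
   with s ∈ R, so r ⋅ n = r ⋅ (s ⋅ m) = (r * s) ⋅ m lies in RSpan [m]
   because R is closed under multiplication. *)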
|
{"llama_tokens": 1802, "file": "Rep_Fin_Groups_Rep_Fin_Groups", "length": 20}
|
# import some common detectron2 utilities
from detectron2.engine import DefaultPredictor, DefaultTrainer
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets import register_coco_instances
from detectron2.evaluation import FLIREvaluator, inference_on_dataset
from detectron2.data import build_detection_test_loader
from tools.plain_train_net import do_test
from os import listdir
from os.path import isfile, join
import numpy as np
import cv2
import os
import pdb
import torch
import pdb
from detectron2.data import build_detection_train_loader
from detectron2.data import transforms as T
from detectron2.data import detection_utils as utils
def test(cfg, dataset_name):
cfg.DATASETS.TEST = (dataset_name, )
predictor = DefaultPredictor(cfg)
evaluator_FLIR = FLIREvaluator(dataset_name, cfg, False, output_dir=out_folder, out_pr_name='pr_val.png')
#DefaultTrainer.test(cfg, trainer.model, evaluators=evaluator_FLIR)
val_loader = build_detection_test_loader(cfg, dataset_name)
inference_on_dataset(predictor.model, val_loader, evaluator_FLIR)
#Set GPU
torch.cuda.set_device(0)
# get path
dataset = 'FLIR'
# Train path
train_path = '../../../Datasets/'+ dataset +'/train/'
train_folder = '../../../Datasets/FLIR/train/'
train_json_path = '../../../Datasets/'+dataset+'/train/thermal_annotations_4_channel_no_dogs_3_class.json'
# Validation path
val_path = '../../../Datasets/'+ dataset +'/val/'
val_folder = '../../../Datasets/FLIR/val/'
val_json_path = '../../../Datasets/'+dataset+'/val/thermal_annotations_4_channel_no_dogs_3_class.json'
print(train_json_path)
# Register dataset
dataset_train = 'FLIR_train'
register_coco_instances(dataset_train, {}, train_json_path, train_folder)
FLIR_metadata_train = MetadataCatalog.get(dataset_train)
dataset_dicts_train = DatasetCatalog.get(dataset_train)
# Test on validation set
dataset_test = 'FLIR_val'
register_coco_instances(dataset_test, {}, val_json_path, val_folder)
FLIR_metadata_test = MetadataCatalog.get(dataset_test)
dataset_dicts_test = DatasetCatalog.get(dataset_test)
model = 'faster_rcnn_R_101_FPN_3x'
#files_names = [f for f in listdir(train_path) if isfile(join(train_path, f))]
out_folder = 'output_mid_fusion_3_class_1'
out_model_path = os.path.join(out_folder, 'out_model_final.pth')
if not os.path.exists(out_folder):
os.mkdir(out_folder)
# Create config
cfg = get_cfg()
cfg.OUTPUT_DIR = out_folder
cfg.merge_from_file("./configs/FLIR-Detection/faster_rcnn_R_101_FLIR.yaml")
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set threshold for this model
# Open middle level fusion
# Train config
cfg.DATASETS.TRAIN = (dataset_train,)
cfg.DATASETS.TEST = (dataset_test, )
#cfg.TEST.EVAL_PERIOD = 50
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512 # RoI batch size per image (512 is the detectron2 default)
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set the testing threshold for this model
###### Performance tuning ########
cfg.SOLVER.IMS_PER_BATCH = 2
cfg.SOLVER.BASE_LR = 0.001 # pick a good LR
cfg.SOLVER.MAX_ITER = 50000
#-------------------------------------------- Get pretrained RGB parameters -------------------------------------#
###### Parameter for RGB channel input ####
cfg.MODEL.WEIGHTS = "detectron2://COCO-Detection/faster_rcnn_R_101_FPN_3x/137851257/model_final_f6e8b1.pkl"
cfg.MODEL.BACKBONE.FREEZE_AT = 0
cfg.INPUT.FORMAT = 'BGR'
cfg.INPUT.NUM_IN_CHANNELS = 3
cfg.MODEL.PIXEL_MEAN = [103.530, 116.280, 123.675]
cfg.MODEL.PIXEL_STD = [1.0, 1.0, 1.0]
#cfg.MODEL.BLUR_RGB = True
cfg.MODEL.MAX_POOL_RGB = False
#########################################
os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
from detectron2.modeling import build_model
model_ther = build_model(cfg)
param_thr = list(model_ther.backbone.bottom_up.stem.parameters())
param_thr = param_thr[0]
param_backbone = list(model_ther.backbone.parameters())
param_roi = list(model_ther.roi_heads.parameters())
param_rpn_head = list(model_ther.proposal_generator.rpn_head.parameters())
del model_ther
########### Parameters for thermal ##############
# Get thermal weights
cfg.MODEL.WEIGHTS = 'good_model/3_class/thermal_only/out_model_iter_15000.pth'
model_ther = build_model(cfg)
param_backbone_2 = list(model_ther.backbone.parameters())
del model_ther
#-------------------------------------------------- End --------------------------------------------------#
# for 6 inputs
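# param_cat below concatenates a zero tensor shaped like the thermal stem
# filter with that filter along dim 1 (the input-channel axis), yielding a
# 6-in-channel first conv: the BGR half starts at zero, the TTT half reuses
# the thermal stem parameters.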
param_rgb = param_thr.clone()
param_rgb = param_rgb.data.fill_(0)
param_cat = torch.cat((param_rgb, param_thr), 1)
# Set for training 6 inputs
cfg.INPUT.FORMAT = 'BGRTTT'
cfg.INPUT.NUM_IN_CHANNELS = 6  # 3 (BGR) + 3 (thermal)
cfg.MODEL.PIXEL_MEAN = [103.530, 116.280, 123.675, 135.438, 135.438, 135.438]
cfg.MODEL.PIXEL_STD = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
cfg.MODEL.WEIGHTS = "detectron2://COCO-Detection/faster_rcnn_R_101_FPN_3x/137851257/model_final_f6e8b1.pkl"
eval_every_iter = 1000
num_loops = cfg.SOLVER.MAX_ITER // eval_every_iter
cfg.SOLVER.MAX_ITER = eval_every_iter
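# Train in chunks of eval_every_iter iterations (50 chunks of 1000 with the
# settings above), checkpointing and evaluating after each chunk.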
cfg.DATALOADER.NUM_WORKERS = 2
trainer = DefaultTrainer(cfg)
trainer.resume_or_load(resume=False)
cnt = 0
with torch.no_grad():
trainer.model.backbone.weight = param_backbone
trainer.model.backbone_2.weight = param_backbone_2
trainer.model.backbone.bottom_up.stem.weight = param_cat
print("----Done loading parameters !!---")
del param_backbone, param_backbone_2, param_rpn_head, param_roi, param_rgb, param_thr, param_cat
for idx in range(num_loops):
print('============== The ', idx, ' * ', eval_every_iter, ' iterations ============')
if idx > 0:
cfg.MODEL.WEIGHTS = out_model_path
trainer = DefaultTrainer(cfg)
trainer.resume_or_load(resume=False)
out_name = 'out_model_iter_'+ str(idx*eval_every_iter) +'.pth'
out_model_path = os.path.join(out_folder, out_name)
trainer.train()
torch.save(trainer.model.state_dict(), out_model_path)
cfg.MODEL.WEIGHTS = out_model_path
# Evaluation on validation set
test(cfg, dataset_train)
test(cfg, dataset_test)
del trainer
|
{"hexsha": "be251b45e14d0b39587d2e1d3dbed1b6dfaa1ac0", "size": 6166, "ext": "py", "lang": "Python", "max_stars_repo_path": "demo/demo_train_middle_fusion.py", "max_stars_repo_name": "Jamie725/RGBT-detection", "max_stars_repo_head_hexsha": "e7741bf0a8bdfb940794248a6d3247e4a5025dc4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2021-04-08T07:32:54.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T09:57:20.000Z", "max_issues_repo_path": "demo/demo_train_middle_fusion.py", "max_issues_repo_name": "Jamie725/RGBT-detection", "max_issues_repo_head_hexsha": "e7741bf0a8bdfb940794248a6d3247e4a5025dc4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-04-28T08:22:51.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-13T05:35:02.000Z", "max_forks_repo_path": "demo/demo_train_middle_fusion.py", "max_forks_repo_name": "Jamie725/RGBT-detection", "max_forks_repo_head_hexsha": "e7741bf0a8bdfb940794248a6d3247e4a5025dc4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-07-18T18:47:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-01T15:59:17.000Z", "avg_line_length": 38.7798742138, "max_line_length": 114, "alphanum_fraction": 0.7366201752, "include": true, "reason": "import numpy", "num_tokens": 1615}
|
##################################################################################
# Woldenberg a la Bonica
# This code runs a Bonica-like algorithm to provide a dynamic view of Woldenberg's
# IFE. The first run has already been saved ("posterior-samples/wold23-window-results-compress.RData")
# March 19, 2013: Add party-based priors for councilors coming in from outside
##################################################################################
library (arm)
library (MCMCpack)
library (foreign)
library (car)
library (gtools)
#library (multicore)
library (R2jags)
library (mcmcplots)
library (sm)
library (lubridate)
rm(list = ls())
workdir <- c("/home/eric/Dropbox/data/rollcall/ife_cg/ife-update/data/")
setwd(workdir)
# Define colors and plotting names
ids <- matrix(c("Woldenberg", "woldenberg", "PRI", 23,
"Barragán", "barragan", "PRD", 23,
"Cantú", "cantu", "PRD", 23,
"Cárdenas", "cardenas", "PRD", 23,
"Lujambio", "lujambio", "PAN", 23,
"Merino", "merino", "PRI", 23,
"Molinar", "molinar", "PAN", 2 ,
"Peschard", "peschard", "PRI", 23,
"Zebadúa", "zebadua", "PRD", 2 ,
"Rivera", "rivera", "PRI", 3,
"Luken", "luken", "PAN", 3),
ncol = 4,
byrow = TRUE)
#
ids <- as.data.frame(ids, stringsAsFactors = FALSE)
colnames(ids) <- c("name", "column", "pty", "tenure")
ids$tenure <- as.numeric(ids$tenure)
ids <- within(ids, party <- ifelse (pty=="PRI", 1,
ifelse (pty=="PAN", 2,
ifelse (pty=="PRD", 3,
ifelse(pty=="PVEM", 4, 5)))))
ids <- within(ids, color <- ifelse (pty=="PRI", "red",
ifelse (pty=="PAN", "blue",
ifelse (pty=="PRD", "gold",
ifelse(pty=="PVEM", "green", "orangered4")))))
# select term 2, 3 or both
sel <- grep(pattern = "[23]", ids$tenure)
name <- ids$name[sel]
party <- ids$party[sel]
color <- ids$color[sel]
column <- ids$column[sel]
## rgb.23 <- c(length=11)
## rgb.23[c(1,6,8,10)] <- rgb(1, 0, 0, 0.6) #red
## rgb.23[c(2:4,9)] <- rgb(1, 215/255, 0, 0.6) #gold
## rgb.23[c(5,7,11)] <- rgb(0, 0, 1, 0.6) #blue
###############################################################################
## Read votes (includes informative votes only, exported by code/data-prep.r ##
###############################################################################
vot <-read.csv("v23.csv", header=TRUE)
#
# subset to chosen periods
sel.r <- which(vot$term %in% 2:3)
drop.c <- ids$column[grep(pattern = "[23]", ids$tenure)] # column names not in terms 2-3
drop.c <- setdiff(ids$column, drop.c)
drop.c <- which(colnames(vot) %in% drop.c)
if (length(drop.c)>0) vot <- vot[sel.r, -drop.c]
colnames(vot)
# total members
J <- length(name)
########################
## recode vote values ##
########################
vs <- vot[,1:J]
#table(v$albo, useNA = "always")
vs[vs==0] <- NA ## Version probit requiere 0s y 1s
vs[vs>2] <- NA
vs[vs==2] <- 0
# format dates
vot$date <- ymd(vot$date)
# summarize then drop uncontested votes
table(factor(vot$dunan, labels = c("contested","not")), vot$term, useNA = "ifany")
table(factor(vot$dunan, labels = c("contested","not")), useNA = "ifany")
sel <- which(vot$dunan==1)
vot <- vot[-sel,] # drop uncontested votes
vs <- vs [-sel,] # drop uncontested votes
###########################
### WOLDENBERG ###
###########################
## MODEL
model1Dj.irt <- function() {
for (j in 1:J){ ## loop over respondents
for (i in 1:I){ ## loop over items
v[j,i] ~ dbern(p[j,i]); ## voting rule
probit(p[j,i]) <- mu[j,i]; ## sets 0<p<1 as function of mu
mu[j,i] <- signal[i]*x[j] - difficulty[i]; ## utility differential
}
}
## priors ################
for (j in 1:J){
x[j] ~ dnorm (x.mean[j], x.tau[j]);
}
for (i in 1:I){
signal[i] ~ dnorm(0, 0.1);
difficulty[i] ~ dnorm(0, 0.25);
}
for (p in 1:3){
partyPos[p] <- mean (x[party[p]]); # 4mar21: should be median, unknown function in bugs?
}
}
#end model##############
# Center on vote (for date), extend windows to both sides
I <- nrow(vot)
item <- 1:I
inicio <- item-15; inicio[inicio<0] <- 0
final <- item+15; final[final>I] <- I
item.date <- vot$date
S <- length(inicio)
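## Illustration of the clamped windows (a note, no new computation): an
## interior item i spans votes (i-15):(i+15); item 1 is clamped to 0:16
## (index 0 selects nothing in R, leaving rows 1:16) and item I to (I-15):I.
## Inspect with, e.g.: cbind(item, inicio, final)[c(1, I), ]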
# Added March 19: We need a matrix showing whether each councilor is actually in IFE the moment the vote takes place
IsCouncilor <- matrix (1, ncol=J, nrow=S)
IsCouncilor[item.date > ymd(20001114), c(7,9)] <- NA # Last Molinar, Zebadua vote
IsCouncilor[item.date < ymd(20010130), c(10,11)] <- NA # First Rivera, Luken vote
# Initial ideal points to anchor ideological space
x.location <- c(1,2,0,-2,rep(0,7))
x.precision <- c(4,4,1,4,rep(1,7))
window.results <- list () ## ADD SESSION'S RESULTS TO OBJECT HOLDING ALL RESULTS
partyPlacement <- rep (NA,11)
x.mean <- numeric ()
x.tau <- numeric ()
s <- 1
## Save overall totals for use later (I J redefined to window s totals in next loop)
J.all <- J; I.all <- I
for (s in 1:S){ # <= BIG FUNCTION STARTS (loop over 552 windows)
# Added March 19: We include councilors (and their party IDs) only if they were actual councilors for at least one vote
# This means that the length of estimated ideal points is either
# 9 (for most votes) or 11 (when there is some overlap: two councilors are leaving , two are coming in)
councilor.in <- apply (IsCouncilor[inicio[s]:final[s],], 2, invalid)
councilors <- name [councilor.in==FALSE]
sponsors <- party[councilor.in==FALSE]
for (c in 1:J.all){
x.mean[c] <- ifelse (!is.na(x.location[c]), x.location[c], partyPlacement[sponsors[c]])
x.tau[c] <- ifelse (!is.na(x.precision[c]), x.precision[c], 4)
}
v <- vs[inicio[s]:final[s], 1:J.all][, councilor.in==FALSE]; ## EXTRACT 30 VOTES EACH TIME
v <- t(v) ## ROLL CALLS NEED ITEMS IN COLUMNS, LEGISLATORS IN ROWS
J <- nrow(v); I <- ncol(v) ## SESSION TOTALS
ife.data <- list ("J", "I", "v", "x.mean", "x.tau", "party")
ife.inits <- function (){
list (
x=rnorm(J),
signal=rnorm(I),
difficulty=rnorm(I)
)
}
ife.parameters <- c("x", "signal", "difficulty", "partyPos")
print(cat("Session no.", s, "of", S, ", with", I, "votes \n"))
#full JAGS run
start.time <- proc.time()
# Use dual core capabilities
results <-
# mclapply(1:2, function(x) {
# model.jags.re <- try(
jags (data=ife.data, inits=ife.inits, ife.parameters,
model.file=model1Dj.irt, n.chains=1,
# model.file=model1Dj.irt, n.chains=2,
# n.iter=600, n.burnin=300, n.thin=30)
n.iter=20000, n.burnin=10000, n.thin=100)
# )
# if(inherits(model.jags.re,"try-error")) {return()}
# return(model.jags.re)
# }, mc.cores=2 )
time.elapsed <- round(((proc.time()-start.time)[3])/60,2); rm(start.time)
print(cat("\tTime elapsed in estimation:", time.elapsed, "minutes", "\n")); rm(time.elapsed)
# ADD COUNCILOR NAMES AND VOTE INFO TO RESULTS OBJECT
results <- c(results, councilors=list(councilors)); # should be faster than results[[length(results)+1]] <- councilors;
results <- c(results, folio.date=list(vot[s,c("folio","dy","mo","yr")])); # add vote on which window is centered
window.results <- c(window.results, list(results)); # should be faster than window.results[length(window.results)+1] <- list(results) ## ADD SESSION'S RESULTS TO OBJECT HOLDING ALL RESULTS
# Update location of ideal point at time s, to be used as location prior at time s+1
x.location <- rep (NA, J.all)
x.precision <- rep (100, J.all)
# locs <- apply( rbind (results[[1]]$BUGSoutput$sims.list$x, results[[2]]$BUGSoutput$sims.list$x), 2, median)
# partyPlacement <- apply( rbind (results[[1]]$BUGSoutput$sims.list$partyPos, results[[2]]$BUGSoutput$sims.list$partyPos), 2, median)
locs <- apply( results$BUGSoutput$sims.list$x, 2, median)
partyPlacement <- apply( results$BUGSoutput$sims.list$partyPos, 2, median)
for (n in 1:J.all){
if (length( which(councilors==name[n]) )==0) { # if councilor not member current round
x.location[n] <- NA # then prior for next round set to NA
x.precision[n] <- NA # (and line above sets it to party placement)
}
else { x.location[n] <- locs[which (councilors==name[n])] } # councilor's prior for next round is current x
}
# Precision prior is always constant at 100, implying standard deviation = sqrt (1/100) = 0.1
} # <--- END OF LOOP OVER WINDOWS
## Restore overall totals
J <- J.all; I <- I.all; rm(J.all, I.all)
# rename object with posterior sims
window.results.23 <- window.results
ls()
rm(window.results)
# clean
ls()
rm(c, s, n, i, v, sel, ife.inits, ife.parameters, ife.data)
rm(councilors, sponsors, inicio, final, councilor.in)
rm(x.location, x.mean, x.precision, x.tau, item, results, item.date)
rm(color, column, locs, name, party, partyPlacement)
# save
summary(window.results)
summary(window.results[[231]]) # 9 members
summary(window.results[[232]]) # 11 members, overlap
save.image(file = "posterior-samples/wold23-window-results-compress.RData", compress = "xz")
#save(window.results.23, file = "posterior-samples/wold23-window-results-compress.RData")
# Save semester.results, containing all chains from all runs
# save (semester.results, file="DynWoldenbergBonicaMarch19.RData")
# save (semester.results, file="DynWoldenbergBonica.RData")
# RData file with runs carried out in Mexico, early March
# load ("DynWoldenbergBonica.RData")
# RData file with runs carried out in Wash U, March 19
# These runs omit non-sitting Councilors and party precisions for new Councilors
load ("DynWoldenbergBonicaMarch19.RData")
S <- length(semester.results)
multiGelman.hat <- numeric ()
for (i in 1:S){
chainsConv <- mcmc.list(list (as.mcmc (semester.results[[i]][[2]]$BUGSoutput$sims.list$x), as.mcmc (semester.results[[i]][[1]]$BUGSoutput$sims.list$x)))
tmp <- gelman.diag (chainsConv)[[2]]
multiGelman.hat <- c(multiGelman.hat, tmp)
}
summary (multiGelman.hat)
rm (tmp, chainsConv)
CouncilorIn <- matrix (1, nrow=11, ncol=S)
#CouncilorIn[c(7,9), all23$date[-c(1:29)] > 20001114] <- NA # Last Molinar, Zebadua vote
#CouncilorIn[c(10,11),all23$date[-c(1:29)] < 20010130] <- NA # First Rivera, Luken vote
CouncilorIn[c(7,9), item.date > ymd(20001114)] <- NA # Last Molinar, Zebadua vote
CouncilorIn[c(10,11), item.date < ymd(20010130)] <- NA # First Rivera, Luken vote
# If using DynWoldenbergBonica.RData, use the following code to extract ideal points
ideal.points <- matrix (NA, nrow=S, ncol=11)
for (i in 1:S){
ideal.points[i,] <- apply (rbind (semester.results[[i]][[1]]$BUGSoutput$sims.list$x, semester.results[[i]][[2]]$BUGSoutput$sims.list$x), 2, median)
}
# If using DynWodenbergBonicaMarch19.RData, use the following code to extract ideal points
ideal.points <- matrix (NA, nrow=S, ncol=11)
ideal.points.var <- matrix (NA, nrow=S, ncol=11)
for (i in 1:S){
for (j in 1:11){
councilor <- name[j]
num <- which (semester.results[[i]][[3]]==councilor)
if ( length (num)==0 ) {
ideal.points[i,j] <- 1
ideal.points.var[i,j] <- 0
} else {
ideal.points[i,j] <- median (c (semester.results[[i]][[1]]$BUGSoutput$sims.list$x[,num], semester.results[[i]][[2]]$BUGSoutput$sims.list$x[,num]))
ideal.points.var[i,j] <- var (c (semester.results[[i]][[1]]$BUGSoutput$sims.list$x[,num], semester.results[[i]][[2]]$BUGSoutput$sims.list$x[,num]))
}
}
}
# Get SDs of estimates, the width should be useful to plot thickness of data points
ideal.points.var <- sqrt (ideal.points.var)
# Non-smoothed ideal point time-paths
plot(c(1:S), ideal.points[1:S,1], main="", ylim=c(-3,3), type="n", xlab="", ylab="Ideal points")
for (j in 1:11){
lines(CouncilorIn[j,1:S] * ideal.points[1:S,j], lwd=4, col=color.23[j])
}
# Item indices closest to federal elections
fedEls.items <- c(
min(which(abs(item.date-ymd(19970706))==min(abs(item.date-ymd(19970706))))),
min(which(abs(item.date-ymd(20000702))==min(abs(item.date-ymd(20000702))))),
min(which(abs(item.date-ymd(20030706))==min(abs(item.date-ymd(20030706)))))
#min(which(abs(item.date-ymd(20060702))==min(abs(item.date-ymd(20060702))))),
#min(which(abs(item.date-ymd(20090705))==min(abs(item.date-ymd(20090705))))),
#min(which(abs(item.date-ymd(20120701))==min(abs(item.date-ymd(20120701)))))
)
# Item indices closest to New Years
newYear.items <- c(
min(which(abs(item.date-ymd(19970101))==min(abs(item.date-ymd(19970101))))),
min(which(abs(item.date-ymd(19980101))==min(abs(item.date-ymd(19980101))))),
min(which(abs(item.date-ymd(19990101))==min(abs(item.date-ymd(19990101))))),
min(which(abs(item.date-ymd(20000101))==min(abs(item.date-ymd(20000101))))),
min(which(abs(item.date-ymd(20010101))==min(abs(item.date-ymd(20010101))))),
min(which(abs(item.date-ymd(20020101))==min(abs(item.date-ymd(20020101))))),
min(which(abs(item.date-ymd(20030101))==min(abs(item.date-ymd(20030101)))))
## min(which(abs(item.date-ymd(20040101))==min(abs(item.date-ymd(20040101))))),
## min(which(abs(item.date-ymd(20050101))==min(abs(item.date-ymd(20050101))))),
## min(which(abs(item.date-ymd(20060101))==min(abs(item.date-ymd(20060101))))),
## min(which(abs(item.date-ymd(20070101))==min(abs(item.date-ymd(20070101))))),
## min(which(abs(item.date-ymd(20080101))==min(abs(item.date-ymd(20080101))))),
## min(which(abs(item.date-ymd(20090101))==min(abs(item.date-ymd(20090101))))),
## min(which(abs(item.date-ymd(20100101))==min(abs(item.date-ymd(20100101))))),
## min(which(abs(item.date-ymd(20110101))==min(abs(item.date-ymd(20110101))))),
## min(which(abs(item.date-ymd(20120101))==min(abs(item.date-ymd(20120101)))))
)
# Item indices for entry/exit from council
inNout <- matrix (NA, nrow=11, ncol=S)
for (j in 1:11){
inNout[j,min(which(CouncilorIn[j,]==1))] <- min(which(CouncilorIn[j,]==1))
inNout[j,max(which(CouncilorIn[j,]==1))] <- max(which(CouncilorIn[j,]==1))
}
# Function captures smooth.spline ideal point coordinates: Smooth[[j]]$x[1:s], Smooth[[j]]$y[1:s] give vote s's
Smooth <- list ()
for (j in 1:11){
# Smooth[[j]] <- smooth.spline(c(1:S), ideal.points[,j], df=10)
Smooth[[j]] <- smooth.spline(c(1:S)[!is.na(CouncilorIn[j,1:S])], ideal.points[!is.na(CouncilorIn[j,1:S]),j], df=10)
}
for (j in 1:11){
Smooth[[j]]$y[is.na(CouncilorIn[j,1:S])] <- NA
}
# Smoothed ideal point time-paths
err <- ideal.points.var*2/max(ideal.points.var) ## estimate error
spaghetti.graph <- function(progress=S){
#pdf(paste(graphdir,"woldBonicaSmoothError.pdf",sep=""), width=7, height=7)
plot(c(1:S), ideal.points[1:S,1], main="", ylim=c(-3,3), type="n", xlab="Divided vote", ylab="Ideal points")
axis(3, at=newYear.items, labels = 1997:2003, cex.axis=.6)
abline(v=fedEls.items, lty=3, col="grey50")
text(rep(fedEls.items, times=2), c(rep(3.1,times=3),rep(2.925,times=3)), c("midterm","presidential","midterm",rep("election",3)), adj=0, cex=.65, pos=1, col="grey30")
for (j in 1:11){
points(1:progress, Smooth[[j]]$y[1:progress], cex=.3, col=color.23[j])
# points(1:progress, Smooth[[j]]$y[1:progress], cex=err[,j], col=color.23[j])
# lines(1:progress, Smooth[[j]]$y[1:progress], lwd=3, col=color.23[j])
# points(inNout[j,], Smooth[[j]]$y, pch=19, col=color.23[j])
}
text(-15,Smooth[[2]]$y[1]+.15,c("Barragán"), pos=4, adj=0, cex=.75, col="black")
text(-15,Smooth[[1]]$y[1]+.4,c("Woldenberg"), pos=4, adj=0, cex=.75, col="black")
text(-15,Smooth[[1]]$y[1]+.25,c("Peschard"), pos=4, adj=0, cex=.75, col="black")
text(-15,Smooth[[1]]$y[1]+.1,c("Merino"), pos=4, adj=0, cex=.75, col="black")
text(-15,Smooth[[7]]$y[30],c("Molinar"), pos=4, adj=0, cex=.75, col="black")
text(-15,Smooth[[9]]$y[15],c("Zebadúa"), pos=4, adj=0, cex=.75, col="black")
text(-15,Smooth[[5]]$y[1]-.25,c("Lujambio"), pos=4, adj=0, cex=.75, col="black")
text(-15,Smooth[[5]]$y[1]-.4,c("Cantú"), pos=4, adj=0, cex=.75, col="black")
text(-15,Smooth[[4]]$y[70],c("Cárdenas"), pos=4, adj=0, cex=.75, col="black")
text(233-15,Smooth[[10]]$y[233]-.125,c("Rivera"), pos=4, adj=0, cex=.75, col="black")
text(233-15,Smooth[[11]]$y[233]+.075,c("Luken"), pos=4, adj=0, cex=.75, col="black")
#dev.off()
}
spaghetti.graph()
# Smoothed ideal point time-paths, daily change movie
snapshot <- seq (3, 553, by=5)
for (s in snapshot){
which.s <- which (snapshot==s)
if (which.s < 10) {name = paste('Wolden00', which.s,'plot.png',sep='')}
if (which.s >= 10 && which.s < 100) {name = paste('Wolden0', which.s,'plot.png', sep='')}
if (which.s >= 100) {name = paste('Wolden', which.s,'plot.png', sep='')}
jpeg (paste(graphdir, "animBits/", name, sep=""), quality=100, height=500, width=500)
spaghetti.graph(s)
## plot(c(1:S), ideal.points[1:S,1], main="", ylim=c(-3,3), type="n", xlab="", ylab="Ideal points")
## for (j in 1:11){
## lines ( Smooth[[j]]$x[1:s], Smooth[[j]]$y[1:s], lwd=6, col=color.23[j])
## }
legend ("bottomright", bty="n", xjust=0, legend=paste ("vote date:", item.date[s], sep=" ")) #Change the legend for the date of the vote
dev.off()
}
# Make a short film
# The number after -loop controls the number of automatic replays (0 stands for an infinite loop)
# The number after -delay sets how long each frame is displayed, in ticks of 1/100 second
setwd(paste(graphdir,"animBits/", sep=""))
system ("convert -loop 1 -delay 20 *.png IFEwoldenTheMovie.gif")
setwd(workdir)
###########################
# Regression analysis
# Identify first Sunday in July for election years
election.dates <- c(19970706,20000702,20030706,20060702,20090705,20120701)
ymd (20130308) - ymd (20130307) # sanity check: lubridate date subtraction returns a difference in days
Dates <- matrix (NA, nrow=length(all23$date), ncol=length(election.dates))
for (i in 1:6){
for (j in 1:length(all23$date)){
Dates[j,i] <- ymd (election.dates[i]) - ymd (all23$date[j])
}
}
Date2NextElection <- numeric ()
for (i in 1:length(all23$date)){
Date2NextElection[i] <- min (Dates[i,][Dates[i,]>0])
}
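# Equivalent vectorized form (a sketch; should reproduce the loop above row by row):
# Date2NextElection <- apply(Dates, 1, function(r) min(r[r > 0]))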
rm (Dates)
Date2NextElection <- Date2NextElection[-c(1:29)]
Ideal.Points <- matrix (NA, ncol=ncol(ideal.points), nrow=nrow(ideal.points))
for (j in 1:ncol(ideal.points)){
Ideal.Points[,j] <- CouncilorIn[j,] * ideal.points[,j]
}
WD.PRI <- WD.PRD <- WD.PAN <- numeric ()
for (i in 1:nrow(Ideal.Points)){
WD.PRI[i] <- max(Ideal.Points[i,color.23=="red"], na.rm=T) - min(Ideal.Points[i,color.23=="red"], na.rm=T)
WD.PRD[i] <- max(Ideal.Points[i,color.23=="gold"], na.rm=T) - min(Ideal.Points[i,color.23=="gold"], na.rm=T)
WD.PAN[i] <- max(Ideal.Points[i,color.23=="blue"], na.rm=T) - min(Ideal.Points[i,color.23=="blue"], na.rm=T)
}
AV.PRI <- AV.PRD <- AV.PAN <- numeric ()
for (i in 1:nrow(Ideal.Points)){
AV.PRI[i] <- mean(Ideal.Points[i,color.23=="red"], na.rm=T)
AV.PRD[i] <- mean(Ideal.Points[i,color.23=="gold"], na.rm=T)
AV.PAN[i] <- mean(Ideal.Points[i,color.23=="blue"], na.rm=T)
}
PAN.PRD <- PRD.PRI <- PRI.PAN <- numeric ()
for (i in 1:nrow(Ideal.Points)){
PAN.PRD[i] <- AV.PAN[i] - AV.PRD[i]
PRD.PRI[i] <- AV.PRD[i] - AV.PRI[i]
PRI.PAN[i] <- AV.PRI[i] - AV.PAN[i]
}
mod <- lm (PRI.PAN ~ Date2NextElection + I(Date2NextElection^2))
plot (PRI.PAN, type="l", lwd=2)
abline (v=c(163,493))
mod <- lm (PAN.PRD ~ Date2NextElection + I(Date2NextElection^2))
plot (PAN.PRD, type="l", lwd=2)
abline (v=c(163,493))
mod <- lm (PRD.PRI ~ Date2NextElection + I(Date2NextElection^2))
plot (PRD.PRI, type="l", lwd=2)
abline (v=c(163,493))
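# Inspect the most recent fit (a sketch): the signs of the Date2NextElection
# terms indicate whether the PRD-PRI gap widens or narrows as the next
# federal election approaches.
summary(mod)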
|
{"hexsha": "ff2f73f984106fc8d71fac239c54160cc75b960e", "size": 19677, "ext": "r", "lang": "R", "max_stars_repo_path": "code/ifeJagsDynWoldenbergBonica.r", "max_stars_repo_name": "grosasballina/ife-update", "max_stars_repo_head_hexsha": "174e3bfdffa6e84bff9fe70defe1e8afff05c7d6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/ifeJagsDynWoldenbergBonica.r", "max_issues_repo_name": "grosasballina/ife-update", "max_issues_repo_head_hexsha": "174e3bfdffa6e84bff9fe70defe1e8afff05c7d6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/ifeJagsDynWoldenbergBonica.r", "max_forks_repo_name": "grosasballina/ife-update", "max_forks_repo_head_hexsha": "174e3bfdffa6e84bff9fe70defe1e8afff05c7d6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-03-02T19:26:19.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-08T13:31:15.000Z", "avg_line_length": 41.600422833, "max_line_length": 196, "alphanum_fraction": 0.6153885247, "num_tokens": 6439}
|
import numpy as np
import pytest
from napari.components.layerlist import LayerList
from napari.layers import Image, Points
@pytest.fixture
def layer_list():
return LayerList()
@pytest.fixture
def points_layer():
return Points()
@pytest.fixture
def image_layer():
data = np.ones((10, 10))
data[::2, ::2] = 0
return Image(data)
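# Illustrative (hypothetical) test showing how pytest injects these fixtures by
# argument name -- a sketch only; in a real suite it would live in a test module
# rather than conftest.py. LayerList is list-like, so append/len behave as expected.
def test_fixtures_compose(layer_list, points_layer, image_layer):
    layer_list.append(points_layer)
    layer_list.append(image_layer)
    assert len(layer_list) == 2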
|
{"hexsha": "b8f0f3efa562b00339650897422361c6530fc0a9", "size": 353, "ext": "py", "lang": "Python", "max_stars_repo_path": "napari/utils/context/_tests/conftest.py", "max_stars_repo_name": "Napari/napari", "max_stars_repo_head_hexsha": "2dc5aa659f875c353bfbde3b20d8f07a664ed8a8", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2018-07-03T17:35:46.000Z", "max_stars_repo_stars_event_max_datetime": "2018-11-07T15:48:58.000Z", "max_issues_repo_path": "napari/utils/context/_tests/conftest.py", "max_issues_repo_name": "Napari/napari", "max_issues_repo_head_hexsha": "2dc5aa659f875c353bfbde3b20d8f07a664ed8a8", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 23, "max_issues_repo_issues_event_min_datetime": "2018-06-03T17:17:03.000Z", "max_issues_repo_issues_event_max_datetime": "2019-01-23T18:45:05.000Z", "max_forks_repo_path": "napari/utils/context/_tests/conftest.py", "max_forks_repo_name": "Napari/napari", "max_forks_repo_head_hexsha": "2dc5aa659f875c353bfbde3b20d8f07a664ed8a8", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2018-06-03T15:04:32.000Z", "max_forks_repo_forks_event_max_datetime": "2018-10-09T19:11:18.000Z", "avg_line_length": 15.347826087, "max_line_length": 49, "alphanum_fraction": 0.7025495751, "include": true, "reason": "import numpy", "num_tokens": 86}
|
import matplotlib
matplotlib.use('gtk')  # legacy pygtk backend; on current matplotlib this would need e.g. 'GTK3Agg'
import matplotlib.pyplot as plt
import numpy as np
import gdfmm
import skimage.io
import pdb
rgb = skimage.io.imread('/home/daniel/nyu_label/rgb/r-1294439283.377657-2381571548.png')
dep = skimage.io.imread('/home/daniel/nyu_label/rawdepth/r-1294439283.377657-2381571548.png')
dep = np.asarray(dep, dtype=np.uint16)
# pdb.set_trace()  # uncomment to inspect dep/rgb before inpainting
X = gdfmm.InpaintDepth(dep, rgb)
plt.imshow(X)
plt.show()
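# Optionally persist the inpainted result as well (a sketch; assumes X is
# depth-like data that fits in 16 bits):
# skimage.io.imsave('inpainted_depth.png', X.astype(np.uint16))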
|
{"hexsha": "b074203152da53b9fe6070d33864b8305ca5663b", "size": 433, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/testgdfmm.py", "max_stars_repo_name": "xkjyeah/gifmm", "max_stars_repo_head_hexsha": "78ceeb08136abf902c37260df2366d9e85d156f1", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/testgdfmm.py", "max_issues_repo_name": "xkjyeah/gifmm", "max_issues_repo_head_hexsha": "78ceeb08136abf902c37260df2366d9e85d156f1", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/testgdfmm.py", "max_forks_repo_name": "xkjyeah/gifmm", "max_forks_repo_head_hexsha": "78ceeb08136abf902c37260df2366d9e85d156f1", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.7894736842, "max_line_length": 93, "alphanum_fraction": 0.7736720554, "include": true, "reason": "import numpy", "num_tokens": 127}
|
using CrystallographyBase: Lattice
using LinearAlgebra: I
export distortby, distort, strainstate
# See https://link.springer.com/content/pdf/10.1007%2F978-3-7091-0382-1_7.pdf and https://doi.org/10.2138/am-1997-1-207
distortby(lattice::Lattice, strain::TensorStrain) =
Lattice((I + strain.data) * lattice.data)
distortby(lattice::Lattice, strain::EngineeringStrain) =
distortby(lattice, TensorStrain(strain))
const distort = distortby # For the sake of compatibility
strainstate(old::Lattice, new::Lattice) = TensorStrain(new.data / old.data - I)
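# Usage sketch (hypothetical values, not part of the original file): distort a unit
# cubic lattice by 1% uniaxial strain along x, then recover the strain state:
# lattice = Lattice([1 0 0; 0 1 0; 0 0 1])
# ε = TensorStrain([0.01 0 0; 0 0 0; 0 0 0])
# strainstate(lattice, distortby(lattice, ε))  # ≈ ε up to floating-point error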
|
{"hexsha": "1990bb61cac7b78083d71384df354f2fe732af3e", "size": 559, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/distort.jl", "max_stars_repo_name": "MineralsCloud/LinearElasticity.jl", "max_stars_repo_head_hexsha": "351d7df008063d9445193cc1038840ac64c9b8bf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-01T10:40:00.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-01T10:40:00.000Z", "max_issues_repo_path": "src/distort.jl", "max_issues_repo_name": "MineralsCloud/LinearElasticity.jl", "max_issues_repo_head_hexsha": "351d7df008063d9445193cc1038840ac64c9b8bf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2021-08-10T00:12:43.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-01T00:30:44.000Z", "max_forks_repo_path": "src/distort.jl", "max_forks_repo_name": "MineralsCloud/LinearElasticity.jl", "max_forks_repo_head_hexsha": "351d7df008063d9445193cc1038840ac64c9b8bf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.9285714286, "max_line_length": 119, "alphanum_fraction": 0.7620751342, "num_tokens": 171}
|
# Title : SVM - draw the diagrammatic sketch
# Objective : draw the diagrammatic sketch for SVM. Note that it is not the code for SVM
# Created by: Wu Shangbin
# Created on: 2021/12/8
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.preprocessing import StandardScaler
sampleNo = 20
# Two bivariate normal clouds, one per class
gaussian1 = np.random.normal(loc=[1,1], scale=0.4, size=(sampleNo,2))
gaussian2 = np.random.normal(loc=[-1,-1], scale=0.4, size=(sampleNo,2))
data_x = np.concatenate((gaussian1, gaussian2))
data_y = [0 for i in range(sampleNo)] + [1 for i in range(sampleNo)]
data_y = np.array(data_y)
scaler = StandardScaler()
data_x = scaler.fit_transform(data_x)
# --
svc = LinearSVC(C=1.0)
model = svc.fit(data_x, data_y)
# --
w = svc.coef_[0]
def draw_line(w0, w1, intercept_, **args):
    """Draw the line w0*x + w1*y + intercept_ = 0 over x in [-2, 2]."""
    k = -w0 / w1
    xx = np.linspace(-2, 2)
    yy = k * xx - intercept_ / w1
    plt.plot(xx, yy, **args)
svm_w0, svm_w1, svm_intercept = svc.coef_[0][0], svc.coef_[0][1], svc.intercept_[0]
draw_line(svm_w0, svm_w1, svm_intercept, linestyle = '-', color = 'blue', label="Separating hyperplane with max margin")
draw_line(svm_w0, svm_w1, svm_intercept-0.5, linestyle = '--', color = 'blue')
draw_line(svm_w0, svm_w1, svm_intercept+0.5, linestyle = '--', color = 'blue', label="Margin boundaries (diagrammatic)")
# Scatter plot of the two classes
plt.plot(gaussian1[:,0],gaussian1[:,1],'+', color='r', )
plt.plot(gaussian2[:,0],gaussian2[:,1],'o', color='g', )
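# Report the implied margin width (a sketch): for a hard-margin separator the
# geometric margin is 2/||w||; LinearSVC is soft-margin, so this is approximate.
print("approx. margin width: %.3f" % (2.0 / np.linalg.norm(svc.coef_[0])))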
plt.legend()
plt.show()
|
{"hexsha": "cfa6107bed80aed481ef53a382cd846f597189e0", "size": 1507, "ext": "py", "lang": "Python", "max_stars_repo_path": "Python/SVM_draw_diagrammatic_sketch.py", "max_stars_repo_name": "595666666/tripping", "max_stars_repo_head_hexsha": "f448300c31de96089b855ee9774068d748b0cd31", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-06-04T06:07:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-05T16:03:16.000Z", "max_issues_repo_path": "Python/SVM_draw_diagrammatic_sketch.py", "max_issues_repo_name": "595666666/tripping", "max_issues_repo_head_hexsha": "f448300c31de96089b855ee9774068d748b0cd31", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Python/SVM_draw_diagrammatic_sketch.py", "max_forks_repo_name": "595666666/tripping", "max_forks_repo_head_hexsha": "f448300c31de96089b855ee9774068d748b0cd31", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.0465116279, "max_line_length": 120, "alphanum_fraction": 0.6794956868, "include": true, "reason": "import numpy", "num_tokens": 510}
|
(** * Induced functors between comma categories *)
Require Import Category.Core Functor.Core NaturalTransformation.Core.
Require Import Category.Dual.
Require Import Category.Prod.
Require Import NaturalTransformation.Identity.
Require Import FunctorCategory.Core Cat.Core.
Require Import InitialTerminalCategory.Core InitialTerminalCategory.Functors.
Require Comma.Core.
Local Set Warnings Append "-notation-overridden". (* work around bug #5567, https://coq.inria.fr/bugs/show_bug.cgi?id=5567, notation-overridden,parsing should not trigger for only printing notations *)
Import Comma.Core.
Local Set Warnings Append "notation-overridden".
Require Import Comma.Projection.
Require Import Types.Prod HoTT.Tactics Types.Unit.
Set Universe Polymorphism.
Set Implicit Arguments.
Generalizable All Variables.
Set Asymmetric Patterns.
Local Open Scope functor_scope.
Local Open Scope category_scope.
(** ** Morphisms in [(A → C)ᵒᵖ × (B → C)] from [(s₀, s₁)] to [(d₀, d₁)] induce functors [(s₀ / s₁) → (d₀ / d₁)] *)
Section comma_category_induced_functor.
Context `{Funext}.
Variables A B C : PreCategory.
Definition comma_category_induced_functor_object_of s d
(m : morphism ((A -> C)^op * (B -> C)) s d)
(x : fst s / snd s)
: (fst d / snd d)
:= CommaCategory.Build_object
(fst d) (snd d)
(CommaCategory.a x)
(CommaCategory.b x)
((snd m) (CommaCategory.b x) o CommaCategory.f x o (fst m) (CommaCategory.a x)).
Lemma comma_category_induced_functor_object_of_identity s x
: comma_category_induced_functor_object_of (Category.Core.identity s) x
= x.
Proof.
let x1 := match goal with |- ?x1 = ?x2 => constr:(x1) end in
let x2 := match goal with |- ?x1 = ?x2 => constr:(x2) end in
apply (CommaCategory.path_object' x1 x2 idpath idpath).
simpl.
abstract (rewrite ?left_identity, ?right_identity; reflexivity).
Defined.
Definition comma_category_induced_functor_object_of_compose s d d'
(m : morphism ((A -> C)^op * (B -> C)) d d')
(m' : morphism ((A -> C)^op * (B -> C)) s d)
x
: comma_category_induced_functor_object_of (m o m') x
= comma_category_induced_functor_object_of
m
(comma_category_induced_functor_object_of m' x).
Proof.
let x1 := match goal with |- ?x1 = ?x2 => constr:(x1) end in
let x2 := match goal with |- ?x1 = ?x2 => constr:(x2) end in
apply (CommaCategory.path_object' x1 x2 idpath idpath).
abstract (
destruct m', m, x;
simpl in *;
rewrite !associativity;
reflexivity
).
Defined.
Definition comma_category_induced_functor_morphism_of s d m s0 d0
(m0 : morphism (fst s / snd s) s0 d0)
: morphism (fst d / snd d)
(@comma_category_induced_functor_object_of s d m s0)
(@comma_category_induced_functor_object_of s d m d0).
Proof.
simpl.
let s := match goal with |- CommaCategory.morphism ?s ?d => constr:(s) end in
let d := match goal with |- CommaCategory.morphism ?s ?d => constr:(d) end in
refine (CommaCategory.Build_morphism s d (CommaCategory.g m0) (CommaCategory.h m0) _);
simpl in *; clear.
abstract (
destruct_head prod;
destruct_head CommaCategory.morphism;
destruct_head CommaCategory.object;
simpl in *;
repeat (try_associativity_quick (rewrite <- !commutes || (progress f_ap)));
repeat (try_associativity_quick (rewrite !commutes || (progress f_ap)));
assumption
). (* 3.495 s *)
Defined.
Definition comma_category_induced_functor s d
(m : morphism ((A -> C)^op * (B -> C)) s d)
: Functor (fst s / snd s) (fst d / snd d).
Proof.
refine (Build_Functor (fst s / snd s) (fst d / snd d)
(@comma_category_induced_functor_object_of s d m)
(@comma_category_induced_functor_morphism_of s d m)
_
_
);
abstract (
intros; apply CommaCategory.path_morphism; reflexivity
).
Defined.
End comma_category_induced_functor.
(** ** Morphisms in [C] from [a] to [a'] induce functors [(C / a) → (C / a')] *)
Section slice_category_induced_functor.
Context `{Funext}.
Variable C : PreCategory.
Section slice_coslice.
Variable D : PreCategory.
(** TODO(JasonGross): See if this can be recast as an exponential law functor about how [1 → Cat] is isomorphic to [Cat], or something *)
Definition slice_category_induced_functor_nt s d (m : morphism D s d)
: NaturalTransformation !s !d.
Proof.
exists (fun _ : Unit => m);
simpl; intros; clear;
abstract (autorewrite with category; reflexivity).
Defined.
Variable F : Functor C D.
Variable a : D.
Section slice.
Definition slice_category_induced_functor F' a'
(m : morphism D a a')
(T : NaturalTransformation F' F)
: Functor (F / a) (F' / a')
:= comma_category_induced_functor
(s := (F, !a))
(d := (F', !a'))
(T, @slice_category_induced_functor_nt a a' m).
Definition slice_category_nt_induced_functor F' T
:= @slice_category_induced_functor F' a 1 T.
Definition slice_category_morphism_induced_functor a' m
:= @slice_category_induced_functor F a' m 1.
End slice.
Section coslice.
Definition coslice_category_induced_functor F' a'
(m : morphism D a' a)
(T : NaturalTransformation F F')
: Functor (a / F) (a' / F')
:= comma_category_induced_functor
(s := (!a, F))
(d := (!a', F'))
(@slice_category_induced_functor_nt a' a m, T).
Definition coslice_category_nt_induced_functor F' T
:= @coslice_category_induced_functor F' a 1 T.
Definition coslice_category_morphism_induced_functor a' m
:= @coslice_category_induced_functor F a' m 1.
End coslice.
End slice_coslice.
Definition slice_category_over_induced_functor a a' (m : morphism C a a')
: Functor (C / a) (C / a')
:= Eval hnf in slice_category_morphism_induced_functor _ _ _ m.
Definition coslice_category_over_induced_functor a a' (m : morphism C a' a)
: Functor (a \ C) (a' \ C)
:= Eval hnf in coslice_category_morphism_induced_functor _ _ _ m.
End slice_category_induced_functor.
(** ** Functors [A → A'] induce functors [(cat / A) → (cat / A')] *)
Section cat_over_induced_functor.
Context `{Funext}.
Variable P : PreCategory -> Type.
Context `{H0 : forall C D, P C -> P D -> IsHSet (Functor C D)}.
Local Notation cat := (@sub_pre_cat _ P H0).
Definition cat_over_induced_functor a a' (m : morphism cat a a')
: Functor (cat / a) (cat / a')
:= slice_category_over_induced_functor cat a a' m.
Definition over_cat_induced_functor a a' (m : morphism cat a' a)
: Functor (a \ cat) (a' \ cat)
:= coslice_category_over_induced_functor cat a a' m.
End cat_over_induced_functor.
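(* Usage sketch (not part of the original file): for [m : morphism C a a'],
[slice_category_over_induced_functor C a a' m] is the post-composition functor
[C / a → C / a'] sending an object [f : x ⟶ a] to [f ≫ m]; this is how the
comma-category machinery above specializes to over- and under-categories. *)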
|
{"author": "CPP21-Universal-Algebra-in-HoTT", "repo": "Universal-Algebra-in-HoTT", "sha": "7228b5b88684abff3c26a7eed07e1222b04fd8de", "save_path": "github-repos/coq/CPP21-Universal-Algebra-in-HoTT-Universal-Algebra-in-HoTT", "path": "github-repos/coq/CPP21-Universal-Algebra-in-HoTT-Universal-Algebra-in-HoTT/Universal-Algebra-in-HoTT-7228b5b88684abff3c26a7eed07e1222b04fd8de/theories/Categories/Comma/InducedFunctors.v"}
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .. import config
from .pde_base import PDE
from ..parameter import Parameter, is_parameter
import sympy
import numpy as np
__all__ = ['NavierStokes']
class NavierStokes(PDE):
"""
Navier-Stokes equation
.. math::
:nowrap:
Time-independent Navier-Stokes Equation
\\begin{eqnarray*}
&& \\frac{\\partial u}{\\partial x} + \\frac{\\partial v}{\\partial y} + \\frac{\\partial w}{\\partial z} = 0, \\\\
&& u \\frac{\\partial u}{\\partial x} + v \\frac{\partial u}{\\partial y} + w \\frac{\partial u}{\\partial z} - \\frac{\\nu}{\\rho} \\frac{\\partial^2 u}{\\partial x^2} - \\frac{\\nu}{\\rho} \\frac{\\partial^2 u}{\\partial z^2} - \\frac{\\nu}{\\rho} \\frac{\\partial^2 u}{\\partial y^2} + \\frac{\\partial p}{\\partial x} = 0,\\\\
&& u \\frac{\\partial v}{\\partial x} + v \\frac{\partial v}{\\partial y} + w \\frac{\partial v}{\\partial z} - \\frac{\\nu}{\\rho} \\frac{\\partial^2 v}{\\partial x^2} - \\frac{\\nu}{\\rho} \\frac{\\partial^2 v}{\\partial z^2} - \\frac{\\nu}{\\rho} \\frac{\\partial^2 v}{\\partial y^2} + \\frac{\\partial p}{\\partial y} = 0, \\\\
&& u \\frac{\\partial w}{\\partial x} + v \\frac{\partial w}{\\partial y} + w \\frac{\partial w}{\\partial z} - \\frac{\\nu}{\\rho} \\frac{\\partial^2 w}{\\partial x^2} - \\frac{\\nu}{\\rho} \\frac{\\partial^2 w}{\\partial z^2} - \\frac{\\nu}{\\rho} \\frac{\\partial^2 w}{\\partial y^2} + \\frac{\\partial p}{\\partial z} = 0.
\\end{eqnarray*}
Time-dependent Navier-Stokes equation
\\begin{eqnarray*}
&& \\frac{\\partial u}{\\partial x} + \\frac{\\partial v}{\\partial y} + \\frac{\\partial w}{\\partial z} = 0, \\\\
&& \\frac{\\partial u}{\\partial t} + u \\frac{\\partial u}{\\partial x} + v \\frac{\partial u}{\\partial y} + w \\frac{\partial u}{\\partial z} - \\frac{\\nu}{\\rho} \\frac{\\partial^2 u}{\\partial x^2} - \\frac{\\nu}{\\rho} \\frac{\\partial^2 u}{\\partial z^2} - \\frac{\\nu}{\\rho} \\frac{\\partial^2 u}{\\partial y^2} + \\frac{\\partial p}{\\partial x} = 0,\\\\
&& \\frac{\\partial v}{\\partial t} + u \\frac{\\partial v}{\\partial x} + v \\frac{\partial v}{\\partial y} + w \\frac{\partial v}{\\partial z} - \\frac{\\nu}{\\rho} \\frac{\\partial^2 v}{\\partial x^2} - \\frac{\\nu}{\\rho} \\frac{\\partial^2 v}{\\partial z^2} - \\frac{\\nu}{\\rho} \\frac{\\partial^2 v}{\\partial y^2} + \\frac{\\partial p}{\\partial y} = 0, \\\\
&& \\frac{\\partial w}{\\partial t} + u \\frac{\\partial w}{\\partial x} + v \\frac{\partial w}{\\partial y} + w \\frac{\partial w}{\\partial z} - \\frac{\\nu}{\\rho} \\frac{\\partial^2 w}{\\partial x^2} - \\frac{\\nu}{\\rho} \\frac{\\partial^2 w}{\\partial z^2} - \\frac{\\nu}{\\rho} \\frac{\\partial^2 w}{\\partial y^2} + \\frac{\\partial p}{\\partial z} = 0.
\\end{eqnarray*}
Parameters:
nu (float): kinematic viscosity.
rho (float): density.
dim (integer): equation's dimension. 2 and 3 are supported.
time_dependent (bool): time-dependent or time-independent.
weight (optional, float or list of float or lambda function): weight in computing equation loss. The default value is 1.0.
Example:
>>> import paddlescience as psci
>>> pde = psci.pde.NavierStokes(nu=0.01, rho=1.0, dim=2)
"""
def __init__(self,
nu=0.01,
rho=1.0,
dim=2,
time_dependent=False,
weight=None):
super(NavierStokes, self).__init__(
dim + 1, time_dependent=time_dependent, weight=weight)
# parameter list
self.nu = nu
self.rho = rho
if is_parameter(nu):
self.parameter.append(nu)
if is_parameter(rho):
self.parameter.append(rho)
self.dim = dim
if dim == 2 and time_dependent == False:
# independent variable
x = sympy.Symbol('x')
y = sympy.Symbol('y')
# dependent variable
u = sympy.Function('u')(x, y)
v = sympy.Function('v')(x, y)
p = sympy.Function('p')(x, y)
# normal direction
self.normal = sympy.Symbol('n')
# continuity equation
continuty = u.diff(x) + v.diff(y)
continuty_rhs = 0
# momentum equations
momentum_x = u * u.diff(x) + v * u.diff(y) - nu / rho * u.diff(
x).diff(x) - nu / rho * u.diff(y).diff(y) + 1.0 / rho * p.diff(
x)
momentum_y = u * v.diff(x) + v * v.diff(y) - nu / rho * v.diff(
x).diff(x) - nu / rho * v.diff(y).diff(y) + 1.0 / rho * p.diff(
y)
momentum_x_rhs = 0
momentum_y_rhs = 0
# variables in order
self.indvar = [x, y]
self.dvar = [u, v, p]
# order
self.order = 2
# equations and rhs
self.equations = list()
self.rhs = list()
self.equations.append(continuty)
self.equations.append(momentum_x)
self.equations.append(momentum_y)
self.rhs.append(continuty_rhs)
self.rhs.append(momentum_x_rhs)
self.rhs.append(momentum_y_rhs)
elif dim == 2 and time_dependent == True:
# independent variable
t = sympy.Symbol('t')
x = sympy.Symbol('x')
y = sympy.Symbol('y')
# dependent variable
u = sympy.Function('u')(t, x, y)
v = sympy.Function('v')(t, x, y)
p = sympy.Function('p')(t, x, y)
# normal direction
self.normal = sympy.Symbol('n')
# continuity equation
continuty = u.diff(x) + v.diff(y)
continuty_rhs = 0
# momentum equations
momentum_x = u.diff(t) + u * u.diff(x) + v * u.diff(
y) - nu / rho * u.diff(x).diff(x) - nu / rho * u.diff(y).diff(
y) + 1.0 / rho * p.diff(x)
momentum_y = v.diff(t) + u * v.diff(x) + v * v.diff(
y) - nu / rho * v.diff(x).diff(x) - nu / rho * v.diff(y).diff(
y) + 1.0 / rho * p.diff(y)
momentum_x_rhs = 0
momentum_y_rhs = 0
# variables in order
self.indvar = [t, x, y]
self.dvar = [u, v, p]
# order
self.order = 2
# equations and rhs
self.equations = list()
self.rhs = list()
self.equations.append(continuty)
self.equations.append(momentum_x)
self.equations.append(momentum_y)
self.rhs.append(continuty_rhs)
self.rhs.append(momentum_x_rhs)
self.rhs.append(momentum_y_rhs)
elif dim == 3 and time_dependent == False:
# independent variable
x = sympy.Symbol('x')
y = sympy.Symbol('y')
z = sympy.Symbol('z')
# dependent variable
u = sympy.Function('u')(x, y, z)
v = sympy.Function('v')(x, y, z)
w = sympy.Function('w')(x, y, z)
p = sympy.Function('p')(x, y, z)
# normal direction
self.normal = sympy.Symbol('n')
# continuity equation
continuty = u.diff(x) + v.diff(y) + w.diff(z)
continuty_rhs = 0
# momentum equations
momentum_x = u * u.diff(x) + v * u.diff(y) + w * u.diff(
z) - nu / rho * u.diff(x).diff(x) - nu / rho * u.diff(y).diff(
y) - nu / rho * u.diff(z).diff(z) + 1.0 / rho * p.diff(x)
momentum_y = u * v.diff(x) + v * v.diff(y) + w * v.diff(
z) - nu / rho * v.diff(x).diff(x) - nu / rho * v.diff(y).diff(
y) - nu / rho * v.diff(z).diff(z) + 1.0 / rho * p.diff(y)
momentum_z = u * w.diff(x) + v * w.diff(y) + w * w.diff(
z) - nu / rho * w.diff(x).diff(x) - nu / rho * w.diff(y).diff(
y) - nu / rho * w.diff(z).diff(z) + 1.0 / rho * p.diff(z)
momentum_x_rhs = 0
momentum_y_rhs = 0
momentum_z_rhs = 0
# variables in order
self.indvar = [x, y, z]
self.dvar = [u, v, w, p]
# order
self.order = 2
# equations and rhs
self.equations = list()
self.rhs = list()
self.equations.append(continuty)
self.equations.append(momentum_x)
self.equations.append(momentum_y)
self.equations.append(momentum_z)
self.rhs.append(continuty_rhs)
self.rhs.append(momentum_x_rhs)
self.rhs.append(momentum_y_rhs)
self.rhs.append(momentum_z_rhs)
elif dim == 3 and time_dependent == True:
# independent variable
t = sympy.Symbol('t')
x = sympy.Symbol('x')
y = sympy.Symbol('y')
z = sympy.Symbol('z')
# dependent variable
u = sympy.Function('u')(t, x, y, z)
v = sympy.Function('v')(t, x, y, z)
w = sympy.Function('w')(t, x, y, z)
p = sympy.Function('p')(t, x, y, z)
# normal direction
self.normal = sympy.Symbol('n')
# continuity equation
continuty = u.diff(x) + v.diff(y) + w.diff(z)
continuty_rhs = 0
# momentum equations
momentum_x = u.diff(t) + u * u.diff(x) + v * u.diff(
y) + w * u.diff(z) - nu / rho * u.diff(x).diff(
x) - nu / rho * u.diff(y).diff(y) - nu / rho * u.diff(
z).diff(z) + 1.0 / rho * p.diff(x)
momentum_y = v.diff(t) + u * v.diff(x) + v * v.diff(
y) + w * v.diff(z) - nu / rho * v.diff(x).diff(
x) - nu / rho * v.diff(y).diff(y) - nu / rho * v.diff(
z).diff(z) + 1.0 / rho * p.diff(y)
momentum_z = w.diff(t) + u * w.diff(x) + v * w.diff(
y) + w * w.diff(z) - nu / rho * w.diff(x).diff(
x) - nu / rho * w.diff(y).diff(y) - nu / rho * w.diff(
z).diff(z) + 1.0 / rho * p.diff(z)
momentum_x_rhs = 0
momentum_y_rhs = 0
momentum_z_rhs = 0
# variables in order
self.indvar = [t, x, y, z]
self.dvar = [u, v, w, p]
# order
self.order = 2
# equations and rhs
self.equations = list()
self.rhs = list()
self.equations.append(continuty)
self.equations.append(momentum_x)
self.equations.append(momentum_y)
self.equations.append(momentum_z)
self.rhs.append(continuty_rhs)
self.rhs.append(momentum_x_rhs)
self.rhs.append(momentum_y_rhs)
self.rhs.append(momentum_z_rhs)
def time_discretize(self, time_method=None, time_step=None):
if time_method is None:
pde_disc = self
elif time_method == "implicit":
pde_disc = NavierStokesImplicit(
nu=self.nu,
rho=self.rho,
dim=self.dim,
time_step=time_step,
weight=self.weight)
else:
    raise ValueError(
        "unsupported time_method: {!r}; only None and 'implicit' are supported".format(time_method))
return pde_disc
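# Usage sketch (hypothetical step size, not from the original docs): discretize
# an unsteady 2D problem with the first-order implicit scheme before handing
# the PDE to a solver:
#   pde = NavierStokes(nu=0.01, rho=1.0, dim=2, time_dependent=True)
#   pde_disc = pde.time_discretize(time_method="implicit", time_step=0.1)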
class NavierStokesImplicit(PDE):
def __init__(self, nu=0.01, rho=1.0, dim=2, time_step=None, weight=None):
super(NavierStokesImplicit, self).__init__(dim + 1, weight=weight)
self.time_dependent = True
self.time_disc_method = "implicit"
# parameter list
if is_parameter(nu):
self.parameter.append(nu)
if is_parameter(rho):
self.parameter.append(rho)
# time step
self.dt = time_step
dt = time_step
if dim == 2:
# independent variable
x = sympy.Symbol('x')
y = sympy.Symbol('y')
# dependent variable current time step: u^{n+1}, v^{n+1}, p^{n+1}
u = sympy.Function('u')(x, y)
v = sympy.Function('v')(x, y)
p = sympy.Function('p')(x, y)
# dependent variable previous time step: u^{n}, v^{n}, p^{n}
u_n = sympy.Function('u_n')(x, y)
v_n = sympy.Function('v_n')(x, y)
p_n = sympy.Function('p_n')(x, y)
# normal direction
self.normal = sympy.Symbol('n')
# continuity equation
continuty = u.diff(x) + v.diff(y)
continuty_rhs = 0
# momentum
momentum_x = u / dt - u_n / dt + u * u.diff(x) + v * u.diff(
y) - nu / rho * u.diff(x).diff(x) - nu / rho * u.diff(y).diff(
y) + 1.0 / rho * p.diff(x)
momentum_y = v / dt - v_n / dt + u * v.diff(x) + v * v.diff(
y) - nu / rho * v.diff(x).diff(x) - nu / rho * v.diff(y).diff(
y) + 1.0 / rho * p.diff(y)
momentum_x_rhs = 0
momentum_y_rhs = 0
# variables in order
self.indvar = [x, y]
self.dvar = [u, v, p]
self.dvar_n = [u_n, v_n]
# order
self.order = 2
# equations and rhs
self.equations = list()
self.rhs = list()
self.equations.append(continuty)
self.equations.append(momentum_x)
self.equations.append(momentum_y)
self.rhs.append(continuty_rhs)
self.rhs.append(momentum_x_rhs)
self.rhs.append(momentum_y_rhs)
elif dim == 3:
# independent variable
x = sympy.Symbol('x')
y = sympy.Symbol('y')
z = sympy.Symbol('z')
# dependent variable current time step: u^{n+1}, v^{n+1}, w^{n+1}, p^{n+1}
u = sympy.Function('u')(x, y, z)
v = sympy.Function('v')(x, y, z)
w = sympy.Function('w')(x, y, z)
p = sympy.Function('p')(x, y, z)
# dependent variable previous time step: u^{n}, v^{n}, w^{n}
u_n = sympy.Function('u_n')(x, y, z)
v_n = sympy.Function('v_n')(x, y, z)
w_n = sympy.Function('w_n')(x, y, z)
# normal direction
self.normal = sympy.Symbol('n')
# continuity equation
continuty = u.diff(x) + v.diff(y) + w.diff(z)
continuty_rhs = 0
# momentum
momentum_x = u / dt - u_n / dt + u * u.diff(x) + v * u.diff(
y) + w * u.diff(z) - nu / rho * u.diff(x).diff(
x) - nu / rho * u.diff(y).diff(y) - nu / rho * u.diff(
z).diff(z) + 1.0 / rho * p.diff(x)
momentum_y = v / dt - v_n / dt + u * v.diff(x) + v * v.diff(
y) + w * v.diff(z) - nu / rho * v.diff(x).diff(
x) - nu / rho * v.diff(y).diff(y) - nu / rho * v.diff(
z).diff(z) + 1.0 / rho * p.diff(y)
momentum_z = w / dt - w_n / dt + u * w.diff(x) + v * w.diff(
y) + w * w.diff(z) - nu / rho * w.diff(x).diff(
x) - nu / rho * w.diff(y).diff(y) - nu / rho * w.diff(
z).diff(z) + 1.0 / rho * p.diff(z)
momentum_x_rhs = 0
momentum_y_rhs = 0
momentum_z_rhs = 0
# variables in order
self.indvar = [x, y, z]
self.dvar = [u, v, w, p]
self.dvar_n = [u_n, v_n, w_n]
# order
self.order = 2
# equations and rhs
self.equations = list()
self.rhs = list()
self.equations.append(continuty)
self.equations.append(momentum_x)
self.equations.append(momentum_y)
self.equations.append(momentum_z)
self.rhs.append(continuty_rhs)
self.rhs.append(momentum_x_rhs)
self.rhs.append(momentum_y_rhs)
self.rhs.append(momentum_z_rhs)
|
{"hexsha": "71a69bf85d98a33ca2cadf8bf6026881a1b4fbd3", "size": 16870, "ext": "py", "lang": "Python", "max_stars_repo_path": "paddlescience/pde/pde_navier_stokes.py", "max_stars_repo_name": "Liu-xiandong/PaddleScience", "max_stars_repo_head_hexsha": "5e667a4fe6138c22e0ff54af81d83a0b7cae4572", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 43, "max_stars_repo_stars_event_min_datetime": "2021-12-12T08:53:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T07:07:18.000Z", "max_issues_repo_path": "paddlescience/pde/pde_navier_stokes.py", "max_issues_repo_name": "Liu-xiandong/PaddleScience", "max_issues_repo_head_hexsha": "5e667a4fe6138c22e0ff54af81d83a0b7cae4572", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2021-12-15T08:04:50.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T03:39:15.000Z", "max_forks_repo_path": "paddlescience/pde/pde_navier_stokes.py", "max_forks_repo_name": "Liu-xiandong/PaddleScience", "max_forks_repo_head_hexsha": "5e667a4fe6138c22e0ff54af81d83a0b7cae4572", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 20, "max_forks_repo_forks_event_min_datetime": "2021-12-10T06:52:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T10:23:53.000Z", "avg_line_length": 39.6009389671, "max_line_length": 381, "alphanum_fraction": 0.4872554831, "include": true, "reason": "import numpy,import sympy", "num_tokens": 4756}
|
#!/usr/bin/env python
'''Ray implementation - for general raytracing
David Dunn
Jan 2017 - created by splitting off from dGraph
ALL UNITS ARE IN METERS
ie 1 cm = .01
www.qenops.com
'''
__author__ = ('David Dunn')
__version__ = '1.6'
import numpy as np
from numpy import dot, cross
from numpy.linalg import norm
import operator
from dGraph.shapes import Shape
epsilon = 1e-9  # numerical tolerance for the ray-triangle test (assumed default; intersectTriSlow reads this)
class Ray(object):
''' A ray shot from a point in a particular direction for calculating intersections in generating shadows and general rendering
point
direction
evaluate (intersect ray with all objects in scene and evaluate t for each)
possible tMin, tMax
'''
def __init__(self, point, vector):
self._pnt = point
self._vec = vector/norm(vector)
self._invec = 1.0/self._vec
@property
def point(self):
return self._pnt
@property
def vector(self):
return self._vec
def evaluate(self, scene):
''' intersect ray with all children objects of scene and return list of intersections
(walk the graph using bouding boxes on the transforms -
if it intersects:
evaluate children
if it is a surface:
if it intersects:
get point and t value (from Surface)
may intersect multiple times - need to do something about that
else return None)
That is pie in the sky, for now just brute force it, call the intersect method on every surface object in scene
'''
intersections = []
for obj in scene:
if isinstance(obj, Shape):
possibles = obj.intersection(self) # distance, point, normal, material
if possibles is not None:
for p in possibles:
p['object'] = obj
intersections.append(p)
return intersections
def render(self, world):
# evaluate the collisions the ray and find the closest
intersections = self.evaluate(world)
intersections.sort(key=operator.itemgetter('distance'))
# calculate color
if intersections == []:
return np.array([0,0,0])
else:
first = intersections[0]
return first['material'].render(first['point'], first['normal'], viewVector=self.vector, world=world)
@classmethod
def _calcVector(cls, basePoint, goalPoint):
''' calculate the vector that starts at basePoint and travels through goalPoint '''
return goalPoint - basePoint
def intersectBox(self, bMin, bMax):
tx1 = (bMin[0]-self._pnt[0])*self._invec[0]
tx2 = (bMax[0]-self._pnt[0])*self._invec[0]
tmin = min(tx1, tx2)
tmax = max(tx1, tx2)
ty1 = (bMin[1]-self._pnt[1])*self._invec[1]
ty2 = (bMax[1]-self._pnt[1])*self._invec[1]
tmin = max(tmin, min(ty1, ty2))
tmax = min(tmax, max(ty1, ty2))
tz1 = (bMin[2]-self._pnt[2])*self._invec[2]
tz2 = (bMax[2]-self._pnt[2])*self._invec[2]
tmin = max(tmin, min(tz1, tz2))
tmax = min(tmax, max(tz1, tz2))
return tmax >= max(0.0, tmin)
def intersectBoxOld(self, bMin, bMax):
cMin = [-float('inf'),-float('inf'),-float('inf')]
cMax = [float('inf'),float('inf'),float('inf')]
if self._vec[0] != 0:
cMin = self._vec*((bMin[0]-self._pnt[0])/self._vec[0]) + self._pnt # get x min
cMax = self._vec*((bMax[0]-self._pnt[0])/self._vec[0]) + self._pnt # get x max
if (cMin[0]-self._pnt[0])/self._vec[0] < 0 and (cMax[0]-self._pnt[0])/self._vec[0] < 0: # verify it crosses ray after start point
return False #, cMin, cMax
if self._vec[1] != 0:
yMin = self._vec*((bMin[1]-self._pnt[1])/self._vec[1]) + self._pnt # get y min
yMax = self._vec*((bMax[1]-self._pnt[1])/self._vec[1]) + self._pnt # get y max
if (yMin>cMax).all() or (cMin>yMax).all():
return False #, cMin, cMax
if (cMin<yMin).all():
cMin = yMin
if (cMax>yMax).all():
cMax = yMax
if (cMin[1]-self._pnt[1])/self._vec[1] < 0 and (cMax[1]-self._pnt[1])/self._vec[1] < 0: # verify it crosses ray after start point
return False #, cMin, cMax
if self._vec[2] != 0:
zMin = self._vec*((bMin[2]-self._pnt[2])/self._vec[2]) + self._pnt # get z min
zMax = self._vec*((bMax[2]-self._pnt[2])/self._vec[2]) + self._pnt # get z max
if (zMin>cMax).all() or (cMin>zMax).all():
return False #, cMin, cMax
if (cMin<zMin).all():
cMin = zMin
if (cMax>zMax).all():
cMax = zMax
if (cMin[2]-self._pnt[2])/self._vec[2] < 0 and (cMax[2]-self._pnt[2])/self._vec[2] < 0: # verify it crosses ray after start point
return False #, cMin, cMax
#if (cMin<self._pnt).all():
# cMin = self._pnt
#if (cMax>self._pnt).all():
# cMax = self._pnt
return True #, cMin, cMax
def intersectPlane(self, point, normal):
denom = dot(self._vec,normal)
if denom == 0:
return None, None
dist = (dot(point-self._pnt,normal))/denom
if dist <= 0:
return None, None
point = dist*self._vec + self._pnt
return dist, point
def intersectTriSlow(self, a, b, c, normal=None): # Moller-Trumbore algorithm
global epsilon
# Find vectors for two edges sharing point 'a'
e1 = b - a
e2 = c - a
# Begin calculating determinant - also used to calculate 'u' parameter
P = cross(self._vec, e2)
# if determinant is near zero, ray lies in plane of triangle
det = dot(e1, P)
# NOT CULLING
if det > -epsilon and det < epsilon:
return False, None, None
inv_det = 1.0 / det
# calculate distance from 'a' to ray origin
T = self._pnt - a
# Calculate u parameter and test bound
u = dot(T, P) * inv_det
# The intersection lies outside of the triangle
if u < 0 or u > 1:
return False, None, None
# Prepare to test 'v' parameter
Q = cross(T, e1)
# Calculate 'v' parameter and test bound
v = dot(self._vec, Q) * inv_det
# The intersection lies outside of the triangle
if v < 0 or u+v > 1:
return False, None, None
t = dot(e2, Q) * inv_det
if(t > epsilon): #ray intersection
dist, point = self.intersectPlane(a, normal)
return True, dist, point
# No intersection
return False, None, None
def intersectTri(self, a, b, c, normal=None):
if normal is None:
normal = PolySurface.calcTriNorm(a,b,c)  # NOTE: PolySurface is assumed importable from dGraph.shapes
dist, point = self.intersectPlane(a, normal)
if dist is None:
return False, None, None
# calculate baycentric
v0 = c - a
v1 = b - a
v2 = point - a
# get dot products
dot00 = dot(v0, v0)
dot01 = dot(v0, v1)
dot02 = dot(v0, v2)
dot11 = dot(v1, v1)
dot12 = dot(v1, v2)
# get barycentric coordinates
invDenom = 1 / (dot00 * dot11 - dot01 * dot01)
u = (dot11 * dot02 - dot01 * dot12) * invDenom
v = (dot00 * dot12 - dot01 * dot02) * invDenom
inside = u>=0 and v>=0 and u+v<1
return inside, dist, point
def intersectRay(self, other):
# get the distance and midpoint of the line of smallest distance between two lines
v1 = self.vector
v2 = other.vector
p1 = self.point
p2 = other.point
X = np.cross(v1,v2)
ray1Pnt = p1 + np.dot(np.cross(p2-p1,v2),X)/np.dot(X,X)*v1
ray2Pnt = p2 + np.dot(np.cross(p2-p1,v1),X)/np.dot(X,X)*v2
midPnt = (ray1Pnt+ray2Pnt)/2
distance = norm(ray2Pnt-ray1Pnt)
return (distance, midPnt, ray1Pnt, ray2Pnt)
def projectPointOnRay(self, pnt):
diff = pnt - self.point
return np.dot(diff,self.vector)*self.vector+self.point
def distanceToPoint(self, pnt):
diff = self.point - pnt
return norm(diff-(np.dot(diff,self.vector)*self.vector))
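if __name__ == '__main__':
    # Minimal smoke test (a sketch, not part of the original module).
    r = Ray(np.array([0., 0., 0.]), np.array([1., 1., 1.]))
    print(r.intersectBox(np.array([.9, .9, .9]), np.array([1.1, 1.1, 1.1])))  # expect True
    dist, pnt = r.intersectPlane(np.array([2., 2., 2.]), np.array([1., 1., 1.]))
    print(dist, pnt)  # expect dist = 2*sqrt(3) and pnt = (2., 2., 2.)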
|
{"hexsha": "719ebb5b90605b323272e1c462473f0ce1ae44a8", "size": 8390, "ext": "py", "lang": "Python", "max_stars_repo_path": "ray.py", "max_stars_repo_name": "qenops/dGraph", "max_stars_repo_head_hexsha": "b67c835bf60f1627a79d3e22183301f34431c5b3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-03-20T18:17:49.000Z", "max_stars_repo_stars_event_max_datetime": "2019-03-20T18:17:49.000Z", "max_issues_repo_path": "ray.py", "max_issues_repo_name": "qenops/dGraph", "max_issues_repo_head_hexsha": "b67c835bf60f1627a79d3e22183301f34431c5b3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ray.py", "max_forks_repo_name": "qenops/dGraph", "max_forks_repo_head_hexsha": "b67c835bf60f1627a79d3e22183301f34431c5b3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.9268292683, "max_line_length": 142, "alphanum_fraction": 0.5601907032, "include": true, "reason": "import numpy,from numpy", "num_tokens": 2424}
|
# Case I example: Bad diamond II
# Claudia January 2015
# types in "types.jl"
if !isdefined(:individualtest) individualtest = false; end
if(individualtest)
include("../src/types.jl")
include("../src/functions.jl")
end
tree = "((((8,10))#H1,7),6,(4,#H1));" # Case I Bad diamond II
#f = open("prueba_tree.txt","w")
#write(f,tree)
#close(f)
net = readTopologyLevel1(tree)
#printEdges(net)
#printNodes(net)
|
{"hexsha": "db07b4f65f769ec5d2c70e2eb9a6a8a7c8521ebc", "size": 416, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/case_i_example.jl", "max_stars_repo_name": "kwsparks/PhyloNetworks.jl", "max_stars_repo_head_hexsha": "f466c13fa599cd8bc546e5ad4f2f0e5e64756805", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2016-08-24T03:49:30.000Z", "max_stars_repo_stars_event_max_datetime": "2016-08-24T03:49:30.000Z", "max_issues_repo_path": "examples/case_i_example.jl", "max_issues_repo_name": "kwsparks/PhyloNetworks.jl", "max_issues_repo_head_hexsha": "f466c13fa599cd8bc546e5ad4f2f0e5e64756805", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/case_i_example.jl", "max_forks_repo_name": "kwsparks/PhyloNetworks.jl", "max_forks_repo_head_hexsha": "f466c13fa599cd8bc546e5ad4f2f0e5e64756805", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.8095238095, "max_line_length": 61, "alphanum_fraction": 0.6682692308, "num_tokens": 129}
|
/-
Copyright (c) 2020 Markus Himmel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Markus Himmel
-/
import category_theory.abelian.exact
import category_theory.over
/-!
# Pseudoelements in abelian categories
A *pseudoelement* of an object `X` in an abelian category `C` is an equivalence class of arrows
ending in `X`, where two arrows are considered equivalent if we can find two epimorphisms with a
common domain making a commutative square with the two arrows. While the construction shows that
pseudoelements are actually subobjects of `X` rather than "elements", it is possible to chase these
pseudoelements through commutative diagrams in an abelian category to prove exactness properties.
This is done using some "diagram-chasing metatheorems" proved in this file. In many cases, a proof
in the category of abelian groups can more or less directly be converted into a proof using
pseudoelements.
A classic application of pseudoelements are diagram lemmas like the four lemma or the snake lemma.
Pseudoelements are in some ways weaker than actual elements in a concrete category. The most
important limitation is that there is no extensionality principle: If `f g : X ⟶ Y`, then
`∀ x ∈ X, f x = g x` does not necessarily imply that `f = g` (however, if `f = 0` or `g = 0`,
it does). A corollary of this is that we can not define arrows in abelian categories by dictating
their action on pseudoelements. Thus, a usual style of proofs in abelian categories is this:
First, we construct some morphism using universal properties, and then we use diagram chasing
of pseudoelements to verify that it has some desirable property such as exactness.
It should be noted that the Freyd-Mitchell embedding theorem gives a vastly stronger notion of
pseudoelement (in particular one that gives extensionality). However, this theorem is quite
difficult to prove and probably out of reach for a formal proof for the time being.
## Main results
We define the type of pseudoelements of an object and, in particular, the zero pseudoelement.
We prove that every morphism maps the zero pseudoelement to the zero pseudoelement (`apply_zero`)
and that a zero morphism maps every pseudoelement to the zero pseudoelement (`zero_apply`)
Here are the metatheorems we provide:
* A morphism `f` is zero if and only if it is the zero function on pseudoelements.
* A morphism `f` is an epimorphism if and only if it is surjective on pseudoelements.
* A morphism `f` is a monomorphism if and only if it is injective on pseudoelements
if and only if `∀ a, f a = 0 → f = 0`.
* A sequence `f, g` of morphisms is exact if and only if
`∀ a, g (f a) = 0` and `∀ b, g b = 0 → ∃ a, f a = b`.
* If `f` is a morphism and `a, a'` are such that `f a = f a'`, then there is some
pseudoelement `a''` such that `f a'' = 0` and for every `g` we have
`g a' = 0 → g a = g a''`. We can think of `a''` as `a - a'`, but don't get too carried away
by that: pseudoelements of an object do not form an abelian group.
## Notations
We introduce coercions from an object of an abelian category to the set of its pseudoelements
and from a morphism to the function it induces on pseudoelements.
These coercions must be explicitly enabled via local instances:
`local attribute [instance] object_to_sort hom_to_fun`
## Implementation notes
It appears that sometimes the coercion from morphisms to functions does not work, i.e.,
writing `g a` raises a "function expected" error. This error can be fixed by writing
`(g : X ⟶ Y) a`.
## References
* [F. Borceux, *Handbook of Categorical Algebra 2*][borceux-vol2]
-/
open category_theory
open category_theory.limits
open category_theory.abelian
open category_theory.preadditive
universes v u
namespace category_theory.abelian
variables {C : Type u} [category.{v} C]
local attribute [instance] over.coe_from_hom
/-- This is just composition of morphisms in `C`. Another way to express this would be
`(over.map f).obj a`, but our definition has nicer definitional properties. -/
def app {P Q : C} (f : P ⟶ Q) (a : over P) : over Q :=
a.hom ≫ f
@[simp] lemma app_hom {P Q : C} (f : P ⟶ Q) (a : over P) : (app f a).hom = a.hom ≫ f := rfl
/-- Two arrows `f : X ⟶ P` and `g : Y ⟶ P` are called pseudo-equal if there is some object
`R` and epimorphisms `p : R ⟶ X` and `q : R ⟶ Y` such that `p ≫ f = q ≫ g`. -/
def pseudo_equal (P : C) (f g : over P) : Prop :=
∃ (R : C) (p : R ⟶ f.1) (q : R ⟶ g.1) [epi p] [epi q], p ≫ f.hom = q ≫ g.hom
lemma pseudo_equal_refl {P : C} : reflexive (pseudo_equal P) :=
λ f, ⟨f.1, 𝟙 f.1, 𝟙 f.1, by apply_instance, by apply_instance, by simp⟩
lemma pseudo_equal_symm {P : C} : symmetric (pseudo_equal P) :=
λ f g ⟨R, p, q, ep, eq, comm⟩, ⟨R, q, p, eq, ep, comm.symm⟩
variables [abelian.{v} C]
section
/-- Pseudoequality is transitive: Just take the pullback. The pullback morphisms will
be epimorphisms since in an abelian category, pullbacks of epimorphisms are epimorphisms. -/
lemma pseudo_equal_trans {P : C} : transitive (pseudo_equal P) :=
λ f g h ⟨R, p, q, ep, eq, comm⟩ ⟨R', p', q', ep', eq', comm'⟩,
begin
refine ⟨pullback q p', pullback.fst ≫ p, pullback.snd ≫ q', _, _, _⟩,
{ resetI, exact epi_comp _ _ },
{ resetI, exact epi_comp _ _ },
{ rw [category.assoc, comm, ←category.assoc, pullback.condition,
category.assoc, comm', category.assoc] }
end
end
/-- The arrows with codomain `P` equipped with the equivalence relation of being pseudo-equal. -/
def pseudoelement.setoid (P : C) : setoid (over P) :=
⟨_, ⟨pseudo_equal_refl, pseudo_equal_symm, pseudo_equal_trans⟩⟩
local attribute [instance] pseudoelement.setoid
/-- A `pseudoelement` of `P` is just an equivalence class of arrows ending in `P` by being
pseudo-equal. -/
def pseudoelement (P : C) : Type (max u v) := quotient (pseudoelement.setoid P)
namespace pseudoelement
/-- A coercion from an object of an abelian category to its pseudoelements. -/
def object_to_sort : has_coe_to_sort C :=
{ S := Type (max u v),
coe := λ P, pseudoelement P }
local attribute [instance] object_to_sort
/-- A coercion from an arrow with codomain `P` to its associated pseudoelement. -/
def over_to_sort {P : C} : has_coe (over P) (pseudoelement P) :=
⟨quot.mk (pseudo_equal P)⟩
local attribute [instance] over_to_sort
lemma over_coe_def {P Q : C} (a : Q ⟶ P) : (a : pseudoelement P) = ⟦a⟧ := rfl
/-- If two elements are pseudo-equal, then their composition with a morphism is, too. -/
lemma pseudo_apply_aux {P Q : C} (f : P ⟶ Q) (a b : over P) :
a ≈ b → app f a ≈ app f b :=
λ ⟨R, p, q, ep, eq, comm⟩,
⟨R, p, q, ep, eq, show p ≫ a.hom ≫ f = q ≫ b.hom ≫ f, by rw reassoc_of comm⟩
/-- A morphism `f` induces a function `pseudo_apply f` on pseudoelements. -/
def pseudo_apply {P Q : C} (f : P ⟶ Q) : P → Q :=
quotient.map (λ (g : over P), app f g) (pseudo_apply_aux f)
/-- A coercion from morphisms to functions on pseudoelements -/
def hom_to_fun {P Q : C} : has_coe_to_fun (P ⟶ Q) := ⟨_, pseudo_apply⟩
local attribute [instance] hom_to_fun
lemma pseudo_apply_mk {P Q : C} (f : P ⟶ Q) (a : over P) : f ⟦a⟧ = ⟦a.hom ≫ f⟧ :=
rfl
/-- Applying a pseudoelement to a composition of morphisms is the same as composing
with each morphism. Sadly, this is not a definitional equality, but at least it is
true. -/
theorem comp_apply {P Q R : C} (f : P ⟶ Q) (g : Q ⟶ R) (a : P) : (f ≫ g) a = g (f a) :=
quotient.induction_on a $ λ x, quotient.sound $
by { unfold app, rw [←category.assoc, over.coe_hom] }
/-- Composition of functions on pseudoelements is composition of morphisms. -/
theorem comp_comp {P Q R : C} (f : P ⟶ Q) (g : Q ⟶ R) : g ∘ f = f ≫ g :=
funext $ λ x, (comp_apply _ _ _).symm
section zero
/-!
In this section we prove that for every `P` there is an equivalence class that contains
precisely all the zero morphisms ending in `P` and use this to define *the* zero
pseudoelement.
-/
section
local attribute [instance] has_binary_biproducts.of_has_binary_products
/-- The arrows pseudo-equal to a zero morphism are precisely the zero morphisms -/
lemma pseudo_zero_aux {P : C} (Q : C) (f : over P) : f ≈ (0 : Q ⟶ P) ↔ f.hom = 0 :=
⟨λ ⟨R, p, q, ep, eq, comm⟩, by exactI zero_of_epi_comp p (by simp [comm]),
λ hf, ⟨biprod f.1 Q, biprod.fst, biprod.snd, by apply_instance, by apply_instance,
by rw [hf, over.coe_hom, has_zero_morphisms.comp_zero, has_zero_morphisms.comp_zero]⟩⟩
end
lemma zero_eq_zero' {P Q R : C} :
⟦((0 : Q ⟶ P) : over P)⟧ = ⟦((0 : R ⟶ P) : over P)⟧ :=
quotient.sound $ (pseudo_zero_aux R _).2 rfl
/-- The zero pseudoelement is the class of a zero morphism -/
def pseudo_zero {P : C} : P := ⟦(0 : P ⟶ P)⟧
instance {P : C} : has_zero P := ⟨pseudo_zero⟩
instance {P : C} : inhabited (pseudoelement P) := ⟨0⟩
lemma pseudo_zero_def {P : C} : (0 : pseudoelement P) = ⟦(0 : P ⟶ P)⟧ := rfl
@[simp] lemma zero_eq_zero {P Q : C} : ⟦((0 : Q ⟶ P) : over P)⟧ = (0 : pseudoelement P) :=
zero_eq_zero'
/-- The pseudoelement induced by an arrow is zero precisely when that arrow is zero -/
lemma pseudo_zero_iff {P : C} (a : over P) : (a : P) = 0 ↔ a.hom = 0 :=
by { rw ←pseudo_zero_aux P a, exact quotient.eq }
end zero
/-- Morphisms map the zero pseudoelement to the zero pseudoelement -/
@[simp] theorem apply_zero {P Q : C} (f : P ⟶ Q) : f 0 = 0 :=
by { rw [pseudo_zero_def, pseudo_apply_mk], simp }
/-- The zero morphism maps every pseudoelement to 0. -/
@[simp] theorem zero_apply {P : C} (Q : C) (a : P) : (0 : P ⟶ Q) a = 0 :=
quotient.induction_on a $ λ a',
by { rw [pseudo_zero_def, pseudo_apply_mk], simp }
/-- An extensionality lemma for being the zero arrow. -/
@[ext] theorem zero_morphism_ext {P Q : C} (f : P ⟶ Q) : (∀ a, f a = 0) → f = 0 :=
λ h, by { rw ←category.id_comp f, exact (pseudo_zero_iff ((𝟙 P ≫ f) : over Q)).1 (h (𝟙 P)) }
@[ext] theorem zero_morphism_ext' {P Q : C} (f : P ⟶ Q) : (∀ a, f a = 0) → 0 = f :=
eq.symm ∘ zero_morphism_ext f
theorem eq_zero_iff {P Q : C} (f : P ⟶ Q) : f = 0 ↔ ∀ a, f a = 0 :=
⟨λ h a, by simp [h], zero_morphism_ext _⟩
/-- A monomorphism is injective on pseudoelements. -/
theorem pseudo_injective_of_mono {P Q : C} (f : P ⟶ Q) [mono f] : function.injective f :=
λ abar abar', quotient.induction_on₂ abar abar' $ λ a a' ha, quotient.sound $
have ⟦(a.hom ≫ f : over Q)⟧ = ⟦a'.hom ≫ f⟧, by convert ha,
match quotient.exact this with ⟨R, p, q, ep, eq, comm⟩ :=
⟨R, p, q, ep, eq, (cancel_mono f).1 $ by { simp only [category.assoc], exact comm }⟩
end
/-- A morphism that is injective on pseudoelements only maps the zero element to zero. -/
lemma zero_of_map_zero {P Q : C} (f : P ⟶ Q) : function.injective f → ∀ a, f a = 0 → a = 0 :=
λ h a ha, by { rw ←apply_zero f at ha, exact h ha }
/-- A morphism that only maps the zero pseudoelement to zero is a monomorphism. -/
theorem mono_of_zero_of_map_zero {P Q : C} (f : P ⟶ Q) : (∀ a, f a = 0 → a = 0) → mono f :=
λ h, (mono_iff_cancel_zero _).2 $ λ R g hg, (pseudo_zero_iff (g : over P)).1 $ h _ $
show f g = 0, from (pseudo_zero_iff (g ≫ f : over Q)).2 hg
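/- Illustrative sketch (not part of the original file): with the local coercions
enabled, a typical use of the metatheorems is that a composite of monomorphisms
is injective on pseudoelements, e.g.

  example {P Q R : C} (f : P ⟶ Q) (g : Q ⟶ R) [mono f] [mono g]
    (a a' : P) (h : (g : Q ⟶ R) (f a) = g (f a')) : a = a' :=
  pseudo_injective_of_mono f (pseudo_injective_of_mono g h)

modulo the coercion caveat from the implementation notes above. -/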
section
/-- An epimorphism is surjective on pseudoelements. -/
theorem pseudo_surjective_of_epi {P Q : C} (f : P ⟶ Q) [epi f] : function.surjective f :=
λ qbar, quotient.induction_on qbar $ λ q, ⟨((pullback.fst : pullback f q.hom ⟶ P) : over P),
quotient.sound $ ⟨pullback f q.hom, 𝟙 (pullback f q.hom), pullback.snd, by apply_instance,
by apply_instance, by rw [category.id_comp, ←pullback.condition, app_hom, over.coe_hom]⟩⟩
end
/-- A morphism that is surjective on pseudoelements is an epimorphism. -/
theorem epi_of_pseudo_surjective {P Q : C} (f : P ⟶ Q) : function.surjective f → epi f :=
λ h, match h (𝟙 Q) with ⟨pbar, hpbar⟩ :=
match quotient.exists_rep pbar with ⟨p, hp⟩ :=
have ⟦(p.hom ≫ f : over Q)⟧ = ⟦𝟙 Q⟧, by { rw ←hp at hpbar, exact hpbar },
match quotient.exact this with ⟨R, x, y, ex, ey, comm⟩ :=
@epi_of_epi_fac _ _ _ _ _ (x ≫ p.hom) f y ey $
by { dsimp at comm, rw [category.assoc, comm], apply category.comp_id }
end
end
end
section
/-- Two morphisms in an exact sequence are exact on pseudoelements. -/
theorem pseudo_exact_of_exact {P Q R : C} {f : P ⟶ Q} {g : Q ⟶ R} [exact f g] :
(∀ a, g (f a) = 0) ∧ (∀ b, g b = 0 → ∃ a, f a = b) :=
⟨λ a, by { rw [←comp_apply, exact.w], exact zero_apply _ _ },
λ b', quotient.induction_on b' $ λ b hb,
have hb' : b.hom ≫ g = 0, from (pseudo_zero_iff _).1 hb,
begin
-- By exactness, b factors through im f = ker g via some c
obtain ⟨c, hc⟩ := kernel_fork.is_limit.lift' (is_limit_image f g) _ hb',
-- We compute the pullback of the map into the image and c.
-- The pseudoelement induced by the first pullback map will be our preimage.
use (pullback.fst : pullback (images.factor_thru_image f) c ⟶ P),
-- It remains to show that the image of this element under f is pseudo-equal to b.
apply quotient.sound,
-- pullback.snd is an epimorphism because the map onto the image is!
refine ⟨pullback (images.factor_thru_image f) c, 𝟙 _, pullback.snd,
by apply_instance, by apply_instance, _⟩,
-- Now we can verify that the diagram commutes.
calc 𝟙 (pullback (images.factor_thru_image f) c) ≫ pullback.fst ≫ f = pullback.fst ≫ f
: category.id_comp _
... = pullback.fst ≫ images.factor_thru_image f ≫ kernel.ι (cokernel.π f)
: by rw images.image.fac
... = (pullback.snd ≫ c) ≫ kernel.ι (cokernel.π f)
: by rw [←category.assoc, pullback.condition]
... = pullback.snd ≫ b.hom
: by { rw category.assoc, congr' }
end⟩
end
lemma apply_eq_zero_of_comp_eq_zero {P Q R : C} (f : Q ⟶ R) (a : P ⟶ Q) : a ≫ f = 0 → f a = 0 :=
λ h, by simp [over_coe_def, pseudo_apply_mk, over.coe_hom, h]
section
/-- If two morphisms are exact on pseudoelements, they are exact. -/
theorem exact_of_pseudo_exact {P Q R : C} (f : P ⟶ Q) (g : Q ⟶ R) :
(∀ a, g (f a) = 0) ∧ (∀ b, g b = 0 → ∃ a, f a = b) → exact f g :=
λ ⟨h₁, h₂⟩, (abelian.exact_iff _ _).2 ⟨zero_morphism_ext _ $ λ a, by rw [comp_apply, h₁ a],
begin
-- If we apply g to the pseudoelement induced by its kernel, we get 0 (of course!).
have : g (kernel.ι g) = 0 := apply_eq_zero_of_comp_eq_zero _ _ (kernel.condition _),
-- By pseudo-exactness, we get a preimage.
obtain ⟨a', ha⟩ := h₂ _ this,
obtain ⟨a, ha'⟩ := quotient.exists_rep a',
rw ←ha' at ha,
obtain ⟨Z, r, q, er, eq, comm⟩ := quotient.exact ha,
-- Consider the pullback of kernel.ι (cokernel.π f) and kernel.ι g.
-- The commutative diagram given by the pseudo-equality f a = b induces
-- a cone over this pullback, so we get a factorization z.
obtain ⟨z, hz₁, hz₂⟩ := @pullback.lift' _ _ _ _ _ _ (kernel.ι (cokernel.π f)) (kernel.ι g) _
(r ≫ a.hom ≫ images.factor_thru_image f) q
(by { simp only [category.assoc, images.image.fac], exact comm }),
-- Let's give a name to the second pullback morphism.
let j : pullback (kernel.ι (cokernel.π f)) (kernel.ι g) ⟶ kernel g := pullback.snd,
-- Since q is an epimorphism, in particular this means that j is an epimorphism.
haveI pe : epi j := by exactI epi_of_epi_fac hz₂,
-- But it is also a monomorphism, because kernel.ι (cokernel.π f) is: A kernel is
-- always a monomorphism and the pullback of a monomorphism is a monomorphism.
-- But mono + epi = iso, so j is an isomorphism.
haveI : is_iso j := is_iso_of_mono_of_epi _,
-- But then kernel.ι g can be expressed using all of the maps of the pullback square, and we
-- are done.
rw (iso.eq_inv_comp (as_iso j)).2 pullback.condition.symm,
simp only [category.assoc, kernel.condition, has_zero_morphisms.comp_zero]
end⟩
end
/-- If two pseudoelements `x` and `y` have the same image under some morphism `f`, then we can form
their "difference" `z`. This pseudoelement has the properties that `f z = 0` and for all
morphisms `g`, if `g y = 0` then `g z = g x`. -/
theorem sub_of_eq_image {P Q : C} (f : P ⟶ Q) (x y : P) : f x = f y →
∃ z, f z = 0 ∧ ∀ (R : C) (g : P ⟶ R), (g : P ⟶ R) y = 0 → g z = g x :=
quotient.induction_on₂ x y $ λ a a' h,
match quotient.exact h with ⟨R, p, q, ep, eq, comm⟩ :=
let a'' : R ⟶ P := p ≫ a.hom - q ≫ a'.hom in ⟨a'',
⟨show ⟦((p ≫ a.hom - q ≫ a'.hom) ≫ f : over Q)⟧ = ⟦(0 : Q ⟶ Q)⟧,
by { dsimp at comm, simp [sub_eq_zero.2 comm] },
λ Z g hh,
begin
obtain ⟨X, p', q', ep', eq', comm'⟩ := quotient.exact hh,
have : a'.hom ≫ g = 0,
{ apply (epi_iff_cancel_zero _).1 ep' _ (a'.hom ≫ g),
simpa using comm' },
apply quotient.sound,
-- Can we prevent quotient.sound from giving us this weird `coe_b` thingy?
change app g (a'' : over P) ≈ app g a,
exact ⟨R, 𝟙 R, p, by apply_instance, ep, by simp [sub_eq_add_neg, this]⟩
end⟩⟩
end
variable [limits.has_pullbacks C]
/-- If `f : P ⟶ R` and `g : Q ⟶ R` are morphisms and `p : P` and `q : Q` are pseudoelements such
that `f p = g q`, then there is some `s : pullback f g` such that `fst s = p` and `snd s = q`.
Remark: Borceux claims that `s` is unique. I was unable to transform his proof sketch into
a pen-and-paper proof of this fact, so naturally I was not able to formalize the proof. -/
theorem pseudo_pullback {P Q R : C} {f : P ⟶ R} {g : Q ⟶ R} {p : P} {q : Q} : f p = g q →
∃ s, (pullback.fst : pullback f g ⟶ P) s = p ∧ (pullback.snd : pullback f g ⟶ Q) s = q :=
quotient.induction_on₂ p q $ λ x y h,
begin
obtain ⟨Z, a, b, ea, eb, comm⟩ := quotient.exact h,
obtain ⟨l, hl₁, hl₂⟩ := @pullback.lift' _ _ _ _ _ _ f g _ (a ≫ x.hom) (b ≫ y.hom)
(by { simp only [category.assoc], exact comm }),
exact ⟨l, ⟨quotient.sound ⟨Z, 𝟙 Z, a, by apply_instance, ea, by rwa category.id_comp⟩,
quotient.sound ⟨Z, 𝟙 Z, b, by apply_instance, eb, by rwa category.id_comp⟩⟩⟩
end
end pseudoelement
end category_theory.abelian
|
{"author": "JLimperg", "repo": "aesop3", "sha": "a4a116f650cc7403428e72bd2e2c4cda300fe03f", "save_path": "github-repos/lean/JLimperg-aesop3", "path": "github-repos/lean/JLimperg-aesop3/aesop3-a4a116f650cc7403428e72bd2e2c4cda300fe03f/src/category_theory/abelian/pseudoelements.lean"}
|
import os
import pickle
import numpy as np
from sklearn.metrics import precision_score, recall_score, f1_score, precision_recall_curve
import matplotlib.pyplot as plt
from utils.preprocess import down_sample
def point_adjust_k(scores, targets, thres, k=20):
"""
:param scores: anomaly score
:param targets: target label
:param thres: threshold
:param k: percentage of an anomaly segment that must be detected before point adjustment is applied; 0 is equivalent to conventional point adjustment
:return: point-adjusted binary predictions
"""
# print("Point adjust renew with K: {}".format(k))
try:
scores = np.asarray(scores)
targets = np.asarray(targets)
except TypeError:
scores = np.asarray(scores.cpu())
targets = np.asarray(targets.cpu())
T = scores.shape
predict = scores > thres
actual = targets > 0.1
one_start_idx = np.where(np.diff(actual, prepend=0) == 1)[0]
zero_start_idx = np.where(np.diff(actual, prepend=0) == -1)[0]
assert len(one_start_idx) == len(zero_start_idx) + 1 or len(one_start_idx) == len(zero_start_idx)
if len(one_start_idx) == len(zero_start_idx) + 1:
# the last anomaly segment runs to the end of the series, so close it at T[0];
# using -1 here would make the segment-length term below negative
zero_start_idx = np.append(zero_start_idx, T[0])
for i in range(len(one_start_idx)):
if predict[one_start_idx[i]: zero_start_idx[i]].sum() > k / 100 * (zero_start_idx[i] - one_start_idx[i]):
predict[one_start_idx[i]: zero_start_idx[i]] = 1
return predict[:T[0]]
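# Minimal illustrative check of point_adjust_k on synthetic data (all values
# made up, not from any benchmark): the labeled segment spans indices 2-5 and
# 2 of its 4 points exceed the threshold, so with k=20 (50% > 20%) the whole
# segment is flagged.
_demo_scores = np.array([0.1, 0.2, 0.9, 0.1, 0.1, 0.8, 0.1, 0.1])
_demo_labels = np.array([0, 0, 1, 1, 1, 1, 0, 0])
assert point_adjust_k(_demo_scores, _demo_labels, thres=0.5, k=20).sum() == 4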
def best_threshold_search(scores, gt_labels, start, end=None, step_num=1):
"""
Scan thresholds over score percentiles in [start, end] and return the best F1 score, its threshold, and (precision, recall, f1)
"""
if step_num is None or end is None:
end = start
step_num = 1
search_step, search_range, search_lower_bound = step_num, end - start, start
percentile = search_lower_bound
pred_labels = np.zeros(len(scores))
best = -1.
best_threshold = 0.0
best_percentile = 0.0
for i in range(search_step):
percentile += search_range / search_step
percentile = np.round(percentile, 2)
threshold = np.percentile(scores, percentile)
pred_labels = np.zeros(len(scores))
pred_labels[scores > threshold] = 1
target = f1_score(gt_labels, pred_labels)
if target > best:
best_threshold = threshold
best_percentile = percentile
best = target
# best_threshold = np.percentile(scores, best_threshold)
pred_labels = np.zeros(len(scores))
pred_labels[scores > best_threshold] = 1
# print(f'best percentile={best_percentile}, threshold={best_threshold}')
print(f'{best_percentile},{best_threshold}')
precision = precision_score(gt_labels, pred_labels)
recall = recall_score(gt_labels, pred_labels)
f1 = f1_score(gt_labels, pred_labels)
# print(f'precision={precision:.4f}, recall={recall:.4f}, f1-score={f1:.4f}')
acc = [precision, recall, f1]
return best, best_threshold, acc
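# Hedged usage sketch on the same synthetic data: scan percentiles 50-100 of
# the scores for the F1-maximizing threshold (prints `percentile,threshold`).
_best_f1, _thres, _acc = best_threshold_search(_demo_scores, _demo_labels, 50, 100, step_num=10)
assert _best_f1 > 0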
############################# start ####################################
# path = f'/home/kj21.choi/hdd/04_AAAI/THOC/wadi/'
# with open(os.path.join(path, 'test.pkl'), 'rb') as f:
# x_test = pickle.load(f)
# x_test = np.asarray(x_test).T
# x_test = down_sample(x_test, 5)
# y_test = np.asarray(x_test[:, -1])
# x_test = x_test[:, :-1]
# with open(os.path.join(path, 'test.pkl'), 'rb') as f:
# x_test = pickle.load(f)
# x_test = np.asarray(x_test).T
# y_test = np.asarray(x_test[:, -1])
# x_test = x_test[:, :-1]
# with open(os.path.join(path, 'label.pkl'), 'rb') as f:
# y_test = np.asarray(pickle.load(f))
# y_test = down_sample(y_test, 10)
# scores = np.load(f'/home/kj21.choi/PycharmProjects/ANTLAD/result/swat.npy')
# path_scores = '/home/kj21.choi/PycharmProjects/ANTLAD/result/siwon_OmniAnomaly/'
# with open(os.path.join(path_scores, 'wadi.pkl'), 'rb') as f:
# scores = np.asarray(pickle.load(f))
#
# pred_labels = np.zeros(len(scores))
# T = len(pred_labels)
# gt_labels = np.array(y_test[:T])
# for i in range(T):
# pred_labels[i] = int(pred_labels[i])
# gt_labels[i] = int(gt_labels[i])
#
# # best threshold search
# best_f1, thres, acc = best_threshold_search(scores, gt_labels, 85, 100, step_num=300)
# # print(f'best F1-score:{best_f1}, threshold:{thres}, accuracy={acc}')
#
# list_k = [0, 1, 2, 3, 4, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
# for item in list_k:
# # print('K:{}'.format(item))
#
# point_adjusted_scores = point_adjust_k(scores, gt_labels, thres, k=item)
# precision = precision_score(gt_labels, point_adjusted_scores)
# recall = recall_score(gt_labels, point_adjusted_scores)
# f1 = f1_score(gt_labels, point_adjusted_scores)
# # print(f'precision={precision:.4f}, recall={recall:.4f}, f1-score={f1:.4f}')
# print(f'{precision:.4f}, {recall:.4f}, {f1:.4f}')
path = './data/smd/'
server_list = ['machine-1-1', 'machine-1-2', 'machine-1-3', 'machine-1-4', 'machine-1-5', 'machine-1-6','machine-1-7', 'machine-1-8',
'machine-2-1', 'machine-2-2', 'machine-2-3', 'machine-2-4', 'machine-2-5', 'machine-2-6', 'machine-2-7', 'machine-2-8', 'machine-2-9',
'machine-3-1', 'machine-3-2', 'machine-3-3', 'machine-3-4', 'machine-3-5', 'machine-3-6', 'machine-3-7', 'machine-3-8', 'machine-3-9', 'machine-3-10', 'machine-3-11']
for machine in server_list:
print(f'{machine} PAK results:')
with open(os.path.join(path, f'{machine}_label.pkl'), 'rb') as f:
y_test = np.asarray(pickle.load(f))
y_test = down_sample(y_test, 5)
# scores = np.load(f'/home/kj21.choi/PycharmProjects/ANTLAD/result/siwon_MSCRED/{machine}_scores.npy')
path_scores = '/home/kj21.choi/PycharmProjects/ANTLAD/result/siwon_OmniAnomaly/'
with open(os.path.join(path_scores, f'd_{machine}.pkl'), 'rb') as f:
scores = np.asarray(pickle.load(f))
pred_labels = np.zeros(len(scores))
T = len(pred_labels)
gt_labels = np.array(y_test[:T])
pred_labels = pred_labels.astype(int)
gt_labels = gt_labels.astype(int)
# best threshold search
best_f1, thres, acc = best_threshold_search(scores, gt_labels, 85, 100, step_num=150)
# print(f'best F1-score:{best_f1}, threshold:{thres}, accuracy={acc}')
list_k = [0, 1, 2, 3, 4, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
for item in list_k:
# print('K:{}'.format(item))
point_adjusted_scores = point_adjust_k(scores, gt_labels, thres, k=item)
precision = precision_score(gt_labels, point_adjusted_scores)
recall = recall_score(gt_labels, point_adjusted_scores)
f1 = f1_score(gt_labels, point_adjusted_scores)
# print(f'precision={precision:.4f}, recall={recall:.4f}, f1-score={f1:.4f}')
print(f'{precision:.4f}, {recall:.4f}, {f1:.4f}')
|
{"hexsha": "49c26c5d68347c85a3230a7f1fc730f1d9769d09", "size": 6707, "ext": "py", "lang": "Python", "max_stars_repo_path": "pa_k_experiments.py", "max_stars_repo_name": "kj21choi/LATAD", "max_stars_repo_head_hexsha": "80d91e0f251ad0225342ee30e2461a39fa9cca97", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-20T06:59:50.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-20T06:59:50.000Z", "max_issues_repo_path": "pa_k_experiments.py", "max_issues_repo_name": "kj21choi/LATAD", "max_issues_repo_head_hexsha": "80d91e0f251ad0225342ee30e2461a39fa9cca97", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pa_k_experiments.py", "max_forks_repo_name": "kj21choi/LATAD", "max_forks_repo_head_hexsha": "80d91e0f251ad0225342ee30e2461a39fa9cca97", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.4036144578, "max_line_length": 181, "alphanum_fraction": 0.6429103921, "include": true, "reason": "import numpy", "num_tokens": 1992}
|
cc ------------ dpmjet3.4 - authors: S.Roesler, R.Engel, J.Ranft -------
cc -------- phojet1.12-40 - authors: S.Roesler, R.Engel, J.Ranft -------
cc - oct'13 -------
cc ----------- pythia-6.4 - authors: Torbjorn Sjostrand, Lund'10 -------
cc ---------------------------------------------------------------------
cc converted for use with FLUKA -------
cc - oct'13 -------
C...PYMEMX
C...Generates maximum ME weight in some initial-state showers.
C...Inparameter MECOR: kind of hard scattering process
C...Outparameter WTFF: maximum weight for fermion -> fermion
C... WTGF: maximum weight for gluon/photon -> fermion
C... WTFG: maximum weight for fermion -> gluon/photon
C... WTGG: maximum weight for gluon -> gluon
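C...Illustrative call (argument names are the caller's own variables):
C... CALL PYMEMX(MECOR,WTFF,WTGF,WTFG,WTGG)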
SUBROUTINE PYMEMX(MECOR,WTFF,WTGF,WTFG,WTGG)
C...Double precision and integer declarations.
IMPLICIT DOUBLE PRECISION(A-H, O-Z)
IMPLICIT INTEGER(I-N)
C...Commonblocks.
include 'inc/pyjets'
include 'inc/pydat1'
include 'inc/pypars'
include 'inc/pyint1'
include 'inc/pyint2'
C...Default maximum weight.
WTFF=1D0
WTGF=1D0
WTFG=1D0
WTGG=1D0
C...Select maximum weight by process.
IF(MECOR.EQ.1) THEN
WTFF=1D0
WTGF=3D0
ELSEIF(MECOR.EQ.2) THEN
WTFG=1D0
WTGG=1D0
ENDIF
RETURN
END
|
{"hexsha": "3a13a7bd13803b9571f0c8c6eeb0ed293c288bd0", "size": 1509, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/pythia/pymemx.f", "max_stars_repo_name": "pzhristov/DPMJET", "max_stars_repo_head_hexsha": "946e001290ca5ece608d7e5d1bfc7311cda7ebaa", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-06-15T01:59:00.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-01T08:39:13.000Z", "max_issues_repo_path": "src/pythia/pymemx.f", "max_issues_repo_name": "pzhristov/DPMJET", "max_issues_repo_head_hexsha": "946e001290ca5ece608d7e5d1bfc7311cda7ebaa", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-03-15T09:53:05.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T20:52:28.000Z", "max_forks_repo_path": "src/pythia/pymemx.f", "max_forks_repo_name": "pzhristov/DPMJET", "max_forks_repo_head_hexsha": "946e001290ca5ece608d7e5d1bfc7311cda7ebaa", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-07-05T02:44:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-20T20:49:05.000Z", "avg_line_length": 32.1063829787, "max_line_length": 72, "alphanum_fraction": 0.5168986083, "num_tokens": 412}
|
[STATEMENT]
lemma rQuot_empty[simp]: "rQuot a {} = {}"
and rQuot_epsilon[simp]: "rQuot a {[]} = {}"
and rQuot_char[simp]: "rQuot a {[b]} = (if a = b then {[]} else {})"
and rQuot_union[simp]: "rQuot a (A \<union> B) = rQuot a A \<union> rQuot a B"
and rQuot_inter[simp]: "rQuot a (A \<inter> B) = rQuot a A \<inter> rQuot a B"
and rQuot_compl[simp]: "rQuot a (-A) = - rQuot a A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (rQuot a {} = {} &&& rQuot a {[]} = {} &&& rQuot a {[b]} = (if a = b then {[]} else {})) &&& rQuot a (A \<union> B) = rQuot a A \<union> rQuot a B &&& rQuot a (A \<inter> B) = rQuot a A \<inter> rQuot a B &&& rQuot a (- A) = - rQuot a A
[PROOF STEP]
by (auto simp: rQuot_def)
|
{"llama_tokens": 344, "file": "MSO_Regex_Equivalence_Pi_Regular_Set", "length": 1}
|
[STATEMENT]
lemma powser_times_n_limit_0:
fixes x :: "'a::{real_normed_div_algebra,banach}"
assumes "norm x < 1"
shows "(\<lambda>n. of_nat n * x ^ n) \<longlonglongrightarrow> 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>n. of_nat n * x ^ n) \<longlonglongrightarrow> (0::'a)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (\<lambda>n. of_nat n * x ^ n) \<longlonglongrightarrow> (0::'a)
[PROOF STEP]
have "norm x / (1 - norm x) \<ge> 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 \<le> norm x / (1 - norm x)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
norm x < 1
goal (1 subgoal):
1. 0 \<le> norm x / (1 - norm x)
[PROOF STEP]
by (auto simp: field_split_simps)
[PROOF STATE]
proof (state)
this:
0 \<le> norm x / (1 - norm x)
goal (1 subgoal):
1. (\<lambda>n. of_nat n * x ^ n) \<longlonglongrightarrow> (0::'a)
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
0 \<le> norm x / (1 - norm x)
goal (1 subgoal):
1. (\<lambda>n. of_nat n * x ^ n) \<longlonglongrightarrow> (0::'a)
[PROOF STEP]
obtain N where N: "norm x / (1 - norm x) < of_int N"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>N. norm x / (1 - norm x) < real_of_int N \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using ex_le_of_int
[PROOF STATE]
proof (prove)
using this:
\<exists>z. ?x \<le> of_int z
goal (1 subgoal):
1. (\<And>N. norm x / (1 - norm x) < real_of_int N \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (meson ex_less_of_int)
[PROOF STATE]
proof (state)
this:
norm x / (1 - norm x) < real_of_int N
goal (1 subgoal):
1. (\<lambda>n. of_nat n * x ^ n) \<longlonglongrightarrow> (0::'a)
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
0 \<le> norm x / (1 - norm x)
norm x / (1 - norm x) < real_of_int N
[PROOF STEP]
have N0: "N>0"
[PROOF STATE]
proof (prove)
using this:
0 \<le> norm x / (1 - norm x)
norm x / (1 - norm x) < real_of_int N
goal (1 subgoal):
1. 0 < N
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
0 < N
goal (1 subgoal):
1. (\<lambda>n. of_nat n * x ^ n) \<longlonglongrightarrow> (0::'a)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
0 < N
[PROOF STEP]
have *: "real_of_int (N + 1) * norm x / real_of_int N < 1"
[PROOF STATE]
proof (prove)
using this:
0 < N
goal (1 subgoal):
1. real_of_int (N + 1) * norm x / real_of_int N < 1
[PROOF STEP]
using N assms
[PROOF STATE]
proof (prove)
using this:
0 < N
norm x / (1 - norm x) < real_of_int N
norm x < 1
goal (1 subgoal):
1. real_of_int (N + 1) * norm x / real_of_int N < 1
[PROOF STEP]
by (auto simp: field_simps)
[PROOF STATE]
proof (state)
this:
real_of_int (N + 1) * norm x / real_of_int N < 1
goal (1 subgoal):
1. (\<lambda>n. of_nat n * x ^ n) \<longlonglongrightarrow> (0::'a)
[PROOF STEP]
have **: "real_of_int N * (norm x * (real_of_nat (Suc n) * norm (x ^ n))) \<le>
real_of_nat n * (norm x * ((1 + N) * norm (x ^ n)))" if "N \<le> int n" for n :: nat
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. real_of_int N * (norm x * (real (Suc n) * norm (x ^ n))) \<le> real n * (norm x * (real_of_int (1 + N) * norm (x ^ n)))
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. real_of_int N * (norm x * (real (Suc n) * norm (x ^ n))) \<le> real n * (norm x * (real_of_int (1 + N) * norm (x ^ n)))
[PROOF STEP]
from that
[PROOF STATE]
proof (chain)
picking this:
N \<le> int n
[PROOF STEP]
have "real_of_int N * real_of_nat (Suc n) \<le> real_of_nat n * real_of_int (1 + N)"
[PROOF STATE]
proof (prove)
using this:
N \<le> int n
goal (1 subgoal):
1. real_of_int N * real (Suc n) \<le> real n * real_of_int (1 + N)
[PROOF STEP]
by (simp add: algebra_simps)
[PROOF STATE]
proof (state)
this:
real_of_int N * real (Suc n) \<le> real n * real_of_int (1 + N)
goal (1 subgoal):
1. real_of_int N * (norm x * (real (Suc n) * norm (x ^ n))) \<le> real n * (norm x * (real_of_int (1 + N) * norm (x ^ n)))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
real_of_int N * real (Suc n) \<le> real n * real_of_int (1 + N)
[PROOF STEP]
have "(real_of_int N * real_of_nat (Suc n)) * (norm x * norm (x ^ n)) \<le>
(real_of_nat n * (1 + N)) * (norm x * norm (x ^ n))"
[PROOF STATE]
proof (prove)
using this:
real_of_int N * real (Suc n) \<le> real n * real_of_int (1 + N)
goal (1 subgoal):
1. real_of_int N * real (Suc n) * (norm x * norm (x ^ n)) \<le> real n * real_of_int (1 + N) * (norm x * norm (x ^ n))
[PROOF STEP]
using N0 mult_mono
[PROOF STATE]
proof (prove)
using this:
real_of_int N * real (Suc n) \<le> real n * real_of_int (1 + N)
0 < N
\<lbrakk>?a \<le> ?b; ?c \<le> ?d; (0::?'a) \<le> ?b; (0::?'a) \<le> ?c\<rbrakk> \<Longrightarrow> ?a * ?c \<le> ?b * ?d
goal (1 subgoal):
1. real_of_int N * real (Suc n) * (norm x * norm (x ^ n)) \<le> real n * real_of_int (1 + N) * (norm x * norm (x ^ n))
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
real_of_int N * real (Suc n) * (norm x * norm (x ^ n)) \<le> real n * real_of_int (1 + N) * (norm x * norm (x ^ n))
goal (1 subgoal):
1. real_of_int N * (norm x * (real (Suc n) * norm (x ^ n))) \<le> real n * (norm x * (real_of_int (1 + N) * norm (x ^ n)))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
real_of_int N * real (Suc n) * (norm x * norm (x ^ n)) \<le> real n * real_of_int (1 + N) * (norm x * norm (x ^ n))
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
real_of_int N * real (Suc n) * (norm x * norm (x ^ n)) \<le> real n * real_of_int (1 + N) * (norm x * norm (x ^ n))
goal (1 subgoal):
1. real_of_int N * (norm x * (real (Suc n) * norm (x ^ n))) \<le> real n * (norm x * (real_of_int (1 + N) * norm (x ^ n)))
[PROOF STEP]
by (simp add: algebra_simps)
[PROOF STATE]
proof (state)
this:
real_of_int N * (norm x * (real (Suc n) * norm (x ^ n))) \<le> real n * (norm x * (real_of_int (1 + N) * norm (x ^ n)))
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
N \<le> int ?n \<Longrightarrow> real_of_int N * (norm x * (real (Suc ?n) * norm (x ^ ?n))) \<le> real ?n * (norm x * (real_of_int (1 + N) * norm (x ^ ?n)))
goal (1 subgoal):
1. (\<lambda>n. of_nat n * x ^ n) \<longlonglongrightarrow> (0::'a)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>n. of_nat n * x ^ n) \<longlonglongrightarrow> (0::'a)
[PROOF STEP]
using *
[PROOF STATE]
proof (prove)
using this:
real_of_int (N + 1) * norm x / real_of_int N < 1
goal (1 subgoal):
1. (\<lambda>n. of_nat n * x ^ n) \<longlonglongrightarrow> (0::'a)
[PROOF STEP]
by (rule summable_LIMSEQ_zero [OF summable_ratio_test, where N1="nat N"])
(simp add: N0 norm_mult field_simps ** del: of_nat_Suc of_int_add)
[PROOF STATE]
proof (state)
this:
(\<lambda>n. of_nat n * x ^ n) \<longlonglongrightarrow> (0::'a)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 3103, "file": null, "length": 32}
|
"""
Tests for constraint module
"""
import numpy as np
import pytest
import quta.constraints as cons
def test_concatenation():
"""
Test for checking that concatenation of sets
of linear constraints works.
"""
C_0 = np.eye(3)
b_0 = np.zeros(3)
n_0 = 0
C_1 = np.eye(3) * 4
b_1 = np.ones(3) * 4
n_1 = 1
C_out, b_out, n_out = cons.concatenate_constraints((C_0, b_0, n_0), (C_1, b_1, n_1))
assert C_out.shape[0] == C_0.shape[0] + C_1.shape[0]
assert C_out.shape[1] == C_0.shape[1] == C_1.shape[1]
assert b_out.shape[0] == b_0.shape[0] + b_1.shape[0]
assert n_out == n_0 + n_1
assert np.all(
C_out == [[4, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 4, 0], [0, 0, 4]]
)
assert np.all(b_out == [4, 0, 0, 0, 4, 4])
def test_padding():
"""
Test for checking the padding of a single
linear constraint set
"""
C_0 = np.eye(3)
C_out = cons.pad_constraints(C_0, 4, 9)
assert C_out.shape == (3, 9)
with pytest.raises(cons.PaddingError):
C_out = cons.pad_constraints(C_0, 4, 6)
def test_abstract_base_classes():
"""
Test for checking the abstract base classes
"""
with pytest.raises(TypeError):
cons.Constraint()
with pytest.raises(TypeError):
cons.Constraint2D()
def test_1D_constraint():
# Along first variable axis
p0 = (-1, 0)
p1 = (1, 0)
c = cons.Constraint1D(p0, p1)
C, b, n = c.constraints
assert np.all(C == [[0, 1], [1, 0], [-1, 0]])
assert np.all(b == [0, -1, -1])
assert n == 1
# Along second variable axis
p0 = (0, -1)
p1 = (0, 1)
c = cons.Constraint1D(p0, p1)
C, b, n = c.constraints
assert np.all(C == [[1, 0], [0, 1], [0, -1]])
assert np.all(b == [0, -1, -1])
assert n == 1
# Along equal variable axis
p0 = (-1, -1)
p1 = (1, 1)
c = cons.Constraint1D(p0, p1)
C, b, n = c.constraints
assert np.all(C == [[-1, 1], [1, 0], [0, 1], [-1, 0], [0, -1]])
assert np.all(b == [0, -1, -1, -1, -1])
assert n == 1
def test_circle_constraint():
c = cons.CircleConstraint(1, 4)
C, b, n = c.constraints
C_comp = np.array([[-1, 1], [-1, -1], [1, -1], [1, 1]], dtype=float)
assert np.allclose(C, C_comp)
assert np.all(b == [-1, -1, -1, -1])
assert n == 0
def test_sector_constraint():
c = cons.SectorConstraint(1, np.deg2rad(0), np.deg2rad(90), 1)
C, b, n = c.constraints
C_comp = np.array(
[
[1.00000000e00, -6.12323400e-17],
[-0.00000000e00, 1.00000000e00],
[-5.00000000e-01, -1.33974596e-01],
[-3.66025404e-01, -3.66025404e-01],
[-1.33974596e-01, -5.00000000e-01],
]
)
b_comp = np.array([-0.0, -0.0, -0.5, -0.5, -0.5])
assert np.allclose(C, C_comp)
assert np.allclose(b, b_comp)
assert n == 0
|
{"hexsha": "7297406175673b656ec7b0499765d40974a47d59", "size": 2899, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/constraint_test.py", "max_stars_repo_name": "freol35241/quota", "max_stars_repo_head_hexsha": "8fafbe10d474cd18d8d18d48b497ecfe7d786189", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2018-09-09T17:26:41.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-19T08:09:32.000Z", "max_issues_repo_path": "test/constraint_test.py", "max_issues_repo_name": "freol35241/quota", "max_issues_repo_head_hexsha": "8fafbe10d474cd18d8d18d48b497ecfe7d786189", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-05-12T18:41:25.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-23T07:42:53.000Z", "max_forks_repo_path": "test/constraint_test.py", "max_forks_repo_name": "freol35241/quta", "max_forks_repo_head_hexsha": "8fafbe10d474cd18d8d18d48b497ecfe7d786189", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.5691056911, "max_line_length": 88, "alphanum_fraction": 0.5398413246, "include": true, "reason": "import numpy", "num_tokens": 1084}
|
import pandas as pd
import scipy as sp
from scipy.stats import t
import numpy as np
# CREDITS: this script is written by github user <glesserd> (https://github.com/glesserd)
# Originally available at <https://gist.github.com/glesserd/406519a4a79a49efb2353cfe05bcc6ee>
#from: http://www.cookbook-r.com/Graphs/Plotting_means_and_error_bars_%28ggplot2%29/
## Gives count, mean, standard deviation, standard error of the mean, and confidence interval (default 95%).
## data: a data frame.
## measurevar: a list of column names containing the variables to be summarized
## groupvars: a list of column names that contain grouping variables
## conf_interval: the percent range of the confidence interval (default is 95%)
def summarySE(data, measurevar, groupvars, conf_interval=0.95):
def std(s):
return np.std(s, ddof=1)
def stde(s):
return std(s) / np.sqrt(len(s))
def ci(s):
# Confidence interval multiplier for standard error
# Calculate t-statistic for confidence interval:
# e.g., if conf_interval is .95, use .975 (above/below), and use df=N-1
ciMult = t.ppf(conf_interval/2.0 + .5, len(s)-1)
return stde(s)*ciMult
def ciUp(s):
return np.mean(s)+ci(s)
def ciDown(s):
return np.mean(s)-ci(s)
data = data[groupvars+measurevar].groupby(groupvars).agg([len, np.mean, std, stde, ciUp, ciDown, ci])
data.reset_index(inplace=True)
data.columns = groupvars+ ['_'.join(col).strip() for col in data.columns.values[len(groupvars):]]
return data
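# Hedged usage sketch (synthetic data, not from the original cookbook recipe):
# df = pd.DataFrame({"group": ["a", "a", "b", "b"], "score": [1.0, 2.0, 3.0, 5.0]})
# summarySE(df, measurevar=["score"], groupvars=["group"])
# -> one row per group with score_len, score_mean, score_std, score_stde,
# score_ciUp, score_ciDown and score_ci columns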
#from: http://www.cookbook-r.com/Graphs/Plotting_means_and_error_bars_%28ggplot2%29/
## Norms the data within specified groups in a data frame; it normalizes each
## subject (identified by idvar) so that they have the same mean, within each group
## specified by betweenvars.
## data: a data frame.
## idvar: a list of column names that identify each subject (or matched subjects)
## measurevar: a list of column names containing the variables to be summarized
## betweenvars: a list of column names that are between-subjects variables
def normDataWithin(data, idvar, measurevar, betweenvars=[]):
def std(s):
return np.std(s, ddof=1)
#temp = data[data.cond == "PC_IDLE"]
#temp = temp[idvar+betweenvars+measurevar]
#temp.columns = idvar+betweenvars + [x+"_PC_IDLE" for x in measurevar]
data_subjMean = data.groupby(idvar+betweenvars).agg([np.mean])
data_subjMean.reset_index(inplace=True)
data_subjMean.columns = idvar+betweenvars + ['_'.join(col).strip() for col in data_subjMean.columns.values[len(idvar+betweenvars):]]
data = pd.merge(data, data_subjMean, on=idvar+betweenvars)
#data = pd.merge(data, temp, on=idvar+betweenvars)
for obj in measurevar:
data[obj+"_norm"] = data[obj] - data[obj+"_mean"] + data[obj].mean()
#data[obj+"_norm"] = std(data[obj])/data[obj+"_std"]*(data[obj] - data[obj+"_mean"]) + data[obj].mean()
#data[obj+"_norm"] = data[obj] - data[obj+"_PC_IDLE"]
#del data[obj+"_mean"]
#del data[obj+"_std"]
return data
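# Hedged usage sketch for repeated measures:
# df = pd.DataFrame({"subject": [1, 1, 2, 2], "rt": [10.0, 12.0, 20.0, 26.0]})
# normDataWithin(df, idvar=["subject"], measurevar=["rt"])
# adds rt_mean (each subject's own mean: 11, 11, 23, 23) and rt_norm, the
# values recentred on the grand mean 17: [16, 18, 14, 20]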
#from: http://www.cookbook-r.com/Graphs/Plotting_means_and_error_bars_%28ggplot2%29/
## Summarizes data, handling within-subjects variables by removing inter-subject variability.
## It will still work if there are no within-S variables.
## Gives count, un-normed mean, normed mean (with same between-group mean),
## standard deviation, standard error of the mean, and confidence interval.
## If there are within-subject variables, calculate adjusted values using method from Morey (2008).
## data: a data frame.
## measurevar: a list of column names containing the variables to be summarized
## betweenvars: a list of column names that are between-subjects variables
## withinvars: a list of column names that are within-subjects variables
## idvar: a list of column names that identify each subject (or matched subjects)
## conf_interval: the percent range of the confidence interval (default is 95%)
def summarySEwithin(data, measurevar, betweenvars=[], withinvars=[], idvar=[], conf_interval=.95):
# Get the means from the un-normed data
datac = summarySE(data, measurevar, groupvars=betweenvars+withinvars, conf_interval=conf_interval)
for e in measurevar:
del datac[e+"_std"]
del datac[e+"_stde"]
del datac[e+"_ci"]
del datac[e+"_ciUp"]
del datac[e+"_ciDown"]
# Norm each subject's data
ndata = normDataWithin(data, idvar, measurevar, betweenvars)
# This is the name of the new columns
measurevar_n = [x+"_norm" for x in measurevar]+measurevar
# Collapse the normed data - now we can treat between and within vars the same
ndatac = summarySE(ndata, measurevar_n, groupvars=betweenvars+withinvars,
conf_interval=conf_interval)
# Apply correction from Morey (2008) to the standard error and confidence interval
# Get the product of the number of conditions of within-S variables
nWithinGroups = 1
for v in withinvars:
nWithinGroups = nWithinGroups*len(ndatac[v].unique())
correctionFactor = np.sqrt( nWithinGroups / (nWithinGroups-1) )
# Apply the correction factor
for m in measurevar_n:
ndatac[m+"_std"] = ndatac[m+"_std"] * correctionFactor
ndatac[m+"_stde"] = ndatac[m+"_stde"] * correctionFactor
ndatac[m+"_ci"] = ndatac[m+"_ci"] * correctionFactor
return ndatac
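# Hedged end-to-end sketch (column names are illustrative):
# summarySEwithin(df, measurevar=["rt"], withinvars=["cond"], idvar=["subject"])
# collapses the normed data per condition, then scales every *_std, *_stde and
# *_ci column by sqrt(m/(m-1)) for m within-subject conditions, per Morey (2008)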
|
{"hexsha": "0137abbbd5da17dec089f2b2039f88e624ddfa82", "size": 5540, "ext": "py", "lang": "Python", "max_stars_repo_path": "summary.py", "max_stars_repo_name": "saurabhr/psypy", "max_stars_repo_head_hexsha": "65c5c4a4603dd310d1875c7f2247882e938bc19f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "summary.py", "max_issues_repo_name": "saurabhr/psypy", "max_issues_repo_head_hexsha": "65c5c4a4603dd310d1875c7f2247882e938bc19f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "summary.py", "max_forks_repo_name": "saurabhr/psypy", "max_forks_repo_head_hexsha": "65c5c4a4603dd310d1875c7f2247882e938bc19f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.7851239669, "max_line_length": 136, "alphanum_fraction": 0.6990974729, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 1468}
|
[GOAL]
C : Type u₁
inst✝² : Category.{v, u₁} C
D : Type u₂
inst✝¹ : Category.{v, u₂} D
e : C ≌ D
inst✝ : WellPowered C
X : D
⊢ EssentiallySmall (MonoOver ((Equivalence.symm e).functor.obj X))
[PROOFSTEP]
infer_instance
|
{"mathlib_filename": "Mathlib.CategoryTheory.Subobject.WellPowered", "llama_tokens": 109}
|
[STATEMENT]
lemma cs_in_initial_state_implies_not_snapshotted:
assumes
"trace init t final" and
"snd (cs (S t i) cid) = NotStarted" and
"channel cid = Some (p, q)"
shows
"~ has_snapshotted (S t i) q"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<not> ps (S t i) q \<noteq> None
[PROOF STEP]
proof (rule ccontr)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<not> \<not> ps (S t i) q \<noteq> None \<Longrightarrow> False
[PROOF STEP]
assume "~ ~ has_snapshotted (S t i) q"
[PROOF STATE]
proof (state)
this:
\<not> \<not> ps (S t i) q \<noteq> None
goal (1 subgoal):
1. \<not> \<not> ps (S t i) q \<noteq> None \<Longrightarrow> False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<not> \<not> ps (S t i) q \<noteq> None
[PROOF STEP]
obtain j where "j < i" "~ has_snapshotted (S t j) q" "has_snapshotted (S t (j+1)) q"
[PROOF STATE]
proof (prove)
using this:
\<not> \<not> ps (S t i) q \<noteq> None
goal (1 subgoal):
1. (\<And>j. \<lbrakk>j < i; \<not> ps (S t j) q \<noteq> None; ps (S t (j + 1)) q \<noteq> None\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (metis Suc_eq_plus1 assms(1) exists_snapshot_for_all_p computation.snapshot_stable_ver_3 computation_axioms nat_le_linear order_le_less)
[PROOF STATE]
proof (state)
this:
j < i
\<not> ps (S t j) q \<noteq> None
ps (S t (j + 1)) q \<noteq> None
goal (1 subgoal):
1. \<not> \<not> ps (S t i) q \<noteq> None \<Longrightarrow> False
[PROOF STEP]
have step_j: "(S t j) \<turnstile> (t ! j) \<mapsto> (S t (j+1))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. S t j \<turnstile> t ! j \<mapsto> S t (j + 1)
[PROOF STEP]
by (metis \<open>\<not> \<not> ps (S t i) q \<noteq> None\<close> \<open>\<not> ps (S t j) q \<noteq> None\<close> \<open>j < i\<close> add.commute assms(1) linorder_neqE_nat no_change_if_ge_length_t order_le_less order_refl plus_1_eq_Suc step_Suc)
[PROOF STATE]
proof (state)
this:
S t j \<turnstile> t ! j \<mapsto> S t (j + 1)
goal (1 subgoal):
1. \<not> \<not> ps (S t i) q \<noteq> None \<Longrightarrow> False
[PROOF STEP]
have tr_j_i: "trace (S t (j+1)) (take (i - (j+1)) (drop (j+1) t)) (S t i)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. trace (S t (j + 1)) (take (i - (j + 1)) (drop (j + 1) t)) (S t i)
[PROOF STEP]
using \<open>j < i\<close> assms(1) exists_trace_for_any_i_j
[PROOF STATE]
proof (prove)
using this:
j < i
trace init t final
\<lbrakk>\<exists>c'. trace ?c ?t c'; ?i \<le> ?j\<rbrakk> \<Longrightarrow> trace (s ?c ?t ?i) (take (?j - ?i) (drop ?i ?t)) (s ?c ?t ?j)
goal (1 subgoal):
1. trace (S t (j + 1)) (take (i - (j + 1)) (drop (j + 1) t)) (S t i)
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
trace (S t (j + 1)) (take (i - (j + 1)) (drop (j + 1) t)) (S t i)
goal (1 subgoal):
1. \<not> \<not> ps (S t i) q \<noteq> None \<Longrightarrow> False
[PROOF STEP]
have "~ regular_event (t ! j)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<not> regular_event (t ! j)
[PROOF STEP]
using step_j \<open>\<not> ps (S t j) q \<noteq> None\<close> \<open>ps (S t (j + 1)) q \<noteq> None\<close> regular_event_cannot_induce_snapshot
[PROOF STATE]
proof (prove)
using this:
S t j \<turnstile> t ! j \<mapsto> S t (j + 1)
\<not> ps (S t j) q \<noteq> None
ps (S t (j + 1)) q \<noteq> None
\<lbrakk>\<not> ps ?c ?p \<noteq> None; ?c \<turnstile> ?ev \<mapsto> ?c'\<rbrakk> \<Longrightarrow> regular_event ?ev \<longrightarrow> \<not> ps ?c' ?p \<noteq> None
goal (1 subgoal):
1. \<not> regular_event (t ! j)
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<not> regular_event (t ! j)
goal (1 subgoal):
1. \<not> \<not> ps (S t i) q \<noteq> None \<Longrightarrow> False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<not> regular_event (t ! j)
[PROOF STEP]
have "isSnapshot (t ! j) \<or> isRecvMarker (t ! j)"
[PROOF STATE]
proof (prove)
using this:
\<not> regular_event (t ! j)
goal (1 subgoal):
1. isSnapshot (t ! j) \<or> isRecvMarker (t ! j)
[PROOF STEP]
using nonregular_event
[PROOF STATE]
proof (prove)
using this:
\<not> regular_event (t ! j)
regular_event ?ev \<noteq> (isSnapshot ?ev \<or> isRecvMarker ?ev)
goal (1 subgoal):
1. isSnapshot (t ! j) \<or> isRecvMarker (t ! j)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
isSnapshot (t ! j) \<or> isRecvMarker (t ! j)
goal (1 subgoal):
1. \<not> \<not> ps (S t i) q \<noteq> None \<Longrightarrow> False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
isSnapshot (t ! j) \<or> isRecvMarker (t ! j)
[PROOF STEP]
have "snd (cs (S t (j+1)) cid) \<noteq> NotStarted"
[PROOF STATE]
proof (prove)
using this:
isSnapshot (t ! j) \<or> isRecvMarker (t ! j)
goal (1 subgoal):
1. snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
[PROOF STEP]
proof (elim disjE, goal_cases)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. isSnapshot (t ! j) \<Longrightarrow> snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
2. isRecvMarker (t ! j) \<Longrightarrow> snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
[PROOF STEP]
case 1
[PROOF STATE]
proof (state)
this:
isSnapshot (t ! j)
goal (2 subgoals):
1. isSnapshot (t ! j) \<Longrightarrow> snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
2. isRecvMarker (t ! j) \<Longrightarrow> snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
[PROOF STEP]
have "occurs_on (t ! j) = q"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. occurs_on (t ! j) = q
[PROOF STEP]
using \<open>\<not> ps (S t j) q \<noteq> None\<close> \<open>ps (S t (j + 1)) q \<noteq> None\<close> distributed_system.no_state_change_if_no_event distributed_system_axioms step_j
[PROOF STATE]
proof (prove)
using this:
\<not> ps (S t j) q \<noteq> None
ps (S t (j + 1)) q \<noteq> None
\<lbrakk>distributed_system ?channel; distributed_system.next ?channel ?trans ?send ?recv ?c ?ev ?c'; occurs_on ?ev \<noteq> ?p\<rbrakk> \<Longrightarrow> states ?c ?p = states ?c' ?p \<and> ps ?c ?p = ps ?c' ?p
distributed_system channel
S t j \<turnstile> t ! j \<mapsto> S t (j + 1)
goal (1 subgoal):
1. occurs_on (t ! j) = q
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
occurs_on (t ! j) = q
goal (2 subgoals):
1. isSnapshot (t ! j) \<Longrightarrow> snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
2. isRecvMarker (t ! j) \<Longrightarrow> snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
[PROOF STEP]
with 1
[PROOF STATE]
proof (chain)
picking this:
isSnapshot (t ! j)
occurs_on (t ! j) = q
[PROOF STEP]
have "t ! j = Snapshot q"
[PROOF STATE]
proof (prove)
using this:
isSnapshot (t ! j)
occurs_on (t ! j) = q
goal (1 subgoal):
1. t ! j = Snapshot q
[PROOF STEP]
using isSnapshot_def
[PROOF STATE]
proof (prove)
using this:
isSnapshot (t ! j)
occurs_on (t ! j) = q
isSnapshot ?event = (\<exists>x4. ?event = Snapshot x4)
goal (1 subgoal):
1. t ! j = Snapshot q
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
t ! j = Snapshot q
goal (2 subgoals):
1. isSnapshot (t ! j) \<Longrightarrow> snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
2. isRecvMarker (t ! j) \<Longrightarrow> snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
t ! j = Snapshot q
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
t ! j = Snapshot q
goal (1 subgoal):
1. snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
[PROOF STEP]
using step_j assms
[PROOF STATE]
proof (prove)
using this:
t ! j = Snapshot q
S t j \<turnstile> t ! j \<mapsto> S t (j + 1)
trace init t final
snd (cs (S t i) cid) = NotStarted
channel cid = Some (p, q)
goal (1 subgoal):
1. snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
goal (1 subgoal):
1. isRecvMarker (t ! j) \<Longrightarrow> snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. isRecvMarker (t ! j) \<Longrightarrow> snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
[PROOF STEP]
case 2
[PROOF STATE]
proof (state)
this:
isRecvMarker (t ! j)
goal (1 subgoal):
1. isRecvMarker (t ! j) \<Longrightarrow> snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
[PROOF STEP]
have "occurs_on (t ! j) = q"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. occurs_on (t ! j) = q
[PROOF STEP]
using \<open>\<not> ps (S t j) q \<noteq> None\<close> \<open>ps (S t (j + 1)) q \<noteq> None\<close> distributed_system.no_state_change_if_no_event distributed_system_axioms step_j
[PROOF STATE]
proof (prove)
using this:
\<not> ps (S t j) q \<noteq> None
ps (S t (j + 1)) q \<noteq> None
\<lbrakk>distributed_system ?channel; distributed_system.next ?channel ?trans ?send ?recv ?c ?ev ?c'; occurs_on ?ev \<noteq> ?p\<rbrakk> \<Longrightarrow> states ?c ?p = states ?c' ?p \<and> ps ?c ?p = ps ?c' ?p
distributed_system channel
S t j \<turnstile> t ! j \<mapsto> S t (j + 1)
goal (1 subgoal):
1. occurs_on (t ! j) = q
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
occurs_on (t ! j) = q
goal (1 subgoal):
1. isRecvMarker (t ! j) \<Longrightarrow> snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
[PROOF STEP]
with 2
[PROOF STATE]
proof (chain)
picking this:
isRecvMarker (t ! j)
occurs_on (t ! j) = q
[PROOF STEP]
obtain cid' s where RecvMarker: "t ! j = RecvMarker cid' q s"
[PROOF STATE]
proof (prove)
using this:
isRecvMarker (t ! j)
occurs_on (t ! j) = q
goal (1 subgoal):
1. (\<And>cid' s. t ! j = RecvMarker cid' q s \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (metis event.collapse(5))
[PROOF STATE]
proof (state)
this:
t ! j = RecvMarker cid' q s
goal (1 subgoal):
1. isRecvMarker (t ! j) \<Longrightarrow> snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
t ! j = RecvMarker cid' q s
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
t ! j = RecvMarker cid' q s
goal (1 subgoal):
1. snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
[PROOF STEP]
proof (cases "cid' = cid")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<lbrakk>t ! j = RecvMarker cid' q s; cid' = cid\<rbrakk> \<Longrightarrow> snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
2. \<lbrakk>t ! j = RecvMarker cid' q s; cid' \<noteq> cid\<rbrakk> \<Longrightarrow> snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
cid' = cid
goal (2 subgoals):
1. \<lbrakk>t ! j = RecvMarker cid' q s; cid' = cid\<rbrakk> \<Longrightarrow> snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
2. \<lbrakk>t ! j = RecvMarker cid' q s; cid' \<noteq> cid\<rbrakk> \<Longrightarrow> snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
cid' = cid
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
cid' = cid
goal (1 subgoal):
1. snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
[PROOF STEP]
using RecvMarker step_j assms
[PROOF STATE]
proof (prove)
using this:
cid' = cid
t ! j = RecvMarker cid' q s
S t j \<turnstile> t ! j \<mapsto> S t (j + 1)
trace init t final
snd (cs (S t i) cid) = NotStarted
channel cid = Some (p, q)
goal (1 subgoal):
1. snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
goal (1 subgoal):
1. \<lbrakk>t ! j = RecvMarker cid' q s; cid' \<noteq> cid\<rbrakk> \<Longrightarrow> snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<lbrakk>t ! j = RecvMarker cid' q s; cid' \<noteq> cid\<rbrakk> \<Longrightarrow> snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
cid' \<noteq> cid
goal (1 subgoal):
1. \<lbrakk>t ! j = RecvMarker cid' q s; cid' \<noteq> cid\<rbrakk> \<Longrightarrow> snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
[PROOF STEP]
have "~ has_snapshotted (S t j) q"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<not> ps (S t j) q \<noteq> None
[PROOF STEP]
using \<open>\<not> ps (S t j) q \<noteq> None\<close>
[PROOF STATE]
proof (prove)
using this:
\<not> ps (S t j) q \<noteq> None
goal (1 subgoal):
1. \<not> ps (S t j) q \<noteq> None
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<not> ps (S t j) q \<noteq> None
goal (1 subgoal):
1. \<lbrakk>t ! j = RecvMarker cid' q s; cid' \<noteq> cid\<rbrakk> \<Longrightarrow> snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
\<not> ps (S t j) q \<noteq> None
goal (1 subgoal):
1. \<lbrakk>t ! j = RecvMarker cid' q s; cid' \<noteq> cid\<rbrakk> \<Longrightarrow> snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
[PROOF STEP]
have "\<exists>r. channel cid = Some (r, q)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>r. channel cid = Some (r, q)
[PROOF STEP]
by (simp add: assms(3))
[PROOF STATE]
proof (state)
this:
\<exists>r. channel cid = Some (r, q)
goal (1 subgoal):
1. \<lbrakk>t ! j = RecvMarker cid' q s; cid' \<noteq> cid\<rbrakk> \<Longrightarrow> snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
\<not> ps (S t j) q \<noteq> None
\<exists>r. channel cid = Some (r, q)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<not> ps (S t j) q \<noteq> None
\<exists>r. channel cid = Some (r, q)
goal (1 subgoal):
1. snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
[PROOF STEP]
using RecvMarker step_j assms False
[PROOF STATE]
proof (prove)
using this:
\<not> ps (S t j) q \<noteq> None
\<exists>r. channel cid = Some (r, q)
t ! j = RecvMarker cid' q s
S t j \<turnstile> t ! j \<mapsto> S t (j + 1)
trace init t final
snd (cs (S t i) cid) = NotStarted
channel cid = Some (p, q)
cid' \<noteq> cid
goal (1 subgoal):
1. snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
goal (1 subgoal):
1. \<not> \<not> ps (S t i) q \<noteq> None \<Longrightarrow> False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
[PROOF STEP]
have "snd (cs (S t i) cid) \<noteq> NotStarted"
[PROOF STATE]
proof (prove)
using this:
snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
goal (1 subgoal):
1. snd (cs (S t i) cid) \<noteq> NotStarted
[PROOF STEP]
using tr_j_i cs_not_not_started_stable_trace assms
[PROOF STATE]
proof (prove)
using this:
snd (cs (S t (j + 1)) cid) \<noteq> NotStarted
trace (S t (j + 1)) (take (i - (j + 1)) (drop (j + 1) t)) (S t i)
\<lbrakk>trace ?c ?t ?c'; snd (cs ?c ?cid) \<noteq> NotStarted; channel ?cid = Some (?p, ?q)\<rbrakk> \<Longrightarrow> snd (cs ?c' ?cid) \<noteq> NotStarted
trace init t final
snd (cs (S t i) cid) = NotStarted
channel cid = Some (p, q)
goal (1 subgoal):
1. snd (cs (S t i) cid) \<noteq> NotStarted
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
snd (cs (S t i) cid) \<noteq> NotStarted
goal (1 subgoal):
1. \<not> \<not> ps (S t i) q \<noteq> None \<Longrightarrow> False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
snd (cs (S t i) cid) \<noteq> NotStarted
[PROOF STEP]
show False
[PROOF STATE]
proof (prove)
using this:
snd (cs (S t i) cid) \<noteq> NotStarted
goal (1 subgoal):
1. False
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
snd (cs (S t i) cid) \<noteq> NotStarted
trace init t final
snd (cs (S t i) cid) = NotStarted
channel cid = Some (p, q)
goal (1 subgoal):
1. False
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
False
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 7093, "file": "Chandy_Lamport_Snapshot", "length": 71}
|
#=
Copyright (c) 2018-2022 Chris Coey, Lea Kapelevich, and contributors
This Julia package Hypatia.jl is released under the MIT license; see LICENSE
file in the root directory or at https://github.com/chriscoey/Hypatia.jl
see description in native.jl
=#
using SparseArrays
struct MatrixCompletionJuMP{T <: Real} <: ExampleInstanceJuMP{T}
size_ratio::Int
num_rows::Int
end
function build(inst::MatrixCompletionJuMP{T}) where {T <: Float64}
(size_ratio, num_rows) = (inst.size_ratio, inst.num_rows)
@assert size_ratio >= 1
num_cols = size_ratio * num_rows
(rows, cols, Avals) = findnz(sprandn(num_rows, num_cols, 0.8))
is_known = vec(Matrix(sparse(rows, cols,
trues(length(Avals)), num_rows, num_cols)))
model = JuMP.Model()
JuMP.@variable(model, X[1:length(is_known)])
JuMP.@variable(model, t)
JuMP.@objective(model, Min, t)
JuMP.@constraint(model, vcat(t, X) in
MOI.NormSpectralCone(num_rows, num_cols))
X_unknown = X[.!is_known]
JuMP.@constraint(model, vcat(1, X_unknown) in
MOI.GeometricMeanCone(1 + length(X_unknown)))
JuMP.@constraint(model, X[is_known] .== Avals)
return model
end
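# Hedged usage sketch (instance sizes are illustrative; solving is handled by
# the shared example runner described in native.jl, not shown here):
# inst = MatrixCompletionJuMP{Float64}(2, 5) # a 5x10 matrix, ~80% of entries known
# model = build(inst)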
|
{"hexsha": "ffd1628391a3ae8dd2e6bf56224b956636c7e55f", "size": 1182, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/matrixcompletion/JuMP.jl", "max_stars_repo_name": "chriscoey/2021.0177-1", "max_stars_repo_head_hexsha": "a3f6258d332d2e2edc9b0e3fcd1ae9614ea9499f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-03-08T04:35:36.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T02:56:17.000Z", "max_issues_repo_path": "examples/matrixcompletion/JuMP.jl", "max_issues_repo_name": "chriscoey/2021.0177-1", "max_issues_repo_head_hexsha": "a3f6258d332d2e2edc9b0e3fcd1ae9614ea9499f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-07T20:41:44.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-07T20:41:44.000Z", "max_forks_repo_path": "examples/matrixcompletion/JuMP.jl", "max_forks_repo_name": "chriscoey/2021.0177-1", "max_forks_repo_head_hexsha": "a3f6258d332d2e2edc9b0e3fcd1ae9614ea9499f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-03-08T00:07:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T01:48:01.000Z", "avg_line_length": 30.3076923077, "max_line_length": 76, "alphanum_fraction": 0.6979695431, "num_tokens": 351}
|
import pickle
import pandas as pd
import quandl
import matplotlib.pyplot as plt
from matplotlib import style
import numpy as np
from statistics import mean
style.use("seaborn-dark-palette")
ax1 = plt.subplot(2, 1, 1)
ax2 = plt.subplot(2, 1, 2, sharex=ax1)
def create_labels(cur_hpi, fut_hpi):
if fut_hpi > cur_hpi:
return 1
else:
return 0
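# e.g. create_labels(0.02, 0.05) -> 1 (HPI expected to rise),
# create_labels(0.05, 0.02) -> 0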
def moving_average(values):
return mean(values)
benchmark = pd.read_pickle(
"us_pct.pickle"
) # us overall housing price index percentage change
HPI = pd.read_pickle(
"HPI_complete.pickle"
) # all of the state data, thirty year mortgage, unemployment rate, GDP, SP500
HPI = HPI.join(benchmark["United States"])
# all in percentage change since the start of the data (1975-01-01)
HPI.dropna(inplace=True)
housing_pct = HPI.pct_change()
housing_pct.replace([np.inf, -np.inf], np.nan, inplace=True)
housing_pct["US_HPI_future"] = housing_pct["United States"].shift(-1)
housing_pct.dropna(inplace=True)
housing_pct["label"] = list(
map(create_labels, housing_pct["United States"], housing_pct["US_HPI_future"])
)
# housing_pct['ma_apply_example'] = pd.rolling_apply(housing_pct['M30'], 10, moving_average)
housing_pct["ma_apply_example"] = (
housing_pct["M30"].rolling(window=10).apply(moving_average)
)
print(housing_pct.tail())
# state_HPI_M30 = HPI_data.join(HPI['M30']) # fifty states plus mortgage data
# print(state_HPI_M30.corr().describe().tail())
|
{"hexsha": "49bd0e0f8c763549f8f8d1210c2819c614f4fa2f", "size": 1451, "ext": "py", "lang": "Python", "max_stars_repo_path": "sentdex_data_analysis/pandas_mappingFunctions.py", "max_stars_repo_name": "yull1860outlook/Data-Analysis", "max_stars_repo_head_hexsha": "b777d9a75eb1acc4c899946d547e5585469a83ae", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4358, "max_stars_repo_stars_event_min_datetime": "2017-12-29T17:56:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T15:14:57.000Z", "max_issues_repo_path": "sentdex_data_analysis/pandas_mappingFunctions.py", "max_issues_repo_name": "MarwanAmr509/Data-Analysis", "max_issues_repo_head_hexsha": "34a48f16f6be757ed3f35cb3fc458569023c9bd8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 61, "max_issues_repo_issues_event_min_datetime": "2018-01-18T17:50:46.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-09T20:16:01.000Z", "max_forks_repo_path": "sentdex_data_analysis/pandas_mappingFunctions.py", "max_forks_repo_name": "MarwanAmr509/Data-Analysis", "max_forks_repo_head_hexsha": "34a48f16f6be757ed3f35cb3fc458569023c9bd8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3689, "max_forks_repo_forks_event_min_datetime": "2017-12-29T17:57:36.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T12:26:03.000Z", "avg_line_length": 26.3818181818, "max_line_length": 92, "alphanum_fraction": 0.7325982081, "include": true, "reason": "import numpy", "num_tokens": 393}
|
import awkward as ak
import numpy as np
import fastjet._ext # noqa: F401, E402
class _classsingleevent:
def __init__(self, data, jetdef):
self.jetdef = jetdef
self.data = self.single_to_jagged(data)
px, py, pz, E, offsets = self.extract_cons(self.data)
px = self.correct_byteorder(px)
py = self.correct_byteorder(py)
pz = self.correct_byteorder(pz)
E = self.correct_byteorder(E)
offsets = self.correct_byteorder(offsets)
self._results = fastjet._ext.interfacemulti(px, py, pz, E, offsets, jetdef)
def correct_byteorder(self, data):
# convert to native byte order; .astype is needed so the array itself is
# converted rather than just producing a new dtype object
if data.dtype.byteorder == "=":
pass
else:
data = data.astype(data.dtype.newbyteorder("="))
return data
def check_jaggedness(self, data):
if isinstance(data.layout, ak.layout.ListOffsetArray64):
return 1 + self.check_jaggedness(ak.Array(data.layout.content))
else:
return 0
def extract_cons(self, array):
px = np.asarray(ak.Array(array.layout.content, behavior=array.behavior).px)
py = np.asarray(ak.Array(array.layout.content, behavior=array.behavior).py)
pz = np.asarray(ak.Array(array.layout.content, behavior=array.behavior).pz)
E = np.asarray(ak.Array(array.layout.content, behavior=array.behavior).E)
off = np.asarray(array.layout.stops)
off = np.insert(off, 0, 0)
return px, py, pz, E, off
def _check_record(self, data):
out = isinstance(
data.layout,
(
ak.layout.RecordArray,
ak.layout.NumpyArray,
),
)
return out
def single_to_jagged(self, array):
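# Wrap a flat record array of particles as a single jagged "event": e.g. four
# particles become offsets [0, 4] over the px/py/pz/E columns, so the
# multi-event C++ interface can be reused for one event.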
single = ak.Array(
ak.layout.ListOffsetArray64(
ak.layout.Index64(np.array([0, len(array)])),
ak.layout.RecordArray(
[
ak.layout.NumpyArray(array.px),
ak.layout.NumpyArray(array.py),
ak.layout.NumpyArray(array.pz),
ak.layout.NumpyArray(array.E),
],
["px", "py", "pz", "E"],
parameters={"__record__": "Momentum4D"},
),
)
)
return single
def inclusive_jets(self, min_pt):
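# Return every clustered jet with pt above `min_pt` (e.g. min_pt=20.0 for
# 20 GeV; value illustrative) as a Momentum4D record array.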
np_results = self._results.to_numpy(min_pt)
return ak.Array(
ak.layout.RecordArray(
(
ak.layout.NumpyArray(np_results[0]),
ak.layout.NumpyArray(np_results[1]),
ak.layout.NumpyArray(np_results[2]),
ak.layout.NumpyArray(np_results[3]),
),
("px", "py", "pz", "E"),
parameters={"__record__": "Momentum4D"},
),
behavior=self.data.behavior,
)
def unclustered_particles(self):
np_results = self._results.to_numpy_unclustered_particles()
return ak.Array(
ak.layout.RecordArray(
(
ak.layout.NumpyArray(np_results[0]),
ak.layout.NumpyArray(np_results[1]),
ak.layout.NumpyArray(np_results[2]),
ak.layout.NumpyArray(np_results[3]),
),
("px", "py", "pz", "E"),
parameters={"__record__": "Momentum4D"},
),
behavior=self.data.behavior,
)
def exclusive_jets(self, n_jets, dcut):
np_results = 0
if n_jets == 0:
raise ValueError("Njets cannot be 0") from None
if dcut == -1 and n_jets != -1:
np_results = self._results.to_numpy_exclusive_njet(n_jets)
if n_jets == -1 and dcut != -1:
np_results = self._results.to_numpy_exclusive_dcut(dcut)
if np_results == 0:
raise ValueError("Either Dcut or Njets should be entered") from None
return ak.Array(
ak.layout.RecordArray(
(
ak.layout.NumpyArray(np_results[0]),
ak.layout.NumpyArray(np_results[1]),
ak.layout.NumpyArray(np_results[2]),
ak.layout.NumpyArray(np_results[3]),
),
("px", "py", "pz", "E"),
parameters={"__record__": "Momentum4D"},
),
behavior=self.data.behavior,
)
def exclusive_jets_ycut(self, ycut):
np_results = self._results.to_numpy_exclusive_ycut(ycut)
return ak.Array(
ak.layout.RecordArray(
(
ak.layout.NumpyArray(np_results[0]),
ak.layout.NumpyArray(np_results[1]),
ak.layout.NumpyArray(np_results[2]),
ak.layout.NumpyArray(np_results[3]),
),
("px", "py", "pz", "E"),
parameters={"__record__": "Momentum4D"},
),
behavior=self.data.behavior,
)
def constituent_index(self, min_pt):
np_results = self._results.to_numpy_with_constituents(min_pt)
off = np.insert(np_results[-1], 0, 0)
out = ak.Array(
ak.layout.ListOffsetArray64(
ak.layout.Index64(np_results[0]), ak.layout.NumpyArray(np_results[1])
)
)
out = ak.Array(ak.layout.ListOffsetArray64(ak.layout.Index64(off), out.layout))
return out[0]
def unique_history_order(self):
np_results = self._results.to_numpy_unique_history_order()
out = ak.Array(ak.layout.NumpyArray(np_results[0]))
return out
def constituents(self, min_pt):
np_results = self._results.to_numpy_with_constituents(min_pt)
off = np.insert(np_results[-1], 0, 0)
out = ak.Array(
ak.layout.ListOffsetArray64(
ak.layout.Index64(np_results[0]), ak.layout.NumpyArray(np_results[1])
)
)
outputs_to_inputs = ak.Array(
ak.layout.ListOffsetArray64(ak.layout.Index64(off), out.layout)
)
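        # Repeat the (single) event's particle list once per jet, then gather
        # each jet's constituents by fancy-indexing with outputs_to_inputs;
        # the trailing [0] strips the single-event axis.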
shape = ak.num(outputs_to_inputs)
total = np.sum(shape)
duplicate = ak.unflatten(np.zeros(total, np.int64), shape)
prepared = self.data[:, np.newaxis][duplicate]
return prepared[outputs_to_inputs][0]
def exclusive_dmerge(self, njets):
np_results = self._results.to_numpy_exclusive_dmerge(njets)
out = np_results[0]
out = out[0]
return out
def exclusive_dmerge_max(self, njets):
np_results = self._results.to_numpy_exclusive_dmerge_max(njets)
out = np_results[0]
out = out[0]
return out
def exclusive_ymerge_max(self, njets):
np_results = self._results.to_numpy_exclusive_ymerge_max(njets)
out = np_results[0]
out = out[0]
return out
def exclusive_ymerge(self, njets):
np_results = self._results.to_numpy_exclusive_ymerge(njets)
out = np_results[0]
out = out[0]
return out
def Q(self):
np_results = self._results.to_numpy_q()
out = np_results[0]
out = out[0]
return out
def Q2(self):
np_results = self._results.to_numpy_q2()
out = np_results[0]
out = out[0]
return out
def exclusive_subjets(self, data, dcut, nsub):
try:
px = data.px
py = data.py
pz = data.pz
E = data.E
except AttributeError:
raise AttributeError("Lorentz vector not found") from None
        # None sentinel, as in exclusive_jets above.
        np_results = None
        if nsub == 0:
            raise ValueError("Nsub cannot be 0")
        if dcut == -1 and nsub != -1:
            np_results = self._results.to_numpy_exclusive_subjets_nsub(
                px, py, pz, E, nsub
            )
        if nsub == -1 and dcut != -1:
            np_results = self._results.to_numpy_exclusive_subjets_dcut(
                px, py, pz, E, dcut
            )
        if np_results is None:
            raise ValueError("Either Dcut or Nsub should be entered") from None
return ak.Array(
ak.layout.RecordArray(
[
ak.layout.NumpyArray(np_results[0]),
ak.layout.NumpyArray(np_results[1]),
ak.layout.NumpyArray(np_results[2]),
ak.layout.NumpyArray(np_results[3]),
],
["px", "py", "pz", "E"],
parameters={"__record__": "Momentum4D"},
),
behavior=self.data.behavior,
)
def exclusive_subjets_up_to(self, data, nsub):
try:
px = data.px
py = data.py
pz = data.pz
E = data.E
except AttributeError:
raise AttributeError("Lorentz vector not found") from None
np_results = self._results.to_numpy_exclusive_subjets_up_to(px, py, pz, E, nsub)
return ak.Array(
ak.layout.RecordArray(
[
ak.layout.NumpyArray(np_results[0]),
ak.layout.NumpyArray(np_results[1]),
ak.layout.NumpyArray(np_results[2]),
ak.layout.NumpyArray(np_results[3]),
],
["px", "py", "pz", "E"],
parameters={"__record__": "Momentum4D"},
),
behavior=self.data.behavior,
)
def exclusive_subdmerge(self, data, nsub):
try:
px = data.px
py = data.py
pz = data.pz
E = data.E
except AttributeError:
raise AttributeError("Lorentz vector not found") from None
np_results = self._results.to_numpy_exclusive_subdmerge(px, py, pz, E, nsub)
out = np_results[0]
out = out[0]
return out
def exclusive_subdmerge_max(self, data, nsub):
try:
px = data.px
py = data.py
pz = data.pz
E = data.E
except AttributeError:
raise AttributeError("Lorentz vector not found") from None
np_results = self._results.to_numpy_exclusive_subdmerge_max(px, py, pz, E, nsub)
out = np_results[0]
out = out[0]
return out
def n_exclusive_subjets(self, data, dcut):
try:
px = data.px
py = data.py
pz = data.pz
E = data.E
except AttributeError:
raise AttributeError("Lorentz vector not found") from None
np_results = self._results.to_numpy_n_exclusive_subjets(px, py, pz, E, dcut)
out = np_results[0]
out = out[0]
return out
def has_parents(self, data):
try:
px = data.px
py = data.py
pz = data.pz
E = data.E
except AttributeError:
raise AttributeError("Lorentz vector not found") from None
np_results = self._results.to_numpy_has_parents(px, py, pz, E)
out = np_results[0]
out = out[0]
return out
def has_child(self, data):
try:
px = data.px
py = data.py
pz = data.pz
E = data.E
except AttributeError:
raise AttributeError("Lorentz vector not found") from None
np_results = self._results.to_numpy_has_child(px, py, pz, E)
out = np_results[0]
out = out[0]
return out
def jet_scale_for_algorithm(self, data):
try:
px = data.px
py = data.py
pz = data.pz
E = data.E
except AttributeError:
raise AttributeError("Lorentz vector not found") from None
np_results = self._results.to_numpy_jet_scale_for_algorithm(px, py, pz, E)
out = np_results[0]
out = out[0]
return out
def n_particles(self):
np_results = self._results.to_numpy_n_particles()
out = np_results[0]
out = out[0]
return out
def n_exclusive_jets(self, dcut):
np_results = self._results.to_numpy_n_exclusive_jets(dcut)
out = np_results[0]
out = out[0]
return out
def childless_pseudojets(self):
np_results = self._results.to_numpy_childless_pseudojets()
return ak.Array(
ak.layout.RecordArray(
(
ak.layout.NumpyArray(np_results[0]),
ak.layout.NumpyArray(np_results[1]),
ak.layout.NumpyArray(np_results[2]),
ak.layout.NumpyArray(np_results[3]),
),
("px", "py", "pz", "E"),
parameters={"__record__": "Momentum4D"},
),
behavior=self.data.behavior,
)
def jets(self):
np_results = self._results.to_numpy_jets()
return ak.Array(
ak.layout.RecordArray(
(
ak.layout.NumpyArray(np_results[0]),
ak.layout.NumpyArray(np_results[1]),
ak.layout.NumpyArray(np_results[2]),
ak.layout.NumpyArray(np_results[3]),
),
("px", "py", "pz", "E"),
parameters={"__record__": "Momentum4D"},
),
behavior=self.data.behavior,
)
def get_parents(self, data):
try:
px = data.px
py = data.py
pz = data.pz
E = data.E
except AttributeError:
raise AttributeError("Lorentz vector not found") from None
np_results = self._results.to_numpy_get_parents(px, py, pz, E)
return ak.Array(
ak.layout.RecordArray(
(
ak.layout.NumpyArray(np_results[0]),
ak.layout.NumpyArray(np_results[1]),
ak.layout.NumpyArray(np_results[2]),
ak.layout.NumpyArray(np_results[3]),
),
("px", "py", "pz", "E"),
parameters={"__record__": "Momentum4D"},
),
behavior=self.data.behavior,
)
def get_child(self, data):
try:
px = data.px
py = data.py
pz = data.pz
E = data.E
except AttributeError:
raise AttributeError("Lorentz vector not found") from None
np_results = self._results.to_numpy_get_child(px, py, pz, E)
return ak.Array(
ak.layout.RecordArray(
(
ak.layout.NumpyArray(np_results[0]),
ak.layout.NumpyArray(np_results[1]),
ak.layout.NumpyArray(np_results[2]),
ak.layout.NumpyArray(np_results[3]),
),
("px", "py", "pz", "E"),
parameters={"__record__": "Momentum4D"},
),
behavior=self.data.behavior,
)
|
{"hexsha": "4fe2b3139e68f9c9004d147ba2d528b75651d323", "size": 15014, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/fastjet/_singleevent.py", "max_stars_repo_name": "scikit-hep/fastjet", "max_stars_repo_head_hexsha": "e5aebdc66167400472cd29a36f4af0f2a789a992", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2021-04-18T07:00:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-16T16:07:21.000Z", "max_issues_repo_path": "src/fastjet/_singleevent.py", "max_issues_repo_name": "scikit-hep/fastjet", "max_issues_repo_head_hexsha": "e5aebdc66167400472cd29a36f4af0f2a789a992", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 25, "max_issues_repo_issues_event_min_datetime": "2021-04-15T16:35:33.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T20:38:57.000Z", "max_forks_repo_path": "src/fastjet/_singleevent.py", "max_forks_repo_name": "scikit-hep/fastjet", "max_forks_repo_head_hexsha": "e5aebdc66167400472cd29a36f4af0f2a789a992", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-07-26T23:12:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-24T14:57:27.000Z", "avg_line_length": 34.5149425287, "max_line_length": 88, "alphanum_fraction": 0.5255095244, "include": true, "reason": "import numpy", "num_tokens": 3397}
|
program big_integer
!! this gets to 20! with int64 or 12! with int32
use, intrinsic :: iso_fortran_env, only : int64, real128
implicit none (type, external)
integer(int64) :: n, i, fac
integer :: ios
character(2) :: argv
n = 10
call get_command_argument(1, argv, status=ios)
if (ios==0) read(argv,'(i2)') n
if (n<0) error stop 'N >= 0 for factorial(N)'
fac = 1
do i = 1, n
fac = fac * i
end do
! fac = gamma(real(n+1, real128))  ! note: Gamma(n+1) = n!, so the argument is n+1
print *, 'factorial',n,'=',fac
end program
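! example: running with argument 20 prints "factorial 20 = 2432902008176640000",
! the largest factorial representable in int64; with int32, 13! already overflows.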
|
{"hexsha": "d65522ab61bd551eda4fb5df2cfbb448d344e3b8", "size": 478, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "standard/big_integer.f90", "max_stars_repo_name": "supershushu/fortran2018-examples", "max_stars_repo_head_hexsha": "f0dc03b80326bc7c06fa31945b6e7406a60c1fa8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 305, "max_stars_repo_stars_event_min_datetime": "2017-12-07T12:47:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T12:03:16.000Z", "max_issues_repo_path": "src/standard/big_integer.f90", "max_issues_repo_name": "scivision/fortran2015-examples", "max_issues_repo_head_hexsha": "23fc7090997ecb4b838ebc1f09b86e2872d7141c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2018-11-24T15:45:53.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-06T08:10:43.000Z", "max_forks_repo_path": "src/standard/big_integer.f90", "max_forks_repo_name": "scivision/fortran2015-examples", "max_forks_repo_head_hexsha": "23fc7090997ecb4b838ebc1f09b86e2872d7141c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 60, "max_forks_repo_forks_event_min_datetime": "2017-11-28T07:56:03.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T01:37:53.000Z", "avg_line_length": 18.3846153846, "max_line_length": 56, "alphanum_fraction": 0.6589958159, "num_tokens": 167}
|
#include "platform/i_platform.h"
#include "network/client_system.h"
#include <boost/timer.hpp>
#include "core/program_state.h"
#include "messsage_holder.h"
#include <portable_oarchive.hpp>
#include <iosfwd>
#include "my_name_message.h"
#include "engine/engine.h"
#include "main/window.h"
#include "platform/settings.h"
namespace network {
ClientSystem::ClientSystem()
: mClient( NULL )
, mPeer( NULL )
, mClientModel( "client", &RootModel::Get() )
, mConnectModel( VoidFunc( this, &ClientSystem::Connect ), "connect", &mClientModel )
, mMessageHolder( MessageHolder::Get() )
, mProgramState( ProgramState::Get() )
, mRunning( false )
, mThreaded( false )
, mWaitMillisecs( 10 )
{
mOnPhaseChanged = EventServer<PhaseChangedEvent>::Get().Subscribe( boost::bind( &ClientSystem::OnPhaseChanged, this, _1 ) );
}
namespace {
class Timer_t
{
::boost::timer mMeasurer;
double mPrevMeasurement;
public:
void Log( std::string const& Str = std::string() )
{
double Now = mMeasurer.elapsed();
L2( "Timer - %s: %f %f\n", Str.c_str(), Now, Now - mPrevMeasurement );
mPrevMeasurement = Now;
}
Timer_t(): mMeasurer(), mPrevMeasurement( mMeasurer.elapsed() )
{
Log( "timer init" );
}
} PerfTimer;
}
void ClientSystem::Init()
{
if ( enet_initialize () != 0 )
{
L1 ( "An error occurred while initializing ENet.\n" );
}
atexit ( enet_deinitialize );
Connect();
mThreaded = Settings::Get().GetBool( "network.threaded", true );
mRunning = true;
mWaitMillisecs = Settings::Get().GetInt( "network.wait_millisec", 10 );
if (mThreaded)
{
mThread = std::thread(boost::bind(&ClientSystem::UpdateThread,this));
}
}
void ClientSystem::Update( double DeltaTime )
{
PerfTimer.Log( "client update started" );
if ( !mProgramState.mClientConnected )
{
PerfTimer.Log( "client not connected, client update ended" );
return;
}
if (mThreaded)
{
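        // Hand the outgoing messages to the network thread: publish under the
        // lock, then wake the thread blocked in TransferOutgoingMessagesTo().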
std::lock_guard<std::mutex> lck( mMessageHolder.GetOutgoingMessages().GetMutex() );
mMessageHolder.GetOutgoingMessages().Publish();
mMessageHolder.GetOutgoingMessages().GetCV().notify_all();
}
else
{
ReceiveMessages();
mMessageHolder.GetOutgoingMessages().Publish();
SendMessages();
}
PerfTimer.Log( "client update ended" );
}
void ClientSystem::Connect()
{
if ( mProgramState.mClientConnected )
{
L1( "Already connected, won't try it again!\n" );
        return; // really won't try again
}
// ENetAddress address2;
// address2.host = ENET_HOST_ANY;
// /* Bind the server to port 1234. */
// address2.port = 1236;
mClient = enet_host_create ( NULL /* create a client host */,
1 /* only allow 1 outgoing connection */,
2 /* allow up 2 channels to be used, 0 and 1 */,
0 ,
0 );
if ( mClient == NULL )
{
L1( "An error occurred while trying to create an ENet client host.\n" );
exit ( EXIT_FAILURE );
}
ENetAddress address;
ENetEvent event;
    /* Connect to the server address taken from ProgramState, port 1234. */
    address.port = 1234;
    enet_address_set_host ( & address, core::ProgramState::Get().mServerIp.c_str() );
/* Initiate the connection, allocating the two channels 0 and 1. */
mPeer = enet_host_connect ( mClient, & address, 2, 0 );
if ( mPeer == NULL )
{
L1( "No available peers for initiating an ENet connection.\n" );
exit ( EXIT_FAILURE );
}
bool connectSuccess = false;
for( size_t i = 0; i < 500; ++i )
{
if ( enet_host_service ( mClient, & event, 10 ) > 0 &&
event.type == ENET_EVENT_TYPE_CONNECT )
{
connectSuccess = true;
break;
}
}
enet_host_flush( mClient );
if( !connectSuccess )
{
L1( "Connection timed out.\n" );
enet_peer_reset ( mPeer );
engine::Engine::Get().GetSystem<engine::WindowSystem>()->Close();
}
else
{
mProgramState.mClientConnected = true;
std::auto_ptr<MyNameMessage> msg( new MyNameMessage );
msg->mName = core::ProgramState::Get().mClientName;
msg->mControlledLocalPlayerId = 1; // TODO: change when multiple local players are allowed
mMessageHolder.AddOutgoingMessage( std::auto_ptr<Message>( msg.release() ) );
}
}
void ClientSystem::Receive( ENetEvent& event )
{
// L1 ("A packet of length %u containing %s was received from %s on channel %u.\n",
// event.packet -> dataLength,
// event.packet -> data,
// event.peer -> data,
// event.channelID);
std::istringstream iss( std::string( ( char* )( event.packet->data ), event.packet->dataLength ) );
eos::portable_iarchive ia( iss );
MessageList::Messages_t messages;
ia >> messages;
mMessageHolder.GetIncomingMessages().TransferFrom( messages );
/* Clean up the packet now that we're done using it. */
enet_packet_destroy ( event.packet );
}
void ClientSystem::UpdateThread()
{
while (mRunning)
{
//PerfTimer.Log( "client thread update started" );
if (!mProgramState.mClientConnected)
{
//PerfTimer.Log( "client not connected, client update ended" );
std::this_thread::sleep_for( std::chrono::milliseconds( mWaitMillisecs ) );
continue;
}
ReceiveMessages();
SendMessages();
//PerfTimer.Log( "client thread update ended" );
}
}
void ClientSystem::OnPhaseChanged( PhaseChangedEvent const& Evt )
{
if (mThreaded && Evt.CurrentPhase == ProgramPhase::CloseSignal)
{
mRunning = false;
mThread.join();
}
}
void ClientSystem::PublishIncomingMessages()
{
if (mThreaded)
{
std::lock_guard<std::mutex> lck( mMessageHolder.GetIncomingMessages().GetMutex() );
mMessageHolder.GetIncomingMessages().Publish();
}
else
{
mMessageHolder.GetIncomingMessages().Publish();
}
}
void ClientSystem::TransferOutgoingMessagesTo( MessageList::Messages_t& messages )
{
if (mThreaded)
{
std::unique_lock<std::mutex> ulck( mMessageHolder.GetOutgoingMessages().GetMutex() );
if (!mMessageHolder.GetOutgoingMessages().HasPublishedMessages())
{
mMessageHolder.GetOutgoingMessages().GetCV().wait_for( ulck, std::chrono::milliseconds( mWaitMillisecs ) );
}
mMessageHolder.GetOutgoingMessages().TransferPublishedMessagesTo( messages );
}
else
{
mMessageHolder.GetOutgoingMessages().TransferPublishedMessagesTo( messages );
}
}
void ClientSystem::ReceiveMessages()
{
ENetEvent event;
while (enet_host_service( mClient, &event, 0 ) > 0)
{
//PerfTimer.Log("server enter");
switch (event.type)
{
case ENET_EVENT_TYPE_CONNECT:
break;
case ENET_EVENT_TYPE_RECEIVE:
Receive( event );
break;
case ENET_EVENT_TYPE_DISCONNECT:
// L1( "%s disconnected.\n", event.peer->data );
mProgramState.mClientConnected = false;
// L1( "\n\n\n\nLost connection please try reconnecting later! One Love!\n" );
engine::Engine::Get().GetSystem<engine::WindowSystem>()->Close();
}
}
// PerfTimer.Log( "client receive ended" );
PublishIncomingMessages();
}
void ClientSystem::SendMessages()
{
MessageList::Messages_t messages;
TransferOutgoingMessagesTo( messages );
if (!messages.empty())
{
std::ostringstream oss;
eos::portable_oarchive oa( oss );
oa & messages;
std::string astr( oss.str() );
ENetPacket* packet = enet_packet_create( astr.c_str(),
astr.size(),
ENET_PACKET_FLAG_RELIABLE );
enet_peer_send( mPeer, 0, packet );
enet_host_flush( mClient );
}
}
} // namespace network
|
{"hexsha": "8fd706ef41362e83f8521a1d6b9e5e45edc79ae3", "size": 8185, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/network/client_system.cpp", "max_stars_repo_name": "MrPepperoni/Reaping2-1", "max_stars_repo_head_hexsha": "4ffef3cca1145ddc06ca87d2968c7b0ffd3ba3fd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2015-02-22T20:34:28.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-04T08:55:25.000Z", "max_issues_repo_path": "src/network/client_system.cpp", "max_issues_repo_name": "MrPepperoni/Reaping2-1", "max_issues_repo_head_hexsha": "4ffef3cca1145ddc06ca87d2968c7b0ffd3ba3fd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 22.0, "max_issues_repo_issues_event_min_datetime": "2015-12-13T16:29:40.000Z", "max_issues_repo_issues_event_max_datetime": "2017-03-04T15:45:44.000Z", "max_forks_repo_path": "src/network/client_system.cpp", "max_forks_repo_name": "Reaping2/Reaping2", "max_forks_repo_head_hexsha": "0d4c988c99413e50cc474f6206cf64176eeec95d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14.0, "max_forks_repo_forks_event_min_datetime": "2015-11-23T21:25:09.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-17T17:03:23.000Z", "avg_line_length": 29.8722627737, "max_line_length": 133, "alphanum_fraction": 0.6076970067, "num_tokens": 2024}
|
Require Export Bedrock.Word.
Require Import
Fiat.Narcissus.Examples.NetworkStack.EthernetHeader
Fiat.Narcissus.Examples.NetworkStack.ARPPacket
Fiat.Narcissus.Examples.NetworkStack.IPv4Header
Fiat.Narcissus.Examples.NetworkStack.TCP_Packet
Fiat.Narcissus.Examples.NetworkStack.UDP_Packet.
Require Coq.Vectors.Vector.
Export Coq.Vectors.Vector.VectorNotations.
(* Require Export *)
(* Fiat.Common.SumType *)
(* Fiat.Common.EnumType *)
(* Fiat.Common.BoundedLookup *)
(* Fiat.Common.ilist *)
(* Fiat.Computation *)
(* Fiat.QueryStructure.Specification.Representation.Notations *)
(* Fiat.QueryStructure.Specification.Representation.Heading *)
(* Fiat.QueryStructure.Specification.Representation.Tuple *)
(* Fiat.Narcissus.BinLib.Core *)
(* Fiat.Narcissus.BinLib.AlignedByteString *)
(* Fiat.Narcissus.Common.Specs *)
(* Fiat.Narcissus.Common.WordFacts *)
(* Fiat.Narcissus.Common.ComposeCheckSum *)
(* Fiat.Narcissus.Common.ComposeIf *)
(* Fiat.Narcissus.Common.ComposeOpt *)
(* Fiat.Narcissus.Formats.FixListOpt *)
(* Fiat.Narcissus.Stores.EmptyStore *)
(* Fiat.Narcissus.Formats.WordOpt *)
(* Fiat.Narcissus.Formats.Bool *)
(* Fiat.Narcissus.Formats.NatOpt *)
(* Fiat.Narcissus.Formats.Vector *)
(* Fiat.Narcissus.Formats.EnumOpt *)
(* Fiat.Narcissus.Formats.SumTypeOpt *)
(* Fiat.Narcissus.Formats.IPChecksum. *)
Definition InjectEnum {n A}
(gallina_constructors: Vector.t A n)
(enum_member: Fin.t n) : A :=
Vector.nth gallina_constructors enum_member.
Require Import AlignedByteString.
Definition WrapDecoder {A B} sz
(impl: forall {sz}, ByteBuffer.t sz -> option (A * nat * B))
(bs: ByteBuffer.t sz) : option A :=
match impl bs with
| Some (pkt, _, _) => Some pkt
| None => None
end.
Definition WrapEncoder {A B} sz
(impl: forall {sz}, ByteBuffer.t sz -> A -> option (ByteBuffer.t sz * nat * B))
(pkt: A) (out: ByteBuffer.t sz) : option (ByteBuffer.t sz) :=
match impl out pkt with
| Some (out, _, _) => Some out
| None => None
end.
Definition IsSome {A} (x: option A) :=
match x with Some _ => True | None => False end.
Definition must {A} (x: option A) (pr: IsSome x) : A :=
match x as xx return (IsSome xx -> A) with
| Some a => fun _ => a
| None => fun pr => False_rect _ pr
end pr.
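(* Example (sketch): [must (Some 3) I] computes to [3]; the proof obligation
   [IsSome (Some 3)] unfolds to [True], so the trivial proof [I] discharges it. *)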
Definition MakeBuffer sz := AlignedByteString.initialize_Aligned_ByteString sz.
Definition FreshBuffer {sz} (len: nat) (v: ByteBuffer.t sz) := AlignedByteString.initialize_Aligned_ByteString len.
Definition fiat_ethernet_encode {sz} :=
WrapEncoder sz (fun sz v pkt => @EthernetHeader_encoder_impl pkt sz v).
Definition fiat_ethernet_decode {sz} v packet_length :=
WrapDecoder sz (@Ethernet_decoder_impl packet_length) v.
Definition fiat_arp_decode {sz} :=
WrapDecoder sz (@ARP_decoder_impl).
Definition fiat_arp_encode {sz} :=
WrapEncoder sz (@ARP_encoder_impl).
Definition fiat_ipv4_decode {sz} :=
WrapDecoder sz (@IPv4_decoder_impl).
Definition fiat_ipv4_encode {sz} :=
WrapEncoder sz (@IPv4_encoder_impl).
Definition fiat_tcp_encode {sz} v srcAddress dstAddress tcpLength :=
WrapEncoder sz (fun sz v pkt => @TCP_encoder_impl srcAddress dstAddress tcpLength pkt sz v) v.
Definition fiat_tcp_decode {sz} v (srcAddress dstAddress: ByteBuffer.t 4) tcpLength :=
WrapDecoder sz (@TCP_decoder_impl srcAddress dstAddress tcpLength) v.
Definition fiat_udp_encode {sz} v srcAddress dstAddress udpLength :=
WrapEncoder sz (fun sz v pkt => @UDP_encoder_impl srcAddress dstAddress udpLength pkt sz v) v.
Definition fiat_udp_decode {sz} v (srcAddress dstAddress: ByteBuffer.t 4) (udpLength: word 16) :=
WrapDecoder sz (@UDP_decoder_impl srcAddress dstAddress udpLength) v.
|
{"author": "mit-plv", "repo": "fiat", "sha": "4c78284c3a88db32051bdba79202f40c645ffb7f", "save_path": "github-repos/coq/mit-plv-fiat", "path": "github-repos/coq/mit-plv-fiat/fiat-4c78284c3a88db32051bdba79202f40c645ffb7f/src/Narcissus/Examples/NetworkStack/TestInfrastructure.v"}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# author: flag
import numpy as np
import scipy.io
# load the data from matlab of .mat
def loadMatlabIdata(filename=None):
data = scipy.io.loadmat(filename, mat_dtype=True, struct_as_record=True) # variable_names='CATC'
return data
def get_data2multi_scale(equation_name=None, mesh_number=2):
    # Map each equation to its data directory; unknown names raise a clear
    # error instead of failing later with an UnboundLocalError.
    equation2dir = {
        'multi_scale2D_1': 'E1',
        'multi_scale2D_2': 'E2',
        'multi_scale2D_3': 'E3',
        'multi_scale2D_4': 'E4',
        'multi_scale2D_5': 'E5',
    }
    if equation_name not in equation2dir:
        raise ValueError('unknown equation_name: %r' % equation_name)
    test_meshXY_file = 'data2Matlab/%s/meshXY%s.mat' % (equation2dir[equation_name], mesh_number)
    mesh_XY = loadMatlabIdata(test_meshXY_file)
    XY = mesh_XY['meshXY']
    test_xy_data = np.transpose(XY, (1, 0))
    return test_xy_data
if __name__ == '__main__':
mat_file_name = 'data2Matlab/meshXY.mat'
mat_data = loadMatlabIdata(mat_file_name)
XY = mat_data['meshXY']
XY_T = np.transpose(XY, (1, 0))
    print('meshXY transposed shape:', XY_T.shape)
|
{"hexsha": "60f42957bc1b1e6311f687c49e102cadaa77c1ae", "size": 1380, "ext": "py", "lang": "Python", "max_stars_repo_path": "matData2multi_scale.py", "max_stars_repo_name": "xuzhiqin1990/MSDNN2ellipticPDEs", "max_stars_repo_head_hexsha": "ddaee034474c18bc23b51824fb6a00539c07d52c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-23T07:40:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-23T07:40:04.000Z", "max_issues_repo_path": "matData2multi_scale.py", "max_issues_repo_name": "xuzhiqin1990/MSDNN2ellipticPDEs", "max_issues_repo_head_hexsha": "ddaee034474c18bc23b51824fb6a00539c07d52c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "matData2multi_scale.py", "max_forks_repo_name": "xuzhiqin1990/MSDNN2ellipticPDEs", "max_forks_repo_head_hexsha": "ddaee034474c18bc23b51824fb6a00539c07d52c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-31T10:57:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-31T10:57:17.000Z", "avg_line_length": 37.2972972973, "max_line_length": 101, "alphanum_fraction": 0.6731884058, "include": true, "reason": "import numpy,import scipy", "num_tokens": 423}
|
/*
** Author(s):
** - Herve Cuche <hcuche@aldebaran-robotics.com>
**
** Copyright (C) 2010, 2012 Aldebaran Robotics
*/
#include <future>
#include <vector>
#include <string>
#include <gtest/gtest.h>
#include <qi/session.hpp>
#include <qi/anyobject.hpp>
#include <qi/type/dynamicobjectbuilder.hpp>
#include <qi/type/dynamicobject.hpp>
#include <qi/type/objecttypebuilder.hpp>
#include <qi/os.hpp>
#include <qi/application.hpp>
#include <testsession/testsessionpair.hpp>
#include <boost/optional/optional_io.hpp>
qiLogCategory("test");
static const qi::MilliSeconds usualTimeout{200};
static std::string reply(const std::string &msg)
{
return msg;
}
/* For asynchronous things where no synchronisation mechanism
* is possible, loop the check and wait a small delay,
* instead of one big sleep that will slow us down
*
*/
#define PERSIST_CHECK(code, cond, what, msdelay) \
do \
{ \
code; \
for(unsigned i=0; i<50 && !(cond); ++i) \
{ \
std::this_thread::sleep_for(std::chrono::milliseconds{1} + msdelay / 50 ); \
code; \
} \
what(cond); \
} while(0)
#define PERSIST_ASSERT(code, cond, msdelay) \
PERSIST_CHECK(code, cond, ASSERT_TRUE, msdelay)
#define PERSIST_EXPECT(code, cond, msdelay) \
PERSIST_CHECK(code, cond, EXPECT_TRUE, msdelay)
#define PERSIST(code, cond, msdelay) \
PERSIST_CHECK(code, cond, (void),msdelay)
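// Example usage (mirrors the tests below): re-evaluate the service lookup
// and poll for up to one second until the returned future holds an error:
//   PERSIST_ASSERT(fut = p.client()->service("serviceTest"), fut.hasError(),
//                  std::chrono::milliseconds{1000});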
//check for server closed
//check for socket disconnected
//check for service unregistered
//check for service unregistered, then readded
TEST(QiService, RemoteObjectCacheServerClose)
{
TestSessionPair p;
if (p.server() == p.client()) // we close and not unregister, so does not work in direct mode
return;
qi::DynamicObjectBuilder ob;
ob.advertiseMethod("reply", &reply);
qi::AnyObject obj(ob.object());
p.server()->registerService("serviceTest", obj);
qi::Future<qi::AnyObject> fut;
fut = p.client()->service("serviceTest");
EXPECT_FALSE(fut.hasError());
EXPECT_EQ(std::string("titi"), fut.value().call<std::string>("reply", "titi"));
p.server()->close();
PERSIST_ASSERT(fut = p.client()->service("serviceTest"), fut.hasError(), std::chrono::milliseconds{1000});
}
TEST(QiService, RemoteObjectCacheUnregister)
{
TestSessionPair p;
qi::DynamicObjectBuilder ob;
ob.advertiseMethod("reply", &reply);
qi::AnyObject obj(ob.object());
unsigned int idx = p.server()->registerService("serviceTest", obj).value();
qi::Future<qi::AnyObject> fut;
fut = p.client()->service("serviceTest");
EXPECT_FALSE(fut.hasError());
EXPECT_EQ(std::string("titi"), fut.value().call<std::string>("reply", "titi"));
p.server()->unregisterService(idx);
PERSIST_ASSERT(fut = p.client()->service("serviceTest"), fut.hasError(), std::chrono::milliseconds{1000});
}
TEST(QiService, RemoteObjectCacheABAUnregister)
{
TestSessionPair p;
qi::DynamicObjectBuilder ob;
ob.advertiseMethod("reply", &reply);
qi::AnyObject obj(ob.object());
unsigned int idx = p.server()->registerService("serviceTest", obj).value();
qi::Future<qi::AnyObject> fut;
fut = p.client()->service("serviceTest");
EXPECT_FALSE(fut.hasError());
EXPECT_EQ(std::string("titi"), fut.value().call<std::string>("reply", "titi"));
p.server()->unregisterService(idx);
PERSIST_ASSERT(fut = p.client()->service("serviceTest"), fut.hasError(), std::chrono::milliseconds{1000});
unsigned int idx2 = p.server()->registerService("serviceTest", obj).value();
  // the new service should not reuse a previously registered ID
EXPECT_NE(idx2, idx);
fut = p.client()->service("serviceTest");
EXPECT_FALSE(fut.hasError());
qi::Future<std::string> fret = fut.value().async<std::string>("reply", "titi");
if (fret.hasError()) {
std::cout << "Error returned:" << fret.error();
}
EXPECT_FALSE(fret.hasError());
EXPECT_EQ(std::string("titi"), fret.value());
}
TEST(QiService, RemoteObjectCacheABANewServer)
{
TestSessionPair p;
auto ses = qi::makeSession();
if (p.server() == p.client()) // we close and not unregister, so does not work in direct mode
return;
qi::DynamicObjectBuilder ob;
ob.advertiseMethod("reply", &reply);
qi::AnyObject obj(ob.object());
unsigned int idx = p.server()->registerService("serviceTest", obj).value();
qi::Future<qi::AnyObject> fut;
fut = p.client()->service("serviceTest");
EXPECT_FALSE(fut.hasError());
EXPECT_EQ(std::string("titi"), fut.value().call<std::string>("reply", "titi"));
p.server()->close();
PERSIST_ASSERT(fut = p.client()->service("serviceTest"), fut.hasError(), std::chrono::milliseconds{1000});
qi::Future<void> f = ses->connect(p.client()->url().str());
f.wait(8000);
EXPECT_FALSE(f.hasError());
ses->listen("tcp://0.0.0.0:0");
unsigned int idx2 = ses->registerService("serviceTest", obj).value();
  // the new service should not reuse a previously registered ID
EXPECT_NE(idx2, idx);
fut = p.client()->service("serviceTest");
EXPECT_FALSE(fut.hasError());
qi::Future<std::string> fret = fut.value().async<std::string>("reply", "titi");
if (fret.hasError()) {
std::cout << "Error returned:" << fret.error();
}
EXPECT_FALSE(fret.hasError());
EXPECT_EQ(std::string("titi"), fret.value());
}
TEST(QiService, RemoteObjectNackTransactionWhenServerClosed)
{
TestSessionPair p;
if (p.server() == p.client()) // we close and not unregister, so does not work in direct mode
return;
qi::DynamicObjectBuilder ob;
ob.advertiseMethod("sleep", [](qi::MilliSeconds delay){ boost::this_thread::sleep_for(delay); });
qi::AnyObject obj(ob.object());
p.server()->registerService("serviceTest", obj);
qi::Future<qi::AnyObject> fut;
fut = p.client()->service("serviceTest");
EXPECT_FALSE(fut.hasError());
qi::Future<void> fret = fut.value().async<void>("sleep", qi::Seconds{2});
qi::Future<void> fclose = p.server()->close();
fclose.wait(1000);
EXPECT_TRUE(fclose.isFinished());
EXPECT_FALSE(fclose.hasError(1));
fret.wait(1000);
//once the server is close, the answer should be ready.
EXPECT_TRUE(fret.isFinished());
//the service is closed, so it can't send an answer.
EXPECT_TRUE(fret.hasError(1000));
}
class Foo
{
public:
int ping(int i) { return i + prop.get().value();}
qi::Property<int> prop;
};
void inc (qi::Atomic<int>* daInt, int unused)
{
++(*daInt);
}
TEST(QiService, ClassProperty)
{
Foo f; // foo is registered as service, so must survive the session
TestSessionPair p;
qi::ObjectTypeBuilder<Foo> builder;
builder.advertiseMethod("ping", &Foo::ping);
ASSERT_TRUE(builder.advertiseProperty("offset", &Foo::prop) > 0);
qi::AnyObject obj = builder.object(&f, &qi::AnyObject::deleteGenericObjectOnly);
p.server()->registerService("foo", obj);
qi::AnyObject client = p.client()->service("foo").value();
qi::detail::printMetaObject(std::cerr, obj.metaObject());
std::cerr <<"--" << std::endl;
qi::detail::printMetaObject(std::cerr, client.metaObject());
qiLogDebug() << "setProp";
client.setProperty<int>("offset", 1).value();
qiLogDebug() << "setProp done";
ASSERT_EQ(1, f.prop.get().value());
ASSERT_EQ(2, client.call<int>("ping", 1));
f.prop.set(2);
ASSERT_EQ(3, client.call<int>("ping", 1));
ASSERT_EQ(2, client.property<int>("offset").value());
// test event
qi::Atomic<int> hit{0};
f.prop.connect(boost::bind(&inc, &hit, _1));
obj.connect("offset", boost::bind(&inc, &hit,_1));
client.connect("offset", boost::bind(&inc, &hit,_1));
f.prop.set(1);
PERSIST_ASSERT(, (hit.load()) == 3, std::chrono::milliseconds{5});
client.setProperty("offset", 2);
PERSIST_ASSERT(, (hit.load()) == 6, std::chrono::milliseconds{5});
// test error handling
EXPECT_TRUE(client.setProperty("canard", 5).hasError());
EXPECT_TRUE(client.setProperty("offset", "astring").hasError());
}
int prop_ping(qi::PropertyBase* &p, int v)
{
return static_cast<int>(p->value().value().toInt() + v);
}
TEST(QiService, GenericProperty)
{
TestSessionPair p;
qi::DynamicObject* dobj = new qi::DynamicObject();
qi::DynamicObjectBuilder builder(dobj);
unsigned int propId = builder.advertiseProperty<int>("offset");
qi::PropertyBase* prop;
builder.advertiseMethod("ping",
(boost::function<int (int)>)boost::bind(&prop_ping, boost::ref(prop), _1));
qi::AnyObject obj = builder.object();
prop = dobj->property(propId);
prop->setValue(0);
p.server()->registerService("foo", obj);
qi::AnyObject client = p.client()->service("foo").value();
client.setProperty("offset", 1);
ASSERT_EQ(1, prop->value().value().toInt());
ASSERT_EQ(2, client.call<int>("ping", 1));
prop->setValue(2);
ASSERT_EQ(3, client.call<int>("ping", 1));
ASSERT_EQ(2, client.property<int>("offset").value());
// test event
  qi::Atomic<int> hit{0};
qiLogVerbose() << "Connecting to signal";
ASSERT_NE(qi::SignalBase::invalidSignalLink, prop->signal()->connect((boost::function<void(int)>)boost::bind(&inc, &hit, _1)));
ASSERT_NE(qi::SignalBase::invalidSignalLink, obj.connect("offset", boost::bind(&inc, &hit, _1)).value());
ASSERT_NE(qi::SignalBase::invalidSignalLink, client.connect("offset", boost::bind(&inc, &hit, _1)).value());
qiLogVerbose() << "Triggering prop set";
prop->setValue(1);
PERSIST(, (hit.load()) == 3, std::chrono::milliseconds{500});
  std::this_thread::sleep_for(std::chrono::milliseconds{500});
EXPECT_EQ(3, hit.load());
client.setProperty<int>("offset", 2);
PERSIST(, (hit.load()) == 6, std::chrono::milliseconds{500});
  std::this_thread::sleep_for(std::chrono::milliseconds{500});
EXPECT_EQ(6, hit.load());
if (client != obj)
{
client.call<void>("setProperty", "offset", 3);
EXPECT_EQ(3, prop->value().value().toInt());
}
// test error handling
EXPECT_TRUE(client.setProperty("canard", 5).hasError());
EXPECT_TRUE(client.setProperty("offset", "astring").hasError());
}
class Bar
{
public:
void ping() { }
};
QI_REGISTER_OBJECT(Bar, ping)
TEST(QiService, RemoteServiceRegistrationAfterDisconnection)
{
TestSessionPair p;
// Create an object
boost::shared_ptr<Bar> bar(new Bar());
qi::AnyObject barAsObject = qi::AnyValue::from(bar).to<qi::AnyObject>();
// Register the object with the provider, find it back from the client
p.server()->registerService("Bar", barAsObject);
qi::AnyObject barAsRemoteService = p.client()->service("Bar").value();
ASSERT_TRUE(barAsRemoteService);
// Disconnect the provider, it should unregister any related services
p.server()->close();
qiLogVerbose() << "close finished";
qi::Future<void> fc = p.server()->connect(p.serviceDirectoryEndpoints()[0]);
fc.wait(3000);
if (fc.hasError())
qiLogError() << fc.error();
ASSERT_TRUE(fc.hasValue());
qiLogVerbose() << "Connect finished";
// Register the object again with the provider, find it back from the client
ASSERT_NO_THROW(p.server()->registerService("Bar", barAsObject));
ASSERT_EQ(qi::FutureState_FinishedWithValue,
p.client()->waitForService("Bar").wait(qi::Seconds{ 3 }));
ASSERT_TRUE(p.client()->service("Bar").value());
}
class DummyObject
{
public:
DummyObject(qi::Promise<void> prom) : p(prom), a(0) { }
~DummyObject()
{
p.setValue(0);
}
qi::Promise<void> p;
int a;
};
QI_REGISTER_OBJECT(DummyObject, a);
class ServiceThatServesObjects
{
public:
qi::AnyObject getObject()
{
return qi::Object<DummyObject>(new DummyObject(prom));
}
qi::Promise<void> prom;
};
QI_REGISTER_OBJECT(ServiceThatServesObjects, getObject);
TEST(QiService, NetworkObjectsAreClosedWithTheSession)
{
auto server = qi::makeSession();
auto client = qi::makeSession();
ServiceThatServesObjects *concreteService = new ServiceThatServesObjects;
qi::Future<void> fut = concreteService->prom.future();
server->listenStandalone(qi::Url("tcp://127.0.0.1:0"));
server->registerService("service", qi::Object<ServiceThatServesObjects>(concreteService));
client->connect(server->endpoints()[0]);
ASSERT_TRUE(client->isConnected());
qi::AnyObject service = client->service("service").value();
qi::AnyObject obj = service.call<qi::AnyObject>("getObject");
client->close();
fut.wait();
// if we reach here, the test is a success: the remote reference "client"
// is gone so our object has been deleted.
}
class DoSomething
{
public:
int ping(int i) { return i; }
};
QI_REGISTER_OBJECT(DoSomething, ping);
class CallDoSomethingInDtor
{
public:
CallDoSomethingInDtor(const qi::SessionPtr& session)
: _session(session)
{
}
~CallDoSomethingInDtor()
{
    // Should always work even on a client session,
    // i.e. destroy the object first, THEN close the session, not the other way around
if (_session->isConnected())
{
qiLogFatal() << "get doSomething service";
_doSomething = _session->service("doSomething").value();
qi::detail::printMetaObject(std::cout, _doSomething.metaObject());
qiLogFatal() << "call doSomething.ping()";
_doSomething.call<int>("ping", 12);
}
}
void doNothing() {}
private:
qi::AnyObject _doSomething;
qi::SessionPtr _session;
};
QI_REGISTER_OBJECT(CallDoSomethingInDtor, doNothing);
TEST(QiService, CallRemoteServiceInsideDtorService)
{
TestSessionPair p;
auto ds = boost::make_shared<DoSomething>();
p.server()->registerService("doSomething", qi::Object<DoSomething>(ds)).wait();
auto callds = boost::make_shared<CallDoSomethingInDtor>(p.client());
unsigned int idCallDS =
p.server()->registerService("callDoSomethingInDtor", qi::Object<CallDoSomethingInDtor>(callds)).value();
{
EXPECT_NO_THROW(p.client()->service("doSomething").value());
EXPECT_NO_THROW(p.client()->service("callDoSomethingInDtor").value());
}
// Have the client unregister the service
// this should not deadlock
p.client()->unregisterService(idCallDS).wait();
}
TEST(QiService, ExceptionFromPropertySetterSetsErrorOnFuture)
{
using CustomException = std::exception;
const int initialValue = 12;
qi::Property<int> property{initialValue, qi::Property<int>::Getter{}, [this](int&, const int&)->bool
{
throw CustomException{};
}};
const std::string serviceName{"Corine"};
const std::string propertyName{"Ptitegoutte"};
qi::DynamicObjectBuilder objectBuilder;
objectBuilder.advertiseProperty(propertyName, &property);
TestSessionPair sessions;
sessions.server()->registerService(serviceName, objectBuilder.object());
auto setting = sessions.client()->service(serviceName).value().setProperty(propertyName, 42);
auto settingState = setting.waitFor(usualTimeout);
ASSERT_EQ(qi::FutureState_FinishedWithError, settingState);
}
TEST(QiService, BlockingPropertySetterDoesNotBlockOtherCalls)
{
std::promise<void> promise;
qi::Property<int> property{qi::Property<int>::Getter{}, [&](int&, const int&)->bool
{
promise.get_future().wait();
return false;
}};
const std::string serviceName{"Alain"};
const std::string propertyName{"Alex"};
const std::string methodName{"Terieur"};
qi::DynamicObjectBuilder objectBuilder;
objectBuilder.advertiseProperty(propertyName, &property);
objectBuilder.advertiseMethod(methodName, []{});
objectBuilder.setThreadingModel(qi::ObjectThreadingModel_MultiThread);
TestSessionPair sessions;
sessions.server()->registerService(serviceName, objectBuilder.object());
auto remoteService = sessions.client()->service(serviceName).value();
remoteService.setProperty(propertyName, 42).async();
auto calling = remoteService.async<void>(methodName);
auto callingState = calling.waitFor(usualTimeout);
EXPECT_EQ(qi::FutureState_FinishedWithValue, callingState);
promise.set_value();
}
////////////////////////////////////////////////////////////////////////////////
/// Optionals
////////////////////////////////////////////////////////////////////////////////
using boost::make_optional;
class OptPropService
{
public:
struct Position
{
int x;
int y;
KA_GENERATE_FRIEND_REGULAR_OPS_2(Position, x, y)
friend std::ostream& operator<<(std::ostream& os, const Position& pos)
{
return os << "Position(" << pos.x << ", " << pos.y << ")";
}
};
qi::Property<boost::optional<std::string>> info;
qi::Property<boost::optional<Position>> pos;
};
QI_TYPE_STRUCT(OptPropService::Position, x, y)
QI_REGISTER_OBJECT(OptPropService, info, pos)
namespace
{
class QiServiceOptionalProperty : public ::testing::Test
{
protected:
struct ObjectAssertingProxy
{
explicit ObjectAssertingProxy(qi::AnyObject& obj)
: obj(obj)
{}
template <typename V>
::testing::AssertionResult set(const char* propName, V&& value)
{
const auto fut = obj.setProperty(propName, std::forward<V>(value));
if (fut.hasError())
return ::testing::AssertionFailure() << fut.error();
return ::testing::AssertionSuccess();
}
template <typename T>
::testing::AssertionResult isSet(const char* propName, bool expected)
{
const auto fut = obj.property<boost::optional<T>>(propName);
if (fut.hasError())
return ::testing::AssertionFailure() << fut.error();
const auto actual = static_cast<bool>(fut.value());
if (actual != expected)
return ::testing::AssertionFailure() << "value set state '" << std::boolalpha << actual
<< "' is not equal to '" << expected << "'";
return ::testing::AssertionSuccess();
}
template <typename T>
::testing::AssertionResult valueEquals(const char* propName, T&& value = T())
{
const auto fut = obj.property<boost::optional<T>>(propName);
if (fut.hasError())
return ::testing::AssertionFailure() << fut.error();
const auto futValue = fut.value();
if (!futValue)
return ::testing::AssertionFailure() << "optional is not set";
if (*futValue != value)
return ::testing::AssertionFailure() << "'" << futValue << "' is not equal to '" << value << "'";
return ::testing::AssertionSuccess();
}
qi::AnyObject& obj;
};
QiServiceOptionalProperty()
: serviceObject([=] {
qi::ObjectTypeBuilder<OptPropService> builder;
builder.advertiseProperty("info", &OptPropService::info);
builder.advertiseProperty("pos", &OptPropService::pos);
auto obj = builder.object(&optPropService, &qi::AnyObject::deleteGenericObjectOnly);
session.server()->registerService("optpropservice", obj);
return obj;
}())
, client(session.client()->service("optpropservice").value())
, object(client)
{
}
void setValues()
{
EXPECT_TRUE(object.set("info", make_optional<std::string>("pineapples")));
EXPECT_TRUE(object.set("pos", make_optional(OptPropService::Position{-3242, 024})));
ASSERT_TRUE(object.isSet<std::string>("info", true));
ASSERT_TRUE(object.isSet<OptPropService::Position>("pos", true));
}
OptPropService optPropService;
TestSessionPair session;
qi::AnyObject serviceObject;
qi::AnyObject client;
ObjectAssertingProxy object;
};
}
TEST_F(QiServiceOptionalProperty, IsUnsetByDefault)
{
EXPECT_TRUE(object.isSet<std::string>("info", false));
EXPECT_TRUE(object.isSet<OptPropService::Position>("pos", false));
}
TEST_F(QiServiceOptionalProperty, IsSetAfterSetting)
{
ASSERT_TRUE(object.set("info", make_optional<std::string>("cookies")));
ASSERT_TRUE(object.set("pos", make_optional(OptPropService::Position{37, 74})));
EXPECT_TRUE(object.isSet<std::string>("info", true));
EXPECT_TRUE(object.isSet<OptPropService::Position>("pos", true));
EXPECT_TRUE(object.valueEquals<std::string>("info", "cookies"));
EXPECT_TRUE(object.valueEquals<OptPropService::Position>("pos", OptPropService::Position{37, 74}));
}
TEST_F(QiServiceOptionalProperty, CanBeSetFromUnderlyingType)
{
ASSERT_TRUE(object.set("info", std::string{"muffins"}));
ASSERT_TRUE(object.set("pos", OptPropService::Position{42, 1337}));
EXPECT_TRUE(object.isSet<std::string>("info", true));
EXPECT_TRUE(object.isSet<OptPropService::Position>("pos", true));
EXPECT_TRUE(object.valueEquals<std::string>("info", "muffins"));
EXPECT_TRUE(object.valueEquals<OptPropService::Position>("pos", OptPropService::Position{42, 1337}));
}
TEST_F(QiServiceOptionalProperty, IsNotSetAfterSettingToNone)
{
setValues();
EXPECT_TRUE(object.set("info", boost::none));
EXPECT_TRUE(object.set("pos", boost::none));
EXPECT_TRUE(object.isSet<std::string>("info", false));
EXPECT_TRUE(object.isSet<OptPropService::Position>("pos", false));
}
TEST_F(QiServiceOptionalProperty, IsNotSetAfterSettingVoidAnyValue)
{
setValues();
EXPECT_TRUE(object.set("info", qi::AnyValue::makeVoid()));
EXPECT_TRUE(object.set("pos", qi::AnyValue::makeVoid()));
EXPECT_TRUE(object.isSet<std::string>("info", false));
EXPECT_TRUE(object.isSet<OptPropService::Position>("pos", false));
}
TEST_F(QiServiceOptionalProperty, CannotBeSetFromUnrelatedTypes)
{
EXPECT_FALSE(object.set("info", 42));
EXPECT_FALSE(object.set("pos", "abc"));
}
|
{"hexsha": "5ec872579265d7f17d0b7499a4b1837b54db136f", "size": 21691, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tests/messaging/test_service.cpp", "max_stars_repo_name": "yumilceh/libqi", "max_stars_repo_head_hexsha": "f094bcad506bcfd5a8dcfa7688cbcce864b0765b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/messaging/test_service.cpp", "max_issues_repo_name": "yumilceh/libqi", "max_issues_repo_head_hexsha": "f094bcad506bcfd5a8dcfa7688cbcce864b0765b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/messaging/test_service.cpp", "max_forks_repo_name": "yumilceh/libqi", "max_forks_repo_head_hexsha": "f094bcad506bcfd5a8dcfa7688cbcce864b0765b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.1348148148, "max_line_length": 129, "alphanum_fraction": 0.6621640312, "num_tokens": 5434}
|
import pandas as pd
import os
import yaml
from yaml.loader import SafeLoader
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
# Open the file and load the file
with open('C:\\Users\\jonah\\Desktop\\Projects\\Programming\\Personal\\sliceSLAM\\config.YAML') as f:
cfg = yaml.load(f, Loader=SafeLoader)
combined_data = pd.read_csv(cfg.get("combined_data_path"))
def get_drone_data():
    # Pull the drone trajectory columns directly as numpy arrays instead of
    # copying element by element.
    drone_points_x = combined_data["X_Pos"].to_numpy()
    drone_points_y = combined_data["Y_Pos"].to_numpy()
    drone_points_z = combined_data["Z_Pos"].to_numpy()
    return drone_points_x, drone_points_y, drone_points_z
def plot(line, scatter, x, y, z, x2=np.array(0), y2=np.array(0), z2=np.array(0), drone_path=False):
    fig = plt.figure()
    ax = plt.axes(projection='3d')
    if scatter:
        ax.scatter3D(x, y, z, color='green')
        if drone_path:
            ax.scatter3D(x2, y2, z2, color='blue')
    if line:
        ax.plot3D(x, y, z, 'gray')
        if drone_path:
            ax.plot3D(x2, y2, z2, 'blue')
    # label the axes once and show a single combined figure
    ax.set_xlabel('X axis')
    ax.set_ylabel('Y axis')
    ax.set_zlabel('Z axis')
    plt.show()
def plot_drone(line=False, scatter=True):
x, y, z = get_drone_data()
plot(line, scatter, x, y, z)
def environment_plotter(drone_path=True, scatter=True, line=False):
env_data = np.genfromtxt(cfg.get("environment_data"), delimiter=',')
x, y, z = env_data[:, 0], env_data[:, 1], env_data[:, 2]
if drone_path:
dx, dy, dz = get_drone_data()
plot(line, scatter, x, y, z, dx, dy, dz, drone_path)
else:
plot(line, scatter, x, y, z)
|
{"hexsha": "dd8fc590267f2c5d0315ae1988366ab4b3daded1", "size": 1981, "ext": "py", "lang": "Python", "max_stars_repo_path": "modelling/plots.py", "max_stars_repo_name": "Jonah1234567/sliceSLAM", "max_stars_repo_head_hexsha": "be64f5bdabfb282cbf128a65d82609b49174750e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "modelling/plots.py", "max_issues_repo_name": "Jonah1234567/sliceSLAM", "max_issues_repo_head_hexsha": "be64f5bdabfb282cbf128a65d82609b49174750e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-14T21:58:13.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-14T21:58:13.000Z", "max_forks_repo_path": "modelling/plots.py", "max_forks_repo_name": "Jonah1234567/sliceSLAM", "max_forks_repo_head_hexsha": "be64f5bdabfb282cbf128a65d82609b49174750e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.953125, "max_line_length": 101, "alphanum_fraction": 0.6360424028, "include": true, "reason": "import numpy", "num_tokens": 562}
|
#include "graph.hpp"
#include <fstream>
#include <stack>
#include <boost/filesystem.hpp>
#include <boost/graph/graphviz.hpp>
namespace fs = boost::filesystem;
//#####################################################################################################################
struct VertexWriter
{
VertexWriter(Graph::graph_type* graph)
: graph_{graph}
{
}
template <typename DescrT>
void operator()(std::ostream& out, DescrT const& v) const
{
out << "[label=\"" << (*graph_)[v].file << "\"]";
}
private:
Graph::graph_type* graph_;
};
//#####################################################################################################################
struct EdgeWriter
{
EdgeWriter(Graph::graph_type* graph)
: graph_{graph}
{
}
template <typename DescrT>
void operator()(std::ostream& out, DescrT const& v) const
{
//out << "[label=\"" << (*graph_)[v].file << "\"]";
}
private:
Graph::graph_type* graph_;
};
//#####################################################################################################################
struct GraphWriter
{
GraphWriter(Graph::graph_type* graph)
: graph_{graph}
{
}
void operator()(std::ostream& out) const
{
out << "randir = \"LR\";\n";
//out << "[label=\"" << (*graph_)[v].file << "\"]";
}
private:
Graph::graph_type* graph_;
};
//#####################################################################################################################
//#####################################################################################################################
void Graph::loadFromFile(std::string const& file, std::vector <std::string> const& blacklist)
{
std::ifstream reader{file, std::ios_base::binary};
loadFromStream(reader, blacklist);
}
//---------------------------------------------------------------------------------------------------------------------
void Graph::loadFromStream(std::istream& stream, std::vector <std::string> const& blacklist)
{
using namespace boost;
graph_.clear();
std::string line;
std::stack <vertex_descriptor> vertexStack;
// root vertex
vertexStack.push(add_vertex({"", "ROOT"}, graph_));
int levelIgnore = -1;
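    // Invariant: vertexStack always holds the ancestor chain of the current
    // line, so its size equals the current depth + 1 (ROOT sits at depth 0).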
while(std::getline(stream, line))
{
if (line.empty())
return;
if (line.front() != '.')
return;
if (line.back() == '\r')
line.pop_back();
unsigned int depth = 0;
while (line.front() == '.')
{
++depth;
line.erase(0, 1); // drop first character.
}
auto p = fs::path{line};
Vertex v{p.parent_path().string(), p.filename().string()};
// black listing
if (levelIgnore != -1 && depth > levelIgnore)
continue;
else
levelIgnore = -1;
bool filtered = false;
for (auto const& b : blacklist)
{
auto filterPath = fs::path{b}.make_preferred().string();
auto prefLine = p.make_preferred().string();
if (filterPath.empty())
continue;
if (filterPath.length() <= prefLine.length() && prefLine.substr(prefLine.length() - filterPath.length(), filterPath.length()) == filterPath)
{
filtered = true;
break;
}
}
if (filtered)
{
levelIgnore = depth;
// add edge, but dont push it to stack, since all following are ignored
auto current = add_vertex(v, graph_);
while (vertexStack.size() > depth)
vertexStack.pop();
add_edge(vertexStack.top(), current, graph_);
continue;
}
        // A) add a vertex for this entry
        auto current = add_vertex(v, graph_);
        // B) unwind the stack to this entry's parent depth
        while (vertexStack.size() > depth)
            vertexStack.pop();
        // C) link the entry to its parent
        add_edge(vertexStack.top(), current, graph_);
        // D) this entry becomes the potential parent of deeper entries
        vertexStack.push(current);
}
}
//---------------------------------------------------------------------------------------------------------------------
void Graph::toDotFile(std::string const& fileName)
{
std::ofstream stream(fileName.c_str());
/*
// write the dot file
write_graphviz(
stream,
graph_,
VertexWriter(&graph_),
EdgeWriter(&graph_),
GraphWriter(&graph_)
);
*/
    // walk all edges to record each vertex's parent and its own descriptor
    // (the ROOT vertex, index 0, is treated as its own parent)
    graph_[0].parent = 0;
auto es = boost::edges(graph_);
for (auto eit = es.first; eit != es.second; ++eit)
{
graph_[boost::target(*eit, graph_)].parent = boost::source(*eit, graph_);
graph_[boost::target(*eit, graph_)].descriptor = boost::target(*eit, graph_);
}
int maxLevel = 0;
auto vs = boost::vertices(graph_);
for (auto vit = vs.first; vit != vs.second; ++vit)
{
auto& vertex = graph_[*vit];
for (auto p = vertex.parent; p != 0; p = graph_[p].parent)
{
vertex.level++;
maxLevel = std::max(maxLevel, vertex.level);
}
}
std::vector <std::vector <Vertex>> levels(maxLevel + 1);
vs = boost::vertices(graph_);
for (auto vit = vs.first; vit != vs.second; ++vit)
{
levels[maxLevel - graph_[*vit].level].push_back(graph_[*vit]);
}
stream << "digraph G\n{\n";
stream << "\tnode[shape=box];\n";
stream << "\trankdir=\"LR\";\n";
stream << "\tsplines=polyline;\n";
for (auto const& i : levels)
{
stream << "\t{\n";
stream << "\t\trankdir=\"TB\"\n";
for (auto const& j : i)
{
stream << "\t\t" << j.descriptor << "[label=\"" << j.file << "\"];\n";
}
stream << "\t}\n";
}
stream << "\n";
es = boost::edges(graph_);
for (auto eit = es.first; eit != es.second; ++eit)
{
stream << "\t" << boost::source(*eit, graph_) << "->" << boost::target(*eit, graph_) << ";\n";
}
stream << "}";
}
//#####################################################################################################################
|
{"hexsha": "77d085167a351e8404e6adedbd0d82261b0081b5", "size": 6432, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "graph.cpp", "max_stars_repo_name": "5cript/include-graph", "max_stars_repo_head_hexsha": "be6b2b1278ec25edfb3c66bbd45be72c9deae3ff", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "graph.cpp", "max_issues_repo_name": "5cript/include-graph", "max_issues_repo_head_hexsha": "be6b2b1278ec25edfb3c66bbd45be72c9deae3ff", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "graph.cpp", "max_forks_repo_name": "5cript/include-graph", "max_forks_repo_head_hexsha": "be6b2b1278ec25edfb3c66bbd45be72c9deae3ff", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.3698630137, "max_line_length": 153, "alphanum_fraction": 0.4309701493, "num_tokens": 1385}
|
export RBMSplit
struct RBMSplit{VT,MT} <: MatrixNeuralNetwork
ar::VT
ac::VT
b::VT
Wr::MT
Wc::MT
end
@treelike RBMSplit
"""
RBMSplit([T=Complex{STD_REAL_PREC}], N, α, [initW, initb, inita])
Constructs a Restricted Boltzmann Machine to encode a vectorised density matrix,
with weights of type `T` (defaults to ComplexF32), `2N` input neurons and
`N⋅α` hidden neurons.
This network does not ensure positive-definiteness of the density matrix.
`N` must match the size of the lattice.
The initial parameters of the neurons are initialized with a rescaled normal
distribution of width 0.01 for the coupling matrices and visible biases, and
0.05 for the hidden biases. The default initializers can be overridden by
specifying
initW=(dims...)->rescaled_normal(T, 0.01, dims...),
initb=(dims...)->rescaled_normal(T, 0.05, dims...),
inita=(dims...)->rescaled_normal(T, 0.01, dims...),
Refs:
https://arxiv.org/abs/1902.07006
"""
RBMSplit(in::Int, α::Number, args...) = RBMSplit(ComplexF32, in, α, args...)
RBMSplit(T::Type, in, α,
initW=(dims...)->rescaled_normal(T, 0.01, dims...),
initb=(dims...)->rescaled_normal(T, 0.05, dims...),
inita=(dims...)->rescaled_normal(T, 0.01, dims...)) =
RBMSplit(inita(in), inita(in),
initb(convert(Int, α*in)),
initW(convert(Int, α*in), in), initW(convert(Int, α*in), in))
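# Minimal usage sketch (hypothetical values): build a split RBM for a 4-site
# lattice with hidden-unit density α = 2, then evaluate the log-amplitude of
# a random density-matrix entry.
#
#   net = RBMSplit(4, 2)
#   σr, σc = random_input_state(net)
#   lnρ = net(σr, σc)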
input_type(net::RBMSplit) = real(eltype(net.ar))
weight_type(net::RBMSplit) = out_type(net)
out_type(net::RBMSplit) = eltype(net.Wr)
input_shape(net::RBMSplit) = (length(net.ar), length(net.ac))
random_input_state(net::RBMSplit) =
(eltype(net.ar).([rand(0:1) for i=1:length(net.ar)]), eltype(net.ar).([rand(0:1) for i=1:length(net.ar)]))
is_analytic(net::RBMSplit) = true
(net::RBMSplit)(σ::State) = net(config(σ)...)
(net::RBMSplit)(σr, σc) = transpose(net.ar)*σr .+ transpose(net.ac)*σc .+ sum_autobatch(logℒ.(net.b .+
net.Wr*σr .+ net.Wc*σc))
function Base.show(io::IO, m::RBMSplit)
print(io, "RBMSplit($(eltype(m.ar)), n=$(length(m.ar)), α=$(length(m.b)/length(m.ar)))")
end
Base.show(io::IO, ::MIME"text/plain", m::RBMSplit) = print(
"RBMSplit($(eltype(m.ar)), n=$(length(m.ar)), α=$(length(m.b)/length(m.ar)))")
# Cached version
mutable struct RBMSplitCache{VT,VS,VST} <: NNCache{RBMSplit}
θ::VT
θ_tmp::VT
logℒθ::VT
∂logℒθ::VT
# complex sigmas
res::VS #batch
res_tmp::VST #batch
# states
σr::VT
σc::VT
valid::Bool # = false
end
cache(net::RBMSplit) =
RBMSplitCache(similar(net.b),
similar(net.b),
similar(net.b),
similar(net.b),
similar(net.b),
similar(net.b),
similar(net.b, length(net.ar)),
similar(net.b, length(net.ar)),
false)
(net::RBMSplit)(c::RBMSplitCache, σ::State) = net(c, config(σ))
(net::RBMSplit)(c::RBMSplitCache, (σr, σc)::Tuple{AbstractArray,AbstractArray}) = net(c, σr, σc)
function (net::RBMSplit)(c::RBMSplitCache, σr_r, σc_r)
θ = c.θ
θ_tmp = c.θ_tmp
logℒθ = c.logℒθ
T = eltype(θ)
# copy the states to complex valued states for the computations.
σr = c.σr; copyto!(σr, σr_r)
σc = c.σc; copyto!(σc, σc_r)
#θ .= net.b .+
# net.Wr*σr .+
# net.Wc*σc
mul!(θ, net.Wr, σr)
mul!(θ_tmp, net.Wc, σc)
θ .+= net.b .+ θ_tmp
logℒθ .= logℒ.(θ)
lnψ = dot(σr,net.ar) + dot(σc,net.ac) + sum(logℒθ)
return lnψ
end
function logψ_and_∇logψ!(∇logψ, net::RBMSplit, c::RBMSplitCache, σr_r, σc_r)
θ = c.θ
θ_tmp = c.θ_tmp
logℒθ = c.logℒθ
∂logℒθ = c.∂logℒθ
T = eltype(θ)
# copy the states to complex valued states for the computations.
σr = c.σr; copyto!(σr, σr_r)
σc = c.σc; copyto!(σc, σc_r)
#θ .= net.b .+
# net.Wr*σr .+
# net.Wc*σc
mul!(θ, net.Wr, σr)
mul!(θ_tmp, net.Wc, σc)
θ .+= net.b .+ θ_tmp
logℒθ .= logℒ.(θ)
∂logℒθ .= ∂logℒ.(θ)
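    # Gradient identities implemented below, for lnψ = aᵣ⋅σᵣ + a_c⋅σ_c + Σᵢ logℒ(θᵢ):
    # ∂lnψ/∂aᵣ = σᵣ,  ∂lnψ/∂b = ∂logℒ.(θ),  ∂lnψ/∂Wᵣ = ∂logℒ.(θ) σᵣᵀ (outer product),
    # and symmetrically for a_c and W_c.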
∇logψ.ar .= σr
∇logψ.ac .= σc
∇logψ.b .= ∂logℒθ
∇logψ.Wr .= ∂logℒθ .* transpose(σr)
∇logψ.Wc .= ∂logℒθ .* transpose(σc)
lnψ = dot(σr,net.ar) + dot(σc,net.ac) + sum(logℒθ)
return lnψ
end
|
{"hexsha": "d4945a3fadc4f8a4a7e484336fff995c872edb60", "size": 4273, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Networks/RBMSplit.jl", "max_stars_repo_name": "TheorieMPQ/NeuralQuantum.jl", "max_stars_repo_head_hexsha": "e78e2f44f83da2217965e6f4404eb29f1b87d321", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Networks/RBMSplit.jl", "max_issues_repo_name": "TheorieMPQ/NeuralQuantum.jl", "max_issues_repo_head_hexsha": "e78e2f44f83da2217965e6f4404eb29f1b87d321", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Networks/RBMSplit.jl", "max_forks_repo_name": "TheorieMPQ/NeuralQuantum.jl", "max_forks_repo_head_hexsha": "e78e2f44f83da2217965e6f4404eb29f1b87d321", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.4689655172, "max_line_length": 110, "alphanum_fraction": 0.5846009829, "num_tokens": 1545}
|
/-
Copyright (c) 2018 Simon Hudon. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Simon Hudon, Patrick Massot
-/
import tactic.pi_instances
import algebra.group.pi
import algebra.hom.ring
/-!
# Pi instances for ring
This file defines instances for ring, semiring and related structures on Pi types.
-/
namespace pi
universes u v w
variable {I : Type u} -- The indexing type
variable {f : I → Type v} -- The family of types already equipped with instances
variables (x y : Π i, f i) (i : I)
instance distrib [Π i, distrib $ f i] : distrib (Π i : I, f i) :=
by refine_struct { add := (+), mul := (*), .. }; tactic.pi_instance_derive_field
instance non_unital_non_assoc_semiring [∀ i, non_unital_non_assoc_semiring $ f i] :
non_unital_non_assoc_semiring (Π i : I, f i) :=
by refine_struct { zero := (0 : Π i, f i), add := (+), mul := (*), .. };
tactic.pi_instance_derive_field
instance non_unital_semiring [∀ i, non_unital_semiring $ f i] :
non_unital_semiring (Π i : I, f i) :=
by refine_struct { zero := (0 : Π i, f i), add := (+), mul := (*), .. };
tactic.pi_instance_derive_field
instance non_assoc_semiring [∀ i, non_assoc_semiring $ f i] :
non_assoc_semiring (Π i : I, f i) :=
by refine_struct { zero := (0 : Π i, f i), one := 1, add := (+), mul := (*), .. };
tactic.pi_instance_derive_field
instance semiring [∀ i, semiring $ f i] : semiring (Π i : I, f i) :=
by refine_struct { zero := (0 : Π i, f i), one := 1, add := (+), mul := (*),
nsmul := add_monoid.nsmul, npow := monoid.npow };
tactic.pi_instance_derive_field
instance non_unital_comm_semiring [∀ i, non_unital_comm_semiring $ f i] :
non_unital_comm_semiring (Π i : I, f i) :=
by refine_struct { zero := (0 : Π i, f i), add := (+), mul := (*), nsmul := add_monoid.nsmul };
tactic.pi_instance_derive_field
instance comm_semiring [∀ i, comm_semiring $ f i] : comm_semiring (Π i : I, f i) :=
by refine_struct { zero := (0 : Π i, f i), one := 1, add := (+), mul := (*),
nsmul := add_monoid.nsmul, npow := monoid.npow };
tactic.pi_instance_derive_field
instance non_unital_non_assoc_ring [∀ i, non_unital_non_assoc_ring $ f i] :
non_unital_non_assoc_ring (Π i : I, f i) :=
by refine_struct { zero := (0 : Π i, f i), add := (+), mul := (*),
neg := has_neg.neg, nsmul := add_monoid.nsmul, zsmul := sub_neg_monoid.zsmul };
tactic.pi_instance_derive_field
instance non_unital_ring [∀ i, non_unital_ring $ f i] :
non_unital_ring (Π i : I, f i) :=
by refine_struct { zero := (0 : Π i, f i), add := (+), mul := (*),
neg := has_neg.neg, nsmul := add_monoid.nsmul, zsmul := sub_neg_monoid.zsmul };
tactic.pi_instance_derive_field
instance non_assoc_ring [∀ i, non_assoc_ring $ f i] :
non_assoc_ring (Π i : I, f i) :=
by refine_struct { zero := (0 : Π i, f i), add := (+), mul := (*),
neg := has_neg.neg, nsmul := add_monoid.nsmul, zsmul := sub_neg_monoid.zsmul };
tactic.pi_instance_derive_field
instance ring [∀ i, ring $ f i] : ring (Π i : I, f i) :=
by refine_struct { zero := (0 : Π i, f i), one := 1, add := (+), mul := (*),
neg := has_neg.neg, nsmul := add_monoid.nsmul, zsmul := sub_neg_monoid.zsmul,
npow := monoid.npow };
tactic.pi_instance_derive_field
instance non_unital_comm_ring [∀ i, non_unital_comm_ring $ f i] :
non_unital_comm_ring (Π i : I, f i) :=
by refine_struct { zero := (0 : Π i, f i), add := (+), mul := (*), neg := has_neg.neg,
nsmul := add_monoid.nsmul, zsmul := sub_neg_monoid.zsmul };
tactic.pi_instance_derive_field
instance comm_ring [∀ i, comm_ring $ f i] : comm_ring (Π i : I, f i) :=
by refine_struct { zero := (0 : Π i, f i), one := 1, add := (+), mul := (*),
neg := has_neg.neg, nsmul := add_monoid.nsmul, zsmul := sub_neg_monoid.zsmul,
npow := monoid.npow };
tactic.pi_instance_derive_field
/-- A family of ring homomorphisms `f a : γ →+* β a` defines a ring homomorphism
`pi.ring_hom f : γ →+* Π a, β a` given by `pi.ring_hom f x b = f b x`. -/
@[simps]
protected def ring_hom {γ : Type w} [Π i, non_assoc_semiring (f i)] [non_assoc_semiring γ]
(g : Π i, γ →+* f i) : γ →+* Π i, f i :=
{ to_fun := λ x b, g b x,
map_add' := λ x y, funext $ λ z, (g z).map_add x y,
map_mul' := λ x y, funext $ λ z, (g z).map_mul x y,
map_one' := funext $ λ z, (g z).map_one,
map_zero' := funext $ λ z, (g z).map_zero }
lemma ring_hom_injective {γ : Type w} [nonempty I] [Π i, non_assoc_semiring (f i)]
[non_assoc_semiring γ] (g : Π i, γ →+* f i) (hg : ∀ i, function.injective (g i)) :
function.injective (pi.ring_hom g) :=
λ x y h, let ⟨i⟩ := ‹nonempty I› in hg i ((function.funext_iff.mp h : _) i)
end pi
section ring_hom
universes u v
variable {I : Type u}
/-- Evaluation of functions into an indexed collection of rings at a point is a ring
homomorphism. This is `function.eval` as a `ring_hom`. -/
@[simps]
def pi.eval_ring_hom (f : I → Type v) [Π i, non_assoc_semiring (f i)] (i : I) :
(Π i, f i) →+* f i :=
{ ..(pi.eval_monoid_hom f i),
..(pi.eval_add_monoid_hom f i) }
/-- `function.const` as a `ring_hom`. -/
@[simps]
def pi.const_ring_hom (α β : Type*) [non_assoc_semiring β] : β →+* (α → β) :=
{ to_fun := function.const _,
.. pi.ring_hom (λ _, ring_hom.id β) }
/-- Ring homomorphism between the function spaces `I → α` and `I → β`, induced by a ring
homomorphism `f` between `α` and `β`. -/
@[simps] protected def ring_hom.comp_left {α β : Type*} [non_assoc_semiring α]
[non_assoc_semiring β] (f : α →+* β) (I : Type*) :
(I → α) →+* (I → β) :=
{ to_fun := λ h, f ∘ h,
.. f.to_monoid_hom.comp_left I,
.. f.to_add_monoid_hom.comp_left I }
end ring_hom
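/- A minimal usage sketch (not part of the original file): with the pi instances
above in scope, pointwise identities on `ℕ → ℤ` follow from the generic monoid
lemmas. -/
example : (1 : ℕ → ℤ) * 1 = 1 := one_mul 1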
|
{"author": "nick-kuhn", "repo": "leantools", "sha": "567a98c031fffe3f270b7b8dea48389bc70d7abb", "save_path": "github-repos/lean/nick-kuhn-leantools", "path": "github-repos/lean/nick-kuhn-leantools/leantools-567a98c031fffe3f270b7b8dea48389bc70d7abb/src/algebra/ring/pi.lean"}
|
"""
struct CXXScopeSpec <: Any
Holds a pointer to a `clang::CXXScopeSpec` object.
"""
struct CXXScopeSpec
ptr::CXCXXScopeSpec
end
Base.unsafe_convert(::Type{CXCXXScopeSpec}, x::CXXScopeSpec) = x.ptr
Base.cconvert(::Type{CXCXXScopeSpec}, x::CXXScopeSpec) = x
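# A minimal usage sketch (hypothetical shim function and `libclang` handle, not
# part of this file): the `cconvert`/`unsafe_convert` pair above lets a
# `CXXScopeSpec` be passed directly to `ccall` wherever a raw `CXCXXScopeSpec`
# is expected, e.g.
#
#   is_empty(ss::CXXScopeSpec) =
#       ccall((:clang_CXXScopeSpec_isEmpty, libclang), Bool, (CXCXXScopeSpec,), ss)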
|
{"hexsha": "678645c4fbc66c39e6152ef508f4df99414af874", "size": 266, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/clang/core/Sema/DeclSpec.jl", "max_stars_repo_name": "vchuravy/ClangCompiler.jl", "max_stars_repo_head_hexsha": "47080072b059465f8176349c6e67bc678fa238d2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2021-08-24T04:01:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-11T00:43:19.000Z", "max_issues_repo_path": "src/clang/core/Sema/DeclSpec.jl", "max_issues_repo_name": "vchuravy/ClangCompiler.jl", "max_issues_repo_head_hexsha": "47080072b059465f8176349c6e67bc678fa238d2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-07-17T12:50:48.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-22T20:27:02.000Z", "max_forks_repo_path": "src/clang/core/Sema/DeclSpec.jl", "max_forks_repo_name": "vchuravy/ClangCompiler.jl", "max_forks_repo_head_hexsha": "47080072b059465f8176349c6e67bc678fa238d2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-03T20:49:33.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-03T20:49:33.000Z", "avg_line_length": 24.1818181818, "max_line_length": 68, "alphanum_fraction": 0.7293233083, "num_tokens": 89}
|
from torchvision import datasets, transforms
from base import BaseDataLoader
import numpy as np
import cv2
import random
from utils import util
import torch.utils.data as data
import os
import torch
import torch.utils.data.sampler as sampler
def safe_crop(mat, x, y, crop_size=(320, 320)):
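    # Crop a crop_h x crop_w window whose top-left corner is (x, y). Inputs
    # smaller than the window are upscaled first, out-of-range corners are
    # clamped back inside the image, and any remaining border is zero-padded
    # as a safety net. The result is always resized back to 320x320.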
crop_h, crop_w = crop_size
if len(mat.shape) == 2:
ret = np.zeros((crop_h, crop_w), dtype=np.float32)
else:
ret = np.zeros((crop_h, crop_w, 3), dtype=np.float32) # HWC
h = mat.shape[0]
w = mat.shape[1]
    ratio = w / h  # aspect ratio
    if h < crop_h:
        new_w = int(crop_h * ratio)
        mat = cv2.resize(mat, dsize=(new_w, crop_h), interpolation=cv2.INTER_CUBIC)
        w = mat.shape[1]
    if w < crop_w:
        new_h = int(crop_w / ratio)
mat = cv2.resize(mat, dsize=(crop_w, new_h), interpolation=cv2.INTER_CUBIC)
h = mat.shape[0]
w = mat.shape[1]
maxh = y + crop_h
maxw = x + crop_w
#print(crop_size)
#print(h,w,maxh,maxw,x,y)
if maxh > h:
y = h - crop_h
if maxw > w:
x = w - crop_w
crop_area = mat[y: y + crop_h, x:x + crop_w] # don't exceed mat's bound
h, w = crop_area.shape[:2]
ret[0:h, 0:w] = crop_area
if crop_size != (320, 320):
ret = cv2.resize(ret, dsize=(320, 320), interpolation=cv2.INTER_CUBIC) # finally, all resize to 320*320
return ret
class my_Transform(object):
def __init__(self, flip=False):
self.flip = flip
def __call__(self, img, alpha, fg, bg, trimap):
different_sizes = [(320, 320), (480, 480), (640, 640)]
crop_size = random.choice(different_sizes)
crop_h, crop_w = crop_size
# random crop in the unknown region center
unknown_area = np.where(trimap == 128)
unknown_area_num = len(unknown_area[0])
x, y = 0, 0
if unknown_area_num > 0:
id = np.random.choice(range(unknown_area_num))
center_x = unknown_area[1][id]
center_y = unknown_area[0][id]
x = max(0, center_x - int(crop_w / 2))
y = max(0, center_y - int(crop_h / 2))
img = safe_crop(img, x, y, crop_size)
alpha = safe_crop(alpha, x, y, crop_size)
bg = safe_crop(bg, x, y, crop_size)
fg = safe_crop(fg, x, y, crop_size)
trimap = safe_crop(trimap, x, y, crop_size)
if self.flip and random.random() <= 0.5:
img = cv2.flip(img, 1)
alpha = cv2.flip(alpha, 1)
fg = cv2.flip(fg, 1)
bg = cv2.flip(bg, 1)
trimap = cv2.flip(trimap, 1)
return img, alpha, fg, bg, trimap
def get_files(dir):
res = []
for root, dirs, files in os.walk(dir, followlinks=True):
for f in files:
if f.endswith(".jpg") or f.endswith(".png") or f.endswith(".jpeg") or f.endswith(".JPG") or f.endswith(
'.PNG'):
res.append(f)
return res
def image_resize(image,shape=None):
#h = image.shape[0]
#w = image.shape[1]
#new_h = h - h % 32
#new_w = w - w % 32
#if shape != None:
# new_h, new_w, _ = shape
new_w = 640
new_h = 640
image = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
return image
class DIMdataset(data.Dataset):
def __init__(self, config, usage, transform=None):
self.transform = transform
self.config = config
self.samples = []
self.usage = usage
# fg_paths = get_files(config['fg_dir'])
# bg_paths = get_files(config['bg_dir'])
# image path
self.image_paths = config['data_loader']['{}_image_dir'.format(usage)]
self.fg_paths = config['data_loader']['{}_fg_dir'.format(usage)]
self.bg_paths = config['data_loader']['{}_bg_dir'.format(usage)]
self.alpha_paths = config['data_loader']['{}_alpha_dir'.format(usage)]
# image name
self.image_files = get_files(config['data_loader']['{}_image_dir'.format(usage)])
fg_files = config['data_loader']['{}_fg_names'.format(usage)]
bg_files = config['data_loader']['{}_bg_names'.format(usage)]
with open(fg_files, 'r') as f:
self.fg_names = f.read().splitlines()
with open(bg_files, 'r') as f:
self.bg_names = f.read().splitlines()
# self.fg_cnt = len(fg_paths)
# self.bg_cnt = len(bg_paths)
self.image_cnt = len(self.image_paths)
"""
for fg_path in fg_paths:
alpha_path = fg_path.replace(config['fg_dir'], config['alpha_dir']) # because alpha_name = fg_name only the dir is diff
assert(os.path.exists(alpha_path))
assert(os.path.exists(fg_path))
self.fg_samples.append(alpha_path, fg_path)
print("\t--Valid FG Samples:{}" .format(self.fg_cnt))
for bg_path in bg_paths:
assert(os.path.exists(bg_path))
self.bg_samples.append(bg_path)
print("\t -- Vaild BG Samples:{}" .format(self.bg_cnt))
assert(self.fg_cnt > 0 and self.bg_cnt > 0)
"""
"""
for fg_path in fg_paths:
alpha_path = fg_path.replace(self.args.fgDir, self.args.alphaDir)
img_path = fg_path.replace(self.args.fgDir, self.args.imgDir)
bg_path = fg_path.replace(self.args.fgDir, self.args.bgDir)
assert(os.path.exists(alpha_path))
assert(os.path.exists(fg_path))
assert(os.path.exists(bg_path))
assert(os.path.exists(img_path))
self.samples.append((alpha_path, fg_path, bg_path, img_path))
print("\t--Valid Samples: {}".format(self.cnt))
assert(self.cnt > 0)
"""
for image_name in self.image_files:
fg_index = int(image_name.split('_')[0])
bg_index = int(image_name.split('_')[1].split('.')[0])
fg_name = self.fg_names[fg_index]
bg_name = self.bg_names[bg_index]
fg_path = os.path.join(self.fg_paths, fg_name)
alpha_path = os.path.join(self.alpha_paths, fg_name) # alpha has the same name as fg
bg_path = os.path.join(self.bg_paths, bg_name)
image_path = os.path.join(self.image_paths, image_name)
assert (os.path.exists(alpha_path))
assert (os.path.exists(fg_path))
assert (os.path.exists(bg_path))
assert (os.path.exists(image_path))
self.samples.append((alpha_path, fg_path, bg_path, image_path))
print("\t--Valid Samples: {}".format(len(self.samples)))
assert (self.image_cnt > 0)
# super(DIMDataLoader, self).__init__(self.samples, batch_size, shuffle, validation_split,
# num_workers)
def __getitem__(self, item):
alpha_path, fg_path, bg_path, img_path = self.samples[item]
img_info = [fg_path, alpha_path, bg_path, img_path]
# read fg, alpha
fg = cv2.imread(fg_path)[:, :, 0:3]
alpha = cv2.imread(alpha_path)
        alpha = cv2.cvtColor(alpha, cv2.COLOR_RGB2GRAY)  # convert to a 2-dim grayscale map
bg = cv2.imread(bg_path)[:, :, 0:3]
img = cv2.imread(img_path)[:, :, 0:3]
if self.usage == 'test':
img = image_resize(img)
fg = image_resize(fg)
bg = image_resize(bg,img.shape)
alpha = image_resize(alpha)
# print("read: bg:{},fg:{},img:{}" .format(bg.shape, fg.shape, img.shape))
# assert(bg.shape == fg.shape and bg.shape == img.shape)
img_info.append(fg.shape)
h, w, c = fg.shape # shape is HWC
trimap = util.gen_trimap(alpha)
# random crop and flip
if self.usage != "test" and self.transform:
#if self.transform:
img, alpha, fg, bg, trimap = self.transform(img, alpha, fg, bg, trimap)
trimap = util.gen_trimap(alpha)
# grad = util.compute_gradient(img)
# change numpy to tensor and change [HWC] -> [CHW]
# print("alpha:{}, trimap:{}, bg:{},fg:{},img:{}".format(alpha.shape, trimap.shape, bg.shape, fg.shape, img.shape))
# input()
alpha = torch.from_numpy(alpha.astype(np.float32)[np.newaxis, :, :])
trimap = torch.from_numpy(trimap.astype(np.float32)[np.newaxis, :, :])
# grad = torch.from_numpy(grad.astype(np.float32)[np.newaxis, :, :])
img = torch.from_numpy(img.astype(np.float32)).permute(2, 0, 1)
fg = torch.from_numpy(fg.astype(np.float32)).permute(2, 0, 1)
bg = torch.from_numpy(bg.astype(np.float32)).permute(2, 0, 1)
return img, alpha, fg, bg, trimap
def __len__(self):
return len(self.samples)
def dataloader(config, usage, batch_size, shuffle, validation_split, num_workers, transform_switch):
if transform_switch is True:
transform = my_Transform(flip=True)
dataset = DIMdataset(config, usage, transform)
else:
dataset = DIMdataset(config, usage)
num_samples = len(dataset)
# print(num_samples)
    idx_full = np.arange(num_samples)  # list of indices 0..n-1
    np.random.seed(0)  # fix the random seed (numpy)
    np.random.shuffle(idx_full)  # shuffle
    len_valid = int(num_samples * validation_split)  # size of the validation set
    valid_idx = idx_full[0:len_valid]  # take the validation samples
    train_idx = np.delete(idx_full, np.arange(0, len_valid))  # drop the validation indices
    train_sampler = sampler.SubsetRandomSampler(train_idx)  # sampler for the training samples
valid_sampler = sampler.SubsetRandomSampler(valid_idx)
# print(len(train_sampler))
# print(len(valid_sampler))
train_data_loader = data.DataLoader(dataset=dataset, sampler=train_sampler, batch_size=batch_size,
num_workers=num_workers)
valid_data_loader = data.DataLoader(dataset=dataset, sampler=valid_sampler, batch_size=batch_size,
num_workers=num_workers)
# print(len(train_data_loader))
# print(len(valid_data_loader))
return train_data_loader, valid_data_loader
def dataloader_test(config, usage, batch_size, shuffle, validation_split, num_workers, transform_switch):
if transform_switch is True:
transform = my_Transform(flip=True)
dataset = DIMdataset(config, usage, transform)
else:
dataset = DIMdataset(config, usage)
if usage == 'train':
data_loader = data.DataLoader(dataset=dataset, shuffle=shuffle, batch_size=batch_size,
num_workers=num_workers)
else:
data_loader = data.DataLoader(dataset=dataset, shuffle=shuffle, batch_size=batch_size,
num_workers=num_workers)
# print(len(train_data_loader))
# print(len(valid_data_loader))
return data_loader
|
{"hexsha": "bfdf69444e0f748e2604822822bc2f7b8762c706", "size": 10767, "ext": "py", "lang": "Python", "max_stars_repo_path": "data_loader/data_loaders.py", "max_stars_repo_name": "wyk0517/image-matting-experiment", "max_stars_repo_head_hexsha": "1b86bdf241468f65f3b551b48db72d277b8163db", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-09-20T07:13:28.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-22T11:32:33.000Z", "max_issues_repo_path": "data_loader/data_loaders.py", "max_issues_repo_name": "wyk0517/image-matting-experiment", "max_issues_repo_head_hexsha": "1b86bdf241468f65f3b551b48db72d277b8163db", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data_loader/data_loaders.py", "max_forks_repo_name": "wyk0517/image-matting-experiment", "max_forks_repo_head_hexsha": "1b86bdf241468f65f3b551b48db72d277b8163db", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.8700361011, "max_line_length": 133, "alphanum_fraction": 0.6020247051, "include": true, "reason": "import numpy", "num_tokens": 2805}
|
#ifndef SAMPML_INCLUDE_FEATURE_VECTOR_HPP
#define SAMPML_INCLUDE_FEATURE_VECTOR_HPP
#include <dlib/matrix.h>
#include "common.hpp"
namespace SAMPML_NAMESPACE {
template <typename T, std::size_t N>
using feature_vector = dlib::matrix<T, N, 1>;
}
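/*
 * A minimal usage sketch (illustrative only; assumes SAMPML_NAMESPACE expands
 * to `sampml`): a 3-feature vector of doubles, filled with dlib's
 * comma-initialization syntax.
 *
 *   sampml::feature_vector<double, 3> fv;
 *   fv = 1.0, 2.0, 3.0;
 */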
#endif /* SAMPML_INCLUDE_FEATURE_VECTOR_HPP */
|
{"hexsha": "34d2e620d55beabd8e51895298e817a401ed32f9", "size": 295, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "sampml/sampml/feature_vector.hpp", "max_stars_repo_name": "YashasSamaga/sampml", "max_stars_repo_head_hexsha": "dc84110b53b120caeeb4c0234fcfd6ab16793c59", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12.0, "max_stars_repo_stars_event_min_datetime": "2018-12-01T18:30:32.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-08T21:53:36.000Z", "max_issues_repo_path": "sampml/sampml/feature_vector.hpp", "max_issues_repo_name": "YashasSamaga/sampml", "max_issues_repo_head_hexsha": "dc84110b53b120caeeb4c0234fcfd6ab16793c59", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2019-08-21T17:52:59.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-17T03:28:11.000Z", "max_forks_repo_path": "sampml/sampml/feature_vector.hpp", "max_forks_repo_name": "YashasSamaga/sampml", "max_forks_repo_head_hexsha": "dc84110b53b120caeeb4c0234fcfd6ab16793c59", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2020-09-04T14:53:46.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T17:53:33.000Z", "avg_line_length": 22.6923076923, "max_line_length": 49, "alphanum_fraction": 0.7661016949, "num_tokens": 75}
|
# Loads the given model and data and predicts (labels and probabilities).
import pandas as pd
import numpy as np
import pickle
import joblib
import argparse
import sys
import os
from sklearn.ensemble import RandomForestClassifier as RFC
### Configuration
_datasettypes = ['covid19_infected_0','hospital_admission_12','icu_admission_-12','icu_admission_12']
_all_targets = ['hospital_admitted','ICU_admitted','used_ventilator','death']
_rel_targets = {
'covid19_infected_0':_all_targets[:],
'hospital_admission_12':_all_targets[1:],
'icu_admission_-12':_all_targets[2:],
'icu_admission_12':_all_targets[2:],
}
_feature_sets = ['basicinfo','disease','temporal_features','tests']
### Loads the model, default path is "models/"
def load_model(datasettype, feature_set, target, path="models/"):
model = "rf"
model_save_name = path + datasettype + "_" + model + "_" + "_".join(feature_set) + "_" + target
# Load list of features
[_, colnames] = pickle.load(open(model_save_name+"_dtypes_colnames.pkl", "rb"))
# Load model
model = joblib.load(model_save_name+".model")
return model, colnames
### Load patients from given path
def load_data(path, features):
if not os.path.isfile(path):
return None
data = pd.read_csv(path, sep=';', index_col='pid')
return data[features].values
### API input mapping
_thresholds_map = dict(zip(['test','hospital','pre-icu','post-icu'],_datasettypes))
_target_map = dict(zip(['hospital','icu','ventilator','death'],_all_targets))
_fset_map = {
'basic': _feature_sets[:1],
'comorbidities': _feature_sets[:2],
'temporal': _feature_sets[:3],
'tests': _feature_sets[:],
}
### Usage
### python cope.py <data-file> [-t <time>] [-p <predict>] [-f <features>]
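### e.g. (hypothetical data file): python cope.py patients.csv -t hospital -p icu -f temporal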
parser = argparse.ArgumentParser(description='The COvid19 Prediction Engine (COPE).')
parser.add_argument('data', type=str, help='data set')
parser.add_argument('-t','--time', default='test', metavar='T', choices=list(_thresholds_map.keys()),
help='Time of prediction. Must be one of {'+','.join(_thresholds_map.keys())+'}, default is \'test\'.')
parser.add_argument('-p','--predict', default='death', metavar='P', choices=list(_target_map.keys()),
help='Target to predict. Must be one of {'+','.join(_target_map.keys())+'}, default is \'death\'.')
parser.add_argument('-f','--features', default='comorbidities', metavar='FS', choices=list(_fset_map.keys()),
help='Feature set FS to use. Must be one of {'+','.join(_fset_map.keys())+'}, default is \'comorbidities\'.')
# Parse and get args
args = parser.parse_args()
data_path = args.data
threshold = _thresholds_map[args.time]
target = _target_map[args.predict]
fset = _fset_map[args.features]
# Check feature use at testing time
if threshold=='covid19_infected_0' and len(fset)>2:
print("The given feature set is not applicable for time='test', as temporal features and in-hospital tests are not available at this time.")
sys.exit(1)
# Check relevant target
if target not in _rel_targets[threshold]:
print("Predict='"+str(args.predict)+"' is not relevant for time='"+str(args.time)+"'")
sys.exit(1)
# Load model
model, cols = load_model(threshold, fset, target)
# Load data
data = load_data(data_path, cols)
if data is None:
print("Data file not found!")
sys.exit(1)
print("COvid Prediction Engine.")
print("Time = '"+args.time+"'")
print("Prediction target = '"+args.predict+"'")
print("Features = '"+args.features+"'")
print("")
print("Output (labels):")
print(" ".join([str(x) for x in model.predict(data)]))
print("Output (probabilities in %):")
print(" ".join([str(round(x*100,1)) for x in model.predict_proba(data)[:,1]]))
|
{"hexsha": "e92dc2bb2a7fe36405ed658d8364a2d9d1d3d138", "size": 3791, "ext": "py", "lang": "Python", "max_stars_repo_path": "cope.py", "max_stars_repo_name": "StephanLorenzen/Covid19PredictionEngine", "max_stars_repo_head_hexsha": "af9550a01110175989bc295006cfff1bd0babfac", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cope.py", "max_issues_repo_name": "StephanLorenzen/Covid19PredictionEngine", "max_issues_repo_head_hexsha": "af9550a01110175989bc295006cfff1bd0babfac", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cope.py", "max_forks_repo_name": "StephanLorenzen/Covid19PredictionEngine", "max_forks_repo_head_hexsha": "af9550a01110175989bc295006cfff1bd0babfac", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.2929292929, "max_line_length": 144, "alphanum_fraction": 0.6779213928, "include": true, "reason": "import numpy", "num_tokens": 984}
|
# -*- coding: utf-8 -*-
# Author: HowkeWayne
# Date: 2019/4/18 - 9:11
"""
File Description...
lenet-5 网络测试实验
LeNet5 implements on tensorflow
"""
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
from tensorflow.examples.tutorials.mnist import input_data
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" # TF只显示 error 信息
LOG_DIR = os.path.join(os.getcwd(), 'logs')
NAME_TO_VISUALISE_VARIABLE = "mnistEmbedding"
TO_EMBED_COUNT = 500
def create_sprite_image(images):
"""Returns a sprite image consisting of images passed as argument. Images should be count x width x height"""
if isinstance(images, list):
images = np.array(images)
img_h = images.shape[1]
img_w = images.shape[2]
n_plots = int(np.ceil(np.sqrt(images.shape[0])))
spriteimage = np.ones((img_h * n_plots, img_w * n_plots))
for i in range(n_plots):
for j in range(n_plots):
this_filter = i * n_plots + j
if this_filter < images.shape[0]:
this_img = images[this_filter]
spriteimage[i * img_h:(i + 1) * img_h, j * img_w:(j + 1) * img_w] = this_img
return spriteimage
def vector_to_matrix_mnist(mnist_digits):
"""Reshapes normal mnist digit (batch,28*28) to matrix (batch,28,28)"""
return np.reshape(mnist_digits, (-1, 28, 28))
def invert_grayscale(mnist_digits):
""" Makes black white, and white black """
return 1 - mnist_digits
class LeNet5:
def __init__(self):
        print('Current TensorFlow version: {0}'.format(tf.__version__))
        print('Current Keras version: {0}'.format(tf.keras.__version__))
        # note: the path must be an absolute path
        self.mnist = input_data.read_data_sets('./MNIST_data', one_hot=True)
# Up method is deprecated,using below way(tf.keras)
# self.mnist = tf.keras.datasets.mnist
# (self.mnist.train_image, self.mnist.train_label), (self.mnist.test_image, self.mnist.test_label) \
# = self.mnist.load_data(path=os.getcwd() + r'\Data\mnist.npz')
        self.path_for_mnist_sprites = os.path.join(LOG_DIR, 'mnistdigits.png')  # sprite image path
        self.path_for_mnist_metadata = os.path.join(LOG_DIR, 'metadata.tsv')  # path of the label/index mapping table
        self.summary_writer = tf.summary.FileWriter(LOG_DIR)  # event writer
        self.embedding_var = tf.Variable(tf.ones([1024, 64]), name=NAME_TO_VISUALISE_VARIABLE)  # embedding variable
        self.config = projector.ProjectorConfig()  # projector configuration
self.embedding = self.config.embeddings.add()
self.embedding.tensor_name = self.embedding_var.name
# Specify where you find the metadata
self.embedding.metadata_path = self.path_for_mnist_metadata # 'metadata.tsv'
# Specify where you find the sprite (we will create this later)
self.embedding.sprite.image_path = self.path_for_mnist_sprites # 'mnistdigits.png'
self.embedding.sprite.single_image_dim.extend([28, 28])
# Say that you want to visualise the embeddings
projector.visualize_embeddings(self.summary_writer, self.config)
@staticmethod
def softmax(x):
"""
softmax function implements with numpy
parameters:
x: a numpy array
"""
return np.exp(x) / np.sum(np.exp(x), axis=1, keepdims=True)
# LeNet-5 model
@staticmethod
def inference(input_tensor):
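        # Shape trace for a 28x28x1 input (convs are VALID / stride 1, pools 2x2 / stride 2):
        # 28x28x1 -> conv1 5x5 -> 24x24x6 -> pool1 -> 12x12x6
        # -> conv2 5x5 -> 8x8x16 -> pool2 -> 4x4x16 -> flatten -> 256,
        # which matches the fc1 weight shape [256, 128] below.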
with tf.variable_scope("layer1-conv1"):
conv1_weight = tf.get_variable(name="conv1_variable", shape=[5, 5, 1, 6],
initializer=tf.truncated_normal_initializer())
conv1_bias = tf.get_variable(name="conv1_bias", shape=[6], initializer=tf.constant_initializer(0.0))
conv1 = tf.nn.conv2d(input=input_tensor, filter=conv1_weight, strides=[1, 1, 1, 1], padding="VALID")
relu1 = tf.nn.relu(tf.add(conv1, conv1_bias))
pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID")
with tf.variable_scope("layer2-conv2"):
conv2_weight = tf.get_variable(name="conv2_variable", shape=[5, 5, 6, 16],
initializer=tf.truncated_normal_initializer())
conv2_bias = tf.get_variable(name="conv2_bias", shape=[16], initializer=tf.constant_initializer(0.0))
conv2 = tf.nn.conv2d(input=pool1, filter=conv2_weight, strides=[1, 1, 1, 1], padding="VALID")
relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_bias))
pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID")
with tf.variable_scope("layer3-fc1"):
conv_layer_flatten = tf.layers.flatten(inputs=pool2) # [batch_size, 256]
fc1_variable = tf.get_variable(name='fc1_variable', shape=[256, 128],
initializer=tf.random_normal_initializer()) * 0.01
fc1_bias = tf.get_variable(name='fc1_bias', shape=[1, 128], initializer=tf.constant_initializer(value=0))
fc1 = tf.nn.relu(tf.add(tf.matmul(conv_layer_flatten, fc1_variable), fc1_bias)) # [batch_size, 120]
with tf.variable_scope("layer4-fc2"):
fc2_variable = tf.get_variable(name="fc2_variable", shape=[128, 64],
initializer=tf.random_normal_initializer()) * 0.01 # [batch_size, 84]
fc2_bias = tf.get_variable(name="fc2_bias", shape=[1, 64], initializer=tf.constant_initializer(value=0))
fc2 = tf.nn.relu(tf.add(tf.matmul(fc1, fc2_variable), fc2_bias)) # [batch_size, 64]
with tf.variable_scope("layer5-output"):
output_variable = tf.get_variable(name="output_variable", shape=[64, 10],
initializer=tf.random_normal_initializer()) * 0.01
output_bias = tf.get_variable(name="output_bias", shape=[1, 10],
initializer=tf.constant_initializer(value=0))
output = tf.add(tf.matmul(fc2, output_variable), output_bias) # [batch_size, 10]
return output, fc2
def variable_summaries(self, var):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
# training model
def train(self, iter_num=500, batch_size=400, learning_rate=0.1, learning_rate_decay=0.85):
costs = []
x = tf.placeholder(dtype=tf.float32, shape=[None, 28, 28, 1], name="x")
y = tf.placeholder(dtype=tf.float32, shape=[None, 10], name="y")
output, fc2 = self.inference(x)
assignment = self.embedding_var.assign(fc2)
        # (softmax -> cross-entropy) -> mean loss
        cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=y, name="loss"))
        train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)
        saver = tf.train.Saver()  # saver for the model parameters
file_writer = tf.summary.FileWriter(LOG_DIR)
with tf.Session() as sess:
file_writer.add_graph(sess.graph)
init = tf.global_variables_initializer()
sess.run(init)
for i in range(iter_num):
batch_xs, batch_ys = self.mnist.train.next_batch(batch_size)
batch_xs = batch_xs.reshape([-1, 28, 28, 1])
loss, _ = sess.run([cross_entropy, train_step], feed_dict={x: batch_xs, y: batch_ys})
costs.append(loss)
                if i % 100 == 0:
                    # note: the decayed value below is not fed back into the
                    # AdamOptimizer, which was built with the initial learning_rate
                    learning_rate = learning_rate * learning_rate_decay ** (i / 100)
                    print("loss after %d iterations: " % i + str(loss))
batch_xs = self.mnist.validation.images[:1024]
# batch_ys = self.mnist.validation.labels[:1024]
batch_xs = batch_xs.reshape([-1, 28, 28, 1])
sess.run(assignment, feed_dict={x: batch_xs, y: self.mnist.validation.labels[:1024]})
saver.save(sess, os.path.join(LOG_DIR, "model.ckpt"), i)
# saver.save(sess, os.path.join(LOG_DIR, "model.ckpt"))
to_visualise = self.mnist.validation.images[:1024]
labels = [np.argmax(label) for label in self.mnist.validation.labels[:1024]]
to_visualise = vector_to_matrix_mnist(to_visualise)
to_visualise = invert_grayscale(to_visualise)
sprite_image = create_sprite_image(to_visualise)
plt.imsave(self.path_for_mnist_sprites, sprite_image, cmap='gray')
# plt.imshow(sprite_image, cmap='gray')
with open(self.path_for_mnist_metadata, 'w') as f:
f.write("Index\tLabel\n")
for index, label in enumerate(labels):
f.write("%d\t%d\n" % (index, label))
plt.figure()
plt.title("loss")
plt.xlabel("iteration num")
plt.ylabel("loss")
plt.plot(np.arange(0, iter_num), costs)
plt.show()
def evaluate(self, images, y_true):
try:
tf.reset_default_graph()
except AssertionError:
print('"tf.reset_default_graph()" function is called within a nested graph.')
return
x = tf.placeholder(dtype=tf.float32, shape=[None, 28, 28, 1])
        output, _ = self.inference(x)  # inference returns (logits, fc2)
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, "./mnistModel/model.ckpt")
output = sess.run(output, feed_dict={x: images})
y_pred = np.argmax(self.softmax(output), axis=1).reshape(-1, 1)
accuracy = np.mean(y_pred == y_true)
print("accuracy is " + str(accuracy))
def predict(self, image):
tf.reset_default_graph()
image = image.reshape([1, 28, 28, 1])
x = tf.placeholder(dtype=tf.float32, shape=[None, 28, 28, 1])
        predict, _ = self.inference(x)  # inference returns (logits, fc2)
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, "./mnistModel/model.ckpt")
predict = sess.run(predict, feed_dict={x: image})
y_pred = np.argmax(self.softmax(predict), axis=1).reshape(-1, 1)
print("预测结果为:" + str(y_pred))
if __name__ == "__main__":
model = LeNet5()
# train model
model.train(iter_num=1000, batch_size=512, learning_rate=0.1)
# # evaluate model on trainSet
# images_train = model.mnist.train.images.reshape([-1, 28, 28, 1])
# y_true_train = np.argmax(model.mnist.train.labels, axis=1).reshape(-1, 1)
# model.evaluate(images_train, y_true_train) # accuracy is 0.9939818181818182
#
# # evaluate model on testSet
# images_test = model.mnist.test.images.reshape([-1, 28, 28, 1])
# y_true_test = np.argmax(model.mnist.test.labels, axis=1).reshape(-1, 1)
# model.evaluate(images_test, y_true_test) # accuracy is 0.9897
#
# # evaluate model on validate
# images_validation = model.mnist.validation.images.reshape([-1, 28, 28, 1])
# y_true_validation = np.argmax(model.mnist.validation.labels, axis=1).reshape(-1, 1)
# model.evaluate(images_validation, y_true_validation) # accuracy is 0.9648
|
{"hexsha": "251c4a38de1966b823a9c4488f52bc5c4a5004e9", "size": 11531, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/lenet-5.py", "max_stars_repo_name": "HowkeWayne/G-program", "max_stars_repo_head_hexsha": "e96df9d8c890ced88027d5eeb0734fcff7fb7ad7", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/lenet-5.py", "max_issues_repo_name": "HowkeWayne/G-program", "max_issues_repo_head_hexsha": "e96df9d8c890ced88027d5eeb0734fcff7fb7ad7", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/lenet-5.py", "max_forks_repo_name": "HowkeWayne/G-program", "max_forks_repo_head_hexsha": "e96df9d8c890ced88027d5eeb0734fcff7fb7ad7", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.4892703863, "max_line_length": 117, "alphanum_fraction": 0.6233631081, "include": true, "reason": "import numpy", "num_tokens": 2906}
|
# coding: utf-8
# The [Mandelbrot set](https://en.wikipedia.org/wiki/Mandelbrot_set) is the set of complex numbers c whose orbit stays bounded:
# $$
# \begin{array}{c}
# c \in \mathbb{C} \\\
# z_0 = 0 \\\
# z_{n+1} = z_n^2 + c \\\
# \sup_{n} \lvert z_n \rvert \le 2
# \end{array}
# $$
#
# In[1]:
get_ipython().magic('matplotlib inline')
import numpy as np
from itertools import product
import matplotlib.pyplot as plt
from matplotlib import colors
# In[2]:
def mset_draw(mset, x=None, y=None):
if x is not None and y is not None:
plt.imshow(mset, norm=colors.PowerNorm(0.3), cmap='cubehelix', extent=[x[0], x[-1], y[0], y[-1]]);
else:
plt.imshow(mset, norm=colors.PowerNorm(0.3), cmap='cubehelix');
plt.axis('off');
# In[3]:
def create_intervals(xmin, xmax, ymin, ymax, width, height):
return np.linspace(xmin, xmax, width), np.linspace(ymin, ymax, height)
# In[4]:
# Mandelbrot set iteration
def mset_iteration(c, maxiter=256):
z = c
for n in range(maxiter):
if abs(z) > 2:
return n
z = z**2 + c
return n
# In[5]:
def mandelbrot_set_list_comp(xmin, xmax, ymin, ymax, width, height, maxiter=256):
real_range, imaginary_range = create_intervals(xmin, xmax, ymin, ymax, width, height)
m = [mset_iteration(r + s*1j, maxiter) for r in real_range for s in imaginary_range]
return m, real_range, imaginary_range
# In[6]:
mset, r, i = mandelbrot_set_list_comp(-2.0,0.5,-1.25,1.25, 600, 600)
mset_draw(np.array(mset).reshape(600, 600).T, r, i);
# In[7]:
get_ipython().magic('timeit mandelbrot_set_list_comp(-2.0,0.5,-1.25,1.25, 600, 600)')
# In[8]:
def mandelbrot_set_numpy(xmin, xmax, ymin, ymax, width, height, maxiter=256):
m = np.empty((height, width), dtype=np.uint8)
real_range, imaginary_range = create_intervals(xmin, xmax, ymin, ymax, width, height)
for j, i in product(range(height), range(width)):
x = real_range[i]
y = imaginary_range[j]
c = x + y*1j
m[j,i] = mset_iteration(c, maxiter)
return m, real_range, imaginary_range
# In[9]:
#mset_draw(mandelbrot_set_numpy(-2.0,0.5,-1.25,1.25, 600, 600)[0])
# In[10]:
get_ipython().magic('timeit mandelbrot_set_numpy(-2.0,0.5,-1.25,1.25, 600, 600)')
# In[11]:
from numba import jit
@jit
def mset_iteration_numba(c, maxiter=256):
z = c
for n in range(maxiter):
if abs(z) > 2:
return n
z = z**2 + c
return n
@jit
def mandelbrot_set_numba(xmin, xmax, ymin, ymax, width, height, maxiter=256):
m = np.empty((height, width), dtype=np.uint8)
real_range, imaginary_range = create_intervals(xmin, xmax, ymin, ymax, width, height)
for j, i in product(range(height), range(width)):
x = real_range[i]
y = imaginary_range[j]
c = x + y*1j
m[j,i] = mset_iteration_numba(c, maxiter)
return m, real_range, imaginary_range
# In[12]:
#mset_draw(mandelbrot_set_numba(-2.0,0.5,-1.25,1.25, 600, 600)[0])
# In[13]:
get_ipython().magic('timeit mandelbrot_set_numba(-2.0,0.5,-1.25,1.25, 600, 600)')
# In[14]:
def mandelbrot_vectors(c, maxiter):
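    # Vectorized escape-time loop: `notdone` masks the points whose orbit is
    # still inside |z| <= 2; only those entries are iterated further and have
    # their escape count bumped, so already-escaped points stop being updated.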
output = np.zeros(c.shape)
z = np.zeros(c.shape, np.complex64)
for it in range(maxiter):
notdone = np.less(z.real*z.real + z.imag*z.imag, 4.0)
output[notdone] = it
z[notdone] = z[notdone]**2 + c[notdone]
output[output == maxiter-1] = maxiter-1
return output
def mandelbrot_set_vectors(xmin, xmax, ymin, ymax, width, height, maxiter=256):
real_range, imaginary_range = create_intervals(xmin, xmax, ymin, ymax, width, height)
c = real_range + imaginary_range[:,None]*1j
return mandelbrot_vectors(c, maxiter)
# In[15]:
#mset_draw(mandelbrot_set_vectors(-2.0,0.5,-1.25,1.25, 600, 600))
# In[16]:
get_ipython().magic('timeit mandelbrot_set_vectors(-2.0,0.5,-1.25,1.25, 600, 600)')
# In[17]:
get_ipython().magic('load_ext Cython')
# In[18]:
get_ipython().run_cell_magic('cython', '', '\nimport cython\nimport numpy as np\ncimport numpy as np\n\ncdef np.uint8_t cp_mset_iteration(double complex c, int maxiter=256):\n cdef:\n double complex z\n int n\n \n z = c\n for n in range(maxiter):\n if z.real**2 + z.imag**2 > 4.0:\n return n\n z = z**2 + c\n return n\n\n@cython.boundscheck(False)\ncpdef cp_mandelbrot_set_loop(double xmin, double xmax, double ymin, double ymax, int width, int height, int maxiter=256):\n cdef:\n int j, i\n double x, y\n double[:] real_range = np.linspace(xmin, xmax, width)\n double[:] imaginary_range = np.linspace(ymin, ymax, height)\n double complex c\n np.uint8_t[:,:] m = np.empty((height, width), dtype=np.uint8)\n \n for j in range(height):\n for i in range(width):\n x = real_range[i]\n y = imaginary_range[j]\n c = x + y*1j\n m[j,i] = cp_mset_iteration(c, maxiter)\n\n return m')
# In[19]:
#mset_draw(cp_mandelbrot_set_loop(-2.0,0.5,-1.25,1.25, 600, 600))
# In[20]:
get_ipython().magic('timeit cp_mandelbrot_set_loop(-2.0,0.5,-1.25,1.25, 600, 600)')
# In[ ]:
# seahorses?!
mset = cp_mandelbrot_set_loop(-0.745625,-0.7449749,0.1121549, 0.112805, 15000, 15000)
plt.figimage(mset, norm=colors.PowerNorm(0.3), cmap='cubehelix', resize=True);
#plt.imshow(mset, norm=colors.PowerNorm(0.3), cmap='cubehelix');
plt.savefig('mandelbrot.png', format='png');
# In[ ]:
|
{"hexsha": "42d20803449fce8088cc65e281e4ddff7686b1d1", "size": 5476, "ext": "py", "lang": "Python", "max_stars_repo_path": "mandelbrot_perf.py", "max_stars_repo_name": "stelmod/python__num_perf", "max_stars_repo_head_hexsha": "44aa6d785c67ec4b2d32638a6aee412fbf9fda6d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mandelbrot_perf.py", "max_issues_repo_name": "stelmod/python__num_perf", "max_issues_repo_head_hexsha": "44aa6d785c67ec4b2d32638a6aee412fbf9fda6d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mandelbrot_perf.py", "max_forks_repo_name": "stelmod/python__num_perf", "max_forks_repo_head_hexsha": "44aa6d785c67ec4b2d32638a6aee412fbf9fda6d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.7969543147, "max_line_length": 1041, "alphanum_fraction": 0.6323959094, "include": true, "reason": "import numpy,from numba", "num_tokens": 1852}
|
[STATEMENT]
lemma M_ne_policy[intro]: "is_policy p \<Longrightarrow> s \<in> space (prob_algebra Ms) \<Longrightarrow> space M \<noteq> {}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>is_policy p; s \<in> space (prob_algebra Ms)\<rbrakk> \<Longrightarrow> space M \<noteq> {}
[PROOF STEP]
using space_K0 prob_space.not_empty prob_space_K0
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>is_policy ?p; ?s \<in> space (prob_algebra Ms)\<rbrakk> \<Longrightarrow> space (K0 ?p ?s) = space M
prob_space ?M \<Longrightarrow> space ?M \<noteq> {}
\<lbrakk>is_policy ?p; ?x0.0 \<in> space (prob_algebra Ms)\<rbrakk> \<Longrightarrow> prob_space (K0 ?p ?x0.0)
goal (1 subgoal):
1. \<lbrakk>is_policy p; s \<in> space (prob_algebra Ms)\<rbrakk> \<Longrightarrow> space M \<noteq> {}
[PROOF STEP]
by force
|
{"llama_tokens": 322, "file": "MDP-Rewards_MDP_cont", "length": 2}
|
import cv2
import mediapipe as mp
import numpy as np
class HandDetector():
def __init__(self, window_shape):
self.mpHands = mp.solutions.hands
self.hands = self.mpHands.Hands(max_num_hands=2)
self.mpDraw = mp.solutions.drawing_utils
self.center = {"pitch": (0, 0),
"dynamics": (0, 0)}
self.height = window_shape[0]
self.width = window_shape[1]
def find_hands(self, img, draw=True):
imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
imgRGB.flags.writeable = False
self.results = self.hands.process(imgRGB)
imgRGB.flags.writeable = True
h, w, c = img.shape
self.height, self.width = h, w
center = [self.center["pitch"], self.center["dynamics"]]
i = 0
if self.results.multi_hand_landmarks:
            print(len(self.results.multi_hand_landmarks))  # debug: number of detected hands
for hand_lms in self.results.multi_hand_landmarks:
center[i] = self.get_center(img, hand_lms)
i += 1
#if i == 2:
# break
if center[0][0] > center[1][0]:
center[0], center[1] = center[1], center[0]
self.center["pitch"] = center[0]
self.center["dynamics"] = center[1]
if draw:
cv2.circle(img, (self.center["pitch"][0],
self.center["pitch"][1]),
radius=20,
color=(0, 0, 255),
thickness=cv2.FILLED)
cv2.circle(img, (self.center["dynamics"][0],
self.center["dynamics"][1]),
radius=20,
color=(255, 0, 0),
thickness=cv2.FILLED)
return img
def find_position(self, img, hand_lms):
lm_list = []
h, w, c = img.shape
for id, lm in enumerate(hand_lms.landmark):
cx, cy = lm.x * w, lm.y * h
lm_list.append([cx, cy])
return np.array(lm_list)
def get_center(self, img, hand_lms):
lm_list = self.find_position(img, hand_lms)
if len(lm_list) > 0:
center = np.mean(lm_list, axis=0)
return (int(center[0]), int(center[1]))
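# A minimal usage sketch (assumes a webcam at index 0; not part of the original file):
#
#   detector = HandDetector(window_shape=(480, 640))
#   cap = cv2.VideoCapture(0)
#   while cap.isOpened():
#       ok, frame = cap.read()
#       if not ok:
#           break
#       frame = detector.find_hands(frame)
#       cv2.imshow("hands", frame)
#       if cv2.waitKey(1) & 0xFF == ord('q'):
#           break
#   cap.release()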
|
{"hexsha": "e6f9f6e9b260bee29d3da32576e0a70254fa14a2", "size": 2316, "ext": "py", "lang": "Python", "max_stars_repo_path": "handdetector.py", "max_stars_repo_name": "realfolkcode/theremin", "max_stars_repo_head_hexsha": "d3694f633c91a493e64e7897ded871a68dfb5ded", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "handdetector.py", "max_issues_repo_name": "realfolkcode/theremin", "max_issues_repo_head_hexsha": "d3694f633c91a493e64e7897ded871a68dfb5ded", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "handdetector.py", "max_forks_repo_name": "realfolkcode/theremin", "max_forks_repo_head_hexsha": "d3694f633c91a493e64e7897ded871a68dfb5ded", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1875, "max_line_length": 65, "alphanum_fraction": 0.5047495682, "include": true, "reason": "import numpy", "num_tokens": 569}
|
import numpy
import pandas
import statsmodels.api as sm
def custom_heuristic(file_path):
'''
You are given a list of Titantic passengers and their associated
information. More information about the data can be seen at the link below:
http://www.kaggle.com/c/titanic-gettingStarted/data
For this exercise, you need to write a custom heuristic that will take
in some combination of the passenger's attributes and predict if the passenger
survived the Titanic diaster.
Can your custom heuristic beat 80% accuracy?
The available attributes are:
Pclass Passenger Class
(1 = 1st; 2 = 2nd; 3 = 3rd)
Name Name
Sex Sex
Age Age
SibSp Number of Siblings/Spouses Aboard
Parch Number of Parents/Children Aboard
Ticket Ticket Number
Fare Passenger Fare
Cabin Cabin
Embarked Port of Embarkation
(C = Cherbourg; Q = Queenstown; S = Southampton)
SPECIAL NOTES:
Pclass is a proxy for socioeconomic status (SES)
1st ~ Upper; 2nd ~ Middle; 3rd ~ Lower
Age is in years; fractional if age less than one
If the age is estimated, it is in the form xx.5
With respect to the family relation variables (i.e. SibSp and Parch)
some relations were ignored. The following are the definitions used
for SibSp and Parch.
Sibling: brother, sister, stepbrother, or stepsister of passenger aboard Titanic
Spouse: husband or wife of passenger aboard Titanic (mistresses and fiancees ignored)
Parent: mother or father of passenger aboard Titanic
Child: son, daughter, stepson, or stepdaughter of passenger aboard Titanic
Write your prediction back into the "predictions" dictionary. The
key of the dictionary should be the passenger's id (which can be accessed
via passenger["PassengerId"]) and the associating value should be 1 if the
passenger survvied or 0 otherwise.
For example, if a passenger is predicted to have survived:
passenger_id = passenger['PassengerId']
predictions[passenger_id] = 1
And if a passenger is predicted to have perished in the disaster:
passenger_id = passenger['PassengerId']
predictions[passenger_id] = 0
You can also look at the Titantic data that you will be working with
at the link below:
https://s3.amazonaws.com/content.udacity-data.com/courses/ud359/titanic_data.csv
'''
predictions = {}
df = pandas.read_csv(file_path)
    averageFare = numpy.mean(df['Fare'])
    std = numpy.std(df['Fare'])
for passenger_index, passenger in df.iterrows():
#
# your code here
#
passenger_id = passenger['PassengerId']
        if (passenger['Sex'] == 'female'
                or (passenger['Pclass'] < 3 and passenger['Age'] < 18)
                or int(passenger['Fare']) > int(averageFare + 5 * std)):
predictions[passenger_id] = 1
else:
predictions[passenger_id] = 0
return predictions
|
{"hexsha": "55efae8390b08133c43b22a306e42aaabe959cd1", "size": 3096, "ext": "py", "lang": "Python", "max_stars_repo_path": "aula 2/03_quiz_your_custom_heuristic.py", "max_stars_repo_name": "RichardPSilva/Udacity-Intro-to-Data-Science", "max_stars_repo_head_hexsha": "36820b186c670a4b022a623eacc21e4c18a10235", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "aula 2/03_quiz_your_custom_heuristic.py", "max_issues_repo_name": "RichardPSilva/Udacity-Intro-to-Data-Science", "max_issues_repo_head_hexsha": "36820b186c670a4b022a623eacc21e4c18a10235", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "aula 2/03_quiz_your_custom_heuristic.py", "max_forks_repo_name": "RichardPSilva/Udacity-Intro-to-Data-Science", "max_forks_repo_head_hexsha": "36820b186c670a4b022a623eacc21e4c18a10235", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.7, "max_line_length": 144, "alphanum_fraction": 0.6634366925, "include": true, "reason": "import numpy,import statsmodels", "num_tokens": 738}
|
# Suppress warnings
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import os, sys, gc, random
import datetime
import dateutil.relativedelta
import argparse
# Machine learning
from sklearn.preprocessing import LabelEncoder
from sklearn.impute import SimpleImputer
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_auc_score
import lightgbm as lgb
# Custom library
import utils
from utils import seed_everything, print_score
from features import generate_label, feature_engineering, feature_engineering1, feature_engineering2
TOTAL_THRES = 300 # purchase-amount threshold
SEED = 42 # random seed
seed_everything(SEED) # fix the seed
data_dir = '../input' # os.environ['SM_CHANNEL_TRAIN']
model_dir = '../model' # os.environ['SM_MODEL_DIR']
output_dir = '../output' # os.environ['SM_OUTPUT_DATA_DIR']
def make_lgb_oof_prediction(train, y, test, features, categorical_features='auto', model_params=None, folds=10):
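    # Out-of-fold (OOF) scheme: each fold's model predicts only its held-out
    # rows (collected in y_oof) and contributes 1/folds of the test-set
    # prediction, so the reported OOF AUC estimates generalization without
    # any model scoring rows it was trained on.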
x_train = train[features]
x_test = test[features]
    # variable to store the test-set predictions
    test_preds = np.zeros(x_test.shape[0])
    # ============================================================================== geometric mean
    # test_preds = np.ones(x_test.shape[0])
    # variable to store the Out-Of-Fold validation predictions
    y_oof = np.zeros(x_train.shape[0])
    # variable to store the fold-averaged validation score
    score = 0
    # ============================================================================== geometric mean
    # score = 1
    # declare the data frame that stores feature importances
    fi = pd.DataFrame()
    fi['feature'] = features
    # declare the Stratified K-Fold splitter
skf = StratifiedKFold(n_splits=folds, shuffle=True, random_state=SEED)
for fold, (tr_idx, val_idx) in enumerate(skf.split(x_train, y)):
        # split the training data by train / validation indices
x_tr, x_val = x_train.loc[tr_idx, features], x_train.loc[val_idx, features]
y_tr, y_val = y[tr_idx], y[val_idx]
print(f'fold: {fold+1}, x_tr.shape: {x_tr.shape}, x_val.shape: {x_val.shape}')
        # declare the LightGBM datasets
dtrain = lgb.Dataset(x_tr, label=y_tr)
dvalid = lgb.Dataset(x_val, label=y_val)
        # train the LightGBM model
clf = lgb.train(
model_params,
dtrain,
            valid_sets=[dtrain, dvalid], # set so that validation performance is tracked
categorical_feature=categorical_features,
verbose_eval=200
)
        # predict on the validation data
val_preds = clf.predict(x_val)
        # store the predictions at the validation indices
y_oof[val_idx] = val_preds
        # measure the per-fold validation score
print(f"Fold {fold + 1} | AUC: {roc_auc_score(y_val, val_preds)}")
print('-'*80)
        # accumulate the fold-averaged validation score in `score` # switching to a geometric mean works well
        score += roc_auc_score(y_val, val_preds) / folds
        # ============================================================================== geometric mean
        # score *= roc_auc_score(y_val, val_preds)
        # predict the test data and store the average over folds
        test_preds += clf.predict(x_test) / folds
        # ============================================================================== geometric mean
        # test_preds *= (clf.predict(x_test) + np.finfo(float).eps)
        # store the per-fold feature importance
fi[f'fold_{fold+1}'] = clf.feature_importance()
del x_tr, x_val, y_tr, y_val
gc.collect()
    # ============================================================================== geometric mean
    # score = score**(1/folds)
    # test_preds = test_preds**(1/folds)
    print(f"\nMean AUC = {score}") # print the fold-averaged validation score
    print(f"OOF AUC = {roc_auc_score(y, y_oof)}") # print the Out-Of-Fold validation score
    # compute and store the mean of the per-fold feature importances
fi_cols = [col for col in fi.columns if 'fold_' in col]
fi['importance'] = fi[fi_cols].mean(axis=1)
return y_oof, test_preds, fi
if __name__ == '__main__':
    # read the data file
    data = pd.read_csv(data_dir + '/train.csv', parse_dates=['order_date'])
    # set the year-month to predict
    year_month = '2011-12'
    # model_params = {
    #     'objective': 'binary', # binary classification
    #     'boosting_type': 'gbdt',
    #     'metric': 'auc', # evaluation metric
    #     'feature_fraction': 0.7, # feature sampling ratio
    #     'bagging_fraction': 0.7, # data (row) sampling ratio
    #     'bagging_freq': 1,
    #     'n_estimators': 10000, # number of trees
    #     'early_stopping_rounds': 1400,
    #     'learning_rate': 0.01,
    #     'max_bin':255,
    #     'seed': SEED,
    #     'verbose': -1,
    #     'n_jobs': -1,
    #     'num_leaves': 31,
    #     'min_data_in_leaf':1500,
    #     'lambda_l1': 1,
    #     'lambda_l2':1,
    #     # 'boost_from_average': False,
    # }
    model_params = {
        'objective': 'binary', # binary classification
        'boosting_type': 'gbdt',
        'metric': 'auc', # evaluation metric
        'feature_fraction': 0.8, # feature sampling ratio
        'bagging_fraction': 0.8, # data (row) sampling ratio
        'bagging_freq': 1,
        'n_estimators': 10000, # number of trees
        'early_stopping_rounds': 100,
        'seed': SEED,
        'verbose': -1,
        'n_jobs': -1,
        #
        # 'num_leaves': 64,
        # 'boost_from_average': False,
    }
    # run the feature engineering
import features
# features.get_year_month_list(data, year_month)
# print('end')
train, test, y, features = feature_engineering2(data, year_month) ##################
# print(train.head())
# print(test.head())
# print(y.head())
# print(features.shape)
    # SMOTE experiment
    # from imblearn.over_sampling import SMOTE
    #
    # t = train.drop(['customer_id', 'year_month', 'label'], axis=1)
    # X_train_over, y_train_over = SMOTE(random_state=0).fit_resample(t, y)
    # print('train feature/label set before SMOTE: ', t.shape, y.shape)
    # print('train feature/label set after SMOTE: ', X_train_over.shape, y_train_over.shape)
    # print('label distribution after SMOTE:\n', pd.Series(y_train_over).value_counts())
    # Quantile transform experiment
from sklearn.preprocessing import QuantileTransformer
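    # QuantileTransformer maps each feature through its empirical CDF onto a
    # uniform [0, 1] distribution (n_quantiles landmarks). Note that it is
    # re-fit separately on train and test below, so the two sets use slightly
    # different quantile maps.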
# quan = QuantileTransformer(n_quantiles=2000, output_distribution='normal',random_state=42)
quan = QuantileTransformer(n_quantiles=1000, random_state=42)
X_quan = quan.fit_transform(train[features])
Y_quan = quan.fit_transform(test[features])
x_quan = pd.DataFrame(X_quan, columns=features)
y_quan = pd.DataFrame(Y_quan, columns=features)
train[features] = x_quan[features]
test[features] = y_quan[features]
### feature_pca = feature.append(train_pca_df.columns)
# Cross Validation Out Of Fold로 LightGBM 모델 훈련 및 예측
y_oof, test_preds, fi = make_lgb_oof_prediction(train, y, test, features, model_params=model_params)
# y_oof, test_preds, fi = make_lgb_oof_prediction(X_train_over, y_train_over, test, features, model_params=model_params) # SMOTE
# y_oof, test_preds, fi = make_lgb_oof_prediction(train, y, test, features, model_params=model_params) # QUANT
sub = pd.read_csv(data_dir + '/sample_submission.csv')
sub['probability'] = test_preds # store the test predictions
os.makedirs(output_dir, exist_ok=True)
sub.to_csv(os.path.join(output_dir, 'output17.csv'), index=False) # write the submission file # status: not submitted yet # remember to update the path!
from evaluation import plot_feature_importances
fi = plot_feature_importances(fi)
|
{"hexsha": "4f7d9ecde5cdd2f0a9e178c8ac49d3756019272b", "size": 7223, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/src/inference.py", "max_stars_repo_name": "bsm8734/BC_stage2_Tabular_data_Classification", "max_stars_repo_head_hexsha": "e421360f3f6f9016c58bfff2dd20485206e4a365", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/src/inference.py", "max_issues_repo_name": "bsm8734/BC_stage2_Tabular_data_Classification", "max_issues_repo_head_hexsha": "e421360f3f6f9016c58bfff2dd20485206e4a365", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/src/inference.py", "max_forks_repo_name": "bsm8734/BC_stage2_Tabular_data_Classification", "max_forks_repo_head_hexsha": "e421360f3f6f9016c58bfff2dd20485206e4a365", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.7259615385, "max_line_length": 132, "alphanum_fraction": 0.5940744843, "include": true, "reason": "import numpy", "num_tokens": 2269}
|
import functools
import numpy as np
from scipy.stats import norm as ndist
import nose.tools as nt
from ..lasso import lasso, full_targets
from ...tests.instance import gaussian_instance
def test_onedim_lasso(n=50000, W=1.5, signal=2., sigma=1, randomizer_scale=1):
beta = np.array([signal])
while True:
X = np.random.standard_normal((n, 1))
X /= np.sqrt((X**2).sum(0))[None, :]
Y = X.dot(beta) + sigma * np.random.standard_normal(n)
conv = lasso.gaussian(X,
Y,
W * np.ones(X.shape[1]),
randomizer_scale=randomizer_scale * sigma,
ridge_term=0.)
signs = conv.fit()
nonzero = signs != 0
if nonzero.sum():
# this is the current code path, where we estimate sigma
(observed_target,
cov_target,
cov_target_score,
alternatives) = full_targets(conv.loglike,
conv._W,
nonzero)
estimate_cur, I_cur, Z_cur, pv_cur = conv.selective_MLE(observed_target,
cov_target,
cov_target_score)[:4]
# this matches the old code exactly
target_Z = X.T.dot(Y) / np.sqrt((X**2).sum(0))
estimate, I, Z, pv = conv.sampler.selective_MLE(target_Z, sigma**2 * np.ones((1,1)),
-sigma**2 * np.ones((1,1)), np.ones((1,)),
solve_args={'tol':1.e-12})[:4]
target_transform = (-np.identity(1), np.zeros(1))
s = signs
opt_transform = (s * np.identity(1), (s * W) * np.ones(1))
beta_hat = X.T.dot(Y) / np.sum(X**2, 0)
sigma_ = np.linalg.norm(Y - X.dot(beta_hat)) / np.sqrt(n-1)  # estimated noise scale (not used below)
approx_MLE = solve_UMVU(target_transform,
opt_transform,
target_Z,
np.ones(1),
(sigma ** 2.) * np.identity(1),
(1. / (sigma ** 2.)) * np.identity(1), tol=1.e-12)
print(estimate, approx_MLE, 'selective MLE')
print(beta[nonzero], 'truth')
print(np.linalg.pinv(X[:, nonzero]).dot(Y), 'relaxed')
print(pv, 'pv')
pivot = ndist.cdf((estimate_cur - signal) / np.sqrt(I_cur[0,0]))
print(pivot, 'pivot')
return estimate, estimate_cur, np.atleast_1d(approx_MLE), pivot
def test_agreement(seed=0):
np.random.seed(seed)
beta_seq = np.hstack([np.linspace(-6., -2., 100), np.linspace(2, 6, 100)])
MLE_check = []
MLE_cur = []
MLE_prev = []
pivot = []
for signal in beta_seq:
test = test_onedim_lasso(n=2000, signal=signal, sigma=1.,randomizer_scale=1.)
MLE_check.append(test[0])
MLE_cur.append(test[1])
MLE_prev.append(test[2])
pivot.append(test[3])
MLE_check = np.hstack(MLE_check)
MLE_cur = np.hstack(MLE_cur)
MLE_prev = np.hstack(MLE_prev)
pivot = np.hstack(pivot)
np.testing.assert_allclose(MLE_check, MLE_prev, rtol=1.e-5)
nt.assert_true(np.linalg.norm(MLE_cur - MLE_prev) / np.linalg.norm(MLE_prev) < 1.e-2)
return beta_seq, MLE_cur, MLE_prev, pivot
def main():
beta_seq, MLE_cur, MLE_prev, pivot = test_agreement()
import matplotlib.pyplot as plt
from statsmodels.distributions import ECDF
plt.figure(num=1)
plt.plot(beta_seq, np.array(MLE_cur), label='MLE now')
plt.plot(beta_seq, np.array(MLE_prev), 'r--', label='MLE prev')
plt.legend()
plt.figure(num=2)
U = np.linspace(0, 1, 101)
plt.plot(U, ECDF(pivot)(U))
plt.plot([0,1],[0,1], 'k--')
#####################################################
# Old selection.randomized.selective_MLE module
def solve_UMVU(target_transform,
opt_transform,
target_observed,
feasible_point,
target_cov,
randomizer_precision,
tol=1.e-8):
A, data_offset = target_transform # data_offset = N
B, opt_offset = opt_transform # opt_offset = u
nopt = B.shape[1]
ntarget = A.shape[1]
# setup joint implied covariance matrix
target_precision = np.linalg.inv(target_cov)
implied_precision = np.zeros((ntarget + nopt, ntarget + nopt))
implied_precision[:ntarget,:ntarget] = A.T.dot(randomizer_precision).dot(A) + target_precision
implied_precision[:ntarget,ntarget:] = A.T.dot(randomizer_precision).dot(B)
implied_precision[ntarget:,:ntarget] = B.T.dot(randomizer_precision).dot(A)
implied_precision[ntarget:,ntarget:] = B.T.dot(randomizer_precision).dot(B)
implied_cov = np.linalg.inv(implied_precision)
implied_opt = implied_cov[ntarget:,ntarget:]
implied_target = implied_cov[:ntarget,:ntarget]
implied_cross = implied_cov[:ntarget,ntarget:]
L = implied_cross.dot(np.linalg.inv(implied_opt))
M_1 = np.linalg.inv(implied_precision[:ntarget,:ntarget]).dot(target_precision)
M_2 = -np.linalg.inv(implied_precision[:ntarget,:ntarget]).dot(A.T.dot(randomizer_precision))
conditioned_value = data_offset + opt_offset
linear_term = implied_precision[ntarget:,ntarget:].dot(implied_cross.T.dot(np.linalg.inv(implied_target)))
offset_term = -B.T.dot(randomizer_precision).dot(conditioned_value)
natparam_transform = (linear_term, offset_term)
conditional_natural_parameter = linear_term.dot(target_observed) + offset_term
conditional_precision = implied_precision[ntarget:,ntarget:]
M_1_inv = np.linalg.inv(M_1)
mle_offset_term = - M_1_inv.dot(M_2.dot(conditioned_value))
mle_transform = (M_1_inv, -M_1_inv.dot(L), mle_offset_term)
var_transform = (-implied_precision[ntarget:,:ntarget].dot(M_1),
-implied_precision[ntarget:,:ntarget].dot(M_2.dot(conditioned_value)))
cross_covariance = np.linalg.inv(implied_precision[:ntarget, :ntarget]).dot(implied_precision[:ntarget, ntarget:])
var_matrices = (np.linalg.inv(implied_opt), np.linalg.inv(implied_precision[:ntarget,:ntarget]),
cross_covariance,target_precision)
def mle_map(natparam_transform, mle_transform, var_transform, var_matrices,
feasible_point, conditional_precision, target_observed):
param_lin, param_offset = natparam_transform
mle_target_lin, mle_soln_lin, mle_offset = mle_transform
soln, value, _ = solve_barrier_nonneg(param_lin.dot(target_observed) + param_offset,
conditional_precision,
feasible_point=feasible_point,
step=1,
nstep=2000,
tol=tol)
selective_MLE = mle_target_lin.dot(target_observed) + mle_soln_lin.dot(soln) + mle_offset
var_target_lin, var_offset = var_transform
var_precision, inv_precision_target, cross_covariance, target_precision = var_matrices
_, _, hess = solve_barrier_nonneg(var_target_lin.dot(selective_MLE) + var_offset + mle_offset,
var_precision,
feasible_point=None,
step=1,
nstep=2000)
hessian = target_precision.dot(inv_precision_target +
cross_covariance.dot(hess).dot(cross_covariance.T)).dot(target_precision)
return selective_MLE, np.linalg.inv(hessian)
mle_partial = functools.partial(mle_map, natparam_transform, mle_transform, var_transform, var_matrices,
feasible_point, conditional_precision)
sel_MLE, inv_hessian = mle_partial(target_observed)
#print("shapes", target_precision.dot(sel_MLE).shape, A.T.dot(randomizer_precision).shape, offset_term.shape)
#implied_parameter = np.hstack([target_precision.dot(sel_MLE)-A.T.dot(randomizer_precision).dot(conditioned_value),
# offset_term*np.ones((1,1))])
print("selective MLE", sel_MLE)
return np.squeeze(sel_MLE)
#, inv_hessian, mle_partial, implied_cov, implied_cov.dot(implied_parameter), mle_transform
def solve_barrier_nonneg(conjugate_arg,
precision,
feasible_point=None,
step=1,
nstep=1000,
tol=1.e-8):
scaling = np.sqrt(np.diag(precision))
if feasible_point is None:
feasible_point = 1. / scaling
objective = lambda u: -u.T.dot(conjugate_arg) + u.T.dot(precision).dot(u)/2. + np.log(1.+ 1./(u / scaling)).sum()
grad = lambda u: -conjugate_arg + precision.dot(u) + (1./(scaling + u) - 1./u)
barrier_hessian = lambda u: (-1./((scaling + u)**2.) + 1./(u**2.))
current = feasible_point
current_value = np.inf
for itercount in range(nstep):
newton_step = grad(current)
# make sure proposal is feasible
count = 0
while True:
count += 1
proposal = current - step * newton_step
if np.all(proposal > 0):
break
step *= 0.5
if count >= 40:
raise ValueError('not finding a feasible point')
# make sure proposal is a descent
count = 0
while True:
proposal = current - step * newton_step
proposed_value = objective(proposal)
if proposed_value <= current_value:
break
step *= 0.5
# stop if relative decrease is small
if np.fabs(current_value - proposed_value) < tol * np.fabs(current_value):
current = proposal
current_value = proposed_value
break
current = proposal
current_value = proposed_value
if itercount % 4 == 0:
step *= 2
hess = np.linalg.inv(precision + np.diag(barrier_hessian(current)))
return current, current_value, hess
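# A quick sanity sketch for solve_barrier_nonneg, kept as a comment since the
# relative imports above mean this module only runs inside the test suite. The
# solver approximately minimizes -u'c + u'Pu/2 + sum(log(1 + scaling/u)) over
# u > 0, so for c = [1.], P = I the returned solution is strictly positive:
#     c = np.array([1.0]); P = np.eye(1)
#     soln, value, hess = solve_barrier_nonneg(c, P)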
|
{"hexsha": "df3aea08da908cf48376120a95f767f513fb0b94", "size": 10485, "ext": "py", "lang": "Python", "max_stars_repo_path": "selectinf/randomized/tests/test_selective_MLE_onedim.py", "max_stars_repo_name": "selective-inference/Python-software", "max_stars_repo_head_hexsha": "e906fbb98946b129eb6713e8956bde7a080181f4", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 51, "max_stars_repo_stars_event_min_datetime": "2016-03-31T16:34:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-16T04:32:58.000Z", "max_issues_repo_path": "selectinf/randomized/tests/test_selective_MLE_onedim.py", "max_issues_repo_name": "selective-inference/selective-inference", "max_issues_repo_head_hexsha": "e906fbb98946b129eb6713e8956bde7a080181f4", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2016-04-07T00:19:58.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-03T18:31:14.000Z", "max_forks_repo_path": "selectinf/randomized/tests/test_selective_MLE_onedim.py", "max_forks_repo_name": "selective-inference/Python-software", "max_forks_repo_head_hexsha": "e906fbb98946b129eb6713e8956bde7a080181f4", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2015-10-28T17:29:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-16T21:04:30.000Z", "avg_line_length": 38.5477941176, "max_line_length": 119, "alphanum_fraction": 0.5723414402, "include": true, "reason": "import numpy,from scipy,from statsmodels", "num_tokens": 2443}
|
import numpy
class ToyObjective(object):
def __init__(self):
pass
def numVar(self):
pass
def evaluate(self, sample):
pass
class Igo(object):
def __init__(self,objective):
self.objective = objective
self.numVar_ = objective.numVar()
self.mean = numpy.zeros(self.numVar_)
self.covariance = numpy.eye(self.numVar_)
def setStartingPoint(self,mean,covariance):
self.mean[:] = mean[:]
self.covariance[:] = covariance[:]
def generateSamples(self,n):
return numpy.random.multivariate_normal(self.mean, self.covariance, n)
def evaluateSamples(self,samples):
""" get the value of the objective for each sample"""
assert samples.ndim==2
print samples.shape
assert samples.shape[1] == self.numVar_
objectiveValue = numpy.zeros(samples.shape[0])
for si in range(samples.shape[0]):
sample = samples[si,:]
objectiveValue[si]=self.objective.evaluate(sample)
return objectiveValue
def rankSamples(self,objectiveValues,selectionQuantile=0.027):
numberOfSamples = len(objectiveValues)
sampleRank = numpy.argsort(objectiveValues).astype(numpy.float32)
# argument of the quantile selection function
w = (sampleRank +0.5)/float(numberOfSamples)
# find where the rank quantile is too large (sample not selected)
whereQ=numpy.where(w>selectionQuantile)
w[:]=1.0
w[whereQ]=0.0
w/=float(numberOfSamples)
return w
def update(self,samples,w,etaMean,etaCovariance):
numberOfSamples = samples.shape[0]
newMean = numpy.zeros(self.mean.shape)
newCovariance = numpy.zeros(self.covariance.shape)
print "wshape ",w.shape
print "s", samples.shape
for s in range(numberOfSamples):
d = samples[s]-self.mean
dM = d.reshape([-1,1])
dMT = dM.T
newMean +=w[s]*d
dotRes = numpy.dot(dM,dMT)
#print "dosRes ",dotRes.shape
#print "cov ",self.covariance.shape
newCovariance +=w[s]*(dotRes-self.covariance)
# gradient step
self.mean += etaMean*newMean
self.covariance += etaCovariance*newCovariance
def infer(self):
for i in range(1000):
print i
samples = self.generateSamples(self.numVar_*20)
print "eval ",samples.shape
objectiveValues = self.evaluateSamples(samples)
w = self.rankSamples(objectiveValues=objectiveValues)
if i%100==0:
self.objective.show(samples[numpy.argmin(objectiveValues),:]) # show the current best sample (w is zero for rejected samples, so argmin(w) would pick a bad one)
print "min objectiveValues",objectiveValues.min()
self.update(samples=samples,w=w,etaMean=1.0,etaCovariance=1.0)
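# Rough usage sketch (comment only; assumes an objective object implementing
# numVar(), evaluate(sample) and show(sample)):
#     igo = Igo(some_objective)
#     igo.infer()
# Each iteration draws 20*numVar samples, keeps the best samples by rank
# quantile, and takes a natural-gradient step on the Gaussian mean/covariance.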
|
{"hexsha": "d6983730fd1c069d2aca01425251df5575e79c1a", "size": 2899, "ext": "py", "lang": "Python", "max_stars_repo_path": "seglibpython/seglib/igo/igo_opt.py", "max_stars_repo_name": "DerThorsten/seglib", "max_stars_repo_head_hexsha": "4655079e390e301dd93e53f5beed6c9737d6df9f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "seglibpython/seglib/igo/igo_opt.py", "max_issues_repo_name": "DerThorsten/seglib", "max_issues_repo_head_hexsha": "4655079e390e301dd93e53f5beed6c9737d6df9f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "seglibpython/seglib/igo/igo_opt.py", "max_forks_repo_name": "DerThorsten/seglib", "max_forks_repo_head_hexsha": "4655079e390e301dd93e53f5beed6c9737d6df9f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.1720430108, "max_line_length": 80, "alphanum_fraction": 0.5950327699, "include": true, "reason": "import numpy", "num_tokens": 685}
|
import numpy as np
from pusion.core.combiner import UtilityBasedCombiner
from pusion.util.constants import *
class ComplementaryOutputCombiner(UtilityBasedCombiner):
"""
The :class:`ComplementaryOutputCombiner` combines fully complementary decision outputs by concatenating individual
decisions across classes for each sample.
"""
_SUPPORTED_PAC = [
(Problem.MULTI_CLASS, AssignmentType.CONTINUOUS, CoverageType.COMPLEMENTARY),
(Problem.MULTI_CLASS, AssignmentType.CRISP, CoverageType.COMPLEMENTARY),
(Problem.MULTI_LABEL, AssignmentType.CONTINUOUS, CoverageType.COMPLEMENTARY),
(Problem.MULTI_LABEL, AssignmentType.CRISP, CoverageType.COMPLEMENTARY)
]
SHORT_NAME = 'COB'
def __init__(self):
UtilityBasedCombiner.__init__(self)
def combine(self, decision_outputs):
"""
Combine fully complementary decision outputs by concatenating individual decisions according to the coverage
of all classifiers. Due to the nature of complementary class coverage, no fusion between redundant class
assignments is required.
:param decision_outputs: `list` of `numpy.array` matrices, each of shape `(n_samples, n_classes')`,
where `n_classes'` is classifier-specific and described by the coverage. Each matrix corresponds to
one of `n_classifiers` classifiers and contains crisp or continuous decision outputs per sample.
:return: A matrix (`numpy.array`) of either crisp or continuous class assignments representing the
fused decisions obtained by concatenation. Axis 0 represents samples and axis 1 the class
labels, aligned with the combined class coverage over all classifiers.
"""
n_classes = np.sum([len(ca) for ca in self.coverage])
fused_decisions = np.zeros((len(decision_outputs[0]), n_classes))  # np.zeros_like on a tuple would yield a 1-d array
for i, classifier_coverage in enumerate(self.coverage):
for local_ci, ci in enumerate(classifier_coverage):
fused_decisions[:, ci] = decision_outputs[i][:, local_ci]  # decision_outputs is a list of matrices with classifier-local columns
return fused_decisions
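if __name__ == '__main__':
    # Minimal sketch, assuming a working pusion install. The `coverage`
    # attribute is normally assigned by the framework; it is set by hand here
    # purely for illustration.
    cob = ComplementaryOutputCombiner()
    cob.coverage = [[0, 1], [2]]  # classifier 0 covers classes 0-1, classifier 1 covers class 2
    outputs = [np.array([[1, 0], [0, 1]]), np.array([[1], [0]])]
    print(cob.combine(outputs))  # expected (up to dtype): [[1, 0, 1], [0, 1, 0]]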
|
{"hexsha": "c1935686495fd702deb9836682ba4ed1aa2da4ba", "size": 2153, "ext": "py", "lang": "Python", "max_stars_repo_path": "pusion/core/complementary_output_combiner.py", "max_stars_repo_name": "IPVS-AS/pusion", "max_stars_repo_head_hexsha": "58ef24b602f611192430f6005ecf5305f878f412", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-07-24T16:05:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-21T15:06:03.000Z", "max_issues_repo_path": "pusion/core/complementary_output_combiner.py", "max_issues_repo_name": "IPVS-AS/pusion", "max_issues_repo_head_hexsha": "58ef24b602f611192430f6005ecf5305f878f412", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pusion/core/complementary_output_combiner.py", "max_forks_repo_name": "IPVS-AS/pusion", "max_forks_repo_head_hexsha": "58ef24b602f611192430f6005ecf5305f878f412", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-07-24T16:05:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T21:24:40.000Z", "avg_line_length": 45.8085106383, "max_line_length": 120, "alphanum_fraction": 0.7157454714, "include": true, "reason": "import numpy", "num_tokens": 442}
|
struct TimeDateZone <: NanosecondBasis
timestamp::TimeDate
inzone::AkoTimeZone # one of {FixedTimeZone, VariableTimeZone}
atzone::FixedTimeZone
# ensure the other constructors are given explicitly
function TimeDateZone(attime::Time, ondate::Date, inzone::VariableTimeZone, atzone::FixedTimeZone)
tmdt = TimeDate(attime, ondate)
return new(tmdt, inzone, atzone)
end
function TimeDateZone(attime::Time, ondate::Date, inzone::FixedTimeZone, atzone::FixedTimeZone)
tmdt = TimeDate(attime, ondate)
return new(tmdt, inzone, atzone)
end
end
@inline timestamp(x::TimeDateZone) = x.timestamp
@inline at_time(x::TimeDateZone) = x.timestamp.attime
@inline on_date(x::TimeDateZone) = x.timestamp.ondate
@inline in_zone(x::TimeDateZone) = x.inzone
@inline at_zone(x::TimeDateZone) = x.atzone
@inline timestamp(x::ZonedDateTime) = DateTime(x)
@inline at_time(x::ZonedDateTime) = Time(DateTime(x))
@inline on_date(x::ZonedDateTime) = Date(DateTime(x))
@inline in_zone(x::ZonedDateTime) = x.timezone
@inline at_zone(x::ZonedDateTime) = x.zone
@inline timestamp(x::TimeDate) = x
@inline timestamp(x::DateTime) = x
@inline timestamp(x::Date) = x + Time(0)
TimeDateZone(x::TimeDateZone) = x
TimeDate(x::TimeDateZone) = x.timestamp
function TimeDateZone(attime::Time, ondate::Date, atzone::FixedTimeZone)
return TimeDateZone(attime, ondate, atzone, atzone)
end
function TimeDateZone(attime::Time, ondate::Date, inzone::VariableTimeZone)
fast_time, slow_time = fastpart(attime), slowpart(attime)
datetime = ondate + slow_time
zdt = ZonedDateTime(datetime, inzone)
atzone = at_zone(zdt)
tim, dat = timedate(zdt)
tim = tim + fast_time
tdz = TimeDateZone(tim, dat, inzone, atzone)
return tdz
end
TimeDateZone(ondate::Date, attime::Time, inzone::VariableTimeZone) =
TimeDateZone(attime, ondate, inzone)
TimeDateZone(ondate::Date, attime::Time, atzone::FixedTimeZone) =
TimeDateZone(attime, ondate, atzone, atzone)
@inline function ZonedDateTime(tdz::TimeDateZone)
datetime = DateTime(timestamp(tdz))
inzone = in_zone(tdz)
zdt = ZonedDateTime(datetime, inzone)
return zdt
end
@inline function TimeDateZone(zdt::ZonedDateTime)
tim, dat = timedate(DateTime(zdt))
inzone = in_zone(zdt)
atzone = at_zone(zdt)
tdz = TimeDateZone(tim, dat, inzone, atzone)
return tdz
end
@inline function TimeDateZone(datetime::DateTime, tzone::T, idx::Int) where {T<:AkoTimeZone}
zdt = ZonedDateTime(datetime, tzone, idx)
return TimeDateZone(zdt)
end
TimeDateZone(datetime::DateTime, inzone::T) where {T<:AkoTimeZone} =
TimeDateZone(ZonedDateTime(datetime, inzone))
TimeDateZone(tmdt::TimeDate, inzone::T) where {T<:AkoTimeZone} =
TimeDateZone(Time(tmdt), Date(tmdt), inzone)
TimeDateZone(dat::Date, inzone::T) where {T<:AkoTimeZone} =
TimeDateZone(ZonedDateTime(dat+Time(0), inzone))
function astimezone(tdz::TimeDateZone, tzone::T) where {T<:AkoTimeZone}
TimeDateZone(astimezone(ZonedDateTime(tdz), tzone)) + fastpart(tdz)
end
"""
utcoffset(FixedTimeZone)
offset from UTC to local time, in seconds
"""
@inline function utcoffset(tz::FixedTimeZone)
ofs = tz.offset
return ofs.std + ofs.dst
end
@inline utcoffset(zdt::ZonedDateTime) = utcoffset(at_zone(zdt))
@inline utcoffset(tdz::TimeDateZone) = utcoffset(at_zone(tdz))
@inline utcoffset(tz::VariableTimeZone) = utcoffset(TimeZones.now(tz))
TimeDateZone(cperiod::CompoundPeriod, tzone::T) where {T<:AkoTimeZone} =
TimeDateZone(TimeDate(cperiod), tzone)
@inline function Date(tdz::TimeDateZone)
timedate = timestamp(tdz)
dt = on_date(timedate)
return dt
end
@inline function Time(tdz::TimeDateZone)
timedate = timestamp(tdz)
tm = at_time(timedate)
return tm
end
@inline function DateTime(tdz::TimeDateZone)
timedate = timestamp(tdz)
tm = slowpart(at_time(timedate))
dt = on_date(timedate)
return dt+tm
end
@inline function fastpart(tdz::TimeDateZone)
fast_time = fastpart(at_time(tdz))
return isempty(fast_time) ? Nanosecond(0) : fast_time
end
@inline slowpart(tdz::TimeDateZone) = slowpart(at_time(tdz))
@inline fastpart(zdt::ZonedDateTime) = Nanosecond(0)
@inline slowpart(zdt::ZonedDateTime) = Time(DateTime(zdt))
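# A small usage sketch (comment only: this file is `include`d by the package, so
# the TimeZones tz"..." string macro is assumed to be in scope where it is used):
#     tdz = TimeDateZone(Time(12, 30), Date(2021, 6, 1), tz"America/New_York")
#     utcoffset(tdz)   # offset from UTC in seconds at that instant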
|
{"hexsha": "0dd18cfc9234417dae0a2f6a2fab95f10769d371", "size": 4295, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/TimeDateZone.jl", "max_stars_repo_name": "UnofficialJuliaMirror/TimesDates.jl-bdfc003b-8df8-5c39-adcd-3a9087f5df4a", "max_stars_repo_head_hexsha": "1480b658d20532692e4b19ca70bbe50e701681a6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 21, "max_stars_repo_stars_event_min_datetime": "2018-03-14T13:22:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T11:55:35.000Z", "max_issues_repo_path": "src/TimeDateZone.jl", "max_issues_repo_name": "UnofficialJuliaMirror/TimesDates.jl-bdfc003b-8df8-5c39-adcd-3a9087f5df4a", "max_issues_repo_head_hexsha": "1480b658d20532692e4b19ca70bbe50e701681a6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 21, "max_issues_repo_issues_event_min_datetime": "2018-03-16T15:12:12.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-09T06:54:01.000Z", "max_forks_repo_path": "src/TimeDateZone.jl", "max_forks_repo_name": "UnofficialJuliaMirror/TimesDates.jl-bdfc003b-8df8-5c39-adcd-3a9087f5df4a", "max_forks_repo_head_hexsha": "1480b658d20532692e4b19ca70bbe50e701681a6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2018-04-27T10:17:43.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-22T18:47:21.000Z", "avg_line_length": 30.8992805755, "max_line_length": 102, "alphanum_fraction": 0.7250291036, "num_tokens": 1266}
|
from mapoca.trainers.subprocess_env_manager import worker
import numpy as np
from typing import List, Tuple, Optional, Mapping as MappingType
from mlagents_envs.base_env import (
BehaviorSpec,
ObservationSpec,
DimensionProperty,
ObservationType,
ActionSpec,
DecisionSteps,
TerminalSteps,
BaseEnv,
BehaviorName,
ActionTuple,
AgentId,
)
from mlagents_envs.communicator_objects.capabilities_pb2 import UnityRLCapabilitiesProto
def _make_env(scenario_name, benchmark=False):
"""
Creates a MultiAgentEnv object as env. This can be used similar to a gym
environment by calling env.reset() and env.step().
Use env.render() to view the environment on the screen.
Input:
scenario_name : name of the scenario from ./scenarios/ to be loaded
(without the .py extension)
benchmark : whether you want to produce benchmarking data
(usually only done during evaluation)
Some useful env properties (see environment.py):
.observation_space : Returns the observation space for each agent
.action_space : Returns the action space for each agent
.n : Returns the number of Agents
"""
from multiagent.environment import MultiAgentEnv
from multiagent import scenarios
# load scenario from script
scenario = scenarios.load(scenario_name + ".py").Scenario()
# create world
world = scenario.make_world()
# create multiagent environment
if benchmark:
env = MultiAgentEnv(
world,
scenario.reset_world,
scenario.reward,
scenario.observation,
scenario.benchmark_data,
)
else:
env = MultiAgentEnv(
world, scenario.reset_world, scenario.reward, scenario.observation
)
return env
class ParticlesEnvironment(BaseEnv):
def __init__(self, name: str = "simple_spread", worker_id=0):
self._obs: Optional[List[np.array]] = None
self._rew: Optional[List[int]] = None
self._done: Optional[List[bool]] = None
self._actions: Optional[List[int]] = None
self._name = name
self._env = _make_env(name)
self._env.discrete_action_input = True
self._worker_id = worker_id
# :(
self.academy_capabilities = UnityRLCapabilitiesProto()
self.academy_capabilities.baseRLCapabilities = True
self.academy_capabilities.concatenatedPngObservations = True
self.academy_capabilities.compressedChannelMapping = True
self.academy_capabilities.hybridActions = True
self.academy_capabilities.trainingAnalytics = True
self.academy_capabilities.variableLengthObservation = True
self.academy_capabilities.multiAgentGroups = True
self.count = 0
self.episode_count = 0
def step(self) -> None:
if self._actions is None:
self._obs, self._rew, self._done, _ = self._env.step([0] * self._env.n)
else:
self._obs, self._rew, self._done, _ = self._env.step(self._actions)
if self.count >= 25:
self._done = [True] * self._env.n
self.count += 1
if self.episode_count % 100 == 0 and self._worker_id == 0:
self._env.render(mode="agent")
def reset(self) -> None:
self._rew = [0] * self._env.n
self._done = [False] * self._env.n
self._actions = [0] * self._env.n
self._obs = self._env.reset()
self.episode_count += 1
self.count = 0
def close(self) -> None:
self._env.close()
@property
def behavior_specs(self) -> MappingType[str, BehaviorSpec]:
return {
self._name: BehaviorSpec(
[
ObservationSpec(
self._env.observation_space[0].shape,
(DimensionProperty.NONE,),
ObservationType.DEFAULT,
"obs_0",
)
],
ActionSpec(0, (self._env.action_space[0].n,)),
)
}
def set_actions(self, behavior_name: BehaviorName, action: ActionTuple) -> None:
assert behavior_name == self._name
self._actions = [action.discrete[i] for i in range(self._env.n)]
def set_action_for_agent(
self, behavior_name: BehaviorName, agent_id: AgentId, action: ActionTuple
) -> None:
assert behavior_name == self._name
if self._actions is None:
self._actions = [0] * self._env.n
self._actions[agent_id] = action.discrete[0]
def get_steps(
self, behavior_name: BehaviorName
) -> Tuple[DecisionSteps, TerminalSteps]:
terminal_steps = self.get_terminal_steps()
if any(self._done):
# if any is done, reset the environment and
# get the next steps
self.reset()
decision_steps = self.get_decision_steps()
return decision_steps, terminal_steps
def get_decision_steps(self) -> DecisionSteps:
reward_scale = 1
decision_obs = np.array(
[self._obs[i] for i in range(self._env.n) if not self._done[i]],
dtype=np.float32,
)
decision_reward = np.array(
[
self._rew[i] * 0  # individual reward intentionally zeroed; the group reward below carries the signal
for i in range(self._env.n)
if not self._done[i]
],
dtype=np.float32,
)
decision_id = np.array([i for i in range(self._env.n) if not self._done[i]])
decision_group_reward = np.array(
[self._rew[0] * reward_scale for i in range(self._env.n) if not self._done[i]],
dtype=np.float32,
)
decision_group_id = np.array(
[1 for i in range(self._env.n) if not self._done[i]]
)
decision_step = DecisionSteps(
[decision_obs],
decision_reward,
decision_id,
None,
decision_group_id,
decision_group_reward,
)
return decision_step
def get_terminal_steps(self) -> TerminalSteps:
reward_scale = 1.0
terminal_obs = np.array(
[self._obs[i] for i in range(self._env.n) if self._done[i]],
dtype=np.float32,
)
terminal_reward = np.array(
[self._rew[i] * 0 for i in range(self._env.n) if self._done[i]],  # individual reward zeroed, as above
dtype=np.float32,
)
terminal_id = np.array([i for i in range(self._env.n) if self._done[i]])
terminal_group_reward = np.array(
[
self._rew[0] * reward_scale
for i in range(self._env.n)
if self._done[i]
],
dtype=np.float32,
)
terminal_group_id = np.array([1 for i in range(self._env.n) if self._done[i]])
# TODO: figure out the interruption type
terminal_interruption = np.array(
[False for i in range(self._env.n) if self._done[i]]
)
terminal_steps = TerminalSteps(
[terminal_obs],
terminal_reward,
terminal_interruption,
terminal_id,
terminal_group_id,
terminal_group_reward,
)
return terminal_steps
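if __name__ == "__main__":
    # Rough smoke test, assuming the OpenAI multiagent particle envs package is
    # importable. worker_id != 0 keeps rendering disabled.
    env = ParticlesEnvironment("simple_spread", worker_id=1)
    env.reset()
    for _ in range(3):
        env.step()
    decision_steps, terminal_steps = env.get_steps("simple_spread")
    print(f"{len(decision_steps)} agents awaiting decisions")
    env.close()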
|
{"hexsha": "d58b88457eda13b8e21a53dc05d704111d471bc1", "size": 7356, "ext": "py", "lang": "Python", "max_stars_repo_path": "ma-poca/mapoca/mapoca/particles_env.py", "max_stars_repo_name": "Unity-Technologies/paper-ml-agents", "max_stars_repo_head_hexsha": "885144ee25e86b929c5acee90b9b8dc059bcb9af", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-25T13:54:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T13:54:29.000Z", "max_issues_repo_path": "ma-poca/mapoca/mapoca/particles_env.py", "max_issues_repo_name": "Unity-Technologies/paper-ml-agents", "max_issues_repo_head_hexsha": "885144ee25e86b929c5acee90b9b8dc059bcb9af", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ma-poca/mapoca/mapoca/particles_env.py", "max_forks_repo_name": "Unity-Technologies/paper-ml-agents", "max_forks_repo_head_hexsha": "885144ee25e86b929c5acee90b9b8dc059bcb9af", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-25T14:42:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T14:42:32.000Z", "avg_line_length": 35.3653846154, "max_line_length": 91, "alphanum_fraction": 0.5906742795, "include": true, "reason": "import numpy", "num_tokens": 1659}
|
#include <ripple/app/ledger/LedgerMaster.h>
#include <ripple/app/main/Application.h>
#include <ripple/app/misc/NetworkOPs.h>
#include <ripple/app/misc/SHAMapStore.h>
#include <ripple/protocol/jss.h>
#include <ripple/rpc/Context.h>
#include <ripple/beast/core/LexicalCast.h>
#include <boost/algorithm/string/case_conv.hpp>
#include <boost/format.hpp>
namespace ripple {
Json::Value doCanDelete (RPC::Context& context)
{
if (! context.app.getSHAMapStore().advisoryDelete())
return RPC::make_error(rpcNOT_ENABLED);
Json::Value ret (Json::objectValue);
if (context.params.isMember(jss::can_delete))
{
Json::Value canDelete = context.params.get(jss::can_delete, 0);
std::uint32_t canDeleteSeq = 0;
if (canDelete.isUInt())
{
canDeleteSeq = canDelete.asUInt();
}
else
{
std::string canDeleteStr = canDelete.asString();
boost::to_lower (canDeleteStr);
if (canDeleteStr.find_first_not_of ("0123456789") ==
std::string::npos)
{
canDeleteSeq =
beast::lexicalCast <std::uint32_t>(canDeleteStr);
}
else if (canDeleteStr == "never")
{
canDeleteSeq = 0;
}
else if (canDeleteStr == "always")
{
canDeleteSeq = std::numeric_limits <std::uint32_t>::max();
}
else if (canDeleteStr == "now")
{
canDeleteSeq = context.app.getSHAMapStore().getLastRotated();
if (!canDeleteSeq)
return RPC::make_error (rpcNOT_READY);
}
else if (canDeleteStr.size() == 64 &&
canDeleteStr.find_first_not_of("0123456789abcdef") ==
std::string::npos)
{
auto ledger = context.ledgerMaster.getLedgerByHash (
from_hex_text<uint256>(canDeleteStr));
if (!ledger)
return RPC::make_error(rpcLGR_NOT_FOUND, "ledgerNotFound");
canDeleteSeq = ledger->info().seq;
}
else
{
return RPC::make_error (rpcINVALID_PARAMS);
}
}
ret[jss::can_delete] =
context.app.getSHAMapStore().setCanDelete (canDeleteSeq);
}
else
{
ret[jss::can_delete] = context.app.getSHAMapStore().getCanDelete();
}
return ret;
}
}
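// Example admin RPC request handled above (hypothetical values; "can_delete"
// accepts a ledger sequence, a 64-character ledger hash, or one of the
// keywords "never"/"always"/"now"):
//   {"method": "can_delete", "params": [{"can_delete": "now"}]}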
|
{"hexsha": "1463ad699af3f71aea27f691eca567cb55e6193a", "size": 2548, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/ripple/rpc/handlers/CanDelete.cpp", "max_stars_repo_name": "DEEPSPACE007/DsDeFi-Exchange", "max_stars_repo_head_hexsha": "777486b799bae42a4297f9524f3ff30e0b149ef7", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/ripple/rpc/handlers/CanDelete.cpp", "max_issues_repo_name": "DEEPSPACE007/DsDeFi-Exchange", "max_issues_repo_head_hexsha": "777486b799bae42a4297f9524f3ff30e0b149ef7", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/ripple/rpc/handlers/CanDelete.cpp", "max_forks_repo_name": "DEEPSPACE007/DsDeFi-Exchange", "max_forks_repo_head_hexsha": "777486b799bae42a4297f9524f3ff30e0b149ef7", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.1636363636, "max_line_length": 79, "alphanum_fraction": 0.5404238619, "num_tokens": 582}
|
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Free University
# Berlin, 14195 Berlin, Germany.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Created on 23.01.2015
@author: marscher
'''
import mdtraj
import os
import tempfile
import unittest
from pyemma.coordinates import api
from pyemma.coordinates.data.feature_reader import FeatureReader
from pyemma.util.log import getLogger
import pkg_resources
import numpy as np
from pyemma.coordinates.api import discretizer, tica, source
log = getLogger('TestFeatureReader')
def create_traj(top):
trajfile = tempfile.mktemp('.xtc')
n_frames = np.random.randint(500, 1500)
log.debug("create traj with %i frames" % n_frames)
xyz = np.arange(n_frames * 3 * 3).reshape((n_frames, 3, 3))
t = mdtraj.load(top)
t.xyz = xyz
t.time = np.arange(n_frames)
t.save(trajfile)
return trajfile, xyz, n_frames
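# e.g. create_traj(topfile) writes a throwaway .xtc with deterministic
# coordinates and returns (path, xyz, n_frames) for the reader tests below.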
class TestFeatureReader(unittest.TestCase):
@classmethod
def setUpClass(cls):
c = super(TestFeatureReader, cls).setUpClass()
# create a fake trajectory which has 3 atoms and coordinates are just a range
# over all frames.
cls.topfile = pkg_resources.resource_filename(
'pyemma.coordinates.tests.test_featurereader', 'data/test.pdb')
cls.trajfile, cls.xyz, cls.n_frames = create_traj(cls.topfile)
cls.trajfile2, cls.xyz2, cls.n_frames2 = create_traj(cls.topfile)
return c
@classmethod
def tearDownClass(cls):
try:
os.unlink(cls.trajfile)
except EnvironmentError:
pass
def testIteratorAccess(self):
reader = api.source(self.trajfile, top=self.topfile)
assert isinstance(reader, FeatureReader)
frames = 0
data = []
for i, X in reader:
assert isinstance(X, np.ndarray)
frames += X.shape[0]
data.append(X)
self.assertEqual(frames, reader.trajectory_lengths()[0])
data = np.vstack(data)
# restore shape of input
data.reshape(self.xyz.shape)
self.assertTrue(np.allclose(data, self.xyz.reshape(-1, 9)))
def testIteratorAccess2(self):
reader = FeatureReader([self.trajfile, self.trajfile2], self.topfile)
reader.chunksize = 100
data = {itraj: [] for itraj in xrange(reader.number_of_trajectories())}
for i, X in reader:
data[i].append(X)
# restore shape of input
data[0] = np.vstack(data[0]).reshape(-1, 9)
data[1] = np.vstack(data[1]).reshape(-1, 9)
np.testing.assert_equal(data[0], self.xyz.reshape(-1, 9))
np.testing.assert_equal(data[1], self.xyz2.reshape(-1, 9))
def testTimeLaggedIterator(self):
lag = 10
reader = FeatureReader(self.trajfile, self.topfile)
frames = 0
data = []
lagged = []
for _, X, Y in reader.iterator(lag=lag):
frames += X.shape[0]
data.append(X)
lagged.append(Y)
assert len(data) == len(lagged)
# .reshape(self.xyz.shape)
merged_lagged = np.concatenate(lagged, axis=0)
# reproduce outcome
xyz_s = self.xyz.shape
fake_lagged = self.xyz.reshape((xyz_s[0], -1))[lag:]
self.assertTrue(np.allclose(merged_lagged, fake_lagged))
# restore shape of input
data = np.vstack(data).reshape(self.xyz.shape)
self.assertEqual(frames, reader.trajectory_lengths()[0])
self.assertTrue(np.allclose(data, self.xyz))
def test_with_pipeline_time_lagged(self):
reader = api.source(self.trajfile, top=self.topfile)
assert isinstance(reader, FeatureReader)
t = tica(dim=2, lag=1)
d = discretizer(reader, t)
d.parametrize()
def test_in_memory(self):
reader = api.source(self.trajfile, top=self.topfile)
out1 = reader.get_output()
# now map stuff to memory
reader.in_memory = True
reader2 = api.source(self.trajfile, top=self.topfile)
out = reader2.get_output()
assert len(out) == len(reader._Y) == 1
np.testing.assert_equal(out1, out)
np.testing.assert_equal(reader._Y[0], out[0])
np.testing.assert_equal(reader.get_output(), out)
# reset in_memory and check output gets deleted
reader.in_memory = False
assert reader._Y is None
def test_in_memory_with_stride(self):
# map "results" to memory
reader = api.source(self.trajfile, top=self.topfile)
reader.in_memory = True
assert reader._parametrized
reader.parametrize(stride=2)
reader2 = api.source(self.trajfile, top=self.topfile)
out = reader2.get_output(stride=2)
np.testing.assert_equal(reader._Y[0], out[0])
def test_in_memory_switch_stride_dim(self):
reader = api.source(self.trajfile, top=self.topfile)
reader.chunksize = 100
reader.in_memory = True
# now get output with different strides
strides = [1, 2, 3, 4, 5, 10, 20]
for s in strides:
out = reader.get_output(stride=s)
shape = (reader.trajectory_length(0, stride=s), reader.dimension())
self.assertEqual(
out[0].shape, shape, "not equal for stride=%i" % s)
def test_lagged_stridden_access(self):
reader = api.source([self.trajfile, self.trajfile2], top=self.topfile)
reader.chunksize = 210
strides = [2, 3, 5, 7, 15]
lags = [1, 3, 7, 10, 30]
err_msg = "not equal for stride=%i, lag=%i"
for stride in strides:
for lag in lags:
chunks = {itraj: []
for itraj in xrange(reader.number_of_trajectories())}
for itraj, _, Y in reader.iterator(stride, lag):
chunks[itraj].append(Y)
chunks[0] = np.vstack(chunks[0])
np.testing.assert_almost_equal(
chunks[0], self.xyz.reshape(-1, 9)[lag::stride], err_msg=err_msg % (stride, lag))
chunks[1] = np.vstack(chunks[1])
np.testing.assert_almost_equal(
chunks[1], self.xyz2.reshape(-1, 9)[lag::stride], err_msg=err_msg % (stride, lag))
if __name__ == "__main__":
unittest.main()
|
{"hexsha": "8cdae65e3882a66f1ccef9dd201ff7948faf151b", "size": 7587, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyemma/coordinates/tests/test_featurereader.py", "max_stars_repo_name": "clonker/PyEMMA", "max_stars_repo_head_hexsha": "a36534ce2ec6a799428dfbdef0465c979e6c68aa", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-01-21T16:55:38.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-21T16:55:38.000Z", "max_issues_repo_path": "pyemma/coordinates/tests/test_featurereader.py", "max_issues_repo_name": "clonker/PyEMMA", "max_issues_repo_head_hexsha": "a36534ce2ec6a799428dfbdef0465c979e6c68aa", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-01-10T18:09:25.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-10T18:09:25.000Z", "max_forks_repo_path": "pyemma/coordinates/tests/test_featurereader.py", "max_forks_repo_name": "clonker/PyEMMA", "max_forks_repo_head_hexsha": "a36534ce2ec6a799428dfbdef0465c979e6c68aa", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.453271028, "max_line_length": 102, "alphanum_fraction": 0.6462369843, "include": true, "reason": "import numpy", "num_tokens": 1807}
|
# -*- coding: utf-8 -*-
"""Stacking (meta ensembling). See http://blog.kaggle.com/2016/12/27/a-kagglers-guide-to-model-stacking-in-practice/
for more information.
"""
# Author: Yue Zhao <zhaoy@cmu.edu>
# License: BSD 2 clause
import warnings
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.utils import shuffle
from sklearn.utils import check_random_state
from sklearn.utils import check_array
from sklearn.utils import check_X_y
from sklearn.utils.validation import check_is_fitted
from pyod.utils.utility import check_parameter
from ..utils.utility import list_diff
from .base import BaseAggregator
def split_datasets(X, y, n_folds=3, shuffle_data=False, random_state=None):
"""Utility function to split the data for stacking. The data is split
into n_folds of roughly equal size.
Parameters
----------
X : numpy array of shape (n_samples, n_features)
The input samples.
y : numpy array of shape (n_samples,)
The ground truth of the input samples (labels).
n_folds : int, optional (default=3)
The number of splits of the training sample.
shuffle_data : bool, optional (default=False)
If True, shuffle the input data.
random_state : RandomState, optional (default=None)
A random number generator instance to define the state of the random
permutations generator.
Returns
-------
X : numpy array of shape (n_samples, n_features)
The input samples. If shuffle_data, return the shuffled data.
y : numpy array of shape (n_samples,)
The ground truth of the input samples (labels). If shuffle_data,
return the shuffled data.
index_lists : list of list
The list of indexes of each fold regarding the returned X and y.
For instance, index_lists[0] contains the indexes of fold 0.
"""
if not isinstance(n_folds, int):
raise ValueError('n_folds must be an integer variable')
check_parameter(n_folds, low=2, include_left=True, param_name='n_folds')
random_state = check_random_state(random_state)
if shuffle_data:
X, y = shuffle(X, y, random_state=random_state)
idx_length = len(y)
idx_list = list(range(idx_length))
avg_length = int(idx_length / n_folds)
index_lists = []
for i in range(n_folds - 1):
index_lists.append(idx_list[i * avg_length:(i + 1) * avg_length])
index_lists.append(idx_list[(n_folds - 1) * avg_length:])
return X, y, index_lists
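# e.g. with 5 samples and n_folds=2 (shuffle_data=False), split_datasets returns
# the inputs unchanged plus index_lists == [[0, 1], [2, 3, 4]].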
class Stacking(BaseAggregator):
"""Meta ensembling, also known as stacking. See
http://blog.kaggle.com/2016/12/27/a-kagglers-guide-to-model-stacking-in-practice/
for more information.
Parameters
----------
base_estimators: list or numpy array (n_estimators,)
A list of base classifiers.
meta_clf : object, optional (default=LogisticRegression)
The meta classifier to make the final prediction.
n_folds : int, optional (default=2)
The number of splits of the training sample.
keep_original : bool, optional (default=False)
If True, keep the original features for training and predicting.
use_proba : bool, optional (default=False)
If True, use the probability prediction as the new features.
shuffle_data : bool, optional (default=False)
If True, shuffle the input data.
random_state : int, RandomState or None, optional (default=None)
If int, random_state is the seed used by the random
number generator; If RandomState instance, random_state is the random
number generator; If None, the random number generator is the
RandomState instance used by `np.random`.
threshold : float in (0, 1), optional (default=None)
Cut-off value to convert scores into binary labels.
pre_fitted : bool, optional (default=False)
Whether the base classifiers are trained. If True, `fit`
process may be skipped.
"""
def __init__(self, base_estimators, meta_clf=None, n_folds=2,
keep_original=True,
use_proba=False, shuffle_data=False, random_state=None,
threshold=None, pre_fitted=None):
super(Stacking, self).__init__(
base_estimators=base_estimators, pre_fitted=pre_fitted)
# validate input parameters
if not isinstance(n_folds, int):
raise ValueError('n_folds must be an integer variable')
check_parameter(n_folds, low=2, include_left=True,
param_name='n_folds')
self.n_folds = n_folds
if meta_clf is not None:
self.meta_clf = meta_clf
else:
self.meta_clf = LogisticRegression()
# set flags
self.keep_original = keep_original
self.use_proba = use_proba
self.shuffle_data = shuffle_data
self.random_state = random_state
if threshold is not None:
warnings.warn(
"Stacking does not support threshold setting option. "
"Please set the threshold in classifiers directly.")
if pre_fitted is not None:
warnings.warn("Stacking does not support pre_fitted option.")
def fit(self, X, y):
"""Fit classifier.
Parameters
----------
X : numpy array of shape (n_samples, n_features)
The input samples.
y : numpy array of shape (n_samples,), optional (default=None)
The ground truth of the input samples (labels).
"""
# Validate inputs X and y
X, y = check_X_y(X, y)
X = check_array(X)
self._set_n_classes(y)
n_samples = X.shape[0]
# initialize matrix for storing newly generated features
new_features = np.zeros([n_samples, self.n_base_estimators_])
# build CV datasets
X_new, y_new, index_lists = split_datasets(
X, y, n_folds=self.n_folds, shuffle_data=self.shuffle_data,
random_state=self.random_state)
# iterate over all base classifiers
for i, clf in enumerate(self.base_estimators):
# iterate over all folds
for j in range(self.n_folds):
# build train and test index
full_idx = list(range(n_samples))
test_idx = index_lists[j]
train_idx = list_diff(full_idx, test_idx)
X_train, y_train = X_new[train_idx, :], y_new[train_idx]
X_test, y_test = X_new[test_idx, :], y_new[test_idx]
# train the classifier
clf.fit(X_train, y_train)
# generate the new features on the pseudo test set
if self.use_proba:
new_features[test_idx, i] = clf.predict_proba(
X_test)[:, 1]
else:
new_features[test_idx, i] = clf.predict(X_test)
# build the new dataset for training
if self.keep_original:
X_new_comb = np.concatenate([X_new, new_features], axis=1)
else:
X_new_comb = new_features
y_new_comb = y_new
# train the meta classifier
self.meta_clf.fit(X_new_comb, y_new_comb)
self.fitted_ = True
# train all base classifiers on the full train dataset
# iterate over all base classifiers
for i, clf in enumerate(self.base_estimators):
clf.fit(X_new, y_new)
return
def _process_data(self, X):
"""Internal class for both `predict` and `predict_proba`
Parameters
----------
X : numpy array of shape (n_samples, n_features)
The input samples.
Returns
-------
X_new_comb : Numpy array
The processed dataset of X.
"""
check_is_fitted(self, ['fitted_'])
X = check_array(X)
n_samples = X.shape[0]
# initialize matrix for storing newly generated features
new_features = np.zeros([n_samples, self.n_base_estimators_])
# build the new features for unknown samples
# iterate over all base classifiers
for i, clf in enumerate(self.base_estimators):
# generate the new features on the test set
if self.use_proba:
new_features[:, i] = clf.predict_proba(X)[:, 1]
else:
new_features[:, i] = clf.predict(X)
# build the new dataset for unknown samples
if self.keep_original:
X_new_comb = np.concatenate([X, new_features], axis=1)
else:
X_new_comb = new_features
return X_new_comb
def predict(self, X):
"""Predict the class labels for the provided data.
Parameters
----------
X : numpy array of shape (n_samples, n_features)
The input samples.
Returns
-------
labels : numpy array of shape (n_samples,)
Class labels for each data sample.
"""
X_new_comb = self._process_data(X)
return self.meta_clf.predict(X_new_comb)
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : numpy array of shape (n_samples, n_features)
The input samples.
Returns
-------
p : numpy array of shape (n_samples,)
The class probabilities of the input samples.
Classes are ordered by lexicographic order.
"""
X_new_comb = self._process_data(X)
return self.meta_clf.predict_proba(X_new_comb)
def fit_predict(self, X, y):
"""Fit estimator and predict on X
Parameters
----------
X : numpy array of shape (n_samples, n_features)
The input samples.
y : numpy array of shape (n_samples,), optional (default=None)
The ground truth of the input samples (labels).
Returns
-------
labels : numpy array of shape (n_samples,)
Class labels for each data sample.
"""
raise NotImplementedError(
'fit_predict should not be used in supervised learning models.')
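# A minimal usage sketch (comment only: the relative import above means this
# module is used inside the combo package; estimator names are illustrative):
#     from sklearn.tree import DecisionTreeClassifier
#     from sklearn.neighbors import KNeighborsClassifier
#     clf = Stacking([DecisionTreeClassifier(), KNeighborsClassifier()], n_folds=2)
#     clf.fit(X_train, y_train)
#     y_pred = clf.predict(X_test)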
|
{"hexsha": "01c681a57f8dbe6fe83792ae7798d75aacb5c746", "size": 10251, "ext": "py", "lang": "Python", "max_stars_repo_path": "combo/models/classifier_stacking.py", "max_stars_repo_name": "vishalbelsare/combo", "max_stars_repo_head_hexsha": "229d578de498b47ae03cf2580472aceebf8c2766", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 611, "max_stars_repo_stars_event_min_datetime": "2019-07-14T14:54:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-19T08:34:48.000Z", "max_issues_repo_path": "combo/models/classifier_stacking.py", "max_issues_repo_name": "chrinide/combo-1", "max_issues_repo_head_hexsha": "0c0539c9b116dd35763c89d97edb6b568e98abbf", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2019-08-01T05:41:04.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-08T22:55:45.000Z", "max_forks_repo_path": "combo/models/classifier_stacking.py", "max_forks_repo_name": "chrinide/combo-1", "max_forks_repo_head_hexsha": "0c0539c9b116dd35763c89d97edb6b568e98abbf", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 107, "max_forks_repo_forks_event_min_datetime": "2019-07-16T16:01:54.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-19T18:52:10.000Z", "avg_line_length": 32.750798722, "max_line_length": 116, "alphanum_fraction": 0.6232562677, "include": true, "reason": "import numpy", "num_tokens": 2218}
|
#include <iostream>
#include <string>
#include <vector>
#define BOOST_TEST_MAIN
#include <boost/test/included/unit_test.hpp>
#include "pillowtalk.h"
using namespace std;
int gNumberOfHeartbeats = 0;
static int callback_non_cont(pt_node_t* node)
{
string document = "http://127.0.0.1:5984/pt_test/";
pt_printout(node, " ");
cout << endl;
return 0;
}
static int callback(pt_node_t* node)
{
if (pt_is_null(node)) {
gNumberOfHeartbeats++;
cout << "Beat" << endl;
if (gNumberOfHeartbeats == 1) {
cout << "Number of heartbeats seen: " << gNumberOfHeartbeats << endl;
cout << "Ending ..." << endl;
return -1;
}
return 0;
}
string document = "http://127.0.0.1:5984/pt_test/";
pt_printout(node, " ");
const char* astr = pt_string_get(pt_map_get(node, "id"));
if (astr) document += astr;
pt_response_t* temp = pt_get(document.c_str());
pt_printout(temp->root, " ");
cout << endl;
pt_free_response(temp);
return 0;
}
BOOST_AUTO_TEST_CASE(test_noncontinuous_changesfeed)
{
pt_init();
pt_changes_feed cf = pt_changes_feed_alloc();
pt_changes_feed_config(cf, pt_changes_feed_continuous, 0);
pt_changes_feed_config(cf, pt_changes_feed_callback_function, &callback_non_cont);
int ret_code = pt_changes_feed_run(cf, "http://127.0.0.1:5984","pt_test");
BOOST_REQUIRE_EQUAL(ret_code,0);
pt_changes_feed_free(cf);
pt_cleanup();
}
BOOST_AUTO_TEST_CASE(test_continuous_changesfeed)
{
pt_init();
pt_changes_feed cf = pt_changes_feed_alloc();
pt_changes_feed_config(cf, pt_changes_feed_continuous, 1);
pt_changes_feed_config(cf, pt_changes_feed_req_heartbeats, 1000);
pt_changes_feed_config(cf, pt_changes_feed_callback_function, &callback);
int ret_code = pt_changes_feed_run(cf, "http://127.0.0.1:5984","pt_test");
BOOST_WARN_MESSAGE(ret_code == 0, "Run continuous changes feed failed, not compiled with pthreads?");
pt_changes_feed_free(cf);
pt_cleanup();
}
|
{"hexsha": "2197903b6bac0b29d56baa653d4006058345dfe4", "size": 1960, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/test_changes_feed.cpp", "max_stars_repo_name": "kuhlmannmarkus/pillowtalk", "max_stars_repo_head_hexsha": "eb752d148b9bdde6739294e5b30d30523caf4085", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5.0, "max_stars_repo_stars_event_min_datetime": "2016-03-25T16:50:35.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-27T02:42:49.000Z", "max_issues_repo_path": "test/test_changes_feed.cpp", "max_issues_repo_name": "kuhlmannmarkus/pillowtalk", "max_issues_repo_head_hexsha": "eb752d148b9bdde6739294e5b30d30523caf4085", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2016-04-24T20:53:19.000Z", "max_issues_repo_issues_event_max_datetime": "2016-04-24T20:53:19.000Z", "max_forks_repo_path": "test/test_changes_feed.cpp", "max_forks_repo_name": "kuhlmannmarkus/pillowtalk", "max_forks_repo_head_hexsha": "eb752d148b9bdde6739294e5b30d30523caf4085", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5.0, "max_forks_repo_forks_event_min_datetime": "2016-03-26T19:17:58.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-25T18:31:08.000Z", "avg_line_length": 27.2222222222, "max_line_length": 103, "alphanum_fraction": 0.7142857143, "num_tokens": 540}
|
${TNOC_HOME}/rtl/axi_adapter/tnoc_axi_pkg.sv
${TNOC_HOME}/rtl/axi_adapter/tnoc_axi_types.sv
${TNOC_HOME}/rtl/axi_adapter/tnoc_axi_utils.sv
${TNOC_HOME}/rtl/axi_adapter/tnoc_axi_if.sv
${TNOC_HOME}/rtl/axi_adapter/tnoc_axi_if_connector.sv
${TNOC_HOME}/rtl/axi_adapter/tnoc_axi_byte_counter.sv
${TNOC_HOME}/rtl/axi_adapter/tnoc_axi_id_generator.sv
${TNOC_HOME}/rtl/axi_adapter/tnoc_axi_id_locker.sv
${TNOC_HOME}/rtl/axi_adapter/tnoc_axi_request_info_buffer.sv
${TNOC_HOME}/rtl/axi_adapter/tnoc_axi_write_read_mux.sv
${TNOC_HOME}/rtl/axi_adapter/tnoc_axi_write_read_demux.sv
${TNOC_HOME}/rtl/axi_adapter/tnoc_axi_slave_write_adapter.sv
${TNOC_HOME}/rtl/axi_adapter/tnoc_axi_slave_read_adapter.sv
${TNOC_HOME}/rtl/axi_adapter/tnoc_axi_slave_adapter.sv
${TNOC_HOME}/rtl/axi_adapter/tnoc_axi_master_write_adapter.sv
${TNOC_HOME}/rtl/axi_adapter/tnoc_axi_master_read_adapter.sv
${TNOC_HOME}/rtl/axi_adapter/tnoc_axi_master_adapter.sv
|
{"hexsha": "b9b3fb2c9d843bf8df46e3fb08cdb44231b2b103", "size": 926, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "third_party/tests/Tnoc/cores/tnoc/rtl/axi_adapter/compile.f", "max_stars_repo_name": "little-blue/Surelog", "max_stars_repo_head_hexsha": "1c2459f841f6e6d923b336feacd22ccfb9aea845", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 156, "max_stars_repo_stars_event_min_datetime": "2019-11-16T17:29:55.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-21T05:41:13.000Z", "max_issues_repo_path": "third_party/tests/Tnoc/cores/tnoc/rtl/axi_adapter/compile.f", "max_issues_repo_name": "little-blue/Surelog", "max_issues_repo_head_hexsha": "1c2459f841f6e6d923b336feacd22ccfb9aea845", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 414, "max_issues_repo_issues_event_min_datetime": "2021-06-11T07:22:01.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T22:06:14.000Z", "max_forks_repo_path": "third_party/tests/Tnoc/cores/tnoc/rtl/axi_adapter/compile.f", "max_forks_repo_name": "little-blue/Surelog", "max_forks_repo_head_hexsha": "1c2459f841f6e6d923b336feacd22ccfb9aea845", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 30, "max_forks_repo_forks_event_min_datetime": "2019-11-18T16:31:40.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-26T01:22:51.000Z", "avg_line_length": 51.4444444444, "max_line_length": 61, "alphanum_fraction": 0.8531317495, "num_tokens": 345}
|
From Coq Require Import Arith.Arith.
From Coq Require Import Bool.Bool.
From Coq Require Import Arith.PeanoNat.
From Coq Require Import micromega.Lia.
From Coq Require Import Lists.List.
From Coq Require Import Reals.Reals. Import Rdefinitions. Import RIneq.
From Coq Require Import ZArith.Int. Import Znat.
From Coq Require Import Setoids.Setoid.
From Coq Require Import Logic.FunctionalExtensionality.
Require Coq.derive.Derive.
Import ListNotations.
From ATL Require Import ATL Common CommonTactics Tactics GenPushout LetLifting.
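(* im2colmini/im2col express a 1-D convolution as a reduction over a gathered
   window; the Derive blocks below reschedule them by let-lifting the gathered
   tensor (the "image to column" buffer x0) out of the summation. *)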
Definition im2colmini K W RR (w : (list (list R))) (x : list R) :=
GEN [ k < K ]
GEN [ p < W ]
SUM [ r < RR ]
|[ p + r <? K ]| (w _[ k ; r ] * x _[ p + r ])%R.
Definition im2col B K W C RR (w x : (list (list (list R)))) :=
GEN [ n < B ]
GEN [ k < K ]
GEN [ p < W ]
SUM [ c < C ]
SUM [ r < RR ]
(w _[ k ; c ; r ] * x _[ n ; c ; p + r ])%R.
Hint Unfold im2col im2colmini : examples.
Section Mini.
Variables (K W RR : Z) (w : (list (list R))) (x : list R).
Derive im2colminilifted SuchThat
((0 < RR)%Z ->
im2colmini K W RR w x = im2colminilifted) As miniim2col.
Proof.
reschedule.
setoid_rewrite <- guard_mul_r.
rw^ @lbind_helper for (fun e => _ * e)%R.
time rw ll_sum.
rw @ll_gen.
rw @ll_gen_indep.
done.
Defined.
End Mini.
Section Im2col.
Variables (B K W C RR : Z) (w x : (list (list (list R)))).
Derive im2col_lifted SuchThat
(im2col B K W C RR w x =
im2col_lifted) As im2col_sched.
Proof.
reschedule.
rw^ @lbind_helper for (fun e => _ * e)%R.
rw @ll_sum.
rw @ll_sum.
rw @ll_gen.
rw @ll_gen_indep.
rw @ll_gen.
done.
Qed.
End Im2col.
Hint Unfold im2col_lifted im2colminilifted : examples.
Goal forall B K W C RR w x,
im2col_lifted B K W C RR w x =
tlet x0
:= GEN [ i < B ]
GEN [ i0 < W ]
GEN [ i1 < C ]
GEN [ i2 < RR ]
x _[ i; i1; i0 + i2]
in GEN [ n' < B ]
GEN [ n'0 < K ]
GEN [ n'1 < W ]
SUM [ n'2 < C ]
SUM [ n'3 < RR ]
(w _[ n'0; n'2; n'3] * x0 _[ n'; n'1; n'2; n'3])%R.
Proof. reflexivity. Qed.
Goal forall K W RR w x,
im2colminilifted K W RR w x =
tlet x0 := GEN [ i < W ]
GEN [ i0 < RR ]
(|[ i + i0 <? K ]| x _[ i + i0])
in GEN [ n' < K ]
GEN [ n'0 < W ]
SUM [ n'1 < RR ]
(w _[ n'; n'1] * x0 _[ n'0; n'1])%R.
Proof. reflexivity. Qed.
|
{"author": "ChezJrk", "repo": "verified-scheduling", "sha": "e9876602147114e4378f10ac1402bd5705c0cef0", "save_path": "github-repos/coq/ChezJrk-verified-scheduling", "path": "github-repos/coq/ChezJrk-verified-scheduling/verified-scheduling-e9876602147114e4378f10ac1402bd5705c0cef0/src/Im2col.v"}
|
from xcal3d import *
import xml.sax
import numpy
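# Inspect a Cal3D skeleton: parse an .xsf file, follow a chain of bones from
# the spine down the right leg, and report parent links and inter-bone
# distances.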
if __name__ == "__main__":
import sys
import os
f = sys.argv[1]
ext = os.path.splitext(f)[1]
    if ext != ".xsf":
        print("unsupported skeleton file, only xsf")
        sys.exit(1)
source = open(f,"rb")
pa = SkelParser()
xml.sax.parse(source, pa)
sk = pa.target
db = dict([(b.name,b) for b in sk.bones.values()])
prefix = "Bip01 "
names = ["Spine","R Thigh","R Calf","R Foot"]
bones = [db[prefix+x] for x in names]
    print("\n".join(["%s <- %s" % (x.name, x.parent is not None and x.parent.name or "<none>") for x in bones]))
    for x in db.values():
        if x.parent == bones[-1]:
            print("children of foot:", x.name)
    poses = [numpy.array(x.tx) for x in bones]
    distances = [numpy.linalg.norm(poses[i]-poses[i-1]) for i in range(1,len(poses))]
    print("distances", distances)
    deltas = "\n".join(["%s:%s" % (bones[i].name, str(poses[i]-poses[i-1])) for i in range(1,len(poses))])
    print(deltas)
|
{"hexsha": "49bde1ed589401c373a2d67b12accdfd2c9ff9ec", "size": 919, "ext": "py", "lang": "Python", "max_stars_repo_path": "xcal3d/showbodyinfo.py", "max_stars_repo_name": "eruffaldi/pyxcal3d", "max_stars_repo_head_hexsha": "443fb7c0c9168dbd2dc39816198933172682bde2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2017-09-10T13:31:04.000Z", "max_stars_repo_stars_event_max_datetime": "2017-12-31T03:48:53.000Z", "max_issues_repo_path": "xcal3d/showbodyinfo.py", "max_issues_repo_name": "eruffaldi/pyxcal3d", "max_issues_repo_head_hexsha": "443fb7c0c9168dbd2dc39816198933172682bde2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "xcal3d/showbodyinfo.py", "max_forks_repo_name": "eruffaldi/pyxcal3d", "max_forks_repo_head_hexsha": "443fb7c0c9168dbd2dc39816198933172682bde2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.5641025641, "max_line_length": 106, "alphanum_fraction": 0.6267682263, "include": true, "reason": "import numpy", "num_tokens": 291}
|
SUBROUTINE GSTURB ( szturb, ituwid, iret )
C************************************************************************
C* GSTURB *
C* *
C* This subroutine sets the turbulence symbol attributes. *
C* *
C* GSTURB ( SZTURB, ITUWID, IRET ) *
C* *
C* Input parameters: *
C* SZTURB REAL Turbulence symbol size *
C* ITUWID INTEGER Turbulence symbol line width *
C* *
C* Output parameters: *
C* IRET INTEGER Return code *
C** *
C* Log: *
C* M. Linda/GSC 8/96 Based on GSWTHR *
C************************************************************************
INCLUDE 'ERROR.PRM'
INCLUDE 'DEVCHR.CMN'
INCLUDE 'DEVREQ.CMN'
INCLUDE 'DEVSET.CMN'
C-----------------------------------------------------------------------
C*
iret = NORMAL
C
C* If these are the current requested characteristics, do nothing.
C
IF ( szturb .eq. rtursz .and. ituwid .eq. ktuwid ) RETURN
C
C* Set requested parameters.
C
IF ( szturb .gt. 0 ) rtursz = szturb
IF ( ituwid .ge. 1 ) ktuwid = ituwid
C
C* Make changes in device only if the device has been set.
C
IF ( ddev .ne. ' ' ) THEN
C
C* Set only if requested is different than what is already set.
C
IF ( rtursz .ne. stursz .or. ktuwid .ne. ltuwid)
+ CALL DSTURB ( rtursz, ktuwid, stursz, ltuwid, iret )
END IF
C*
RETURN
END
|
{"hexsha": "82acff732308c9b14f304f8331f2608589aa4e8d", "size": 1355, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "gempak/source/gplt/attribute/gsturb.f", "max_stars_repo_name": "oxelson/gempak", "max_stars_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 42, "max_stars_repo_stars_event_min_datetime": "2015-06-03T15:26:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T22:36:03.000Z", "max_issues_repo_path": "gempak/source/gplt/attribute/gsturb.f", "max_issues_repo_name": "oxelson/gempak", "max_issues_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 60, "max_issues_repo_issues_event_min_datetime": "2015-05-11T21:36:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T16:22:42.000Z", "max_forks_repo_path": "gempak/source/gplt/attribute/gsturb.f", "max_forks_repo_name": "oxelson/gempak", "max_forks_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 27, "max_forks_repo_forks_event_min_datetime": "2016-06-06T21:55:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T18:23:28.000Z", "avg_line_length": 28.2291666667, "max_line_length": 73, "alphanum_fraction": 0.5232472325, "num_tokens": 448}
|
from pdb import set_trace as st
import os
import numpy as np
import cv2
import argparse
from sklearn.model_selection import train_test_split
import glob
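# Split paired sharp/blurred images into train/test/val subdirectories for
# training DeblurGAN; images are copied (not symlinked) into the new layout.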
parser = argparse.ArgumentParser('create subdirectories for training deblur gan')
parser.add_argument('--fold_A', dest='fold_A', help='input directory for image A, ex. sharp images', type=str, default='../dataset/50kshoes_edges')
parser.add_argument('--fold_B', dest='fold_B', help='input directory for image B, ex. blurred images', type=str, default='../dataset/50kshoes_jpg')
parser.add_argument('--output', dest='output', help='output directory', type=str, default='../dataset/test_AB')
args = parser.parse_args()
for arg in vars(args):
print('[%s] = ' % arg, getattr(args, arg))
folder = args.output
os.makedirs(folder, exist_ok=True)
sharp_out = os.path.join(folder, 'sharp')
blurred_out = os.path.join(folder, 'blurred')
os.makedirs(sharp_out, exist_ok=True)
os.makedirs(blurred_out, exist_ok=True)
map_folder={}
for f in [sharp_out,blurred_out]:
for s in ['train','test','val']:
map_folder[f+'_'+s]=os.path.join(f,s)
os.makedirs(map_folder[f+'_'+s],exist_ok=True)
print(map_folder)
#blurred_in = glob.glob(os.path.join(args.fold_A,'*.*g'))
imfiles= glob.glob(os.path.join(args.fold_B,'*.*g'))
X_train, X_test, y_train, _= train_test_split(imfiles, np.ones(len(imfiles)), test_size=0.2, random_state=1)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=1)
def copy_split(files, split_name, verbose=False):
    # Copy each sharp image and its same-named blurred counterpart into the
    # split's subdirectories; files without a blurred pair are skipped.
    for x in files:
        name = os.path.basename(x)
        blur = os.path.join(args.fold_B, name)
        if not os.path.exists(blur):
            print('blur image does not exist:\n', blur)
            continue
        dst = os.path.join(map_folder[sharp_out + '_' + split_name], name)
        if verbose:
            print('in', x)
        cv2.imwrite(dst, cv2.imread(x))
        if verbose:
            print('out', dst)
        dst_blur = os.path.join(map_folder[blurred_out + '_' + split_name], name)
        cv2.imwrite(dst_blur, cv2.imread(blur))
copy_split(X_train, 'train', verbose=True)
copy_split(X_test, 'test')
copy_split(X_val, 'val')
|
{"hexsha": "38dc462b7b66daa60197d70c13aabe32a9338839", "size": 3099, "ext": "py", "lang": "Python", "max_stars_repo_path": "datasets/split_train_test_val.py", "max_stars_repo_name": "piperod/DeblurGAN", "max_stars_repo_head_hexsha": "a008b9fa94f49b351a68fafaac864619f0b7d569", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "datasets/split_train_test_val.py", "max_issues_repo_name": "piperod/DeblurGAN", "max_issues_repo_head_hexsha": "a008b9fa94f49b351a68fafaac864619f0b7d569", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "datasets/split_train_test_val.py", "max_forks_repo_name": "piperod/DeblurGAN", "max_forks_repo_head_hexsha": "a008b9fa94f49b351a68fafaac864619f0b7d569", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-05-18T13:04:51.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-18T13:04:51.000Z", "avg_line_length": 32.6210526316, "max_line_length": 147, "alphanum_fraction": 0.6721523072, "include": true, "reason": "import numpy", "num_tokens": 868}
|
from decoding.strategy_utils import generate_step_with_prob, assign_single_value_long, assign_single_value_byte, assign_multi_value_long, convert_tokens
import models.Constants as Constants
import torch
from tqdm import tqdm
import numpy as np
import json
import math
import matplotlib.pyplot as plt
from matplotlib import cm
import torch.nn.functional as F
def plot(tgt_tokens, tgt_vocab, token_probs, corresponding_probs, num_mask, mask_ind, counter, teacher_model, split=False):
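    """Debug helper: print per-token student/teacher probabilities for one
    sample and render them as a heatmap written to disk."""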
for n in range(1,2):
sent = []
stu = []
tea = []
overall = []
select_id = n
tmp = tgt_tokens[select_id].tolist()
mask_id = [0] * len(tmp)
for i, token in enumerate(tmp):
if token == Constants.PAD:
break
word = tgt_vocab[str(token)]
tmp1 = token_probs[select_id, i]
tmp2 = corresponding_probs[select_id, i]
#jud = 'True' if tgt_tokens[select_id, i].item() == Constants.MASK else 'False'
#tqdm.write('%s\t%.4f\t%.4f\t%.8f\t%s' % (word, tmp1, tmp2, tmp1 * tmp2, jud))
sent.append('%s' % word)
stu.append("%.2f" %tmp1)
tea.append("%.2f" %tmp2)
overall.append('%.2f' % (math.sqrt(tmp1 * tmp2)))
if i < num_mask[select_id].item():
mask_id[mask_ind[select_id, i].item()] = 1.0
sent.append(str(num_mask[select_id].item()))
tqdm.write(("Step %d: " % (counter)) + ' '.join(sent))
tqdm.write(("Step %d Stu: " % (counter)) + ','.join(stu))
tqdm.write(("Step %d Tea: " % (counter)) + ','.join(tea))
tqdm.write(("Step %d All: " % (counter)) + ','.join(overall))
mask_id = ['%.2f' % item for item in mask_id]
tqdm.write(("Step %d Mas: " % (counter)) + ','.join(mask_id))
stu = [float(item) for item in stu]
tea = [float(item) for item in tea]
overall = [float(item) for item in overall]
if teacher_model is not None:
a = np.array([stu[:-1], tea[:-1], overall[:-1]])
else:
a = np.array([stu[:-1]])
myplot = plt.imshow(a, cmap=cm.Blues, vmin=0, vmax=1)
cbar = plt.colorbar(myplot, shrink=.92, orientation='horizontal')
plt.xticks(())
plt.yticks(())
plt.savefig('./%d_%d.png' % (1 if teacher_model is not None else 0, counter))
plt.show()
if split:
tqdm.write('-----------------------')
def to_sentence(hyp, vocab, break_words=[Constants.PAD], skip_words=[]):
sent = []
for word_id in hyp:
if word_id in skip_words:
continue
if word_id in break_words:
break
sent.append(vocab[str(word_id)])
return ' '.join(sent)
class MaskPredict(object):
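    """Mask-Predict decoding: start from fully masked targets, then repeatedly
    re-mask the least confident tokens and re-predict them for a fixed number
    of iterations."""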
def __init__(self, iterations, seed, dict_mapping, plot=False, collect_best_candidate_iterative_results=False):
super().__init__()
self.iterations = iterations
self.random = np.random.RandomState(seed)
self.dict_mapping = dict_mapping
self.plot = plot
self.collect_best_candidate_iterative_results = collect_best_candidate_iterative_results
def generate(self, model, teacher_model, enc_output, teacher_enc_output, category, tgt_tokens, tgt_vocab):
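        """Run the iterative refinement; token confidences combine the
        student's probabilities with the teacher model's scores (when a
        teacher is given) to decide which positions to re-mask."""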
bsz, seq_len = tgt_tokens.size()
pad_mask = tgt_tokens.eq(Constants.PAD)
seq_lens = seq_len - pad_mask.sum(dim=1)
collect_results = []
iterations = seq_len if self.iterations is None else self.iterations
tgt_tokens, token_probs, all_probs = self.generate_non_autoregressive(model, enc_output, category, tgt_tokens)
corresponding_probs = self.scoring_by_teacher(teacher_model, teacher_enc_output, category, tgt_tokens)#, no_masking_desicion=True)
tgt_tokens[pad_mask] = Constants.PAD
token_probs[pad_mask] = 1.0
corresponding_probs[pad_mask] = 1.0
if self.collect_best_candidate_iterative_results:
collect_results.append(tgt_tokens.clone())
#tqdm.write("Initialization: " + to_sentence(tgt_tokens[0].tolist(), tgt_vocab))
for counter in range(1, iterations):
ratio = (1.0 - (counter / iterations))
ratio = max(ratio, 0.4)
# Mask
num_mask = (seq_lens.float() * ratio).long()
mask_ind = self.select_worst(token_probs * corresponding_probs, num_mask)
if self.plot: plot(tgt_tokens, tgt_vocab, token_probs, corresponding_probs, num_mask, mask_ind, counter, teacher_model, split=True)
tgt_tokens[mask_ind] = Constants.MASK
# Predict
new_tgt_tokens, new_token_probs, all_probs = self.generate_non_autoregressive(model, enc_output, category, tgt_tokens)
token_probs[mask_ind] = new_token_probs[mask_ind]
tgt_tokens[mask_ind] = new_tgt_tokens[mask_ind]
# Interact
corresponding_probs = self.scoring_by_teacher(teacher_model, teacher_enc_output, category, tgt_tokens)
corresponding_probs[pad_mask] = 1.0
if self.collect_best_candidate_iterative_results:
collect_results.append(tgt_tokens.clone())
if self.plot:
plot(tgt_tokens, tgt_vocab, token_probs, corresponding_probs, num_mask, mask_ind, counter+1, teacher_model, split=True)
#lprobs = token_probs.log()
lprobs = (token_probs * corresponding_probs).log()
#eos_mask = tgt_tokens.eq(Constants.EOS)
#non_pad_eos_mask = 1 - (eos_mask + pad_mask).gt(0)
#lengths = non_pad_eos_mask.sum(-1)
return tgt_tokens, lprobs, collect_results
def generate_non_autoregressive(self, model, enc_output, category, tgt_tokens):
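        """One parallel decoder pass: predict every target position at once
        and return the argmax tokens with their probabilities."""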
#print(enc_output[0])
decoder_out, *_ = model.decoder(tgt_tokens, enc_output, category)
if isinstance(decoder_out, list):
decoder_out = decoder_out[-1]
tgt_tokens, token_probs, all_probs = generate_step_with_prob(model.tgt_word_prj(decoder_out))
return tgt_tokens, token_probs, all_probs
def mapping(self, tgt_tokens):
tokens = tgt_tokens.clone().flatten()
for i, token in enumerate(tokens):
tokens[i] = self.dict_mapping[token.item()]
return tokens.view(*tgt_tokens.shape)
def scoring_by_teacher(self, teacher_model, teacher_enc_output, category, tgt_tokens, no_masking_desicion=False):
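        """Score the current hypothesis with the autoregressive teacher:
        returns the probability the teacher assigns to each generated token
        (all ones when no teacher is used)."""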
if teacher_model is None or no_masking_desicion:
return tgt_tokens.new(*tgt_tokens.shape).fill_(1).float()
if self.dict_mapping != {}:
tokens = self.mapping(tgt_tokens)
else:
tokens = tgt_tokens
tgt_tokens_with_bos = torch.cat([tokens.new(tokens.size(0), 1).fill_(Constants.BOS), tokens], dim=1)
#print(tgt_tokens_with_bos.shape, teacher_enc_output.shape, category.shape)
decoder_out, *_ = teacher_model.decoder(tgt_tokens_with_bos[:, :-1], teacher_enc_output, category)
if isinstance(decoder_out, list):
decoder_out = decoder_out[-1]
probs = F.softmax(teacher_model.tgt_word_prj(decoder_out), dim=-1)
return probs.gather(2, tokens.unsqueeze(2)).squeeze(2)
'''
def select_worst(self, token_probs, num_mask):
bsz, seq_len = token_probs.size()
masks = [token_probs[batch, :].topk(max(1, num_mask[batch]), largest=False, sorted=False)[1] for batch in range(bsz)]
masks = [torch.cat([mask, mask.new(seq_len - mask.size(0)).fill_(mask[0])], dim=0) for mask in masks]
return torch.stack(masks, dim=0)
'''
def select_worst(self, token_probs, num_mask):
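        """Return a byte mask marking, per sample, the num_mask positions with
        the lowest token probability."""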
masks = torch.zeros(*token_probs.shape, device=token_probs.device)
for i in range(masks.size(0)):
ind = token_probs[i, :].topk(max(1, num_mask[i]), largest=False, sorted=False)[1]
masks[i, ind] = 1
return masks.byte()
def select_random(self, token_probs, num_mask, seq_lens):
bsz, seq_len = token_probs.size()
masks = []
for i in range(bsz):
ind = self.random.choice(seq_lens[i].item(), size=max(1, num_mask[i].item()), replace=False)
ind = list(ind)
ind += [ind[0]] * (seq_len - len(ind))
masks.append(torch.LongTensor(ind))
return torch.stack(masks, dim=0).to(token_probs.device)
def select_multinomial(self, token_probs, num_mask, seq_lens):
probs = torch.exp(-token_probs)
bsz, seq_len = token_probs.size()
masks = []
for i in range(bsz):
ind = probs[i, :int(seq_lens[i])].multinomial(max(1, num_mask[i].item()))
ind = list(ind)
ind += [ind[0]] * (seq_len - len(ind))
masks.append(torch.LongTensor(ind))
return torch.stack(masks, dim=0).to(token_probs.device)
def generate(model, teacher_model, encoder_outputs, teacher_encoder_outputs, category, tgt_tokens, tgt_vocab, opt, dict_mapping, length_bias):
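    """Length-beam wrapper around MaskPredict: decode each sample at several
    candidate target lengths and keep the hypothesis with the best
    length-normalized log-probability."""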
strategy = MaskPredict(opt['iterations'], opt['seed'], dict_mapping=dict_mapping)
length_beam_size = opt['length_beam_size']
#gold_target_len = tgt_tokens.ne(Constants.PAD).sum(-1)
gold_target_len = None
#gold_target_len = tgt_tokens.ne(Constants.PAD).sum(-1) if opt['use_gold_target_len'] else None
beam_alpha = opt.get('beam_alpha', 1.0)
#print(beam_alpha)
enc_output, pred_length = encoder_outputs['enc_output'], encoder_outputs['pred_length']
if teacher_encoder_outputs is not None:
teacher_enc_output = teacher_encoder_outputs['enc_output']
if isinstance(teacher_enc_output, list):
teacher_enc_output = teacher_enc_output[0]
else:
teacher_enc_output = None
if isinstance(enc_output, list):
assert len(enc_output) == 1
enc_output = enc_output[0]
bsz = enc_output.size(0)
beam = predict_length_beam(gold_target_len, pred_length, length_beam_size, length_bias)
max_len = beam.max().item()
length_mask = torch.triu(enc_output.new(max_len, max_len).fill_(1).long(), 1)
length_mask = torch.stack([length_mask[beam[batch] - 1] for batch in range(bsz)], dim=0)
tgt_tokens = enc_output.new(bsz, length_beam_size, max_len).fill_(Constants.MASK).long()
tgt_tokens = (1 - length_mask) * tgt_tokens + length_mask * Constants.PAD
tgt_tokens = tgt_tokens.view(bsz * length_beam_size, max_len)
enc_output = enlarge(enc_output, length_beam_size)
category = enlarge(category, length_beam_size)
if teacher_enc_output is not None:
teacher_enc_output = enlarge(teacher_enc_output, length_beam_size)
hypotheses, lprobs, collect_results = strategy.generate(model, teacher_model, enc_output, teacher_enc_output, category, tgt_tokens, tgt_vocab)
tgt_lengths = (1 - length_mask).sum(-1) -1
hypotheses = hypotheses.view(bsz, length_beam_size, max_len)
lprobs = lprobs.view(bsz, length_beam_size, max_len)
tgt_lengths = tgt_lengths.view(bsz, length_beam_size)
#tgt_lengths = (1 - length_mask).sum(-1)-1
avg_log_prob = lprobs.sum(-1) / (tgt_lengths.float() ** beam_alpha)
best_lengths = avg_log_prob.max(-1)[1] # [batch_size]
best_lengths = best_lengths.unsqueeze(1).unsqueeze(2).repeat(1, 1, max_len) # [batch_size, 1, max_len]
hypotheses = hypotheses.gather(1, best_lengths).squeeze(1) # [batch_size, max_len]
    #lprobs = lprobs.gather(1, best_lengths).squeeze(1)  # [batch_size, max_len]
lprobs = None # For speedup
if collect_results:
collect_results = [item.view(bsz, length_beam_size, max_len) for item in collect_results]
#print(collect_results[0][0])
#print(collect_results[1][0])
#print(collect_results[2][0])
collect_results = [item.gather(1, best_lengths).squeeze(1) for item in collect_results]
lprobs = torch.stack(collect_results, dim=1)
return hypotheses, lprobs
    return hypotheses, lprobs
def enlarge(info, beam_size):
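    """Repeat a tensor beam_size times along a new beam axis and fold the beam
    into the batch dimension."""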
bsz, *rest_shape = info.shape
if len(rest_shape) == 2:
return info.unsqueeze(1).repeat(1, beam_size, 1, 1).view(bsz * beam_size, *rest_shape)
return info.unsqueeze(1).repeat(1, beam_size, 1).view(bsz * beam_size, *rest_shape)
def predict_length_beam(gold_target_len, predicted_lengths, length_beam_size, length_bias):
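    """Pick length_beam_size candidate target lengths, either around the gold
    length or from the top of the length predictor, clamped to [4, 19]."""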
if gold_target_len is not None:
beam_starts = gold_target_len - (length_beam_size - 1) // 2
beam_ends = gold_target_len + length_beam_size // 2 + 1
#beam = torch.stack([torch.arange(7, 12, device=beam_starts.device) for batch in range(gold_target_len.size(0))], dim=0)
beam = torch.stack([torch.arange(beam_starts[batch], beam_ends[batch], device=beam_starts.device) for batch in range(gold_target_len.size(0))], dim=0)
else:
beam = predicted_lengths.topk(length_beam_size, dim=1)[1] + length_bias + 1
beam[beam < 4] = 4
beam[beam > 19] = 19
#print(beam)
return beam
|
{"hexsha": "059ea08906ffd0f53cb04aada0ed87c979beb381", "size": 13299, "ext": "py", "lang": "Python", "max_stars_repo_path": "decoding/decoding/mask_predict.py", "max_stars_repo_name": "ybCliff/VideoCaptioning", "max_stars_repo_head_hexsha": "93fc3b095c970e51e1e24909163a827df98d6ef3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-05-16T23:59:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-14T01:59:41.000Z", "max_issues_repo_path": "decoding/decoding/mask_predict.py", "max_issues_repo_name": "ybCliff/VideoCaptioning", "max_issues_repo_head_hexsha": "93fc3b095c970e51e1e24909163a827df98d6ef3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "decoding/decoding/mask_predict.py", "max_forks_repo_name": "ybCliff/VideoCaptioning", "max_forks_repo_head_hexsha": "93fc3b095c970e51e1e24909163a827df98d6ef3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-05-17T00:01:01.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-28T18:04:05.000Z", "avg_line_length": 45.3890784983, "max_line_length": 158, "alphanum_fraction": 0.647417099, "include": true, "reason": "import numpy", "num_tokens": 3315}
|
using MathOptInterface
using ParametricOptInterface
using BenchmarkTools
const MOI = MathOptInterface
const POI = ParametricOptInterface
import Pkg
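# Micro-benchmarks comparing model construction and parameter updates on a
# plain MOI.Utilities.Model versus a POI.Optimizer wrapper. N is the number
# of variables/parameters per model, M the number of constraints or
# objective rewrites per benchmark.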
function moi_add_variables(N::Int)
model = MOI.Utilities.Model{Float64}()
MOI.add_variables(model, N)
return nothing
end
function poi_add_variables(N::Int)
model = POI.Optimizer(MOI.Utilities.Model{Float64}())
MOI.add_variables(model, N)
return nothing
end
function poi_add_parameters(N::Int)
model = POI.Optimizer(MOI.Utilities.Model{Float64}())
MOI.add_constrained_variable.(model, POI.Parameter.(ones(N)));
return nothing
end
function poi_add_parameters_and_variables(N::Int)
model = POI.Optimizer(MOI.Utilities.Model{Float64}())
    MOI.add_variables(model, Int(N/2))
MOI.add_constrained_variable.(model, POI.Parameter.(ones(Int(N/2))))
return nothing
end
function poi_add_parameters_and_variables_alternating(N::Int)
model = POI.Optimizer(MOI.Utilities.Model{Float64}())
for i in 1:Int(N/2)
MOI.add_variable(model)
MOI.add_constrained_variable(model, POI.Parameter(1))
end
return nothing
end
function moi_add_saf_ctr(N::Int, M::Int)
model = MOI.Utilities.Model{Float64}()
x = MOI.add_variables(model, N)
for _ in 1:M
MOI.add_constraint(
model,
MOI.ScalarAffineFunction(
MOI.ScalarAffineTerm.(1.0, x),
0.0,
),
MOI.GreaterThan(1.0),
)
end
return nothing
end
function poi_add_saf_ctr(N::Int, M::Int)
model = POI.Optimizer(MOI.Utilities.Model{Float64}())
x = MOI.add_variables(model, N)
for _ in 1:M
MOI.add_constraint(
model,
MOI.ScalarAffineFunction(
MOI.ScalarAffineTerm.(1.0, x),
0.0,
),
MOI.GreaterThan(1.0),
)
end
return nothing
end
function poi_add_saf_variables_and_parameters_ctr(N::Int, M::Int)
model = POI.Optimizer(MOI.Utilities.Model{Float64}())
    x = MOI.add_variables(model, Int(N/2))
y = first.(MOI.add_constrained_variable.(model, POI.Parameter.(ones(Int(N/2)))))
for _ in 1:M
MOI.add_constraint(
model,
MOI.ScalarAffineFunction(
[MOI.ScalarAffineTerm.(1.0, x); MOI.ScalarAffineTerm.(1.0, y)],
0.0,
),
MOI.GreaterThan(1.0),
)
end
return nothing
end
function poi_add_saf_variables_and_parameters_ctr_parameter_update(N::Int, M::Int)
model = POI.Optimizer(MOI.Utilities.Model{Float64}())
    x = MOI.add_variables(model, Int(N/2))
y = first.(MOI.add_constrained_variable.(model, POI.Parameter.(ones(Int(N/2)))))
for _ in 1:M
MOI.add_constraint(
model,
MOI.ScalarAffineFunction(
[MOI.ScalarAffineTerm.(1.0, x); MOI.ScalarAffineTerm.(1.0, y)],
0.0,
),
MOI.GreaterThan(1.0),
)
end
MOI.set.(model, POI.ParameterValue(), y, 0.5)
POI.update_parameters!(model)
return nothing
end
function moi_add_sqf_variables_ctr(N::Int, M::Int)
model = MOI.Utilities.Model{Float64}()
x = MOI.add_variables(model, N)
for _ in 1:M
MOI.add_constraint(
model,
MOI.ScalarQuadraticFunction(
MOI.ScalarQuadraticTerm.(1.0, x, x),
MOI.ScalarAffineTerm{Float64}[],
0.0,
),
MOI.GreaterThan(1.0),
)
end
return nothing
end
function poi_add_sqf_variables_ctr(N::Int, M::Int)
model = POI.Optimizer(MOI.Utilities.Model{Float64}())
x = MOI.add_variables(model, N)
for _ in 1:M
MOI.add_constraint(
model,
MOI.ScalarQuadraticFunction(
MOI.ScalarQuadraticTerm.(1.0, x, x),
MOI.ScalarAffineTerm{Float64}[],
0.0,
),
MOI.GreaterThan(1.0),
)
end
return nothing
end
function poi_add_sqf_variables_parameters_ctr(N::Int, M::Int)
model = POI.Optimizer(MOI.Utilities.Model{Float64}())
    x = MOI.add_variables(model, Int(N/2))
y = first.(MOI.add_constrained_variable.(model, POI.Parameter.(ones(Int(N/2)))))
for _ in 1:M
MOI.add_constraint(
model,
MOI.ScalarQuadraticFunction(
MOI.ScalarQuadraticTerm.(1.0, x, y),
MOI.ScalarAffineTerm{Float64}[],
0.0,
),
MOI.GreaterThan(1.0),
)
end
return nothing
end
function poi_add_sqf_variables_parameters_ctr_parameter_update(N::Int, M::Int)
model = POI.Optimizer(MOI.Utilities.Model{Float64}())
    x = MOI.add_variables(model, Int(N/2))
y = first.(MOI.add_constrained_variable.(model, POI.Parameter.(ones(Int(N/2)))))
for _ in 1:M
MOI.add_constraint(
model,
MOI.ScalarQuadraticFunction(
MOI.ScalarQuadraticTerm.(1.0, x, y),
MOI.ScalarAffineTerm{Float64}[],
0.0,
),
MOI.GreaterThan(1.0),
)
end
MOI.set.(model, POI.ParameterValue(), y, 0.5)
POI.update_parameters!(model)
return nothing
end
function poi_add_sqf_parameters_parameters_ctr(N::Int, M::Int)
model = POI.Optimizer(MOI.Utilities.Model{Float64}())
    x = MOI.add_variables(model, Int(N/2))
y = first.(MOI.add_constrained_variable.(model, POI.Parameter.(ones(Int(N/2)))))
for _ in 1:M
MOI.add_constraint(
model,
MOI.ScalarQuadraticFunction(
MOI.ScalarQuadraticTerm.(1.0, y, y),
MOI.ScalarAffineTerm{Float64}[],
0.0,
),
MOI.GreaterThan(1.0),
)
end
return nothing
end
function poi_add_sqf_parameters_parameters_ctr_parameter_update(N::Int, M::Int)
model = POI.Optimizer(MOI.Utilities.Model{Float64}())
    x = MOI.add_variables(model, Int(N/2))
y = first.(MOI.add_constrained_variable.(model, POI.Parameter.(ones(Int(N/2)))))
for _ in 1:M
MOI.add_constraint(
model,
MOI.ScalarQuadraticFunction(
MOI.ScalarQuadraticTerm.(1.0, y, y),
MOI.ScalarAffineTerm{Float64}[],
0.0,
),
MOI.GreaterThan(1.0),
)
end
MOI.set.(model, POI.ParameterValue(), y, 0.5)
POI.update_parameters!(model)
return nothing
end
function moi_add_saf_obj(N::Int, M::Int)
model = MOI.Utilities.Model{Float64}()
x = MOI.add_variables(model, N)
for _ in 1:M
MOI.set(
model,
MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(),
MOI.ScalarAffineFunction(
MOI.ScalarAffineTerm.(1.0, x),
0.0,
)
)
end
return nothing
end
function poi_add_saf_obj(N::Int, M::Int)
model = POI.Optimizer(MOI.Utilities.Model{Float64}())
x = MOI.add_variables(model, N)
for _ in 1:M
MOI.set(
model,
MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(),
MOI.ScalarAffineFunction(
MOI.ScalarAffineTerm.(1.0, x),
0.0,
)
)
end
return nothing
end
function poi_add_saf_variables_and_parameters_obj(N::Int, M::Int)
model = POI.Optimizer(MOI.Utilities.Model{Float64}())
    x = MOI.add_variables(model, Int(N/2))
y = first.(MOI.add_constrained_variable.(model, POI.Parameter.(ones(Int(N/2)))))
for _ in 1:M
MOI.set(
model,
MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(),
MOI.ScalarAffineFunction(
[MOI.ScalarAffineTerm.(1.0, x); MOI.ScalarAffineTerm.(1.0, y)],
0.0,
)
)
end
return nothing
end
function poi_add_saf_variables_and_parameters_obj_parameter_update(N::Int, M::Int)
model = POI.Optimizer(MOI.Utilities.Model{Float64}())
    x = MOI.add_variables(model, Int(N/2))
y = first.(MOI.add_constrained_variable.(model, POI.Parameter.(ones(Int(N/2)))))
for _ in 1:M
MOI.set(
model,
MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(),
MOI.ScalarAffineFunction(
[MOI.ScalarAffineTerm.(1.0, x); MOI.ScalarAffineTerm.(1.0, y)],
0.0,
)
)
end
for _ in 1:M
MOI.set.(model, POI.ParameterValue(), y, 0.5)
POI.update_parameters!(model)
end
return nothing
end
function moi_add_sqf_variables_obj(N::Int, M::Int)
model = MOI.Utilities.Model{Float64}()
x = MOI.add_variables(model, N)
for _ in 1:M
MOI.set(
model,
MOI.ObjectiveFunction{MOI.ScalarQuadraticFunction{Float64}}(),
MOI.ScalarQuadraticFunction(
MOI.ScalarQuadraticTerm.(1.0, x, x),
MOI.ScalarAffineTerm{Float64}[],
0.0,
)
)
end
return nothing
end
function poi_add_sqf_variables_obj(N::Int, M::Int)
model = POI.Optimizer(MOI.Utilities.Model{Float64}())
x = MOI.add_variables(model, N)
for _ in 1:M
MOI.set(
model,
MOI.ObjectiveFunction{MOI.ScalarQuadraticFunction{Float64}}(),
MOI.ScalarQuadraticFunction(
MOI.ScalarQuadraticTerm.(1.0, x, x),
MOI.ScalarAffineTerm{Float64}[],
0.0,
)
)
end
return nothing
end
function poi_add_sqf_variables_parameters_obj(N::Int, M::Int)
model = POI.Optimizer(MOI.Utilities.Model{Float64}())
    x = MOI.add_variables(model, Int(N/2))
y = first.(MOI.add_constrained_variable.(model, POI.Parameter.(ones(Int(N/2)))))
for _ in 1:M
MOI.set(
model,
MOI.ObjectiveFunction{MOI.ScalarQuadraticFunction{Float64}}(),
MOI.ScalarQuadraticFunction(
MOI.ScalarQuadraticTerm.(1.0, x, y),
MOI.ScalarAffineTerm{Float64}[],
0.0,
)
)
end
return nothing
end
function poi_add_sqf_variables_parameters_obj_parameter_update(N::Int, M::Int)
model = POI.Optimizer(MOI.Utilities.Model{Float64}())
    x = MOI.add_variables(model, Int(N/2))
y = first.(MOI.add_constrained_variable.(model, POI.Parameter.(ones(Int(N/2)))))
for _ in 1:M
MOI.set(
model,
MOI.ObjectiveFunction{MOI.ScalarQuadraticFunction{Float64}}(),
MOI.ScalarQuadraticFunction(
MOI.ScalarQuadraticTerm.(1.0, x, y),
MOI.ScalarAffineTerm{Float64}[],
0.0,
)
)
end
for _ in 1:M
MOI.set.(model, POI.ParameterValue(), y, 0.5)
POI.update_parameters!(model)
end
return nothing
end
function poi_add_sqf_parameters_parameters_obj(N::Int, M::Int)
model = POI.Optimizer(MOI.Utilities.Model{Float64}())
    x = MOI.add_variables(model, Int(N/2))
y = first.(MOI.add_constrained_variable.(model, POI.Parameter.(ones(Int(N/2)))))
for _ in 1:M
MOI.set(
model,
MOI.ObjectiveFunction{MOI.ScalarQuadraticFunction{Float64}}(),
MOI.ScalarQuadraticFunction(
MOI.ScalarQuadraticTerm.(1.0, y, y),
MOI.ScalarAffineTerm{Float64}[],
0.0,
)
)
end
return nothing
end
function poi_add_sqf_parameters_parameters_obj_parameter_update(N::Int, M::Int)
model = POI.Optimizer(MOI.Utilities.Model{Float64}())
    x = MOI.add_variables(model, Int(N/2))
y = first.(MOI.add_constrained_variable.(model, POI.Parameter.(ones(Int(N/2)))))
for _ in 1:M
MOI.set(
model,
MOI.ObjectiveFunction{MOI.ScalarQuadraticFunction{Float64}}(),
MOI.ScalarQuadraticFunction(
MOI.ScalarQuadraticTerm.(1.0, y, y),
MOI.ScalarAffineTerm{Float64}[],
0.0,
)
)
end
for _ in 1:M
MOI.set.(model, POI.ParameterValue(), y, 0.5)
POI.update_parameters!(model)
end
return nothing
end
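# Driver: run each benchmark once with @btime, forcing a GC in between so
# allocations from one benchmark do not skew the next.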
function run_benchmarks(N::Int, M::Int)
println("Pkg status:")
Pkg.status()
println("")
GC.gc()
println("variables on a MOIU.Model.")
@btime moi_add_variables($N)
GC.gc()
println("variables on a POI.Optimizer.")
@btime poi_add_variables($N)
GC.gc()
println("parameters on a POI.Optimizer.")
@btime poi_add_parameters($N)
GC.gc()
println("parameters and variables on a POI.Optimizer.")
@btime poi_add_parameters_and_variables($N)
GC.gc()
println("alternating parameters and variables on a POI.Optimizer.")
@btime poi_add_parameters_and_variables_alternating($N)
GC.gc()
println("SAF constraint with variables on a MOIU.Model.")
@btime moi_add_saf_ctr($N, $M)
GC.gc()
println("SAF constraint with variables on a POI.Optimizer.")
@btime poi_add_saf_ctr($N, $M)
GC.gc()
println("SAF constraint with variables and parameters on a POI.Optimizer.")
@btime poi_add_saf_variables_and_parameters_ctr($N, $M)
GC.gc()
println("SQF constraint with variables on a MOIU.Model{Float64}.")
@btime moi_add_sqf_variables_ctr($N, $M)
GC.gc()
println("SQF constraint with variables on a POI.Optimizer.")
@btime poi_add_sqf_variables_ctr($N, $M)
GC.gc()
println("SQF constraint with product of variables and parameters on a POI.Optimizer.")
@btime poi_add_sqf_variables_parameters_ctr($N, $M)
GC.gc()
println("SQF constraint with product of parameters on a POI.Optimizer.")
@btime poi_add_sqf_parameters_parameters_ctr($N, $M)
GC.gc()
println("SAF objective with variables on a MOIU.Model.")
@btime moi_add_saf_obj($N, $M)
GC.gc()
println("SAF objective with variables on a POI.Optimizer.")
@btime poi_add_saf_obj($N, $M)
GC.gc()
println("SAF objective with variables and parameters on a POI.Optimizer.")
@btime poi_add_saf_variables_and_parameters_obj($N, $M)
GC.gc()
println("SQF objective with variables on a MOIU.Model.")
@btime moi_add_sqf_variables_obj($N, $M)
GC.gc()
println("SQF objective with variables on a POI.Optimizer.")
@btime poi_add_sqf_variables_obj($N, $M)
GC.gc()
println("SQF objective with product of variables and parameters on a POI.Optimizer.")
@btime poi_add_sqf_variables_parameters_obj($N, $M)
GC.gc()
println("SQF objective with product of parameters on a POI.Optimizer.")
@btime poi_add_sqf_parameters_parameters_obj($N, $M)
GC.gc()
println("Update parameters in SAF constraint with variables and parameters on a POI.Optimizer.")
@btime poi_add_saf_variables_and_parameters_ctr_parameter_update($N, $M)
GC.gc()
println("Update parameters in SAF objective with variables and parameters on a POI.Optimizer.")
@btime poi_add_saf_variables_and_parameters_obj_parameter_update($N, $M)
GC.gc()
println("Update parameters in SQF constraint with product of variables and parameters on a POI.Optimizer.")
@btime poi_add_sqf_variables_parameters_ctr_parameter_update($N, $M)
GC.gc()
println("Update parameters in SQF constraint with product of parameters on a POI.Optimizer.")
@btime poi_add_sqf_parameters_parameters_ctr_parameter_update($N, $M)
GC.gc()
println("Update parameters in SQF objective with product of variables and parameters on a POI.Optimizer.")
@btime poi_add_sqf_variables_parameters_obj_parameter_update($N, $M)
GC.gc()
println("Update parameters in SQF objective with product of parameters on a POI.Optimizer.")
@btime poi_add_sqf_parameters_parameters_obj_parameter_update($N, $M)
return nothing
end
N = 10_000
M = 100
run_benchmarks(N, M)
|
{"hexsha": "0195067bfcc5bd5199713201078111f02ca34e83", "size": 16433, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "benchmark/run_benchmarks.jl", "max_stars_repo_name": "tomasfmg/ParametricOptInterface.jl", "max_stars_repo_head_hexsha": "9f4a7c969374aa2dd16187d8d8c280684e606577", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2020-06-05T06:41:41.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-02T10:51:54.000Z", "max_issues_repo_path": "benchmark/run_benchmarks.jl", "max_issues_repo_name": "tomasfmg/ParametricOptInterface.jl", "max_issues_repo_head_hexsha": "9f4a7c969374aa2dd16187d8d8c280684e606577", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2020-06-04T22:55:56.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-30T22:51:38.000Z", "max_forks_repo_path": "benchmark/run_benchmarks.jl", "max_forks_repo_name": "tomasfmg/ParametricOptInterface.jl", "max_forks_repo_head_hexsha": "9f4a7c969374aa2dd16187d8d8c280684e606577", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-06-18T12:31:29.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-15T14:10:16.000Z", "avg_line_length": 33.6053169734, "max_line_length": 111, "alphanum_fraction": 0.5967869531, "num_tokens": 4372}
|
[STATEMENT]
lemma (in cpx_sq_mat) spectrum_to_pm_idx_bij:
assumes "hermitian A"
and "A\<in> fc_mats"
shows "bij_betw (spectrum_to_pm_idx A) (spectrum A) {..< card (spectrum A)}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. bij_betw (spectrum_to_pm_idx A) (spectrum A) {..<card (spectrum A)}
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. bij_betw (spectrum_to_pm_idx A) (spectrum A) {..<card (spectrum A)}
[PROOF STEP]
define p where "p = proj_meas_size (make_pm A)"
[PROOF STATE]
proof (state)
this:
p = proj_meas_size (make_pm A)
goal (1 subgoal):
1. bij_betw (spectrum_to_pm_idx A) (spectrum A) {..<card (spectrum A)}
[PROOF STEP]
define M where "M = proj_meas_outcomes (make_pm A)"
[PROOF STATE]
proof (state)
this:
M = proj_meas_outcomes (make_pm A)
goal (1 subgoal):
1. bij_betw (spectrum_to_pm_idx A) (spectrum A) {..<card (spectrum A)}
[PROOF STEP]
have es: "char_poly A = (\<Prod> (e :: complex) \<leftarrow> (eigvals A). [:- e, 1:])"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. char_poly A = (\<Prod>e\<leftarrow>eigvals A. [:- e, 1:])
[PROOF STEP]
using assms fc_mats_carrier eigvals_poly_length dim_eq
[PROOF STATE]
proof (prove)
using this:
hermitian A
A \<in> fc_mats
fc_mats = carrier_mat dimR dimC
?M \<in> carrier_mat ?n ?n \<Longrightarrow> char_poly ?M = (\<Prod>a\<leftarrow>eigvals ?M. [:- a, 1:]) \<and> length (eigvals ?M) = dim_row ?M
dimR = dimC
goal (1 subgoal):
1. char_poly A = (\<Prod>e\<leftarrow>eigvals A. [:- e, 1:])
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
char_poly A = (\<Prod>e\<leftarrow>eigvals A. [:- e, 1:])
goal (1 subgoal):
1. bij_betw (spectrum_to_pm_idx A) (spectrum A) {..<card (spectrum A)}
[PROOF STEP]
obtain B U Q where us: "unitary_schur_decomposition A (eigvals A) = (B,U,Q)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>B U Q. unitary_schur_decomposition A (eigvals A) = (B, U, Q) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (cases "unitary_schur_decomposition A (eigvals A)")
[PROOF STATE]
proof (state)
this:
unitary_schur_decomposition A (eigvals A) = (B, U, Q)
goal (1 subgoal):
1. bij_betw (spectrum_to_pm_idx A) (spectrum A) {..<card (spectrum A)}
[PROOF STEP]
hence pr: "similar_mat_wit A B U (Complex_Matrix.adjoint U) \<and>
diag_mat B = (eigvals A)"
[PROOF STATE]
proof (prove)
using this:
unitary_schur_decomposition A (eigvals A) = (B, U, Q)
goal (1 subgoal):
1. similar_mat_wit A B U (Complex_Matrix.adjoint U) \<and> diag_mat B = eigvals A
[PROOF STEP]
using hermitian_eigenvalue_real assms fc_mats_carrier es dim_eq
[PROOF STATE]
proof (prove)
using this:
unitary_schur_decomposition A (eigvals A) = (B, U, Q)
\<lbrakk>?A \<in> carrier_mat ?n ?n; hermitian ?A; char_poly ?A = (\<Prod>e\<leftarrow>?es. [:- e, 1:]); unitary_schur_decomposition ?A ?es = (?B, ?P, ?Q)\<rbrakk> \<Longrightarrow> similar_mat_wit ?A ?B ?P (Complex_Matrix.adjoint ?P) \<and> diagonal_mat ?B \<and> diag_mat ?B = ?es \<and> Complex_Matrix.unitary ?P \<and> (\<forall>i<?n. ?B $$ (i, i) \<in> \<real>)
hermitian A
A \<in> fc_mats
fc_mats = carrier_mat dimR dimC
char_poly A = (\<Prod>e\<leftarrow>eigvals A. [:- e, 1:])
dimR = dimC
goal (1 subgoal):
1. similar_mat_wit A B U (Complex_Matrix.adjoint U) \<and> diag_mat B = eigvals A
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
similar_mat_wit A B U (Complex_Matrix.adjoint U) \<and> diag_mat B = eigvals A
goal (1 subgoal):
1. bij_betw (spectrum_to_pm_idx A) (spectrum A) {..<card (spectrum A)}
[PROOF STEP]
have "(p,M) = make_pm A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (p, M) = make_pm A
[PROOF STEP]
unfolding p_def M_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (proj_meas_size (make_pm A), proj_meas_outcomes (make_pm A)) = make_pm A
[PROOF STEP]
using make_pm_decomp
[PROOF STATE]
proof (prove)
using this:
make_pm ?A = (proj_meas_size (make_pm ?A), proj_meas_outcomes (make_pm ?A))
goal (1 subgoal):
1. (proj_meas_size (make_pm A), proj_meas_outcomes (make_pm A)) = make_pm A
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
(p, M) = make_pm A
goal (1 subgoal):
1. bij_betw (spectrum_to_pm_idx A) (spectrum A) {..<card (spectrum A)}
[PROOF STEP]
hence "p = dist_el_card B"
[PROOF STATE]
proof (prove)
using this:
(p, M) = make_pm A
goal (1 subgoal):
1. p = dist_el_card B
[PROOF STEP]
using assms us
[PROOF STATE]
proof (prove)
using this:
(p, M) = make_pm A
hermitian A
A \<in> fc_mats
unitary_schur_decomposition A (eigvals A) = (B, U, Q)
goal (1 subgoal):
1. p = dist_el_card B
[PROOF STEP]
unfolding make_pm_def
[PROOF STATE]
proof (prove)
using this:
(p, M) = (let (B, U, uu_) = unitary_schur_decomposition A (eigvals A) in (dist_el_card B, mk_meas_outcome B U))
hermitian A
A \<in> fc_mats
unitary_schur_decomposition A (eigvals A) = (B, U, Q)
goal (1 subgoal):
1. p = dist_el_card B
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
p = dist_el_card B
goal (1 subgoal):
1. bij_betw (spectrum_to_pm_idx A) (spectrum A) {..<card (spectrum A)}
[PROOF STEP]
have dimB: "B \<in> carrier_mat dimR dimR"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. B \<in> carrier_mat dimR dimR
[PROOF STEP]
using dim_eq pr assms
fc_mats_carrier
[PROOF STATE]
proof (prove)
using this:
dimR = dimC
similar_mat_wit A B U (Complex_Matrix.adjoint U) \<and> diag_mat B = eigvals A
hermitian A
A \<in> fc_mats
fc_mats = carrier_mat dimR dimC
goal (1 subgoal):
1. B \<in> carrier_mat dimR dimR
[PROOF STEP]
unfolding similar_mat_wit_def
[PROOF STATE]
proof (prove)
using this:
dimR = dimC
(let n = dim_row A in {A, B, U, Complex_Matrix.adjoint U} \<subseteq> carrier_mat n n \<and> U * Complex_Matrix.adjoint U = 1\<^sub>m n \<and> Complex_Matrix.adjoint U * U = 1\<^sub>m n \<and> A = U * B * Complex_Matrix.adjoint U) \<and> diag_mat B = eigvals A
hermitian A
A \<in> fc_mats
fc_mats = carrier_mat dimR dimC
goal (1 subgoal):
1. B \<in> carrier_mat dimR dimR
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
B \<in> carrier_mat dimR dimR
goal (1 subgoal):
1. bij_betw (spectrum_to_pm_idx A) (spectrum A) {..<card (spectrum A)}
[PROOF STEP]
have Bvals: "diag_mat B = eigvals A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. diag_mat B = eigvals A
[PROOF STEP]
using pr hermitian_decomp_eigenvalues[of A B U]
[PROOF STATE]
proof (prove)
using this:
similar_mat_wit A B U (Complex_Matrix.adjoint U) \<and> diag_mat B = eigvals A
hermitian_decomp A B U \<Longrightarrow> diag_mat B = eigvals A
goal (1 subgoal):
1. diag_mat B = eigvals A
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
diag_mat B = eigvals A
goal (1 subgoal):
1. bij_betw (spectrum_to_pm_idx A) (spectrum A) {..<card (spectrum A)}
[PROOF STEP]
have "diag_elems B = spectrum A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. diag_elems B = spectrum A
[PROOF STEP]
unfolding spectrum_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. diag_elems B = set (eigvals A)
[PROOF STEP]
using dimB Bvals
diag_elems_set_diag_mat[of B]
[PROOF STATE]
proof (prove)
using this:
B \<in> carrier_mat dimR dimR
diag_mat B = eigvals A
diag_elems B = set (diag_mat B)
goal (1 subgoal):
1. diag_elems B = set (eigvals A)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
diag_elems B = spectrum A
goal (1 subgoal):
1. bij_betw (spectrum_to_pm_idx A) (spectrum A) {..<card (spectrum A)}
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
diag_elems B = spectrum A
goal (1 subgoal):
1. bij_betw (spectrum_to_pm_idx A) (spectrum A) {..<card (spectrum A)}
[PROOF STEP]
have "dist_el_card B = card (spectrum A)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. dist_el_card B = card (spectrum A)
[PROOF STEP]
using spectrum_size[of A p M] assms
\<open>(p,M) = make_pm A\<close> \<open>p = dist_el_card B\<close>
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>hermitian A; A \<in> fc_mats; make_pm A = (p, M)\<rbrakk> \<Longrightarrow> p = card (spectrum A)
hermitian A
A \<in> fc_mats
(p, M) = make_pm A
p = dist_el_card B
goal (1 subgoal):
1. dist_el_card B = card (spectrum A)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
dist_el_card B = card (spectrum A)
goal (1 subgoal):
1. bij_betw (spectrum_to_pm_idx A) (spectrum A) {..<card (spectrum A)}
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
diag_elems B = spectrum A
dist_el_card B = card (spectrum A)
[PROOF STEP]
show "bij_betw (spectrum_to_pm_idx A) (spectrum A) {..< card (spectrum A)}"
[PROOF STATE]
proof (prove)
using this:
diag_elems B = spectrum A
dist_el_card B = card (spectrum A)
goal (1 subgoal):
1. bij_betw (spectrum_to_pm_idx A) (spectrum A) {..<card (spectrum A)}
[PROOF STEP]
using diag_el_to_idx_bij us
[PROOF STATE]
proof (prove)
using this:
diag_elems B = spectrum A
dist_el_card B = card (spectrum A)
bij_betw (diag_el_to_idx ?B) (diag_elems ?B) {..<dist_el_card ?B}
unitary_schur_decomposition A (eigvals A) = (B, U, Q)
goal (1 subgoal):
1. bij_betw (spectrum_to_pm_idx A) (spectrum A) {..<card (spectrum A)}
[PROOF STEP]
unfolding spectrum_to_pm_idx_def Let_def
[PROOF STATE]
proof (prove)
using this:
diag_elems B = spectrum A
dist_el_card B = card (spectrum A)
bij_betw (diag_el_to_idx ?B) (diag_elems ?B) {..<dist_el_card ?B}
unitary_schur_decomposition A (eigvals A) = (B, U, Q)
goal (1 subgoal):
1. bij_betw (\<lambda>x. case unitary_schur_decomposition A (eigvals A) of (B, U, uu_) \<Rightarrow> diag_el_to_idx B x) (spectrum A) {..<card (spectrum A)}
[PROOF STEP]
by (metis (mono_tags, lifting) bij_betw_cong case_prod_conv)
[PROOF STATE]
proof (state)
this:
bij_betw (spectrum_to_pm_idx A) (spectrum A) {..<card (spectrum A)}
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 4381, "file": "Projective_Measurements_Projective_Measurements", "length": 40}
|
#!/usr/bin/env python3
import os
from glob import glob
import numpy as np
import random
import time
class Vocabulary(object):
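    """Word/id lookup table loaded from a tab-separated vocabulary file;
    records the ids of the special <s>, </s> and <unk> tokens."""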
    def __init__(self, filename, validate_file=False):
self._id_to_word = []
self._word_to_id = {}
self._unk = -1
self._bos = -1
self._eos = -1
idx = 0
with open(filename, 'r', encoding='utf8') as f:
for line in f.readlines():
line = line.strip()
if line is None or line == '':
continue
ss = line.split('\t')
if len(ss) != 2:
continue
if ss[0] == '<s>':
self._bos = idx
elif ss[0] == '</s>':
self._eos = idx
elif ss[0] == '<unk>':
self._unk = idx
self._id_to_word.append(ss[0])
self._word_to_id[ss[0]] = idx
idx += 1
self._size = len(self._id_to_word)
        if validate_file:
if self._bos == -1 or self._eos == -1 or self._unk == -1:
raise ValueError("Ensure the vocabulary file has"
"<s>, </s>, <unk> tokens!")
@property
def bos(self):
return self._bos
@property
def eos(self):
return self._eos
@property
def unk(self):
return self._unk
@property
def size(self):
return len(self._id_to_word)
def word_to_id(self, word):
if word in self._word_to_id:
return self._word_to_id[word]
return self._unk
def id_to_word(self, id):
if id >= self._size:
return '<unk>'
return self._id_to_word[id]
def decode(self, ids):
return ' '.join([self.id_to_word(id) for id in ids])
def encode(self, sentence, reverse=False):
# todo: need to check
ids = [self.word_to_id(word) for word in sentence.split()]
if reverse:
return np.array([self.eos] + ids + [self.bos], dtype=np.int32)
else:
return np.array([self.bos] + ids + [self.eos], dtype=np.int32)
def _get_batch(generator, batchsize, numsteps):
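    """Pack a stream of encoded sentences into contiguous [batchsize, numsteps]
    input/target id arrays for truncated BPTT; each batch row carries its own
    running stream of sentences, so rows are filled without padding."""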
cur_stream = [None]*batchsize
no_more_data = False
while True:
inputs = np.zeros([batchsize, numsteps], np.int32)
targets = np.zeros([batchsize, numsteps], np.int32)
for i in range(batchsize):
cur_pos = 0
while cur_pos < numsteps:
if cur_stream[i] is None or len(cur_stream[i]) <= 1:
                    try:
                        cur_stream[i] = list(next(generator))
                    except StopIteration:
                        no_more_data = True
                        break
how_many = min(len(cur_stream[i])-1, numsteps-cur_pos)
next_pos = cur_pos + how_many
inputs[i, cur_pos:next_pos] = cur_stream[i][:how_many]
targets[i, cur_pos:next_pos] = cur_stream[i][1:how_many+1]
cur_pos = next_pos
cur_stream[i] = cur_stream[i][how_many:]
if no_more_data:
break
X = {'token_ids': inputs, 'target_ids': targets}
yield X
class LMDataset(object):
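    """Sentence-level language-model dataset over sharded text files; shards
    are loaded one at a time (optionally reversed and/or shuffled) and encoded
    with the vocabulary."""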
def __init__(self, filepattern, vocab, reverse=False, test=False,
shuffle_on_load=False):
self._vocab = vocab
self._all_shards = glob(filepattern)
print('Found {} shards at {}'.format(len(self._all_shards), filepattern))
self._shards_to_choose = []
self._reverse = reverse
self._test = test
self._shuffle_on_load = shuffle_on_load
self._ids = self._load_random_shard()
def _choose_random_shards(self):
if len(self._shards_to_choose) == 0:
self._shards_to_choose = list(self._all_shards)
random.shuffle(self._shards_to_choose)
shard_name = self._shards_to_choose.pop()
return shard_name
def _load_shard(self, shard_name):
print('Loading data from: {}'.format(shard_name))
with open(shard_name, 'r', encoding='utf8') as f:
sentences_raw = f.readlines()
if self._reverse:
sentences = []
for sent in sentences_raw:
splitted = sent.split()
splitted.reverse()
sentences.append(' '.join(splitted))
else:
sentences = sentences_raw
if self._shuffle_on_load:
random.shuffle(sentences)
ids = [self.vocab.encode(sent, self._reverse) for sent in sentences]
print('Loaded {} sentences.'.format(len(ids)))
print('Finished loading!')
return list(ids)
def _load_random_shard(self):
if self._test:
            if len(self._all_shards) == 0:
                # PEP 479: raising StopIteration here would surface as a
                # RuntimeError inside the get_sentence generator, so signal
                # exhaustion by returning None instead.
                return None
            else:
                shard_name = self._all_shards.pop()
else:
shard_name = self._choose_random_shards()
ids = self._load_shard(shard_name)
self._i = 0
self._nids = len(ids)
return ids
def get_sentence(self):
while True:
            if self._i == self._nids:
                ids = self._load_random_shard()
                if ids is None:
                    return
                self._ids = ids
ret = self._ids[self._i]
self._i += 1
yield ret
def iter_batches(self, batchsize, numsteps):
for X in _get_batch(self.get_sentence(), batchsize, numsteps):
yield X
# return X
@property
def vocab(self):
return self._vocab
class BidirectionalLMDataset(object):
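    """Joins a forward and a reversed LMDataset; reversed-direction arrays are
    added to each batch under '*_reverse' keys."""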
def __init__(self, filepattern, vocab, test=False, shuffle_on_load=False):
self._forward = LMDataset(filepattern, vocab, reverse=False, test=test,
shuffle_on_load=shuffle_on_load)
self._backward = LMDataset(filepattern, vocab, reverse=True, test=test,
shuffle_on_load=shuffle_on_load)
def iter_batches(self, batchsize, numsteps):
for X, Xr in zip(
_get_batch(self._forward.get_sentence(), batchsize, numsteps),
_get_batch(self._backward.get_sentence(), batchsize, numsteps)
):
for k,v in Xr.items():
X[k+'_reverse'] = v
yield X
def test_LMDataset():
vocab_file = '../data/example.vocab'
vocab = Vocabulary(vocab_file)
filepattern = '../data/*_seg_words.txt'
ds = LMDataset(filepattern, vocab)
data_batch = ds.iter_batches(4, 50)
icnt = 0
for idx, batch in enumerate(data_batch):
print('inputs:\t' + vocab.decode(batch['token_ids'][0]))
        print('outputs:\t' + vocab.decode(batch['target_ids'][0]))
# time.sleep(1)
        icnt += 1
        if icnt % 10 == 0:
            break
print('\n\n\n\n')
print('===>when test mode:')
ds = LMDataset(filepattern, vocab, test=True)
data_batch = ds.iter_batches(512, 50)
for idx, batch in enumerate(data_batch):
print('inputs:\t' + vocab.decode(batch['token_ids'][0]))
        print('outputs:\t' + vocab.decode(batch['target_ids'][0]))
print('\n\n\n\n')
def test_BidirectionalLMDataset():
vocab_file = '../data/example.vocab'
vocab = Vocabulary(vocab_file)
filepattern = '../data/*_seg_words.txt'
ds = BidirectionalLMDataset(filepattern, vocab)
data_batch = ds.iter_batches(512, 50)
for index, batch in enumerate(data_batch):
print('inputs:\t' + vocab.decode(batch['token_ids'][0]))
        print('outputs:\t' + vocab.decode(batch['target_ids'][0]))
print('inputs reverse:\t' + vocab.decode(batch['token_ids_reverse'][0]))
        print('outputs reverse:\t' + vocab.decode(batch['target_ids_reverse'][0]))
print('\n')
if __name__ == '__main__':
# test_LMDataset()
test_BidirectionalLMDataset()
|
{"hexsha": "ae22f7e11bba11883e19ecaaab8507746ec1847e", "size": 7828, "ext": "py", "lang": "Python", "max_stars_repo_path": "subword/bilm/data.py", "max_stars_repo_name": "searobbersduck/ELMo_Chin", "max_stars_repo_head_hexsha": "5d9b2f0759ee3a46a4a1e20c08cc26109b7b90c9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 111, "max_stars_repo_stars_event_min_datetime": "2018-08-28T17:46:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T00:03:13.000Z", "max_issues_repo_path": "subword/bilm/data.py", "max_issues_repo_name": "sladesha/ELMo_Chin", "max_issues_repo_head_hexsha": "5d9b2f0759ee3a46a4a1e20c08cc26109b7b90c9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2018-08-03T09:41:39.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-02T07:58:45.000Z", "max_forks_repo_path": "subword/bilm/data.py", "max_forks_repo_name": "sladesha/ELMo_Chin", "max_forks_repo_head_hexsha": "5d9b2f0759ee3a46a4a1e20c08cc26109b7b90c9", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 34, "max_forks_repo_forks_event_min_datetime": "2018-09-26T08:02:16.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-02T14:44:57.000Z", "avg_line_length": 34.3333333333, "max_line_length": 81, "alphanum_fraction": 0.5619570772, "include": true, "reason": "import numpy", "num_tokens": 1884}
|
module #6 where
open import Level
open import Data.Bool
open import Relation.Binary.PropositionalEquality
{-
Exercise 1.6. Show that if we define A × B :≡ ∏(x:2) rec2(U, A, B, x), then we can give a
definition of indA×B for which the definitional equalities stated in §1.5 hold propositionally (i.e. using equality
types). (This requires the function extensionality axiom, which is introduced in §2.9.)
-}
rec₂ : ∀{c}{C : Set c} → C → C → Bool → C
rec₂ c₀ c₁ true = c₁
rec₂ c₀ c₁ false = c₀
ind₂ : ∀{c}(C : Bool → Set c) → C false → C true → (x : Bool) → C x
ind₂ C c₀ c₁ true = c₁
ind₂ C c₀ c₁ false = c₀
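-- A × B is encoded as a dependent function over Bool: its value at true is
-- the first component and its value at false the second.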
_×_ : ∀ {i} → Set i → Set i → Set i
A × B = (b : Bool) → if b then A else B
module ProductTwo {a}{A B : Set a} where
_,_ : A → B → A × B
_,_ x y true = x
_,_ x y false = y
proj₁ : A × B → A
proj₁ x = x true
proj₂ : A × B → B
proj₂ x = x false
postulate
extensionality : ∀ {a b} {A : Set a} {B : A → Set b} (f g : (a : A) → B a) → (∀ x → f x ≡ g x) → f ≡ g
indₓ₂ : ∀{c}{C : A × B -> Set c} → (f : (x : A)(y : B) → C (x , y)) → (x : A × B) → C (proj₁ x , proj₂ x)
indₓ₂ f x = f (proj₁ x) (proj₂ x)
indₓ₂-β : ∀{c}{C : A × B -> Set c} → (f : (x : A)(y : B) → C (x , y)) → (x : A × B) → indₓ₂ {C = C} f x ≡ f (proj₁ x) (proj₂ x)
indₓ₂-β f x = refl
|
{"hexsha": "b72a8d0874fdb73cce0fdb715f55eaa80b9ebfbb", "size": 1292, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "Chapter1/#6.agda", "max_stars_repo_name": "CodaFi/HoTT-Exercises", "max_stars_repo_head_hexsha": "3411b253b0a49a5f9c3301df175ae8ecdc563b12", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Chapter1/#6.agda", "max_issues_repo_name": "CodaFi/HoTT-Exercises", "max_issues_repo_head_hexsha": "3411b253b0a49a5f9c3301df175ae8ecdc563b12", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Chapter1/#6.agda", "max_forks_repo_name": "CodaFi/HoTT-Exercises", "max_forks_repo_head_hexsha": "3411b253b0a49a5f9c3301df175ae8ecdc563b12", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.7111111111, "max_line_length": 129, "alphanum_fraction": 0.5472136223, "num_tokens": 549}
|
from itertools import combinations
import numpy as np
class FastCausalInference:
def __init__(self, data, conditional_independence_test):
self.data = data
sample_counts = [len(data[key]) for key in data]
for sample_count in sample_counts:
if sample_count != sample_counts[0]:
raise ValueError('All Variables Should Contain the Same Number of Samples!')
self.sample_count = sample_counts[0]
self.conditional_independence_test = conditional_independence_test
def initialize_edges(self, nodes):
edges = set()
for node_from in nodes:
for node_to in nodes:
if node_from != node_to:
edges.add((node_from, node_to))
return edges
def calculate_adjacencies(self, edges):
adjacencies = {}
for edge in edges:
try:
adjacencies[edge[0]].add(edge[1])
except KeyError:
adjacencies[edge[0]] = {edge[1]}
return adjacencies
def select_edge_with_l(self, edges, edges_checked, adjacencies, l):
selected_edge = None
for edge in edges:
if len(adjacencies[edge[0]].difference({edge[1]})) >= l and edge not in edges_checked:
selected_edge = edge
break
return selected_edge
def create_edges_dict(self, edges, reorient_tails=False):
edges_dict = {}
for edge in edges:
if edge[0] not in edges_dict:
edges_dict[edge[0]] = {}
if edge[1] not in edges_dict[edge[0]]:
if reorient_tails:
edges_dict[edge[0]][edge[1]] = (edge[0], edge[1], 'o', 'o')
else:
edges_dict[edge[0]][edge[1]] = edge
return edges_dict
def create_graph_dict(self, edges):
graph_dict = {}
for edge in edges:
try:
graph_dict[edge[0]].add(edge[1])
except KeyError:
graph_dict[edge[0]] = set()
graph_dict[edge[0]].add(edge[1])
try:
graph_dict[edge[1]].add(edge[0])
except KeyError:
graph_dict[edge[1]] = set()
graph_dict[edge[1]].add(edge[0])
return graph_dict
def orient_unshield_triples(self, edges, separation_set):
graph_dict = self.create_graph_dict(edges)
edges_dict = self.create_edges_dict(edges, True)
unshielded_triples = set()
for node_left in graph_dict:
for node_middle in graph_dict[node_left]:
for node_right in graph_dict[node_middle]:
if node_left == node_right:
continue
if node_left not in graph_dict[node_right]:
unshielded_triples.add((node_left, node_middle, node_right))
for unshielded_triple in unshielded_triples:
try:
separations = separation_set[(unshielded_triple[0], unshielded_triple[2])]
except KeyError:
separations = []
            if unshielded_triple[1] not in separations:
current_edge = edges_dict[unshielded_triple[0]][unshielded_triple[1]]
edges_dict[unshielded_triple[0]][unshielded_triple[1]] = \
(current_edge[0], current_edge[1], current_edge[2], '>')
if current_edge[2] == 'o':
arrowhead = 'o'
else:
arrowhead = '>'
edges_dict[unshielded_triple[1]][unshielded_triple[0]] = \
(current_edge[1], current_edge[0], '<', arrowhead)
return [edges_dict[node_from][node_to] for node_from in edges_dict for node_to in edges_dict[node_from]]
def edge_selection_based_on_adjacencies(self, data, edges):
separation_set = {}
l = 0
while True:
l += 1
edges_checked = set()
while True:
adjacencies = self.calculate_adjacencies(edges)
selected_edge = self.select_edge_with_l(edges, edges_checked, adjacencies, l)
if selected_edge is None:
break
edges_checked.add(selected_edge)
for conditions in combinations(adjacencies[selected_edge[0]].difference({selected_edge[1]}), r=l):
x = np.array(data[selected_edge[0]]).reshape(-1, 1)
y = np.array(data[selected_edge[1]]).reshape(-1, 1)
z = np.concatenate([np.array(data[condition]).reshape(-1, 1) for condition in conditions], axis=1)
is_independent = self.conditional_independence_test(x, y, z)
if is_independent:
edges.remove(selected_edge)
edges.remove((selected_edge[1], selected_edge[0]))
separation_set[selected_edge] = conditions
break
adjacencies = self.calculate_adjacencies(edges)
check_adjacencies = True
for edge in edges:
if len(adjacencies[edge[0]].difference({edge[1]})) >= l:
check_adjacencies = False
break
if check_adjacencies:
break
return edges, separation_set
def check_path_eligibility(self, path, edges_dict):
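        # Possible-D-SEP criterion: every interior node of the path must be a
        # collider on the path or lie in a triangle with its path neighbours.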
for i in range(len(path) - 2):
sub_path = path[i:i + 3]
is_collider = edges_dict[sub_path[0]][sub_path[1]][3] == '>' and edges_dict[sub_path[1]][sub_path[2]][
2] == '<'
is_triangle = \
sub_path[1] in edges_dict[sub_path[0]] and \
sub_path[2] in edges_dict[sub_path[1]] and \
sub_path[0] in edges_dict[sub_path[2]]
is_eligible = is_collider or is_triangle
if not is_eligible:
return False
return True
def get_possible_d_sep(self, edges_dict):
nodes = list(edges_dict.keys())
possible_d_sep = {}
for node in nodes:
for node_to in nodes:
if node == node_to:
continue
paths = [[node]]
while True:
found_paths = []
paths_next = []
for path in paths:
neighbours = list(edges_dict[path[-1]].keys())
for neighbour in neighbours:
if neighbour == node_to:
found_paths.append(path + [neighbour])
elif neighbour not in path:
paths_next.append(path + [neighbour])
paths = paths_next
if len(paths) == 0:
break
variable_checked = False
for path in found_paths:
if len(path) >= 3:
if self.check_path_eligibility(path, edges_dict):
try:
possible_d_sep[node].add(node_to)
except KeyError:
possible_d_sep[node] = {node_to}
variable_checked = True
break
if variable_checked:
break
if node not in possible_d_sep:
possible_d_sep[node] = set()
return possible_d_sep
def edge_selection_based_on_possible_d_sep(self, data, edges, separation_set):
nodes = set()
for edge in edges:
nodes.add(edge[0])
nodes.add(edge[1])
edges_dict = self.create_edges_dict(edges, False)
for node in nodes:
possible_d_sep = self.get_possible_d_sep(edges_dict)
for neighbour in list(edges_dict[node].keys()):
for l in range(1, len(possible_d_sep[node]) + 1):
selected_combinations = \
[elem for elem in combinations(possible_d_sep[node].difference({neighbour}), r=l)]
if len(selected_combinations) == 0:
break
is_edge_deleted = False
for conditions in selected_combinations:
x = np.array(data[node]).reshape(-1, 1)
y = np.array(data[neighbour]).reshape(-1, 1)
z = np.concatenate(
[np.array(data[condition]).reshape(-1, 1) for condition in conditions], axis=1
)
is_independent = self.conditional_independence_test(x, y, z)
if is_independent:
del edges_dict[node][neighbour]
del edges_dict[neighbour][node]
separation_set[(node, neighbour)] = conditions
is_edge_deleted = True
break
if is_edge_deleted:
break
return [edges_dict[node_from][node_to] for node_from in edges_dict for node_to in edges_dict[node_from]], \
separation_set
def infer_separation_set_for_edges(self, data, edges):
nodes = set(data.keys())
edges_to_check = {(edge[0], edge[1]) for edge in edges}
missing_edges = set()
for node_from in nodes:
for node_to in nodes:
if (node_from, node_to) not in edges_to_check:
missing_edges.add((node_from, node_to))
separation_set = {}
edges_dict = self.create_edges_dict(edges, False)
for edge in missing_edges:
separation_set[edge] = \
{node_to for node_to in edges_dict[edge[0]]}.union({node_to for node_to in edges_dict[edge[1]]})
return separation_set
def infer_skeleton(self):
edges = self.initialize_edges(list(self.data.keys()))
edges, separation_set = self.edge_selection_based_on_adjacencies(self.data, edges)
edges = self.orient_unshield_triples(edges, separation_set)
edges, separation_set = self.edge_selection_based_on_possible_d_sep(self.data, edges, separation_set)
edges = self.orient_unshield_triples(edges, separation_set)
return edges
def bootstrap_infer_skeleton(self, bootstrap_samples=100, bootstrap_sample_ratio=1.0, bootstrap_edge_threshold=0.95):
edge_counts = {}
for i in range(bootstrap_samples):
print('Inferring bootstrap sample no.', i)
sample_indices = np.random.choice(self.sample_count, size=int(self.sample_count * bootstrap_sample_ratio))
data_sampled = {
item: [self.data[item][sample_idx] for sample_idx in sample_indices]
for item in self.data
}
edges = self.initialize_edges(list(data_sampled.keys()))
edges, separation_set = self.edge_selection_based_on_adjacencies(data_sampled, edges)
edges = self.orient_unshield_triples(edges, separation_set)
edges, separation_set = self.edge_selection_based_on_possible_d_sep(data_sampled, edges, separation_set)
for edge in edges:
try:
edge_counts[(edge[0], edge[1], 'o', 'o')] += 1.0
except KeyError:
edge_counts[(edge[0], edge[1], 'o', 'o')] = 1.0
edge_counts = {key: edge_counts[key] / bootstrap_samples for key in edge_counts}
edges = [key for key in edge_counts if edge_counts[key] >= bootstrap_edge_threshold]
separation_set = self.infer_separation_set_for_edges(self.data, edges)
edges = self.orient_unshield_triples(edges, separation_set)
return edges
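# Minimal usage sketch. The class only assumes that
# conditional_independence_test(x, y, z) returns True when x and y test as
# independent given z, for x, y of shape (n, 1) and z of shape (n, |Z|).
# The Fisher-z partial-correlation test, its cutoff, and the toy chain data
# below are illustrative assumptions, not part of the original API.
def _fisher_z_test(x, y, z, crit=1.959964):  # crit ~ two-sided N(0,1) cutoff, alpha=0.05
    n = x.shape[0]
    # Partial out z (plus an intercept) from x and y, then correlate residuals.
    zc = np.concatenate([z, np.ones((n, 1))], axis=1)
    rx = x[:, 0] - zc @ np.linalg.lstsq(zc, x[:, 0], rcond=None)[0]
    ry = y[:, 0] - zc @ np.linalg.lstsq(zc, y[:, 0], rcond=None)[0]
    r = np.clip(np.corrcoef(rx, ry)[0, 1], -0.999999, 0.999999)
    fisher_z = 0.5 * np.log((1.0 + r) / (1.0 - r))
    return np.sqrt(max(n - z.shape[1] - 3, 1)) * abs(fisher_z) < crit
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    a = rng.normal(size=500)
    b = a + rng.normal(size=500)
    c = b + rng.normal(size=500)  # chain a -> b -> c, so a and c separate given b
    fci = FastCausalInference({'a': list(a), 'b': list(b), 'c': list(c)}, _fisher_z_test)
    print(fci.infer_skeleton())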
|
{"hexsha": "d336204a251e847bdfe13e90facfa570debc66ca", "size": 12109, "ext": "py", "lang": "Python", "max_stars_repo_path": "causal_inference.py", "max_stars_repo_name": "valerK/causal_discovery", "max_stars_repo_head_hexsha": "e3fbd3d221387f343b2ff0961d0f2faf581daeef", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "causal_inference.py", "max_issues_repo_name": "valerK/causal_discovery", "max_issues_repo_head_hexsha": "e3fbd3d221387f343b2ff0961d0f2faf581daeef", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "causal_inference.py", "max_forks_repo_name": "valerK/causal_discovery", "max_forks_repo_head_hexsha": "e3fbd3d221387f343b2ff0961d0f2faf581daeef", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.4429967427, "max_line_length": 121, "alphanum_fraction": 0.5420761417, "include": true, "reason": "import numpy", "num_tokens": 2468}
|
/*! \file
\brief A node constraint element.
Copyright (C) 2019-2021 kaoru https://www.tetengo.org/
*/
#include <any>
#include <iterator>
#include <string_view>
#include <utility>
#include <vector>
#include <boost/preprocessor.hpp>
#include <boost/scope_exit.hpp>
#include <boost/test/unit_test.hpp>
#include <tetengo/lattice/constraintElement.h>
#include <tetengo/lattice/entry.h>
#include <tetengo/lattice/node.h>
#include <tetengo/lattice/node.hpp>
#include <tetengo/lattice/node_constraint_element.hpp>
BOOST_AUTO_TEST_SUITE(test_tetengo)
BOOST_AUTO_TEST_SUITE(lattice)
BOOST_AUTO_TEST_SUITE(node_constraint_element)
BOOST_AUTO_TEST_CASE(construction)
{
BOOST_TEST_PASSPOINT();
{
const std::any element_node_value{ 42 };
const std::vector<int> element_node_preceding_edge_costs{ 3, 1, 4, 1, 5, 9, 2, 6 };
tetengo::lattice::node element_node{
"mizuho", &element_node_value, 1, &element_node_preceding_edge_costs, 5, 24, 2424
};
const tetengo::lattice::node_constraint_element element{ std::move(element_node) };
}
{
const std::string_view element_key{ "mizuho" };
const std::any element_value{ reinterpret_cast<const void*>("MIZUHO") };
const std::vector<int> element_preceding_edge_costs{ 3, 1, 4, 1, 5, 9, 2, 6 };
const tetengo_lattice_node_t element_node{ { std::data(element_key), element_key.length() },
reinterpret_cast<tetengo_lattice_entry_valueHandle_t>(
&element_value),
1,
std::data(element_preceding_edge_costs),
std::size(element_preceding_edge_costs),
5,
24,
2424 };
const auto* const p_constraint_element =
tetengo_lattice_constraintElement_createNodeConstraintElement(&element_node);
BOOST_SCOPE_EXIT(p_constraint_element)
{
tetengo_lattice_constraintElement_destroy(p_constraint_element);
}
BOOST_SCOPE_EXIT_END;
BOOST_TEST(p_constraint_element);
}
{
const auto* const p_constraint_element = tetengo_lattice_constraintElement_createNodeConstraintElement(nullptr);
BOOST_TEST(!p_constraint_element);
}
}
BOOST_AUTO_TEST_CASE(matches)
{
BOOST_TEST_PASSPOINT();
{
const std::any element_node_value{ 42 };
const std::vector<int> element_node_preceding_edge_costs{ 3, 1, 4, 1, 5, 9, 2, 6 };
tetengo::lattice::node element_node{
"mizuho", &element_node_value, 1, &element_node_preceding_edge_costs, 5, 24, 2424
};
const tetengo::lattice::node_constraint_element element{ std::move(element_node) };
{
const std::any value{ 42 };
const std::vector<int> preceding_edge_costs{ 3, 1, 4, 1, 5, 9, 2, 6 };
const tetengo::lattice::node node_{ "mizuho", &value, 1, &preceding_edge_costs, 5, 24, 2424 };
BOOST_TEST(element.matches(node_) == 0);
}
{
const std::any value{ 42 };
const std::vector<int> preceding_edge_costs{ 3, 1, 4, 1, 5, 9, 2, 6 };
const tetengo::lattice::node node_{ "sakura", &value, 1, &preceding_edge_costs, 5, 24, 2424 };
BOOST_TEST(element.matches(node_) < 0);
}
}
{
const std::string_view element_key{ "mizuho" };
const std::any element_value{ reinterpret_cast<const void*>("MIZUHO") };
const std::vector<int> element_preceding_edge_costs{ 3, 1, 4, 1, 5, 9, 2, 6 };
const tetengo_lattice_node_t element_node{ { std::data(element_key), element_key.length() },
reinterpret_cast<tetengo_lattice_entry_valueHandle_t>(
&element_value),
1,
std::data(element_preceding_edge_costs),
std::size(element_preceding_edge_costs),
5,
24,
2424 };
const auto* const p_constraint_element =
tetengo_lattice_constraintElement_createNodeConstraintElement(&element_node);
BOOST_SCOPE_EXIT(p_constraint_element)
{
tetengo_lattice_constraintElement_destroy(p_constraint_element);
}
BOOST_SCOPE_EXIT_END;
BOOST_TEST_REQUIRE(p_constraint_element);
{
const std::string_view key{ "mizuho" };
const std::any value{ reinterpret_cast<const void*>("MIZUHO") };
const std::vector<int> preceding_edge_costs{ 3, 1, 4, 1, 5, 9, 2, 6 };
const tetengo_lattice_node_t node{ { std::data(key), key.length() },
reinterpret_cast<tetengo_lattice_entry_valueHandle_t>(&value),
1,
std::data(preceding_edge_costs),
std::size(preceding_edge_costs),
5,
24,
2424 };
BOOST_TEST(tetengo_lattice_constraintElement_matches(p_constraint_element, &node) == 0);
}
{
const std::string_view key{ "sakura" };
const std::any value{ reinterpret_cast<const void*>("SAKURA") };
const std::vector<int> preceding_edge_costs{ 3, 1, 4, 1, 5, 9, 2, 6 };
const tetengo_lattice_node_t node{ { std::data(key), key.length() },
reinterpret_cast<tetengo_lattice_entry_valueHandle_t>(&value),
1,
std::data(preceding_edge_costs),
std::size(preceding_edge_costs),
5,
24,
2424 };
BOOST_TEST(tetengo_lattice_constraintElement_matches(p_constraint_element, &node) < 0);
}
}
{
const std::string_view key{ "mizuho" };
const std::any value{ reinterpret_cast<const void*>("MIZUHO") };
const std::vector<int> preceding_edge_costs{ 3, 1, 4, 1, 5, 9, 2, 6 };
const tetengo_lattice_node_t node{ { std::data(key), key.length() },
reinterpret_cast<tetengo_lattice_entry_valueHandle_t>(&value),
1,
std::data(preceding_edge_costs),
std::size(preceding_edge_costs),
5,
24,
2424 };
BOOST_TEST(tetengo_lattice_constraintElement_matches(nullptr, &node) < 0);
}
{
const std::string_view element_key{ "mizuho" };
const std::any element_value{ reinterpret_cast<const void*>("MIZUHO") };
const std::vector<int> element_preceding_edge_costs{ 3, 1, 4, 1, 5, 9, 2, 6 };
const tetengo_lattice_node_t element_node{ { std::data(element_key), element_key.length() },
reinterpret_cast<tetengo_lattice_entry_valueHandle_t>(
&element_value),
1,
std::data(element_preceding_edge_costs),
std::size(element_preceding_edge_costs),
5,
24,
2424 };
const auto* const p_constraint_element =
tetengo_lattice_constraintElement_createNodeConstraintElement(&element_node);
BOOST_SCOPE_EXIT(p_constraint_element)
{
tetengo_lattice_constraintElement_destroy(p_constraint_element);
}
BOOST_SCOPE_EXIT_END;
BOOST_TEST_REQUIRE(p_constraint_element);
BOOST_TEST(tetengo_lattice_constraintElement_matches(p_constraint_element, nullptr) < 0);
}
}
BOOST_AUTO_TEST_SUITE_END()
BOOST_AUTO_TEST_SUITE_END()
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "f99cd2b0dbed9997080dc232b5258570925e7db1", "size": 9417, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "library/lattice/test/src/test_tetengo.lattice.node_constraint_element.cpp", "max_stars_repo_name": "kaorut/tetengo", "max_stars_repo_head_hexsha": "3360cce3e3f4c92b18154927685986c1fa7b4e8e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "library/lattice/test/src/test_tetengo.lattice.node_constraint_element.cpp", "max_issues_repo_name": "kaorut/tetengo", "max_issues_repo_head_hexsha": "3360cce3e3f4c92b18154927685986c1fa7b4e8e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 153.0, "max_issues_repo_issues_event_min_datetime": "2019-08-11T05:26:36.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-23T17:24:04.000Z", "max_forks_repo_path": "library/lattice/test/src/test_tetengo.lattice.node_constraint_element.cpp", "max_forks_repo_name": "kaorut/tetengo", "max_forks_repo_head_hexsha": "3360cce3e3f4c92b18154927685986c1fa7b4e8e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.0459183673, "max_line_length": 121, "alphanum_fraction": 0.4927259212, "num_tokens": 1873}
|
from sympy.abc import x
from sympy import factor
result = factor(x**2 + 3*x)
print(result)
result1 = factor(x**2 - 9)
print(result1)
result2 = factor(x**2 - 4 * x + 4)
print(result2)
|
{"hexsha": "3b6b524cf20fbf7b345b7cf455c27dfb4b79f358", "size": 187, "ext": "py", "lang": "Python", "max_stars_repo_path": "fat_polynomial.py", "max_stars_repo_name": "maiconloure/Learning_Python", "max_stars_repo_head_hexsha": "2999508909ace5f8ca0708cdea93b82abaaeafb2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "fat_polynomial.py", "max_issues_repo_name": "maiconloure/Learning_Python", "max_issues_repo_head_hexsha": "2999508909ace5f8ca0708cdea93b82abaaeafb2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "fat_polynomial.py", "max_forks_repo_name": "maiconloure/Learning_Python", "max_forks_repo_head_hexsha": "2999508909ace5f8ca0708cdea93b82abaaeafb2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 14.3846153846, "max_line_length": 34, "alphanum_fraction": 0.6684491979, "include": true, "reason": "from sympy", "num_tokens": 63}
|
import numpy as np
from lib import common
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
Vmax = 10
Vmin = -10
N_ATOMS = 51
DELTA_Z = (Vmax - Vmin) / (N_ATOMS - 1)
def save_distr(vec, name):
plt.cla()
p = np.arange(Vmin, Vmax+DELTA_Z, DELTA_Z)
plt.bar(p, vec, width=0.5)
plt.savefig(name + ".png")
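# Reference sketch of what lib.common.distr_projection is assumed to compute
# (the helper lives outside this file, so this re-implementation is an
# illustration, not the project's code): each next-state atom z_j is mapped to
# r + gamma * z_j, clipped to [Vmin, Vmax], and its probability mass is split
# linearly between the two nearest atoms; terminal transitions collapse all
# mass onto the clipped reward.
def reference_distr_projection(next_distr, rewards, dones, Vmin, Vmax, n_atoms, gamma):
    batch = next_distr.shape[0]
    delta_z = (Vmax - Vmin) / (n_atoms - 1)
    proj = np.zeros((batch, n_atoms), dtype=np.float32)
    for j in range(n_atoms):
        tz = np.clip(rewards + gamma * (Vmin + j * delta_z), Vmin, Vmax)
        b = (tz - Vmin) / delta_z
        l, u = np.floor(b).astype(np.int64), np.ceil(b).astype(np.int64)
        same = (l == u)  # projected atom lands exactly on a grid point
        proj[same, l[same]] += next_distr[same, j]
        diff = ~same     # otherwise split mass proportionally to distance
        proj[diff, l[diff]] += next_distr[diff, j] * (u - b)[diff]
        proj[diff, u[diff]] += next_distr[diff, j] * (b - l)[diff]
    for k in np.where(dones)[0]:  # terminal: distribution degenerates to the reward
        proj[k] = 0.0
        b = (np.clip(rewards[k], Vmin, Vmax) - Vmin) / delta_z
        l, u = int(np.floor(b)), int(np.ceil(b))
        if l == u:
            proj[k, l] = 1.0
        else:
            proj[k, l], proj[k, u] = u - b, b - l
    return proj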
if __name__ == "__main__":
np.random.seed(123)
atoms = np.arange(Vmin, Vmax+DELTA_Z, DELTA_Z)
# single peak distribution
src_hist = np.zeros(shape=(1, N_ATOMS), dtype=np.float32)
src_hist[0, N_ATOMS//2+1] = 1.0
save_distr(src_hist[0], "peak-01")
proj_hist = common.distr_projection(src_hist, np.array([2], dtype=np.float32), np.array([False]),
Vmin, Vmax, N_ATOMS, gamma=0.9)
save_distr(proj_hist[0], "peak-02")
# normal distribution
data = np.random.normal(size=1000, scale=3)
hist = np.histogram(data, bins=np.arange(Vmin - DELTA_Z/2, Vmax + DELTA_Z*3/2, DELTA_Z))
save_distr(hist[0], "normal-01")
src_hist = hist[0]
proj_hist = common.distr_projection(np.array([src_hist]), np.array([2], dtype=np.float32), np.array([False]),
Vmin, Vmax, N_ATOMS, gamma=0.9)
save_distr(proj_hist[0], "normal-02")
# normal distribution, but done episode
proj_hist = common.distr_projection(np.array([src_hist]), np.array([2], dtype=np.float32), np.array([True]),
Vmin, Vmax, N_ATOMS, gamma=0.9)
save_distr(proj_hist[0], "normal-03")
# clipping for out-of-range distribution
proj_dist = common.distr_projection(np.array([src_hist]), np.array([10], dtype=np.float32), np.array([False]),
Vmin, Vmax, N_ATOMS, gamma=0.9)
save_distr(proj_dist[0], "normal-04")
proj_dist = common.distr_projection(np.array([src_hist]), np.array([10], dtype=np.float32), np.array([False]),
Vmin, Vmax, N_ATOMS, gamma=0.9)
save_distr(proj_dist[0], "normal-05")
# test both done and not done, unclipped
proj_hist = common.distr_projection(np.array([src_hist, src_hist]), np.array([2, 2], dtype=np.float32),
np.array([False, True]), Vmin, Vmax, N_ATOMS, gamma=0.9)
save_distr(proj_hist[0], "both_not_clip-01-incomplete")
save_distr(proj_hist[1], "both_not_clip-02-complete")
# test both done and not done, clipped right
proj_hist = common.distr_projection(np.array([src_hist, src_hist]), np.array([10, 10], dtype=np.float32),
np.array([False, True]), Vmin, Vmax, N_ATOMS, gamma=0.9)
save_distr(proj_hist[0], "both_clip-right-01-incomplete")
save_distr(proj_hist[1], "both_clip-right-02-complete")
# test both done and not done, clipped left
proj_hist = common.distr_projection(np.array([src_hist, src_hist]), np.array([-10, -10], dtype=np.float32),
np.array([False, True]), Vmin, Vmax, N_ATOMS, gamma=0.9)
save_distr(proj_hist[0], "both_clip-left-01-incomplete")
save_distr(proj_hist[1], "both_clip-left-02-complete")
pass
|
{"hexsha": "514b0cb7d25dabc7bd868fba4803ef18ffc138a5", "size": 3201, "ext": "py", "lang": "Python", "max_stars_repo_path": "samples/rainbow/distr_test.py", "max_stars_repo_name": "ChengUVa/ptan", "max_stars_repo_head_hexsha": "f9b3ef2680ff64fad52e600d73ff2bf42eee310d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 492, "max_stars_repo_stars_event_min_datetime": "2017-10-08T13:01:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T15:44:44.000Z", "max_issues_repo_path": "samples/rainbow/distr_test.py", "max_issues_repo_name": "ChengUVa/ptan", "max_issues_repo_head_hexsha": "f9b3ef2680ff64fad52e600d73ff2bf42eee310d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 41, "max_issues_repo_issues_event_min_datetime": "2018-04-15T13:25:56.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-18T10:57:28.000Z", "max_forks_repo_path": "samples/rainbow/distr_test.py", "max_forks_repo_name": "ChengUVa/ptan", "max_forks_repo_head_hexsha": "f9b3ef2680ff64fad52e600d73ff2bf42eee310d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 164, "max_forks_repo_forks_event_min_datetime": "2017-11-26T00:14:12.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T16:21:17.000Z", "avg_line_length": 41.0384615385, "max_line_length": 114, "alphanum_fraction": 0.6173070915, "include": true, "reason": "import numpy", "num_tokens": 909}
|
from builtins import str
from builtins import range
import argparse
import glob
import lsst.afw.image as afwImage
import lsst.afw.geom as afwGeom
import lsst.afw.math as afwMath
#generate counts vs. exposure time data for a directory of flat fields
def linearity(directory, infilebase, outfile, amps, x0, y0, boxsize):
#get list of files
files = glob.glob(directory+infilebase)
#write output file header
    with open(directory+outfile, 'w+') as f:
        f.write('amp\tx0\ty0\tboxsize\tmedian\texptime\n')
        for filename in files:
            #get exposure time from header
            hdr = afwImage.readMetadata(filename, 1)
            exptime = hdr.get('EXPTIME')
            for amp in amps:
                #define selected region
                box = afwGeom.Box2I(afwGeom.Point2I(x0, y0), afwGeom.Extent2I(boxsize, boxsize))
                #read in selected region of file
                im = afwImage.ExposureF(filename, amp+1, box)
                #get median of region of image
                box_median = afwMath.makeStatistics(im.getMaskedImage(), afwMath.MEDIAN).getValue()
                #write amp, box parameters, region median, and exptime to file
                f.write(str(amp) + '\t' + str(x0) + '\t' + str(y0) + '\t' +
                        str(boxsize) + '\t' + str(box_median) + '\t' + str(exptime) + '\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate linearity data for a set of flat field exposures')
parser.add_argument('-d', '--direc', default='./', type=str,
help="directory of files to work on. Include /")
parser.add_argument('-f', '--infiles', type=str, default='*.fits',
help="file string to search for; default= *.fits")
parser.add_argument('-o', '--outfile', type=str, default='linearity_results.txt',
help="output file name; default=linearity_results.txt")
parser.add_argument('-x', '--x0', type=int, default=200,
help="x0 pixel position for region of interest; default 200")
parser.add_argument('-y', '--y0', type=int, default=900,
help="y0 pixel position for region of interest; default 900")
parser.add_argument('-s', '--size', type=int, default=100, help="box size in pixels; default 100")
parser.add_argument('-a', '--amps', help="amps to be analyzed, separated by a space",
type=int, nargs='+', default=list(range(1, 17)))
args = parser.parse_args()
linearity(args.direc, args.infiles, args.outfile, args.amps, args.x0, args.y0, args.size)
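# Example invocation (hypothetical paths and file pattern; all flags shown are
# defined in the argparse setup above):
#   python linearity.py -d /data/flats/ -f 'flat_*.fits' -o linearity.txt -x 200 -y 900 -s 100 -a 1 2 3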
|
{"hexsha": "928dbdbb113a0e8cf92e08da254afee94e61c2b4", "size": 2645, "ext": "py", "lang": "Python", "max_stars_repo_path": "Attic/linearity.py", "max_stars_repo_name": "tguillemLSST/eotest", "max_stars_repo_head_hexsha": "c6f150984fa5dff85b9805028645bf46fc846f11", "max_stars_repo_licenses": ["BSD-3-Clause-LBNL"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2016-04-21T07:05:45.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-05T08:37:37.000Z", "max_issues_repo_path": "Attic/linearity.py", "max_issues_repo_name": "tguillemLSST/eotest", "max_issues_repo_head_hexsha": "c6f150984fa5dff85b9805028645bf46fc846f11", "max_issues_repo_licenses": ["BSD-3-Clause-LBNL"], "max_issues_count": 70, "max_issues_repo_issues_event_min_datetime": "2015-03-26T09:48:53.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-22T16:29:43.000Z", "max_forks_repo_path": "Attic/linearity.py", "max_forks_repo_name": "tguillemLSST/eotest", "max_forks_repo_head_hexsha": "c6f150984fa5dff85b9805028645bf46fc846f11", "max_forks_repo_licenses": ["BSD-3-Clause-LBNL"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2017-08-15T20:52:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T12:54:07.000Z", "avg_line_length": 42.6612903226, "max_line_length": 109, "alphanum_fraction": 0.6310018904, "include": true, "reason": "import numpy", "num_tokens": 677}
|
/* Copyright (c) 2017, United States Government, as represented by the
* Administrator of the National Aeronautics and Space Administration.
*
* All rights reserved.
*
* The Astrobee platform is licensed under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
// Command line flags
#include <gflags/gflags.h>
#include <gflags/gflags_completions.h>
// Include ROS
#include <ros/ros.h>
// Listen for transforms
#include <tf2_ros/transform_listener.h>
// FSW includes
#include <ff_util/ff_names.h>
#include <ff_util/ff_flight.h>
#include <ff_util/ff_action.h>
#include <ff_util/ff_serialization.h>
#include <ff_util/config_client.h>
// Primitive actions
#include <ff_msgs/LocalizationAction.h>
#include <ff_msgs/MotionAction.h>
// Eigen C++ includes
#include <Eigen/Dense>
#include <Eigen/Geometry>
// C++ STL includes
#include <iostream>
#include <sstream>
#include <fstream>
#include <string>
#include <memory>
// Gflags
DEFINE_string(ns, "", "Robot namespace");
DEFINE_bool(reset, false, "Reset localization pipeline");
DEFINE_bool(bias, false, "Estimate bias for the localization pipeline");
DEFINE_string(loc, "", "Localization pipeline (none, ml, ar, hr)");
DEFINE_string(mode, "nominal", "Flight mode");
DEFINE_string(planner, "trapezoidal", "Path planning algorithm");
DEFINE_bool(ff, false, "Plan in face-forward mode");
DEFINE_double(rate, 1.0, "Segment sampling rate");
DEFINE_double(vel, -1.0, "Desired velocity");
DEFINE_double(accel, -1.0, "Desired acceleration");
DEFINE_double(omega, -1.0, "Desired angular velocity");
DEFINE_double(alpha, -1.0, "Desired angular acceleration");
DEFINE_bool(move, false, "Send move command");
DEFINE_bool(stop, false, "Send stop command");
DEFINE_bool(idle, false, "Send idle command");
DEFINE_bool(prep, false, "Send prep command");
DEFINE_bool(novalidate, false, "Don't validate the segment before running");
DEFINE_bool(nocollision, false, "Don't check for collisions during action");
DEFINE_bool(nobootstrap, false, "Don't move to the starting station on execute");
DEFINE_bool(noimmediate, false, "Don't execute immediately");
DEFINE_bool(replan, false, "Enable replanning");
DEFINE_bool(timesync, false, "Enable time synchronization");
DEFINE_string(rec, "", "Plan and record to this file.");
DEFINE_string(exec, "", "Execute a given segment");
DEFINE_string(pos, "", "Desired position in cartesian format 'X Y Z' (meters)");
DEFINE_string(att, "", "Desired attitude in angle-axis format 'angle X Y Z'");
DEFINE_double(wait, 0.0, "Defer move by given amount in seconds (needs -noimmediate)");
DEFINE_double(connect, 30.0, "Action connect timeout");
DEFINE_double(active, 30.0, "Action active timeout");
DEFINE_double(response, 30.0, "Action response timeout");
DEFINE_double(deadline, -1.0, "Action deadline timeout");
// Avoid sending the command multiple times
bool sent_ = false;
// Generic completion function
void MResultCallback(ff_util::FreeFlyerActionState::Enum result_code,
ff_msgs::MotionResultConstPtr const& result) {
switch (result_code) {
// Result will be a null pointer
case ff_util::FreeFlyerActionState::Enum::TIMEOUT_ON_CONNECT:
std::cout << "Timeout on connecting to action" << std::endl;
break;
case ff_util::FreeFlyerActionState::Enum::TIMEOUT_ON_ACTIVE:
std::cout << "Timeout on action going active" << std::endl;
break;
case ff_util::FreeFlyerActionState::Enum::TIMEOUT_ON_RESPONSE:
std::cout << "Timeout on receiving a response" << std::endl;
break;
case ff_util::FreeFlyerActionState::Enum::TIMEOUT_ON_DEADLINE:
std::cout << "Timeout on result deadline" << std::endl;
break;
// Result expected
case ff_util::FreeFlyerActionState::Enum::SUCCESS:
if (!FLAGS_rec.empty()) {
ff_msgs::MotionGoal msg;
msg.command = ff_msgs::MotionGoal::EXEC;
msg.flight_mode = FLAGS_mode;
msg.segment = result->segment;
if (!ff_util::Serialization::WriteFile(FLAGS_rec, msg))
std::cout << std::endl << "Segment saved to " << FLAGS_rec;
else
std::cout << std::endl << "Segment not saved";
}
case ff_util::FreeFlyerActionState::Enum::PREEMPTED:
case ff_util::FreeFlyerActionState::Enum::ABORTED: {
std::cout << std::endl << "Result: " << result->fsm_result
<< " (response: " << result->response << ")" << std::endl;
}
default:
break;
}
ros::shutdown();
}
// Mobility feedback
void MFeedbackCallback(ff_msgs::MotionFeedbackConstPtr const& feedback) {
std::cout << '\r' << std::flush;
std::cout << std::fixed << std::setprecision(2)
<< "POS: " << 1000.00 * feedback->progress.error_position << " mm "
<< "ATT: " << 57.2958 * feedback->progress.error_attitude << " deg "
<< "VEL: " << 1000.00 * feedback->progress.error_velocity << " mm/s "
<< "OMEGA: " << 57.2958 * feedback->progress.error_omega << " deg/s "
<< "[" << feedback->state.fsm_state << "] ";
}
// Switch feedback
void SFeedbackCallback(ff_msgs::LocalizationFeedbackConstPtr const& feedback) {}
// Switch result
void SResultCallback(ff_util::FreeFlyerActionState::Enum result_code,
ff_msgs::LocalizationResultConstPtr const& result,
tf2_ros::Buffer * tf_buffer_,
ff_util::FreeFlyerActionClient<ff_msgs::MotionAction> * action) {
// Setup a new mobility goal
ff_msgs::MotionGoal goal;
goal.flight_mode = FLAGS_mode;
// Rest of the goal depends on result
switch (result_code) {
case ff_util::FreeFlyerActionState::SUCCESS: {
// Idle command
if (FLAGS_idle) {
goal.command = ff_msgs::MotionGoal::IDLE;
// Stop command
} else if (FLAGS_stop) {
goal.command = ff_msgs::MotionGoal::STOP;
// Stop command
} else if (FLAGS_prep) {
goal.command = ff_msgs::MotionGoal::PREP;
// Obtain the current state
} else if (FLAGS_move) {
goal.command = ff_msgs::MotionGoal::MOVE;
geometry_msgs::PoseStamped state;
try {
std::string ns = FLAGS_ns;
geometry_msgs::TransformStamped tfs = tf_buffer_->lookupTransform(
std::string(FRAME_NAME_WORLD),
(ns.empty() ? "body" : ns + "/" + std::string(FRAME_NAME_BODY)),
ros::Time(0));
state.header = tfs.header;
state.pose.position.x = tfs.transform.translation.x;
state.pose.position.y = tfs.transform.translation.y;
state.pose.position.z = tfs.transform.translation.z;
state.pose.orientation = tfs.transform.rotation;
} catch (tf2::TransformException &ex) {
std::cout << "Could not query the pose of the robot: "
<< ex.what() << std::endl;
ros::shutdown();
}
// Manipulate timestamp to cause deferral
state.header.stamp += ros::Duration(FLAGS_wait);
// Parse and modify the position
std::string str_p = FLAGS_pos;
if (!str_p.empty()) {
std::istringstream iss_p(str_p);
std::vector<double> vec_p {
std::istream_iterator<double>(iss_p),
std::istream_iterator<double>()
};
if (vec_p.size() > 0) state.pose.position.x = vec_p[0];
if (vec_p.size() > 1) state.pose.position.y = vec_p[1];
if (vec_p.size() > 2) state.pose.position.z = vec_p[2];
}
// Parse the attitude - roll, pitch then yaw
std::string str_a = FLAGS_att;
if (!str_a.empty()) {
// Parse double vector from string
std::istringstream iss_a(str_a);
std::vector<double> vec_a {
std::istream_iterator<double>(iss_a),
std::istream_iterator<double>()
};
// Convert the axis angle input to a quaternion
Eigen::AngleAxisd aa(0.0, Eigen::Vector3d(0.0, 0.0, 0.0));
if (vec_a.size() == 1) {
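            // A single value is read as a desired yaw: rotate the current
            // orientation about world Z so that the body X axis, projected
            // onto the world XY plane, points along the requested angle.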
Eigen::Quaterniond q0(state.pose.orientation.w, state.pose.orientation.x,
state.pose.orientation.y, state.pose.orientation.z);
Eigen::Vector3d x(1, 0, 0);
Eigen::Vector3d p = q0.matrix()*x;
p(2) = 0;
p.normalize();
double alpha = vec_a[0] - std::atan2(p(1), p(0));
Eigen::Quaterniond qz(std::cos(0.5*alpha), 0, 0, std::sin(0.5*alpha));
Eigen::Quaterniond qd = qz*q0;
Eigen::Vector3d p_check = qd.matrix()*x;
p_check(2) = 0;
Eigen::Vector3d p_check2(std::cos(alpha), std::sin(alpha), 0);
// End check
state.pose.orientation.x = qd.x();
state.pose.orientation.y = qd.y();
state.pose.orientation.z = qd.z();
state.pose.orientation.w = qd.w();
} else if (vec_a.size() == 4) {
aa.angle() = vec_a[0];
aa.axis().x() = vec_a[1];
aa.axis().y() = vec_a[2];
aa.axis().z() = vec_a[3];
Eigen::Quaterniond q(aa);
state.pose.orientation.x = q.x();
state.pose.orientation.y = q.y();
state.pose.orientation.z = q.z();
state.pose.orientation.w = q.w();
} else if (vec_a.size() > 0) {
std::cout << "Invalid axis-angle format passed to -att. "
<< "Four elements required. Aborting" << std::endl;
break;
}
}
// Package up and send the move goal
goal.states.push_back(state);
// Execute command
} else if (!FLAGS_exec.empty()) {
if (!ff_util::Serialization::ReadFile(FLAGS_exec, goal)) {
std::cout << "Segment not loaded from file " << FLAGS_exec << std::endl;
break;
}
// We don't actually have a motion goal
} else {
std::cout << "Result: SUCCESS" << std::endl;
std::cout << "Message: " << result->fsm_result << std::endl;
break;
}
// Try and send the goal
if (!action->SendGoal(goal))
std::cout << "Mobility client did not accept goal" << std::endl;
else
return;
}
case ff_util::FreeFlyerActionState::PREEMPTED:
std::cout << "Error: PREEMPTED" << std::endl;
break;
case ff_util::FreeFlyerActionState::ABORTED:
std::cout << "Error: ABORTED" << std::endl;
std::cout << "Reason: " << result->fsm_result << std::endl;
break;
case ff_util::FreeFlyerActionState::TIMEOUT_ON_CONNECT:
std::cout << "Error: TIMEOUT_ON_CONNECT" << std::endl;
break;
case ff_util::FreeFlyerActionState::TIMEOUT_ON_ACTIVE:
std::cout << "Error: TIMEOUT_ON_ACTIVE" << std::endl;
break;
case ff_util::FreeFlyerActionState::TIMEOUT_ON_RESPONSE:
std::cout << "Error: TIMEOUT_ON_RESPONSE" << std::endl;
break;
case ff_util::FreeFlyerActionState::TIMEOUT_ON_DEADLINE:
std::cout << "Error: TIMEOUT_ON_DEADLINE" << std::endl;
break;
default:
std::cout << "Error: UNKNOWN" << std::endl;
break;
}
ros::shutdown();
}
// Ensure all clients are connected
void ConnectedCallback(tf2_ros::Buffer * tf_buffer_,
ff_util::FreeFlyerActionClient<ff_msgs::LocalizationAction> * client_s_,
ff_util::FreeFlyerActionClient<ff_msgs::MotionAction> * client_t_) {
// Check to see if connected
if (!client_s_->IsConnected()) return; // Switch
if (!client_t_->IsConnected()) return; // Mobility
if (sent_) return; // Avoid calling twice
else
sent_ = true;
// Package up and send the move goal
if (!FLAGS_loc.empty() || FLAGS_bias || FLAGS_reset) {
ff_msgs::LocalizationGoal goal;
if (!FLAGS_loc.empty()) {
goal.command = ff_msgs::LocalizationGoal::COMMAND_SWITCH_PIPELINE;
goal.pipeline = FLAGS_loc;
}
if (FLAGS_reset)
goal.command = ff_msgs::LocalizationGoal::COMMAND_RESET_FILTER;
if (FLAGS_bias)
goal.command = ff_msgs::LocalizationGoal::COMMAND_ESTIMATE_BIAS;
if (!client_s_->SendGoal(goal))
std::cout << "Localization client did not accept goal" << std::endl;
return;
}
  // Fake a switch result to trigger the teleop action
SResultCallback(ff_util::FreeFlyerActionState::SUCCESS, nullptr,
tf_buffer_, client_t_);
}
// Main entry point for application
int main(int argc, char *argv[]) {
// Initialize a ros node
ros::init(argc, argv, "teleop", ros::init_options::AnonymousName);
// Gather some data from the command
google::SetUsageMessage("Usage: rosrun mobility teleop <opts>");
google::SetVersionString("1.0.0");
google::ParseCommandLineFlags(&argc, &argv, true);
// Some simple checks
uint8_t mode1 = 0, mode2 = 0;
if (!FLAGS_exec.empty()) mode1++;
if (FLAGS_idle) mode1++;
if (FLAGS_stop) mode1++;
if (FLAGS_move) mode1++;
if (FLAGS_prep) mode1++;
if (!FLAGS_loc.empty()) mode2++;
if (FLAGS_bias) mode2++;
if (FLAGS_reset) mode2++;
// Check we have specified one of the required switches
if (mode1 == 0 && mode2 == 0) {
std::cout << "You must specify at least one of "
<< "-bias, -reset, -loc, -move, -stop, -idle, -exec <segment>" << std::endl;
return 1;
}
if (mode1 > 1) {
std::cout << "You can only specify one of "
<< "-move, -stop, -idle, or -exec <segment>" << std::endl;
return 1;
}
if (mode2 > 1) {
std::cout << "You can only specify one of "
<< "-loc -bias or -reset" << std::endl;
return 1;
}
if (FLAGS_move && FLAGS_pos.empty() && FLAGS_att.empty()) {
std::cout << "The move flag must also have a pos / att flag" << std::endl;
return 1;
}
if (FLAGS_connect <= 0.0) {
std::cout << "Your connect timeout must be positive" << std::endl;
return 1;
}
if (FLAGS_active <= 0.0) {
std::cout << "Your active timeout must be positive" << std::endl;
return 1;
}
if (FLAGS_response <= 0.0) {
std::cout << "Your response timeout must be positive" << std::endl;
return 1;
}
// Action clients
ff_util::FreeFlyerActionClient<ff_msgs::LocalizationAction> client_s_;
ff_util::FreeFlyerActionClient<ff_msgs::MotionAction> client_t_;
// Create a node handle
ros::NodeHandle nh(std::string("/") + FLAGS_ns);
// TF2 Subscriber
tf2_ros::Buffer tf_buffer_;
tf2_ros::TransformListener tfListener(tf_buffer_);
// Setup SWITCH action
client_s_.SetConnectedTimeout(FLAGS_connect);
client_s_.SetActiveTimeout(FLAGS_active);
client_s_.SetResponseTimeout(FLAGS_response);
if (FLAGS_deadline > 0)
client_s_.SetDeadlineTimeout(FLAGS_deadline);
client_s_.SetFeedbackCallback(std::bind(
SFeedbackCallback, std::placeholders::_1));
client_s_.SetResultCallback(std::bind(
SResultCallback, std::placeholders::_1, std::placeholders::_2,
&tf_buffer_, &client_t_));
client_s_.SetConnectedCallback(std::bind(ConnectedCallback,
&tf_buffer_, &client_s_, &client_t_));
client_s_.Create(&nh, ACTION_LOCALIZATION_MANAGER_LOCALIZATION);
// Setup MOBILITY action
client_t_.SetConnectedTimeout(FLAGS_connect);
client_t_.SetActiveTimeout(FLAGS_active);
client_t_.SetResponseTimeout(FLAGS_response);
if (FLAGS_deadline > 0)
client_t_.SetDeadlineTimeout(FLAGS_deadline);
client_t_.SetFeedbackCallback(std::bind(
MFeedbackCallback, std::placeholders::_1));
client_t_.SetResultCallback(std::bind(
MResultCallback, std::placeholders::_1, std::placeholders::_2));
client_t_.SetConnectedCallback(std::bind(ConnectedCallback,
&tf_buffer_, &client_s_, &client_t_));
client_t_.Create(&nh, ACTION_MOBILITY_MOTION);
// For moves and executes check that we are configured correctly
if (FLAGS_move || !FLAGS_exec.empty()) {
ff_util::ConfigClient cfg(&nh, NODE_CHOREOGRAPHER);
if (FLAGS_vel > 0) cfg.Set<double>("desired_vel", FLAGS_vel);
if (FLAGS_accel > 0) cfg.Set<double>("desired_accel", FLAGS_accel);
if (FLAGS_omega > 0) cfg.Set<double>("desired_omega", FLAGS_omega);
if (FLAGS_alpha > 0) cfg.Set<double>("desired_alpha", FLAGS_alpha);
if (FLAGS_rate > 0) cfg.Set<double>("desired_rate", FLAGS_rate);
cfg.Set<bool>("enable_collision_checking", !FLAGS_nocollision);
cfg.Set<bool>("enable_validation", !FLAGS_novalidate);
cfg.Set<bool>("enable_bootstrapping", !FLAGS_nobootstrap);
cfg.Set<bool>("enable_immediate", !FLAGS_noimmediate);
cfg.Set<bool>("enable_timesync", FLAGS_timesync);
cfg.Set<bool>("enable_replanning", FLAGS_replan);
cfg.Set<bool>("enable_faceforward", FLAGS_ff);
if (!FLAGS_planner.empty())
cfg.Set<std::string>("planner", FLAGS_planner);
if (!cfg.Reconfigure()) {
std::cout << "Could not reconfigure the choreographer node " << std::endl;
ros::shutdown();
}
}
// Synchronous mode
ros::spin();
// Finish commandline flags
google::ShutDownCommandLineFlags();
// Make for great success
return 0;
}
|
{"hexsha": "b4f3ff4cd54d2c1a5b96dbbea7d93017c42b26ad", "size": 16915, "ext": "cc", "lang": "C++", "max_stars_repo_path": "mobility/mobility/tools/teleop.cc", "max_stars_repo_name": "algprasad/astrobee", "max_stars_repo_head_hexsha": "a5697d71e0c86598b3a762cadf94e8da826171c1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2019-05-07T12:01:41.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-07T12:01:41.000Z", "max_issues_repo_path": "mobility/mobility/tools/teleop.cc", "max_issues_repo_name": "Giovan/astrobee", "max_issues_repo_head_hexsha": "effb7608a7beff115b58a1c6fa6aa58313f159a0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mobility/mobility/tools/teleop.cc", "max_forks_repo_name": "Giovan/astrobee", "max_forks_repo_head_hexsha": "effb7608a7beff115b58a1c6fa6aa58313f159a0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.3372093023, "max_line_length": 87, "alphanum_fraction": 0.6649719184, "num_tokens": 4461}
|
[STATEMENT]
lemma fls_regpart_of_int [simp]:
"fls_regpart (of_int i) = (of_int i :: 'a::ring_1 fps)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. fls_regpart (of_int i) = of_int i
[PROOF STEP]
by (simp add: fls_of_int fps_of_int)
|
{"llama_tokens": 115, "file": null, "length": 1}
|
""" src.py.spad
"""
import numpy as np
def spad_timg(rates, eps=1e-6):
return 1.0 / np.clip(rates, eps, None)
def spad_logtimg(rates, eps=1e-6):
# TODO: see if this name is confusing?
# mean(logtimgs) or log(meantimgs)?
# and which one do we need?
return np.log(spad_timg(rates, eps=eps))
def invert_spad_timg(timg, tmin=1e-3, tmax=1e6):
return 1.0 / np.clip(timg, tmin, tmax)
def invert_spad_logtimg(logtimg, tmin=1e-3, tmax=1e6):
return np.exp(-np.clip(logtimg, np.log(tmin), np.log(tmax)))
def sample_spad_timestamps(rates, N=1, tmin=0, tmax=np.inf, eps=1e-6,
avg_fn='AM'):
scale = 1.0 / np.maximum(rates, eps)
H, W = scale.shape
scale = scale.reshape((1, H, W))
# --------------------------------------------------------------------------
# first we just simulate the arrival times
# we let tmin be the minimum time period the sensor can sensibly (haha)
# measure (basically = 1/2 of the clock period in some sense)
times = tmin + np.random.exponential(scale=scale, size=(N, H, W))
# --------------------------------------------------------------------------
# there are 2 ways to take the readings:
# 1. take N independent readings, waiting at most tmax for each one, and
# average them later
# 2. start recording once, and wait for N photons to show up. the reading
# is the mean (arithmetic or geometric) of the inter-photon intervals.
#
# i think the paper assumes approach 2, so i've kept that one active.
#
# NOTE: tmax has a different meaning (and scale) in the two approaches
# --------------------------------------------------------------------------
# approach 1
# --------------------------------------------------------------------------
# T = np.clip(times, None, tmax).mean(axis=0)
# --------------------------------------------------------------------------
# approach 2
# --------------------------------------------------------------------------
# count the photons we actually got
total_times = np.cumsum(times, axis=0)
num_photons = (total_times <= tmax).sum(axis=0) # is a 2D array
with np.errstate(divide='ignore', invalid='ignore'):
if avg_fn == 'AM':
if tmax is not None:
times[total_times > tmax] = 0 # ignored in sum
T = np.sum(times, axis=0) / num_photons
elif avg_fn == 'GM':
if tmax is not None:
times[total_times > tmax] = 1 # to get it ignored in sum(log)
T = np.exp(np.sum(np.log(times), axis=0) / num_photons)
else:
raise NotImplementedError
T[num_photons == 0] = np.inf
return T
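# Round-trip sketch (illustrative only; the constant rate and image shape are
# arbitrary assumptions): sample inter-photon times for a flat-rate scene and
# invert the mean timestamp image back into a rate estimate.
if __name__ == '__main__':
    rates = np.full((4, 4), 50.0)              # photons per unit time
    T = sample_spad_timestamps(rates, N=200, avg_fn='AM')
    print(np.round(invert_spad_timg(T), 1))    # estimates should hover near 50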
|
{"hexsha": "54085fbc80744b80634b65f04309142a8e72c6fd", "size": 2741, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/py/spad.py", "max_stars_repo_name": "shantanu-gupta/spad-timg-denoise", "max_stars_repo_head_hexsha": "3c9e5ae004dc3175ae796499ac7827e9bc2b4573", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/py/spad.py", "max_issues_repo_name": "shantanu-gupta/spad-timg-denoise", "max_issues_repo_head_hexsha": "3c9e5ae004dc3175ae796499ac7827e9bc2b4573", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/py/spad.py", "max_forks_repo_name": "shantanu-gupta/spad-timg-denoise", "max_forks_repo_head_hexsha": "3c9e5ae004dc3175ae796499ac7827e9bc2b4573", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.828125, "max_line_length": 80, "alphanum_fraction": 0.5089383437, "include": true, "reason": "import numpy", "num_tokens": 709}
|
from scipy.stats import ttest_ind, ttest_1samp, ttest_rel, mannwhitneyu, norm
from collections import OrderedDict
from numpy.random import randint
import matplotlib.gridspec as gridspec
from matplotlib.lines import Line2D
from matplotlib.ticker import AutoMinorLocator, MultipleLocator, MaxNLocator, FixedLocator, AutoLocator, FormatStrFormatter
from decimal import Decimal
import matplotlib.pyplot as plt
from matplotlib import rc, rcParams, rcdefaults
import sys
import seaborn.apionly as sns
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings("ignore")
# This imports the custom functions used.
# These have been placed in separate .py files for reduced code clutter.
from .mpl_tools import rotateTicks, normalizeSwarmY, normalizeContrastY, offsetSwarmX, resetSwarmX, getSwarmSpan
from .mpl_tools import align_yaxis, halfviolin, drawback_y, drawback_x
from .bootstrap_tools import ci, bootstrap, bootstrap_contrast, bootstrap_indexes, jackknife_indexes, getstatarray, bca
from .plot_bootstrap_tools import plotbootstrap, plotbootstrap_hubspoke, swarmsummary
def contrastplot_test(
data, x, y, idx=None,
alpha=0.75,
axis_title_size=None,
barWidth=5,
contrastShareY=True,
contrastEffectSizeLineStyle='solid',
contrastEffectSizeLineColor='black',
contrastYlim=None,
contrastZeroLineStyle='solid',
contrastZeroLineColor='black',
effectSizeYLabel="Effect Size",
figsize=None,
floatContrast=True,
floatSwarmSpacer=0.2,
heightRatio=(1, 1),
idcol=None,
lineWidth=2,
legend=True,
legendFontSize=14,
legendFontProps={},
paired=False,
pal=None,
rawMarkerSize=8,
rawMarkerType='o',
reps=3000,
showGroupCount=True,
show95CI=False,
showAllYAxes=False,
showRawData=True,
smoothboot=False,
statfunction=None,
summaryBar=False,
summaryBarColor='grey',
summaryBarAlpha=0.25,
summaryColour='black',
summaryLine=True,
summaryLineStyle='solid',
summaryLineWidth=0.25,
summaryMarkerSize=10,
summaryMarkerType='o',
swarmShareY=True,
swarmYlim=None,
tickAngle=45,
tickAlignment='right',
violinOffset=0.375,
violinWidth=0.2,
violinColor='k',
xticksize=None,
yticksize=None,
**kwargs):
'''Takes a pandas dataframe and produces a contrast plot:
either a Cummings hub-and-spoke plot or a Gardner-Altman contrast plot.
-----------------------------------------------------------------------
Description of flags upcoming.'''
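    # Hedged usage sketch (the DataFrame, column, and group names below are
    # illustrative assumptions):
    #   contrastplot_test(df, x='Group', y='Value', idx=('Control', 'Drug'))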
# Check that `data` is a pandas dataframe
if 'DataFrame' not in str(type(data)):
raise TypeError("The object passed to the command is not not a pandas DataFrame.\
Please convert it to a pandas DataFrame.")
# Get and set levels of data[x]
if idx is None:
widthratio=[1]
allgrps=np.sort(data[x].unique())
if paired:
# If `idx` is not specified, just take the FIRST TWO levels alphabetically.
tuple_in=tuple(allgrps[0:2],)
else:
# No idx is given, so all groups are compared to the first one in the DataFrame column.
tuple_in=(tuple(allgrps), )
if len(allgrps)>2:
floatContrast=False
else:
if all(isinstance(element, str) for element in idx):
# if idx is supplied but not a multiplot (ie single list or tuple)
tuple_in=(idx, )
widthratio=[1]
if len(idx)>2:
floatContrast=False
elif all(isinstance(element, tuple) for element in idx):
# if idx is supplied, and it is a list/tuple of tuples or lists, we have a multiplot!
tuple_in=idx
if ( any(len(element)>2 for element in tuple_in) ):
# if any of the tuples in idx has more than 2 groups, we turn set floatContrast as False.
floatContrast=False
# Make sure the widthratio of the seperate multiplot corresponds to how
# many groups there are in each one.
widthratio=[]
for i in tuple_in:
widthratio.append(len(i))
else:
raise TypeError("The object passed to `idx` consists of a mixture of single strings and tuples. \
Please make sure that `idx` is either a tuple of column names, or a tuple of tuples for plotting.")
# initialise statfunction
    if statfunction is None:
statfunction=np.mean
# Create list to collect all the contrast DataFrames generated.
contrastList=list()
contrastListNames=list()
# # Calculate the bootstraps according to idx.
# for ix, current_tuple in enumerate(tuple_in):
# bscontrast=list()
# for i in range (1, len(current_tuple)):
# # Note that you start from one. No need to do auto-contrast!
# tempbs=bootstrap_contrast(
# data=data,
# x=x,
# y=y,
# idx=[current_tuple[0], current_tuple[i]],
# statfunction=statfunction,
# smoothboot=smoothboot,
# reps=reps)
# bscontrast.append(tempbs)
# contrastList.append(tempbs)
# contrastListNames.append(current_tuple[i]+' vs. '+current_tuple[0])
# Setting color palette for plotting.
if pal is None:
if 'hue' in kwargs:
colorCol=kwargs['hue']
colGrps=data[colorCol].unique()
nColors=len(colGrps)
else:
colorCol=x
colGrps=data[x].unique()
nColors=len([element for tupl in tuple_in for element in tupl])
plotPal=dict( zip( colGrps, sns.color_palette(n_colors=nColors) ) )
else:
plotPal=pal
# Ensure summaryLine and summaryBar are not displayed together.
if summaryLine is True and summaryBar is True:
summaryBar=True
summaryLine=False
# Turn off summary line if floatContrast is true
if floatContrast:
summaryLine=False
if swarmYlim is None:
# get range of _selected groups_.
u = list()
        for t in tuple_in:  # use tuple_in, which is defined even when idx is None
for i in np.unique(t):
u.append(i)
u = np.unique(u)
tempdat=data[data[x].isin(u)]
swarm_ylim=np.array([np.min(tempdat[y]), np.max(tempdat[y])])
else:
swarm_ylim=np.array([swarmYlim[0],swarmYlim[1]])
if contrastYlim is not None:
contrastYlim=np.array([contrastYlim[0],contrastYlim[1]])
barWidth=barWidth/1000 # Not sure why have to reduce the barwidth by this much!
if showRawData is True:
maxSwarmSpan=0.25
else:
maxSwarmSpan=barWidth
# Expand the ylim in both directions.
## Find half of the range of swarm_ylim.
swarmrange=swarm_ylim[1] -swarm_ylim[0]
pad=0.1*swarmrange
x2=np.array([swarm_ylim[0]-pad, swarm_ylim[1]+pad])
swarm_ylim=x2
# plot params
if axis_title_size is None:
axis_title_size=25
if yticksize is None:
yticksize=18
if xticksize is None:
xticksize=18
# Set clean style
sns.set(style='ticks')
axisTitleParams={'labelsize' : axis_title_size}
xtickParams={'labelsize' : xticksize}
ytickParams={'labelsize' : yticksize}
svgParams={'fonttype' : 'none'}
rc('axes', **axisTitleParams)
rc('xtick', **xtickParams)
rc('ytick', **ytickParams)
rc('svg', **svgParams)
if figsize is None:
if len(tuple_in)>2:
figsize=(12,(12/np.sqrt(2)))
else:
figsize=(8,(8/np.sqrt(2)))
# Initialise figure, taking into account desired figsize.
fig=plt.figure(figsize=figsize)
# Initialise GridSpec based on `tuple_in` shape.
gsMain=gridspec.GridSpec(
1, np.shape(tuple_in)[0],
# 1 row; columns based on number of tuples in tuple.
width_ratios=widthratio,
wspace=0 )
for gsIdx, current_tuple in enumerate(tuple_in):
#### FOR EACH TUPLE IN IDX
plotdat=data[data[x].isin(current_tuple)]
plotdat[x]=plotdat[x].astype("category")
plotdat[x].cat.set_categories(
current_tuple,
ordered=True,
inplace=True)
        plotdat=plotdat.sort_values(by=[x])
# Drop all nans.
plotdat=plotdat.dropna()
# Calculate summaries.
summaries=plotdat.groupby([x],sort=True)[y].apply(statfunction)
if floatContrast is True:
# Use fig.add_subplot instead of plt.Subplot
ax_raw=fig.add_subplot(gsMain[gsIdx],
frame_on=False)
ax_contrast=ax_raw.twinx()
else:
# Create subGridSpec with 2 rows and 1 column.
subGridSpec=gridspec.GridSpecFromSubplotSpec(2, 1,
subplot_spec=gsMain[gsIdx],
wspace=0)
# Use plt.Subplot instead of fig.add_subplot
ax_raw=plt.Subplot(fig,
subGridSpec[0, 0],
frame_on=False)
ax_contrast=plt.Subplot(fig,
subGridSpec[1, 0],
sharex=ax_raw,
frame_on=False)
# Calculate the boostrapped contrast
bscontrast=list()
for i in range (1, len(current_tuple)):
# Note that you start from one. No need to do auto-contrast!
tempbs=bootstrap_contrast(
data=data,
x=x,
y=y,
idx=[current_tuple[0], current_tuple[i]],
statfunction=statfunction,
smoothboot=smoothboot,
reps=reps)
bscontrast.append(tempbs)
contrastList.append(tempbs)
contrastListNames.append(current_tuple[i]+' vs. '+current_tuple[0])
#### PLOT RAW DATA.
if showRawData is True:
# Seaborn swarmplot doc says to set custom ylims first.
ax_raw.set_ylim(swarm_ylim)
sw=sns.swarmplot(
data=plotdat,
x=x, y=y,
order=current_tuple,
ax=ax_raw,
alpha=alpha,
palette=plotPal,
size=rawMarkerSize,
marker=rawMarkerType,
**kwargs)
if summaryBar is True:
bar_raw=sns.barplot(
x=summaries.index.tolist(),
y=summaries.values,
facecolor=summaryBarColor,
ax=ax_raw,
alpha=summaryBarAlpha)
if floatContrast:
# Get horizontal offset values.
maxXBefore=max(sw.collections[0].get_offsets().T[0])
minXAfter=min(sw.collections[1].get_offsets().T[0])
xposAfter=maxXBefore+floatSwarmSpacer
xAfterShift=minXAfter-xposAfter
# shift the swarmplots
offsetSwarmX(sw.collections[1], -xAfterShift)
                ## get swarm with largest span, set as max width of each barplot.
                if summaryBar is True:  # bar_raw only exists when a summary bar was drawn
                    for i, bar in enumerate(bar_raw.patches):
                        x_width=bar.get_x()
                        width=bar.get_width()
                        centre=x_width + (width/2.)
                        if i == 0:
                            bar.set_x(centre-maxSwarmSpan/2.)
                        else:
                            bar.set_x(centre-xAfterShift-maxSwarmSpan/2.)
                        bar.set_width(maxSwarmSpan)
## Set the ticks locations for ax_raw.
ax_raw.xaxis.set_ticks((0, xposAfter))
firstTick=ax_raw.xaxis.get_ticklabels()[0].get_text()
secondTick=ax_raw.xaxis.get_ticklabels()[1].get_text()
ax_raw.set_xticklabels([firstTick,#+' n='+count[firstTick],
secondTick],#+' n='+count[secondTick]],
rotation=tickAngle,
horizontalalignment=tickAlignment)
if summaryLine is True:
for i, m in enumerate(summaries):
ax_raw.plot(
(i -summaryLineWidth,
i + summaryLineWidth), # x-coordinates
(m, m),
color=summaryColour,
linestyle=summaryLineStyle)
if show95CI is True:
sns.barplot(
data=plotdat,
x=x, y=y,
ax=ax_raw,
alpha=0, ci=95)
ax_raw.set_xlabel("")
if floatContrast is False:
fig.add_subplot(ax_raw)
#### PLOT CONTRAST DATA.
if len(current_tuple)==2:
# Plot the CIs on the contrast axes.
plotbootstrap(sw.collections[1],
bslist=tempbs,
ax=ax_contrast,
violinWidth=violinWidth,
violinOffset=violinOffset,
markersize=summaryMarkerSize,
marker=summaryMarkerType,
offset=floatContrast,
color=violinColor,
linewidth=1)
if floatContrast:
# Set reference lines
## First get leftmost limit of left reference group
xtemp, _=np.array(sw.collections[0].get_offsets()).T
leftxlim=xtemp.min()
## Then get leftmost limit of right test group
xtemp, _=np.array(sw.collections[1].get_offsets()).T
rightxlim=xtemp.min()
## zero line
ax_contrast.hlines(0, # y-coordinates
leftxlim, 3.5, # x-coordinates, start and end.
linestyle=contrastZeroLineStyle,
linewidth=0.75,
color=contrastZeroLineColor)
## effect size line
ax_contrast.hlines(tempbs['summary'],
rightxlim, 3.5, # x-coordinates, start and end.
linestyle=contrastEffectSizeLineStyle,
linewidth=0.75,
color=contrastEffectSizeLineColor)
## If the effect size is positive, shift the right axis up.
if float(tempbs['summary'])>0:
rightmin=ax_raw.get_ylim()[0] - float(tempbs['summary'])
rightmax=ax_raw.get_ylim()[1] - float(tempbs['summary'])
## If the effect size is negative, shift the right axis down.
elif float(tempbs['summary'])<0:
rightmin=ax_raw.get_ylim()[0] + float(tempbs['summary'])
rightmax=ax_raw.get_ylim()[1] + float(tempbs['summary'])
## If the effect size is exactly zero, keep the raw limits so that
## rightmin and rightmax are always defined.
else:
rightmin, rightmax=ax_raw.get_ylim()
ax_contrast.set_ylim(rightmin, rightmax)
if gsIdx>0:
ax_contrast.set_ylabel('')
align_yaxis(ax_raw, tempbs['statistic_ref'], ax_contrast, 0.)
else:
# Set bottom axes ybounds
if contrastYlim is not None:
ax_contrast.set_ylim(contrastYlim)
# Set xlims so everything is properly visible!
swarm_xbounds=ax_raw.get_xbound()
ax_contrast.set_xbound(swarm_xbounds[0] -(summaryLineWidth * 1.1),
swarm_xbounds[1] + (summaryLineWidth * 1.1))
else:
# Plot the CIs on the bottom axes.
plotbootstrap_hubspoke(
bslist=bscontrast,
ax=ax_contrast,
violinWidth=violinWidth,
violinOffset=violinOffset,
markersize=summaryMarkerSize,
marker=summaryMarkerType,
linewidth=lineWidth)
if floatContrast is False:
fig.add_subplot(ax_contrast)
if gsIdx>0:
ax_raw.set_ylabel('')
ax_contrast.set_ylabel('')
# Turn contrastList into a pandas DataFrame,
contrastList=pd.DataFrame(contrastList).T
contrastList.columns=contrastListNames
########
axesCount=len(fig.get_axes())
## Loop thru SWARM axes for aesthetic touchups.
for i in range(0, axesCount, 2):
axx=fig.axes[i]
if i!=axesCount-2 and 'hue' in kwargs:
# If this is not the final swarmplot, remove the hue legend.
axx.legend().set_visible(False)
if floatContrast is False:
axx.xaxis.set_visible(False)
sns.despine(ax=axx, trim=True, bottom=False, left=False)
else:
sns.despine(ax=axx, trim=True, bottom=True, left=True)
if showAllYAxes is False:
if i in range(2, axesCount):
axx.yaxis.set_visible(showAllYAxes)
else:
# Draw back the lines for the relevant y-axes.
# Not entirely sure why I have to do this.
drawback_y(axx)
# Add zero reference line for swarmplots with bars.
if summaryBar is True:
axx.add_artist(Line2D(
(axx.xaxis.get_view_interval()[0],
axx.xaxis.get_view_interval()[1]),
(0,0),
color='black', linewidth=0.75
)
)
# I don't know why the swarm axes control the contrast axes ticks...
if showGroupCount:
count=data.groupby(x).count()[y]
newticks=list()
for ix, t in enumerate(axx.xaxis.get_ticklabels()):
t_text=t.get_text()
nt=t_text+' n='+str(count[t_text])
newticks.append(nt)
axx.xaxis.set_ticklabels(newticks)
if legend is False:
axx.legend().set_visible(False)
else:
if i==axesCount-2: # the last (rightmost) swarm axes.
axx.legend(loc='upper right',
bbox_to_anchor=(1.1,1.0),
fontsize=legendFontSize,
**legendFontProps)
## Loop thru the CONTRAST axes and perform aesthetic touch-ups.
## Get the y-limits:
for j,i in enumerate(range(1, axesCount, 2)):
axx=fig.get_axes()[i]
if floatContrast is False:
xleft, xright=axx.xaxis.get_view_interval()
# Draw zero reference line.
axx.hlines(y=0,
xmin=xleft-1,
xmax=xright+1,
linestyle=contrastZeroLineStyle,
linewidth=0.75,
color=contrastZeroLineColor)
# reset view interval.
axx.set_xlim(xleft, xright)
# # Draw back x-axis lines connecting ticks.
# drawback_x(axx)
if showAllYAxes is False:
if i in range(2, axesCount):
axx.yaxis.set_visible(False)
else:
# Draw back the lines for the relevant y-axes.
# Not entirely sure why I have to do this.
drawback_y(axx)
sns.despine(ax=axx,
top=True, right=True,
left=False, bottom=False,
trim=True)
# Rotate tick labels.
rotateTicks(axx,tickAngle,tickAlignment)
else:
# Re-draw the floating axis to the correct limits.
# (.ix is long deprecated; use a label lookup for the row and a
# positional lookup for the column instead.)
lower=np.min(contrastList.loc['diffarray'].iloc[j])
upper=np.max(contrastList.loc['diffarray'].iloc[j])
meandiff=contrastList.loc['summary'].iloc[j]
## Make sure we have zero in the limits.
if lower>0:
lower=0.
if upper<0:
upper=0.
## Get the tick interval from the left y-axis.
leftticks=fig.get_axes()[i-1].get_yticks()
tickstep=leftticks[1] -leftticks[0]
## First re-draw of axis with new tick interval
axx.yaxis.set_major_locator(MultipleLocator(base=tickstep))
newticks1=axx.get_yticks()
## Obtain major ticks that comfortably encompass lower and upper.
newticks2=list()
for b in newticks1:
if (b >= lower and b <= upper):
# if the tick lies within lower and upper, take it.
newticks2.append(b)
# if the meandiff falls outside of the newticks2 set, add a tick in the right direction.
if np.max(newticks2)<meandiff:
ind=np.where(newticks1 == np.max(newticks2))[0][0] # find out the max tick index in newticks1.
newticks2.append( newticks1[ind+1] )
elif meandiff<np.min(newticks2):
ind=np.where(newticks1 == np.min(newticks2))[0][0] # find out the min tick index in newticks1.
newticks2.append( newticks1[ind-1] )
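# Worked example: with newticks1=[0,5,10,15], lower=-2, upper=11 and
# meandiff=11, newticks2 starts as [0,5,10] and 15 is appended so the
# mean-difference marker always lies within the plotted ticks.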
newticks2=np.array(newticks2)
newticks2.sort()
## Second re-draw of axis to shrink it to desired limits.
axx.yaxis.set_major_locator(FixedLocator(locs=newticks2))
## Despine the axes.
sns.despine(ax=axx, trim=True,
bottom=False, right=False,
left=True, top=True)
# Normalize bottom/right Contrast axes to each other for Cummings hub-and-spoke plots.
if (axesCount>2 and
contrastShareY is True and
floatContrast is False):
# Set contrast ylim as max ticks of leftmost swarm axes.
if contrastYlim is None:
lower=list()
upper=list()
for c in range(0,len(contrastList.columns)):
lower.append( np.min(contrastList.loc['bca_ci_low'].iloc[c]) )
upper.append( np.max(contrastList.loc['bca_ci_high'].iloc[c]) )
lower=np.min(lower)
upper=np.max(upper)
else:
lower=contrastYlim[0]
upper=contrastYlim[1]
# Pass the computed (or user-supplied) bounds to the normalizer.
normalizeContrastY(fig,
contrast_ylim = (lower, upper),
show_all_yaxes = showAllYAxes)
# if (axesCount==2 and
# floatContrast is False):
# drawback_x(fig.get_axes()[1])
# drawback_y(fig.get_axes()[1])
# if swarmShareY is False:
# for i in range(0, axesCount, 2):
# drawback_y(fig.get_axes()[i])
# if contrastShareY is False:
# for i in range(1, axesCount, 2):
# if floatContrast is True:
# sns.despine(ax=fig.get_axes()[i],
# top=True, right=False, left=True, bottom=True,
# trim=True)
# else:
# sns.despine(ax=fig.get_axes()[i], trim=True)
# Zero gaps between plots on the same row, if floatContrast is False
if (floatContrast is False and showAllYAxes is False):
gsMain.update(wspace=0.)
else:
# Tight Layout!
gsMain.tight_layout(fig)
# And we're all done.
rcdefaults() # restore matplotlib defaults.
sns.set() # restore seaborn defaults.
return fig, contrastList
|
{"hexsha": "1b0eaa19094dac05c6fca80ba4ce893f2cc74d7d", "size": 23194, "ext": "py", "lang": "Python", "max_stars_repo_path": "bootstrap_contrast/old__/sandbox.py", "max_stars_repo_name": "josesho/bootstrap-contrast", "max_stars_repo_head_hexsha": "94fa42a5dc4622be016e2e522d1f07b19ba23a8d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2017-12-19T12:55:03.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-30T14:14:08.000Z", "max_issues_repo_path": "bootstrap_contrast/old__/sandbox.py", "max_issues_repo_name": "josesho/bootstrap-contrast", "max_issues_repo_head_hexsha": "94fa42a5dc4622be016e2e522d1f07b19ba23a8d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bootstrap_contrast/old__/sandbox.py", "max_forks_repo_name": "josesho/bootstrap-contrast", "max_forks_repo_head_hexsha": "94fa42a5dc4622be016e2e522d1f07b19ba23a8d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2017-07-14T08:04:30.000Z", "max_forks_repo_forks_event_max_datetime": "2017-11-29T04:53:56.000Z", "avg_line_length": 36.0715396579, "max_line_length": 123, "alphanum_fraction": 0.5606622402, "include": true, "reason": "import numpy,from numpy,from scipy", "num_tokens": 5254}
|
import ContinuumArrays: apply, MulQuasiMatrix
@testset "$(rpad("Bernstein Basis Tests",80))" begin
l = Bernstein(2)
d = Derivative(axes(l,1))
@test apply(*,d,l) isa BernsteinDerivative
@test d*l isa BernsteinDerivative
@test l' isa BernsteinDerivative
@test basis(l) == l.b
@test nbasis(l) == 2
@test eachindex(l) == 0:1
@test order(l) == 2
@test degree(l) == 1
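# Sanity checks consistent with the values above: a Bernstein basis of
# order p has p basis functions of polynomial degree p-1.
@test nbasis(l) == order(l)
@test degree(l) == order(l) - 1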
l1 = Bernstein(Float64, 2)
l2 = Bernstein(Integer, 2)
l3 = Bernstein(Float64, 3)
@test hash(l) == hash(l1)
@test hash(l) == hash(l2)
@test hash(l) != hash(l3)
@test l == l1
@test l == l2
@test l != l3
@test isequal(l, l1)
@test !isequal(l, l2)
@test !isequal(l, l3)
y = rand(5)
z1 = parent([ l[y[i], j] for i in eachindex(y), j in eachindex(l)])
z2 = hcat([ l[y[i], :] for i in eachindex(y)]...)'
z3 = hcat([ l[y, j] for j in eachindex(l)]...)
z4 = parent( l[y, :] )
@test z1 == z2 == z3 == z4
z1 = parent([ (d*l)[y[i], j] for i in eachindex(y), j in eachindex(l)])
z2 = hcat([ (d*l)[y[i], :] for i in eachindex(y)]...)'
z3 = hcat([ (d*l)[y, j] for j in eachindex(l)]...)
z4 = parent( (d*l)[y, :] )
@test z1 == z2 == z3 == z4
@test l(0.0, 0) == 1.0
@test l(0.5, 0) == 0.5
@test l(1.0, 0) == 0.0
@test l(0.0, 1) == 0.0
@test l(0.5, 1) == 0.5
@test l(1.0, 1) == 1.0
@test l[0.0, 0] == 1.0
@test l[0.5, 0] == 0.5
@test l[1.0, 0] == 0.0
@test l[0.0, 1] == 0.0
@test l[0.5, 1] == 0.5
@test l[1.0, 1] == 1.0
@test l[0, 0] == +1.0
@test l[1, 0] == 0.0
@test l[2, 0] == -1.0
@test l[0, 1] == 0.0
@test l[1, 1] == 1.0
@test l[2, 1] == 2.0
@test (d*l)[0.0, 0] == -1.0
@test (d*l)[0.5, 0] == -1.0
@test (d*l)[1.0, 0] == -1.0
@test (d*l)[0.0, 1] == +1.0
@test (d*l)[0.5, 1] == +1.0
@test (d*l)[1.0, 1] == +1.0
@test (d*l)[0, 0] == -1.0
@test (d*l)[1, 0] == -1.0
@test (d*l)[2, 0] == -1.0
@test (d*l)[0, 1] == +1.0
@test (d*l)[1, 1] == +1.0
@test (d*l)[2, 1] == +1.0
end
|
{"hexsha": "77a4bbd137fb6eeca6544503fb6586318df1ad01", "size": 2125, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/bernstein_tests.jl", "max_stars_repo_name": "JuliaGNI/CompactBasisFunctions.jl", "max_stars_repo_head_hexsha": "5a76714aca25c399d0856643aff3683d8e0f103a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/bernstein_tests.jl", "max_issues_repo_name": "JuliaGNI/CompactBasisFunctions.jl", "max_issues_repo_head_hexsha": "5a76714aca25c399d0856643aff3683d8e0f103a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-12-11T18:50:12.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-09T16:34:58.000Z", "max_forks_repo_path": "test/bernstein_tests.jl", "max_forks_repo_name": "JuliaGNI/CompactBasisFunctions.jl", "max_forks_repo_head_hexsha": "5a76714aca25c399d0856643aff3683d8e0f103a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.1354166667, "max_line_length": 75, "alphanum_fraction": 0.4517647059, "num_tokens": 1012}
|
#! /usr/bin/env python3
""" Full-Monty Python3 and the Holy Grail """
__copyright__ = "Copyright (C) 2009, Innovations Anonymous"
__version__ = "4.0"
__license__ = "Public Domain"
__status__ = "Development"
__author__ = "Brahmjot Singh"
__maintainer__ = "Brahmjot Singh"
__email__ = "InnovAnon-Inc@protonmail.com"
__contact__ = "(801) 448-7855"
__credits__ = [
"https://stackoverflow.com/questions/70936788/out-of-core-external-memory-combinatorics-in-python",
]
import ast
#from dask import delayed
from itertools import product as ip
#from joblib import delayed, Parallel
from collections.abc import Iterable
#from pprint import pprint
from random import randrange, choice, random, getrandbits
from string import ascii_letters, digits, punctuation
from types import GeneratorType
#import numpy as np
#from tatsu.ast import AST
#from tatsu.objectmodel import Node
#from tatsu.semantics import ModelBuilderSemantics
#import tatsu
#from tatsu.walkers import NodeWalker
from cg_abs import CGAbs
#from cg_type import CGType
#from cg import CG
#np.random.seed(1)
from functools import wraps
from dask import bag, delayed
# TODO
#def product(*args, repeat=1):
# # product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
# # product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111
# pools = [tuple(pool) for pool in args] * repeat
# result = [[]]
# for pool in pools:
# result = [x+[y] for x in result for y in pool]
# for prod in result:
# yield tuple(prod)
#def product(*args, repeat=1):
# r = ip(*args, repeat=repeat)
# return bag.from_sequence(r)
def product(*funcs, repeat=None):
"""Lazy cartesian product over zero-argument generator factories.

Unlike itertools.product, the inputs are callables that return fresh
generators, so nothing has to be materialized up front (out-of-core).
"""
__credits__ = [
"https://stackoverflow.com/questions/70936788/out-of-core-external-memory-combinatorics-in-python",
]
if not funcs:
yield ()
return
if repeat is not None:
funcs *= repeat
func, *rest = funcs
for val in func():
for res in product(*rest):
yield (val, ) + res
from functools import partial
#values = product(partial(gen1, arg1, arg2), partial(gen2, arg1))
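# Minimal usage sketch (hypothetical factory, not part of this module):
# each argument is a zero-argument callable returning a *fresh* generator,
# so the product can re-iterate its inputs without caching them.
#
# def bits(): yield from (0, 1)
# list(product(bits, bits)) # -> [(0, 0), (0, 1), (1, 0), (1, 1)]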
#root = dbopen('test.fs')
def out_of_core(func):
@wraps(func)
def eager(*args, **kwargs):
#print(func.__name__ + " was called")
#r = delayed(func)(*args, **kwargs)
#return bag.from_delayed(r)
#root['A'] = A = ZBigArray((10,), object)
#transaction.commit()
#return A
#r = func(*args, **kwargs)
#return bag.from_sequence(r)
return func(*args, **kwargs)
return eager
def trace(func):
@wraps(func)
def log(*args, **kwargs):
#i = '\t' *
#print("enter %s(%s, %s)" % (func, args, kwargs,), flush=True)
#print("enter %s" % (func.__name__,), flush=True)
r = func(*args, **kwargs)
#print("leave %s(%s, %s)" % (func, args, kwargs,), flush=True)
return r
return log
class CG(object):
def __init__(self, max_rd=3):
self.max_rd = max_rd
@trace
def build_module_ast(self):
#pprint("build_module()")
#A = delayed(self.make_Module)()
A = self.make_Module()
#pprint("build_module A: %s" % (A,))
for a in A:#.compute():
assert not isinstance(a, GeneratorType)
#pprint("build_module a: %s" % (a,))
a = ast.fix_missing_locations(a)
#pprint("build_module a: %s" % (a,))
yield a
@trace
def compile_module(self):
A = self.build_module_ast()
for a in A:
assert a is not None
try:
b = compile(a, filename="", mode='exec', optimize=2)
#pprint("compile_module b: %s" % (b,))
yield a, b
#except(SyntaxError, ValueError): pass
#except TypeError as e: #pprint("TypeError: %s %s %s" % (e, a, b,))
except SyntaxError: pass
except ValueError as e:
#pprint("ValueError: %s %s %s" % (e, a, b,))
yield a, None
@trace
def exec_module(self):
A = self.compile_module()
for a, b in A:
assert a is not None
if b is None: continue # yield a, b, None
try:
# exec() always returns None, so c is just a success marker:
# reaching the yield means the module body ran without raising.
c = exec(b)
#pprint("exec_module b: %s" % (b,))
yield a, b, c
except Exception as e:
#pprint("Error: %s %s %s %s" % (e, a, b, c,))
yield a, b, None
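# Hypothetical driver sketch (kept as a comment so the module stays
# import-safe): enumerate a few generated modules and dump the survivors.
#
# cg = CG(max_rd=2)
# for n, (tree, code, result) in enumerate(cg.exec_module()):
# print(n, ast.dump(tree))
# if n == 4: break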
@trace
def build_expression_ast(self):
#pprint("build_expression()")
A = self.make_Expression()
for a in A:
assert not isinstance(a, GeneratorType)
#pprint("build_expression a: %s" % (a,))
a = ast.fix_missing_locations(a)
#pprint("build_expression a: %s" % (a,))
yield a
@trace
def compile_expression(self):
A = self.build_expression_ast()
for a in A:
try:
a = compile(a, filename="", mode='eval', optimize=2)
#pprint("compile_expression a: %s" % (a,))
yield a
except SyntaxError: pass
@trace
def exec_expression(self):
A = self.compile_expression()
for a in A:
try:
b = eval(a)
#pprint("exec_expression b: %s" % (a,))
yield a, b
except: pass
@out_of_core
@trace
def choice(self, C):
#pprint("choice(C=%s)" % (C,))
# TODO
for c in C:
#pprint("choice c: %s" % (c,))
yield c
#@delayed
@out_of_core
@trace
def make_star(self, f, d):
#pprint("make_star(f=%s)" % (f,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
N = 10 # TODO
S = []
#yield S
#yield None
yield []
for n in range(N):
#S.append(f(d+1)) # f() -> GeneratorType
#S.append(f(d)) # f() -> GeneratorType
S.append(partial(f, d)) # f() -> GeneratorType
#yield S
# TODO
yield from product(*S)
#yield from delayed(product)(*S)
#for k in product(*S):
# assert not isinstance(k, GeneratorType)
# yield k
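# Example stream for a factory f whose generator yields 'a' then 'b':
# [], ('a',), ('b',), ('a','a'), ('a','b'), ('b','a'), ('b','b'), ...
# i.e. the empty repetition first, then tuples of every length up to N.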
@out_of_core
@trace
def make_optional(self, f, d):
#pprint("make_optional(f=%s)" % (f,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield None
#yield []
#yield [f(d+1)] # TODO from?
#yield [f(d)] # TODO from?
yield from f(d)
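# Example: for a factory f yielding 'a' then 'b', make_optional yields
# None, 'a', 'b' -- the "absent" case first, then every concrete value.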
@out_of_core
@trace
def make_mod(self, d=0):
#pprint("make_mod(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
choices = [
self.make_Module,
self.make_Interactive,
self.make_Expression,
self.make_FunctionType,
]
for c in self.choice(choices):
assert not isinstance(c, GeneratorType)
#pprint("make_mod c: %s" % (c,), indent=d)
yield from c(d)
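# make_mod mirrors the `mod` rule of CPython's ASDL grammar: Module,
# Interactive, Expression and FunctionType are the four top-level roots.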
#@delayed
@out_of_core
@trace
def make_Module(self, d=0):
#pprint("make_Module(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#body = self.make_star(self.make_stmt, d)
#type_ignores = self.make_star(self.make_type_ignore, d)
body = partial(self.make_star, self.make_stmt, d)
type_ignores = partial(self.make_star, self.make_type_ignore, d)
for b, ti in product(body, type_ignores):
assert not isinstance(b, GeneratorType)
assert not isinstance(ti, GeneratorType)
assert isinstance(b, Iterable)
#pprint("make_Module b: %s, ti: %s" % (b, ti,), indent=d)
assert len(b) == 0 or not isinstance(b[0], GeneratorType)
yield ast.Module(body=list(b), type_ignores=list(ti))
@out_of_core
@trace
def make_Interactive(self, d=0):
#pprint("make_Interactive(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
body = self.make_star(self.make_stmt, d)
for b in body:
assert not isinstance(b, GeneratorType)
#pprint("make_Interactive b: %s" % (b,), indent=d)
yield ast.Interactive(body=list(b))
@out_of_core
@trace
def make_Expression(self, d=0):
#pprint("make_Expression(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
# ast.Expression takes a single expression node as its body,
# not a list of statements.
body = self.make_expr(d)
for b in body:
assert not isinstance(b, GeneratorType)
#pprint("make_Expression b: %s" % (b,), indent=d)
yield ast.Expression(body=b)
@out_of_core
@trace
def make_FunctionType(self, d=0):
#pprint("make_FunctionType(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#argtypes = self.make_star(self.make_expr, d)
#returns = self.make_expr(d) # TODO
argtypes = partial(self.make_star, self.make_expr, d)
returns = partial(self.make_expr, d) # TODO
for a, r in product(argtypes, returns):
assert not isinstance(a, GeneratorType)
assert not isinstance(r, GeneratorType)
#pprint("make_FunctionType a: %s, r: %s" % (a, r,), indent=d)
yield ast.FunctionType(argtypes=a, returns=r)
@out_of_core
@trace
def make_stmt(self, d):
#pprint("make_stmt(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
choices = [
self.make_FunctionDef,
self.make_AsyncFunctionDef,
self.make_ClassDef,
self.make_Return,
self.make_Delete,
self.make_Assign,
self.make_AugAssign,
self.make_AnnAssign,
self.make_For,
self.make_AsyncFor,
self.make_While,
self.make_If,
self.make_With,
self.make_AsyncWith,
self.make_Match,
self.make_Raise,
self.make_Try,
self.make_Assert,
self.make_Import,
self.make_ImportFrom,
self.make_Global,
self.make_Nonlocal,
self.make_Expr,
self.make_Pass,
self.make_Break,
self.make_Continue,
]
for c in self.choice(choices):
assert not isinstance(c, GeneratorType)
#pprint("make_stmt c: %s" % (c,), indent=d)
#for k in c(d):
# assert not isinstance(k, GeneratorType)
# #pprint("make_stmt k: %s" % (k,), indent=d)
# yield k
yield from c(d)
@out_of_core
@trace
def make_FunctionDef(self, d):
#pprint("make_FunctionDef(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#name = self.make_identifier(d)
#args = self.make_arguments(d)
#body = self.make_star(self.make_stmt, d)
#decorator_list = self.make_star(self.make_expr, d)
#returns = self.make_optional(self.make_expr, d)
#type_comment = self.make_optional(self.make_string, d)
name = partial(self.make_identifier, d)
args = partial(self.make_arguments, d)
body = partial(self.make_star, self.make_stmt, d)
decorator_list = partial(self.make_star, self.make_expr, d)
returns = partial(self.make_optional, self.make_expr, d)
type_comment = partial(self.make_optional, self.make_string, d)
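# NOTE: the six-way product below enumerates every field combination
# lazily; only max_rd bounds the recursion, so the space explodes fast.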
for n, a, b, dl, r, tc in product(name, args, body, decorator_list, returns, type_comment):
assert not isinstance(n, GeneratorType)
assert not isinstance(a, GeneratorType)
assert not isinstance(b, GeneratorType)
assert not isinstance(dl, GeneratorType)
assert not isinstance(r, GeneratorType)
assert not isinstance(tc, GeneratorType)
#pprint("make_FunctionDef n: %s, a: %s, b: %s, dl: %s, r: %s, tc: %s" % (n, a, b, dl, r, tc,), indent=d)
yield ast.FunctionDef(name=n, args=a, body=list(b), decorator_list=dl, returns=r, type_comment=tc)
@out_of_core
@trace
def make_AsyncFunctionDef(self, d):
#pprint("make_AsyncFunctionDef(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#name = self.make_identifier(d)
#args = self.make_arguments(d)
#body = self.make_star(self.make_stmt, d)
#decorator_list = self.make_star(self.make_expr, d)
#returns = self.make_optional(self.make_expr, d)
#type_comment = self.make_optional(self.make_string, d)
name = partial(self.make_identifier, d)
args = partial(self.make_arguments, d)
body = partial(self.make_star, self.make_stmt, d)
decorator_list = partial(self.make_star, self.make_expr, d)
returns = partial(self.make_optional, self.make_expr, d)
type_comment = partial(self.make_optional, self.make_string, d)
for n, a, b, dl, r, tc in product(name, args, body, decorator_list, returns, type_comment):
assert not isinstance(n, GeneratorType)
assert not isinstance(a, GeneratorType)
assert not isinstance(b, GeneratorType)
assert not isinstance(dl, GeneratorType)
assert not isinstance(r, GeneratorType)
assert not isinstance(tc, GeneratorType)
#pprint("make_AsyncFunctionDef n: %s, a: %s, b: %s, dl: %s, r: %s, tc: %s" % (n, a, b, dl, r, tc,), indent=d)
yield ast.AsyncFunctionDef(name=n, args=a, body=list(b), decorator_list=dl, returns=r, type_comment=tc)
@out_of_core
@trace
def make_ClassDef(self, d):
#pprint("make_ClassDef(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#name = self.make_identifier(d)
#bases = self.make_star(self.make_expr, d)
#keywords = self.make_star(self.make_keyword, d)
#body = self.make_star(self.make_stmt, d)
#decorator_list = self.make_star(self.make_expr, d)
name = partial(self.make_identifier, d)
bases = partial(self.make_star, self.make_expr, d)
keywords = partial(self.make_star, self.make_keyword, d)
body = partial(self.make_star, self.make_stmt, d)
decorator_list = partial(self.make_star, self.make_expr, d)
for n, ba, k, bo, dl in product(name, bases, keywords, body, decorator_list):
assert not isinstance(n, GeneratorType)
assert not isinstance(ba, GeneratorType)
assert not isinstance(k, GeneratorType)
assert not isinstance(bo, GeneratorType)
assert not isinstance(dl, GeneratorType)
#pprint("make_ClassDef n: %s, ba: %s, k: %s, bo: %s, dl: %s" % (n, ba, k, bo, dl,), indent=d)
yield ast.ClassDef(name=n, bases=ba, keywords=k, body=list(bo), decorator_list=dl)
@out_of_core
@trace
def make_Return(self, d):
#pprint("make_Return(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
value = self.make_optional(self.make_expr, d)
#value = partial(self.make_optional, self.make_expr, d)
for v in value:
assert not isinstance(v, GeneratorType)
#pprint("make_Return v: %s" % (v,), indent=d)
yield ast.Return(value=v)
@out_of_core
@trace
def make_Delete(self, d):
#pprint("make_Delete(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
targets = self.make_star(self.make_expr, d)
#targets = partial(self.make_star, self.make_expr, d)
for t in targets:
assert not isinstance(t, GeneratorType)
#pprint("make_Delete t: %s" % (t,), indent=d)
yield ast.Delete(targets=t)
@out_of_core
@trace
def make_Assign(self, d):
#pprint("make_Assign(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
targets = partial(self.make_star, self.make_expr, d)
value = partial(self.make_expr, d)
type_comment = partial(self.make_optional, self.make_string, d)
for t, v, tc in product(targets, value, type_comment):
assert not isinstance(t, GeneratorType)
assert not isinstance(v, GeneratorType)
assert not isinstance(tc, GeneratorType)
#pprint("make_Assign t: %s, v: %s, tc: %s" % (t, v, tc,), indent=d)
yield ast.Assign(targets=t, value=v, type_comment=tc)
@out_of_core
@trace
def make_AugAssign(self, d):
#pprint("make_AugAssign(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#target = self.make_expr(d)
#op = self.make_operator(d)
#value = self.make_expr(d)
target = partial(self.make_expr, d)
op = partial(self.make_operator, d)
value = partial(self.make_expr, d)
for t, o, v in product(target, op, value):
assert not isinstance(t, GeneratorType)
assert not isinstance(o, GeneratorType)
assert not isinstance(v, GeneratorType)
#pprint("make_AugAssign t: %s, o: %s, v: %s" % (t, o, v,), indent=d)
yield ast.AugAssign(target=t, op=o, value=v)
@out_of_core
@trace
def make_AnnAssign(self, d):
#pprint("make_AnnAssign(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#target = self.make_expr(d)
#annotation = self.make_expr(d)
#value = self.make_optional(self.make_expr, d)
#simple = self.make_int(d)
target = partial(self.make_expr, d)
annotation = partial(self.make_expr, d)
value = partial(self.make_optional, self.make_expr, d)
simple = partial(self.make_int, d)
for t, a, v, s in product(target, annotation, value, simple):
assert not isinstance(t, GeneratorType)
assert not isinstance(a, GeneratorType)
assert not isinstance(v, GeneratorType)
assert not isinstance(s, GeneratorType)
#pprint("make_AnnAssign t: %s, a: %s, v: %s, s: %s" % (t, a, v, s,), indent=d)
yield ast.AnnAssign(target=t, annotation=a, value=v, simple=s)
@out_of_core
@trace
def make_For(self, d):
#pprint("make_For(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#target = self.make_expr(d)
#iter_ = self.make_expr(d)
#body = self.make_star(self.make_stmt, d)
#orelse = self.make_star(self.make_stmt, d)
#type_comment = self.make_optional(self.make_string, d)
target = partial(self.make_expr, d)
iter_ = partial(self.make_expr, d)
body = partial(self.make_star, self.make_stmt, d)
orelse = partial(self.make_star, self.make_stmt, d)
type_comment = partial(self.make_optional, self.make_string, d)
for t, i, b, o, tc in product(target, iter_, body, orelse, type_comment):
assert not isinstance(t, GeneratorType)
assert not isinstance(i, GeneratorType)
assert not isinstance(b, GeneratorType)
assert not isinstance(o, GeneratorType)
assert not isinstance(tc, GeneratorType)
#pprint("make_For t: %s, i: %s, b: %s, o: %s, tc: %s" % (t, i, b, o, tc,), indent=d)
yield ast.For(target=t, iter=i, body=list(b), orelse=list(o), type_comment=tc)
@out_of_core
@trace
def make_AsyncFor(self, d):
#pprint("make_AsyncFor(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#target = self.make_expr(d)
#iter_ = self.make_expr(d)
#body = self.make_star(self.make_stmt, d)
#orelse = self.make_star(self.make_stmt, d)
#type_comment = self.make_optional(self.make_string, d)
target = partial(self.make_expr, d)
iter_ = partial(self.make_expr, d)
body = partial(self.make_star, self.make_stmt, d)
orelse = partial(self.make_star, self.make_stmt, d)
type_comment = partial(self.make_optional, self.make_string, d)
for t, i, b, o, tc in product(target, iter_, body, orelse, type_comment):
assert not isinstance(t, GeneratorType)
assert not isinstance(i, GeneratorType)
assert not isinstance(b, GeneratorType)
assert not isinstance(o, GeneratorType)
assert not isinstance(tc, GeneratorType)
#pprint("make_AsyncFor t: %s, i: %s, b: %s, o: %s, tc: %s" % (t, i, b, o, tc,), indent=d)
yield ast.AsyncFor(target=t, iter=i, body=list(b), orelse=list(o), type_comment=tc)
@out_of_core
@trace
def make_While(self, d):
#pprint("make_While(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#test = self.make_expr(d)
#body = self.make_star(self.make_stmt, d)
#orelse = self.make_star(self.make_stmt, d)
test = partial(self.make_expr, d)
body = partial(self.make_star, self.make_stmt, d)
orelse = partial(self.make_star, self.make_stmt, d)
for t, b, o in product(test, body, orelse):
assert not isinstance(t, GeneratorType)
assert not isinstance(b, GeneratorType)
assert not isinstance(o, GeneratorType)
#pprint("make_While t: %s, b: %s, o: %s" % (t, b, o,), indent=d)
yield ast.While(test=t, body=list(b), orelse=list(o))
@out_of_core
@trace
def make_If(self, d):
#pprint("make_If(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#test = self.make_expr(d)
#body = self.make_star(self.make_stmt, d)
#orelse = self.make_star(self.make_stmt, d)
test = partial(self.make_expr, d)
body = partial(self.make_star, self.make_stmt, d)
orelse = partial(self.make_star, self.make_stmt, d)
for t, b, o in product(test, body, orelse):
assert not isinstance(t, GeneratorType)
assert not isinstance(b, GeneratorType)
assert not isinstance(o, GeneratorType)
#pprint("make_If t: %s, b: %s, o: %s" % (t, b, o,), indent=d)
yield ast.If(test=t, body=list(b), orelse=list(o))
@out_of_core
@trace
def make_With(self, d):
#pprint("make_With(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#items = self.make_star(self.make_withitem, d)
#body = self.make_star(self.make_stmt, d)
#type_comment = self.make_optional(self.make_string, d)
items = partial(self.make_star, self.make_withitem, d)
body = partial(self.make_star, self.make_stmt, d)
type_comment = partial(self.make_optional, self.make_string, d)
for i, b, tc in product(items, body, type_comment):
assert not isinstance(i, GeneratorType)
assert not isinstance(b, GeneratorType)
assert not isinstance(tc, GeneratorType)
#pprint("make_With i: %s, b: %s, tc: %s" % (i, b, tc,), indent=d)
yield ast.With(items=i, body=list(b), type_comment=tc)
@out_of_core
@trace
def make_AsyncWith(self, d):
#pprint("make_AsyncWith(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#items = self.make_star(self.make_withitem, d)
#body = self.make_star(self.make_stmt, d)
#type_comment = self.make_optional(self.make_string, d)
items = partial(self.make_star, self.make_withitem, d)
body = partial(self.make_star, self.make_stmt, d)
type_comment = partial(self.make_optional, self.make_string, d)
for i, b, tc in product(items, body, type_comment):
assert not isinstance(i, GeneratorType)
assert not isinstance(b, GeneratorType)
assert not isinstance(tc, GeneratorType)
#pprint("make_AsyncWith i: %s, b: %s, tc: %s" % (i, b, tc,), indent=d)
yield ast.AsyncWith(items=i, body=list(b), type_comment=tc)
@out_of_core
@trace
def make_Match(self, d):
#pprint("make_Match(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#subject = self.make_expr(d)
#cases = self.make_star(self.make_match_case, d)
subject = partial(self.make_expr, d)
cases = partial(self.make_star, self.make_match_case, d)
for s, c in product(subject, cases):
assert not isinstance(s, GeneratorType)
assert not isinstance(c, GeneratorType)
#pprint("make_Match s: %s, c: %s" % (s, c,), indent=d)
yield ast.Match(subject=s, cases=c)
@out_of_core
@trace
def make_Raise(self, d):
#pprint("make_Raise(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#exc = self.make_optional(self.make_expr, d)
#cause = self.make_optional(self.make_expr, d)
exc = partial(self.make_optional, self.make_expr, d)
cause = partial(self.make_optional, self.make_expr, d)
for e, c in product(exc, cause):
assert not isinstance(e, GeneratorType)
assert not isinstance(c, GeneratorType)
#pprint("make_Raise e: %s, c: %s" % (e, c,), indent=d)
yield ast.Raise(exc=e, cause=c)
@out_of_core
@trace
def make_Try(self, d):
#pprint("make_Try(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#body = self.make_star(self.make_stmt, d)
#handlers = self.make_star(self.make_excepthandler, d)
#orelse = self.make_star(self.make_stmt, d)
#finalbody = self.make_star(self.make_stmt, d)
body = partial(self.make_star, self.make_stmt, d)
handlers = partial(self.make_star, self.make_excepthandler, d)
orelse = partial(self.make_star, self.make_stmt, d)
finalbody = partial(self.make_star, self.make_stmt, d)
for b, h, o, f in product(body, handlers, orelse, finalbody):
assert not isinstance(b, GeneratorType)
assert not isinstance(h, GeneratorType)
assert not isinstance(o, GeneratorType)
assert not isinstance(f, GeneratorType)
#pprint("make_Try b: %s, h: %s, o: %s, f: %s" % (b, h, o, f,), indent=d)
yield ast.Try(body=list(b), handlers=h, orelse=list(o), finalbody=list(f))
@out_of_core
@trace
def make_Assert(self, d):
#pprint("make_Assert(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#test = self.make_expr(d)
#msg = self.make_optional(self.make_expr, d)
test = partial(self.make_expr, d)
msg = partial(self.make_optional, self.make_expr, d)
for t, m in product(test, msg):
assert not isinstance(t, GeneratorType)
assert not isinstance(m, GeneratorType)
#pprint("make_Assert t: %s, m: %s" % (t, m,), indent=d)
yield ast.Assert(test=t, msg=m)
@out_of_core
@trace
def make_Import(self, d):
#pprint("make_Import(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
names = self.make_star(self.make_alias, d)
for n in names:
assert not isinstance(n, GeneratorType)
#pprint("make_Import n: %s" % (n,), indent=d)
yield ast.Import(names=list(n))
@out_of_core
@trace
def make_ImportFrom(self, d):
#pprint("make_ImportFrom(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#module = self.make_optional(self.make_identifier, d)
#names = self.make_star(self.make_alias, d)
#level = self.make_optional(self.make_int, d)
module = partial(self.make_optional, self.make_identifier, d)
names = partial(self.make_star, self.make_alias, d)
level = partial(self.make_optional, self.make_int, d)
for m, n, l in product(module, names, level):
assert not isinstance(m, GeneratorType)
assert not isinstance(n, GeneratorType)
assert not isinstance(l, GeneratorType)
#pprint("make_ImportFrom m: %s, n: %s, l: %s" % (m, n, l,), indent=d)
yield ast.ImportFrom(module=m, names=list(n), level=l)
@out_of_core
@trace
def make_Global(self, d):
#pprint("make_Global(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
names = self.make_star(self.make_identifier, d)
for n in names:
assert not isinstance(n, GeneratorType)
#pprint("make_Global n: %s" % (n,), indent=d)
yield ast.Global(names=list(n))
@out_of_core
@trace
def make_Nonlocal(self, d):
#pprint("make_Nonlocal(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
names = self.make_star(self.make_identifier, d)
for n in names:
assert not isinstance(n, GeneratorType)
#pprint("make_Nonlocal n: %s" % (n,), indent=d)
yield ast.Nonlocal(names=list(n))
@out_of_core
@trace
def make_Expr(self, d):
#pprint("make_Expr(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
value = self.make_expr(d)
for v in value:
assert not isinstance(v, GeneratorType)
#pprint("make_Expr v: %s" % (v,), indent=d)
yield ast.Expr(value=v)
@out_of_core
@trace
def make_Pass(self, d):
#pprint("make_Pass(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield ast.Pass()
@out_of_core
@trace
def make_Break(self, d):
#pprint("make_Break(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield ast.Break()
@out_of_core
@trace
def make_Continue(self, d):
#pprint("make_Continue(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield ast.Continue()
@out_of_core
@trace
def make_expr(self, d):
#pprint("make_expr(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
choices = [
self.make_BoolOp,
self.make_NamedExpr,
self.make_BinOp,
self.make_UnaryOp,
self.make_Lambda,
self.make_IfExp,
self.make_Dict,
self.make_Set,
self.make_ListComp,
self.make_SetComp,
self.make_DictComp,
self.make_GeneratorExp,
self.make_Await,
self.make_Yield,
self.make_YieldFrom,
self.make_Compare,
self.make_Call,
self.make_FormattedValue,
self.make_JoinedStr,
self.make_Constant,
self.make_Attribute,
self.make_Subscript,
self.make_Starred,
self.make_Name,
self.make_List,
self.make_Tuple,
self.make_Slice,
]
for c in self.choice(choices):
assert not isinstance(c, GeneratorType)
#pprint("make_expr c: %s" % (c,), indent=d)
yield from c(d)
@out_of_core
@trace
def make_BoolOp(self, d):
#pprint("make_BoolOp(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#op = self.make_boolop(d)
#values = self.make_star(self.make_expr, d)
op = partial(self.make_boolop, d)
values = partial(self.make_star, self.make_expr, d)
for o, v in product(op, values):
assert not isinstance(o, GeneratorType)
assert not isinstance(v, GeneratorType)
#pprint("make_BoolOp o: %s, v: %s" % (o, v,), indent=d)
yield ast.BoolOp(op=o, values=v)
@out_of_core
@trace
def make_NamedExpr(self, d):
#pprint("make_NamedExpr(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#target = self.make_expr(d)
#value = self.make_expr(d)
target = partial(self.make_expr, d)
value = partial(self.make_expr, d)
for t, v in product(target, value):
assert not isinstance(t, GeneratorType)
assert not isinstance(v, GeneratorType)
#pprint("make_NamedExpr t: %s, v: %s" % (t, v,), indent=d)
yield ast.NamedExpr(target=t, value=v)
@out_of_core
@trace
def make_BinOp(self, d):
#pprint("make_BinOp(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#left = self.make_expr(d)
#op = self.make_operator(d)
#right = self.make_expr(d)
left = partial(self.make_expr, d)
op = partial(self.make_operator, d)
right = partial(self.make_expr, d)
for l, o, r in product(left, op, right):
assert not isinstance(l, GeneratorType)
assert not isinstance(o, GeneratorType)
assert not isinstance(r, GeneratorType)
#pprint("make_BinOp l: %s, o: %s, r: %s" % (l, o, r,), indent=d)
yield ast.BinOp(left=l, op=o, right=r)
@out_of_core
@trace
def make_UnaryOp(self, d):
#pprint("make_UnaryOp(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#op = self.make_unaryop(d)
#operand = self.make_expr(d)
op = partial(self.make_unaryop, d)
operand = partial(self.make_expr, d)
for o, a in product(op, operand):
assert not isinstance(o, GeneratorType)
assert not isinstance(a, GeneratorType)
#pprint("make_UnaryOp o: %s, a: %s" % (o, a,), indent=d)
yield ast.UnaryOp(op=o, operand=a)
@out_of_core
@trace
def make_Lambda(self, d):
#pprint("make_Lambda(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#args = self.make_arguments(d)
#body = self.make_expr(d)
args = partial(self.make_arguments, d)
body = partial(self.make_expr, d)
for a, b in product(args, body):
assert not isinstance(a, GeneratorType)
assert not isinstance(b, GeneratorType)
#pprint("make_Lambda a: %s, b: %s" % (a, b,), indent=d)
yield ast.Lambda(args=a, body=b) # Lambda body is a single expression, not a list
@out_of_core
@trace
def make_IfExp(self, d):
#pprint("make_IfExp(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#test = self.make_expr(d)
#body = self.make_expr(d)
#orelse = self.make_expr(d)
test = partial(self.make_expr, d)
body = partial(self.make_expr, d)
orelse = partial(self.make_expr, d)
for t, b, o in product(test, body, orelse):
assert not isinstance(t, GeneratorType)
assert not isinstance(b, GeneratorType)
assert not isinstance(o, GeneratorType)
#pprint("make_IfExp t: %s, b: %s, o: %s" % (t, b, o,), indent=d)
yield ast.IfExp(test=t, body=b, orelse=o) # body/orelse are single expressions
@out_of_core
@trace
def make_Dict(self, d):
#pprint("make_Dict(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#keys = self.make_star(self.make_expr, d)
#values = self.make_star(self.make_expr, d)
keys = partial(self.make_star, self.make_expr, d)
values = partial(self.make_star, self.make_expr, d)
for k, v in product(keys, values):
assert not isinstance(k, GeneratorType)
assert not isinstance(v, GeneratorType)
#pprint("make_Dict k: %s, v: %s" % (k, v,), indent=d)
yield ast.Dict(keys=k, values=v)
@out_of_core
@trace
def make_Set(self, d):
#pprint("make_Set(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
elts = self.make_star(self.make_expr, d)
for e in elts:
assert not isinstance(e, GeneratorType)
#pprint("make_Set e: %s" % (e,), indent=d)
yield ast.Set(elts=e)
@out_of_core
@trace
def make_ListComp(self, d):
#pprint("make_ListComp(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#elt = self.make_expr(d)
#generators = self.make_star(self.make_comprehension, d)
elt = partial(self.make_expr, d)
generators = partial(self.make_star, self.make_comprehension, d)
for e, g in product(elt, generators):
assert not isinstance(e, GeneratorType)
assert not isinstance(g, GeneratorType)
#pprint("make_ListComp e: %s, g: %s" % (e, g,), indent=d)
yield ast.ListComp(elt=e, generators=g)
@out_of_core
@trace
def make_SetComp(self, d):
#pprint("make_SetComp(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#elt = self.make_expr(d)
#generators = self.make_star(self.make_comprehension, d)
elt = partial(self.make_expr, d)
generators = partial(self.make_star, self.make_comprehension, d)
for e, g in product(elt, generators):
assert not isinstance(e, GeneratorType)
assert not isinstance(g, GeneratorType)
#pprint("make_SetComp e: %s, g: %s" % (e, g,), indent=d)
yield ast.SetComp(elt=e, generators=g)
@out_of_core
@trace
def make_DictComp(self, d):
#pprint("make_DictComp(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#key = self.make_expr(d)
#value = self.make_expr(d)
#generators = self.make_star(self.make_comprehension, d)
key = partial(self.make_expr, d)
value = partial(self.make_expr, d)
generators = partial(self.make_star, self.make_comprehension, d)
for k, v, g in product(key, value, generators):
assert not isinstance(k, GeneratorType)
assert not isinstance(v, GeneratorType)
assert not isinstance(g, GeneratorType)
#pprint("make_DictComp k: %s, v: %s, g: %s" % (k, v, g,), indent=d)
yield ast.DictComp(key=k, value=v, generators=g)
@out_of_core
@trace
def make_GeneratorExp(self, d):
#pprint("make_GeneratorExp(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#elt = self.make_expr(d)
#generators = self.make_star(self.make_comprehension, d)
elt = partial(self.make_expr, d)
generators = partial(self.make_star, self.make_comprehension, d)
for e, g in product(elt, generators):
assert not isinstance(e, GeneratorType)
assert not isinstance(g, GeneratorType)
#pprint("make_GeneratorExp e: %s, g: %s" % (e, g,), indent=d)
yield ast.GeneratorExp(elt=e, generators=g)
@out_of_core
@trace
def make_Await(self, d):
#pprint("make_Await(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
value = self.make_expr(d)
for v in value:
assert not isinstance(v, GeneratorType)
#pprint("make_Await v: %s" % (v,), indent=d)
yield ast.Await(value=v)
@out_of_core
@trace
def make_Yield(self, d):
#pprint("make_Yield(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
value = self.make_optional(self.make_expr, d)
for v in value:
assert not isinstance(v, GeneratorType)
#pprint("make_Yield v: %s" % (v,), indent=d)
yield ast.Yield(value=v)
@out_of_core
@trace
def make_YieldFrom(self, d):
#pprint("make_YieldFrom(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
value = self.make_expr(d)
for v in value:
assert not isinstance(v, GeneratorType)
#pprint("make_YieldFrom v: %s" % (v,), indent=d)
yield ast.YieldFrom(value=v)
@out_of_core
@trace
def make_Compare(self, d):
#pprint("make_Compare(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#left = self.make_expr(d)
#ops = self.make_star(self.make_cmpop, d)
#comparators = self.make_star(self.make_expr, d)
left = partial(self.make_expr, d)
ops = partial(self.make_star, self.make_cmpop, d)
comparators = partial(self.make_star, self.make_expr, d)
for l, o, c in product(left, ops, comparators):
assert not isinstance(l, GeneratorType)
assert not isinstance(o, GeneratorType)
assert not isinstance(c, GeneratorType)
#pprint("make_Compare l: %s, o: %s, c: %s" % (l, o, c,), indent=d)
yield ast.Compare(left=l, ops=o, comparators=c)
@out_of_core
@trace
def make_Call(self, d):
#pprint("make_Call(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#func = self.make_expr(d)
#args = self.make_star(self.make_expr, d)
#keywords = self.make_star(self.make_keyword, d)
func = partial(self.make_expr, d)
args = partial(self.make_star, self.make_expr, d)
keywords = partial(self.make_star, self.make_keyword, d)
for f, a, k in product(func, args, keywords):
assert not isinstance(f, GeneratorType)
assert not isinstance(a, GeneratorType)
assert not isinstance(k, GeneratorType)
#pprint("make_Call f: %s, a: %s, k: %s" % (f, a, k,), indent=d)
yield ast.Call(func=f, args=a, keywords=k)
@out_of_core
@trace
def make_FormattedValue(self, d):
#pprint("make_FormattedValue(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#value = self.make_expr(d)
#conversion = self.make_int(d)
#format_spec = self.make_optional(self.make_expr, d)
value = partial(self.make_expr, d)
conversion = partial(self.make_int, d)
format_spec = partial(self.make_optional, self.make_expr, d)
for v, c, f in product(value, conversion, format_spec):
assert not isinstance(v, GeneratorType)
assert not isinstance(c, GeneratorType)
assert not isinstance(f, GeneratorType)
#pprint("make_FormattedValue v: %s, c: %s, f: %s" % (v, c, f,), indent=d)
yield ast.FormattedValue(value=v, conversion=c, format_spec=f)
@out_of_core
@trace
def make_JoinedStr(self, d):
#pprint("make_JoinedStr(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
values = self.make_star(self.make_expr, d)
for v in values:
assert not isinstance(v, GeneratorType)
#pprint("make_JoinedStr v: %s" % (v,), indent=d)
yield ast.JoinedStr(values=v)
@out_of_core
@trace
def make_Constant(self, d):
#pprint("make_Constant(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#value = self.make_constant(d)
#kind = self.make_optional(self.make_string, d)
value = partial(self.make_constant, d)
kind = partial(self.make_optional, self.make_string, d)
for v, k in product(value, kind):
assert not isinstance(v, GeneratorType)
assert not isinstance(k, GeneratorType)
#pprint("make_Constant v: %s, k: %s" % (v, k,), indent=d)
yield ast.Constant(value=v, kind=k)
@out_of_core
@trace
def make_Attribute(self, d):
#pprint("make_Attribute(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#value = self.make_expr(d)
#attr = self.make_identifier(d)
#ctx = self.make_expr_context(d)
value = partial(self.make_expr, d)
attr = partial(self.make_identifier, d)
ctx = partial(self.make_expr_context, d)
for v, a, c in product(value, attr, ctx):
assert not isinstance(v, GeneratorType)
assert not isinstance(a, GeneratorType)
assert not isinstance(c, GeneratorType)
#pprint("make_Attribute v: %s, a: %s, c: %s" % (v, a, c,), indent=d)
yield ast.Attribute(value=v, attr=a, ctx=c)
@out_of_core
@trace
def make_Subscript(self, d):
#pprint("make_Subscript(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#value = self.make_expr(d)
#slice_ = self.make_expr(d)
#ctx = self.make_expr_context(d)
value = partial(self.make_expr, d)
slice_ = partial(self.make_expr, d)
ctx = partial(self.make_expr_context, d)
for v, s, c in product(value, slice_, ctx):
assert not isinstance(v, GeneratorType)
assert not isinstance(s, GeneratorType)
assert not isinstance(c, GeneratorType)
#pprint("make_Subscript v: %s, s: %s, c: %s" % (v, s, c,), indent=d)
yield ast.Subscript(value=v, slice=s, ctx=c)
@out_of_core
@trace
def make_Starred(self, d):
#pprint("make_Starred(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#value = self.make_expr(d)
#ctx = self.make_expr_context(d)
value = partial(self.make_expr, d)
ctx = partial(self.make_expr_context, d)
for v, c in product(value, ctx):
assert not isinstance(v, GeneratorType)
assert not isinstance(c, GeneratorType)
#pprint("make_Starred v: %s, c: %s" % (v, c,), indent=d)
yield ast.Starred(value=v, ctx=c)
@out_of_core
@trace
def make_Name(self, d):
#pprint("make_Name(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#id_ = self.make_identifier(d)
#ctx = self.make_expr_context(d)
id_ = partial(self.make_identifier, d)
ctx = partial(self.make_expr_context, d)
for i, c in product(id_, ctx):
assert not isinstance(i, GeneratorType)
assert not isinstance(c, GeneratorType)
#pprint("make_Name i: %s, c: %s" % (i, c,), indent=d)
yield ast.Name(id=i, ctx=c)
@out_of_core
@trace
def make_List(self, d):
#pprint("make_List(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#elts = self.make_star(self.make_expr, d)
#ctx = self.make_expr_context(d)
elts = partial(self.make_star, self.make_expr, d)
ctx = partial(self.make_expr_context, d)
for e, c in product(elts, ctx):
assert not isinstance(e, GeneratorType)
assert not isinstance(c, GeneratorType)
#pprint("make_List e: %s, c: %s" % (e, c,), indent=d)
yield ast.List(elts=e, ctx=c)
@out_of_core
@trace
def make_Tuple(self, d):
#pprint("make_Tuple(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#elts = self.make_star(self.make_expr, d)
#ctx = self.make_expr_context(d)
elts = partial(self.make_star, self.make_expr, d)
ctx = partial(self.make_expr_context, d)
for e, c in product(elts, ctx):
assert not isinstance(e, GeneratorType)
assert not isinstance(c, GeneratorType)
#pprint("make_Tuple e: %s, c: %s" % (e, c,), indent=d)
yield ast.Tuple(elts=e, ctx=c)
@out_of_core
@trace
def make_Slice(self, d):
#pprint("make_Slice(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#lower = self.make_optional(self.make_expr, d)
#upper = self.make_optional(self.make_expr, d)
#step = self.make_optional(self.make_expr, d)
lower = partial(self.make_optional, self.make_expr, d)
upper = partial(self.make_optional, self.make_expr, d)
step = partial(self.make_optional, self.make_expr, d)
for l, u, s in product(lower, upper, step):
assert not isinstance(l, GeneratorType)
assert not isinstance(u, GeneratorType)
assert not isinstance(s, GeneratorType)
#pprint("make_Slice l: %s, u: %s, s: %s" % (l, u, s,), indent=d)
yield ast.Slice(lower=l, upper=u, step=s)
@out_of_core
@trace
def make_expr_context(self, d):
#pprint("make_expr_context(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
choices = [
self.make_Load,
self.make_Store,
self.make_Del,
]
for c in self.choice(choices):
assert not isinstance(c, GeneratorType)
#pprint("make_expr_context c: %s" % (c,), indent=d)
yield from c(d)
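# (Load/Store/Del above tag how an expression is used: plain reads,
# assignment targets, and `del` targets respectively.)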
@out_of_core
@trace
def make_Load(self, d):
#pprint("make_Load(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield ast.Load()
@out_of_core
@trace
def make_Store(self, d):
#pprint("make_Store(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield ast.Store()
@out_of_core
@trace
def make_Del(self, d):
#pprint("make_Del(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield ast.Del()
@out_of_core
@trace
def make_boolop(self, d):
#pprint("make_boolop(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
choices = [
self.make_And,
self.make_Or,
]
for c in self.choice(choices):
assert not isinstance(c, GeneratorType)
#pprint("make_boolop c: %s" % (c,), indent=d)
yield from c(d)
@out_of_core
@trace
def make_And(self, d):
#pprint("make_And(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield ast.And()
@out_of_core
@trace
def make_Or(self, d):
#pprint("make_Or(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield ast.Or()
@out_of_core
@trace
def make_operator(self, d):
#pprint("make_operator(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
choices = [
self.make_Add,
self.make_Sub,
self.make_Mult,
self.make_MatMult,
self.make_Div,
self.make_Mod,
self.make_Pow,
self.make_LShift,
self.make_RShift,
self.make_BitOr,
self.make_BitXor,
self.make_BitAnd,
self.make_FloorDiv,
]
for c in self.choice(choices):
assert not isinstance(c, GeneratorType)
#pprint("make_operator c: %s" % (c,), indent=d)
yield from c(d)
@out_of_core
@trace
def make_Add(self, d):
#pprint("make_Add(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield ast.Add()
@out_of_core
@trace
def make_Sub(self, d):
#pprint("make_Sub(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield ast.Sub()
@out_of_core
@trace
def make_Mult(self, d):
#pprint("make_Mult(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield ast.Mult()
@out_of_core
@trace
def make_MatMult(self, d):
#pprint("make_MatMult(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield ast.MatMult()
@out_of_core
@trace
def make_Div(self, d):
#pprint("make_Div(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield ast.Div()
@out_of_core
@trace
def make_Mod(self, d):
#pprint("make_Mod(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield ast.Mod()
@out_of_core
@trace
def make_Pow(self, d):
#pprint("make_Pow(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield ast.Pow()
@out_of_core
@trace
def make_LShift(self, d):
#pprint("make_LShift(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield ast.LShift()
@out_of_core
@trace
def make_RShift(self, d):
#pprint("make_RShift(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield ast.RShift()
@out_of_core
@trace
def make_BitOr(self, d):
#pprint("make_BitOr(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield ast.BitOr()
@out_of_core
@trace
def make_BitXor(self, d):
#pprint("make_BitXor(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield ast.BitXor()
@out_of_core
@trace
def make_BitAnd(self, d):
#pprint("make_BitAnd(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield ast.BitAnd()
@out_of_core
@trace
def make_FloorDiv(self, d):
#pprint("make_FloorDiv(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield ast.FloorDiv()
@out_of_core
@trace
def make_unaryop(self, d):
#pprint("make_unaryop(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
choices = [
self.make_Invert,
self.make_Not,
self.make_UAdd,
self.make_USub,
]
for c in self.choice(choices):
assert not isinstance(c, GeneratorType)
#pprint("make_unaryop c: %s" % (c,), indent=d)
yield from c(d)
@out_of_core
@trace
def make_Invert(self, d):
#pprint("make_Invert(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield ast.Invert()
@out_of_core
@trace
def make_Not(self, d):
#pprint("make_Not(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield ast.Not()
@out_of_core
@trace
def make_UAdd(self, d):
#pprint("make_UAdd(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield ast.UAdd()
@out_of_core
@trace
def make_USub(self, d):
#pprint("make_USub(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield ast.USub()
@out_of_core
@trace
def make_cmpop(self, d):
#pprint("make_cmpop(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
choices = [
self.make_Eq,
self.make_NotEq,
self.make_Lt,
self.make_LtE,
self.make_Gt,
self.make_GtE,
self.make_Is,
self.make_IsNot,
self.make_In,
self.make_NotIn,
]
for c in self.choice(choices):
assert not isinstance(c, GeneratorType)
#pprint("make_cmpop c: %s" % (c,), indent=d)
yield from c(d)
@out_of_core
@trace
def make_Eq(self, d):
#pprint("make_Eq(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield ast.Eq()
@out_of_core
@trace
def make_NotEq(self, d):
#pprint("make_NotEq(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield ast.NotEq()
@out_of_core
@trace
def make_Lt(self, d):
#pprint("make_Lt(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield ast.Lt()
@out_of_core
@trace
def make_LtE(self, d):
#pprint("make_LtE(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield ast.LtE()
@out_of_core
@trace
def make_Gt(self, d):
#pprint("make_Gt(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield ast.Gt()
@out_of_core
@trace
def make_GtE(self, d):
#pprint("make_GtE(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield ast.GtE()
@out_of_core
@trace
def make_Is(self, d):
#pprint("make_Is(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield ast.Is()
@out_of_core
@trace
def make_IsNot(self, d):
#pprint("make_IsNot(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield ast.IsNot()
@out_of_core
@trace
def make_In(self, d):
#pprint("make_In(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield ast.In()
@out_of_core
@trace
def make_NotIn(self, d):
#pprint("make_NotIn(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield ast.NotIn()
@out_of_core
@trace
def make_comprehension(self, d):
#pprint("make_comprehension(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#targetl = self.make_expr(d)
#iter_ = self.make_expr(d)
#ifs = self.make_star(self.make_expr, d)
#is_async = self.make_int(d)
target = partial(self.make_expr, d)
iter_ = partial(self.make_expr, d)
ifs = partial(self.make_star, self.make_expr, d)
is_async = partial(self.make_int, d)
for t, it, i, a in product(target, iter_, ifs, is_async):
assert not isinstance(t, GeneratorType)
assert not isinstance(it, GeneratorType)
assert not isinstance(i, GeneratorType)
assert not isinstance(a, GeneratorType)
#pprint("make_comprehension t: %s, it: %s, i: %s, a: %s" % (t, it, i, a,), indent=d)
yield ast.comprehension(target=t, iter=it, ifs=i, is_async=a)
@out_of_core
@trace
def make_excepthandler(self, d):
#pprint("make_excepthandler(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#type_ = self.make_optional(self.make_expr, d)
#name = self.make_optional(self.make_identifier, d)
#body = self.make_star(self.make_stmt, d)
type_ = partial(self.make_optional, self.make_expr, d)
name = partial(self.make_optional, self.make_identifier, d)
body = partial(self.make_star, self.make_stmt, d)
for t, n, b in product(type_, name, body):
assert not isinstance(t, GeneratorType)
assert not isinstance(n, GeneratorType)
assert not isinstance(b, GeneratorType)
#pprint("make_excepthandler t: %s, n: %s, b: %s" % (t, n, b,), indent=d)
yield ast.excepthandler(type=t, name=n, body=list(b))
@out_of_core
@trace
def make_arguments(self, d):
#pprint("make_arguments(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#posonlyargs = self.make_star(self.make_arg, d)
#args = self.make_star(self.make_arg, d)
#vararg = self.make_optional(self.make_arg, d)
#kwonlyargs = self.make_star(self.make_arg, d)
#kw_defaults = self.make_star(self.make_expr, d)
#kwarg = self.make_optional(self.make_arg, d)
#defaults = self.make_star(self.make_expr, d)
posonlyargs = partial(self.make_star, self.make_arg, d)
args = partial(self.make_star, self.make_arg, d)
vararg = partial(self.make_optional, self.make_arg, d)
kwonlyargs = partial(self.make_star, self.make_arg, d)
kw_defaults = partial(self.make_star, self.make_expr, d)
kwarg = partial(self.make_optional, self.make_arg, d)
defaults = partial(self.make_star, self.make_expr, d)
for p, a, v, kwo, kwd, kwa, df in product(posonlyargs, args, vararg, kwonlyargs, kw_defaults, kwarg, defaults):
assert not isinstance(p, GeneratorType)
assert not isinstance(a, GeneratorType)
assert not isinstance(v, GeneratorType)
assert not isinstance(kwo, GeneratorType)
assert not isinstance(kwd, GeneratorType)
assert not isinstance(kwa, GeneratorType)
assert not isinstance(df, GeneratorType)
#pprint("make_arguments p: %s, a: %s, v: %s, kwo: %s, kwd: %s, kwa: %s, df: %s" % (p, a, v, kwo, kwd, kwa, df,), indent=d)
yield ast.arguments(posonlyargs=p, args=a, vararg=v, kwonlyargs=kwo, kw_defaults=kwd, kwarg=kwa, defaults=df)
@out_of_core
@trace
def make_arg(self, d):
#pprint("make_arg(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#arg = self.make_identifier(d)
#annotation = self.make_optional(self.make_expr, d)
#type_comment = self.make_optional(self.make_string, d)
arg = partial(self.make_identifier, d)
annotation = partial(self.make_optional, self.make_expr, d)
type_comment = partial(self.make_optional, self.make_string, d)
for a, n, t in product(arg, annotation, type_comment):
assert not isinstance(a, GeneratorType)
assert not isinstance(n, GeneratorType)
assert not isinstance(t, GeneratorType)
#pprint("make_arg a: %s, n: %s, t: %s" % (a, n, t,), indent=d)
yield ast.arg(arg=a, annotation=n, type_comment=t)
@out_of_core
@trace
def make_keyword(self, d):
#pprint("make_keyword(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#arg = self.make_optional(self.make_identifier, d)
#value = self.make_expr(d)
arg = partial(self.make_optional, self.make_identifier, d)
value = partial(self.make_expr, d)
for a, v in product(arg, value):
assert not isinstance(a, GeneratorType)
assert not isinstance(v, GeneratorType)
#pprint("make_keyword a: %s, v: %s" % (a, v,), indent=d)
yield ast.keyword(arg=a, value=v)
@out_of_core
@trace
def make_alias(self, d):
#pprint("make_alias(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#name = self.make_identifier(d)
#asname = self.make_optional(self.make_identifier, d)
name = partial(self.make_identifier, d)
asname = partial(self.make_optional, self.make_identifier, d)
for n, a in product(name, asname):
assert not isinstance(n, GeneratorType)
assert not isinstance(a, GeneratorType)
#pprint("make_alias n: %s, a: %s" % (n, a,), indent=d)
yield ast.alias(name=n, asname=a)
@out_of_core
@trace
def make_withitem(self, d):
#pprint("make_withitem(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#context_expr = self.make_expr(d)
#optional_vars = self.make_optional(self.make_expr, d)
context_expr = partial(self.make_expr, d)
optional_vars = partial(self.make_optional, self.make_expr, d)
for c, o in product(context_expr, optional_vars):
assert not isinstance(c, GeneratorType)
assert not isinstance(o, GeneratorType)
#pprint("make_withitem c: %s, o: %s" % (c, o,), indent=d)
yield ast.withitem(context_expr=c, optional_vars=o)
@out_of_core
@trace
def make_match_case(self, d):
#pprint("make_match_case(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#pattern = self.make_pattern(d)
#guard = self.make_optional(self.make_expr, d)
#body = self.make_star(self.make_stmt, d)
pattern = partial(self.make_pattern, d)
guard = partial(self.make_optional, self.make_expr, d)
body = partial(self.make_star, self.make_stmt, d)
for p, g, b in product(pattern, guard, body):
assert not isinstance(p, GeneratorType)
assert not isinstance(g, GeneratorType)
assert not isinstance(b, GeneratorType)
#pprint("make_match_case p: %s, g: %s, b: %s" % (p, g, b,), indent=d)
yield ast.match_case(pattern=p, guard=g, body=list(b))
@out_of_core
@trace
def make_pattern(self, d):
#pprint("make_pattern(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
choices = [
self.make_MatchValue,
self.make_MatchSingleton,
self.make_MatchSequence,
self.make_MatchMapping,
self.make_MatchClass,
self.make_MatchStar,
self.make_MatchAs,
self.make_MatchOr,
]
for c in self.choice(choices):
assert not isinstance(c, GeneratorType)
#pprint("make_pattern c: %s" % (c,), indent=d)
yield from c(d)
@out_of_core
@trace
def make_MatchValue(self, d):
#pprint("make_MatchValue(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
value = self.make_expr(d)
for v in value:
assert not isinstance(v, GeneratorType)
#pprint("make_MatchValue v: %s" % (v,), indent=d)
yield ast.MatchValue(value=v)
@out_of_core
@trace
def make_MatchSingleton(self, d):
#pprint("make_MatchSingleton(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
value = self.make_constant(d)
for v in value:
assert not isinstance(v, GeneratorType)
#pprint("make_MatchSingleton v: %s" % (v,), indent=d)
yield ast.MatchSingleton(value=v)
@out_of_core
@trace
def make_MatchSequence(self, d):
#pprint("make_MatchSequence(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
patterns = self.make_star(self.make_pattern, d)
for p in patterns:
assert not isinstance(p, GeneratorType)
#pprint("make_MatchSequence v: %s" % (p,), indent=d)
yield ast.MatchSequence(patterns=p)
@out_of_core
@trace
def make_MatchMapping(self, d):
#pprint("make_MatchMapping(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#keys = self.make_star(self.make_expr, d)
#patterns = self.make_star(self.make_pattern, d)
#rest = self.make_optional(self.make_identifier, d)
keys = partial(self.make_star, self.make_expr, d)
patterns = partial(self.make_star, self.make_pattern, d)
rest = partial(self.make_optional, self.make_identifier, d)
for k, p, r in product(keys, patterns, rest):
assert not isinstance(k, GeneratorType)
assert not isinstance(p, GeneratorType)
assert not isinstance(r, GeneratorType)
#pprint("make_MatchMapping k: %s, p: %s, r: %s" % (k, p, r,), indent=d)
yield ast.MatchMapping(keys=k, patterns=p, rest=r)
@out_of_core
@trace
def make_MatchClass(self, d):
#pprint("make_MatchClass(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#cls = self.make_expr(d)
#patterns = self.make_star(self.make_pattern, d)
#kwd_attrs = self.make_star(self.make_identifier, d)
#kwd_patterns = self.make_star(self.make_pattern, d)
cls = partial(self.make_expr, d)
patterns = partial(self.make_star, self.make_pattern, d)
kwd_attrs = partial(self.make_star, self.make_identifier, d)
kwd_patterns = partial(self.make_star, self.make_pattern, d)
for c, p, ka, kp in product(cls, patterns, kwd_attrs, kwd_patterns):
assert not isinstance(c, GeneratorType)
assert not isinstance(p, GeneratorType)
assert not isinstance(ka, GeneratorType)
assert not isinstance(kp, GeneratorType)
#pprint("make_MatchClass c: %s, p: %s, ka: %s, kp: %s" % (c, p, ka, kp,), indent=d)
yield ast.MatchClass(cls=c, patterns=p, kwd_attrs=ka, kwd_patterns=kp)
@out_of_core
@trace
def make_MatchStar(self, d):
#pprint("make_MatchStar(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
name = self.make_optional(self.make_identifier, d)
for n in name:
assert not isinstance(n, GeneratorType)
#pprint("make_MatchStar n: %s" % (n,), indent=d)
yield ast.MatchStar(name=n)
@out_of_core
@trace
def make_MatchAs(self, d):
#pprint("make_MatchAs(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#pattern = self.make_optional(self.make_pattern, d)
#name = self.make_optional(self.make_identifier, d)
pattern = partial(self.make_optional, self.make_pattern, d)
name = partial(self.make_optional, self.make_identifier, d)
for p, n in product(pattern, name):
assert not isinstance(p, GeneratorType)
assert not isinstance(n, GeneratorType)
#pprint("make_MatchAs p: %s, n: %s" % (p, n,), indent=d)
yield ast.MatchAs(pattern=p, name=n)
@out_of_core
@trace
def make_MatchOr(self, d):
#pprint("make_MatchOr(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
patterns = self.make_star(self.make_pattern, d)
for p in patterns:
assert not isinstance(p, GeneratorType)
#pprint("make_MatchOr p: %s" % (p,), indent=d)
yield ast.MatchOr(patterns=p)
@out_of_core
@trace
def make_type_ignore(self, d):
#pprint("make_TypeIgnore(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#lineno = self.make_int(d)
#tag = self.make_string(d)
lineno = partial(self.make_int, d)
tag = partial(self.make_string, d)
for l, t in product(lineno, tag):
assert not isinstance(l, GeneratorType)
assert not isinstance(t, GeneratorType)
#pprint("make_TypeIgnore l: %s, t: %s" % (l, t,), indent=d)
yield ast.TypeIgnore(lineno=l, tag=t)
@out_of_core
@trace
def make_int(self, d):
#pprint("make_int(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
# TODO
i = randrange(-10, 10)
yield i
@out_of_core
@trace
def make_string(self, d):
#pprint("make_string(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
# TODO
n = randrange(10)
c = ascii_letters + punctuation + digits
f = lambda _: choice(c)
s = map(f, range(n))
r = ''.join(s)
yield r
@out_of_core
@trace
def make_identifier(self, d):
# TODO identifier scope
# TODO declare identifier, reference identifier
#pprint("make_identifier(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
# TODO
n = randrange(10)
c = ascii_letters + digits
f = lambda _: choice(c)
s = map(f, range(n))
c = ascii_letters
s = (choice(c), *s)
r = ''.join(s)
yield r
@out_of_core
@trace
def make_constant(self, d):
#pprint("make_constant(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
# integer
# float
# complex
# string
# boolean
choices = [
self.make_int,
self.make_float,
#self.make_complex,
self.make_string,
self.make_boolean,
]
for c in self.choice(choices):
assert not isinstance(c, GeneratorType)
yield from c(d)
@out_of_core
@trace
def make_float(self, d):
r = random()
yield r
#def make_complex(self, d): pass
@out_of_core
@trace
def make_boolean(self, d):
b = bool(getrandbits(1))
yield b
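# ---------------------------------------------------------------------------
# Illustrative sketch (an assumption, not this module's own `product`): the
# make_* constructors above wrap their sub-generators in functools.partial
# before taking a product, because a generator can be consumed only once,
# while a Cartesian product must restart every factor except the first.
# A restartable product over generator *factories* looks roughly like this:
def _product_of_callables(*factories):
    """Cartesian product over restartable generator factories (sketch)."""
    if not factories:
        yield ()
        return
    head, tail = factories[0], factories[1:]
    for item in head():  # a fresh generator for each invocation
        for rest in _product_of_callables(*tail):
            yield (item,) + rest

def _digits(n):
    yield from range(n)

assert list(_product_of_callables(partial(_digits, 2), partial(_digits, 2))) == \
    [(0, 0), (0, 1), (1, 0), (1, 1)]
# ---------------------------------------------------------------------------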
if __name__ == '__main__':
for rd in range(10):
print("rd: %s" % (rd,))
A = CG(rd)
for a, c, e in A.exec_module(): print("a: %s\nc: %s\ne: %s\n" % (ast.dump(a), c, e,))
print()
|
{"hexsha": "1fb62e2b26c45176a943b4eed2e26c5e3c1fa4f5", "size": 64587, "ext": "py", "lang": "Python", "max_stars_repo_path": "cgc.py", "max_stars_repo_name": "InnovAnon-Inc/ProgramSynthesis", "max_stars_repo_head_hexsha": "e7132c144cba34ef167de981c063b71c23075456", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-02T16:49:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-02T16:49:18.000Z", "max_issues_repo_path": "cgc.py", "max_issues_repo_name": "InnovAnon-Inc/ProgramSynthesis", "max_issues_repo_head_hexsha": "e7132c144cba34ef167de981c063b71c23075456", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cgc.py", "max_forks_repo_name": "InnovAnon-Inc/ProgramSynthesis", "max_forks_repo_head_hexsha": "e7132c144cba34ef167de981c063b71c23075456", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.1725731895, "max_line_length": 125, "alphanum_fraction": 0.6598231842, "include": true, "reason": "import numpy", "num_tokens": 20709}
|
from __future__ import division
import pickle, random
import numpy as np
from itertools import cycle
import torch
from torch.autograd import Variable
all_feature_lengths = {'v_enc_onehot': 100,
'v_enc_embedding': 300,
'v_enc_dim300': 300,
'v_enc_dim2': 2,
'v_enc_dim10': 10,
'v_enc_dim50': 50,
'v_enc_dim100': 100,
'v_freq_freq': 1,
'v_freq_rank': 1,
'v_deg': 1,
'v_sense': 1,
'e_vertexsim': 1,
'e_dir': 3,
'e_rel': 46,
'e_weight': 1,
'e_source': 6,
'e_weightsource': 6,
'e_srank_abs': 1,
'e_srank_rel': 1,
'e_trank_abs': 1,
'e_trank_rel': 1,
'e_sense': 1}
class Dataset:
def __init__(self, feature_names, train_test_split_fraction, gpu):
self.feature_names = feature_names
self.cached_features = dict()
self.gpu = gpu
for f in feature_names:
print 'loading '+f
self.cached_features[f] = pickle.load(
open('features/%s.pkl'%f, 'rb'))
sampled_problems = pickle.load(open(
'../../data/science/paths.pkl', 'rb'))
self.texts = dict()
print 'loading problem plain texts'
for id_num in sampled_problems:
f_short = sampled_problems[id_num]['forward']['short']
r_short = sampled_problems[id_num]['reverse']['short']
self.texts[id_num+'f'] = f_short
self.texts[id_num+'r'] = r_short
print 'loading labeled pairs'
self.all_pairs = [] # list of id tuples (good, bad)
for l in open('../../data/science/answers.txt'):
first, second, good = l.strip().split('_')
            if first == good:
                bad = second
            elif second == good:
                bad = first
            else:
                # malformed answer line: the winner matches neither chain
                continue
g_len = (len(self.texts[good].strip().split(' '))+1)/2
b_len = (len(self.texts[bad].strip().split(' '))+1)/2
if g_len!=4 or b_len!=4:
continue
self.all_pairs.append((good, bad))
random.shuffle(self.all_pairs)
split = int(train_test_split_fraction*len(self.all_pairs))
self.train_pairs = self.all_pairs[:split]
self.test_pairs = self.all_pairs[split:]
        self.train_pairs = self.train_pairs[:len(self.train_pairs)]  # no-op; kept as a hook for truncating the training set
self.cycled_train_pairs = cycle(self.train_pairs)
def get_fea_len(self):
return [all_feature_lengths[f] for f in self.feature_names]
def get_v_fea_len(self):
return [all_feature_lengths[f] for f in self.feature_names if f.startswith('v')]
def get_e_fea_len(self):
return [all_feature_lengths[f] for f in self.feature_names if f.startswith('e')]
def get_chain_len(self, id):
return len(self.get_features(id)[0])
def get_features(self, id):
v_features = []
e_features = []
for f in self.feature_names:
if f.startswith('v'):
v_features.append(self.cached_features[f][id])
else:
e_features.append(self.cached_features[f][id])
v_features = zip(*v_features)
e_features = zip(*e_features)
return v_features, e_features
def prepare_feature_placeholder(self, N):
v_features = [[],[],[],[]]
e_features = [[],[],[]]
for feature in v_features:
for f in self.feature_names:
if f.startswith('v'):
feature.append(
np.zeros((N, all_feature_lengths[f]), dtype='float32')
)
for feature in e_features:
for f in self.feature_names:
if f.startswith('e'):
feature.append(
np.zeros((N, all_feature_lengths[f]), dtype='float32')
)
return v_features, e_features
def get_train_pairs(self, N, randomize_dir=True):
'''
return a list of two lists, X_A and X_B, as well as a list y
each list consists of two lists, which are vertex and edge representations
each list consists of #V or #E lists, which are individual vertices/edges
each list consists of several N x feature_len torch Variables, which are individual features
currently only keeping chains of length 4
if for i-th problem, the good chain is in X_A, then y[i]==1, else y[i]==0
'''
v_features_A, e_features_A = self.prepare_feature_placeholder(N)
v_features_B, e_features_B = self.prepare_feature_placeholder(N)
y = np.zeros(N, dtype='int64')
for instance_idx in xrange(N):
good, bad = next(self.cycled_train_pairs)
if randomize_dir:
good = good[:-1]+random.choice(['f','r'])
bad = bad[:-1]+random.choice(['f','r'])
v_good, e_good = self.get_features(good)
v_bad, e_bad = self.get_features(bad)
label = random.random()>0.5
y[instance_idx] = label
for v_idx in xrange(4):
for v_fea_idx in xrange(len(v_good[v_idx])):
if label:
v_features_A[v_idx][v_fea_idx][instance_idx] = v_good[v_idx][v_fea_idx]
v_features_B[v_idx][v_fea_idx][instance_idx] = v_bad[v_idx][v_fea_idx]
else:
v_features_B[v_idx][v_fea_idx][instance_idx] = v_good[v_idx][v_fea_idx]
v_features_A[v_idx][v_fea_idx][instance_idx] = v_bad[v_idx][v_fea_idx]
for e_idx in xrange(3):
for e_fea_idx in xrange(len(e_good[e_idx])):
if label:
e_features_A[e_idx][e_fea_idx][instance_idx] = e_good[e_idx][e_fea_idx]
e_features_B[e_idx][e_fea_idx][instance_idx] = e_bad[e_idx][e_fea_idx]
else:
e_features_B[e_idx][e_fea_idx][instance_idx] = e_good[e_idx][e_fea_idx]
e_features_A[e_idx][e_fea_idx][instance_idx] = e_bad[e_idx][e_fea_idx]
for features in [v_features_A, e_features_A, v_features_B, e_features_B]:
for feature in features:
for i in xrange(len(feature)):
feature[i] = Variable(torch.from_numpy(feature[i]))
if self.gpu:
feature[i] = feature[i].cuda()
y = Variable(torch.from_numpy(y))
if self.gpu:
y = y.cuda()
return ((v_features_A, e_features_A), (v_features_B, e_features_B), y)
def get_test_pairs(self, randomize_dir=True, return_id=False):
'''
return a list of two lists, X_A and X_B, as well as a list y
each list consists of two lists, which are vertex and edge representations
each list consists of #V or #E lists, which are individual vertices/edges
each list consists of several N x feature_len torch Variables, which are individual features
currently only keeping chains of length 4
if for i-th problem, the good chain is in X_A, then y[i]==1, else y[i]==0
'''
N = len(self.test_pairs)
v_features_A, e_features_A = self.prepare_feature_placeholder(N)
v_features_B, e_features_B = self.prepare_feature_placeholder(N)
y = np.zeros(N, dtype='int64')
if return_id:
ids = [[], []]
for instance_idx in xrange(N):
good, bad = self.test_pairs[instance_idx]
if randomize_dir:
good = good[:-1]+random.choice(['f','r'])
bad = bad[:-1]+random.choice(['f','r'])
v_good, e_good = self.get_features(good)
v_bad, e_bad = self.get_features(bad)
label = random.random()>0.5
y[instance_idx] = label
if return_id:
if label:
ids[0].append(good)
ids[1].append(bad)
else:
ids[0].append(bad)
ids[1].append(good)
for v_idx in xrange(4):
for v_fea_idx in xrange(len(v_good[v_idx])):
if label:
v_features_A[v_idx][v_fea_idx][instance_idx] = v_good[v_idx][v_fea_idx]
v_features_B[v_idx][v_fea_idx][instance_idx] = v_bad[v_idx][v_fea_idx]
else:
v_features_B[v_idx][v_fea_idx][instance_idx] = v_good[v_idx][v_fea_idx]
v_features_A[v_idx][v_fea_idx][instance_idx] = v_bad[v_idx][v_fea_idx]
for e_idx in xrange(3):
for e_fea_idx in xrange(len(e_good[e_idx])):
if label:
e_features_A[e_idx][e_fea_idx][instance_idx] = e_good[e_idx][e_fea_idx]
e_features_B[e_idx][e_fea_idx][instance_idx] = e_bad[e_idx][e_fea_idx]
else:
e_features_B[e_idx][e_fea_idx][instance_idx] = e_good[e_idx][e_fea_idx]
e_features_A[e_idx][e_fea_idx][instance_idx] = e_bad[e_idx][e_fea_idx]
for features in [v_features_A, e_features_A, v_features_B, e_features_B]:
for feature in features:
for i in xrange(len(feature)):
feature[i] = Variable(torch.from_numpy(feature[i]))
if self.gpu:
feature[i] = feature[i].cuda()
y = Variable(torch.from_numpy(y))
if self.gpu:
y = y.cuda()
if not return_id:
return (v_features_A, e_features_A), (v_features_B, e_features_B), y
else:
return (v_features_A, e_features_A), (v_features_B, e_features_B), y, ids
def get_pairs_for_ids(self, ids):
'''
ids are list of (first_chain, second_chain) tuples
return a list of two lists, X_A and X_B
each list consists of two lists, which are vertex and edge representations
each list consists of #V or #E lists, which are individual vertices/edges
each list consists of several N x feature_len torch Variables, which are individual features
currently only keeping chains of length 4
'''
N = len(ids)
v_features_A, e_features_A = self.prepare_feature_placeholder(N)
v_features_B, e_features_B = self.prepare_feature_placeholder(N)
for instance_idx, (first, second) in enumerate(ids):
v_first, e_first = self.get_features(first)
v_second, e_second = self.get_features(second)
for v_idx in xrange(4):
for v_fea_idx in xrange(len(v_first[v_idx])):
v_features_A[v_idx][v_fea_idx][instance_idx] = v_first[v_idx][v_fea_idx]
v_features_B[v_idx][v_fea_idx][instance_idx] = v_second[v_idx][v_fea_idx]
for e_idx in xrange(3):
for e_fea_idx in xrange(len(e_first[e_idx])):
e_features_A[e_idx][e_fea_idx][instance_idx] = e_first[e_idx][e_fea_idx]
e_features_B[e_idx][e_fea_idx][instance_idx] = e_second[e_idx][e_fea_idx]
for features in [v_features_A, e_features_A, v_features_B, e_features_B]:
for feature in features:
for i in xrange(len(feature)):
feature[i] = Variable(torch.from_numpy(feature[i]))
if self.gpu:
feature[i] = feature[i].cuda()
return ((v_features_A, e_features_A), (v_features_B, e_features_B))
# if __name__ == '__main__':
# d = Dataset(['v_freq_freq', 'v_sense', 'e_source', 'e_dir', 'e_sense'], 0.9, False)
# good, bad = d.get_train_pairs(1000)
# v_good, e_good = good
# v_bad, e_bad = bad
# print len(v_good)
# for feature in e_good[0]:
# print feature
# print e_good
# print v_bad
# print e_bad
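# Shape sketch (illustrative, matching the docstrings above):
#   get_train_pairs(N) -> (X_A, X_B, y) where
#     X_A == (v_features_A, e_features_A)
#     v_features_A: list of 4 vertices, each a list of per-feature
#                   N x feature_len torch Variables
#     e_features_A: list of 3 edges, with the same per-feature layout
#     y:            length-N torch Variable; y[i] == 1 iff the good chain
#                   of problem i was placed in X_A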
|
{"hexsha": "7bfa672c5f69325048135decce88820e4d40f2a5", "size": 10192, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/science/dataset.py", "max_stars_repo_name": "YilunZhou/path-naturalness-prediction", "max_stars_repo_head_hexsha": "dec384a58297e1cd88f44eb31771d0251e0b06d4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2019-06-24T19:12:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-21T12:23:46.000Z", "max_issues_repo_path": "code/science/dataset.py", "max_issues_repo_name": "YilunZhou/path-naturalness-prediction", "max_issues_repo_head_hexsha": "dec384a58297e1cd88f44eb31771d0251e0b06d4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/science/dataset.py", "max_forks_repo_name": "YilunZhou/path-naturalness-prediction", "max_forks_repo_head_hexsha": "dec384a58297e1cd88f44eb31771d0251e0b06d4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.6618705036, "max_line_length": 95, "alphanum_fraction": 0.6699372057, "include": true, "reason": "import numpy", "num_tokens": 2898}
|
import csv
import os
import re
from collections import defaultdict, OrderedDict
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
from smartva.data.common_data import MALE, FEMALE, ADULT, CHILD, NEONATE
from smartva.grapher_prep import GrapherPrep
from smartva.loggers import status_logger, warning_logger
from smartva.utils import status_notifier
INPUT_FILENAME_TEMPLATE = '{:s}-predictions.csv'
OUTPUT_FILENAME_TEMPLATE = '{:s}-figure.png'
MODULE_LABELS = (ADULT, CHILD, NEONATE)
AGE_DATA = OrderedDict(
(
(80.0, '80+ years'),
(70.0, '70-79 years'),
(60.0, '60-69 years'),
(50.0, '50-59 years'),
(40.0, '40-49 years'),
(30.0, '30-39 years'),
(20.0, '20-29 years'),
(12.0, '12-19 years'),
(5.0, '5-11 years'),
(1.0, '1-4 years'),
(29 / 365.0, '29 days - 1 year'),
(0.0, '0-28 days')
)
)
GENDER_DATA = OrderedDict(
(
(MALE, 'male'),
(FEMALE, 'female'),
)
)
# default dict for cause of death graph
def get_default_dict():
"""Helper function to create a graph data default dict template.
:return: Graph data default dict template.
"""
default_dict = dict()
for gender in GENDER_DATA:
default_dict[gender] = OrderedDict.fromkeys(reversed(AGE_DATA.values()), 0)
return default_dict
def get_age_key(age_value):
"""Helper function to identify age group by age.
:param age_value: Age in years.
:return: String representation of age group.
"""
for k, v in AGE_DATA.items():
if age_value >= k:
return v
return 'Unknown'
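# Illustrative mapping (follows from the AGE_DATA ordering above):
#   get_age_key(34.0) -> '30-39 years'
#   get_age_key(0.5)  -> '29 days - 1 year'
#   get_age_key(-1.0) -> 'Unknown'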
# make and save cause graph
def make_graph(graph_data, cause_key, output_dir):
"""Generate and save a cause graph.
:param graph_data: Graph data dict.
:param cause_key: Name of the cause for which to generate graph.
:param output_dir: Directory in which to save graph.
"""
male_data = graph_data[MALE].values()
female_data = graph_data[FEMALE].values()
graph_title = cause_key.capitalize() + ' by age and sex'
    graph_filename = re.sub(r'[^\w_. ]', '-', cause_key.replace('(', '').replace(')', '')).replace(' ', '-').lower()
max_value = max(max(male_data), max(female_data))
xlocations = np.arange(len(AGE_DATA)) # the x locations for the groups
bar_width = 0.25 # the width of the bars
# Interactive mode off.
plt.ioff()
fig, ax = plt.subplots()
# Place male and female bars next to each other.
rects1 = ax.bar(xlocations, male_data, bar_width, color='#C44440', align='center')
rects2 = ax.bar(xlocations + bar_width, female_data, bar_width, color='#1D72AA', align='center')
ax.set_title(graph_title)
ax.set_ylabel('Number of VAs')
ax.yaxis.grid()
ax.yaxis.set_major_locator(ticker.MaxNLocator(integer=True))
    ax.set_xticks(xlocations + bar_width / 2)
    ax.set_xticklabels(list(reversed(AGE_DATA.values())), rotation=90)
# Push legend outside of the plot.
ax.legend((rects1[0], rects2[0]), GENDER_DATA.values(), loc='upper center', bbox_to_anchor=(0.5, -0.375), ncol=2)
# Add whitespace at top of bar.
ax.set_ylim(top=max_value + max_value * 0.1)
# Add whitespace before first bar and after last.
plt.xlim([min(xlocations) - .5, max(xlocations) + 1.0])
# Add some spacing for rotated xlabels.
plt.subplots_adjust(bottom=0.35)
# Save graph figure.
plt.savefig(os.path.join(output_dir, OUTPUT_FILENAME_TEMPLATE.format(graph_filename)), dpi=150)
# Clear the current figure.
plt.clf()
plt.close()
class CauseGrapher(GrapherPrep):
"""Generate and save a graph for each cause, and one for all causes."""
def _update_status(self):
status_logger.info('Making cause graphs')
status_notifier.update({'progress': 1})
def _read_graph_data(self):
graph_data = defaultdict(get_default_dict)
status_notifier.update({'sub_progress': (0, len(MODULE_LABELS))})
for cnt, module_key in enumerate(MODULE_LABELS):
status_notifier.update({'sub_progress': (cnt,)})
try:
with open(os.path.join(self.input_dir_path, INPUT_FILENAME_TEMPLATE.format(module_key)), 'rb') as f:
reader = csv.DictReader(f)
for row in reader:
self.check_abort()
try:
age_key = get_age_key(float(row['age']))
if age_key not in AGE_DATA.values():
raise ValueError('Unknown age group.')
sex_key = int(row['sex'])
if sex_key not in [1,2]:
raise ValueError('Cannot yet plot when sex is not M/F')
except ValueError as e:
# Age or sex is invalid. Log warning and skip this item.
                            warning_logger.warning('Cause Grapher :: SID {} value for age or sex is invalid: {}'
                                                   .format(row['sid'], e.message))
continue
graph_data[row['cause34']][sex_key][age_key] += 1
graph_data['All'][sex_key][age_key] += 1
except IOError:
# The file isn't there, there was no data or an error, so just skip it.
continue
return graph_data
def _make_graphs(self, graph_data):
# Make cause of death graphs.
status_notifier.update({'sub_progress': (0, len(graph_data))})
for cnt, (cause_key, data) in enumerate(graph_data.items()):
self.check_abort()
status_notifier.update({'sub_progress': (cnt,)})
make_graph(data, cause_key, self.output_dir_path)
status_notifier.update({'sub_progress': None})
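# Direct use of make_graph (illustrative; the counts below are made up and
# '/tmp' is a placeholder output directory):
#   data = get_default_dict()
#   data[MALE]['30-39 years'] = 5
#   data[FEMALE]['30-39 years'] = 7
#   make_graph(data, 'example cause', '/tmp')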
|
{"hexsha": "ff2c13772d8dcae20508bb7ba01015c162ce7b8d", "size": 5972, "ext": "py", "lang": "Python", "max_stars_repo_path": "smartva/cause_grapher.py", "max_stars_repo_name": "rileyhazard/SmartVA-Analyze-1", "max_stars_repo_head_hexsha": "0573eeff27d03f54e7506db4f1631c0cd9f54bbb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-01-23T12:57:47.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-18T17:13:08.000Z", "max_issues_repo_path": "smartva/cause_grapher.py", "max_issues_repo_name": "rileyhazard/SmartVA-Analyze-1", "max_issues_repo_head_hexsha": "0573eeff27d03f54e7506db4f1631c0cd9f54bbb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2019-01-09T22:10:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-16T04:57:06.000Z", "max_forks_repo_path": "smartva/cause_grapher.py", "max_forks_repo_name": "rileyhazard/SmartVA-Analyze-1", "max_forks_repo_head_hexsha": "0573eeff27d03f54e7506db4f1631c0cd9f54bbb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2018-12-11T22:01:13.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-07T11:38:02.000Z", "avg_line_length": 33.3631284916, "max_line_length": 117, "alphanum_fraction": 0.6063295378, "include": true, "reason": "import numpy", "num_tokens": 1415}
|
from abc import abstractmethod
from typing import List, Tuple
import numpy as np
from reinvent_chemistry.link_invent.linker_descriptors import LinkerDescriptors
from reinvent_scoring.scoring.component_parameters import ComponentParameters
from reinvent_scoring.scoring.score_components import BaseScoreComponent
from reinvent_scoring.scoring.score_summary import ComponentSummary
class BaseLinkInventComponent(BaseScoreComponent):
def __init__(self, parameters: ComponentParameters):
super().__init__(parameters)
self._linker_descriptor = LinkerDescriptors()
def calculate_score(self, labeled_molecules: List, step=-1) -> ComponentSummary:
score, raw_score = self._calculate_score(labeled_molecules)
score_summary = ComponentSummary(total_score=score, parameters=self.parameters, raw_score=raw_score)
return score_summary
    def _calculate_score(self, query_labeled_mols) -> Tuple[np.ndarray, np.ndarray]:
scores = []
for mol in query_labeled_mols:
try:
score = self._calculate_linker_property(mol)
except ValueError:
score = 0.0
scores.append(score)
transform_params = self.parameters.specific_parameters.get(
self.component_specific_parameters.TRANSFORMATION, {}
)
transformed_scores = self._transformation_function(scores, transform_params)
return np.array(transformed_scores, dtype=np.float32), np.array(scores, dtype=np.float32)
@abstractmethod
def _calculate_linker_property(self, labeled_mol):
raise NotImplementedError("_calculate_linker_property method is not implemented")
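# Minimal subclass sketch (illustrative; the concrete Link-INVENT components
# ship elsewhere in reinvent_scoring). The only hook a component must provide
# is _calculate_linker_property; the transformation, scoring loop, and error
# handling all come from the base class above.
class _LinkerAtomCountSketch(BaseLinkInventComponent):
    def _calculate_linker_property(self, labeled_mol):
        # Assumption: labeled_mol is an RDKit Mol; a real component would
        # query self._linker_descriptor rather than a raw atom count.
        return float(labeled_mol.GetNumAtoms())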
|
{"hexsha": "b7616bdf8923f69af474b3d82a140925f9871d85", "size": 1654, "ext": "py", "lang": "Python", "max_stars_repo_path": "reinvent_scoring/scoring/score_components/link_invent/base_link_invent_component.py", "max_stars_repo_name": "MolecularAI/reinvent-scoring", "max_stars_repo_head_hexsha": "f7e052ceeffd29e17e1672c33607189873c82a45", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "reinvent_scoring/scoring/score_components/link_invent/base_link_invent_component.py", "max_issues_repo_name": "MolecularAI/reinvent-scoring", "max_issues_repo_head_hexsha": "f7e052ceeffd29e17e1672c33607189873c82a45", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-11-01T23:19:42.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-22T23:41:39.000Z", "max_forks_repo_path": "reinvent_scoring/scoring/score_components/link_invent/base_link_invent_component.py", "max_forks_repo_name": "MolecularAI/reinvent-scoring", "max_forks_repo_head_hexsha": "f7e052ceeffd29e17e1672c33607189873c82a45", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-11-18T13:14:22.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-16T07:52:57.000Z", "avg_line_length": 43.5263157895, "max_line_length": 108, "alphanum_fraction": 0.7496977025, "include": true, "reason": "import numpy", "num_tokens": 326}
|
"""
Adapted from the swiss_roll.py example
packaged with Scikit-Learn.
"""
import matplotlib.pyplot as plt
import retina.core.axes
import retina.nldr as nldr
import numpy as np
from matplotlib import gridspec
from sklearn import manifold, datasets
class EventSystem(object):
def __init__(self, fig):
self.fig = fig
self.hover_sec = None
self.click_sec = None
self.fig.canvas.mpl_connect('motion_notify_event', self.mouse_over)
self.fig.canvas.mpl_connect('button_press_event', self.mouse_click)
def mouse_over(self, event):
ax = event.inaxes
try:
            sec = nld.get_layer(ax.get_title())
except:
return
if sec is not self.hover_sec and self.hover_sec:
self.hover_sec.unbound()
sec.set_prop('alpha', 1)
sec.bound()
for ax in self.fig.get_axes():
try:
                nonhover_sec = nld.get_layer(ax.get_title())
if nonhover_sec is not sec:
nonhover_sec.set_prop('alpha', 0.2)
except:
pass
self.hover_sec = sec
def mouse_click(self, event):
ax = event.inaxes
try:
            sec = nld.get_layer(ax.get_title())
except:
return
if sec is not self.click_sec and self.click_sec:
nld.showcase(sec.name)
self.click_sec = sec
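# Interaction model implemented by the handlers above: hovering over a
# projection subplot restores full alpha on the matching Swiss-roll section
# in the 3D axes and dims every other section to 0.2; clicking a subplot
# showcases the corresponding section via nld.showcase().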
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)
# Global embedding of the full roll (kept from the original sklearn example;
# the per-section embeddings in the loop below recompute X_r).
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
                                             n_components=2)
fig = plt.figure(figsize=(20, 20))
gs = gridspec.GridSpec(2, 3)
nld = plt.subplot(gs[0,0], projection='Fovea3D')
num_sections = 5
sections = nldr.mapping.ordered_section(X, num_sections, axis=0)
color = color[X[:, 0].argsort()]
colors = [color[i*len(color)//num_sections:(i+1)*len(color)//num_sections] for i in range(num_sections)]
for i, j, sec, clr in zip([0, 0, 1, 1, 1], range(num_sections), sections, colors):
swiss_sec = nld.add_layer('section ' + str(j))
swiss_sec.add_data(sec[:, 0], sec[:, 1], sec[:, 2])
nld.build_layer(swiss_sec.name, plot=nld.scatter, c=clr, cmap=plt.cm.Spectral)
ax = plt.subplot(gs[i, (j + 1) % 3], projection='Fovea2D')
X_r, err = manifold.locally_linear_embedding(sec, n_neighbors=50,
n_components=2)
proj = ax.add_layer('section ' + str(j) + ' proj')
proj.add_data(X_r[:, 0], X_r[:, 1])
ax.build_layer(proj.name, plot=ax.scatter, c=clr, cmap=plt.cm.Spectral)
ax.set_title('section ' + str(j))
handler = EventSystem(fig)
print("Hover over a projected data plot to see the corresponding segment in the original Swiss Roll.")
print("Click on a subplot to see the corresponding segment of the original Swiss Roll be showcased.")
nld.set_title("Original data")
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.show()
|
{"hexsha": "b27ec238bfcc54f45894e75fc8a55b694e8f321a", "size": 2943, "ext": "py", "lang": "Python", "max_stars_repo_path": "demos/nldr/swiss_roll.py", "max_stars_repo_name": "mcneela/Retina", "max_stars_repo_head_hexsha": "a2a671f6372848ac3bb3b304e681394cc6d90e85", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2017-08-01T03:59:58.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-02T20:11:56.000Z", "max_issues_repo_path": "demos/nldr/swiss_roll.py", "max_issues_repo_name": "mcneela/Retina", "max_issues_repo_head_hexsha": "a2a671f6372848ac3bb3b304e681394cc6d90e85", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2016-06-17T16:27:56.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-28T19:28:50.000Z", "max_forks_repo_path": "demos/nldr/swiss_roll.py", "max_forks_repo_name": "mcneela/Retina", "max_forks_repo_head_hexsha": "a2a671f6372848ac3bb3b304e681394cc6d90e85", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2016-06-22T02:06:59.000Z", "max_forks_repo_forks_event_max_datetime": "2019-02-19T19:37:58.000Z", "avg_line_length": 35.8902439024, "max_line_length": 102, "alphanum_fraction": 0.6313285763, "include": true, "reason": "import numpy", "num_tokens": 744}
|
import random as rnd
import networkx
class GraphGenerator:
"""
Basic graph generator class.
    build(size) is to be overridden by inheritors; called by the TheGame class at initiation.
    There are many possible network configurations. The following are implemented:
- GraphGeneratorSync.py
- GraphGeneratorAsync.py
"""
def __init__(self):
self.size = 0
self.graph = list()
def build(self, size):
self.size = size
def print(self):
for k in range(self.size):
print([int(j in self.graph[k]) for j in range(self.size)])
def check_connectivity(self, a, b):
if a == b:
return True
front_wave = {a}
while True:
new_wave = set()
for element in front_wave:
new_wave |= set(self.graph[element])
if len(new_wave - front_wave) == 0:
return False
if b in new_wave:
return True
front_wave |= new_wave
def check_total_connectivity(self):
for a in range(self.size):
for b in range(0, self.size):
if not self.check_connectivity(a, b):
return False
return True
def export_networkx(self):
exp = networkx.Graph()
exp.add_nodes_from(list(range(self.size)))
for k in range(self.size):
for j in self.graph[k]:
exp.add_edges_from([(k,j),(j,k)])
return exp
def dropout(self, goal_number_of_breaks, max_retries=-1):
if max_retries == -1:
max_retries = max(1000, goal_number_of_breaks*2)
print("dropout...")
retry_counter = 0
n_breaks = 0
while goal_number_of_breaks > 0 and retry_counter != max_retries:
node_a = rnd.randint(0, self.size-1)
links_from_a = self.graph[node_a]
if len(links_from_a) <= 1:
continue
node_b = rnd.choice(list(links_from_a))
if len(self.graph[node_b]) <= 1:
continue
self.graph[node_a] -= {node_b, }
self.graph[node_b] -= {node_a, }
if not self.check_connectivity(node_a, node_b):
self.graph[node_a] |= {node_b, }
self.graph[node_b] |= {node_a, }
retry_counter += 1
continue
#retry_counter = 0
goal_number_of_breaks -= 1
n_breaks += 1
print("end dropout")
return n_breaks
def random_connected_node(self, node_a):
return rnd.choice(list(self.graph[node_a]))
|
{"hexsha": "a32923fd249e2a86430729cc85f4dc96ce65c8a9", "size": 2666, "ext": "py", "lang": "Python", "max_stars_repo_path": "Graph/GraphGenerator.py", "max_stars_repo_name": "wolf-null/resource-network-sim", "max_stars_repo_head_hexsha": "45662a84b03156047ac9441c0e1c8c0b57b6cefe", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Graph/GraphGenerator.py", "max_issues_repo_name": "wolf-null/resource-network-sim", "max_issues_repo_head_hexsha": "45662a84b03156047ac9441c0e1c8c0b57b6cefe", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Graph/GraphGenerator.py", "max_forks_repo_name": "wolf-null/resource-network-sim", "max_forks_repo_head_hexsha": "45662a84b03156047ac9441c0e1c8c0b57b6cefe", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.9550561798, "max_line_length": 90, "alphanum_fraction": 0.5498874719, "include": true, "reason": "import networkx", "num_tokens": 608}
|
"""
SkyLib astrometric reduction package
Built around the local Astrometry.net engine binding. Users must create an
Astrometry.net solver using :func:`create_solver`, which loads indexes, and
then use :func:`solve_field` to obtain an :class:`astropy.wcs.WCS` instance
given a list of XY positions of field stars.
"""
from __future__ import absolute_import, division, print_function
import os
from glob import glob
import ctypes
import numpy
from astropy.wcs import Sip, WCS
from . import an_engine
__all__ = ['Solver', 'solve_field']
class Solver(object):
"""
Class that encapsulates the :class:`skylib.astrometry.an_engine.solver_t`
object and the list of indexes. An instance is created in each solver thread
and is supplied to :func:`solve_field`.
Attributes::
solver: Astrometry.net engine :class:`an_engine.solver_t` object
indexes: list of :class:`an_engine.index_t` instances
"""
def __init__(self, index_path):
"""
Create solver
:param str | list index_path: directory or list of directories
containing index files
"""
if isinstance(index_path, str):
index_path = [index_path]
self.solver = an_engine.solver_new()
self.indexes = []
for path in index_path:
for fn in glob(os.path.join(path, '*')):
# noinspection PyBroadException
try:
idx = an_engine.index_load(fn, 0, None)
if idx is not None:
self.indexes.append(idx)
except Exception:
pass
if not self.indexes:
raise ValueError('No indexes found')
# Sort indexes by the number of quads (try smaller indexes first -
# should be faster)
self.indexes.sort(key=lambda _idx: _idx.nquads)
class Solution(object):
"""
Class that encapsulates the results of astrometric reduction, including WCS
and some solution statistics
Attributes::
wcs: :class:`astropy.wcs.WCS` containing the World Coordinate System
info for solution; None if solution was not found
log_odds: logodds of best match
n_match: number of matched sources
n_conflict: number of conflicts
n_field: total number of sources
index_name: index file name that solved the image
"""
wcs = None
log_odds = None
n_match = None
n_conflict = None
n_field = None
index_name = None
def array_from_swig(data, shape, dtype=numpy.float64):
a = numpy.empty(shape, dtype)
ctypes.memmove(a.ctypes, int(data), a.nbytes)
return a
def solve_field(engine, xy, flux=None, width=None, height=None, ra_hours=0,
dec_degs=0, radius=180, min_scale=0.1, max_scale=10,
parity=None, sip_order=3, crpix_center=True, max_sources=None,
retry_lost=True, callback=None):
"""
Obtain astrometric solution given XY coordinates of field stars
    :param Solver engine: Astrometry.net engine solver instance
:param array_like xy: (n x 2) array of 0-based X and Y pixel coordinates
of stars
:param array_like flux: optional n-element array of star fluxes
:param int width: image width in pixels; defaults to the maximum minus
minimum X coordinate of stars
:param int height: image height in pixels; defaults to the maximum minus
minimum Y coordinate of stars
:param float ra_hours: optional RA of image center in hours; default: 0
:param float dec_degs: optional Dec of image center in degrees; default: 0
:param float radius: optional field search radius in degrees; default: 180
(search over the whole sky)
:param float min_scale: optional minimum pixel scale in arcseconds per
pixel; default: 0.1
:param float max_scale: optional maximum pixel scale in arcseconds per
pixel; default: 10
:param bool | None parity: image parity (sign of coordinate transformation
matrix determinant): True = normal parity, False = flipped image, None
(default) = try both
:param int sip_order: order of SIP distortion terms; default: 3; 0 - disable
calculation of distortion
:param bool crpix_center: set reference pixel to image center
:param int max_sources: use only the given number of brightest sources;
0/""/None (default) = no limit
:param bool retry_lost: if solution failed, retry in the "lost in space"
mode, i.e. without coordinate restrictions (`radius` = 180) and with
opposite parity, unless the initial search already had these
restrictions disabled
:param callable callback: optional callable that is regularly called
by the solver, accepts no arguments, and returns 0 to interrupt
the solution and 1 otherwise
:return: astrometric solution object; its `wcs` attribute is set to None if
solution was not found
:rtype: :class:`Solution`
"""
solver = engine.solver
ra = float(ra_hours)*15
dec = float(dec_degs)
r = float(radius)
# Set timer callback if requested
if callback is not None:
an_engine.set_timer_callback(
solver,
ctypes.cast(
ctypes.CFUNCTYPE(ctypes.c_int)(callback),
ctypes.c_voidp).value)
else:
an_engine.set_timer_callback(solver, 0)
# Set field star position array
n = len(xy)
xy = numpy.asanyarray(xy)
field = an_engine.starxy_new(n, flux is not None, False)
if flux is not None:
flux = numpy.asanyarray(flux)
if len(flux) != n:
raise ValueError(
'Flux array must be of the same length as XY array')
if max_sources:
order = numpy.argsort(flux)[::-1]
xy, flux = xy[order], flux[order]
del order
an_engine.starxy_set_flux_array(field, flux)
an_engine.starxy_set_xy_array(field, xy.ravel())
an_engine.solver_set_field(solver, field)
try:
# Initialize solver parameters
if width:
minx, maxx = 0, int(width) - 1
else:
minx, maxx = xy[:, 0].min(), xy[:, 0].max()
if height:
miny, maxy = 0, int(height) - 1
else:
miny, maxy = xy[:, 1].min(), xy[:, 1].max()
an_engine.solver_set_field_bounds(solver, minx, maxx, miny, maxy)
solver.quadsize_min = 0.1*min(maxx - minx + 1, maxy - miny + 1)
if crpix_center != '':
solver.set_crpix = solver.set_crpix_center = int(crpix_center)
an_engine.solver_set_radec(solver, ra, dec, r)
solver.funits_lower = float(min_scale)
solver.funits_upper = float(max_scale)
solver.logratio_tokeep = numpy.log(1e12)
solver.distance_from_quad_bonus = True
if parity is None or parity == '':
solver.parity = an_engine.PARITY_BOTH
elif int(parity):
solver.parity = an_engine.PARITY_NORMAL
else:
solver.parity = an_engine.PARITY_FLIP
enable_sip = sip_order and int(sip_order) >= 2
if enable_sip:
solver.do_tweak = True
solver.tweak_aborder = int(sip_order)
solver.tweak_abporder = int(sip_order) + 1
else:
solver.do_tweak = False
if max_sources:
solver.endobj = max_sources
else:
solver.endobj = 0
# Find indexes needed to solve the field
fmin = solver.quadsize_min*min_scale
        # Use the field bounds computed above; `width`/`height` may be None
        fmax = numpy.hypot(maxx - minx + 1, maxy - miny + 1)*max_scale
indices = []
for index in engine.indexes:
if fmin > index.index_scale_upper or fmax < index.index_scale_lower:
continue
if not an_engine.index_is_within_range(index, ra, dec, r):
continue
indices.append(index)
        if not indices:
raise ValueError(
'No indexes found for the given scale and position')
# Sort indices by scale (larger scales/smaller indices first - should
# be faster) then by distance from expected position
indices.sort(
key=lambda _idx: (
-_idx.index_scale_upper,
an_engine.healpix_distance_to_radec(
_idx.healpix, _idx.hpnside, ra, dec)[0]
if _idx.healpix >= 0 else 0,
))
an_engine.solver_clear_indexes(solver)
for index in indices:
an_engine.solver_add_index(solver, index)
# Run the solver
an_engine.solver_run(solver)
sol = Solution()
if solver.have_best_match:
best_match = solver.best_match
sol.log_odds = solver.best_logodds
sol.n_match = best_match.nmatch
sol.n_conflict = best_match.nconflict
sol.n_field = best_match.nfield
if best_match.index is not None:
sol.index_name = best_match.index.indexname
else:
best_match = None
if solver.best_match_solves:
# Get WCS parameters of best solution
sol.wcs = WCS(naxis=2)
if enable_sip:
sip = best_match.sip
wcstan = sip.wcstan
sol.wcs.wcs.ctype = ('RA---TAN-SIP', 'DEC--TAN-SIP')
a_order, b_order = sip.a_order, sip.b_order
ap_order, bp_order = sip.ap_order, sip.bp_order
a = array_from_swig(
sip.a, (an_engine.SIP_MAXORDER, an_engine.SIP_MAXORDER))
b = array_from_swig(
sip.b, (an_engine.SIP_MAXORDER, an_engine.SIP_MAXORDER))
ap = array_from_swig(
sip.ap, (an_engine.SIP_MAXORDER, an_engine.SIP_MAXORDER))
bp = array_from_swig(
sip.bp, (an_engine.SIP_MAXORDER, an_engine.SIP_MAXORDER))
sol.wcs.sip = Sip(
a[:a_order + 1, :a_order + 1],
b[:b_order + 1, :b_order + 1],
ap[:ap_order + 1, :ap_order + 1],
bp[:bp_order + 1, :bp_order + 1],
sol.wcs.wcs.crpix)
else:
wcstan = best_match.wcstan
sol.wcs.wcs.ctype = ('RA---TAN', 'DEC--TAN')
sol.wcs.wcs.crpix = array_from_swig(wcstan.crpix, (2,))
sol.wcs.wcs.crval = array_from_swig(wcstan.crval, (2,))
sol.wcs.wcs.cd = array_from_swig(wcstan.cd, (2, 2))
elif retry_lost and (radius < 180 or parity is not None):
# When no solution was found, retry with all constraints relaxed
an_engine.solver_cleanup_field(solver)
return solve_field(
engine, xy, flux, width, height, 0, 0, 180, min_scale,
max_scale, None, sip_order, crpix_center, max_sources,
retry_lost=False)
return sol
finally:
# Make solver ready for the next solution
an_engine.solver_cleanup_field(solver)
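# Minimal usage sketch (illustrative; '/data/astrometry/index' is a
# hypothetical index directory and `xy` would come from source extraction):
#   solver = Solver('/data/astrometry/index')
#   sol = solve_field(solver, xy, width=2048, height=2048,
#                     ra_hours=5.5, dec_degs=-5.4, radius=1,
#                     min_scale=0.5, max_scale=2)
#   if sol.wcs is not None:
#       ra, dec = sol.wcs.all_pix2world(xy[:, 0], xy[:, 1], 0)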
|
{"hexsha": "3c42ad3270452d842e167e3f86d9b16312d8b99a", "size": 11125, "ext": "py", "lang": "Python", "max_stars_repo_path": "skylib/astrometry/main.py", "max_stars_repo_name": "SkynetRTN/skylib", "max_stars_repo_head_hexsha": "58fe57053db6a048f8a72d7b453ae411a2302545", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-17T19:59:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-17T19:59:14.000Z", "max_issues_repo_path": "skylib/astrometry/main.py", "max_issues_repo_name": "SkynetRTN/skylib", "max_issues_repo_head_hexsha": "58fe57053db6a048f8a72d7b453ae411a2302545", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "skylib/astrometry/main.py", "max_forks_repo_name": "SkynetRTN/skylib", "max_forks_repo_head_hexsha": "58fe57053db6a048f8a72d7b453ae411a2302545", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.3322147651, "max_line_length": 80, "alphanum_fraction": 0.611505618, "include": true, "reason": "import numpy,from astropy", "num_tokens": 2664}
|
(**************************************************************************)
(* This is part of STATES, it is distributed under the terms of the *)
(* GNU Lesser General Public License version 3 *)
(* (see file LICENSE for more details) *)
(* *)
(* Copyright 2015: Jean-Guillaume Dumas, Dominique Duval *)
(* Burak Ekici, Damien Pous. *)
(**************************************************************************)
Require Import Relations Morphisms.
Require Import Program.
Require Memory Terms.
Set Implicit Arguments.
Require Import ZArith.
Open Scope Z_scope.
Require Import Bool.
Module Make(Import M: Memory.T).
Module Export DecorationsExp := Terms.Make(M).
Inductive kind := pure | ro | rw.
Inductive ekind := epure | ppg | ctc.
Inductive is : ((kind * ekind)%type) -> forall X Y, term X Y -> Prop :=
| is_tpure : forall X Y (f: X -> Y), is (pure, epure) (@tpure X Y f)
| is_comp : forall k X Y Z (f: term X Y) (g: term Y Z), is k f -> is k g -> is k (f o g)
| is_pair : forall k1 k2 X Y Z (f: term X Z) (g: term Y Z), is (ro, k2) f -> is (k1, k2) f -> is (k1, k2) g -> is (k1, k2) (pair f g) (* FIXED *)
| is_copair : forall k1 k2 X Y Z (f: term Z X) (g: term Z Y), is (k1, ppg) f -> is (k1, k2) f -> is (k1, k2) g -> is (k1, k2) (copair f g) (* FIXED *)
| is_downcast : forall X Y (f: term X Y), is (pure, ppg) (@downcast X Y f)
| is_lookup : forall i, is (ro, epure) (lookup i)
| is_update : forall i, is (rw, epure) (update i)
| is_tag : forall e, is (pure, ppg) (tag e)
| is_untag : forall e, is (pure, ctc) (untag e)
| is_pure_ro : forall X Y k (f: term X Y), is (pure, k) f -> is (ro, k) f
| is_ro_rw : forall X Y k (f: term X Y), is (ro, k) f -> is (rw, k) f
| is_pure_ppg : forall X Y k (f: term X Y), is (k, epure) f -> is (k, ppg) f
| is_ppg_ctc : forall X Y k (f: term X Y), is (k, ppg) f -> is (k, ctc) f.
Hint Constructors is.
Ltac decorate := solve[
repeat (apply is_comp || apply is_pair || apply is_copair)
||
(apply is_tpure || apply is_lookup || apply is_update ||
apply is_downcast || apply is_tag || apply is_untag || assumption)
||
(apply is_pure_ro)
||
(apply is_ro_rw)
||
(apply is_pure_ppg)
||
(apply is_ppg_ctc)
].
Ltac edecorate := solve[
repeat (apply is_comp || apply is_pair || apply is_copair)
||
(apply is_tpure || apply is_lookup || apply is_update ||
apply is_downcast || apply is_tag || apply is_untag || assumption)
||
(apply is_pure_ppg)
||
(apply is_ppg_ctc)
||
(apply is_pure_ro)
||
(apply is_ro_rw)
].
Definition dmax (k1 k2: kind): kind :=
match k1, k2 with
| pure, pure => pure
| pure, ro => ro
| pure, rw => rw
| ro, pure => ro
| rw, pure => rw
| ro, ro => ro
| ro, rw => rw
| rw, ro => rw
| rw, rw => rw
end.
Definition edmax (k1 k2: ekind): ekind :=
match k1, k2 with
| epure, epure => epure
| epure, ppg => ppg
| epure, ctc => ctc
| ppg, epure => ppg
| ctc, epure => ctc
| ppg, ppg => ppg
| ppg, ctc => ctc
| ctc, ppg => ctc
| ctc, ctc => ctc
end.
Lemma _is_comp: forall k1 k2 k3 k4 X Y Z (f: term X Y) (g: term Y Z), is (k1, k3) f -> is (k2, k4) g -> is ((dmax k1 k2), (edmax k3 k4)) (f o g).
Proof. intros.
case_eq k1; case_eq k2; case_eq k3; case_eq k4; cbn; intros; subst; try edecorate; try decorate;
apply is_comp; try (decorate || edecorate); apply is_ro_rw; apply is_ppg_ctc; easy.
Qed.
Lemma _is_pair: forall k1 k2 k3 k4 X Y Z (f: term X Z) (g: term Y Z),
is (ro, k4) f -> is (k1, k3) f -> is (k2, k4) g -> is ((dmax k1 k2), (edmax k3 k4)) (pair f g).
Proof. intros.
case_eq k1; case_eq k2; case_eq k3; case_eq k4; cbn; intros; subst; try edecorate; try decorate;
apply is_pair; try (decorate || edecorate); apply is_ro_rw; apply is_ppg_ctc; easy.
Qed.
Lemma _is_copair: forall k1 k2 k3 k4 X Y Z (f: term Z X) (g: term Z Y),
is (k1, ppg) f -> is (k1, k3) f -> is (k2, k4) g -> is ((dmax k1 k2), (edmax k3 k4)) (copair f g).
Proof. intros.
case_eq k1; case_eq k2; case_eq k3; case_eq k4; cbn; intros; subst; try edecorate; try decorate;
apply is_copair; try (decorate || edecorate); apply is_ro_rw; apply is_ppg_ctc; easy.
Qed.
Class PURE {A B: Type} (k: ekind) (f: term A B) := ispr : is (pure, k) f.
Hint Extern 0 (PURE _) => decorate : typeclass_instances.
Class RO {A B: Type} (k: ekind) (f: term A B) := isro : is (ro, k) f.
Hint Extern 0 (RO _) => decorate : typeclass_instances.
Class RW {A B: Type} (k: ekind) (f: term A B) := isrw : is (rw, k) f.
Hint Extern 0 (RW _) => decorate : typeclass_instances.
Class EPURE {A B: Type} (k: kind) (f: term A B) := isepr : is (k, epure) f.
Hint Extern 0 (EPURE _) => edecorate : typeclass_instances.
Class PPG {A B: Type} (k: kind) (f: term A B) := isthrw : is (k, ppg) f.
Hint Extern 0 (PPG _) => edecorate : typeclass_instances.
Class CTC {A B: Type} (k: kind) (f: term A B) := isctch : is (k, ctc) f.
Hint Extern 0 (CTC _) => edecorate : typeclass_instances.
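(* Examples (illustrative): PURE ppg (tag e) holds directly by is_tag,
   RW epure (update i) holds directly by is_update, and RO ppg (lookup i)
   follows from is_lookup via is_pure_ppg. *)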
End Make.
|
{"author": "ekiciburak", "repo": "impex-on-decorated-logic", "sha": "cdfd22e36e6e0c4b001d23f0cf30c73a2c6867bd", "save_path": "github-repos/coq/ekiciburak-impex-on-decorated-logic", "path": "github-repos/coq/ekiciburak-impex-on-decorated-logic/impex-on-decorated-logic-cdfd22e36e6e0c4b001d23f0cf30c73a2c6867bd/Decorations.v"}
|
#include "crab_llvm/config.h"
/**
* Heap abstraction based on sea-dsa (https://github.com/seahorn/sea-dsa).
*/
#include "llvm/IR/Module.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/Support/raw_ostream.h"
#include "sea_dsa/Graph.hh"
#include "sea_dsa/Global.hh"
#include "crab_llvm/SeaDsaHeapAbstraction.hh"
#include "crab/common/debug.hpp"
#include <set>
#include <boost/unordered_map.hpp>
#include <boost/range/iterator_range.hpp>
#include "boost/range/algorithm/set_algorithm.hpp"
namespace crab_llvm {
using namespace llvm;
using namespace sea_dsa;
namespace seadsa_heap_abs_impl {
template <typename Set>
void set_difference(Set &s1, Set &s2) {
Set s3;
boost::set_difference(s1, s2, std::inserter(s3, s3.end()));
std::swap(s3, s1);
}
template <typename Set>
void set_union(Set &s1, Set &s2) {
Set s3;
boost::set_union(s1, s2, std::inserter(s3, s3.end()));
std::swap(s3, s1);
}
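// Usage sketch (illustrative): with s1 = {a, b, c} and s2 = {b, d},
// set_difference(s1, s2) leaves s1 = {a, c}, while set_union(s1, s2)
// leaves s1 = {a, b, c, d}; both helpers update s1 in place.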
struct isInteger: std::unary_function<const llvm::Type*, bool> {
unsigned m_bitwidth;
isInteger(): m_bitwidth(0) {}
bool operator()(const llvm::Type* t) {
bool is_int = (t->isIntegerTy() && !t->isIntegerTy(1));
if (is_int) {
// XXX: We use bitwidth for overflow purposes so taking the
// minimum is the most conservative choice.
m_bitwidth = (m_bitwidth == 0 ? t->getIntegerBitWidth() :
std::min(m_bitwidth, t->getIntegerBitWidth()));
}
return is_int;
}
};
struct isBool: std::unary_function<const llvm::Type*, bool> {
bool operator()(const llvm::Type* t) const {
return t->isIntegerTy(1);
}
};
struct isIntegerOrBool: std::unary_function<const llvm::Type*, bool> {
bool operator()(const llvm::Type* t) const {
return t->isIntegerTy();
}
};
template <typename Set>
void markReachableNodes (const Node *n, Set &set) {
if (!n) return;
assert (!n->isForwarding () && "Cannot mark a forwarded node");
if (set.insert (n).second)
for (auto const &edg : n->links ())
markReachableNodes (edg.second->getNode (), set);
}
template <typename Set>
void reachableNodes (const Function &fn, Graph &g, Set &inputReach, Set& retReach) {
// formal parameters
for (Function::const_arg_iterator I = fn.arg_begin(), E = fn.arg_end(); I != E; ++I) {
const Value &arg = *I;
if (g.hasCell (arg)) {
Cell &c = g.mkCell (arg, Cell ());
markReachableNodes (c.getNode (), inputReach);
}
}
// globals
for (auto &kv : boost::make_iterator_range (g.globals_begin (),
g.globals_end ())) {
markReachableNodes (kv.second->getNode (), inputReach);
}
// return value
if (g.hasRetCell (fn)) {
markReachableNodes (g.getRetCell (fn).getNode(), retReach);
}
}
/// Computes the nodes reachable from the call arguments in the graph.
/// reach - all reachable nodes
/// outReach - subset of reach that is only reachable from the return node
template <typename Set1, typename Set2>
void argReachableNodes(const llvm::Function&fn, Graph &G,
Set1 &reach, Set2 &outReach) {
reachableNodes (fn, G, reach, outReach);
seadsa_heap_abs_impl::set_difference (outReach, reach);
seadsa_heap_abs_impl::set_union (reach, outReach);
}
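// Net effect of argReachableNodes (descriptive note, not in the original
// source): after the set_difference/set_union calls above, outReach holds
// retReach minus the argument-reachable nodes (i.e. nodes reachable only
// through the return value) and reach holds the union of both sets.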
} // end namespace seadsa_heap_abs_impl
// Return the unique scalar value if the node corresponds to a single
// typed global memory cell, or nullptr otherwise.
template<typename Pred>
static const llvm::Value* getTypedSingleton(const Node* n, Pred& is_typed) {
if (!n) return nullptr;
if (const llvm::Value* v = n->getUniqueScalar()) {
if (const llvm::GlobalVariable *gv =
llvm::dyn_cast<const llvm::GlobalVariable>(v)) {
if (is_typed(gv->getType()->getElementType()))
return v;
}
}
return nullptr;
}
// Return true if the cell (n,o) contains a value whose type is accepted
// by the is_typed predicate.
template<typename Pred>
static bool isTypedCell(const Node* n, unsigned o, Pred& is_typed) {
if (!n) {
return false;
}
if (n->hasAccessedType(o)) {
for (const llvm::Type* t: n->getAccessedType(o)){
if (!is_typed(t)) {
return false;
}
}
}
return true;
}
// Return true if the cell (n,o) points to an array whose element types
// are all accepted by the is_typed predicate.
template<typename Pred>
static bool isTypedArrayCell(const Node* n, unsigned o, Pred& is_typed) {
if (!n) {
return false;
}
// sea-dsa only allows arrays at offset 0, otherwise it collapses
// the node.
if (!n->isArray() || o != 0)
return false;
if (n->hasAccessedType(o)) {
for (const llvm::Type* t: n->getAccessedType(o)) {
if (!is_typed(t)) {
return false;
}
}
}
return true;
}
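// Example (illustrative): with is_typed = isInteger, a cell whose only
// accessed type at its offset is i32 satisfies isTypedCell and records a
// bitwidth of 32, while an i1 cell is rejected by isInteger (it is picked
// up later by the isBool predicate instead).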
// canBeDisambiguated succeeds iff the returned region_info's type is not UNTYPED_REGION.
static region_info canBeDisambiguated(const Cell& c,
bool disambiguate_unknown,
bool disambiguate_ptr_cast,
bool disambiguate_external) {
if (c.isNull()) {
return region_info(UNTYPED_REGION, 0);
}
const Node* n = c.getNode();
unsigned offset = c.getOffset();
CRAB_LOG("heap-abs",
llvm::errs () << "\t*** Checking whether node at offset " << offset
<< " can be disambiguated ... \n"
<< *n << "\n";);
if (n->isCollapsed()) {
CRAB_LOG("heap-abs",
llvm::errs() << "\tCannot be disambiguated: node is already collapsed.\n";);
return region_info(UNTYPED_REGION, 0);
}
// if (n->isUnknown()) {
// if (!disambiguate_unknown) {
// CRAB_LOG("heap-abs",
// llvm::errs() << "\tCannot be disambiguated: node is unknown.\n";);
// return region_info(UNTYPED_REGION, 0);
// }
// }
// if (n->isIncomplete()) {
// if (!disambiguate_external) {
// CRAB_LOG("heap-abs",
// llvm::errs() << "\tCannot be disambiguated: node is incomplete.\n";);
// return region_info(UNTYPED_REGION, 0);
// }
// }
if (n->isIntToPtr() || n->isPtrToInt()) {
if (!disambiguate_ptr_cast) {
CRAB_LOG("heap-abs",
llvm::errs() << "\tCannot be disambiguated: node is casted "
<< "from/to an integer.\n";);
return region_info(UNTYPED_REGION, 0);
}
}
if (n->isExternal()) {
if (!disambiguate_external) {
CRAB_LOG("heap-abs",
llvm::errs() << "\tCannot be disambiguated: node is external.\n";);
return region_info(UNTYPED_REGION, 0);
}
}
seadsa_heap_abs_impl::isInteger int_pred;
if (isTypedCell(n, offset, int_pred) || isTypedArrayCell(n, offset, int_pred)) {
CRAB_LOG("heap-abs", llvm::errs() << "\tDisambiguation succeed!\n";);
return region_info(INT_REGION, int_pred.m_bitwidth);
}
seadsa_heap_abs_impl::isBool bool_pred;
if (isTypedCell(n, offset, bool_pred) || isTypedArrayCell(n, offset, bool_pred)) {
CRAB_LOG("heap-abs", llvm::errs() << "\tDisambiguation succeed!\n";);
return region_info(BOOL_REGION, 1);
}
// TODO: modify here to consider cells containing pointers.
CRAB_LOG("heap-abs",
llvm::errs() << "\tCannot be disambiguated: do not contain integer.\n";);
return region_info(UNTYPED_REGION, 0);
}
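// Outcome summary (illustrative): an i32 cell maps to
// region_info(INT_REGION, 32), an i1 cell to region_info(BOOL_REGION, 1),
// and collapsed, int<->ptr-casted, or external nodes (subject to the
// disambiguation flags) map to region_info(UNTYPED_REGION, 0).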
///////
// class methods
///////
int SeaDsaHeapAbstraction::getId(const Cell& c) {
const Node* n = c.getNode();
unsigned offset = c.getOffset();
auto it = m_node_ids.find(n);
if (it != m_node_ids.end()) {
return it->second + offset;
}
unsigned id = m_max_id;
m_node_ids[n] = id;
// XXX: we only keep the reverse-map entry for offset 0. That's
// fine because this map is used only in getSingleton, which can
// only succeed when the offset is 0.
m_rev_node_ids[id] = n;
if (n->size() == 0) {
// XXX: nodes can have zero size
assert (offset == 0);
m_max_id++;
return id;
}
// -- allocate enough ids for every byte of the object
assert (n->size() > 0);
m_max_id += n->size();
return id + offset;
}
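// Example (illustrative): if m_max_id is 100 when a node of size 8 is
// first seen, the node's base id is 100 and m_max_id advances to 108; the
// cell at offset 3 of that node then gets id 103, i.e. one id per byte.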
// compute and cache the set of read, mod and new nodes of a whole
// function such that mod nodes are a subset of the read nodes and
// the new nodes are disjoint from mod nodes.
void SeaDsaHeapAbstraction::cacheReadModNewNodes(const llvm::Function& f) {
if (!m_dsa || !(m_dsa->hasGraph(f))) {
return;
}
Graph &G = m_dsa->getGraph(f);
// hook: skip shadow mem functions created by SeaHorn
// We treat them as readnone functions
if (f.getName().startswith("shadow.mem")) return;
std::set<const Node*> reach, retReach;
seadsa_heap_abs_impl::argReachableNodes(f, G, reach, retReach);
region_set_t reads, mods, news;
for (const Node* n : reach) {
if (!n->isRead() && !n->isModified()) {
continue;
}
// Iterate over all cells of the node and extract regions from there
for (auto &kv: n->types()) {
Cell c(const_cast<Node*>(n), kv.first);
region_info r_info = canBeDisambiguated(c,
m_disambiguate_unknown,
m_disambiguate_ptr_cast,
m_disambiguate_external);
if (r_info.get_type() != UNTYPED_REGION) {
int id = getId(c);
if ((n->isRead() || n->isModified()) && !retReach.count(n)) {
reads.insert(region_t(static_cast<HeapAbstraction*>(this), id, r_info));
}
if (n->isModified() && !retReach.count(n)) {
mods.insert(region_t(static_cast<HeapAbstraction*>(this), id, r_info));
}
if (n->isModified() && retReach.count(n)) {
news.insert(region_t(static_cast<HeapAbstraction*>(this), id, r_info));
}
}
}
}
m_func_accessed[&f] = reads;
m_func_mods[&f] = mods;
m_func_news[&f] = news;
}
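// Classification recap (descriptive note, not in the original source): a
// disambiguated region on a node that is not reachable solely through the
// return value goes into reads (and also into mods if the node is
// written), while a written node reachable only through the return value
// goes into news, i.e. memory freshly created by the function.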
// Compute and cache the set of read, mod and new nodes of a
// callsite such that mod nodes are a subset of the read nodes and
// the new nodes are disjoint from mod nodes.
void SeaDsaHeapAbstraction::cacheReadModNewNodesFromCallSite(llvm::CallInst& I) {
if (!m_dsa)
return;
/// ignore inline assembly
if (I.isInlineAsm())
return;
ImmutableCallSite ICS(&I);
DsaCallSite CS(ICS);
if (!CS.getCallee())
return;
// hook: skip shadow mem functions created by SeaHorn
// We treat them as readnone functions
if (CS.getCallee()->getName().startswith("shadow.mem"))
return;
const Function &CalleeF = *CS.getCallee();
const Function &CallerF = *CS.getCaller();
if (!m_dsa->hasGraph(CalleeF))
return;
if (!m_dsa->hasGraph(CallerF))
return;
Graph &callerG = m_dsa->getGraph(CallerF);
Graph &calleeG = m_dsa->getGraph(CalleeF);
// -- compute callee nodes reachable from arguments and returns
std::set<const Node*> reach;
std::set<const Node*> retReach;
seadsa_heap_abs_impl::argReachableNodes (CalleeF, calleeG, reach, retReach);
// -- compute mapping between callee and caller graphs
SimulationMapper simMap;
Graph::computeCalleeCallerMapping (CS, calleeG, callerG, simMap);
region_set_t reads, mods, news;
for (const Node* n : reach) {
if (!n->isRead() && !n->isModified())
continue;
// Iterate over all cells of the node and extract regions
for (auto &kv: n->types()) {
Cell c(const_cast<Node*>(n), kv.first);
region_info r_info = canBeDisambiguated(c,
m_disambiguate_unknown,
m_disambiguate_ptr_cast,
m_disambiguate_external);
if (r_info.get_type() != UNTYPED_REGION) {
// Map the callee node to the node in the caller's callsite
Cell callerC = simMap.get(c);
if (callerC.isNull()) {
continue;
}
int id = getId(callerC);
if ((n->isRead() || n->isModified()) && !retReach.count(n)) {
reads.insert(region_t(static_cast<HeapAbstraction*>(this), id, r_info));
}
if (n->isModified() && !retReach.count(n)) {
mods.insert(region_t(static_cast<HeapAbstraction*>(this), id, r_info));
}
if (n->isModified() && retReach.count(n)) {
news.insert(region_t(static_cast<HeapAbstraction*>(this), id, r_info));
}
}
}
}
// -- add the region of the lhs of the call site
region_t ret = getRegion(*(I.getParent()->getParent()), &I);
if (!ret.isUnknown()) mods.insert(ret);
m_callsite_accessed [&I] = reads;
m_callsite_mods [&I] = mods;
m_callsite_news [&I] = news;
}
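// Note (descriptive, not in the original source): the SimulationMapper
// translates each callee-side cell into its caller-side counterpart, so
// the region ids recorded for this callsite live in the caller's graph;
// callee cells with no caller counterpart are skipped above.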
SeaDsaHeapAbstraction::SeaDsaHeapAbstraction(llvm::Module& M,
sea_dsa::GlobalAnalysis* dsa,
bool disambiguate_unknown,
bool disambiguate_ptr_cast,
bool disambiguate_external)
: m_M(M), m_dsa(dsa), m_max_id(0),
m_disambiguate_unknown(disambiguate_unknown),
m_disambiguate_ptr_cast(disambiguate_ptr_cast),
m_disambiguate_external(disambiguate_external) {
// --- Pre-compute all the information per function and
// callsites
CRAB_LOG("heap-abs",
llvm::errs() << "========= HeapAbstraction using sea-dsa =========\n");
CRAB_VERBOSE_IF(3,
for (auto& F: M) {
if (m_dsa->hasGraph(F)) {
auto& G = m_dsa->getGraph(F);
G.write(errs());
errs() << "\n";
}
});
for (auto &F: boost::make_iterator_range(m_M)) {
cacheReadModNewNodes(F);
llvm::inst_iterator InstIt = inst_begin(F), InstItEnd = inst_end(F);
for (; InstIt != InstItEnd; ++InstIt) {
if (llvm::CallInst *Call = llvm::dyn_cast<llvm::CallInst>(&*InstIt)) {
cacheReadModNewNodesFromCallSite(*Call);
}
}
}
}
// f is used to know in which Graph we should search for V
SeaDsaHeapAbstraction::region_t
SeaDsaHeapAbstraction::getRegion(const llvm::Function& fn, llvm::Value* V) {
if (!m_dsa || !m_dsa->hasGraph(fn)) {
return region_t();
}
Graph& G = m_dsa->getGraph(fn);
if (!G.hasCell(*V)) {
return region_t();
}
const Cell& c = G.getCell(*V);
if (c.isNull()) {
return region_t();
}
region_info r_info = canBeDisambiguated(c,
m_disambiguate_unknown,
m_disambiguate_ptr_cast,
m_disambiguate_external);
return (r_info.get_type() == UNTYPED_REGION ?
region_t() :
region_t(static_cast<HeapAbstraction*>(this), getId(c), r_info));
}
const llvm::Value* SeaDsaHeapAbstraction::getSingleton(int region) const {
auto const it = m_rev_node_ids.find(region);
if (it == m_rev_node_ids.end())
return nullptr;
// TODO: consider also singleton containing pointers.
seadsa_heap_abs_impl::isIntegerOrBool pred;
return getTypedSingleton(it->second, pred);
}
SeaDsaHeapAbstraction::region_set_t
SeaDsaHeapAbstraction::getAccessedRegions(const llvm::Function& fn) {
return m_func_accessed[&fn];
}
SeaDsaHeapAbstraction::region_set_t
SeaDsaHeapAbstraction::getOnlyReadRegions(const llvm::Function& fn) {
region_set_t s1 = m_func_accessed[&fn];
region_set_t s2 = m_func_mods[&fn];
seadsa_heap_abs_impl::set_difference(s1,s2);
return s1;
}
SeaDsaHeapAbstraction::region_set_t
SeaDsaHeapAbstraction::getModifiedRegions(const llvm::Function& fn) {
return m_func_mods[&fn];
}
SeaDsaHeapAbstraction::region_set_t
SeaDsaHeapAbstraction::getNewRegions(const llvm::Function& fn) {
return m_func_news[&fn];
}
SeaDsaHeapAbstraction::region_set_t
SeaDsaHeapAbstraction::getAccessedRegions(llvm::CallInst& I) {
return m_callsite_accessed[&I];
}
SeaDsaHeapAbstraction::region_set_t
SeaDsaHeapAbstraction::getOnlyReadRegions(llvm::CallInst& I) {
region_set_t s1 = m_callsite_accessed[&I];
region_set_t s2 = m_callsite_mods[&I];
seadsa_heap_abs_impl::set_difference(s1,s2);
return s1;
}
SeaDsaHeapAbstraction::region_set_t
SeaDsaHeapAbstraction::getModifiedRegions(llvm::CallInst& I) {
return m_callsite_mods[&I];
}
SeaDsaHeapAbstraction::region_set_t
SeaDsaHeapAbstraction::getNewRegions(llvm::CallInst& I) {
return m_callsite_news[&I];
}
} // end namespace
|
{"hexsha": "406de96007ccded92c6790940ae62890ed9f6b4b", "size": 16168, "ext": "cc", "lang": "C++", "max_stars_repo_path": "lib/CrabLlvm/SeaDsaHeapAbstraction.cc", "max_stars_repo_name": "kuhar/crab-llvm", "max_stars_repo_head_hexsha": "fa548efd6c6c104d509d48d2ae7af09b7b7f1576", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lib/CrabLlvm/SeaDsaHeapAbstraction.cc", "max_issues_repo_name": "kuhar/crab-llvm", "max_issues_repo_head_hexsha": "fa548efd6c6c104d509d48d2ae7af09b7b7f1576", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/CrabLlvm/SeaDsaHeapAbstraction.cc", "max_forks_repo_name": "kuhar/crab-llvm", "max_forks_repo_head_hexsha": "fa548efd6c6c104d509d48d2ae7af09b7b7f1576", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.5575868373, "max_line_length": 90, "alphanum_fraction": 0.6344012865, "num_tokens": 4663}
|
from keras.applications.inception_v3 import InceptionV3
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras import backend as K
def model_inceptionv3(height,width,depth,classes):
#Build InceptionV3 with ImageNet pre-trained weights
model_inceptionv3 = InceptionV3(weights='imagenet', include_top=False, input_shape=(width,height,depth))
#Freeze convolutional layers
for layer in model_inceptionv3.layers:
layer.trainable = False
"""
#avoid input shape errors that depend on the backend image data format
img_rows,img_cols = 299,299
if K.image_data_format() == 'channels_first':
input_crop = input_crop.reshape(input_crop.shape[0], 3, img_rows, img_cols)
input_shape = (3, img_rows, img_cols)
else:
input_crop = input_crop.reshape(input_crop.shape[0], img_rows, img_cols, 3)
input_shape = (img_rows, img_cols, 3)
"""
#Build a new classification head on top of the frozen InceptionV3 base
output_inceptionv3 = model_inceptionv3.output
glob_avrg_pool = GlobalAveragePooling2D()(output_inceptionv3)
fc_1 = Dense(1024, activation='elu', name='fc_1')(glob_avrg_pool)
# Final softmax classification layer
predictions = Dense(classes, activation='softmax', name='predictions')(fc_1)
#New trainable model
model = Model(inputs=model_inceptionv3.input, outputs=predictions)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
return model
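# Usage sketch (hypothetical values; x_train/y_train are assumed to be
# preprocessed 299x299 RGB crops with one-hot encoded labels):
#
#   model = model_inceptionv3(height=299, width=299, depth=3, classes=4)
#   model.fit(x_train, y_train, batch_size=32, epochs=5)
#
# Only the new pooling/Dense head is trained; the InceptionV3 base stays
# frozen.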
|
{"hexsha": "edbf5ffa15be1298b588336e467b183f7359c1a4", "size": 1849, "ext": "py", "lang": "Python", "max_stars_repo_path": "inceptionv3_highfive_model.py", "max_stars_repo_name": "alexandrosstergiou/Inception_v3_TV_Human_Interactions", "max_stars_repo_head_hexsha": "524ad7b5a0630d05b3aa4f2d5636bf097bd4d7a7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2018-10-20T16:09:43.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-24T22:06:13.000Z", "max_issues_repo_path": "inceptionv3_highfive_model.py", "max_issues_repo_name": "alexandrosstergiou/Inception_v3_TV_Human_Interactions", "max_issues_repo_head_hexsha": "524ad7b5a0630d05b3aa4f2d5636bf097bd4d7a7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-04-15T08:22:39.000Z", "max_issues_repo_issues_event_max_datetime": "2019-04-18T08:52:13.000Z", "max_forks_repo_path": "inceptionv3_highfive_model.py", "max_forks_repo_name": "alexandrosstergiou/Inception_v3_TV_Human_Interactions", "max_forks_repo_head_hexsha": "524ad7b5a0630d05b3aa4f2d5636bf097bd4d7a7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2018-12-18T14:47:08.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-03T14:00:26.000Z", "avg_line_length": 31.8793103448, "max_line_length": 108, "alphanum_fraction": 0.7533802055, "include": true, "reason": "import numpy", "num_tokens": 442}
|
%----------------------------------------------------------------------------------------
% PACKAGES AND THEMES
%----------------------------------------------------------------------------------------
\documentclass[aspectratio=169,xcolor=dvipsnames]{beamer}
\usetheme{SimplePlus}
\usepackage{hyperref}
\usepackage{graphicx} % Allows including images
\usepackage{booktabs} % Allows the use of \toprule, \midrule and \bottomrule in tables
%----------------------------------------------------------------------------------------
% TITLE PAGE
%----------------------------------------------------------------------------------------
\title[ECHO Slides]{ECHO Slides} % The short title appears at the bottom of every slide, the full title is only on the title page
\subtitle{These figures are in-process}
\author[Bonham] {Kevin Bonham}
\institute[Wellesley College] % Your institution as it will appear on the bottom of every slide, may be shorthand to save space
{
Department of Biological Sciences \\
Wellesley College
}
\date{\today} % Date, can be changed to a custom date
%----------------------------------------------------------------------------------------
% PRESENTATION SLIDES
%----------------------------------------------------------------------------------------
\begin{document}
\begin{frame}
% Print the title page as the first slide
\titlepage
\end{frame}
\begin{frame}{Overview}
% Throughout your presentation, if you choose to use \section{} and \subsection{} commands, these will automatically be printed on this slide as an overview of your presentation
\tableofcontents
\end{frame}
%------------------------------------------------
\section{Demographics}
%------------------------------------------------
\input{subsections/demographics}
%------------------------------------------------
\section{Sample details}
%------------------------------------------------
\input{subsections/samples}
%------------------------------------------------
\section{Brain stuff}
%------------------------------------------------
\input{subsections/brain_figures}
%------------------------------------------------
\section{Metabolites}
%------------------------------------------------
\input{subsections/metabolites}
%------------------------------------------------
\section{Linear Models}
%------------------------------------------------
\input{subsections/models}
\begin{frame}
\Huge{\centerline{\textbf{The End}}}
\end{frame}
%----------------------------------------------------------------------------------------
\end{document}
|
{"hexsha": "05e57697b9b1b3459458e217e67b9ef9405034e7", "size": 2592, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "slides/figures.tex", "max_stars_repo_name": "Klepac-Ceraj-Lab/ResonanceAnalysis", "max_stars_repo_head_hexsha": "bc29d9a2085b441a7d2ccea5a290cce0b285eec5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "slides/figures.tex", "max_issues_repo_name": "Klepac-Ceraj-Lab/ResonanceAnalysis", "max_issues_repo_head_hexsha": "bc29d9a2085b441a7d2ccea5a290cce0b285eec5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "slides/figures.tex", "max_forks_repo_name": "Klepac-Ceraj-Lab/ResonanceAnalysis", "max_forks_repo_head_hexsha": "bc29d9a2085b441a7d2ccea5a290cce0b285eec5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.0, "max_line_length": 181, "alphanum_fraction": 0.4274691358, "num_tokens": 443}
|