text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
import numpy as np
import cv2
import time
import os
import math
import random
from musthe import *
from synthesizer import Player, Synthesizer, Waveform
import skimage.measure
from flask import Flask, render_template, request
# Flask application object; routes below are registered against it.
app = Flask(__name__)

@app.route('/')
def root():
    """Serve the front-end page (renders templates/index.html)."""
    return render_template('index.html')
@app.route('/convert', methods=['POST'])
def play():
    """Sonify an image: map pixel intensities onto the notes/chords of a
    randomly chosen major scale and play them through a live sine synth.

    Returns a plain success string once the whole (pooled) image has been
    played. NOTE(review): the image path is hard-coded; the POST payload is
    currently ignored (see the commented `request.get('img')` line).
    """
    ####################
    # Audio output: open a live stream and build a single-oscillator sine synth.
    player = Player()
    player.open_stream()
    synthesizer = Synthesizer(osc1_waveform=Waveform.sine, osc1_volume=1.0, use_osc2=False)
    #####################
    # 1-in-4 chance per pixel of playing a chord instead of a single note.
    chordOrNot = [0,0,0,1]
    #SCALING_FACTOR = 7.0/255 #TODO
    # Candidate note durations in seconds (sampled uniformly per pixel).
    durations = (0.25,0.5)#,0.5,0.75,1.0)
    # TODO: Gaussian
    # Part 0: Chord dictionary
    # Full piano frequency table (note name + octave -> Hz).
    freq_update = {}
    freq = {'A0': 27.5, 'A#0': 29.14, 'B0': 30.87, 'C1': 32.7, 'C#1': 34.65, 'D1': 36.71, 'D#1': 38.89, 'E1': 41.2, 'F1': 43.65, 'F#1': 46.25, 'G1': 49.0, 'G#1': 51.91, 'A1': 55.0, 'A#1': 58.27, 'B1': 61.74, 'C2': 65.41, 'C#2': 69.3, 'D2': 73.42, 'D#2': 77.78, 'E2': 82.41, 'F2': 87.31, 'F#2': 92.5, 'G2': 98.0, 'G#2': 103.83, 'A2': 110.0, 'A#2': 116.54, 'B2': 123.47, 'C3': 130.81, 'C#3': 138.59, 'D3': 146.83, 'D#3': 155.56, 'E3': 164.81, 'F3': 174.61, 'F#3': 185.0, 'G3': 196.0, 'G#3': 207.65, 'A3': 220.0, 'A#3': 233.08, 'B3': 246.94, 'C4': 261.63, 'C#4': 277.18, 'D4': 293.66, 'D#4': 311.13, 'E4': 329.63, 'F4': 349.23, 'F#4': 369.99, 'G4': 392.0, 'G#4': 415.3, 'A4': 440.0, 'A#4': 466.16, 'B4': 493.88, 'C5': 523.25, 'C#5': 554.37, 'D5': 587.33, 'D#5': 622.25, 'E5': 659.26, 'F5': 698.46, 'F#5': 739.99, 'G5': 783.99, 'G#5': 830.61, 'A5': 880.0, 'A#5': 932.33, 'B5': 987.77, 'C6': 1046.5, 'C#6': 1108.73, 'D6': 1174.66, 'D#6': 1244.51, 'E6': 1318.51, 'F6': 1396.91, 'F#6': 1479.98, 'G6': 1567.98, 'G#6': 1661.22, 'A6': 1760.0, 'A#6': 1864.66, 'B6': 1975.53, 'C7': 2093.0, 'C#7': 2217.46, 'D7': 2349.32, 'D#7': 2489.02, 'E7': 2637.02, 'F7': 2793.83, 'F#7': 2959.96, 'G7': 3135.96, 'G#7': 3322.44, 'A7': 3520.0, 'A#7': 3729.31, 'B7': 3951.07, 'C8': 4186.01}
    # Reduce the table to octave 4 only, keyed by bare note name (e.g. 'C#').
    for k,v in freq.items():
        note = k[:-1]
        octave = int(k[-1])
        # NOTE(review): rebinding `freq` to a float inside the loop is
        # confusing but functional — the `.items()` view was created before
        # the rebinding, so iteration over the original dict continues.
        freq = v
        if octave == 4:
            freq_update[note] = freq
    freq = freq_update
    # Part 1: Choose a scale. Extract the notes and chords from that scale.
    #all_possible_scales = list(Scale.all('major'))
    m = 'major'
    all_possible_scales = [Scale('C4',m), Scale('A4',m), Scale('F4',m), Scale('G4',m)]
    choice_of_scale = random.choice(all_possible_scales)
    notes = [choice_of_scale[i] for i in range(len(choice_of_scale))]
    # Part 2: Choose a permutation of chords and notes from the list.
    ## Once it is over, pick a new random permutation and keep going unless stopped.
    # Part 3: Go through the image and based on pixel values, play the permutation.
    # Part 3 -->
    # Read grayscale, downsample via mean pooling, then randomize pixel order.
    image = cv2.imread('images/nature.jpg', 0)
    #image = str(request.get('img'))
    image = skimage.measure.block_reduce(image, (150,150), np.mean)
    image = image.flatten()
    # pooling stuff happens here
    image = np.random.permutation(image)
    for px in image: #px is the pixel value
        if px == 255:
            px = px-1
        isChord = random.choice(chordOrNot)
        # Linearly map pixel intensity [0,255) to a scale-degree index.
        note = math.trunc(px*len(notes)/255.0)
        duration = random.choice(durations)
        if note >= len(notes):
            continue
        note = str(notes[note])
        if note not in freq:
            # NOTE(review): fallback for names missing from the octave-4 table;
            # shifting the letter by one (e.g. 'G#'->'H') is not a true
            # enharmonic respelling — invalid results are skipped below.
            # TODO confirm the intended mapping against musthe's note names.
            flatOrSharp = note[-1]
            if flatOrSharp == '#':
                note = chr(ord(note[0])+1)
            else:
                note = chr(ord(note[0])-1)
        if note not in freq:
            continue
        fr = freq[note]
        if(isChord):
            # play a chord
            # Build the major triad on this root and collect its frequencies.
            notes_in_chord = Chord(Note(note), 'M').notes
            freq_list = []
            for n in notes_in_chord:
                a = str(n)
                if a not in freq:
                    # Same letter-shift fallback as above for chord members.
                    flatOrSharp = a[-1]
                    if flatOrSharp == '#':
                        a = chr(ord(a[0])+1)
                    else:
                        a = chr(ord(a[0])-1)
                if a not in freq:
                    break
                freq_list.append(freq[a])
            player.play_wave(synthesizer.generate_chord(freq_list, duration))
        else:
            # play a note
            player.play_wave(synthesizer.generate_constant_wave(fr, duration))
    return "Successfully vocalized image";
if __name__ == '__main__':
    # Run the Flask development server locally (debug mode; not for production).
    app.run(host='127.0.0.1', port=8080, debug=True)
|
{"hexsha": "cc98458d22b5748448ea7533846981821e21a5ce", "size": 4526, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "abhishekbabu/musique", "max_stars_repo_head_hexsha": "e94ecb19ab0210797718750d4d1f0eb2759d768a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main.py", "max_issues_repo_name": "abhishekbabu/musique", "max_issues_repo_head_hexsha": "e94ecb19ab0210797718750d4d1f0eb2759d768a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "abhishekbabu/musique", "max_forks_repo_head_hexsha": "e94ecb19ab0210797718750d4d1f0eb2759d768a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.1454545455, "max_line_length": 1261, "alphanum_fraction": 0.5368979231, "include": true, "reason": "import numpy", "num_tokens": 1687}
|
"""Provides Elite."""
from typing import NamedTuple, Tuple, Union
import numpy as np
class Elite(NamedTuple):
    """Represents a single elite in an archive.

    Note that since this class is a namedtuple, its fields may be accessed
    either by name or by integer indices (sol=0, obj=1, beh=2, idx=3, meta=4).
    """

    #: Parameters of the elite's solution.
    sol: np.ndarray

    #: Objective value evaluation.
    obj: float

    #: Behavior values.
    beh: np.ndarray

    #: Index of the elite's behavior values in the archive (see
    #: :meth:`ArchiveBase.get_index`).
    idx: Union[int, Tuple[int]]

    #: Metadata object for the elite.
    meta: object
|
{"hexsha": "f137250354b4520ee5aaa3e8daa78a828faa5a33", "size": 639, "ext": "py", "lang": "Python", "max_stars_repo_path": "ribs/archives/_elite.py", "max_stars_repo_name": "icaros-usc/pyribs", "max_stars_repo_head_hexsha": "ef289a930e7a8a51286cf657f7e4b29551277350", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 108, "max_stars_repo_stars_event_min_datetime": "2021-01-30T10:54:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T20:49:31.000Z", "max_issues_repo_path": "ribs/archives/_elite.py", "max_issues_repo_name": "icaros-usc/pyribs", "max_issues_repo_head_hexsha": "ef289a930e7a8a51286cf657f7e4b29551277350", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 70, "max_issues_repo_issues_event_min_datetime": "2021-01-29T10:12:36.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-26T22:22:05.000Z", "max_forks_repo_path": "ribs/archives/_elite.py", "max_forks_repo_name": "icaros-usc/pyribs", "max_forks_repo_head_hexsha": "ef289a930e7a8a51286cf657f7e4b29551277350", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 15, "max_forks_repo_forks_event_min_datetime": "2021-01-30T18:48:59.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T16:28:41.000Z", "avg_line_length": 22.0344827586, "max_line_length": 74, "alphanum_fraction": 0.6666666667, "include": true, "reason": "import numpy", "num_tokens": 148}
|
import iris # NOQA
import netCDF4 # NOQA
import numpy # NOQA
import glob # NOQA
import collections # NOQA
import datetime # NOQA
import matplotlib # NOQA
import matplotlib.pyplot as plt # NOQA
import cf_units # NOQA
from .utility import * # NOQA
from .dataset import * # NOQA
from .extract_timeseries import * # NOQA
from .cmems_reader import * # NOQA
from .plot_timeseries import * # NOQA
from .plot_profile import * # NOQA
from .plot_taylor_diag import * # NOQA
from .plot_target_diag import * # NOQA
from .plot_map import * # NOQA
from .plot_timeprofile import * # NOQA
from .plot_timetransect import * # NOQA
from .plot_scatter import * # NOQA
from . import nemo_reader # NOQA
from . import cmems_reader # NOQA
from . import ices_reader # NOQA
from . import statistics # NOQA
from . import numpy_interface # NOQA
|
{"hexsha": "2ba92c54db9e8dce0126b1eafcdce93548c9e351", "size": 845, "ext": "py", "lang": "Python", "max_stars_repo_path": "galene/__init__.py", "max_stars_repo_name": "tkarna/galene", "max_stars_repo_head_hexsha": "a05463d3d0c9191c51893df4593d9ce0252d25fb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "galene/__init__.py", "max_issues_repo_name": "tkarna/galene", "max_issues_repo_head_hexsha": "a05463d3d0c9191c51893df4593d9ce0252d25fb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "galene/__init__.py", "max_forks_repo_name": "tkarna/galene", "max_forks_repo_head_hexsha": "a05463d3d0c9191c51893df4593d9ce0252d25fb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.1379310345, "max_line_length": 41, "alphanum_fraction": 0.7349112426, "include": true, "reason": "import numpy", "num_tokens": 262}
|
import numpy as np
def np_mul_2_3():
    """Return the NumPy product of 2 and 3."""
    factors = (2, 3)
    return np.multiply(*factors)
def np_add_2_3():
    """Return the NumPy sum of 2 and 3."""
    terms = (2, 3)
    return np.add(*terms)
def py_mul_2_3():
    """Return the pure-Python product of 2 and 3."""
    product = 2 * 3
    return product
def py_add_2_3():
    """Return the pure-Python sum of 2 and 3."""
    total = 2 + 3
    return total
def entry_py_mul_2_3():
    """Entry point that delegates to py_mul_2_3."""
    value = py_mul_2_3()
    return value
def entry_py_add_2_3():
    """Entry point that delegates to py_add_2_3."""
    value = py_add_2_3()
    return value
|
{"hexsha": "2b75393f0ac390b794fc5474fbaa85709c05010f", "size": 286, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/funcs.py", "max_stars_repo_name": "NaleRaphael/bytejection", "max_stars_repo_head_hexsha": "67a9ca2bc4533cee4742969925f9d678aed4c3b0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/funcs.py", "max_issues_repo_name": "NaleRaphael/bytejection", "max_issues_repo_head_hexsha": "67a9ca2bc4533cee4742969925f9d678aed4c3b0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-10-17T14:55:21.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-05T15:52:16.000Z", "max_forks_repo_path": "tests/funcs.py", "max_forks_repo_name": "NaleRaphael/bytejection", "max_forks_repo_head_hexsha": "67a9ca2bc4533cee4742969925f9d678aed4c3b0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 11.0, "max_line_length": 28, "alphanum_fraction": 0.6433566434, "include": true, "reason": "import numpy", "num_tokens": 109}
|
#---------------train the CNN or the ensemble-----------------
#importing required libraries and modules
import os
import sys
import cv2
import numpy as np
import tflearn
from preprocess import Preprocess
from data_split import Load
from conv_net import CNN
from ensemble import Ensemble
def load_numpy_data(arg, folder):
    """Load the saved train/val/test splits (.npy files) for a feature type.

    Reads X_train/X_val/X_test and Y_train/Y_val/Y_test from
    'bin/<folder>/<arg>/' and returns each as a Python list.
    """
    base = 'bin/' + folder + '/' + arg + '/'
    split_names = ('X_train', 'X_val', 'X_test', 'Y_train', 'Y_val', 'Y_test')
    X_train, X_val, X_test, Y_train, Y_val, Y_test = (
        list(np.load(base + name + '.npy')) for name in split_names)
    return X_train, X_val, X_test, Y_train, Y_val, Y_test
def train_CNN(arg, X_train, X_val, X_test, Y_train, Y_val, Y_test):
    """Train the single-convolution CNN for feature type `arg`.

    After training, the test-set predictions are computed once and saved
    (under 'test_prediction/full') so they need not be recomputed each run.
    """
    net = CNN()
    cnn_model = net.create_1ConvModel()
    cnn_model = net.train_1ConvModel(arg, cnn_model, X_train[0], Y_train[0],
                                     X_val[0], Y_val[0])
    # Cache the test predictions to avoid repeating the same computation.
    net.predict_test_data(arg, cnn_model, X_test[0], Y_test[0])
def train_Ensemble(arg, X_train, X_val, X_test, Y_train, Y_val, Y_test):
    """Load the trained CNN for feature `arg` and fit its SVR ensemble stage."""
    data_size = 'full'
    net = CNN()
    # Rebuild the network structure, then restore the saved trained weights.
    cnn_model = net.create_1ConvModel()
    cnn_model.load('DNN/' + data_size + '/' + arg + '.model')
    # Fit the SVR regressor of the ensemble for this particular classifier
    # on the validation split.
    ensemble = Ensemble()
    ensemble.regressor(arg, cnn_model, X_val[0], Y_val[0])
if __name__ == '__main__':
    # take two arguments from the terminal
    folder = sys.argv[1]  # the folder where all the data are saved; for 15-class classification it is 'full'
    arg = sys.argv[2]  # the type of feature on which the classifier (CNN) will be trained
    X_train, X_val, X_test, Y_train, Y_val, Y_test = load_numpy_data(arg, folder)  # loading numpy data from the saved files
    # uncomment train_CNN() or train_Ensemble(), whichever you want to train
    # for the first time, you need to train the CNN first, then the Ensemble
    train_CNN(arg, X_train, X_val, X_test, Y_train, Y_val, Y_test)  # training the CNN
    #train_Ensemble(arg, X_train, X_val, X_test, Y_train, Y_val, Y_test)    #training the Ensemble
|
{"hexsha": "e187e7460a9b13e80ebca1d140f982efb3ef1787", "size": 2487, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/train.py", "max_stars_repo_name": "chatdip98/Acoustic-Scene-Classification", "max_stars_repo_head_hexsha": "cf410f14a1efb3e3dd2bbc240c24882969be98a9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/train.py", "max_issues_repo_name": "chatdip98/Acoustic-Scene-Classification", "max_issues_repo_head_hexsha": "cf410f14a1efb3e3dd2bbc240c24882969be98a9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/train.py", "max_forks_repo_name": "chatdip98/Acoustic-Scene-Classification", "max_forks_repo_head_hexsha": "cf410f14a1efb3e3dd2bbc240c24882969be98a9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.5735294118, "max_line_length": 120, "alphanum_fraction": 0.7080820265, "include": true, "reason": "import numpy", "num_tokens": 666}
|
# Copyright 2019 The ASReview Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC
from abc import abstractmethod
import warnings
import numpy as np
from asreview.config import DEFAULT_N_INSTANCES, LABEL_NA
from asreview.state.utils import open_state
from asreview.models.nb import NBModel
from asreview.query_strategies.max import MaxQuery
from asreview.balance_strategies.simple import SimpleBalance
from asreview.query_strategies.random import RandomQuery
from asreview.settings import ASReviewSettings
from asreview.feature_extraction.tfidf import Tfidf
def get_pool_idx(X, train_idx):
    """Return the row indices of X that are not part of the training set."""
    all_idx = np.arange(X.shape[0])
    return np.delete(all_idx, train_idx, axis=0)
def _merge_prior_knowledge(included, excluded, return_labels=True):
"""Merge prior included and prior excluded."""
if included is None:
included = []
if excluded is None:
excluded = []
prior_indices = np.array(np.append(included, excluded), dtype=np.int)
if return_labels:
prior_included_labels = np.ones((len(included),), dtype=int)
prior_excluded_labels = np.zeros((len(excluded),), dtype=int)
labels = np.concatenate([
prior_included_labels,
prior_excluded_labels
])
return prior_indices, labels
return prior_indices
class BaseReview(ABC):
    """Base class for Systematic Review."""

    name = "base"

    def __init__(self,
                 as_data,
                 model=None,
                 query_model=None,
                 balance_model=None,
                 feature_model=None,
                 n_papers=None,
                 n_instances=DEFAULT_N_INSTANCES,
                 n_queries=None,
                 start_idx=None,
                 state_file=None,
                 log_file=None,
                 # final_labels=None,
                 verbose=1,
                 data_fp=None,
                 ):
        """Initialize base class for systematic reviews.

        Arguments
        ---------
        as_data: ASReviewData
            Dataset container; provides the texts to vectorize and the labels
            (1 for included, 0 for excluded, LABEL_NA for unknown).
        model: BaseModel
            Initialized model to fit the data during active learning.
            See asreview.models.utils.py for possible models.
            Defaults to Naive Bayes.
        query_model: BaseQueryModel
            Initialized model to query new instances for review, such as random
            sampling or max sampling.
            See asreview.query_strategies.utils.py for query models.
            Defaults to max sampling.
        balance_model: BaseBalanceModel
            Initialized model to redistribute the training data during the
            active learning process. They might either resample or undersample
            specific papers. Defaults to no balancing.
        feature_model: BaseFeatureExtraction
            Initialized feature-extraction model used to build the feature
            matrix. Defaults to TF-IDF.
        n_papers: int
            Number of papers to review during the active learning process,
            excluding the number of initial priors. To review all papers, set
            n_papers to None.
        n_instances: int
            Number of papers to query at each step in the active learning
            process.
        n_queries: int
            Number of steps/queries to perform. Set to None for no limit.
        start_idx: list
            Indices of papers with prior knowledge, labelled before the
            review starts. Defaults to no priors.
        state_file: str
            Path to state file. Replaces log_file argument.
        log_file: str
            Deprecated alias of state_file.
        """
        super(BaseReview, self).__init__()

        # Avoid a shared mutable default argument.
        if start_idx is None:
            start_idx = []

        # Default to Naive Bayes model
        if model is None:
            model = NBModel()
        if query_model is None:
            query_model = MaxQuery()
        if balance_model is None:
            balance_model = SimpleBalance()
        if feature_model is None:
            feature_model = Tfidf()

        self.as_data = as_data
        self.y = as_data.labels
        if self.y is None:
            self.y = np.full(len(as_data), LABEL_NA)
        self.model = model
        self.balance_model = balance_model
        self.query_model = query_model
        self.feature_model = feature_model

        # Dict shared between the model/query/balance strategies to exchange
        # query provenance and cached predictions.
        self.shared = {"query_src": {}, "current_queries": {}}
        self.model.shared = self.shared
        self.query_model.shared = self.shared
        self.balance_model.shared = self.shared

        self.n_papers = n_papers
        self.n_instances = n_instances
        self.n_queries = n_queries

        if log_file is not None:
            warnings.warn("The log_file argument for BaseReview will be"
                          " replaced by state_file.", category=FutureWarning)
            self.state_file = log_file
        else:
            self.state_file = state_file
        self.verbose = verbose

        self.query_i = 0
        self.query_i_classified = 0
        # `np.int` was removed in NumPy 1.24; the builtin `int` is equivalent.
        self.train_idx = np.array([], dtype=int)
        self.model_trained = False
        self.data_fp = data_fp

        with open_state(self.state_file) as state:
            if not state.is_empty():
                # Resume: restore labels/indices from the existing state file,
                # first adding any new prior indices not yet in it.
                startup = state.startup_vals()
                if not set(startup["train_idx"]) >= set(start_idx):
                    new_idx = list(set(start_idx) - set(startup["train_idx"]))
                    self.classify(new_idx, self.y[new_idx], state,
                                  method="initial")
                    startup = state.startup_vals()
                self.train_idx = startup["train_idx"]
                self.y = startup["labels"]
                self.shared["query_src"] = startup["query_src"]
                self.query_i = startup["query_i"]
                self.query_i_classified = startup["query_i_classified"]
            else:
                # Fresh state: record labels/settings and the prior knowledge.
                state.set_labels(self.y)
                state.settings = self.settings
                self.classify(start_idx, self.y[start_idx], state,
                              method="initial")
                self.query_i_classified = len(start_idx)

            # Reuse the cached feature matrix when the state already has one
            # for this dataset; otherwise compute and store it.
            try:
                self.X = state.get_feature_matrix(as_data.hash())
            except KeyError:
                self.X = feature_model.fit_transform(
                    as_data.texts, as_data.headings, as_data.bodies,
                    as_data.keywords)
                state._add_as_data(as_data, feature_matrix=self.X)
            if self.X.shape[0] != len(self.y):
                raise ValueError("The state file does not correspond to the "
                                 "given data file, please use another state "
                                 "file or dataset.")
            self.load_current_query(state)

    @property
    def settings(self):
        """Get an ASReviewSettings object reflecting the review config."""
        extra_kwargs = {}
        if hasattr(self, 'n_prior_included'):
            extra_kwargs['n_prior_included'] = self.n_prior_included
        if hasattr(self, 'n_prior_excluded'):
            extra_kwargs['n_prior_excluded'] = self.n_prior_excluded
        return ASReviewSettings(
            mode=self.name, model=self.model.name,
            query_strategy=self.query_model.name,
            balance_strategy=self.balance_model.name,
            feature_extraction=self.feature_model.name,
            n_instances=self.n_instances,
            n_queries=self.n_queries,
            n_papers=self.n_papers,
            model_param=self.model.param,
            query_param=self.query_model.param,
            balance_param=self.balance_model.param,
            feature_param=self.feature_model.param,
            data_name=self.as_data.data_name,
            **extra_kwargs)

    @abstractmethod
    def _get_labels(self, ind):
        """Classify the provided indices."""
        pass

    def _stop_iter(self, query_i, n_pool):
        """Criteria for stopping iteration.

        Stop iterating if:
            - the pool is empty
            - the number of reviewed papers reaches n_papers
            - the number of queries reaches n_queries
        """
        stop_iter = False
        n_train = self.X.shape[0] - n_pool
        # if the pool is empty, always stop
        if n_pool == 0:
            stop_iter = True
        # If we are exceeding the number of papers, stop.
        if self.n_papers is not None and n_train >= self.n_papers:
            stop_iter = True
        # Stop when the maximum number of queries is reached
        # (n_queries is None means no limit).
        if self.n_queries is not None and query_i >= self.n_queries:
            stop_iter = True
        return stop_iter

    def n_pool(self):
        """Number of papers that are not yet in the training set."""
        return self.X.shape[0] - len(self.train_idx)

    def _next_n_instances(self):  # Could be merged with _stop_iter someday.
        """Get the batch size for the next query."""
        n_instances = self.n_instances
        n_pool = self.n_pool()
        n_instances = min(n_instances, n_pool)
        if self.n_papers is not None:
            papers_left = self.n_papers - len(self.train_idx)
            n_instances = min(n_instances, papers_left)
        return n_instances

    def _do_review(self, state, stop_after_class=True, instant_save=False):
        """Run the query/classify/train loop until a stop criterion is met."""
        if self._stop_iter(self.query_i, self.n_pool()):
            return

        # train the algorithm with prior knowledge
        self.train()
        self.log_probabilities(state)
        # NOTE(review): n_pool is computed once here and not refreshed inside
        # the loop; the in-loop break below uses a fresh self.n_pool() — TODO
        # confirm this staleness is intended.
        n_pool = self.X.shape[0] - len(self.train_idx)
        while not self._stop_iter(self.query_i - 1, n_pool):
            # STEP 1: Make a new query
            query_idx = self.query(
                n_instances=self._next_n_instances()
            )
            self.log_current_query(state)

            # STEP 2: Classify the queried papers.
            if instant_save:
                # Save the state after every single classification.
                for idx in query_idx:
                    idx_array = np.array([idx], dtype=int)
                    self.classify(idx_array, self._get_labels(idx_array),
                                  state)
                    self.query_i_classified += 1
            else:
                self.classify(query_idx, self._get_labels(query_idx), state)
                self.query_i_classified += len(query_idx)

            # Option to stop after the classification set instead of training.
            if (stop_after_class and
                    self._stop_iter(self.query_i, self.n_pool())):
                break

            # STEP 3: Train the algorithm with new data
            # Update the training data and pool afterwards
            self.train()
            self.log_probabilities(state)

    def review(self, *args, **kwargs):
        """Do the systematic review, writing the results to the state file.

        Arguments
        ---------
        stop_after_class: bool
            When to stop; if True stop after classification step, otherwise
            stop after training step.
        instant_save: bool
            If True, save results after each single classification.
        """
        with open_state(self.state_file) as state:
            self._do_review(state, *args, **kwargs)

    def log_probabilities(self, state):
        """Store the modeling probabilities of the training indices and
        pool indices."""
        if not self.model_trained:
            return
        pool_idx = get_pool_idx(self.X, self.train_idx)

        # Log the probabilities of samples in the pool being included.
        pred_proba = self.shared.get('pred_proba', np.array([]))
        if len(pred_proba) == 0:
            pred_proba = self.model.predict_proba(self.X)
            self.shared['pred_proba'] = pred_proba
        proba_1 = np.array([x[1] for x in pred_proba])
        state.add_proba(pool_idx, self.train_idx, proba_1, self.query_i)

    def log_current_query(self, state):
        """Persist the pending (queried but unclassified) papers."""
        state.set_current_queries(self.shared["current_queries"])

    def load_current_query(self, state):
        """Restore pending queries from the state, defaulting to none."""
        try:
            self.shared["current_queries"] = state.get_current_queries()
        except KeyError:
            self.shared["current_queries"] = {}

    def query(self, n_instances, query_model=None):
        """Query new results.

        Arguments
        ---------
        n_instances: int
            Batch size of the queries, i.e. number of papers to be queried.
        query_model: BaseQueryModel
            Query strategy model to use. If None, the query model of the
            reviewer is used.

        Returns
        -------
        np.array:
            Indices of papers queried.
        """
        pool_idx = get_pool_idx(self.X, self.train_idx)
        n_instances = min(n_instances, len(pool_idx))

        # If the model is not trained, choose random papers.
        if not self.model_trained and query_model is None:
            query_model = RandomQuery()
        if not self.model_trained:
            classifier = None
        else:
            classifier = self.model
        if query_model is None:
            query_model = self.query_model

        # Make a query from the pool.
        query_idx, _ = query_model.query(
            X=self.X,
            classifier=classifier,
            pool_idx=pool_idx,
            n_instances=n_instances,
            shared=self.shared,
        )
        return query_idx

    def classify(self, query_idx, inclusions, state, method=None):
        """Classify new papers and update the training indices.

        It automatically updates the state.

        Arguments
        ---------
        query_idx: list, np.array
            Indices to classify.
        inclusions: list, np.array
            Labels of the query_idx.
        state: BaseLogger
            Logger to store the classification in.
        method: str
            Query method to attribute these classifications to. If None, look
            the method up per index in the pending queries.
        """
        query_idx = np.array(query_idx, dtype=int)
        self.y[query_idx] = inclusions
        # Only newly classified indices are appended to the training set.
        query_idx = query_idx[np.isin(query_idx, self.train_idx, invert=True)]
        self.train_idx = np.append(self.train_idx, query_idx)
        if method is None:
            # Attribute each paper to the strategy that queried it.
            methods = []
            for idx in query_idx:
                method = self.shared["current_queries"].pop(idx, None)
                if method is None:
                    method = "unknown"
                methods.append(method)
                if method in self.shared["query_src"]:
                    self.shared["query_src"][method].append(idx)
                else:
                    self.shared["query_src"][method] = [idx]
        else:
            methods = np.full(len(query_idx), method)
            if method in self.shared["query_src"]:
                self.shared["query_src"][method].extend(
                    query_idx.tolist())
            else:
                self.shared["query_src"][method] = query_idx.tolist()
        state.add_classification(query_idx, inclusions, methods=methods,
                                 query_i=self.query_i)
        state.set_labels(self.y)

    def train(self):
        """Train the model."""
        # Training requires at least one included and one excluded paper.
        num_zero = np.count_nonzero(self.y[self.train_idx] == 0)
        num_one = np.count_nonzero(self.y[self.train_idx] == 1)
        if num_zero == 0 or num_one == 0:
            return

        # Get the training data.
        X_train, y_train = self.balance_model.sample(
            self.X, self.y, self.train_idx, shared=self.shared)

        # Train the model on the training data.
        self.model.fit(
            X=X_train,
            y=y_train,
        )
        self.shared["pred_proba"] = self.model.predict_proba(self.X)
        self.model_trained = True
        # A new query round starts once something was classified since the
        # last training.
        if self.query_i_classified > 0:
            self.query_i += 1
            self.query_i_classified = 0

    def statistics(self):
        """Get a number of statistics about the current state of the review."""
        try:
            n_initial = len(self.shared['query_src']['initial'])
        except KeyError:
            n_initial = 0
        # last_inclusion: number of papers reviewed since the most recent
        # inclusion (excluding the initial priors).
        try:
            if np.count_nonzero(self.y[self.train_idx[n_initial:]] == 1) == 0:
                last_inclusion = len(self.train_idx[n_initial:])
            else:
                last_inclusion = np.nonzero(
                    self.y[self.train_idx[n_initial:]][::-1] == 1)[0][0]
        except ValueError:
            last_inclusion = 0
        stats = {
            "n_included": np.count_nonzero(self.y[self.train_idx] == 1),
            "n_excluded": np.count_nonzero(self.y[self.train_idx] == 0),
            "n_papers": len(self.y),
            "n_reviewed": len(self.train_idx),
            "n_pool": self.n_pool(),
            "last_inclusion": last_inclusion,
            "n_initial": n_initial,
        }
        return stats
|
{"hexsha": "904ec940ef4b67ac57b10c7960a443de25633aec", "size": 18596, "ext": "py", "lang": "Python", "max_stars_repo_path": "asreview/review/base.py", "max_stars_repo_name": "Sybren-UU/asreview", "max_stars_repo_head_hexsha": "a5ec3ca0ff6b1e1b60ad9b34d1d0f664f8cbedc2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "asreview/review/base.py", "max_issues_repo_name": "Sybren-UU/asreview", "max_issues_repo_head_hexsha": "a5ec3ca0ff6b1e1b60ad9b34d1d0f664f8cbedc2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "asreview/review/base.py", "max_forks_repo_name": "Sybren-UU/asreview", "max_forks_repo_head_hexsha": "a5ec3ca0ff6b1e1b60ad9b34d1d0f664f8cbedc2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.8968253968, "max_line_length": 80, "alphanum_fraction": 0.5882447838, "include": true, "reason": "import numpy", "num_tokens": 3876}
|
# ### Loading some packages
using GeoPhyInv
using SparseArrays
using StatsBase
using LinearAlgebra
using Random
using LinearAlgebra
using Test
using ForwardDiff
using Calculus
#src # include("core.jl")
#src # include("expt.jl")
# ### Solve for ``ψ`` in a `PoissonExpt`
# This module represents an explicit, direct sparse 2D finite-difference Poisson solver for heterogeneous media,
# i.e. media having spatially varying (space-dependent) medium parameters.
# Current implementation assumes Neumann boundary conditions at all the boundaries.
# Consider the following Poisson experiment:
# ```math
# ∇⋅(σ(x,z)∇) ψ(t) = ∇⋅(Q(x,z)∇) p(t),
# ```
# ```math
# Q = k * Q_v / η.
# ```
# We start by setting the dimensions and allocating the spatial grids as follows.
# Grid dimensions: nz×nx spatial samples, nt time samples.
nx=21
nz=21
nt=4
nznx=nz*nx
# Spatial grid centred on the origin with unit spacing; uniform time grid.
mgrid=[range(-div(nz,2), step=1.0, length=nz), range(-div(nx,2), step=1.0, length=nx)]
tgrid=range(0.0,step=0.5, length=nt)
@info "Grids are all set."
# Now lets allocate the inputs for a toy experiment.
# These medium parameters are used to generate the *observed* field ``ψ``.
# Random positive medium parameters and a random pressure field p(z,x,t).
Qv=abs.(randn(nz,nx))
η=abs.(randn(nz,nx))
k=abs.(randn(nz,nx))
σ=abs.(randn(nz,nx))
p=randn(nz,nx,nt)
@info "Medium parameters allocated."
# ### Acquisition
# Now, we will generate an acquisition geometry and allocate a projection matrix `ACQ`.
ageom=AGeom(mgrid, SSrcs(1), Srcs(1), Recs(30))
# Place the supersource and the 30 receivers on a circle of radius 5
# around the origin, spanning the full [0, 2π] angular range.
update!(ageom, SSrcs(), [0,0], 5, [0,2π])
update!(ageom, Recs(), [0,0], 5, [0,2π])
ACQ=SparseMatrixCSC(ageom[1],mgrid);
@info "ACQ will be used to project ψ onto receivers."
# ### Generate `PoissonExpt` and then applying `mod!`
# This will first
# * apply operator ``A=∇⋅(Q(x,z)∇)`` on a field ``p``;
# * then apply ``(∇⋅(σ(x,z)∇))^{-1}`` in order to solve for ``ψ``;
# * finally, records ``ψ`` at the receiver locations to generate data.
paE=PoissonExpt(p, tgrid, mgrid, Qv, k, η, σ, ACQ)
GeoPhyInv.mod!(paE)
# ### Extracting data from `Expt`
data=paE[:data]
@info string("The dimensions of data are (nt,nr)=",size(data))
|
{"hexsha": "66c21e1317c886128bbcd191cf908294d948f915", "size": 1995, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/Poisson/forw.jl", "max_stars_repo_name": "ayushinav/GeoPhyInv.jl", "max_stars_repo_head_hexsha": "b0ce642161cb5300e2e7a5bd737b58fe37ddbfeb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-01-05T04:30:50.000Z", "max_stars_repo_stars_event_max_datetime": "2018-06-26T18:32:37.000Z", "max_issues_repo_path": "test/Poisson/forw.jl", "max_issues_repo_name": "ayushinav/GeoPhyInv.jl", "max_issues_repo_head_hexsha": "b0ce642161cb5300e2e7a5bd737b58fe37ddbfeb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-11-14T19:59:38.000Z", "max_issues_repo_issues_event_max_datetime": "2019-01-16T19:55:36.000Z", "max_forks_repo_path": "test/Poisson/forw.jl", "max_forks_repo_name": "ayushinav/GeoPhyInv.jl", "max_forks_repo_head_hexsha": "b0ce642161cb5300e2e7a5bd737b58fe37ddbfeb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2017-08-15T14:19:53.000Z", "max_forks_repo_forks_event_max_datetime": "2018-07-03T21:18:19.000Z", "avg_line_length": 26.9594594595, "max_line_length": 112, "alphanum_fraction": 0.6842105263, "num_tokens": 684}
|
//
// Simulator.cpp
// proteintools
//
// Created by Salik Syed on 10/5/17.
// Copyright © 2017 N/A. All rights reserved.
//
#include "Simulator.hpp"
#include "PDBGeometry.hpp"
#include "Residue.hpp"
#include <cmath>
#include <iostream>
#include <Eigen/Geometry>
using namespace std;
#define TORSION_EPSILON 0.00001f
// Build a simulator for the given chain: resolve each residue against the
// force field, allocate the flat atom/parameter matrices, seed the
// per-residue transform stack, and pose the chain at all-zero torsions.
Simulator::Simulator(Chain& chain, ForceField& forcefield) {
    size_t atom_count = 0;
    ResidueTypeIterator it = chain.first_residue();
    while (it != chain.last_residue()) {
        // The first/last flags let the force field select terminal residue
        // variants for the chain ends.
        const Residue * r = forcefield.getResidue(*it,
                                   it==chain.first_residue(),
                                   it==chain.last_residue() - 1);
        _residues.push_back(r);
        atom_count += r->numAtoms();
        it++;
    }
    _numAtoms = atom_count;
    // CPU energy evaluator sized for the whole chain; 1-4 LJ/Coulomb scale
    // factors come from the force field.
    _evaluator = (EnergyEvaluator*) new CPUEvaluator((unsigned int)_numAtoms, forcefield.getLj14scale(), forcefield.getC14scale());
    // initialize the conformation
    _conformation = new Conformation(_residues.size());
    // init torsion params (every torsion starts at zero)
    for(size_t i = 0; i < _conformation->numTorsionParameters(); i++) {
        _conformation->setTorsion(i, 0.0f);
    }
    // Allocate space for original and transformed atom positions
    // (homogeneous coordinates: one 4-vector column per atom).
    _atoms = new Eigen::Matrix4Xf(4, atom_count);
    _atomsTransformed = new Eigen::Matrix4Xf(4, atom_count);
    // Allocate space for the charge parameters:
    // column layout is (validity flag, charge, sigma, epsilon).
    _atomParams = new Eigen::Matrix4Xf(4, atom_count);
    it = chain.first_residue();
    size_t curr_atom = 0;
    for(size_t i = 0; i < _residues.size(); i++) {
        const Residue * r = _residues[i];
        ResidueType type = r->geometry_name();
        PDBGeometry & geom = PDBGeometry::get(type);
        for(size_t j = 0; j < r->numAtoms(); j++) {
            pair<AtomName,AtomType> a = r->getAtom(j);
            // Default: flag the atom as having no geometry (-1) but still
            // record its force-field parameters.
            _atomParams->col(curr_atom)<<
            -1.0f,
            a.second.charge,
            a.second.sigma,
            a.second.epsilon;
            AtomName name(a.first);
            if (geom.hasGeometry(name)) {
                // Geometry known: flip the flag to +1 and store the position
                // with homogeneous w = 1 so the 4x4 transforms apply to it.
                _atomParams->col(curr_atom)<<
                1.0f,
                a.second.charge,
                a.second.sigma,
                a.second.epsilon;
                Eigen::Vector4f pos = geom.position(name);
                _atoms->col(curr_atom)<<pos.x(), pos.y(), pos.z(), 1.0;
                _atomsTransformed->col(curr_atom)<<pos.x(), pos.y(), pos.z(), 1.0;
            } else {
                // No geometry: zero position with w = 0 (unaffected by
                // translations).
                _atoms->col(curr_atom)<<0.0, 0.0, 0.0, 0.0;
                _atomsTransformed->col(curr_atom)<<0.0, 0.0, 0.0, 0.0;
            }
            curr_atom++;
        }
        // Cumulative transform stack: residue i's frame is its parent's frame
        // composed with the parent->child transform; residue 0 is identity.
        if (i != 0) {
            Eigen::Matrix4f m = _residues[i - 1]->getTransformForChild(r);
            _translationTransforms.push_back(m);
            _matrixStack.push_back(m*_matrixStack[_matrixStack.size() -1]);
        } else {
            _translationTransforms.push_back(Eigen::Matrix4f::Identity(4, 4));
            _matrixStack.push_back(Eigen::Matrix4f::Identity(4, 4));
        }
    }
    // Apply the (all-zero) starting conformation to the transformed atoms.
    setConformation(*_conformation, true);
}
// Recompute the cumulative per-residue transforms for the given torsion
// angles and refresh all transformed atom positions.
// NOTE(review): forceUpdate is currently unused — every call recomputes.
void Simulator::setConformation(Conformation &conformation, bool forceUpdate) {
    // update the transformed atom data; residue 0 stays at the identity set
    // up by the constructor, so the atom cursor starts after its atoms.
    size_t curr = _residues[0]->numAtoms();
    for(size_t i = 1; i < _residues.size(); i++) {
        // Rotate only when the torsion is meaningfully non-zero. The
        // magnitude is tested: the previous `> TORSION_EPSILON` comparison
        // silently treated every *negative* torsion angle as identity.
        if (std::fabs(conformation.getTorsion(i)) > TORSION_EPSILON) {
            Eigen::Matrix4f rot;
            rot.setIdentity();
            // Bond axis of residue i expressed in the parent's frame.
            Eigen::Vector3f transformedAxis = (_matrixStack[i-1] * _residues[i]->getBondAxis()).head<3>();
            transformedAxis.normalize();
            rot.block<3,3>(0,0) = Eigen::AngleAxisf(conformation.getTorsion(i),
                                                    transformedAxis).matrix();
            rot.rightCols(1) = Eigen::Vector4f(0.0, 0.0, 0.0, 1.0);
            _matrixStack[i] = _translationTransforms[i] *
                              rot *
                              _matrixStack[i - 1];
        } else {
            _matrixStack[i] = _translationTransforms[i] *
                              _matrixStack[i - 1];
        }
        // Apply the cumulative transform to every atom of residue i.
        for(size_t j = 0; j < _residues[i]->numAtoms(); j++) {
            _atomsTransformed->col(curr + j) << _matrixStack[i] * _atoms->col(curr + j);
        }
        curr += _residues[i]->numAtoms();
    }
}
// Return a deep copy of the simulator's current torsion-angle conformation.
Conformation Simulator::getConformation() const {
    const size_t torsionCount = _conformation->numTorsionParameters();
    Conformation copy(torsionCount);
    for (size_t idx = 0; idx < torsionCount; ++idx) {
        copy.setTorsion(idx, _conformation->getTorsion(idx));
    }
    return copy;
}
// Collect per-atom info records for the whole chain, in the same flat atom
// order used by the position/parameter matrices.
void Simulator::getAtoms(vector<AtomInfo>& atomList) const{
    size_t idx = 0;
    for(size_t i = 0; i < _residues.size(); i++) {
        const Residue * r = _residues[i];
        ResidueType type = r->name();
        for(size_t j = 0; j < r->numAtoms(); j++){
            pair<AtomName, AtomType> a = r->getAtom(j);
            AtomInfo info;
            AtomName name(a.first);
            PDBGeometry & geom = PDBGeometry::get(r->geometry_name().c_str());
            info.geometryValid = geom.hasGeometry(name);
            info.atom = a.second;
            // NOTE(review): isBackboneChild is assigned twice; the first
            // assignment (parentAtomIndex) is dead code. It looks like the
            // first line was meant to set a separate "parent" flag — confirm
            // against AtomInfo's definition.
            info.isBackboneChild = j == r->parentAtomIndex();
            info.isBackboneChild = j == r->childAtomIndex();
            info.residue = type;
            // Transformed (posed) position, not the residue-local one.
            info.position = _atomsTransformed->col(idx);
            atomList.push_back(info);
            idx++;
        }
    }
}
// Append every bond of every residue to bondList, with the residue-local
// atom indices shifted by a running offset so they index the flat,
// chain-wide atom arrays.
void Simulator::getBonds(vector<pair<int, int>> & bondList) const{
    size_t base = 0;
    for(size_t ri = 0; ri < _residues.size(); ri++) {
        const Residue * res = _residues[ri];
        for(BondIterator bond = res->first_bond(); bond != res->last_bond(); bond++) {
            bondList.push_back(pair<int, int>((int)(bond->atom1_idx + base),
                                              (int)(bond->atom2_idx + base)));
        }
        base += res->numAtoms();
    }
}
// Evaluate the force-field energy of the currently posed (transformed)
// atom positions using the attached evaluator.
float Simulator::getEnergy() const {
    return _evaluator->getEnergy(_atomParams, _atomsTransformed);
}
// Free the evaluator and the heap-allocated atom matrices.
// NOTE(review): the Residue pointers in _residues (from
// ForceField::getResidue) are not freed here — presumably owned by the
// force field; confirm.
Simulator::~Simulator() {
    delete _evaluator;
    delete _atoms;
    delete _atomsTransformed;
    delete _atomParams;
}
|
{"hexsha": "bbcfaca21a2c2261bccdcd8e1dcab7286204a350", "size": 6177, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "proteintools/proteintools/Simulator.cpp", "max_stars_repo_name": "saliksyed/protein-tools-cpp", "max_stars_repo_head_hexsha": "0101d1e0da125bcb36e70291290d25387999e197", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "proteintools/proteintools/Simulator.cpp", "max_issues_repo_name": "saliksyed/protein-tools-cpp", "max_issues_repo_head_hexsha": "0101d1e0da125bcb36e70291290d25387999e197", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "proteintools/proteintools/Simulator.cpp", "max_forks_repo_name": "saliksyed/protein-tools-cpp", "max_forks_repo_head_hexsha": "0101d1e0da125bcb36e70291290d25387999e197", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.7052023121, "max_line_length": 131, "alphanum_fraction": 0.564351627, "num_tokens": 1641}
|
from pandas import read_csv
import numpy as np
from matplotlib import pyplot
from scipy.optimize import curve_fit
# Longley macroeconomic dataset (small yearly table, no header row).
url='https://raw.githubusercontent.com/jbrownlee/Datasets/master/longley.csv'
dataframe= read_csv(url,header=None)
data=dataframe.values
# Column 4 (population) is the predictor; the last column (employment) is
# the regression target.
xdata,ydata=data[:,4],data[:,-1]
def objective(x, a, b, c, d, e, f, g, k):
    """Degree-7 polynomial model: a*x + b*x^2 + ... + g*x^7 + intercept k."""
    total = a * x
    for power, coeff in enumerate((b, c, d, e, f, g), start=2):
        total = total + coeff * x ** power
    return total + k
def objective2(x, a, k):
    """First-order (straight-line) model: a*x + intercept k."""
    linear_term = a * x
    return linear_term + k
def objective3(x, a, b, c, k):
    """Degree-3 polynomial model: a*x + b*x^2 + c*x^3 + intercept k."""
    total = a * x
    for power, coeff in enumerate((b, c), start=2):
        total = total + coeff * x ** power
    return total + k
def objective4(x, a, b, c, d, e, k):
    """Degree-5 polynomial model: a*x + ... + e*x^5 + intercept k."""
    total = a * x
    for power, coeff in enumerate((b, c, d, e), start=2):
        total = total + coeff * x ** power
    return total + k
# Fit each candidate polynomial with the trust-region 'dogbox' solver.
popt,_ =curve_fit(objective,xdata,ydata,method='dogbox')
popt2,_ =curve_fit(objective2,xdata,ydata,method='dogbox')
popt3,_ =curve_fit(objective3,xdata,ydata,method='dogbox')
popt4,_ =curve_fit(objective4,xdata,ydata,method='dogbox')
# One 2x2 panel per model: scatter the raw data, overlay the fitted curve.
panels = (
    (1, objective, popt, '7th order polynomial linear regression'),
    (2, objective4, popt4, '5th order polynomial linear regression'),
    (3, objective3, popt3, '3rd order polynomial linear regression'),
    (4, objective2, popt2, '1st order polynomial linear regression'),
)
for position, model_fn, params, title in panels:
    pyplot.subplot(2, 2, position)
    pyplot.scatter(xdata, ydata)
    grid = np.arange(min(xdata), max(xdata), 1)
    pyplot.plot(grid, model_fn(grid, *params), '--', color='red')
    pyplot.title(title)
    pyplot.xlabel("Population size ")
    pyplot.ylabel("Employed people ")
pyplot.show()
|
{"hexsha": "70faeaa7b815d0485634437cb9dcb2e12574b563", "size": 2204, "ext": "py", "lang": "Python", "max_stars_repo_path": "Regression Curve Fitting.py", "max_stars_repo_name": "HarduinLearnsCoding/Pattern-Recognition", "max_stars_repo_head_hexsha": "d2275a851fb3aaa71936fb45c23be74d641625a1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Regression Curve Fitting.py", "max_issues_repo_name": "HarduinLearnsCoding/Pattern-Recognition", "max_issues_repo_head_hexsha": "d2275a851fb3aaa71936fb45c23be74d641625a1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Regression Curve Fitting.py", "max_forks_repo_name": "HarduinLearnsCoding/Pattern-Recognition", "max_forks_repo_head_hexsha": "d2275a851fb3aaa71936fb45c23be74d641625a1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.9420289855, "max_line_length": 98, "alphanum_fraction": 0.6724137931, "include": true, "reason": "import numpy,from scipy", "num_tokens": 707}
|
from PIL import Image
import numpy as np
from robustness.datasets import ImageNet
from robustness.model_utils import make_and_restore_model
import torch
import matplotlib.pyplot as plt
# ImageNet dataset wrapper (metadata only; /tmp as data root).
ds = ImageNet('/tmp')
# Load a robustly trained ResNet-50 checkpoint (file name suggests L2, eps=3).
model, _ = make_and_restore_model(arch='resnet50', dataset=ds,
            resume_path='/home/siddhant/Downloads/imagenet_l2_3_0.pt')
model.eval()
# Load the sample image and resize to the model's 224x224 input.
img = np.asarray(Image.open('/home/siddhant/CMU/robustness_applications/sample_inputs/img_bear.jpg').resize((224, 224)))
# NOTE(review): dividing by 254 looks like a typo for 255 (max 8-bit value) —
# confirm before changing, since the checkpoint was evaluated with this code.
img = img/254.
# HWC -> CHW for the torch model.
img = np.transpose(img, (2, 0, 1))
_IMAGENET_MEAN = [0.485, 0.456, 0.406]
_IMAGENET_STDDEV = [0.229, 0.224, 0.225]
# Add the batch dimension: (1, 3, 224, 224).
img_var = torch.tensor(img, dtype=torch.float)[None, :]
# Round-trip back to HWC uint8 purely to display the input image.
img = img_var.clone().detach().cpu().numpy()
img = img[0]
img = img.transpose((1, 2, 0))
img *= 255
img[img < 0] = 0
img = np.uint8(img)
plt.imshow(img)
plt.show()
# Attack hyper-parameters (L2 budget, step size, iteration count).
ATTACK_EPS = 40
ATTACK_STEPSIZE = 1
ATTACK_STEPS = 200
NUM_WORKERS = 8
BATCH_SIZE = 10
def generation_loss(mod, inp, targ):
    """Per-example cross-entropy between mod(inp) logits and targ labels.

    Returns (loss, None); the second element matches the custom_loss API
    used by the attack kwargs below.
    """
    logits = mod(inp)
    criterion = torch.nn.CrossEntropyLoss(reduction='none')
    return criterion(logits, targ), None
# robustness-library attack configuration: targeted L2 PGD driven by the
# per-example generation_loss above.
kwargs = {
    'custom_loss' : generation_loss,
    'constraint':'2',
    'eps': ATTACK_EPS,
    'step_size': ATTACK_STEPSIZE,
    'iterations': ATTACK_STEPS,
    'targeted': True,
    'do_tqdm': True
}
# Target label index 386 for the (single-image) batch.
target = torch.tensor([386], dtype=torch.long).cuda()
# make_adv=True returns (output, adversarial_images).
_, im_adv = model(img_var, target, make_adv=True, **kwargs)
# Convert the adversarial tensor back to a displayable HWC uint8 image.
img = im_adv.clone().detach().cpu().numpy()
# print(img.shape)
img = img[0]
img = img.transpose((1, 2, 0))
img *= 255
img[img < 0] = 0
img = np.uint8(img)
plt.imshow(img)
plt.show()
|
{"hexsha": "c73299d0bc791d7e6f9fb4d3ac414cff506287f5", "size": 1568, "ext": "py", "lang": "Python", "max_stars_repo_path": "gen_targeted_adv_samples.py", "max_stars_repo_name": "agarwalsiddhant10/blackbox-smoothing", "max_stars_repo_head_hexsha": "cf18a9dc45f807494955d0cf19a3d1dd4315b54f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gen_targeted_adv_samples.py", "max_issues_repo_name": "agarwalsiddhant10/blackbox-smoothing", "max_issues_repo_head_hexsha": "cf18a9dc45f807494955d0cf19a3d1dd4315b54f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gen_targeted_adv_samples.py", "max_forks_repo_name": "agarwalsiddhant10/blackbox-smoothing", "max_forks_repo_head_hexsha": "cf18a9dc45f807494955d0cf19a3d1dd4315b54f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.1230769231, "max_line_length": 120, "alphanum_fraction": 0.6881377551, "include": true, "reason": "import numpy", "num_tokens": 497}
|
[STATEMENT]
lemma trans_le_add1_hmset: "i \<le> j \<Longrightarrow> i \<le> j + m" for i j m :: hmultiset
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. i \<le> j \<Longrightarrow> i \<le> j + m
[PROOF STEP]
by (simp add: add_increasing2)
|
{"llama_tokens": 102, "file": "Nested_Multisets_Ordinals_Hereditary_Multiset", "length": 1}
|
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
import tensorflow_probability as tfp
import datetime
import os, sys
from argparse import ArgumentParser
# Debug module
# from tensorflow.python import debug as tf_debug
import numpy as np
import warnings
from keras.datasets import mnist
from tensorflow.python.summary.writer.writer import FileWriter
import matplotlib.pyplot as plt
warnings.simplefilter('error', UserWarning)
class IWAE:
    """Importance-weighted autoencoder used for the TMC (non-factorised)
    experiments.

    The whole TF1 graph is built at construction time: the encoder/decoder
    MLP stacks, the log-sum importance-weight estimator, the objective and
    an Adam optimizer, plus TensorBoard summary plumbing.
    """
    def __init__(self, input_shape, batch_size, layer_specs, k_samples, lr, sess, small):
        # Input batch laid out as (batch, k importance samples, flat pixels).
        self.data_ph = tf.placeholder(dtype=tf.float32, shape=(None, k_samples, input_shape))
        self.train_ph = tf.placeholder(dtype=tf.bool)
        self.tot_obj_loss = tf.placeholder(dtype=tf.float32)
        self.log2pi = tf.log(2 * np.pi)
        self.q_probs = []
        self.h_units = layer_specs
        self.batch_size = batch_size
        self.small = small
        self.init = tf.placeholder(dtype=tf.bool)
        # BUG FIX: this previously read the *global* `k` defined by the
        # training script (`self.k = k`), which breaks as soon as the class
        # is imported on its own; use the constructor argument instead.
        self.k = k_samples
        self.sess = sess
        self.recon, self.log_sum_w = self.model(self.data_ph)
        self.obj_loss = self.objective_function()
        with tf.name_scope('Optimizer'):
            self.optimizer = tf.train.AdamOptimizer(learning_rate=lr, beta1=0.9, beta2=0.999).minimize(self.obj_loss)
        self.summary = tf.Summary()
        loss_summary = tf.summary.scalar('Objective loss', self.tot_obj_loss)
        self.merge_op = tf.summary.merge_all()
        print('Logging to:', './logs/' + str(datetime.datetime.now()))
        self.writer = tf.summary.FileWriter('./logs/' + str(datetime.datetime.now()))
    def dense(self, x_, num_units, init_scale=0.01, scope_name=''):
        """
        Dense layer including Weight normalization and initialization
        as presented by (Kingma & Salimans, Weight normalization, 2016)
        based on code from: https://github.com/openai/weightnorm/blob/master/tensorflow/nn.py
        currently not giving any good desirable results
        :param x_: input data
        :param num_units: number of units in the dense layer
        :param init_scale: initialization scale
        :param scope_name: name of current scope
        :return: data run through dense layer
        """
        with tf.variable_scope(scope_name):
            ema = tf.train.ExponentialMovingAverage(decay=0.998)
            # NOTE(review): self.init is a tf.bool *placeholder*, so
            # `is not False` is always True in Python — the else branch below
            # is unreachable; this matches the "flawed, unused" status noted
            # in the training script.
            if self.init is not False:
                V = tf.get_variable('V', shape=[int(x_.get_shape()[-1]), num_units], dtype=tf.float32,
                                    initializer=tf.random_normal_initializer(0, 0.05), trainable=True)
                g = tf.get_variable('g', shape=[num_units], dtype=tf.float32,
                                    initializer=tf.constant_initializer(1.), trainable=True)
                b = tf.get_variable('b', shape=[num_units], dtype=tf.float32,
                                    initializer=tf.constant_initializer(0.), trainable=True)
            else:
                V = tf.get_variable('V')
                g = tf.get_variable('g')
                b = tf.get_variable('b')
                tf.assert_variables_initialized([V, g, b])
            ema.apply([V, g, b])
            g_ = tf.expand_dims(g, 0)
            g_ = tf.tile(g_, [self.k, 1])
            # use weight normalization (Salimans & Kingma, 2016)
            x = tf.matmul(x_, V)
            scaler = g_ / tf.sqrt(tf.reduce_sum(tf.square(V), [0]))
            b_ = tf.expand_dims(b, 0)
            b_ = tf.tile(b_, [self.k, 1])
            x = tf.reshape(scaler, [1, self.k, num_units]) * x + tf.reshape(b_, [1, self.k, num_units])
            if self.init is not False:  # normalize x
                m_init, v_init = tf.nn.moments(x, [0])
                m_init = m_init[0]
                scale_init = init_scale / tf.sqrt(v_init + 1e-10)
                scale_init = scale_init[0]
                with tf.control_dependencies([g.assign(g * scale_init), b.assign_add(-m_init * scale_init)]):
                    # x = tf.identity(x)
                    g_s = tf.expand_dims(g, 0)
                    g_s = tf.tile(g_s, [self.k, 1])
                    x = tf.matmul(x_, V)
                    scaler = g_s / tf.sqrt(tf.reduce_sum(tf.square(V), [0]))
                    b_ = tf.expand_dims(b, 0)
                    b_ = tf.tile(b_, [self.k, 1])
                    x = tf.reshape(scaler, [1, self.k, num_units]) * x + tf.reshape(b_, [1, self.k, num_units])
            return x
    def MLP_layer(self, x, mlp_units, out_dims, scope_name=''):
        """
        MLP layer with sampling built in
        :param x: input data
        :param mlp_units: dimensions of the MLP layers
        :param out_dims: output dimension for matching the next MLP layer
        :param scope_name: set the scope_name for WeightNorm, currently not working properly
        :return: nu, rho
        """
        # 2 regular linear dense layers with leaky Relu activations
        # x = self.dense(x, num_units=mlp_units, init_scale=1., scope_name=scope_name + '_dense1')
        x = tf.layers.dense(x, mlp_units)
        h_inter = tf.nn.leaky_relu(x, alpha=0.1)
        # h_i = self.dense(h_inter, num_units=mlp_units, init_scale=1., scope_name=scope_name + '_dense2')
        h_i = tf.layers.dense(h_inter, mlp_units)
        h_i = tf.nn.leaky_relu(h_i, alpha=0.1)
        # nu = self.dense(h_i, num_units=out_dims, init_scale=1., scope_name=scope_name + '_dense3')
        nu = tf.layers.dense(h_i, out_dims)
        # rho = 0.01 + tf.nn.softplus(self.dense(h_i, num_units=out_dims, init_scale=1., scope_name=scope_name + '_dense4'))
        # softplus keeps the scale strictly positive; +0.01 bounds it away
        # from zero for numerical stability.
        rho = 0.01 + tf.nn.softplus(tf.layers.dense(h_i, out_dims))
        return nu, rho
    def sample_z(self, nu, rho, value=None, bern=False):
        """
        sample from N(nu, rho)
        :param nu: mean
        :param rho: stddev
        :param value: None or the latent variables from the corresponding encoder layer (if we are in the decoder layer)
        :param bern: Flag for using a bernoulli distribution
        :return: logprob(z|nu,rho) & z
        """
        # flag for using a bernoulli distribution
        if bern:
            sample_dist = tf.distributions.Bernoulli(logits=nu, dtype=tf.float32)
            nu_bern = sample_dist.mean()
            l = self.bincrossentropy(value, nu)
            l_ = tf.expand_dims(l, axis=-1)
            l_tiled = tf.tile(l_, [1, 1, self.k])
            return nu_bern, l_tiled
        # Reparameterised Gaussian sample.
        eps = tf.random_normal(tf.shape(nu), dtype=tf.float32)
        z_next = nu + rho * eps
        if value is not None:
            estimate = value
        else:
            estimate = z_next
        # Tile sample/parameter tensors against each other so every one of
        # the k samples is scored under every one of the k distributions.
        estimate_ = tf.reshape(tf.expand_dims(estimate, axis=-1), [-1, 1, estimate.get_shape()[1], estimate.get_shape()[-1]])
        estimate_tiled = tf.tile(estimate_, [1, self.k, 1, 1])
        nu_ = tf.reshape(tf.expand_dims(nu, axis=-1), [-1, self.k, 1, nu.get_shape()[-1]])
        nu_tiled = tf.tile(nu_, [1, 1, self.k, 1])
        rho_ = tf.reshape(tf.expand_dims(rho, axis=-1), [-1, self.k, 1, rho.get_shape()[-1]])
        rho_tiled = tf.tile(rho_, [1, 1, self.k, 1])
        sample_dist = tf.distributions.Normal(nu_tiled, rho_tiled)
        logprob_z = tf.reduce_sum(sample_dist.log_prob(estimate_tiled), axis=-1)
        return z_next, logprob_z
    def bincrossentropy(self, x, x_hat):
        """
        calculate binary cross-entropy between true image and reconstruction
        :param x: true image
        :param x_hat: reconstructed image at the bernoulli layer of the decoder
        :return: binary cross-entropy
        """
        x_hat = tf.nn.sigmoid(x_hat)
        bce = x * tf.log(x_hat + 1e-8) + (1 - x) * tf.log(1 - x_hat + 1e-8)
        return tf.reduce_sum(bce, axis=-1)
    def logmmmeanexp(self, X, Y):
        """
        Numerically stable tensor matrix multiplications as described in the appendix of
        https://arxiv.org/pdf/1806.08593.pdf
        :param X: Matrix
        :param Y: Matrix
        :return: returns a numerically stable tensor matrix multiplication
        """
        x = tf.reduce_max(X, axis=-1, keepdims=True)[0]
        y = tf.reduce_max(Y, axis=-2, keepdims=True)[0]
        X = (X - x)
        Y = (Y - y)
        return x + y + tf.log(tf.matmul(
            tf.exp(X), tf.exp(Y))) - tf.log(tf.cast(X.get_shape()[-1], dtype=tf.float64))
    def objective_function(self):
        """
        Calculate the objective function loss
        :return: objective function loss
        """
        with tf.name_scope('Loss'):
            obj_loss = - tf.reduce_sum(self.log_sum_w[:, 0, 0])
        return obj_loss
    def train(self, trn_data):
        """Run one optimizer step on a minibatch.

        Each example is tiled k times along the sample axis to match the
        (batch, k, pixels) data placeholder.
        """
        trn_data = np.array([self.k * [x] for x in trn_data])
        _, recon, obj_loss, log_sum_w = self.sess.run([self.optimizer,
                                                       self.recon,
                                                       self.obj_loss,
                                                       self.log_sum_w],
                                                      feed_dict={
                                                          self.train_ph: True,
                                                          self.data_ph: trn_data,
                                                          self.init: False
                                                      })
        return recon, obj_loss, log_sum_w
    def test(self, test_data):
        """Evaluate reconstruction and objective loss without updating weights."""
        test_data = np.array([self.k * [x] for x in test_data])
        recon, obj_loss = self.sess.run([self.recon,
                                         self.obj_loss],
                                        feed_dict={
                                            self.data_ph: test_data,
                                            self.train_ph: False,
                                            self.init: False
                                        })
        return recon, obj_loss
    def data_based_initialize(self, mb_data):
        """Data-dependent WeightNorm init pass (runs no ops; kept for parity
        with the original experiments — see the note in dense())."""
        test_data = np.array([self.k * [x] for x in mb_data])
        empt = self.sess.run([], feed_dict={self.data_ph: test_data, self.init: True})
    def model(self, q_z_next):
        """
        TMC model structure for the Non-facturized case
        :param q_z_next: input data
        :return: returns a reconstructed image
        """
        q_nu_next = None
        q_rho_next = None
        recon = None
        q_zs = [q_z_next]
        lP = []
        lQ = []
        if self.small is True:
            mult = 2
        else:
            mult = 8
        # Encoder portion
        for mlp_units in self.h_units:
            with tf.name_scope('Q_MLP_layer'):
                q_dense_name = 'Q_MLP_layer_{}_'.format(mlp_units)
                q_nu_next, q_rho_next = self.MLP_layer(q_z_next, mlp_units=mult * mlp_units,
                                                       out_dims=mlp_units, scope_name=q_dense_name)
            with tf.name_scope('Q_stochastic_layer'):
                q_z_next, q_logprob = self.sample_z(q_nu_next, q_rho_next)
                q_zs.append(q_z_next)
                lQ.append(tf.cast(q_logprob, dtype=tf.float64))
        # account for prior ~ N(0,1)
        with tf.name_scope('Prior'):
            prior_nu = tf.zeros_like(q_nu_next)
            prior_rho = tf.ones_like(q_rho_next)
            _, prior_logprob = self.sample_z(prior_nu, prior_rho, q_z_next)
            lP.append(tf.cast(prior_logprob, dtype=tf.float64))
        # Decoder portion
        for p_out, mlp_units, q_z_in, q_z_out in zip([8, 16, 32, 64, 784],
                                                     self.h_units[::-1],
                                                     q_zs[:0:-1],
                                                     q_zs[-2::-1]):
            # at last decoder layer, sample from Bernoulli dist
            if p_out == 784:
                bern = True
            else:
                bern = False
            with tf.name_scope('P_MLP_layer'):
                p_dense_name = 'P_MLP_layer_{}_'.format(mlp_units)
                p_nu, p_rho = self.MLP_layer(
                    q_z_in, mlp_units=2 * mlp_units, out_dims=p_out, scope_name=p_dense_name)
            with tf.name_scope('P_stochastic_layer'):
                p_z_next, p_logprob = self.sample_z(p_nu, p_rho, q_z_out, bern=bern)
                if bern:
                    recon = p_z_next
                lP.append(tf.cast(p_logprob, dtype=tf.float64))
        # Average the proposal log-probs over the k samples (log-mean-exp).
        lQ = [(tf.reduce_logsumexp(lq, axis=1, keepdims=True)-
               tf.log(tf.cast(tf.shape(lq)[1], dtype=tf.float64))) for lq in lQ[::-1]]
        # lP = [tf.reduce_logsumexp(lp, axis=1, keepdims=True) for lp in lP]
        # Chain the per-layer weight matrices with the stable log-matmul.
        log_sum_w = lP[0] - lQ[0]
        for i in range(1, len(lQ)):
            log_sum_w = self.logmmmeanexp(log_sum_w, lP[i] - lQ[i])
        log_sum_w = self.logmmmeanexp(log_sum_w, lP[-1])
        print(recon.get_shape())
        return recon, log_sum_w
def mb(x, batch_size):
    """
    Minibatch generator: yields random, full-sized batches forever.

    The data is reshuffled each pass; any trailing partial batch is skipped
    so every yielded batch has exactly `batch_size` rows.
    :param x: input data (indexable along axis 0, e.g. a numpy array)
    :param batch_size: desired batch size
    :return: yield a new batch each call
    """
    n_samples = x.shape[0]
    n_batches = int(np.ceil(n_samples / batch_size))
    while True:
        permutation = np.random.permutation(x.shape[0])
        for b in range(n_batches):
            batch_idx = permutation[b *
                                    batch_size:(b + 1) * batch_size]
            batch = x[batch_idx]
            # BUG FIX: was `is not batch_size`, an identity comparison that
            # only worked via CPython's small-int cache; compare values.
            if batch.shape[0] != batch_size:
                continue
            yield batch
# ---- Command-line configuration -------------------------------------------
parser = ArgumentParser("Tensorflow implementation of IWAE in TMC-paper from NeurIPS 2019")
parser.add_argument('-k', dest='k', type=int, default=20, help="Option for choosing k")
parser.add_argument('--epochs', dest='epochs', type=int, default=1200, help="Option for choosing number of epochs")
parser.add_argument('--batch_size', dest='batch_size', type=int, default=128, help="Option for choosing batch size")
parser.add_argument('--model_type', dest='model_type', type=str, default='small', help="Option for using small or large model")
args = parser.parse_args()
print("Batch size: ", args.batch_size)
print("Number of epochs: ", args.epochs)
print("Model type: ", args.model_type)
print("k: ", args.k)
# MNIST, pixels scaled to [0, 1].
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0
# NOTE(review): this hard-coded value is dead — it is overwritten by
# args.batch_size a few lines below.
batch_size = 128
# TODO TEST WITH k = 5, k = 20, k = 50, k = 100
model_type = args.model_type
if model_type == 'small':
    small = True
else:
    small = False
lr = 1e-3
batch_size = args.batch_size
# want to test with k = 5, 20, 50, 100
k = args.k
epochs = args.epochs
save_path = 'TMC_model_non_fac_{}_k_{}'.format(model_type, k)
# When True, restore a checkpoint and only visualise reconstructions
# (no training).
restore_and_recon = True
if not os.path.exists(save_path):
    os.mkdir(save_path)
# ---- Main: build the graph, then either visualise reconstructions from a
# saved checkpoint, or train/evaluate and checkpoint the model. ------------
with tf.Session() as sess:
    IWAE_net = IWAE(batch_size=batch_size, input_shape=784, k_samples=k, layer_specs=[64, 32, 16, 8, 4],
                    lr=lr, sess=sess, small=small)
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    x_gen = mb(x_train, batch_size)
    x_gen_test = mb(x_test, batch_size)
    # x_gen_init = mb(x_train, batch_size)
    test_err = []
    if restore_and_recon:
        saver.restore(sess, '{}'.format(tf.train.latest_checkpoint(
            '/home/linus/DD2412-Reproducibility-project-Group-61/IWAE/TMC_model_non_fac_small_k_20_objective_TMC/')))
        # NOTE(review): this loop variable shadows the global sample count `k`.
        for k in range(50):
            test_batch = next(x_gen_test).reshape(batch_size, 784)
            recon, obj_loss = IWAE_net.test(test_data=test_batch)
            # 10x10 grid: column 0 shows originals, the rest reconstructions.
            fig, axes = plt.subplots(10, 10)
            fig.subplots_adjust(hspace=0)
            fig.subplots_adjust(wspace=0)
            for j in range(10):
                for i in range(10):
                    axes[j][0].imshow(test_batch[j].reshape(28, 28))
                    axes[j][0].axis('off')
                    axes[j][i].imshow(recon[j][i].reshape(28, 28))
                    axes[j][i].axis('off')
            plt.axis('off')
            axes[0][0].set_title('Original')
            for c in range(1, i+1):
                axes[0][c].set_title('Recon')
            plt.tight_layout()
            plt.show()
    else:
        try:
            for epoch in range(1, epochs+1):
                # used for the WeightNorm initialization, our implementation is flawed and not used
                # if epoch == 1:
                #     init_batch = next(x_gen_init).reshape(batch_size, 784)
                #     IWAE_net.data_based_initialize(init_batch)
                # iterate enough times to see all of the training data each epoch 1 -> (len(train_data)/batch_size)
                for mb_epoch in range(1, 470):
                    x_batch = next(x_gen).reshape(batch_size, 784)
                    recon, obj_loss, log_sum_w = IWAE_net.train(x_batch)
                test_batch_counter = 0
                batch_test_err = 0
                # iterate enough times to see all of the test data each epoch 1 -> (len(test_data)/batch_size)
                for test_epoch in range(1, 80):
                    x_batch_test = next(x_gen_test).reshape(batch_size, 784)
                    test_batch_counter += x_batch_test.shape[0]
                    recon, obj_loss = IWAE_net.test(x_batch_test)
                    batch_test_err += obj_loss
                testing_err = batch_test_err/int(test_batch_counter)  # normalize total error over the nr of batch samples
                summary = IWAE_net.sess.run(IWAE_net.merge_op, feed_dict={IWAE_net.tot_obj_loss: testing_err})
                IWAE_net.writer.add_summary(summary, global_step=epoch)
                # ugly hack for resetting the loss between epochs, only needed for tensorboard
                summary = IWAE_net.sess.run(IWAE_net.merge_op, feed_dict={IWAE_net.tot_obj_loss: 0})
                test_err.append(testing_err)
                print('=====> Objective loss at epoch {}: {}'.format(str(epoch), str(testing_err)))
                if epoch == epochs:
                    # save model at end of runs
                    print('got to end for model TMC non-factorized {} with k: {}'.format(model_type, k))
                    total_obj_loss_model = np.array(test_err)
                    np.save(save_path+"/tot_obj_loss_k_{}_non_fac_{}".format(k, model_type), total_obj_loss_model)
                    saver.save(sess,
                               save_path+"/model_TMC_forward_non_fac_{}_with_k{}.ckpt".format(model_type, k))
                    print(test_err)
        except KeyboardInterrupt:
            # possibility to save model before all epochs have run
            print('Stopped training and testing at epoch {} for model TMC non-factorized {} with k: {}'.format(epoch,
                                                                                                               model_type,
                                                                                                               k))
            total_obj_loss_model = np.array(test_err)
            np.save(save_path + "/tot_obj_loss_k_{}_non_fac_{}".format(k, model_type), total_obj_loss_model)
            saver.save(sess,
                       save_path + "/model_TMC_forward_non_fac_{}_with_k{}.ckpt".format(model_type, k))
|
{"hexsha": "53782e53026d2f474f816208c2cfa70442a11422", "size": 19440, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/TMC.py", "max_stars_repo_name": "LinuNils/TMC_reproduced", "max_stars_repo_head_hexsha": "91c5c877d1adc89626bb80e59233f72228a6d4f5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "models/TMC.py", "max_issues_repo_name": "LinuNils/TMC_reproduced", "max_issues_repo_head_hexsha": "91c5c877d1adc89626bb80e59233f72228a6d4f5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "models/TMC.py", "max_forks_repo_name": "LinuNils/TMC_reproduced", "max_forks_repo_head_hexsha": "91c5c877d1adc89626bb80e59233f72228a6d4f5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.1844660194, "max_line_length": 127, "alphanum_fraction": 0.5594650206, "include": true, "reason": "import numpy", "num_tokens": 4626}
|
import tensorflow as tf
import numpy as np
import tools.processing as pre
# Rebuild the vocabulary exactly as in training so word indices line up with
# the saved embedding matrix.
text = pre.get_text("data/ref_text2.txt")
sentences = text.replace("\n", ";")
vocab = pre.Vocabulary(sentences)
embedding_dimension = 3
word2index_map = {}
index = 0
# for sent in sentences:
#     for word in sent.lower().split():
#         if word not in word2index_map:
#             word2index_map[word] = index
#             index += 1
#index2word_map = {index: word for word, index in word2index_map.items()}
index2word_map = vocab.index2word_map
word2index_map = vocab._dict
vocabulary_size = len(index2word_map)
tf.reset_default_graph()
with tf.name_scope("embeddings"):
    embeddings = tf.get_variable("embedding", shape=[vocabulary_size, embedding_dimension])
    # L2-normalise rows so the dot products below are cosine similarities.
    norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keepdims=True))
    normalized_embeddings = embeddings / norm
saver = tf.train.Saver(var_list = {"embeddings": embeddings})
import sys
# Later, launch the model, use the saver to restore variables from disk, and
# do some work with the model.
# CLI: argv[1] = checkpoint step suffix, argv[2] = query word.
with tf.Session() as sess:
    # Restore variables from disk.
    # saver.restore(sess, "logs/word2vec_intro/final_embeddings.ckpt")
    saver.restore(sess, "logs/word2vec_intro/embeddings.ckpt-" + sys.argv[1])
    #print(vars_in_checkpoint)
    print("Model restored.")
    normalized_embeddings_matrix = sess.run(normalized_embeddings)
    # Rank the vocabulary by cosine similarity to the query word and print
    # the ten nearest neighbours.
    ref_word = normalized_embeddings_matrix[word2index_map[sys.argv[2]]]
    cosine_dists = np.dot(normalized_embeddings_matrix, ref_word)
    ff = np.argsort(cosine_dists)[::-1][0:10]
    for f in ff:
        print(index2word_map[f])
        print(cosine_dists[f])
|
{"hexsha": "db18f198284f33e2c3074e0d5882f8f80cc87292", "size": 1664, "ext": "py", "lang": "Python", "max_stars_repo_path": "deprecated/loader.py", "max_stars_repo_name": "frankzl/deep-rap", "max_stars_repo_head_hexsha": "f992081b136e02d6ee5f976f0343f7e3220a1f39", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-01-10T08:38:27.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-20T15:07:05.000Z", "max_issues_repo_path": "deprecated/loader.py", "max_issues_repo_name": "frankzl/deep-rap", "max_issues_repo_head_hexsha": "f992081b136e02d6ee5f976f0343f7e3220a1f39", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "deprecated/loader.py", "max_forks_repo_name": "frankzl/deep-rap", "max_forks_repo_head_hexsha": "f992081b136e02d6ee5f976f0343f7e3220a1f39", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-07-23T08:06:45.000Z", "max_forks_repo_forks_event_max_datetime": "2019-07-23T08:06:45.000Z", "avg_line_length": 28.2033898305, "max_line_length": 91, "alphanum_fraction": 0.71875, "include": true, "reason": "import numpy", "num_tokens": 414}
|
[STATEMENT]
lemma fls_inverse_X_power:
"inverse ((fls_X::'a::division_ring fls) ^ n) = fls_X_inv ^ n"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. inverse (fls_X ^ n) = fls_X_inv ^ n
[PROOF STEP]
by (simp add: fls_inverse_X_power')
|
{"llama_tokens": 116, "file": null, "length": 1}
|
import argparse
import glob
import os
import subprocess
import chainer
import cupy as cp
import neural_renderer
import numpy as np
import scipy.misc
import tqdm
import deep_dream_3d
def make_gif(working_directory, filename):
    """Assemble the rendered ``_tmp_*.png`` frames into an animated GIF, then
    delete the frame files.

    Requires ImageMagick's ``convert`` binary on PATH.

    Args:
        working_directory: directory containing the ``_tmp_*.png`` frames.
        filename: output GIF path.
    """
    # Sort explicitly: glob.glob returns files in arbitrary order, and the
    # frame order determines the animation (the old shell glob sorted them).
    frames = sorted(glob.glob('%s/_tmp_*.png' % working_directory))
    # Pass an argument list instead of shell=True + string interpolation, so
    # paths containing spaces or shell metacharacters can neither break the
    # command nor inject into the shell.
    options = ['-delay', '8', '-loop', '0', '-layers', 'optimize']
    subprocess.call(['convert'] + options + frames + [filename])
    for frame in frames:  # renamed: no longer shadows the `filename` parameter
        os.remove(frame)
def run():
    """Parse CLI arguments, optimize a 3D deep-dream mesh, and render it to a GIF.

    Side effects: creates the output directory, writes ``_tmp_*.png`` frames
    into it, and finally assembles them into the requested GIF via make_gif.
    """
    # settings
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--filename_obj', type=str)
    parser.add_argument('-o', '--filename_output', type=str)
    parser.add_argument('-d', '--output_directory', type=str)
    parser.add_argument('-al', '--adam_lr', type=float, default=0.01)
    parser.add_argument('-ab1', '--adam_beta1', type=float, default=0.9)
    parser.add_argument('-bs', '--batch_size', type=int, default=4)
    parser.add_argument('-ni', '--num_iteration', type=int, default=1000)
    parser.add_argument('-cd', '--camera_distance', type=float, default=2.5)
    parser.add_argument('-ib', '--init_bias', type=str, default='(0,0,0)')
    parser.add_argument('-g', '--gpu', type=int, default=0)
    args = parser.parse_args()
    # Parse the "(x,y,z)" string into a float tuple by stripping the
    # parentheses and splitting on commas.
    args.init_bias = tuple([float(v) for v in args.init_bias[1:-1].split(',')])
    # create output directory
    if not os.path.exists(args.output_directory):
        os.makedirs(args.output_directory)
    # setup chainer
    chainer.cuda.get_device_from_id(args.gpu).use()
    # Seed both cupy and numpy so runs are reproducible.
    cp.random.seed(0)
    np.random.seed(0)
    # setup scene & optimizer
    model = deep_dream_3d.DeepDreamModel(
        args.filename_obj,
        camera_distance=args.camera_distance,
        init_bias=args.init_bias)
    model.to_gpu()
    optimizer = neural_renderer.Adam(alpha=args.adam_lr, beta1=args.adam_beta1)
    optimizer.setup(model)
    # optimization: standard cleargrads -> forward -> backward -> update loop.
    loop = tqdm.tqdm(range(args.num_iteration))
    for _ in loop:
        optimizer.target.cleargrads()
        loss = model(args.batch_size)
        loss.backward()
        optimizer.update()
        loop.set_description('Optimizing. Loss %.4f' % loss.data)
    # draw object: one frame every 4 degrees of azimuth (90 frames total).
    model.renderer.background_color = (1, 1, 1)
    loop = tqdm.tqdm(range(0, 360, 4))
    for num, azimuth in enumerate(loop):
        loop.set_description('Drawing')
        model.renderer.eye = neural_renderer.get_points_from_angles(2.732, 30, azimuth)
        images = model.renderer.render(*model.mesh.get_batch(1))
        # CHW on GPU -> HWC on host for image saving.
        image = images.data.get()[0].transpose((1, 2, 0))
        # NOTE(review): scipy.misc.toimage was removed in SciPy >= 1.2; this
        # script requires an old SciPy — consider imageio/PIL if upgrading.
        scipy.misc.toimage(image, cmin=0, cmax=1).save('%s/_tmp_%04d.png' % (args.output_directory, num))
    make_gif(args.output_directory, args.filename_output)
# Script entry point.
if __name__ == '__main__':
    run()
|
{"hexsha": "10c870559c4f0d665968fdb18bf7982cdef0bc1f", "size": 2801, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/run.py", "max_stars_repo_name": "hiroharu-kato/deep_dream_3d", "max_stars_repo_head_hexsha": "8f9f4ab0897bb7c453a09bf11652c4dbe80cb714", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 76, "max_stars_repo_stars_event_min_datetime": "2018-01-18T02:23:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T19:51:03.000Z", "max_issues_repo_path": "examples/run.py", "max_issues_repo_name": "EXYNOS-999/deep_dream_3d", "max_issues_repo_head_hexsha": "8f9f4ab0897bb7c453a09bf11652c4dbe80cb714", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/run.py", "max_forks_repo_name": "EXYNOS-999/deep_dream_3d", "max_forks_repo_head_hexsha": "8f9f4ab0897bb7c453a09bf11652c4dbe80cb714", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2018-01-18T13:23:22.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-21T21:40:15.000Z", "avg_line_length": 34.5802469136, "max_line_length": 105, "alphanum_fraction": 0.6772581221, "include": true, "reason": "import numpy,import scipy,import cupy", "num_tokens": 713}
|
# ============================================================================
# 付録 C ガス給湯機及びガス給湯温水暖房機の給湯部
# ============================================================================
import numpy as np
# ============================================================================
# C.2 消費電力量
# ============================================================================
def calc_E_E_hs_d_t(W_dash_k_d_t, W_dash_s_d_t, W_dash_w_d_t, W_dash_b1_d_t, W_dash_b2_d_t, W_dash_ba1_d_t,
                    theta_ex_d_Ave_d,
                    L_dashdash_ba2_d_t):
    """Hourly electricity consumption of the water heater (kWh/h), eq. (1).

    Args:
        W_dash_k_d_t(ndarray): hourly solar-corrected hot-water load at the kitchen tap (MJ/h)
        W_dash_s_d_t(ndarray): hourly solar-corrected load at the bathroom shower tap (MJ/h)
        W_dash_w_d_t(ndarray): hourly solar-corrected load at the washbasin tap (MJ/h)
        W_dash_b1_d_t(ndarray): hourly solar-corrected load for bathtub tap filling (MJ/h)
        W_dash_b2_d_t(ndarray): hourly solar-corrected load for automatic bathtub filling (MJ/h)
        W_dash_ba1_d_t(ndarray): hourly solar-corrected load for bathtub tap hot-water adding (MJ/h)
        theta_ex_d_Ave_d(ndarray): daily mean outdoor temperature (deg C)
        L_dashdash_ba2_d_t(ndarray): hourly solar-corrected load for bathtub reheating (MJ/h)

    Returns:
        ndarray: hourly electricity consumption of the water heater (kWh/h)
    """
    # Auxiliary electricity while on standby and during tap supply (2)
    E_E_hs_aux1_d_t = get_E_E_hs_aux1_d_t(W_dash_k_d_t, W_dash_s_d_t, W_dash_w_d_t, W_dash_b1_d_t, W_dash_ba1_d_t,
                                          theta_ex_d_Ave_d)
    # Auxiliary electricity during automatic bathtub filling (3)
    E_E_hs_aux2_d_t = get_E_E_hs_aux2_d_t(W_dash_b2_d_t)
    # Auxiliary electricity during bathtub reheating (4)
    E_E_hs_aux3_d_t = calc_E_E_hs_aux3_d_t(L_dashdash_ba2_d_t)
    # NOTE(review): removed leftover debug print() calls that dumped the three
    # annual totals to stdout on every invocation.
    return E_E_hs_aux1_d_t + E_E_hs_aux2_d_t + E_E_hs_aux3_d_t
def get_E_E_hs_aux1_d_t(W_dash_k_d_t, W_dash_s_d_t, W_dash_w_d_t, W_dash_b1_d_t, W_dash_ba1_d_t, theta_ex_d_Ave_d):
    """Hourly auxiliary electricity of the heater on standby and during tap
    hot-water supply (kWh/h), eq. (2).

    Args:
        W_dash_k_d_t(ndarray): hourly solar-corrected load at the kitchen tap (MJ/h)
        W_dash_s_d_t(ndarray): hourly solar-corrected load at the bathroom shower tap (MJ/h)
        W_dash_w_d_t(ndarray): hourly solar-corrected load at the washbasin tap (MJ/h)
        W_dash_b1_d_t(ndarray): hourly solar-corrected load for bathtub tap filling (MJ/h)
        W_dash_ba1_d_t(ndarray): hourly solar-corrected load for bathtub tap hot-water adding (MJ/h)
        theta_ex_d_Ave_d(ndarray): daily mean outdoor temperature (deg C)

    Returns:
        ndarray: hourly auxiliary electricity consumption (kWh/h)
    """
    # Broadcast the daily mean temperature to hourly resolution.
    theta_hourly = np.repeat(theta_ex_d_Ave_d, 24)
    total_tap_load = W_dash_k_d_t + W_dash_s_d_t + W_dash_w_d_t + W_dash_b1_d_t + W_dash_ba1_d_t
    # Standby term is a per-day regression on temperature, spread over 24 h.
    standby = (-0.00172 * theta_hourly + 0.2822) / 24
    # The trailing factor converts the MJ-based expression to kWh.
    return (standby + 0.000393 * total_tap_load) * 10 ** 3 / 3600
def get_E_E_hs_aux2_d_t(W_dash_b2_d_t):
    """Hourly auxiliary electricity of the heater during automatic bathtub
    filling (kWh/h), eq. (3).

    Args:
        W_dash_b2_d_t(ndarray): hourly solar-corrected load for automatic bathtub filling (MJ/h)

    Returns:
        ndarray: hourly auxiliary electricity consumption (kWh/h)
    """
    # Daily totals, broadcast back to hourly resolution.
    daily_total = np.repeat(W_dash_b2_d_t.reshape(365, 24).sum(axis=1), 24)
    result = np.zeros(24 * 365)
    # Only days with some filling load consume electricity; a fixed 0.07 kWh
    # per day is distributed in proportion to each hour's share of the load.
    active = daily_total > 0
    result[active] = (0.07 * 10 ** 3 / 3600) * W_dash_b2_d_t[active] / daily_total[active]
    return result
def calc_E_E_hs_aux3_d_t(L_dashdash_ba2_d_t):
    """Hourly auxiliary electricity of the heater while keeping the bath warm
    (reheating) (kWh/h), eq. (4).

    Args:
        L_dashdash_ba2_d_t(ndarray): hourly solar-corrected load for bathtub reheating (MJ/h)

    Returns:
        ndarray: hourly auxiliary electricity consumption (kWh/h)
    """
    hourly = np.zeros(24 * 365)
    # Daily reheating load (MJ/d), broadcast to hourly resolution.
    daily = np.repeat(np.sum(L_dashdash_ba2_d_t.reshape((365, 24)), axis=1), 24)
    active = daily > 0
    # Per-day regression on the daily load, converted to kWh and apportioned
    # to each hour by its share of the day's reheating load.
    hourly[active] = ((0.01723 * daily[active] + 0.06099) * 10 ** 3 / 3600
                      * L_dashdash_ba2_d_t[active] / daily[active])
    return hourly
# ============================================================================
# C.3 ガス消費量
# ============================================================================
def calc_E_G_hs_d_t(theta_ex_d_Ave_d, L_dashdash_k_d_t, L_dashdash_s_d_t, L_dashdash_w_d_t, L_dashdash_b1_d_t,
                    L_dashdash_b2_d_t,
                    L_dashdash_ba1_d_t, L_dashdash_ba2_d_t, bath_function, hw_type=None, e_rtd=None, e_dash_rtd=None):
    """Hourly gas consumption of the water heater (MJ/h), eq. (5).

    Args:
        theta_ex_d_Ave_d(ndarray): daily mean outdoor temperature (deg C)
        L_dashdash_k_d_t(ndarray): hourly solar-corrected hot-water load at the kitchen tap (MJ/h)
        L_dashdash_s_d_t(ndarray): hourly solar-corrected load at the bathroom shower tap (MJ/h)
        L_dashdash_w_d_t(ndarray): hourly solar-corrected load at the washbasin tap (MJ/h)
        L_dashdash_b1_d_t(ndarray): hourly solar-corrected load for bathtub tap filling (MJ/h)
        L_dashdash_b2_d_t(ndarray): hourly solar-corrected load for automatic bathtub filling (MJ/h)
        L_dashdash_ba1_d_t(ndarray): hourly solar-corrected load for bathtub tap hot-water adding (MJ/h)
        L_dashdash_ba2_d_t(ndarray): hourly solar-corrected load for bathtub reheating (MJ/h)
        bath_function(str): bath-function type; one of the three Japanese
            labels matched below (hot-water-only / bath heater without
            reheating / bath heater with reheating)
        hw_type(str, optional): heater type, used only when both efficiency
            arguments are None (Default value = None)
        e_rtd(float, optional): rated efficiency of this heater (Default value = None)
        e_dash_rtd(float, optional): statutory "energy consumption efficiency"
            as defined by the Japanese Energy Conservation Act standards for
            gas water heaters (Default value = None)

    Returns:
        ndarray: hourly gas consumption of the water heater (MJ/h)

    Raises:
        ValueError: if bath_function is not one of the three known labels.
    """
    # Resolve the heater efficiency: an explicit e_rtd wins; otherwise convert
    # the statutory e'_rtd via eq. (9); otherwise fall back to a type default.
    if e_rtd is None:
        if e_dash_rtd is None:
            e_rtd = get_e_rtd_default(hw_type)
        else:
            e_rtd = get_e_rtd(e_dash_rtd, bath_function)
    # Efficiency correction factor for this heater, eq. (8)
    f_hs = get_f_hs(e_rtd)
    # Daily solar-corrected hot-water loads (MJ/d)
    L_dashdash_k_d = get_L_dashdash_k_d(L_dashdash_k_d_t)
    L_dashdash_s_d = get_L_dashdash_s_d(L_dashdash_s_d_t)
    L_dashdash_w_d = get_L_dashdash_w_d(L_dashdash_w_d_t)
    L_dashdash_b1_d = get_L_dashdash_b1_d(L_dashdash_b1_d_t)
    L_dashdash_b2_d = get_L_dashdash_b2_d(L_dashdash_b2_d_t)
    L_dashdash_ba1_d = get_L_dashdash_ba1_d(L_dashdash_ba1_d_t)
    L_dashdash_ba2_d = get_L_dashdash_ba2_d(L_dashdash_ba2_d_t)
    # Daily mean heater efficiency per use (always needed for k, s, w)
    e_k_d = get_e_k_d(theta_ex_d_Ave_d, L_dashdash_k_d, L_dashdash_w_d, f_hs)
    e_s_d = get_e_s_d(theta_ex_d_Ave_d, L_dashdash_s_d, f_hs)
    e_w_d = get_e_w_d(theta_ex_d_Ave_d, L_dashdash_k_d, L_dashdash_w_d, f_hs)
    # In every branch: gas = sum over uses of hourly load / daily efficiency
    # (efficiency repeated 24x to hourly resolution).
    if bath_function == '給湯単機能':
        # Daily mean efficiency for tap filling and hot-water adding
        e_b1_d = get_e_b1_d(theta_ex_d_Ave_d, L_dashdash_b1_d, f_hs)
        e_ba1_d = get_e_ba1_d(theta_ex_d_Ave_d, L_dashdash_ba1_d, f_hs)
        # (5a)
        return L_dashdash_k_d_t / np.repeat(e_k_d, 24) \
               + L_dashdash_s_d_t / np.repeat(e_s_d, 24) \
               + L_dashdash_w_d_t / np.repeat(e_w_d, 24) \
               + L_dashdash_b1_d_t / np.repeat(e_b1_d, 24) \
               + L_dashdash_ba1_d_t / np.repeat(e_ba1_d, 24)
    elif bath_function == 'ふろ給湯機(追焚なし)':
        # Daily mean efficiency for automatic filling and hot-water adding
        e_b2_d = get_e_b2_d(theta_ex_d_Ave_d, L_dashdash_b2_d, f_hs)
        e_ba1_d = get_e_ba1_d(theta_ex_d_Ave_d, L_dashdash_ba1_d, f_hs)
        # (5b)
        return L_dashdash_k_d_t / np.repeat(e_k_d, 24) \
               + L_dashdash_s_d_t / np.repeat(e_s_d, 24) \
               + L_dashdash_w_d_t / np.repeat(e_w_d, 24) \
               + L_dashdash_b2_d_t / np.repeat(e_b2_d, 24) \
               + L_dashdash_ba1_d_t / np.repeat(e_ba1_d, 24)
    elif bath_function == 'ふろ給湯機(追焚あり)':
        # Daily mean efficiency for automatic filling and reheating
        e_b2_d = get_e_b2_d(theta_ex_d_Ave_d, L_dashdash_b2_d, f_hs)
        e_ba2_d = get_e_ba2_d(theta_ex_d_Ave_d, L_dashdash_ba2_d, f_hs)
        # (5c)
        return L_dashdash_k_d_t / np.repeat(e_k_d, 24) \
               + L_dashdash_s_d_t / np.repeat(e_s_d, 24) \
               + L_dashdash_w_d_t / np.repeat(e_w_d, 24) \
               + L_dashdash_b2_d_t / np.repeat(e_b2_d, 24) \
               + L_dashdash_ba2_d_t / np.repeat(e_ba2_d, 24)
    else:
        raise ValueError(bath_function)
def get_e_k_d(theta_ex_d_Ave_d, L_dashdash_k_d, L_dashdash_w_d, f_hs):
    """Daily mean heater efficiency during kitchen-tap hot-water use, eq. (6a).

    Args:
        theta_ex_d_Ave_d(ndarray): daily mean outdoor temperature (deg C)
        L_dashdash_k_d(ndarray): daily solar-corrected load at the kitchen tap (MJ/d)
        L_dashdash_w_d(ndarray): daily solar-corrected load at the washbasin tap (MJ/d)
        f_hs(float): efficiency correction factor for this heater

    Returns:
        ndarray: daily mean heater efficiency, clipped to [0, 1]
    """
    # Regression coefficients: Table C.3 column 0 (kitchen tap), scaled by f_hs.
    a_std, b_std, c_std = (row[0] for row in get_table_c_3())
    a, b, c = a_std * f_hs, b_std * f_hs, c_std * f_hs
    return np.clip(a * theta_ex_d_Ave_d + b * (L_dashdash_k_d + L_dashdash_w_d) + c, 0, 1)
def get_e_s_d(theta_ex_d_Ave_d, L_dashdash_s_d, f_hs):
    """Daily mean heater efficiency during shower-tap hot-water use, eq. (6b).

    Args:
        theta_ex_d_Ave_d(ndarray): daily mean outdoor temperature (deg C)
        L_dashdash_s_d(ndarray): daily solar-corrected load at the bathroom shower tap (MJ/d)
        f_hs(float): efficiency correction factor for this heater

    Returns:
        ndarray: daily mean heater efficiency, clipped to [0, 1]
    """
    # Regression coefficients: Table C.3 column 1 (shower tap), scaled by f_hs.
    a_std, b_std, c_std = (row[1] for row in get_table_c_3())
    a, b, c = a_std * f_hs, b_std * f_hs, c_std * f_hs
    return np.clip(a * theta_ex_d_Ave_d + b * L_dashdash_s_d + c, 0, 1)
def get_e_w_d(theta_ex_d_Ave_d, L_dashdash_k_d, L_dashdash_w_d, f_hs):
    """Daily mean heater efficiency during washbasin-tap hot-water use, eq. (6c).

    Args:
        theta_ex_d_Ave_d(ndarray): daily mean outdoor temperature (deg C)
        L_dashdash_k_d(ndarray): daily solar-corrected load at the kitchen tap (MJ/d)
        L_dashdash_w_d(ndarray): daily solar-corrected load at the washbasin tap (MJ/d)
        f_hs(float): efficiency correction factor for this heater

    Returns:
        ndarray: daily mean heater efficiency, clipped to [0, 1]
    """
    # Regression coefficients: Table C.3 column 2 (washbasin tap), scaled by f_hs.
    a_std, b_std, c_std = (row[2] for row in get_table_c_3())
    a, b, c = a_std * f_hs, b_std * f_hs, c_std * f_hs
    return np.clip(a * theta_ex_d_Ave_d + b * (L_dashdash_k_d + L_dashdash_w_d) + c, 0, 1)
def get_e_b1_d(theta_ex_d_Ave_d, L_dashdash_b1_d, f_hs):
    """Daily mean heater efficiency during bathtub tap filling, eq. (6d).

    Args:
        theta_ex_d_Ave_d(ndarray): daily mean outdoor temperature (deg C)
        L_dashdash_b1_d(ndarray): daily solar-corrected load for bathtub tap filling (MJ/d)
        f_hs(float): efficiency correction factor for this heater

    Returns:
        ndarray: daily mean heater efficiency, clipped to [0, 1]
    """
    # Regression coefficients: Table C.3 column 3 (tap filling), scaled by f_hs.
    a_std, b_std, c_std = (row[3] for row in get_table_c_3())
    a, b, c = a_std * f_hs, b_std * f_hs, c_std * f_hs
    return np.clip(a * theta_ex_d_Ave_d + b * L_dashdash_b1_d + c, 0, 1)
def get_e_b2_d(theta_ex_d_Ave_d, L_dashdash_b2_d, f_hs):
    """Daily mean heater efficiency during automatic bathtub filling, eq. (6e).

    Args:
        theta_ex_d_Ave_d(ndarray): daily mean outdoor temperature (deg C)
        L_dashdash_b2_d(ndarray): daily solar-corrected load for automatic bathtub filling (MJ/d)
        f_hs(float): efficiency correction factor for this heater

    Returns:
        ndarray: daily mean heater efficiency, clipped to [0, 1]
    """
    # Regression coefficients: Table C.3 column 4 (auto filling), scaled by f_hs.
    a_std, b_std, c_std = (row[4] for row in get_table_c_3())
    a, b, c = a_std * f_hs, b_std * f_hs, c_std * f_hs
    return np.clip(a * theta_ex_d_Ave_d + b * L_dashdash_b2_d + c, 0, 1)
def get_e_ba1_d(theta_ex_d_Ave_d, L_dashdash_ba1_d, f_hs):
    """Daily mean heater efficiency during bathtub tap hot-water adding, eq. (6f).

    Args:
        theta_ex_d_Ave_d(ndarray): daily mean outdoor temperature (deg C)
        L_dashdash_ba1_d(ndarray): daily solar-corrected load for bathtub tap hot-water adding (MJ/d)
        f_hs(float): efficiency correction factor for this heater

    Returns:
        ndarray: daily mean heater efficiency, clipped to [0, 1]
    """
    # Regression coefficients: Table C.3 column 5 (hot-water adding), scaled by f_hs.
    a_std, b_std, c_std = (row[5] for row in get_table_c_3())
    a, b, c = a_std * f_hs, b_std * f_hs, c_std * f_hs
    return np.clip(a * theta_ex_d_Ave_d + b * L_dashdash_ba1_d + c, 0, 1)
def get_e_ba2_d(theta_ex_d_Ave_d, L_dashdash_ba2_d, f_hs):
    """Daily mean heater efficiency during bathtub reheating, eq. (6g).

    Args:
        theta_ex_d_Ave_d(ndarray): daily mean outdoor temperature (deg C)
        L_dashdash_ba2_d(ndarray): daily solar-corrected load for bathtub reheating (MJ/d)
        f_hs(float): efficiency correction factor for this heater

    Returns:
        ndarray: daily mean heater efficiency, clipped to [0, 1]
    """
    # Regression coefficients: Table C.3 column 6 (reheating), scaled by f_hs.
    a_std, b_std, c_std = (row[6] for row in get_table_c_3())
    a, b, c = a_std * f_hs, b_std * f_hs, c_std * f_hs
    return np.clip(a * theta_ex_d_Ave_d + b * L_dashdash_ba2_d + c, 0, 1)
def get_table_c_3():
    """Table C.3: regression coefficients a_std_u, b_std_u, c_std_u of the gas
    water-heater efficiency, one column per hot-water use (kitchen, shower,
    washbasin, tap filling, auto filling, hot-water adding, reheating).

    Returns:
        list: the three coefficient rows [a_std_u, b_std_u, c_std_u]
    """
    a_std = (0.0019, 0.0006, 0.0019, 0.0000, 0.0000, 0.0000, 0.0033)
    b_std = (0.0013, 0.0005, 0.0013, 0.0002, -0.0005, 0.0002, 0.0194)
    c_std = (0.6533, 0.7414, 0.6533, 0.7839, 0.7828, 0.7839, 0.5776)
    return [a_std, b_std, c_std]
def get_f_hs(e_rtd):
    """Efficiency correction factor f_hs for this heater, eq. (8).

    Args:
        e_rtd(float): rated efficiency of this heater

    Returns:
        float: efficiency correction factor
    """
    # Linear rescaling of the rated efficiency against the 0.745 reference.
    rescaled = 0.8754 * e_rtd + 0.060
    return rescaled / 0.745
def get_e_rtd_default(hw_type):
    """Default rated efficiency of the heater, by heater type.

    Args:
        hw_type(str): type of water heater / hot-water heating unit

    Returns:
        float: default rated efficiency

    Raises:
        ValueError: for an unknown heater type.
    """
    # Latent-heat-recovery models share 0.836, conventional models 0.704.
    defaults = {
        'ガス潜熱回収型給湯機': 0.836,
        'ガス潜熱回収型給湯温水暖房機': 0.836,
        'ガス従来型給湯機': 0.704,
        'ガス従来型給湯温水暖房機': 0.704,
    }
    try:
        return defaults[hw_type]
    except KeyError:
        raise ValueError(hw_type)
def get_e_rtd(e_dash_rtd, bath_function):
    """Convert the statutory "energy consumption efficiency" e'_rtd (as defined
    by the Japanese Energy Conservation Act standards for gas water heaters)
    into the heater efficiency e_rtd, eq. (9).

    Args:
        e_dash_rtd(float): statutory energy consumption efficiency
        bath_function(str): bath-function type

    Returns:
        float: converted heater efficiency

    Raises:
        ValueError: for an unknown bath-function type.
    """
    if bath_function in ('給湯単機能', 'ふろ給湯機(追焚なし)'):
        return e_dash_rtd - 0.046  # (9a)
    if bath_function == 'ふろ給湯機(追焚あり)':
        return e_dash_rtd - 0.064  # (9b)
    raise ValueError()
# ============================================================================
# C.4 灯油消費量
# ============================================================================
def get_E_K_hs_d_t():
    """Hourly kerosene consumption of the water heater.

    Gas heaters burn no kerosene, so this is a constant zero series.

    Returns:
        ndarray: zeros, one entry per hour of the year
    """
    # 8760 = 24 hours x 365 days
    return np.zeros(8760)
# ============================================================================
# C.5 1日当たりの太陽熱補正給湯熱負荷
# ============================================================================
def get_L_dashdash_k_d(L_dashdash_k_d_t):
    """Daily solar-corrected hot-water load at the kitchen tap (MJ/d).

    Args:
        L_dashdash_k_d_t(ndarray): hourly load at the kitchen tap (MJ/h), 8760 entries

    Returns:
        ndarray: 365 daily totals (MJ/d)
    """
    # Fold the hourly series into (day, hour) and total each day.
    per_day = L_dashdash_k_d_t.reshape((365, 24))
    return per_day.sum(axis=1)
def get_L_dashdash_s_d(L_dashdash_s_d_t):
    """Daily solar-corrected hot-water load at the bathroom shower tap (MJ/d).

    Args:
        L_dashdash_s_d_t(ndarray): hourly load at the shower tap (MJ/h), 8760 entries

    Returns:
        ndarray: 365 daily totals (MJ/d)
    """
    # Fold the hourly series into (day, hour) and total each day.
    per_day = L_dashdash_s_d_t.reshape((365, 24))
    return per_day.sum(axis=1)
def get_L_dashdash_w_d(L_dashdash_w_d_t):
    """Daily solar-corrected hot-water load at the washbasin tap (MJ/d).

    Args:
        L_dashdash_w_d_t(ndarray): hourly load at the washbasin tap (MJ/h), 8760 entries

    Returns:
        ndarray: 365 daily totals (MJ/d)
    """
    # Fold the hourly series into (day, hour) and total each day.
    per_day = L_dashdash_w_d_t.reshape((365, 24))
    return per_day.sum(axis=1)
def get_L_dashdash_b1_d(L_dashdash_b1_d_t):
    """Daily solar-corrected hot-water load for bathtub tap filling (MJ/d).

    Args:
        L_dashdash_b1_d_t(ndarray): hourly tap-filling load (MJ/h), 8760 entries

    Returns:
        ndarray: 365 daily totals (MJ/d)
    """
    # Fold the hourly series into (day, hour) and total each day.
    per_day = L_dashdash_b1_d_t.reshape((365, 24))
    return per_day.sum(axis=1)
def get_L_dashdash_b2_d(L_dashdash_b2_d_t):
    """Daily solar-corrected hot-water load for automatic bathtub filling (MJ/d).

    Args:
        L_dashdash_b2_d_t(ndarray): hourly auto-filling load (MJ/h), 8760 entries

    Returns:
        ndarray: 365 daily totals (MJ/d)
    """
    # Fold the hourly series into (day, hour) and total each day.
    per_day = L_dashdash_b2_d_t.reshape((365, 24))
    return per_day.sum(axis=1)
def get_L_dashdash_ba1_d(L_dashdash_ba1_d_t):
    """Daily solar-corrected hot-water load for bathtub tap hot-water adding (MJ/d).

    Args:
        L_dashdash_ba1_d_t(ndarray): hourly hot-water-adding load (MJ/h), 8760 entries

    Returns:
        ndarray: 365 daily totals (MJ/d)
    """
    # Fold the hourly series into (day, hour) and total each day.
    per_day = L_dashdash_ba1_d_t.reshape((365, 24))
    return per_day.sum(axis=1)
def get_L_dashdash_ba2_d(L_dashdash_ba2_d_t):
    """Daily solar-corrected hot-water load for bathtub reheating (MJ/d).

    Args:
        L_dashdash_ba2_d_t(ndarray): hourly reheating load (MJ/h), 8760 entries

    Returns:
        ndarray: 365 daily totals (MJ/d)
    """
    # Fold the hourly series into (day, hour) and total each day.
    per_day = L_dashdash_ba2_d_t.reshape((365, 24))
    return per_day.sum(axis=1)
|
{"hexsha": "3020625b755379f264993fc543c46f5040f8078e", "size": 16361, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/pyhees/section7_1_c.py", "max_stars_repo_name": "jjj-design/pyhees", "max_stars_repo_head_hexsha": "d63e7cd84abfc2f509bc1cd1256598a10aac1825", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/pyhees/section7_1_c.py", "max_issues_repo_name": "jjj-design/pyhees", "max_issues_repo_head_hexsha": "d63e7cd84abfc2f509bc1cd1256598a10aac1825", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2022-01-04T07:29:52.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-19T08:02:51.000Z", "max_forks_repo_path": "src/pyhees/section7_1_c.py", "max_forks_repo_name": "jjj-design/pyhees", "max_forks_repo_head_hexsha": "d63e7cd84abfc2f509bc1cd1256598a10aac1825", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-01-19T07:57:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-07T00:25:54.000Z", "avg_line_length": 29.3207885305, "max_line_length": 136, "alphanum_fraction": 0.6438481755, "include": true, "reason": "import numpy", "num_tokens": 8971}
|
\section{201912-5}
\input{problem/18/201912-5-p.tex}
|
{"hexsha": "99edc64deed820314392093eef828945f6e6fe8b", "size": 53, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "problem/18/201912-5.tex", "max_stars_repo_name": "xqy2003/CSP-Project", "max_stars_repo_head_hexsha": "26ef348463c1f948c7c7fb565edf900f7c041560", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-14T01:47:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-14T01:47:19.000Z", "max_issues_repo_path": "problem/18/201912-5.tex", "max_issues_repo_name": "xqy2003/CSP-Project", "max_issues_repo_head_hexsha": "26ef348463c1f948c7c7fb565edf900f7c041560", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "problem/18/201912-5.tex", "max_forks_repo_name": "xqy2003/CSP-Project", "max_forks_repo_head_hexsha": "26ef348463c1f948c7c7fb565edf900f7c041560", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.6666666667, "max_line_length": 33, "alphanum_fraction": 0.7358490566, "num_tokens": 22}
|
import random
import gym
import numpy as np
from preprocessing import process_frame
class GameWrapper:
    """Wrapper around a Gym environment that keeps a stacked frame history
    (the last ``history_length`` processed frames) as the agent state."""

    def __init__(self, env_name, no_op_steps=10, history_length=4):
        self.env = gym.make(env_name)
        self.no_op_steps = no_op_steps
        self.history_length = history_length
        self.state = None       # stacked frame history, built by reset()
        self.last_lives = 0     # lives remaining after the previous step

    def reset(self, evaluation=False):
        """Reset the environment and rebuild the initial stacked state.

        Arguments:
            evaluation: Set to True when the agent is being evaluated. Takes a
                random number of no-op steps so each evaluation episode starts
                from a slightly different position.
        """
        initial_frame = self.env.reset()
        self.last_lives = 0
        if evaluation:
            no_ops = random.randint(0, self.no_op_steps)
            for _ in range(no_ops):
                self.env.step(1)
        # The initial state is the first processed frame repeated
        # history_length times along the channel axis.
        # (process_frame presumably returns an (H, W, 1) array — confirm.)
        self.state = np.repeat(process_frame(initial_frame), self.history_length, axis=2)

    def step(self, action, render_mode=None):
        """Perform an action and observe the result.

        Arguments:
            action: An integer describing the action the agent chose.
            render_mode: None renders nothing, 'human' renders the screen in a
                new window, 'rgb_array' also returns an np.array with rgb
                values, 'explain' also returns the raw frame.

        Returns:
            processed_frame, reward, terminal, life_lost — plus the rendered
            rgb array for 'rgb_array', or prefixed by the raw frame for
            'explain'.
        """
        raw_frame, reward, terminal, info = self.env.step(action)
        # The 'info' dict carries the remaining lives; flag a lost life so the
        # caller can force the agent to restart play instead of idling.
        lives = info["ale.lives"]
        life_lost = True if lives < self.last_lives else terminal
        self.last_lives = lives
        processed = process_frame(raw_frame)
        # Slide the history window: drop the oldest channel, append the newest.
        self.state = np.append(self.state[:, :, 1:], processed, axis=2)
        if render_mode == "rgb_array":
            rendered = self.env.render(render_mode)
            return processed, reward, terminal, life_lost, rendered
        if render_mode == "human":
            self.env.render()
        elif render_mode == "explain":
            return raw_frame, processed, reward, terminal, life_lost
        return processed, reward, terminal, life_lost
|
{"hexsha": "27f78a521738015b6361451b5f82f4035079ef92", "size": 3098, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/gamewrapper.py", "max_stars_repo_name": "alexcosta13/explainable-breakout", "max_stars_repo_head_hexsha": "483448b04747cd4bc8609be2e4141176e3b05b94", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-25T09:10:31.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T09:10:31.000Z", "max_issues_repo_path": "src/gamewrapper.py", "max_issues_repo_name": "alexcosta13/explainable-breakout", "max_issues_repo_head_hexsha": "483448b04747cd4bc8609be2e4141176e3b05b94", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/gamewrapper.py", "max_forks_repo_name": "alexcosta13/explainable-breakout", "max_forks_repo_head_hexsha": "483448b04747cd4bc8609be2e4141176e3b05b94", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.4470588235, "max_line_length": 146, "alphanum_fraction": 0.6213686249, "include": true, "reason": "import numpy", "num_tokens": 657}
|
[STATEMENT]
lemma euler_witness_exists_nat:
assumes "odd n" "\<not>prime n" "2 < n"
shows "\<exists>a. euler_witness (int a) n \<and> coprime a n \<and> 0 < a \<and> a < n"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>a. euler_witness (int a) n \<and> coprime a n \<and> 0 < a \<and> a < n
[PROOF STEP]
using euler_witness_exists[OF assms]
[PROOF STATE]
proof (prove)
using this:
\<exists>a. euler_witness a n \<and> coprime a (int n) \<and> 0 < a \<and> a < int n
goal (1 subgoal):
1. \<exists>a. euler_witness (int a) n \<and> coprime a n \<and> 0 < a \<and> a < n
[PROOF STEP]
using zero_less_imp_eq_int
[PROOF STATE]
proof (prove)
using this:
\<exists>a. euler_witness a n \<and> coprime a (int n) \<and> 0 < a \<and> a < int n
0 < ?k \<Longrightarrow> \<exists>n>0. ?k = int n
goal (1 subgoal):
1. \<exists>a. euler_witness (int a) n \<and> coprime a n \<and> 0 < a \<and> a < n
[PROOF STEP]
by fastforce
|
{"llama_tokens": 421, "file": "Probabilistic_Prime_Tests_Euler_Witness", "length": 3}
|
from functools import partial
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
from jax import nn
from rljax.network.base import MLP
from rljax.network.conv import DQNBody, SLACDecoder, SLACEncoder
class CumProbNetwork(hk.Module):
    """
    Fraction Proposal Network for FQF: maps a state embedding to a monotone
    sequence of cumulative probabilities (quantile fractions) and their midpoints.
    """

    def __init__(self, num_quantiles=64):
        super(CumProbNetwork, self).__init__()
        self.num_quantiles = num_quantiles

    def __call__(self, x):
        init = hk.initializers.Orthogonal(scale=1.0 / np.sqrt(3.0))
        # Softmax makes the per-quantile fractions positive and sum to 1.
        probs = nn.softmax(hk.Linear(self.num_quantiles, w_init=init)(x))
        leading_zero = jnp.zeros((probs.shape[0], 1))
        # Prepend 0 and accumulate, so cum_p runs from 0 up to 1.
        cum_p = jnp.concatenate([leading_zero, jnp.cumsum(probs, axis=1)], axis=1)
        # Midpoints of consecutive fractions.
        cum_p_prime = (cum_p[:, 1:] + cum_p[:, :-1]) / 2.0
        return cum_p, cum_p_prime
def make_quantile_nerwork(
    rng,
    state_space,
    action_space,
    fn,
    num_quantiles,
    env_type
):
    """Build and initialize the feature and quantile networks for FQF.

    NOTE(review): "nerwork" is a typo for "network", kept because renaming
    would break existing callers.

    Args:
        rng: iterator yielding PRNG keys (consumed via next()).
        state_space: Gym-style observation space (provides sample() and shape).
        action_space: Gym-style action space (currently unused here).
        fn: haiku-transformable function taking (feature, cum_p) — the
            quantile head to initialize.
        num_quantiles: number of quantile fractions fed to the quantile head.
        env_type: 'atari', 'minatar', or anything else for flat observations.

    Returns:
        tuple: (network_dict, params_dict, fake_feature) — immutable dicts of
        haiku-transformed networks and their initialized parameters, plus the
        dummy feature array used for initialization.
    """
    # Dummy batched observation for parameter initialization.
    fake_state = state_space.sample()[None, ...]
    if len(state_space.shape) == 1:
        fake_state = fake_state.astype(np.float32)
    network_dict = {}
    params_dict = {}
    if env_type == 'atari' or env_type == 'minatar':
        # Convolutional feature extractor for image observations.
        network_dict["feature"] = hk.without_apply_rng(hk.transform(lambda s: DQNBody()(s, env_type)))
        dim = 0
        if env_type == 'atari':
            dim = 7 * 7 * 64
        else:
            dim = 8 * 8 * 16 #convolutional layer output size
        fake_feature = np.zeros((1, dim), dtype=np.float32) #1024 for minatar
    else:
        # Flat observations pass through unchanged (identity feature network).
        network_dict["feature"] = hk.without_apply_rng(hk.transform(lambda s: s))
        fake_feature = fake_state
    params_dict["feature"] = network_dict["feature"].init(next(rng), fake_state)
    # Dummy cumulative-probability input for the quantile head.
    fake_cum_p = np.empty((1, num_quantiles), dtype=np.float32)
    network_dict["quantile"] = hk.without_apply_rng(hk.transform(fn))
    params_dict["quantile"] = network_dict["quantile"].init(next(rng), fake_feature, fake_cum_p)
    network_dict = hk.data_structures.to_immutable_dict(network_dict)
    params_dict = hk.data_structures.to_immutable_dict(params_dict)
    return network_dict, params_dict, fake_feature
class SACLinear(hk.Module):
    """
    Linear projection head for SAC+AE: linear layer, layer norm, tanh squash.
    """

    def __init__(self, feature_dim):
        super().__init__()
        self.feature_dim = feature_dim

    def __call__(self, x):
        init = hk.initializers.Orthogonal(scale=1.0)
        projected = hk.Linear(self.feature_dim, w_init=init)(x)
        normalized = hk.LayerNorm(axis=1, create_scale=True, create_offset=True)(projected)
        # tanh keeps the output features bounded in (-1, 1).
        return jnp.tanh(normalized)
class ConstantGaussian(hk.Module):
    """
    Constant diagonal Gaussian for SLAC: ignores the input values and returns a
    zero mean and fixed std, batched to match the input's leading dimension.
    """

    def __init__(self, output_dim, std):
        super().__init__()
        self.output_dim = output_dim
        self.std = std

    def __call__(self, x):
        batch_size = x.shape[0]
        mean = jnp.zeros((batch_size, self.output_dim))
        std = jnp.ones((batch_size, self.output_dim)) * self.std
        # stop_gradient: this distribution's parameters are fixed constants.
        return jax.lax.stop_gradient(mean), jax.lax.stop_gradient(std)
class Gaussian(hk.Module):
    """
    Diagonal Gaussian with input-dependent mean and std for SLAC.
    """

    def __init__(self, output_dim, hidden_units=(256, 256), negative_slope=0.2):
        super().__init__()
        self.output_dim = output_dim
        self.hidden_units = hidden_units
        self.negative_slope = negative_slope

    def __call__(self, x):
        activation = partial(nn.leaky_relu, negative_slope=self.negative_slope)
        # One MLP emits both halves: [mean | log_std].
        raw = MLP(
            output_dim=2 * self.output_dim,
            hidden_units=self.hidden_units,
            hidden_activation=activation,
        )(x)
        mean, log_std = jnp.split(raw, 2, axis=1)
        # softplus keeps std positive; the epsilon avoids degenerate variance.
        std = nn.softplus(log_std) + 1e-5
        return mean, std
def make_stochastic_latent_variable_model(
    rng,
    state_space,
    action_space,
    num_sequences,
    units_model,
    z1_dim,
    z2_dim,
    feature_dim,
):
    """Build and initialize the Stochastic Latent Variable Model for SLAC.

    Args:
        rng: iterator yielding PRNG keys (consumed via next(); order matters).
        state_space: Gym-style observation space (provides shape).
        action_space: Gym-style action space (provides shape).
        num_sequences: number of time steps in each input sequence.
        units_model: hidden-layer widths for the Gaussian MLPs.
        z1_dim: dimension of the first latent variable z1.
        z2_dim: dimension of the second latent variable z2.
        feature_dim: dimension of the encoder's feature output.

    Returns:
        tuple: (network_dict, params_dict) — immutable dicts of
        haiku-transformed networks and their initialized parameters.
    """
    # Fake input for JIT compilations.
    fake_state_ = jnp.empty((1, num_sequences, *state_space.shape), dtype=jnp.uint8)
    fake_action_ = jnp.empty((1, num_sequences, *action_space.shape))
    fake_action = jnp.empty((1, *action_space.shape))
    fake_feature = jnp.empty((1, feature_dim))
    fake_z_ = jnp.empty((1, num_sequences, z1_dim + z2_dim))
    fake_z1_ = jnp.empty((1, num_sequences, z1_dim))
    fake_z2_ = jnp.empty((1, num_sequences, z2_dim))
    fake_z1 = jnp.empty((1, z1_dim))
    fake_z2 = jnp.empty((1, z2_dim))
    # Prior over z1 given the previous z2 and the action.
    def fn_z1_prior(z2, a):
        return Gaussian(output_dim=z1_dim, hidden_units=units_model)(jnp.concatenate([z2, a], axis=1))
    # Posterior over z1, additionally conditioned on the encoder feature f.
    def fn_z1_post(f, z2, a):
        return Gaussian(output_dim=z1_dim, hidden_units=units_model)(jnp.concatenate([f, z2, a], axis=1))
    # Transition for z2 given z1, the previous z2 and the action.
    def fn_z2(z1, z2, a):
        return Gaussian(output_dim=z2_dim, hidden_units=units_model)(jnp.concatenate([z1, z2, a], axis=1))
    # Reward model over (z, a, next z); flattens (batch, seq) for the MLP and
    # restores the sequence dimension on the way out.
    def fn_reward(z_, a_, n_z_):
        x = jnp.concatenate([z_, a_, n_z_], axis=-1)
        B, S, X = x.shape
        mean, std = Gaussian(output_dim=1, hidden_units=units_model)(x.reshape([B * S, X]))
        return mean.reshape([B, S, 1]), std.reshape([B, S, 1])
    def fn_encoder(x):
        return SLACEncoder(output_dim=feature_dim)(x)
    def fn_decoder(z1_, z2_):
        return SLACDecoder(state_space=state_space, std=np.sqrt(0.1))(jnp.concatenate([z1_, z2_], axis=-1))
    network_dict = {}
    params_dict = {}
    # Each network is haiku-transformed and initialized with the matching fake
    # inputs; a fresh PRNG key is drawn from `rng` for every init.
    network_dict["z1_prior_init"] = hk.without_apply_rng(hk.transform(lambda x: ConstantGaussian(z1_dim, 1.0)(x)))
    params_dict["z1_prior_init"] = network_dict["z1_prior_init"].init(next(rng), fake_action)
    network_dict["z1_prior"] = hk.without_apply_rng(hk.transform(fn_z1_prior))
    params_dict["z1_prior"] = network_dict["z1_prior"].init(next(rng), fake_z2, fake_action)
    network_dict["z1_post_init"] = hk.without_apply_rng(hk.transform(lambda x: Gaussian(z1_dim, units_model)(x)))
    params_dict["z1_post_init"] = network_dict["z1_post_init"].init(next(rng), fake_feature)
    network_dict["z1_post"] = hk.without_apply_rng(hk.transform(fn_z1_post))
    params_dict["z1_post"] = network_dict["z1_post"].init(next(rng), fake_feature, fake_z2, fake_action)
    network_dict["z2_init"] = hk.without_apply_rng(hk.transform(lambda x: Gaussian(z2_dim, units_model)(x)))
    params_dict["z2_init"] = network_dict["z2_init"].init(next(rng), fake_z1)
    network_dict["z2"] = hk.without_apply_rng(hk.transform(fn_z2))
    params_dict["z2"] = network_dict["z2"].init(next(rng), fake_z1, fake_z2, fake_action)
    network_dict["reward"] = hk.without_apply_rng(hk.transform(fn_reward))
    params_dict["reward"] = network_dict["reward"].init(next(rng), fake_z_, fake_action_, fake_z_)
    network_dict["encoder"] = hk.without_apply_rng(hk.transform(fn_encoder))
    params_dict["encoder"] = network_dict["encoder"].init(next(rng), fake_state_)
    network_dict["decoder"] = hk.without_apply_rng(hk.transform(fn_decoder))
    params_dict["decoder"] = network_dict["decoder"].init(next(rng), fake_z1_, fake_z2_)
    network_dict = hk.data_structures.to_immutable_dict(network_dict)
    params_dict = hk.data_structures.to_immutable_dict(params_dict)
    return network_dict, params_dict
|
{"hexsha": "ac1a533c289080abea50b571d38d8f368b0f1547", "size": 7286, "ext": "py", "lang": "Python", "max_stars_repo_path": "rljax/network/misc.py", "max_stars_repo_name": "julio-cmdr/rljax", "max_stars_repo_head_hexsha": "cbca4638deb6d4e960e71a862573129ba4c5ea79", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "rljax/network/misc.py", "max_issues_repo_name": "julio-cmdr/rljax", "max_issues_repo_head_hexsha": "cbca4638deb6d4e960e71a862573129ba4c5ea79", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rljax/network/misc.py", "max_forks_repo_name": "julio-cmdr/rljax", "max_forks_repo_head_hexsha": "cbca4638deb6d4e960e71a862573129ba4c5ea79", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8916256158, "max_line_length": 114, "alphanum_fraction": 0.6701894043, "include": true, "reason": "import numpy,import jax,from jax", "num_tokens": 1999}
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 18 20:00:16 2019
@author: kristl
"""
"""
# Example for SOPLS
import pandas as pd
import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
import SOPLS
Y_df = pd.read_table('./data/D.txt', index_col=0)
Y = Y_df.values
X1_df = pd.read_table('./data/A.txt', index_col=0)
X1 = X1_df.values
X2_df = pd.read_table('./data/B.txt', index_col=0)
X2 = X2_df.values
X3_df = pd.read_table('./data/C.txt', index_col=0)
X3 = X3_df.values
X = np.hstack([X1, X2, X3])
blocks = np.hstack([np.ones(X1.shape[1]),np.ones(X2.shape[1])*2,np.ones(X3.shape[1])*3])
mlf = make_pipeline(SOPLS.SOPLS(blocks=blocks, ncomp=[5,3,7], max_comp=10, wide_data=True))
mlf.fit(X,Y)
mlf.predict(X)
mlf2 = make_pipeline(StandardScaler(),SOPLS.SOPLS(blocks=blocks, ncomp=[5,3,7], max_comp=10, wide_data=True))
mlf2.fit(X,Y)
mlf2.predict(X)
"""
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils.validation import check_is_fitted #check_X_y, check_array,
class SOPLS(BaseEstimator):
    """Sequential and Orthogonalized Partial Least Squares (SO-PLS) regression.

    Scikit-learn compatible estimator wrapping the module-level
    ``SOPLS_fit`` / ``SOPLS_predict_wide`` functions.

    Parameters
    ----------
    blocks : array-like of int, length n_features
        Block membership for every column of X; columns sharing a value
        form one block. Must have length > 1.
    ncomp : 'max' or list of int, default='max'
        Number of components per block; 'max' uses
        min(n_samples - 1, n_features_in_block) for each block.
    max_comp : int, default=20
        Upper bound on the total number of components across blocks.
    wide_data : 'auto' or bool, default='auto'
        Whether to use the kernel (wide-data) algorithm; 'auto' decides
        from the data dimensions in ``fit``.
    """
    def __init__(self, blocks=None, ncomp = 'max', max_comp = 20, wide_data = 'auto'):
        self.ncomp = ncomp
        self.max_comp = max_comp
        self.wide_data = wide_data
        assert len(blocks)>1, "Please, specify blocks as an integer vector of length equal to the number of variables"
        self.blocks = blocks
    def fit(self, X, Y):
        """Fit the SO-PLS model.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data; columns are assigned to blocks via ``self.blocks``.
        Y : array-like, shape (n_samples, n_outputs)
            Target values.

        Returns
        -------
        self : object
            Returns self.
        """
        # Store data shapes
        unique_blocks = np.unique(self.blocks)
        self.nblock = len(unique_blocks)
        self.n = Y.shape[0]
        # Split X into blocks (one matrix per unique block label)
        Xsplit = []
        for i in range(self.nblock):
            Xsplit.append(X[:,self.blocks == unique_blocks[i]])
        X = Xsplit
        del Xsplit
        p = []
        for i in range(self.nblock):
            p.append(X[i].shape[1])
        # NOTE(review): the two branches below overwrite the hyperparameters
        # self.ncomp / self.wide_data set in __init__, which breaks sklearn's
        # convention that fit() must not modify constructor params — confirm.
        if self.ncomp == 'max':
            ncomp = []
            for i in range(self.nblock):
                ncomp.append(min(self.n-1, p[i]))
            self.ncomp = ncomp
        # Check if data are wide or tall
        # NOTE(review): heuristic compares the total number of variables with
        # sqrt(n_samples); verify the intended threshold (often #vars > n).
        if self.wide_data == 'auto':
            if sum(p) > np.sqrt(Y.shape[0]):
                self.wide_data = True
            else:
                self.wide_data = False
        # Main fitting (SOPLS_fit centres the block matrices in place,
        # so self.X below holds the centred blocks)
        fit = SOPLS_fit(X, Y, self.ncomp, self.max_comp, self.wide_data)
        self.decomp = {'Q' : fit[0][0], 'T' : fit[0][1], 'Ry' : fit[0][2]}
        self.comp_order = {'comp_list' : fit[0][3], 'change_block' : fit[0][4]}
        if self.wide_data == True:
            # NOTE(review): stores only the first block's kernel matrix
            # (fit[1] is the full list of C matrices) — confirm intended.
            self.C = fit[1][0]
        self.X = X
        self.Y = Y
        self.is_fitted_ = True
        # `fit` should always return `self`
        return self
    def predict(self, X, comps = []):
        """Predict responses for X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Samples, with the same column/block layout as seen in ``fit``.
        comps : list of int, optional
            Number of components per block to use; when empty, the last
            row of the fitted component list is used.

        Returns
        -------
        Y_pred : ndarray
            Predictions from the wide-data algorithm.
            NOTE(review): when ``self.wide_data`` is False, ``Y_pred`` is
            never assigned and the final return raises NameError — the
            tall-data prediction path is unimplemented.
        """
        # Split the new data into the same blocks as the training data
        unique_blocks = np.unique(self.blocks)
        Xsplit = []
        for i in range(self.nblock):
            Xsplit.append(X[:,self.blocks == unique_blocks[i]])
        X = Xsplit
        del Xsplit
        # X = check_array(X, accept_sparse=True)
        check_is_fitted(self, 'is_fitted_')
        if self.wide_data:
            if len(comps) == 0:
                comps = self.comp_order['comp_list'][-1]
            Y_pred = SOPLS_predict_wide(self, X, comps)
        return Y_pred
#################################################################
# SO-PLS fit function (branching out to wide and tall algorithms)
def SOPLS_fit(X, Y, ncomp, max_comp, wide_data='auto'):
    """Fit an SO-PLS decomposition, dispatching on data shape.

    Centres every block of ``X`` (rebinding the list elements in place,
    so the caller's list ends up holding the centred blocks) and centres
    ``Y``, then runs the wide-data kernel algorithm or the (unfinished)
    tall-data branch.

    Returns a tuple ``(decomposition, C)`` in the wide case, where ``C``
    is the list of per-block kernel matrices X_i X_i'.
    """
    n_blocks = len(X)
    # Centre each block; the in-place rebinding is intentional (callers
    # keep a reference to the centred blocks).
    block_means = []
    for b in range(n_blocks):
        block_means.append(X[b].mean(axis=0))
        X[b] = X[b] - block_means[b]
    Y = Y - Y.mean(axis=0)
    # Select algorithm (wide or tall); note the default 'auto' is a
    # truthy string, so it takes the wide path.
    if wide_data:
        # Liland's kernel PLS on the n x n cross-product matrices
        gram = [X[b] @ X[b].T for b in range(n_blocks)]
        decomposition = SOPLS_wide(gram, Y, ncomp, max_comp)
        return (decomposition, gram)
    else:
        # NIPALS based computations (not implemented)
        print('something')
#################################
# SO-PLS workhorse for wide data
def SOPLS_wide(C,Y, ncomp, max_comp, Cval = None):
    """SO-PLS workhorse for wide data (kernel formulation).

    Parameters
    ----------
    C : list of ndarray, shape (n, n) each
        Per-block kernel (cross-product) matrices X_i X_i'.
    Y : ndarray, shape (n, n_resp)
        Centred response matrix.
    ncomp : list of int
        Maximum components per block.
    max_comp : int
        Total component budget across blocks.
    Cval : list of ndarray or None
        Validation kernel matrices; when given, predictions are returned
        instead of the decomposition.

    Returns
    -------
    (Q, T, Ry, comp_list, change_block) in fit mode, or
    (Y_pred, comp_list, change_block) in prediction mode.
    """
    nblock = len(C)
    n, nresp = Y.shape
    # Prepare for low redundancy computations
    comp_list, change_block, block_usage = component_combos(nblock, ncomp, max_comp)
    block_combo = block_usage[0]
    block_index = block_usage[1]
    # NOTE(review): max(block_index) is the largest index, not the count;
    # the loops below over range(n_combos) skip the last combination — confirm.
    n_combos = max(block_index)
    n_comps = np.sum(comp_list, axis=1)
    tot_comps = len(change_block)
    # All combinations of block usage
    sumC = []
    for i in range(n_combos):
        sumC.append(0)
        for j in range(nblock):
            if block_combo[i,j]:
                sumC[i] = sumC[i] + C[j]
    # Check for prediction
    pred = False
    # NOTE(review): `Cval == None` is ambiguous when Cval is an ndarray;
    # should presumably be `Cval is not None`.
    if not (Cval == None):
        pred = True
        sumCval = []
        sumCval.append(0)
        # NOTE(review): this loop reuses the stale loop variable `i` from
        # above and indexes sumCval[i] although only one element was
        # appended — looks broken; verify the prediction path.
        for j in range(nblock):
            if block_combo[i,j]:
                sumCval[i] = sumCval[i] + Cval[j]
        Y_mean = np.mean(Y, axis=0)
        nval = Y.shape[0]
        Crval_currB = []
        # NOTE(review): indexing an empty list here raises IndexError;
        # presumably Crval_currB.append(...) was intended.
        for b in range(nblock):
            Crval_currB[b] = np.zeros([nval, max_comp])
    # Prepare storage
    Ry = np.zeros([n, tot_comps])
    T = Ry.copy()
    Q = np.zeros([nresp, tot_comps])
    Ry_curr = np.zeros([n, max_comp])
    T_curr = Ry_curr.copy()
    Q_curr = np.zeros([nresp, max_comp])
    Cr_currB = []
    Y_currB = []
    # One deflated copy of Y per block, so later blocks see earlier
    # blocks' deflations.
    for b in range(nblock):
        Y_currB.append(Y.copy())
        Cr_currB.append(np.zeros([n, max_comp]))
    Y_curr = Y.copy()
    if pred:
        Y_pred = np.zeros([nval, nresp, tot_comps])
    # --------- Component extraction loop -------------
    for comp in range(1,tot_comps):
        cb = change_block[comp]
        comp_curr = n_comps[comp]
        Y_curr = Y_currB[cb]
        # Candidate score from the current block's kernel matrix
        t = C[cb] @ Y_curr
        if nresp > 1: # Multi-response
            usv = np.linalg.svd(Y_curr.T @ t)
            w = usv[0][:,0]
            t = t @ w
        else:
            t = t.flatten()
        if comp_curr > 1: # Orthogonalize on previous
            t = t - T_curr[:,:comp_curr-1] @ (T_curr[:,:comp_curr-1].T @ t)
        # Normalise the score vector
        t = t/np.sqrt(sum(t*t))
        if nresp > 1:
            ry = Y_curr @ w
        else:
            ry = Y_curr.copy().flatten()
        q = t.T @ Y_curr
        Y_curr = Y_curr - t[:,np.newaxis] @ q[:,np.newaxis].T # Deflation
        # Propagate the deflated response to this and all later blocks
        for b in range(cb,nblock):
            Y_currB[b] = Y_curr.copy()
        # Store t, q, ry
        T_curr[:, comp_curr-1] = t
        Q_curr[:, comp_curr-1] = q
        Ry_curr[:,comp_curr-1] = ry
        if not pred:
            T[:, comp] = t
            Q[:, comp] = q
            Ry[:,comp] = ry
        if pred:
            # Update "X_val*W" ~= C*Ry with current component
            Cr_currB[cb][:,comp_curr-1] = C[cb] @ ry
            Crval_currB[cb][:,comp_curr-1] = Cval[cb] @ ry
            if cb < nblock:
                for b in range(cb+1, nblock):
                    Cr_currB[b] = Cr_currB[cb].copy()
                    Crval_currB[b] = Crval_currB[cb].copy()
        # Perform prediction at the end of each "curr"-series
        # NOTE(review): `comp == tot_comps` is never true (comp < tot_comps),
        # and change_block[comp+1] indexes out of range on the last
        # iteration — likely off-by-one; confirm against the R original.
        if pred and (comp == tot_comps or change_block[comp+1] < nblock):
            comp_last_block = comp_list[comp, nblock-1] # Length of current series
            if comp_curr - comp_last_block == 0: # Compensate for first series starting at 0
                comp_last_block = comp_last_block - 1
            # XW(P'W)^-1, ie. WB without Q
            no_Q = Crval_currB[cb][:,:comp_curr] @ \
                np.linalg.inv(T_curr[:,:comp_curr].T @ Cr_currB[cb][:,:comp_curr])
            # Prediction per response
            # NOTE(review): np.cumsum without axis= flattens the array, yet
            # Yp_long is then indexed 2-D — verify this branch.
            for r in range(nresp):
                Yp_long = np.cumsum(no_Q * np.repeat(Q_curr[r,:comp_curr], nval, 0))
                Y_pred[:, r, comp-comp_last_block:comp] = Yp_long[:, comp_curr-comp_last_block:comp_curr]
    # -------------- End component extraction loop ---------------
    # If prediction, return predicted values
    if pred:
        Y_pred = Y_pred + Y_mean
        return (Y_pred, comp_list, change_block)
    else:
        # Otherwise, return decomposition
        return (Q, T, Ry, comp_list, change_block)
#####################
# SO-PLS prediction #
#####################
def SOPLS_predict_wide(SO, Xval, comps):
    """Predict responses for new, block-split data with a fitted SO-PLS model.

    Parameters
    ----------
    SO : SOPLS
        Fitted estimator exposing ``X``, ``Y``, ``decomp`` and ``comp_order``.
    Xval : list of ndarray
        New data, one matrix per block, columns matching the training blocks.
    comps : sequence of int
        Number of components per block to use.

    Returns
    -------
    Y_pred : ndarray, shape (n_val, n_resp, tot_comp)
        Cumulative predictions along the component path.
    """
    X = SO.X
    Y = SO.Y
    nval = Xval[0].shape[0]
    nblock = len(X)
    nresp = Y.shape[1]
    path, hits = pathComps(comps, SO.comp_order['comp_list'])
    tot_comp = len(hits)
    Y_pred = np.zeros([nval, nresp, tot_comp])
    Cr = 0
    Crval = 0
    for i in range(nblock):
        if comps[i] > 0:
            # Centre with the training-block mean, using local copies.
            # Fix: the original subtracted the mean from SO.X[i] and
            # Xval[i] in place, so every predict() call re-centred the
            # stored training blocks; this version leaves them untouched.
            X_mean = np.mean(X[i], axis=0)
            Xc = X[i] - X_mean
            Xvc = Xval[i] - X_mean
            Cr = Cr + (Xc @ Xc.T) @ SO.decomp['Ry'][:, hits]
            Crval = Crval + (Xvc @ Xc.T) @ SO.decomp['Ry'][:, hits]
    # XW(P'W)^-1, ie. WB without Q
    no_Q = Crval @ np.linalg.inv(SO.decomp['T'][:, hits].T @ Cr)
    # Prediction per response, cumulative over the component path
    for r in range(nresp):
        Yp_long = np.cumsum(no_Q * SO.decomp['Q'][r, hits], axis=1)
        Y_pred[:, r, :] = Yp_long
    # Add the response mean back onto the centred predictions
    Y_pred = Y_pred + np.mean(Y, axis=0)[np.newaxis, :, np.newaxis]
    return Y_pred
#############################################
# Create no-redundance sequence of component
def component_combos(nblock, ncomp, max_comps):
    """Enumerate all per-block component combinations within the budget.

    Builds every combination of component counts (0..ncomp[i] per block,
    one column per block, the last block in the leftmost column), keeps
    those whose total does not exceed ``max_comps``, and derives for each
    transition which block changes and which blocks are in use.

    Returns
    -------
    (comp_list, change_block, block_usage) : tuple
        comp_list : int array of surviving combinations (one row each);
        change_block : for each row, index of the first increasing count
        relative to the previous row (first entry is nblock - 1);
        block_usage : result of np.unique over the nonzero pattern of
        comp_list (unique patterns and the row -> pattern index map).
    """
    # Grow the grid block by block: each new block's counts are repeated
    # over the existing grid, which is tiled underneath.
    grid = np.array(list(range(ncomp[0] + 1)))[:, np.newaxis]
    for b in range(1, nblock):
        counts = list(range(ncomp[b] + 1))
        left_col = np.repeat(counts, grid.shape[0])[:, np.newaxis]
        grid = np.hstack([left_col, np.tile(grid, (ncomp[b] + 1, 1))])
    # Keep only combinations within the total component budget
    comp_list = grid[np.sum(grid, 1) <= max_comps]
    # For consecutive rows, locate the first block whose count grows
    deltas = np.diff(comp_list, axis=0)
    first_increase = lambda row: np.argmax(row > 0)
    change_block = np.hstack(
        [nblock - 1, np.apply_along_axis(first_increase, 1, deltas)])
    # Unique patterns of which blocks contribute at least one component
    block_usage = np.unique(comp_list != 0, axis=0, return_inverse=True)
    return (comp_list, change_block, block_usage)
########################
# Path through compList
def pathComps(comps, comp_list):
    """Trace the component path through ``comp_list`` ending at ``comps``.

    For each block b, generates the combinations where block b's count
    grows from 1 to comps[b] while earlier blocks sit at their final
    counts and later blocks at 0, then looks up each combination's row
    index in ``comp_list``.

    Parameters
    ----------
    comps : sequence of int
        Target number of components per block (len == n blocks).
    comp_list : ndarray
        Component-combination table, as produced by component_combos.

    Returns
    -------
    (mat, hits) : tuple
        mat : float array of the visited combinations, one per row;
        hits : list of their row indices in ``comp_list``.
    """
    nblock = len(comps)
    mat = np.zeros([0, nblock])
    for b in range(nblock):
        # Fix: dropped a dead `base = np.arange(...)` assignment that was
        # immediately overwritten, and removed redundant guards — the
        # loops below are simply empty when b == 0 or b == nblock - 1.
        base = [list(range(1, comps[b] + 1))]
        # Earlier blocks stay at their final component counts
        for c in range(b - 1, -1, -1):
            base.insert(0, [comps[c]] * comps[b])
        # Later blocks have no components yet
        for c in range(b + 1, nblock):
            base.append([0] * comps[b])
        mat = np.append(mat, np.array(base).T, axis=0)
    hits = []
    for i in range(mat.shape[0]):
        # Row index in comp_list where every block count matches
        hits.append(np.where(np.sum(mat[i] == comp_list, axis=1) == nblock)[0][0])
    return (mat, hits)
|
{"hexsha": "b58920dbada38e2cd214c9f01589c61f82391ffa", "size": 12029, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/legacy/resources/SOPLS.py", "max_stars_repo_name": "NMBU-Data-Science/multi-hoggorm", "max_stars_repo_head_hexsha": "a1de0044073c84845031b79a6a183748a0178ce8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/legacy/resources/SOPLS.py", "max_issues_repo_name": "NMBU-Data-Science/multi-hoggorm", "max_issues_repo_head_hexsha": "a1de0044073c84845031b79a6a183748a0178ce8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/legacy/resources/SOPLS.py", "max_forks_repo_name": "NMBU-Data-Science/multi-hoggorm", "max_forks_repo_head_hexsha": "a1de0044073c84845031b79a6a183748a0178ce8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4138888889, "max_line_length": 118, "alphanum_fraction": 0.5476764486, "include": true, "reason": "import numpy", "num_tokens": 3337}
|
# Raytracer.jl
# Raytracing for the generation of photorealistic images in Julia
# Copyright (c) 2021 Samuele Colombo, Paolo Galli
# Unit test file for world.jl
# Tests for the World container: a ray must hit the closest of the two
# spheres, and point-to-point visibility must respect occlusion.
@testset "World" begin
    @testset "RayIntersection" begin
        # Two unit spheres centred at x = 2 and x = 8
        world = World()
        sphere1 = Sphere(transformation=translation(VEC_X * 2f0))
        sphere2 = Sphere(transformation=translation(VEC_X * 8f0))
        push!(world, sphere1)
        push!(world, sphere2)
        # Ray from the origin along +x hits the near face of sphere1
        intersection1 = ray_intersection(Ray(Point(0f0, 0f0, 0f0), VEC_X), world)
        @test intersection1 !== nothing
        @test intersection1.world_point ≈ Point(1f0, 0f0, 0f0)
        # Ray from x = 10 along -x hits the far face of sphere2 first
        intersection2 = ray_intersection(Ray(Point(10f0, 0f0, 0f0), -VEC_X), world)
        @test intersection2 !== nothing
        @test intersection2.world_point ≈ Point(9f0, 0f0, 0f0)
    end
    @testset "QuickRayIntersection" begin
        # Same two-sphere layout, exercised through is_point_visible
        world = World()
        sphere1 = Sphere(transformation=translation(VEC_X * 2))
        sphere2 = Sphere(transformation=translation(VEC_X * 8))
        push!(world, sphere1)
        push!(world, sphere2)
        # Points behind a sphere (as seen from the observer) are hidden
        @test !is_point_visible(world, Point(10.0, 0.0, 0.0), ORIGIN)
        @test !is_point_visible(world, Point(5.0, 0.0, 0.0), ORIGIN)
        # No sphere between observer at x = 4 and point at x = 5
        @test is_point_visible(world, Point(5.0, 0.0, 0.0), Point(4.0, 0.0, 0.0))
        # Points in front of the first sphere, or off-axis, stay visible
        @test is_point_visible(world, Point(0.5, 0.0, 0.0), ORIGIN)
        @test is_point_visible(world, Point(0.0, 10.0, 0.0), ORIGIN)
        @test is_point_visible(world, Point(0.0, 0.0, 10.0), ORIGIN)
    end
end
|
{"hexsha": "070b2ed02ab46b3e38fb273c83464b90524bf1a2", "size": 1521, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/test_world.jl", "max_stars_repo_name": "Paolo97Gll/Raytracer.jl", "max_stars_repo_head_hexsha": "14abd7ae89e0adfc9b2b79d0ad516be40215dad3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-03-17T09:47:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-18T15:35:06.000Z", "max_issues_repo_path": "test/test_world.jl", "max_issues_repo_name": "Paolo97Gll/Raytracer.jl", "max_issues_repo_head_hexsha": "14abd7ae89e0adfc9b2b79d0ad516be40215dad3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 17, "max_issues_repo_issues_event_min_datetime": "2021-03-11T09:28:18.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-02T23:30:37.000Z", "max_forks_repo_path": "test/test_world.jl", "max_forks_repo_name": "Paolo97Gll/Raytracer.jl", "max_forks_repo_head_hexsha": "14abd7ae89e0adfc9b2b79d0ad516be40215dad3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.5681818182, "max_line_length": 83, "alphanum_fraction": 0.644312952, "num_tokens": 496}
|
(* Title: HOL/Algebra/Product_Groups.thy
Author: LC Paulson (ported from HOL Light)
*)
section \<open>Product and Sum Groups\<close>
theory Product_Groups
imports Elementary_Groups "HOL-Library.Equipollence"
begin
subsection \<open>Product of a Family of Groups\<close>
definition product_group:: "'a set \<Rightarrow> ('a \<Rightarrow> ('b, 'c) monoid_scheme) \<Rightarrow> ('a \<Rightarrow> 'b) monoid"
where "product_group I G \<equiv> \<lparr>carrier = (\<Pi>\<^sub>E i\<in>I. carrier (G i)),
monoid.mult = (\<lambda>x y. (\<lambda>i\<in>I. x i \<otimes>\<^bsub>G i\<^esub> y i)),
one = (\<lambda>i\<in>I. \<one>\<^bsub>G i\<^esub>)\<rparr>"
lemma carrier_product_group [simp]: "carrier(product_group I G) = (\<Pi>\<^sub>E i\<in>I. carrier (G i))"
by (simp add: product_group_def)
lemma one_product_group [simp]: "one(product_group I G) = (\<lambda>i\<in>I. one (G i))"
by (simp add: product_group_def)
lemma mult_product_group [simp]: "(\<otimes>\<^bsub>product_group I G\<^esub>) = (\<lambda>x y. \<lambda>i\<in>I. x i \<otimes>\<^bsub>G i\<^esub> y i)"
by (simp add: product_group_def)
(* A product of groups is a group; the inverse is taken pointwise. *)
lemma product_group [simp]:
  assumes "\<And>i. i \<in> I \<Longrightarrow> group (G i)" shows "group (product_group I G)"
proof (rule groupI; simp)
  show "(\<lambda>i. x i \<otimes>\<^bsub>G i\<^esub> y i) \<in> (\<Pi> i\<in>I. carrier (G i))"
    if "x \<in> (\<Pi>\<^sub>E i\<in>I. carrier (G i))" "y \<in> (\<Pi>\<^sub>E i\<in>I. carrier (G i))" for x y
    using that assms group.subgroup_self subgroup.m_closed by fastforce
  show "(\<lambda>i. \<one>\<^bsub>G i\<^esub>) \<in> (\<Pi> i\<in>I. carrier (G i))"
    by (simp add: assms group.is_monoid)
  show "(\<lambda>i\<in>I. (if i \<in> I then x i \<otimes>\<^bsub>G i\<^esub> y i else undefined) \<otimes>\<^bsub>G i\<^esub> z i) =
        (\<lambda>i\<in>I. x i \<otimes>\<^bsub>G i\<^esub> (if i \<in> I then y i \<otimes>\<^bsub>G i\<^esub> z i else undefined))"
    if "x \<in> (\<Pi>\<^sub>E i\<in>I. carrier (G i))" "y \<in> (\<Pi>\<^sub>E i\<in>I. carrier (G i))" "z \<in> (\<Pi>\<^sub>E i\<in>I. carrier (G i))" for x y z
    using that by (auto simp: PiE_iff assms group.is_monoid monoid.m_assoc intro: restrict_ext)
  show "(\<lambda>i\<in>I. (if i \<in> I then \<one>\<^bsub>G i\<^esub> else undefined) \<otimes>\<^bsub>G i\<^esub> x i) = x"
    if "x \<in> (\<Pi>\<^sub>E i\<in>I. carrier (G i))" for x
    using assms that by (fastforce simp: Group.group_def PiE_iff)
  show "\<exists>y\<in>\<Pi>\<^sub>E i\<in>I. carrier (G i). (\<lambda>i\<in>I. y i \<otimes>\<^bsub>G i\<^esub> x i) = (\<lambda>i\<in>I. \<one>\<^bsub>G i\<^esub>)"
    if "x \<in> (\<Pi>\<^sub>E i\<in>I. carrier (G i))" for x
    by (rule_tac x="\<lambda>i\<in>I. inv\<^bsub>G i\<^esub> x i" in bexI) (use assms that in \<open>auto simp: PiE_iff group.l_inv\<close>)
qed

(* The inverse in the product group is the family of pointwise inverses. *)
lemma inv_product_group [simp]:
  assumes "f \<in> (\<Pi>\<^sub>E i\<in>I. carrier (G i))" "\<And>i. i \<in> I \<Longrightarrow> group (G i)"
  shows "inv\<^bsub>product_group I G\<^esub> f = (\<lambda>i\<in>I. inv\<^bsub>G i\<^esub> f i)"
proof (rule group.inv_equality)
  show "Group.group (product_group I G)"
    by (simp add: assms)
  show "(\<lambda>i\<in>I. inv\<^bsub>G i\<^esub> f i) \<otimes>\<^bsub>product_group I G\<^esub> f = \<one>\<^bsub>product_group I G\<^esub>"
    using assms by (auto simp: PiE_iff group.l_inv)
  show "f \<in> carrier (product_group I G)"
    using assms by simp
  show "(\<lambda>i\<in>I. inv\<^bsub>G i\<^esub> f i) \<in> carrier (product_group I G)"
    using PiE_mem assms by fastforce
qed
(* The product is trivial iff every factor is trivial. *)
lemma trivial_product_group: "trivial_group(product_group I G) \<longleftrightarrow> (\<forall>i \<in> I. trivial_group(G i))"
  (is "?lhs = ?rhs")
proof
  assume L: ?lhs
  then have "inv\<^bsub>product_group I G\<^esub> (\<lambda>a\<in>I. \<one>\<^bsub>G a\<^esub>) = \<one>\<^bsub>product_group I G\<^esub>"
    by (metis group.is_monoid monoid.inv_one one_product_group trivial_group_def)
  have [simp]: "\<one>\<^bsub>G i\<^esub> \<otimes>\<^bsub>G i\<^esub> \<one>\<^bsub>G i\<^esub> = \<one>\<^bsub>G i\<^esub>" if "i \<in> I" for i
    unfolding trivial_group_def
  proof -
    have 1: "(\<lambda>a\<in>I. \<one>\<^bsub>G a\<^esub>) i = \<one>\<^bsub>G i\<^esub>"
      by (simp add: that)
    have "(\<lambda>a\<in>I. \<one>\<^bsub>G a\<^esub>) = (\<lambda>a\<in>I. \<one>\<^bsub>G a\<^esub>) \<otimes>\<^bsub>product_group I G\<^esub> (\<lambda>a\<in>I. \<one>\<^bsub>G a\<^esub>)"
      by (metis (no_types) L group.is_monoid monoid.l_one one_product_group singletonI trivial_group_def)
    then show ?thesis
      using 1 by (simp add: that)
  qed
  show ?rhs
    using L
    by (auto simp: trivial_group_def product_group_def PiE_eq_singleton intro: groupI)
next
  assume ?rhs
  then show ?lhs
    by (simp add: PiE_eq_singleton trivial_group_def)
qed
(* An extensional product of subsets is a subgroup of the product group
   iff each factor is a subgroup of the corresponding group. *)
lemma PiE_subgroup_product_group:
  assumes "\<And>i. i \<in> I \<Longrightarrow> group (G i)"
  shows "subgroup (PiE I H) (product_group I G) \<longleftrightarrow> (\<forall>i \<in> I. subgroup (H i) (G i))"
  (is "?lhs = ?rhs")
proof
  assume L: ?lhs
  then have [simp]: "PiE I H \<noteq> {}"
    using subgroup_nonempty by force
  show ?rhs
  proof (clarify; unfold_locales)
    show sub: "H i \<subseteq> carrier (G i)" if "i \<in> I" for i
      using that L by (simp add: subgroup_def) (metis (no_types, lifting) L subgroup_nonempty subset_PiE)
    show "x \<otimes>\<^bsub>G i\<^esub> y \<in> H i" if "i \<in> I" "x \<in> H i" "y \<in> H i" for i x y
    proof -
      have *: "\<And>x. x \<in> Pi\<^sub>E I H \<Longrightarrow> (\<forall>y \<in> Pi\<^sub>E I H. \<forall>i\<in>I. x i \<otimes>\<^bsub>G i\<^esub> y i \<in> H i)"
        using L by (auto simp: subgroup_def Pi_iff)
      have "\<forall>y\<in>H i. f i \<otimes>\<^bsub>G i\<^esub> y \<in> H i" if f: "f \<in> Pi\<^sub>E I H" and "i \<in> I" for i f
        using * [OF f] \<open>i \<in> I\<close>
        by (subst(asm) all_PiE_elements) auto
      then have "\<forall>f \<in> Pi\<^sub>E I H. \<forall>i \<in> I. \<forall>y\<in>H i. f i \<otimes>\<^bsub>G i\<^esub> y \<in> H i"
        by blast
      with that show ?thesis
        by (subst(asm) all_PiE_elements) auto
    qed
    show "\<one>\<^bsub>G i\<^esub> \<in> H i" if "i \<in> I" for i
      using L subgroup.one_closed that by fastforce
    show "inv\<^bsub>G i\<^esub> x \<in> H i" if "i \<in> I" and x: "x \<in> H i" for i x
    proof -
      have *: "\<forall>y \<in> Pi\<^sub>E I H. \<forall>i\<in>I. inv\<^bsub>G i\<^esub> y i \<in> H i"
      proof
        fix y
        assume y: "y \<in> Pi\<^sub>E I H"
        then have yc: "y \<in> carrier (product_group I G)"
          by (metis (no_types) L subgroup_def subsetCE)
        have "inv\<^bsub>product_group I G\<^esub> y \<in> Pi\<^sub>E I H"
          by (simp add: y L subgroup.m_inv_closed)
        moreover have "inv\<^bsub>product_group I G\<^esub> y = (\<lambda>i\<in>I. inv\<^bsub>G i\<^esub> y i)"
          using yc by (simp add: assms)
        ultimately show "\<forall>i\<in>I. inv\<^bsub>G i\<^esub> y i \<in> H i"
          by auto
      qed
      then have "\<forall>i\<in>I. \<forall>x\<in>H i. inv\<^bsub>G i\<^esub> x \<in> H i"
        by (subst(asm) all_PiE_elements) auto
      then show ?thesis
        using that(1) x by blast
    qed
  qed
next
  assume R: ?rhs
  show ?lhs
  proof
    show "Pi\<^sub>E I H \<subseteq> carrier (product_group I G)"
      using R by (force simp: subgroup_def)
    show "x \<otimes>\<^bsub>product_group I G\<^esub> y \<in> Pi\<^sub>E I H" if "x \<in> Pi\<^sub>E I H" "y \<in> Pi\<^sub>E I H" for x y
      using R that by (auto simp: PiE_iff subgroup_def)
    show "\<one>\<^bsub>product_group I G\<^esub> \<in> Pi\<^sub>E I H"
      using R by (force simp: subgroup_def)
    show "inv\<^bsub>product_group I G\<^esub> x \<in> Pi\<^sub>E I H" if "x \<in> Pi\<^sub>E I H" for x
    proof -
      have x: "x \<in> (\<Pi>\<^sub>E i\<in>I. carrier (G i))"
        using R that by (force simp: subgroup_def)
      show ?thesis
        using assms R that by (fastforce simp: x assms subgroup_def)
    qed
  qed
qed
(* Generating a subgroup commutes with taking products, and finiteness of
   the product carrier is characterised factorwise. *)
lemma product_group_subgroup_generated:
  assumes "\<And>i. i \<in> I \<Longrightarrow> subgroup (H i) (G i)" and gp: "\<And>i. i \<in> I \<Longrightarrow> group (G i)"
  shows "product_group I (\<lambda>i. subgroup_generated (G i) (H i))
       = subgroup_generated (product_group I G) (PiE I H)"
proof (rule monoid.equality)
  have [simp]: "\<And>i. i \<in> I \<Longrightarrow> carrier (G i) \<inter> H i = H i" "(\<Pi>\<^sub>E i\<in>I. carrier (G i)) \<inter> Pi\<^sub>E I H = Pi\<^sub>E I H"
    using assms by (force simp: subgroup_def)+
  have "(\<Pi>\<^sub>E i\<in>I. generate (G i) (H i)) = generate (product_group I G) (Pi\<^sub>E I H)"
  proof (rule group.generateI)
    show "Group.group (product_group I G)"
      using assms by simp
    show "subgroup (\<Pi>\<^sub>E i\<in>I. generate (G i) (H i)) (product_group I G)"
      using assms by (simp add: PiE_subgroup_product_group group.generate_is_subgroup subgroup.subset)
    show "Pi\<^sub>E I H \<subseteq> (\<Pi>\<^sub>E i\<in>I. generate (G i) (H i))"
      using assms by (auto simp: PiE_iff generate.incl)
    show "(\<Pi>\<^sub>E i\<in>I. generate (G i) (H i)) \<subseteq> K"
      if "subgroup K (product_group I G)" "Pi\<^sub>E I H \<subseteq> K" for K
      using assms that group.generate_subgroup_incl by fastforce
  qed
  with assms
  show "carrier (product_group I (\<lambda>i. subgroup_generated (G i) (H i))) =
        carrier (subgroup_generated (product_group I G) (Pi\<^sub>E I H))"
    by (simp add: carrier_subgroup_generated cong: PiE_cong)
qed auto

lemma finite_product_group:
  assumes "\<And>i. i \<in> I \<Longrightarrow> group (G i)"
  shows
   "finite (carrier (product_group I G)) \<longleftrightarrow>
    finite {i. i \<in> I \<and> ~ trivial_group(G i)} \<and> (\<forall>i \<in> I. finite(carrier(G i)))"
proof -
  have [simp]: "\<And>i. i \<in> I \<Longrightarrow> carrier (G i) \<noteq> {}"
    using assms group.is_monoid by blast
  show ?thesis
    by (auto simp: finite_PiE_iff PiE_eq_empty_iff group.trivial_group_alt [OF assms] cong: Collect_cong conj_cong)
qed
subsection \<open>Sum of a Family of Groups\<close>
definition sum_group :: "'a set \<Rightarrow> ('a \<Rightarrow> ('b, 'c) monoid_scheme) \<Rightarrow> ('a \<Rightarrow> 'b) monoid"
where "sum_group I G \<equiv>
subgroup_generated
(product_group I G)
{x \<in> \<Pi>\<^sub>E i\<in>I. carrier (G i). finite {i \<in> I. x i \<noteq> \<one>\<^bsub>G i\<^esub>}}"
lemma subgroup_sum_group:
assumes "\<And>i. i \<in> I \<Longrightarrow> group (G i)"
shows "subgroup {x \<in> \<Pi>\<^sub>E i\<in>I. carrier (G i). finite {i \<in> I. x i \<noteq> \<one>\<^bsub>G i\<^esub>}}
(product_group I G)"
proof unfold_locales
fix x y
have *: "{i. (i \<in> I \<longrightarrow> x i \<otimes>\<^bsub>G i\<^esub> y i \<noteq> \<one>\<^bsub>G i\<^esub>) \<and> i \<in> I}
\<subseteq> {i \<in> I. x i \<noteq> \<one>\<^bsub>G i\<^esub>} \<union> {i \<in> I. y i \<noteq> \<one>\<^bsub>G i\<^esub>}"
by (auto simp: Group.group_def dest: assms)
assume
"x \<in> {x \<in> \<Pi>\<^sub>E i\<in>I. carrier (G i). finite {i \<in> I. x i \<noteq> \<one>\<^bsub>G i\<^esub>}}"
"y \<in> {x \<in> \<Pi>\<^sub>E i\<in>I. carrier (G i). finite {i \<in> I. x i \<noteq> \<one>\<^bsub>G i\<^esub>}}"
then
show "x \<otimes>\<^bsub>product_group I G\<^esub> y \<in> {x \<in> \<Pi>\<^sub>E i\<in>I. carrier (G i). finite {i \<in> I. x i \<noteq> \<one>\<^bsub>G i\<^esub>}}"
using assms
apply (auto simp: Group.group_def monoid.m_closed PiE_iff)
apply (rule finite_subset [OF *])
by blast
next
fix x
assume "x \<in> {x \<in> \<Pi>\<^sub>E i\<in>I. carrier (G i). finite {i \<in> I. x i \<noteq> \<one>\<^bsub>G i\<^esub>}}"
then show "inv\<^bsub>product_group I G\<^esub> x \<in> {x \<in> \<Pi>\<^sub>E i\<in>I. carrier (G i). finite {i \<in> I. x i \<noteq> \<one>\<^bsub>G i\<^esub>}}"
using assms
by (auto simp: PiE_iff assms group.inv_eq_1_iff [OF assms] conj_commute cong: rev_conj_cong)
qed (use assms [unfolded Group.group_def] in auto)
lemma carrier_sum_group:
assumes "\<And>i. i \<in> I \<Longrightarrow> group (G i)"
shows "carrier(sum_group I G) = {x \<in> \<Pi>\<^sub>E i\<in>I. carrier (G i). finite {i \<in> I. x i \<noteq> \<one>\<^bsub>G i\<^esub>}}"
proof -
interpret SG: subgroup "{x \<in> \<Pi>\<^sub>E i\<in>I. carrier (G i). finite {i \<in> I. x i \<noteq> \<one>\<^bsub>G i\<^esub>}}" "(product_group I G)"
by (simp add: assms subgroup_sum_group)
show ?thesis
by (simp add: sum_group_def subgroup_sum_group carrier_subgroup_generated_alt)
qed
lemma one_sum_group [simp]: "\<one>\<^bsub>sum_group I G\<^esub> = (\<lambda>i\<in>I. \<one>\<^bsub>G i\<^esub>)"
by (simp add: sum_group_def)
lemma mult_sum_group [simp]: "(\<otimes>\<^bsub>sum_group I G\<^esub>) = (\<lambda>x y. (\<lambda>i\<in>I. x i \<otimes>\<^bsub>G i\<^esub> y i))"
by (auto simp: sum_group_def)
(* The direct sum is a group, with pointwise inverse on the support. *)
lemma sum_group [simp]:
  assumes "\<And>i. i \<in> I \<Longrightarrow> group (G i)" shows "group (sum_group I G)"
proof (rule groupI)
  note group.is_monoid [OF assms, simp]
  show "x \<otimes>\<^bsub>sum_group I G\<^esub> y \<in> carrier (sum_group I G)"
    if "x \<in> carrier (sum_group I G)" and
       "y \<in> carrier (sum_group I G)" for x y
  proof -
    have *: "{i \<in> I. x i \<otimes>\<^bsub>G i\<^esub> y i \<noteq> \<one>\<^bsub>G i\<^esub>} \<subseteq> {i \<in> I. x i \<noteq> \<one>\<^bsub>G i\<^esub>} \<union> {i \<in> I. y i \<noteq> \<one>\<^bsub>G i\<^esub>}"
      by auto
    show ?thesis
      using that
      apply (simp add: assms carrier_sum_group PiE_iff monoid.m_closed conj_commute cong: rev_conj_cong)
      apply (blast intro: finite_subset [OF *])
      done
  qed
  show "\<one>\<^bsub>sum_group I G\<^esub> \<otimes>\<^bsub>sum_group I G\<^esub> x = x"
    if "x \<in> carrier (sum_group I G)" for x
    using that by (auto simp: assms carrier_sum_group PiE_iff extensional_def)
  show "\<exists>y\<in>carrier (sum_group I G). y \<otimes>\<^bsub>sum_group I G\<^esub> x = \<one>\<^bsub>sum_group I G\<^esub>"
    if "x \<in> carrier (sum_group I G)" for x
  proof
    let ?y = "\<lambda>i\<in>I. m_inv (G i) (x i)"
    show "?y \<otimes>\<^bsub>sum_group I G\<^esub> x = \<one>\<^bsub>sum_group I G\<^esub>"
      using that assms
      by (auto simp: carrier_sum_group PiE_iff group.l_inv)
    show "?y \<in> carrier (sum_group I G)"
      using that assms
      by (auto simp: carrier_sum_group PiE_iff group.inv_eq_1_iff group.l_inv cong: conj_cong)
  qed
qed (auto simp: assms carrier_sum_group PiE_iff group.is_monoid monoid.m_assoc)

lemma inv_sum_group [simp]:
  assumes "\<And>i. i \<in> I \<Longrightarrow> group (G i)" and x: "x \<in> carrier (sum_group I G)"
  shows "m_inv (sum_group I G) x = (\<lambda>i\<in>I. m_inv (G i) (x i))"
proof (rule group.inv_equality)
  show "(\<lambda>i\<in>I. inv\<^bsub>G i\<^esub> x i) \<otimes>\<^bsub>sum_group I G\<^esub> x = \<one>\<^bsub>sum_group I G\<^esub>"
    using x by (auto simp: carrier_sum_group PiE_iff group.l_inv assms intro: restrict_ext)
  show "(\<lambda>i\<in>I. inv\<^bsub>G i\<^esub> x i) \<in> carrier (sum_group I G)"
    using x by (simp add: carrier_sum_group PiE_iff group.inv_eq_1_iff assms conj_commute cong: rev_conj_cong)
qed (auto simp: assms)
thm group.subgroups_Inter (*REPLACE*)
(* Intersections of subgroups are subgroups (generalising the library's
   locale-bound versions, per the REPLACE markers). *)
theorem subgroup_Inter:
  assumes subgr: "(\<And>H. H \<in> A \<Longrightarrow> subgroup H G)"
    and not_empty: "A \<noteq> {}"
  shows "subgroup (\<Inter>A) G"
proof
  show "\<Inter> A \<subseteq> carrier G"
    by (simp add: Inf_less_eq not_empty subgr subgroup.subset)
qed (auto simp: subgr subgroup.m_closed subgroup.one_closed subgroup.m_inv_closed)

thm group.subgroups_Inter_pair (*REPLACE*)
lemma subgroup_Int:
  assumes "subgroup I G" "subgroup J G"
  shows "subgroup (I \<inter> J) G" using subgroup_Inter[ where ?A = "{I,J}"] assms by auto

(* Generating a subgroup commutes with taking direct sums. *)
lemma sum_group_subgroup_generated:
  assumes "\<And>i. i \<in> I \<Longrightarrow> group (G i)" and sg: "\<And>i. i \<in> I \<Longrightarrow> subgroup (H i) (G i)"
  shows "sum_group I (\<lambda>i. subgroup_generated (G i) (H i)) = subgroup_generated (sum_group I G) (PiE I H)"
proof (rule monoid.equality)
  have "subgroup (carrier (sum_group I G) \<inter> Pi\<^sub>E I H) (product_group I G)"
    by (rule subgroup_Int) (auto simp: assms carrier_sum_group subgroup_sum_group PiE_subgroup_product_group)
  moreover have "carrier (sum_group I G) \<inter> Pi\<^sub>E I H
                 \<subseteq> carrier (subgroup_generated (product_group I G)
                              {x \<in> \<Pi>\<^sub>E i\<in>I. carrier (G i). finite {i \<in> I. x i \<noteq> \<one>\<^bsub>G i\<^esub>}})"
    by (simp add: assms subgroup_sum_group subgroup.carrier_subgroup_generated_subgroup carrier_sum_group)
  ultimately
  have "subgroup (carrier (sum_group I G) \<inter> Pi\<^sub>E I H) (sum_group I G)"
    by (simp add: assms sum_group_def group.subgroup_subgroup_generated_iff)
  then have *: "{f \<in> \<Pi>\<^sub>E i\<in>I. carrier (subgroup_generated (G i) (H i)). finite {i \<in> I. f i \<noteq> \<one>\<^bsub>G i\<^esub>}}
              = carrier (subgroup_generated (sum_group I G) (carrier (sum_group I G) \<inter> Pi\<^sub>E I H))"
    apply (simp only: subgroup.carrier_subgroup_generated_subgroup)
    using subgroup.subset [OF sg]
    apply (auto simp: set_eq_iff PiE_def Pi_def assms carrier_sum_group subgroup.carrier_subgroup_generated_subgroup)
    done
  then show "carrier (sum_group I (\<lambda>i. subgroup_generated (G i) (H i))) =
             carrier (subgroup_generated (sum_group I G) (Pi\<^sub>E I H))"
    by simp (simp add: assms group.subgroupE(1) group.group_subgroup_generated carrier_sum_group)
qed (auto simp: sum_group_def subgroup_generated_def)
(* Factorwise isomorphisms assemble into an isomorphism of the products:
   the witness maps a family x to (\<lambda>i\<in>I. f i (x i)). *)
lemma iso_product_groupI:
  assumes iso: "\<And>i. i \<in> I \<Longrightarrow> G i \<cong> H i"
    and G: "\<And>i. i \<in> I \<Longrightarrow> group (G i)" and H: "\<And>i. i \<in> I \<Longrightarrow> group (H i)"
  shows "product_group I G \<cong> product_group I H" (is "?IG \<cong> ?IH")
proof -
  have "\<And>i. i \<in> I \<Longrightarrow> \<exists>h. h \<in> iso (G i) (H i)"
    using iso by (auto simp: is_iso_def)
  then obtain f where f: "\<And>i. i \<in> I \<Longrightarrow> f i \<in> iso (G i) (H i)"
    by metis
  define h where "h \<equiv> \<lambda>x. (\<lambda>i\<in>I. f i (x i))"
  have hom: "h \<in> iso ?IG ?IH"
  proof (rule isoI)
    show hom: "h \<in> hom ?IG ?IH"
    proof (rule homI)
      fix x
      assume "x \<in> carrier ?IG"
      with f show "h x \<in> carrier ?IH"
        using PiE by (fastforce simp add: h_def PiE_def iso_def hom_def)
    next
      fix x y
      assume "x \<in> carrier ?IG" "y \<in> carrier ?IG"
      with f show "h (x \<otimes>\<^bsub>?IG\<^esub> y) = h x \<otimes>\<^bsub>?IH\<^esub> h y"
        apply (simp add: h_def PiE_def iso_def hom_def)
        using PiE by (fastforce simp add: h_def PiE_def iso_def hom_def intro: restrict_ext)
    qed
    with G H interpret GH : group_hom "?IG" "?IH" h
      by (simp add: group_hom_def group_hom_axioms_def)
    show "bij_betw h (carrier ?IG) (carrier ?IH)"
      unfolding bij_betw_def
    proof (intro conjI subset_antisym)
      have "\<gamma> i = \<one>\<^bsub>G i\<^esub>"
        if \<gamma>: "\<gamma> \<in> (\<Pi>\<^sub>E i\<in>I. carrier (G i))" and eq: "(\<lambda>i\<in>I. f i (\<gamma> i)) = (\<lambda>i\<in>I. \<one>\<^bsub>H i\<^esub>)" and "i \<in> I"
        for \<gamma> i
      proof -
        have "inj_on (f i) (carrier (G i))" "f i \<in> hom (G i) (H i)"
          using \<open>i \<in> I\<close> f by (auto simp: iso_def bij_betw_def)
        then have *: "\<And>x. \<lbrakk>f i x = \<one>\<^bsub>H i\<^esub>; x \<in> carrier (G i)\<rbrakk> \<Longrightarrow> x = \<one>\<^bsub>G i\<^esub>"
          by (metis G Group.group_def H hom_one inj_onD monoid.one_closed \<open>i \<in> I\<close>)
        show ?thesis
          using eq \<open>i \<in> I\<close> * \<gamma> by (simp add: fun_eq_iff) (meson PiE_iff)
      qed
      then show "inj_on h (carrier ?IG)"
        apply (simp add: iso_def bij_betw_def GH.inj_on_one_iff flip: carrier_product_group)
        apply (force simp: h_def)
        done
    next
      show "h ` carrier ?IG \<subseteq> carrier ?IH"
        unfolding h_def using f
        by (force simp: PiE_def Pi_def Group.iso_def dest!: bij_betwE)
    next
      show "carrier ?IH \<subseteq> h ` carrier ?IG"
        unfolding h_def
      proof (clarsimp simp: iso_def bij_betw_def)
        fix x
        assume "x \<in> (\<Pi>\<^sub>E i\<in>I. carrier (H i))"
        with f have x: "x \<in> (\<Pi>\<^sub>E i\<in>I. f i ` carrier (G i))"
          unfolding h_def by (auto simp: iso_def bij_betw_def)
        have "\<And>i. i \<in> I \<Longrightarrow> inj_on (f i) (carrier (G i))"
          using f by (auto simp: iso_def bij_betw_def)
        let ?g = "\<lambda>i\<in>I. inv_into (carrier (G i)) (f i) (x i)"
        show "x \<in> (\<lambda>g. \<lambda>i\<in>I. f i (g i)) ` (\<Pi>\<^sub>E i\<in>I. carrier (G i))"
        proof
          show "x = (\<lambda>i\<in>I. f i (?g i))"
            using x by (auto simp: PiE_iff fun_eq_iff extensional_def f_inv_into_f)
          show "?g \<in> (\<Pi>\<^sub>E i\<in>I. carrier (G i))"
            using x by (auto simp: PiE_iff inv_into_into)
        qed
      qed
    qed
  qed
  then show ?thesis
    using is_iso_def by auto
qed
(* Analogue of iso_product_groupI for sum groups (restricted direct products):
   componentwise isomorphisms lift to the sums.  Beyond the product case we
   must track the side condition that only finitely many components differ
   from the identity. *)
lemma iso_sum_groupI:
  assumes iso: "\<And>i. i \<in> I \<Longrightarrow> G i \<cong> H i"
    and G: "\<And>i. i \<in> I \<Longrightarrow> group (G i)" and H: "\<And>i. i \<in> I \<Longrightarrow> group (H i)"
  shows "sum_group I G \<cong> sum_group I H" (is "?IG \<cong> ?IH")
proof -
  (* pick one concrete isomorphism f i for every index i *)
  have "\<And>i. i \<in> I \<Longrightarrow> \<exists>h. h \<in> iso (G i) (H i)"
    using iso by (auto simp: is_iso_def)
  then obtain f where f: "\<And>i. i \<in> I \<Longrightarrow> f i \<in> iso (G i) (H i)"
    by metis
  then have injf: "inj_on (f i) (carrier (G i))"
    and homf: "f i \<in> hom (G i) (H i)" if "i \<in> I" for i
    using \<open>i \<in> I\<close> f by (auto simp: iso_def bij_betw_def)
  (* each f i reflects the identity element *)
  then have one: "\<And>x. \<lbrakk>f i x = \<one>\<^bsub>H i\<^esub>; x \<in> carrier (G i)\<rbrakk> \<Longrightarrow> x = \<one>\<^bsub>G i\<^esub>" if "i \<in> I" for i
    by (metis G H group.subgroup_self hom_one inj_on_eq_iff subgroup.one_closed that)
  (* finite support is preserved when mapping componentwise through f *)
  have fin1: "finite {i \<in> I. x i \<noteq> \<one>\<^bsub>G i\<^esub>} \<Longrightarrow> finite {i \<in> I. f i (x i) \<noteq> \<one>\<^bsub>H i\<^esub>}" for x
    using homf by (auto simp: G H hom_one elim!: rev_finite_subset)
  (* the candidate isomorphism: componentwise application, extensional on I *)
  define h where "h \<equiv> \<lambda>x. (\<lambda>i\<in>I. f i (x i))"
  have hom: "h \<in> iso ?IG ?IH"
  proof (rule isoI)
    show hom: "h \<in> hom ?IG ?IH"
    proof (rule homI)
      fix x
      assume "x \<in> carrier ?IG"
      with f fin1 show "h x \<in> carrier ?IH"
        by (force simp: h_def PiE_def iso_def hom_def carrier_sum_group assms conj_commute cong: conj_cong)
    next
      fix x y
      assume "x \<in> carrier ?IG" "y \<in> carrier ?IG"
      with homf show "h (x \<otimes>\<^bsub>?IG\<^esub> y) = h x \<otimes>\<^bsub>?IH\<^esub> h y"
        by (fastforce simp add: h_def PiE_def hom_def carrier_sum_group assms intro: restrict_ext)
    qed
    with G H interpret GH : group_hom "?IG" "?IH" h
      by (simp add: group_hom_def group_hom_axioms_def)
    show "bij_betw h (carrier ?IG) (carrier ?IH)"
      unfolding bij_betw_def
    proof (intro conjI subset_antisym)
      (* trivial kernel: h \<gamma> = identity family forces \<gamma> identity componentwise *)
      have \<gamma>: "\<gamma> i = \<one>\<^bsub>G i\<^esub>"
        if "\<gamma> \<in> (\<Pi>\<^sub>E i\<in>I. carrier (G i))" and eq: "(\<lambda>i\<in>I. f i (\<gamma> i)) = (\<lambda>i\<in>I. \<one>\<^bsub>H i\<^esub>)" and "i \<in> I"
        for \<gamma> i
        using \<open>i \<in> I\<close> one that by (simp add: fun_eq_iff) (meson PiE_iff)
      show "inj_on h (carrier ?IG)"
        apply (simp add: iso_def bij_betw_def GH.inj_on_one_iff assms one flip: carrier_sum_group)
        apply (auto simp: h_def fun_eq_iff carrier_sum_group assms PiE_def Pi_def extensional_def one)
        done
    next
      show "h ` carrier ?IG \<subseteq> carrier ?IH"
        using homf GH.hom_closed
        by (fastforce simp: h_def PiE_def Pi_def dest!: bij_betwE)
    next
      (* surjectivity: invert componentwise with inv_into; the preimage also
         has finite support *)
      show "carrier ?IH \<subseteq> h ` carrier ?IG"
        unfolding h_def
      proof (clarsimp simp: iso_def bij_betw_def carrier_sum_group assms)
        fix x
        assume x: "x \<in> (\<Pi>\<^sub>E i\<in>I. carrier (H i))" and fin: "finite {i \<in> I. x i \<noteq> \<one>\<^bsub>H i\<^esub>}"
        with f have xf: "x \<in> (\<Pi>\<^sub>E i\<in>I. f i ` carrier (G i))"
          unfolding h_def
          by (auto simp: iso_def bij_betw_def)
        have "\<And>i. i \<in> I \<Longrightarrow> inj_on (f i) (carrier (G i))"
          using f by (auto simp: iso_def bij_betw_def)
        let ?g = "\<lambda>i\<in>I. inv_into (carrier (G i)) (f i) (x i)"
        show "x \<in> (\<lambda>g. \<lambda>i\<in>I. f i (g i))
                  ` {x \<in> \<Pi>\<^sub>E i\<in>I. carrier (G i). finite {i \<in> I. x i \<noteq> \<one>\<^bsub>G i\<^esub>}}"
        proof
          show xeq: "x = (\<lambda>i\<in>I. f i (?g i))"
            using x by (clarsimp simp: PiE_iff fun_eq_iff extensional_def) (metis iso_iff f_inv_into_f f)
          have "finite {i \<in> I. inv_into (carrier (G i)) (f i) (x i) \<noteq> \<one>\<^bsub>G i\<^esub>}"
            apply (rule finite_subset [OF _ fin])
            using G H group.subgroup_self hom_one homf injf inv_into_f_eq subgroup.one_closed by fastforce
          with x show "?g \<in> {x \<in> \<Pi>\<^sub>E i\<in>I. carrier (G i). finite {i \<in> I. x i \<noteq> \<one>\<^bsub>G i\<^esub>}}"
            apply (auto simp: PiE_iff inv_into_into conj_commute cong: conj_cong)
            by (metis (no_types, opaque_lifting) iso_iff f inv_into_into)
        qed
      qed
    qed
  qed
  then show ?thesis
    using is_iso_def by auto
qed
end
|
{"author": "seL4", "repo": "isabelle", "sha": "e1ab32a3bb41728cd19541063283e37919978a4c", "save_path": "github-repos/isabelle/seL4-isabelle", "path": "github-repos/isabelle/seL4-isabelle/isabelle-e1ab32a3bb41728cd19541063283e37919978a4c/src/HOL/Algebra/Product_Groups.thy"}
|
(**
CoLoR, a Coq library on rewriting and termination.
See the COPYRIGHTS and LICENSE files.
- Frederic Blanqui, 2008-02-22, 2009-10-20 (rpo)
convert CoLoR terms into Coccinelle terms
*)
Set Implicit Arguments.
From CoLoR Require Import LogicUtil ATerm VecUtil.
From CoLoR Require VecUtil more_list APosition AContext ordered_set.
From Coq Require Inverse_Image Max.
(***********************************************************************)
(** convert a CoLoR signature into a Coccinelle signature *)
From CoLoR Require Import term_spec EqUtil.
(* Wrap a CoLoR signature as a Coccinelle [Signature]: symbols form a
   decidable set (via [beq_symb]) and every symbol is given a [Free] arity. *)
Module Make_Signature (Import S : SIG) <: Signature.

  Module Symb <: decidable_set.S.

    Definition A := symbol Sig.

    Definition eq_bool := @beq_symb Sig.

    (* Boolean symbol equality agrees with propositional equality. *)
    Lemma eq_bool_ok : forall a1 a2,
      match eq_bool a1 a2 with true => a1 = a2 | false => ~ a1 = a2 end.

    Proof.
      intros a1 a2. unfold eq_bool. case_beq_symb Sig a1 a2. refl.
      rewrite <- (beq_ko (@beq_symb_ok Sig)). hyp.
    Qed.

  End Symb.

  (* Coccinelle arities: each CoLoR symbol keeps its numeric arity, as [Free]. *)
  Definition arity (f : Sig) := Free (arity f).

End Make_Signature.
(***********************************************************************)
(** convert CoLoR variables to Coccinelle variables *)
From CoLoR Require Import NatUtil.
(* CoLoR variables are natural numbers; package them as a Coccinelle
   decidable set using boolean equality on [nat]. *)
Module Var <: decidable_set.S.

  Definition A := nat.

  Definition eq_bool := beq_nat.

  (* Boolean equality on variables agrees with propositional equality. *)
  Lemma eq_bool_ok : forall a1 a2,
    match eq_bool a1 a2 with true => a1 = a2 | false => ~ a1 = a2 end.

  Proof.
    intros a1 a2. unfold eq_bool. case_beq_nat a1 a2. refl.
    rewrite <- (beq_ko beq_nat_ok). hyp.
  Qed.

End Var.
(***********************************************************************)
(** convert CoLoR terms into Coccinelle terms *)
From Coq Require Import List Relations.
From CoLoR Require Import term SN ASubstitution.
(* Translate CoLoR terms into Coccinelle terms, and prove that the
   translation commutes with substitution and context filling. *)
Module Make_Term (Import S : SIG) <: Term.

  Notation aterm := (term Sig). Notation aterms := (vector aterm).
  Notation AVar := ATerm.Var.

  Module Sig := Make_Signature S.

  Include (term.Make' Sig Var).

  (* Translation of a single term.  The nested [fix] over the argument
     vector is required so the definition is structurally recursive. *)
  Fixpoint term_of_aterm (t : aterm) :=
    match t with
      | AVar x => Var x
      | Fun f ts =>
        let fix terms_of_aterms n (ts : aterms n) :=
          match ts with
            | Vnil => nil
            | Vcons u us => term_of_aterm u :: terms_of_aterms _ us
          end in Term f (terms_of_aterms (arity f) ts)
    end.

  (* Stand-alone translation of an argument vector into a list. *)
  Fixpoint terms_of_aterms n (ts : aterms n) :=
    match ts with
      | Vnil => nil
      | Vcons u us => term_of_aterm u :: terms_of_aterms us
    end.

  (* The inner [fix] of [term_of_aterm] coincides with [terms_of_aterms]. *)
  Lemma terms_of_aterms_eq : forall n (ts : aterms n),
    (fix terms_of_aterms n (ts : aterms n) :=
      match ts with
        | Vnil => nil
        | Vcons u us => term_of_aterm u :: terms_of_aterms _ us
      end) n ts = terms_of_aterms ts.

  Proof. induction ts; simpl; intros. refl. rewrite IHts. refl. Qed.

  (* Unfolding lemma for the translation of a function application. *)
  Lemma term_of_aterm_fun : forall f ts,
    term_of_aterm (Fun f ts) = Term f (terms_of_aterms ts).

  Proof. intros. simpl. rewrite terms_of_aterms_eq. refl. Qed.

  Import VecUtil.

  (* [terms_of_aterms] is invariant under vector casts... *)
  Lemma terms_of_aterms_cast : forall n (ts : aterms n) p (e : n=p),
    terms_of_aterms (Vcast ts e) = terms_of_aterms ts.

  Proof.
    induction ts; destruct p; simpl; intros; try discr.
    rewrite Vcast_refl. refl.
    inversion e. subst p. rewrite Vcast_cons. simpl.
    rewrite IHts. refl.
  Qed.

  (* ... maps vector append to list append ... *)
  Lemma terms_of_aterms_app : forall n (ts : aterms n) p (us : aterms p),
    terms_of_aterms (Vapp ts us) = terms_of_aterms ts ++ terms_of_aterms us.

  Proof. induction ts; simpl; intros. refl. rewrite IHts. refl. Qed.

  (* ... and preserves length. *)
  Lemma length_terms_of_aterms : forall n (ts : aterms n),
    length (terms_of_aterms ts) = n.

  Proof. induction ts; simpl; intros. refl. rewrite IHts. refl. Qed.

  (* Translate a CoLoR substitution into a (finite) Coccinelle substitution,
     restricted to variables strictly below [n]. *)
  Fixpoint sub_of_asub (s : ASubstitution.substitution Sig) n :=
    match n with
      | 0 => nil
      | S n' => (n', term_of_aterm (s n')) :: sub_of_asub s n'
    end.

  Import more_list.

  Notation find := (@find _ eq_var_bool _).

  (* Looking up a variable in the translated substitution succeeds exactly
     for variables below the bound [n]. *)
  Lemma find_sub_of_asub : forall s n v, find v (sub_of_asub s n) =
    if bgt_nat n v then Some (term_of_aterm (s v)) else None.

  Proof.
    induction n; intros. refl. simpl sub_of_asub. simpl more_list.find.
    rewrite IHn. unfold eq_var_bool. case_beq_nat v n.
    assert (bgt_nat (S v) v = true). rewrite bgt_nat_ok. lia. rewrite H. refl.
    case_eq (bgt_nat n v); intros; case_eq (bgt_nat (S n) v); intros.
    refl. rewrite bgt_nat_ok in H0. rewrite bgt_nat_ko in H1. lia.
    rewrite bgt_nat_ok in H1. rewrite bgt_nat_ko in H0.
    rewrite (beq_ko beq_nat_ok) in H. lia. refl.
  Qed.

  (* The translation commutes with substitution application, provided the
     bound [k] dominates all variables of [t]. *)
  Lemma term_of_aterm_sub : forall s k t, k > maxvar t ->
    term_of_aterm (sub s t) = apply_subst (sub_of_asub s k) (term_of_aterm t).

  Proof.
    intros s k t; pattern t; apply ATerm.term_ind
      with (Q := fun n (ts : aterms n) =>
        k > maxvars ts -> terms_of_aterms (Vmap (sub s) ts) =
          map (apply_subst (sub_of_asub s k)) (terms_of_aterms ts)); clear t.
    simpl. intros. rewrite find_sub_of_asub. case_eq (bgt_nat k x); intros.
    refl. rewrite bgt_nat_ko in H0. lia.
    intros. simpl sub. rewrite !term_of_aterm_fun. simpl.
    f_equal. apply H. hyp.
    refl. intros t n ts. simpl. rewrite maxvars_cons, gt_max.
    intros. destruct H1. rewrite H. 2: hyp. rewrite H0. 2: hyp. refl.
  Qed.

  Import APosition AContext.

  (* Filling a context corresponds to replacement at the context's position
     in the translated term. *)
  Lemma term_of_aterm_fill : forall u t c, term_of_aterm (fill c t) =
    replace_at_pos (term_of_aterm (fill c u)) (term_of_aterm t) (pos_context c).

  Proof.
    induction c; intros. refl. simpl fill. simpl pos_context.
    rewrite !term_of_aterm_fun, replace_at_pos_unfold.
    f_equal.
    rewrite !terms_of_aterms_cast, !terms_of_aterms_app. simpl.
    rewrite replace_at_pos_list_replace_at_pos_in_subterm, <- IHc. refl.
    rewrite length_terms_of_aterms. refl.
  Qed.

  (* The position of a context is always a valid position of the filled term. *)
  Lemma is_a_pos_context : forall u c,
    is_a_pos (term_of_aterm (fill c u)) (pos_context c) = true.

  Proof.
    induction c; intros. refl. simpl fill. rewrite term_of_aterm_fun. simpl.
    rewrite terms_of_aterms_cast, terms_of_aterms_app. simpl.
    assert (nth_error (terms_of_aterms t ++ term_of_aterm (fill c u) ::
      terms_of_aterms t0) i = nth_error (terms_of_aterms t ++ term_of_aterm
      (fill c u) :: terms_of_aterms t0) (length (terms_of_aterms t))).
    f_equal. rewrite length_terms_of_aterms. refl.
    rewrite H, nth_error_at_pos. hyp.
  Qed.

End Make_Term.
(***********************************************************************)
(** module type for using Coccinelle's RPO *)
From CoLoR Require Import rpo rpo_extension.
(* Input interface for instantiating Coccinelle's RPO: a signature with a
   status and a numeric precedence per symbol, an RPO bound [bb], and a
   proof that equivalent-precedence symbols share the same status. *)
Module Type PRECEDENCE.
  Parameter Sig : Signature.
  Parameter status : Sig -> status_type.
  Parameter prec_nat : Sig -> nat.
  Parameter bb : nat.
  Parameter prec_eq_status :
    forall f g, prec_eq prec_nat f g -> status f = status g.
End PRECEDENCE.
(***********************************************************************)
(** convert Coccinelle RPO into a CoLoR WeakRedPair *)
From CoLoR Require Import ARedPair ARelation RelUtil BoolUtil.
(* Build a CoLoR weak reduction pair from Coccinelle's RPO: the strict part
   [succ] is RPO pulled back along the term translation, and the non-strict
   part [succeq] is its union with RPO equivalence. *)
Module WP_RPO (Import P : PRECEDENCE) <: WeakRedPair.

  Definition Prec := Precedence status prec_nat prec_eq_status.

  Module S. Definition Sig := Sig. End S.
  Module Import Term := Make_Term S.
  Module Import Rpo := rpo.Make Term.

  Notation rpo := (rpo Prec P.bb).

  Definition Sig := Sig.

  (* t succ u iff (term_of_aterm u) rpo (term_of_aterm t); transposition is
     needed because CoLoR orients reduction pairs the other way round. *)
  Definition succ := transp (Rof rpo term_of_aterm).

  Import Inverse_Image.

  (* succ is well-founded because RPO is, given a well-founded precedence. *)
  Lemma wf_succ : WF succ.

  Proof.
    apply wf_WF_transp. apply wf_inverse_image with (f:=term_of_aterm).
    apply wf_rpo. apply (prec_wf prec_nat).
  Qed.

  Import Max.

  (* succ is closed under substitution; the bound S (max of both maxvars)
     makes [term_of_aterm_sub] applicable to both sides. *)
  Lemma sc_succ : substitution_closed succ.

  Proof.
    intros t u s h. unfold succ, transp, Rof. set (k:=max(maxvar t)(maxvar u)).
    rewrite term_of_aterm_sub with (k:=S k). 2: apply le_n_S; apply le_max_r.
    rewrite term_of_aterm_sub with (k:=S k). 2: apply le_n_S; apply le_max_l.
    apply rpo_subst. hyp.
  Qed.

  Notation empty_rpo_infos := (empty_rpo_infos Prec P.bb).
  Notation rpo_eval := (rpo_eval empty_rpo_infos P.bb).
  Notation rpo_eval_is_sound := (rpo_eval_is_sound_weak empty_rpo_infos P.bb).

  Import ordered_set.

  (* Boolean test for the strict order, via Coccinelle's RPO evaluator. *)
  Definition bsucc t u :=
    match rpo_eval (term_of_aterm t) (term_of_aterm u) with
      | Some Greater_than => true
      | _ => false
    end.

  Lemma bsucc_ok : forall t u, bsucc t u = true -> succ t u.

  Proof.
    intros t u. unfold bsucc.
    gen (rpo_eval_is_sound (term_of_aterm t) (term_of_aterm u)).
    case (rpo_eval (term_of_aterm t) (term_of_aterm u)); try discr.
    destruct c; try discr. unfold succ, transp, Rof. auto.
  Qed.

  Lemma bsucc_sub : rel_of_bool bsucc << succ.

  Proof. intros t u. unfold rel. intro h. apply bsucc_ok. hyp. Qed.

  (* RPO equivalence pulled back along the translation. *)
  Definition equiv_aterm := Rof (equiv Prec) term_of_aterm.

  Definition succeq := succ U equiv_aterm.

  Lemma sc_succeq : substitution_closed succeq.

  Proof.
    intros t u s [h|h]. left. apply sc_succ. hyp. right.
    unfold equiv_aterm, Rof. set (k := max (maxvar t) (maxvar u)).
    rewrite term_of_aterm_sub with (k:=S k). 2: apply le_n_S; apply le_max_l.
    rewrite term_of_aterm_sub with (k:=S k). 2: apply le_n_S; apply le_max_r.
    apply equiv_subst. hyp.
  Qed.

  (* Context closure of succ, via [term_of_aterm_fill] with a dummy hole
     filler (AVar 0). *)
  Lemma cc_succ : context_closed succ.

  Proof.
    intros t u c h. unfold succ, transp, Rof.
    rewrite term_of_aterm_fill with (u := AVar 0) (t:=t),
      term_of_aterm_fill with (u := AVar 0) (t:=u).
    apply rpo_add_context. hyp. apply is_a_pos_context.
  Qed.

  Lemma cc_equiv_aterm : context_closed equiv_aterm.

  Proof.
    intros t u c h. unfold equiv_aterm, Rof.
    rewrite term_of_aterm_fill with (u := AVar 0) (t:=t),
      term_of_aterm_fill with (u := AVar 0) (t:=u).
    apply equiv_add_context. hyp. apply is_a_pos_context.
  Qed.

  Lemma cc_succeq : context_closed succeq.

  Proof.
    intros t u c [h|h]. left. apply cc_succ. hyp.
    right. apply cc_equiv_aterm. hyp.
  Qed.

  Lemma refl_succeq : reflexive succeq.

  Proof.
    intro t. right. apply Eq.
  Qed.

  (* succ absorbs succeq on the left: succ followed by succeq is succ. *)
  Lemma succ_succeq_compat : absorbs_left succ succeq.

  Proof.
    intros t v [u [[h1|h1] h2]]. apply rpo_trans with (term_of_aterm u); hyp.
    unfold succ, transp, Rof. rewrite equiv_rpo_equiv_1. apply h2. hyp.
  Qed.

  (* Boolean test for the non-strict order. *)
  Definition bsucceq t u :=
    match rpo_eval (term_of_aterm t) (term_of_aterm u) with
      | Some Greater_than | Some Equivalent => true
      | _ => false
    end.

  Lemma bsucceq_ok : forall t u, bsucceq t u = true -> succeq t u.

  Proof.
    intros t u. unfold bsucceq.
    gen (rpo_eval_is_sound (term_of_aterm t) (term_of_aterm u)).
    case (rpo_eval (term_of_aterm t) (term_of_aterm u)); try discr.
    destruct c; try discr; unfold succeq, Relation_Operators.union,
      equiv_aterm, succ, transp, Rof; auto.
  Qed.

  Definition bsucceq_sub : rel_of_bool bsucceq << succeq.

  Proof. intros t u. unfold rel. intro h. apply bsucceq_ok. hyp. Qed.

  Lemma trans_succ : transitive succ.

  Proof.
    unfold succ. apply transp_trans. apply Rof_trans.
    intros t u v htu huv. apply rpo_trans with u; hyp.
  Qed.

  Lemma trans_equiv_aterm : transitive equiv_aterm.

  Proof.
    unfold equiv_aterm. apply Rof_trans.
    apply (@RelationClasses.Equivalence_Transitive _ _ (equiv_equiv Prec)).
  Qed.

  (* Transitivity of succeq by case analysis on the four combinations of
     strict/equivalence steps. *)
  Lemma trans_succeq : transitive succeq.

  Proof.
    unfold succeq, Relation_Operators.union, transitive. intuition.
    left. apply trans_succ with y; hyp.
    left. revert H. unfold equiv_aterm, succ, transp, Rof. intro.
    rewrite <- equiv_rpo_equiv_2. apply H1. hyp.
    left. revert H1. unfold equiv_aterm, succ, transp, Rof. intro.
    rewrite equiv_rpo_equiv_1. apply H. hyp.
    right. apply trans_equiv_aterm with y; hyp.
  Qed.

End WP_RPO.
(***********************************************************************)
(** decide compatibility of statuses wrt precedences *)
(* Boolean equality on RPO statuses (Lex/Mul). *)
Definition beq_status s1 s2 :=
  match s1, s2 with
    | Lex, Lex
    | Mul, Mul => true
    | _, _ => false
  end.

(* beq_status decides propositional equality of statuses. *)
Lemma beq_status_ok : forall s1 s2, beq_status s1 s2 = true <-> s1 = s2.

Proof.
  beq_symb_ok.
Qed.
(* Decision procedure for the PRECEDENCE side condition: check, over a list
   enumerating all symbols, that any two symbols with equal precedence have
   the same status. *)
Section prec_eq_status.

  Variables (Sig : Signature) (status : Sig -> status_type)
    (prec_nat : Sig -> nat).

  (* The boolean precedence-equality test reflects [prec_eq]. *)
  Lemma prec_eq_ok : forall f g,
    prec_eq_bool prec_nat f g = true <-> prec_eq prec_nat f g.

  Proof.
    intros f g. gen (prec_eq_bool_ok prec_nat f g). intuition.
    rewrite H1 in H. hyp. case_eq (prec_eq_bool prec_nat f g); intros.
    refl. rewrite H2 in H. absurd (prec_eq prec_nat f g); hyp.
  Qed.

  (* Single-pair check: equal precedence implies equal status. *)
  Definition bprec_eq_status_symb f g :=
    implb (prec_eq_bool prec_nat f g) (beq_status (status f) (status g)).

  Lemma bprec_eq_status_symb_ok : forall f g,
    bprec_eq_status_symb f g = true
      <-> (prec_eq prec_nat f g -> status f = status g).

  Proof.
    intros f g. unfold bprec_eq_status_symb, implb.
    case_eq (prec_eq_bool prec_nat f g); intros.
    rewrite prec_eq_ok in H. rewrite beq_status_ok. intuition.
    intuition. rewrite <- prec_eq_ok, H in H1. discr.
  Qed.

  Section bprec_eq_status_aux1.

    Variable f : Sig.

    (* Accumulate the check of [f] against every symbol of [gs] into [b]. *)
    Fixpoint bprec_eq_status_aux1 b gs :=
      match gs with
        | nil => b
        | g :: gs' => bprec_eq_status_aux1 (b && bprec_eq_status_symb f g) gs'
      end.

    (* The accumulator never turns from false to true. *)
    Lemma bprec_eq_status_aux1_true : forall gs b,
      bprec_eq_status_aux1 b gs = true -> b = true.

    Proof.
      induction gs; simpl; intros. hyp.
      cut (b && bprec_eq_status_symb f a = true). rewrite andb_eq. intuition.
      apply IHgs. hyp.
    Qed.

    Arguments bprec_eq_status_aux1_true [gs b] _.

    (* Soundness: a successful run validates [f] against every g in gs. *)
    Lemma bprec_eq_status_aux1_ok : forall gs b,
      bprec_eq_status_aux1 b gs = true ->
        forall g, In g gs -> prec_eq prec_nat f g -> status f = status g.

    Proof.
      induction gs; simpl; intros. contr. destruct H0.
      subst g. ded (bprec_eq_status_aux1_true H). rewrite andb_eq in H0.
      destruct H0. rewrite bprec_eq_status_symb_ok in H2. intuition.
      eapply IHgs. apply H. hyp. hyp.
    Qed.

  End bprec_eq_status_aux1.

  Arguments bprec_eq_status_aux1_ok [f gs b] _ _ _ _.

  (* Check every unordered pair of symbols drawn from [fs]. *)
  Fixpoint bprec_eq_status_aux2 b fs :=
    match fs with
      | nil => b
      | f :: fs' => bprec_eq_status_aux2 (bprec_eq_status_aux1 f b fs') fs'
    end.

  Lemma bprec_eq_status_aux2_true : forall fs b,
    bprec_eq_status_aux2 b fs = true -> b = true.

  Proof.
    induction fs; simpl; intros. hyp. eapply bprec_eq_status_aux1_true.
    apply IHfs. apply H.
  Qed.

  Arguments bprec_eq_status_aux2_true [fs b] _.

  (* Soundness over all pairs; symmetry of [prec_eq] covers the case where
     the pair was checked in the opposite order. *)
  Lemma bprec_eq_status_aux2_ok : forall fs b,
    bprec_eq_status_aux2 b fs = true -> forall f g, In f fs -> In g fs ->
      prec_eq prec_nat f g -> status f = status g.

  Proof.
    induction fs; simpl; intros. contr. destruct H0; destruct H1.
    subst f. subst g. refl.
    subst f. ded (bprec_eq_status_aux2_true H).
    apply (bprec_eq_status_aux1_ok H0); hyp.
    subst g. ded (bprec_eq_status_aux2_true H).
    sym. apply (bprec_eq_status_aux1_ok H1). hyp. apply prec_eq_sym. hyp.
    eapply IHfs; ehyp.
  Qed.

  Definition bprec_eq_status := bprec_eq_status_aux2 true.

  (* [Fs] enumerates all symbols of the signature. *)
  Variable (Fs : list Sig) (Fs_ok : forall f, In f Fs).

  (* Main result: the boolean check on [Fs] establishes the PRECEDENCE
     compatibility condition for all symbols. *)
  Lemma bprec_eq_status_ok : bprec_eq_status Fs = true ->
    forall f g, prec_eq prec_nat f g -> status f = status g.

  Proof.
    intros. eapply bprec_eq_status_aux2_ok. ehyp.
    apply Fs_ok. apply Fs_ok. hyp.
  Qed.

End prec_eq_status.
Arguments bprec_eq_status_ok [Sig] _ _ [Fs] _ _ _ _ _.

(* Tactic discharging the PRECEDENCE side condition by computation: runs the
   boolean check and fails loudly if statuses and precedences disagree. *)
Ltac prec_eq_status s p o := apply (bprec_eq_status_ok s p o); check_eq
  || fail 10 "statuses incompatible with precedences".
|
{"author": "fblanqui", "repo": "color", "sha": "f2ef98f7d13c5d71dd2a614ed2e6721703a34532", "save_path": "github-repos/coq/fblanqui-color", "path": "github-repos/coq/fblanqui-color/color-f2ef98f7d13c5d71dd2a614ed2e6721703a34532/Conversion/Coccinelle.v"}
|
"""
Adapted from http://www.astrobetter.com/visualization-fun-with-python-2d-histogram-with-1d-histograms-on-axes/
Thanks Jess K!
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter, MaxNLocator
plt.ion()
def centroid(data, x, y):
    """Compute the intensity-weighted centroid of a 2D array.

    ``x`` and ``y`` are the coordinate vectors associated with the two axes
    of ``data``.  Returns the pair of centroid coordinates.
    """
    # Collapse the array along each axis, then take the coordinate-weighted mean.
    axis1_sums = np.sum(data, axis=1)
    axis0_sums = np.sum(data, axis=0)
    total = np.sum(np.sum(data))
    return np.dot(x, axis1_sums) / total, np.dot(y, axis0_sums) / total
def binner(x, y, n_bins, bin_size):
    """Bin scattered (x, y) samples onto a centered (n_bins, n_bins) grid.

    The grid spans ``[-n_bins/2 * bin_size, n_bins/2 * bin_size]`` on both
    axes.  Returns the 2D histogram and the x/y bin edges.
    """
    half_extent = n_bins / 2.0 * bin_size
    x_range = [-half_extent, half_extent]
    y_range = [-half_extent, half_extent]
    counts, xedges, yedges = np.histogram2d(x, y, n_bins, range=[x_range, y_range])
    return counts, xedges, yedges
def detector_bin(data, n_pix):
    """Rebin a 2D array by summing blocks down to shape (n_pix, n_pix)."""
    rows, cols = data.shape
    assert (np.mod(rows, n_pix) == 0) & (np.mod(cols, n_pix) == 0), "Non-integer number of bins per pixel."
    # View the array as an (n_pix, block, n_pix, block) grid of sub-blocks,
    # then collapse each block with two summations.
    blocked = data.reshape(n_pix, rows // n_pix, n_pix, cols // n_pix)
    return blocked.sum(axis=3).sum(axis=1)
def add_poisson_noise(dimension, noise):
    """Return a (dimension, dimension) array of Poisson noise with mean `noise`.

    Bug fix: the original referenced the misspelled name ``dimensoin`` in the
    ``size`` argument, raising NameError on every call.
    """
    return np.random.poisson(lam=noise, size=dimension**2).reshape(dimension, dimension)
def add_gaussian_noise(dimension, noise):
    """Return a (dimension, dimension) array of Gaussian noise.

    Samples are drawn with mean ``noise`` and unit standard deviation.
    """
    samples = np.random.normal(loc=noise, scale=1.0, size=dimension**2)
    return samples.reshape(dimension, dimension)
def fiber_mask(fiber_radius, n_bins, bin_size, refl):
    """Build a centered circular fiber mask of shape (n_bins, n_bins).

    Cells inside the fiber radius take the value ``refl``; cells outside it
    are 1.  Constructed from one quadrant mirrored twice.
    """
    assert n_bins%2 == 0, "Number of bins in fiber mask must be even."
    radius_bins = fiber_radius / bin_size
    x, y = np.ogrid[0:n_bins/2, 0:n_bins/2]
    inside = x*x + y*y < radius_bins*radius_bins
    # One quadrant: 1 outside the circle, refl inside.
    quadrant = (~inside).astype(int) + inside.astype(int)*refl
    # Mirror horizontally, then vertically, to cover the full grid.
    half = np.column_stack((np.fliplr(quadrant), quadrant))
    return np.vstack((np.flipud(half), half))
def source_photons(bandpass, wavelength, magnitude, diameter, extinction):
    """
    Number of photons collected per second by a telescope of the given
    diameter from a source of the given magnitude.
    Adopted from Massey et al. 1988.
    Specify the bandpass and mean wavelength of observation.
    Does not account for transmission or detector efficiency.
    """
    # Flux scaling at the observing wavelength, dimmed by (magnitude +
    # extinction), scaled by collecting area and bandpass; floored to a
    # whole photon count.
    return np.floor(4.5e10 / wavelength
                    * 10**(-(magnitude + extinction) / 2.5)
                    * diameter**2 * bandpass)
# Define a function to make the ellipses
def ellipse(ra,rb,ang,x0,y0,Nb=100):
xpos, ypos = x0, y0
radm, radn = ra, rb
an = ang
co, si = np.cos(an), np.sin(an)
the = np.linspace(0,2*np.pi,Nb)
X = radm * np.cos(the) * co - si * radn * np.sin(the) + xpos
Y = radm * np.cos(the) * si + co * radn * np.sin(the) + ypos
return X, Y
def plotter(x, y, n_pix, pixel_size, grid_size, fiber_size):
    """Plot a 2D histogram of (x, y) event positions with 1D marginals.

    Draws the binned image with 1/2/3-sigma ellipses about the sample mean,
    plus x and y marginal histograms on the top and right.

    Parameters
    ----------
    x, y : array-like
        Event positions.
    n_pix : int
        Number of detector pixels across the image.
    pixel_size : float
        Physical size of one pixel (same units as x/y).
    grid_size, fiber_size :
        Currently unused; kept for interface compatibility.

    Bug fixes vs. original: `np_pix` was an undefined name (NameError);
    `[-a, a] * pixel_size` replicated the Python list instead of scaling the
    limits; `np.histogram2d(x, y, pixel_size)` passed a length where a bin
    count is expected; `xlabel`/`ylabel` were undefined names.
    """
    # Set up the x and y limits: the detector half-extent in physical units.
    half_extent = (n_pix / 2.0) * pixel_size
    xlims = [-half_extent, half_extent]
    ylims = [-half_extent, half_extent]

    # Axis labels (previously undefined names). TODO: confirm physical units.
    xlabel = 'x'
    ylabel = 'y'

    # Define the locations for the axes
    left, width = 0.12, 0.55
    bottom, height = 0.12, 0.55
    bottom_h = left_h = left + width + 0.02

    # Set up the geometry of the three plots
    rect_temperature = [left, bottom, width, height]  # dimensions of temp plot
    rect_histx = [left, bottom_h, width, 0.25]        # dimensions of x-histogram
    rect_histy = [left_h, bottom, 0.25, height]       # dimensions of y-histogram

    # Set up the size of the figure
    fig = plt.figure(1, figsize=(9.5, 9))

    # Find the min/max of the data
    xmin = np.min(xlims)
    xmax = np.max(xlims)
    ymin = np.min(ylims)
    ymax = np.max(ylims)

    # Number of bins for the marginal histograms
    nbins = 100

    aspectratio = 1.0 * (xmax - 0) / (1.0 * ymax - 0)

    # Bin the events onto the detector grid (n_pix bins per axis, over the
    # displayed extent; the original passed pixel_size as the bin count).
    H, xedges, yedges = np.histogram2d(x, y, n_pix, range=[xlims, ylims])

    # Make the three plots
    axTemperature = plt.axes(rect_temperature)  # temperature plot
    axHistx = plt.axes(rect_histx)              # x histogram
    axHisty = plt.axes(rect_histy)              # y histogram

    # Remove the inner axes numbers of the histograms
    nullfmt = NullFormatter()
    axHistx.xaxis.set_major_formatter(nullfmt)
    axHisty.yaxis.set_major_formatter(nullfmt)

    # Plot the temperature data
    cax = (axTemperature.imshow(H, extent=[xmin, xmax, ymin, ymax],
           interpolation='nearest', origin='lower', aspect=aspectratio))

    # Overlay 1/2/3-sigma ellipses centered on the sample mean.
    contourcolor = 'white'
    xcenter = np.mean(x)
    ycenter = np.mean(y)
    ra = np.std(x)
    rb = np.std(y)
    ang = 0

    X, Y = ellipse(ra, rb, ang, xcenter, ycenter)
    axTemperature.plot(X, Y, "k:", ms=1, linewidth=2.0)
    axTemperature.annotate('$1\\sigma$', xy=(X[15], Y[15]), xycoords='data', xytext=(10, 10),
                           textcoords='offset points', horizontalalignment='right',
                           verticalalignment='bottom', fontsize=25)

    X, Y = ellipse(2*ra, 2*rb, ang, xcenter, ycenter)
    axTemperature.plot(X, Y, "k:", color=contourcolor, ms=1, linewidth=2.0)
    axTemperature.annotate('$2\\sigma$', xy=(X[15], Y[15]), xycoords='data', xytext=(10, 10),
                           textcoords='offset points', horizontalalignment='right',
                           verticalalignment='bottom', fontsize=25, color=contourcolor)

    X, Y = ellipse(3*ra, 3*rb, ang, xcenter, ycenter)
    axTemperature.plot(X, Y, "k:", color=contourcolor, ms=1, linewidth=2.0)
    axTemperature.annotate('$3\\sigma$', xy=(X[15], Y[15]), xycoords='data', xytext=(10, 10),
                           textcoords='offset points', horizontalalignment='right',
                           verticalalignment='bottom', fontsize=25, color=contourcolor)

    # Plot the axes labels
    axTemperature.set_xlabel(xlabel, fontsize=25)
    axTemperature.set_ylabel(ylabel, fontsize=25)

    # Make the tickmarks pretty
    for label in axTemperature.get_xticklabels():
        label.set_fontsize(18)
        label.set_family('serif')
    for label in axTemperature.get_yticklabels():
        label.set_fontsize(18)
        label.set_family('serif')

    # Set up the plot limits
    axTemperature.set_xlim(xlims)
    axTemperature.set_ylim(ylims)

    # Set up the histogram bins
    xbins = np.arange(xmin, xmax, (xmax - xmin) / nbins)
    ybins = np.arange(ymin, ymax, (ymax - ymin) / nbins)

    # Plot the histograms
    axHistx.hist(x, bins=xbins, color='blue')
    axHisty.hist(y, bins=ybins, orientation='horizontal', color='red')

    # Set up the histogram limits
    axHistx.set_xlim(np.min(x), np.max(x))
    axHisty.set_ylim(np.min(y), np.max(y))

    # Make the tickmarks pretty
    for label in axHistx.get_yticklabels():
        label.set_fontsize(12)
        label.set_family('serif')
    for label in axHisty.get_xticklabels():
        label.set_fontsize(12)
        label.set_family('serif')

    # Cool trick that changes the number of tickmarks for the histogram axes
    axHisty.xaxis.set_major_locator(MaxNLocator(4))
    axHistx.yaxis.set_major_locator(MaxNLocator(4))

    # Show the plot
    plt.draw()
|
{"hexsha": "fb1da93f0acaee09b9a8209ff72322cf7d77ca0a", "size": 7727, "ext": "py", "lang": "Python", "max_stars_repo_path": "plotRoutine.py", "max_stars_repo_name": "tmccrack/fttWIYN", "max_stars_repo_head_hexsha": "1586e97a62b3c2bd6b460015a6bc045d8f88e0ca", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "plotRoutine.py", "max_issues_repo_name": "tmccrack/fttWIYN", "max_issues_repo_head_hexsha": "1586e97a62b3c2bd6b460015a6bc045d8f88e0ca", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "plotRoutine.py", "max_forks_repo_name": "tmccrack/fttWIYN", "max_forks_repo_head_hexsha": "1586e97a62b3c2bd6b460015a6bc045d8f88e0ca", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1074766355, "max_line_length": 125, "alphanum_fraction": 0.6358224408, "include": true, "reason": "import numpy", "num_tokens": 2270}
|
'''
@author: luislortega
'''
import cv2 as cv
import numpy as np
import os
from time import time
from windowcapture import WindowCapture
from vision import Vision
import pyautogui
# Change the working directory to the folder this script is in.
# Doing this because I'll be putting the files from each video in their own folder on GitHub
os.chdir(os.path.dirname(os.path.abspath(__file__)))

# initialize the WindowCapture class
#wincap = WindowCapture('LDPlayer')

# load the trained model (pre-trained cascade classifier from disk)
cascade_limestone = cv.CascadeClassifier('cascade/cascade.xml')
# load an empty Vision class (no needle image: used only for drawing results)
vision_limestone = Vision(None)

#loop_time = time()
while(True):

    # get an updated image of the game
    #screenshot = wincap.get_screenshot()
    # capture the whole screen instead of a specific window
    screenshot = pyautogui.screenshot()
    # PIL image -> numpy array; reversing the channel axis converts RGB
    # (pyautogui) to BGR (OpenCV); .copy() makes the array contiguous
    screenshot = np.array(screenshot)
    screenshot = screenshot[:,:,::-1].copy()

    # do object detection (second argument is the cascade scale factor)
    rectangles = cascade_limestone.detectMultiScale(screenshot, 2)

    # draw the detection results onto the original image, then shrink for display
    detection_image = vision_limestone.draw_rectangles(screenshot, rectangles)
    imS = cv.resize(detection_image, (400, 300))

    # display the images
    cv.imshow('Matches', imS)

    # debug the loop rate
    #print('FPS {}'.format(1 / (time() - loop_time)))
    #loop_time = time()

    # press 'q' with the output window focused to exit.
    # press 'f' to save screenshot as a positive image, press 'd' to
    # save as a negative image.
    # waits 1 ms every loop to process key presses
    key = cv.waitKey(1)
    if key == ord('q'):
        cv.destroyAllWindows()
        break
    # dead code kept as a string literal: sample-collection key bindings
    '''
    elif key == ord('f'):
        cv.imwrite('positive/{}.jpg'.format(loop_time), screenshot)
    elif key == ord('d'):
        cv.imwrite('negative/{}.jpg'.format(loop_time), screenshot)
    '''

print('Done.')
|
{"hexsha": "660c81f42d00b9de8aa39b18c6219a952091e4d7", "size": 1815, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "luislortega/AimPro", "max_stars_repo_head_hexsha": "abc436a79eff42acfcd00b4a4d2d07c5a6fef4e6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-10-23T05:00:38.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-20T23:16:48.000Z", "max_issues_repo_path": "main.py", "max_issues_repo_name": "luislortega/AimPro", "max_issues_repo_head_hexsha": "abc436a79eff42acfcd00b4a4d2d07c5a6fef4e6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "luislortega/AimPro", "max_forks_repo_head_hexsha": "abc436a79eff42acfcd00b4a4d2d07c5a6fef4e6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.7627118644, "max_line_length": 92, "alphanum_fraction": 0.6914600551, "include": true, "reason": "import numpy", "num_tokens": 440}
|
/**
* @project zapdos
* @file include/utils/SharedTable.hpp
* @author S Roychowdhury < sroycode at gmail dot com >
* @version 1.0.0
*
* @section LICENSE
*
* Copyright (c) 2018-2020 S Roychowdhury
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* @section DESCRIPTION
*
* SharedTable.hpp : Shared Table of system data Headers
*
*/
#ifndef _ZPDS_UTILS_SHAREDTABLE_HPP_
#define _ZPDS_UTILS_SHAREDTABLE_HPP_
#include <memory>
#include <vector>
#include <random>
#include <boost/asio.hpp>
#include "http/AsioCompat.hpp"
#include "utils/SharedCounter.hpp"
#include "utils/SharedPairMap.hpp"
#include "utils/SharedMap.hpp"
// #include "utils/SharedQueue.hpp"
#include "store/StoreLevel.hpp"
#include "store/CacheContainer.hpp"
#ifdef ZPDS_BUILD_WITH_XAPIAN
#include "search/WriteIndex.hpp"
#include "jamspell/StoreJam.hpp"
#endif
#include "crypto/CryptoBase.hpp"
namespace zpds {
namespace utils {
// SharedTable : a single shared_ptr-managed bag of process-wide state
// (counters, maps, database handles, caches and flags).
class SharedTable : public std::enable_shared_from_this<SharedTable> {
public:
	// -- type aliases -----------------------------------------------------
	using pointer=std::shared_ptr<SharedTable>;
	using dbpointer=zpds::store::StoreLevel::dbpointer;
	using SharedString = SharedObject<std::string>;
	using SharedUnsigned = SharedObject<uint64_t>;
	using SharedDBPointer = SharedObject<dbpointer>;
	using SharedBool = SharedObject<bool>;
	// using JobQueue = SharedQueue<std::string>;
	// string key mapped to a pair of uint64_t values (see utils/SharedPairMap.hpp)
	using SharedRemote = SharedPairMap<std::string,uint64_t,uint64_t>;
	using RemoteMapT = SharedPairMap<std::string,uint64_t,uint64_t>::PairMapT;
	// uint64_t key mapped to a string value (see utils/SharedMap.hpp)
	using SharedTrans = SharedMap<uint64_t,std::string>;
	using SharedCache = zpds::store::CacheContainer::pointer;
#ifdef ZPDS_BUILD_WITH_XAPIAN
	using SharedXap = zpds::search::WriteIndex::pointer;
	using SharedJam = zpds::jamspell::StoreJam::pointer;
#endif
	// name mapped to a crypto handler
	using KeyRingT = std::unordered_map<std::string,std::shared_ptr<zpds::crypto::CryptoBase> >;
	// using LockT = boost::shared_mutex;

	// io_whatever : shared asio io context (alias declared in http/AsioCompat.hpp)
	std::shared_ptr<::zpds::http::io_whatever> io_whatever;
	// map of shared followers
	SharedRemote remotes;
	// map of shared transactions
	SharedTrans transactions;
	// shared strings (names suggest replication roles — master address,
	// cluster secret, this node's hostname/url, last slave seen)
	SharedString master;
	SharedString shared_secret;
	SharedString hostname;
	SharedString thisurl;
	SharedString lastslave;
	// keyring of crypto handlers
	KeyRingT keyring;
#ifdef ZPDS_BUILD_WITH_XAPIAN
	// xapian index path and write handle
	SharedString xapath;
	SharedXap xapdb;
	// jamspell spellcheck store
	SharedJam jamdb;
	// flag to disable xapian usage
	SharedBool no_xapian;
#endif
	// shared counters for the main and log sequences
	SharedCounter maincounter;
	SharedCounter logcounter;
	// database pointers (see store/StoreLevel.hpp)
	SharedDBPointer maindb;
	SharedDBPointer logdb;
	// cache containers : db-backed cache and a temporary cache
	SharedCache dbcache;
	SharedCache tmpcache;
	// state flags : master role, readiness, forced commit, update lock
	SharedBool is_master;
	SharedBool is_ready;
	SharedBool force_commit;
	SharedBool lock_updates;
	// shared numeric limits
	SharedUnsigned max_fetch_records;
	SharedUnsigned max_user_sessions;
	// queue
	// JobQueue jobqueue;

	/**
	* make noncopyable : instances are shared by pointer only
	*/
	SharedTable(const SharedTable&) = delete;
	SharedTable& operator=(const SharedTable&) = delete;

	/**
	* create : static construction creates new first time
	*
	* @return
	*   pointer to a freshly constructed SharedTable
	*/
	static pointer create()
	{
		return pointer{new SharedTable};
	}

	/**
	* share : return instance
	*
	* @return
	*   pointer aliasing this instance
	*/
	pointer share()
	{
		return shared_from_this();
	}

	/**
	* destructor : virtual; members release themselves
	*/
	virtual ~SharedTable () {}

private:
	/**
	* Constructor : default private, reachable only through create();
	* initializes the two cache containers.
	*
	* @return
	*   none
	*/
	SharedTable() :
		dbcache(zpds::store::CacheContainer::create()),
		tmpcache(zpds::store::CacheContainer::create())
	{}
};
} // namespace utils
} // namespace zpds
#endif /* _ZPDS_UTILS_SHAREDTABLE_HPP_ */
|
{"hexsha": "77d8f7372a359a5975d7d8edb969fb1a1d5554cb", "size": 4601, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/utils/SharedTable.hpp", "max_stars_repo_name": "sroycode/zapdos", "max_stars_repo_head_hexsha": "8818ef109e072dcbe990914d9a2a6d70ef190d3e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5.0, "max_stars_repo_stars_event_min_datetime": "2018-11-11T21:09:30.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-25T12:46:41.000Z", "max_issues_repo_path": "include/utils/SharedTable.hpp", "max_issues_repo_name": "vnaad/zapdos", "max_issues_repo_head_hexsha": "8818ef109e072dcbe990914d9a2a6d70ef190d3e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2020-08-02T09:12:57.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-02T09:12:57.000Z", "max_forks_repo_path": "include/utils/SharedTable.hpp", "max_forks_repo_name": "vnaad/zapdos", "max_forks_repo_head_hexsha": "8818ef109e072dcbe990914d9a2a6d70ef190d3e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.2373737374, "max_line_length": 93, "alphanum_fraction": 0.745924799, "num_tokens": 1180}
|
import numpy as np
import matplotlib.pyplot as plt
import json
import os.path
class APSTrainingScore:
    """Look up avalanche-problem component values and combine them into a score.

    The lookup tables are read from ``training_score.json`` located next to
    this module.
    """

    def __init__(self):
        # The JSON lookup tables live alongside this source file.
        self.filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'training_score.json')
        with open(self.filename) as json_data:
            self.score_dict = json.load(json_data)

    def get_score(self, position, size, distribution, trigger, probability, dangerlevel):
        """Compute the training score from the six component ids.

        Each argument is an id that is coerced to ``str(int(...))`` and used
        as a key into the corresponding lookup table. The score is stored on
        ``self.score`` and also returned.

        Returns
        -------
        The combined score: pos + size*distribution + trigger*probability + dangerlevel.
        """
        pos = self.score_dict['AvalancheProblemId'][str(int(position))]
        s = self.score_dict['DestructiveSizeExtId'][str(int(size))]
        d = self.score_dict['AvalPropagationId'][str(int(distribution))]
        t = self.score_dict['AvalTriggerSimpleId'][str(int(trigger))]
        p = self.score_dict['AvalProbabilityId'][str(int(probability))]
        dl = self.score_dict['DangerLevel'][str(int(dangerlevel))]
        self.score = pos + (s * d) + (t * p) + dl
        # Bug fix: the score used to be stored only on the instance; return it
        # as well so callers can use the value directly.
        return self.score
def load_config():
    """Load the score component tables from ``training_score.json``.

    Returns
    -------
    tuple of np.ndarray
        (position, size, distribution, trigger, probability, dangerlevel),
        each holding the values of the corresponding lookup table.
    """
    filename = 'training_score.json'
    with open(filename) as json_data:
        score = json.load(json_data)
    distribution = np.array(list(score['AvalPropagationId'].values()))  # why is it called "propagation"?
    size = np.array(list(score['DestructiveSizeExtId'].values()))
    trigger = np.array(list(score['AvalTriggerSimpleId'].values()))
    probability = np.array(list(score['AvalProbabilityId'].values()))
    position = np.array(list(score['AvalancheProblemId'].values()))
    dangerlevel = np.array(list(score['DangerLevel'].values()))
    # Bug fix: the function previously returned None although the __main__
    # block unpacks six arrays from it; return them in the expected order.
    return position, size, distribution, trigger, probability, dangerlevel
def get_score(position, size, distribution, trigger, probability, dangerlevel):
    """Combine the six score components into a single training score."""
    size_term = size * distribution
    trigger_term = trigger * probability
    return position + size_term + trigger_term + dangerlevel
def get_score_range(position, size, distribution, trigger, probability, dangerlevel):
    """Enumerate the training score for every combination of component values.

    Iterates distribution, size, trigger, probability, position and danger
    level (in that nesting order) and returns the scores as one array.
    """
    scores = [
        get_score(pos, s, d, t, p, dl)
        for d in distribution
        for s in size
        for t in trigger
        for p in probability
        for pos in position
        for dl in dangerlevel
    ]
    return np.array(scores)
if __name__ == "__main__":
    # Load the component value tables and plot every achievable score.
    # NOTE(review): this unpack requires load_config() to return the six
    # arrays — verify, since load_config as written has no return statement.
    position, size, distribution, trigger, probability, dangerlevel = load_config()
    ts = get_score_range(position, size, distribution, trigger, probability, dangerlevel)
    print(ts.max())
    plt.plot(ts)
    plt.show()
|
{"hexsha": "a6351638300dda20e738a0bf2a992aefd3ee7389", "size": 2322, "ext": "py", "lang": "Python", "max_stars_repo_path": "aps/config/training_score.py", "max_stars_repo_name": "kmunve/APS", "max_stars_repo_head_hexsha": "4c2f254ede83a3a311cbedc90c76db9ee367a000", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "aps/config/training_score.py", "max_issues_repo_name": "kmunve/APS", "max_issues_repo_head_hexsha": "4c2f254ede83a3a311cbedc90c76db9ee367a000", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-12-14T14:47:13.000Z", "max_issues_repo_issues_event_max_datetime": "2018-12-14T14:47:13.000Z", "max_forks_repo_path": "aps/config/training_score.py", "max_forks_repo_name": "kmunve/APS", "max_forks_repo_head_hexsha": "4c2f254ede83a3a311cbedc90c76db9ee367a000", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.28125, "max_line_length": 104, "alphanum_fraction": 0.6640826873, "include": true, "reason": "import numpy", "num_tokens": 526}
|
from pathlib import Path
import numpy as np
from scipy.optimize import minimize_scalar
def align(input):
    """Return a cost function for part 1: fuel is the summed distance to x.

    ``input`` is a comma-separated string of integer positions.
    """
    positions = np.fromstring(input, sep=",")

    def cost(x):
        # Constant fuel rate: one unit per step for every crab.
        return np.abs(positions - x).sum()

    return cost
def align2(input):
    """Return a cost function for part 2: fuel grows triangularly with distance.

    Moving d steps costs 1 + 2 + ... + d = d * (d + 1) / 2 fuel.
    ``input`` is a comma-separated string of integer positions.
    """
    arr = np.fromstring(input, sep=",")

    def min_fun(x):
        # Vectorized triangular-number cost; replaces the original
        # per-element Python list comprehension with one array expression.
        d = np.abs(arr - x)
        return np.sum(d * (d + 1) / 2.0)

    return min_fun
def load_data():
    """Read and return the puzzle input that sits next to this script."""
    here = Path(__file__).parent
    return (here / "input.txt").read_text()
def optimize(input, align_fn):
    """Minimize the fuel cost produced by ``align_fn`` over positions.

    Returns the rounded optimal position and the cost evaluated there.
    """
    cost = align_fn(input)
    solution = minimize_scalar(cost)
    best_x = round(solution.x)
    return best_x, cost(best_x)
def task1():
    """Solve part 1 (constant fuel rate) and print the result."""
    puzzle_input = load_data()
    optimal_x, optimal_fuel = optimize(puzzle_input, align)
    print(f"Optimal horizontal position: {optimal_x}")
    print(f"Minimal fuel consumption: {optimal_fuel}")
def task2():
    """Solve part 2 (triangular fuel rate) and print the result."""
    puzzle_input = load_data()
    optimal_x, optimal_fuel = optimize(puzzle_input, align2)
    print(f"Optimal horizontal position: {optimal_x}")
    print(f"Minimal fuel consumption: {optimal_fuel}")
if __name__ == "__main__":
    # Run both puzzle parts when executed as a script.
    print("----- Task 1 -----")
    task1()
    print("----- Task 2 -----")
    task2()
|
{"hexsha": "e8724590cc48cd644513b09b5c90e898e230deed", "size": 1210, "ext": "py", "lang": "Python", "max_stars_repo_path": "day7/src.py", "max_stars_repo_name": "shimst3r/aoc2021", "max_stars_repo_head_hexsha": "980a3d87e7748ac7f5b53c13288c5fb814993640", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-12-04T15:03:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-03T11:48:11.000Z", "max_issues_repo_path": "day7/src.py", "max_issues_repo_name": "shimst3r/aoc2021", "max_issues_repo_head_hexsha": "980a3d87e7748ac7f5b53c13288c5fb814993640", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "day7/src.py", "max_forks_repo_name": "shimst3r/aoc2021", "max_forks_repo_head_hexsha": "980a3d87e7748ac7f5b53c13288c5fb814993640", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.2280701754, "max_line_length": 72, "alphanum_fraction": 0.6314049587, "include": true, "reason": "import numpy,from scipy", "num_tokens": 320}
|
import time
import numpy as np
from .player import Player
class PlayerFinder:
    """Track players identified by marker ids detected in camera frames.

    A newly seen id must stay visible for JOINING_TIME seconds before it is
    promoted from the joining queue to the player list; a known player is
    dropped after TIMEOUT_TIME seconds without being seen.
    """

    # Seconds a known player may go unseen before being removed.
    TIMEOUT_TIME = 25
    # Seconds a new id must remain visible before joining the game.
    JOINING_TIME = 3

    def __init__(self, joining_stage=True):
        # id -> Player for confirmed players.
        self.players = {}
        # id -> Player for candidates still inside the joining window.
        self.joining = {}
        # When False, new ids are never admitted to the joining queue.
        self.joining_stage = joining_stage

    def update(self, bounding_boxes, ids):
        # NOTE(review): `ids` is compared elementwise (`ids == player`) and
        # indexed via np.where below, so it is expected to be a numpy
        # array aligned with `bounding_boxes` — confirm with callers.
        '''Register new players into joining list'''
        if len(ids) != 0 and self.joining_stage:
            for id, bounding_box in zip(ids, bounding_boxes):
                if not (id in self.players or id in self.joining):
                    # print(f"{id} is a new player, waiting to join")
                    self.joining[id] = Player(id, bounding_box, time.time())
        '''Transfering to player list'''
        if len(self.joining) != 0:
            # Iterate over a copy so entries can be removed while looping.
            for player in list(self.joining):
                if not player in ids:
                    # Marker disappeared before the joining window elapsed.
                    self.joining.pop(player)
                    # print(f"{player} was removed from the joining queue.")
                elif time.time() - self.joining[player].last_seen >= self.JOINING_TIME and not player in self.players:
                    # Candidate survived the joining window: promote it.
                    self.players[player] = self.joining[player]
                    self.joining.pop(player)
                    # print(f"{player} added to the player list.")
        '''Removing from players when marker is missing for given time'''
        if len(self.players) != 0:
            for player in list(self.players):
                if player in ids:
                    # Still visible: refresh bounding box and last-seen time.
                    index = np.where(ids==player)[0][0]
                    self.players[player].update(bounding_boxes[index], time.time())
                elif time.time() - self.players[player].last_seen >= self.TIMEOUT_TIME:
                    self.players.pop(player)
                    #print(f"Player {player} timed out.")
                elif time.time() - self.players[player].last_seen >= self.TIMEOUT_TIME/2:
                    # Half-way to timeout; warning hook currently disabled.
                    # print(f"Player {player} is about to time out.")
                    pass
|
{"hexsha": "5d436b8a37a365e5f03c5abca87cb5ac24b81740", "size": 1972, "ext": "py", "lang": "Python", "max_stars_repo_path": "app/player_finder.py", "max_stars_repo_name": "thomsen85/LegoPokerDealer", "max_stars_repo_head_hexsha": "89fbf0123d1f4463493801349ad8b5ab06705a83", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-11-02T11:34:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-23T23:30:30.000Z", "max_issues_repo_path": "app/player_finder.py", "max_issues_repo_name": "thomsen85/LegoPokerDealer", "max_issues_repo_head_hexsha": "89fbf0123d1f4463493801349ad8b5ab06705a83", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2021-11-16T22:59:17.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-17T16:59:51.000Z", "max_forks_repo_path": "app/player_finder.py", "max_forks_repo_name": "thomsen85/LegoPokerDealer", "max_forks_repo_head_hexsha": "89fbf0123d1f4463493801349ad8b5ab06705a83", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.2448979592, "max_line_length": 118, "alphanum_fraction": 0.5562880325, "include": true, "reason": "import numpy", "num_tokens": 409}
|
#! /usr/bin/env python3
# Copyright 2019 Kyle Steckler
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
# to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import SciServer
from SciServer import Authentication, SkyServer, CasJobs, SkyQuery
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from getpass import getpass
import pdb
import sys
def create_datafiles(n_galaxies = 200,galaxy_type = 'both', lower_z_limit=0.1, upper_z_limit = 0.3, lower_flux_limit = 50,
                     upper_flux_limit = 500, data_release = 'DR15', image_data = True, image_scale_factor = 0.01):
    """
    Query the SDSS database and grab labeled image data of galaxies within given constraints.

    Creates up to 2 files: galaxy_images.npy (when image_data is True) and galaxy_labels.npy.

    Parameters
    ----------
    n_galaxies : int
        Maximum number of galaxies to fetch (SELECT TOP n).
    galaxy_type : str
        'both', 'spiral' or 'elliptical' — selects the likelihood cuts below.
    lower_z_limit, upper_z_limit : float
        Redshift range for the spectroscopic match.
    lower_flux_limit, upper_flux_limit : float
        Allowed g-band petrosian flux range.
    data_release : str
        CasJobs context to query, e.g. 'DR15'.
    image_data : bool
        When True, also download JPEG cutouts into galaxy_images.npy.
    image_scale_factor : float
        Factor applied to petroR90_g to set each image's pixel scale.

    Raises
    ------
    Exception
        If galaxy_type is not one of the supported options.
    """
    # Interactive SciServer login; exits on failure.
    auth_login = input("Username: ")
    auth_pass = getpass()
    try:
        Authentication.login(auth_login,auth_pass)
        print('Login Successful: Getting Tables...')
    except Exception:
        sys.exit("Login Failed. (May be locked out if excessive attempts are made)")
    CasJobs.getTables(context='MyDB')
    # Best-effort: drop any stale result table from a previous run.
    try:
        SkyQuery.dropTable('galaxies', datasetName='MyDB')
    except Exception:
        # Typo fix: the original message read "WARNRING".
        print("WARNING: Unable to drop table (it may not exist yet)")
    # Build the query: galaxies joined with their spectroscopic redshift,
    # filtered by redshift and g-band flux, stored into MyDB.galaxies.
    SQL_Query = f"SELECT TOP {n_galaxies} g.objid, g.ra, g.dec,g.petroR90_g, g.petroFlux_g, "
    SQL_Query += "g.specObjID, g.lnLDeV_g, s.z FROM Galaxy as g "
    SQL_Query += "JOIN SpecObjAll as s ON g.specObjID = s.specObjID "
    SQL_Query += "INTO MyDB.galaxies "
    SQL_Query += f"WHERE s.z BETWEEN {lower_z_limit} AND {upper_z_limit} "
    SQL_Query += f"AND (g.petroFlux_g BETWEEN {lower_flux_limit} AND {upper_flux_limit}) "
    SQL_Query += f"AND g.type=3 AND clean=1 "
    # Morphology cuts based on the de Vaucouleurs / exponential fit likelihoods.
    if galaxy_type == 'both':
        SQL_Query += "AND (g.lnLDeV_r > g.lnLExp_r + 0.1 AND g.lnLExp_r > -999.0 AND g.lnLDeV_g > -999.0) "
        SQL_Query += "OR (g.lnLDeV_g < -2000.0 AND g.lnLDeV_g + 0.1 < g.lnLExp_g );"
    elif galaxy_type == 'spiral':
        SQL_Query += "AND (g.lnLDeV_g < -1500.0 AND g.lnLDeV_g < g.lnLExp_g); "
    elif galaxy_type == 'elliptical':
        SQL_Query += "AND g.lnLDeV_r > g.lnLExp_r + 0.1 AND g.lnLExp_r > -999.0 AND g.lnLDeV_g > -999.0;"
    else:
        raise Exception("Invalid Galaxy Type: valid options are both, spiral, elliptical")
    print('Querying Database...')
    job_id = CasJobs.submitJob(sql=SQL_Query, context=data_release)
    # Block until the CasJobs query finishes (the returned state is unused).
    CasJobs.waitForJob(job_id,verbose=True)
    df = SkyQuery.getTable('galaxies',datasetName='MyDB',top=n_galaxies)
    if len(df) != n_galaxies:
        print(f"Was only able to find {len(df)} galaxies with current search parameters")
    ## 0: 'probably spiral', 1: 'probably elliptical'
    df['Classification'] = df['lnLDeV_g'].apply(lambda x: 0 if x < -1000.0 else 1)
    print(f"DISTRIBUTION: \n Spirals: {len(df[df['Classification'] == 0])} \n Ellipticals: {len(df[df['Classification'] == 1])}\n")
    if image_data:
        print("Grabbing Image Data ...")
        images = get_image_data(df, scaling_factor = image_scale_factor)
        np.save('galaxy_images', images)
        print("File Created: galaxy_images.npy")
    galaxy_labels = np.array(df['Classification'])
    np.save('galaxy_labels.npy', galaxy_labels)
    print("File Created: galaxy_labels.npy")
def get_image_data(results, scaling_factor = 0.01):
    """Fetch a 256x256 JPEG cutout for every galaxy in ``results``.

    ``results`` must provide 'ra', 'dec' and 'petroR90_g' columns; the
    pixel scale of each cutout is scaling_factor * petroR90_g.
    """
    # Coordinates and the radius containing 90% of the g-band petrosian flux.
    RA = np.array(results['ra'])
    DEC = np.array(results['dec'])
    petroR90 = np.array(results['petroR90_g'])
    # Per-galaxy image scale in arcsec/pixel.
    scaler = scaling_factor * petroR90
    data = []
    for count, (ra, dec, scale) in enumerate(zip(RA, DEC, scaler)):
        print(f"{count}/{len(RA)} Images Done" )
        data.append(SkyServer.getJpegImgCutout(ra, dec, scale=scale, width=256, height=256))
    return np.array(data)
|
{"hexsha": "7f8c66171bdfe2f4180056753e0fb58902a385af", "size": 4887, "ext": "py", "lang": "Python", "max_stars_repo_path": "sdss_query.py", "max_stars_repo_name": "kylesteckler/galaxy-classification", "max_stars_repo_head_hexsha": "7d9311301a6d2885a2488d6510fde0f402fdaffc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sdss_query.py", "max_issues_repo_name": "kylesteckler/galaxy-classification", "max_issues_repo_head_hexsha": "7d9311301a6d2885a2488d6510fde0f402fdaffc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sdss_query.py", "max_forks_repo_name": "kylesteckler/galaxy-classification", "max_forks_repo_head_hexsha": "7d9311301a6d2885a2488d6510fde0f402fdaffc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.7857142857, "max_line_length": 131, "alphanum_fraction": 0.6844689994, "include": true, "reason": "import numpy", "num_tokens": 1331}
|
#!/usr/bin/env python
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Here, we adapted run_glue.py for V&L datasets.
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
from datasets import load_dataset, load_metric
# We will load spedific V&L datasets when determined which dataset is used.
import importlib
from eval_vl_glue import transformers_volta as transformers
from eval_vl_glue.transformers_volta import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
PretrainedConfig,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from eval_vl_glue.transformers_volta.trainer_utils import get_last_checkpoint, is_main_process
# For CustomTrainer Class
import time
import collections
import torch
import datasets
from typing import Union, Optional, Dict, Callable, Tuple, List
from eval_vl_glue.transformers_volta.data.data_collator import DataCollator
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from eval_vl_glue.transformers_volta.modeling_utils import PreTrainedModel
from eval_vl_glue.transformers_volta.trainer_utils import EvalPrediction, speed_metrics
from eval_vl_glue.transformers_volta.trainer_callback import TrainerCallback, ProgressCallback
# setup for datasets
vl_task_to_keys = {
"nlvr2": ("sentence", None),
}
custom_auto_config_kwargs = {
"nlvr2": {'num_images':2, 'classifier_dims':[1536]}
}
requires_formatter = True
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Using `HfArgumentParser` we can turn this class
    into argparse arguments to be able to specify them on
    the command line.
    """

    # Plain class attribute (not a dataclass field): names of supported tasks.
    defined_tasks = list(vl_task_to_keys.keys())

    task_name: Optional[str] = field(
        default=None,
        metadata={"help": "The name of the task to train on: " + ", ".join(defined_tasks)},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": "Whether to pad all samples to `max_seq_length`. "
            "If False, will pad the samples dynamically when batching to the maximum length in the batch."
        },
    )
    task_dir: Optional[str] = field(default=None, metadata={"help": "Path to the dataset."})

    def __post_init__(self):
        """Lower-case and validate task_name; a task name is currently required."""
        if self.task_name is not None:
            self.task_name = self.task_name.lower()
            if self.task_name not in self.defined_tasks:
                # Bug fix: `defined_tasks` is a class attribute, so the bare
                # name is not visible inside this method; the original line
                # raised NameError instead of the intended ValueError.
                raise ValueError("Unknown task, you should pick one in " + ",".join(self.defined_tasks))
        else:
            raise RuntimeError('Currently, only support for running with task_name')

    @property
    def task_source(self):
        """Classify the task as 'vl' or 'custom'; cached after first access."""
        if not hasattr(self, '_task_source'):
            task_source = None
            if self.task_name is None:
                # Unreachable after __post_init__ (which requires a task_name);
                # kept for potential custom-task support.
                task_source = 'custom'
            elif self.task_name in vl_task_to_keys.keys():
                task_source = 'vl'
            else:
                raise Exception('unknown task_name: %s'%self.task_name)
            self._task_source = task_source
        return self._task_source
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    # Required: local path or hub identifier of the pretrained model.
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    # Optional overrides for when config/tokenizer are stored separately.
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    # Hub download options.
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
            "with private models)."
        },
    )
class CustomTrainer(Trainer):
    """We overwrite Trainer to pass dict of dataset for evaluation (eval_dataset).

    eval_dataset may be a single dataset or a {name: dataset} mapping; in the
    mapping case each entry is evaluated separately and its metrics are
    prefixed with the entry name.
    """

    @staticmethod
    def _is_dict(x):
        # Treat both plain dicts and OrderedDicts as "named dataset mappings".
        return isinstance(x, (dict, collections.OrderedDict))

    def _assert_sized_data(self, data):
        # Every dataset (named or not) must implement __len__.
        if not self._is_dict(data):
            data = {'':data}
        for k, v in data.items():
            if v is not None and not isinstance(v, collections.abc.Sized):
                _msg = '%s@eval_dataset'%k if k else 'eval_dataset'
                raise ValueError("%s must implement __len__"%_msg)

    def _maybe_remove_unused_columns(self, data, description='evaluation'):
        # Strip columns the model does not accept, per dataset entry.
        if transformers.file_utils.is_datasets_available():
            if not self._is_dict(data):
                data = {'':data}
            for k, v in data.items():
                if isinstance(v, datasets.Dataset):
                    _desc = '%s@%s'%(k, description) if k else description
                    self._remove_unused_columns(v, description=_desc)

    def __init__(
        self,
        model: Union[PreTrainedModel, torch.nn.Module] = None,
        args: TrainingArguments = None,
        data_collator: Optional[DataCollator] = None,
        train_dataset: Optional[Dataset] = None,
        eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None,
        tokenizer: Optional["PreTrainedTokenizerBase"] = None,
        model_init: Callable[[], PreTrainedModel] = None,
        compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
        callbacks: Optional[List[TrainerCallback]] = None,
        optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
    ):
        # Pass eval_dataset=None to the base class: the parent Trainer cannot
        # handle a dict, so validation and column pruning are redone here.
        super().__init__(model, args, data_collator, train_dataset, None,
                         tokenizer, model_init, compute_metrics, callbacks, optimizers)
        self._assert_sized_data(eval_dataset)
        self._maybe_remove_unused_columns(eval_dataset)
        self.eval_dataset = eval_dataset

    def get_eval_dataloader(
        self,
        eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None,
    ) -> Union[DataLoader, Dict[str, DataLoader]]:
        # Returns a single DataLoader for a single dataset, or an OrderedDict
        # of loaders when a dict of datasets was supplied.
        if eval_dataset is None and self.eval_dataset is None:
            raise ValueError("Trainer: evaluation requires an eval_dataset.")
        self._assert_sized_data(eval_dataset)
        self._maybe_remove_unused_columns(eval_dataset)
        eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
        if self._is_dict(eval_dataset):
            single_loader = False
        else:
            # Normalize the single-dataset case to a one-entry mapping.
            eval_dataset = {'':eval_dataset}
            single_loader = True
        loaders = collections.OrderedDict()
        for _dataset_key, _dataset_val in eval_dataset.items():
            eval_sampler = self._get_eval_sampler(_dataset_val)
            loaders[_dataset_key] = DataLoader(
                _dataset_val,
                sampler=eval_sampler,
                batch_size=self.args.eval_batch_size,
                collate_fn=self.data_collator,
                drop_last=self.args.dataloader_drop_last,
                num_workers=self.args.dataloader_num_workers,
                pin_memory=self.args.dataloader_pin_memory,
            )
        return loaders[''] if single_loader else loaders

    def evaluate(
        self,
        eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
    ) -> Dict[str, float]:
        # Search ProgressCallback
        # uses tqdm update
        progress_callback = None
        for callback in self.callback_handler.callbacks:
            if isinstance(callback, ProgressCallback):
                progress_callback = callback
                break
        # memory metrics - must set up as early as possible
        self._memory_tracker.start()
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        start_time = time.time()
        output_metrics = {}
        n_samples = 0
        if not self._is_dict(eval_dataloader):
            eval_dataloader = {'':eval_dataloader}
        # Run the prediction loop once per (named) dataloader, merging metrics.
        for _loader_key, _loader_val in eval_dataloader.items():
            _desc = _loader_key+'@evaluation' if _loader_key else 'Evaluation'
            _prefix = metric_key_prefix + (('_'+_loader_key) if _loader_key else '')
            output = self.prediction_loop(
                _loader_val,
                description=_desc,
                # No point gathering the predictions if there are no metrics, otherwise we defer to
                # self.args.prediction_loss_only
                prediction_loss_only=True if self.compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=_prefix,
            )
            output_metrics.update(output.metrics)
            n_samples += len(_loader_val.dataset)
            if progress_callback is not None:
                # We do not end evaluation, but want to switch new tqdm
                # So call just the event of pregress_callback
                progress_callback.on_evaluate(self.args, self.state, self.control)
        # Speed metrics are computed over all datasets combined.
        output_metrics.update(speed_metrics(metric_key_prefix, start_time, n_samples))
        self.log(output_metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            # NOTE(review): `xm` and `met` are not imported in this file's
            # visible imports; this debug branch would raise NameError if
            # ever taken — confirm.
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output_metrics)
        self._memory_tracker.stop_and_update_metrics(output_metrics)
        return output_metrics
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# To keep columns of image ids
training_args.remove_unused_columns = False
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
# Load dataset
if data_args.task_source == 'vl':
sys.path.append(os.path.dirname(__file__))
m = importlib.import_module(f'dataset_{data_args.task_name}')
datasets = m.load_dataset_vl(dataset_dir=data_args.task_dir)
else:
raise RuntimeError('Currently, only support for running with task_name')
# Set class labels
# Trying to have good defaults here, don't hesitate to tweak to your needs.
is_regression = datasets["train"].features["label"].dtype in ["float32", "float64"]
if is_regression:
num_labels = 1
else:
# A useful fast method:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique
label_list = datasets["train"].unique("label")
label_list.sort() # Let's sort it for determinism
num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
auto_config_kwargs = {
'num_labels': num_labels,
'finetuning_task': data_args.task_name,
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
auto_config_kwargs.update(custom_auto_config_kwargs.get(data_args.task_name, {}))
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
**auto_config_kwargs,
)
del auto_config_kwargs
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# Preprocessing the datasets
if data_args.task_source == 'vl':
sentence1_key, sentence2_key = vl_task_to_keys[data_args.task_name]
else:
raise RuntimeError('Currently, only support for running with task_name')
# Padding strategy
if data_args.pad_to_max_length:
padding = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
padding = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
label_to_id = None
if (
model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id
and data_args.task_name is not None
and not is_regression
):
# Some have all caps in their config, some don't.
label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()}
if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):
label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)}
else:
logger.warn(
"Your model seems to have been trained with labels, but they don't match the dataset: ",
f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}."
"\nIgnoring the model labels as a result.",
)
elif data_args.task_source == 'custom' and not is_regression:
label_to_id = {v: i for i, v in enumerate(label_list)}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warn(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
def preprocess_function(examples):
# Tokenize the texts
args = (
(examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])
)
result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True)
# Map labels to IDs (not necessary for GLUE tasks)
if label_to_id is not None and "label" in examples:
result["label"] = [(label_to_id[l] if l != -1 else -1) for l in examples["label"]]
return result
datasets = datasets.map(preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache)
# post processing after mapping
if requires_formatter:
datasets.set_format(type='image_feature', model_config=config, dataset_dir=data_args.task_dir)
train_dataset = datasets["train"]
eval_dataset = datasets["validation"]
if data_args.task_name is not None or data_args.test_file is not None:
test_dataset = datasets["test"]
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(p: EvalPrediction):
preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1)
if is_regression:
return {"mse": ((preds - p.label_ids) ** 2).mean().item()}
else:
return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
data_collator = default_data_collator
elif training_args.fp16:
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
else:
data_collator = None
# Initialize our Trainer
trainer = CustomTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset if training_args.do_eval else None,
compute_metrics=compute_metrics,
tokenizer=tokenizer,
data_collator=data_collator,
)
# Training
if training_args.do_train:
if last_checkpoint is not None:
checkpoint = last_checkpoint
# avoid overwriting num_labels from local pretrained model
#elif os.path.isdir(model_args.model_name_or_path):
# checkpoint = model_args.model_name_or_path
else:
checkpoint = None
train_result = trainer.train(resume_from_checkpoint=checkpoint)
metrics = train_result.metrics
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
eval_results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
eval_result = trainer.evaluate(eval_dataset=eval_dataset)
trainer.log_metrics("eval", eval_result)
trainer.save_metrics("eval", eval_result)
if training_args.do_predict:
logger.info("*** Test ***")
# Loop to handle MNLI double evaluation (matched, mis-matched)
tasks = [data_args.task_name]
test_datasets = [test_dataset]
for test_dataset, task in zip(test_datasets, tasks):
# Removing the `label` columns because it contains -1 and Trainer won't like that.
test_dataset.remove_columns_("label")
predictions = trainer.predict(test_dataset=test_dataset).predictions
predictions = np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1)
output_test_file = os.path.join(training_args.output_dir, f"test_results_{task}.txt")
if trainer.is_world_process_zero():
with open(output_test_file, "w") as writer:
logger.info(f"***** Test results {task} *****")
writer.write("index\tprediction\n")
for index, item in enumerate(predictions):
if is_regression:
writer.write(f"{index}\t{item:3.3f}\n")
else:
item = label_list[item]
writer.write(f"{index}\t{item}\n")
return eval_results
def _mp_fn(index):
    """Per-process entry point for torch_xla's xla_spawn; `index` is the
    process ordinal (unused — main() takes no arguments)."""
    # For xla_spawn (TPUs)
    main()
# Standard CLI entry point.
if __name__ == "__main__":
    main()
|
{"hexsha": "4f08d155de92e03e4b6d197deab5440f60a8e630", "size": 23454, "ext": "py", "lang": "Python", "max_stars_repo_path": "evaluation/vl_tasks/run_vl.py", "max_stars_repo_name": "Alab-NII/eval_vl_glue", "max_stars_repo_head_hexsha": "74e7691828f394554370158f852fe04af9be0d79", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-11-10T10:06:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-26T02:47:07.000Z", "max_issues_repo_path": "evaluation/vl_tasks/run_vl.py", "max_issues_repo_name": "Alab-NII/eval_vl_glue", "max_issues_repo_head_hexsha": "74e7691828f394554370158f852fe04af9be0d79", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-11-19T05:37:49.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-19T07:15:05.000Z", "max_forks_repo_path": "evaluation/vl_tasks/run_vl.py", "max_forks_repo_name": "Alab-NII/eval_vl_glue", "max_forks_repo_head_hexsha": "74e7691828f394554370158f852fe04af9be0d79", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.1473684211, "max_line_length": 119, "alphanum_fraction": 0.6623177283, "include": true, "reason": "import numpy", "num_tokens": 5011}
|
# --- Environment setup and run configuration --------------------------------
using Pkg
Pkg.activate(".")
verbose = true
if verbose println("# Loading RvSpecML") end
using RvSpectML
if verbose println("# Loading other packages") end
using DataFrames, Query, Statistics
using Dates
# USER: The default paths that specify where datafiles can be entered here or overridden in examples/data_paths.jl
target_subdir = "101501" # USER: Replace with directory of your choice
fits_target_str = "101501"
output_dir = "examples/output"
default_paths_to_search = [pwd(),"examples",joinpath(pkgdir(RvSpectML),"examples"),"/gpfs/group/ebf11/default/ebf11/expres/inputs"]
# Output toggles: each flag enables writing one pipeline product to disk.
make_plots = true
write_ccf_to_csv = true
write_rvs_to_csv = true
write_order_ccf_to_csv = true
write_template_to_csv = true
write_spectral_grid_to_jld2 = true
write_dcpca_to_csv = true
write_lines_to_csv = true
# Stage-completion flags: let the script be re-include()d in a live session
# without recomputing stages that already finished.
has_loaded_data = false
has_computed_ccfs = false
has_computed_rvs = false
has_computed_order_ccfs = false
has_computed_template = false
has_computed_dcpca = false
has_found_lines = false
has_computed_ccfs2 = false
has_computed_rvs2 = false
has_computed_template2 = false
has_computed_dcpca2 = false
# Stage 1: find data files, read FITS spectra, and build the normalized
# order-list timeseries that all later stages consume.
if !has_loaded_data
   if verbose println("# Finding what data files are avaliable.") end
   df_files = make_manifest(target_subdir, EXPRES )
   if verbose println("# Reading in customized parameters.") end
   # NOTE(review): the eval'd parameter file is expected to define
   # `df_files_use` (used just below) — confirm against param.jl.
   eval(code_to_include_param_jl())
   if verbose println("# Reading in FITS files.") end
   @time expres_data = map(EXPRES.read_data,eachrow(df_files_use))
   has_loaded_data = true
   if verbose println("# Extracting order list timeseries from spectra.") end
   order_list_timeseries = RvSpectML.make_order_list_timeseries(expres_data)
   order_list_timeseries = RvSpectML.filter_bad_chunks(order_list_timeseries,verbose=true)
   RvSpectML.normalize_spectra!(order_list_timeseries,expres_data);
end
# Stage 2: build a telluric-cleaned line list and compute CCFs.
# NOTE(review): `|| true` forces this stage to re-run on every invocation,
# defeating the has_computed_ccfs flag — confirm whether that is intended.
if !has_computed_ccfs || true
   if verbose println("# Reading line list for CCF: ", linelist_for_ccf_filename, ".") end
   lambda_range_with_good_data = get_λ_range(expres_data)
   espresso_filename = joinpath(pkgdir(RvSpectML),"data","masks",linelist_for_ccf_filename)
   espresso_df = RvSpectML.read_linelist_espresso(espresso_filename)
   line_list_df = EXPRES.filter_line_list(espresso_df,first(expres_data).inst)
   if verbose println("# Removing lines with telluric contamination.") end # Currently only works for EXPRES data
   Δv_to_avoid_tellurics = 14000.0
   line_list_to_search_for_tellurics = copy(line_list_df)
   line_list_to_search_for_tellurics.lambda_lo = line_list_to_search_for_tellurics.lambda./calc_doppler_factor(Δv_to_avoid_tellurics)
   line_list_to_search_for_tellurics.lambda_hi = line_list_to_search_for_tellurics.lambda.*calc_doppler_factor(Δv_to_avoid_tellurics)
   chunk_list_timeseries = RvSpectML.make_chunk_list_timeseries(expres_data,line_list_to_search_for_tellurics)
   line_list_to_search_for_tellurics.min_telluric_model_all_obs = RvSpectML.find_worst_telluric_in_each_chunk( chunk_list_timeseries, expres_data)
   line_list_no_tellurics_df = line_list_to_search_for_tellurics |> @filter(_.min_telluric_model_all_obs == 1.0) |> DataFrame
   #=
   Δv_to_avoid_tellurics = RvSpectMLBase.max_bc # default_Δv_to_avoid_tellurics
   line_list_no_tellurics_df.lambda_lo = line_list_no_tellurics_df.lambda./calc_doppler_factor(Δv_to_avoid_tellurics)
   line_list_no_tellurics_df.lambda_hi = line_list_no_tellurics_df.lambda.*calc_doppler_factor(Δv_to_avoid_tellurics)
   #find_overlapping_chunks(line_list_no_tellurics_df)
   #chunk_list_no_tellurics_merged_df = chunk_list_no_tellurics_merged_df |> @take(1) |> DataFrame
   #chunk_list_no_tellurics_merged_df = RvSpectML.merge_chunks(line_list_no_tellurics_df)
   #@assert find_overlapping_chunks(chunk_list_no_tellurics_merged_df) == nothing
   #chunk_list_timeseries = RvSpectML.make_chunk_list_timeseries(expres_data,chunk_list_no_tellurics_merged_df)
   chunk_list_timeseries = RvSpectML.make_chunk_list_timeseries(expres_data,line_list_no_tellurics_df)
   #chunk_list_timeseries = RvSpectML.make_chunk_list_timeseries(expres_data,chunk_list_no_tellurics_merged_df)
   =#
   # Compute CCF's & measure RVs
   if verbose println("# Computing CCF.") end
   mask_shape = RvSpectML.CCF.TopHatCCFMask(order_list_timeseries.inst, scale_factor=tophap_ccf_mask_scale_factor)
   # NOTE(review): the next assignment is immediately overwritten — only the
   # telluric-free line list is actually used. Confirm the first line can go.
   line_list = RvSpectML.CCF.BasicLineList(line_list_df.lambda, line_list_df.weight)
   line_list = RvSpectML.CCF.BasicLineList(line_list_no_tellurics_df.lambda, line_list_no_tellurics_df.weight)
   #line_list = RvSpectML.CCF.BasicLineList(chunk_list_no_tellurics_merged_df.lambda, chunk_list_no_tellurics_merged_df.weight)
   ccf_plan = RvSpectML.CCF.BasicCCFPlan(mask_shape = mask_shape, line_list=line_list, midpoint=ccf_mid_velocity)
   v_grid = RvSpectML.CCF.calc_ccf_v_grid(ccf_plan)
   @time ccfs = RvSpectML.CCF.calc_ccf_chunklist_timeseries(order_list_timeseries, ccf_plan)
   #@time ccfs = RvSpectML.CCF.calc_ccf_chunklist_timeseries(chunk_list_timeseries, ccf_plan)
   # Write CCFs to file
   if write_ccf_to_csv
      using CSV
      CSV.write(joinpath(output_dir,target_subdir * "_ccfs.csv"),Tables.table(ccfs',header=Symbol.(v_grid)))
   end
   has_computed_ccfs = true
end
#make_plots = true
# Quick-look plot of every observation's CCF vs velocity.
# FIX: removed the duplicate `using Plots` that appeared twice in this block.
if make_plots
   using Plots
   t_idx = 1:size(ccfs,2)
   #plot(v_grid,ccfs[:,t_idx]./maximum(ccfs[:,t_idx],dims=1),label=:none)
   plot(v_grid,ccfs[:,t_idx],label=:none)
   xlabel!("v (m/s)")
   ylabel!("CCF")
end
# Stage 3: measure a radial velocity per observation from the CCFs and stash
# the mean-subtracted estimates in the timeseries metadata.
if !has_computed_rvs
   if verbose println("# Measuring RVs from CCF.") end
   rvs_ccf_gauss = RvSpectML.RVFromCCF.measure_rv_from_ccf(v_grid,ccfs,fit_type = "gaussian")
   # Store estimated RVs in metadata
   map(i->order_list_timeseries.metadata[i][:rv_est] = rvs_ccf_gauss[i]-mean(rvs_ccf_gauss), 1:length(order_list_timeseries) )
   if write_rvs_to_csv
      using CSV
      CSV.write(joinpath(output_dir,target_subdir * "_rvs_ccf.csv"),DataFrame("Time [MJD]"=>order_list_timeseries.times,"CCF RV [m/s]"=>rvs_ccf_gauss))
   end
   has_computed_rvs = true
end
# Stage 4: per-order CCFs.
# BUG FIXES in this block:
#  - the CCF result was computed but never assigned, yet `order_ccfs` is read
#    below when writing CSVs;
#  - `orders_to_use` was never defined (it only existed in commented-out code);
#  - the write loop iterated `orders_to_use` directly where it needs
#    (index, order) pairs — use enumerate.
if !has_computed_order_ccfs # Compute order CCF's & measure RVs
   tstart = now() # Compute CCFs for each order
   # TODO: Need to mask tellurics somehow
   orders_to_use = RvSpectML.orders_to_use_default(order_list_timeseries.inst)
   # NOTE(review): assumes the returned array's 2nd axis matches
   # orders_to_use order-for-order — confirm against the CCF module.
   order_ccfs = RvSpectML.CCF.calc_order_ccf_chunklist_timeseries(order_list_timeseries, ccf_plan)
   println("# Order CCFs runtime: ", now()-tstart)
   if write_order_ccf_to_csv
      using CSV
      inst = EXPRES.EXPRES2D()
      for (i, order) in enumerate(orders_to_use)
         # Skip orders with no usable lines (all-zero CCF).
         if !(sum(order_ccfs[:,i,:]) > 0) continue end
         local t = Tables.table( order_ccfs[:,i,:]', header=Symbol.(v_grid) )
         CSV.write(joinpath(output_dir,target_subdir * "_ccf_order=" * string(order) * ".csv"),t)
      end
   end
   has_computed_order_ccfs = true
end
# Stage 5: build a template spectrum (mean flux, variance, and flux
# derivatives w.r.t. ln λ) from all observations.
if !has_computed_template
   if verbose println("# Making template spectra.") end
   @time ( spectral_orders_matrix, f_mean, var_mean, deriv, deriv2, order_grids ) = RvSpectML.make_template_spectra(order_list_timeseries)
   if write_template_to_csv
      using CSV
      CSV.write(joinpath(output_dir,target_subdir * "_template.csv"),DataFrame("λ"=>spectral_orders_matrix.λ,"flux_template"=>f_mean,"var"=>var_mean, "dfluxdlnλ_template"=>deriv,"d²fluxdlnλ²_template"=>deriv2))
   end
   if write_spectral_grid_to_jld2
      using JLD2, FileIO
      save(joinpath(output_dir,target_subdir * "_matrix.jld2"), Dict("λ"=>spectral_orders_matrix.λ,"spectra"=>spectral_orders_matrix.flux,"var_spectra"=>spectral_orders_matrix.var,"flux_template"=>f_mean,"var"=>var_mean, "dfluxdlnλ_template"=>deriv,"d²fluxdlnλ²_template"=>deriv2))
   end
   has_computed_template = true
end
# Stage 6: Doppler-constrained PCA on the template-aligned fluxes; reports
# the fraction of variance left unexplained per principal component.
if !has_computed_dcpca
   if verbose println("# Performing Doppler constrained PCA analysis.") end
   using MultivariateStats
   dcpca_out, M = RvSpectML.DCPCA.doppler_constrained_pca(spectral_orders_matrix.flux, deriv, rvs_ccf_gauss)
   frac_var_explained = 1.0.-cumsum(principalvars(M))./tvar(M)
   println("# Fraction of variance explained = ", frac_var_explained[1:min(5,length(frac_var_explained))])
   if write_dcpca_to_csv
      using CSV
      CSV.write(joinpath(output_dir,target_subdir * "_dcpca_basis.csv"), Tables.table(M.proj) )
      CSV.write(joinpath(output_dir,target_subdir * "_dcpca_scores.csv"), Tables.table(dcpca_out) )
   end
   has_computed_dcpca = true
end
# Diagnostic plots for the first-pass DCPCA results.
if make_plots # Ploting results from DCPCA
   using Plots
   # Set parameters for plotting analysis
   plt_order = 42
   plt_order_pix = 4500:5000
   plt = scatter(frac_var_explained, xlabel="Number of PCs", ylabel="Frac Variance Unexplained")
   display(plt)
end
if make_plots
   # Overrides plt_order set above; 13 selects which order's chunk to plot.
   plt_order = 13
   RvSpectML.plot_basis_vectors(order_grids, f_mean, deriv, M.proj, idx_plt = spectral_orders_matrix.chunk_map[plt_order], num_basis=3 )
   #xlims!(5761.5,5766)
end
if make_plots
   RvSpectML.plot_basis_scores(order_list_timeseries.times, rvs_ccf_gauss, dcpca_out, num_basis=3 )
end
if make_plots
   RvSpectML.plot_basis_scores_cor( rvs_ccf_gauss, dcpca_out, num_basis=3)
end
# Stage 7: search the template for spectral lines, fit them in every
# observation, and keep only stable, telluric-free lines.
# NOTE(review): these two overrides force a fresh line search and disable CSV
# output regardless of the flags set at the top — confirm intended.
has_found_lines = false
write_lines_to_csv= false
if !has_found_lines
   if verbose println("# Performing fresh search for lines in template spectra.") end
   # NOTE(review): `ChuckOfSpectrum` looks like a typo of `ChunkOfSpectrum` —
   # verify against the RvSpectML API (file is named *_broken.jl).
   cl = ChunkList(map(grid->ChuckOfSpectrum(spectral_orders_matrix.λ,f_mean, var_mean, grid), spectral_orders_matrix.chunk_map))
   spectral_orders_matrix = nothing # We're done with this, so can release memory
   GC.gc()
   lines_in_template = RvSpectML.LineFinder.find_lines_in_chunklist(cl)
   if verbose println("# Finding above lines in all spectra.") end
   @time fits_to_lines = RvSpectML.LineFinder.fit_all_lines_in_chunklist_timeseries(order_list_timeseries, lines_in_template )
   if verbose println("# Rejecting lines that have telluric contamination at any time.") end
   telluric_info = RvSpectML.LineFinder.find_worst_telluric_in_each_line_fit!(fits_to_lines, order_list_timeseries, expres_data )
   # Look at distribution of standard deviations for line properties
   fit_distrib = fits_to_lines |> @groupby(_.line_id) |>
            @map( { median_a=median(_.fit_a), median_b=median(_.fit_b), median_depth=median(_.fit_depth), median_σ²=median(_.fit_σ²), median_λc=median(_.fit_λc),
                   std_a=std(_.fit_a), std_b=std(_.fit_b), std_depth=std(_.fit_depth), std_σ²=std(_.fit_σ²), std_λc=std(_.fit_λc), min_telluric_model_all_obs=minimum(_.min_telluric_model_this_obs), line_id=_.line_id } ) |>
            @filter(_.min_telluric_model_all_obs == 1.0 ) |> # No telluric in any observations
            @filter(_.std_b < 0.257) |> # ~90th quantile
            @filter( 0.001 < _.median_σ² ) |> # ~99th quantile
            @filter( _.std_σ² < 0.00844) |> # ~99th quantile
            @filter( _.median_depth > 0.05 ) |> # ~99th quantile
            @filter( _.std_depth < 0.11 ) |> # ~95th quantile
            #@filter( -0.25 < _.median_b < 0.25) |> #~5th to 95th percentiles
            DataFrame # ~75th quantile
   # Split lines on the stability of their fitted centers (std_λc).
   good_lines = fit_distrib |> @filter(_.std_λc < 0.0316 ) |> DataFrame
   bad_lines = fit_distrib |> @filter(_.std_λc > 0.0316 ) |> DataFrame
   #scatter(good_lines.median_σ², good_lines.median_depth )
   #scatter!(bad_lines.median_σ², bad_lines.median_depth )
   lines_to_try = lines_in_template[first.(good_lines[!,:line_id]),:]
   if write_lines_to_csv
      using CSV
      CSV.write(joinpath(output_dir,target_subdir * "_linefinder_lines.csv"), lines_in_template )
      CSV.write(joinpath(output_dir,target_subdir * "_linefinder_line_fits.csv"), fits_to_lines )
      CSV.write(joinpath(output_dir,target_subdir * "_linefinder_line_fits_clean.csv"), lines_to_try )
      # Release the large intermediates once they have been written out.
      fits_to_lines = nothing
      telluric_info = nothing
      fit_distrib = nothing
      good_lines = nothing
      bad_lines = nothing
      GC.gc()
   end
   has_found_lines = true
end
# Stage 8: recompute CCFs using the cleaned line list found above.
# BUG FIXES: the guard read `if has_computed_ccfs2`, which could never be true
# (the flag is initialized false), so this stage never ran and the variables
# used by later stages (ccfs2, v_grid2) were never defined. Also the CSV
# header used v_grid instead of v_grid2.
if !has_computed_ccfs2
   if verbose println("# Computing CCFs with new line list.") end
   #mask_shape = RvSpectML.CCF.TopHatCCFMask(order_list_timeseries.inst, scale_factor=tophap_ccf_mask_scale_factor)
   perm = sortperm(lines_to_try.fit_λc)
   line_list2 = RvSpectML.CCF.BasicLineList(lines_to_try.fit_λc[perm], lines_to_try.fit_depth[perm].*lines_to_try.fit_a[perm] )
   ccf_plan2 = RvSpectML.CCF.BasicCCFPlan(mask_shape = mask_shape, line_list=line_list2, midpoint=0.0)
   v_grid2 = RvSpectML.CCF.calc_ccf_v_grid(ccf_plan2)
   @time ccfs2 = RvSpectML.CCF.calc_ccf_chunklist_timeseries(order_list_timeseries, ccf_plan2)
   # Write CCFs to file
   if write_ccf_to_csv
      using CSV
      CSV.write(joinpath(output_dir,target_subdir * "_ccfs2.csv"),Tables.table(ccfs2',header=Symbol.(v_grid2)))
   end
   has_computed_ccfs2 = true
end
# Stage 9: re-measure RVs from the second-pass CCFs.
# BUG FIX: the CSV previously wrote the first-pass `rvs_ccf_gauss` instead of
# the second-pass `rvs_ccf_gauss2` computed in this block.
if !has_computed_rvs2
   if verbose println("# Measuring RVs from CCF.") end
   #rvs_ccf_gauss = [ RvSpectML.RVFromCCF.measure_rv_from_ccf(v_grid,ccfs[:,i],fit_type = "gaussian") for i in 1:length(order_list_timeseries) ]
   rvs_ccf_gauss2 = RvSpectML.RVFromCCF.measure_rv_from_ccf(v_grid2,ccfs2,fit_type = "gaussian")
   # Store estimated RVs in metadata
   map(i->order_list_timeseries.metadata[i][:rv_est] = rvs_ccf_gauss2[i]-mean(rvs_ccf_gauss2), 1:length(order_list_timeseries) )
   if write_rvs_to_csv
      using CSV
      CSV.write(joinpath(output_dir,target_subdir * "_rvs_ccf2.csv"),DataFrame("Time [MJD]"=>order_list_timeseries.times,"CCF RV [m/s]"=>rvs_ccf_gauss2))
   end
   has_computed_rvs2 = true
end
# Stage 10: rebuild the template spectrum using only the cleaned line chunks.
if !has_computed_template2
   # Do we need to expand region around lines to include more of wings for DCPCA to work well?
   # If so, should probably avoid tellurics encroaching on exapnded wavelength region. Sigh.
   chunk_list_df2 = lines_to_try |> @select(:fit_min_λ,:fit_max_λ) |> @rename(:fit_min_λ=>:lambda_lo, :fit_max_λ=>:lambda_hi) |> DataFrame
   chunk_list_timeseries2 = RvSpectML.make_chunk_list_timeseries(expres_data,chunk_list_df2)
   # Check that no NaN's included
   (chunk_list_timeseries2, chunk_list_df2) = RvSpectML.filter_bad_chunks(chunk_list_timeseries2,chunk_list_df2)
   println(size(chunk_list_df2), " vs ", num_chunks(chunk_list_timeseries2) )
   if verbose println("# Making template spectra.") end
   @time ( spectral_orders_matrix2, f_mean2, var_mean2, deriv_2, deriv2_2, order_grids2 ) = RvSpectML.make_template_spectra(chunk_list_timeseries2)
   if write_template_to_csv
      using CSV
      CSV.write(joinpath(output_dir,target_subdir * "_template2.csv"),DataFrame("λ"=>spectral_orders_matrix2.λ,"flux_template"=>f_mean2,"var"=>var_mean2, "dfluxdlnλ_template"=>deriv_2,"d²fluxdlnλ²_template"=>deriv2_2))
   end
   if write_spectral_grid_to_jld2
      using JLD2, FileIO
      save(joinpath(output_dir,target_subdir * "_matrix2.jld2"), Dict("λ"=>spectral_orders_matrix2.λ,"spectra"=>spectral_orders_matrix2.flux,"var_spectra"=>spectral_orders_matrix2.var,"flux_template"=>f_mean2,"var"=>var_mean2, "dfluxdlnλ_template"=>deriv_2,"d²fluxdlnλ²_template"=>deriv2_2))
   end
   has_computed_template2 = true
end
# Stage 11: DCPCA on the second-pass template.
# BUG FIX: the CSV writes used the first-pass results (M.proj / dcpca_out)
# instead of the second-pass M2.proj / dcpca2_out computed in this block.
has_computed_dcpca2 = false # NOTE(review): forces a re-run every time — confirm intended
if !has_computed_dcpca2
   if verbose println("# Performing Doppler constrained PCA analysis.") end
   using MultivariateStats
   dcpca2_out, M2 = RvSpectML.DCPCA.doppler_constrained_pca(spectral_orders_matrix2.flux, deriv_2, rvs_ccf_gauss2)
   frac_var_explained2 = 1.0.-cumsum(principalvars(M2))./tvar(M2)
   println("# Fraction of variance explained = ", frac_var_explained2[1:min(5,length(frac_var_explained2))])
   if write_dcpca_to_csv
      using CSV
      CSV.write(joinpath(output_dir,target_subdir * "_dcpca_basis2.csv"), Tables.table(M2.proj) )
      CSV.write(joinpath(output_dir,target_subdir * "_dcpca_scores2.csv"), Tables.table(dcpca2_out) )
   end
   has_computed_dcpca2 = true
end
# Diagnostic plots for the SECOND-pass DCPCA results.
# BUG FIX: this section previously plotted the first-pass quantities
# (frac_var_explained, f_mean, deriv, M.proj, dcpca_out, rvs_ccf_gauss) and
# indexed spectral_orders_matrix.chunk_map even though spectral_orders_matrix
# was set to `nothing` earlier; use the *2 variables computed above instead.
if make_plots # Plotting results from DCPCA (second pass)
   using Plots
   plt = scatter(frac_var_explained2, xlabel="Number of PCs", ylabel="Frac Variance Unexplained")
   display(plt)
end
if make_plots
   plt_line = 170
   RvSpectML.plot_basis_vectors(order_grids2, f_mean2, deriv_2, M2.proj, idx_plt = spectral_orders_matrix2.chunk_map[plt_line], num_basis=3 )
   #xlims!(5761.5,5766)
end
if make_plots
   RvSpectML.plot_basis_scores(order_list_timeseries.times, rvs_ccf_gauss2, dcpca2_out, num_basis=3 )
end
if make_plots
   RvSpectML.plot_basis_scores_cor( rvs_ccf_gauss2, dcpca2_out, num_basis=3)
end
# Reset stage flags so the whole pipeline re-runs from scratch the next time
# this script is include()d in the same session.
has_loaded_data = false
has_computed_ccfs = false
has_computed_rvs = false
has_computed_template = false
has_computed_dcpca = false
has_found_lines = false
|
{"hexsha": "466b755461cbfe5995636d98fc9b03b262c9f957", "size": 16917, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples_old/old/expres_pipeline_1_broken.jl", "max_stars_repo_name": "alexander-wise/RvSpectML.jl", "max_stars_repo_head_hexsha": "8fd030f4a8b6478193ed36be7a3174cd2ea7b5aa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-08-29T19:40:41.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-02T15:50:41.000Z", "max_issues_repo_path": "examples_old/old/expres_pipeline_1_broken.jl", "max_issues_repo_name": "alexander-wise/RvSpectML.jl", "max_issues_repo_head_hexsha": "8fd030f4a8b6478193ed36be7a3174cd2ea7b5aa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 20, "max_issues_repo_issues_event_min_datetime": "2020-10-15T17:28:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-24T21:21:14.000Z", "max_forks_repo_path": "examples_old/old/expres_pipeline_1_broken.jl", "max_forks_repo_name": "alexander-wise/RvSpectML.jl", "max_forks_repo_head_hexsha": "8fd030f4a8b6478193ed36be7a3174cd2ea7b5aa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-09-02T11:53:52.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-15T18:17:59.000Z", "avg_line_length": 46.9916666667, "max_line_length": 291, "alphanum_fraction": 0.7724182775, "num_tokens": 4978}
|
The Davis International Folk Dancers have fun dancing dances from Bulgaria, Israel, Armenia, Romania, Russia, Greece, Turkey, and other countries. Take this quiz to find out if you might like international folk dancing:
Do you want to learn some fancy moves for your feet?
Are you interested in hearing strange and wonderful music from other parts of the globe?
Do you like a bit of a mental as well as physical challenge?
Are you tired of looking for a partner for couple dances?
Do you enjoy getting in the conga line?
Are you bored with the same old cardio routine?
Do you know your left from your right?
Would you like to be welcomed by a community of friendly people?
If you answered yes to any of these questions, you should give international folk dancing a try. Join us this Sunday at 7 pm at the Davis Art Center, Studio E (enter from the parking lot on Covell and go to the opposite corner of the building) for our ongoing classes. Dances range from simple to challenging. Teaching is from 7 to 9, suitable for all levels, followed by dancing to requests until 10 pm. Bring grit-free, non-marking shoes. Still not sure? Check out these http://ifdvl.org/default.shtml videos of international folk dancing. You might love it! Don't put it off! Get your weekend chores done early so you can unwind with us this Sunday night!
Beginning Folk Dance class
Barbara, the leader of the Davis International Folkdancers, will teach a 4-week beginner class on Tuesday nights, 7:30–8:30 pm, February 5–26, at the Davis Art Center, featuring non-partner dances from the Balkans, Israel, and maybe Japan. The total cost is $38, or $28 for Art Center members.
Special Beginners Night
February 17 will be a night of all easy dances, suitable for novices and first-timers. There will be teaching throughout the evening. Bring a date for a late Valentine's Day celebration. It's at the usual time and place, and, as always, it's free for first-timers.
Note: There will be no class on March 24.
|
{"hexsha": "85f9ed4f8263877ac1b5add3376cda7c1ea0ba9c", "size": 2020, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Davis_International_Folk_Dancers.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Davis_International_Folk_Dancers.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Davis_International_Folk_Dancers.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 87.8260869565, "max_line_length": 662, "alphanum_fraction": 0.7772277228, "num_tokens": 462}
|
########### Importing Libraries ##############
from preprocessing import Functions
import numpy as np
from tqdm import tqdm
import tensorflow as tf
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import ConvLSTM2D,Conv2DTranspose, LayerNormalization, BatchNormalization, TimeDistributed, Conv2D, Flatten, Dense, Dropout
import keras
import pprint
########### Model ##################
class Model(Functions):
    """Model builders for video anomaly detection.

    `anom` builds a spatio-temporal convolutional autoencoder (TimeDistributed
    Conv2D + ConvLSTM2D encoder, ConvLSTM2D + Conv2DTranspose decoder) and
    `anom_class` builds a 3D-convolutional video classifier.
    Preprocessing configuration (`img_size`, `frm_cnt`) comes from Functions.
    """
    def __init__(self):
        Functions.__init__(self)
        self.output1 = None  # encoder output cached by anom()
        self.output = None   # unused placeholder (kept for interface compatibility)
    def anom(self, algo2 = False):
        '''
        Docstring : Spatial and Temporal series based anomaly detection algorithm

        When `algo2` is True, returns only the encoder output tensor (also
        cached on self.output1); otherwise returns the full autoencoder as a
        tf.keras.Model over 10-frame clips.
        '''
        inputs = tf.keras.layers.Input(shape=[10, self.img_size[0], self.img_size[1], 1])
        encode = [
                    self.spatial(64, (5,5), stride = 2, pading="same", cnv=True),
                    self.temporal(64, (3,3), pading='same'),
                    self.temporal(32, (3,3), pading='same')
                 ]
        decode = [
                    self.temporal(64, (3,3), pading='same'),
                    self.spatial(64,(5,5), stride = 2, pading="same", cnv = False),
                    self.spatial(128, (11,11), stride= 2, pading="same", cnv= False)
                 ]
        # FIX: removed an unused `seq = tf.keras.Sequential()` local that was
        # created here and never added to or referenced.
        x = TimeDistributed(Conv2D(128, (11, 11), strides=4, padding="same"), batch_input_shape=(None, 10, self.img_size[0], self.img_size[1], 1))(inputs)
        x = LayerNormalization()(x)
        for enc in encode:
            x = enc(x)
        self.output1 = x  # cache encoder features for algo2 callers
        print(x.shape)
        if algo2:
            return self.output1
        for dec in decode:
            x = dec(x)
        output = TimeDistributed(Conv2D(1, (11, 11), activation="sigmoid", padding="same"))(x)
        return tf.keras.Model(inputs=inputs, outputs = output)
    def spatial(self, filters, filter_size,stride , cnv = True, pading="same"):
        '''
        Docstring : Spatial Encoding

        NOTE(review): `stride` is applied only in the Conv2DTranspose branch
        (cnv=False); the Conv2D branch ignores it — confirm this is intended.
        '''
        seq = tf.keras.Sequential()
        if cnv:
            seq.add(TimeDistributed(Conv2D(filters, filter_size, padding=pading)))
        else:
            seq.add(TimeDistributed(Conv2DTranspose(filters, filter_size, strides=stride, padding=pading)))
        seq.add(LayerNormalization())
        return seq
    def temporal(self, filters, filter_size, pading = "same", return_sequence=True):
        '''
        Docstring : Temporal Encoding (ConvLSTM2D followed by LayerNormalization)
        '''
        seq = tf.keras.Sequential()
        seq.add(ConvLSTM2D(filters, filter_size, padding=pading, return_sequences=return_sequence))
        seq.add(LayerNormalization())
        return seq
    def anom_class(self):
        '''
        Docstring : Video classification model using 3d convolutional
        layers; ends in a 13-way softmax over Dense features.
        '''
        inputs = tf.keras.layers.Input(shape=[self.frm_cnt, self.img_size[0], self.img_size[1], 1])
        x = tf.keras.layers.Conv3D(128, (3,3,3), activation='relu', input_shape = (self.frm_cnt, self.img_size[0], self.img_size[1], 1))(inputs)
        x = tf.keras.layers.MaxPool3D((2,2,2))(x)
        x = LayerNormalization()(x)
        x = tf.keras.layers.Conv3D(32, (3,3,3), activation='relu')(x)
        x = tf.keras.layers.MaxPool3D((2,2,2))(x)
        x = LayerNormalization()(x)
        x = tf.keras.layers.Conv3D(8, (3,3,3), activation='relu')(x)
        x = Dense(20, activation='relu')(x)
        x = Dropout(0.4)(x)
        x = Dense(16, activation='relu')(x)
        x = Dropout(0.5)(x)
        outputs = Dense(13, activation='softmax')(x)
        return tf.keras.Model(inputs = inputs, outputs=outputs)
|
{"hexsha": "2e5398adf872e5810cfbc968132dc8794afd73ed", "size": 3307, "ext": "py", "lang": "Python", "max_stars_repo_path": "Train/model.py", "max_stars_repo_name": "NIK-99/IRIS", "max_stars_repo_head_hexsha": "262ea244ed883266505d6be07e5e6ac77cbe2fae", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-06-25T11:24:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-22T13:11:54.000Z", "max_issues_repo_path": "Train/model.py", "max_issues_repo_name": "NIK-99/IRIS", "max_issues_repo_head_hexsha": "262ea244ed883266505d6be07e5e6ac77cbe2fae", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-10-16T15:40:39.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-16T15:40:39.000Z", "max_forks_repo_path": "Train/model.py", "max_forks_repo_name": "NIK-99/IRIS", "max_forks_repo_head_hexsha": "262ea244ed883266505d6be07e5e6ac77cbe2fae", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-10-16T15:26:42.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-23T18:05:26.000Z", "avg_line_length": 36.7444444444, "max_line_length": 152, "alphanum_fraction": 0.6443906864, "include": true, "reason": "import numpy", "num_tokens": 930}
|
# coding: utf-8
# In[1]:
# import matplotlib
# matplotlib.use('Agg')
# get_ipython().magic(u'matplotlib inline')
# import matplotlib.pyplot as plt
# plt.rcParams['image.cmap'] = 'gray'
from glob import glob
import SimpleITK as sitk
SMALL_SIZE = 14
MEDIUM_SIZE = 16
BIGGER_SIZE = 18
# plt.rc('font', size=SMALL_SIZE) # controls default text sizes
# plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
# plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
# plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
# plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
# plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
# plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
# from ipywidgets import interact, interactive
# from ipywidgets import widgets
from scipy.ndimage.interpolation import map_coordinates
from scipy.ndimage.filters import gaussian_filter
import cv2
import os
import numpy as np
from tqdm import tqdm
import torch as t
# t.backends.cudnn.benchmark = True
t.backends.cudnn.enabled = True
from torch.utils import data
# from torchvision import transforms as tsf
# Serialized PDDCA dataset files ("crp_v2_pool1" per the preprocessing naming;
# presumably cropped v2, pooling level 1 — confirm against preprocessing code).
TRAIN_PATH = './data/trainpddca15_crp_v2_pool1.pth'
TEST_PATH = './data/testpddca15_crp_v2_pool1.pth'
CET_PATH = './data/trainpddca15_cet_crp_v2_pool1.pth'
PET_PATH = './data/trainpddca15_pet_crp_v2_pool1.pth'
# Pin this job to one GPU; must be set before CUDA is initialized.
os.environ["CUDA_VISIBLE_DEVICES"]="7"
# In[2]:
import SimpleITK as sitk
import math
from scipy.ndimage.interpolation import zoom
def getdatamaskfilenames(path, maskname):
    """Collect image and per-structure mask file names for each patient directory.

    For every directory in *path* the image file is ``img_crp_v2.npy``; each
    structure in *maskname* is looked up under ``structures/``.  A missing mask
    is reported on stdout and recorded as ``None`` so list positions stay
    aligned with *maskname*.

    Returns a pair ``(data, masks_data)`` of parallel lists.
    """
    data = []
    masks_data = []
    for pth in path:
        per_patient = []
        for seg in maskname:
            candidate = os.path.join(pth, './structures/'+seg+'_crp_v2.npy')
            if os.path.exists(candidate):
                per_patient.append(candidate)
            else:
                # absent annotation: keep a placeholder so indices line up
                print('missing annotation', seg, pth.split('/')[-1])
                per_patient.append(None)
        data.append(os.path.join(pth, 'img_crp_v2.npy'))
        masks_data.append(per_patient)
    return data, masks_data
def imfit(img, newz, newy, newx):
    """Center-pad a 3-D volume into a zero array of shape (newz, newy, newx).

    The source volume is placed as close to the center of the target array as
    possible; each target dimension must be >= the corresponding source
    dimension (callers pad up to the next multiple of 8).

    Parameters
    ----------
    img : np.ndarray of shape (z, y, x)
    newz, newy, newx : int target sizes

    Returns
    -------
    np.ndarray of the same dtype, with ``img`` centered and zeros elsewhere.
    """
    z, y, x = img.shape
    retimg = np.zeros((newz, newy, newx), img.dtype)
    # BUG FIX: use integer division. Under Python 3 the original "newz/2"
    # produced floats, which are invalid as slice indices (TypeError).
    bz, ez = newz//2, newz//2+1
    # Grow the [bz, ez) window symmetrically until it spans the source size.
    while ez - bz < z:
        if bz - 1 >= 0:
            bz -= 1
        if ez - bz < z:
            if ez + 1 <= z:
                ez += 1
    by, ey = newy//2, newy//2+1
    while ey - by < y:
        if by - 1 >= 0:
            by -= 1
        if ey - by < y:
            if ey + 1 <= y:
                ey += 1
    bx, ex = newx//2, newx//2+1
    while ex - bx < x:
        if bx - 1 >= 0:
            bx -= 1
        if ex - bx < x:
            if ex + 1 <= x:
                ex += 1
    retimg[bz:ez, by:ey, bx:ex] = img
    return retimg
def getdatamask(data, mask_data, debug=False):
    """Load each image with its 9 structure masks and zero-pad to multiples of 8.

    Returns a list of dicts with keys 'img' (torch tensor, z y x), 'mask'
    (list of 9 uint8 numpy arrays) and 'name' (source file name).  A ``None``
    mask file becomes an all-zero mask of the original volume shape.
    """
    items = []
    for fnm, masks in tqdm(zip(data, mask_data)):
        volume = np.load(fnm)  # z y x
        nz, ny, nx = volume.shape
        # pad every axis up to the next multiple of 8 (the UNet pools 3 times)
        tnz = int(math.ceil(nz/8.)*8.)
        tny = int(math.ceil(ny/8.)*8.)
        tnx = int(math.ceil(nx/8.)*8.)
        volume = imfit(volume, tnz, tny, tnx)
        entry = {'img': t.from_numpy(volume), 'mask': [], 'name': str(fnm)}
        for maskfnm in masks:
            if maskfnm is None:
                # missing annotation: substitute an empty mask
                ms = np.zeros((nz, ny, nx), np.uint8)
            else:
                ms = np.load(maskfnm).astype(np.uint8)
                assert ms.min() == 0 and ms.max() == 1
            entry['mask'].append(imfit(ms, tnz, tny, tnx))
        assert len(entry['mask']) == 9
        items.append(entry)
    return items
def process(path='/data/wtzhu/dataset/pddca18/', debug=False):
    """Split the PDDCA dataset into train / optional-train / offsite / onsite
    groups by patient-id range and load images and masks for each group.

    Returns ``(train_data, offsite_test_data)`` where the train part pools the
    required train, optional train and onsite-test patients.
    """
    # patient-id ranges defining the four splits (MICCAI'15/'16 conventions)
    splits = [('0522c0001', '0522c0328'), ('0522c0329', '0522c0479'),
              ('0522c0555', '0522c0746'), ('0522c0788', '0522c0878')]
    names = [[], [], [], []]
    files = [[], [], [], []]
    for pid in os.listdir(path):
        for k, (lo, hi) in enumerate(splits):
            if lo <= pid <= hi:
                names[k].append(pid)
                files[k].append(os.path.join(path, pid))
                break
        else:
            # id outside every known range: fail loudly
            print(pid)
            assert 1 == 0
    trfnmlst, trfnmlstopt, tefnmlstoff, tefnmlst = names
    train_files, train_filesopt, test_filesoff, test_files = files
    print('train file names', trfnmlst)
    print('optional train file names', trfnmlstopt)
    print('offsite test file names', tefnmlstoff)
    print('onsite test file names', tefnmlst)
    print('Total train files', len(train_files), 'total test files', len(test_files))
    print('Train optional files', len(train_filesopt), 'test optional files', len(test_filesoff))
    assert len(trfnmlst) == 25 and len(trfnmlstopt) == 8 and len(tefnmlstoff) == 10 and len(tefnmlst) == 5
    assert len(train_files) == 25 and len(train_filesopt) == 8 and len(test_filesoff) == 10 and len(test_files) == 5
    structurefnmlst = ('BrainStem', 'Chiasm', 'Mandible', 'OpticNerve_L', 'OpticNerve_R', 'Parotid_L', 'Parotid_R', 'Submandibular_L', 'Submandibular_R')
    train_data, train_masks_data = getdatamaskfilenames(train_files, structurefnmlst)
    train_dataopt, train_masks_dataopt = getdatamaskfilenames(train_filesopt, structurefnmlst)
    test_data, test_masks_data = getdatamaskfilenames(test_files, structurefnmlst)
    test_dataoff, test_masks_dataoff = getdatamaskfilenames(test_filesoff, structurefnmlst)
    return getdatamask(train_data+train_dataopt+test_data, train_masks_data+train_masks_dataopt+test_masks_data,debug=debug), getdatamask(test_dataoff, test_masks_dataoff,debug=debug)
def processCET(path='/data/wtzhu/dataset/HNCetuximabclean/', debug=False):
    """Load every patient directory under *path* as additional training data."""
    pids = list(os.listdir(path))
    train_files = [os.path.join(path, pid) for pid in pids]
    print('train file names', pids)
    print('Total train files', len(train_files))
    structurefnmlst = ('BrainStem', 'Chiasm', 'Mandible', 'OpticNerve_L', 'OpticNerve_R', 'Parotid_L', 'Parotid_R', 'Submandibular_L', 'Submandibular_R')
    train_data, train_masks_data = getdatamaskfilenames(train_files, structurefnmlst)
    return getdatamask(train_data, train_masks_data,debug=debug)
# Build and cache the preprocessed datasets (skipped when the .pth files exist).
# Three nested caches: PDDCA only, PDDCA+Cetuximab, PDDCA+Cetuximab+PET-CT.
if not os.path.isfile(TRAIN_PATH):
    # base PDDCA train/test split
    train_data, test_data = process('/data/wtzhu/dataset/pddca18/')
    print('use train', len(train_data), 'use test', len(test_data))
    t.save(train_data, TRAIN_PATH)
    t.save(test_data, TEST_PATH)
if not os.path.isfile(CET_PATH):
    # PDDCA plus the Cetuximab cohort as extra training data
    train_data, test_data = process('/data/wtzhu/dataset/pddca18/')
    print('use train', len(train_data), 'use test', len(test_data))
    data = processCET('/data/wtzhu/dataset/HNCetuximabclean/')
    print('use ', len(data))
    t.save(data+train_data, CET_PATH)
if not os.path.isfile(PET_PATH):
    # PDDCA plus Cetuximab plus PET-CT cohorts (the set actually trained on below)
    train_data, test_data = process('/data/wtzhu/dataset/pddca18/')
    print('use train', len(train_data), 'use test', len(test_data))
    data = processCET('/data/wtzhu/dataset/HNCetuximabclean/')
    print('use ', len(data))
    petdata = processCET('/data/wtzhu/dataset/HNPETCTclean/')
    print('use ', len(petdata))
    t.save(data+train_data+petdata, PET_PATH)
# In[3]:
class DatasetStg1():
    """Dataset over the cached .pth volumes with optional elastic augmentation.

    Each item is ``(image, masks, flag)`` where image is a (1, z, y, x) float
    tensor, masks is a (10, z, y, x) uint8 tensor (channel 0 = background,
    channels 1..9 = the nine structures), and flag is ``True`` in test mode or
    a length-10 float vector marking which channels survived augmentation.
    """
    def __init__(self,path, istranform=True, alpha=1000, sigma=30, alpha_affine=0.04, istest=False):
        # path: .pth file produced by the preprocessing step above
        # istranform: apply random affine + elastic deformation (training mode)
        # alpha / sigma: elastic displacement magnitude / Gaussian smoothing
        # alpha_affine: relative magnitude of the random affine jitter
        # istest: skip the "all masks present" sanity check
        self.datas = t.load(path)
        self.ist = istranform
        self.alpha = alpha
        self.sigma = sigma
        self.alpha_affine = alpha_affine
        self.istest = istest
    def __getitem__(self, index):
        data = self.datas[index]
        img = data['img'].numpy().astype(np.float32)
        if not self.istest:
            # training items must carry all nine structure masks
            for mask in data['mask']: # for multi-task
                if mask is None:
                    print(data['name'])
                    assert 1 == 0
        if not self.ist: #[::2, ::2, ::2]
            # no augmentation: stack masks, derive background as NOT(any mask)
            masklst = []
            for mask in data['mask']:
                if mask is None: mask = np.zeros((1,img.shape[0],img.shape[1],img.shape[2])).astype(np.uint8)
                masklst.append(mask.astype(np.uint8).reshape((1,img.shape[0],img.shape[1],img.shape[2])))
            mask0 = np.zeros_like(masklst[0]).astype(np.uint8)
            for mask in masklst:
                mask0 = np.logical_or(mask0, mask).astype(np.uint8)
            mask0 = 1 - mask0
            return t.from_numpy(img.reshape((1, img.shape[0], img.shape[1], img.shape[2]))), t.from_numpy(np.concatenate([mask0]+masklst, axis=0)), True
        # augmentation path: transform image and masks jointly so they stay aligned
        im_merge = np.concatenate([img[...,None]]+[mask.astype(np.float32)[...,None] for mask in data['mask']], axis=3)
        # Apply transformation on image
        im_merge_t, new_img = self.elastic_transform3Dv2(im_merge,self.alpha,self.sigma,min(im_merge.shape[1:-1])*self.alpha_affine)
        # Split image and mask ::2, ::2, ::2
        im_t = im_merge_t[...,0]
        im_mask_t = im_merge_t[..., 1:].astype(np.uint8).transpose(3, 0, 1, 2)
        mask0 = np.zeros_like(im_mask_t[0, :, :, :]).reshape((1,)+im_mask_t.shape[1:]).astype(np.uint8)
        im_mask_t_lst = []
        flagvect = np.ones((10,), np.float32)
        retflag = True
        for i in range(9):
            im_mask_t_lst.append(im_mask_t[i,:,:,:].reshape((1,)+im_mask_t.shape[1:]))
            if im_mask_t[i,:,:,:].max() != 1:
                # structure deformed out of the volume: zero its loss weight
                retflag = False
                flagvect[i+1] = 0
            mask0 = np.logical_or(mask0, im_mask_t[i,:,:,:]).astype(np.uint8)
        if not retflag: flagvect[0] = 0
        mask0 = 1 - mask0
        return t.from_numpy(im_t.reshape((1,)+im_t.shape[:3])), t.from_numpy(np.concatenate([mask0]+im_mask_t_lst, axis=0)), flagvect
    def __len__(self):
        return len(self.datas)
    def elastic_transform3Dv2(self, image, alpha, sigma, alpha_affine, random_state=None):
        """Elastic deformation of images as described in [Simard2003]_ (with modifications).
        .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
             Convolutional Neural Networks applied to Visual Document Analysis", in
             Proc. of the International Conference on Document Analysis and
             Recognition, 2003.
        Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
        From https://www.kaggle.com/bguberfain/elastic-transform-for-data-augmentation

        Applies one random 2-D affine + elastic warp per volume, reused for every
        z-slice (channel 0 interpolated bilinearly, mask channels 1..9 nearest).
        Returns (warped volume, affine-only volume).
        """
        # affine and deformation must be slice by slice and fixed for slices
        if random_state is None:
            random_state = np.random.RandomState(None)
        shape = image.shape # image is contatenated, the first channel [:,:,:,0] is the image, the second channel
        # [:,:,:,1] is the mask. The two channel are under the same tranformation.
        shape_size = shape[:-1] # z y x
        # Random affine
        shape_size_aff = shape[1:-1] # y x
        center_square = np.float32(shape_size_aff) // 2
        square_size = min(shape_size_aff) // 3
        # three reference points jittered by up to +/- alpha_affine pixels
        pts1 = np.float32([center_square + square_size, [center_square[0]+square_size, center_square[1]-square_size], center_square - square_size])
        pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
        M = cv2.getAffineTransform(pts1, pts2)
        new_img = np.zeros_like(image)
        for i in range(shape[0]):
            new_img[i,:,:,0] = cv2.warpAffine(image[i,:,:,0], M, shape_size_aff[::-1], borderMode=cv2.BORDER_CONSTANT, borderValue=0.)
            for j in range(1, 10):
                # nearest-neighbour keeps mask labels binary
                new_img[i,:,:,j] = cv2.warpAffine(image[i,:,:,j], M, shape_size_aff[::-1], flags=cv2.INTER_NEAREST, borderMode=cv2.BORDER_TRANSPARENT, borderValue=0)
        # smooth random displacement field shared by all slices
        dx = gaussian_filter((random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
        dy = gaussian_filter((random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
        x, y = np.meshgrid(np.arange(shape_size_aff[1]), np.arange(shape_size_aff[0]))
        indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1))
        new_img2 = np.zeros_like(image)
        for i in range(shape[0]):
            new_img2[i,:,:,0] = map_coordinates(new_img[i,:,:,0], indices, order=1, mode='constant').reshape(shape[1:-1])
            for j in range(1, 10):
                new_img2[i,:,:,j] = map_coordinates(new_img[i,:,:,j], indices, order=0, mode='constant').reshape(shape[1:-1])
        return np.array(new_img2), new_img
# Train on the pooled PDDCA+Cetuximab+PET-CT cache with augmentation; evaluate
# on the offsite PDDCA test cache without augmentation. batch_size=1 because
# volumes have heterogeneous shapes.
traindataset = DatasetStg1(PET_PATH, istranform=True)
traindataloader = t.utils.data.DataLoader(traindataset,num_workers=10,batch_size=1, shuffle=True)
testdataset = DatasetStg1(TEST_PATH, istranform=False)
testdataloader = t.utils.data.DataLoader(testdataset,num_workers=10,batch_size=1)
print(len(traindataloader), len(testdataloader))
# In[4]:
# sub-parts of the U-Net model
from torch import nn
import torch.nn.functional as F
from scipy.spatial.distance import dice
def conv3x3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3x3 Conv3d with padding 1 (shape-preserving at stride 1)."""
    return nn.Conv3d(in_planes, out_planes,
                     kernel_size=3, stride=stride, padding=1, bias=False)
class BasicBlock3D(nn.Module):
    """Residual 3-D block: conv-bn-relu-conv-bn plus an identity or 1x1x1
    projection skip, followed by a final LeakyReLU."""
    def __init__(self, inplanes, planes, stride=1):
        super(BasicBlock3D, self).__init__()
        self.conv1 = conv3x3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm3d(planes)
        self.relu = nn.LeakyReLU(inplace=True)
        self.conv2 = conv3x3x3(planes, planes)
        self.bn2 = nn.BatchNorm3d(planes)
        if inplanes == planes:
            # channel counts agree: plain identity shortcut
            self.downsample = lambda x: x
        else:
            # 1x1x1 projection to match channel count on the skip path
            self.downsample = nn.Sequential(
                nn.Conv3d(inplanes, planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm3d(planes))
        self.stride = stride
    def forward(self, x):
        shortcut = self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y += shortcut
        return self.relu(y)
def Deconv3x3x3(in_planes, out_planes, stride=2):
    """Return a kernel-2 transposed Conv3d; doubles spatial size at stride 2."""
    deconv = nn.ConvTranspose3d(in_planes, out_planes, kernel_size=2, stride=stride)
    return deconv
class SELayer3D(nn.Module):
    """Squeeze-and-excitation gate for 5-D feature maps (N, C, D, H, W):
    global-average-pool each channel, pass through a two-layer bottleneck MLP
    with sigmoid output, and rescale the input channel-wise."""
    def __init__(self, channel, reduction=15):
        super(SELayer3D, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool3d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction),
            nn.LeakyReLU(inplace=True),
            nn.Linear(channel // reduction, channel),
            nn.Sigmoid())
    def forward(self, x):
        n, c = x.size(0), x.size(1)
        squeeze = self.avg_pool(x).view(n, c)
        excite = self.fc(squeeze).view(n, c, 1, 1, 1)
        return x * excite
class SEBasicBlock3D(nn.Module):
    """Residual 3-D block whose main path ends in a squeeze-and-excitation
    gate before the skip connection is added."""
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None, reduction=15):
        # `downsample` is accepted for signature compatibility but the skip
        # path is always derived from the channel counts below.
        super(SEBasicBlock3D, self).__init__()
        self.conv1 = conv3x3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm3d(planes)
        self.relu = nn.LeakyReLU(inplace=True)
        self.conv2 = conv3x3x3(planes, planes, 1)
        self.bn2 = nn.BatchNorm3d(planes)
        self.se = SELayer3D(planes, reduction)
        if inplanes == planes:
            self.downsample = lambda x: x  # identity skip
        else:
            self.downsample = nn.Sequential(
                nn.Conv3d(inplanes, planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm3d(planes))
        self.stride = stride
    def forward(self, x):
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.se(self.bn2(self.conv2(y)))
        y += self.downsample(x)
        return self.relu(y)
class UpSEBasicBlock3D(nn.Module):
    """Decoder block: optionally 2x-upsample the deep feature with a deconv,
    concatenate it with the encoder skip feature, then run an SE residual unit
    over the concatenation."""
    def __init__(self, inplanes1, inplanes2, planes, stride=1, downsample=None, reduction=16):
        super(UpSEBasicBlock3D, self).__init__()
        merged = inplanes1 + inplanes2
        if stride == 2:
            # halve the channel count while doubling spatial size
            self.deconv1 = Deconv3x3x3(inplanes1, inplanes1//2)
            merged = inplanes1 // 2 + inplanes2
        self.stride = stride
        self.conv1 = conv3x3x3(merged, planes)
        self.bn1 = nn.BatchNorm3d(planes)
        self.relu = nn.LeakyReLU(inplace=True)
        self.conv2 = conv3x3x3(planes, planes)
        self.bn2 = nn.BatchNorm3d(planes)
        self.se = SELayer3D(planes, reduction)
        if merged == planes:
            self.downsample = lambda x: x  # identity skip
        else:
            self.downsample = nn.Sequential(
                nn.Conv3d(merged, planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm3d(planes))
        self.stride = stride
    def forward(self, x1, x2):
        # x1: deep feature (upsampled when stride == 2); x2: encoder skip
        if self.stride == 2:
            x1 = self.deconv1(x1)
        merged = t.cat([x1, x2], dim=1)
        shortcut = self.downsample(merged)
        y = self.relu(self.bn1(self.conv1(merged)))
        y = self.se(self.bn2(self.conv2(y)))
        y += shortcut
        return self.relu(y)
class UpBasicBlock3D(nn.Module):
    """Decoder block without SE: optional 2x deconv of the deep feature,
    concatenation with the skip feature, then a plain residual unit."""
    def __init__(self, inplanes1, inplanes2, planes, stride=2):
        super(UpBasicBlock3D, self).__init__()
        merged = inplanes1 + inplanes2
        if stride == 2:
            self.deconv1 = Deconv3x3x3(inplanes1, inplanes1//2)
            merged = inplanes1//2 + inplanes2
        self.stride = stride
        self.conv1 = conv3x3x3(merged, planes)
        self.bn1 = nn.BatchNorm3d(planes)
        self.relu = nn.LeakyReLU(inplace=True)
        self.conv2 = conv3x3x3(planes, planes)
        self.bn2 = nn.BatchNorm3d(planes)
        if merged == planes:
            self.downsample = lambda x: x
        else:
            # NOTE: this projection uses a 3x3x3 kernel at stride 1, unlike the
            # 1x1x1 projection used by the other blocks in this file.
            self.downsample = nn.Sequential(
                nn.Conv3d(merged, planes, kernel_size=3, stride=1, padding=1, bias=False),
                nn.BatchNorm3d(planes))
        self.stride = stride
    def forward(self, x1, x2):
        # x1: deep feature (upsampled when stride == 2); x2: skip feature
        if self.stride == 2:
            x1 = self.deconv1(x1)
        merged = t.cat([x1, x2], dim=1)
        shortcut = self.downsample(merged)
        y = self.relu(self.bn1(self.conv1(merged)))
        y = self.bn2(self.conv2(y))
        y += shortcut
        return self.relu(y)
class ResNetUNET3D(nn.Module):
    """3-D UNet with residual (SE) blocks: one stride-2 stem, three encoder
    stages, three decoder stages with skip connections, and a final deconv
    back to input resolution followed by a softmax over `num_classes`.

    NOTE: `self.inplane` is mutated between `_make_layer` calls and must be
    reset before each decoder stage — the statement order below is load-bearing.
    """
    def __init__(self, block, upblock, upblock1, n_size, num_classes=2, in_channel=1): # BasicBlock, 3
        # block: encoder residual block class (e.g. SEBasicBlock3D)
        # upblock: decoder block with SE (e.g. UpSEBasicBlock3D)
        # upblock1: final decoder block (e.g. UpBasicBlock3D)
        # n_size: number of residual blocks per encoder stage
        super(ResNetUNET3D, self).__init__()
        self.inplane = 28
        # stem: the only spatial downsampling (stride 2)
        self.conv1 = nn.Conv3d(in_channel, self.inplane, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm3d(self.inplane)
        self.relu = nn.LeakyReLU(inplace=True)
        # encoder: channel widths 30 -> 32 -> 34, all at stride 1
        self.layer1 = self._make_layer(block, 30, blocks=n_size, stride=1)
        self.layer2 = self._make_layer(block, 32, blocks=n_size, stride=1)
        self.layer3 = self._make_layer(block, 34, blocks=n_size, stride=1)
        # decoder: each upblock fuses the deep feature with an encoder skip
        self.layer4 = upblock(34, 32, 32, stride=1)
        self.inplane = 32
        self.layer5 = self._make_layer(block, 32, blocks=n_size-1, stride=1)
        self.layer6 = upblock(32, 30, 30, stride=1)
        self.inplane = 30
        self.layer7 = self._make_layer(block, 30, blocks=n_size-1, stride=1)
        self.layer8 = upblock(30, 28, 28, stride=1)
        self.inplane = 28
        self.layer9 = self._make_layer(block, 28, blocks=n_size-1, stride=1)
        self.inplane = 28
        # final upsample back to input resolution, fused with the raw input
        self.layer10 = upblock1(28, 1, 14, stride=2)
        self.layer11 = nn.Sequential(#nn.Conv3d(16, 14, kernel_size=3, stride=1, padding=1, bias=True),
                                     #nn.ReLU(inplace=True),
                                     nn.Conv3d(14, num_classes, kernel_size=3, stride=1, padding=1, bias=True))
        # self.outconv = nn.ConvTranspose3d(self.inplane, num_classes, 2, stride=2)
        self.initialize()
    def initialize(self):
        """Kaiming-initialize convolutions; unit-gamma/zero-beta BatchNorm."""
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm3d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.ConvTranspose3d):
                nn.init.kaiming_normal_(m.weight)
    def _make_layer(self, block, planes, blocks, stride):
        """Stack `blocks` residual blocks, widening from self.inplane to planes."""
        strides = [stride] + [1] * (blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.inplane, planes, stride))
            self.inplane = planes
        return nn.Sequential(*layers)
    def forward(self, x0):
        # x0: (N, in_channel, z, y, x); returns per-voxel class probabilities
        x = self.conv1(x0) # 16 1/2
        x = self.bn1(x)
        x1 = self.relu(x)
        x2 = self.layer1(x1) # 16 1/4 16 1/4 res 16 1/4 - 16 1/4 16 1/4 res 16 1/4 - 16 1/4 16 1/4 res 16 1/4
        x3 = self.layer2(x2) # 32 1/8 32 1/8 res 32 1/8 - 32 1/8 32 1/8 res 32 1/8 - 32 1/8 32 1/8 res 32 1/8
        x4 = self.layer3(x3) # 64 1/16 64 1/16 res 64 1/16 - 64 1/16 64 1/16 res 64 1/16 - 64 1/16 64 1/16 res 64 1/16
        # print('x4', x4.size())
        x5 = self.layer4(x4, x3) # 16 1/8 48 1/8 32 1/8 32 1/8 res 32 1/8 - 32 1/8 32 1/8 res 32 1/8 - 32 1/8 32 1/8 res 32 1/8
        x5 = self.layer5(x5)
        x6 = self.layer6(x5, x2) # 8 1/4 24 1/4 16 1/4 16 1/4 res 16 1/4 - 16 1/4 16 1/4 res 16 1/4 - 16 1/4 16 1/4 res 16 1/4
        x6 = self.layer7(x6)
        x7 = self.layer8(x6, x1) # 4 1/2 20 1/2 16 1/2 16 1/2 res 16 1/2 - 16 1/2 16 1/2 res 16 1/2 - 16 1/2 16 1/2 res 16 1/2
        x7 = self.layer9(x7)
        x8 = self.layer10(x7, x0)
        x9 = self.layer11(x8)
        # print(x0.size(), x.size(), x1.size(), x2.size(), x3.size(), x4.size(), x5.size(), x6.size(), \
        #       x7.size(), x8.size(), x9.size())
        return F.softmax(x9, dim=1)
        # out = self.outconv(x7)
        # return F.softmax(out, dim=1)
# Ref: salehi17, "Twersky loss function for image segmentation using 3D FCDN"
# -> the score is computed for each class separately and then summed
# alpha=beta=0.5 : dice coefficient
# alpha=beta=1 : tanimoto coefficient (also known as jaccard)
# alpha+beta=1 : produces set of F*-scores
# implemented by E. Moebel, 06/04/18
def tversky_loss_wmask(y_pred, y_true, flagvec):
    """Per-class Tversky loss with alpha=beta=0.5 (i.e. soft Dice), weighted
    and masked per class by *flagvec*.

    y_pred: (N, C, z, y, x) class probabilities; y_true: matching one-hot
    target; flagvec: length-C weights (0 disables a class).  Assumes CUDA
    tensors.  Returns sum(flagvec) minus the weighted Tversky scores.
    """
    alpha = 0.5
    beta = 0.5
    ones = t.ones_like(y_pred) #K.ones(K.shape(y_true))
    prob_fg = y_pred            # probability a voxel belongs to class i
    prob_bg = ones - y_pred     # probability it does not
    gt_fg = y_true.type(t.cuda.FloatTensor)
    gt_bg = ones - gt_fg
    # reduce over batch and the three spatial axes, keeping the class axis
    tp = t.sum(t.sum(t.sum(t.sum(prob_fg*gt_fg, 4),3),2),0)
    fp = t.sum(t.sum(t.sum(t.sum(prob_fg*gt_bg,4),3),2),0)
    fn = t.sum(t.sum(t.sum(t.sum(prob_bg*gt_fg,4),3),2),0)
    den = tp + alpha*fp + beta*fn
    T = t.sum((tp * flagvec.cuda())/(den+1e-5))
    return t.sum(flagvec.cuda())-T
def focal(y_pred, y_true, flagvec):
    """Per-class focal loss (gamma=2) averaged over batch and space, weighted
    per class by *flagvec*.  Assumes CUDA tensors; clamps probabilities to
    [1e-6, 1] before the log for numerical safety."""
    modulated = t.log(t.clamp(y_pred,1e-6,1))*y_true.type(t.cuda.FloatTensor)*t.pow(1-y_pred,2)
    per_class = - t.mean(t.mean(t.mean(t.mean(modulated,4),3),2),0) * flagvec.cuda()
    return t.sum(per_class)
def caldice(y_pred, y_true):
    """Per-structure Dice scores for one batch.

    Parameters
    ----------
    y_pred : tensor (N, 10, z, y, x) of class probabilities
        (channel 0 is background). Hardened by argmax over the class axis.
    y_true : CPU tensor (N, 10, z, y, x) of one-hot ground-truth masks.

    Returns
    -------
    list of 9 floats: Dice for classes 1..9; ``-1`` marks a class that is
    absent from both prediction and ground truth (Dice undefined).
    """
    y_pred = y_pred.data.cpu().numpy().transpose(1,0,2,3,4) # inference should be arg max
    y_pred = np.argmax(y_pred, axis=0).squeeze() # z y x
    y_true = y_true.data.numpy().transpose(1,0,2,3,4).squeeze() # .cpu()
    avgdice = []
    # One pass per foreground class replaces the nine copy-pasted stanzas.
    for cls in range(1, 10):
        pred_mask = y_pred == cls
        true_mask = y_true[cls, :, :, :]
        denom = pred_mask.sum() + true_mask.sum()
        if denom == 0:
            avgdice.append(-1)  # class absent everywhere: Dice undefined
        else:
            avgdice.append(2.*(np.logical_and(pred_mask, true_mask).sum()) / (1.0*denom))
    # Loop variable renamed from `dice` to avoid shadowing
    # scipy.spatial.distance.dice imported at the top of the file.
    for score in avgdice:
        if score != -1:
            assert 0 <= score <= 1
    return avgdice
# Instantiate AnatomyNet: SE residual encoder/decoder, 2 blocks per stage,
# 10 output classes (background + 9 head-and-neck structures), on GPU.
model = ResNetUNET3D(SEBasicBlock3D, UpSEBasicBlock3D, UpBasicBlock3D, 2, num_classes=9+1, in_channel=1).cuda()
# Per-class loss weights (index 0 = background); small structures get larger
# weights to counter class imbalance.
lossweight = np.array([2.22, 1.31, 1.99, 1.13, 1.93, 1.93, 1.0, 1.0, 1.90, 1.98], np.float32)
# Optional warm start from a previous checkpoint (disabled):
# pretraind_dict = t.load('./model/unet10pool3e2e_seres18_conc_pet_wmask_2_rmsp_1')["weight"]
# model_dict = model.state_dict()
# pretraind_dict = {k: v for k, v in pretraind_dict.items() if k in model_dict}
# model_dict.update(pretraind_dict)
# model.load_state_dict(pretraind_dict)
# Prefix for per-class best checkpoints; the class index (1..9) is appended.
savename = './model/unet10pool3e2e_seres18_conc_pet_wmask_2_rmsp_lru_1_'
# In[5]:
# Phase 1: train 150 epochs with RMSprop on the weighted Tversky loss.
# After every epoch, evaluate per-class Dice on the offsite test set and
# checkpoint the model separately for each class whose best Dice improved.
optimizer = t.optim.RMSprop(model.parameters(),lr = 5e-4)
# best test Dice seen so far per foreground class (index 0 -> class 1)
maxloss = [0 for _ in range(9)]
for epoch in range(150):
    tq = tqdm(traindataloader, desc='loss', leave=True)
    trainloss = 0
    for x_train, y_train, flagvec in tq:
        x_train = t.autograd.Variable(x_train.cuda())
        y_train = t.autograd.Variable(y_train.cuda())
        optimizer.zero_grad()
        o = model(x_train)
        # flagvec zeroes classes lost during augmentation; lossweight rebalances
        loss = tversky_loss_wmask(o, y_train, flagvec*t.from_numpy(lossweight))
        loss.backward()
        optimizer.step()
        tq.set_description("epoch %i loss %f" % (epoch, loss.item()))
        tq.refresh() # to show immediately the update
        trainloss += loss.item()
        # free GPU memory early; volumes are large
        del loss, x_train, y_train, o
    testtq = tqdm(testdataloader, desc='test loss', leave=True)
    testloss = [0 for _ in range(9)]
    for x_test, y_test, _ in testtq:
        # print(x_test.numpy().shape)
        with t.no_grad():
            x_test = t.autograd.Variable(x_test.cuda())
            # y_test = t.autograd.Variable(y_test.cuda())
            o = model(x_test)
            loss = caldice(o, y_test)
            testtq.set_description("epoch %i test loss %f" % (epoch, sum(loss)/9))
            testtq.refresh() # to show immediately the update
            testloss = [l+tl for l,tl in zip(loss, testloss)]
            del x_test, y_test, o
    testloss = [l / len(testtq) for l in testloss]
    # per-class checkpointing: save whenever a class reaches a new best Dice
    for cls in range(9):
        if maxloss[cls] < testloss[cls]:
            maxloss[cls] = testloss[cls]
            state = {"epoch": epoch, "weight": model.state_dict()}
            t.save(state, savename+str(cls+1))
            # model.load_state_dict(t.load(savename)["weight"])
            # t.save(model, savename+str(cls+1))
    print('epoch %i TRAIN loss %.4f' % (epoch, trainloss/len(tq)))
    print('test loss %.4f, %.4f, %.4f, %.4f, %.4f, %.4f, %.4f, %.4f, %.4f' % tuple(testloss))
    print('best test loss %.4f, %.4f, %.4f, %.4f, %.4f, %.4f, %.4f, %.4f, %.4f' % tuple(maxloss))
    # every 10 epochs, also report train-set Dice (skipping absent classes)
    if epoch % 10 == 0:
        testloss = [0 for _ in range(9)]
        ntest = [0 for _ in range(9)]
        testtq = tqdm(traindataloader, desc='loss', leave=True)
        for x_test, y_test, _ in testtq:
            # print(x_test.numpy().shape)
            with t.no_grad():
                x_test = t.autograd.Variable(x_test.cuda())
                # y_test = t.autograd.Variable(y_test.cuda())
                o = model(x_test)
                loss = caldice(o, y_test)
                testtq.set_description("epoch %i test loss %f" % (epoch, sum(loss)/9))
                testtq.refresh() # to show immediately the update
                # -1 marks "class absent from both": exclude it from the average
                testloss = [l+tl if l != -1 else tl for l,tl in zip(loss, testloss)]
                ntest = [n+1 if l != -1 else n for l, n in zip(loss, ntest)]
                del x_test, y_test, o
        testloss = [l / n for l,n in zip(testloss, ntest)]
        print('train loss %.4f, %.4f, %.4f, %.4f, %.4f, %.4f, %.4f, %.4f, %.4f' % tuple(testloss))
# In[6]:
# Phase 2: fine-tune for 50 epochs with SGD + momentum at a lower learning
# rate, keeping the same per-class best-Dice checkpointing as phase 1.
optimizer = t.optim.SGD(model.parameters(), 1e-4, momentum = 0.9)#, weight_decay = 1e-4)
for epoch in range(50):
    tq = tqdm(traindataloader, desc='loss', leave=True)
    trainloss = 0
    for x_train, y_train, flagvec in tq:
        x_train = t.autograd.Variable(x_train.cuda())
        y_train = t.autograd.Variable(y_train.cuda())
        optimizer.zero_grad()
        o = model(x_train)
        # flagvec zeroes classes lost during augmentation; lossweight rebalances
        loss = tversky_loss_wmask(o, y_train, flagvec*t.from_numpy(lossweight))
        loss.backward()
        optimizer.step()
        tq.set_description("epoch %i loss %f" % (epoch, loss.item()))
        tq.refresh() # to show immediately the update
        trainloss += loss.item()
        # free GPU memory early; volumes are large
        del loss, x_train, y_train, o
    testtq = tqdm(testdataloader, desc='test loss', leave=True)
    testloss = [0 for _ in range(9)]
    for x_test, y_test, _ in testtq:
        # print(x_test.numpy().shape)
        with t.no_grad():
            x_test = t.autograd.Variable(x_test.cuda())
            # y_test = t.autograd.Variable(y_test.cuda())
            o = model(x_test)
            loss = caldice(o, y_test)
            testtq.set_description("epoch %i test loss %f" % (epoch, sum(loss)/9))
            testtq.refresh() # to show immediately the update
            testloss = [l+tl for l,tl in zip(loss, testloss)]
            del x_test, y_test, o
    testloss = [l / len(testtq) for l in testloss]
    # per-class checkpointing continues against the phase-1 bests (maxloss)
    for cls in range(9):
        if maxloss[cls] < testloss[cls]:
            maxloss[cls] = testloss[cls]
            state = {"epoch": epoch, "weight": model.state_dict()}
            t.save(state, savename+str(cls+1))
            # model.load_state_dict(t.load(savename)["weight"])
            # t.save(model, savename+str(cls+1))
    print('epoch %i TRAIN loss %.4f' % (epoch, trainloss/len(tq)))
    print('test loss %.4f, %.4f, %.4f, %.4f, %.4f, %.4f, %.4f, %.4f, %.4f' % tuple(testloss))
    print('best test loss %.4f, %.4f, %.4f, %.4f, %.4f, %.4f, %.4f, %.4f, %.4f' % tuple(maxloss))
    # every 10 epochs, also report train-set Dice (skipping absent classes)
    if epoch % 10 == 0:
        testloss = [0 for _ in range(9)]
        ntest = [0 for _ in range(9)]
        testtq = tqdm(traindataloader, desc='loss', leave=True)
        for x_test, y_test, _ in testtq:
            # print(x_test.numpy().shape)
            with t.no_grad():
                x_test = t.autograd.Variable(x_test.cuda())
                # y_test = t.autograd.Variable(y_test.cuda())
                o = model(x_test)
                loss = caldice(o, y_test)
                testtq.set_description("epoch %i test loss %f" % (epoch, sum(loss)/9))
                testtq.refresh() # to show immediately the update
                # -1 marks "class absent from both": exclude it from the average
                testloss = [l+tl if l != -1 else tl for l,tl in zip(loss, testloss)]
                ntest = [n+1 if l != -1 else n for l, n in zip(loss, ntest)]
                del x_test, y_test, o
        testloss = [l / n for l,n in zip(testloss, ntest)]
        print('train loss %.4f, %.4f, %.4f, %.4f, %.4f, %.4f, %.4f, %.4f, %.4f' % tuple(testloss))
|
{"hexsha": "76f68cd99834ecd8279cc34214e510eee4c96dc8", "size": 33893, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/AnatomyNet.py", "max_stars_repo_name": "wentaozhu/AnatomyNet-for-anatomical-segmentation", "max_stars_repo_head_hexsha": "296227838c1c68baef5836ba5a9d31ea311f35a3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 185, "max_stars_repo_stars_event_min_datetime": "2018-08-15T18:10:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T03:05:27.000Z", "max_issues_repo_path": "src/AnatomyNet.py", "max_issues_repo_name": "wentaozhu/AnatomyNet-for-anatomical-segmentation", "max_issues_repo_head_hexsha": "296227838c1c68baef5836ba5a9d31ea311f35a3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 25, "max_issues_repo_issues_event_min_datetime": "2019-01-10T03:44:44.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-18T07:04:06.000Z", "max_forks_repo_path": "src/AnatomyNet.py", "max_forks_repo_name": "wentaozhu/AnatomyNet-for-anatomical-segmentation", "max_forks_repo_head_hexsha": "296227838c1c68baef5836ba5a9d31ea311f35a3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 49, "max_forks_repo_forks_event_min_datetime": "2018-09-09T00:34:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-12T23:32:00.000Z", "avg_line_length": 46.7489655172, "max_line_length": 217, "alphanum_fraction": 0.6004189656, "include": true, "reason": "import numpy,from scipy", "num_tokens": 10303}
|
\subsection{Definitions}
\begin{itemize}
\item $\lambda$ : intrinsic coordinate, including masses and spins.
\item $\theta$ : extrinsic coordinate, including $d,RA,DEC,\iota,\psi_L,t,\phi_{\rm orb}$
\item $p_s(\theta)$: (joint) sampling prior in extrinsic dimensions
\item $p(\theta)$ : prior on extrinsic parameters
\item $\Like(\lambda,\theta)$ : likelihood. In terms of individual detector strains $H_k$ and power spectra, provided by
\begin{eqnarray}
\ln L &\equiv \sum_k \ln L_k = \ln L_{\rm model} + \ln L_{\rm data} \\
\ln L_{\rm model} &\equiv -\frac{1}{2} \sum_k \qmstateproduct{H_k}{H_k}_k \\
\ln L_{\rm data} &\equiv \sum_k \text{Re} \qmstateproduct{H_k}{\hat{H}_k}_k
\end{eqnarray}
\item $Z(\lambda) \equiv L_{\rm red}(\lambda)$ : reduced or integrated likelihood, derived from $L$ by marginalizing over $\theta$ via
\begin{eqnarray}
Z(\lambda) = L_{\rm red}(\lambda) = \int d\theta \; p(\theta) L(\lambda,\theta)
\end{eqnarray}
\item
$w=Lp/p_s$ : weight
\item
$n_{\rm eff}$ : ``effective number of samples''
\begin{eqnarray}
n_{\rm eff} \equiv \frac{\sum_k w_k}{\text{max}_k w_k}
\end{eqnarray}
\item
$h(t|\lambda,x)=h_+-i h_\times$ : complex gravitational wave strain
\item
$h_{lm}(t)$: coefficients of a spin-weighted spherical harmonic decomposition
\begin{eqnarray}
\label{eq:def:hSpinWeightEmissionDirection}
h(t|\lambda,\theta) = \sum_{lm} h_{lm}(t|\lambda) e^{-2i\psi}\Y{-2}_{lm}(\theta_{JN},\phi_{JN})
\end{eqnarray}
\item
$\tilde{h}(f)$ : two-sided Fourier transform of the complex function $h(t)$
\begin{eqnarray}
h(t) = \int_{-\infty}^{\infty} \frac{d \omega}{2\pi} \; e^{-i\omega t} \tilde{h}(\omega)
\end{eqnarray}
%% \item
%% ${\cal I}$ : complex conjugation in time. Provided to avoid confusion with $\tilde{h}^*$. \textbf{Hopefully we won't
%% need it.}
\item
$\vec{x}_k$ : Position of the $k$th detector
\item
$F_{+}$, $F_{\times},F$ : detector response function to the $+,\times$ polarizations for sources visible in the
$\hat{n}$ direction relative to detector
\begin{eqnarray}
F(\hat{n}) = F_+(\hat{n}) +i F_\times(\hat{n})
\end{eqnarray}
\item
$\hat{H}_k$ : measured strain in the $k$th detector
\item
$H_k$ : strain response of the $k$th detector to an incident strain $h$
\begin{align}
H_k(t) &=F_{+,k}(t) h_+(t-\vec{x}_k(t)\cdot \hat{k}) + F_{\times,k}(t) h_\times(t-\vec{x}_k(t)\cdot \hat{k}) \\
&= \frac{F h(t-\vec{x}_k\cdot \hat{k}) }{2} + \frac{F^*h^*(t-\vec{x}_k\cdot \hat{k})}{2}
\end{align}
\item
$S_k$ : noise power spectrum for the $k$th detector
\item
$\qmstateproduct{a}{b}_k$ : complex-valued inner product defined by the $k$th detector's noise power spectrum:
\begin{eqnarray}
\qmstateproduct{a}{b}_k \equiv 2 \int_{-\infty}^{\infty} df \frac{[\tilde{a}(f)]^*\tilde{b}(f)}{S_h(|f|) }
\end{eqnarray}
\end{itemize}
|
{"hexsha": "3d90c7fa023e5737cc2fa56a943ee761ecbf1d8c", "size": 2781, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "MonteCarloMarginalizeCode/Notes/paper/notation_etc.tex", "max_stars_repo_name": "spfanning/research-projects-RIT", "max_stars_repo_head_hexsha": "34afc69ccb502825c81285733dac8ff993f79503", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2019-10-23T01:18:44.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-09T18:24:36.000Z", "max_issues_repo_path": "MonteCarloMarginalizeCode/Notes/paper/notation_etc.tex", "max_issues_repo_name": "spfanning/research-projects-RIT", "max_issues_repo_head_hexsha": "34afc69ccb502825c81285733dac8ff993f79503", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2020-01-03T14:38:26.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-17T16:57:02.000Z", "max_forks_repo_path": "MonteCarloMarginalizeCode/Notes/paper/notation_etc.tex", "max_forks_repo_name": "spfanning/research-projects-RIT", "max_forks_repo_head_hexsha": "34afc69ccb502825c81285733dac8ff993f79503", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2019-10-23T01:19:50.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-20T23:35:39.000Z", "avg_line_length": 33.5060240964, "max_line_length": 121, "alphanum_fraction": 0.6627112549, "num_tokens": 1018}
|
# Micro-benchmarks for ApproxFun hot paths: Clenshaw evaluation, sampling,
# and root finding.  Each operation is executed a few times before the
# `@time` call so the timed run measures steady-state performance rather
# than JIT compilation.  Expected timings (printed below) are reference
# numbers from the machine this was tuned on, not hard assertions.
using ApproxFun, Base.Test
# Build a 1000-coefficient Chebyshev expansion and 10_000 evaluation points.
c = rand(1000)
x=rand(10000)
f=Fun(c,Chebyshev)
# Warm up twice, then time vectorized Clenshaw evaluation at many points.
y=f(x)
y=f(x)
@time y=f(x)
println("Clenshaw large coeffs, many points: Time should be ~0.024")
# 0.012482274 with unsafe_view
# 0.024306262 with inbounds
# Warm up, then time scalar (single-point) Clenshaw evaluation.
y=f(.1)
y=f(.1)
y=f(.1)
@time y=f(.1);
println("Clenshaw large coeffs, 1 point: Time should be ~6e-6")
# @time is 8.853e-6 seconds
# Time drawing 100_000 samples from the density proportional to exp.
f=Fun(exp)
x=sample(f,100000)
x=sample(f,100000)
@time x=sample(f,100000)
println("Sample: Time should be ~0.25")
# 0.213793292 with unsafe_view
# 0.268162181 with inbounds
# Root finding on a low-degree expansion, amortized over 100 repetitions.
f=Fun(x->cos(x),20)
roots(f)
roots(f)
@time for k=1:100
  roots(f)
end
println("Small roots: Time should be ~0.015")
# Root finding on a highly oscillatory function (degree ~1000).
f=Fun(x->cos(1000x),1000)
roots(f)
roots(f)
@time roots(f)
println("Roots: Time should be ~0.13")
|
{"hexsha": "8058a4b599b8e4d9a3808d46c76a8f850b1aca12", "size": 765, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/SpeedTest.jl", "max_stars_repo_name": "JuliaPackageMirrors/ApproxFun.jl", "max_stars_repo_head_hexsha": "f73e9d168b0d139efa2953b1bad7fac808db2d8d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/SpeedTest.jl", "max_issues_repo_name": "JuliaPackageMirrors/ApproxFun.jl", "max_issues_repo_head_hexsha": "f73e9d168b0d139efa2953b1bad7fac808db2d8d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/SpeedTest.jl", "max_forks_repo_name": "JuliaPackageMirrors/ApproxFun.jl", "max_forks_repo_head_hexsha": "f73e9d168b0d139efa2953b1bad7fac808db2d8d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 15.612244898, "max_line_length": 68, "alphanum_fraction": 0.6797385621, "num_tokens": 293}
|
import numpy as np
import os
import random
import bpy
# set current path: chdir to the parent of this script's directory so the
# relative output paths below resolve.
abspath = os.path.abspath(__file__)
dname = os.path.dirname(os.path.dirname(abspath))
os.chdir(dname)
scn = bpy.context.scene
FPS = scn.render.fps
# set output folder and get highest index
# (lexicographically last subdirectory = most recent run, assuming
# sortable timestamped names — TODO confirm naming convention)
data_folder = '../../output/Cartpole/'
data_folder += sorted(os.listdir(data_folder))[-1]
# load data
# X: state trajectory (states x timesteps); U: inputs; t: trajectory time.
# NOTE(review): t is assumed to be a scalar total duration — confirm.
X = np.loadtxt(open(f"{data_folder}/X.txt", "rb"))
U = np.loadtxt(open(f"{data_folder}/U.txt", "rb"))
t = np.loadtxt(open(f"{data_folder}/t.txt", "rb"))
# get objects
cart = bpy.data.objects["Cart"]
pole = bpy.data.objects["Pole"]
# get timesteps, set total frames and timestep
K = X.shape[1]
trajectory_frames = FPS * t
step_size = trajectory_frames / K
# clear animation data
cart.animation_data_clear()
pole.animation_data_clear()
current_frame = 1
scn.frame_current = 1
# for each timestep in trajectory
for i in range(K):
    # advance the (float) frame cursor, then keyframe at the nearest int
    current_frame += step_size
    scn.frame_current = int(current_frame)
    x = X[:, i]
    # location and rotation
    # state layout assumed: x[0] = cart position, x[2] = pole angle — TODO confirm
    cart.location[0] = x[0]
    pole.rotation_euler[1] = x[2]
    cart.keyframe_insert(data_path='location')
    pole.keyframe_insert(data_path='rotation_euler')
# hold the final pose for one extra second
scn.frame_current += FPS
# set frame range
scn.frame_start = 1
scn.frame_end = scn.frame_current
# go back to start
scn.frame_current = 1
# select all objects
# (Blender operators require a matching area type, hence the context flips)
bpy.context.area.type = 'VIEW_3D'
bpy.ops.object.select_all(action='SELECT')
# set all to linear interpolation
bpy.context.area.type = 'GRAPH_EDITOR'
bpy.ops.graph.select_all(action='SELECT')
bpy.ops.graph.interpolation_type(type='LINEAR')
# deselect all objects
bpy.context.area.type = 'VIEW_3D'
bpy.ops.object.select_all(action='DESELECT')
bpy.context.area.type = 'TEXT_EDITOR'
|
{"hexsha": "b943c746e5f677d5e8a28d1a1a0cbd3908c0b7e7", "size": 1729, "ext": "py", "lang": "Python", "max_stars_repo_path": "evaluation/Cartpole/import_cartpole.py", "max_stars_repo_name": "boyali/SCpp", "max_stars_repo_head_hexsha": "3bc49a169e7edfb0144575dfa55807df40eea58d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "evaluation/Cartpole/import_cartpole.py", "max_issues_repo_name": "boyali/SCpp", "max_issues_repo_head_hexsha": "3bc49a169e7edfb0144575dfa55807df40eea58d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "evaluation/Cartpole/import_cartpole.py", "max_forks_repo_name": "boyali/SCpp", "max_forks_repo_head_hexsha": "3bc49a169e7edfb0144575dfa55807df40eea58d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-12-18T12:58:00.000Z", "max_forks_repo_forks_event_max_datetime": "2019-12-18T12:58:00.000Z", "avg_line_length": 23.3648648649, "max_line_length": 52, "alphanum_fraction": 0.7281665703, "include": true, "reason": "import numpy", "num_tokens": 460}
|
from distutils.core import setup
from distutils.extension import Extension
import distutils.sysconfig
import numpy
import tempfile
import os
import subprocess
import shutil
def check_for_openmp():
    """Check whether the default compiler supports OpenMP.

    Compiles a minimal OpenMP test program with ``-fopenmp`` in a
    temporary directory.  Returns True on success; otherwise prints a
    warning (with macOS-specific advice) and returns False.  Setting the
    ``DISABLE_OPENMP`` environment variable skips the check entirely and
    returns False.

    This routine is adapted from yt, thanks to Nathan
    Goldbaum. See https://github.com/pynbody/pynbody/issues/124"""
    if os.getenv('DISABLE_OPENMP') is not None:
        return False
    # Create a temporary directory to build the test program in.
    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)
    try:
        # Get compiler invocation: honour $CC, fall back to the compiler
        # this Python was built with.
        compiler = os.environ.get('CC',
                              distutils.sysconfig.get_config_var('CC'))
        # make sure to use just the compiler name without flags
        compiler = compiler.split()[0]
        # Attempt to compile a test script.
        # See http://openmp.org/wp/openmp-compilers/
        filename = r'test.c'
        with open(filename, 'w') as f:
            f.write(
                "#include <omp.h>\n"
                "#include <stdio.h>\n"
                "int main() {\n"
                "#pragma omp parallel\n"
                "printf(\"Hello from thread %d, nthreads %d\\n\", omp_get_thread_num(), omp_get_num_threads());\n"
                "}"
            )
        try:
            with open(os.devnull, 'w') as fnull:
                exit_code = subprocess.call([compiler, '-fopenmp', filename],
                                            stdout=fnull, stderr=fnull)
        except OSError:
            # Compiler binary could not be invoked at all.
            exit_code = 1
    finally:
        # Always restore the working directory and remove the temporary
        # directory, even if an unexpected exception occurs above.
        os.chdir(curdir)
        shutil.rmtree(tmpdir)
    if exit_code == 0:
        return True

    # Compilation failed: warn (loudly, if the machine would actually
    # benefit from OpenMP) and fall back to a serial build.
    import multiprocessing
    import platform
    cpus = multiprocessing.cpu_count()
    if cpus > 1:
        print("""WARNING
OpenMP support is not available in your default C compiler, even though
your machine has more than one core available.
Some routines in pynbody are parallelized using OpenMP and these will
only run on one core with your current configuration.
""")
        if platform.uname()[0] == 'Darwin':
            print("""Since you are running on Mac OS, it's likely that the problem here
is Apple's Clang, which does not support OpenMP at all. The easiest
way to get around this is to download the latest version of gcc from
here: http://hpc.sourceforge.net. After downloading, just point the
CC environment variable to the real gcc and OpenMP support should
get enabled automatically. Something like this -
sudo tar -xzf /path/to/download.tar.gz /
export CC='/usr/local/bin/gcc'
python setup.py clean
python setup.py build
""")
    print("""Continuing your build without OpenMP...\n""")
    return False
# Base compiler/linker flags for the C++ extension.
extra_compile_args=['-ffast-math',]
extra_link_args = ['-ffast-math',]
# GSL is a hard requirement: query gsl-config for its flags and abort the
# build with the tool's output if it is missing.
try:
    gsl_libs = subprocess.check_output(["gsl-config", "--libs"], stderr=subprocess.STDOUT, universal_newlines=True)
    extra_link_args += gsl_libs.split()
    gsl_incl = subprocess.check_output(["gsl-config", "--cflags"], stderr=subprocess.STDOUT, universal_newlines=True)
    extra_compile_args += gsl_incl.split()
except subprocess.CalledProcessError as e:
    print(e.output)
    raise
# Enable OpenMP only if the compiler actually supports it.
if check_for_openmp():
    extra_compile_args += ['-fopenmp',]
    #gcc specific
    extra_link_args += ['-lgomp',]
# The single C++ extension module backing fake_spectra.
cmodule = [
        Extension("fake_spectra._spectra_priv",
                 ["fake_spectra/py_module.cpp",
                  "fake_spectra/absorption.cpp",
                  "fake_spectra/index_table.cpp",
                  "fake_spectra/Faddeeva.cpp",
                  "fake_spectra/part_int.cpp",
                 ],
                 depends = [
                  "fake_spectra/Faddeeva.h",
                  "fake_spectra/absorption.h",
                  "fake_spectra/index_table.h",
                  "fake_spectra/part_int.h",
                  "fake_spectra/singleabs.h",]
                 ,
                 extra_compile_args=extra_compile_args,
                 extra_link_args=extra_link_args,
                 include_dirs = ["fake_spectra/", numpy.get_include()])]
setup(
    name="fake_spectra",
    version='1.2.1',
    author="Simeon Bird",
    author_email="spb@ias.edu",
    #Use the subclass which adds openmp flags as appropriate
    # cmdclass = {'build_ext': build_ext_subclass },
    url="http://github.com/sbird/fake_spectra",
    description="Analysis tools for generating artificial spectra from simulations.",
    packages = ['fake_spectra', 'fake_spectra.tests', 'fake_spectra.cloudy_tables'],
    requires=['numpy', 'h5py','scipy'],
    package_data = {
            'fake_spectra.tests': ['*.npz'],
            'fake_spectra': ['data/TREECOOL*', '*.dat'],
            'fake_spectra.cloudy_tables': ['ion_out_*/cloudy_table.npz']
        },
    ext_modules = cmodule,
    classifiers = ["Development Status :: 4 - Beta",
                   "Intended Audience :: Developers",
                   "Intended Audience :: Science/Research",
                   "License :: OSI Approved :: MIT License",
                   "Programming Language :: Python :: 3",
                   "Topic :: Scientific/Engineering :: Astronomy",
                   "Topic :: Scientific/Engineering :: Visualization"]
)
|
{"hexsha": "7172044bbc1cb710b72e659e8e0ef9ec91f3c905", "size": 5074, "ext": "py", "lang": "Python", "max_stars_repo_path": "setup.py", "max_stars_repo_name": "xiaohanzai/fake_spectra", "max_stars_repo_head_hexsha": "170b42ac7732eb4f299617a1049cd3eabecfa3a7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "setup.py", "max_issues_repo_name": "xiaohanzai/fake_spectra", "max_issues_repo_head_hexsha": "170b42ac7732eb4f299617a1049cd3eabecfa3a7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "setup.py", "max_forks_repo_name": "xiaohanzai/fake_spectra", "max_forks_repo_head_hexsha": "170b42ac7732eb4f299617a1049cd3eabecfa3a7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2837837838, "max_line_length": 117, "alphanum_fraction": 0.6296807253, "include": true, "reason": "import numpy", "num_tokens": 1181}
|
# -*- coding: UTF-8 -*-
#!/usr/bin/python3
"""
CLDC task classifier
"""
#************************************************************
# Imported Libraries
#************************************************************
import numpy as np
import torch
import torch.nn as nn
from sklearn.manifold import TSNE
from matplotlib import pyplot as plt
from .base_mlp import BaseMLP
import pdb
class CLDCClassifier(nn.Module):
  """MLP classifier head for the CLDC task.

  Wraps a BaseMLP with a cross-entropy objective and can record the last
  hidden layer during evaluation for t-SNE visualization.
  """

  def __init__(self, params, classifier_config):
    super(CLDCClassifier, self).__init__()
    self.mlp = BaseMLP(classifier_config)
    # Multi-class objective (a BCEWithLogitsLoss "sig" variant was also
    # experimented with in earlier revisions).
    self.criterion = nn.CrossEntropyLoss()
    # Accumulators for t-SNE visualization of the last hidden layer.
    self.vis_x = []
    self.vis_y = []
    self.use_cuda = params.cuda

  def forward(self, x, y, training, vis = False):
    """Classify a batch.

    Args:
      x: batch of input representations fed to the MLP.
      y: gold labels; may be None when no loss is needed.
      training: if True, run in train mode and compute the loss.
      vis: if True (eval mode only), store the last hidden layer.

    Returns:
      (cldc_loss, pred_p, pred): loss (None unless training with labels),
      softmax probabilities of shape (bs, label_size), and argmax
      predictions of shape (bs,).
    """
    if training:
      self.train()
      pred_logits = self.mlp(x)
    else:
      self.eval()
      with torch.no_grad():
        pred_logits = self.mlp(x)
    # Hard predictions (bs,) and class probabilities (bs, label_size).
    pred = pred_logits.argmax(dim = 1)
    pred_p = pred_logits.softmax(dim = 1)
    # During evaluation, optionally capture the last hidden activations
    # (everything but the final MLP layer) for later visualization.
    if vis and self.training is False:
      last_hid = self.mlp.mlp[:-1](x)
      self.vis_x.append(last_hid.detach().cpu().numpy())
      self.vis_y.append(y.detach().cpu().numpy())
    cldc_loss = self.criterion(pred_logits, y) if (training and y is not None) else None
    return cldc_loss, pred_p, pred
|
{"hexsha": "20b206f598e32213201e719bc1660dd69cac2738", "size": 1702, "ext": "py", "lang": "Python", "max_stars_repo_path": "nn_model/mlp/cldc_classifier.py", "max_stars_repo_name": "onlyrico/mling_sdgms", "max_stars_repo_head_hexsha": "ef6015d1a815a317f16fa1e42cbb048e4fe443f7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-06-01T02:06:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-23T02:14:07.000Z", "max_issues_repo_path": "nn_model/mlp/cldc_classifier.py", "max_issues_repo_name": "onlyrico/mling_sdgms", "max_issues_repo_head_hexsha": "ef6015d1a815a317f16fa1e42cbb048e4fe443f7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nn_model/mlp/cldc_classifier.py", "max_forks_repo_name": "onlyrico/mling_sdgms", "max_forks_repo_head_hexsha": "ef6015d1a815a317f16fa1e42cbb048e4fe443f7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-01-28T05:48:20.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-24T11:59:13.000Z", "avg_line_length": 22.1038961039, "max_line_length": 67, "alphanum_fraction": 0.5734430082, "include": true, "reason": "import numpy", "num_tokens": 445}
|
import os
import sys

# Backend used when GEOMSTATS_BACKEND is not set in the environment.
_default_backend = 'numpy'
# Select the backend from the environment, falling back to the default.
# (os.environ.get replaces the previous membership-test if/else.)
_backend = os.environ.get('GEOMSTATS_BACKEND', _default_backend)
_BACKEND = _backend
# Re-export the shared helpers, then the implementation of the selected
# backend.  Star imports are intentional: this module presents a single
# flat numpy-like namespace regardless of backend.
from .common import *  # NOQA
if _BACKEND == 'numpy':
    sys.stderr.write('Using numpy backend\n')
    from .numpy import *  # NOQA
    from . import numpy_linalg as linalg
    from . import numpy_random as random
    from . import numpy_testing as testing
elif _BACKEND == 'tensorflow':
    # NOTE: the tensorflow backend exposes no `testing` submodule,
    # unlike numpy above.
    sys.stderr.write('Using tensorflow backend\n')
    from .tensorflow import *  # NOQA
    from . import tensorflow_linalg as linalg  # NOQA
    from . import tensorflow_random as random  # NOQA
elif _BACKEND == 'pytorch':
    raise NotImplementedError('pytorch backend not implemented yet')
def backend():
    """Return the name of the active backend, e.g. 'numpy' or 'tensorflow'."""
    return _BACKEND
|
{"hexsha": "7cfeaff03a77145b1b82da722d4bc66a96363fda", "size": 816, "ext": "py", "lang": "Python", "max_stars_repo_path": "geomstats/backend/__init__.py", "max_stars_repo_name": "oesteban/geomstats", "max_stars_repo_head_hexsha": "e0b53777cc27cf446d55eeac1533f4c3bc0ae681", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-05-23T20:18:23.000Z", "max_stars_repo_stars_event_max_datetime": "2018-05-23T20:18:23.000Z", "max_issues_repo_path": "geomstats/backend/__init__.py", "max_issues_repo_name": "leslie-chu/geomstats", "max_issues_repo_head_hexsha": "fbed39b47b16eab4a48179106e8d0c1a5891243d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "geomstats/backend/__init__.py", "max_forks_repo_name": "leslie-chu/geomstats", "max_forks_repo_head_hexsha": "fbed39b47b16eab4a48179106e8d0c1a5891243d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.5, "max_line_length": 68, "alphanum_fraction": 0.7144607843, "include": true, "reason": "import numpy", "num_tokens": 206}
|
using DiffEqFlux, Flux, OrdinaryDiffEq, Test, Optim, DiffEqSensitivity
# --- Experiment 1: partial neural ODE trained with TrackerAdjoint ---
# Initial state and integration span.
x = Float32[0.8; 0.8]
tspan = (0.0f0,10.0f0)
# Small network replacing the first component of the vector field.
ann = Chain(Dense(2,10,tanh), Dense(10,1))
# Known linear parameters for the second component.
p = Float32[-2.0,1.1]
# Flatten network weights; `re` rebuilds the network from a flat vector.
p2,re = Flux.destructure(ann)
_p = [p;p2]
# Optimization variable: initial condition followed by all parameters.
θ = [x;_p]
# Out-of-place RHS: du1 from the NN, du2 a linear combination of states.
function dudt2_(u,p,t)
  x, y = u
  [(re(p[3:end])(u)[1]),p[1]*y + p[2]*x]
end
prob = ODEProblem(dudt2_,x,tspan,_p)
# Warm-up solve (compiles the integrator for this problem type).
solve(prob,Tsit5())
# Solve with u0 and p taken from the optimization vector θ.
function predict_rd(θ)
  Array(solve(prob,Tsit5(),u0=θ[1:2],p=θ[3:end],abstol=1e-7,reltol=1e-5,sensealg=TrackerAdjoint()))
end
# Drive the whole trajectory toward the constant value 1.
loss_rd(p) = sum(abs2,x-1 for x in predict_rd(p))
l = loss_rd(θ)
# Callback: print the loss; return false to continue optimizing.
cb = function (θ,l)
  println(l)
  #display(plot(solve(remake(prob,u0=Flux.data(_x),p=Flux.data(p)),Tsit5(),saveat=0.1),ylim=(0,6)))
  false
end
# Display the ODE with the current parameter values.
cb(θ,l)
loss1 = loss_rd(θ)
res = DiffEqFlux.sciml_train(loss_rd, θ, BFGS(initial_stepnorm = 0.01), cb = cb)
loss2 = res.minimum
# Training should reduce the loss by at least 10x.
@test 10loss2 < loss1
## Partial Neural Adjoint
# --- Experiment 2: same model family, in-place RHS, default adjoint ---
u0 = Float32[0.8; 0.8]
tspan = (0.0f0,25.0f0)
ann = Chain(Dense(2,10,tanh), Dense(10,1))
p1,re = Flux.destructure(ann)
p2 = Float32[-2.0,1.1]
p3 = [p1;p2]
# Optimization variable: initial condition followed by all parameters.
θ = [u0;p3]
# In-place RHS; the NN weights occupy p[1:41], the linear terms p[end-1:end].
function dudt_(du,u,p,t)
  x, y = u
  du[1] = re(p[1:41])(u)[1]
  du[2] = p[end-1]*y + p[end]*x
end
prob = ODEProblem(dudt_,u0,tspan,p3)
# Warm-up solve at tight tolerances.
solve(prob,Tsit5(),abstol=1e-8,reltol=1e-6)
# Save at unit intervals so the loss compares a fixed-length trajectory.
function predict_adjoint(θ)
  Array(solve(prob,Tsit5(),u0=θ[1:2],p=θ[3:end],saveat=0.0:1:25.0))
end
loss_adjoint(θ) = sum(abs2,x-1 for x in predict_adjoint(θ))
l = loss_adjoint(θ)
# Callback: print the loss; return false to continue optimizing.
cb = function (θ,l)
  println(l)
  #display(plot(solve(remake(prob,p=Flux.data(p3),u0=Flux.data(u0)),Tsit5(),saveat=0.1),ylim=(0,6)))
  false
end
# Display the ODE with the current parameter values.
cb(θ,l)
loss1 = loss_adjoint(θ)
# Two-stage optimization: ADAM to get close, then BFGS to polish.
res1 = DiffEqFlux.sciml_train(loss_adjoint, θ, ADAM(0.01), cb = cb, maxiters = 100)
res = DiffEqFlux.sciml_train(loss_adjoint, res1.minimizer, BFGS(initial_stepnorm = 0.01), cb = cb)
loss2 = res.minimum
# Training should reduce the loss by at least 10x.
@test 10loss2 < loss1
|
{"hexsha": "27c4083ef72c312d6438f691badfee94cbabef13", "size": 2026, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/partial_neural.jl", "max_stars_repo_name": "jonniedie/DiffEqFlux.jl", "max_stars_repo_head_hexsha": "0b2b4db87c1658e2008c770ffd0bd39427837fc2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/partial_neural.jl", "max_issues_repo_name": "jonniedie/DiffEqFlux.jl", "max_issues_repo_head_hexsha": "0b2b4db87c1658e2008c770ffd0bd39427837fc2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/partial_neural.jl", "max_forks_repo_name": "jonniedie/DiffEqFlux.jl", "max_forks_repo_head_hexsha": "0b2b4db87c1658e2008c770ffd0bd39427837fc2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.325, "max_line_length": 101, "alphanum_fraction": 0.6411648569, "num_tokens": 819}
|
# Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pytype: skip-file
"""Tests for rax._src.losses."""
import doctest
import functools
import math
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
import numpy as np
import rax
from rax._src import losses
# Export symbols from math for conciser test value definitions.
exp = math.exp
log = math.log


# Named defs instead of assigned lambdas (PEP 8 E731): same call
# signature and values, but better tracebacks and introspection.
def logloss(x):
  """Binary logistic loss log(1 + e^{-x}), as used by pairwise losses."""
  return log(1. + exp(-x))


def sigmoid(x):
  """Logistic sigmoid 1 / (1 + e^{-x})."""
  return 1. / (1. + exp(-x))
class LossesTest(parameterized.TestCase):
  # Each case pairs a loss function with its hand-computed expected value
  # for scores [0, 3, 1, 2] and binary labels [0, 0, 1, 1].
  @parameterized.parameters([{
      "loss_fn":
          losses.softmax_loss,
      "expected_value":
          -(log(exp(2.) / (exp(0.) + exp(3.) + exp(1.) + exp(2.))) +
            log(exp(1.) / (exp(0.) + exp(3.) + exp(1.) + exp(2.))))
  }, {
      "loss_fn":
          losses.listmle_loss,
      "expected_value":
          -sum([
              log(exp(1.) / (exp(1.) + exp(2.) + exp(0.) + exp(3.))),
              log(exp(2.) / (exp(2.) + exp(0.) + exp(3.))),
              log(exp(0.) / (exp(0.) + exp(3.))),
              log(exp(3.) / (exp(3.))),
          ])
  }, {
      "loss_fn":
          losses.poly1_softmax_loss,
      "expected_value":
          -(log(exp(2.) / (exp(0.) + exp(3.) + exp(1.) + exp(2.))) +
            log(exp(1.) / (exp(0.) + exp(3.) + exp(1.) + exp(2.)))) +
          (1. - (0.5 * exp(2.) / (exp(0.) + exp(3.) + exp(1.) + exp(2.)) + 0.5 *
                 (exp(1.) / (exp(0.) + exp(3.) + exp(1.) + exp(2.)))))
  }, {
      "loss_fn":
          functools.partial(losses.poly1_softmax_loss, epsilon=0.1),
      "expected_value":
          -(log(exp(2.) / (exp(0.) + exp(3.) + exp(1.) + exp(2.))) +
            log(exp(1.) / (exp(0.) + exp(3.) + exp(1.) + exp(2.)))) + 0.1 *
          (1. - (0.5 * exp(2.) / (exp(0.) + exp(3.) + exp(1.) + exp(2.)) + 0.5 *
                 (exp(1.) / (exp(0.) + exp(3.) + exp(1.) + exp(2.)))))
  }, {
      "loss_fn": losses.pairwise_hinge_loss,
      "expected_value": (3. - 1. + 1.) + (3. - 2. + 1.)
  }, {
      "loss_fn":
          losses.pairwise_logistic_loss,
      "expected_value":
          logloss(1. - 0.) + logloss(1. - 3.) + logloss(2. - 3.) +
          logloss(2. - 0.)
  }, {
      "loss_fn":
          losses.pointwise_sigmoid_loss,
      "expected_value":
          -log(1. - sigmoid(0.)) - log(1. - sigmoid(3.)) - log(sigmoid(1.)) -
          log(sigmoid(2.))
  }, {
      "loss_fn":
          losses.pointwise_mse_loss,
      "expected_value":
          (0. - 0.)**2 + (3. - 0.)**2 + (1. - 1.)**2 + (2. - 1.)**2
  }, {
      "loss_fn":
          losses.pairwise_mse_loss,
      "expected_value":
          ((0. - 3.) - (0. - 0.))**2 + ((0. - 1.) - (0. - 1.))**2 +
          ((0. - 2.) - (0. - 1.))**2 + ((3. - 0.) - (0. - 0.))**2 +
          ((3. - 1.) - (0. - 1.))**2 + ((3. - 2.) - (0. - 1.))**2 +
          ((1. - 0.) - (1. - 0.))**2 + ((1. - 3.) - (1. - 0.))**2 +
          ((1. - 2.) - (1. - 1.))**2 + ((2. - 0.) - (1. - 0.))**2 +
          ((2. - 3.) - (1. - 0.))**2 + ((2. - 1.) - (1. - 1.))**2
  }])
  def test_computes_loss_value(self, loss_fn, expected_value):
    """Each loss matches its hand-computed value on a simple 4-item list."""
    scores = jnp.asarray([0., 3., 1., 2.])
    labels = jnp.asarray([0., 0., 1., 1.])
    loss = loss_fn(scores, labels)
    np.testing.assert_allclose(jnp.asarray(expected_value), loss)
  # Expected values for extreme score magnitudes (±2.1e26, 3.4e37); these
  # exercise the numerically-stable formulations of each loss.
  @parameterized.parameters([{
      "loss_fn":
          losses.softmax_loss,
      "expected_value":
          -((-2.1e26 - (0. + -2.1e26 + 3.4e37 + 42.)) +
            (3.4e37 - (0. + -2.1e26 + 3.4e37 + 42.)))
  }, {
      "loss_fn": losses.listmle_loss,
      "expected_value": 3.4e37
  }, {
      "loss_fn":
          losses.poly1_softmax_loss,
      "expected_value":
          -((-2.1e26 - (0. + -2.1e26 + 3.4e37 + 42.)) +
            (3.4e37 - (0. + -2.1e26 + 3.4e37 + 42.)))
  }, {
      "loss_fn": losses.pairwise_hinge_loss,
      "expected_value": (1. - (-2.1e26 - 0.)) + (1. - (-2.1e26 - 42.0))
  }, {
      "loss_fn": losses.pairwise_logistic_loss,
      "expected_value": 2.1e26 + 2.1e26
  }, {
      "loss_fn": losses.pointwise_sigmoid_loss,
      "expected_value": 2.1e26 - log(1. - sigmoid(0.)) + 42.0
  }, {
      "loss_fn":
          losses.pointwise_mse_loss,
      "expected_value":
          (0. - 0.)**2 + (-2.1e26 - 1.)**2 + (3.4e37 - 1.)**2 + (42. - 0.)**2
  }, {
      "loss_fn":
          losses.pairwise_mse_loss,
      "expected_value":
          (2.1e26 - -1.)**2 + (-3.4e37 - -1.)**2 + (-42. - 0.)**2 +
          (-2.1e26 - 1.)**2 + ((-2.1e26 - 3.4e37) - 0.)**2 +
          ((-2.1e26 - 42.) - 1.)**2 + (3.4e37 - 1.)**2 +
          ((3.4e37 - -2.1e26) - 0.)**2 + ((3.4e37 - 42.) - 1.)**2 +
          (42. - 0.)**2 + ((42. - -2.1e26) - -1.)**2 + ((42. - 3.4e37) - -1.)**2
  }])
  def test_computes_loss_with_extreme_inputs(self, loss_fn, expected_value):
    """Losses remain finite and correct on extreme score magnitudes."""
    scores = jnp.asarray([0., -2.1e26, 3.4e37, 42.0])
    labels = jnp.asarray([0., 1., 1., 0.])
    loss = loss_fn(scores, labels)
    np.testing.assert_allclose(jnp.asarray(expected_value), loss)
  # Expected values when no item is relevant (all labels zero); pairwise
  # losses with no valid pairs should be exactly zero.
  @parameterized.parameters([{
      "loss_fn": losses.softmax_loss,
      "expected_value": 0.
  }, {
      "loss_fn":
          losses.listmle_loss,
      "expected_value":
          -sum([
              log(exp(0.) / (exp(0.) + exp(3.) + exp(1.) + exp(2.))),
              log(exp(3.) / (exp(3.) + exp(1.) + exp(2.))),
              log(exp(1.) / (exp(1.) + exp(2.))),
              log(exp(2.) / (exp(2.))),
          ])
  }, {
      "loss_fn":
          losses.poly1_softmax_loss,
      "expected_value":
          1. - sum([
              0.25 * (exp(0.) / (exp(0.) + exp(3.) + exp(1.) + exp(2.))),
              0.25 * (exp(3.) / (exp(0.) + exp(3.) + exp(1.) + exp(2.))),
              0.25 * (exp(1.) / (exp(0.) + exp(3.) + exp(1.) + exp(2.))),
              0.25 * (exp(2.) / (exp(0.) + exp(3.) + exp(1.) + exp(2.))),
          ])
  }, {
      "loss_fn": losses.pairwise_hinge_loss,
      "expected_value": 0.
  }, {
      "loss_fn": losses.pairwise_logistic_loss,
      "expected_value": 0.
  }, {
      "loss_fn":
          losses.pointwise_sigmoid_loss,
      "expected_value":
          -log(1. - sigmoid(0.)) - log(1. - sigmoid(3.)) -
          log(1. - sigmoid(1.)) - log(1. - sigmoid(2.))
  }, {
      "loss_fn":
          losses.pointwise_mse_loss,
      "expected_value":
          (0. - 0.)**2 + (3. - 0.)**2 + (1. - 0.)**2 + (2. - 0.)**2
  }, {
      "loss_fn":
          losses.pairwise_mse_loss,
      "expected_value": (-3.)**2 + (-1.)**2 + (-2.)**2 + 3.**2 + 2.**2 + 1.**2 +
                        1.**2 + (-2.)**2 + (-1.)**2 + 2.**2 + (-1.)**2 + 1.**2
  }])
  def test_computes_loss_for_zero_labels(self, loss_fn, expected_value):
    """Losses behave sensibly when every label is zero (no relevant items)."""
    scores = jnp.asarray([0., 3., 1., 2.])
    labels = jnp.asarray([0., 0., 0., 0.])
    loss = loss_fn(scores, labels)
    np.testing.assert_allclose(jnp.asarray(expected_value), loss)
  # Expected values with per-item weights [1, 1, 2, 1]: the third item's
  # contribution is doubled in each loss formulation.
  @parameterized.parameters([{
      "loss_fn":
          losses.poly1_softmax_loss,
      "expected_value":
          -(2. * log(exp(2.) / (exp(0.) + exp(3.) + exp(1.) + exp(2.))) +
            log(exp(1.) / (exp(0.) + exp(3.) + exp(1.) + exp(2.)))) +
          (1. - (2. / 3. * exp(2.) /
                 (exp(0.) + exp(3.) + exp(1.) + exp(2.)) + 1. / 3. *
                 (exp(1.) / (exp(0.) + exp(3.) + exp(1.) + exp(2.)))))
  }, {
      "loss_fn": losses.pairwise_hinge_loss,
      "expected_value": 7.
  }, {
      "loss_fn": losses.pairwise_logistic_loss,
      "expected_value": 5.320569
  }, {
      "loss_fn":
          losses.pointwise_sigmoid_loss,
      "expected_value":
          -log(1. - sigmoid(0.)) - log(1. - sigmoid(3.)) -
          2. * log(sigmoid(2.)) - log(sigmoid(1.))
  }, {
      "loss_fn":
          losses.pointwise_mse_loss,
      "expected_value":
          (0. - 0.)**2 + (3. - 0.)**2 + 2. * (2. - 1.)**2 + (1. - 1.)**2
  }, {
      "loss_fn":
          losses.pairwise_mse_loss,
      "expected_value":
          (1. * ((-3. - 0.)**2 + (-2. - -1.)**2 + (-1. - -1.)**2)) +
          (1. * ((3. - 0.)**2 + (1. - -1.)**2 + (2. - -1.)**2)) +
          (2. * ((2. - 1.)**2 + (-1. - 1.)**2 + (1. - 0.)**2)) +
          (1. * ((1. - 1.)**2 + (-2. - 1.)**2 + (-1. - 0.)**2))
  }])
  def test_computes_weighted_loss_value(self, loss_fn, expected_value):
    """Per-item weights scale each item's contribution to the loss."""
    scores = jnp.asarray([0., 3., 2., 1.])
    labels = jnp.asarray([0., 0., 1., 1.])
    weights = jnp.asarray([1., 1., 2., 1.])
    loss = loss_fn(scores, labels, weights=weights)
    np.testing.assert_allclose(jnp.asarray(expected_value), loss)
  # Expected per-list values for a batch of two lists; verifies the losses
  # compose with jax.vmap over a leading batch dimension.
  @parameterized.parameters([{
      "loss_fn":
          losses.softmax_loss,
      "expected_value": [
          -(log(exp(2.) / (exp(0.) + exp(3.) + exp(1.) + exp(2.))) +
            log(exp(1.) / (exp(0.) + exp(3.) + exp(1.) + exp(2.)))),
          -(2. * log(exp(3.) / (exp(3.) + exp(1.) + exp(4.) + exp(2.))) +
            log(exp(4.) / (exp(3.) + exp(1.) + exp(4.) + exp(2.))))
      ]
  }, {
      "loss_fn":
          losses.listmle_loss,
      "expected_value": [
          -sum([
              log(exp(1.) / (exp(1.) + exp(2.) + exp(0.) + exp(3.))),
              log(exp(2.) / (exp(2.) + exp(0.) + exp(3.))),
              log(exp(0.) / (exp(0.) + exp(3.))),
              log(exp(3.) / (exp(3.))),
          ]), -sum([
              log(exp(3.) / (exp(3.) + exp(4.) + exp(1.) + exp(2.))),
              log(exp(4.) / (exp(4.) + exp(1.) + exp(2.))),
              log(exp(1.) / (exp(1.) + exp(2.))),
              log(exp(2.) / (exp(2.))),
          ])
      ]
  }, {
      "loss_fn":
          losses.poly1_softmax_loss,
      "expected_value": [
          -(log(exp(2.) / (exp(0.) + exp(3.) + exp(1.) + exp(2.))) +
            log(exp(1.) / (exp(0.) + exp(3.) + exp(1.) + exp(2.)))) +
          (1. - (0.5 * (exp(2.) / (exp(0.) + exp(3.) + exp(1.) + exp(2.))) +
                 0.5 * (exp(1.) / (exp(0.) + exp(3.) + exp(1.) + exp(2.))))),
          -(2. * log(exp(3.) / (exp(3.) + exp(1.) + exp(4.) + exp(2.))) +
            log(exp(4.) / (exp(3.) + exp(1.) + exp(4.) + exp(2.)))) +
          (1. - (2. / 3. * (exp(3.) / (exp(3.) + exp(1.) + exp(4.) + exp(2.))) +
                 1. / 3. * (exp(4.) / (exp(3.) + exp(1.) + exp(4.) + exp(2.)))))
      ]
  }, {
      "loss_fn": losses.pairwise_hinge_loss,
      "expected_value": [(3. - 1. + 1.) + (3. - 2. + 1.), (4. - 3. + 1.)]
  }, {
      "loss_fn":
          losses.pairwise_logistic_loss,
      "expected_value": [
          logloss(1. - 0.) + logloss(1. - 3.) + logloss(2. - 3.) +
          logloss(2. - 0.),
          logloss(3. - 1.) + logloss(3. - 4.) + logloss(3. - 2.) +
          logloss(4. - 1.) + logloss(4. - 2.)
      ]
  }, {
      "loss_fn":
          losses.pointwise_sigmoid_loss,
      "expected_value": [
          -log(1. - sigmoid(0.)) - log(1. - sigmoid(3.)) - log(sigmoid(1.)) -
          log(sigmoid(2.)),
          -log(sigmoid(3.)) - log(1. - sigmoid(1.)) - log(sigmoid(4.)) -
          log(1. - sigmoid(2.))
      ]
  }, {
      "loss_fn":
          losses.pointwise_mse_loss,
      "expected_value": [
          (0. - 0.)**2 + (3. - 0.)**2 + (1. - 1.)**2 + (2. - 1.)**2,
          (3. - 2.)**2 + (1. - 0.)**2 + (4. - 1.)**2 + (2. - 0.)**2
      ]
  }, {
      "loss_fn":
          losses.pairwise_mse_loss,
      "expected_value": [
          (-3. - 0.)**2 + (-1. - -1.)**2 + (-2. - -1.)**2 +
          (3. - 0.)**2 + (2. - -1.)**2 + (1. - -1.)**2 +
          (1. - 1.)**2 + (-2. - 1.)**2 + (-1. - 0.)**2 +
          (2. - 1.)**2 + (-1. - 1.)**2 + (1. - 0.)**2,
          (2. - 2.)**2 + (-1. - 1.)**2 + (1. - 2.)**2 +
          (-2. - -2.)**2 + (-3. - -1.)**2 + (-1. - 0.)**2 +
          (1. - -1.)**2 + (3. - 1.)**2 + (2. - 1.)**2 +
          (-1. - -2.)**2 + (1. - 0.)**2 + (-2. - -1.)**2
      ]
  }]) # pyformat: disable
  def test_computes_loss_value_with_vmap(self, loss_fn, expected_value):
    """vmap-ing a loss over a batch yields per-list loss values."""
    scores = jnp.asarray([[0., 3., 1., 2.], [3., 1., 4., 2.]])
    labels = jnp.asarray([[0., 0., 1., 1.], [2., 0., 1., 0.]])
    vmap_loss_fn = jax.vmap(loss_fn, in_axes=(0, 0), out_axes=0)
    loss = vmap_loss_fn(scores, labels)
    np.testing.assert_allclose(jnp.asarray(expected_value), loss)
  # Each case lists the per-list loss values plus the `normalizer` (number
  # of loss terms) that jnp.mean divides by: number of lists for listwise
  # losses, items for pointwise, pairs for pairwise.
  @parameterized.parameters([{
      "loss_fn": losses.softmax_loss,
      "expected_value": [
          -log(exp(2.) / (exp(2.) + exp(1.) + exp(3.))),
          -log(exp(1.5) / (exp(1.) + exp(0.5) + exp(1.5)))
      ],
      "normalizer": 2.
  }, {
      "loss_fn": losses.listmle_loss,
      "expected_value": [
          -sum([
              log(exp(2.) / (exp(2.) + exp(1.) + exp(3.))),
              log(exp(1.) / (exp(1.) + exp(3.))),
              log(exp(3.) / (exp(3.))),
          ]), -sum([
              log(exp(1.5) / (exp(1.5) + exp(1.) + exp(0.5))),
              log(exp(1.) / (exp(1.) + exp(0.5))),
              log(exp(0.5) / (exp(0.5))),
          ])
      ],
      "normalizer": 2.
  }, {
      "loss_fn": losses.poly1_softmax_loss,
      "expected_value": [
          -log(exp(2.) / (exp(2.) + exp(1.) + exp(3.))) +
          (1. - (exp(2.) / (exp(2.) + exp(1.) + exp(3.)))),
          -log(exp(1.5) / (exp(1.) + exp(0.5) + exp(1.5))) +
          (1. - (exp(1.5) / (exp(1.) + exp(0.5) + exp(1.5))))
      ],
      "normalizer": 2.
  }, {
      "loss_fn": losses.pairwise_hinge_loss,
      "expected_value": [2., .5],
      "normalizer": 4.
  }, {
      "loss_fn": losses.pairwise_logistic_loss,
      "expected_value": [
          logloss(2. - 1.) + logloss(2. - 3.),
          logloss(1.5 - 1.) + logloss(1.5 - 0.5)
      ],
      "normalizer": 4.
  }, {
      "loss_fn": losses.pointwise_sigmoid_loss,
      "expected_value": [
          -log(sigmoid(2.)) - log(1. - sigmoid(1.)) - log(1. - sigmoid(3.)),
          -log(sigmoid(1.5)) - log(1. - sigmoid(1.)) - log(1. - sigmoid(0.5))
      ],
      "normalizer": 6.
  }, {
      "loss_fn": losses.pointwise_mse_loss,
      "expected_value": [(2. - 1.)**2 + (1. - 0.)**2 + (3. - 0.)**2,
                         (1. - 0.)**2 + (0.5 - 0.)**2 + (1.5 - 1.)**2],
      "normalizer": 6.
  }, {
      "loss_fn": losses.pairwise_mse_loss,
      "expected_value": [(1. - 1.)**2 + (-1. - 1.)**2 + (-1. - -1.)**2 +
                         (-2. - 0.)**2 + (1. - -1.)**2 + (2. - 0.)**2,
                         (0.5 - 0.)**2 + (-0.5 - -1.)**2 + (-0.5 - 0.)**2 +
                         (-1. - -1.)**2 + (0.5 - 1.)**2 + (1. - 1.)**2],
      "normalizer": 9. + 9.
  }])
  def test_computes_reduced_loss(self, loss_fn, expected_value, normalizer):
    """reduce_fn=jnp.mean divides by the normalizer; jnp.sum just sums."""
    scores = jnp.array([[2., 1., 3.], [1., 0.5, 1.5]])
    labels = jnp.array([[1., 0., 0.], [0., 0., 1.]])
    expected_value = jnp.asarray(expected_value)
    mean_loss = loss_fn(scores, labels, reduce_fn=jnp.mean)
    sum_loss = loss_fn(scores, labels, reduce_fn=jnp.sum)
    np.testing.assert_allclose(
        mean_loss, jnp.sum(expected_value) / normalizer, rtol=1E-5)
    np.testing.assert_allclose(sum_loss, jnp.sum(expected_value))
  # Expected unreduced shapes: (lists,) for listwise losses, (lists, n*n)
  # for pairwise, (lists, n) for pointwise, with n=3 items per list.
  @parameterized.parameters([{
      "loss_fn": losses.softmax_loss,
      "expected_shape": (2,)
  }, {
      "loss_fn": losses.listmle_loss,
      "expected_shape": (2,)
  }, {
      "loss_fn": losses.poly1_softmax_loss,
      "expected_shape": (2,)
  }, {
      "loss_fn": losses.pairwise_hinge_loss,
      "expected_shape": (2, 9)
  }, {
      "loss_fn": losses.pairwise_logistic_loss,
      "expected_shape": (2, 9)
  }, {
      "loss_fn": losses.pairwise_mse_loss,
      "expected_shape": (2, 9)
  }, {
      "loss_fn": losses.pointwise_sigmoid_loss,
      "expected_shape": (2, 3)
  }, {
      "loss_fn": losses.pointwise_mse_loss,
      "expected_shape": (2, 3)
  }])
  def test_computes_unreduced_loss(self, loss_fn, expected_shape):
    """reduce_fn=None returns per-term losses that sum to the reduced loss."""
    scores = jnp.array([[2., 1., 3.], [1., 0.5, 1.5]])
    labels = jnp.array([[1., 0., 0.], [0., 0., 1.]])
    none_loss = loss_fn(scores, labels, reduce_fn=None)
    sum_loss = loss_fn(scores, labels, reduce_fn=jnp.sum)
    self.assertEqual(none_loss.shape, expected_shape)
    self.assertEqual(jnp.sum(none_loss), sum_loss)
@parameterized.parameters([
losses.softmax_loss, losses.listmle_loss, losses.pairwise_hinge_loss,
losses.pairwise_logistic_loss, losses.pointwise_sigmoid_loss,
losses.pointwise_mse_loss, losses.pairwise_mse_loss,
losses.poly1_softmax_loss
])
def test_computes_loss_value_with_where(self, loss_fn):
scores = jnp.asarray([0., 3., 1., 2.])
labels = jnp.asarray([0., 0., 1., 1.])
where = jnp.asarray([True, True, False, True])
expected_scores = jnp.asarray([0., 3., 2.])
expected_labels = jnp.asarray([0., 0., 1.])
loss = loss_fn(scores, labels, where=where)
expected_loss = loss_fn(expected_scores, expected_labels)
np.testing.assert_allclose(expected_loss, loss)
@parameterized.parameters([
losses.softmax_loss, losses.listmle_loss, losses.pairwise_hinge_loss,
losses.pairwise_logistic_loss, losses.pointwise_sigmoid_loss,
losses.pointwise_mse_loss, losses.pairwise_mse_loss,
losses.poly1_softmax_loss
])
def test_computes_loss_value_with_all_masked(self, loss_fn):
scores = jnp.asarray([0., 3., 1., 2.])
labels = jnp.asarray([0., 0., 1., 1.])
where = jnp.asarray([False, False, False, False])
loss = loss_fn(scores, labels, where=where)
np.testing.assert_allclose(jnp.asarray(0.), loss, atol=1E-7)
@parameterized.parameters([
losses.softmax_loss, losses.listmle_loss, losses.pairwise_hinge_loss,
losses.pairwise_logistic_loss, losses.pointwise_sigmoid_loss,
losses.pointwise_mse_loss, losses.pairwise_mse_loss,
losses.poly1_softmax_loss
])
def test_grad_does_not_return_nan_for_zero_labels(self, loss_fn):
scores = jnp.asarray([0., 3., 1., 2.])
labels = jnp.asarray([0., 0., 0., 0.])
grads = jax.grad(loss_fn)(scores, labels, reduce_fn=jnp.mean)
np.testing.assert_array_equal(
jnp.isnan(grads), jnp.zeros_like(jnp.isnan(grads)))
@parameterized.parameters([
losses.softmax_loss, losses.listmle_loss, losses.pairwise_hinge_loss,
losses.pairwise_logistic_loss, losses.pointwise_sigmoid_loss,
losses.pointwise_mse_loss, losses.pairwise_mse_loss,
losses.poly1_softmax_loss
])
def test_grad_does_not_return_nan_with_all_masked(self, loss_fn):
scores = jnp.asarray([0., 3., 1., 2.])
labels = jnp.asarray([0., 0., 1., 1.])
where = jnp.asarray([False, False, False, False])
grads = jax.grad(loss_fn)(scores, labels, where=where, reduce_fn=jnp.mean)
np.testing.assert_array_equal(
jnp.isnan(grads), jnp.zeros_like(jnp.isnan(grads)))
def load_tests(loader, tests, ignore):
  """Hooks the doctests of the losses module into the unittest run."""
  del loader, ignore  # Unused by this hook.
  doctest_globals = {"jax": jax, "jnp": jnp, "rax": rax}
  tests.addTests(doctest.DocTestSuite(losses, globs=doctest_globals))
  return tests
# Run the full test suite when this module is executed directly.
if __name__ == "__main__":
  absltest.main()
|
{"hexsha": "949ece336d09a95c27aa868b22096ab8bd13be6b", "size": 19217, "ext": "py", "lang": "Python", "max_stars_repo_path": "rax/_src/losses_test.py", "max_stars_repo_name": "google/rax", "max_stars_repo_head_hexsha": "d6370d574246db9fb0566317f7cac8cd331526d7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2022-01-25T12:37:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T17:12:45.000Z", "max_issues_repo_path": "rax/_src/losses_test.py", "max_issues_repo_name": "google/rax", "max_issues_repo_head_hexsha": "d6370d574246db9fb0566317f7cac8cd331526d7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-02-08T23:02:42.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-08T23:02:42.000Z", "max_forks_repo_path": "rax/_src/losses_test.py", "max_forks_repo_name": "google/rax", "max_forks_repo_head_hexsha": "d6370d574246db9fb0566317f7cac8cd331526d7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.258490566, "max_line_length": 80, "alphanum_fraction": 0.4878493001, "include": true, "reason": "import numpy,import jax", "num_tokens": 6822}
|
// Copyright 2012 John Maddock. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at https://www.boost.org/LICENSE_1_0.txt
#ifndef BOOST_MP_CPP_INT_CHECKED_HPP
#define BOOST_MP_CPP_INT_CHECKED_HPP
#include <climits>
#include <limits>
#include <type_traits>
#include <stdexcept>
#include <string>
#include <boost/multiprecision/detail/standalone_config.hpp>
#include <boost/multiprecision/detail/no_exceptions_support.hpp>
namespace boost { namespace multiprecision { namespace backends { namespace detail {
//
// Simple routines for performing checked arithmetic with a builtin arithmetic type.
// Note that this is not a complete header, it must be included as part of boost/multiprecision/cpp_int.hpp.
//
//
// Returns the largest representable value of T. The 128-bit integer types
// are special-cased because std::numeric_limits may not be specialized for
// them in all build modes.
//
template <typename T>
inline constexpr T type_max() noexcept
{
   return
#ifdef BOOST_HAS_INT128
       std::is_same<T, boost::multiprecision::int128_type>::value ? INT128_MAX :
       std::is_same<T, boost::multiprecision::uint128_type>::value ? UINT128_MAX :
#endif
       (std::numeric_limits<T>::max)();
}
//
// Returns the smallest representable value of T (zero for the unsigned
// 128-bit type), with the same 128-bit special-casing as type_max().
//
template <typename T>
inline constexpr T type_min() noexcept
{
   return
#ifdef BOOST_HAS_INT128
       std::is_same<T, boost::multiprecision::int128_type>::value ? INT128_MIN :
       std::is_same<T, boost::multiprecision::uint128_type>::value ? T(0) :
#endif
       (std::numeric_limits<T>::min)();
}
//
// Error-reporting helpers: each raises, via BOOST_MP_THROW_EXCEPTION, a
// descriptive exception for the arithmetic operation that failed.
//
inline void raise_overflow(std::string op)
{
   BOOST_MP_THROW_EXCEPTION(std::overflow_error("overflow in " + op));
}
inline void raise_add_overflow()
{
   raise_overflow("addition");
}
// Subtraction reports std::range_error rather than std::overflow_error:
// it is only triggered by an unsigned result going negative.
inline void raise_subtract_overflow()
{
   BOOST_MP_THROW_EXCEPTION(std::range_error("Subtraction resulted in a negative value, but the type is unsigned"));
}
inline void raise_mul_overflow()
{
   raise_overflow("multiplication");
}
inline void raise_div_overflow()
{
   raise_overflow("division");
}
//
// checked_add_imp, signed overload (tag integral_constant<bool, true>):
// verifies a + b cannot pass type_max / type_min before performing the add.
//
template <class A>
inline BOOST_MP_CXX14_CONSTEXPR A checked_add_imp(A a, A b, const std::integral_constant<bool, true>&)
{
   if (a > 0)
   {
      // Positive overflow only possible when both operands are positive.
      if ((b > 0) && ((type_max<A>() - b) < a))
         raise_add_overflow();
   }
   else
   {
      // Negative overflow only possible when both operands are negative.
      if ((b < 0) && ((type_min<A>() - b) > a))
         raise_add_overflow();
   }
   return a + b;
}
//
// checked_add_imp, unsigned overload: a single wrap-around test suffices.
//
template <class A>
inline BOOST_MP_CXX14_CONSTEXPR A checked_add_imp(A a, A b, const std::integral_constant<bool, false>&)
{
   if ((type_max<A>() - b) < a)
      raise_add_overflow();
   return a + b;
}
//
// checked_add dispatchers: the int tag selects the checked/unchecked policy,
// and the checked path further dispatches on signedness of A.
//
template <class A>
inline BOOST_MP_CXX14_CONSTEXPR A checked_add(A a, A b, const std::integral_constant<int, checked>&)
{
   return checked_add_imp(a, b, std::integral_constant<bool, boost::multiprecision::detail::is_signed<A>::value && boost::multiprecision::detail::is_integral<A>::value > ());
}
template <class A>
inline BOOST_MP_CXX14_CONSTEXPR A checked_add(A a, A b, const std::integral_constant<int, unchecked>&)
{
   return a + b;
}
//
// checked_subtract_imp, signed overload: a - b overflows only when the
// operands have opposite signs; each branch tests the relevant limit.
//
template <class A>
inline BOOST_MP_CXX14_CONSTEXPR A checked_subtract_imp(A a, A b, const std::integral_constant<bool, true>&)
{
   if (a > 0)
   {
      // a positive, b negative: a - b could exceed type_max.
      if ((b < 0) && ((type_max<A>() + b) < a))
         raise_subtract_overflow();
   }
   else
   {
      // a non-positive, b positive: a - b could fall below type_min.
      if ((b > 0) && ((type_min<A>() + b) > a))
         raise_subtract_overflow();
   }
   return a - b;
}
//
// checked_subtract_imp, unsigned overload: the result may not go negative.
//
template <class A>
inline BOOST_MP_CXX14_CONSTEXPR A checked_subtract_imp(A a, A b, const std::integral_constant<bool, false>&)
{
   if (a < b)
      raise_subtract_overflow();
   return a - b;
}
//
// checked_subtract dispatchers, mirroring checked_add above.
//
template <class A>
inline BOOST_MP_CXX14_CONSTEXPR A checked_subtract(A a, A b, const std::integral_constant<int, checked>&)
{
   return checked_subtract_imp(a, b, std::integral_constant<bool, boost::multiprecision::detail::is_signed<A>::value && boost::multiprecision::detail::is_integral<A>::value>());
}
template <class A>
inline BOOST_MP_CXX14_CONSTEXPR A checked_subtract(A a, A b, const std::integral_constant<int, unchecked>&)
{
   return a - b;
}
//
// checked_multiply, checked policy: guards a * b by testing
// type_max / |a| < |b| before multiplying.
// NOTE(review): for a two's-complement signed A with a == type_min<A>(),
// abs(a) itself overflows — presumably callers never pass that value here;
// confirm upstream.
//
template <class A>
inline BOOST_MP_CXX14_CONSTEXPR A checked_multiply(A a, A b, const std::integral_constant<int, checked>&)
{
   BOOST_MP_USING_ABS
   if (a && (type_max<A>() / abs(a) < abs(b)))
      raise_mul_overflow();
   return a * b;
}
// checked_multiply, unchecked policy: no diagnostics.
template <class A>
inline BOOST_MP_CXX14_CONSTEXPR A checked_multiply(A a, A b, const std::integral_constant<int, unchecked>&)
{
   return a * b;
}
//
// checked_divide, checked policy: only division by zero is diagnosed.
// NOTE(review): the signed type_min / -1 overflow case is not checked here —
// confirm it is handled (or cannot occur) at the call sites.
//
template <class A>
inline BOOST_MP_CXX14_CONSTEXPR A checked_divide(A a, A b, const std::integral_constant<int, checked>&)
{
   if (b == 0)
      raise_div_overflow();
   return a / b;
}
// checked_divide, unchecked policy: no diagnostics.
template <class A>
inline BOOST_MP_CXX14_CONSTEXPR A checked_divide(A a, A b, const std::integral_constant<int, unchecked>&)
{
   return a / b;
}
//
// checked_left_shift, checked policy: throws if any set bit of `a` would be
// shifted out of the type — either the shift count exceeds the width, or the
// bits that survive a right-shift by the complementary amount are non-zero.
//
template <class A>
inline BOOST_MP_CXX14_CONSTEXPR A checked_left_shift(A a, unsigned long long shift, const std::integral_constant<int, checked>&)
{
   if (a && shift)
   {
      if ((shift > sizeof(A) * CHAR_BIT) || (a >> (sizeof(A) * CHAR_BIT - shift)))
         BOOST_MP_THROW_EXCEPTION(std::overflow_error("Shift out of range"));
   }
   return a << shift;
}
//
// checked_left_shift, unchecked policy: an out-of-range shift yields 0
// instead of invoking undefined behaviour.
//
template <class A>
inline BOOST_MP_CXX14_CONSTEXPR A checked_left_shift(A a, unsigned long long shift, const std::integral_constant<int, unchecked>&)
{
   return (shift >= sizeof(A) * CHAR_BIT) ? 0 : a << shift;
}
}}}} // namespace boost::multiprecision::backends::detail
#endif
|
{"hexsha": "5d621403d0e218a17403a689f50ac25c9ae47e57", "size": 5242, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/boost/multiprecision/cpp_int/checked.hpp", "max_stars_repo_name": "mariospr/multiprecision", "max_stars_repo_head_hexsha": "4720edda9e3058ba68be8ae6c29342536b9ce142", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/boost/multiprecision/cpp_int/checked.hpp", "max_issues_repo_name": "mariospr/multiprecision", "max_issues_repo_head_hexsha": "4720edda9e3058ba68be8ae6c29342536b9ce142", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/boost/multiprecision/cpp_int/checked.hpp", "max_forks_repo_name": "mariospr/multiprecision", "max_forks_repo_head_hexsha": "4720edda9e3058ba68be8ae6c29342536b9ce142", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.2849162011, "max_line_length": 177, "alphanum_fraction": 0.7027851965, "num_tokens": 1391}
|
//---------------------------------------------------------------------------//
// Copyright (c) 2013-2014 Kyle Lutz <kyle.r.lutz@gmail.com>
//
// Distributed under the Boost Software License, Version 1.0
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//
// See http://boostorg.github.com/compute for more information.
//---------------------------------------------------------------------------//
#ifndef BOOST_COMPUTE_INTEROP_OPENGL_OPENGL_RENDERBUFFER_HPP
#define BOOST_COMPUTE_INTEROP_OPENGL_OPENGL_RENDERBUFFER_HPP
#include <boost/compute/image/image_object.hpp>
#include <boost/compute/interop/opengl/gl.hpp>
#include <boost/compute/interop/opengl/cl_gl.hpp>
#include <boost/compute/type_traits/type_name.hpp>
#include <boost/compute/utility/extents.hpp>
namespace boost {
namespace compute {
/// \class opengl_renderbuffer
///
/// A OpenCL buffer for accessing an OpenGL renderbuffer object.
class opengl_renderbuffer : public image_object
{
public:
    /// Creates a null OpenGL renderbuffer object.
    opengl_renderbuffer()
        : image_object()
    {
    }
    /// Creates a new OpenGL renderbuffer object for \p mem.
    explicit opengl_renderbuffer(cl_mem mem, bool retain = true)
        : image_object(mem, retain)
    {
    }
    /// Creates a new OpenGL renderbuffer object in \p context for
    /// \p renderbuffer with \p flags.
    ///
    /// \see_opencl_ref{clCreateFromGLRenderbuffer}
    opengl_renderbuffer(const context &context,
                        GLuint renderbuffer,
                        cl_mem_flags flags = read_write)
    {
        cl_int error = 0;
        m_mem = clCreateFromGLRenderbuffer(
            context, flags, renderbuffer, &error
        );
        // A null cl_mem signals failure; surface the OpenCL error code.
        if(!m_mem){
            BOOST_THROW_EXCEPTION(opencl_error(error));
        }
    }
    /// Creates a new OpenGL renderbuffer object as a copy of \p other.
    opengl_renderbuffer(const opengl_renderbuffer &other)
        : image_object(other)
    {
    }
    /// Copies the OpenGL renderbuffer object from \p other.
    opengl_renderbuffer& operator=(const opengl_renderbuffer &other)
    {
        if(this != &other){
            image_object::operator=(other);
        }
        return *this;
    }
    /// Destroys the OpenGL buffer object.
    ~opengl_renderbuffer()
    {
    }
    /// Returns the size (width, height) of the renderbuffer.
    extents<2> size() const
    {
        extents<2> size;
        size[0] = get_image_info<size_t>(CL_IMAGE_WIDTH);
        size[1] = get_image_info<size_t>(CL_IMAGE_HEIGHT);
        return size;
    }
    /// Returns the origin of the renderbuffer (\c 0, \c 0).
    extents<2> origin() const
    {
        return extents<2>();
    }
    /// Returns the OpenGL memory object ID.
    ///
    /// \see_opencl_ref{clGetGLObjectInfo}
    GLuint get_opengl_object() const
    {
        GLuint object = 0;
        // NOTE(review): the clGetGLObjectInfo return code is ignored; on
        // failure the zero-initialized `object` is returned — confirm intended.
        clGetGLObjectInfo(m_mem, 0, &object);
        return object;
    }
    /// Returns the OpenGL memory object type.
    ///
    /// \see_opencl_ref{clGetGLObjectInfo}
    cl_gl_object_type get_opengl_type() const
    {
        cl_gl_object_type type;
        // NOTE(review): `type` stays uninitialized if clGetGLObjectInfo fails
        // (return code is ignored) — confirm this cannot happen here.
        clGetGLObjectInfo(m_mem, &type, 0);
        return type;
    }
};
namespace detail {
// set_kernel_arg() specialization for opengl_renderbuffer
// (delegates to the generic image_object implementation).
template<>
struct set_kernel_arg<opengl_renderbuffer> : public set_kernel_arg<image_object> { };
} // end detail namespace
} // end compute namespace
} // end boost namespace
BOOST_COMPUTE_TYPE_NAME(boost::compute::opengl_renderbuffer, image2d_t)
#endif // BOOST_COMPUTE_INTEROP_OPENGL_OPENGL_RENDERBUFFER_HPP
|
{"hexsha": "ca55356f27750d8999e40c0986b2c8a480520ae6", "size": 3767, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "ios/Pods/boost-for-react-native/boost/compute/interop/opengl/opengl_renderbuffer.hpp", "max_stars_repo_name": "rudylee/expo", "max_stars_repo_head_hexsha": "b3e65a7a5b205f14a3eb6cd6fa8d13c8d663b1cc", "max_stars_repo_licenses": ["Apache-2.0", "MIT"], "max_stars_count": 8805.0, "max_stars_repo_stars_event_min_datetime": "2015-11-03T00:52:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T22:30:03.000Z", "max_issues_repo_path": "ios/Pods/boost-for-react-native/boost/compute/interop/opengl/opengl_renderbuffer.hpp", "max_issues_repo_name": "rudylee/expo", "max_issues_repo_head_hexsha": "b3e65a7a5b205f14a3eb6cd6fa8d13c8d663b1cc", "max_issues_repo_licenses": ["Apache-2.0", "MIT"], "max_issues_count": 14694.0, "max_issues_repo_issues_event_min_datetime": "2015-02-24T15:13:42.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T13:16:45.000Z", "max_forks_repo_path": "ios/Pods/boost-for-react-native/boost/compute/interop/opengl/opengl_renderbuffer.hpp", "max_forks_repo_name": "rudylee/expo", "max_forks_repo_head_hexsha": "b3e65a7a5b205f14a3eb6cd6fa8d13c8d663b1cc", "max_forks_repo_licenses": ["Apache-2.0", "MIT"], "max_forks_count": 1329.0, "max_forks_repo_forks_event_min_datetime": "2015-11-03T20:25:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T18:10:38.000Z", "avg_line_length": 28.9769230769, "max_line_length": 86, "alphanum_fraction": 0.6161401646, "num_tokens": 841}
|
"""Unit tests for relentless.simulate.lammps."""
import tempfile
import unittest
try:
import lammps
except ImportError:
pass
import numpy
import relentless
from ..potential.test_pair import LinPot
# Each test builds a small mock ensemble + pair potential, drives the
# relentless LAMMPS wrapper, and inspects the resulting PyLammps state.
@unittest.skipIf(not relentless.simulate.lammps._lammps_found, "LAMMPS not installed")
class test_LAMMPS(unittest.TestCase):
    """Unit tests for relentless.LAMMPS"""
    def setUp(self):
        # Fresh temporary working directory per test; removed in tearDown.
        self._tmp = tempfile.TemporaryDirectory()
        self.directory = relentless.data.Directory(self._tmp.name)
    #mock (NVT) ensemble and potential for testing
    def ens_pot(self):
        ens = relentless.ensemble.Ensemble(T=2.0, V=relentless.volume.Cube(L=10.0), N={'1':2,'2':3})
        ens.P = 2.5
        # setup potentials
        pot = LinPot(ens.types,params=('m',))
        for pair in pot.coeff:
            pot.coeff[pair]['m'] = 2.0
        pots = relentless.simulate.Potentials()
        pots.pair.potentials.append(pot)
        pots.pair.rmax = 10.0
        pots.pair.num = 11
        return (ens,pots)
    def create_file(self):
        # Writes a minimal 5-atom, 2-type LAMMPS data file into the test
        # directory and returns its path.
        file_ = self.directory.file('test.data')
        with open(file_,'w') as f:
            f.write(('LAMMPS test data\n'
                     '\n'
                     '5 atoms\n'
                     '2 atom types\n'
                     '\n'
                     '-5.0 5.0 xlo xhi\n'
                     '-5.0 5.0 ylo yhi\n'
                     '-5.0 5.0 zlo zhi\n'
                     '\n'
                     'Atoms\n'
                     '\n'
                     '1 1 -4.0 -4.0 -4.0\n'
                     '2 1 -2.0 -2.0 -2.0\n'
                     '3 2 0.0 0.0 0.0\n'
                     '4 2 2.0 2.0 2.0\n'
                     '5 2 4.0 4.0 4.0\n'
                     '\n'
                     'Masses\n'
                     '\n'
                     '1 0.3\n'
                     '2 0.1'))
        return file_
    def test_initialize(self):
        """Test running initialization simulation operations."""
        #InitializeFromFile
        ens,pot = self.ens_pot()
        file_ = self.create_file()
        op = relentless.simulate.lammps.InitializeFromFile(filename=file_)
        l = relentless.simulate.lammps.LAMMPS(operations=op, quiet=False)
        sim = l.run(ensemble=ens, potentials=pot, directory=self.directory)
        # A populated PyLammps system indicates initialization succeeded.
        pl = lammps.PyLammps(ptr=sim.lammps)
        self.assertIsNotNone(pl.system)
        #InitializeRandomly
        op = relentless.simulate.lammps.InitializeRandomly(seed=1)
        l = relentless.simulate.lammps.LAMMPS(operations=op, quiet=False)
        sim = l.run(ensemble=ens, potentials=pot, directory=self.directory)
        pl = lammps.PyLammps(ptr=sim.lammps)
        self.assertIsNotNone(pl.system)
    def test_minimization(self):
        """Test running energy minimization simulation operation."""
        #MinimizeEnergy
        ens,pot = self.ens_pot()
        file_ = self.create_file()
        op = [relentless.simulate.lammps.InitializeFromFile(filename=file_),
              relentless.simulate.lammps.MinimizeEnergy(energy_tolerance=1e-7,
                                                        force_tolerance=1e-7,
                                                        max_iterations=1000,
                                                        dt=0.01)
             ]
        l = relentless.simulate.lammps.LAMMPS(operations=op, quiet=False)
        # Passes if the run completes without raising.
        sim = l.run(ensemble=ens, potentials=pot, directory=self.directory)
    def test_integrators(self):
        """Test adding and removing integrator operations."""
        # The fix list always contains one unnamed entry; every comparison
        # below includes it.
        default_fixes = [{'name':''}]
        init = relentless.simulate.lammps.InitializeRandomly(seed=1)
        l = relentless.simulate.lammps.LAMMPS(operations=init, quiet=False)
        #LangevinIntegrator
        #float friction
        ens,pot = self.ens_pot()
        lgv = relentless.simulate.lammps.AddLangevinIntegrator(dt=0.5,
                                                               friction=1.5,
                                                               seed=2)
        lgv_r = relentless.simulate.lammps.RemoveLangevinIntegrator(add_op=lgv)
        l.operations = [init, lgv]
        sim = l.run(ensemble=ens, potentials=pot, directory=self.directory)
        pl = lammps.PyLammps(ptr=sim.lammps)
        self.assertCountEqual(pl.fixes, default_fixes+[{'name':'1','style':'langevin','group':'all'},
                                                       {'name':'2','style':'nve','group':'all'}])
        # Removing the integrator must restore the default fix list.
        lgv_r(sim)
        self.assertCountEqual(pl.fixes, default_fixes)
        #dictionary friction
        lgv = relentless.simulate.lammps.AddLangevinIntegrator(dt=0.5,
                                                               friction={'1':2.0,'2':5.0},
                                                               seed=2)
        lgv_r = relentless.simulate.lammps.RemoveLangevinIntegrator(add_op=lgv)
        l.operations = [init, lgv]
        sim = l.run(ensemble=ens, potentials=pot, directory=self.directory)
        pl = lammps.PyLammps(ptr=sim.lammps)
        self.assertCountEqual(pl.fixes, default_fixes+[{'name':'3','style':'langevin','group':'all'},
                                                       {'name':'4','style':'nve','group':'all'}])
        lgv_r(sim)
        self.assertCountEqual(pl.fixes, default_fixes)
        #single-type friction
        ens_1 = relentless.ensemble.Ensemble(T=2.0, V=relentless.volume.Cube(L=10.0), N={'1':2})
        lgv = relentless.simulate.lammps.AddLangevinIntegrator(dt=0.5,
                                                               friction={'1':3.0},
                                                               seed=2)
        lgv_r = relentless.simulate.lammps.RemoveLangevinIntegrator(add_op=lgv)
        l.operations = [init, lgv]
        sim = l.run(ensemble=ens_1, potentials=pot, directory=self.directory)
        pl = lammps.PyLammps(ptr=sim.lammps)
        self.assertCountEqual(pl.fixes, default_fixes+[{'name':'5','style':'langevin','group':'all'},
                                                       {'name':'6','style':'nve','group':'all'}])
        lgv_r(sim)
        self.assertCountEqual(pl.fixes, default_fixes)
        #invalid-type friction
        # Friction keyed by a type ('3') absent from the ensemble must fail.
        lgv = relentless.simulate.lammps.AddLangevinIntegrator(dt=0.5,
                                                               friction={'2':5.0,'3':2.0},
                                                               seed=2)
        l.operations = [init, lgv]
        with self.assertRaises(KeyError):
            sim = l.run(ensemble=ens, potentials=pot, directory=self.directory)
        #NPTIntegrator
        ens_npt = relentless.ensemble.Ensemble(T=100.0, P=5.5, N={'A':2,'B':3})
        ens_npt.V = relentless.volume.Cube(L=10.0)
        npt = relentless.simulate.lammps.AddNPTIntegrator(dt=0.5,
                                                          tau_T=1.0,
                                                          tau_P=1.5)
        npt_r = relentless.simulate.lammps.RemoveNPTIntegrator(add_op=npt)
        l.operations = [init, npt]
        sim = l.run(ensemble=ens_npt, potentials=pot, directory=self.directory)
        pl = lammps.PyLammps(ptr=sim.lammps)
        self.assertCountEqual(pl.fixes, default_fixes+[{'name':'1','style':'npt','group':'all'}])
        npt_r(sim)
        self.assertCountEqual(pl.fixes, default_fixes)
        #NVTIntegrator
        nvt = relentless.simulate.lammps.AddNVTIntegrator(dt=0.5,
                                                          tau_T=1.0)
        nvt_r = relentless.simulate.lammps.RemoveNVTIntegrator(add_op=nvt)
        l.operations = [init, nvt]
        sim = l.run(ensemble=ens, potentials=pot, directory=self.directory)
        pl = lammps.PyLammps(ptr=sim.lammps)
        self.assertEqual(pl.fixes[1]['style'], 'nvt')
        self.assertEqual(pl.fixes[1]['group'], 'all')
        nvt_r(sim)
        self.assertCountEqual(pl.fixes, default_fixes)
    def test_run(self):
        """Test run simulation operations."""
        init = relentless.simulate.lammps.InitializeRandomly(seed=1)
        l = relentless.simulate.lammps.LAMMPS(operations=init, quiet=False)
        #Run
        ens,pot = self.ens_pot()
        run = relentless.simulate.lammps.Run(steps=1000)
        l.operations = [init,run]
        sim = l.run(ensemble=ens, potentials=pot, directory=self.directory)
        #RunUpTo
        run = relentless.simulate.lammps.RunUpTo(step=999)
        l.operations = [init,run]
        sim = l.run(ensemble=ens, potentials=pot, directory=self.directory)
    def test_analyzer(self):
        """Test ensemble analyzer simulation operation."""
        ens,pot = self.ens_pot()
        init = relentless.simulate.lammps.InitializeRandomly(seed=1)
        analyzer = relentless.simulate.lammps.AddEnsembleAnalyzer(check_thermo_every=5,
                                                                  check_rdf_every=5,
                                                                  rdf_dr=1.0)
        run = relentless.simulate.lammps.Run(steps=500)
        nvt = relentless.simulate.lammps.AddNVTIntegrator(dt=0.1,
                                                          tau_T=1.0)
        op = [init,nvt,analyzer,run]
        h = relentless.simulate.lammps.LAMMPS(operations=op,quiet=False)
        sim = h.run(ensemble=ens, potentials=pot, directory=self.directory)
        #extract ensemble
        # The analyzer should report non-trivial T, P, V and an RDF table of
        # the expected shape for every pair.
        ens_ = analyzer.extract_ensemble(sim)
        self.assertIsNotNone(ens_.T)
        self.assertNotEqual(ens_.T, 0)
        self.assertIsNotNone(ens_.P)
        self.assertNotEqual(ens_.P, 0)
        self.assertIsNotNone(ens_.V)
        self.assertNotEqual(ens_.V.volume, 0)
        for i,j in ens_.rdf:
            self.assertEqual(ens_.rdf[i,j].table.shape, (len(pot.pair.r)-1,2))
    def tearDown(self):
        self._tmp.cleanup()
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
{"hexsha": "792597d242fd90461c1bc4295d02e31d0c3b2d66", "size": 9907, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/simulate/test_lammps.py", "max_stars_repo_name": "mphowardlab/relentless", "max_stars_repo_head_hexsha": "e89b0d461106273569d08f1cf268dad1f223ce8d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-12-13T18:12:14.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-27T00:33:32.000Z", "max_issues_repo_path": "tests/simulate/test_lammps.py", "max_issues_repo_name": "mphowardlab/relentless", "max_issues_repo_head_hexsha": "e89b0d461106273569d08f1cf268dad1f223ce8d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 38, "max_issues_repo_issues_event_min_datetime": "2020-06-15T18:45:43.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-08T17:52:36.000Z", "max_forks_repo_path": "tests/simulate/test_lammps.py", "max_forks_repo_name": "mphowardlab/relentless", "max_forks_repo_head_hexsha": "e89b0d461106273569d08f1cf268dad1f223ce8d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-12-17T21:59:17.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-17T21:59:17.000Z", "avg_line_length": 44.0311111111, "max_line_length": 101, "alphanum_fraction": 0.5423437973, "include": true, "reason": "import numpy", "num_tokens": 2402}
|
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2011 Bryce Lelbach
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
////////////////////////////////////////////////////////////////////////////////
#include <iostream>
#include <boost/format.hpp>
int main()
{
std::cout << (boost::format("%02d%02d%02d")
% __GNUC__ % __GNUC_MINOR__ % __GNUC_PATCHLEVEL__);
}
|
{"hexsha": "5a23fdfd7c48d56bdb2c6dac277c6a377601fc75", "size": 537, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "cmake/tests/gcc_version.cpp", "max_stars_repo_name": "akemp/hpx", "max_stars_repo_head_hexsha": "1ddf7282e322c30d82f2be044071aed14807ebe1", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cmake/tests/gcc_version.cpp", "max_issues_repo_name": "akemp/hpx", "max_issues_repo_head_hexsha": "1ddf7282e322c30d82f2be044071aed14807ebe1", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cmake/tests/gcc_version.cpp", "max_forks_repo_name": "akemp/hpx", "max_forks_repo_head_hexsha": "1ddf7282e322c30d82f2be044071aed14807ebe1", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.5882352941, "max_line_length": 80, "alphanum_fraction": 0.4748603352, "num_tokens": 110}
|
C     Compile-time problem-size constant (50000 elements), presumably the
C     array length used by the including kernel sources -- confirm there.
      INTEGER SIZE
      PARAMETER(SIZE=50000)
|
{"hexsha": "e5b100f88ace86e7c985b7484f0211d3c1ccd5d2", "size": 48, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "packages/PIPS/validation/SAC/kernels/ALPHABLENDING/ALPHABLENDING_INC.f", "max_stars_repo_name": "DVSR1966/par4all", "max_stars_repo_head_hexsha": "86b33ca9da736e832b568c5637a2381f360f1996", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 51, "max_stars_repo_stars_event_min_datetime": "2015-01-31T01:51:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-18T02:01:50.000Z", "max_issues_repo_path": "packages/PIPS/validation/SAC/kernels/ALPHABLENDING/ALPHABLENDING_INC.f", "max_issues_repo_name": "DVSR1966/par4all", "max_issues_repo_head_hexsha": "86b33ca9da736e832b568c5637a2381f360f1996", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2017-05-29T09:29:00.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-11T16:01:39.000Z", "max_forks_repo_path": "packages/PIPS/validation/SAC/kernels/ALPHABLENDING/ALPHABLENDING_INC.f", "max_forks_repo_name": "DVSR1966/par4all", "max_forks_repo_head_hexsha": "86b33ca9da736e832b568c5637a2381f360f1996", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2015-03-26T08:05:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-18T02:01:51.000Z", "avg_line_length": 12.0, "max_line_length": 27, "alphanum_fraction": 0.6041666667, "num_tokens": 13}
|
import numpy as np
import itertools
import gpuscheduler
import argparse
import os
import uuid
import hashlib
import glob
import math
from itertools import product
from torch.optim.lr_scheduler import OneCycleLR
from os.path import join
# Command-line interface for the job-submission script.
# NOTE(review): --dry and --p are parsed but never read in the visible part
# of this script — confirm they are consumed further down.
parser = argparse.ArgumentParser(description='Compute script.')
parser.add_argument('--dry', action='store_true')
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--p', type=float, default=1.0, help='Probability with which to select a configuration.')
parser.add_argument('--baseline', action='store_true', help='Run baseline transformer')
args = parser.parse_args()
# GPUs per job; interpolated into the fairseq-train launch command below.
gpus = 8
cmd = 'MKL_THREADING_LAYER=GNU OMP_NUM_THREADS=1 fairseq-train --task language_modeling --share-decoder-input-output-embed --sample-break-mode none --ddp-backend=no_c10d --log-format simple --log-interval 50 --fp16 --keep-best-checkpoints 1 --no-epoch-checkpoints --keep-interval-updates 5 --distributed-port 12597 --distributed-world-size {0} --valid-subset valid'.format(gpus)
# Fixed (non-swept) fairseq arguments; selected by --baseline.
args2 = {}
#baseline
if args.baseline:
    #args2['optimizer'] = 'adam'
    #args2['adam-betas'] = "'(0.9, 0.98)'"
    #args2['lr'] = 0.0005
    args2['arch'] = 'transformer_lm'
    #args2['max-tokens'] = 2048
# moe
else:
    #args2['optimizer'] = 'lamb'
    #args2['lamb-betas'] = "'(0.9, 0.999)'"
    #args2['fp16-no-flatten-grads'] = ''
    #args2['warmup-updates'] = 400
    #args2['optimizer'] = 'adam'
    #args2['adam-betas'] = "'(0.9, 0.98)'"
    #args2['lr'] = 0.0005
    args2['arch'] = 'moe_lm'
    args2['moe-start-layer'] = 0
    args2['max-tokens'] = 2048
# Run naming, cluster resources, and scheduler selection.
if args.baseline:
    name = 'warmup1'
    constraint = 'volta32gb'
else:
    name = 'moe30'
    constraint = 'volta32gb'
logfolder = 'cc_small/baselines/transformers/{0}'.format(name)
ckp_name = logfolder
#time_hours = 24*2
cores_per_job = 5
# Memory scales with GPU count, capped at 8 GPUs' worth.
mem = 48*(8 if gpus > 8 else gpus)
num_seeds = 1
seed_offset = 0
time_hours = 12
time_minutes = 0
#account = 'cse'
#account = 'stf'
#account = 'ark'
#partition = 'scavenge'
#partition = 'scavenge,learnfair'
partition = 'learnfair'
#partition = 'uninterrupted'
#partition = 'dev'
change_dir = 'fairseq_private/'
repo = 'fairseq_private'
exclude = ''
s = gpuscheduler.HyakScheduler(verbose=args.verbose, account='', partition=partition, use_gres=False)
#s = gpuscheduler.SshScheduler(verbose=args.verbose)
#args2['dropout'] = 0.1
#args2['no-save'] = ''
args2['weight-decay'] = 0.00
#args2['max-tokens'] = 1024
fp16 = True
# args3 holds the hyperparameter sweep grid; tuple keys sweep several
# arguments jointly.
args3 = {}
model_dim = 64
doublings = 4
num_experts = 64
# Model-architecture sweep: MoE branch vs. baseline-transformer branch.
if not args.baseline:
    key = ('decoder-embed-dim', 'decoder-ffn-embed-dim', 'moe-ff-dim', 'decoder-attention-heads', 'dummy', 'decoder-input-dim', 'decoder-output-dim', 'num-experts')
    args3[key] = []
    #for num_experts in [16, 32]:
    # NOTE(review): `ff_dim`, `moe_ff_dim`, and `heads` are not defined on
    # this path — the loops that would set them are commented out below, so
    # this line raises NameError whenever --baseline is NOT passed. Confirm
    # the intended values before running the MoE sweep.
    args3[key].append((model_dim, ff_dim, moe_ff_dim, heads, 0, model_dim, model_dim, num_experts))
    #for num_experts in [16]:
    #    #for ff_factor in [4, 8, 16, 32, 64]:
    #    for ff_factor in [128]:
    #    #for ff_factor in [8]:
    #        for i in range(doublings):
    #            if i < 3: continue
    #            factor = 2**i
    #            heads = base_heads*factor
    #            emb_dim = model_dim*factor
    #            ff_dim = model_dim*factor*ff_factor
    #            args3[key].append((emb_dim, ff_dim, ff_dim//num_experts, heads, i, emb_dim, emb_dim, num_experts))
    #            #args3[key].append((emb_dim, ff_dim, ff_dim//2, heads, i, emb_dim, emb_dim))
    args3['epsilon'] = [0.2]
    args3['moe-freq'] = [2]
    #args3['sample-type'] = ['argmax']
    args3[('sample-type', 'experts-per-batch')] = [('sample-constraint', 16)]
    args3['criterion'] = ['moe_cross_entropy']
    args3['use-ff-norm'] = [False]
    args3['loss-type'] = ['mean-segment-diff-normal']
    args3[('gate-type', 'experts-per-seq', 'iloss-weight', 'gate-sharing')] = []
    #args3[('gate-type', 'experts-per-seq', 'iloss-weight', 'gate-sharing')].append(('segments', 7, 0.01, 'single'))
    #args3[('gate-type', 'experts-per-seq', 'iloss-weight', 'gate-sharing')].append(('word-level', 511, 0.01, 'none'))
    args3[('gate-type', 'experts-per-seq', 'iloss-weight', 'gate-sharing')].append(('word-level', 511, 0.01, 'single'))
    #args3[('gate-type', 'experts-per-seq', 'sample-type', 'experts-per-batch', 'iloss-weight')].append(('word-level', 255, 'sample-constraint', 4, 0.01))
    args3['agg-type'] = ['mean']
    #args3['iloss-weight'] = [0.01]
    #args3[('num-experts', 'iloss-weight')] = [(16, 0.01), (16, 0.05), (16, 0.1)]
    #args3[('num-experts', 'iloss-weight')] = [(8, 0.02)]
    #args3['num-experts'] = [16]
    #args3[('gate-type', 'experts-per-seq')] = [('segments', 255), ('segments', 127), ('word-level', 255)]
    #args3['iloss-weight'] = [0.01]
    args2['special-eval'] = ''
else:
    key = ('decoder-embed-dim', 'decoder-ffn-embed-dim', 'decoder-attention-heads', 'dummy', 'decoder-input-dim', 'decoder-output-dim')
    args3[key] = []
    for model_dim in [1024]:
        # Heads scale with model width: 8 heads per 512 dims.
        heads = 8*(model_dim//512)
        for ff_dim in [8192]:
            args3[key].append((model_dim, ff_dim, heads, 0, model_dim, model_dim))
    seqs_per_mini_batch = 512 # OpenAI scaling laws mini-batch size
    args3['decoder-layers'] = [10]
    args3[('max-tokens', 'update-freq', 'tokens-per-sample')] = []
    #args3[('max-tokens', 'update-freq', 'memory-efficient-fp16', 'adam-bits', 'decoder-ffn-embed-dim')].append((2048, seqs_per_mini_batch//(2048//args2['tokens-per-sample'])//gpus, True, 32, 81920))
    args3[('max-tokens', 'update-freq', 'tokens-per-sample')].append((2048, 128//gpus, 512))
    #for ff_factor in [4, 8, 16, 32, 64]:
    #for ff_factor in [128]:
    #    for i in range(doublings):
    #        if i < 3: continue
    #        factor = 2**i
    #        heads = base_heads*factor
    #        emb_dim = model_dim*factor
    #        ff_dim = model_dim*factor*ff_factor
    #        args3[key].append((emb_dim, ff_dim, heads, i, emb_dim, emb_dim))
# Common training-schedule and optimizer settings (both branches).
args2['validate-interval-updates'] = 1000
#args3['decoder-layers'] = [4, 8]
#args3[('dropout', 'attention-dropout', 'relu-dropout')] = [(0.0, 0.0, 0.0), (0.1, 0.1, 0.1)]
args3[('dropout', 'attention-dropout', 'relu-dropout')] = [(0.0, 0.0, 0.0)]
# WT
#args3[('max-update', 'warmup-updates', '')] = [(80000, 24000, ' data/wikitext-103')]
#args3[('max-update', 'warmup-updates', '')] = [(31250, 10000, ' data/wikitext-25'), (50000, 15000, ' data/wikitext-50'), (100000, 30000, ' data/wikitext-103')]
#args3['tokens-per-sample'] = [256]
#args3['update-freq'] = [8//gpus]
# CC-News
#args3[('max-update', 'warmup-updates', '')] = [(150000, 15000, ' data/cc_news')]
args3[('max-update', 'warmup-updates', '')] = [(16000, 3000, ' data/cc_news_small')]
args2['save-interval-updates'] = 1000
#args3['tokens-per-sample'] = [256, 512]
#args3[('max-tokens', 'update-freq', 'memory-efficient-fp16', 'adam-bits', 'decoder-ffn-embed-dim')].append((2048, seqs_per_mini_batch//(2048//args2['tokens-per-sample'])//gpus, True, 8, 131072))
#args3['update-freq'] = [seqs_per_mini_batch//(args2['max-tokens']//args2['tokens-per-sample'])//gpus]
#args3['decoder-layers'] = [3]
args3['weight-decay'] = [0.00]
#args2['optimizer'] = 'lamb'
#args2['fp16-no-flatten-grads'] = ''
#args3['lr'] = [0.001]
#args2['warmup-init-lr'] = 1e-03
#args2['lr'] = 1e-03
# Learning-rate schedule: peak lr derived from a fitted log-linear formula in
# `params`, with min/warmup-init set to 10% of the peak.
key = ('lr', 'max-lr', 'min-lr', 'warmup-init-lr')
args3[key] = []
#for params in [1e4, 1e5, 1e6]:
#for params in [1e1, 1e3, 1e2, 5e3]:
for params in [1e3]:
    lr = 0.003239 + (-0.0001395*math.log(params))
    args3[key].append((lr, lr+1e-8, lr*0.1, lr*0.1 + 1e-8))
    #args3[key].append((lr, lr+1e-8, lr*0.1, lr*1.0 + 1e-8))
args2['lr-scheduler'] = 'cosine'
#args2['warmup-init-lr'] = 1e-03
#args2['lr'] = 1e-03
#args2['lr-scheduler'] = 'inverse_sqrt'
args2['optimizer'] = 'adam'
args2['fp16-no-flatten-grads'] = ''
args2['min-loss-scale'] = 1e-10
args3['fused'] = [False]
args3['dist-scale'] = [1.00]
#args3[('clip-norm', 'percentile-clipping')] = [(0.0, 2), (0.0, 5)]
args3['adam8bits-offset'] = [1/512]
#args3['emb-max-norm'] = [0.0, 1.0]
args3['prob-quant'] = [False]
args3['adam-betas'] = ["'(0.9, 0.995)'"]
args3['adam-eps'] = [1e-7]
args3['adam8bits-qfreq'] = [1, 5, 10, 25]
#args3['unorm'] = ['none', 'percentile', 'scale']
#args3[('adam8bits-method', 'use-emb-norm')] = [('quantile', True), ('dynamic_tree', True), ('linear', True)]
#args3[('adam8bits-method', 'use-emb-norm')] = [('quantile', True)]
#args3['adam8bits-method'] = ['quantile', 'dynamic_tree', 'linear']
args3['adam8bits-method'] = ['quantile', 'dynamic_tree']
args3['use-emb-norm'] = [True]
args3[('memory-efficient-fp16', 'adam-bits')] = [(True, 8)]
args3[('clip-norm', 'percentile-clipping')] = [(0.0, 5)]
#args3['clip-norm'] = [0.4, 0.8]
print(list(args3.keys()))
args4 = []
args5 = {}
args6 = {}
rdm = np.random.RandomState(5345)
for key, value in args2.items():
cmd = cmd + ' --{0} {1}'.format(key, value)
args_prod = []
for key, values in args3.items():
if isinstance(key, tuple):
keyvalues = []
for tups in values:
arg = ''
for i, v in enumerate(tups):
if v is True: v = ''
if v is False: continue
if len(key[i]) == 0:
arg += '{0} '.format(v)
else:
arg += '--{0} {1} '.format(key[i], v)
keyvalues.append(arg)
elif isinstance(key, str):
keyvalues = []
for v in values:
if v is True: v = ''
if v is False:
keyvalues.append('')
else:
keyvalues.append(' --{0} {1}'.format(key, v))
args_prod.append(keyvalues)
if len(args_prod) >= 2:
args_prod = list(product(*args_prod))
else:
new_args = []
if len(args_prod) > 0:
for arg in args_prod[0]:
new_args.append([arg])
args_prod = new_args
jobs = []
if len(args4) == 0: args4.append('')
for seed in range(num_seeds):
seed = seed + seed_offset
for arg4 in args4:
if len(args_prod) == 0: args_prod.append(('', ''))
for i, values in enumerate(args_prod):
job_cmd = cmd + arg4
for val in values:
job_cmd += ' {0}' .format(val)
#job_cmd += ' --checkpoint /checkpoint/timdettmers/{1}/{0}/model.pt'.format(hashlib.md5(str(job_cmd).encode('utf-8')).hexdigest(), ckp_name)
if not fp16: job_cmd = job_cmd.replace('--fp16 ', ' ')
if any([k in job_cmd for k in args5.keys()]):
for substr, pdict in args5.items():
if substr in job_cmd:
for key, values in pdict.items():
for v in values:
job_cmd5 = job_cmd + ' --{0} {1}'.format(key, v)
job_cmd5 = job_cmd5 + ' --seed {0}'.format(seed)
checkpoint_dir = '/checkpoint/timdettmers/{1}/{0} '.format(hashlib.md5(str(job_cmd5).encode('utf-8')).hexdigest(), ckp_name)
save_dir = ' --save-dir {0}'.format(checkpoint_dir)
job_cmd5 = job_cmd5 + save_dir
cmds = [job_cmd5]
if rdm.rand(1) <= args.p:
jobs.append(job_cmd5)
s.add_job(logfolder, repo, change_dir, cmds, time_hours, fp16, cores=cores_per_job, mem=mem, constraint=constraint, exclude=exclude, time_minutes=time_minutes, gpus=gpus)
else:
job_cmd = job_cmd + ' --seed {0}'.format(seed)
checkpoint_dir = '/checkpoint/timdettmers/{1}/{0} '.format(hashlib.md5(str(job_cmd).encode('utf-8')).hexdigest(), ckp_name)
save_dir = ' --save-dir {0}'.format(checkpoint_dir)
job_cmd = job_cmd + save_dir
cmds = [job_cmd]
if rdm.rand(1) <= args.p:
jobs.append(job_cmd)
s.add_job(logfolder, repo, change_dir, cmds, time_hours, fp16, cores=cores_per_job, mem=mem, constraint=constraint, exclude=exclude, time_minutes=time_minutes, gpus=gpus)
if args.dry:
for i, job in enumerate(jobs):
print(i, job)
print('')
print('Total jobs', len(jobs))
print('Time hours: {0}'.format(time_hours))
print('GPUs: {0}'.format(gpus))
print('Jobs will be written to: {0}'.format(join('/private/home/timdettmers/logs/', logfolder)))
print('Jobs will be run on: {0}'.format(partition))
print('Run in folder: {0}'.format(change_dir))
if not args.dry:
s.run_jobs()
|
{"hexsha": "2eacddda826d89c40658fa9c1125609cd4bf058e", "size": 12601, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/cc_small/base_grid.py", "max_stars_repo_name": "TimDettmers/sched", "max_stars_repo_head_hexsha": "e16735f2c2eb6a51f5cf29ead534041574034e2e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-04-22T17:49:48.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-22T17:49:48.000Z", "max_issues_repo_path": "scripts/cc_small/base_grid.py", "max_issues_repo_name": "TimDettmers/sched", "max_issues_repo_head_hexsha": "e16735f2c2eb6a51f5cf29ead534041574034e2e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/cc_small/base_grid.py", "max_forks_repo_name": "TimDettmers/sched", "max_forks_repo_head_hexsha": "e16735f2c2eb6a51f5cf29ead534041574034e2e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.0123839009, "max_line_length": 378, "alphanum_fraction": 0.5936036822, "include": true, "reason": "import numpy", "num_tokens": 4042}
|
# -*- coding: utf-8 -*-
"""Make images using CASA."""
import numpy
import math
import shutil
import os
import time
from os.path import join
import json
import utilities
def fov_to_cellsize(fov, im_size):
    """Return the pixel cell size, in arcsec, for a field of view and image size.

    Parameters are sequences: *fov* in degrees and *im_size* in pixels,
    one entry per image axis. The result is a plain Python list.
    """
    deg_to_rad = numpy.pi / 180.
    rad_to_arcsec = (180. * 3600.) / numpy.pi
    # Sine of the half field of view gives the direction-cosine extent.
    half_fov = numpy.array(fov, numpy.double) / 2.
    r_max = numpy.sin(half_fov * deg_to_rad)
    # Direction-cosine increment per pixel, converted back to an angle.
    inc = r_max / (0.5 * numpy.array(im_size))
    return (numpy.arcsin(inc) * rad_to_arcsec).tolist()
def casa_image(ms, rootname, data_column, imsize, fov, ra0, dec0,
               weighting, w_planes=None):
    """Make a dirty image of a Measurement Set using the CASA imager tool.

    The dirty image is written as ``<rootname>.fits`` and the intermediate
    CASA image directory is deleted afterwards.

    Args (all positional):
        ms: Path to the input Measurement Set.
        rootname: Output path root; its parent directory is created if needed.
        data_column: 'DATA', 'CORRECTED_DATA' or 'MODEL_DATA'.
        imsize: Image size in pixels, one entry per axis.
        fov: Field of view in degrees, one entry per axis.
        ra0, dec0: J2000 phase centre in degrees.
        weighting: Visibility weighting scheme (e.g. 'natural', 'uniform').
        w_planes: Number of w-projection planes; None disables w-projection.

    NOTE(review): ``im``, ``me`` and ``ia`` are the imager, measures and
    image-analysis tool globals presumably injected by the casapy runtime;
    they are not defined in this file -- confirm this script is run via casapy.

    http://casa.nrao.edu/docs/CasaRef/imager-Module.html#x636-6490002.5
    """
    # Ensure the output directory exists before CASA writes into it.
    if not os.path.isdir(os.path.dirname(rootname)):
        os.mkdir(os.path.dirname(rootname))
    cell = fov_to_cellsize(fov, imsize) # arcsec
    # Python 2 print statements: summarise the imaging parameters.
    print '-' * 80
    print '+ Size : %i pixels' % (imsize[0])
    print '+ FoV : %.2f deg' % (fov[0])
    print '+ Cellsize : %.4f arcsec' % (cell[0])
    print '+ RA0 : %.4f deg' % (ra0)
    print '+ Dec0 : %.4f deg' % (dec0)
    print '-' * 80
    im.open(ms, usescratch=False, compress=False)
    # Define the image geometry: MFS Stokes-I at the requested phase centre.
    im.defineimage(nx=imsize[0], ny=imsize[1], cellx='%.12farcsec' % cell[0],
                   celly='%.12farcsec' % cell[1],
                   stokes='I', mode='mfs', step=1, spw=[-1], outframe='',
                   veltype='radio',
                   phasecenter=me.direction('J2000', '%.14fdeg' % ra0,
                                            '%.14fdeg' % dec0))
    # im.weight(type='natural')
    im.weight(type=weighting)
    if w_planes:
        # Wide-field gridding: w-projection with the requested plane count.
        im.setoptions(ftmachine='wproject', wprojplanes=w_planes,
                      gridfunction='SF', padding=1.2,
                      dopbgriddingcorrections=True, applypointingoffsets=False)
    else:
        # Standard 2-D Fourier transform gridding.
        im.setoptions(ftmachine='ft', gridfunction='SF', padding=1.2,
                      dopbgriddingcorrections=True, applypointingoffsets=False)
    dirty = rootname + '_dirty.img'
    # psf = rootname + '_psf.img'
    # Map the requested MS column onto the imager's image type keyword.
    if data_column == 'DATA':
        # DATA column
        im.makeimage(image=dirty, type='observed', verbose=False)
    elif data_column == 'CORRECTED_DATA':
        # CORRECTED_DATA column
        im.makeimage(image=dirty, type='corrected', verbose=False)
    elif data_column == 'MODEL_DATA':
        # MODEL_DATA column
        im.makeimage(image=dirty, type='model', verbose=False)
    else:
        print 'ERROR: Unknown data column!'
        return
    im.close()
    # Export the CASA image to FITS, then drop the CASA-format directory.
    ia.open(dirty)
    ia.tofits(rootname + '.fits', overwrite=True)
    ia.close()
    # ia.open(psf)
    # ia.tofits(rootname+'_psf.fits', overwrite=True)
    # ia.close()
    if os.path.isdir(dirty):
        shutil.rmtree(dirty)
    # if os.path.isdir(psf):
    #     shutil.rmtree(psf)
if __name__ == "__main__":
    # NOTE(review): ``config_file`` is not defined anywhere in this file --
    # presumably it is injected into the namespace by the casapy launcher
    # (e.g. via execfile); confirm before running standalone.
    settings = utilities.byteify(json.load(open(config_file)))
    sim_dir = settings['path']
    # Collect every Measurement Set (a directory ending in '.ms') to image.
    ms_files = [f for f in os.listdir(os.path.abspath(sim_dir))
                if f.endswith('.ms') and os.path.isdir(join(sim_dir, f))]
    # dict.has_key() is Python-2 only, consistent with the print statements.
    # NOTE(review): if the 'imaging' section is missing, the settings dict is
    # not narrowed and the 'column' lookup below may KeyError -- confirm the
    # config always carries an 'imaging' section.
    if settings.has_key('imaging'):
        settings = settings['imaging']
    column = settings['column']
    for file in ms_files:
        ms = join(sim_dir, file)
        if not os.path.isdir(ms):
            print 'WARNING: MS not found, skipping imaging. (%s)' % ms
            continue
        root_name = os.path.splitext(ms)[0]
        # Skip MSs that already have a FITS image (makes reruns resumable).
        if os.path.exists('{}.fits'.format(root_name)):
            continue
        print '+ Imaging with CASA ... [ms=%s -> %s : %s]' % (ms, root_name,
                                                              column)
        t0 = time.time()
        casa_image(ms, '{}'.format(root_name), column,
                   settings['size'], settings['fov_deg'],
                   settings['ra_deg'], settings['dec_deg'],
                   settings['weighting'], settings['w_planes'])
        print '*' * 80
        print ' - Finished imaging in %.3fs' % (time.time() - t0)
        print '*' * 80
|
{"hexsha": "ec7477cc0d9869610773f4208890019104b218d4", "size": 4080, "ext": "py", "lang": "Python", "max_stars_repo_path": "time_smearing_tests/image.py", "max_stars_repo_name": "OxfordSKA/oskar_reference_simulations", "max_stars_repo_head_hexsha": "b2108f6dc963720391782e2ec843cfea5e441ace", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2016-05-09T10:34:05.000Z", "max_stars_repo_stars_event_max_datetime": "2018-10-09T17:39:56.000Z", "max_issues_repo_path": "time_smearing_tests/image.py", "max_issues_repo_name": "OxfordSKA/oskar_reference_simulations", "max_issues_repo_head_hexsha": "b2108f6dc963720391782e2ec843cfea5e441ace", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "time_smearing_tests/image.py", "max_forks_repo_name": "OxfordSKA/oskar_reference_simulations", "max_forks_repo_head_hexsha": "b2108f6dc963720391782e2ec843cfea5e441ace", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.1724137931, "max_line_length": 80, "alphanum_fraction": 0.5549019608, "include": true, "reason": "import numpy", "num_tokens": 1066}
|
[STATEMENT]
lemma scast_distrib:
fixes M :: "'a::len word \<Rightarrow> 'a::len word \<Rightarrow> 'a::len word"
fixes M' :: "'b::len word \<Rightarrow> 'b::len word \<Rightarrow> 'b::len word"
fixes L :: "int \<Rightarrow> int \<Rightarrow> int"
assumes lift_M: "\<And>x y. uint (M x y) = L (uint x) (uint y) mod 2 ^ LENGTH('a)"
assumes lift_M': "\<And>x y. uint (M' x y) = L (uint x) (uint y) mod 2 ^ LENGTH('b)"
assumes distrib: "\<And>x y. (L (x mod (2 ^ LENGTH('b))) (y mod (2 ^ LENGTH('b)))) mod (2 ^ LENGTH('b))
= (L x y) mod (2 ^ LENGTH('b))"
assumes is_down: "is_down (scast :: 'a word \<Rightarrow> 'b word)"
shows "scast (M a b) = M' (scast a) (scast b)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. SCAST('a \<rightarrow> 'b) (M a b) = M' (SCAST('a \<rightarrow> 'b) a) (SCAST('a \<rightarrow> 'b) b)
[PROOF STEP]
apply (subst (1 2 3) down_cast_same [symmetric])
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. is_down UCAST('a \<rightarrow> 'b)
2. UCAST('a \<rightarrow> 'b) (M a b) = M' (UCAST('a \<rightarrow> 'b) a) (UCAST('a \<rightarrow> 'b) b)
[PROOF STEP]
apply (insert is_down)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. is_down SCAST('a \<rightarrow> 'b) \<Longrightarrow> is_down UCAST('a \<rightarrow> 'b)
2. is_down SCAST('a \<rightarrow> 'b) \<Longrightarrow> UCAST('a \<rightarrow> 'b) (M a b) = M' (UCAST('a \<rightarrow> 'b) a) (UCAST('a \<rightarrow> 'b) b)
[PROOF STEP]
apply (clarsimp simp: is_down_def target_size source_size is_down)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. is_down SCAST('a \<rightarrow> 'b) \<Longrightarrow> UCAST('a \<rightarrow> 'b) (M a b) = M' (UCAST('a \<rightarrow> 'b) a) (UCAST('a \<rightarrow> 'b) b)
[PROOF STEP]
apply (rule ucast_distrib [where L=L, OF lift_M lift_M' distrib])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. is_down SCAST('a \<rightarrow> 'b) \<Longrightarrow> is_down UCAST('a \<rightarrow> 'b)
[PROOF STEP]
apply (insert is_down)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>is_down SCAST('a \<rightarrow> 'b); is_down SCAST('a \<rightarrow> 'b)\<rbrakk> \<Longrightarrow> is_down UCAST('a \<rightarrow> 'b)
[PROOF STEP]
apply (clarsimp simp: is_down_def target_size source_size is_down)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 980, "file": "Word_Lib_Word_Lemmas", "length": 7}
|
from my_widgets import LabelSlider
from process import Image, FitFunctions, FitBroadening
from process_monitor import Monitor
from PyQt5 import QtCore, QtWidgets, QtGui, QtChart
from sys import getsizeof
from sklearn.mixture import BayesianGaussianMixture
from sklearn.mixture._gaussian_mixture import _estimate_gaussian_parameters
from sklearn.mixture._gaussian_mixture import _compute_precision_cholesky
import configparser
import generate_report
import glob
import manual_fit
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.colors as mcolors
import numpy as np
import numbers
import os
import time
import sys
import profile_chart
import bar_chart
import pandas
from scipy.stats import rv_discrete
class Window(QtCore.QObject):
#Public Signals
STATUS_REQUESTED = QtCore.pyqtSignal()
PROGRESS_ADVANCE = QtCore.pyqtSignal(int,int,int)
PROGRESS_END = QtCore.pyqtSignal()
CONNECT_TO_CANVAS = QtCore.pyqtSignal()
DRAW_LINE_REQUESTED = QtCore.pyqtSignal(QtCore.QPointF,QtCore.QPointF,bool)
DRAW_RECT_REQUESTED = QtCore.pyqtSignal(QtCore.QPointF,QtCore.QPointF,float,bool)
COLOR = ['magenta','cyan','darkCyan','darkMagenta','darkRed','darkBlue','darkGray','green','darkGreen','darkYellow','yellow','black']
FONTS_CHANGED = QtCore.pyqtSignal(str,int)
STOP_WORKER = QtCore.pyqtSignal()
FEED_BACK_TO_FIT_WORKER = QtCore.pyqtSignal(list,tuple)
def __init__(self):
super(Window,self).__init__()
self.analysisRegion = [0,0,0,0,0]
self.config = configparser.ConfigParser()
self.config.read('./configuration.ini')
self.image_worker = Image()
self.fit_worker = FitFunctions()
self.stopped = False
primary_colors = ['salmon','bright green','bright pink','robin egg blue','bright lavender','deep sky blue','irish green','golden','greenish teal','light blue','butter yellow',\
'turquoise green','iris','off blue','plum','mauve','burgundy','coral','clay','emerald green','cadet blue','avocado','rose pink','aqua green','scarlet']
self.fit_colors = [mcolors.XKCD_COLORS['xkcd:'+name] for name in primary_colors]
for color in mcolors.XKCD_COLORS.keys():
if not color in primary_colors:
self.fit_colors.append(mcolors.XKCD_COLORS[color])
self.default_means = [[0,0],[2.3,0],[1.15,2],[-1.15,2],[-2.3,0],[-1.15,-2],[1.15,-2],[4.6,0],[2.3,4],[-2.3,4],[-4.6,0],[-2.3,-4],[2.3,-4],[3.45,2],[0,4],[-3.45,2],[-3.45,-2],[0,-4],[3.45,-2]]
def refresh(self,config):
self.config = config
try:
self.distributionChart.refresh(config)
self.costChart.refresh(config)
except:
pass
    def set_status(self,status):
        # Cache the status object pushed by the host application
        # (presumably in response to STATUS_REQUESTED -- confirm against
        # the signal's consumers).
        self.status = status
    def main(self,path="c:/users/yux20/documents/05042018 MoS2/interpolated_2D_stack_large.csv"):
        """Build and show the Gaussian-mixture-modeling dialog.

        Creates every widget, wires the signal/slot connections and shows
        the maximised window. *path* seeds the default input/output
        directories; all numeric defaults are kept as strings because they
        back QLineEdit widgets.
        """
        # --- String-valued defaults for the parameter edit boxes ----------
        self.startIndex = "0"
        self.endIndex = "3"
        self.range = "5"
        self.nsamp = '10'
        self.ndraw = '2'
        self.nzslices = '10'
        self.nfeature = '2'
        self.ncomp = '19'
        self.tol = '0.001'
        self.reg_covar = '1e-6'
        self.max_itr = '1500'
        self.n_init = '1'
        self.wc_prior = '1000'
        self.mean_precision_prior = '0.8'
        self.dof = ''
        self.rs = '2'
        self.vb = '0'
        self.vb_interval = '10'
        self.defaultFileName = "GMM Fit"
        # Seed series for the ELBO-change (cost) chart.
        self.cost_series_X = [1]
        self.cost_series_Y = [1]
        self.thread = QtCore.QThread(parent=self)
        self.file_has_been_created = False
        self.scatter_exist = False
        # Derive source/destination directories from the supplied path.
        self.path = os.path.dirname(path)
        self.extension = os.path.splitext(path)[1]
        self.currentSource = self.path
        self.currentDestination = self.currentSource
        # --- Top-level layout: controls on one side, charts on the other --
        self.Dialog = QtWidgets.QWidget()
        self.Grid = QtWidgets.QGridLayout(self.Dialog)
        self.LeftFrame = QtWidgets.QFrame()
        self.RightFrame = QtWidgets.QFrame()
        self.LeftGrid = QtWidgets.QGridLayout(self.LeftFrame)
        self.RightGrid = QtWidgets.QGridLayout(self.RightFrame)
        self.hSplitter = QtWidgets.QSplitter(QtCore.Qt.Horizontal)
        self.hSplitter.addWidget(self.RightFrame)
        self.hSplitter.addWidget(self.LeftFrame)
        self.hSplitter.setStretchFactor(0,1)
        self.hSplitter.setStretchFactor(1,1)
        self.hSplitter.setCollapsible(0,False)
        self.hSplitter.setCollapsible(1,False)
        self.leftScroll = QtWidgets.QScrollArea(self.hSplitter)
        # --- "Input" group: source directory chooser + load button --------
        self.chooseSource = QtWidgets.QGroupBox("Input")
        self.chooseSource.setStyleSheet('QGroupBox::title {color:blue;}')
        self.sourceGrid = QtWidgets.QGridLayout(self.chooseSource)
        self.sourceGrid.setAlignment(QtCore.Qt.AlignTop)
        self.chooseSourceLabel = QtWidgets.QLabel("The input data directory is:\n"+self.currentSource)
        self.chooseSourceLabel.setWordWrap(True)
        self.chooseSourceButton = QtWidgets.QPushButton("Browse...")
        self.chooseSourceButton.setSizePolicy(QtWidgets.QSizePolicy.Fixed,QtWidgets.QSizePolicy.Fixed)
        self.chooseSourceButton.clicked.connect(self.choose_source)
        self.loadButton = QtWidgets.QPushButton("Load")
        self.loadButton.setSizePolicy(QtWidgets.QSizePolicy.Fixed,QtWidgets.QSizePolicy.Fixed)
        self.loadButton.clicked.connect(self.load_data)
        self.loadButton.setEnabled(False)
        self.sourceGrid.addWidget(self.chooseSourceLabel,0,0,2,1)
        self.sourceGrid.addWidget(self.chooseSourceButton,0,1,1,1)
        self.sourceGrid.addWidget(self.loadButton,1,1,1,1)
        # --- "Information" group: free-form status text -------------------
        self.information = QtWidgets.QGroupBox("Information")
        self.information.setStyleSheet('QGroupBox::title {color:blue;}')
        self.informationGrid = QtWidgets.QGridLayout(self.information)
        self.informationGrid.setAlignment(QtCore.Qt.AlignTop)
        self.informationLabel = QtWidgets.QLabel("")
        self.informationLabel.setWordWrap(True)
        self.informationGrid.addWidget(self.informationLabel,0,0)
        # --- "Output" group: destination, file name/format, save toggle ---
        self.chooseDestination = QtWidgets.QGroupBox("Output")
        self.chooseDestination.setStyleSheet('QGroupBox::title {color:blue;}')
        self.destinationGrid = QtWidgets.QGridLayout(self.chooseDestination)
        self.chooseDestinationLabel = QtWidgets.QLabel("The output directory is:\n"+self.currentSource)
        self.destinationNameLabel = QtWidgets.QLabel("The file name is:")
        self.destinationNameEdit = QtWidgets.QLineEdit(self.defaultFileName)
        self.fileTypeLabel = QtWidgets.QLabel("The file format is:")
        self.fileType = QtWidgets.QComboBox()
        self.fileType.addItem(".txt",".txt")
        self.fileType.addItem(".xlsx",".xlsx")
        self.chooseDestinationButton = QtWidgets.QPushButton("Browse...")
        self.chooseDestinationButton.setSizePolicy(QtWidgets.QSizePolicy.Fixed,QtWidgets.QSizePolicy.Fixed)
        self.chooseDestinationButton.clicked.connect(self.choose_destination)
        self.saveResultLabel = QtWidgets.QLabel("Save Results?")
        self.saveResult = QtWidgets.QCheckBox()
        self.saveResult.setChecked(False)
        self.destinationGrid.addWidget(self.chooseDestinationLabel,0,0)
        self.destinationGrid.addWidget(self.chooseDestinationButton,0,1)
        self.destinationGrid.addWidget(self.destinationNameLabel,1,0)
        self.destinationGrid.addWidget(self.destinationNameEdit,1,1)
        self.destinationGrid.addWidget(self.fileTypeLabel,2,0)
        self.destinationGrid.addWidget(self.fileType,2,1)
        self.destinationGrid.addWidget(self.saveResultLabel,3,0)
        self.destinationGrid.addWidget(self.saveResult,3,1)
        self.destinationGrid.setAlignment(self.chooseDestinationButton,QtCore.Qt.AlignRight)
        # --- "Appearance" group: font family and size for the charts ------
        self.appearance = QtWidgets.QGroupBox("Appearance")
        self.appearance.setMaximumHeight(100)
        self.appearance.setStyleSheet('QGroupBox::title {color:blue;}')
        self.appearanceGrid = QtWidgets.QGridLayout(self.appearance)
        self.fontListLabel = QtWidgets.QLabel("Change Font")
        self.fontList = QtWidgets.QFontComboBox()
        self.fontList.setCurrentFont(QtGui.QFont("Arial"))
        self.fontList.currentFontChanged.connect(self.refresh_font_name)
        self.fontSizeLabel = QtWidgets.QLabel("Adjust Font Size ({})".format(12))
        self.fontSizeLabel.setFixedWidth(160)
        self.fontSizeSlider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
        self.fontSizeSlider.setMinimum(1)
        self.fontSizeSlider.setMaximum(100)
        self.fontSizeSlider.setValue(12)
        self.fontSizeSlider.valueChanged.connect(self.refresh_font_size)
        self.appearanceGrid.addWidget(self.fontListLabel,0,0)
        self.appearanceGrid.addWidget(self.fontList,0,1)
        self.appearanceGrid.addWidget(self.fontSizeLabel,1,0)
        self.appearanceGrid.addWidget(self.fontSizeSlider,1,1)
        # --- "Sample" group: sampling controls and draw/plot buttons ------
        self.sampleOptions = QtWidgets.QGroupBox("Sample")
        self.sampleOptions.setStyleSheet('QGroupBox::title {color:blue;}')
        self.sampleOptionsGrid = QtWidgets.QGridLayout(self.sampleOptions)
        self.numberOfSamplesLabel = QtWidgets.QLabel("Number of Samples")
        self.numberOfSamplesEdit = QtWidgets.QLineEdit(self.nsamp)
        self.numberOfDrawsLabel = QtWidgets.QLabel("Number of Draws")
        self.numberOfDrawsEdit = QtWidgets.QLineEdit(self.ndraw)
        self.numberOfZsLabel = QtWidgets.QLabel("Number of Z Slices")
        self.numberOfZsEdit = QtWidgets.QLineEdit(self.nzslices)
        self.drawSampleButton = QtWidgets.QPushButton("Draw Z=0")
        self.drawSampleButton.setSizePolicy(QtWidgets.QSizePolicy.Fixed,QtWidgets.QSizePolicy.Fixed)
        self.drawSampleButton.clicked.connect(self.draw_sample)
        self.drawSampleButton.setEnabled(False)
        self.plotSampleButton = QtWidgets.QPushButton("Plot Z=0")
        self.plotSampleButton.setSizePolicy(QtWidgets.QSizePolicy.Fixed,QtWidgets.QSizePolicy.Fixed)
        self.plotSampleButton.clicked.connect(self.plot_sample)
        self.plotSampleButton.setEnabled(False)
        self.sampleOptionsGrid.addWidget(self.numberOfSamplesLabel,0,0,1,2)
        self.sampleOptionsGrid.addWidget(self.numberOfSamplesEdit,0,2,1,4)
        self.sampleOptionsGrid.addWidget(self.numberOfDrawsLabel,1,0,1,2)
        self.sampleOptionsGrid.addWidget(self.numberOfDrawsEdit,1,2,1,4)
        self.sampleOptionsGrid.addWidget(self.numberOfZsLabel,2,0,1,2)
        self.sampleOptionsGrid.addWidget(self.numberOfZsEdit,2,2,1,4)
        self.sampleOptionsGrid.addWidget(self.drawSampleButton,3,0,1,3)
        self.sampleOptionsGrid.addWidget(self.plotSampleButton,3,3,1,3)
        # --- "Parameters" group: BayesianGaussianMixture hyper-parameters.
        # Row indices jump in tens to leave room for later insertions.
        self.fitOptions = QtWidgets.QGroupBox("Parameters")
        self.fitOptions.setStyleSheet('QGroupBox::title {color:blue;}')
        self.fitOptionsGrid = QtWidgets.QGridLayout(self.fitOptions)
        self.numberOfFeaturesLabel = QtWidgets.QLabel("Number of Features")
        self.numberOfFeaturesEdit = QtWidgets.QLineEdit(self.nfeature)
        self.fitOptionsGrid.addWidget(self.numberOfFeaturesLabel,10,0,1,2)
        self.fitOptionsGrid.addWidget(self.numberOfFeaturesEdit,10,2,1,4)
        # Feature/component edits resize the prior tables when they change.
        self.numberOfFeaturesEdit.textChanged.connect(self.covar_prior_table_change_features)
        self.numberOfFeaturesEdit.textChanged.connect(self.mean_prior_table_change_features)
        self.numberOfComponentsLabel = QtWidgets.QLabel("Number of Components")
        self.numberOfComponentsEdit = QtWidgets.QLineEdit(self.ncomp)
        self.fitOptionsGrid.addWidget(self.numberOfComponentsLabel,20,0,1,2)
        self.fitOptionsGrid.addWidget(self.numberOfComponentsEdit,20,2,1,4)
        self.numberOfComponentsEdit.textChanged.connect(self.mean_prior_table_initialize)
        self.numberOfComponentsEdit.textChanged.connect(self.covar_prior_table_initialize)
        self.tolLabel = QtWidgets.QLabel("Convergence Threshold")
        self.tolEdit = QtWidgets.QLineEdit(self.tol)
        self.fitOptionsGrid.addWidget(self.tolLabel,30,0,1,2)
        self.fitOptionsGrid.addWidget(self.tolEdit,30,2,1,4)
        self.regCovarLabel = QtWidgets.QLabel("Covariance Reg.")
        self.regCovarEdit = QtWidgets.QLineEdit(self.reg_covar)
        self.fitOptionsGrid.addWidget(self.regCovarLabel,40,0,1,2)
        self.fitOptionsGrid.addWidget(self.regCovarEdit,40,2,1,4)
        self.maxItrLabel = QtWidgets.QLabel("EM Iterations")
        self.maxItrEdit = QtWidgets.QLineEdit(self.max_itr)
        self.fitOptionsGrid.addWidget(self.maxItrLabel,50,0,1,2)
        self.fitOptionsGrid.addWidget(self.maxItrEdit,50,2,1,4)
        self.nInitLabel = QtWidgets.QLabel("Number of Initializations")
        self.nInitEdit = QtWidgets.QLineEdit(self.n_init)
        self.fitOptionsGrid.addWidget(self.nInitLabel,60,0,1,2)
        self.fitOptionsGrid.addWidget(self.nInitEdit,60,2,1,4)
        self.covarianceType = QtWidgets.QLabel("Covariance Type")
        self.covarianceType.setFixedWidth(160)
        self.covarianceTypeCombo = QtWidgets.QComboBox()
        for types in ('full','tied','diag','spherical'):
            self.covarianceTypeCombo.addItem(types,types)
        self.fitOptionsGrid.addWidget(self.covarianceType,70,0,1,2)
        self.fitOptionsGrid.addWidget(self.covarianceTypeCombo,70,2,1,4)
        self.initMethodType = QtWidgets.QLabel("Initialization Method")
        self.initMethodType.setFixedWidth(160)
        self.initMethodTypeCombo = QtWidgets.QComboBox()
        for types in ('random','kmeans'):
            self.initMethodTypeCombo.addItem(types,types)
        self.fitOptionsGrid.addWidget(self.initMethodType,75,0,1,2)
        self.fitOptionsGrid.addWidget(self.initMethodTypeCombo,75,2,1,4)
        self.wcPriorType = QtWidgets.QLabel("Weight Prior Type")
        self.wcPriorType.setFixedWidth(160)
        self.wcPriorTypeCombo = QtWidgets.QComboBox()
        for types in ('dirichlet_process','dirichlet_distribution'):
            self.wcPriorTypeCombo.addItem(types,types)
        self.fitOptionsGrid.addWidget(self.wcPriorType,80,0,1,2)
        self.fitOptionsGrid.addWidget(self.wcPriorTypeCombo,80,2,1,4)
        # Optional priors follow a common pattern: a checkbox enables the
        # matching edit box; an empty default string means "unset".
        self.wcPriorLabel = QtWidgets.QLabel("Weight Prior")
        self.wcPriorCheck = QtWidgets.QCheckBox()
        self.wcPriorCheck.stateChanged.connect(self.wc_prior_check_changed)
        self.wcPriorEdit = QtWidgets.QLineEdit(self.wc_prior)
        if not self.wc_prior:
            self.wcPriorCheck.setChecked(False)
            self.wcPriorEdit.setEnabled(False)
        else:
            self.wcPriorCheck.setChecked(True)
        self.fitOptionsGrid.addWidget(self.wcPriorLabel,90,0,1,2)
        self.fitOptionsGrid.addWidget(self.wcPriorCheck,90,2,1,1)
        self.fitOptionsGrid.addWidget(self.wcPriorEdit,90,3,1,3)
        self.meanPrecPriorLabel = QtWidgets.QLabel("Mean Precision Prior")
        self.meanPrecPriorCheck = QtWidgets.QCheckBox()
        self.meanPrecPriorCheck.stateChanged.connect(self.mean_precision_prior_check_changed)
        self.meanPrecPriorEdit = QtWidgets.QLineEdit(self.mean_precision_prior)
        if not self.mean_precision_prior:
            self.meanPrecPriorCheck.setChecked(False)
            self.meanPrecPriorEdit.setEnabled(False)
        else:
            self.meanPrecPriorCheck.setChecked(True)
        self.fitOptionsGrid.addWidget(self.meanPrecPriorLabel,100,0,1,2)
        self.fitOptionsGrid.addWidget(self.meanPrecPriorCheck,100,2,1,1)
        self.fitOptionsGrid.addWidget(self.meanPrecPriorEdit,100,3,1,3)
        self.dofLabel = QtWidgets.QLabel("Deg. of Freedom Prior")
        self.dofCheck = QtWidgets.QCheckBox()
        self.dofCheck.stateChanged.connect(self.dof_check_changed)
        self.dofEdit = QtWidgets.QLineEdit(self.dof)
        if not self.dof:
            self.dofCheck.setChecked(False)
            self.dofEdit.setEnabled(False)
        else:
            self.dofCheck.setChecked(True)
        self.fitOptionsGrid.addWidget(self.dofLabel,120,0,1,2)
        self.fitOptionsGrid.addWidget(self.dofCheck,120,2,1,1)
        self.fitOptionsGrid.addWidget(self.dofEdit,120,3,1,3)
        self.rsLabel = QtWidgets.QLabel("Random State")
        self.rsCheck = QtWidgets.QCheckBox()
        self.rsCheck.stateChanged.connect(self.rs_check_changed)
        self.rsEdit = QtWidgets.QLineEdit(self.rs)
        if not self.rs:
            self.rsCheck.setChecked(False)
            self.rsEdit.setEnabled(False)
        else:
            self.rsCheck.setChecked(True)
        self.fitOptionsGrid.addWidget(self.rsLabel,130,0,1,2)
        self.fitOptionsGrid.addWidget(self.rsCheck,130,2,1,1)
        self.fitOptionsGrid.addWidget(self.rsEdit,130,3,1,3)
        self.vbLabel = QtWidgets.QLabel("Verbose")
        self.vbEdit = QtWidgets.QLineEdit(self.vb)
        self.fitOptionsGrid.addWidget(self.vbLabel,140,0,1,2)
        self.fitOptionsGrid.addWidget(self.vbEdit,140,2,1,4)
        self.vbIntvLabel = QtWidgets.QLabel("Verbose Interval")
        self.vbIntvEdit = QtWidgets.QLineEdit(self.vb_interval)
        self.fitOptionsGrid.addWidget(self.vbIntvLabel,150,0,1,2)
        self.fitOptionsGrid.addWidget(self.vbIntvEdit,150,2,1,4)
        self.warmStartLabel = QtWidgets.QLabel("Warm Start?")
        self.warmStartCheck = QtWidgets.QCheckBox()
        self.warmStartCheck.setChecked(False)
        self.fitOptionsGrid.addWidget(self.warmStartLabel,160,0,1,2)
        self.fitOptionsGrid.addWidget(self.warmStartCheck,160,2,1,4)
        # --- "Mean Prior" group: per-component mean prior table -----------
        self.meanPriorTable = QtWidgets.QGroupBox("Mean Prior")
        self.meanPriorTable.setStyleSheet('QGroupBox::title {color:blue;}')
        self.meanPriorTableGrid = QtWidgets.QGridLayout(self.meanPriorTable)
        self.meanPriorLabel = QtWidgets.QLabel("Use Mean Prior?")
        self.meanPriorCheck = QtWidgets.QCheckBox()
        self.meanPriorCheck.setChecked(False)
        self.meanPriorCheck.stateChanged.connect(self.mean_prior_table_check_changed)
        self.resetMeanPriorButton = QtWidgets.QPushButton("Reset")
        self.resetMeanPriorButton.clicked.connect(self.set_default_mean_priors)
        self.mean_prior_table = QtWidgets.QTableWidget()
        self.mean_prior_table.setMinimumHeight(200)
        self.mean_prior_table_initialize(int(self.ncomp))
        self.meanPriorTableGrid.addWidget(self.meanPriorLabel,0,0,1,2)
        self.meanPriorTableGrid.addWidget(self.meanPriorCheck,0,2,1,2)
        self.meanPriorTableGrid.addWidget(self.resetMeanPriorButton,0,4,1,2)
        self.meanPriorTableGrid.addWidget(self.mean_prior_table,1,0,1,6)
        # --- "Covariance Prior" group: tabbed covariance prior editor -----
        self.covarPriorTable = QtWidgets.QGroupBox("Covariance Prior")
        self.covarPriorTable.setStyleSheet('QGroupBox::title {color:blue;}')
        self.covarPriorTableGrid = QtWidgets.QGridLayout(self.covarPriorTable)
        self.covarPriorLabel = QtWidgets.QLabel("Use Covariance Prior?")
        self.covarPriorCheck = QtWidgets.QCheckBox()
        self.covarPriorCheck.setChecked(False)
        self.covarPriorCheck.stateChanged.connect(self.covar_prior_check_changed)
        self.covarTab = QtWidgets.QTabWidget()
        self.covarTab.setContentsMargins(0,0,0,0)
        self.covarTab.setTabsClosable(False)
        self.covar_prior_table_initialize(int(self.ncomp))
        self.covarTab.widget(0).setEnabled(self.covarPriorCheck.isChecked())
        self.covarPriorTableGrid.addWidget(self.covarPriorLabel,0,0,1,2)
        self.covarPriorTableGrid.addWidget(self.covarPriorCheck,0,2,1,4)
        self.covarPriorTableGrid.addWidget(self.covarTab,1,0,1,6)
        # --- "Log" group: progress bar and timestamped log box ------------
        self.statusBar = QtWidgets.QGroupBox("Log")
        self.statusBar.setStyleSheet('QGroupBox::title {color:blue;}')
        self.statusGrid = QtWidgets.QGridLayout(self.statusBar)
        self.statusBar.setFixedHeight(150)
        self.statusBar.setSizePolicy(QtWidgets.QSizePolicy.Expanding,QtWidgets.QSizePolicy.Fixed)
        self.progressBar = QtWidgets.QProgressBar()
        self.progressBar.setFixedHeight(12)
        self.progressBar.setFixedWidth(800)
        self.progressBar.setVisible(False)
        self.progressBar.setOrientation(QtCore.Qt.Horizontal)
        # Keep layout space reserved even while the bar is hidden.
        self.progressBarSizePolicy = self.progressBar.sizePolicy()
        self.progressBarSizePolicy.setRetainSizeWhenHidden(True)
        self.progressBar.setSizePolicy(self.progressBarSizePolicy)
        self.PROGRESS_ADVANCE.connect(self.progress)
        self.PROGRESS_END.connect(self.progress_reset)
        self.logBox = QtWidgets.QTextEdit(QtCore.QTime.currentTime().toString("hh:mm:ss")+\
                                          "\u00A0\u00A0\u00A0\u00A0Initialized!")
        self.logBox.ensureCursorVisible()
        self.logBox.setAlignment(QtCore.Qt.AlignTop)
        self.logBox.setFrameShape(QtWidgets.QFrame.NoFrame)
        self.logBoxScroll = QtWidgets.QScrollArea()
        self.logBoxScroll.setWidget(self.logBox)
        self.logBoxScroll.setWidgetResizable(True)
        self.logBoxScroll.setFrameShape(QtWidgets.QFrame.NoFrame)
        self.statusGrid.addWidget(self.logBoxScroll,0,0)
        self.statusGrid.setAlignment(self.progressBar,QtCore.Qt.AlignRight)
        # --- Start/Stop/Reset/Quit buttons; indices follow addButton order.
        self.ButtonBox = QtWidgets.QDialogButtonBox()
        self.ButtonBox.addButton("Start",QtWidgets.QDialogButtonBox.ActionRole)
        self.ButtonBox.addButton("Stop",QtWidgets.QDialogButtonBox.ActionRole)
        self.ButtonBox.addButton("Reset",QtWidgets.QDialogButtonBox.ResetRole)
        self.ButtonBox.addButton("Quit",QtWidgets.QDialogButtonBox.DestructiveRole)
        self.ButtonBox.setCenterButtons(True)
        self.ButtonBox.findChildren(QtWidgets.QPushButton)[0].clicked.\
            connect(self.start)
        self.ButtonBox.findChildren(QtWidgets.QPushButton)[1].clicked. \
            connect(self.stop)
        self.ButtonBox.findChildren(QtWidgets.QPushButton)[2].clicked.\
            connect(self.reset)
        self.ButtonBox.findChildren(QtWidgets.QPushButton)[3].clicked.\
            connect(self.reject)
        self.ButtonBox.findChildren(QtWidgets.QPushButton)[0].setEnabled(False)
        self.ButtonBox.findChildren(QtWidgets.QPushButton)[1].setEnabled(False)
        self.ButtonBox.findChildren(QtWidgets.QPushButton)[2].setEnabled(False)
        self.ButtonBox.findChildren(QtWidgets.QPushButton)[3].setEnabled(False)
        # --- Right-hand charts: distribution, ELBO change, weights --------
        self.distributionChartTitle = QtWidgets.QLabel('Distribution')
        self.distributionChart = profile_chart.ProfileChart(self.config)
        self.distributionChart.set_fonts(self.fontList.currentFont().family(),self.fontSizeSlider.value())
        self.distributionChart.setFixedSize(1300,1300)
        self.FONTS_CHANGED.connect(self.distributionChart.adjust_fonts)
        self.costChartTitle = QtWidgets.QLabel('ELBO Change')
        self.costChart = profile_chart.ProfileChart(self.config)
        self.costChart.set_fonts(self.fontList.currentFont().family(),self.fontSizeSlider.value())
        self.FONTS_CHANGED.connect(self.costChart.adjust_fonts)
        self.weightChart = bar_chart.BarChart(self.config)
        self.weightChart.set_fonts(self.fontList.currentFont().family(),self.fontSizeSlider.value())
        self.FONTS_CHANGED.connect(self.weightChart.adjust_fonts)
        # --- Assemble the two panes and show the window --------------------
        self.LeftGrid.addWidget(self.chooseSource,0,0)
        self.LeftGrid.addWidget(self.information,1,0)
        self.LeftGrid.addWidget(self.chooseDestination,2,0)
        self.LeftGrid.addWidget(self.appearance,3,0)
        self.LeftGrid.addWidget(self.sampleOptions,4,0)
        self.LeftGrid.addWidget(self.fitOptions,5,0)
        self.LeftGrid.addWidget(self.meanPriorTable,6,0)
        self.LeftGrid.addWidget(self.covarPriorTable,7,0)
        self.LeftGrid.addWidget(self.ButtonBox,8,0)
        self.RightGrid.addWidget(self.distributionChartTitle,0,0)
        self.RightGrid.addWidget(self.costChartTitle,0,1)
        self.RightGrid.addWidget(self.distributionChart,1,0)
        self.RightGrid.addWidget(self.costChart,1,1)
        self.RightGrid.addWidget(self.weightChart,2,0,1,2)
        self.RightGrid.addWidget(self.statusBar,3,0,1,2)
        self.RightGrid.addWidget(self.progressBar,4,0,1,2)
        self.Grid.addWidget(self.hSplitter,0,0)
        self.leftScroll.setWidget(self.LeftFrame)
        self.leftScroll.setMinimumWidth(800)
        self.leftScroll.setWidgetResizable(True)
        self.leftScroll.setFrameShape(QtWidgets.QFrame.NoFrame)
        self.Dialog.setWindowTitle("Gaussian Mixture Modeling")
        self.Dialog.setWindowModality(QtCore.Qt.WindowModal)
        self.Dialog.showMaximized()
        self.set_default_mean_priors()
def wc_prior_check_changed(self,state):
    """Enable the weight-concentration prior edit when its checkbox is checked.

    state: Qt check state (0 = unchecked, 2 = checked); other values are ignored.
    """
    if state in (0, 2):
        self.wcPriorEdit.setEnabled(state == 2)
def mean_precision_prior_check_changed(self,state):
    """Enable the mean-precision prior edit when its checkbox is checked.

    state: Qt check state (0 = unchecked, 2 = checked); other values are ignored.
    """
    if state in (0, 2):
        self.meanPrecPriorEdit.setEnabled(state == 2)
def dof_check_changed(self,state):
    """Enable the degrees-of-freedom edit when its checkbox is checked.

    state: Qt check state (0 = unchecked, 2 = checked); other values are ignored.
    """
    if state in (0, 2):
        self.dofEdit.setEnabled(state == 2)
def rs_check_changed(self,state):
    """Enable the random-state edit when its checkbox is checked.

    state: Qt check state (0 = unchecked, 2 = checked); other values are ignored.
    """
    if state in (0, 2):
        self.rsEdit.setEnabled(state == 2)
def mean_prior_table_check_changed(self,state):
    """Gray out or restore the prior (even) columns of the mean table.

    state: Qt check state — 0 grays the prior cells, 2 makes them
    transparent again; other values are ignored.
    """
    if state not in (0, 2):
        return
    background = QtCore.Qt.lightGray if state == 0 else QtCore.Qt.transparent
    for col in range(self.mean_prior_table.columnCount()):
        cell = self.mean_prior_table.item(0, 2 * col)
        if cell:
            cell.setBackground(background)
def covar_prior_check_changed(self,state):
    """Enable the prior covariance tab (tab 0) when its checkbox is checked.

    state: Qt check state (0 = unchecked, 2 = checked); other values are ignored.
    """
    if state in (0, 2):
        self.covarTab.widget(0).setEnabled(state == 2)
def mean_prior_table_change_features(self,text):
    """Rebuild the mean-prior table when the number of features changes.

    text: new feature count as a string (from the line-edit signal).
    Keeps the current component count. Dimensions above 3 are rejected
    because only X/Y/Z headers are defined.
    """
    ncomp = int(self.numberOfComponentsEdit.text())
    nfeatures = int(text)
    if nfeatures > 3:
        self.mean_prior_table.clear()
        self.raise_error('Dimension > 3 not supported')
    else:
        self.mean_prior_table.clear()
        # Two columns per feature: the user-editable prior and the fitted posterior.
        self.mean_prior_table.setColumnCount(2*nfeatures)
        coords = ['Prior X', 'Posterior X', 'Prior Y', 'Posterior Y', 'Prior Z', 'Posterior Z']
        for n in range(2*nfeatures):
            header_item = QtWidgets.QTableWidgetItem(coords[n])
            self.mean_prior_table.setHorizontalHeaderItem(n,header_item)
        self.mean_prior_table.setRowCount(ncomp)
        self.mean_prior_table.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
        self.mean_prior_table.horizontalHeader().setBackgroundRole(QtGui.QPalette.Highlight)
        # One row per mixture component, labeled with its fit-color swatch.
        for i in range(ncomp):
            icon_pm = QtGui.QPixmap(50,50)
            icon_pm.fill(QtGui.QColor(self.fit_colors[i]))
            icon = QtGui.QIcon(icon_pm)
            item = QtWidgets.QTableWidgetItem(icon,'{}'.format(i+1))
            item.setTextAlignment(QtCore.Qt.AlignCenter)
            self.mean_prior_table.setVerticalHeaderItem(i,item)
def mean_prior_table_initialize(self,text):
    """Rebuild the mean-prior table when the number of components changes.

    text: new component count as a string (from the line-edit signal).
    Same layout logic as mean_prior_table_change_features(), but driven
    by the component count instead of the feature count.
    """
    ncomp = int(text)
    nfeatures = int(self.numberOfFeaturesEdit.text())
    if nfeatures > 3:
        self.mean_prior_table.clear()
        self.raise_error('Dimension > 3 not supported')
    else:
        self.mean_prior_table.clear()
        # Two columns per feature: the user-editable prior and the fitted posterior.
        self.mean_prior_table.setColumnCount(2*nfeatures)
        coords = ['Prior X', 'Posterior X', 'Prior Y', 'Posterior Y', 'Prior Z', 'Posterior Z']
        for n in range(2*nfeatures):
            header_item = QtWidgets.QTableWidgetItem(coords[n])
            self.mean_prior_table.setHorizontalHeaderItem(n,header_item)
        self.mean_prior_table.setRowCount(ncomp)
        self.mean_prior_table.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
        self.mean_prior_table.horizontalHeader().setBackgroundRole(QtGui.QPalette.Highlight)
        # One row per mixture component, labeled with its fit-color swatch.
        for i in range(ncomp):
            icon_pm = QtGui.QPixmap(50,50)
            icon_pm.fill(QtGui.QColor(self.fit_colors[i]))
            icon = QtGui.QIcon(icon_pm)
            item = QtWidgets.QTableWidgetItem(icon,'{}'.format(i+1))
            item.setTextAlignment(QtCore.Qt.AlignCenter)
            self.mean_prior_table.setVerticalHeaderItem(i,item)
def get_mean_posteriors(self):
    """Read the posterior mean columns (odd columns) of the mean table.

    Returns one list of feature values per component row; cells that were
    never created contribute 0.
    """
    nfeatures = int(self.numberOfFeaturesEdit.text())
    posteriors = []
    for row in range(self.mean_prior_table.rowCount()):
        values = []
        for feature in range(nfeatures):
            cell = self.mean_prior_table.item(row, 2 * feature + 1)
            values.append(float(cell.text()) if cell else 0)
        posteriors.append(values)
    return posteriors
def get_mean_priors(self):
    """Read the prior mean columns (even columns) of the mean table.

    Returns one list of feature values per component row; cells that were
    never created contribute 0.
    """
    nfeatures = int(self.numberOfFeaturesEdit.text())
    priors = []
    for row in range(self.mean_prior_table.rowCount()):
        values = []
        for feature in range(nfeatures):
            cell = self.mean_prior_table.item(row, 2 * feature)
            values.append(float(cell.text()) if cell else 0)
        priors.append(values)
    return priors
def set_default_mean_priors(self):
    """Fill the prior (even) columns of the mean table from self.default_means.

    Also updates the component-count edit to match the number of default
    means, which re-triggers the table-initialize signal handlers.
    """
    self.numberOfComponentsEdit.setText(str(len(self.default_means)))
    for i in range(len(self.default_means)):
        for j in range(int(self.numberOfFeaturesEdit.text())):
            if not self.mean_prior_table.item(i,j*2):
                # Cell does not exist yet -> create a new centered item.
                item = QtWidgets.QTableWidgetItem('{:.2f}'.format(self.default_means[i][j]))
                item.setTextAlignment(QtCore.Qt.AlignCenter)
                self.mean_prior_table.setItem(i,2*j,item)
            else:
                self.mean_prior_table.item(i,2*j).setText('{:.2f}'.format(self.default_means[i][j]))
def update_mean_posteriors(self,means):
    """Write fitted means into the posterior (odd) columns of the mean table.

    means: 2D array-like indexed as means[component, feature]
    (indexed with a tuple, so a numpy array is expected here).
    """
    for i in range(int(self.numberOfComponentsEdit.text())):
        for j in range(int(self.numberOfFeaturesEdit.text())):
            if not self.mean_prior_table.item(i,2*j+1):
                # Cell does not exist yet -> create a new centered item.
                item = QtWidgets.QTableWidgetItem('{:.2f}'.format(means[i,j]))
                item.setTextAlignment(QtCore.Qt.AlignCenter)
                self.mean_prior_table.setItem(i,2*j+1,item)
            else:
                self.mean_prior_table.item(i,2*j+1).setText('{:.2f}'.format(means[i,j]))
def update_mean_priors(self):
    """Copy posterior means (odd columns) into the prior columns (even
    columns) so the next fit starts from the previous result.

    NOTE(review): assumes every posterior cell exists — a missing posterior
    item would raise AttributeError on .text(). The only call sites visible
    here are commented out; confirm this is only run after a completed fit.
    """
    for i in range(int(self.numberOfComponentsEdit.text())):
        for j in range(int(self.numberOfFeaturesEdit.text())):
            value = float(self.mean_prior_table.item(i,j*2+1).text())
            if not self.mean_prior_table.item(i,2*j):
                item = QtWidgets.QTableWidgetItem('{:.2f}'.format(value))
                item.setTextAlignment(QtCore.Qt.AlignCenter)
                self.mean_prior_table.setItem(i,2*j,item)
            else:
                self.mean_prior_table.item(i,2*j).setText('{:.2f}'.format(value))
def covar_prior_table_initialize(self,text):
    """Rebuild the covariance tab widget when the component count changes.

    text: new component count as a string (from the line-edit signal).
    Creates one nfeatures x nfeatures table per tab: tab 0 holds the shared
    prior, tabs 1..ncomp hold per-component posteriors.
    """
    ncomp = int(text)
    nfeatures = int(self.numberOfFeaturesEdit.text())
    self.covarTab.clear()
    if nfeatures > 3:
        self.mean_prior_table.clear()
        self.raise_error('Dimension > 3 not supported')
    else:
        # i == 0 is the prior tab; 1..ncomp are the component tabs.
        for i in range(int(ncomp)+1):
            covar_prior_table = QtWidgets.QTableWidget()
            covar_prior_table.setColumnCount(nfeatures)
            for j in range(nfeatures):
                header_item = QtWidgets.QTableWidgetItem('C{}'.format(j))
                covar_prior_table.setHorizontalHeaderItem(j,header_item)
            covar_prior_table.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
            covar_prior_table.horizontalHeader().setBackgroundRole(QtGui.QPalette.Highlight)
            covar_prior_table.setMinimumHeight(200)
            covar_prior_table.setRowCount(nfeatures)
            for j in range(nfeatures):
                item = QtWidgets.QTableWidgetItem('R{}'.format(j))
                item.setTextAlignment(QtCore.Qt.AlignCenter)
                covar_prior_table.setVerticalHeaderItem(j,item)
            if i == 0:
                self.covarTab.addTab(covar_prior_table,"Prior")
            else:
                # Component tabs carry a color swatch matching the fit colors.
                icon_pm = QtGui.QPixmap(50,50)
                icon_pm.fill(QtGui.QColor(self.fit_colors[i-1]))
                icon = QtGui.QIcon(icon_pm)
                self.covarTab.addTab(covar_prior_table,icon,"{}".format(i))
def covar_prior_table_change_features(self,text):
    """Rebuild the covariance tab widget when the feature count changes.

    text: new feature count as a string (from the line-edit signal).
    Mirrors covar_prior_table_initialize(): tab 0 is the shared prior,
    tabs 1..ncomp are per-component posteriors.
    """
    nfeatures = int(text)
    self.covarTab.clear()
    if nfeatures > 3:
        self.mean_prior_table.clear()
        self.raise_error('Dimension > 3 not supported')
    else:
        # i == 0 is the prior tab; 1..ncomp are the component tabs.
        for i in range(int(self.numberOfComponentsEdit.text())+1):
            covar_prior_table = QtWidgets.QTableWidget()
            covar_prior_table.setColumnCount(nfeatures)
            for j in range(nfeatures):
                header_item = QtWidgets.QTableWidgetItem('C{}'.format(j))
                covar_prior_table.setHorizontalHeaderItem(j,header_item)
            covar_prior_table.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
            covar_prior_table.horizontalHeader().setBackgroundRole(QtGui.QPalette.Highlight)
            covar_prior_table.setMinimumHeight(200)
            covar_prior_table.setRowCount(nfeatures)
            for j in range(nfeatures):
                item = QtWidgets.QTableWidgetItem('R{}'.format(j))
                item.setTextAlignment(QtCore.Qt.AlignCenter)
                covar_prior_table.setVerticalHeaderItem(j,item)
            if i == 0:
                self.covarTab.addTab(covar_prior_table,"Prior")
            else:
                icon_pm = QtGui.QPixmap(50,50)
                icon_pm.fill(QtGui.QColor(self.fit_colors[i-1]))
                icon = QtGui.QIcon(icon_pm)
                # Label tabs 1..ncomp. Was '{}'.format(i+1), which labeled the
                # tabs 2..ncomp+1 — inconsistent with covar_prior_table_initialize()
                # and with the component numbering in the mean table.
                self.covarTab.addTab(covar_prior_table,icon,"{}".format(i))
def get_covar_posteriors(self):
    """Read every covariance table: the prior tab (index 0) and one tab per
    component.

    Returns a list of nfeatures x nfeatures matrices (nested lists), one per
    tab; cells that were never created contribute 0.
    """
    ntables = int(self.numberOfComponentsEdit.text()) + 1
    nfeatures = int(self.numberOfFeaturesEdit.text())
    covars = []
    for index in range(ntables):
        table = self.covarTab.widget(index)
        matrix = []
        for r in range(nfeatures):
            row_values = []
            for c in range(nfeatures):
                cell = table.item(r, c)
                row_values.append(float(cell.text()) if cell else 0)
            matrix.append(row_values)
        covars.append(matrix)
    return covars
def update_covar_posteriors(self,covars):
    """Write fitted covariances into the per-component tabs (1..ncomp).

    covars: 3D array-like indexed as covars[component, row, col]
    (indexed with a tuple, so a numpy array is expected here).
    The prior tab (index 0) is left untouched.
    """
    for i in range(1,int(self.numberOfComponentsEdit.text())+1):
        for j in range(int(self.numberOfFeaturesEdit.text())):
            for k in range(int(self.numberOfFeaturesEdit.text())):
                if not self.covarTab.widget(i).item(j,k):
                    # Cell does not exist yet -> create a new centered item.
                    item = QtWidgets.QTableWidgetItem('{:.2f}'.format(covars[i-1,j,k]))
                    item.setTextAlignment(QtCore.Qt.AlignCenter)
                    self.covarTab.widget(i).setItem(j,k,item)
                else:
                    self.covarTab.widget(i).item(j,k).setText('{:.2f}'.format(covars[i-1,j,k]))
def update_covar_priors(self):
    """Set each prior covariance cell (tab 0) to the average of the
    corresponding posterior cells across all component tabs.

    NOTE(review): assumes every posterior cell exists — a missing item would
    raise AttributeError on .text(); visible call sites are commented out.
    """
    for j in range(int(self.numberOfFeaturesEdit.text())):
        for k in range(int(self.numberOfFeaturesEdit.text())):
            # Average cell (j, k) over component tabs 1..ncomp.
            value = 0
            for i in range(1,int(self.numberOfComponentsEdit.text())+1):
                value += float(self.covarTab.widget(i).item(j,k).text())
            value /= int(self.numberOfComponentsEdit.text())
            if not self.covarTab.widget(0).item(j,k):
                item = QtWidgets.QTableWidgetItem('{:.2f}'.format(value))
                item.setTextAlignment(QtCore.Qt.AlignCenter)
                self.covarTab.widget(0).setItem(j,k,item)
            else:
                self.covarTab.widget(0).item(j,k).setText('{:.2f}'.format(value))
def get_covar_priors(self):
    """Read the prior covariance matrix from the first covariance tab.

    Returns an nfeatures x nfeatures nested list; cells that were never
    created contribute 0.
    """
    nfeatures = int(self.numberOfFeaturesEdit.text())
    prior_table = self.covarTab.widget(0)
    matrix = []
    for r in range(nfeatures):
        row_values = []
        for c in range(nfeatures):
            cell = prior_table.item(r, c)
            row_values.append(float(cell.text()) if cell else 0)
        matrix.append(row_values)
    return matrix
def choose_source(self):
    """Open a file dialog for the input CSV and remember the chosen path.

    Enables the Load button once a path has been picked (even an empty one
    if the dialog was cancelled — path is '' in that case).
    """
    # NOTE(review): the default path is a hard-coded, user-specific location.
    path = QtWidgets.QFileDialog.getOpenFileName(None,"choose the input data file","c:/users/yux20/documents/05042018 MoS2/interpolated_2D_stack_large.csv",filter="CSV (*.csv)")[0]
    self.currentSource = path
    self.chooseSourceLabel.setText("The source directory is:\n"+self.currentSource)
    self.loadButton.setEnabled(True)
def load_data(self):
    """Load the CSV chosen in choose_source() and prime the fitting UI.

    Reads the file into self.grid_3d (pandas DataFrame), collects the unique
    z levels, truncates the requested number of Z levels if the file has
    fewer, shows summary statistics, and enables the fitting controls.
    On failure an error dialog is shown and the Load button is re-enabled
    so the user can retry.
    """
    self.update_log("Loading Data ... ")
    self.loadButton.setEnabled(False)
    QtCore.QCoreApplication.processEvents()
    try:
        self.grid_3d = pandas.read_csv(filepath_or_buffer=self.currentSource)
        self.nz = 0
        self.z_levels = np.unique(self.grid_3d["z"].to_numpy())
        if int(self.numberOfZsEdit.text()) > len(self.z_levels):
            # The user asked for more Z levels than the data contains.
            self.numberOfZsEdit.setText(str(len(self.z_levels)))
            self.update_log("Number of Z levels truncated")
        self.informationLabel.setText(self.grid_3d.describe(include='all').applymap(lambda x: np.around(x,3)).to_string())
        self.drawSampleButton.setEnabled(True)
        self.loadButton.setEnabled(True)
        self.update_log("Loading Complete")
        # Buttons: [0]=start, [1]=stop (order set where ButtonBox is built).
        self.ButtonBox.findChildren(QtWidgets.QPushButton)[0].setEnabled(True)
        self.ButtonBox.findChildren(QtWidgets.QPushButton)[1].setEnabled(False)
        self.ButtonBox.findChildren(QtWidgets.QPushButton)[2].setEnabled(True)
        self.ButtonBox.findChildren(QtWidgets.QPushButton)[3].setEnabled(True)
    except Exception:
        # Was a bare `except:`, which also swallows KeyboardInterrupt and
        # SystemExit. Also re-enable the Load button so a retry is possible.
        self.loadButton.setEnabled(True)
        self.raise_error("Wrong Input!")
def draw_sample(self,level=0,change_buttons = True):
    """Draw Monte-Carlo samples of (x, y) points for one Z level.

    level: index into self.z_levels selecting the slab to sample.
    change_buttons: when True, disable the control buttons during sampling
    and restore them afterwards.

    Treats the 'intensity' column as an (unnormalized?) probability mass over
    grid rows and draws numberOfSamples indices per draw using
    scipy.stats.rv_discrete. The resulting points are stacked into
    self.inputdata with shape (n_points, 2).
    NOTE(review): rv_discrete expects probabilities summing to 1 — confirm
    the CSV intensities are normalized upstream.
    """
    if change_buttons:
        self.ButtonBox.findChildren(QtWidgets.QPushButton)[0].setEnabled(False)
        self.ButtonBox.findChildren(QtWidgets.QPushButton)[1].setEnabled(False)
        self.ButtonBox.findChildren(QtWidgets.QPushButton)[2].setEnabled(False)
        self.ButtonBox.findChildren(QtWidgets.QPushButton)[3].setEnabled(False)
        self.plotSampleButton.setEnabled(False)
        self.drawSampleButton.setEnabled(False)
    self.update_log("Drawing Samples for level {}".format(level))
    QtCore.QCoreApplication.processEvents()
    indices = []
    # Restrict the distribution to the rows belonging to this z level.
    mask = self.grid_3d["z"]==list(self.z_levels)[level]
    for _ in range(int(self.numberOfDrawsEdit.text())):
        if self.stopped:
            # User pressed stop; abandon the remaining draws.
            break
        else:
            draw = rv_discrete(name='custm',values=(self.grid_3d[mask].index,self.grid_3d[mask]["intensity"]))
            indices.append(draw.rvs(size=int(self.numberOfSamplesEdit.text())))
            self.update_log("Draw {} finished".format(_+1))
            QtCore.QCoreApplication.processEvents()
    indices = np.concatenate(indices)
    selected = self.grid_3d.iloc[indices]
    x, y = selected["x"].to_numpy(),selected["y"].to_numpy()
    self.inputdata = np.vstack([x,y]).T
    self.update_log("Drawing for level {} Completed".format(level))
    if change_buttons:
        self.ButtonBox.findChildren(QtWidgets.QPushButton)[0].setEnabled(True)
        self.ButtonBox.findChildren(QtWidgets.QPushButton)[1].setEnabled(False)
        self.ButtonBox.findChildren(QtWidgets.QPushButton)[2].setEnabled(True)
        self.ButtonBox.findChildren(QtWidgets.QPushButton)[3].setEnabled(True)
        self.plotSampleButton.setEnabled(True)
        self.drawSampleButton.setEnabled(True)
def plot_sample(self,level=0):
    """Scatter-plot the drawn samples on the distribution chart.

    NOTE(review): draw_sample() stores self.inputdata as a 2D (N, 2) array,
    but this indexes self.inputdata[level][:,0] as if it were a per-level
    sequence of 2D arrays — confirm which layout is intended (prepare()
    uses self.inputdata[:,0] directly).
    """
    self.update_log("Plotting Samples... ")
    QtCore.QCoreApplication.processEvents()
    self.distributionChart.add_chart(self.inputdata[level][:,0],self.inputdata[level][:,1],'scatter')
    self.update_log("Plotting Complete")
    # Remember that a scatter layer exists so prepare() does not re-add one.
    self.scatter_exist = True
def choose_destination(self):
    """Open a directory dialog and remember the chosen save destination."""
    path = QtWidgets.QFileDialog.getExistingDirectory(None,"choose save destination",self.currentDestination,QtWidgets.QFileDialog.ShowDirsOnly)
    self.currentDestination = path
    self.chooseDestinationLabel.setText("The save destination is:\n"+self.currentDestination)
def refresh_font_size(self):
    """Show the current slider value in the label and broadcast the
    (family, size) pair through FONTS_CHANGED."""
    size = self.fontSizeSlider.value()
    family = self.fontList.currentFont().family()
    self.fontSizeLabel.setText("Adjust Font Size ({})".format(size))
    self.FONTS_CHANGED.emit(family, size)
def refresh_font_name(self):
    """Broadcast the currently selected (family, size) through FONTS_CHANGED."""
    family = self.fontList.currentFont().family()
    size = self.fontSizeSlider.value()
    self.FONTS_CHANGED.emit(family, size)
def prepare(self):
    """Build the My_GMM estimator from the UI settings and wire it to the
    worker thread.

    Returns True when the estimator is ready to be started.
    """
    # mean_precision_prior= 0.8 to minimize the influence of the prior
    if self.meanPriorCheck.isChecked():
        mean_priors = self.get_mean_priors()
    else:
        mean_priors = None
    if self.covarPriorCheck.isChecked():
        covar_priors = self.get_covar_priors()
    else:
        covar_priors = None
    self.estimator = My_GMM(
        n_components= int(self.numberOfComponentsEdit.text()),
        covariance_type=self.covarianceTypeCombo.currentText(),
        tol=float(self.tolEdit.text()),
        reg_covar=float(self.regCovarEdit.text()),
        max_iter=int(self.maxItrEdit.text()),
        n_init=int(self.nInitEdit.text()),
        init_params=self.initMethodTypeCombo.currentText(),
        weight_concentration_prior_type=self.wcPriorTypeCombo.currentText(),
        mean_precision_prior=float(self.meanPrecPriorEdit.text()),
        mean_prior=None,
        mean_priors=mean_priors,
        degrees_of_freedom_prior=int(self.dofEdit.text()) if self.dofEdit.text() != '' else None,
        covariance_prior=covar_priors,
        random_state=int(self.rsEdit.text()),
        warm_start=self.warmStartCheck.isChecked(),
        verbose=int(self.vbEdit.text()),
        verbose_interval=int(self.vbIntvEdit.text()),
        weight_concentration_prior=float(self.wcPriorEdit.text()))
    # Seed the ELBO-change series plotted by update_plots().
    self.cost_series_X = [1]
    self.cost_series_Y = [1]
    if not self.scatter_exist:
        # No scatter layer yet (plot_sample was not used) -> add one now.
        self.distributionChart.add_chart(self.inputdata[:,0],self.inputdata[:,1],'scatter')
    self.estimator.load_input(self.inputdata)
    # Route estimator progress into the GUI, run it on the worker thread,
    # and let the stop button abort it.
    self.estimator.UPDATE_LOG.connect(self.update_log)
    self.estimator.SEND_UPDATE.connect(self.update_plots)
    self.estimator.moveToThread(self.thread)
    self.estimator.FINISHED.connect(self.thread.quit)
    self.estimator.FINISHED.connect(self.process_finished)
    self.thread.started.connect(self.estimator.run)
    self.STOP_WORKER.connect(self.estimator.stop)
    return True
def start(self):
    """Start (or restart) fitting the current Z level on the worker thread."""
    if self.stopped:
        # A previous run was aborted; begin again from the first level.
        self.stopped = False
        self.nz = 0
    if self.nz == int(self.numberOfZsEdit.text()):
        # All levels finished previously; wrap around to level 0.
        self.nz = 0
    self.update_log("Z level {} started".format(self.nz))
    if not hasattr(self,'inputdata'):
        # No samples drawn yet; draw level 0 synchronously first.
        self.draw_sample(0)
    ready = self.prepare()
    if ready:
        self.thread.start()
    # NOTE(review): button roles are inferred from the toggling pattern —
    # [1] behaves as Stop (enabled while running), the others are disabled.
    self.ButtonBox.findChildren(QtWidgets.QPushButton)[0].setEnabled(False)
    self.ButtonBox.findChildren(QtWidgets.QPushButton)[1].setEnabled(True)
    self.ButtonBox.findChildren(QtWidgets.QPushButton)[2].setEnabled(False)
    self.ButtonBox.findChildren(QtWidgets.QPushButton)[3].setEnabled(False)
    self.fitOptions.setEnabled(False)
def stop(self):
    """Abort the running fit and restore the idle button state."""
    self.stopped = True
    # Set nz past the last level so process_finished() will not chain on.
    self.nz = int(self.numberOfZsEdit.text()) + 1
    self.STOP_WORKER.emit()
    if self.thread.isRunning():
        # NOTE(review): terminate() is a hard kill; the estimator also
        # honors the abort_ flag, so this is a belt-and-braces shutdown.
        self.thread.terminate()
        self.thread.wait()
    self.ButtonBox.findChildren(QtWidgets.QPushButton)[0].setEnabled(True)
    self.ButtonBox.findChildren(QtWidgets.QPushButton)[1].setEnabled(False)
    self.ButtonBox.findChildren(QtWidgets.QPushButton)[2].setEnabled(True)
    self.ButtonBox.findChildren(QtWidgets.QPushButton)[3].setEnabled(True)
    self.fitOptions.setEnabled(True)
def process_finished(self):
    """Estimator FINISHED handler: advance to the next Z level or wind down.

    When more levels remain, draws samples for the next level and calls
    start() again; otherwise restores the idle button state.
    """
    if not self.stopped:
        self.update_log("Z level {} finished".format(self.nz))
        self.nz += 1
        if self.thread.isRunning():
            self.thread.terminate()
            self.thread.wait()
        # Briefly toggle the stop button while the thread winds down.
        self.ButtonBox.findChildren(QtWidgets.QPushButton)[1].setEnabled(False)
        QtCore.QCoreApplication.processEvents()
        time.sleep(0.5)
        self.ButtonBox.findChildren(QtWidgets.QPushButton)[1].setEnabled(True)
        if self.nz < int(self.numberOfZsEdit.text()):
            # More levels to fit: sample the next one without touching buttons.
            self.draw_sample(self.nz,False)
            #self.update_mean_priors()
            #self.update_covar_priors()
            self.start()
    if self.stopped or self.nz == int(self.numberOfZsEdit.text()):
        self.ButtonBox.findChildren(QtWidgets.QPushButton)[0].setEnabled(True)
        self.ButtonBox.findChildren(QtWidgets.QPushButton)[1].setEnabled(False)
        self.ButtonBox.findChildren(QtWidgets.QPushButton)[2].setEnabled(True)
        self.ButtonBox.findChildren(QtWidgets.QPushButton)[3].setEnabled(True)
        self.fitOptions.setEnabled(True)
def reset(self):
    """Placeholder for the dialog's Reset action; intentionally a no-op."""
    pass
def update_plots(self,n_iter,change,params):
    """SEND_UPDATE handler: refresh all charts with the estimator's state.

    n_iter: current EM iteration (iteration 1 is skipped).
    change: ELBO change for this iteration (plotted as |change|).
    params: the tuple from BayesianGaussianMixture._get_parameters():
    (weight_concentration, mean_precision, means, degrees_of_freedom,
    covariances, precisions_cholesky).
    """
    if n_iter>1:
        weight_concentration = params[0]
        mean_precision = params[1]
        means = params[2]
        degrees_of_freedom = params[3]
        covars = params[4]
        precisions_cholesky = params[5]
        if self.wcPriorTypeCombo.currentText() == "dirichlet_process":
            # Stick-breaking representation: convert the two Beta parameter
            # arrays into normalized mixture weights.
            weight_dirichlet_sum = (weight_concentration[0] + weight_concentration[1])
            tmp = weight_concentration[1] / weight_dirichlet_sum
            weights = (
                weight_concentration[0] / weight_dirichlet_sum *
                np.hstack((1, np.cumprod(tmp[:-1]))))
            weights /= np.sum(weights)
        else:
            weights = (weight_concentration/np.sum(weight_concentration))
        # Ellipse parameters (axes a/b and tilt angle) per component,
        # derived from the covariance eigendecomposition.
        a, b, angle,colors = [], [], [], []
        for i in range(means.shape[0]):
            eig_vals, eig_vecs = np.linalg.eigh(covars[i])
            unit_eig_vec = eig_vecs[0] / np.linalg.norm(eig_vecs[0])
            ang = np.arctan2(unit_eig_vec[1], unit_eig_vec[0])
            ang = 180 * ang / np.pi
            # 2*sqrt(2*lambda): ellipse axes at roughly the 2-sigma contour.
            eig_vals = 2 * np.sqrt(2) * np.sqrt(eig_vals)
            a.append(eig_vals[0])
            b.append(eig_vals[1])
            angle.append(ang)
            colors.append(self.fit_colors[i])
        self.cost_series_X.append(n_iter)
        self.cost_series_Y.append(np.abs(change))
        self.costChart.add_chart(self.cost_series_X, self.cost_series_Y,'ELBO change')
        self.distributionChart.append_to_chart(x=means[:,0],y=means[:,1],a=a,b=b,angle=angle,weights=weights,colors=colors,type='ellipse')
        self.weightChart.add_chart(weights=weights,colors=colors,type='bar')
        self.update_covar_posteriors(covars)
        self.update_mean_posteriors(means)
def write_results(self,results):
    """Buffer one chunk of fitting output until close_results() flushes it."""
    self.fitting_results += [results]
def close_results(self):
    """Flush every buffered fitting result to the output file, then close it."""
    for entry in self.fitting_results:
        self.output.write(entry)
    self.output.close()
def update_log(self,message):
    """Append a timestamped line to the log box and scroll to the bottom."""
    # \u00A0 are non-breaking spaces separating the timestamp from the message.
    self.logBox.append(QtCore.QTime.currentTime().toString("hh:mm:ss")+"\u00A0\u00A0\u00A0\u00A0" + message)
    self.logBox.moveCursor(QtGui.QTextCursor.End)
def reject(self):
    """Close the dialog (cancel action)."""
    self.Dialog.close()
def initial_parameters(self):
    """Collect the starting parameters for the selected fit function.

    Reads the table column-group by column-group (Gaussian: height, center,
    width; Voigt: center, amplitude, FL, FG), one value per row, and appends
    the global offset last.
    """
    function = self.fitFunctionCombo.currentText()
    if function == 'Gaussian':
        columns = [3, 0, 6]
    elif function == 'Voigt':
        columns = [0, 3, 6, 9]
    para = [float(self.table.item(row, column).text())
            for column in columns
            for row in range(self.table.rowCount())]
    para.append(float(self.offset.get_value()))
    return para
def update_results(self,results):
    """Write fitted peak parameters back into the table and feed new bounds
    to the fit worker.

    results: flat parameter vector ordered like initial_parameters()
    (column groups, one value per row, offset last).

    For each fitted value the table gets three cells: the value itself
    (red, column j) and derived lower/upper bounds (columns j+1, j+2).

    NOTE(review): self.guess, self.bound_low and self.bound_high are not
    assigned in this method — they must be maintained elsewhere; confirm
    they reflect the values written here before the final emit.
    """
    self.offset.set_value(results[-1])
    if self.fitFunctionCombo.currentText() == 'Gaussian':
        index=0
        # Column groups: 3 = height, 0 = center, 6 = width.
        for j in [3,0,6]:
            for i in range(self.table.rowCount()):
                value = np.round(results[index],2)
                item = QtWidgets.QTableWidgetItem('{}'.format(value))
                # Half-widths of the bound interval around each fitted value.
                variation = [0.5,0.5,0.5,0.5,0.5,0.5]
                #Height
                if j == 3:
                    item2 = QtWidgets.QTableWidgetItem('{}'.format(max(0,np.round(value-variation[0],2))))
                    item3 = QtWidgets.QTableWidgetItem('{}'.format(np.round(value+variation[1],2)))
                #Center
                elif j == 0:
                    item2 = QtWidgets.QTableWidgetItem('{}'.format(max(0,np.round(value-variation[2],2))))
                    item3 = QtWidgets.QTableWidgetItem('{}'.format(np.round(value+variation[3],2)))
                #Width
                else:
                    # Width lower bound is clamped to 0.1 to stay positive.
                    item2 = QtWidgets.QTableWidgetItem('{}'.format(max(0.1,np.round(value-variation[4],2))))
                    item3 = QtWidgets.QTableWidgetItem('{}'.format(np.round(value+variation[5],2)))
                item.setForeground(QtGui.QBrush(QtGui.QColor(QtCore.Qt.red)))
                item.setTextAlignment(QtCore.Qt.AlignCenter)
                item2.setTextAlignment(QtCore.Qt.AlignCenter)
                item3.setTextAlignment(QtCore.Qt.AlignCenter)
                self.table.setItem(i,j,item)
                self.table.setItem(i,j+1,item2)
                self.table.setItem(i,j+2,item3)
                index+=1
    elif self.fitFunctionCombo.currentText() == 'Voigt':
        index=0
        # Column groups: 0 = center, 3 = amplitude, 6 = FL, 9 = FG.
        for j in [0,3,6,9]:
            for i in range(self.table.rowCount()):
                value = np.round(results[index],2)
                item = QtWidgets.QTableWidgetItem('{}'.format(value))
                variation = [0.5,0.5,1,1,1,1,1,1]
                #Center
                if j == 0:
                    item2 = QtWidgets.QTableWidgetItem('{}'.format(max(0,np.round(value-variation[0],2))))
                    item3 = QtWidgets.QTableWidgetItem('{}'.format(np.round(value+variation[1],2)))
                #Amplitude
                elif j == 3:
                    item2 = QtWidgets.QTableWidgetItem('{}'.format(max(0,np.round(value-variation[2],2))))
                    item3 = QtWidgets.QTableWidgetItem('{}'.format(np.round(value+variation[3],2)))
                #FL
                elif j == 6:
                    item2 = QtWidgets.QTableWidgetItem('{}'.format(max(0.01,np.round(value-variation[4],2))))
                    item3 = QtWidgets.QTableWidgetItem('{}'.format(np.round(value+variation[5],2)))
                #FG
                elif j == 9:
                    item2 = QtWidgets.QTableWidgetItem('{}'.format(max(0.01,np.round(value-variation[6],2))))
                    item3 = QtWidgets.QTableWidgetItem('{}'.format(np.round(value+variation[7],2)))
                item.setForeground(QtGui.QBrush(QtGui.QColor(QtCore.Qt.red)))
                item.setTextAlignment(QtCore.Qt.AlignCenter)
                item2.setTextAlignment(QtCore.Qt.AlignCenter)
                item3.setTextAlignment(QtCore.Qt.AlignCenter)
                self.table.setItem(i,j,item)
                self.table.setItem(i,j+1,item2)
                self.table.setItem(i,j+2,item3)
                index+=1
    self.FEED_BACK_TO_FIT_WORKER.emit(self.guess, (self.bound_low,self.bound_high))
def raise_error(self,message):
    """Show a modal warning dialog with the given message."""
    box = QtWidgets.QMessageBox()
    box.setIcon(QtWidgets.QMessageBox.Warning)
    box.setWindowTitle("Error")
    box.setText(message)
    box.setStandardButtons(QtWidgets.QMessageBox.Ok)
    box.setEscapeButton(QtWidgets.QMessageBox.Close)
    box.exec()
def raise_attention(self,information):
    """Show a modal information dialog with the given text."""
    box = QtWidgets.QMessageBox()
    box.setIcon(QtWidgets.QMessageBox.Information)
    box.setWindowTitle("Information")
    box.setText(information)
    box.setStandardButtons(QtWidgets.QMessageBox.Ok)
    box.setEscapeButton(QtWidgets.QMessageBox.Close)
    box.exec()
def progress(self,min,max,val):
    """Show the progress bar and set its range and current value.

    NOTE: `min` and `max` shadow the builtins; kept for interface
    compatibility with existing signal connections.
    """
    bar = self.progressBar
    bar.setVisible(True)
    bar.setMinimum(min)
    bar.setMaximum(max)
    bar.setValue(val)
def progress_reset(self):
    """Reset the progress bar and hide it."""
    bar = self.progressBar
    bar.reset()
    bar.setVisible(False)
class Meta_QT_GMM(type(QtCore.QObject),type(BayesianGaussianMixture)):
    """Combined metaclass so My_GMM can inherit from both QObject (Qt's
    sip metaclass) and BayesianGaussianMixture (sklearn's metaclass)
    without a metaclass conflict."""
    pass
class My_GMM(QtCore.QObject, BayesianGaussianMixture,metaclass=Meta_QT_GMM):
    """BayesianGaussianMixture subclass that cooperates with the Qt GUI.

    Adds pyqtSignal-based logging/progress reporting, an abort flag checked
    each EM iteration, optional per-component mean priors (``mean_priors``)
    used to seed the means, and an initialization path that seeds the
    covariances from the covariance prior instead of the data.
    """
    # Emitted with one log line for the GUI log box.
    UPDATE_LOG = QtCore.pyqtSignal(str)
    # Emitted each iteration with (n_iter, ELBO change, _get_parameters()).
    SEND_UPDATE = QtCore.pyqtSignal(int,float,tuple)
    # Emitted once when fit_predict() returns (converged or aborted).
    FINISHED = QtCore.pyqtSignal()
    def __init__(self,*,n_components=1, covariance_type='full', tol=1e-3,
                 reg_covar=1e-6, max_iter=100, n_init=1, init_params='kmeans',
                 weight_concentration_prior_type='dirichlet_process',
                 weight_concentration_prior=None,
                 mean_precision_prior=None, mean_prior=None,mean_priors=None,
                 degrees_of_freedom_prior=None, covariance_prior=None,
                 random_state=None, warm_start=False, verbose=0,
                 verbose_interval=10):
        """Same keyword interface as BayesianGaussianMixture, plus
        ``mean_priors``: an optional list of per-component mean vectors
        used to seed ``means_`` during initialization."""
        super().__init__(n_components=n_components, covariance_type=covariance_type, tol=tol,
                         reg_covar=reg_covar, max_iter=max_iter, n_init=n_init, init_params=init_params,
                         weight_concentration_prior_type=weight_concentration_prior_type,
                         weight_concentration_prior=weight_concentration_prior,
                         mean_precision_prior=mean_precision_prior, mean_prior=mean_prior,
                         degrees_of_freedom_prior=degrees_of_freedom_prior, covariance_prior=covariance_prior,
                         random_state=random_state, warm_start=warm_start, verbose=verbose,
                         verbose_interval=verbose_interval)
        self.mean_priors = mean_priors
    def _initialize(self, X, resp):
        """Initialization of the mixture parameters.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
        resp : array-like of shape (n_samples, n_components)

        Overrides the sklearn version to pass initialize=True down so the
        user-supplied priors seed the means and covariances.
        """
        nk, xk, sk = _estimate_gaussian_parameters(X, resp, self.reg_covar,
                                                   self.covariance_type)
        self._estimate_weights(nk)
        self._estimate_means(nk, xk,True)
        self._estimate_precisions(nk, xk, sk,True)
    def _estimate_precisions(self, nk, xk, sk,initialize=False):
        """Estimate the precisions parameters of the precision distribution.
        Parameters
        ----------
        nk : array-like of shape (n_components,)
        xk : array-like of shape (n_components, n_features)
        sk : array-like
            The shape depends of `covariance_type`:
            'full' : (n_components, n_features, n_features)
            'tied' : (n_features, n_features)
            'diag' : (n_components, n_features)
            'spherical' : (n_components,)
        """
        # Dispatch on covariance type; initialize is forwarded so the
        # first call can seed covariances from the covariance prior.
        {"full": self._estimate_wishart_full,
         "tied": self._estimate_wishart_tied,
         "diag": self._estimate_wishart_diag,
         "spherical": self._estimate_wishart_spherical
         }[self.covariance_type](nk, xk, sk,initialize)
        self.precisions_cholesky_ = _compute_precision_cholesky(
            self.covariances_, self.covariance_type)
    def _estimate_means(self, nk, xk,initial=False):
        """Estimate the parameters of the Gaussian distribution.
        Parameters
        ----------
        nk : array-like of shape (n_components,)
        xk : array-like of shape (n_components, n_features)

        When ``initial`` is True and user mean priors were supplied, the
        means are set directly from them instead of the posterior update.
        """
        if self.mean_priors and initial:
            self.mean_precision_ = self.mean_precision_prior_ + nk
            self.means_=np.array(self.mean_priors)
        else:
            self.mean_precision_ = self.mean_precision_prior_ + nk
            self.means_ = ((self.mean_precision_prior_ * self.mean_prior_ +
                            nk[:, np.newaxis] * xk) /
                           self.mean_precision_[:, np.newaxis])
    def _estimate_wishart_full(self, nk, xk, sk,initialize=False):
        """Estimate the full Wishart distribution parameters.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
        nk : array-like of shape (n_components,)
        xk : array-like of shape (n_components, n_features)
        sk : array-like of shape (n_components, n_features, n_features)
        """
        _, n_features = xk.shape
        # Warning : in some Bishop book, there is a typo on the formula 10.63
        # `degrees_of_freedom_k = degrees_of_freedom_0 + Nk` is
        # the correct formula
        self.degrees_of_freedom_ = self.degrees_of_freedom_prior_ + nk
        self.covariances_ = np.empty((self.n_components, n_features,
                                      n_features))
        if not initialize:
            for k in range(self.n_components):
                diff = xk[k] - self.mean_prior_
                self.covariances_[k] = (self.covariance_prior_ + nk[k] * sk[k] +
                                        nk[k] * self.mean_precision_prior_ /
                                        self.mean_precision_[k] * np.outer(diff,
                                                                           diff))
            # Contrary to the original bishop book, we normalize the covariances
            self.covariances_ /= (
                self.degrees_of_freedom_[:, np.newaxis, np.newaxis])
        else:
            # First call: seed every component covariance from the prior.
            for k in range(self.n_components):
                self.covariances_[k] = self.covariance_prior_
    def _estimate_wishart_tied(self, nk, xk, sk,initialize=False):
        """Estimate the tied Wishart distribution parameters.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
        nk : array-like of shape (n_components,)
        xk : array-like of shape (n_components, n_features)
        sk : array-like of shape (n_features, n_features)
        """
        _, n_features = xk.shape
        # Warning : in some Bishop book, there is a typo on the formula 10.63
        # `degrees_of_freedom_k = degrees_of_freedom_0 + Nk`
        # is the correct formula
        self.degrees_of_freedom_ = (
            self.degrees_of_freedom_prior_ + nk.sum() / self.n_components)
        diff = xk - self.mean_prior_
        if not initialize:
            self.covariances_ = (
                self.covariance_prior_ + sk * nk.sum() / self.n_components +
                self.mean_precision_prior_ / self.n_components * np.dot(
                    (nk / self.mean_precision_) * diff.T, diff))
            # Contrary to the original bishop book, we normalize the covariances
            self.covariances_ /= self.degrees_of_freedom_
        else:
            # First call: seed the shared covariance from the prior.
            self.covariances_ = self.covariance_prior_
    def _estimate_wishart_diag(self, nk, xk, sk,initialize=False):
        """Estimate the diag Wishart distribution parameters.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
        nk : array-like of shape (n_components,)
        xk : array-like of shape (n_components, n_features)
        sk : array-like of shape (n_components, n_features)
        """
        _, n_features = xk.shape
        # Warning : in some Bishop book, there is a typo on the formula 10.63
        # `degrees_of_freedom_k = degrees_of_freedom_0 + Nk`
        # is the correct formula
        self.degrees_of_freedom_ = self.degrees_of_freedom_prior_ + nk
        diff = xk - self.mean_prior_
        if not initialize:
            self.covariances_ = (
                self.covariance_prior_ + nk[:, np.newaxis] * (
                    sk + (self.mean_precision_prior_ /
                          self.mean_precision_)[:, np.newaxis] * np.square(diff)))
            # Contrary to the original bishop book, we normalize the covariances
            self.covariances_ /= self.degrees_of_freedom_[:, np.newaxis]
        else:
            # First call: seed the diagonal covariances from the prior.
            self.covariances_ = self.covariance_prior_
    def _estimate_wishart_spherical(self, nk, xk, sk,initialize=False):
        """Estimate the spherical Wishart distribution parameters.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
        nk : array-like of shape (n_components,)
        xk : array-like of shape (n_components, n_features)
        sk : array-like of shape (n_components,)
        """
        _, n_features = xk.shape
        # Warning : in some Bishop book, there is a typo on the formula 10.63
        # `degrees_of_freedom_k = degrees_of_freedom_0 + Nk`
        # is the correct formula
        self.degrees_of_freedom_ = self.degrees_of_freedom_prior_ + nk
        diff = xk - self.mean_prior_
        if not initialize:
            self.covariances_ = (
                self.covariance_prior_ + nk * (
                    sk + self.mean_precision_prior_ / self.mean_precision_ *
                    np.mean(np.square(diff), 1)))
            # Contrary to the original bishop book, we normalize the covariances
            self.covariances_ /= self.degrees_of_freedom_
        else:
            # First call: seed the spherical covariances from the prior.
            self.covariances_ = self.covariance_prior_
    def load_input(self,X,y=None):
        """Store the data (and optional labels) to be fitted by run()."""
        self.inputdata = X
        self.label = y
    def run(self):
        """Worker-thread entry point: clear the abort flag and fit."""
        self.abort_ = False
        self.fit_predict(self.inputdata,self.label)
    def stop(self):
        """Request an abort; honored at the end of each EM iteration."""
        self.abort_ = True
    def fit_predict(self,X,y=None):
        """EM fit re-implemented from sklearn with GUI hooks.

        Emits SEND_UPDATE/UPDATE_LOG each iteration and checks the abort_
        flag; otherwise follows the sklearn fit_predict() control flow.
        Returns the argmax component index per sample.
        """
        #X = _check_X(X, self.n_components, ensure_min_samples=2)
        self._check_n_features(X, reset=True)
        self._check_initial_parameters(X)
        # if we enable warm_start, we will have a unique initialisation
        do_init = not(self.warm_start and hasattr(self, 'converged_'))
        n_init = self.n_init if do_init else 1
        max_lower_bound = -np.infty
        self.converged_ = False
        # Resolve random_state like sklearn's check_random_state().
        if self.random_state is None or self.random_state is np.random:
            random_state = np.random.mtrand._rand
        elif isinstance(self.random_state,numbers.Integral):
            random_state = np.random.RandomState(self.random_state)
        elif isinstance(self.random_state,np.random.RandomState):
            random_state = self.random_state
        n_samples, _ = X.shape
        for init in range(n_init):
            if do_init:
                self._initialize_parameters(X, random_state)
            lower_bound = (-np.infty if do_init else self.lower_bound_)
            for n_iter in range(1, self.max_iter + 1):
                prev_lower_bound = lower_bound
                log_prob_norm, log_resp = self._e_step(X)
                self._m_step(X, log_resp)
                lower_bound = self._compute_lower_bound(log_resp, log_prob_norm)
                change = lower_bound - prev_lower_bound
                # Push progress to the GUI; the short sleep keeps the UI
                # responsive while iterating.
                self.SEND_UPDATE.emit(n_iter, change, self._get_parameters())
                self.UPDATE_LOG.emit("[From sklearn.mixture.BayesGM] At iteration {}, change={}".format(n_iter,change))
                time.sleep(0.05)
                if abs(change) < self.tol:
                    self.converged_ = True
                    break
                if self.abort_:
                    self.UPDATE_LOG.emit("[From sklearn.mixture.BayesGM] Aborted.")
                    break
                QtCore.QCoreApplication.processEvents()
            self.UPDATE_LOG.emit("[From sklearn.mixture.BayesGM] Final lower bound: {}".format(lower_bound))
            if lower_bound > max_lower_bound:
                max_lower_bound = lower_bound
                best_params = self._get_parameters()
                best_n_iter = n_iter
        if not self.converged_ and not self.abort_:
            self.UPDATE_LOG.emit('Initialization %d did not converge. Try different init parameters, or increase max_iter, tol or check for degenerate data.')
        self._set_parameters(best_params)
        self.n_iter_ = best_n_iter
        self.lower_bound_ = max_lower_bound
        # Always do a final e-step to guarantee that the labels returned by
        # fit_predict(X) are always consistent with fit(X).predict(X)
        # for any value of max_iter and tol (and any random_state).
        _, log_resp = self._e_step(X)
        self.FINISHED.emit()
        return log_resp.argmax(axis=1)
# Standalone entry point: start the Qt event loop and show the main window.
if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    window = Window()
    window.main()
    sys.exit(app.exec_())
|
{"hexsha": "3cfce1e76b89bb6fb5e60b283d47fed03540167f", "size": 65944, "ext": "py", "lang": "Python", "max_stars_repo_path": "source/gmm.py", "max_stars_repo_name": "yux1991/PyRHEED", "max_stars_repo_head_hexsha": "b39ad03651c92e3649069919ae48b1e5158cd3dd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2019-01-08T14:32:31.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-17T21:07:10.000Z", "max_issues_repo_path": "source/gmm.py", "max_issues_repo_name": "yux1991/PyRHEED", "max_issues_repo_head_hexsha": "b39ad03651c92e3649069919ae48b1e5158cd3dd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-05-14T08:56:36.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-22T16:44:30.000Z", "max_forks_repo_path": "source/gmm.py", "max_forks_repo_name": "yux1991/PyRHEED", "max_forks_repo_head_hexsha": "b39ad03651c92e3649069919ae48b1e5158cd3dd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-03-12T20:03:54.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T14:24:46.000Z", "avg_line_length": 49.8442932729, "max_line_length": 227, "alphanum_fraction": 0.6442890938, "include": true, "reason": "import numpy,from scipy", "num_tokens": 14243}
|
#include "scoreboard.h"
#include <boost/optional/optional_io.hpp>
#include <sstream>
using namespace hangman;
// Construct a scoreboard that observes a word and a player.
// Both models' state_changed signals are wired to re-publish the stats line.
// NOTE(review): raw `this` is bound into the slots; this assumes the
// scoreboard outlives both signal sources -- confirm object lifetimes.
scoreboard::scoreboard(std::shared_ptr<hangman::word> word,
                       std::shared_ptr<hangman::player> player)
    : word_(word), player_(player) {
  word->state_changed.connect(
      boost::bind(&scoreboard::notify_stats_changed, this));
  player->state_changed.connect(
      boost::bind(&scoreboard::notify_stats_changed, this));
}
// Push the freshly formatted stats line to all connected observers.
void scoreboard::notify_stats_changed() const {
  stats_changed(stats());
}
// Render the player's remaining lives as a run of 'I' characters.
std::string scoreboard::format_lives() const {
  const auto life_count = static_cast<size_t>(player_->lives());
  return std::string(life_count, 'I');
}
std::string scoreboard::format_word() const {
std::stringstream word_stream;
for (boost::optional<char> letter : word_->representation()) {
if (letter == boost::none) {
word_stream << '-';
} else {
word_stream << letter.get();
}
}
return word_stream.str();
}
// Build the one-line status string, e.g. "Lives: III | Word: h-ll-".
std::string scoreboard::stats() const {
  return "Lives: " + format_lives() + " | Word: " + format_word();
}
|
{"hexsha": "037ccf7712dd57dd083130418571ed1ccfd884ba", "size": 1174, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/scoreboard.cpp", "max_stars_repo_name": "a-ostrovsky/hangman_kata", "max_stars_repo_head_hexsha": "3eaa179148f5584b33e768d3b06d2cb24e5766e1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/scoreboard.cpp", "max_issues_repo_name": "a-ostrovsky/hangman_kata", "max_issues_repo_head_hexsha": "3eaa179148f5584b33e768d3b06d2cb24e5766e1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/scoreboard.cpp", "max_forks_repo_name": "a-ostrovsky/hangman_kata", "max_forks_repo_head_hexsha": "3eaa179148f5584b33e768d3b06d2cb24e5766e1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.6341463415, "max_line_length": 73, "alphanum_fraction": 0.6737649063, "num_tokens": 280}
|
import os
import scipy
import statsmodels.api as sm
from scipy import stats
from statsmodels.formula.api import ols
from utils.save_data import write_csv
import pandas as pd
def compare_variances(data, factor, outcome):
    """Print per-group descriptive stats and Levene's test for each outcome.

    For every outcome column, prints group sizes, means, variances and the
    Levene statistic comparing the variances of the two factor groups.

    Args:
        data: DataFrame with a 'trial_index' column, the grouping column
            *factor*, and the outcome columns.
        factor: Name of the grouping column.
            NOTE(review): group labels are assumed to be coded 1 and 2 --
            confirm against callers.
        outcome: Iterable of outcome column names to test.
    """
    # Use a distinct loop variable: the original shadowed the `outcome`
    # parameter, which made the code confusing and fragile.
    for measure in outcome:
        grouped = data \
            .groupby([factor], as_index=False) \
            .agg(n=('trial_index', 'count'),
                 mean=(measure, 'mean'),
                 var=(measure, 'var'))
        grouped['df'] = grouped['n'] - 1
        grouped['measure'] = measure
        # Levene's test for equality of variances between the two groups.
        F, p_value = scipy.stats.levene(
            data.loc[data[factor] == 1, measure],
            data.loc[data[factor] == 2, measure])
        # Broadcast the test result onto every row of the group summary.
        grouped[['F', 'p']] = [F, p_value]
        print(f"""{grouped} \n""")
def anova_outcomes_factor(data, outcomes, factor, path):
    """Run a one-way ANOVA of each outcome against *factor*; print and save it.

    Args:
        data: DataFrame containing the outcome columns and the factor column.
        outcomes: Iterable of outcome column names (each becomes one model).
        factor: Name of the predictor used in the formula ``outcome ~ factor``.
        path: Directory the summary CSV is written to.

    Side effects:
        Prints the rounded summary table and writes it to
        ``anova_outcomes_vs_<factor>.csv`` under *path*.
    """
    tables = []
    for var in outcomes:
        linear_model = ols((var + ' ~ ' + factor), data=data).fit()
        # Type 2 ANOVA DataFrame
        outcome_table = sm.stats.anova_lm(linear_model, typ=2)
        outcome_table['outcome'] = var
        tables.append(outcome_table)
    # pd.concat replaces DataFrame.append, which was removed in pandas >= 2.0.
    if tables:
        summary = pd.concat(tables)
    else:
        # No outcomes: keep the original empty-table behavior.
        summary = pd.DataFrame(
            [], columns=['sum_sq', 'df', 'F', 'PR(>F)', 'outcome'])
    summary = summary[['outcome', 'sum_sq', 'df', 'F', 'PR(>F)']]
    summary['df'] = summary['df'].astype(int)
    summary[['sum_sq', 'F']] = round(summary[['sum_sq', 'F']], 2)
    summary[['PR(>F)']] = round(summary[['PR(>F)']], 3)
    print(f"""Outcomes vs {factor} ANOVA: \n"""
          f"""{summary} \n""")
    write_csv(data=summary,
              file_name='anova_outcomes_vs_' + factor + '.csv',
              path=os.path.join(path))
|
{"hexsha": "35e9a9a259fe0e8a4846f8efc6395b8b7c523394", "size": 1711, "ext": "py", "lang": "Python", "max_stars_repo_path": "inference/F.py", "max_stars_repo_name": "TimS70/WebET_Analysis", "max_stars_repo_head_hexsha": "32fc2e1b70c2dad5637ee1614a6a651bc8d458b4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "inference/F.py", "max_issues_repo_name": "TimS70/WebET_Analysis", "max_issues_repo_head_hexsha": "32fc2e1b70c2dad5637ee1614a6a651bc8d458b4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "inference/F.py", "max_forks_repo_name": "TimS70/WebET_Analysis", "max_forks_repo_head_hexsha": "32fc2e1b70c2dad5637ee1614a6a651bc8d458b4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.5, "max_line_length": 78, "alphanum_fraction": 0.5587375804, "include": true, "reason": "import scipy,from scipy,import statsmodels,from statsmodels", "num_tokens": 455}
|
r"""
Ordination methods (:mod:`skbio.maths.stats.ordination`)
========================================================
.. currentmodule:: skbio.maths.stats.ordination
This module contains several ordination methods, including Principal
Coordinate Analysis, Correspondence Analysis, Redundancy Analysis and
Canonical Correspondence Analysis.
Classes
-------
.. autosummary::
:toctree: generated/
PCoA
CA
RDA
CCA
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from .correspondence_analysis import CA
from .redundancy_analysis import RDA
from .canonical_correspondence_analysis import CCA
from .principal_coordinate_analysis import PCoA
# Public API of the ordination subpackage.
__all__ = ['CA', 'RDA', 'CCA', 'PCoA']

# Expose a NumPy-style test runner as `ordination.test`.
# NOTE(review): numpy.testing.Tester was removed in modern NumPy (>= 1.18);
# this import fails there -- confirm the supported NumPy version range.
from numpy.testing import Tester
test = Tester().test
|
{"hexsha": "e5faf974e982ef9a9302a12810fb37517ca16e4b", "size": 1061, "ext": "py", "lang": "Python", "max_stars_repo_path": "skbio/maths/stats/ordination/__init__.py", "max_stars_repo_name": "Jorge-C/bipy", "max_stars_repo_head_hexsha": "1097cefafc6f9bbb9d96f25b569892a3fe3f3600", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "skbio/maths/stats/ordination/__init__.py", "max_issues_repo_name": "Jorge-C/bipy", "max_issues_repo_head_hexsha": "1097cefafc6f9bbb9d96f25b569892a3fe3f3600", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "skbio/maths/stats/ordination/__init__.py", "max_forks_repo_name": "Jorge-C/bipy", "max_forks_repo_head_hexsha": "1097cefafc6f9bbb9d96f25b569892a3fe3f3600", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-09-21T01:58:43.000Z", "max_forks_repo_forks_event_max_datetime": "2018-09-21T01:58:43.000Z", "avg_line_length": 26.525, "max_line_length": 78, "alphanum_fraction": 0.6088595664, "include": true, "reason": "from numpy", "num_tokens": 215}
|
This notebook supplements the manuscript "Data-driven modeling reveals a universal dynamic underlying the COVID-19 pandemic under social distancing" by Robert Marsland III and Pankaj Mehta.
In this work, we show that the cumulative fatalities $N(t)$ for every region with more than 500 deaths as of April 15, 2020 is well-described by the function:
\begin{align}
N(t) = N_{\rm max}\Phi\left(\frac{t-t_h}{\sigma}\right),
\end{align}
where
\begin{align}
\Phi(t) &= {1 \over \sqrt{2\pi}} \int_{-\infty}^t e^{-x^2/2}\,dx
\end{align}
is the cumulative normal distribution. The same functional form provides a good fit to the cumulative number of confirmed cases for most countries with sufficient data. This model has two key parameters, the final total number of fatalities (or cases) $N_{\rm max}$, and the infection timescale $\sigma$. The third parameter $t_h$, sets the overall location of the infection curve on the time axis.
The first cell of this notebook loads the current case and fatality data from the [Johns Hopkins github repository](https://github.com/CSSEGISandData/COVID-19) along with the best-fit values and confidence bounds for the three fitting parameters using data through April 15, 2020, and a set of functions for fitting, analyzing and simulating the data. Example code is provided for plotting the predictions, making new predictions and running simulations.
We emphasize that the confidence intervals shown here are <em>lower bounds</em> on the true amount of uncertainty. These intervals are calculated under the assumption that the data is accurately described by the fitting function given above, with no changes in parameter values, and with all deviations from this function due to random multiplicative noise. The confidence intervals <em>do not</em> account for possible future changes in social policy, or for systematic deviations from the fitting function that may arise at late times.
# Plot predictions
To plot the live data with the predictions made on April 15, replace the arguments of the "plot_region" function with the country and province/state of interest. Country-level data is accessed by using 'NaN' for the second argument. Dashed line marks April 15, the date the predictions were made.
```python
from covid_plotting import *
plot_region('Sweden','NaN',forecast_days=30)
```
Changing the horizontal axis to a log scale allows you to see the power law behavior at early times:
```python
plot_region('Sweden','NaN',forecast_days=30,log_scale=True)
```
To access the parameters for a given region, replace the two strings in the cells below with the country and state/province of interest.
For fatalities, use:
```python
predictions_deaths_apr15.loc['Sweden','NaN']
```
Nmax 2007.23
Nmax_low 1499.11
Nmax_high 3433.77
sigma 9.88884
sigma_low 8.60252
sigma_high 11.9405
th 2020-04-12 00:00:00
th_low 2020-04-09 00:00:00
th_high 2020-04-18 00:00:00
Name: (Sweden, NaN), dtype: object
For cases, use:
```python
predictions_cases_apr15.loc['Sweden','NaN']
```
Nmax 49328.6
Nmax_low 33886.3
Nmax_high 85215.3
sigma 21.8862
sigma_low 19.7903
sigma_high 24.6654
th 2020-04-29 00:00:00
th_low 2020-04-21 00:00:00
th_high 2020-05-10 00:00:00
Name: (Sweden, NaN), dtype: object
The complete spreadsheets can also be downloaded from the "output" folder of [the github repository](https://github.com/Emergent-Behaviors-in-Biology/covid19).
# Make new predictions
The cell below allows you to fit the full dataset including the latest data, for all countries and regions. It generates Pandas dataframes called `params_cases` and `params_deaths`, which contain the best-fit parameter values along with the RMS deviation of the data from the fitting function in log space ("score"). These parameters can be used as the starting point for generating confidence intervals below. The parameter `p0` sets a threshold for discarding data from the beginning of the pandemic in each region, in order to focus on the later, more universal part of the curve. All timepoints with fewer cumulative cases/fatalities than `p0` are discarded for the purposes of fitting.
```python
from covid_plotting import *
params_deaths = fit_all(deaths.iloc[:-1],plot=False,p0=50)
params_cases = fit_all(cases.iloc[:-1],plot=False,p0=5e2)
```
We can use the best-fit parameters to perform an updated data collapse:
```python
#params_deaths.loc['China','Hubei'] = params_china #Fitting Hubei fatality data only up through Feb 25 gives better collapse
plot_collapse(params_deaths,params_cases)
```
We can now look at the updated predictions for regions of interest, computing the 95 percent confidence interval just for the region plotted, following the method described in the manuscript. As noted above, the calculation <em>underestimates</em> the true amount of uncertainty, since it assumes that the data is accurately described by the fitting function given above, with no changes in parameter values, and with all deviations from this function due to random multiplicative noise.
```python
country = 'US'
province = 'Massachusetts'
predictions_deaths = predict_all(deaths,params_deaths.loc[[(country,province)]],p0=50)
predictions_cases = predict_all(cases,params_cases.loc[[(country,province)]],p0=500)
plot_region(country,province,forecast_days=30,new_predictions=(predictions_deaths,predictions_cases))
```
The next cell computes 95 percent confidence intervals for all countries and regions. The best-fit parameters and upper/lower bounds on the confidence interval are contained in the Pandas dataframes `predictions_deaths` and `predictions_cases`. Set `verbose` to `False` if you do not want each country's name to be printed as its predictions are generated. <em>Note that this cell takes about 10 minutes to run.</em>
```python
predictions_deaths = predict_all(deaths,params_deaths,p0=50,verbose=True)
predictions_cases = predict_all(cases,params_cases,p0=500,verbose=True)
```
Sweden, NaN
Sweden, NaN
# Run simulation
The following cell simulates disease spreading on a NetworkX graph `G` of your choice. When a node is infected, waiting times $T_g$ are randomly drawn for all neighboring nodes, and each neighbor is infected a time $T_g$ after the original node, if it is not already infected. The default distribution for drawing $T_g$ is a Gamma distribution, which is guaranteed to yield positive values. We parameterize the distribution by the mean $\mu_G$ and standard deviation $\sigma_G$. The Gamma distribution reduces to the exponential distribution (i.e., a Markovian spreading model) when these parameters are chosen such that $\mu_G = \sigma_G$.
```python
from covid_plotting import *
#Choose graph parameters
N=1000 #Number of nodes
#gamma = 4 #For scale-free graphs
#k = 1.5 #Mean degree for Erdos-Renyi
R0 = 1.5 #for fully connected graphs, we set the probability of infection to p = R0/N
p = R0/N
#Make the graph
#G = nx.expected_degree_graph(nx.utils.powerlaw_sequence(N, gamma), selfloops=False)
G = nx.gnp_random_graph(N, p)
#G = nx.complete_graph(N)
#Set up waiting time parameters
muG = 4 #Mean waiting time
sigG = 4 #standard deviation of waiting time
#Simulate
t,cum_cases = simulate_pandemic_edges(G,muG,sigG,tmax=80)
#Format like real data, and use same fitting script
t_dt = pd.to_datetime([datetime.today()-timedelta(days=int(np.max(t))-k) for k in range(int(np.max(t))+1)])
sim_data = pd.Series(cum_cases,index=t_dt)
th,logNmax,sigma,score = fit_erf(sim_data,p0=50)[0]
#Process output and plot
tau = ((t_dt-tref)/timedelta(days=1) - th)/sigma
Nmax = np.exp(logNmax)
fig,ax=plt.subplots()
ax.semilogy(t_dt,sim_data.values,'o-',label='Simulation data',ms=4)
ax.semilogy(t_dt,Nmax*norm.cdf(tau),label='Fit')
ax.xaxis.set_minor_locator(mdates.DayLocator())
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d'))
ax.set_xlabel('Date (mm-dd)')
ax.set_ylim((4,N))
ax.set_ylabel('Cumulative Cases')
fig.autofmt_xdate()
ax.legend()
plt.show()
#Also plot on log-log axes
fig,ax=plt.subplots()
ax.loglog(t,sim_data.values,'o-',label='Simulation data',ms=4)
ax.loglog(t,Nmax*norm.cdf(tau),label='Fit')
ax.set_xlabel('Elapsed Time')
ax.set_ylim((4,N))
ax.set_ylabel('Cumulative Cases')
ax.legend()
plt.show()
```
```python
#Also plot on log-log axes
fig,ax=plt.subplots()
ax.loglog(t+15,sim_data.values,'o-',label='Simulation data',ms=4)
ax.loglog(t+15,Nmax*norm.cdf(tau),label='Fit')
ax.set_xlabel('Elapsed Time')
ax.set_ylim((4,N))
ax.set_ylabel('Cumulative Cases')
ax.legend()
plt.show()
```
# Check confidence interval
```python
country = 'US'
region = 'Florida'
p0 = 50
data = deaths[country,region]
#Fit the data
params,parm0,s = fit_erf(data,p0=p0)
train = pd.to_numeric(data.loc[data>p0])
train.index=(train.index-tref)/timedelta(days=1)
sig_xi2 = params[-1]
fig,ax=plt.subplots(figsize=(4,3))
fig.subplots_adjust(bottom=0.2,left=0.2)
c = 0.95
M = len(train)
params_sweep = sweep_sigma(params,data,p0)
sigma, prob,scoremax = get_score_thresh(params_sweep,M,0.95)
ax.plot(sigma,prob,'k')
score = params_sweep[1:,3]
ax.fill_between(sigma[score<=scoremax],prob[score<=scoremax])
sigma = params_sweep[:,2]
dsig = np.diff(sigma)
sigma = sigma[1:]
pcut = np.exp(-scoremax*M/(2*sig_xi2))/(np.exp(-score*M/(2*sig_xi2))*dsig).sum()
ax.plot([sigma[0],sigma[-1]],[pcut,pcut],'k--')
ax.set_xlabel(r'$\sigma$')
ax.set_ylabel(r'$p(\sigma|N_i)$')
ax.set_title(country+', '+region)
plt.show()
```
```python
```
|
{"hexsha": "e036582f22da9ac7d7dca284f2745facc27d1fbc", "size": 643273, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "COVID-19 predictor.ipynb", "max_stars_repo_name": "Emergent-Behaviors-in-Biology/covid19", "max_stars_repo_head_hexsha": "ad2cd64f92ccd2d36db7cb5f78f0593719122fe5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-23T14:04:05.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-23T14:04:05.000Z", "max_issues_repo_path": "COVID-19 predictor.ipynb", "max_issues_repo_name": "Emergent-Behaviors-in-Biology/covid19", "max_issues_repo_head_hexsha": "ad2cd64f92ccd2d36db7cb5f78f0593719122fe5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "COVID-19 predictor.ipynb", "max_forks_repo_name": "Emergent-Behaviors-in-Biology/covid19", "max_forks_repo_head_hexsha": "ad2cd64f92ccd2d36db7cb5f78f0593719122fe5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-07-08T12:04:41.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-08T12:04:41.000Z", "avg_line_length": 1011.4355345912, "max_line_length": 103148, "alphanum_fraction": 0.9428889445, "converted": true, "num_tokens": 2514}
|
import pytest
import numpy as np
from pyrho.core.pgrid import PGrid
# Lattice extents along x and y (also used as the linspace upper bounds).
A, B = 1, 2
# Spatial frequencies of the test field along x and y.
NX, NY = 3, 2
@pytest.fixture
def pgrid_example():
    """Fixture: a PGrid sampling a 2D sinusoid on an A x B rectangular cell."""
    def sample(u, v):
        # Periodic test field with NX periods along x and NY along y.
        return np.sin(NX * u * 2 * np.pi) + np.cos(NY * v * 2 * np.pi)

    u_vals = np.linspace(0, A, 20, endpoint=False)
    v_vals = np.linspace(0, B, 40, endpoint=False)
    U, V = np.meshgrid(u_vals, v_vals, indexing="ij")
    return PGrid(sample(U, V), [[A, 0], [0, B]])
|
{"hexsha": "c007a6936087b860a01e8b3867b816c413fe9ad9", "size": 417, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/pyrho/core/tests/test_pgrid.py", "max_stars_repo_name": "mattmcdermott/pyrho", "max_stars_repo_head_hexsha": "7ab3bd893a8b310b8be61f33a1105b090a46cd32", "max_stars_repo_licenses": ["BSD-3-Clause-LBNL"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2021-02-26T21:12:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T01:34:54.000Z", "max_issues_repo_path": "src/pyrho/core/tests/test_pgrid.py", "max_issues_repo_name": "mattmcdermott/pyrho", "max_issues_repo_head_hexsha": "7ab3bd893a8b310b8be61f33a1105b090a46cd32", "max_issues_repo_licenses": ["BSD-3-Clause-LBNL"], "max_issues_count": 61, "max_issues_repo_issues_event_min_datetime": "2021-02-27T00:55:39.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-20T17:18:07.000Z", "max_forks_repo_path": "src/pyrho/core/tests/test_pgrid.py", "max_forks_repo_name": "mattmcdermott/pyrho", "max_forks_repo_head_hexsha": "7ab3bd893a8b310b8be61f33a1105b090a46cd32", "max_forks_repo_licenses": ["BSD-3-Clause-LBNL"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-04-15T18:38:03.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T00:06:56.000Z", "avg_line_length": 21.9473684211, "max_line_length": 70, "alphanum_fraction": 0.5683453237, "include": true, "reason": "import numpy", "num_tokens": 155}
|
------------------------------------------------------------------------
-- Safe modules that use --erased-cubical and --guardedness
------------------------------------------------------------------------
{-# OPTIONS --safe --erased-cubical --guardedness #-}
module README.Safe.Cubical.Erased.Guardedness where
-- M-types for indexed containers, defined coinductively (in Cubical
-- Agda).
import Container.Indexed.Variant.M.Codata
import Container.Indexed.M.Codata
|
{"hexsha": "37a3a3842669512c07c0cb86108057bf1223e940", "size": 471, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "README/Safe/Cubical/Erased/Guardedness.agda", "max_stars_repo_name": "nad/equality", "max_stars_repo_head_hexsha": "402b20615cfe9ca944662380d7b2d69b0f175200", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-05-21T22:58:50.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-02T17:18:15.000Z", "max_issues_repo_path": "README/Safe/Cubical/Erased/Guardedness.agda", "max_issues_repo_name": "nad/equality", "max_issues_repo_head_hexsha": "402b20615cfe9ca944662380d7b2d69b0f175200", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "README/Safe/Cubical/Erased/Guardedness.agda", "max_forks_repo_name": "nad/equality", "max_forks_repo_head_hexsha": "402b20615cfe9ca944662380d7b2d69b0f175200", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6428571429, "max_line_length": 72, "alphanum_fraction": 0.5286624204, "num_tokens": 88}
|
\section{Constraint Satisfaction Problems}
Outline:
\begin{itemize}
\item A special subset of search problems
\item State is defined by variables Xi with values from a domain D (sometimes D depends on i)
\item Goal test is a set of constraints specifying allowable combinations of values for subsets of variables
\end{itemize}
\paragraph{Use cases}
\begin{itemize}
\item Assignment
\item Timetabling
\item Hardware configuration
\item Transportation scheduling
\item Factory scheduling
\item Circuit layout
\item Fault diagnosis
\end{itemize}
\subsection{Solving constraint satisfaction problems}
\paragraph{Backtracking}
\begin{itemize}
\item Fix an ordering for variables, and select values for variables in this order. Because assignments are commutative (e.g. assigning WA = Red, NT = Green is identical to NT = Green, WA = Red), this is valid.
\item When selecting values for a variable, only select values that don’t conflict with any previously assigned values. If no such values exist, backtrack and return to the previous variable, changing its value.
\end{itemize}
\subsection{Filtering}
An arc $X \rightarrow Y$ if consistent if and only if for every value $x$ in X there is some $y$ in $Y$ which could be assigned without violating a constraint.
\subsubsection{Arc consistency}
\begin{itemize}
\item Begin by storing all arcs in the constraint graph for the CSP in a queue Q. For a binary constraint $(X,Y)$, there are two arcs to add to the queue - $X \rightarrow Y$ and $Y \rightarrow X$
\item Check the arcs in the queue for consistency:
\begin{itemize}
\item If one arc $X \rightarrow Y$ is not consistent for a given value $x$, remove $x$ from the domain of $X$
\item If at least one value is removed for a variable $X_i$, add arcs of the form $X_k \rightarrow X_i$ to the queue, for all unassigned variables $X_k$ (skipping duplicate arcs already in the queue).
\item Continue until Q is empty, or the domain of some variable is empty and triggers a backtrack.
\end{itemize}
\end{itemize}
\subsubsection{Forward checking}
Whenever a value is assigned to a variable $X$, prune the domains of unassigned variables that share a constraint with $X$, removing any value that would violate the constraint if assigned.
Forward checking is a special type of enforcing arc consistency, in which we only enforce the arcs pointing into the newly assigned variable.
\subsection{Ordering}
\paragraph{Minimum remaining values (MRV)} Choose the variable with the fewest legal options left in its domain
\paragraph{Iterative improvement}
\begin{itemize}
\item Take an assignment with unsatisfied constraints
\item While a goal has not been reached:
\begin{itemize}
\item Reassign one variable to another value. With min-conflict heuristic: choose a value that violates the fewest constraints
\end{itemize}
\end{itemize}
\subsection{K-Consistency}
\textbf{K-Consistency}: For each set of $k$ nodes, any consistent assignment to $k-1$ of them can be extended to the $k^{th}$ node.
\textbf{Strong k-consistency}: also $k-1$, $k-2$, $\cdots$ $1$ consistent
Strong $n$-consistency means we can solve without backtracking!
\subsection{Tree-Structured CSPs}
Theorem: if the constraint graph has no loops, the CSP can be solved in $O(n d^2)$ time, where $n$ is the number of nodes and $d$ is the size of the largest domain.
\begin{itemize}
\item Pick an arbitrary node in the constraint graph for the CSP to serve as the root of the tree (it doesn’t matter which one because basic graph theory tells us any node of a tree can serve as a root).
\item Convert all undirected edges in the tree to directed edges that point away from the root. Then linearize (or topologically sort) the resulting directed acyclic graph
\item Perform a backwards pass of arc consistency. Iterating from i = n down to i = 2, enforce arc consistency for all arcs $Parent(Xi) \rightarrow Xi$
\item For the linearized CSP from above, this domain pruning will eliminate a few values, leaving us with the following
\item Perform a forward assignment. Starting from X1 and going to Xn, assign each Xi a value consistent with that of its parent. Because we’ve enforced arc consistency on all of these arcs, no matter what value we select for any node, we know that its children will each all have at least one consistent value. Hence, this iterative assignment guarantees a correct solution, a fact which can be proven inductively without difficulty.
\end{itemize}
The tree structured algorithm can be extended to CSPs that are reasonably close to being tree-structured
with cutset conditioning. Cutset conditioning involves first finding the smallest subset of variables in a constraint graph such that their removal results in a tree (such a subset is known as a cutset for the graph).
|
{"hexsha": "6703f9bdcd8b5c83e5708e7e051aaea3e3fa2f23", "size": 4829, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "notes/csp.tex", "max_stars_repo_name": "Calcifer777/columbia-ai", "max_stars_repo_head_hexsha": "aaa7173bca6f2bc9edfe6fe55b5a1a37ab310066", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "notes/csp.tex", "max_issues_repo_name": "Calcifer777/columbia-ai", "max_issues_repo_head_hexsha": "aaa7173bca6f2bc9edfe6fe55b5a1a37ab310066", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "notes/csp.tex", "max_forks_repo_name": "Calcifer777/columbia-ai", "max_forks_repo_head_hexsha": "aaa7173bca6f2bc9edfe6fe55b5a1a37ab310066", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 56.8117647059, "max_line_length": 437, "alphanum_fraction": 0.7601987989, "num_tokens": 1130}
|
"""
Module with utility functions for BLTandPantelides.
* Author: Hilding Elmqvist, Mogram AB
* Date: July-August 2016
* License: MIT
"""
module BLTandPantelidesUtilities
#using ..BLTandPantelides
using ..ModiaLogging
export buildExtendedSystem, addDependencies, buildFullIncidence
export invertDer, invertAssign
export createNames, printList, printAssignedEquations, printSortedEquations, printUnassigned, makeList
"""
function buildExtendedSystem(A)
Extend a system according to Pantelides equation (15), i.e. return the incidence for function h(x, der(x)).
* `A`: A[j] = if V[k] = der(V[j]) then k else 0
* `return G`: bipartite graph
Example:
julia> BLTandPantelidesUtilities.buildExtendedSystem([5,6,7,8,0,0,0,0,0])
4-element Array{Any,1}:
[1,5]
[2,6]
[3,7]
[4,8]
"""
function buildExtendedSystem(A)
    # Collect an incidence row [j, der(j)] for every variable that has a
    # derivative, i.e. the incidence of h(x, der(x)) in Pantelides eq. (15).
    G = []
    for (j, k) in enumerate(A)
        if k > 0
            push!(G, [j, k])   # equation depends on V[j] and its derivative V[k]
        end
    end
    return G
end
function addDependencies(G, Vindices)
    # Append the extra V-node indices to every equation's incidence row,
    # producing a new graph (the input G is left untouched).
    newG = []
    for row in G
        push!(newG, vcat(row, Vindices))
    end
    return newG
end
"""
buildFullIncidence(n,m)
Build a bipartite graph with full incidence, i.e. all of the n E-nodes refer to all of the m V-nodes.
* `n`: number of E-nodes
* `m`: number of V-nodes
* `return G`: bipartite graph
Example:
julia> BLTandPantelidesUtilities.buildFullIncidence(2,3)
2-element Array{Any,1}:
[1,2,3]
[1,2,3]
"""
function buildFullIncidence(n,m)
    # Each of the n E-nodes references every one of the m V-nodes;
    # collect(1:m) gives each row its own vector, so rows stay independent.
    G = []
    for _ in 1:n
        push!(G, collect(1:m))
    end
    return G
end
"""
function invertDer(A)
Invert derivative relationships for variables and equations
* `A`: A[j] = if V[k] = der(V[j]) then k else 0 (or correspondingly for E-nodes)
* `return orgIndex`: index of original variable or equation
* `return derOrder`: derivative order
Note that invertDer can be used to invert from list of E-nodes to list of V-nodes as well.
Example:
julia> BLTandPantelidesUtilities.invertDer([5,6,7,8,10,11,0,0,0,0,0])
([1,2,3,4,1,2,3,4,9,1,2],[0,0,0,0,1,1,1,1,0,2,2])
"""
function invertDer(A)
    # orgIndex maps each node to its original (underived) node; derOrder is
    # the derivative order. Processing in increasing index order lets the
    # chain x -> der(x) -> der2(x) propagate correctly.
    orgIndex = collect(1:length(A))
    derOrder = zeros(Int, length(A))
    for j in eachindex(A)
        k = A[j]
        if k > 0
            derOrder[k] = derOrder[j] + 1
            orgIndex[k] = orgIndex[j]
        end
    end
    return orgIndex, derOrder
end
"""
invertAssign(assign, n=length(assign))
Invert assignment relationships for variables and equations.
* `assign`: assign[j] contains the E-node to which V-node j is assigned or 0 if V-node j not assigned
* `n`: number of E-nodes
* `return invAssign`: invAssign[i] contains the V-node to which E-node i is assigned or 0 if E-node i not assigned
* `return unAssigned`: unassigned V-nodes
Note that invertAssign can be used to invert from list of E-nodes to list of V-nodes as well.
Example:
julia> inv=BLTandPantelidesUtilities.invertAssign([0,0,0,0,1,2,7,4,3,9,8])
([5,6,9,8,0,0,7,11,10,0,0],[1,2,3,4])
julia> BLTandPantelides.invertAssign(inv[1])
([0,0,0,0,1,2,7,4,3,9,8],[5,6,10,11])
"""
function invertAssign(assign, n=length(assign))
    # invAssign[i] is the V-node assigned to E-node i (0 if none);
    # unAssigned collects the V-nodes without an assignment.
    invAssign = zeros(Int, n)
    unAssigned = Int[]
    for (j, i) in enumerate(assign)
        if i > 0
            invAssign[i] = j
        else
            push!(unAssigned, j)
        end
    end
    return invAssign, unAssigned
end
"""
function createNames(infixes, A)
Creates names.
* `infixes`: infix strings for original variable
* `A`: A[j] = if V[k] = der(V[j]) then k else 0
Example:
julia> BLTandPantelidesUtilities.createNames(["x", "y", "w", "z", "", "", "", "", "T"], [5,6,7,8,10,11,0,0,0,0,0])
x, y, w, z, der(x), der(y), der(w), der(z), T, der2(x), der2(y)
"""
function createNames(infixes, A)
    # Build a display name for every node: the original infix for underived
    # nodes, "der(x)" / "der2(x)" / ... for derivatives.
    names = []
    orgIndex, derOrder = invertDer(A)
    for k in 1:length(A)
        base = infixes[orgIndex[k]]
        if derOrder[k] == 0
            push!(names, base)
        else
            prefix = derOrder[k] == 1 ? "der" : "der" * string(derOrder[k])
            push!(names, "$prefix($base)")
        end
    end
    names
end
"""
function printList(infixes, indices, A, vertical=false)
Print list of variables or equations.
* `infixes`: infix strings for original variable or equation
* `indices`: indices for the variables or equations to be printed
* `A`: A[j] = if V[k] = der(V[j]) then k else 0 (or correspondingly for E-nodes)
* `vertical`: if vertical then new line separation else comma separation
Example:
julia> BLTandPantelidesUtilities.printList(["x", "y", "w", "z", "", "", "", "", "T"], 1:11, [5,6,7,8,10,11,0,0,0,0,0])
x, y, w, z, der(x), der(y), der(w), der(z), T, der2(x), der2(y)
"""
function printList(infixes, indices, A, vertical=false)
# Recover original name index and differentiation order for every node.
(orgIndex, derOrder) = invertDer(A)
for ind in 1:length(indices)
j = indices[ind]
# Index 0 means "no node"; such entries are skipped entirely.
if j > 0
# Separator goes BEFORE every entry except the first.
if ind > 1
if vertical
loglnModia()
else
logModia(", ")
end
end
# Differentiated entries are wrapped: der(...)/derN(...) — DER when vertical.
if derOrder[j] > 0
if vertical
logModia("DER")
else
logModia("der")
end
if derOrder[j] > 1
logModia(derOrder[j])
end
logModia("(")
end
logModia(infixes[orgIndex[j]])
if derOrder[j] > 0
logModia(")")
end
end
end
# Terminate the listing with a newline regardless of separator mode.
loglnModia()
end
# Like printList, but collects the formatted names into a Vector{String}
# instead of logging them. Differentiated entries are rendered with a
# "der"/"DER" prefix (plus order for order > 1) joined by "_", e.g. "der2_x".
# Entries with index 0 are skipped.
function makeList(infixes, indices, A, vertical=false)
    (orgIndex, derOrder) = invertDer(A)
    result = String[]
    for j in indices
        j > 0 || continue
        order = derOrder[j]
        name = ""
        if order > 0
            name = vertical ? "DER" : "der"
            if order > 1
                name *= string(order)
            end
            name *= "_"
        end
        name *= string(infixes[orgIndex[j]])
        push!(result, name)
    end
    return result
end
"""
printAssignedEquations(equations, variables, indices, assign, A, B)
Print assigned equations.
* `equations`: infix string for original equations
* `variables`: infix string for original variables
* `indices`: indices for the equations to be printed
* `assign`: assign[j] contains the E-node to which V-node j is assigned or 0 if V-node j not assigned
* `A`: A[j] = if V[k] = der(V[j]) then k else 0
* `B`: B[i] = if E[l] = der(E[l]) then l else 0
Example:
See testBLTandPantelides.testPendulum
"""
function printAssignedEquations(equations, variables, indices, assign, A, B)
# Differentiation bookkeeping for variables (A) and equations (B).
(orgIndexVar, derOrderVar) = invertDer(A)
(orgIndexEqu, derOrderEqu) = invertDer(B)
(assignedVar, unAssigned) = invertAssign(assign)
if unAssigned != []
# @show assignedVar unAssigned
end
for i in indices
# "  <i>:" — equation number right-aligned in a 5-char field.
logModia(lpad(string(i)*":", 5, " "))
j = assignedVar[i]
if j > 0
# Variable column: wrap differentiated variables in der(...)/derN(...).
if derOrderVar[j] == 1
prefix = "der("
suffix = ")"
elseif derOrderVar[j] > 1
prefix = "der"*string(derOrderVar[j])*"("
suffix = ")"
else
prefix = ""
suffix = ""
end
logModia(lpad(prefix*string(variables[orgIndexVar[j]])*suffix, 25, " "))
else
# No assigned variable: keep column alignment with blanks.
logModia(" "^25)
end
logModia(": ")
# Equation column: differentiated equations get a DER( ... ) wrapper.
if derOrderEqu[i] == 1
prefix = "DER( "
suffix = " )"
elseif derOrderEqu[i] > 1
prefix = "DER"*string(derOrderEqu[i])*"( "
suffix = " )"
else
prefix = ""
suffix = ""
end
loglnModia(prefix*string(equations[orgIndexEqu[i]])*suffix)
end
end
"""
printSortedEquations(equations, variables, components, assign, A, B)
Print sorted equations.
* `equations`: infix string for original equations
* `variables`: infix string for original variables
* `components`: cell array of components. Each component is a list of indices to E-nodes
* `assign`: assign[j] contains the E-node to which V-node j is assigned or 0 if V-node j not assigned
* `A`: A[j] = if V[k] = der(V[j]) then k else 0
* `B`: B[i] = if E[l] = der(E[l]) then l else 0
Example:
See testBLTandPantelides.testPendulum
"""
function printSortedEquations(equations, variables, components, assign, A, B)
    # Explain the output format once, then print every component in BLT order.
    loglnModia("[assigned variable]: [differentiation] equation")
    loglnModia("Strongly connected components are enclosed in []")
    for component in components
        # Components with more than one equation form an algebraic loop and
        # are bracketed to make the coupling visible.
        isLoop = length(component) > 1
        if isLoop
            loglnModia("[")
        end
        printAssignedEquations(equations, variables, component, assign, A, B)
        if isLoop
            loglnModia("]")
        end
    end
end
"""
    printUnassigned(equations, variables, assign, A, B, vActive=[])

Print unassigned variables and equations.
* `equations`: infix string for original equations
* `variables`: infix string for original variables
* `assign`: assign[j] contains the E-node to which V-node j is assigned or 0 if V-node j not assigned
* `A`: A[j] = if V[k] = der(V[j]) then k else 0
* `B`: B[i] = if E[l] = der(E[l]) then l else 0
* `vActive`: if nonempty, only variables v with vActive[v] true are reported
Example:
See testBLTandPantelides.testPendulum
"""
function printUnassigned(equations, variables, assign, A, B, vActive=[])
    # Inverting the assignment yields the unassigned V-nodes; inverting the
    # inverse yields the unassigned E-nodes.
    (invAssign, unAssignedVariables) = invertAssign(assign, length(B))
    (ass, unAssignedEquations) = invertAssign(invAssign, length(assign))
    if vActive != []
        # Restrict the report to active variables.
        unAssignedVariables = [v for v in unAssignedVariables if vActive[v]]
    end
    loglnModia("\nUnassigned variables:")
    printList(variables, unAssignedVariables, A)
    loglnModia("\nUnassigned equations:")
    printList(equations, unAssignedEquations, B, true)
end
end
|
{"hexsha": "38090903fe0c1fcf31005673ab2466c1ed3501c4", "size": 9301, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/symbolic/BLTandPantelides/BLTandPantelidesUtilities.jl", "max_stars_repo_name": "traversaro/Modia.jl", "max_stars_repo_head_hexsha": "581e6062983020ad6cd6cb366b0ea838fc5f03c2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/symbolic/BLTandPantelides/BLTandPantelidesUtilities.jl", "max_issues_repo_name": "traversaro/Modia.jl", "max_issues_repo_head_hexsha": "581e6062983020ad6cd6cb366b0ea838fc5f03c2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/symbolic/BLTandPantelides/BLTandPantelidesUtilities.jl", "max_forks_repo_name": "traversaro/Modia.jl", "max_forks_repo_head_hexsha": "581e6062983020ad6cd6cb366b0ea838fc5f03c2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.7367021277, "max_line_length": 118, "alphanum_fraction": 0.6488549618, "num_tokens": 3025}
|
function test_ode_test ( )
%*****************************************************************************80
%
%% TEST_ODE_TEST tests the TEST_ODE library.
%
% Licensing:
%
% This code is distributed under the GNU LGPL license.
%
% Modified:
%
% 23 February 2013
%
% Author:
%
% John Burkardt
%
%
%  Print a banner, run each individual test, then print a footer.
%
timestamp ( );
fprintf ( 1, '\n' );
fprintf ( 1, 'TEST_ODE_TEST\n' );
fprintf ( 1, ' MATLAB version\n' );
fprintf ( 1, ' Test the TEST_ODE library.\n' );
%
%  Run the individual tests in order.
%
test_ode_test01 ( );
test_ode_test02 ( );
test_ode_test03 ( );
test_ode_test04 ( );
test_ode_test05 ( );
%
% Terminate.
%
fprintf ( 1, '\n' );
fprintf ( 1, 'TEST_ODE_TEST\n' );
fprintf ( 1, ' Normal end of execution.\n' );
fprintf ( 1, '\n' );
timestamp ( );
return
end
|
{"author": "johannesgerer", "repo": "jburkardt-m", "sha": "1726deb4a34dd08a49c26359d44ef47253f006c1", "save_path": "github-repos/MATLAB/johannesgerer-jburkardt-m", "path": "github-repos/MATLAB/johannesgerer-jburkardt-m/jburkardt-m-1726deb4a34dd08a49c26359d44ef47253f006c1/test_ode/test_ode_test.m"}
|
[STATEMENT]
lemma cube:
shows "(v \\ t) \\ (u \\ t) = (v \\ u) \\ (t \\ u)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (v \ t) \ (u \ t) = (v \ u) \ (t \ u)
[PROOF STEP]
using cube_ax
[PROOF STATE]
proof (prove)
using this:
(?v \ ?t) \ (?u \ ?t) \<noteq> null \<Longrightarrow> (?v \ ?t) \ (?u \ ?t) = (?v \ ?u) \ (?t \ ?u)
goal (1 subgoal):
1. (v \ t) \ (u \ t) = (v \ u) \ (t \ u)
[PROOF STEP]
by metis
|
{"llama_tokens": 207, "file": "ResiduatedTransitionSystem_ResiduatedTransitionSystem", "length": 2}
|
/*
* This file is part of Poedit (https://poedit.net)
*
* Copyright (C) 2013-2020 Vaclav Slavik
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*/
#include "transmem.h"
#include "catalog.h"
#include "errors.h"
#include "str_helpers.h"
#include "utility.h"
#include <wx/stdpaths.h>
#include <wx/utils.h>
#include <wx/dir.h>
#include <wx/filename.h>
#include <wx/translation.h>
#include <time.h>
#include <mutex>
#include <boost/uuid/uuid.hpp>
#include <boost/uuid/uuid_io.hpp>
#include <boost/uuid/name_generator.hpp>
#include <boost/uuid/string_generator.hpp>
#include <Lucene.h>
#include <LuceneException.h>
#include <MMapDirectory.h>
#include <SerialMergeScheduler.h>
#include <SimpleFSDirectory.h>
#include <StandardAnalyzer.h>
#include <IndexWriter.h>
#include <IndexSearcher.h>
#include <IndexReader.h>
#include <Document.h>
#include <Field.h>
#include <DateField.h>
#include <PrefixQuery.h>
#include <StringUtils.h>
#include <TermQuery.h>
#include <BooleanQuery.h>
#include <PhraseQuery.h>
#include <Term.h>
#include <ScoreDoc.h>
#include <TopDocs.h>
#include <StringReader.h>
#include <TokenStream.h>
#include <TermAttribute.h>
#include <PositionIncrementAttribute.h>
using namespace Lucene;
namespace
{
// Converts Lucene and std exceptions escaping the preceding `try` block into
// Poedit's Exception type; index-corruption errors get a dedicated message.
// Must be expanded exactly where a catch clause is syntactically valid,
// i.e. immediately after a `try { ... }` block.
#define CATCH_AND_RETHROW_EXCEPTION \
catch (LuceneException& e) \
{ \
switch (e.getType()) \
{ \
case LuceneException::CorruptIndex: \
case LuceneException::FileNotFound: \
case LuceneException::NoSuchDirectory: \
throw Exception(wxString::Format(_("Translation memory database is corrupted: %s (%d)."), \
e.getError(), (int)e.getType())); \
default: \
throw Exception(wxString::Format(_("Translation memory error: %s (%d)."), \
e.getError(), (int)e.getType())); \
} \
} \
catch (std::exception& e) \
{ \
throw Exception(e.what()); \
}
// Manages IndexReader and Searcher instances in multi-threaded environment.
// Curiously, Lucene uses shared_ptr-based refcounting *and* explicit one as
// well, with a crucial part not well protected.
//
// See https://issues.apache.org/jira/browse/LUCENE-3567 for the exact issue
// encountered by Poedit's use as well. For an explanation of the manager
// class, see
// http://blog.mikemccandless.com/2011/09/lucenes-searchermanager-simplifies.html
// http://blog.mikemccandless.com/2011/11/near-real-time-readers-with-lucenes.html
class SearcherManager
{
public:
// Takes the writer's near-real-time reader; the searcher wraps that reader.
SearcherManager(IndexWriterPtr writer)
{
m_reader = writer->getReader();
m_searcher = newLucene<IndexSearcher>(m_reader);
}
~SearcherManager()
{
// Drop the searcher first, then release our explicit refcount on the reader.
m_searcher.reset();
m_reader->decRef();
}
// Safe, properly ref-counting (in Lucene way, not just shared_ptr) holder.
template<typename T>
class SafeRef
{
public:
typedef boost::shared_ptr<T> TPtr;
// Move transfers responsibility for the explicit decRef to the new holder.
SafeRef(SafeRef&& other) : m_mng(other.m_mng) { std::swap(m_ptr, other.m_ptr); }
~SafeRef() { if (m_ptr) m_mng.DecRef(m_ptr); }
TPtr ptr() { return m_ptr; }
T* operator->() const { return m_ptr.get(); }
SafeRef(const SafeRef&) = delete;
SafeRef& operator=(const SafeRef&) = delete;
private:
friend class SearcherManager;
explicit SafeRef(SearcherManager& m, TPtr ptr) : m_mng(m), m_ptr(ptr) {}
SearcherManager& m_mng;
boost::shared_ptr<T> m_ptr;
};
// Returns the current reader, incRef'd under the lock; SafeRef releases it.
SafeRef<IndexReader> Reader()
{
std::lock_guard<std::mutex> guard(m_mutex);
ReloadReaderIfNeeded();
m_reader->incRef();
return SafeRef<IndexReader>(*this, m_reader);
}
// Returns the current searcher; pins it by incRef'ing its underlying reader.
SafeRef<IndexSearcher> Searcher()
{
std::lock_guard<std::mutex> guard(m_mutex);
ReloadReaderIfNeeded();
m_searcher->getIndexReader()->incRef();
return SafeRef<IndexSearcher>(*this, m_searcher);
}
private:
void ReloadReaderIfNeeded()
{
// contract: m_mutex is locked when this function is called
if (m_reader->isCurrent())
return; // nothing to do
// Swap in a reopened reader + matching searcher, then release the stale reader.
auto newReader = m_reader->reopen();
auto newSearcher = newLucene<IndexSearcher>(newReader);
m_reader->decRef();
m_reader = newReader;
m_searcher = newSearcher;
}
// Both overloads serialize decRef with reloads via m_mutex.
void DecRef(IndexReaderPtr& r)
{
std::lock_guard<std::mutex> guard(m_mutex);
r->decRef();
}
void DecRef(IndexSearcherPtr& s)
{
std::lock_guard<std::mutex> guard(m_mutex);
s->getIndexReader()->decRef();
}
IndexReaderPtr m_reader;
IndexSearcherPtr m_searcher;
std::mutex m_mutex;
};
} // anonymous namespace
// ----------------------------------------------------------------
// TranslationMemoryImpl
// ----------------------------------------------------------------
// Owns the Lucene index (analyzer + writer) and the reader/searcher manager;
// all TranslationMemory operations delegate here.
class TranslationMemoryImpl
{
public:
#ifdef __WXMSW__
// NOTE(review): Windows uses the non-mmap directory — presumably to avoid
// mapped-file locking issues; confirm before changing.
typedef SimpleFSDirectory DirectoryType;
#else
typedef MMapDirectory DirectoryType;
#endif
TranslationMemoryImpl() { Init(); }
~TranslationMemoryImpl()
{
// Release reader/searcher before closing the writer they were obtained from.
m_mng.reset();
m_writer->close();
}
SuggestionsList Search(const Language& srclang, const Language& lang,
const std::wstring& source);
void ExportData(TranslationMemory::IOInterface& destination);
void ImportData(std::function<void(TranslationMemory::IOInterface&)> source);
std::shared_ptr<TranslationMemory::Writer> GetWriter() { return m_writerAPI; }
void GetStats(long& numDocs, long& fileSize);
static std::wstring GetDatabaseDir();
private:
void Init();
private:
AnalyzerPtr m_analyzer;
IndexWriterPtr m_writer;
std::shared_ptr<SearcherManager> m_mng;
std::shared_ptr<TranslationMemory::Writer> m_writerAPI;
};
// Returns the directory holding the TM index: $XDG_DATA_HOME/poedit (or
// ~/.local/share/poedit) on non-OSX Unix, the wx per-user data dir elsewhere,
// with "TranslationMemory" appended. Creates the parent directory if missing.
std::wstring TranslationMemoryImpl::GetDatabaseDir()
{
wxString data;
#if defined(__UNIX__) && !defined(__WXOSX__)
if ( !wxGetEnv("XDG_DATA_HOME", &data) )
data = wxGetHomeDir() + "/.local/share";
data += "/poedit";
#else
data = wxStandardPaths::Get().GetUserDataDir();
#endif
// ensure the parent directory exists:
wxFileName::Mkdir(data, wxS_DIR_DEFAULT, wxPATH_MKDIR_FULL);
data += wxFILE_SEP_PATH;
data += "TranslationMemory";
return data.ToStdWstring();
}
namespace
{
static const int DEFAULT_MAXHITS = 10;
// Normalized score that must be met for a suggestion to be shown. This is
// an empirical guess of what constitutes good matches.
static const double QUALITY_THRESHOLD = 0.6;
// Maximum allowed difference in phrase length, in #terms.
static const int MAX_ALLOWED_LENGTH_DIFFERENCE = 2;
void AddOrUpdateResult(SuggestionsList& all, Suggestion&& r)
{
    // Different source texts can carry the same translation (e.g. "Open File"
    // on Mac vs "Open file" on Windows, both "Otevřít soubor"), so hits can't
    // simply be appended: keep one entry per translation text and remember the
    // best score seen for it.
    for (auto& existing : all)
    {
        if (existing.text == r.text)
        {
            if (r.score > existing.score)
                existing = r;
            return;
        }
    }
    all.push_back(std::move(r));
}
// Return translation (or source) text field.
//
// Older versions of Poedit used to store C-like escaped text (e.g. "\n" instead
// of newline), but starting with 1.8, the "true" form of the text is stored.
// To preserve compatibility with older data, a version field is stored with
// TM documents and this function decides whether to decode escapes or not.
//
// TODO: remove this a few years down the road.
std::wstring get_text_field(DocumentPtr doc, const std::wstring& field)
{
    // Documents written by Poedit >= 1.8 carry a version field ("v") and store
    // the text verbatim; pre-1.8 documents lack it and store C-escaped text
    // that must be decoded on read.
    const auto version = doc->get(L"v");
    const auto value = doc->get(field);
    return version.empty() ? UnescapeCString(value) : value;
}
// Runs `query` against the index, restricted to the given source/target
// language queries, and invokes callback(doc, score) for every hit whose
// normalized score clears `scoreThreshold`. Scores are normalized to the top
// hit; an exact source-text match is promoted to 1.0, other hits are capped
// below 1.0, length-penalized, and multiplied by `scoreScaling`.
template<typename T>
void PerformSearchWithBlock(IndexSearcherPtr searcher,
QueryPtr srclang, QueryPtr lang,
const std::wstring& exactSourceText,
QueryPtr query,
double scoreThreshold,
double scoreScaling,
T callback)
{
// All three sub-queries must match: source language, target language, text.
auto fullQuery = newLucene<BooleanQuery>();
fullQuery->add(srclang, BooleanClause::MUST);
fullQuery->add(lang, BooleanClause::MUST);
fullQuery->add(query, BooleanClause::MUST);
auto hits = searcher->search(fullQuery, DEFAULT_MAXHITS);
for (int i = 0; i < hits->scoreDocs.size(); i++)
{
const auto& scoreDoc = hits->scoreDocs[i];
// Normalize against the best hit so the threshold is query-independent.
auto score = scoreDoc->score / hits->maxScore;
if (score < scoreThreshold)
continue;
auto doc = searcher->doc(scoreDoc->doc);
auto src = get_text_field(doc, L"source");
if (src == exactSourceText)
{
score = 1.0;
}
else
{
if (score == 1.0)
{
score = 0.95; // can't score non-exact thing as 100%:
// Check against too small queries having perfect hit in a large stored text.
// Do this by penalizing too large difference in lengths of the source strings.
double len1 = exactSourceText.size();
double len2 = src.size();
score *= 1.0 - 0.4 * (std::abs(len1 - len2) / std::max(len1, len2));
}
score *= scoreScaling;
}
callback(doc, score);
}
}
// Convenience wrapper over PerformSearchWithBlock: converts each hit into a
// Suggestion (translation text, score, creation time, uuid), deduplicates by
// translation text keeping the best score, and sorts the result.
void PerformSearch(IndexSearcherPtr searcher,
QueryPtr srclang, QueryPtr lang,
const std::wstring& exactSourceText,
QueryPtr query,
SuggestionsList& results,
double scoreThreshold,
double scoreScaling)
{
PerformSearchWithBlock
(
searcher, srclang, lang, exactSourceText, query,
scoreThreshold, scoreScaling,
[&results](DocumentPtr doc, double score)
{
auto t = get_text_field(doc, L"trans");
time_t ts = DateField::stringToTime(doc->get(L"created"));
Suggestion r {t, score, int(ts)};
r.id = StringUtils::toUTF8(doc->get(L"uuid"));
AddOrUpdateResult(results, std::move(r));
}
);
// stable_sort keeps the relative order of equal-ranked suggestions.
std::stable_sort(results.begin(), results.end());
}
} // anonymous namespace
// Finds translation suggestions for `source`, trying progressively sloppier
// queries: exact phrase, then phrase with slop 1 (scaled 0.9), then a bag-of-
// terms query (scaled 0.8, filtered by length difference). Returns the first
// non-empty result set; swallows Lucene errors and returns an empty list.
SuggestionsList TranslationMemoryImpl::Search(const Language& srclang,
const Language& lang,
const std::wstring& source)
{
try
{
// TODO: query by srclang too!
auto srclangQ = newLucene<TermQuery>(newLucene<Term>(L"srclang", srclang.WCode()));
const Lucene::String fullLang = lang.WCode();
const Lucene::String shortLang = StringUtils::toUnicode(lang.Lang());
QueryPtr langPrimary = newLucene<TermQuery>(newLucene<Term>(L"lang", fullLang));
QueryPtr langSecondary;
if (fullLang == shortLang)
{
// for e.g. 'cs', search also 'cs_*' (e.g. 'cs_CZ')
langSecondary = newLucene<PrefixQuery>(newLucene<Term>(L"lang", shortLang + L"_"));
}
else
{
// search short variants of the language too
langSecondary = newLucene<TermQuery>(newLucene<Term>(L"lang", shortLang));
}
// Related-but-not-exact language matches rank slightly lower.
langSecondary->setBoost(0.85);
auto langQ = newLucene<BooleanQuery>();
langQ->add(langPrimary, BooleanClause::SHOULD);
langQ->add(langSecondary, BooleanClause::SHOULD);
SuggestionsList results;
const Lucene::String sourceField(L"source");
// Build both a terms query and a phrase query in one tokenization pass.
auto boolQ = newLucene<BooleanQuery>();
auto phraseQ = newLucene<PhraseQuery>();
auto stream = m_analyzer->tokenStream(sourceField, newLucene<StringReader>(source));
int sourceTokensCount = 0;
int sourceTokenPosition = -1;
while (stream->incrementToken())
{
sourceTokensCount++;
auto word = stream->getAttribute<TermAttribute>()->term();
sourceTokenPosition += stream->getAttribute<PositionIncrementAttribute>()->getPositionIncrement();
auto term = newLucene<Term>(sourceField, word);
boolQ->add(newLucene<TermQuery>(term), BooleanClause::SHOULD);
phraseQ->add(term, sourceTokenPosition);
}
auto searcher = m_mng->Searcher();
// Try exact phrase first:
PerformSearch(searcher.ptr(), srclangQ, langQ, source, phraseQ, results,
QUALITY_THRESHOLD, /*scoreScaling=*/1.0);
if (!results.empty())
return results;
// Then, if no matches were found, permit being a bit sloppy:
phraseQ->setSlop(1);
PerformSearch(searcher.ptr(), srclangQ, langQ, source, phraseQ, results,
QUALITY_THRESHOLD, /*scoreScaling=*/0.9);
if (!results.empty())
return results;
// As the last resort, try terms search. This will almost certainly
// produce low-quality results, but hopefully better than nothing.
boolQ->setMinimumNumberShouldMatch(std::max(1, boolQ->getClauses().size() - MAX_ALLOWED_LENGTH_DIFFERENCE));
PerformSearchWithBlock
(
searcher.ptr(), srclangQ, langQ, source, boolQ,
QUALITY_THRESHOLD, /*scoreScaling=*/0.8,
[=,&results](DocumentPtr doc, double score)
{
auto s = get_text_field(doc, sourceField);
auto t = get_text_field(doc, L"trans");
// Re-tokenize the stored source and reject hits whose length differs
// too much from the query — guards against tiny queries matching
// inside long stored texts.
auto stream2 = m_analyzer->tokenStream(sourceField, newLucene<StringReader>(s));
int tokensCount2 = 0;
while (stream2->incrementToken())
tokensCount2++;
if (std::abs(tokensCount2 - sourceTokensCount) <= MAX_ALLOWED_LENGTH_DIFFERENCE)
{
time_t ts = DateField::stringToTime(doc->get(L"created"));
Suggestion r {t, score, int(ts)};
r.id = StringUtils::toUTF8(doc->get(L"uuid"));
AddOrUpdateResult(results, std::move(r));
}
}
);
std::stable_sort(results.begin(), results.end());
return results;
}
catch (LuceneException&)
{
// Search is best-effort; a broken index yields no suggestions, not an error.
return SuggestionsList();
}
}
// Replays every document in the index into `destination` (languages, source,
// translation, creation time). Rethrows index errors as Poedit Exceptions.
void TranslationMemoryImpl::ExportData(TranslationMemory::IOInterface& destination)
{
try
{
auto reader = m_mng->Reader();
int32_t numDocs = reader->numDocs();
for (int32_t i = 0; i < numDocs; i++)
{
auto doc = reader->document(i);
destination.Insert
(
Language::TryParse(doc->get(L"srclang")),
Language::TryParse(doc->get(L"lang")),
get_text_field(doc, L"source"),
get_text_field(doc, L"trans"),
DateField::stringToTime(doc->get(L"created"))
);
}
}
CATCH_AND_RETHROW_EXCEPTION
}
// Hands the TM writer to the caller-supplied producer, then commits all the
// inserted entries in a single batch.
void TranslationMemoryImpl::ImportData(std::function<void(TranslationMemory::IOInterface&)> source)
{
auto writer = TranslationMemory::Get().GetWriter();
source(*writer);
writer->Commit();
}
// Reports the number of indexed documents and the on-disk size of the TM
// database directory (in bytes). Rethrows index errors as Poedit Exceptions.
void TranslationMemoryImpl::GetStats(long& numDocs, long& fileSize)
{
try
{
auto reader = m_mng->Reader();
numDocs = reader->numDocs();
fileSize = wxDir::GetTotalSize(GetDatabaseDir()).GetValue();
}
CATCH_AND_RETHROW_EXCEPTION
}
// ----------------------------------------------------------------
// TranslationMemoryWriterImpl
// ----------------------------------------------------------------
// Write-side API over the shared IndexWriter: insert/delete TM entries and
// commit or roll back the pending changes.
class TranslationMemoryWriterImpl : public TranslationMemory::Writer
{
public:
TranslationMemoryWriterImpl(IndexWriterPtr writer) : m_writer(writer) {}
~TranslationMemoryWriterImpl() {}
void Commit() override
{
try
{
m_writer->commit();
}
CATCH_AND_RETHROW_EXCEPTION
}
void Rollback() override
{
try
{
m_writer->rollback();
}
CATCH_AND_RETHROW_EXCEPTION
}
// Inserts one (source, translation) pair; entries are keyed by a
// deterministic UUID so re-inserting the same pair updates in place.
void Insert(const Language& srclang, const Language& lang,
const std::wstring& source, const std::wstring& trans,
time_t creationTime) override
{
// Same-language or invalid-language pairs carry no information; skip them.
if (!lang.IsValid() || !srclang.IsValid() || lang == srclang)
return;
if (creationTime == 0)
creationTime = time(NULL);
// Compute unique ID for the translation:
// Name-based UUID over (srclang, lang, source, trans) — deterministic,
// so identical entries always map to the same document.
static const boost::uuids::uuid s_namespace =
boost::uuids::string_generator()("6e3f73c5-333f-4171-9d43-954c372a8a02");
boost::uuids::name_generator gen(s_namespace);
std::wstring itemId(srclang.WCode());
itemId += lang.WCode();
itemId += source;
itemId += trans;
const std::wstring itemUUID = boost::uuids::to_wstring(gen(itemId));
try
{
// Then add a new document:
auto doc = newLucene<Document>();
doc->add(newLucene<Field>(L"uuid", itemUUID,
Field::STORE_YES, Field::INDEX_NOT_ANALYZED));
doc->add(newLucene<Field>(L"v", L"1",
Field::STORE_YES, Field::INDEX_NO));
doc->add(newLucene<Field>(L"created", DateField::timeToString(creationTime),
Field::STORE_YES, Field::INDEX_NO));
doc->add(newLucene<Field>(L"srclang", srclang.WCode(),
Field::STORE_YES, Field::INDEX_NOT_ANALYZED));
doc->add(newLucene<Field>(L"lang", lang.WCode(),
Field::STORE_YES, Field::INDEX_NOT_ANALYZED));
// Only the source text is analyzed; everything else is matched exactly.
doc->add(newLucene<Field>(L"source", source,
Field::STORE_YES, Field::INDEX_ANALYZED));
doc->add(newLucene<Field>(L"trans", trans,
Field::STORE_YES, Field::INDEX_NOT_ANALYZED));
// updateDocument replaces any existing document with the same uuid.
m_writer->updateDocument(newLucene<Term>(L"uuid", itemUUID), doc);
}
CATCH_AND_RETHROW_EXCEPTION
}
void Insert(const Language& srclang, const Language& lang,
const std::wstring& source, const std::wstring& trans) override
{
// creationTime == 0 means "now" (see the main overload).
Insert(srclang, lang, source, trans, 0);
}
// Inserts a catalog item, skipping entries not worth remembering and
// handling the simple plural cases.
void Insert(const Language& srclang, const Language& lang, const CatalogItemPtr& item) override
{
if (!lang.IsValid() || !srclang.IsValid())
return;
// ignore translations with errors in them
if (item->HasError())
return;
// ignore untranslated, pre-translated and non-revised or unfinished translations
if (item->IsFuzzy() || item->IsPreTranslated() || !item->IsTranslated())
return;
// always store at least the singular translation
Insert(srclang, lang, str::to_wstring(item->GetString()), str::to_wstring(item->GetTranslation()));
// for plurals, try to support at least the simpler cases, with nplurals <= 2
if (item->HasPlural())
{
switch (lang.nplurals())
{
case 1:
// e.g. Chinese, Japanese; store translation for both singular and plural
Insert(srclang, lang, str::to_wstring(item->GetPluralString()), str::to_wstring(item->GetTranslation()));
break;
case 2:
// e.g. Germanic or Romanic languages, same 2 forms as English
Insert(srclang, lang, str::to_wstring(item->GetPluralString()), str::to_wstring(item->GetTranslation(1)));
break;
default:
// not supported, only singular stored above
break;
}
}
}
// Inserts every item of a catalog under the catalog's language pair.
void Insert(const CatalogPtr& cat) override
{
auto srclang = cat->GetSourceLanguage();
auto lang = cat->GetLanguage();
if (!lang.IsValid() || !srclang.IsValid())
return;
for (auto& item: cat->items())
{
// Note that dt.IsModified() is intentionally not checked - we
// want to save old entries in the TM too, so that we harvest as
// much useful translations as we can.
Insert(srclang, lang, item);
}
}
// Removes the entry with the given uuid (as produced by Insert).
void Delete(const std::string& uuid) override
{
try
{
m_writer->deleteDocuments(newLucene<Term>(L"uuid", StringUtils::toUnicode(uuid)));
}
CATCH_AND_RETHROW_EXCEPTION
}
// Removes every document from the index (still requires Commit()).
void DeleteAll() override
{
try
{
m_writer->deleteAll();
}
CATCH_AND_RETHROW_EXCEPTION
}
private:
IndexWriterPtr m_writer;
};
// Opens (or creates) the Lucene index and wires up the analyzer, writer,
// reader/searcher manager and the writer-facing API object.
void TranslationMemoryImpl::Init()
{
try
{
auto dir = newLucene<DirectoryType>(GetDatabaseDir());
m_analyzer = newLucene<StandardAnalyzer>(LuceneVersion::LUCENE_CURRENT);
m_writer = newLucene<IndexWriter>(dir, m_analyzer, IndexWriter::MaxFieldLengthLIMITED);
// Serial merging avoids background merge threads.
m_writer->setMergeScheduler(newLucene<SerialMergeScheduler>());
// get the associated realtime reader & searcher:
m_mng.reset(new SearcherManager(m_writer));
m_writerAPI = std::make_shared<TranslationMemoryWriterImpl>(m_writer);
}
CATCH_AND_RETHROW_EXCEPTION
}
// ----------------------------------------------------------------
// Singleton management
// ----------------------------------------------------------------
static std::once_flag initializationFlag;
TranslationMemory *TranslationMemory::ms_instance = nullptr;
// Thread-safe lazy singleton accessor; the instance is created exactly once.
TranslationMemory& TranslationMemory::Get()
{
std::call_once(initializationFlag, []() {
ms_instance = new TranslationMemory;
});
return *ms_instance;
}
// Destroys the singleton instance; safe to call when none was ever created
// (deleting a null pointer is a no-op).
void TranslationMemory::CleanUp()
{
    delete ms_instance;
    ms_instance = nullptr;
}
// Constructor defers failure: if the implementation can't be created (e.g.
// corrupted index), the exception is stored and rethrown on first API use.
TranslationMemory::TranslationMemory() : m_impl(nullptr)
{
try
{
m_impl = new TranslationMemoryImpl;
}
catch (...)
{
m_error = std::current_exception();
}
}
TranslationMemory::~TranslationMemory() { delete m_impl; }
// ----------------------------------------------------------------
// public API
// ----------------------------------------------------------------
// Public search entry point; rethrows the stored construction error if the
// implementation failed to initialize.
SuggestionsList TranslationMemory::Search(const Language& srclang,
const Language& lang,
const std::wstring& source)
{
if (!m_impl)
std::rethrow_exception(m_error);
return m_impl->Search(srclang, lang, source);
}
// Async-style wrapper over Search(): runs synchronously and packages the
// result (or any exception, incl. the deferred construction error) in a future.
dispatch::future<SuggestionsList> TranslationMemory::SuggestTranslation(const SuggestionQuery&& q)
{
try
{
return dispatch::make_ready_future(Search(q.srclang, q.lang, q.source));
}
catch (...)
{
return dispatch::make_exceptional_future_from_current<SuggestionsList>();
}
}
// Deletes a single TM entry by its uuid and commits immediately.
void TranslationMemory::Delete(const std::string& id)
{
auto tm = TranslationMemory::Get().GetWriter();
tm->Delete(id);
tm->Commit();
}
// The following wrappers guard every call with the deferred construction
// error (see the constructor) before delegating to the implementation.
void TranslationMemory::ExportData(IOInterface& destination)
{
if (!m_impl)
std::rethrow_exception(m_error);
return m_impl->ExportData(destination);
}
void TranslationMemory::ImportData(std::function<void(IOInterface&)> source)
{
if (!m_impl)
std::rethrow_exception(m_error);
return m_impl->ImportData(source);
}
std::shared_ptr<TranslationMemory::Writer> TranslationMemory::GetWriter()
{
if (!m_impl)
std::rethrow_exception(m_error);
return m_impl->GetWriter();
}
// Wipes all TM entries. If the normal delete path fails (e.g. corrupted
// index), falls back to removing the database directory outright and
// recreating the implementation, which also clears the stored error.
void TranslationMemory::DeleteAllAndReset()
{
try
{
auto tm = TranslationMemory::Get().GetWriter();
tm->DeleteAll();
tm->Commit();
}
catch (...)
{
// Lucene database is corrupted, best we can do is delete it completely
wxFileName::Rmdir(TranslationMemoryImpl::GetDatabaseDir(), wxPATH_RMDIR_RECURSIVE);
// recreate implementation object
TranslationMemoryImpl *impl = new TranslationMemoryImpl;
// Swap-then-delete so m_impl is never left dangling.
std::swap(m_impl, impl);
delete impl;
m_error = nullptr;
}
}
// Public stats accessor; rethrows the stored construction error if the
// implementation failed to initialize.
void TranslationMemory::GetStats(long& numDocs, long& fileSize)
{
if (!m_impl)
std::rethrow_exception(m_error);
m_impl->GetStats(numDocs, fileSize);
}
|
{"hexsha": "ba7ea1268365e5e0c851c9e3f4b168bc9252e3a9", "size": 27164, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/tm/transmem.cpp", "max_stars_repo_name": "Fat-Zer/poedit", "max_stars_repo_head_hexsha": "a6fc147c6e1d342d4bfe28c3956a4dc1df0eea0f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/tm/transmem.cpp", "max_issues_repo_name": "Fat-Zer/poedit", "max_issues_repo_head_hexsha": "a6fc147c6e1d342d4bfe28c3956a4dc1df0eea0f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/tm/transmem.cpp", "max_forks_repo_name": "Fat-Zer/poedit", "max_forks_repo_head_hexsha": "a6fc147c6e1d342d4bfe28c3956a4dc1df0eea0f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.6883273165, "max_line_length": 126, "alphanum_fraction": 0.5682889118, "num_tokens": 5867}
|
[STATEMENT]
lemma card_le_PiE_subindex:
assumes "A \<subseteq> A'" "Pi\<^sub>E A' B \<noteq> {}"
shows "PiE A B \<lesssim> PiE A' B"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Pi\<^sub>E A B \<lesssim> Pi\<^sub>E A' B
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. Pi\<^sub>E A B \<lesssim> Pi\<^sub>E A' B
[PROOF STEP]
have "\<And>x. x \<in> A' \<Longrightarrow> \<exists>y. y \<in> B x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x. x \<in> A' \<Longrightarrow> \<exists>y. y \<in> B x
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
A \<subseteq> A'
Pi\<^sub>E A' B \<noteq> {}
goal (1 subgoal):
1. \<And>x. x \<in> A' \<Longrightarrow> \<exists>y. y \<in> B x
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
?x \<in> A' \<Longrightarrow> \<exists>y. y \<in> B ?x
goal (1 subgoal):
1. Pi\<^sub>E A B \<lesssim> Pi\<^sub>E A' B
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
?x \<in> A' \<Longrightarrow> \<exists>y. y \<in> B ?x
[PROOF STEP]
obtain g where g: "\<And>x. x \<in> A' \<Longrightarrow> g x \<in> B x"
[PROOF STATE]
proof (prove)
using this:
?x \<in> A' \<Longrightarrow> \<exists>y. y \<in> B ?x
goal (1 subgoal):
1. (\<And>g. (\<And>x. x \<in> A' \<Longrightarrow> g x \<in> B x) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by metis
[PROOF STATE]
proof (state)
this:
?x \<in> A' \<Longrightarrow> g ?x \<in> B ?x
goal (1 subgoal):
1. Pi\<^sub>E A B \<lesssim> Pi\<^sub>E A' B
[PROOF STEP]
let ?F = "\<lambda>f x. if x \<in> A then f x else if x \<in> A' then g x else undefined"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. Pi\<^sub>E A B \<lesssim> Pi\<^sub>E A' B
[PROOF STEP]
have "Pi\<^sub>E A B \<subseteq> (\<lambda>f. restrict f A) ` Pi\<^sub>E A' B"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Pi\<^sub>E A B \<subseteq> (\<lambda>f. restrict f A) ` Pi\<^sub>E A' B
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. x \<in> Pi\<^sub>E A B \<Longrightarrow> x \<in> (\<lambda>f. restrict f A) ` Pi\<^sub>E A' B
[PROOF STEP]
show "f \<in> Pi\<^sub>E A B \<Longrightarrow> f \<in> (\<lambda>f. restrict f A) ` Pi\<^sub>E A' B" for f
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. f \<in> Pi\<^sub>E A B \<Longrightarrow> f \<in> (\<lambda>f. restrict f A) ` Pi\<^sub>E A' B
[PROOF STEP]
using \<open>A \<subseteq> A'\<close>
[PROOF STATE]
proof (prove)
using this:
A \<subseteq> A'
goal (1 subgoal):
1. f \<in> Pi\<^sub>E A B \<Longrightarrow> f \<in> (\<lambda>f. restrict f A) ` Pi\<^sub>E A' B
[PROOF STEP]
by (rule_tac x="?F f" in image_eqI) (auto simp: g fun_eq_iff)
[PROOF STATE]
proof (state)
this:
?f \<in> Pi\<^sub>E A B \<Longrightarrow> ?f \<in> (\<lambda>f. restrict f A) ` Pi\<^sub>E A' B
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
Pi\<^sub>E A B \<subseteq> (\<lambda>f. restrict f A) ` Pi\<^sub>E A' B
goal (1 subgoal):
1. Pi\<^sub>E A B \<lesssim> Pi\<^sub>E A' B
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
Pi\<^sub>E A B \<subseteq> (\<lambda>f. restrict f A) ` Pi\<^sub>E A' B
[PROOF STEP]
have "Pi\<^sub>E A B \<lesssim> (\<lambda>f. \<lambda>i \<in> A. f i) ` Pi\<^sub>E A' B"
[PROOF STATE]
proof (prove)
using this:
Pi\<^sub>E A B \<subseteq> (\<lambda>f. restrict f A) ` Pi\<^sub>E A' B
goal (1 subgoal):
1. Pi\<^sub>E A B \<lesssim> (\<lambda>f. restrict f A) ` Pi\<^sub>E A' B
[PROOF STEP]
by (simp add: subset_imp_lepoll)
[PROOF STATE]
proof (state)
this:
Pi\<^sub>E A B \<lesssim> (\<lambda>f. restrict f A) ` Pi\<^sub>E A' B
goal (1 subgoal):
1. Pi\<^sub>E A B \<lesssim> Pi\<^sub>E A' B
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
Pi\<^sub>E A B \<lesssim> (\<lambda>f. restrict f A) ` Pi\<^sub>E A' B
goal (1 subgoal):
1. Pi\<^sub>E A B \<lesssim> Pi\<^sub>E A' B
[PROOF STEP]
have "\<dots> \<lesssim> PiE A' B"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>f. restrict f A) ` Pi\<^sub>E A' B \<lesssim> Pi\<^sub>E A' B
[PROOF STEP]
by (rule image_lepoll)
[PROOF STATE]
proof (state)
this:
(\<lambda>f. restrict f A) ` Pi\<^sub>E A' B \<lesssim> Pi\<^sub>E A' B
goal (1 subgoal):
1. Pi\<^sub>E A B \<lesssim> Pi\<^sub>E A' B
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
Pi\<^sub>E A B \<lesssim> Pi\<^sub>E A' B
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
Pi\<^sub>E A B \<lesssim> Pi\<^sub>E A' B
goal (1 subgoal):
1. Pi\<^sub>E A B \<lesssim> Pi\<^sub>E A' B
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
Pi\<^sub>E A B \<lesssim> Pi\<^sub>E A' B
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 2047, "file": null, "length": 24}
|
#include "ManagerUtils/ArgHelper/interface/Parsermgr.hpp"
#include <boost/exception/diagnostic_information.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/multiprecision/cpp_dec_float.hpp>
#include <iostream>
namespace opt = boost::program_options;
using namespace std;
using Double = boost::multiprecision::cpp_dec_float_50;
namespace mgr{
// Constructor: every parser instance ships with a built-in help flag.
Parsermgr::Parsermgr()
{
  _desc.add_options()
    ( "help,h", "print help options and exit program" );
}
// Merge the caller's option block into the master description.
// Returns *this so that AddOptions() calls can be chained.
Parsermgr&
Parsermgr::AddOptions( const opt::options_description& de )
{
  _desc.add( de );
  return *this;
}
int
Parsermgr::ParseOptions( int argc, char* argv[] )
{
try {
opt::store( opt::parse_command_line( argc, argv, _desc ), _vm );
opt::notify( _vm );
}
catch( boost::exception& e ){
cerr << "Error parsing command!" << endl;
cerr << boost::diagnostic_information( e );
cerr << _desc << endl;
return FAIL_PARSER;
}
if( _vm.count( "help" ) ){
cout << _desc << endl;
return HELP_PARSER;
}
return PASS_PARSER;
}
// True when the given option was supplied on the command line.
bool
Parsermgr::CheckOption( const string& option )
{
  return _vm.count( option ) > 0;
}
// Build a tag string describing the parsed options, by concatenating the
// values of the registered "name" options and the name/value pairs of the
// registered "cut" options (see CustomName for the final formatting).
string
Parsermgr::OptName()
{
  // Render a stored option value as a string, trying string, then int,
  // then double (routed through the high-precision Double type so the
  // decimal text is stable).  Returns "" when the option is absent or of
  // an unsupported type.  This replaces two verbatim copies of the same
  // try-chain in the original implementation.
  auto optvaluestring = [this]( const string& opt )->string {
    try { return GetOption<string>( opt ); }
    catch( ... ){}

    try { return boost::lexical_cast<string>( GetOption<int>( opt ) ); }
    catch( ... ){}

    try { return boost::lexical_cast<string>( Double( GetOption<double>( opt ) ) ); }
    catch( ... ){}

    return "";
  };

  vector<string> taglist;

  // Name-type options contribute their bare value (possibly empty, to
  // preserve the original behaviour of pushing an empty tag).
  for( const auto& opt : _namelist ){
    taglist.push_back( optvaluestring( opt ) );
  }

  // Cut-type options contribute "<name>" or "<name>_<value>".
  for( const auto& opt : _cutlist ){
    const string ans = optvaluestring( opt );
    if( ans == "" ){
      taglist.push_back( opt );
    } else {
      taglist.push_back( opt + "_" + ans );
    }
  }

  return CustomName( taglist );
}
// Join all tags into one string, prefixing each tag with an underscore.
string
Parsermgr::CustomName( const vector<string>& taglist )
{
  string result;

  for( const auto& tag : taglist ){
    result += "_";
    result += tag;
  }

  return result;
}
// Print the name of every parsed option to stdout, space separated.
void
Parsermgr::GetOptlist()
{
  for( const auto& entry : _vm ){
    // operator<< for std::string produces the same bytes as c_str() did.
    cout << entry.first << " ";
  }
}
}
|
{"hexsha": "ca25678fa95453de45a630d09c32bfef46a7b7f1", "size": 3416, "ext": "cc", "lang": "C++", "max_stars_repo_path": "ArgHelper/src/Parsermgr.cc", "max_stars_repo_name": "sam7k9621/ManagerUtils", "max_stars_repo_head_hexsha": "7b9317df002b3df6f23ae9e559d35bb1fc15b6f6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ArgHelper/src/Parsermgr.cc", "max_issues_repo_name": "sam7k9621/ManagerUtils", "max_issues_repo_head_hexsha": "7b9317df002b3df6f23ae9e559d35bb1fc15b6f6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ArgHelper/src/Parsermgr.cc", "max_forks_repo_name": "sam7k9621/ManagerUtils", "max_forks_repo_head_hexsha": "7b9317df002b3df6f23ae9e559d35bb1fc15b6f6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.2380952381, "max_line_length": 92, "alphanum_fraction": 0.4148126464, "num_tokens": 720}
|
#!/usr/bin/env python3
""" Base class for Faceswap :mod:`~plugins.convert.mask` Plugins """
import logging
import numpy as np
from plugins.convert._config import Config
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def _get_config(plugin_name, configfile=None):
    """ Return the :attr:`lib.config.FaceswapConfig.config_dict` for the requested plugin.

    Parameters
    ----------
    plugin_name: str
        The name of the plugin to retrieve the config for
    configfile: str, optional
        Optional location of custom configuration ``ini`` file. If ``None`` then use the default
        config location. Default: ``None``

    Returns
    -------
    dict
        The configuration in dictionary form for the given plugin_name from
        :attr:`lib.config.FaceswapConfig.config_dict`
    """
    loaded_config = Config(plugin_name, configfile=configfile)
    return loaded_config.config_dict
class Adjustment():
    """ Parent class for Mask Adjustment Plugins.

    All mask plugins must inherit from this class.

    Parameters
    ----------
    mask_type: str
        The type of mask that this plugin is being used for
    output_size: int
        The size, in pixels, of the output from the Faceswap model.
    configfile: str, Optional
        Optional location of custom configuration ``ini`` file. If ``None`` then use the default
        config location. Default: ``None``
    config: :class:`lib.config.FaceswapConfig`, Optional
        Optional pre-loaded :class:`lib.config.FaceswapConfig`. If passed, then this will be used
        over any configuration on disk. If ``None`` then it is ignored. Default: ``None``

    Attributes
    ----------
    config: dict
        The configuration dictionary for this plugin.
    mask_type: str
        The type of mask that this plugin is being used for.
    """

    def __init__(self, mask_type, output_size, configfile=None, config=None):
        logger.debug("Initializing %s: (arguments: '%s', output_size: %s, "
                     "configfile: %s, config: %s)", self.__class__.__name__, mask_type,
                     output_size, configfile, config)
        self.config = self._set_config(configfile, config)
        logger.debug("config: %s", self.config)
        self.mask_type = mask_type
        # All-zeros placeholder mask returned via the `dummy` property.
        self._dummy = np.zeros((output_size, output_size, 3), dtype='float32')
        logger.debug("Initialized %s", self.__class__.__name__)

    @property
    def dummy(self):
        """:class:`numpy.ndarray`: A dummy mask of all zeros of the shape:
        (:attr:`output_size`, :attr:`output_size`, `3`)
        """
        return self._dummy

    @property
    def skip(self):
        """bool: ``True`` if the blur type config attribute is ``None`` otherwise ``False`` """
        blur_type = self.config.get("type", None)
        return blur_type is None

    def _set_config(self, configfile, config):
        """ Set the correct configuration for the plugin based on whether a config file
        or pre-loaded config has been passed in.

        Parameters
        ----------
        configfile: str
            Location of custom configuration ``ini`` file. If ``None`` then use the
            default config location
        config: :class:`lib.config.FaceswapConfig`
            Pre-loaded :class:`lib.config.FaceswapConfig`. If passed, then this will be
            used over any configuration on disk. If ``None`` then it is ignored.

        Returns
        -------
        dict
            The configuration in dictionary form for the given from
            :attr:`lib.config.FaceswapConfig.config_dict`
        """
        section = ".".join(self.__module__.split(".")[-2:])
        if config is not None:
            # Point the pre-loaded config at our section just long enough to
            # pull out its dictionary, then reset it.
            config.section = section
            retval = config.config_dict
            config.section = None
        else:
            retval = _get_config(section, configfile=configfile)
        logger.debug("Config: %s", retval)
        return retval

    def process(self, *args, **kwargs):
        """ Override for specific mask adjustment plugin processes.

        Input parameters will vary from plugin to plugin.

        Should return a :class:`numpy.ndarray` mask with the plugin's actions applied
        """
        raise NotImplementedError

    def run(self, *args, **kwargs):
        """ Perform selected adjustment on face """
        logger.trace("Performing mask adjustment: (plugin: %s, args: %s, kwargs: %s",
                     self.__module__, args, kwargs)
        return self.process(*args, **kwargs)
|
{"hexsha": "0008dd35e262543034066f8b9de8a8aebeded499", "size": 4512, "ext": "py", "lang": "Python", "max_stars_repo_path": "faceswap/plugins/convert/mask/_base.py", "max_stars_repo_name": "huangjunxiong11/FaceMap", "max_stars_repo_head_hexsha": "f320ce517edcaa4d9963ad0571e686cbe07fdfbb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-08-02T06:45:09.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-30T06:35:31.000Z", "max_issues_repo_path": "plugins/convert/mask/_base.py", "max_issues_repo_name": "aaman123/faceswap", "max_issues_repo_head_hexsha": "a5825c3457b062c1824ef3f8b02e4f3fa4c2217f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2020-09-26T00:56:55.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:24:08.000Z", "max_forks_repo_path": "plugins/convert/mask/_base.py", "max_forks_repo_name": "aaman123/faceswap", "max_forks_repo_head_hexsha": "a5825c3457b062c1824ef3f8b02e4f3fa4c2217f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-06-16T03:31:24.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-16T03:31:24.000Z", "avg_line_length": 36.096, "max_line_length": 97, "alphanum_fraction": 0.6305407801, "include": true, "reason": "import numpy", "num_tokens": 988}
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# written by Shotaro Fujimoto
# 2016-10-21
from triangular import LatticeTriangular as LT
import matplotlib.pyplot as plt
import matplotlib.tri as tri
import matplotlib.animation as animation
import networkx as nx
# import pygraphviz
import numpy as np
class InsideString(object):
    """Stochastic growth of a string along the bonds of a triangular lattice.

    The bond mid-points of an ``Lx`` x ``Ly`` triangular lattice are mapped
    onto a kagome lattice of shape ``(2 * Lx, Ly)``.  Starting from
    ``initial_state``, every unoccupied kagome site whose first-neighbour
    shell contains exactly one occupied site is a candidate growth point;
    at each step one candidate is drawn with probability proportional to a
    Boltzmann weight ``exp(-beta * E)`` whose energy depends on the local
    occupation pattern (see ``_create_weight_table``).

    The simulation runs immediately on construction and, depending on the
    flags, is animated live, saved to video, and/or saved as an image.

    NOTE(review): ported from Python 2 (``print`` statements, ``has_key``,
    indexable dict views) and from the removed ``np.int``/``np.bool``
    aliases; the logic and statement order are otherwise unchanged.
    """

    def __init__(self, Lx=40, Ly=40,
                 boundary=None,
                 initial_state=None,
                 plot=True,
                 plot_surface=True,
                 save_image=False,
                 save_video=False,
                 filename_image="",
                 filename_video="",
                 record_networks=False,
                 frames=1000,
                 beta=2.,
                 interval=1,
                 pre_function=None,
                 post_function=None):
        """Set up the simulation and run it (see :meth:`start`).

        Parameters
        ----------
        Lx, Ly: int
            Dimensions of the triangular lattice.
        boundary: dict, optional
            Boundary conditions passed to the lattice; defaults to
            ``{'h': 'periodic', 'v': 'periodic'}``.
        initial_state: list of tuple, optional
            Kagome coordinates of the initially occupied sites; defaults
            to ``[(20, 20)]``.
        plot, plot_surface, save_image, save_video: bool
            Output options; ``plot`` and ``save_video`` must not both be
            True.
        filename_image, filename_video: str
            Output paths, required when the matching ``save_*`` flag is set.
        record_networks: bool
            When True, record the grown string as a ``networkx`` graph
            in ``self.G``.
        frames: int
            Maximum number of growth steps / animation frames.
        beta: float
            Inverse temperature in the Boltzmann weights.
        interval: int
            Animation frame interval in milliseconds.
        pre_function, post_function: callable, optional
            Hooks called with ``self`` before/after every step; return
            values are collected in ``pre_func_res`` / ``post_func_res``.

        Raises
        ------
        AttributeError
            When a required filename is empty, or when ``plot`` and
            ``save_video`` are both True.
        """
        # The original signature used shared mutable defaults (a dict and
        # a list); ``None`` now selects equivalent fresh values.
        if boundary is None:
            boundary = {'h': 'periodic', 'v': 'periodic'}
        if initial_state is None:
            initial_state = [(20, 20)]
        self.plot = plot
        self.plot_surface = plot_surface
        self.save_image = save_image
        self.save_video = save_video
        self.record_networks = record_networks
        if self.save_image:
            if filename_image == "":
                raise AttributeError("`filename_image` is empty.")
            else:
                self.filename_image = filename_image
        if self.save_video:
            if self.plot:
                raise AttributeError("`save` and `plot` method can't be set both True.")
            if filename_video == "":
                raise AttributeError("`filename_video` is empty.")
            else:
                self.filename_video = filename_video
        self.interval = interval
        self.frames = frames
        self.beta = beta
        self.pre_function = pre_function
        self.post_function = post_function
        self.pre_func_res = []
        self.post_func_res = []
        self.init(Lx, Ly, boundary, initial_state)
        self.start()

    def init(self, Lx, Ly, boundary, initial_state):
        """Build the lattice, the kagome coordinate arrays and the
        occupation table, and seed the initial occupied sites."""
        self.lattice = LT(
            # `np.int` / `np.bool` were removed in NumPy 1.24; the builtin
            # types are the documented replacements.
            np.zeros((Lx, Ly), dtype=int),
            scale=float(max(Lx, Ly)),
            boundary=boundary
        )
        if self.record_networks:
            self.G = nx.Graph()
        self.lattice_X = self.lattice.coordinates_x.reshape(
            self.lattice.Lx,
            self.lattice.Ly
        )
        self.lattice_Y = self.lattice.coordinates_y.reshape(
            self.lattice.Lx,
            self.lattice.Ly
        )
        # Bond mid-points form a kagome lattice twice as wide in x.
        self.kagome_Lx = 2 * self.lattice.Lx
        self.kagome_Ly = self.lattice.Ly
        x_even = self.lattice_X + 0.5 * self.lattice.dx
        y_even = self.lattice_Y + self.lattice.dy / 2.
        x_odd = np.roll(self.lattice_X, -1, axis=0)
        y_odd = np.roll(self.lattice_Y, -1, axis=0) + (2 * self.lattice.dy) / 3.
        self.kagome_X = np.hstack((x_even, x_odd)).reshape(self.kagome_Lx,
                                                           self.kagome_Ly)
        self.kagome_Y = np.hstack((y_even, y_odd)).reshape(self.kagome_Lx,
                                                           self.kagome_Ly)
        self.occupied = np.zeros((self.kagome_Lx, self.kagome_Ly),
                                 dtype=bool)
        self._create_weight_table()
        self.growing_points = {}  # {(x, y): weight}
        # Initial state: occupy the seed sites and register their
        # candidate growth points.
        for pos in initial_state:
            self.occupied[pos] = True
            self.append_new_growing_point(pos)
        if self.record_networks:
            self.G.add_nodes_from(map(str, initial_state))
        self.initial_state = initial_state

    def start(self):
        """Run the simulation: live animation, video export, or headless."""
        # Plot triangular-lattice points, string on it, and so on
        if self.plot:
            self.plot_all()
            self.start_animation()
        elif self.save_video:
            self.plot_all()
            self.start_animation(filename=self.filename_video)
        else:
            t = 0
            while t < self.frames:
                try:
                    self.update()
                    t += 1
                except StopIteration:
                    # `update` raises StopIteration when no growth point
                    # is left.
                    break
        if self.save_image:
            # `dict.has_key` was Python-2 only.
            if 'fig' not in self.__dict__:
                self.plot_all()
            self.fig.savefig(self.filename_image)
            plt.close()

    def _create_weight_table(self):
        """Create the rule table.

        Each allowed local pattern is a 12-bit word: the first three bits
        encode which one of the three first-neighbour orientations is
        occupied, the remaining nine bits encode the occupation of the
        second-neighbour shell.  Patterns map to Boltzmann weights
        ``exp(-beta * E)`` taken from ``_weight_list``.
        """
        _weight_rule = (
            (1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),  # 2048
            (1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1),  # 2049
            (1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1),  # 2051
            (1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1),  # 2055
            (1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0),  # 2304
            (1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1),  # 2305
            (1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1),  # 2307
            (1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1),  # 2311
            (1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0),  # 2432
            (1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1),  # 2433
            (1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1),  # 2435
            (1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1),  # 2439
            (1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0),  # 2496
            (1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1),  # 2497
            (1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1),  # 2499
            (1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1),  # 2503
            (0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),  # 1024
            (0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0),  # 1088
            (0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0),  # 1216
            (0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0),  # 1472
            (0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0),  # 1056
            (0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0),  # 1120
            (0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0),  # 1248
            (0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0),  # 1504
            (0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0),  # 1072
            (0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0),  # 1136
            (0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0),  # 1264
            (0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0),  # 1520
            (0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0),  # 1080
            (0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0),  # 1144
            (0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0),  # 1272
            (0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0),  # 1528
            (0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0),  # 512
            (0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0),  # 520
            (0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0),  # 536
            (0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0),  # 568
            (0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0),  # 516
            (0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0),  # 524
            (0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0),  # 540
            (0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0),  # 572
            (0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0),  # 518
            (0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0),  # 526
            (0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0),  # 542
            (0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0),  # 574
            (0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1),  # 519
            (0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1),  # 527
            (0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1),  # 543
            (0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1),  # 575
        )
        _weight_list = np.array([
            # Alternative (steeper) energy table kept for reference:
            # -2., -1.5, -0.5, 0.,
            # -1.5, -1., 0., 0.5,
            # -0.5, 0., 1., 1.5,
            # 0., 0.5, 1.5, 2.,
            # (repeated for the three orientation groups)
            -1.5, -1., 0., 0.5,
            -1., -0.5, 0.5, 1.,
            0., 0.5, 1.5, 2.,
            0.5, 1., 2., 2.5,
            -1.5, -1., 0., 0.5,
            -1., -0.5, 0.5, 1.,
            0., 0.5, 1.5, 2.,
            0.5, 1., 2., 2.5,
            -1.5, -1., 0., 0.5,
            -1., -0.5, 0.5, 1.,
            0., 0.5, 1.5, 2.,
            0.5, 1., 2., 2.5,
        ])
        self.weight_rule = [int(''.join(map(str, t)), 2) for t in _weight_rule]
        self.weight_list = np.exp(- self.beta * _weight_list)
        # Same keys as `weight_rule`, but O(1) membership tests.
        self.weight_table = dict(zip(self.weight_rule, self.weight_list))

    def _create_truth_table(self, i, nn2):
        """Encode the local occupation pattern as a 12-bit integer.

        ``i`` is the key ('1'..'3') of the single occupied first
        neighbour; ``nn2`` lists the nine second-neighbour coordinates.
        """
        truth_table = [0, 0, 0]
        truth_table[int(i) - 1] = 1
        truth_table += [1 if self.occupied[_nn2] else 0 for _nn2 in nn2]
        truth_table = int(''.join(map(str, truth_table)), 2)
        return truth_table

    def append_new_growing_point(self, pos):
        """Register candidate growth points around a newly occupied site.

        Scans the first neighbours of ``pos``; every unoccupied neighbour
        that itself has exactly one occupied first neighbour and whose
        local pattern appears in the rule table is added to
        ``self.growing_points`` with its Boltzmann weight.

        Parameters
        ----------
        pos: tuple (pos_x, pos_y)
            Kagome coordinates of the newly occupied site.
        """
        pos_x, pos_y = pos
        even_or_odd = 'even' if pos_x % 2 == 0 else 'odd'
        # First neighbours of the newly occupied site.
        nn = getattr(self, 'get_nn1_' + even_or_odd)(pos_x, pos_y)
        for k, (x, y) in nn.items():
            # Skip sites that are already occupied.
            if self.occupied[(x, y)]:
                continue
            # First neighbours of the candidate (opposite x-parity).
            even_or_odd_ = 'odd' if even_or_odd == 'even' else 'even'
            nn1 = getattr(self, 'get_nn1_' + even_or_odd_)(x, y)
            nn1 = {key: p for key, p in nn1.items() if self.occupied[p]}
            # Only sites with exactly one occupied first neighbour can grow.
            if len(nn1) != 1:
                continue
            # nn1 is now {'i': (x_i, y_i)} for a single orientation i.
            nn2 = getattr(self, 'get_nn2_' + even_or_odd_)(x, y)
            # `dict.keys()[0]` was Python-2 only; views are not indexable.
            truth_table = self._create_truth_table(next(iter(nn1)), nn2)
            if truth_table in self.weight_table:
                self.growing_points[(x, y)] = self.weight_table[truth_table]

    def cleanup_growing_point(self):
        """Drop candidates whose neighbourhood no longer allows growth."""
        # Iterate over a snapshot: entries are deleted during the loop,
        # which Python 3 dict views forbid.
        for pos_x, pos_y in list(self.growing_points.keys()):
            even_or_odd = 'even' if pos_x % 2 == 0 else 'odd'
            nn1 = getattr(self, 'get_nn1_' + even_or_odd)(pos_x, pos_y)
            nn1 = {key: p for key, p in nn1.items() if self.occupied[p]}
            if len(nn1) != 1:
                del self.growing_points[(pos_x, pos_y)]
                continue
            nn2 = getattr(self, 'get_nn2_' + even_or_odd)(pos_x, pos_y)
            truth_table = self._create_truth_table(next(iter(nn1)), nn2)
            if truth_table not in self.weight_table:
                del self.growing_points[(pos_x, pos_y)]

    def get_nn1_even(self, x, y):
        """Return the first-neighbour coordinates of kagome site (x, y)
        when x is even."""
        return {
            '1': ((x - 1) % self.kagome_Lx, y),
            '2': ((x + 1) % self.kagome_Lx, y),
            '3': ((x + 1) % self.kagome_Lx, (y - 1) % self.kagome_Ly),
        }

    def get_nn1_odd(self, x, y):
        """Return the first-neighbour coordinates of kagome site (x, y)
        when x is odd."""
        return {
            '1': ((x - 1) % self.kagome_Lx, y),
            '2': ((x - 1) % self.kagome_Lx, (y + 1) % self.kagome_Ly),
            '3': ((x + 1) % self.kagome_Lx, y),
        }

    def get_nn2_even(self, x, y):
        """Return the second-neighbour coordinates of kagome site (x, y)
        when x is even."""
        return [
            ((x - 2) % self.kagome_Lx, (y + 1) % self.kagome_Ly),
            ((x - 1) % self.kagome_Lx, (y + 1) % self.kagome_Ly),
            (x, (y + 1) % self.kagome_Ly),
            ((x + 2) % self.kagome_Lx, y),
            ((x + 3) % self.kagome_Lx, (y - 1) % self.kagome_Ly),
            ((x + 2) % self.kagome_Lx, (y - 1) % self.kagome_Ly),
            (x, (y - 1) % self.kagome_Ly),
            ((x - 1) % self.kagome_Lx, (y - 1) % self.kagome_Ly),
            ((x - 2) % self.kagome_Lx, y)
        ]

    def get_nn2_odd(self, x, y):
        """Return the second-neighbour coordinates of kagome site (x, y)
        when x is odd."""
        return [
            ((x - 2) % self.kagome_Lx, y),
            ((x - 3) % self.kagome_Lx, (y + 1) % self.kagome_Ly),
            ((x - 2) % self.kagome_Lx, (y + 1) % self.kagome_Ly),
            (x, (y + 1) % self.kagome_Ly),
            ((x + 1) % self.kagome_Lx, (y + 1) % self.kagome_Ly),
            ((x + 2) % self.kagome_Lx, y),
            ((x + 2) % self.kagome_Lx, (y - 1) % self.kagome_Ly),
            ((x + 1) % self.kagome_Lx, (y - 1) % self.kagome_Ly),
            (x, (y - 1) % self.kagome_Ly)
        ]

    def plot_all(self):
        """Set up the axes, draw the triangular lattice, and create the
        line artists that the animation updates each frame."""
        self.fig, self.ax = plt.subplots(figsize=(8, 8))
        lattice_X = self.lattice.coordinates_x
        lattice_Y = self.lattice.coordinates_y
        X_min, X_max = min(lattice_X) - 0.1, max(lattice_X) + 0.1
        Y_min, Y_max = min(lattice_Y) - 0.1, max(lattice_Y) + 0.1
        self.ax.set_xlim([X_min, X_max])
        self.ax.set_ylim([Y_min, Y_max])
        self.ax.set_xticklabels([])
        self.ax.set_yticklabels([])
        self.ax.set_aspect('equal')
        triang = tri.Triangulation(lattice_X, lattice_Y)
        self.ax.triplot(triang, color='#d5d5d5', lw=0.5)
        # Two artists: even- and odd-parity occupied sites.
        self.points = [self.ax.plot([], [], 'k.')[0],
                       self.ax.plot([], [], 'k.')[0]]
        if self.plot_surface:
            # Third artist: current growth points, drawn in red.
            self.points.append(self.ax.plot([], [], '.', color='#ff0000')[0])
        self.plot_points()

    def start_animation(self, filename=""):
        """Run the matplotlib animation; save it when `filename` is given,
        otherwise show it interactively."""
        # `frames` is always set in __init__; the fallback mirrors the
        # original defensive check (`has_key` was Python-2 only).
        if 'frames' in self.__dict__:
            frames = self.frames
        else:
            frames = 1000

        def init_func(*arg):
            return self.points

        ani = animation.FuncAnimation(self.fig, self.update, frames=frames,
                                      init_func=init_func,
                                      interval=self.interval,
                                      blit=True, repeat=False)
        if filename != "":
            ani.save(filename, codec="libx264", bitrate=-1, fps=30)
        else:
            plt.show()

    def plot_points(self):
        """Push the current occupation (and, optionally, the growth
        points) into the line artists and return them for blitting."""
        pos = np.where(self.occupied)
        pos_even = (pos[0][pos[0] % 2 == 0], pos[1][pos[0] % 2 == 0])
        pos_odd = (pos[0][pos[0] % 2 == 1], pos[1][pos[0] % 2 == 1])
        X, Y = self.kagome_X[pos_even], self.kagome_Y[pos_even]
        self.points[0].set_data(X, Y)
        X, Y = self.kagome_X[pos_odd], self.kagome_Y[pos_odd]
        self.points[1].set_data(X, Y)
        if self.plot_surface:
            # Dict views cannot be fed to np.array directly in Python 3.
            pos_x, pos_y = np.array(list(self.growing_points.keys())).T
            X, Y = self.kagome_X[pos_x, pos_y], self.kagome_Y[pos_x, pos_y]
            self.points[2].set_data(X, Y)
        return self.points

    def update(self, num=0):
        """Perform one growth step (called once per animation frame).

        Raises
        ------
        StopIteration
            When no candidate growth point remains (also used by
            `start` to terminate the headless loop).
        """
        if len(self.growing_points) == 0:
            print("no neighbors")
            raise StopIteration

        if self.pre_function is not None:
            self.pre_func_res.append(self.pre_function(self))

        # Draw one candidate with Boltzmann probabilities.
        positions = list(self.growing_points.keys())
        weights = np.array([self.growing_points[key] for key in positions])
        weights = weights / np.sum(weights)
        index = np.random.choice(len(positions), p=weights)
        x, y = positions[index]
        self.occupied[x, y] = True
        if self.record_networks:
            self.G.add_node('({}, {})'.format(x, y))
            even_or_odd = 'even' if x % 2 == 0 else 'odd'
            nn1 = getattr(self, 'get_nn1_' + even_or_odd)(x, y)
            for pos in nn1.values():
                if self.occupied[pos]:
                    self.G.add_edge(str(pos), '({}, {})'.format(x, y))
        # Order matters: prune stale candidates, register the new ones,
        # then drop the site that was just occupied.
        self.cleanup_growing_point()
        self.append_new_growing_point((x, y))
        del self.growing_points[(x, y)]

        if self.post_function is not None:
            self.post_func_res.append(self.post_function(self))
        if self.plot or self.save_video:
            return self.plot_points()
if __name__ == '__main__':
    # Demo run: grow a string from the centre of a 100x100 lattice.
    L = 100
    frames = 1000
    beta = 4.
    params = {
        'Lx': L,
        'Ly': L,
        'frames': frames,
        'beta': beta,
        'boundary': {'h': 'periodic', 'v': 'periodic'},
        # 'boundary': {'h': 'reflective', 'v': 'reflective'},
        'plot': True,
        'plot_surface': False,
        'record_networks': False,
        'interval': 1,
    }
    # Floor division: under Python 3 `L / 2` is a float and would fail
    # as a lattice index.
    main = InsideString(initial_state=[(L // 2, L // 2 - 1)], **params)
|
{"hexsha": "032990b677da93221e02d4aaa0223bb01f837bd5", "size": 17009, "ext": "py", "lang": "Python", "max_stars_repo_path": "triangular_lattice/growing_string_inside.py", "max_stars_repo_name": "ssh0/growing-string", "max_stars_repo_head_hexsha": "2e43916e91157dfb4253775149b35ec9d81ef14d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "triangular_lattice/growing_string_inside.py", "max_issues_repo_name": "ssh0/growing-string", "max_issues_repo_head_hexsha": "2e43916e91157dfb4253775149b35ec9d81ef14d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2016-04-14T08:15:28.000Z", "max_issues_repo_issues_event_max_datetime": "2016-04-27T02:57:13.000Z", "max_forks_repo_path": "triangular_lattice/growing_string_inside.py", "max_forks_repo_name": "ssh0/growing-string", "max_forks_repo_head_hexsha": "2e43916e91157dfb4253775149b35ec9d81ef14d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.1375545852, "max_line_length": 88, "alphanum_fraction": 0.4652830854, "include": true, "reason": "import numpy,import networkx", "num_tokens": 6419}
|
import os
from os.path import exists, join
import json
from utils import count_data
import argparse
import numpy as np
# Root directory for all dataset splits, read from the DATA environment
# variable.  NOTE(review): when DATA is unset we only print a warning here;
# DATA_DIR is then left undefined and main() fails later with a NameError.
try:
    DATA_DIR = os.environ['DATA']
except KeyError:
    print('please use environment variable to specify data directories')
def main(args):
    """Print sentence-count statistics for one dataset split.

    Reads every ``<i>.json`` file under ``DATA_DIR/args.split`` (each
    holding ``article`` and ``abstract`` sentence lists), then prints
    max/mean/std of the sentence counts and the share of documents
    longer than 400 sentences.

    Parameters
    ----------
    args: argparse.Namespace
        Must provide ``split``, the sub-directory of ``DATA_DIR`` to scan.
    """
    data_path = join(DATA_DIR, args.split)
    n_data = count_data(data_path)
    doc_sents_numbers = []
    sum_sents_numbers = []
    num_long_doc = 0
    for i in range(n_data):
        # str.format does no %-escaping, so the original '%%' printed two
        # literal percent signs; a single '%' is correct here.
        print('processing {}/{} ({:.2f}%)\r'.format(i, n_data, 100 * i / n_data),
              end='')
        # Use a context manager so the file handle is closed promptly
        # (the original `json.load(open(...))` leaked one per document).
        with open(join(data_path, "{}.json".format(i))) as json_file:
            js_obj = json.load(json_file)
        abstract = js_obj['abstract']
        article = js_obj['article']
        doc_sents_numbers.append(len(article))
        sum_sents_numbers.append(len(abstract))
        if len(article) > 400:
            num_long_doc += 1
    doc_sents_numbers = np.array(doc_sents_numbers)
    sum_sents_numbers = np.array(sum_sents_numbers)
    print()
    print("doc max: {}".format(doc_sents_numbers.max()))
    print("doc mean: {}".format(doc_sents_numbers.mean()))
    print("doc std: {}".format(doc_sents_numbers.std()))
    print()
    print("sum max: {}".format(sum_sents_numbers.max()))
    print("sum mean: {}".format(sum_sents_numbers.mean()))
    print("sum std: {}".format(sum_sents_numbers.std()))
    print()
    print("percent of long doc: {}".format(num_long_doc/n_data * 100))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Output statistics')
# choose metric to evaluate
parser.add_argument('--split', action='store', required=True,
help='directory of decoded summaries')
args = parser.parse_args()
main(args)
|
{"hexsha": "8ac51ca6a6ac7e2ca869a2836edf733a5b87db3b", "size": 1731, "ext": "py", "lang": "Python", "max_stars_repo_path": "compute_doc_len_stat.py", "max_stars_repo_name": "kenchan0226/AbsThenExtPublic", "max_stars_repo_head_hexsha": "567811d6c76fe51c2c368eeaca1761eb322db2a2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-08-10T02:31:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-08T01:12:25.000Z", "max_issues_repo_path": "compute_doc_len_stat.py", "max_issues_repo_name": "kenchan0226/abs-then-ext-public", "max_issues_repo_head_hexsha": "567811d6c76fe51c2c368eeaca1761eb322db2a2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "compute_doc_len_stat.py", "max_forks_repo_name": "kenchan0226/abs-then-ext-public", "max_forks_repo_head_hexsha": "567811d6c76fe51c2c368eeaca1761eb322db2a2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.3684210526, "max_line_length": 82, "alphanum_fraction": 0.6452917389, "include": true, "reason": "import numpy", "num_tokens": 409}
|
\documentclass[11pt]{article}
\usepackage[left=1in,right=1in,top=1in,bottom=1in]{geometry}
\usepackage{syntax}
\usepackage{multicol}
\usepackage{hyperref}
\usepackage{comment}
\newcommand{\sizet}{size\textunderscore{}t}
\title{MERCATOR Reference Manual, v0.9.8}
\begin{document}
\maketitle
\noindent
Copyright (C) 2021 Washington University in St. Louis. All rights
reserved.
\vspace{0.1in}
\noindent
MERCATOR is licensed under the Apache License, Version 2.0 (the
"License"); you may not use this software except in compliance with
the License. You may obtain a copy of the License at
\url{http://www.apache.org/licenses/LICENSE-2.0}
\noindent
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
\vspace{0.1in}
\noindent
Development of MERCATOR has been sponsored by NSF CISE awards
CNS-1500173 and CNS-1763503.
\vspace{0.1in}
\noindent
When using MERCATOR, please cite the following publications:
\begin{enumerate}
\item
S. V. Cole and J. Buhler, ``MERCATOR: A GPGPU Framework for Irregular
Streaming Applications,'' \textit{2017 International Conference on
High Performance Computing \& Simulation (HPCS)} 727-736, Genoa,
Italy, 2017.
\\
(This is the principal citation for MERCATOR.)
\item
T. Plano and J. Buhler. ``Scheduling irregular dataflow pipelines on
SIMD architectures.'' \textit{6th Wkshp. on Programming Models for SIMD/Vector
Processing} 1-9, San Diego, CA, 2020.
\\
(This paper describes the scheduling algorithm used by the runtime.)
%\item
%S. Timcheck and J. Buhler. ``Streaming computations with region-based
%state on SIMD architectures.'' \textit{13th Int'l Wkshp. on
% Programmability and Architectures for Heterogeneous Multicores} 1,
%Bologna, Italy, 2020.
\end{enumerate}
\newpage
\section{Overview and Key Concepts}
This page accidentally left blank.
\newpage
\section{Building MERCATOR}
Build requirements for MERCATOR include:
\begin{itemize}
\item CUDA 11.1 or higher
\item LLVM/Clang libraries v11 or higher (for type-checking)
\item GNU Flex 2.5.37 or higher (older versions might work)
\item GNU Bison 3.7 or higher
\item CMake 3.18 or higher
\end{itemize}
MERCATOR applications are known to run on devices with CUDA
architecture 5.2 or higher. C++14 support is required from the
compiler chain.
Assume the MERCATOR sources have been unpacked into directory
\texttt{source}. To build and install MERCATOR, do the following:
\begin{enumerate}
\item If needed, edit the user-configurable paths near the top of the file
\texttt{source/CMakeLists.txt}. In particular, edit the following:
\begin{itemize}
\item \texttt{CMAKE\_INSTALL\_PREFIX} -- the place where MERCATOR will be
installed
\item \texttt{LLVM\_PATH} -- path to the local LLVM installation
\end{itemize}
\item Create and enter a build directory separate from the source tree, e.g.
\begin{quote}
\texttt{mkdir build ; cd build}
\end{quote}
\item Initialize the CMake build files:
\begin{quote}
\texttt{cmake /path/to/source}
\end{quote}
\item Build MERCATOR:
\begin{quote}
\texttt{make}
\end{quote}
\item Install MERCATOR:
\begin{quote}
\texttt{make install}
\end{quote}
\end{enumerate}
You may wish to build the MERCATOR examples in the source tree as
well. The procedure to do this is similar to that used to build
MERCATOR itself. You can build using either an installed version of
MERCATOR or a version that was compiled but not yet installed.
\begin{enumerate}
\item If needed, edit the user-configurable paths near the top of the
file \texttt{source/examples/CMakeLists.txt}. In particular, edit
the following:
\begin{itemize}
\item \texttt{MERCATOR\_ROOT\_DIR} -- path to the MERCATOR installation,
if installed, or to the source tree, if not
\item \texttt{BUILDING\_IN\_TREE} --
set true in order to build using a compiled but not-yet installed
version of MERCATOR
\item \texttt{MERCATOR\_BUILD\_DIR} --
if \texttt{BUILDING\_IN\_TREE} is true, path to the build directory
for the not-yet-installed MERCATOR
\item \texttt{CUB\_ROOT\_DIR} -- path to the local installation of CUB
\end{itemize}
\item Create and enter a build directory separate from the source tree, e.g.
\begin{quote}
\texttt{mkdir build ; cd build}
\end{quote}
\item Initialize the CMake build files:
\begin{quote}
\texttt{cmake /path/to/source/examples}
\end{quote}
\item Build the examples:
\begin{quote}
\texttt{make}
\end{quote}
\end{enumerate}
\newpage
\section{Compiling a MERCATOR App}
The steps needed to build a MERCATOR app are as follows.
\begin{enumerate}
\item Write a \emph{specification file} describing the app's topology.
\item Compile the app with the MERCATOR compiler using the \texttt{-K}
option to produce a \emph{skeleton file} with methods to be filled
in by the user.
\item Fill in the skeleton's methods with valid CUDA code.
\item Write a host-side \emph{driver} (in C++ or CUDA) that
instantiates the app and runs it. The driver may be part of
a larger program and may instantiate and run multiple MERCATOR
apps.
\item Build the driver code together with the filled-in skeleton
code and runtime support files generated by the MERCATOR compiler.
\end{enumerate}
The format of a MERCATOR specification file is described in the next
section. Here, we describe the behavior of the MERCATOR
compiler and the set of files it produces.
\subsection{MERCATOR Compiler Inputs and Outputs}
The MERCATOR compiler reads one or more specification files and
produces outputs for each app specified in each file. If the file
specifies an app named, say, \texttt{App}, then:
\begin{itemize}
\item If the option \texttt{-K <filename>} is passed to the compiler,
it emits a skeleton file \texttt{<filename>}, which contains stubs
of the app's device-side methods to be filled in by the user.
\item If the \texttt{-K} flag is not passed, then the compiler emits
the following:
\begin{itemize}
\item a host-side header \texttt{App.cuh}, which should be included
by any host-side code that instantiates or references the app
(in particular, the host-side driver);
\item a host-side initialization source file \texttt{App_init.cu};
\item a device-side header \texttt{App_dev.cuh}, which is already
included by the skeleton code.
\end{itemize}
\end{itemize}
The CMake build system for Mercator automatically compiles the
host-side initialization code and device-side skeleton code into a
library that is linked against the host-side driver.
Compilation of both the device-side code and host-code that includes
the host-side header must be able to find the MERCATOR runtime. Be
sure to add the runtime directory of your MERCATOR installation to the
compiler's include path. No special libraries are needed beyond those
required to build CUDA applications.
\subsection{MERCATOR Compiler Command-Line Syntax}
The following command-line syntax is recognized by the MERCATOR
compiler. Pass the \texttt{-h} flag to the compiler to see a help
message.
\begin{quote}
\texttt{mercator [ \textit{options} ] \textit{specfile} [ \textit{specfile} ... ]}
\end{quote}
The following options are recognized by the MERCATOR compiler:
\begin{itemize}
\item \texttt{-a \textit{app}} \\
given spec file(s) defining multiple applications, generate output
for only the single specified app.
\item \texttt{-I \textit{path}} \\
add the specified path to the include path for finding files
mentioned by reference statements in a spec file. By default,
MERCATOR checks the system include path of the local LLVM
installation, the CUDA include path, and the directory where the
spec file is located.
\item \texttt{-K \textit{filename}} \\
emit a skeleton file named \textit{filename} with methods for
the user to fill in (default: emit runtime support code for each app)
\item \texttt{-o \textit{path}} \\
write output files to the specified path (defaults to current directory).
% NOT SHOWN: -q option
\item \texttt{-t \textit{\#}} \\
set the number of threads per block for each generated application
(default 128). This number must be fixed at the time the app is
compiled.
\item \texttt{-H \textit{\#}} \\
set the size of the device heap in megabytes (default 32 MB).
Values less than the default are not recommended and may result in
app launch failures. If several instances of the app may be
constructed at once, the heap size should be increased by a factor
of the number of instances. Additional space may be needed if the
user's own code makes use of the device heap.
\item \texttt{-S \textit{\#}} \\
set the size of the device stack in kilobytes (default 8 KB).
Values less than the default are not recommended and may result
in app launch failures.
\item \texttt{-D} \\
do not generate code for any apps, but emit dependencies for
each app to be generated. Dependencies have the form
\begin{quote}
\textit{sourcefiles} : \textit{specfile}
\end{quote}
where ``sourcefiles'' includes the host and device headers and the
initialization source file (but not the skeleton). One such line is
generated per app specified in the input.
\item \texttt{-v} \\
print MERCATOR version information.
\end{itemize}
\subsection{CMake Support for Building MERCATOR Applications}
MERCATOR provides some CMake recipes to simplify building and using
applications. These recipes can be included in your CMake build
system by including the file \texttt{mercator-rules.txt} in the
top-level MERCATOR install directory.
To use the MERCATOR recipes, you must define the variable
\texttt{MERCATOR_ROOT_DIR} in your \texttt{CMakeLists.txt} file to
point to the local Mercator installation. You can control the
compiler flags for your application using the standard CMake support
for CUDA, and you can set the \texttt{MERCATOR_FLAGS} variable to specify
flags to pass to the Mercator spec file compiler.
The \texttt{mercator-rules.txt} file defines two CMake functions:
\begin{itemize}
\item
\begin{verbatim}
add_mercator_app(TARGET <target>
SPECFILE <filename>
[APPNAME <appname>]
SOURCES <filename> [<filename> ...])
\end{verbatim}
Build a MERCATOR application from a specification file and one or more
user-provided sources (which should include the filled-in skeleton).
By default, the app name in the specification file is assumed to be
\texttt{<target>}; this name can be changed by specifying the optional
\texttt{APPNAME} parameter. CMake will construct a static library
\texttt{lib<target>.a} containing all the device-side code for the
application and will generate all the necessary support code and
headers as described above.
\item
\begin{verbatim}
add_mercator_executable(TARGET <exename>
APPS <app> [<app> ...]
SOURCES <filename> [<filename> ...])
\end{verbatim}
Build an executable that uses one or more MERCATOR applications.
The app names should be targets created using \texttt{add_mercator_app}.
The other sources may be arbitrary C++ or CUDA files. CMake will
ensure that the apps' headers and libraries are built before building
the executable, and that it is linked against the apps' libraries.
\end{itemize}
\newpage
\section{MERCATOR App Specification File Format}
This section describes the format of MERCATOR app specifications.
\subsection{Lexical Elements}
A MERCATOR app specification consists of lexical tokens optionally
separated by white space. The amount and location of white space between
tokens, including newlines, does not matter. Specifications may use
C++ style comments (e.g.\ ``\texttt{// comment}''); all text on a line
after the two-character sequence ``\texttt{//}'' is ignored.
\textit{Numbers} in a MERCATOR spec are unsigned decimal integers
consisting only of the digits 0 to 9.
MERCATOR specifications name a variety of entities using
\textit{identifiers}. A valid identifier is a string containing only
alphanumeric characters or the underscore symbol ``\texttt{_}'' and
not beginning with a number. Identifiers are case-sensitive.
Identifiers beginning with two underscores ``\texttt{__}'' are
reserved for use by the system.
MERCATOR reserves the following keywords, which are
case-\emph{in}sensitive. These keywords may not be used as
identifiers.
\begin{multicols}{4}
\begin{itemize}
\item \texttt{aggregate}
\item \texttt{allthreads}
\item \texttt{application}
\item \texttt{buffer}
\item \texttt{edge}
\item \texttt{enumerate}
\item \texttt{from}
\item \texttt{function}
\item \texttt{ilimit}
\item \texttt{mapping}
\item \texttt{module}
\item \texttt{node}
\item \texttt{nodeparam}
\item \texttt{nodestate}
\item \texttt{param}
\item \texttt{reference}
\item \texttt{source}
\item \texttt{sink}
\item \texttt{threadwidth}
\item \texttt{void}
\end{itemize}
\end{multicols}
MERCATOR specifications may sometimes need to refer to C++ types,
which are specified as \textit{typenames}. Anywhere a typename is
needed, it may be given as a \emph{type literal} enclosed in curly
braces ``\texttt{\{\}}'', within which any character other than a
newline is permitted and considered part of the type. For example,
the following are valid type literals:
\begin{itemize}
\item ``\texttt{\{int\}}''
\item ``\texttt{\{const unsigned int * const []\}}''
\item ``\texttt{\{Foo<Bar, Baz<Quux>, 3> *\}}''
\end{itemize}
Simple types whose names are either valid MERCATOR identifiers or
pointers to them may be specified without braces. For example, the
following types may be specified unbraced:
\begin{itemize}
\item \texttt{int}
\item \texttt{My_Type_Name}
\item \texttt{float *}
\item \texttt{My_Type_Name *}
\end{itemize}
\subsection{Grammar}
A MERCATOR specification file describes zero or more applications.
The file is divided into sections, each beginning with an
\emph{application statement} of the form
\begin{quote}
\texttt{application <identifier> ;}
\end{quote}
All statements after an application statement and before the next such
statement refer only to the named application, except that any
reference statements (see below) apply to every application specified
in the file.
An application specification declares one or more \emph{nodes}, each
of which has a \emph{module type} that specifies its input and output
properties. Nodes are connected into an application topology by
\emph{edges}. Node, module, and edge statements, as well as the
various other specialized statement types described below, may appear
in any order within an application section.
\subsubsection{Reference Statement}
All typenames mentioned in a specification file must be well-defined.
Typenames are interpreted by the LLVM Clang C++ compiler. To include
an external C++ or CUDA file that defines typenames used in the
specification, use a \textit{reference statement}:
\begin{quote}
\texttt{reference "<filename>" ;}
\end{quote}
\texttt{<filename>} is the name of a file that will be read by Clang
before attempting to resolve any typenames in the specification. CUDA
files are parsed in host mode, not device mode. Any number of
reference statements may be used; the contents of all referenced files
will be parsed in a single translation unit and used for type
checking.
Referenced files may be specified by absolute or relative pathnames.
Relative pathnames are assumed to be relative to either the current
working directory or a directory in the system include path (as
defined by the LLVM installation). The include path can be augmented
by passing additional directories to the MERCATOR compiler with
the \texttt{-I} option.
\subsubsection{Module Statement}
A \textit{module statement} names a module and specifies its type.
The following grammar describes the syntax for module statements:
\begin{grammar}
<module-stmt> ::= `module' <module-name> `:' <module-type> `;'
<module-name> ::= <identifier>
<module-type> ::= <input-type> `->' <output-types>
<input-type> ::= <typename>
<output-types> ::= void
\alt <channel-spec>
\alt <channel> [ `,' <channel> ... ]
<channel> ::= <channel-name> `<' <channel-spec> `>'
<channel-spec> ::= <typename> [ `:' <output-count> ]
<channel-name> ::= <identifier>
<output-count> ::= <number> [ `!' ]
\end{grammar}
A module has an input data type and zero or more output
\emph{channels} on which the module can emit data. A channel has a
name (optional if it is a module's only channel), an output data type,
and an optional \emph{output count} specifying the maximum number of
output items that can be generated on this channel for each input item
consumed by the module. The output count is a maximum value; the
module may produce fewer outputs (but may not produce more) than the
specified count for each input. If no output count is specified, it
is assumed to be 1, i.e.\ the module produces at most one output per
input. Adding the symbol ``\texttt{!}'' after an output count
indicates that the module produces \emph{exactly} this many outputs
for each input.
Output channel names must be unique within one module type but may be
duplicated across module types.
\subsubsection{Node Statement}
A \textit{node statement} specifies a node of the MERCATOR application
graph, along with its type. The following grammar describes the
syntax for node statements:
\begin{grammar}
<node-stmt> ::= `node' <node-name> `:' <node-type> `;'
<node-name> ::= <identifier>
<node-type> ::= <identifier>
\alt `sink' `<' <typename> `>'
\end{grammar}
A node's type is either the name of a module (specified in a module
statement) or a \emph{sink type}, which emits a data stream from the
application. Sink node types are parameterized by their data types,
which can be any valid typename.
A node assumes the input data type and output channel list of its
module. A node whose type is a sink type has the data type of the
sink as its input type and no output channels (since data sent to it
leaves the application).
Note that node and module names are in the same namespace. Hence, the
same name may not be used for both a module and a node within one
application.
\subsubsection{Source Statement}
Exactly one node of an application must be designated the \emph{source
node}. This node receives data from an external input stream.
The following grammar describes the syntax for source statements:
\begin{grammar}
<source-stmt> ::= `source' <node-name> [ `buffer' | `function' ] `;'
<node-name> ::= <identifier>
\end{grammar}
The source statement names the application's source node and
specifies the type of source from which it receives data. There are
three possible source types:
\begin{itemize}
\item If no qualifier is specified after the node name, the input
stream is assumed to be a list of contiguous integer values of type
\texttt{size_t}, which range from 0 to some maximum specified by the
host at runtime. The node's module must have input type
\texttt{size_t}, or the application will fail to type-check.
\item If the qualifier \texttt{buffer} is specified, the input stream
is assumed to be a GPU global memory buffer whose type matches the
input type of the node's module. See the next section for
information about the host-side interface for creating and
manipulating buffers.
\item If the qualifier \texttt{function} is specified, the input
stream is assumed to be generated by a user-supplied function. In
this case, the following function stubs are emitted in the skeleton
file as methods of the app's class and must be filled in by the
user:
\begin{itemize}
\item \texttt{EltT get(size_t idx) const} \\
Given an integer value, return a value of a type matching the input
type of the source node. If this type is \texttt{T}, then
\texttt{get()} returns either type \texttt{T} (if \texttt{T} is
\emph{scalar}, i.e., an integer, floating-point, boolean, or pointer
type) or \texttt{T\&} (if \texttt{T} is not scalar.)
The \texttt{get} function is passed integers between 0 and some
maximum value specified by the host at run-time. It is called
multithreaded, so different threads may receive different
integers and may produce different values. There is no
guarantee which value(s) will be passed to any call, so
\texttt{get()} must be able to map an integer statelessly to the
corresponding input value for the source node.
\item \texttt{void init()} \\
Called once at the start of a Mercator app's execution, before
any calls to \texttt{get()}.
\item \texttt{void cleanup()} \\
Called once at the end of a Mercator app's execution, after
any calls to \texttt{get()}.
\end{itemize}
\end{itemize}
All three of these functions have access to application-level
parameters using the interface described below.
\subsubsection{Edge Statement}
An \emph{edge statement} specifies a connection between two nodes.
The following grammar describes the syntax for edge statements:
\begin{grammar}
<edge-stmt> ::= `edge' <node-name> [`::' <channel-name>] `->' <node-name> `;'
<node-name> ::= <identifier>
<channel-name> ::= <identifier>
\end{grammar}
An edge statement connects a specified output channel of an
\emph{upstream} node to the input of a \emph{downstream} node. If
the upstream node has only a single output channel, the channel
name is optional; the node's unique output channel is assumed.
The two endpoints of an edge must have compatible types; more
specifically they must be equivalent C++ types up to aliasing (no
automatic conversions are applied). It is an error to specify an edge
with a sink node as the upstream endpoint or the source node as a
downstream endpoint.
\subsection{Topological Properties of Applications}
Not every possible graph structure that can be specified using node
and edge statements is a valid MERCATOR application. Valid application
graphs have the following properties:
\begin{enumerate}
\item An application has exactly one source node.
\item Every node must be reachable from the source.
\item No node may have two distinct incoming edges; hence,
applications have tree-structured topologies.
% unless one of them
% is part of a directed cycle involving that node.
%\item No node may be part of more than one directed cycle.
%\item Every output channel that is part of a directed cycle must
% produce no more than one output per input received by its node.
\end{enumerate}
\begin{comment}
The last three properties permit simple directed cycles in an
application graph but forbid more complex cycle structures and
arbitrary directed acyclic graphs. Effectively, an application must
be a connected tree rooted at its source node, except that back edges
from a node to one of its ancestors in the tree are permitted.
However, if there is a back edge from a node $v$ to its ancestor $u$,
no other back edge may originate from or terminate at any node on the
path from $u$ to $v$ (inclusive).
It is permissible to define a module type with no nodes of that type,
or to leave an output channel of a node unconnected to an edge. In the
former case, the module type is not emitted as part of the generated
application; in the latter, outputs sent to the channel will be
silently discarded. The MERCATOR compiler will generate warnings in
such cases.
\paragraph*{Rationale for Topological Restrictions}
Edges in MERCATOR application graphs are implemented by finite,
fixed-sized queues. Without careful reasoning about the amount of
space available in each edge's queue, the runtime system might
schedule the execution of nodes in a way that leads to \emph{deadlock}
-- a condition in which at least one node has queued inputs but no
node has room for its outputs on its downstream edges. The
topological restrictions on application graphs ensure that relatively
simple, efficient scheduling rules suffice to prevent deadlock for all
valid topologies. Future versions of MERCATOR might relax these
restrictions if workable scheduling rules are found that prevent
deadlock for more general graph topologies.
The restriction that each channel in a cycle must produce at most one
output per input ensures that one item entering a cycle at a node $v$
cannot result in two or more items appearing on the back edge into
$v$. If such amplification of inputs were permitted, it could again
lead to deadlock with fixed-sized queues.
\end{comment}
\subsubsection{Additional Properties of Modules}
MERCATOR module types have certain additional properties that can be
specified in optional statements separate from the module type
declaration.
\paragraph*{Input Limit Statement}
An \emph{input limit statement} limits the number of threads that
may concurrently receive inputs in a MERCATOR module's \texttt{run()}
function. This statement has the form
\begin{quote}
\texttt{ilimit <module-name> <number> ;}
\end{quote}
which states that a call to \texttt{run()} for the named module will
execute with inputs in at most the specified number of threads. By
default, the number of threads with inputs can be as large as the
GPU block size.
Input limits are useful when each input concurrently processed by
a module consumes scarce resources such as GPU shared memory. It
may be necessary to limit the number of inputs processed to avoid
exhausting such resources.
\begin{comment}
\paragraph*{Mapping Statement}
A \emph{mapping statement} changes the mapping of input items to
threads for a particular module's \texttt{run()} function. It is
possible to specify that multiple GPU threads should cooperatively
process an input item, or that multiple input items should be
processed sequentially by each thread.
The mapping statement has the form
\begin{quote}
\texttt{mapping <module-name> <nelts> :~<nthreads> ;}
\end{quote}
The parameter \textit{nelts} specifies the maximum number of input items
delivered to each group of GPU threads, while \textit{nthreads} specifies the
size of a group. For example, a mapping specification
``\texttt{2:4}'' would deliver up to two inputs to each group of four
consecutive GPU threads. All threads in each group receive the same
two inputs; the first thread in the group is responsible for pushing
any outputs from the group to downstream channels.
\emph{At this time, specifying more than one input item per thread is not
supported.} A future version of MERCATOR may lift this restriction.
However, thread group sizes $> 1$ are permitted. If the number of threads
in a block is not a multiple of the group size, the residual threads that
are not part of a full group will not receive inputs.
By default, each active thread in a call to \texttt{run()} receives
one input; that is, the default mapping is ``\texttt{1:1}''.
\end{comment}
\paragraph*{AllThreads Statement}
An \emph{allthreads statement} affects the behavior of a module's
\texttt{run()} function. The form of this statement is
\begin{quote}
\texttt{allthreads <module-name> ;}
\end{quote}
By default, if a call to a module's \texttt{run()} has enough inputs
to supply $n$ GPU threads with work, the function will be called with
only these $n$ threads active. $n$ may be less than the GPU block
size and will vary at runtime. This arrangement is simple and
convenient if each thread processes its input(s) independently of all
other threads.
However, more advanced GPU usage may require that all threads in a GPU
block be active in each call to \texttt{run()}. In particular,
cooperative behaviors spanning multiple GPU threads, which require a
call to \texttt{__syncthreads()}, are not safe unless every thread in
the block eventually reaches the synchronization point. In such
cases, an allthreads statement instructs MERCATOR to call
\texttt{run()} with all threads active, regardless of the number of
inputs to be processed. Running with all threads also eliminates
certain run-time overheads and so allows a Mercator app to run faster.
For modules designated to run with all threads, \texttt{run()} takes
an extra argument, which is the number of threads (starting from 0)
that actually have inputs. For such modules, the \texttt{push()}
function called to emit output from \texttt{run()} \textbf{must be
called with all threads} and takes an extra per-thread predicate
argument to indicate whether the thread is passing it a valid output
value.
\subsubsection{Application, Module, and Node Parameters}
MERCATOR applications can be configured at runtime by setting
\emph{parameters} whose values are accessible by modules running on
the GPU device. Parameters can be read and written from the host
before each run of a MERCATOR application but are unchanging and
read-only from the GPU during the application's execution.
There are three kinds of parameters that can be defined for a
MERCATOR application. \emph{App-wide} parameters are shared by
all modules in an application. \emph{Module-wide} parameters are
shared by all nodes of a given module type. Finally, \emph{Node}
parameters are defined for all nodes of a module type but may have
different values for each node.
App-wide and module-wide parameters are defined using one or more
\textit{parameter statements} of the following form:
\begin{grammar}
<param-stmt> ::= `param' <param-name> `:' <param-type> `;'
<param-name> ::= [ <module-name> `::' ] <identifier>
<param-type> ::= <typename>
\end{grammar}
A parameter statement names a parameter and specifies its data type.
The parameter name may be a bare identifier, in which case it is
app-wide, or it may be scoped to a particular module, in which case
it is module-wide.
Node parameters are defined using the analogous \textit{node parameter
statement} of the following form:
\begin{grammar}
<nodeparam-stmt> ::= `nodeparam' <param-name> `:' <param-type> `;'
<param-name> ::= <module-name> `::' <identifier>
<param-type> ::= <typename>
\end{grammar}
Other than using a different keyword, the main difference between
parameter and node parameter statements is that the latter \emph{must}
be scoped to a particular module. Each node of the module type will
have a separate copy of the named parameter whose value can be set
independently from the host.
\subsubsection{Node State}
Unlike parameters, which are initialized on the host CPU and are
read-only on the GPU, \emph{state} variables are initialized on the
GPU and are read-write. They are not accessible from the host at all.
State variables are always defined per-node.
Node state is defined using a \textit{node state statement} as follows:
\begin{grammar}
<nodestate-stmt> ::= `nodestate' <var-name> `:' <var-type> `;'
<var-name> ::= <module-name> `::' <identifier>
<var-type> ::= <typename>
\end{grammar}
\paragraph*{Rationale for State}
State variables make sense only if they can be updated in a
sequentially consistent fashion. If a state variable were shared
across multiple nodes, there would be no way to guarantee the order
in which different nodes update the variable, since the order in
which nodes are executed varies dynamically at runtime. Hence,
MERCATOR does not define per-module or per-application state.
In contrast, for applications running within a single GPU block, there
is a consistent notion of ``stream order.'' Edges do not reorder
their inputs, and items earlier in a node's input queue are always
assigned to lower-numbered threads within a single call to
\texttt{run()}. Hence, a single node always processes its inputs in
stream order, and it can therefore maintain a state variable's value
to reflect the state after processing some prefix of the stream. In
practice, a MERCATOR application divides its input stream across GPU
blocks in an unpredictable fashion, so stream order is meaningful only
with respect to a single \emph{aggregate}, whose elements are
guaranteed to be processed within a single GPU block.
\begin{comment}
Moreover, for applications with cycles, MERCATOR does not guarantee
that items that enter a cycle first will exit the cycle first, since
any one item may go around the cycle a variable number of times.
Waiting for earlier items to exit the cycle before accepting later
items could have a serious negative impact on thread occupancy, since
one long-lived item would prevent any later items from being
processed. A somewhat performance-preserving solution would be to
reorder items into stream order as they exit the cycle, using a
reorder buffer.
\end{comment}
\newpage
\section{Host-Side App Interface}
This section describes how C++ and CUDA host-side code can instantiate
and use MERCATOR applications.
\subsection{A Usage Example}
The following example shows a C++ source file that instantiates and runs
a MERCATOR application named \texttt{MyApp}. Suppose that \texttt{MyApp}
was compiled from the following specification:
\begin{verbatim}
application MyApp;
Module Foo : int -> out<int> ;
Node foo : Foo ;
Node snk : Sink<int> ;
Source foo buffer;
Edge foo -> snk;
Param y : float;
Param Foo::x : int;
\end{verbatim}
The following example, which will be explained fully below, shows how
to create and use an instance of the \texttt{MyApp} application:
\begin{verbatim}
#include "MyApp.cuh"
int myFunction()
{
// host-side buffers
int *input = new int [100];
int *output = new int [100];
Mercator::Buffer<int> ib(100);
Mercator::Buffer<int> ob(100);
MyApp myApp;
myApp.getParams()->y = 2.0;
myApp.Foo.getParams()->x = 3;
myApp.setSource(ib);
myApp.snk.setSink(ob);
// generate some input data
for (unsigned int j = 0; j < 100; j++)
input[j] = j;
ib.set(input, 100);
myApp.run();
ob.get(output, ob.size());
// print any results
for (unsigned int j = 0; j < ob.size(); j++)
cout << output[j];
}
\end{verbatim}
\subsection{Version Information}
The following values are defined in any source file that includes an
app's host-side header. They are also accessible in the device-side
stub code.
\begin{itemize}
\item \texttt{MERCATOR_MAJOR} -- major version of MERCATOR
\item \texttt{MERCATOR_MINOR} -- minor version of MERCATOR
\item \texttt{MERCATOR_PATCHLEVEL} -- patch level of MERCATOR
\end{itemize}
API changes will result in an increment to the major and/or minor
versions. Patch level changes are intended to be source-compatible.
\subsection{Instantiating a MERCATOR app}
To instantiate a MERCATOR app, the user must do two things: include
the generated header file that defines the host-side app interface,
and declare an object of the app's type. The host-side app interface
is defined in the CUDA header file with the same name as the app
itself (in this case, \texttt{MyApp.cuh}). The corresponding
device-side app header (whose name ends in \texttt{_dev.cuh}) should
\emph{not} be included in host-side code.
% Note: the host-side header could be a plain old .h file, except that
% it currently includes the Source and Sink headers, which hold
% template classes containing CUDA code. Defining these classes without
% exposing the CUDAism to the user seems challenging -- we'd need to
% have a CUDA-free pure virtual interface (still parameterized by T)
% and an implementation class, not exposed to the user, that has
% all the stuff in the existing classes (and generates the key pointer
% passed off to the device, which would need to be opaque to the user).
A MERCATOR application is instantiated by creating an object of its
type. The app object may simply be declared, as in the example, and
need not be allocated with \texttt{new}. The app constructor takes
two optional arguments:
\begin{quote}
\texttt{App(cudaStream_t stream = 0, int deviceId = -1)}
\end{quote}
the first argument is the CUDA stream in which to run the app, and
the second is the CUDA device to which to bind it. If the stream is
not specified, the default stream (0) is used. If the device is not
specified, the current CUDA device will be used.
The stream and device settings for an app are used any time it is run.
The device setting is immutable once the app is constructed, because
the app maintains state on the device even between runs. The stream
of an app may be changed at any time by calling the method
\begin{quote}
\texttt{bindToStream(cudaStream_t stream)}
\end{quote}
This call will block until all pending operations on the app's
previous stream are complete, then switch to the new stream.
\subsection{Querying App Properties}
Presently, the only property of an app that can be queried from the
host is the number of GPU blocks that it will use to execute. This
property can be accessed by the following function:
\begin{quote}
\texttt{int getNBlocks() const}
\end{quote}
This property is set when an app is instantiated and remains
constant for a given instance.
% FOR LATER -- query bound device and maybe stream?
% also perhaps heap and stack size?
% we may want to rethink the interface if we are
% going to add a lot of accessors, since each
% one currently needs its own function in the codegen'd class
\subsection{Setting the App's Parameters}
Before running a MERCATOR app, the user must set its app-level,
module-level, and node-level parameters. The application object
provides type-safe interfaces for parameter setting.
For each module and each node declared in the application, an object
with the same name as the module/node is defined as a public member of
the application's class. Other than sink nodes, each object provides
a method \texttt{getParams()} that returns a pointer to a writable
\emph{parameter structure}, which contains an appropriately typed
member for each parameter associated with the module/node.
Module-level parameters are accessed through the object with the same
name as the module, while node-level parameters are accessed through
the object with the same name as the node. App-level parameters are
accessed using the \texttt{getParams()} method of the entire app
object.
\textit{Note}: the type of the parameter structure returned from
\texttt{getParams()} is different for every app, module, and node
object. If the user wishes to store this pointer locally (e.g.\ to
set several parameters of a module all at once), a variable of type
\texttt{auto} should be used to hold the pointer.
\subsection{Input and Output}
Data is exchanged between the host and a MERCATOR application using
typed objects. There are three ways to specify input to an
application: as a range of consecutive integers, as a \textit{buffer},
or as a user-defined device-side function. Output is always sent to a
buffer.
\subsubsection{Specifying Inputs and Outputs}
The \texttt{Source} statement in an app's specification indicates
which type of input it takes, as described in the previous section.
\begin{itemize}
\item If the source is the default (a range of integers starting at 0)
or is defined by a device-side function (which takes values from
such a range as its argument), the app needs to know the number of
input values to process. This quantity is specified by calling
\texttt{app.setNInputs(<N>)}, where \texttt{<N>} is a value of
type \texttt{size_t}.
\item If the source is a buffer, it is specified by calling
\texttt{app.setSource(<B>)}, where \texttt{<B>} is a Buffer object
whose type matches that of the source node's input.
\end{itemize}
An app may have one or more sink nodes of various types. Each sink
node \texttt{node} may be associated with a buffer by calling
\texttt{node.setSink(<B>)}, where \texttt{<B>} is a Buffer object
whose type matches that of the sink node's input. Note that sinks are
set at the node level, while the source is set at the app level
because only one source may exist per app.
Any associations set for an application's source and sinks, like
changes to its parameters, take effect next time the application is
run.
\subsubsection{Buffers}
A MERCATOR buffer, which has type \texttt{Buffer<T>}, is a handle for a
piece of device-side memory that can hold up to a specified number of
items of type \texttt{T}. Buffer objects can be read and written
by user code explicitly and can be used as input or output of
an application.
A buffer's constructor takes a single argument, which is the maximum
number of items that the buffer will hold (its \textit{capacity}). A
buffer can also be built via copy constructor or \texttt{operator=}
from an existing buffer, which makes a copy of this buffer.
Buffers support the following basic methods:
\begin{itemize}
\item \texttt{\sizet{} capacity() const} \\
Return the buffer's capacity.
\item \texttt{\sizet{} size() const} \\
Return the number of items actually present in the buffer.
\item \texttt{void get(T *p, \sizet{} n, \sizet{} offset = 0) const} \\
Copy $n$ items from the buffer to host memory $p$, starting at
a specified offset in the buffer.
\item \texttt{void set(const T *p, \sizet{} n)} \\
Copy $n$ items pointed to by $p$ to the buffer, and set its size to $n$.
The old size and content of the buffer is lost.
\item \texttt{void copy(const Buffer<T> *other, \sizet{} n)} \\
Copy $n$ items from the buffer ``other'' to the current buffer, and set
its size to $n$. The old size and content of the buffer is lost.
This call is done without touching the host's memory.
\item \texttt{void clear()} \\
Reset the buffer's size to 0.
\end{itemize}
The last four operations are \emph{synchronous} -- they do not return
control to the calling host thread until all prior operations on the
device are complete. Buffers also support asynchronous variants of these
operations, all of which take a CUDA stream as an optional argument:
\begin{itemize}
\item \texttt{void getAsync(T *p, \sizet{} n, \sizet{} offset = 0, cudaStream_t stream = 0) const}
\item \texttt{void setAsync(const T *p, \sizet{} n, cudaStream_t stream = 0)}
\item \texttt{void copyAsync(const Buffer<T> *other, \sizet{} n, cudaStream_t stream = 0)}
\item \texttt{void clearAsync(cudaStream_t stream = 0)}
\end{itemize}
All these functions are executed asynchronously with respect to the
host. They do not synchronize the device, but each operation will
execute \emph{only} after all previous operations in the same stream
have completed. Additionally, if the host pointer for
\texttt{getAsync} or \texttt{setAsync} points to page-locked memory
allocated with \texttt{cudaMallocHost()}, then these operations may
overlap with operations in another stream.
\textit{Warning}: buffers are always allocated on the current GPU
device. It is an error to use a buffer allocated on one device as
input or output to an app bound to a different device. In such cases,
use the \texttt{copy} function to copy the buffer's data to the
correct device first.
\subsection{Running an Application}
Once all parameters of a MERCATOR application, including its source
and sink associations, have been set, the app may be executed on the
device by calling one of the application object's \textit{run methods}.
When an application runs, all values associated with its source are
passed to the application as its input stream. Source buffers are
\emph{not modified} and may be reused in subsequent runs. Any results
from the application's execution are \emph{appended} to the end of the
buffers designated as its sinks, which must have sufficient space to
receive them. Existing data in the sinks' buffers is not overwritten.
MERCATOR will use all available processors of the active device to
execute an application. \emph{The order of outputs from a sink is not
guaranteed to match that of the corresponding inputs} because
multiple GPU blocks asynchronously process different segments of the
input stream and may write their results to the common output sink out
of order. A future version of MERCATOR will provide global ordering
guarantees for the output stream.
\subsubsection{Synchronous vs.\ Asynchronous Runs}
An app may be run synchronously using the \texttt{run()} method,
or asynchronously, using the \texttt{runAsync()} method.
A call to \texttt{run()} will not return control to the host CPU
thread until the app (as well as any previous operations on its
stream) has finished. In contrast, a call to \texttt{runAsync()} will
issue commands to launch an app but may return before it is finished.
The host CPU can then perform work in parallel with the app. To
ensure that the app has finished its run, the host CPU thread should
call the app's \texttt{join()} method, which blocks until all pending
operations in the app's stream are finished running.
If one is mixing MERCATOR apps with other CUDA functions, a call to
\texttt{join()} is equivalent to calling
\texttt{cudaStreamSynchronize()} on the app's stream.
\paragraph*{Safety with Asynchronous Operations}
App runs and calls to get, set, copy, or clear buffers are executed
sequentially within a single CUDA stream. In particular, the
asynchronous versions of these operations will execute sequentially in
the order that they are called, and there is no need to synchronize
the device between calls. For example, if an app is associated
with stream \texttt{str}, one may safely say
\begin{verbatim}
app.setSource(buffer);
buffer.setAsync(hostPointer1, n1, str);
app.runAsync();
buffer.setAsync(hostPointer2, n2, str);
app.runAsync();
join();
\end{verbatim}
The buffer will not be overwritten with the contents of
\texttt{hostPointer2} until after the first run is complete.
It is safe to modify the parameters of an app, including changing the
input and output specifications using \texttt{setNInputs},
\texttt{setSource}, and \texttt{setSink}, after calling
\texttt{runAsync()} but before calling \texttt{join()}. Any such
changes will take effect with the next call to \texttt{run()} or
\texttt{runAsync()}. For example, one may safely say
\begin{verbatim}
app.setSource(buffer1);
app.runAsync();
app.setSource(buffer2);
app.runAsync();
join();
\end{verbatim}
The change of the source buffer to \texttt{buffer2} will not take
effect until the second run.
The principal hazard in asynchronous execution is that the host should
not read or write data that is actively being updated by or
transferred to or from the device. In particular, once
\texttt{getAsync()} is called on a buffer, the destination data is not
guaranteed to be in its final state until after the next
\texttt{join()} (or other stream-synchronizing operation). Similarly,
once \texttt{setAsync()} or \texttt{clearAsync()} is called on a
buffer, or a \texttt{runAsync()} call is issued that uses the buffer
as an output sink, the size of the buffer as seen by the host is not
guaranteed to be accurate until after the next \texttt{join()}.
Moreover, the source data given to a \texttt{setAsync()} call should
not be modified by the host until after the next \texttt{join()}.
\paragraph*{Limits to Concurrency}
Calls to \texttt{get}, \texttt{set}, \texttt{copy}, or \texttt{clear}
on a buffer object are synchronous -- they do not return until the
operation completes and effectively synchronize the device, since they
execute in the default stream. To avoid these behaviors, use the
asynchronous versions of the operations.
Constructing or destroying a MERCATOR app or a buffer object
allocates/frees CUDA device memory and page-locked host memory, which
has the effect of synchronizing the device. (The same is true of any
call to the CUDA runtime that allocates or frees memory.) For best
performance, create the app and complete all allocations before any
runs of the app, and destroy them after all runs have finished.
It is not possible to use the same instance of an app to launch
multiple runs on different streams at once, since switching an app to
a new stream first synchronizes the old stream. If it is desired to
run the app in different streams concurrently, a separate instance
should be created for each stream.
\newpage
\section{Device-Side App Interface}
The user specifies the behavior of a MERCATOR application by filling
in device function stubs. Compiling an application specification with
the \texttt{-K} option produces a \textit{skeleton file} containing
these stubs. This section describes the stubs produced and how they
function as part of an application.
\subsection{Module Run Functions}
The most important functions supplied by the user are the \textit{run
functions} for each module in an application. The signature of a
run function is as follows:
\begin{quote}
\texttt{void <Module>::run(const T \&item)}
\end{quote}
where \texttt{<Module>} is the module type and \texttt{T} is the
module's input type. The module type may be a somewhat complex
template instantiation, but this type is not relevant to the
application developer -- it's enough to recognize the name of the
module in it.
Each time the run function is called, each active thread is given an
item to process. The number of items, and hence the number of active
threads, may vary from call to call.
If a module is declared to run with all threads active (via an
allthreads statement), the run function's prototype becomes
\begin{quote}
\texttt{void <Module>::run(const T \&item, unsigned int nInputs)}
\end{quote}
where \texttt{nInputs} is the number of threads (starting with thread
index 0) that have valid inputs.
\subsubsection{Emitting Output from a Module}
To produce output from a module on one of its channels, call its
\textit{push function} from inside the run function. The signature
of the push function is as follows:
\begin{quote}
\texttt{void push(const DST \&item, unsigned int channelIdx = 0)}
\end{quote}
where \texttt{DST} is the channel's output type (which may differ from
the input type of the module). Channels are indexed using an
enumerated type in a namespace \texttt{Out} defined within each
module. For example, if a module has channels named \texttt{acc} and
\texttt{rej}, the corresponding channel indices are \texttt{Out::acc}
and \texttt{Out::rej}. If no channel index is specified, the first
output channel specified for the module in the specification file will
be used; this default is particularly convenient for modules with a
single output channel.
For modules \emph{not} declared to be ``allthreads,'' the push function
may be called with any subset of threads. For allthreads modules,
however, it must be called with \emph{all} threads, and its prototype
becomes
\begin{quote}
\texttt{void push(const DST \&item, bool pred, unsigned int channelIdx = 0)}
\end{quote}
where \texttt{pred} is set true if the thread has an item to push.
\emph{It is essential that a module not push more outputs per thread
onto a channel than the maximum number given in the channel's
specification.} Pushing more outputs than this value will result in
undefined behavior. Pushing fewer outputs than the maximum is
perfectly fine unless the channel is specified to take a fixed number
of outputs per input with the ``\texttt{!}'' marker. For such
channels, pushing other than the specified number of outputs per input
may result in undefined behavior.
\subsubsection{Accessing Parameters and State}
Within a run function, the user may need to access an application's
parameters or the module's per-node state. Parameters are
\emph{read-only}, while state is writable.
App-wide parameters are accessible via a function
\texttt{getAppParams}, which returns a pointer to an app-wide
parameter structure identical to that used on the host (except that it
is read-only). Module-wide \emph{and} per-node parameters are both
accessed by a function \texttt{getParams} which returns a pointer to a
structure containing both kinds of parameters. All are accessed
exactly as on the host.
Mutable per-node state variables of a module are accessed via the
function \texttt{getState}, which returns a pointer to a state
structure with typed members as declared in the app's specification.
The user should take care that concurrent reads and writes to state
variables are correctly arbitrated, e.g.\ by using atomic updates or
by ensuring that only one thread per node does the writing.
\subsubsection{Utility Functions}
The following utility functions are available within a module's
\texttt{run} function.
\begin{itemize}
\item \texttt{unsigned int getNumActiveThreads() const} \\
Get the \emph{maximum} number of threads with which the run
function could ever be called. The actual number of active
threads in any given call may be less than this value. The
value will be equal to the number of threads per block unless
a lower input limit was specified for the module.
%\item \texttt{unsigned int getThreadGroupSize() const} \\
% Get the number of threads in each thread group.
%\item \texttt{bool isThreadGroupLeader() const} \\
% True iff a thread is the first thread in its group.
\end{itemize}
\subsection{Other Stub Functions: \texttt{init} and \texttt{cleanup}}
For each module with mutable per-node state, the skeleton defines two
stubs, \texttt{void init()} and \texttt{void cleanup()}. The first is
called once at the beginning of each run of the application, before
any module's run function is called. The second is called once at the
end of each run, after the last call to any module's run
function. These functions are intended to permit initialization and
finalization of the module's mutable per-node state.
The \texttt{init} and \texttt{cleanup} functions are always called
with all threads in the block active. The utility functions described
in the previous section, as well as \texttt{getParams},
\texttt{getAppParams}, and \texttt{getState}, are available within
these functions as well.
\emph{Note}: calling push from \texttt{init} or \texttt{cleanup} is
not permitted and may result in undefined behavior.
\end{document}
|
{"hexsha": "1c5ddfebea9973b4b2b551f4400258d5a4c9b0ea", "size": 54069, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "doc/mercator-manual.tex", "max_stars_repo_name": "jdbuhler/mercator", "max_stars_repo_head_hexsha": "f61f2185bc9cdb96838ddd9700464cebed21b5d2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2018-09-12T03:56:30.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-01T17:17:53.000Z", "max_issues_repo_path": "doc/mercator-manual.tex", "max_issues_repo_name": "jdbuhler/mercator", "max_issues_repo_head_hexsha": "f61f2185bc9cdb96838ddd9700464cebed21b5d2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2019-09-20T15:53:20.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-16T12:55:53.000Z", "max_forks_repo_path": "doc/mercator-manual.tex", "max_forks_repo_name": "jdbuhler/mercator", "max_forks_repo_head_hexsha": "f61f2185bc9cdb96838ddd9700464cebed21b5d2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.6207142857, "max_line_length": 98, "alphanum_fraction": 0.7641347168, "num_tokens": 13363}
|
import math
import numpy as np
import pandas as pd
def series_rolling(x, window, stride):
    """Yield successive sliding windows over a pandas Series.

    Parameters
    ----------
    x : pd.Series
        Record to slide over. Must have a ``freq``-carrying DatetimeIndex
        when `window`/`stride` are offset-like.
    window : int, float, or offset-like
        Window length. Numeric values are taken directly as element counts;
        otherwise the length is derived from the index frequency.
    stride : int, float, or offset-like
        Step between consecutive windows, same convention as `window`.
        A stride that rounds to 0 is bumped to 1 so iteration advances.

    Yields
    ------
    pd.Series
        Consecutive slices of `x` of length ``window_size``.
    """
    if not isinstance(window, (int, float)) or not isinstance(stride, (int, float)):
        # Offset-like window/stride: convert to element counts via the
        # index frequency.
        window_size = math.floor(window / x.index.freq)
        stride_size = math.floor(stride / x.index.freq)
    else:
        # Numeric window/stride are already element counts.
        # (Bug fix: previously this case left window_size/stride_size
        # undefined, raising NameError on the first yield.)
        window_size = int(window)
        stride_size = int(stride)
    end_index = x.shape[0] - window_size
    if stride_size == 0:
        stride_size = 1
    for i in np.arange(0, end_index, stride_size):
        yield x.iloc[i: i + window_size]
def pd_rolling(x, window, stride, resolution=None):
    """Yield successive sliding windows over a pandas object.

    Parameters
    ----------
    x : pd.Series or pd.DataFrame
        Record to slide over. Must have a ``freq``-carrying DatetimeIndex
        when `window`/`stride` are offset strings.
    window, stride : int, float, or pd.Timedelta-convertible
        Window length and step. Offset-like values are converted to element
        counts via the index frequency; numeric values are divided by
        `resolution` to obtain element counts.
    resolution : int or float, optional
        Index spacing for real-valued indices; required when `window` and
        `stride` are numeric.

    Yields
    ------
    pd.Series or pd.DataFrame
        Consecutive slices of `x` of length ``window_size``.

    Raises
    ------
    ValueError
        If offset-like inputs cannot be parsed as Timedeltas, or if numeric
        inputs are given without `resolution`.
    """
    # For DatetimeIndex
    if not isinstance(window, (int, float)) or not isinstance(stride, (int, float)):
        try:
            window = pd.Timedelta(window)
            stride = pd.Timedelta(stride)
        except ValueError:
            # (Bug fix: message previously read "real oroffset strings".)
            raise ValueError("Input window & stride must either be real or"
                             " offset strings")
        window_size = math.floor(window / x.index.freq)
        stride_size = math.floor(stride / x.index.freq)
    # For real-valued index
    elif resolution:
        window_size = window // resolution
        stride_size = stride // resolution
    else:
        # (Bug fix: previously this case fell through with window_size
        # undefined, raising NameError instead of a helpful error.)
        raise ValueError("Numeric window & stride require a resolution")
    end_index = x.shape[0] - window_size
    if stride_size == 0:
        stride_size = 1
    for i in np.arange(0, end_index, stride_size):
        yield x.iloc[i: i + window_size]
def compute_spike_mask(x, window, stride, factor=3.5):
    '''Create a mask on x where spikes are True based on running mean and std.

    A sample is marked as a spike when it deviates from its window's mean
    by more than ``factor`` running standard deviations. Works on both
    pd.Series and pd.DataFrame.'''
    # Start from an all-False boolean mask shaped like the input.
    mask = x.astype(bool)
    mask.values[:] = False
    # Scan the record window by window and flag outliers per window.
    for segment in pd_rolling(x, window, stride):
        deviation = np.abs(segment - segment.mean())
        spikes = deviation > (factor * segment.std())
        if x.ndim == 2:
            mask[spikes] = True
        elif x.ndim == 1:
            mask[spikes.index[spikes.values]] = True
    return mask
def hist_based_mask_series(x, window, bins, pct_thres=0.5):
    """Mask windows of a Series whose value histogram is mostly empty.

    For each half-overlapping window, a histogram of the non-NaN values is
    computed; if at least ``pct_thres`` of the histogram's bins are empty
    (suggesting quantized / degenerate data), every valid sample in that
    window is marked True in the returned mask.

    Parameters
    ----------
    x : pd.Series with a freq-carrying DatetimeIndex.
    window : pd.Timedelta-convertible window length.
    bins : int or sequence, passed through to np.histogram.
    pct_thres : float, minimum fraction of empty bins to flag a window.

    Returns
    -------
    pd.Series of bool, same index as `x`.
    """
    # create a boolean mask for the entire dataset
    x_mask = x.astype(bool)
    x_mask.values[:] = False
    # compute stride from given window size (50% overlap)
    window = pd.Timedelta(window)
    stride = window / 2
    # Processing rolling operations (This needs to be done seperately for
    # series and dataframe)
    for x_sub in pd_rolling(x, window, stride):
        x_sub_no_nan = x_sub.dropna()
        # Only judge windows with more than 20% valid samples.
        if x_sub_no_nan.size / x_sub.size > 0.2:
            # Bug fix: np.histogram's second return value (bin edges) was
            # previously assigned back to `bins`, so every later window
            # silently reused the first qualifying window's edges instead
            # of the caller-supplied bin specification.
            hist, _edges = np.histogram(x_sub_no_nan.values,
                                        bins=bins,
                                        range=[x_sub.min(), x_sub.max()])
            if ((hist == 0).sum() / hist.size) >= pct_thres:
                x_sub_mask = x_sub_no_nan.astype(bool)
                x_sub_mask.values[:] = True
                x_mask[x_sub_mask.index[x_sub_mask.values]] = True
    return x_mask
def hist_based_mask_dataframe(x, window, bins, pct_thres=0.5):
    """Apply hist_based_mask_series to every column of a DataFrame and
    reassemble the per-column masks into one boolean DataFrame."""
    column_masks = [hist_based_mask_series(x[varname], window, bins, pct_thres)
                    for varname in x.columns]
    return pd.concat(column_masks, axis=1)
# Generalise the function usage
# Dispatch table: look up the histogram-mask implementation matching the
# pandas container type of the input (Series vs DataFrame).
hist_based_mask_func = {pd.core.series.Series : hist_based_mask_series,
                        pd.core.frame.DataFrame : hist_based_mask_dataframe}
def mean_ptp_ratio(x, window='2T'):
    '''Compute the ratio of running mean range to record mean

    Parameters:
        x: pd.Series with DatetimeIndex. Record to measure
        window: pd.DateOffset string. Running window size
    '''
    # Min-max normalize the record to [0, 1].
    lo, hi = x.min(), x.max()
    normed = (x - lo) / (hi - lo)
    # Peak-to-peak spread of the rolling mean, relative to the record mean.
    rolling_mean = normed.rolling(window).mean()
    spread = rolling_mean.max() - rolling_mean.min()
    return spread / normed.mean()
|
{"hexsha": "7bcbf65718e04311679ad8abe4081f61c5ae53f3", "size": 3779, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/pyqc/utils.py", "max_stars_repo_name": "wangsen992/pyqc", "max_stars_repo_head_hexsha": "7909426111bd069f295cc477c65f343aa5a7e437", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/pyqc/utils.py", "max_issues_repo_name": "wangsen992/pyqc", "max_issues_repo_head_hexsha": "7909426111bd069f295cc477c65f343aa5a7e437", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-03-22T04:25:41.000Z", "max_issues_repo_issues_event_max_datetime": "2020-03-22T04:25:41.000Z", "max_forks_repo_path": "src/pyqc/utils.py", "max_forks_repo_name": "wangsen992/pyqc", "max_forks_repo_head_hexsha": "7909426111bd069f295cc477c65f343aa5a7e437", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.045045045, "max_line_length": 84, "alphanum_fraction": 0.6242392167, "include": true, "reason": "import numpy", "num_tokens": 929}
|
"""
basic operations II
"""
import numpy as np
arr = np.array([[1, 5, 6],
[4, 7, 2],
[3, 1, 9]])
# maximum element of array
print("Largest element is:", 0)
# minimum element of array
print("Smallest element is:", 0)
# maximum element per row
print("Row-wise maximum elements:",
[])
# minimum element per col
print("Column-wise minimum elements:",
[])
# sum of array elements
print("Sum of all array elements:",
0)
# cumulative sum along each row
print("Cumulative sum along each row:\n",
0)
c = np.array([[1, 2],
[3, 4]])
d = np.array([[4, 3],
[2, 1]])
# add arrays
print("Array sum:\n", None)
# multiply arrays (elementwise multiplication)
print("Array multiplication:\n", None)
|
{"hexsha": "3e424e504cb2f0ed9e02da4e0c7c8ec0a8abca3a", "size": 775, "ext": "py", "lang": "Python", "max_stars_repo_path": "pset_pandas1_basics/ndarrs/p5.py", "max_stars_repo_name": "mottaquikarim/pydev-psets", "max_stars_repo_head_hexsha": "9749e0d216ee0a5c586d0d3013ef481cc21dee27", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2019-04-08T20:05:37.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-04T20:48:45.000Z", "max_issues_repo_path": "pset_pandas1_basics/ndarrs/p5.py", "max_issues_repo_name": "mottaquikarim/pydev-psets", "max_issues_repo_head_hexsha": "9749e0d216ee0a5c586d0d3013ef481cc21dee27", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2019-04-15T15:16:05.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-12T10:33:32.000Z", "max_forks_repo_path": "pset_pandas1_basics/ndarrs/p5.py", "max_forks_repo_name": "mottaquikarim/pydev-psets", "max_forks_repo_head_hexsha": "9749e0d216ee0a5c586d0d3013ef481cc21dee27", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-04-10T00:14:42.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-26T20:35:21.000Z", "avg_line_length": 18.023255814, "max_line_length": 46, "alphanum_fraction": 0.5883870968, "include": true, "reason": "import numpy", "num_tokens": 210}
|
# -*- coding: utf-8 -*-
"""Project-wide configuration constants for the Quora question-pair pipeline."""
import os
import platform
import numpy as np
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
# Data locations under the user's home directory.
RAW_PATH = os.path.expanduser("~") + "/data/quora/"
FEAT_PATH = os.path.expanduser("~") + "/data/quora/features/"
SUB_PATH = os.path.expanduser("~") + "/data/quora/submission/"
# ---------------------- Overall -----------------------
# Pipeline mode: "sample" restricts both splits to SAMPLE_SIZE rows below.
# NOTE(review): presumably another TASK value selects the full run -- confirm
# against the code that reads TASK.
TASK = "sample"
# # for testing data processing and feature generation
# TASK = "sample"
SAMPLE_SIZE = 1000
# size
# Full dataset row counts for the train and test splits.
TRAIN_SIZE = 404290
TEST_SIZE = 2345796
# In sample mode, shrink both splits to SAMPLE_SIZE rows.
if TASK == "sample":
    TRAIN_SIZE = SAMPLE_SIZE
    TEST_SIZE = SAMPLE_SIZE
# Sentinel value for missing numeric features.
MISSING_VALUE_NUMERIC = -1
STR_MATCH_THRESHOLD = 0.6
# NOTE(review): the inline note "0.7 * TRAIN_SIZE" is ~283k, not 60000 --
# confirm which cap is intended.
VALID_SIZE_MAX = 60000 # 0.7 * TRAIN_SIZE
# bm25
# BM25 ranking hyperparameters: k1 controls term-frequency saturation,
# b controls document-length normalization.
BM25_K1 = 1.6
BM25_B = 0.75
RANDOM_SEED = 524
# svd
# Truncated-SVD dimensionality-reduction settings.
SVD_DIM = 10
SVD_N_ITER = 5
#tfidf
# TF-IDF vocabulary pruning: minimum document count / maximum document fraction.
MIN_DF = 3
MAX_DF = 0.7
oof_random = 1988
# Test-set row ids; NOTE(review): presumably rows flagged as duplicates for
# post-processing -- verify against consumers of this list.
ab_dup_test =[6750, 23693, 30851, 61404, 78271, 103525, 121182, 143641,
              154513, 158473, 172120, 174071, 182820, 190035, 192380, 205866,
              211669, 220517, 236250, 240964, 251464, 252019, 254962, 272794,
              276854, 285520, 308063, 310728, 316633, 347129, 355138, 365306,
              381782, 395473, 398714, 399243]
|
{"hexsha": "adc17fda7222d39d25f346fd663aad52d14c9dc3", "size": 1190, "ext": "py", "lang": "Python", "max_stars_repo_path": "quora/solution/config.py", "max_stars_repo_name": "zonemercy/Kaggle", "max_stars_repo_head_hexsha": "35ecb08272b6491f5e6756c97c7dec9c46a13a43", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2017-10-01T00:10:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-07T12:11:01.000Z", "max_issues_repo_path": "quora/solution/config.py", "max_issues_repo_name": "zonemercy/Kaggle", "max_issues_repo_head_hexsha": "35ecb08272b6491f5e6756c97c7dec9c46a13a43", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "quora/solution/config.py", "max_forks_repo_name": "zonemercy/Kaggle", "max_forks_repo_head_hexsha": "35ecb08272b6491f5e6756c97c7dec9c46a13a43", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-08-15T03:58:51.000Z", "max_forks_repo_forks_event_max_datetime": "2019-08-15T03:58:51.000Z", "avg_line_length": 22.037037037, "max_line_length": 75, "alphanum_fraction": 0.6596638655, "include": true, "reason": "import numpy", "num_tokens": 417}
|
####Inference Engine
# Classify an acute-infarct MRI slice with an OpenVINO model and display the
# predicted brain-region label in a small Tk window.
from openvino.inference_engine import IENetwork, IEPlugin
import os
import time
import cv2
import argparse
import numpy as np
import tkinter as tk

root = tk.Tk()
canvas1 = tk.Canvas(root, width = 300, height = 300)
canvas1.pack()

# Class index (1-based) -> human-readable infarct location.
target_names = {1: 'Bilateral cerebellar hemispheres', 2: 'Bilateral cerebellar hemispheres and vermis', 3: 'Bilateral frontal lobes', 4: 'Bilateral occipital lobes', 5: 'Brainstem', 6: 'Lacunar infarct in dorsal aspect of pons', 7: 'Lacunar infarct in left parietal lobe', 8: 'Lacunar infarct in medulla oblongata on the left', 9: 'Lacunar infarct in pons on the left', 10: 'Lacunar infarct in posterior limb of left internal capsule', 11: 'Lacunar infarct in right corona radiata', 12: 'Lacunar infarct in right putamen', 13: 'Lacunar infarcts in bilateral occipital lobes', 14: 'Lacunar infarcts in left corona radiata', 15: 'Lacunar infarcts in the right parietal lobe', 16: 'Left centrum semi ovale and right parietal lobe', 17: 'Left cerebellar hemisphere', 18: 'Left cerebellar lacunar infarcts', 19: 'Left frontal lobe', 20: 'Left frontal lobe in precentral gyral location', 21: 'Left fronto-parietal lobe', 22: 'Left fronto-temporo-parietal region', 23: 'Left insula', 24: 'Left occipital and temporal lobes', 25: 'Left occipital lobe', 26: 'Left parietal lobe', 27: 'Left thalamic lacunar infarct', 28: 'Medial part of right frontal and parietal lobes', 29: 'Mid brain on right side', 30: 'Pontine infarct on the right', 31: 'Right anterior thalamic infarct', 32: 'Right cerebellar hemisphere', 33: 'Right cerebellar hemisphere infarct', 34: 'Right corona radiata', 35: 'Right frontal lobe', 36: 'Right fronto-parietal lobe', 37: 'Right fronto-parieto-temporo- occipital lobes', 38: 'Right ganglio-capsular region', 39: 'Right insula', 40: 'Right lentiform nucleus', 41: 'Right occipital lobe', 42: 'Right parietal lacunar infarct', 43: 'Right parietal lobe', 44: 'Right temporal lobe', 45: 'Right thalamus', 46: 'Splenium of the corpus callosum'}

ap = argparse.ArgumentParser()
ap.add_argument("-i","--input", help="path to input", required=True)
ap.add_argument("-d","--device", help="Device", required=True)
ap.add_argument("-m","--model", help="path to xml file", required=True)
args = ap.parse_args()

# reading the model: OpenVINO IR is an .xml topology plus a .bin weights
# file with the same basename.
model_xml = str(args.model)
model_bin = str(os.path.splitext(model_xml)[0]) + ".bin"

# Setup Devices (e.g. CPU, GPU, MYRIAD) and parse the network.
plugin = IEPlugin(device=str(args.device)) #MYRIAD
net = IENetwork(model=model_xml, weights=model_bin)

# Allocating input and output blobs
input_blob = next(iter(net.inputs))
out_blob = next(iter(net.outputs))

# load model to plugin
exec_net = plugin.load(network=net, num_requests=2)

# read and preprocess input images: NCHW shape expected by the network.
n, c, h, w = net.inputs[input_blob].shape
"""Note: compile OpenCV with JPEG file support enabled."""
image = cv2.imread(str(args.input))
# Resize only when the image's (height, width) differs from the network's
# input size. Bug fix: the original tested `image[:-1] != (w, h)`, which
# compares pixel data against a tuple rather than checking the shape.
if image.shape[:2] != (h, w):
    image = cv2.resize(image, (w, h))
image = image.transpose((2,0,1)) #as openvino expects in this format HWC to CHW

res = exec_net.infer(inputs={input_blob:image})
res = res[out_blob]

# Locate the highest-scoring class; +1 because target_names is 1-based.
a = np.amax(res)
result = np.where(res == a)
out = target_names[(result[1][0])+1]
print("-----------------------------")
print(out)
print("-----------------------------")

# Show the predicted label in the Tk window.
label1 = tk.Label(root, text= out, fg='green', font=('helvetica', 12, 'bold'))
canvas1.create_window(150, 150, window=label1)
root.mainloop()
|
{"hexsha": "4587e136d452f52b1eda158e9e3c008d15bc83c4", "size": 3428, "ext": "py", "lang": "Python", "max_stars_repo_path": "mri_infer.py", "max_stars_repo_name": "yogya-ch/Acute_infarct", "max_stars_repo_head_hexsha": "c90220da1f8da8264f9461910ccb6ce5aa43ea70", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mri_infer.py", "max_issues_repo_name": "yogya-ch/Acute_infarct", "max_issues_repo_head_hexsha": "c90220da1f8da8264f9461910ccb6ce5aa43ea70", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mri_infer.py", "max_forks_repo_name": "yogya-ch/Acute_infarct", "max_forks_repo_head_hexsha": "c90220da1f8da8264f9461910ccb6ce5aa43ea70", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 54.4126984127, "max_line_length": 1757, "alphanum_fraction": 0.7161610268, "include": true, "reason": "import numpy", "num_tokens": 1049}
|
[STATEMENT]
lemma cong_trans_a: "R \<in> congruences \<Longrightarrow> R a b \<Longrightarrow> R b c \<Longrightarrow> R a c"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>R \<in> congruences; R a b; R b c\<rbrakk> \<Longrightarrow> R a c
[PROOF STEP]
apply (simp add: congruences_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>equivp R \<and> (\<forall>a b c d. R a b \<and> R c d \<longrightarrow> R (a * c) (b * d) \<and> R (a l\<rightarrow> c) (b l\<rightarrow> d) \<and> R (a r\<rightarrow> c) (b r\<rightarrow> d)); R a b; R b c\<rbrakk> \<Longrightarrow> R a c
[PROOF STEP]
apply (rule_tac y = b in equivp_transp)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<lbrakk>equivp R \<and> (\<forall>a b c d. R a b \<and> R c d \<longrightarrow> R (a * c) (b * d) \<and> R (a l\<rightarrow> c) (b l\<rightarrow> d) \<and> R (a r\<rightarrow> c) (b r\<rightarrow> d)); R a b; R b c\<rbrakk> \<Longrightarrow> equivp R
2. \<lbrakk>equivp R \<and> (\<forall>a b c d. R a b \<and> R c d \<longrightarrow> R (a * c) (b * d) \<and> R (a l\<rightarrow> c) (b l\<rightarrow> d) \<and> R (a r\<rightarrow> c) (b r\<rightarrow> d)); R a b; R b c\<rbrakk> \<Longrightarrow> R a b
3. \<lbrakk>equivp R \<and> (\<forall>a b c d. R a b \<and> R c d \<longrightarrow> R (a * c) (b * d) \<and> R (a l\<rightarrow> c) (b l\<rightarrow> d) \<and> R (a r\<rightarrow> c) (b r\<rightarrow> d)); R a b; R b c\<rbrakk> \<Longrightarrow> R b c
[PROOF STEP]
by simp_all
|
{"llama_tokens": 611, "file": "PseudoHoops_PseudoHoopFilters", "length": 3}
|
# Driver script: find and visualize observable transits of GJ 1132 b in
# semester 2016A. Relies on the project-local `whatsup` and `exopop` packages.
import whatsup.plan as plan
from whatsup.imports import *
import numpy as np
from exopop.Confirmed import Confirmed
# Observing plan over the given date range with airmass <= 2.5 and the sun
# below -6 degrees (nautical twilight or darker).
p = plan.Plan(semester='2016A', start='2016-04-16', finish='2016-05-24', maxairmass=2.5, maxsun=-6.0)
p.known = Confirmed()
distance = 100.0  # NOTE(review): unused below -- presumably leftover; confirm
# Select the catalog row for GJ 1132 b from the confirmed-planet table.
transmission = p.known.standard[np.array([p.known.find('GJ1132b')[0]])]
# Tag the selected target(s) with a ' (T)' suffix -- presumably marking
# transmission-spectroscopy targets; confirm against plotting code.
for i in range(len(transmission)):
    transmission['name'][i] = transmission['name'][i] + ' (T)'
# Compute, report, and plot the observable transits, then render a movie.
p.selectInteresting(table=transmission)
p.findTransits()
p.printTransits()
p.plotTransits()
p.movie(filename='gj1132b_2016a.mp4')
|
{"hexsha": "aa8e3f25241efb9d0486069f5635529e7635b0af", "size": 562, "ext": "py", "lang": "Python", "max_stars_repo_path": "testscripts/gj1132.py", "max_stars_repo_name": "zkbt/whatsup", "max_stars_repo_head_hexsha": "0a9a878b47a42973f5f9ffdda051960c1cb560b7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2016-11-03T14:36:11.000Z", "max_stars_repo_stars_event_max_datetime": "2016-11-03T14:36:11.000Z", "max_issues_repo_path": "testscripts/gj1132.py", "max_issues_repo_name": "zkbt/whatsup", "max_issues_repo_head_hexsha": "0a9a878b47a42973f5f9ffdda051960c1cb560b7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-05-18T00:08:11.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-18T00:09:33.000Z", "max_forks_repo_path": "testscripts/gj1132.py", "max_forks_repo_name": "zkbt/whatsup", "max_forks_repo_head_hexsha": "0a9a878b47a42973f5f9ffdda051960c1cb560b7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.5789473684, "max_line_length": 102, "alphanum_fraction": 0.7277580071, "include": true, "reason": "import numpy", "num_tokens": 182}
|
#!/usr/bin/env python3
from argparse import ArgumentParser
import os
import pickle
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_validate
from scipy.stats import norm
import tqdm
from chg.db import database
from chg.defaults import CHG_PROJ_RANKER
from chg.search import embedded_search
from chg.embed.basic import BasicEmbedder, remove_color_ascii
# TODO: we could replace this RF model
# with a NN, and use that to tune the
# CodeBERT embeddings as well
class RFModel(object):
    """Random-forest regressor with an expected-improvement acquisition.

    The per-tree predictions of the forest supply an empirical predictive
    distribution (mean and std) that is plugged into the standard
    Bayesian-optimization expected-improvement (EI) formula.
    """

    def __init__(self):
        self.model = RandomForestRegressor()

    def fit(self, X, y):
        """Fit the underlying forest on feature matrix X and targets y."""
        self.model.fit(X, y)

    def predict(self, x):
        """Return the forest's (mean) prediction for x."""
        return self.model.predict(x)

    def expected_improvement(self, x, curr_loss=None):
        """Return the expected improvement of candidate x over curr_loss.

        Args:
            x: 1-D feature vector; reshaped internally to a single row.
            curr_loss: current best (lowest) observed loss. The None
                default is kept for interface compatibility, but a
                numeric value is required.

        Returns:
            Non-negative EI estimate (float).
        """
        x = x.reshape(1, -1)
        pred_mean = self.predict(x)[0]
        # Empirical predictive std across the ensemble's trees.
        pred_std = np.std([
            tree.predict(x)[0] for tree in self.model.estimators_
        ])
        # BUG FIX: when every tree agrees, pred_std is 0 and the original
        # divided by zero. With no predictive spread the improvement is
        # deterministic: max(curr_loss - pred_mean, 0).
        if pred_std == 0:
            return max(curr_loss - pred_mean, 0.0)
        z = (curr_loss - pred_mean) / pred_std
        ei = (curr_loss - pred_mean) * norm.cdf(z) + pred_std * norm.pdf(z)
        return ei
class QuestionRanker(object):
    """Selects the question expected to most improve a code/NL match.

    Candidate questions are scored with an expected-improvement criterion
    over a hinge ranking loss: a good question is one whose (embedded)
    answer is expected to pull the dialogue embedding closer to this code
    chunk's embedding than to randomly sampled negative chunks.

    Typical flow: predict() picks a question (stashing state in
    self.curr), the user answers, then update() computes the realized
    loss and appends the observation to the training data.
    """

    def __init__(self, delta=0.05, train_every=1, negative_k=3):
        # delta: margin of the hinge ranking loss.
        # train_every: refit the loss model every N update() calls
        #   (values <= 0 disable periodic refitting).
        # negative_k: number of negative code examples sampled per loss.
        self.embed_model = BasicEmbedder()
        self.loss_model = RFModel()
        self.database = database.get_store()
        # State captured by the latest predict(); consumed by update().
        self.curr = {}
        self.delta = delta
        # Training observations. NOTE(review): update() calls np.vstack
        # on self.X, which fails while X is still this empty list —
        # callers appear to seed X/y first (see build_ranker_from_git_log);
        # confirm that invariant.
        self.X = []
        self.y = []
        self.train_every = train_every
        self.negative_k = negative_k
        self.step_ct = 0

    def sample_negative_code_vecs(self, exclude_id=None):
        """Sample `self.negative_k` random code embeddings from the DB.

        Args:
            exclude_id: optional chunk id to exclude (the positive chunk).

        Returns:
            numpy matrix with one sampled code embedding per row.
        """
        # NOTE(review): query built via str.format — fine for internal
        # integer ids, but not safe for untrusted input.
        query = """
        SELECT code_embedding FROM Embeddings
        """
        if exclude_id is not None:
            query += " WHERE NOT chunk_id={}".format(exclude_id)
        # sample some number of these
        query += " ORDER BY RANDOM() LIMIT {}".format(self.negative_k)
        results = []
        for row in self.database.run_query(query):
            # comes out as a tuple by default, so take first elem
            code_blob = row[0]
            code_embedding = self.database.blob_to_array(code_blob)
            results.append(code_embedding)
        mat = np.vstack(results)
        return mat

    def compute_loss(self, code_vec, nl_vec, neg_code_vecs):
        """Mean hinge ranking loss of the dialogue vs. the code chunk.

        For each negative embedding n: max(0, delta - code·nl + n·nl).
        Lower is better — the positive pair should beat every negative
        by at least the margin delta.
        """
        code_vec = code_vec.reshape(1, -1)
        nl_vec = nl_vec.reshape(1, -1).T
        pos_sim = np.dot(code_vec, nl_vec)
        neg_sims = np.dot(neg_code_vecs, nl_vec)
        # hinge loss w/ positive and negative pairs
        losses = self.delta - pos_sim + neg_sims
        losses[losses < 0] = 0.0
        mean_loss = np.mean(losses)
        return mean_loss

    def embed_nl(self, _input):
        """Embed a natural-language string (e.g. a candidate question)."""
        return self.embed_model.embed_nl(_input)

    def embed_dialogue(self, _input):
        """Embed a sequence of (question, answer) dialogue turns."""
        return self.embed_model.embed_dialogue(_input)

    def embed_code(self, _input):
        """Embed a raw code chunk."""
        return self.embed_model.embed_code(_input)

    def get_features_and_curr_loss(
        self,
        code,
        dialogue,
        negative_examples=None,
        embed_code=True,
        embed_dialogue=True
    ):
        """Build the context feature vector and the current ranking loss.

        Args:
            code: raw code, or a precomputed embedding when embed_code
                is False.
            dialogue: dialogue history, or its embedding when
                embed_dialogue is False.
            negative_examples: optional precomputed negative matrix;
                freshly sampled from the DB when None.
            embed_code / embed_dialogue: whether the inputs still need
                to be embedded.

        Returns:
            dict with keys code_vec, neg_code_vecs, context_vec
            (code ++ dialogue ++ flattened negatives) and curr_loss.
        """
        if embed_code:
            code_vec = self.embed_code(code)
        else:
            code_vec = code
        if embed_dialogue:
            nl_vec = self.embed_dialogue(dialogue)
        else:
            nl_vec = dialogue
        if negative_examples is None:
            neg_code_vecs = self.sample_negative_code_vecs()
        else:
            neg_code_vecs = negative_examples
        flat_neg_code_vecs = neg_code_vecs.flatten()
        context_vec = np.concatenate([code_vec, nl_vec, flat_neg_code_vecs])
        # hinge loss based on current dialogue for this chunk
        curr_loss = self.compute_loss(code_vec, nl_vec, neg_code_vecs)
        result = {
            "code_vec": code_vec,
            "neg_code_vecs": neg_code_vecs,
            "context_vec": context_vec,
            "curr_loss": curr_loss,
        }
        return result

    def predict(self, code, dialogue, questions):
        """Pick the candidate question with the highest expected improvement.

        Args:
            code: raw code chunk under discussion.
            dialogue: dialogue history so far.
            questions: list of candidate question strings.

        Returns:
            (best_index, best_score) into `questions`.

        Side effects:
            Stores code/negative/feature state in self.curr for the
            follow-up update() call.
        """
        info = self.get_features_and_curr_loss(
            code,
            dialogue,
            negative_examples=None,
        )
        # context corresponds to code embedding,
        # embedded dialogue up to this point
        # and negative code embeddings sampled
        context_vec = info["context_vec"]
        # ranking loss
        curr_loss = info["curr_loss"]
        best_score = None
        best_i = None
        best_x = None
        scores = []
        # candidate questions: pick the one that
        # we believe will produce the best score
        for i, q in enumerate(questions):
            q_vec = self.embed_nl(q)
            x = np.concatenate((context_vec, q_vec))
            y = self.loss_model.expected_improvement(x, curr_loss)
            scores.append(y)
            # larger expected improvement than previous best
            if best_score is None or y > best_score:
                best_score = y
                best_x = x
                best_i = i
        # keep around some state
        # so we can compute realized loss later on
        # (after user types out answer to proposed question)
        self.curr = {
            "code_vec": info["code_vec"],
            "neg_code_vecs": info["neg_code_vecs"],
            "x": best_x,
        }
        return best_i, best_score

    def fit_model(self, X=None, y=None, cv=None):
        """Fit the loss model; optionally report k-fold CV r2 first.

        Args:
            X, y: training data; default to the accumulated self.X / self.y.
            cv: if not None, number of cross-validation folds to print.
        """
        if X is None:
            X = self.X
        if y is None:
            y = self.y
        if cv is not None:
            metric = "r2"
            cv_results = cross_validate(
                self.loss_model.model,
                X,
                y,
                cv=cv,
                scoring=metric,
            )
            scores = cv_results["test_score"]
            print("{}-fold CV".format(cv))
            print(
                "Mean {} (sd): {:.2f} ({:.2f})".format(
                    metric, scores.mean(), scores.std()
                )
            )
        self.loss_model.fit(X, y)

    def update(self, code, dialogue):
        """Record the realized loss after the user answered the question.

        Args:
            code: unused at present (kept for interface symmetry).
            dialogue: full dialogue including the latest answer.
        """
        # not doing anything with code right now
        # embed full dialogue (including answer to latest
        # proposed question)
        nl_vec = self.embed_dialogue(dialogue)
        # compute *realized* loss
        real_loss = self.compute_loss(
            self.curr["code_vec"],
            nl_vec,
            self.curr["neg_code_vecs"],
        )
        # add observations to training data
        self.X = np.vstack((self.X, self.curr["x"]))
        self.y = np.append(self.y, real_loss)
        self.step_ct += 1
        # Periodically refit on the grown dataset.
        if (self.train_every > 0) and self.step_ct % self.train_every == 0:
            self.fit_model()
def build_ranker_from_git_log():
    """Bootstrap a QuestionRanker from the stored chunk/dialogue history.

    For every recorded dialogue turn, build the (context, question)
    feature vector and the ranking loss realized once the answer was
    given, then fit the ranker's loss model on those pairs (with a
    5-fold CV report).

    Returns:
        The fitted QuestionRanker.
    """
    store = database.get_store()
    ranker = QuestionRanker()
    feature_rows = []
    realized_losses = []
    id_rows = store.run_query("SELECT id FROM Chunks WHERE chunk IS NOT NULL")
    chunk_ids = [r[0] for r in id_rows]
    print("Training ranker")
    for cid in tqdm.tqdm(chunk_ids):
        code_embedding, _ = store.get_embeddings_by_chunk_id(cid)
        # All (question, answer) turns recorded for this code change.
        dialogue = store.run_query(
            "SELECT question, answer FROM Dialogue WHERE chunk_id={}".
            format(cid)
        )
        for turn_ix, (current_q, future_answer) in enumerate(dialogue):
            history = dialogue[:turn_ix]
            # Negatives exclude the positive chunk itself.
            neg_vecs = ranker.sample_negative_code_vecs(
                exclude_id=cid,
            )
            info = ranker.get_features_and_curr_loss(
                code=code_embedding,
                dialogue=history,
                negative_examples=neg_vecs,
                embed_code=False,
                embed_dialogue=True,
            )
            # Feature vector = context ++ embedded candidate question.
            question_vec = ranker.embed_nl(current_q)
            features = np.concatenate([info["context_vec"], question_vec])
            # Target: the loss actually realized once the answer landed;
            # the model learns to predict it from (context, question).
            answered_vec = ranker.embed_dialogue(dialogue[:(turn_ix + 1)])
            target = ranker.compute_loss(
                code_embedding,
                answered_vec,
                neg_vecs,
            )
            feature_rows.append(features)
            realized_losses.append(target)
    ranker.X = np.vstack(feature_rows)
    ranker.y = np.array(realized_losses)
    ranker.fit_model(cv=5)
    return ranker
def load_ranker():
    """Unpickle the project's QuestionRanker from CHG_PROJ_RANKER.

    Recreates the embedder and database handle, which store_ranker
    strips before pickling.
    """
    parent_dir = os.path.dirname(CHG_PROJ_RANKER)
    if not os.path.exists(parent_dir):
        print("Creating folder for chg ranker at", parent_dir)
        os.makedirs(parent_dir)
    with open(CHG_PROJ_RANKER, "rb") as handle:
        loaded = pickle.load(handle)
    # These members are never pickled; rebuild them fresh.
    loaded.embed_model = BasicEmbedder()
    loaded.database = database.get_store()
    return loaded
def store_ranker(ranker):
    """Pickle `ranker` to CHG_PROJ_RANKER.

    NOTE: mutates the passed ranker — its embedder and database handle
    are set to None because fasttext models and sqlite3 connections
    cannot be pickled (load_ranker restores them).
    """
    with open(CHG_PROJ_RANKER, "wb") as sink:
        ranker.embed_model = None
        ranker.database = None
        pickle.dump(ranker, sink)
def get_args():
    """Parse the (currently empty) command-line options.

    Returns:
        argparse.Namespace with no attributes; the parser exists so the
        tool responds to --help and can grow options later.
    """
    cli = ArgumentParser(
        description="Train question ranker based on git log"
    )
    parsed = cli.parse_args()
    return parsed
def main():
    """CLI entry point: train the ranking model and persist it."""
    get_args()  # no options yet; invoked so --help/usage still work
    print("Building ranking model")
    store_ranker(build_ranker_from_git_log())
if __name__ == "__main__":
    try:
        main()
    except Exception:
        # Drop into a post-mortem debugger on any failure, then re-raise
        # so the process exit status reflects the error (the original
        # swallowed the exception and exited 0 on failure; the bound
        # `err` was also unused).
        import pdb
        pdb.post_mortem()
        raise
|
{"hexsha": "303017180f0115e8fa869d8989f10af9b34b3c82", "size": 9674, "ext": "py", "lang": "Python", "max_stars_repo_path": "chg/ranker/model_based_ranking.py", "max_stars_repo_name": "josepablocam/changestructor", "max_stars_repo_head_hexsha": "21712cb11951564b255287cbdc4a3a5b73c70ffd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "chg/ranker/model_based_ranking.py", "max_issues_repo_name": "josepablocam/changestructor", "max_issues_repo_head_hexsha": "21712cb11951564b255287cbdc4a3a5b73c70ffd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chg/ranker/model_based_ranking.py", "max_forks_repo_name": "josepablocam/changestructor", "max_forks_repo_head_hexsha": "21712cb11951564b255287cbdc4a3a5b73c70ffd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.4090909091, "max_line_length": 76, "alphanum_fraction": 0.5923092826, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2195}
|
C-----------------------------------------------------------------------
C     STASEA: GIVEN A NEW STATE 'STATEB' (REACHED VIA THE SYMBOL OF
C     ITEM 'ITEMA'), SCAN ALL EXISTING STATES REACHED BY THE SAME
C     SYMBOL AND, IF A COMPATIBLE ONE IS FOUND, RETURN IT IN 'STATEB'
C     INSTEAD (STATE MERGING FOR THE LR AUTOMATON).
C     NOTE(REVIEW): 'LIMIT' IS DECLARED BUT UNUSED IN THIS VERSION.
C-----------------------------------------------------------------------
      SUBROUTINE STASEA (STATEB,LIMIT,ITEMA)
C SEARCH FOR A COMPATIBLE STATE
C VERSION WITHOUT 'NUCS' DATA STRUCTURE
C GF 30.07.1980
C
      INCLUDE 'PARS.f'
      INCLUDE 'ITES.f'
      INCLUDE 'PRES.f'
      INCLUDE 'STAS.f'
      INCLUDE 'SYMS.f'
      INTEGER*2 I1,I2
     = ,GOT ! RESULT OF 'ITEMA1/2', =1 IF STATE WAS READ
     = ,ITEMA ! -> FIRST ITEM WITH SYMBOL 'SYMA' IN 'STATEA'
     = ,ITEMC ! -> ... IN 'STATEC'
     = ,LIMIT ! NOT USED IN THIS VERSION
     = ,PRED ! -> LIST OF PREDECESSORS OF 'STATED'
     = ,STATEB ! NEW AND RESULTING STATE
     = ,STATEC ! A PREDECESSOR OF 'STATED'
     = ,STATED ! A STATE WHICH IS PERHAPS COMPATIBLE WITH 'STATEB'
     = ,SYMA ! 'STATEB' AND 'STATED' ARE REACHED BY THIS SYMBOL
C STATEA T H E PREDECESSOR OF 'STATEB'
      INCLUDE 'ENDS.f'
C
C     START FROM THE CHAIN OF STATES REACHED BY SYMBOL 'SYMA'
      SYMA = ITESYM(ITEMA)
      STATED = SYMRST(SYMA)
C     OUTER LOOP OVER CANDIDATE STATES (LINKED VIA 'STARST');
C     'STAHIB' TERMINATES THE CHAIN
    1 IF(STATED .EQ. STAHIB) GOTO 2
      PRED = STAPRE(STATED)
      IF (PRED .EQ. PREHIB) GOTO 3
      STATEC = PRESTA(PRED)
      CALL ITEMA1 (STATEC, ITEMC,GOT)
C     ADVANCE 'ITEMC' TO THE FIRST ITEM OF 'STATEC' WITH SYMBOL 'SYMA'
    4 IF(ITESYM(ITEMC) .EQ. SYMA .OR. ITEMC .EQ. ITEHIB) GOTO 5
      ITEMC = ITE(ITEMC)
      GOTO 4
    5 CONTINUE ! LOOK FOR 'SYMA'
C     WALK BOTH ITEM LISTS IN LOCKSTEP WHILE THEY AGREE ON
C     SYMBOL, SHIFT ACTION, AND POSITION
      I1 = ITEMA
      I2 = ITEMC
    6 IF(ITESYM(I1) .NE. SYMA .OR. ITESYM(I2) .NE. SYMA .OR.
     = ITEACT(I1) .NE. SHIFT .OR. ITEACT(I2) .NE. SHIFT .OR.
     = ITEPOS(I1) .NE. ITEPOS(I2)
     = ) GOTO 7
      I1 = ITE(I1)
      I2 = ITE(I2)
      GOTO 6
    7 CONTINUE
C     IF EITHER LIST STILL HAS A SHIFT ITEM ON 'SYMA', THE LISTS
C     DIFFER IN LENGTH/CONTENT -> NOT COMPATIBLE
      IF ((ITESYM(I1) .EQ. SYMA .AND. ITEACT(I1) .EQ. SHIFT) .OR.
     = (ITESYM(I2) .EQ. SYMA .AND. ITEACT(I2) .EQ. SHIFT)
     = ) GOTO 8
C     COMPATIBLE STATE FOUND: RETURN IT IN 'STATEB'
      STATEB = STATED
      CALL ITEMA8 (STATEC,GOT)
      RETURN
    8 CONTINUE
      CALL ITEMA8 (STATEC,GOT)
    3 CONTINUE ! .NE. PREHIB
C     TRY THE NEXT STATE REACHED BY 'SYMA'
      STATED = STARST(STATED)
      GOTO 1
    2 CONTINUE ! WHILE .NE. STAHIB
C     NO COMPATIBLE STATE: 'STATEB' IS LEFT UNCHANGED
      RETURN
      END
|
{"hexsha": "ac28688c1bb2d465f2fadbc164eb07c2a2c6858b", "size": 2032, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "legacy/bofor_2005/STASEA.f", "max_stars_repo_name": "gfis/jextra", "max_stars_repo_head_hexsha": "bdad8fd33fdf633cf2ff4c1879e1f61935c3d636", "max_stars_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "legacy/bofor_2005/STASEA.f", "max_issues_repo_name": "gfis/jextra", "max_issues_repo_head_hexsha": "bdad8fd33fdf633cf2ff4c1879e1f61935c3d636", "max_issues_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "legacy/bofor_2005/STASEA.f", "max_forks_repo_name": "gfis/jextra", "max_forks_repo_head_hexsha": "bdad8fd33fdf633cf2ff4c1879e1f61935c3d636", "max_forks_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.4406779661, "max_line_length": 70, "alphanum_fraction": 0.5123031496, "num_tokens": 706}
|
#!/usr/bin/env python
# Standard library
from typing import Tuple
# 3rd party packages
import numpy as np
# Local source
from parametrization_clean.domain.crossover.strategy import ICrossoverStrategy
from parametrization_clean.domain.individual import Individual
from parametrization_clean.domain.root_individual import RootIndividual
class UniformCross(ICrossoverStrategy):
    """Uniform crossover: each parameter position is inherited from one
    parent or the other with equal probability, and the two children
    receive complementary choices."""

    @staticmethod
    def crossover(parent1: Individual, parent2: Individual, root_individual: RootIndividual,
                  **kwargs) -> Tuple[Individual, Individual]:
        """Execute uniform crossover. Take the ith row of parent1 and randomly swap bits with the ith row of p2."""
        # Random 0/1 mask over parameter positions; its complement
        # routes the opposite parent's values to the sibling child.
        mask = np.random.randint(2, size=len(parent1.params))
        complement = mask ^ 1
        first_params = parent1.params * mask + parent2.params * complement
        second_params = parent1.params * complement + parent2.params * mask
        return (
            Individual(list(first_params), root_individual=root_individual),
            Individual(list(second_params), root_individual=root_individual),
        )
|
{"hexsha": "7ed5982ace081183ca1104ac2f6c6a70c27d15e6", "size": 1082, "ext": "py", "lang": "Python", "max_stars_repo_path": "parametrization_clean/domain/crossover/uniform.py", "max_stars_repo_name": "cdaksha/parametrization_clean", "max_stars_repo_head_hexsha": "702243d87c2045cf8155f3c18134665871f3b170", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-12-05T06:29:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T16:23:46.000Z", "max_issues_repo_path": "parametrization_clean/domain/crossover/uniform.py", "max_issues_repo_name": "chemshift/parametrization_clean", "max_issues_repo_head_hexsha": "702243d87c2045cf8155f3c18134665871f3b170", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-08-25T16:12:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T02:23:06.000Z", "max_forks_repo_path": "parametrization_clean/domain/crossover/uniform.py", "max_forks_repo_name": "chemshift/parametrization_clean", "max_forks_repo_head_hexsha": "702243d87c2045cf8155f3c18134665871f3b170", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-12-05T06:22:49.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-05T06:22:49.000Z", "avg_line_length": 38.6428571429, "max_line_length": 119, "alphanum_fraction": 0.7458410351, "include": true, "reason": "import numpy", "num_tokens": 235}
|
'''
-----------------------------------------------------------------------
Additional Documentation
Made by Zachary A Brader, Kieran Coito, Pedro Goncalves Mokarzel
while attending University of Washington Bothell
Made in 03/09/2020
Based on instruction in CSS 458,
taught by professor Johnny Lin
Notes:
- Written for Python 3.7.3.
- No executable
- Modules necessary: numpy, matplotlib.pyplot
- External necessities: variables.py, and model.py
- Holds methods used for the analysis of our model
=======================================================================
'''
# =======================================================================
# ============================= Imports =================================
# =======================================================================
# Python modules
import numpy as np
import matplotlib.pyplot as plt
from os.path import join
# Create simulation code
import variables as v
from model import model
from fullday_model import Fullday
# =======================================================================
# =============================== Methods ===============================
# =======================================================================
# ----------------------------- Mean ---------------------------
def plot_all_mean_values(number_of_epochs_for_simulation, number_of_av_simulations=200,
                         number_of_people=100, configCashiers=10, configSelfCheck=10):
    """Run the mean-value analysis for every line model.

    Calls plot_mean_values once per line type ("equal", "cashier",
    "customer") with identical simulation settings.

    Precondition:
    - number_of_epochs_for_simulation: number of epochs each simulation runs
    - number_of_av_simulations: number of simulations averaged. Default = 200
    - number_of_people: number of people to enter the system. Default = 100
    - configCashiers: number of cashiers in the configuration. Default = 10
    - configSelfCheck: number of self checkouts in the configuration. Default = 10

    Postcondition:
    - png images stored inside analysis_images/mean showing the averaged
      results for each line type.
    """
    runs = (
        ("Gathering equal line averages...", "equal"),
        ("Gathering cashier line averages", "cashier"),
        ("Gathering customer line averages...", "customer"),
    )
    for message, line_model in runs:
        print(message)
        plot_mean_values(number_of_epochs_for_simulation, line_model,
                         number_of_av_simulations, number_of_people,
                         configCashiers, configSelfCheck)
def _save_mean_figure(fig_no, title, ylabel, series, filename_stem,
                      model_name, number_of_epochs_for_simulation):
    """Render one per-epoch mean series and save it under analysis_images/mean.

    Output file: <model_name><filename_stem><epochs>_epochs.png
    """
    plt.figure(fig_no)
    plt.clf()
    plt.title(title)
    plt.xlabel("Number of Epochs")
    plt.ylabel(ylabel)
    plt.plot(np.arange(number_of_epochs_for_simulation), series)
    plt.savefig(join("analysis_images", "mean",
                     model_name + filename_stem +
                     str(number_of_epochs_for_simulation) + "_epochs.png"))


def plot_mean_values(number_of_epochs_for_simulation, model_name="equal",
                     number_of_av_simulations=200, number_of_people=100,
                     configCashiers=10, configSelfCheck=10):
    """Perform a number of simulations and plot the per-epoch mean values.

    Precondition:
    - number_of_epochs_for_simulation: number of epochs each simulation runs
    - model_name: type of line to use. Default = "equal"
    - number_of_av_simulations: number of simulations averaged. Default = 200
    - number_of_people: number of people to enter the system. Default = 100
    - configCashiers: number of cashiers in the configuration. Default = 10
    - configSelfCheck: number of self checkouts in the configuration. Default = 10

    Postcondition:
    - 4 png images stored inside analysis_images/mean showing the
      per-epoch mean results averaged over the simulations.
    """
    # Per-epoch accumulators; divided by the run count after the loop.
    mean_cust_left = np.zeros(number_of_epochs_for_simulation)
    mean_cust_waiting = np.zeros(number_of_epochs_for_simulation)
    mean_cust_queue = np.zeros(number_of_epochs_for_simulation)
    mean_items_checked = np.zeros(number_of_epochs_for_simulation)
    mean_maintenance = np.zeros(number_of_epochs_for_simulation)
    for _ in range(number_of_av_simulations):
        self_check_model = model(model_name, number_of_people, configCashiers, configSelfCheck,
                                 cashier_IPM_p_influence=0.1,
                                 customer_IPM_p_influence=0.2)
        customers_left, \
        customers_in_line, \
        customers_in_queue, \
        items_checked, \
        maintenance_costs \
            = self_check_model.execute_simulation(number_of_epochs_for_simulation,
                                                  show=False, showAnim=False)
        mean_cust_left += np.array(customers_left)
        mean_cust_waiting += np.array(customers_in_line)
        mean_cust_queue += np.array(customers_in_queue)
        mean_items_checked += np.array(items_checked)
        mean_maintenance += np.array(maintenance_costs)
    mean_cust_left /= number_of_av_simulations
    # NOTE(review): mean_cust_waiting is accumulated but never plotted,
    # matching the original behavior — confirm whether a plot is missing.
    mean_cust_waiting /= number_of_av_simulations
    mean_cust_queue /= number_of_av_simulations
    mean_items_checked /= number_of_av_simulations
    mean_maintenance /= number_of_av_simulations
    # The four plots shared identical boilerplate in the original;
    # factored into _save_mean_figure (titles/paths unchanged).
    _save_mean_figure(1, "Mean Values for Customers Out of System",
                      "Mean of Customers Out of System",
                      mean_cust_left, "_mean_cust_out_",
                      model_name, number_of_epochs_for_simulation)
    _save_mean_figure(2, "Mean Values for Customers In Queues",
                      "Mean of Customers Being Helped",
                      mean_cust_queue, "_mean_cust_in_queue_",
                      model_name, number_of_epochs_for_simulation)
    _save_mean_figure(3, "Mean Values for Items Checked",
                      "Items Checked",
                      mean_items_checked, "_mean_items_checked_",
                      model_name, number_of_epochs_for_simulation)
    _save_mean_figure(4, "Mean Values for Maintenance Costs",
                      "Mean of Maintenance Costs",
                      mean_maintenance, "_mean_maintenance_",
                      model_name, number_of_epochs_for_simulation)
# ----------------------------- Configuration ---------------------------
def configuration(number_of_epochs_for_simulation, number_of_av_simulations=200,
                  sensitivity_range=10, number_of_people=100):
    """Run the cashier/self-checkout configuration tests, involving:
    - Number of customers out of system
    - Number of customers in queue
    - Number of checked items
    - Costs of maintenance
    This is performed for all types of lines.

    Precondition:
    - number_of_epochs_for_simulation: number of epochs each simulation runs
    - number_of_av_simulations: number of simulations averaged. Default = 200
    - sensitivity_range: range for the sensitivity test. Default = 10
    - number_of_people: number of people to enter the system. Default = 100

    Postcondition:
    - png images stored inside analysis_images/configuration showing the
      averaged results for each configuration.
    """
    # The original repeated the same loop once per line type; a single
    # nested loop is equivalent. k scales how many self checkouts
    # replace one cashier (3, 6, 9, 12).
    for line_model in ("equal", "customer", "cashier"):
        for k in range(1, 5):
            sensitivity_cashiers_to_self_checkout(
                number_of_epochs_for_simulation, line_model,
                number_of_av_simulations, sensitivity_range,
                number_of_people, 3 * k)
    print("Test complete")
def sensitivity_cashiers_to_self_checkout(number_of_epochs_for_simulation, model_name, number_of_av_simulations,
                                          sensitivity_range=10, number_of_people=100, cashiers_to_self_checkouts=3):
    '''Sweep cashier-to-self-checkout trade-offs and plot end-of-day means.

    For i in [0, sensitivity_range): simulate with (10 - i) cashiers and
    i * cashiers_to_self_checkouts self checkouts, average the final
    tracked values over number_of_av_simulations runs, and plot each
    averaged series against i.

    Precondition:
    - number_of_epochs_for_simulation: number of epochs each simulation runs
    - model_name: line type ("equal", "cashier" or "customer")
    - number_of_av_simulations: number of simulations averaged
    - sensitivity_range: number of configurations tested. Default = 10
    - number_of_people: number of people to enter the system. Default = 100
    - cashiers_to_self_checkouts: self checkouts replacing one cashier.
      Default = 3

    Postcondition:
    - 5 png images stored inside analysis_images/configuration showing
      the averaged end-of-simulation results per configuration.
    '''
    # Per-configuration averages; x-axis is cashiers moved to self checkouts.
    num_self_checkouts = []
    avg_num_cust_left = []
    avg_num_cust_not_in_line = []
    avg_num_cust_being_helped = []
    avg_num_items_checked = []
    avg_num_maintenance = []
    for i in range(sensitivity_range):
        num_self_checkouts.append(i)
        cust_left = []
        cust_line = []
        cust_queue = []
        items = []
        maintenance = []
        # Run a number of simulations to get the average
        for j in range(number_of_av_simulations):
            self_check_model = model(model_name, number_of_people, 10 - i, i * cashiers_to_self_checkouts,
                                     cashier_IPM_p_influence=0.1,
                                     customer_IPM_p_influence=0.2)
            customers_left, \
            customers_in_line, \
            customers_in_queue, \
            items_checked, \
            maintenance_costs \
                = self_check_model.execute_simulation(number_of_epochs_for_simulation,
                                                      show=False, showAnim=False)
            # Keep only the end-of-simulation value of each series.
            cust_left.append(customers_left[-1])
            # BUG FIX: the original appended the whole per-epoch list
            # (maintenance_costs), which breaks the sum()-based averaging
            # below; take the final value like every other series.
            maintenance.append(maintenance_costs[-1])
            items.append(items_checked[-1])
            cust_line.append(customers_in_line[-1])
            cust_queue.append(customers_in_queue[-1])
        # Average of this configuration across the simulation runs.
        avg_num_cust_left.append(sum(cust_left) / number_of_av_simulations)
        avg_num_cust_being_helped.append(sum(cust_queue) / number_of_av_simulations)
        avg_num_items_checked.append(sum(items) / number_of_av_simulations)
        avg_num_maintenance.append(sum(maintenance) / number_of_av_simulations)
        avg_num_cust_not_in_line.append(sum(cust_line) / number_of_av_simulations)
    # Plot each set of data
    plt.figure(1)
    plt.clf()
    plt.title("Sensitivity Analysis for Customers Helped with Different Configurations")
    plt.xlabel("Cashiers Operating Self Checkouts")
    plt.ylabel("Mean of Customers Out of System at %d" % number_of_epochs_for_simulation)
    plt.plot(num_self_checkouts, avg_num_cust_left)
    plt.savefig(join("analysis_images", "configuration", model_name +
                     "_cashier_to_" + str(cashiers_to_self_checkouts) +
                     "_checkouts_list_of_customers_out_of_system.png"))
    plt.figure(2)
    plt.clf()
    plt.title("Sensitivity Analysis for Customers Waiting with Different Configurations")
    plt.xlabel("Cashiers Operating Self Checkouts")
    plt.ylabel("Mean of Customers Waiting at %d" % number_of_epochs_for_simulation)
    plt.plot(num_self_checkouts, avg_num_cust_not_in_line)
    plt.savefig(join("analysis_images", "configuration", model_name +
                     "_cashier_to_" + str(cashiers_to_self_checkouts) +
                     "_checkouts_list_of_cust_waiting.png"))
    plt.figure(3)
    plt.clf()
    plt.title("Sensitivity Analysis for Customers In Cashier's Lines with Different Configurations")
    plt.xlabel(
        "Cashiers Operating Self Checkouts\n(1 Cashier = " + str(cashiers_to_self_checkouts) + " Self Checkouts)")
    plt.ylabel("Mean of Customers Still In Line at %d" % number_of_epochs_for_simulation)
    plt.plot(num_self_checkouts, avg_num_cust_being_helped)
    plt.savefig(join("analysis_images", "configuration", model_name + "_cashier_to_" + str(
        cashiers_to_self_checkouts) + "_checkouts_cust_in_lines.png"))
    plt.figure(4)
    plt.clf()
    plt.title("Sensitivity Analysis for Total Items Checked with Different Configurations")
    plt.xlabel(
        "Cashiers Operating Self Checkouts\n(1 Cashier = " + str(cashiers_to_self_checkouts) + " Self Checkouts)")
    # BUG FIX: figures 4 and 5 reused figure 3's "Customers Still In
    # Line" ylabel; label each axis by the series it actually shows.
    plt.ylabel("Mean of Items Checked at %d" % number_of_epochs_for_simulation)
    plt.plot(num_self_checkouts, avg_num_items_checked)
    plt.savefig(join("analysis_images", "configuration", model_name + "_cashier_to_" + str(
        cashiers_to_self_checkouts) + "_checkouts_items_checked.png"))
    plt.figure(5)
    plt.clf()
    plt.title("Sensitivity Analysis for Maintenance Costs with Different Configurations")
    plt.xlabel(
        "Cashiers Operating Self Checkouts\n(1 Cashier = " + str(cashiers_to_self_checkouts) + " Self Checkouts)")
    plt.ylabel("Mean of Maintenance Costs at %d" % number_of_epochs_for_simulation)
    plt.plot(num_self_checkouts, avg_num_maintenance)
    plt.savefig(join("analysis_images", "configuration", model_name + "_cashier_to_" + str(
        cashiers_to_self_checkouts) + "_checkouts_maintenance_costs.png"))
# ------------------------------- Sensitivity ---------------------------
def sensitivity_cashierIPM_analysis_for_all_lines(number_of_epochs_for_simulation, number_of_av_simulations=200,
                                                  sensitivity_range=10, number_of_people=100):
    """Sensitivity of all line models to the cashier IPM probability.

    Runs sensitivity_cashierIPM_analysis for the "customer", "cashier"
    and "equal" line types and plots each tracked quantity against the
    probability increment (green triangles = customer, blue squares =
    cashier, red circles = equal).

    Precondition:
    - number_of_epochs_for_simulation: number of epochs each simulation runs
    - number_of_av_simulations: number of simulations averaged. Default = 200
    - sensitivity_range: range for the sensitivity test. Default = 10
    - number_of_people: number of people to enter the system. Default = 100

    Postcondition:
    - 4 png images saved to the "analysis_images" folder
    - x-axis has the sensitivity test
    - y-axis has the quantity related to the test
    """
    # Probability increments tested (simulation_i / 100 in the helper).
    x_axis = np.arange(sensitivity_range) / 100
    # Runs simulations with the different types of line:
    sens_analysis_list_of_customers_out_of_system_CUSTOMER, sens_analysis_list_of_customers_in_line_CUSTOMER, \
    sens_analysis_list_of_customers_on_cashier_queue_CUSTOMER, sens_analysis_list_of_customer_items_checked_CUSTOMER = \
        sensitivity_cashierIPM_analysis(number_of_epochs_for_simulation, "customer", number_of_av_simulations,
                                        sensitivity_range,
                                        number_of_people=number_of_people)
    sens_analysis_list_of_customers_out_of_system_CASHIER, sens_analysis_list_of_customers_in_line_CASHIER, \
    sens_analysis_list_of_customers_on_cashier_queue_CASHIER, sens_analysis_list_of_customer_items_checked_CASHIER = \
        sensitivity_cashierIPM_analysis(number_of_epochs_for_simulation, "cashier", number_of_av_simulations,
                                        sensitivity_range,
                                        number_of_people=number_of_people)
    sens_analysis_list_of_customers_out_of_system_EQUAL, sens_analysis_list_of_customers_in_line_EQUAL, \
    sens_analysis_list_of_customers_on_cashier_queue_EQUAL, sens_analysis_list_of_customer_items_checked_EQUAL = \
        sensitivity_cashierIPM_analysis(number_of_epochs_for_simulation, "equal", number_of_av_simulations,
                                        sensitivity_range,
                                        number_of_people=number_of_people)
    # BUG FIX: output paths were literal backslash strings
    # ("analysis_images\..."), which only nest correctly on Windows and
    # create a file literally named "analysis_images\..." elsewhere;
    # the rest of this module uses os.path.join, so do the same here.
    # Prints number of customers out of system
    plt.figure(1)
    plt.clf()
    plt.title("Sensitivity Analysis for Customers Out of the System")
    plt.xlabel("Probability Increment")
    plt.ylabel("Percentage of Mean of Customers Out of System at %d Epochs" % number_of_epochs_for_simulation)
    plt.plot(x_axis, sens_analysis_list_of_customers_out_of_system_CUSTOMER, 'g^',
             x_axis, sens_analysis_list_of_customers_out_of_system_CASHIER, 'bs',
             x_axis, sens_analysis_list_of_customers_out_of_system_EQUAL, 'ro')
    plt.savefig(join("analysis_images",
                     "cashierIPM_sens_analysis_list_of_customers_out_of_system_%d.png" % number_of_people))
    plt.figure(2)
    plt.clf()
    plt.title("Sensitivity Analysis for People in Line Average of %d" % number_of_av_simulations)
    plt.xlabel("Probability Increment")
    plt.ylabel("Percentage of Mean of Customers in Line at %d Epochs" % number_of_epochs_for_simulation)
    plt.plot(x_axis, sens_analysis_list_of_customers_in_line_CUSTOMER, 'g^',
             x_axis, sens_analysis_list_of_customers_in_line_CASHIER, 'bs',
             x_axis, sens_analysis_list_of_customers_in_line_EQUAL, 'ro')
    plt.savefig(join("analysis_images",
                     "cashierIPM_sens_analysis_list_of_customers_in_line_%d.png" % number_of_people))
    plt.figure(3)
    plt.clf()
    plt.title("Sensitivity Customers in Queue Average of %d" % number_of_av_simulations)
    plt.xlabel("Probability Increment")
    plt.ylabel(
        "Percentage of Mean of Cashiers on Queue in the End of Simulation at %d Epochs" % number_of_epochs_for_simulation)
    plt.plot(x_axis, sens_analysis_list_of_customers_on_cashier_queue_CUSTOMER, 'g^',
             x_axis, sens_analysis_list_of_customers_on_cashier_queue_CASHIER, 'bs',
             x_axis, sens_analysis_list_of_customers_on_cashier_queue_EQUAL, 'ro')
    plt.savefig(join("analysis_images",
                     "cashierIPM_sens_analysis_list_of_customers_on_cashier_queue_%d.png" % number_of_people))
    plt.figure(4)
    plt.clf()
    plt.title("Sensitivity Analysis for Items Checked Average of %d" % number_of_av_simulations)
    plt.xlabel("Probability Increment")
    plt.ylabel("Mean of Items checkedout at %d Epochs" % number_of_epochs_for_simulation)
    plt.plot(x_axis, sens_analysis_list_of_customer_items_checked_CUSTOMER, 'g^',
             x_axis, sens_analysis_list_of_customer_items_checked_CASHIER, 'bs',
             x_axis, sens_analysis_list_of_customer_items_checked_EQUAL, 'ro')
    plt.savefig(join("analysis_images",
                     "cashierIPM_sens_analysis_list_of_items_checked_%d.png" % number_of_people))
def sensitivity_cashierIPM_analysis(number_of_epochs_for_simulation, model_name, number_of_av_simulations,
                                    sensitivity_range, number_of_people=100):
    """ Sensitivity related to the IPM of cashier, test for specific model type
    Precondition:
        - number_of_epochs_for_simulation: number of epochs used to run the simulation
        - model_name: name of the model used during the test
            - Chosen between "equal", "cashier", and "customer"
        - number_of_av_simulations: how many simulation runs are averaged per sensitivity step
        - sensitivity_range: range of sensitivity to be tested (steps of 0.01 starting at 0)
        - number_of_people: Number of people to go into the system begin. Default = 100
    Postcondition: four lists, one entry per sensitivity step, in order:
        - average customers out of the system at the end of the simulation
          (divided by number_of_people)
        - average customers in line at the end (divided by number_of_people)
        - average customers on cashier queues at the end (divided by number_of_people)
        - average items checked, computed as first minus last entry of the
          items-checked series
    """
    out_of_system_curve = []
    in_line_curve = []
    on_queue_curve = []
    items_checked_curve = []
    for probe in range(sensitivity_range):
        # Running totals across the averaged runs for this sensitivity step.
        total_out = 0
        total_in_line = 0
        total_on_queue = 0
        total_items = 0
        for _ in range(number_of_av_simulations):
            # Cashier IPM influence sweeps 0.00, 0.01, ...; customer influence pinned to 0.
            sim = model(model_name, number_of_people, 10, 0,
                        cashier_IPM_p_influence=probe / 100,
                        customer_IPM_p_influence=0)
            (customers_out, customers_in_line, customers_on_queue,
             items_series, _maintenance_cost) = sim.execute_simulation(
                number_of_epochs_for_simulation, show=False, showAnim=False)
            # Only the final state of each series matters for the averages.
            total_out = total_out + customers_out[-1]
            total_in_line = total_in_line + customers_in_line[-1]
            total_on_queue = total_on_queue + customers_on_queue[-1]
            total_items = total_items + items_series[0] - items_series[-1]
        # Mean over runs, then (except for items) normalized by the population size.
        out_of_system_curve.append(total_out / number_of_av_simulations / number_of_people)
        in_line_curve.append(total_in_line / number_of_av_simulations / number_of_people)
        on_queue_curve.append(total_on_queue / number_of_av_simulations / number_of_people)
        items_checked_curve.append(total_items / number_of_av_simulations)
    return out_of_system_curve, in_line_curve, on_queue_curve, items_checked_curve
def sensitivity_customerIPM_analysis_for_all_lines(number_of_epochs_for_simulation, number_of_av_simulations=200,
                                                   sensitivity_range=10, number_of_people=100):
    """ Sensitivity related to the IPM of customer, test for all models type
    Precondition:
        - number_of_epochs_for_simulation: number of epochs each simulation is going to
          be run for
        - number_of_av_simulations: Number of simulations that are going to be run to
          calculate the average. Default = 200
        - sensitivity_range: Range for the sensitivity test. Default = 10
        - number_of_people: number of people to enter the system. Default = 100
    Postcondition:
        - 4 png images saved to a folder called "analysis_images"
            - x-axis has the sensitivity test
            - y-axis has the quantity related to the test
            - Related to the outputs printed at the test.
    """
    # Probability increments swept by sensitivity_customerIPM_analysis: 0.00, 0.01, ...
    x_axis = np.arange(sensitivity_range) / 100

    def _plot_metric(figure_number, title, ylabel, customer_curve, cashier_curve, equal_curve, filename):
        """ Scatter the three line-policy curves on one figure and save it as a png.
        Colors: customer=green triangles, cashier=blue squares, equal=red circles. """
        plt.figure(figure_number)
        plt.clf()
        plt.title(title)
        plt.xlabel("Probability Increment")
        plt.ylabel(ylabel)
        plt.plot(x_axis, customer_curve, 'g^',
                 x_axis, cashier_curve, 'bs',
                 x_axis, equal_curve, 'ro')
        # Forward slashes keep the path portable; the original backslash separators
        # were invalid escape sequences and produced broken filenames on non-Windows.
        plt.savefig(filename)

    # Runs simulations with the different types of line:
    out_of_system_CUSTOMER, in_line_CUSTOMER, on_queue_CUSTOMER, items_checked_CUSTOMER = \
        sensitivity_customerIPM_analysis(number_of_epochs_for_simulation, "customer", number_of_av_simulations,
                                         sensitivity_range, number_of_people=number_of_people)
    out_of_system_CASHIER, in_line_CASHIER, on_queue_CASHIER, items_checked_CASHIER = \
        sensitivity_customerIPM_analysis(number_of_epochs_for_simulation, "cashier", number_of_av_simulations,
                                         sensitivity_range, number_of_people=number_of_people)
    out_of_system_EQUAL, in_line_EQUAL, on_queue_EQUAL, items_checked_EQUAL = \
        sensitivity_customerIPM_analysis(number_of_epochs_for_simulation, "equal", number_of_av_simulations,
                                         sensitivity_range, number_of_people=number_of_people)
    # One figure per metric.
    _plot_metric(5, "Sensitivity Analysis for Customers Out of the System",
                 "Percentage of Mean of Customers Out of System at %d Epochs" % number_of_epochs_for_simulation,
                 out_of_system_CUSTOMER, out_of_system_CASHIER, out_of_system_EQUAL,
                 "analysis_images/customerIPM_sens_analysis_list_of_customers_out_of_system_%d.png" % number_of_people)
    _plot_metric(6, "Sensitivity Analysis for People in Line Average of %d" % number_of_av_simulations,
                 "Percentage of Mean of Customers in Line at %d Epochs" % number_of_epochs_for_simulation,
                 in_line_CUSTOMER, in_line_CASHIER, in_line_EQUAL,
                 "analysis_images/customerIPM_sens_analysis_list_of_customers_in_line_%d.png" % number_of_people)
    # Underscore added before %d for consistency with every other saved filename.
    _plot_metric(7, "Sensitivity Customers in Queue Average of %d" % number_of_av_simulations,
                 "Percentage of Mean of Cashiers on Queue in the End of Simulation at %d Epochs" % number_of_epochs_for_simulation,
                 on_queue_CUSTOMER, on_queue_CASHIER, on_queue_EQUAL,
                 "analysis_images/customerIPM_sens_analysis_list_of_customers_on_cashier_queue_%d.png" % number_of_people)
    _plot_metric(8, "Sensitivity Analysis for Items Checked Average of %d" % number_of_av_simulations,
                 "Mean of Items checkedout at %d Epochs" % number_of_epochs_for_simulation,
                 items_checked_CUSTOMER, items_checked_CASHIER, items_checked_EQUAL,
                 "analysis_images/customerIPM_sens_analysis_list_of_items_checked_%d.png" % number_of_people)
def sensitivity_customerIPM_analysis(number_of_epochs_for_simulation, model_name, number_of_av_simulations,
                                     sensitivity_range, number_of_people=240):
    """ Sensitivity related to the IPM of customer, test for specific model type
    Precondition:
        - number_of_epochs_for_simulation: number of epochs used to run the simulation
        - model_name: name of the model used during the test
            - Chosen between "equal", "cashier", and "customer"
        - number_of_av_simulations: how many simulation runs are averaged per sensitivity step
        - sensitivity_range: range of sensitivity to be tested (steps of 0.01 starting at 0)
        - number_of_people: Number of people to go into the system begin. Default = 240
    Postcondition:
        - sens_analysis_list_of_customers_out_of_system: list of the average total number of
          customers out of system at the end of the simulation (divided by number_of_people)
        - sens_analysis_list_of_customers_in_line: list of the average customers in line
          at the end of the simulation (divided by number_of_people)
        - sens_analysis_list_of_customers_on_cashier_queue: list of the average customers on
          queues at the end of the simulation (divided by number_of_people)
        - sens_analysis_list_of_customer_items_checked: list of the average total items checked
          at the end of the simulation
    """
    # Sets up analysis lists (one entry per sensitivity step).
    sens_analysis_list_of_customers_out_of_system = []
    sens_analysis_list_of_customers_in_line = []
    sens_analysis_list_of_customers_on_cashier_queue = []
    sens_analysis_list_of_customer_items_checked = []
    # Loops between sensitivities: customer IPM influence = 0.00, 0.01, ...
    for simulation_i in range(sensitivity_range):
        sens_customers_out_of_system_average = 0
        sens_customers_in_line_average = 0
        sens_customers_on_cashier_queue_average = 0
        sens_items_customer_checked_average = 0
        # Number of simulations used for average of simulation.
        for i in range(number_of_av_simulations):
            # Customer influence is swept; cashier influence is pinned to 0.
            self_check_model = model(model_name, number_of_people, 0, 10, cashier_IPM_p_influence=0,
                                     customer_IPM_p_influence=simulation_i / 100)
            list_of_customers_out_of_system, \
                list_of_customers_in_line, \
                list_of_customers_on_cashier_queue, \
                list_of_items_checked, \
                cost_for_maintenance \
                = self_check_model.execute_simulation(number_of_epochs_for_simulation, show=False, showAnim=False)
            # Accumulate the end-of-run value of each series.
            sens_customers_out_of_system_average = sens_customers_out_of_system_average \
                + list_of_customers_out_of_system[-1]
            sens_customers_in_line_average = sens_customers_in_line_average \
                + list_of_customers_in_line[-1]
            sens_customers_on_cashier_queue_average = sens_customers_on_cashier_queue_average \
                + list_of_customers_on_cashier_queue[-1]
            sens_items_customer_checked_average = sens_items_customer_checked_average \
                + list_of_items_checked[0] - list_of_items_checked[-1]
        # Calculates the averages over the runs, then normalizes the customer
        # counts by the population size (items checked stays an absolute mean).
        sens_customers_out_of_system_average = sens_customers_out_of_system_average / number_of_av_simulations
        sens_customers_in_line_average = sens_customers_in_line_average / number_of_av_simulations
        sens_customers_on_cashier_queue_average = sens_customers_on_cashier_queue_average / number_of_av_simulations
        sens_items_customer_checked_average = sens_items_customer_checked_average / number_of_av_simulations
        sens_customers_out_of_system_average = sens_customers_out_of_system_average / number_of_people
        sens_customers_in_line_average = sens_customers_in_line_average / number_of_people
        sens_customers_on_cashier_queue_average = sens_customers_on_cashier_queue_average / number_of_people
        # Appends to average lists.
        sens_analysis_list_of_customers_out_of_system.append(sens_customers_out_of_system_average)
        sens_analysis_list_of_customers_in_line.append(sens_customers_in_line_average)
        sens_analysis_list_of_customers_on_cashier_queue.append(sens_customers_on_cashier_queue_average)
        sens_analysis_list_of_customer_items_checked.append(sens_items_customer_checked_average)
    return sens_analysis_list_of_customers_out_of_system, sens_analysis_list_of_customers_in_line, \
        sens_analysis_list_of_customers_on_cashier_queue, sens_analysis_list_of_customer_items_checked
def sensitivity_itemNumb_analysis_for_all_lines(number_of_epochs_for_simulation, number_of_av_simulations=200,
                                                sensitivity_range=30, number_of_people=100):
    """ Sensitivity related to the number of items, test for all models type
    Precondition:
        - number_of_epochs_for_simulation: number of epochs each simulation is going to
          be run for
        - number_of_av_simulations: Number of simulations that are going to be run to
          calculate the average. Default = 200
        - sensitivity_range: Range for the sensitivity test. Default = 30
        - number_of_people: number of people to enter the system. Default = 100
    Postcondition:
        - 4 png images saved to a folder called "analysis_images"
            - x-axis has the sensitivity test
            - y-axis has the quantity related to the test
            - Related to the outputs printed at the test.
    """
    # Increments are centered on zero: -range/200 ... +(range-1)/100 - range/200,
    # matching the (item_iterator - range/2)/100 sweep in sensitivity_itemNumb_analysis.
    x_axis = (np.arange(sensitivity_range) / 100) - (sensitivity_range / 200)

    def _plot_metric(figure_number, title, ylabel, customer_curve, cashier_curve, equal_curve, filename):
        """ Scatter the three line-policy curves on one figure and save it as a png.
        Colors: customer=green triangles, cashier=blue squares, equal=red circles. """
        plt.figure(figure_number)
        plt.clf()
        plt.title(title)
        plt.xlabel("Probability Increment")
        plt.ylabel(ylabel)
        plt.plot(x_axis, customer_curve, 'g^',
                 x_axis, cashier_curve, 'bs',
                 x_axis, equal_curve, 'ro')
        # Forward slashes keep the path portable; the original backslash separators
        # were invalid escape sequences and produced broken filenames on non-Windows.
        plt.savefig(filename)

    # Runs simulations with the different types of line:
    out_of_system_CUSTOMER, in_line_CUSTOMER, on_queue_CUSTOMER, items_checked_CUSTOMER = \
        sensitivity_itemNumb_analysis(number_of_epochs_for_simulation, "customer", number_of_av_simulations,
                                      sensitivity_range, number_of_people=number_of_people)
    out_of_system_CASHIER, in_line_CASHIER, on_queue_CASHIER, items_checked_CASHIER = \
        sensitivity_itemNumb_analysis(number_of_epochs_for_simulation, "cashier", number_of_av_simulations,
                                      sensitivity_range, number_of_people=number_of_people)
    out_of_system_EQUAL, in_line_EQUAL, on_queue_EQUAL, items_checked_EQUAL = \
        sensitivity_itemNumb_analysis(number_of_epochs_for_simulation, "equal", number_of_av_simulations,
                                      sensitivity_range, number_of_people=number_of_people)
    # One figure per metric.
    _plot_metric(9, "Sensitivity Analysis for Customers Out of the System",
                 "Percentage of Mean of Customers Out of System at %d Epochs" % number_of_epochs_for_simulation,
                 out_of_system_CUSTOMER, out_of_system_CASHIER, out_of_system_EQUAL,
                 "analysis_images/itemNumb_sens_analysis_list_of_customers_out_of_system_%d.png" % number_of_people)
    _plot_metric(10, "Sensitivity Analysis for People in Line Average of %d" % number_of_av_simulations,
                 "Percentage of Mean of Customers in Line at %d Epochs" % number_of_epochs_for_simulation,
                 in_line_CUSTOMER, in_line_CASHIER, in_line_EQUAL,
                 "analysis_images/itemNumb_sens_analysis_list_of_customers_in_line_%d.png" % number_of_people)
    _plot_metric(11, "Sensitivity Customers in Queue Average of %d" % number_of_av_simulations,
                 "Percentage of Mean of Cashiers on Queue in the End of Simulation at %d Epochs" % number_of_epochs_for_simulation,
                 on_queue_CUSTOMER, on_queue_CASHIER, on_queue_EQUAL,
                 "analysis_images/itemNumb_sens_analysis_list_of_customers_on_cashier_queue_%d.png" % number_of_people)
    _plot_metric(12, "Sensitivity Analysis for Items Checked Average of %d" % number_of_av_simulations,
                 "Mean of Items checkedout at %d Epochs" % number_of_epochs_for_simulation,
                 items_checked_CUSTOMER, items_checked_CASHIER, items_checked_EQUAL,
                 "analysis_images/itemNumb_sens_analysis_list_of_items_checked_%d.png" % number_of_people)
def sensitivity_itemNumb_analysis(number_of_epochs_for_simulation, model_name, number_of_av_simulations,
                                  sensitivity_range, number_of_people=150):
    """ Sensitivity related to the number of items in the customer's cashier, test for specific model type
    Precondition:
        - number_of_epochs_for_simulation: number of epochs used to run the simulation
        - model_name: name of the model used during the test
            - Chosen between "equal", "cashier", and "customer"
        - number_of_av_simulations: how many simulation runs are averaged per sensitivity step
        - sensitivity_range: range of sensitivity to be tested; the swept increment is
          (step - sensitivity_range/2) / 100, i.e. centered on zero
        - number_of_people: Number of people to go into the system begin. Default = 150
    Postcondition:
        - sens_analysis_list_of_customers_out_of_system: list of the average total number of
          customers out of system at the end of the simulation (divided by number_of_people)
        - sens_analysis_list_of_customers_in_line: list of the average customers in line
          at the end of the simulation (divided by number_of_people)
        - sens_analysis_list_of_customers_on_cashier_queue: list of the average customers on
          queues at the end of the simulation (divided by number_of_people)
        - sens_analysis_list_of_customer_items_checked: list of the average total items checked
          at the end of the simulation
    """
    # Sets up analysis lists (one entry per sensitivity step).
    sens_analysis_list_of_customers_out_of_system = []
    sens_analysis_list_of_customers_in_line = []
    sens_analysis_list_of_customers_on_cashier_queue = []
    sens_analysis_list_of_customer_items_checked = []
    # Loops between sensitivities, sweeping item_creation_sensitivity around zero.
    for item_iterator in range(sensitivity_range):
        sens_customers_out_of_system_average = 0
        sens_customers_in_line_average = 0
        sens_customers_on_cashier_queue_average = 0
        sens_items_customer_checked_average = 0
        # Number of simulations used for average of simulation.
        for i in range(number_of_av_simulations):
            self_check_model = model(model_name, number_of_people, 10, 6, cashier_IPM_p_influence=0,
                                     item_creation_sensitivity=(item_iterator - (sensitivity_range / 2)) / 100)
            list_of_customers_out_of_system, \
                list_of_customers_in_line, \
                list_of_customers_on_cashier_queue, \
                list_of_items_checked, \
                cost_for_maintenance \
                = self_check_model.execute_simulation(number_of_epochs_for_simulation, show=False, showAnim=False)
            # Accumulate the end-of-run value of each series.
            sens_customers_out_of_system_average = sens_customers_out_of_system_average \
                + list_of_customers_out_of_system[-1]
            sens_customers_in_line_average = sens_customers_in_line_average \
                + list_of_customers_in_line[-1]
            sens_customers_on_cashier_queue_average = sens_customers_on_cashier_queue_average \
                + list_of_customers_on_cashier_queue[-1]
            sens_items_customer_checked_average = sens_items_customer_checked_average \
                + list_of_items_checked[0] - list_of_items_checked[-1]
        # Calculates the averages over the runs, then normalizes the customer
        # counts by the population size (items checked stays an absolute mean).
        sens_customers_out_of_system_average = sens_customers_out_of_system_average / number_of_av_simulations
        sens_customers_in_line_average = sens_customers_in_line_average / number_of_av_simulations
        sens_customers_on_cashier_queue_average = sens_customers_on_cashier_queue_average / number_of_av_simulations
        sens_items_customer_checked_average = sens_items_customer_checked_average / number_of_av_simulations
        sens_customers_out_of_system_average = sens_customers_out_of_system_average / number_of_people
        sens_customers_in_line_average = sens_customers_in_line_average / number_of_people
        sens_customers_on_cashier_queue_average = sens_customers_on_cashier_queue_average / number_of_people
        # Appends to average lists.
        sens_analysis_list_of_customers_out_of_system.append(sens_customers_out_of_system_average)
        sens_analysis_list_of_customers_in_line.append(sens_customers_in_line_average)
        sens_analysis_list_of_customers_on_cashier_queue.append(sens_customers_on_cashier_queue_average)
        sens_analysis_list_of_customer_items_checked.append(sens_items_customer_checked_average)
    return sens_analysis_list_of_customers_out_of_system, sens_analysis_list_of_customers_in_line, \
        sens_analysis_list_of_customers_on_cashier_queue, sens_analysis_list_of_customer_items_checked
def sensitivity_chitchatness_analysis_for_all_lines(number_of_epochs_for_simulation, number_of_av_simulations=200,
                                                    sensitivity_range=30, number_of_people=100):
    """ Sensitivity related to the chitchatness of cashier, test for all models type
    Precondition:
        - number_of_epochs_for_simulation: number of epochs each simulation is going to
          be run for
        - number_of_av_simulations: Number of simulations that are going to be run to
          calculate the average. Default = 200
        - sensitivity_range: Range for the sensitivity test. Default = 30
        - number_of_people: number of people to enter the system. Default = 100
    Postcondition:
        - 4 png images saved to a folder called "analysis_images"
            - x-axis has the sensitivity test
            - y-axis has the quantity related to the test
            - Related to the outputs printed at the test.
    """
    # NOTE(review): the x-axis is scaled by 1/10 while sensitivity_chitchatness_analysis
    # sweeps chitchatness_influence in steps of 1/100 — confirm the intended units.
    x_axis = np.arange(sensitivity_range) / 10

    def _plot_metric(figure_number, title, ylabel, customer_curve, cashier_curve, equal_curve, filename):
        """ Scatter the three line-policy curves on one figure and save it as a png.
        Colors: customer=green triangles, cashier=blue squares, equal=red circles.
        (Figure numbers 10-12 are shared with the itemNumb plots; clf() makes the
        reuse harmless.) """
        plt.figure(figure_number)
        plt.clf()
        plt.title(title)
        plt.xlabel("Probability Increment")
        plt.ylabel(ylabel)
        plt.plot(x_axis, customer_curve, 'g^',
                 x_axis, cashier_curve, 'bs',
                 x_axis, equal_curve, 'ro')
        # Forward slashes keep the path portable; the original backslash separators
        # were invalid escape sequences and produced broken filenames on non-Windows.
        plt.savefig(filename)

    # Runs simulations with the different types of line:
    out_of_system_CUSTOMER, in_line_CUSTOMER, on_queue_CUSTOMER, items_checked_CUSTOMER = \
        sensitivity_chitchatness_analysis(number_of_epochs_for_simulation, "customer", number_of_av_simulations,
                                          sensitivity_range, number_of_people=number_of_people)
    out_of_system_CASHIER, in_line_CASHIER, on_queue_CASHIER, items_checked_CASHIER = \
        sensitivity_chitchatness_analysis(number_of_epochs_for_simulation, "cashier", number_of_av_simulations,
                                          sensitivity_range, number_of_people=number_of_people)
    out_of_system_EQUAL, in_line_EQUAL, on_queue_EQUAL, items_checked_EQUAL = \
        sensitivity_chitchatness_analysis(number_of_epochs_for_simulation, "equal", number_of_av_simulations,
                                          sensitivity_range, number_of_people=number_of_people)
    # One figure per metric.
    _plot_metric(10, "Sensitivity Analysis for Customers Out of the System",
                 "Percentage of Mean of Customers Out of System at %d Epochs" % number_of_epochs_for_simulation,
                 out_of_system_CUSTOMER, out_of_system_CASHIER, out_of_system_EQUAL,
                 "analysis_images/chitchatness_sens_analysis_list_of_customers_out_of_system_%d.png" % number_of_people)
    _plot_metric(11, "Sensitivity Analysis for People in Line Average of %d" % number_of_av_simulations,
                 "Percentage of Mean of Customers in Line at %d Epochs" % number_of_epochs_for_simulation,
                 in_line_CUSTOMER, in_line_CASHIER, in_line_EQUAL,
                 "analysis_images/chitchatness_sens_analysis_list_of_customers_in_line_%d.png" % number_of_people)
    _plot_metric(12, "Sensitivity Customers in Queue Average of %d" % number_of_av_simulations,
                 "Percentage of Mean of Cashiers on Queue in the End of Simulation at %d Epochs" % number_of_epochs_for_simulation,
                 on_queue_CUSTOMER, on_queue_CASHIER, on_queue_EQUAL,
                 "analysis_images/chitchatness_sens_analysis_list_of_customers_on_cashier_queue_%d.png" % number_of_people)
    _plot_metric(13, "Sensitivity Analysis for Items Checked Average of %d" % number_of_av_simulations,
                 "Mean of Items checkedout at %d Epochs" % number_of_epochs_for_simulation,
                 items_checked_CUSTOMER, items_checked_CASHIER, items_checked_EQUAL,
                 "analysis_images/chitchatness_sens_analysis_list_of_items_checked_%d.png" % number_of_people)
def sensitivity_chitchatness_analysis(number_of_epochs_for_simulation, model_name, number_of_av_simulations,
                                      sensitivity_range, number_of_people=150):
    """ Sensitivity related to the number of cashier chitchatness, test for specific model type
    Precondition:
        - number_of_epochs_for_simulation: number of epochs used to run the simulation
        - model_name: name of the model used during the test
            - Chosen between "equal", "cashier", and "customer"
        - number_of_av_simulations: how many simulation runs are averaged per sensitivity step
        - sensitivity_range: range of sensitivity to be tested (chitchatness_influence
          sweeps 0.00, 0.01, ...)
        - number_of_people: Number of people to go into the system begin. Default = 150
    Postcondition:
        - sens_analysis_list_of_customers_out_of_system: list of the average total number of
          customers out of system at the end of the simulation (divided by number_of_people)
        - sens_analysis_list_of_customers_in_line: list of the average customers in line
          at the end of the simulation (divided by number_of_people)
        - sens_analysis_list_of_customers_on_cashier_queue: list of the average customers on
          queues at the end of the simulation (divided by number_of_people)
        - sens_analysis_list_of_customer_items_checked: list of the average total items checked
          at the end of the simulation
    """
    # Sets up analysis lists (one entry per sensitivity step).
    sens_analysis_list_of_customers_out_of_system = []
    sens_analysis_list_of_customers_in_line = []
    sens_analysis_list_of_customers_on_cashier_queue = []
    sens_analysis_list_of_customer_items_checked = []
    # Loops between sensitivities, sweeping chitchatness_influence.
    for item_iterator in range(sensitivity_range):
        sens_customers_out_of_system_average = 0
        sens_customers_in_line_average = 0
        sens_customers_on_cashier_queue_average = 0
        sens_items_customer_checked_average = 0
        # Number of simulations used for average of simulation.
        for i in range(number_of_av_simulations):
            self_check_model = model(model_name, number_of_people, 10, 0, cashier_IPM_p_influence=0,
                                     chitchatness_influence=(item_iterator / 100))
            list_of_customers_out_of_system, \
                list_of_customers_in_line, \
                list_of_customers_on_cashier_queue, \
                list_of_items_checked, \
                cost_for_maintenance \
                = self_check_model.execute_simulation(number_of_epochs_for_simulation, show=False, showAnim=False)
            # Accumulate the end-of-run value of each series.
            sens_customers_out_of_system_average = sens_customers_out_of_system_average \
                + list_of_customers_out_of_system[-1]
            sens_customers_in_line_average = sens_customers_in_line_average \
                + list_of_customers_in_line[-1]
            sens_customers_on_cashier_queue_average = sens_customers_on_cashier_queue_average \
                + list_of_customers_on_cashier_queue[-1]
            sens_items_customer_checked_average = sens_items_customer_checked_average \
                + list_of_items_checked[0] - list_of_items_checked[-1]
        # Calculates the averages over the runs, then normalizes the customer
        # counts by the population size (items checked stays an absolute mean).
        sens_customers_out_of_system_average = sens_customers_out_of_system_average / number_of_av_simulations
        sens_customers_in_line_average = sens_customers_in_line_average / number_of_av_simulations
        sens_customers_on_cashier_queue_average = sens_customers_on_cashier_queue_average / number_of_av_simulations
        sens_items_customer_checked_average = sens_items_customer_checked_average / number_of_av_simulations
        sens_customers_out_of_system_average = sens_customers_out_of_system_average / number_of_people
        sens_customers_in_line_average = sens_customers_in_line_average / number_of_people
        sens_customers_on_cashier_queue_average = sens_customers_on_cashier_queue_average / number_of_people
        # Appends to average lists.
        sens_analysis_list_of_customers_out_of_system.append(sens_customers_out_of_system_average)
        sens_analysis_list_of_customers_in_line.append(sens_customers_in_line_average)
        sens_analysis_list_of_customers_on_cashier_queue.append(sens_customers_on_cashier_queue_average)
        sens_analysis_list_of_customer_items_checked.append(sens_items_customer_checked_average)
    return sens_analysis_list_of_customers_out_of_system, sens_analysis_list_of_customers_in_line, \
        sens_analysis_list_of_customers_on_cashier_queue, sens_analysis_list_of_customer_items_checked
def sensitivity_customer_number_analysis_for_all_lines(number_of_epochs_for_simulation, number_of_av_simulations=200,
                                                       sensitivity_range=80, number_of_people=100):
    """ Sensitivity related number of customers in the system, test for all models type
    Precondition:
        - number_of_epochs_for_simulation: number of epochs each simulation is going to
          be run for
        - number_of_av_simulations: Number of simulations that are going to be run to
          calculate the average. Default = 200
        - sensitivity_range: Range for the sensitivity test. Default = 80
        - number_of_people: number of people to enter the system. Default = 100
    Postcondition:
        - 4 png images saved to a folder called "analysis_images"
            - x-axis has the sensitivity test
            - y-axis has the quantity related to the test
            - Related to the outputs printed at the test.
    """
    # The x-axis is the raw increment added to the base customer count: 0, 1, 2, ...
    x_axis = np.arange(sensitivity_range)

    def _plot_metric(figure_number, title, ylabel, customer_curve, cashier_curve, equal_curve, filename):
        """ Plot the three line-policy curves (solid lines here, not markers, since
        the sweep is dense) on one figure and save it as a png.
        Colors: customer=green, cashier=blue, equal=red. """
        plt.figure(figure_number)
        plt.clf()
        plt.title(title)
        plt.xlabel("Increment in the Number of Customers")
        plt.ylabel(ylabel)
        plt.plot(x_axis, customer_curve, 'g',
                 x_axis, cashier_curve, 'b',
                 x_axis, equal_curve, 'r')
        # Forward slashes keep the path portable; the escaped backslashes of the
        # original produced a literal-backslash filename on non-Windows systems.
        plt.savefig(filename)

    # Runs simulations with the different types of line:
    out_of_system_CUSTOMER, in_line_CUSTOMER, on_queue_CUSTOMER, items_checked_CUSTOMER = \
        sensitivity_number_of_customers_analysis(number_of_epochs_for_simulation, "customer",
                                                 number_of_av_simulations, sensitivity_range,
                                                 number_of_people=number_of_people)
    out_of_system_CASHIER, in_line_CASHIER, on_queue_CASHIER, items_checked_CASHIER = \
        sensitivity_number_of_customers_analysis(number_of_epochs_for_simulation, "cashier",
                                                 number_of_av_simulations, sensitivity_range,
                                                 number_of_people=number_of_people)
    out_of_system_EQUAL, in_line_EQUAL, on_queue_EQUAL, items_checked_EQUAL = \
        sensitivity_number_of_customers_analysis(number_of_epochs_for_simulation, "equal",
                                                 number_of_av_simulations, sensitivity_range,
                                                 number_of_people=number_of_people)
    # One figure per metric.
    _plot_metric(14, "Sensitivity Analysis for Customers Out of the System",
                 "Percentage of Mean of Customers Out of System at %d Epochs" % number_of_epochs_for_simulation,
                 out_of_system_CUSTOMER, out_of_system_CASHIER, out_of_system_EQUAL,
                 "analysis_images/number_of_customers_sens_analysis_list_of_customers_out_of_system_%d.png" % number_of_people)
    _plot_metric(15, "Sensitivity Analysis for People in Line Average of %d" % number_of_av_simulations,
                 "Percentage of Mean of Customers in Line at %d Epochs" % number_of_epochs_for_simulation,
                 in_line_CUSTOMER, in_line_CASHIER, in_line_EQUAL,
                 "analysis_images/number_of_customers_sens_analysis_list_of_customers_in_line_%d.png" % number_of_people)
    _plot_metric(16, "Sensitivity Customers in Queue Average of %d" % number_of_av_simulations,
                 "Percentage of Mean of Cashiers on Queue in the End of Simulation at %d Epochs" % number_of_epochs_for_simulation,
                 on_queue_CUSTOMER, on_queue_CASHIER, on_queue_EQUAL,
                 "analysis_images/number_of_customers_sens_analysis_list_of_customers_on_cashier_queue_%d.png" % number_of_people)
    _plot_metric(17, "Sensitivity Analysis for Items Checked Average of %d" % number_of_av_simulations,
                 "Mean of Items checkedout at %d Epochs" % number_of_epochs_for_simulation,
                 items_checked_CUSTOMER, items_checked_CASHIER, items_checked_EQUAL,
                 "analysis_images/number_of_customers_sens_analysis_list_of_items_checked_%d.png" % number_of_people)
def sensitivity_number_of_customers_analysis(number_of_epochs_for_simulation, model_name, number_of_av_simulations,
                                             sensitivity_range, \
                                             number_of_people=150):
    """ Sensitivity related to the number of customers in the system, test for specific model type
    Precondition:
    - number_of_epochs_for_simulation: number of epochs used to run the simulation
    - model_name: name of the model used during the test
        - Chosen between "equal", "cashier", and "customer"
    - number_of_av_simulations: number of simulation runs averaged per customer increment
    - sensitivity_range: number of customer-count increments to be tested
    - number_of_people: Number of people to go into the system begin. Default = 150
    Postcondition:
    - sens_analysis_list_of_customers_out_of_system: a list of the average total number of
    customers out of system at the end of the simulation (normalized by number_of_people)
    - sens_analysis_list_of_customers_in_line: a list of the average customers in line
    at the end of the simulation (normalized by number_of_people)
    - sens_analysis_list_of_customers_on_cashier_queue: a list of the average customers on
    queues at the end of the simulation (normalized by number_of_people)
    - sens_analysis_list_of_customer_items_checked: a list of the average total items checked
    at the end of the simulation
    """
    # Sets up analysis list
    sens_analysis_list_of_customers_out_of_system = []
    sens_analysis_list_of_customers_in_line = []
    sens_analysis_list_of_customers_on_cashier_queue = []
    sens_analysis_list_of_customer_items_checked = []
    # Loops between sensitivities
    # 0.8 -> 0.95
    for item_iterator in range(sensitivity_range):
        sens_customers_out_of_system_average = 0
        sens_customers_in_line_average = 0
        sens_customers_on_cashier_queue_average = 0
        sens_items_customer_checked_average = 0
        # Number of simulations used for average of simulation
        for i in range(number_of_av_simulations):
            # NOTE(review): the positional arguments 10 and 0 presumably are
            # cashier/self-checkout counts -- confirm against `model`'s signature.
            self_check_model = model(model_name, number_of_people + (item_iterator), 10, 0)
            list_of_customers_out_of_system, \
            list_of_customers_in_line, \
            list_of_customers_on_cashier_queue, \
            list_of_items_checked, \
            cost_for_maintenance \
                = self_check_model.execute_simulation(number_of_epochs_for_simulation, show=False, showAnim=False)
            # Adds to average (last entry of each series = end-of-simulation value)
            sens_customers_out_of_system_average = sens_customers_out_of_system_average \
                + list_of_customers_out_of_system[-1]
            sens_customers_in_line_average = sens_customers_in_line_average \
                + list_of_customers_in_line[-1]
            sens_customers_on_cashier_queue_average = sens_customers_on_cashier_queue_average \
                + list_of_customers_on_cashier_queue[-1]
            # NOTE(review): first-minus-last suggests this series counts items
            # *remaining* to be checked -- confirm against execute_simulation.
            sens_items_customer_checked_average = sens_items_customer_checked_average \
                + list_of_items_checked[0] - list_of_items_checked[-1]
        # Calulates average over the simulation runs
        sens_customers_out_of_system_average = sens_customers_out_of_system_average / number_of_av_simulations
        sens_customers_in_line_average = sens_customers_in_line_average / number_of_av_simulations
        sens_customers_on_cashier_queue_average = sens_customers_on_cashier_queue_average / number_of_av_simulations
        sens_items_customer_checked_average = sens_items_customer_checked_average / number_of_av_simulations
        # Normalizes the customer counts by the baseline population size
        sens_customers_out_of_system_average = sens_customers_out_of_system_average / number_of_people
        sens_customers_in_line_average = sens_customers_in_line_average / number_of_people
        sens_customers_on_cashier_queue_average = sens_customers_on_cashier_queue_average / number_of_people
        # Appends to average list
        sens_analysis_list_of_customers_out_of_system.append(sens_customers_out_of_system_average)
        sens_analysis_list_of_customers_in_line.append(sens_customers_in_line_average)
        sens_analysis_list_of_customers_on_cashier_queue.append(sens_customers_on_cashier_queue_average)
        sens_analysis_list_of_customer_items_checked.append(sens_items_customer_checked_average)
    return sens_analysis_list_of_customers_out_of_system, sens_analysis_list_of_customers_in_line, \
        sens_analysis_list_of_customers_on_cashier_queue, sens_analysis_list_of_customer_items_checked
def full_day_analysis_frontLoaded():
    """
    Run three full-day models for a "frontloaded" population distribution,
    one per line type (equal / customer select / cashier select).
    Then graph the major statistics -- customers out of the system, items out
    of the system, customers in queue, and customers not in queue -- over the
    course of the simulated day, saving each figure to a PNG file.
    """
    # Build all three models first, then run them (same order as before).
    equal_sim = Fullday('equal', 6, 4, day_type='front')
    customer_sim = Fullday('customer', 6, 4, day_type='front')
    cashier_sim = Fullday('cashier', 6, 4, day_type='front')
    for sim in (equal_sim, customer_sim, cashier_sim):
        sim.execute_simulation()

    legend_labels = ["Equal Line", "Customer select line", "Cashier select line"]

    def _render(series_attr, y_label, title, filename, legend_first):
        # Draw one comparison figure for the named statistic and save it.
        plt.plot(getattr(equal_sim, series_attr), 'r',
                 getattr(customer_sim, series_attr), 'b',
                 getattr(cashier_sim, series_attr), 'g')
        if legend_first:
            plt.legend(legend_labels)
        plt.xlabel("Time in epochs (4 second increments)")
        plt.ylabel(y_label)
        plt.title(title)
        if not legend_first:
            plt.legend(legend_labels)
        plt.savefig(filename)
        plt.show()

    _render("list_of_customers_out_of_system",
            "Customers who have left the store",
            "Customers out of the system over a full day",
            "customers_out_frontloaded.png", legend_first=False)
    _render("list_of_customers_in_line",
            "Customers not line",
            "Customers not in line over time",
            "customers_not_line_frontloaded.png", legend_first=True)
    _render("list_of_customers_on_cashier_queue",
            "Customers in specific cashier queues",
            "Customers in line over time",
            "customers_in_queue_frontloaded.png", legend_first=True)
    _render("list_of_items_checked",
            "Numer of items scanned out of the store",
            "Items scanned out of the store over time",
            "items_out_frontloaded.png", legend_first=True)
def full_day_analysis_backLoaded():
    """
    This will run three full day models for a "backloaded" population
    distribution which will cover each line type.
    It will then graph the major statistics: customers out of system,
    items out of the system, customers in queue, and customers not in queue,
    over the course of the simulated day, saving each figure to a PNG file.
    """
    # run full day simulation for all three line types
    equal = Fullday('equal', 6, 4, day_type='back')
    customer = Fullday('customer', 6, 4, day_type='back')
    cashier = Fullday('cashier', 6, 4, day_type='back')
    equal.execute_simulation()
    customer.execute_simulation()
    cashier.execute_simulation()
    # BUG FIX: removed the unused `time_axis = np.arange(start=9, step=...)`
    # line -- np.arange cannot be called without a `stop` value (it raises
    # TypeError on current NumPy), and the result was never used anyway.
    # Figure 1: cumulative customers that have left the store
    plt.plot(equal.list_of_customers_out_of_system,
             'r',
             customer.list_of_customers_out_of_system,
             'b',
             cashier.list_of_customers_out_of_system,
             'g')
    plt.xlabel("Time in epochs (4 second increments)")
    plt.ylabel("Customers who have left the store")
    plt.title("Customers out of the system over a full day")
    plt.legend(["Equal Line", "Customer select line", "Cashier select line"])
    plt.savefig("customers_out_backloaded.png")
    plt.show()
    # Figure 2: customers not currently standing in a cashier queue
    plt.plot(equal.list_of_customers_in_line,
             'r',
             customer.list_of_customers_in_line,
             'b',
             cashier.list_of_customers_in_line,
             'g')
    plt.legend(["Equal Line", "Customer select line", "Cashier select line"])
    plt.xlabel("Time in epochs (4 second increments)")
    plt.ylabel("Customers not line")
    plt.title("Customers not in line over time")
    plt.savefig("customers_not_line_backloaded.png")
    plt.show()
    # Figure 3: customers waiting in specific cashier queues
    plt.plot(equal.list_of_customers_on_cashier_queue,
             'r',
             customer.list_of_customers_on_cashier_queue,
             'b',
             cashier.list_of_customers_on_cashier_queue,
             'g')
    plt.legend(["Equal Line", "Customer select line", "Cashier select line"])
    plt.xlabel("Time in epochs (4 second increments)")
    plt.ylabel("Customers in specific cashier queues")
    plt.title("Customers in line over time")
    plt.savefig("customers_in_queue_backloaded.png")
    plt.show()
    # Figure 4: items scanned out of the store
    plt.plot(equal.list_of_items_checked, 'r',
             customer.list_of_items_checked, 'b',
             cashier.list_of_items_checked, 'g')
    plt.legend(["Equal Line", "Customer select line", "Cashier select line"])
    plt.xlabel("Time in epochs (4 second increments)")
    plt.ylabel("Numer of items scanned out of the store")
    plt.title("Items scanned out of the store over time")
    plt.savefig("items_out_backloaded.png")
    plt.show()
def full_day_analysis_normal():
    """
    This will run three full day models for a "normal" population
    distribution which will cover each line type.
    It will then graph the major statistics: customers out of system,
    items out of the system, customers in queue, and customers not in queue,
    over the course of the simulated day, saving each figure to a PNG file.
    """
    # run full day simulation for all three line types
    equal = Fullday('equal', 6, 4)
    customer = Fullday('customer', 6, 4)
    cashier = Fullday('cashier', 6, 4)
    equal.execute_simulation()
    customer.execute_simulation()
    cashier.execute_simulation()
    # BUG FIX: removed the unused `time_axis = np.arange(start=9, step=...)`
    # line -- np.arange cannot be called without a `stop` value (it raises
    # TypeError on current NumPy), and the result was never used anyway.
    # Figure 1: cumulative customers that have left the store
    plt.plot(equal.list_of_customers_out_of_system,
             'r',
             customer.list_of_customers_out_of_system,
             'b',
             cashier.list_of_customers_out_of_system,
             'g')
    plt.xlabel("Time in epochs (4 second increments)")
    plt.ylabel("Customers who have left the store")
    plt.title("Customers out of the system over a full day")
    plt.legend(["Equal Line", "Customer select line", "Cashier select line"])
    plt.savefig("customers_out_normal.png")
    plt.show()
    # Figure 2: customers not currently standing in a cashier queue
    plt.plot(equal.list_of_customers_in_line,
             'r',
             customer.list_of_customers_in_line,
             'b',
             cashier.list_of_customers_in_line,
             'g')
    plt.legend(["Equal Line", "Customer select line", "Cashier select line"])
    plt.xlabel("Time in epochs (4 second increments)")
    plt.ylabel("Customers not line")
    plt.title("Customers not in line over time")
    plt.savefig("customers_not_line_normal.png")
    plt.show()
    # Figure 3: customers waiting in specific cashier queues
    plt.plot(equal.list_of_customers_on_cashier_queue,
             'r',
             customer.list_of_customers_on_cashier_queue,
             'b',
             cashier.list_of_customers_on_cashier_queue,
             'g')
    plt.legend(["Equal Line", "Customer select line", "Cashier select line"])
    plt.xlabel("Time in epochs (4 second increments)")
    plt.ylabel("Customers in specific cashier queues")
    plt.title("Customers in line over time")
    plt.savefig("customers_in_queue_normal.png")
    plt.show()
    # Figure 4: items scanned out of the store
    plt.plot(equal.list_of_items_checked, 'r',
             customer.list_of_items_checked, 'b',
             cashier.list_of_items_checked, 'g')
    plt.legend(["Equal Line", "Customer select line", "Cashier select line"])
    plt.xlabel("Time in epochs (4 second increments)")
    plt.ylabel("Numer of items scanned out of the store")
    plt.title("Items scanned out of the store over time")
    plt.savefig("items_out_normal.png")
    plt.show()
def fullday_analysis():
    """Execute every full-day analysis scenario (normal, back-, front-loaded)."""
    # Run the scenarios in the same order as before.
    for scenario in (full_day_analysis_normal,
                     full_day_analysis_backLoaded,
                     full_day_analysis_frontLoaded):
        scenario()
|
{"hexsha": "f61f8526ee68b3e1a0a31286cb521ed404d8935f", "size": 73767, "ext": "py", "lang": "Python", "max_stars_repo_path": "analysis_utils.py", "max_stars_repo_name": "pgmoka/checkout-simulator", "max_stars_repo_head_hexsha": "bce7e68ba47b9309f19514a9199d43bdbbbc4ffc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "analysis_utils.py", "max_issues_repo_name": "pgmoka/checkout-simulator", "max_issues_repo_head_hexsha": "bce7e68ba47b9309f19514a9199d43bdbbbc4ffc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "analysis_utils.py", "max_forks_repo_name": "pgmoka/checkout-simulator", "max_forks_repo_head_hexsha": "bce7e68ba47b9309f19514a9199d43bdbbbc4ffc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 52.8038654259, "max_line_length": 122, "alphanum_fraction": 0.7013163067, "include": true, "reason": "import numpy", "num_tokens": 15267}
|
#pragma once
#include <bunsan/utility/resolver.hpp>
#include <bunsan/utility/utility.hpp>
#include <boost/property_tree/ptree.hpp>
#include <boost/serialization/access.hpp>
#include <boost/serialization/nvp.hpp>
#include <functional>
#include <string>
namespace bunsan::utility {
namespace detail {
// Callable that binds a utility factory together with the utility_config it
// should be invoked with; only the resolver is supplied later, at call time.
// Produced by factory_options::configured_factory().
template <typename Factory>
class configured_factory {
 public:
  using factory = Factory;
  using factory_type = typename factory::factory_type;
  using result_type = typename factory::bunsan_factory::result_type;
  using arguments_size = typename factory::bunsan_factory::arguments_size;

 public:
  // Stores copies of both the factory callable and the configuration.
  configured_factory(const factory_type &factory_,
                     const utility_config &config_)
      : m_factory(factory_), m_config(config_) {}

  // Invokes the stored factory with the stored config and the given resolver.
  result_type operator()(resolver &res) const {
    return m_factory(m_config, res);
  }

 private:
  factory_type m_factory;
  utility_config m_config;
};
}  // namespace detail
// Serializable (type, config) pair describing how to construct a utility via
// Factory; suitable for storage in configuration files via Boost.Serialization.
template <typename Factory>
struct factory_options {
  using factory = Factory;
  using result_type = typename factory::bunsan_factory::result_type;
  // A factory pre-bound to this config: only the resolver is supplied at call.
  using factory_type = std::function<result_type(resolver &)>;

  // Boost.Serialization hook: archives the type name and its config tree.
  template <typename Archive>
  void serialize(Archive &ar, const unsigned int) {
    ar & BOOST_SERIALIZATION_NVP(type);
    ar & BOOST_SERIALIZATION_NVP(config);
  }

  /// Try to create instance of type using Factory.
  result_type instance(resolver &res) const {
    return factory::instance(type, config, res);
  }

  /// Try to create instance of type using Factory.
  // NOTE(review): presumably returns an empty/null result instead of throwing
  // when the type is unknown -- confirm against Factory::instance_optional.
  result_type instance_optional(resolver &res) const {
    return factory::instance_optional(type, config, res);
  }

  // Binds `type`'s factory to `config`, deferring resolution to call time.
  factory_type configured_factory() const {
    return detail::configured_factory<factory>(factory::factory(type), config);
  }

  std::string type;       // registered utility type name
  utility_config config;  // configuration passed to the factory
};
} // namespace bunsan::utility
|
{"hexsha": "91fd03697366bbaacc8532fa1e2b6180879e5056", "size": 1852, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "bunsan/utility/include/bunsan/utility/factory_options.hpp", "max_stars_repo_name": "bacsorg/bacs", "max_stars_repo_head_hexsha": "2b52feb9efc805655cdf7829cf77ee028d567969", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bunsan/utility/include/bunsan/utility/factory_options.hpp", "max_issues_repo_name": "bacsorg/bacs", "max_issues_repo_head_hexsha": "2b52feb9efc805655cdf7829cf77ee028d567969", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 10.0, "max_issues_repo_issues_event_min_datetime": "2018-02-06T14:46:36.000Z", "max_issues_repo_issues_event_max_datetime": "2018-03-20T13:37:20.000Z", "max_forks_repo_path": "bunsan/utility/include/bunsan/utility/factory_options.hpp", "max_forks_repo_name": "bacsorg/bacs", "max_forks_repo_head_hexsha": "2b52feb9efc805655cdf7829cf77ee028d567969", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2021-11-26T10:59:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-26T10:59:09.000Z", "avg_line_length": 26.4571428571, "max_line_length": 79, "alphanum_fraction": 0.7397408207, "num_tokens": 410}
|
import unittest
import os
import random
import glob
import numpy as np
from pymatgen import SETTINGS
from pymatgen.io.vasp.outputs import Vasprun
from pymatgen.analysis.surface_analysis import SurfaceEnergyAnalyzer
from pymatgen.util.testing import PymatgenTest
__author__ = "Richard Tran"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Richard Tran"
__email__ = "rit001@eng.ucsd.edu"
__date__ = "Aug 24, 2017"
def get_path(path_str):
    """Build the path to a file in the shared surface-test fixture directory.

    The fixtures live in ``../../../test_files/surface_tests`` relative to
    the directory containing this module.
    """
    here = os.path.abspath(os.path.dirname(__file__))
    return os.path.join(here, "..", "..", "..", "test_files", "surface_tests",
                        path_str)
@unittest.skipIf(not SETTINGS.get("PMG_MAPI_KEY"), "PMG_MAPI_KEY environment variable not set.")
class SurfaceEnergyAnalyzerTest(PymatgenTest):
    """Tests for SurfaceEnergyAnalyzer built from pre-computed Cu slab vaspruns."""

    def setUp(self):
        # Collect every Cu vasprun in the fixture directory, keyed by the
        # (h, k, l) Miller index parsed out of the file name.
        vasprun_dict = {}
        for path in glob.glob(os.path.join(get_path(""), "*")):
            if ".xml.Cu" not in path:
                continue
            hkl = tuple(int(digit) for digit in path[-6:].strip(".gz"))
            vasprun_dict[hkl] = [Vasprun(path)]
        self.vasprun_dict = vasprun_dict
        self.Cu_analyzer = SurfaceEnergyAnalyzer("mp-30", self.vasprun_dict, "Cu")

    def test_gamma_calculator(self):
        # make sure we've loaded all our files correctly
        self.assertEqual(len(self.vasprun_dict), 13)
        for vaspruns in self.vasprun_dict.values():
            gamma_range = self.Cu_analyzer.calculate_gamma(vaspruns[0])
            # Stoichiometric slabs: surface energy must be independent of
            # chemical potential, so the range collapses to a single value.
            self.assertEqual(gamma_range[0], gamma_range[1])

    def test_get_intersections(self):
        # Cu has a single termination per facet, so no surface-energy
        # crossings exist and every lookup should be falsy.
        for miller_index in self.vasprun_dict:
            self.assertFalse(self.Cu_analyzer.get_intersections(miller_index))

    def test_get_wulff_shape_dict(self):
        # All Cu facets are chemical-potential independent, so only one Wulff
        # shape should come back even when asking at intersections.
        wulff_dict = self.Cu_analyzer.wulff_shape_dict(at_intersections=True)
        self.assertEqual(len(wulff_dict), 1)
        wulffshape = list(wulff_dict.values())[0]
        # The Wulff shape of Cu should be dominated by (100) and (111) facets.
        fractions = wulffshape.area_fraction_dict
        self.assertGreater(fractions[(1, 0, 0)] + fractions[(1, 1, 1)], 0.7)
        # Wulff shapes at both chemical-potential extremes must agree.
        low_chempot_wulff = self.Cu_analyzer.wulff_shape_from_chempot(
            min(self.Cu_analyzer.chempot_range))
        high_chempot_wulff = self.Cu_analyzer.wulff_shape_from_chempot(
            max(self.Cu_analyzer.chempot_range))
        for miller_index in self.Cu_analyzer.vasprun_dict:
            self.assertEqual(low_chempot_wulff.area_fraction_dict[miller_index],
                             high_chempot_wulff.area_fraction_dict[miller_index])
if __name__ == "__main__":
    # Allow running this test module directly with the unittest runner.
    unittest.main()
|
{"hexsha": "dc3aadc47b068416ca51608d5cb1fb0ebf56f50d", "size": 3105, "ext": "py", "lang": "Python", "max_stars_repo_path": "pymatgen/analysis/tests/test_surface_analysis.py", "max_stars_repo_name": "ltalirz/pymatgen", "max_stars_repo_head_hexsha": "894cdb2ec7b9bd74f0ac3cdad40d144203ccdcf6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pymatgen/analysis/tests/test_surface_analysis.py", "max_issues_repo_name": "ltalirz/pymatgen", "max_issues_repo_head_hexsha": "894cdb2ec7b9bd74f0ac3cdad40d144203ccdcf6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pymatgen/analysis/tests/test_surface_analysis.py", "max_forks_repo_name": "ltalirz/pymatgen", "max_forks_repo_head_hexsha": "894cdb2ec7b9bd74f0ac3cdad40d144203ccdcf6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.3333333333, "max_line_length": 96, "alphanum_fraction": 0.6734299517, "include": true, "reason": "import numpy", "num_tokens": 799}
|
#----------------------------------------------------------------
# NAME || AM || e-mail
# Georgios Vardakas || 432 || geoo1995@gmail.com
# Dimitra Triantali || 431 || dimitra.triantali@gmail.com
#----------------------------------------------------------------
# Course: Optimization
# Project 1
# Written in Python 3.8.6
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from numpy.linalg import inv, cholesky, norm
from numpy.linalg.linalg import LinAlgError
import time
import sys
# Globals
# Our data for the first 30 days. Training set.
# NOTE(review): loaded at import time -- importing this module requires
# ./Data/covid_data_30_GR.dat to exist relative to the working directory.
df_covid_data = pd.read_csv("./Data/covid_data_30_GR.dat", delim_whitespace=True, names = ["Day", "Cases"])
# Scaling step: days divided by 10 and case counts by 10000 to keep the
# polynomial fit numerically well conditioned.
df_covid_data["Scaled Day"] = df_covid_data["Day"].div(10)
df_covid_data["Scaled Cases"] = df_covid_data["Cases"].div(10000)
# Our data for days 13 - 17 of November based on https://covid19.who.int/region/euro/country/gr
# Used as a held-out test set (days 31-35).
df_covid_data_testset = pd.DataFrame(data = np.array([[31, 66637], [32, 69675], [33, 72510], [34, 74205], [35, 76403]]),
                                     columns = ["Day", "Cases"])
# Scaling step (same factors as the training set)
df_covid_data_testset["Scaled Day"] = df_covid_data_testset["Day"].div(10)
df_covid_data_testset["Scaled Cases"] = df_covid_data_testset["Cases"].div(10000)
# Defining the polynomial model, its gradiend and its hessian matrix
def model(a, x):
    """Evaluate the degree-4 polynomial with coefficient vector `a` at `x`."""
    return sum(a[k] * np.power(x, k) for k in range(5))
def model_gradient(a, x):
    """Gradient of `model` w.r.t. the coefficients: column [1, x, x^2, x^3, x^4].

    `a` is unused (the model is linear in its coefficients) but kept for
    interface symmetry with `model`.
    """
    return np.array([[np.power(x, k)] for k in range(5)])
def model_hessian(a, x):
    """Hessian of `model` w.r.t. the coefficients.

    The model is linear in its coefficients, so the Hessian is identically
    zero; `a` and `x` are accepted only for interface symmetry.
    """
    return np.zeros(shape=(5, 5))
# Defining the error function, its first and its second derivative
def error_function(model_parameters):
    """Sum of squared residuals of the polynomial model over the scaled training set."""
    return sum(
        np.power(model(model_parameters, row["Scaled Day"]) - row["Scaled Cases"], 2)
        for _, row in df_covid_data.iterrows()
    )
def gradient_of_error_function(model_parameters):
    """Gradient of `error_function` w.r.t. the model coefficients (column vector)."""
    gradient = np.zeros(shape=model_parameters.shape)
    for _, row in df_covid_data.iterrows():
        day = row["Scaled Day"]
        residual = model(model_parameters, day) - row["Scaled Cases"]
        # Chain rule: d/da (residual^2) = 2 * residual * d(model)/da
        gradient = np.add(gradient, 2 * residual * model_gradient(model_parameters, day))
    return gradient
def hessian_of_error_function(model_parameters):
    """Hessian of `error_function` w.r.t. the model coefficients (square matrix)."""
    dim = model_parameters.shape[0]
    hessian = np.zeros(shape=(dim, dim))
    for _, row in df_covid_data.iterrows():
        day = row["Scaled Day"]
        cases = row["Scaled Cases"]
        grad = model_gradient(model_parameters, day)
        # Gauss-Newton term plus the (identically zero) model-curvature terms.
        term = 2 * (grad @ np.transpose(grad)
                    + model(model_parameters, day) * model_hessian(model_parameters, day)
                    - cases * model_hessian(model_parameters, day))
        hessian = np.add(hessian, term)
    return hessian
# Cholesky method
def cholesky_method(hessian):
    """Return True when `hessian` is (numerically) positive definite.

    Attempts a Cholesky factorization, which succeeds exactly for
    positive-definite matrices; the factor itself is discarded.
    """
    try:
        cholesky(hessian)
    except LinAlgError as err:
        # Keep the original diagnostic output for non-PD matrices.
        print(err)
        return False
    return True
## Hessian matrix modification
def hessian_modification(hessian_xk, beta):
    """Return a positive-definite modification of `hessian_xk`.

    Adds successively larger multiples of the identity (t * I) until the
    shifted matrix admits a Cholesky factorization (Nocedal & Wright,
    Algorithm 3.3). Falls back to the identity matrix when no shift
    succeeds within `max_iteration` doublings.

    Parameters:
    - hessian_xk: square (possibly indefinite) Hessian matrix
    - beta: minimum positive shift to try once the diagonal is non-positive
    Returns: the positive-definite matrix hessian_xk + t * I (or I).
    """
    diagonal_elemets = np.diag(hessian_xk)
    min_diag_element = np.min(diagonal_elemets)
    I = np.identity(hessian_xk.shape[0])
    max_iteration = 100
    t = 0
    # A non-positive diagonal entry guarantees indefiniteness: start shifted.
    if (min_diag_element <= 0):
        t = -min_diag_element + beta
    for _ in range(max_iteration):
        shifted = np.add(hessian_xk, t * I)
        try:
            cholesky(shifted)
            # BUG FIX: return the modified (positive-definite) Hessian itself,
            # not its Cholesky factor L -- callers invert the returned matrix
            # as if it were the Hessian, so returning L gave wrong directions.
            return shifted
        except LinAlgError:
            # Still indefinite: double the shift (at least beta) and retry.
            t = max(t * 2, beta)
    return I
# Routines needed for line search methods
# Strong Wolf Conditions
def strong_wolf_conditions(pk, xk, a_max = 2, c2 = 1e-1):
    """Line search for a step size along pk satisfying the strong Wolfe conditions.

    Bracketing phase in the style of Nocedal & Wright's Algorithm 3.5:
    candidate steps are generated by bisection on [0, a_max]; when a bracket
    around an acceptable step is identified, `zoom` refines it.

    Parameters:
    - pk: descent direction (column vector)
    - xk: current iterate (column vector)
    - a_max: upper bound of the step-size interval
    - c2: curvature-condition constant (smaller = stricter)
    Returns: a step size satisfying both Wolfe conditions, or the best
    bracketed estimate produced by `zoom`.
    """
    # NOTE(review): 10e-4 equals 1e-3, not the textbook c1 = 1e-4 -- confirm intended.
    c1 = 10e-4
    ai_low = 0
    ai = interpolate(ai_low, a_max)
    iteration = 0
    # line_a(xk, a, pk) = xk + a * pk: point reached with step a along pk.
    line_a = lambda xk, a, pk: np.add(xk, a * pk)
    while True:
        # f(xk + a * pk)
        f_ai = error_function(line_a(xk, ai, pk))[0]
        # f(xk + 0 * pk)
        f_0 = error_function(xk)[0]
        # f'(xk + 0 * pk): directional derivative at the current iterate
        f_gradient_0 = (np.transpose(gradient_of_error_function(xk)) @ pk)[0][0]
        # f(xk + ai_low * pk)
        f_ai_low = error_function(line_a(xk, ai_low, pk))[0]
        # Armijo (sufficient decrease) condition violated?
        armijo_condition = f_ai > f_0 + c1 * ai * f_gradient_0
        # If sufficient decrease fails, or f stopped decreasing along the
        # bracket, an acceptable step lies in [ai_low, ai]: refine with zoom.
        if ((armijo_condition) or (f_ai >= f_ai_low and iteration > 0)):
            return zoom(c1, c2, line_a, f_0, f_gradient_0, pk, xk, ai_low, ai)
        # f'(xk + ai * pk)
        f_gradient_ai = (np.transpose(gradient_of_error_function(line_a(xk, ai, pk))) @ pk)[0][0]
        # Curvature condition
        curvature_condition = abs(f_gradient_ai) <= -c2 * f_gradient_0
        # If both conditions true return ai
        if(curvature_condition):
            return ai
        # Positive slope means the minimizer was overshot: zoom on [ai, ai_low].
        if(f_gradient_ai >= 0):
            return zoom(c1, c2, line_a, f_0, f_gradient_0, pk, xk, ai, ai_low)
        # Otherwise advance the bracket and try a larger trial step.
        ai_low = ai
        ai = interpolate(ai_low, a_max)
        iteration += 1
def zoom(c1, c2, line, f_0, f_gradient_0, pk, xk, a_low, a_high):
    """Refine a bracketed interval [a_low, a_high] to a strong-Wolfe step size.

    Bisection-based zoom phase (Nocedal & Wright, Algorithm 3.6): repeatedly
    narrows the interval while maintaining the invariant that it contains
    step lengths satisfying the strong Wolfe conditions.

    Parameters:
    - c1, c2: Armijo / curvature constants
    - line: callable (xk, a, pk) -> xk + a * pk
    - f_0, f_gradient_0: objective value and directional derivative at a = 0
    - pk, xk: search direction and current iterate
    - a_low, a_high: bracket endpoints (a_low holds the lower f value)
    Returns: an acceptable step size, or the current midpoint after the
    iteration safeguard trips.
    """
    iteration = 0
    max_iteration = 100
    while True:
        aj = interpolate(a_low, a_high)
        # f(xk + aj * pk)
        f_aj = error_function(line(xk, aj, pk))[0]
        # Armijo (sufficient decrease) condition violated?
        armijo_condition = f_aj > f_0 + c1 * aj * f_gradient_0
        # f(xk + a_low * pk)
        f_a_low = error_function(line(xk, a_low, pk))[0]
        # If armijo condition fails (or no improvement over a_low), shrink from above
        if ((armijo_condition) or (f_aj >= f_a_low)):
            a_high = aj
        else:
            # f'(xk + aj * pk)
            f_gradient_aj = (np.transpose(gradient_of_error_function(line(xk, aj, pk))) @ pk)[0][0]
            # Curvature condition
            curvature_condition = abs(f_gradient_aj) <= -c2 * f_gradient_0
            # Both Wolfe conditions hold: aj is acceptable
            if(curvature_condition):
                return aj
            # Slope sign tells which side of the minimizer aj lies on
            if(f_gradient_aj * (a_high - a_low) >= 0):
                a_high = a_low
            a_low = aj
        iteration += 1
        # Safeguard: give up after max_iteration bisections and return the
        # current midpoint estimate.
        if(iteration > max_iteration):
            return interpolate(a_low, a_high)
def interpolate(a_low, a_high):
    """Pick a trial step inside [a_low, a_high] by bisection (the midpoint)."""
    return 0.5 * (a_low + a_high)
# Line search methods
def steepest_descent_wolf_conditions(init_parameters):
    """Minimize `error_function` via steepest descent with strong-Wolfe steps.

    Parameters:
    - init_parameters: initial coefficient column vector
    Returns: (final parameters, list of error values, one per iterate
    starting with the initial point).
    """
    max_iterations = 1000
    xk = init_parameters
    error_history = [error_function(xk)]
    iteration = 0
    while iteration < max_iterations and norm(gradient_of_error_function(xk)) > 1e-6:
        # Descent direction: the negative gradient.
        direction = -gradient_of_error_function(xk)
        # c2 = 1e-1 is the customary curvature constant for gradient methods.
        step = strong_wolf_conditions(direction, xk, c2=1e-1)
        xk = np.add(xk, step * direction)
        # Track the error only for later plotting.
        current_error = error_function(xk)
        error_history.append(current_error)
        print("Iteration: %d, Step: %.1E, Error: %.4f" % (iteration, step, current_error))
        iteration += 1
    return xk, error_history
def newton_direction_wolf_conditions(init_parameters):
    """Minimize `error_function` with Newton's method and strong-Wolfe steps.

    Parameters:
    - init_parameters: initial coefficient column vector
    Returns: (final parameters, list of error values, one per iterate
    starting with the initial point).
    """
    max_iterations = 1000
    xk = init_parameters
    error_history = [error_function(xk)]
    iteration = 0
    while iteration < max_iterations and norm(gradient_of_error_function(xk)) > 1e-6:
        gradient_xk = gradient_of_error_function(xk)
        hessian_xk = hessian_of_error_function(xk)
        # Ensure the Hessian is positive definite before inverting it.
        if not cholesky_method(hessian_xk):
            hessian_xk = hessian_modification(hessian_xk, beta=1)
        # Newton direction.
        direction = -inv(hessian_xk) @ gradient_xk
        # Looser curvature constant (c2 = 0.9) is standard for Newton steps.
        step = strong_wolf_conditions(direction, xk, c2=9e-1)
        xk = np.add(xk, step * direction)
        # Track the error only for later plotting.
        current_error = error_function(xk)
        error_history.append(current_error)
        print("Iteration: %d, Step: %.1E, Error: %.4f" % (iteration, step, current_error))
        iteration += 1
    return xk, error_history
def BFGS_wolf_conditions(init_parameters, hessian_approx):
    """Minimize `error_function` with BFGS using strong-Wolfe line searches.

    Parameters:
    - init_parameters: initial coefficient column vector
    - hessian_approx: initial Hessian approximation.
      NOTE(review): it is used as `pk = -hessian_xk @ gradient_xk` and
      updated with the inverse-BFGS formula, so despite the name it tracks
      the *inverse* Hessian -- confirm callers pass e.g. the identity.
    Returns: (final parameters, list of error values per iteration).
    """
    iteration = 0
    max_iterations = 1000
    I = np.identity(init_parameters.shape[0])
    hessian_xk = hessian_approx
    xk = init_parameters
    error_list = list()
    error_list.append(error_function(xk)[0])
    while (iteration < max_iterations and norm(gradient_of_error_function(xk)) > 1e-6):
        # Calculating the gradient
        gradient_xk = gradient_of_error_function(xk)
        # Checking if hessian is positive defined only in the first iteration
        # (subsequent BFGS updates preserve positive definiteness when yk'sk > 0).
        if(iteration == 0):
            pos_defined = cholesky_method(hessian_xk)
            if (not pos_defined):
                hessian_xk = hessian_modification(hessian_xk, beta=1)
        # Quasi-Newton direction (hessian_xk approximates the inverse Hessian)
        pk = -hessian_xk @ gradient_xk
        # Chosing step size
        a = strong_wolf_conditions(pk, xk, c2 = 1e-1)
        # Updating the new xk_1
        xk_1 = np.add(xk, a * pk)
        # Calculating sk (iterate displacement)
        sk = np.subtract(xk_1, xk)
        # Calculating the next gradient
        gradient_xk_1 = gradient_of_error_function(xk_1)
        # Calculating yk (gradient displacement)
        yk = np.subtract(gradient_xk_1, gradient_xk)
        # BFGS method for updating the Hessian
        # Calculating rk
        # Curvature safeguard: if yk'sk <= 0 the update would destroy positive
        # definiteness (and rk would divide by a non-positive number), so stop.
        if (np.transpose(yk) @ sk <= 0):
            break
        rk = 1 / (np.transpose(yk) @ sk)
        # Update Hessian approximation (inverse-BFGS rank-two formula)
        matrix_1 = (I - rk * sk @ np.transpose(yk))
        matrix_2 = (I - rk * yk @ np.transpose(sk))
        hessian_xk = matrix_1 @ hessian_xk @ matrix_2 + rk * sk @ np.transpose(sk)
        # Calculating error just for ploting
        error = error_function(xk_1)
        error_list.append(error[0])
        print("Iteration: %d, Step: %.1E, Error: %.4f" % (iteration, a, error))
        # Update iteration, xk and gradient_prev
        iteration += 1
        xk = xk_1
    return xk, error_list
# Routines needed for trust region methods
def get_direction(xk, Bk, deltak):
    """Compute a dogleg trust-region step for the quadratic model Bk at xk.

    Parameters:
    - xk: current iterate
    - Bk: (positive-definite) Hessian of the local quadratic model
    - deltak: trust-region radius
    Returns (pk, label) where label records the branch taken:
    - "Newton": full Newton step, when it fits inside the trust region
    - "Cauchy": steepest-descent step scaled to the boundary, when even the
      unconstrained Cauchy point lies outside the region
    - "Dogleg": point on the segment from the Cauchy point to the Newton
      point where it crosses the trust-region boundary
    """
    # Calculating gradient at k just one time
    gradient_k = gradient_of_error_function(xk)
    pB = -inv(Bk) @ gradient_k
    # If inside trust region, take the full Newton step
    if (norm(pB, 2) <= deltak):
        return pB, "Newton"
    # If outside trust region
    else:
        # Cauchy point: unconstrained minimizer of the model along -gradient
        pU = -((np.transpose(gradient_k) @ gradient_k)
               / (np.transpose(gradient_k) @ Bk @ gradient_k)) * gradient_k
        # If outside trust region, cut it at trust region border
        if (norm(pU, 2) >= deltak):
            return -(deltak / norm(gradient_k, 2) * gradient_k), "Cauchy"
        # If inside trust region, do dogleg
        else:
            # t in [1, 2] parameterizes the segment from pU to pB; `solve`
            # finds where that segment crosses the trust-region boundary.
            t = solve(pB, pU, deltak)
            return pU + (t - 1) * np.subtract(pB, pU), "Dogleg"
# Bisection method
def solve(pB, pU, deltak):
    """Find t in [1, 2] with ||pU + (t - 1)(pB - pU)|| = deltak by bisection.

    Used by the dogleg step to locate where the path from the Cauchy point
    pU to the Newton point pB crosses the trust-region boundary.
    """
    lower, upper = 1, 2
    tolerance = 1e-9
    # Signed gap between the squared dogleg-point norm and the squared radius.
    boundary_gap = lambda t: np.power(norm(pU + (t - 1) * (pB - pU), 2), 2) - np.power(deltak, 2)
    while True:
        midpoint = (lower + upper) / 2
        if abs(boundary_gap(midpoint)) <= tolerance:
            return midpoint
        # Keep the sub-interval whose endpoints straddle the root.
        if boundary_gap(lower) * boundary_gap(midpoint) < 0:
            upper = midpoint
        else:
            lower = midpoint
        if upper - lower < tolerance:
            return midpoint
# Thrust region method
def newton_method_safe_region(init_parameters):
    """Minimize `error_function` with a trust-region (dogleg) Newton method.

    Classic trust-region loop: at each iterate a quadratic model mk is
    minimized inside a ball of radius delta_i via `get_direction`; the
    radius shrinks or grows according to the agreement ratio rk between the
    actual and the model-predicted reduction, and a step is accepted only
    when rk > htta.

    Parameters:
    - init_parameters: initial coefficient column vector
    Returns: (final parameters, list of error values of accepted steps).
    """
    iteration = 0
    max_iterations = 1000
    delta_low, delta_max = 0, 1
    delta_i = (delta_low + delta_max) / 2
    # Acceptance threshold for the agreement ratio rk.
    htta = 1 / 4
    xk = init_parameters
    step = lambda xk, pk: np.add(xk, pk)
    # Quadratic model of the objective around xk: f + g'p + (1/2) p'Bp.
    mk = lambda xk, pk, Bk: error_function(xk) + (np.transpose(gradient_of_error_function(xk)) @ pk)[0][0] + ((1 / 2) * np.transpose(pk) @ Bk @ pk)[0][0]
    error_list = list()
    error_list.append(error_function(xk))
    # Tracks how often each dogleg branch was taken (diagnostics only).
    point_counter = {"Newton": 0, "Cauchy" : 0, "Dogleg" : 0}
    while (iteration < max_iterations and norm(gradient_of_error_function(xk)) > 1e-6):
        # Calculating hessian
        hessian_xk = hessian_of_error_function(xk)
        # Checking if hessian is positive defined
        pos_defined = cholesky_method(hessian_xk)
        if (not pos_defined):
            hessian_xk = hessian_modification(hessian_xk, 1)
        # Chosing the descent direction
        pk, point = get_direction(xk, hessian_xk, delta_i)
        point_counter[point] += 1
        # rk quantifies the quality of the step: actual reduction divided by
        # the reduction predicted by the quadratic model.
        real_reduction = error_function(xk) - error_function(step(xk, pk))
        approximate_reduction = mk(xk, np.zeros(pk.shape), hessian_xk) - mk(xk, pk, hessian_xk)
        rk = real_reduction / approximate_reduction
        # Fixing the trust region for next iteration
        # Case rk close to 0: poor model agreement, shrink the region
        if (rk < 1 / 4):
            delta_i = (1 / 4) * delta_i
        # Case rk close to 1 and step hit the boundary: expand the region
        elif ((rk > 3 / 4) and (norm(pk, 2) > delta_i)):
            delta_i = min(2 * delta_i, delta_max)
        # Case where 0 << rk < 1: keep the region unchanged
        else:
            delta_i = delta_i
        # Checking if we accept the step or not
        # Step pk accepted
        if (rk > htta):
            xk_1 = np.add(xk, pk)
            # Calculating error just for ploting
            error = error_function(xk_1)
            error_list.append(error)
            print("Iteration: %d, Error: %.4f" % (iteration, error))
        # Step pk rejected: stay at xk with the adjusted radius
        else:
            xk_1 = xk
        # Update iteration and xk
        iteration += 1
        xk = xk_1
    print("\nNewton point: %s, Cauchy point: %s, Dogleg point: %s." %
          (point_counter["Newton"], point_counter["Cauchy"], point_counter["Dogleg"]))
    return xk, error_list
def optimizer(method_choice, init_parameters):
    """Dispatch to the selected optimization method and report its runtime.

    Args:
        method_choice (str): "1" Newton + strong Wolfe, "2" Newton + trust
            region, "3" BFGS + strong Wolfe, "4" steepest descent + strong
            Wolfe.
        init_parameters (np.ndarray): column vector of initial parameters.

    Returns:
        tuple: (optimized parameter vector, list of per-iteration errors).

    Raises:
        ValueError: if `method_choice` is not one of "1"-"4".
    """
    time_1 = time.time()
    if (method_choice == "1"):
        print("Method: Newton with strong wolfe conditions.\n")
        parameters, errors = newton_direction_wolf_conditions(init_parameters)
    elif(method_choice == "2"):
        print("Method: Newton with trust region.\n")
        parameters, errors = newton_method_safe_region(init_parameters)
    elif(method_choice == "3"):
        print("Method: BFGS with strong wolfe conditions.\n")
        # BFGS starts from the identity as the initial Hessian approximation.
        hessian_approx = np.identity(init_parameters.shape[0])
        parameters, errors = BFGS_wolf_conditions(init_parameters, hessian_approx)
    elif(method_choice == "4"):
        print("Method: Steepest descent with strong wolfe conditions.\n")
        parameters, errors = steepest_descent_wolf_conditions(init_parameters)
    else:
        # Bug fix: an unknown choice used to fall through and crash below
        # with UnboundLocalError on `parameters`; fail fast instead.
        raise ValueError("Unknown method_choice: %r (expected '1', '2', '3' or '4')." % (method_choice,))
    time_2 = time.time()
    time_difference = time_2 - time_1
    # (typo fix: "secods" -> "seconds")
    print("\nTime passed in seconds: %.2f.\n" % (time_difference))
    return parameters, errors
# Model evaluation
def mean_squared_error(y_true, y_pred):
    """Return the mean of squared differences between `y_true` and `y_pred`."""
    total_sq_err = sum(np.power(y_true[i] - y_pred[i], 2) for i in range(len(y_true)))
    return total_sq_err / y_true.shape[0]
def plot_model_results(parameters):
    """Evaluate the trained model, print test-set errors, and plot the fit.

    Uses the module-level globals `model`, `df_covid_data`,
    `df_covid_data_testset`, `pd` and `plt` (defined earlier in this file).
    Days are scaled by 1/10 and case counts by 1/10000 before evaluating
    the model, then rescaled for display.
    """
    answers_trainingset = []
    predictions = []
    # unscaled predictions kept separately for the MSE computation
    predictions_no_scaling = []
    scale_day = 10
    scale_cases = 10000
    # division by 10 for scaling the day axis into model units
    data_range = np.arange(0, 31, 1) / scale_day
    # days 30..35 form the prediction (test) window
    predictions_range = np.arange(30, 36, 1) / scale_day
    for i in data_range:
        # multiplication by 10000 rescales cases back to real counts
        answers_trainingset.append(model(parameters, i) * scale_cases)
    for i in predictions_range:
        # multiplication by 10000 rescales cases back to real counts
        predictions.append(int((model(parameters, i) * scale_cases)[0]))
        predictions_no_scaling.append(model(parameters, i))
    print("Parameters after optimization: %s.\n" % (np.array2string(np.transpose(parameters))))
    # [1:] drops day 30, which overlaps the training range above
    mse = mean_squared_error(np.asarray(predictions_no_scaling[1:]), df_covid_data_testset["Scaled Cases"].to_numpy())
    print("Mean squared error (at testing set, without rescaling): %.4f.\n" % (mse))
    data = {"Day Index":df_covid_data_testset["Day"].to_list(),
        "Real Cases": df_covid_data_testset["Cases"].to_list(),
        "Model Prediction": predictions[1:]}
    df_real_predictions = pd.DataFrame(data=data)
    df_real_predictions["Error"] = (df_real_predictions["Real Cases"] - df_real_predictions["Model Prediction"]).abs()
    print(df_real_predictions)
    print("")
    title = "Trained model"
    fontsize = 15
    fig, ax = plt.subplots()
    fig.set_size_inches(15, 8)
    # red = training data, blue = testing data
    ax.scatter(df_covid_data["Day"], df_covid_data["Cases"], c="r")
    ax.scatter(df_covid_data_testset["Day"], df_covid_data_testset["Cases"], c="b")
    ax.plot(data_range * scale_day, answers_trainingset)
    ax.plot(predictions_range * scale_day, predictions)
    ax.set_xlabel("Days since 14/10/2020", fontsize=fontsize)
    ax.set_ylabel("Cumulative covid-19 cases", fontsize=fontsize)
    ax.set_title(title, fontsize=fontsize)
    ax.grid(True)
    ax.legend(["Fitted model", "Model's Prediction", "Training Set", "Testing Set"], fontsize=fontsize)
    # Saved under the title as filename (matplotlib appends the extension).
    fig.savefig(title, facecolor='w')
    plt.show()
def main():
    """Parse CLI arguments, run the chosen optimizer and plot the results.

    Expected invocation: python3 optimization.py m <method> x <init-params>.
    Prints usage and returns (instead of crashing) on malformed input.
    """
    if(len(sys.argv) != 5):
        print("Error: wrong input.")
        print("Usage: python3 optimization.py m method_number x initial_parameter_number")
        print("method_number choices: 1, 2, 3, 4.")
        print("initial_parameter_number choices: 0, 1, 2, 3, 4")
        print("Example: python3 optimization.py m 1 x 0")
        # Bug fix: previously execution fell through after printing usage
        # and crashed with IndexError on sys.argv below; bail out instead.
        return
    method_choice = sys.argv[2]
    parameters_choice = sys.argv[4]
    # possible choices for initial parameters (5x1 column vectors)
    x0 = np.array([[5.025], [-4.898], [0.119], [3.981], [7.818]])
    x1 = np.array([[9.185], [0.944], [-7.227], [-7.014], [-4.849]])
    x2 = np.array([[6.814], [-4.914], [6.285], [-5.129], [8.585]])
    x3 = np.array([[-2.966], [6.616], [1.705], [0.994], [8.343]])
    x4 = np.array([[-7.401], [1.376], [-0.612], [-9.762], [-3.257]])
    parameters_dict = {"0" : x0, "1" : x1, "2" : x2, "3" : x3, "4" : x4}
    if parameters_choice not in parameters_dict:
        # Bug fix: an invalid choice used to surface as a bare KeyError.
        print("Error: initial_parameter_number must be one of 0, 1, 2, 3, 4.")
        return
    init_parameters = parameters_dict[parameters_choice]
    print("Initial model parameters: x%s = %s.\n" %
        (parameters_choice, np.array2string(np.transpose(init_parameters))))
    parameters, errors = optimizer(method_choice, init_parameters)
    plot_model_results(parameters)
if __name__ == "__main__":
    main()
|
{"hexsha": "df1ff426c72111c190face3c452663eb0b8bc25f", "size": 19072, "ext": "py", "lang": "Python", "max_stars_repo_path": "optimization.py", "max_stars_repo_name": "giorgosVardakas/Gradient-Based-Optimization-Methods", "max_stars_repo_head_hexsha": "0166780cb5a2906feb8b27ba299439e16257621f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "optimization.py", "max_issues_repo_name": "giorgosVardakas/Gradient-Based-Optimization-Methods", "max_issues_repo_head_hexsha": "0166780cb5a2906feb8b27ba299439e16257621f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "optimization.py", "max_forks_repo_name": "giorgosVardakas/Gradient-Based-Optimization-Methods", "max_forks_repo_head_hexsha": "0166780cb5a2906feb8b27ba299439e16257621f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.9944954128, "max_line_length": 153, "alphanum_fraction": 0.625471896, "include": true, "reason": "import numpy,from numpy", "num_tokens": 5308}
|
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
import tensorflow_datasets as tfds
import time
from source.Selective_walk import SelectiveWalk
from source.Evolution_ import Evolution
from source.sample import Sample
import logging
import jax.numpy as np
# Silence TF logging below ERROR level.
logger = tf.get_logger()
logger.setLevel(logging.ERROR)
# Enable on-demand GPU memory growth so TF does not grab all VRAM up front.
gpu_devices = tf.config.experimental.list_physical_devices('GPU')
for device in gpu_devices:
    tf.config.experimental.set_memory_growth(device, True)
start_time = time.time()
# Load only the last 30% of each MNIST split (tfds slicing syntax).
train_dataset, metadata = tfds.load('mnist:3.*.*', split='train[70%:]', as_supervised=True, with_info=True)
test_dataset = tfds.load('mnist:3.*.*', split='test[70%:]', as_supervised=True)
# num_examples describes the FULL split, so scale by the 30% fraction:
# 18000 train / 3000 test examples, matching the buffer sizes allocated below.
num_train_examples = int(metadata.splits['train'].num_examples*0.3)
num_test_examples = int(metadata.splits['test'].num_examples*0.3)
num_classes = metadata.features['label'].num_classes
# NOTE(fix): `np` is bound to jax.numpy in this file (see the imports), and
# JAX arrays are immutable -- the `td_x[i] = ...` assignments below would
# raise a TypeError.  Allocate the staging buffers with plain NumPy, which
# supports in-place item assignment.
import numpy as onp

# Relabelled test images/labels: 3000 = 30% of the 10k MNIST test split.
td_x = onp.zeros((3000, 28, 28, 1), dtype=onp.uint8)
td_y = onp.zeros((3000), dtype=onp.uint8)
# Training images/labels: 18000 = 30% of the 60k MNIST train split.
trd_x = onp.zeros((18000, 28, 28, 1), dtype=onp.uint8)
trd_y = onp.zeros((18000), dtype=onp.uint8)
i = 0
# Per-class counters capping how many examples of each odd digit get a
# deliberately wrong label in the loop below.
one = 0
three = 0
five = 0
seven = 0
nine = 0
# Inject label noise: up to 200 examples of each odd digit (1,3,5,7,9) get
# a fixed wrong label; every other example keeps its true label.
wrong_label = {1: 3, 3: 9, 5: 0, 7: 4, 9: 2}
corrupted = {digit: 0 for digit in wrong_label}
for elem in test_dataset:
    true_label = elem[1].numpy()
    td_x[i] = elem[0]
    if true_label in wrong_label and corrupted[true_label] < 200:
        td_y[i] = wrong_label[true_label]
        corrupted[true_label] += 1
    else:
        td_y[i] = true_label
    i += 1
# Copy the training split verbatim into the staging buffers.
i = 0
for elem in train_dataset:
    trd_x[i], trd_y[i] = elem[0], elem[1].numpy()
    i += 1
# Wrap the filled staging buffers back into tf.data pipelines of
# (image, label) pairs.
td_x = tf.data.Dataset.from_tensor_slices(td_x)
td_y = tf.data.Dataset.from_tensor_slices(td_y)
td = tf.data.Dataset.zip((td_x, td_y))
trd_x = tf.data.Dataset.from_tensor_slices(trd_x)
trd_y = tf.data.Dataset.from_tensor_slices(trd_y)
trd = tf.data.Dataset.zip((trd_x, trd_y))
def normalize(images, labels):
    """Cast images to float32 and scale pixel values into [0, 1]."""
    # (bug fix: removed a leftover debug `print(labels)`; inside Dataset.map
    # it only printed the symbolic tensor once during graph tracing, never
    # actual label values)
    images = tf.cast(images, tf.float32)
    images /= 255
    return images, labels
# Free the original tfds datasets; only the zipped copies are used from here.
del train_dataset
del test_dataset
td = td.map(normalize)
trd = trd.map(normalize)
BATCH_SIZE = 64
NUM_EPOCHS = 8
# Cache + shuffle + batch; the training pipeline repeats for NUM_EPOCHS passes.
trd = trd.cache().shuffle(num_train_examples).batch(BATCH_SIZE).repeat(NUM_EPOCHS)
td = td.cache().shuffle(num_test_examples).batch(BATCH_SIZE).repeat(1)
# Bundle everything the downstream search classes need.
# NOTE(review): 'epochs': 6 and 'batch': 64 duplicate/disagree with
# NUM_EPOCHS=8 and BATCH_SIZE above -- confirm which values are intended.
data_information = {
    'train_data': trd,
    'test_data': td,
    'nt_examples': num_train_examples,
    'nT_examples': num_test_examples,
    'nclasses': num_classes,
    'shape': metadata.features['image'].shape,
    'epochs': 6,
    'batch': 64
}
# Earlier experiments (selective walk, evolution) kept for reference:
#swalk = SelectiveWalk(30, 3, data_information)
#if tf.test.is_gpu_available():
#    with tf.device("GPU:0"):
#        swalk.walk(10)
#evo = Evolution(7,2,20,data_information)
#if tf.test.is_gpu_available():
#    with tf.device("GPU:0"):
#        evo.evolve(10)
# Run the Metropolis-Hastings sampler over 200 sampled architectures.
sample = Sample('', 200, data_information, 3)
# NOTE(review): tf.test.is_gpu_available() is deprecated in TF2 in favor of
# tf.config.list_physical_devices('GPU') -- confirm the TF version in use.
if tf.test.is_gpu_available():
    with tf.device("GPU:0"):
        sample.metropolis_hastings()
print("--- %s seconds ---" % (time.time() - start_time))
|
{"hexsha": "84798212095b69a0231dc432d94dfd458ced0beb", "size": 3339, "ext": "py", "lang": "Python", "max_stars_repo_path": "runoverfit.py", "max_stars_repo_name": "NMVRodrigues/TFNE", "max_stars_repo_head_hexsha": "44538844f115ee11bbc58d4b7ba33526f17e2264", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-07-17T22:13:40.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-07T14:57:01.000Z", "max_issues_repo_path": "runoverfit.py", "max_issues_repo_name": "NMVRodrigues/TFNE", "max_issues_repo_head_hexsha": "44538844f115ee11bbc58d4b7ba33526f17e2264", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "runoverfit.py", "max_forks_repo_name": "NMVRodrigues/TFNE", "max_forks_repo_head_hexsha": "44538844f115ee11bbc58d4b7ba33526f17e2264", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.2954545455, "max_line_length": 107, "alphanum_fraction": 0.6576819407, "include": true, "reason": "import jax", "num_tokens": 1014}
|
# coding: utf-8
from datetime import date
import numpy as np
import apertools.sario as sario
import apertools.utils as utils
import apertools.subset as subset
from apertools.constants import PHASE_TO_CM
from apertools.deramp import remove_ramp
MENTONE_EQ_DATE = date(2020, 3, 26)
# TODO: Make a cli version...
# TODO: make pre/post independent like cross
# TODO: integrate with the other subset_top_eq
def stack_igrams(
    event_date=MENTONE_EQ_DATE,
    num_igrams=10,
    use_cm=True,
    rate=False,
    outname=None,
    verbose=True,
    ref=(5, 5),
    window=5,
    ignore_geos=True,
    cc_thresh=None,
    avg_cc_thresh=0.0,
    sigma_filter=0.3,
):
    """Average interferograms spanning `event_date` into one coseismic stack.

    Loads the SLC/ifg lists from the current directory, picks independent
    cross-event pairs via `select_cross_event`, and delegates the averaging
    to `create_stack` (which also interprets ref/window/cc_thresh/
    avg_cc_thresh/sigma_filter/rate/use_cm).

    Args:
        event_date: earthquake date the chosen igrams must span.
        num_igrams: number of independent cross-event pairs to use.
        outname: if given, write the stack to this HDF5 file ("stackavg").
        verbose: print the list of igram files used.
        ignore_geos: honor the "slclist_ignore.txt" exclusion list.

    Returns:
        tuple: (stacked phase array, averaged correlation array).
    """
    print(f"Event date: {event_date}")
    gi_file = "slclist_ignore.txt" if ignore_geos else None
    slclist, ifglist = sario.load_slclist_ifglist(".", slclist_ignore_file=gi_file)
    ifgs = select_cross_event(slclist, event_date, num_igrams=num_igrams)
    # Alternative selections kept for reference:
    # stack_igrams = select_pre_event(slclist, ifglist, event_date)
    # stack_igrams = select_post_event(slclist, ifglist, event_date)
    stack_fnames = sario.ifglist_to_filenames(ifgs, ".unw")
    if verbose:
        print(f"Using the following {len(stack_fnames)} igrams in stack:")
        for f in stack_fnames:
            print(f)
    # Temporal baseline (in days) of each igram, needed for rate estimation.
    dts = [(pair[1] - pair[0]).days for pair in ifgs]
    cur_phase_sum, cc_stack = create_stack(
        stack_fnames,
        dts,
        rate=rate,
        use_cm=use_cm,
        ref=ref,
        window=window,
        cc_thresh=cc_thresh,
        avg_cc_thresh=avg_cc_thresh,
        sigma_filter=sigma_filter,
    )
    if outname:
        # h5py imported lazily so the module works without it when not saving.
        import h5py
        with h5py.File(outname, "w") as f:
            f["stackavg"] = cur_phase_sum
        sario.save_dem_to_h5(outname, sario.load("dem.rsc"))
    return cur_phase_sum, cc_stack
def select_cross_event(slclist, event_date, num_igrams=None):
    """Build independent (pre, post) date pairs spanning `event_date`.

    Pairs the last `num_igrams` acquisition dates before the event with the
    first `num_igrams` dates after it, one-to-one.  When `num_igrams` is
    falsy, it defaults to the number of post-event dates available.
    """
    split = np.searchsorted(slclist, event_date)
    # `slclist[split]` is the first acquisition AFTER the event.
    if not num_igrams:
        num_igrams = len(slclist) - split
    lo = max(split - num_igrams, 0)
    window = slclist[lo:split + num_igrams]
    return list(zip(window[:num_igrams], window[num_igrams:]))
def select_pre_event(slclist, event_date, num_igrams=None, min_date=None):
    """Pair up acquisition dates strictly before `event_date`.

    Takes the last `2 * num_igrams` pre-event dates and pairs the first half
    with the second half.  `min_date` is accepted for interface parity but
    currently unused.
    """
    cutoff = np.searchsorted(slclist, event_date)
    if not num_igrams:
        num_igrams = cutoff // 2
    window = slclist[max(cutoff - 2 * num_igrams, 0):cutoff]
    return list(zip(window[:num_igrams], window[num_igrams:]))
def select_post_event(slclist, event_date, num_igrams=None, max_date=None):
    """Pair up acquisition dates after `event_date`.

    Takes the first `2 * num_igrams` post-event dates and pairs the first
    half with the second half.  `max_date` is accepted for interface parity
    but currently unused.
    """
    cutoff = np.searchsorted(slclist, event_date)
    if not num_igrams:
        num_igrams = (len(slclist) - cutoff) // 2
    window = slclist[cutoff:min(cutoff + 2 * num_igrams, len(slclist))]
    return list(zip(window[:num_igrams], window[num_igrams:]))
def select_pre_event_redundant(slclist, ifglist, event_date, num_igrams=None, min_date=None):
    """Return every igram from `ifglist` whose both dates precede the event,
    optionally trimmed by `min_date` via utils.filter_min_max_date."""
    pre_pairs = [pair for pair in ifglist if pair[0] < event_date and pair[1] < event_date]
    return utils.filter_min_max_date(pre_pairs, min_date, None)
def select_post_event_redundant(slclist, ifglist, event_date, max_date=None):
    """Return every igram from `ifglist` whose both dates follow the event,
    optionally trimmed by `max_date` via utils.filter_min_max_date."""
    post_pairs = [pair for pair in ifglist if pair[0] > event_date and pair[1] > event_date]
    return utils.filter_min_max_date(post_pairs, None, max_date)
def create_stack(
    stack_fnames,
    dts,
    rate=False,
    use_cm=True,
    ref=(5, 5),
    window=5,
    cc_thresh=None,
    avg_cc_thresh=0.35,
    sigma_filter=0.3,
):
    """Average a list of deramped .unw interferograms into one stack.

    Args:
        stack_fnames: .unw filenames; matching .cc files must exist.
        dts: temporal baseline (days) per igram, used when rate=True.
        rate: if True, divide by the accumulated time to get a rate
            instead of an average phase.
        use_cm: convert the result with PHASE_TO_CM (phase -> centimeters).
        ref: (row, col) of the reference pixel whose neighborhood mean is
            subtracted from the stack.
        window: side length of the reference neighborhood.
        cc_thresh: per-igram correlation threshold; pixels below it are
            NaN-masked for that igram.
        avg_cc_thresh: pixels whose stack-averaged correlation falls below
            this are NaN-masked in the final result.
        sigma_filter: if truthy, Gaussian-smooth the result (NaN-aware).

    Returns:
        tuple: (stacked phase array, averaged correlation array).
    """
    cur_phase_sum = np.zeros(sario.load(stack_fnames[0]).shape).astype(float)
    cc_stack = np.zeros_like(cur_phase_sum)
    # for pixels that get masked sometimes, lower that count in the final stack dividing
    pixel_count = np.zeros_like(cur_phase_sum, dtype=int)
    dt_total = 0
    for f, dt in zip(stack_fnames, dts):
        deramped_phase = remove_ramp(sario.load(f), deramp_order=1, mask=np.ma.nomask)
        # correlation file lives next to the .unw with a .cc extension
        cur_cc = sario.load(f.replace(".unw", ".cc"))
        if cc_thresh:
            bad_pixel_mask = cur_cc < cc_thresh
        else:
            # zeros => don't mask any pixels to NaN
            bad_pixel_mask = np.zeros_like(deramped_phase, dtype=bool)
        deramped_phase[bad_pixel_mask] = np.nan
        # nansum so per-igram NaN pixels don't poison the running sum
        # cur_phase_sum += deramped_phase
        cur_phase_sum = np.nansum(np.stack([cur_phase_sum, deramped_phase]), axis=0)
        pixel_count += (~bad_pixel_mask).astype(int)
        # accumulate time only where this igram contributed a valid pixel
        dt_total += (~bad_pixel_mask) * dt
        cc_stack += cur_cc
    # subtract the reference location's neighborhood mean:
    ref_row, ref_col = ref
    win = window // 2
    patch = cur_phase_sum[
        ref_row - win : ref_row + win + 1, ref_col - win : ref_col + win + 1
    ]
    cur_phase_sum -= np.nanmean(patch)
    # NOTE(review): pixels masked in every igram have pixel_count == 0 and
    # divide to NaN/inf with a runtime warning here -- confirm intended.
    if rate:
        cur_phase_sum /= dt_total
    else:
        cur_phase_sum /= pixel_count
    cc_stack /= len(stack_fnames)
    if avg_cc_thresh:
        cur_phase_sum[cc_stack < avg_cc_thresh] = np.nan
    if use_cm:
        cur_phase_sum *= PHASE_TO_CM
    if sigma_filter:
        # imported lazily so blobsar is only required when smoothing
        import blobsar.utils as blob_utils
        cur_phase_sum = blob_utils.gaussian_filter_nan(cur_phase_sum, sigma_filter)
    return cur_phase_sum, cc_stack
def subset_stack(
    point,
    event_date,
    ref=(3, 3),
    window=3,
    nigrams=10,
    ignore_geos=True,
    min_date=None,
    max_date=None,
):
    """Average cross-event igrams over a small bbox around `point`.

    Like `stack_igrams`, but reads only a spatial subset around `point`
    (band 2 of each ROI_PAC .unw file), so it is much cheaper.  The result
    is referenced to the `ref` pixel's `window`-sized neighborhood and
    converted to centimeters.  `min_date`/`max_date` are accepted but
    currently unused (the cross-event selection ignores them).

    Returns:
        2D array of averaged phase (in cm) over the subset.
    """
    gi_file = "slclist_ignore.txt" if ignore_geos else None
    slclist, ifglist = sario.load_slclist_ifglist(".", slclist_ignore_file=gi_file)
    ifgs = select_cross_event(slclist, event_date, nigrams)
    # Alternative selections kept for reference:
    # stack_igrams = select_pre_event(slclist, event_date, min_date=date(2019, 7, 1))
    # stack_igrams = select_post_event(
    #     slclist, event_date, max_date=date(2020, 5, 1)
    # )
    stack_fnames = sario.ifglist_to_filenames(ifgs, ".unw")
    # dts = [(pair[1] - pair[0]).days for pair in stack_igrams]
    phase_subset_stack = []
    for f in stack_fnames:
        # band 2 of a ROI_PAC .unw holds the unwrapped phase
        cur = subset.read_subset(
            subset.bbox_around_point(*point), f, driver="ROI_PAC", bands=[2]
        )
        deramped_phase = remove_ramp(np.squeeze(cur), deramp_order=1, mask=np.ma.nomask)
        phase_subset_stack.append(deramped_phase)
    phase_subset_stack = np.mean(np.stack(phase_subset_stack, axis=0), axis=0)
    # subtract the reference location's neighborhood mean:
    ref_row, ref_col = ref
    win = window // 2
    patch = phase_subset_stack[
        ref_row - win : ref_row + win + 1, ref_col - win : ref_col + win + 1
    ]
    phase_subset_stack -= np.nanmean(patch)
    phase_subset_stack *= PHASE_TO_CM
    return phase_subset_stack
|
{"hexsha": "a630ca450adaebd889a99d6bcc3405fdb82f05a9", "size": 7151, "ext": "py", "lang": "Python", "max_stars_repo_path": "apertools/coseismic_stack.py", "max_stars_repo_name": "scottstanie/apertools", "max_stars_repo_head_hexsha": "f959d03038e77444204c1ff224ddd8357db3fc04", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2020-02-22T15:44:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-16T02:49:57.000Z", "max_issues_repo_path": "apertools/coseismic_stack.py", "max_issues_repo_name": "scottstanie/apertools", "max_issues_repo_head_hexsha": "f959d03038e77444204c1ff224ddd8357db3fc04", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "apertools/coseismic_stack.py", "max_forks_repo_name": "scottstanie/apertools", "max_forks_repo_head_hexsha": "f959d03038e77444204c1ff224ddd8357db3fc04", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-11-02T15:04:15.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-08T20:20:58.000Z", "avg_line_length": 31.9241071429, "max_line_length": 88, "alphanum_fraction": 0.6801845896, "include": true, "reason": "import numpy", "num_tokens": 1996}
|
\documentclass[main.tex]{subfiles}
\begin{document}
\marginpar{Tuesday\\ 2020-8-18, \\ compiled \\ \today}
Let us go into some more details regarding how the radiation field looks.
Let us suppose that the angle between the acceleration \(\dot{\vec{u}}\) and the observation unit vector \(\vec{n}\) is \(\Theta \): then the absolute value of the electric field and magnetic fields will be
%
\begin{align}
E_r = \frac{q}{Rc^2} \dot{u} \sin \Theta = B_r
\,.
\end{align}
\(\vec{E}\) and \(\vec{B}\) are orthogonal, so the magnitude of the Poynting vector will be
%
\begin{align}
\abs{\vec{S}} = \frac{c}{4 \pi } \abs{\vec{E} \times \vec{B}} =
\frac{c}{4 \pi } E_r^2 = \frac{q^2}{4 \pi c^3 R^2} \dot{u}^2 \sin^2 \Theta
\,,
\end{align}
%
which, as is expected, decays like \(R^{-2}\), since it is the power radiated per unit area. If we instead want the power radiated per unit solid angle then we must use \(\dd{A} = R^2 \dd{\Omega }\):
%
\begin{align}
\frac{ \dd{w}}{ \dd{t} \dd{\Omega }} = SR^2 = \frac{q^2}{4 \pi c^3} \dot{u}^2 \sin^2 \Theta
\,.
\end{align}
Integrating this over the solid angle allows us to calculate the total power emitted:
%
\begin{align}
\dv{w}{t} &= \int \frac{q^2}{4 \pi c^3} \dot{u}^2 \sin^2 \Theta \dd{\Omega } \\
&= \frac{q^2 \dot{u}^2}{2 c^3} \underbrace{\int_{-1}^{1} (1 - \mu^2) \dd{\mu }}_{= 2 - 2/3}
\marginnote{Reparametrized \(\mu = \cos \Theta \), integrated over \(\phi \).} \\
&= \frac{2q^2 \dot{u}^2}{3 c^3} \label{eq:larmor}
\,,
\end{align}
%
which is the well-known \textbf{Larmor formula}.
What we want to do now is to generalize this result to a system of \emph{many charges}.
Each of these will have a charge \(q_i\), a distance \(R_i\) from the observer, an acceleration \(\dot{u}_i\).
In principle the generalization is simple: the electromagnetic field obeys the superposition principle, so we can just add the \(E_{i, r}\) from all the charges together.
The issue is that the retarded time for each of the particles is slightly different from that of another particle.
This is a treatable problem, but it makes the calculation more cumbersome.
What we can do in order to mitigate it is to make the \textbf{dipole approximation}.
Suppose that the system of \(N\) charges is contained within a volume whose characteristic length scale is \(L\).
Also suppose that \(\tau \) is the typical scale across which the system evolves.
If the evolution time \(\tau \) is long compared to \(L / c\), then we can ignore the differences between the retarded times.
Now, if the system changes significantly over a time \(\tau \) then the electric field also changes significantly over that time, so we can estimate the frequency of the emitted radiation as \(\nu \sim 1/ \tau \).
What we are asking, \(\tau \gg L / c\), then becomes \(\nu \approx 1/\tau \ll c/ L\), or equivalently \(\lambda \gg L\).
We can also give an order of magnitude for the typical velocity of the particles: if \(\ell\) is the typical path length of a particle, then the typical velocity will be \(u \sim \ell / \tau \).
This means that \(\ell / u = \tau\) must be larger than \(L / c\): rearranging, we have
%
\begin{align}
\frac{\ell}{L} \gg \frac{u}{c}
\,,
\end{align}
%
but since \(\ell \lesssim L\) this means that \(u / c \ll 1\).
If these conditions are satisfied, then we can get the total electric field by adding all the electric field contributions from all the various particles:
%
\begin{align}
\vec{E}_r &= \sum _{i} \vec{E}_{r, i}
= \sum _{i} \frac{q_i}{R_i c^2} \vec{n}_i \times \qty(\vec{n}_i \times \dot{\vec{u}}_i) \\
&= \sum _{i} \frac{1}{R_i c^2} \vec{n}_i \times \qty(\vec{n}_i \times q_i \dot{\vec{u}}_i)
\,.
\end{align}
All of these will still need to be computed at the retarded time, although the retarded time will be the same for all the charges.
If the length scale \(L\) of the system is smaller than all the distances \(R_i\), then we can approximate any \(R_i\) with a constant \(R_0\); also the unit vectors \(\vec{n}_i\) will be almost equal, and we can approximate them with a single \(\vec{n}\).
\(R_0\) does not need to be the mean of the \(R_i\), the point is that all of them are quite similar so the distance to any point inside the source works. The reasoning is similar for \(\vec{n}\).
If we make both of these approximations we find
%
\begin{align}
\vec{E}_r &= \sum _{i} \frac{1}{R_0 c^2} \vec{n} \times \qty(\vec{n} \times q_i \dot{\vec{u}}_i) \\
&= \frac{1}{R_0 c^2} \vec{n} \times \qty(\vec{n} \times \underbrace{\sum _{i} q_i \dot{\vec{u}}_i}_{ \ddot{d}})
\,,
\end{align}
%
where we define the dipole moment
%
\begin{align}
\vec{d} = \sum _{i} q_i \vec{r}_i
\,.
\end{align}
Because the dipole moment appeared, this is called the \textbf{dipole approximation}.
If \(\Theta \) is the angle between \(\vec{n}\) and \(\ddot{\vec{d}}\), then we have
%
\begin{align}
E_r = \frac{ \ddot{d} \sin \Theta }{R_0 c^2}
\,,
\end{align}
%
and with this we can calculate the power radiated per unit area
%
\begin{align}
\frac{ \dd{w}}{ \dd{A} \dd{t}} = \frac{c}{4 \pi } E_r^2
\,,
\end{align}
%
and per unit solid angle:
%
\begin{align}
\frac{ \dd{w}}{ \dd{t} \dd{\Omega }}= \frac{c}{4 \pi } E_r^2 R_0^2
= \frac{1}{c^3} \frac{ \ddot{d}^2}{4 \pi } \sin^2\Theta
\,,
\end{align}
%
so if we integrate we can find the total emitted power, which generalizes the \textbf{Larmor formula}:
%
\begin{align} \label{eq:larmor-dipole}
\dv{w}{t}= \frac{2 \ddot{d}^2}{3 c^3}
\,.
\end{align}
Now let us discuss the \textbf{spectral distribution} of the emitted energy. The energy per unit frequency and area will be
%
\begin{align}
\frac{ \dd{w}}{ \dd{A} \dd{\omega }} = c \abs{\hat{E}_r(\omega )}^2
\,,
\end{align}
%
while the energy emitted per unit solid angle as usual is calculated by multiplying by \(R^2\):
%
\begin{align}
\frac{ \dd{w}}{ \dd{\Omega } \dd{\omega }} = R^2 c \abs{\hat{E}_r (\omega )}^2 = \frac{1}{c^3} \sin^2\Theta \abs{ \hat{\ddot{d}} (\omega )}^2
\,,
\end{align}
%
where \(\hat{ \ddot{d} }\) is the Fourier transform of \(\ddot{d}\).
This assumes that \(\Theta \) is fixed through time; we are allowing its magnitude but not its direction to change.
Derivatives in the Fourier domain are multiplication by \(-i \omega \): so, we can recover the second derivative of the dipole moment in the time domain as
%
\begin{align}
\ddot{d} (t) = - \int_{- \infty }^{\infty } \omega^2 e^{-i \omega t} \hat{d}(\omega ) \dd{\omega }
\,,
\end{align}
%
which means that we can express the electric field in the Fourier domain as
%
\begin{align}
\hat{E} ( \omega ) = - \frac{\omega^2}{c^2 R} \hat{d}(\omega ) \sin \Theta
\,,
\end{align}
%
so that the energy emitted per unit frequency and solid angle becomes
%
\begin{align}
\frac{ \dd{w}}{ \dd{\Omega } \dd{\omega }} = \frac{\omega^{4}}{c^3} \abs{\hat{d}(\omega )}^2 \sin^2 \Theta
\,.
\end{align}
If we integrate over the sphere we find
%
\begin{align} \label{eq:spectral-distribution-dipole}
\dv{w}{\omega } = \frac{8 \pi }{3 c^3} \omega^{4} \abs{\hat{d}(\omega )}^2
\,.
\end{align}
Note that the spectrum of the radiation is heavily dependent on the frequency of the radiation.
\subsection{Thomson scattering}
This is scattering of radiation by free electrons.
Consider a plane, linearly polarized, electromagnetic wave impacting a free electron. The force onto the electron is the Lorentz force:
%
\begin{align}
\vec{F} = q \qty(\vec{E} + \frac{\vec{v}}{c} \times \vec{B})
\,.
\end{align}
We know that \(\abs{E} = \abs{B}\) for the wave, so if the electron is nonrelativistic then the magnetic term is negligible:
%
\begin{align}
\vec{F} \approx e \vec{E}
\,.
\end{align}
\end{document}
|
{"hexsha": "918d7ca6d865b945fe5ca53f79d95ed75c805c1e", "size": 7655, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "ap_second_semester/radiative_processes/mar26.tex", "max_stars_repo_name": "jacopok/notes", "max_stars_repo_head_hexsha": "805ebe1be49bbd14c6b46b24055f9fc7d1cd2586", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2019-10-10T13:10:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-13T14:52:50.000Z", "max_issues_repo_path": "ap_second_semester/radiative_processes/mar26.tex", "max_issues_repo_name": "jacopok/notes", "max_issues_repo_head_hexsha": "805ebe1be49bbd14c6b46b24055f9fc7d1cd2586", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ap_second_semester/radiative_processes/mar26.tex", "max_forks_repo_name": "jacopok/notes", "max_forks_repo_head_hexsha": "805ebe1be49bbd14c6b46b24055f9fc7d1cd2586", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-10-03T16:20:19.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-06T16:11:07.000Z", "avg_line_length": 39.8697916667, "max_line_length": 257, "alphanum_fraction": 0.6642717178, "num_tokens": 2605}
|
import warnings
def fxn():
    """Emit a DeprecationWarning (used to exercise the warning filters)."""
    warnings.warn("deprecated", DeprecationWarning)
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import pickle
import cv2
import os
import json
import sys
import lmdb
from collections import defaultdict
import random
from utils import *
from datetime import datetime
# Pin GPU enumeration to PCI bus order and expose four GPUs to TF.
os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES']='0,1,2,3'
# TF1-style session config: grow GPU memory on demand, log placements,
# and fall back to CPU when an op has no GPU kernel.
gpu_options = tf.GPUOptions(allow_growth=True)
config = tf.ConfigProto(gpu_options=gpu_options,log_device_placement=True,allow_soft_placement=True)
#############
# Visual Feature Extraction
# Columbia University
#############
# Specify data path roots (LDC corpus, shared working dir, model dir).
shared = ''
models = ''
corpus_path = '/root/LDC/'
working_path = shared + '/root/shared/'
model_path = models + '/root/models/'
# Version Setting
# Set evaluation version as the prefix folder for all inputs/outputs.
version_folder = 'dryrun03/' #'dryrun/'
# Input: LDC2019E42 unpacked data, CU visual grounding and instance matching moodels, UIUC text mention results, CU object detection results
# Input Paths
# Source corpus data paths
print('Check Point: Raw Data corpus_path change',corpus_path)
parent_child_tab = corpus_path + 'docs/parent_children.tab'
kfrm_msb = corpus_path + 'docs/masterShotBoundary.msb'
kfrm_path = corpus_path + 'data/video_shot_boundaries/representative_frames'
jpg_path = corpus_path + 'data/jpg/jpg/'
# UIUC text mention result paths (ASR transcripts + mapping files)
video_asr_path = working_path + 'uiuc_asr_files/' + version_folder +'en_asr_ltf/'
video_map_path = working_path + 'uiuc_asr_files/' + version_folder +'en_asr_map/'
print('Check Point: text mentions path change',video_asr_path)
# CU object detection result paths (pickled detections)
det_results_path_img = working_path + 'cu_objdet_results/' + version_folder + 'det_results_merged_34a.pkl' # jpg images
det_results_path_kfrm = working_path + 'cu_objdet_results/' + version_folder + 'det_results_merged_34b.pkl' # key frames
print('Check Point: Alireza path change:','\n',det_results_path_img,'\n', det_results_path_kfrm,'\n')
# Model Paths
# CU visual grounding and instance matching model paths
grounding_model_path = model_path + 'model_ELMo_PNASNET_VOA_norm'
matching_model_path = model_path + 'model_universal_no_recons_ins_only'
# Output: CU visual grounding and instance matching features
# Output Paths
# CU visual grounding (semantic) feature LMDB paths
out_path_jpg_sem = working_path + 'cu_grounding_matching_features/' + version_folder + 'semantic_features_jpg.lmdb'
out_path_kfrm_sem = working_path + 'cu_grounding_matching_features/' + version_folder + 'semantic_features_keyframe.lmdb'
if not os.path.exists(working_path + 'cu_grounding_matching_features/' + version_folder):
    os.makedirs(working_path + 'cu_grounding_matching_features/' + version_folder)
# CU instance matching feature LMDB paths
out_path_jpg = working_path + 'cu_grounding_matching_features/' + version_folder + 'instance_features_jpg.lmdb'
out_path_kfrm = working_path + 'cu_grounding_matching_features/' + version_folder + 'instance_features_keyframe.lmdb'
# Loading the pretrained visual-grounding model and grabbing the tensors
# used below: local (per-cell) and global image feature maps.
print('Loading grounding pretrained model...')
sess, graph = load_model(grounding_model_path,config)
input_img = graph.get_tensor_by_name("input_img:0")
mode = graph.get_tensor_by_name("mode:0")
v = graph.get_tensor_by_name("image_local_features:0")
v_bar = graph.get_tensor_by_name("image_global_features:0")
print('Loading done.')
# Preparing lookup dicts: parent/child document IDs and keyframe directories.
parent_dict, child_dict = create_dict(parent_child_tab)
id2dir_dict_kfrm = create_dict_kfrm(kfrm_path, kfrm_msb, video_asr_path, video_map_path)
# file paths for jpg stills
path_dict = create_path_dict(jpg_path)
# ... extended with paths for mp4 keyframes
path_dict.update(create_path_dict_kfrm(id2dir_dict_kfrm))
# print('HC000TJCP' in id2dir_dict_kfrm.keys())
# print(id2dir_dict_kfrm.keys())
# Loading pickled object detection results (images and keyframes).
with open(det_results_path_img, 'rb') as f:
    dict_obj_img = pickle.load(f)
with open(det_results_path_kfrm, 'rb') as f:
    dict_obj_kfrm = pickle.load(f)
print(datetime.now())
# print(child_dict)
# Semantic (grounding) features -- roughly 8 hours in total.
# Opening LMDB environments for the semantic feature outputs.
lmdb_env_jpg = lmdb.open(out_path_jpg_sem, map_size=int(1e11), lock=False)
lmdb_env_kfrm = lmdb.open(out_path_kfrm_sem, map_size=int(1e11), lock=False)
# JPG stills pass: about 1.5 hours.
print(datetime.now())
missed_children_jpg = []
for i, key in enumerate(dict_obj_img):
    # Resolve the child document's image; record and skip if missing.
    imgs,_ = fetch_img(key+'.jpg.ldcc', parent_dict, child_dict, path_dict, level = 'Child')
    if len(imgs)==0:
        missed_children_jpg.append(key)
        continue
    # Crop a batch of detected bounding boxes (unfiltered: score_thr=0).
    img_batch, bb_ids, bboxes_norm = batch_of_bbox(imgs[0], dict_obj_img, key,\
                                        score_thr=0, filter_out=False)
    if len(bb_ids)>0:
        feed_dict = {input_img: img_batch, mode: 'test'}
        # Local feature map from the grounding network (19x19 grid assumed).
        v_pred = sess.run([v], feed_dict)[0]
        for j,bb_id in enumerate(bb_ids):
            # Average the local features over the cells covered by the box.
            mask = mask_fm_bbox(feature_map_size=(19,19),bbox_norm=bboxes_norm[j,:],order='xyxy')
            if np.sum(mask)==0:
                continue
            img_vec = np.average(v_pred[j,:], weights = np.reshape(mask,[361]), axis=0)
            # LMDB key is "<child_id>/<bbox_id>".
            save_key = key+'/'+str(bb_id)
            with lmdb_env_jpg.begin(write=True) as lmdb_txn:
                lmdb_txn.put(save_key.encode(), img_vec)
    # [break] only for dockerization testing
    #break
    sys.stderr.write("Stored for image {} / {} \r".format(i, len(dict_obj_img)))
print(datetime.now())
# Keyframe pass: about 4-6 hours.  Mirrors the jpg loop above but reads
# .mp4 children and writes to the keyframe LMDB.
# TODO(review): near-duplicate of the jpg loop -- candidate for a shared helper.
print(datetime.now())
missed_children_kfrm = []
for i, key in enumerate(dict_obj_kfrm):
    # key+'.mp4.ldcc'
    # print('path from obj detecton for kfrm:',key+'.mp4.ldcc')
    imgs,_ = fetch_img(key+'.mp4.ldcc', parent_dict, child_dict, path_dict, level = 'Child')
    if len(imgs)==0:
        missed_children_kfrm.append(key)
        continue
    img_batch, bb_ids, bboxes_norm = batch_of_bbox(imgs[0], dict_obj_kfrm, key,\
                                        score_thr=0, filter_out=False)
    if len(bb_ids)>0:
        feed_dict = {input_img: img_batch, mode: 'test'}
        v_pred = sess.run([v], feed_dict)[0]
        for j,bb_id in enumerate(bb_ids):
            # Average local features over the 19x19 cells covered by the box.
            mask = mask_fm_bbox(feature_map_size=(19,19),bbox_norm=bboxes_norm[j,:],order='xyxy')
            if np.sum(mask)==0:
                continue
            img_vec = np.average(v_pred[j,:], weights = np.reshape(mask,[361]), axis=0)
            save_key = key+'/'+str(bb_id)
            with lmdb_env_kfrm.begin(write=True) as lmdb_txn:
                lmdb_txn.put(save_key.encode(), img_vec)
    # [break] only for dockerization testing
    #break
    sys.stderr.write("Stored for keyframe {} / {} \r".format(i, len(dict_obj_kfrm)))
print(datetime.now())
# Report how many child assets could not be fetched.  The original lines were
# bare `len(...)` expressions -- notebook leftovers whose results were
# silently discarded when run as a script.
print('missed children (jpg):', len(missed_children_jpg))
print('missed children (keyframes):', len(missed_children_kfrm))
# Instance Features
# about 3 hours in total for Instance Features
# Per-box instance-matching embeddings: each cropped box is fed through the
# pretrained matching model and the resulting vector stored under
# "<child_id>/<bbox_id>" in the (non-semantic) LMDB stores.
#opening lmdb environment
lmdb_env_jpg = lmdb.open(out_path_jpg, map_size=int(1e11), lock=False)
lmdb_env_kfrm = lmdb.open(out_path_kfrm, map_size=int(1e11), lock=False)
#loading instance matching pretrained model
sess, graph = load_model(matching_model_path, config)
# Tensor handles from the frozen graph; names fixed at model-export time.
input_img = graph.get_tensor_by_name("input_img:0")
mode = graph.get_tensor_by_name("mode:0")
img_vec = graph.get_tensor_by_name("img_vec:0")
#about 0.5 hour
print(datetime.now())
missed_children_jpg = []  # reset: now tracking misses for the instance pass
for i, key in enumerate(dict_obj_img):
    # Todo test
    #if 'HC0005KMS' not in key: #or 'HC0001H01' in key:
    #    continue
    print(i,key)
    imgs,_ = fetch_img(key+'.jpg.ldcc', parent_dict, child_dict, path_dict, level = 'Child')
    if len(imgs)==0:
        missed_children_jpg.append(key)
        continue
    # Crops are resized to 224x224 as expected by the matching model.
    img_batch, bb_ids, bboxes_norm = batch_of_bbox(imgs[0], dict_obj_img, key,\
        score_thr=0, filter_out=False,img_size=(224,224))
    if len(bb_ids)>0:
        # Test for cropping bug
        feed_dict = {input_img: img_batch, mode: 'test'}
        # img_vec_pred row j is the embedding for bb_ids[j].
        img_vec_pred = sess.run([img_vec], feed_dict)[0]
        # print('img_batch',img_batch)
        # print('img_batch len:',len(img_batch),np.shape(img_batch))
        # print('img_batch vec:',img_batch)
        # print(np.shape(img_vec_pred))
        # print('img_vec_pred',type(img_vec_pred),img_vec_pred)
        for j,bb_id in enumerate(bb_ids):
            save_key = key+'/'+str(bb_id)
            with lmdb_env_jpg.begin(write=True) as lmdb_txn:
                lmdb_txn.put(save_key.encode(), img_vec_pred[j,:])
            # print(sum(img_vec_pred[j,:]))
    # [break] only for dockerization testing
    #break
    sys.stderr.write("Stored for image {} / {} \r".format(i, len(dict_obj_img)))
print(datetime.now())
# --- Instance features: video keyframes -------------------------------------
# Same embedding procedure as the still-image instance loop above.
#about 3 hours
missed_children_kfrm = []  # reset for the instance pass
for i, key in enumerate(dict_obj_kfrm):
    imgs,_ = fetch_img(key+'.mp4.ldcc', parent_dict, child_dict, path_dict, level = 'Child')
    if len(imgs)==0:
        missed_children_kfrm.append(key)
        continue
    # 224x224 crops for the matching model; keep every detection.
    img_batch, bb_ids, bboxes_norm = batch_of_bbox(imgs[0], dict_obj_kfrm, key,\
        score_thr=0, filter_out=False,img_size=(224,224))
    if len(bb_ids)>0:
        feed_dict = {input_img: img_batch, mode: 'test'}
        img_vec_pred = sess.run([img_vec], feed_dict)[0]
        for j,bb_id in enumerate(bb_ids):
            save_key = key+'/'+str(bb_id)
            with lmdb_env_kfrm.begin(write=True) as lmdb_txn:
                lmdb_txn.put(save_key.encode(), img_vec_pred[j,:])
    # [break] only for dockerization testing
    #break
    sys.stderr.write("Stored for keyframe {} / {} \r".format(i, len(dict_obj_kfrm)))
print(datetime.now())
print('Visual Feature Extraction Finished.')
|
{"hexsha": "321866cff7ef442633e7554e4bfdd6f32d72ce87", "size": 9573, "ext": "py", "lang": "Python", "max_stars_repo_path": "Feature_Extraction.py", "max_stars_repo_name": "GAIA-DARPA-AIDA/grounding-merging", "max_stars_repo_head_hexsha": "600760326c2322e8dec36f862b02c3a30abbb8ee", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-05-03T17:15:38.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-21T10:35:46.000Z", "max_issues_repo_path": "Feature_Extraction.py", "max_issues_repo_name": "GAIA-DARPA-AIDA/grounding-merging", "max_issues_repo_head_hexsha": "600760326c2322e8dec36f862b02c3a30abbb8ee", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-05-06T20:46:43.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-06T20:46:43.000Z", "max_forks_repo_path": "Feature_Extraction.py", "max_forks_repo_name": "GAIA-DARPA-AIDA/grounding-merging", "max_forks_repo_head_hexsha": "600760326c2322e8dec36f862b02c3a30abbb8ee", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-05-03T02:12:15.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-10T08:37:27.000Z", "avg_line_length": 38.9146341463, "max_line_length": 140, "alphanum_fraction": 0.7035412097, "include": true, "reason": "import numpy", "num_tokens": 2469}
|
__precompile__()
module Wavelets
# Each include defines one submodule of the package; the order matters if
# later files refer to names introduced by earlier ones.
include("util.jl")
include("wt.jl")
include("transforms.jl")
include("threshold.jl")
include("plot.jl")
using Reexport
# Re-export every submodule's public API so users can access it directly
# from `Wavelets` without qualifying the submodule.
@reexport using .Util, .WT, .Transforms, .Threshold, .Plot
end
|
{"hexsha": "2f1517808d0b3ee446e72f5b4f41fcc36bfa279e", "size": 221, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Wavelets.jl", "max_stars_repo_name": "JuliaPackageMirrors/Wavelets.jl", "max_stars_repo_head_hexsha": "39f076014406712adfb8b797d55a22ad53c3814b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Wavelets.jl", "max_issues_repo_name": "JuliaPackageMirrors/Wavelets.jl", "max_issues_repo_head_hexsha": "39f076014406712adfb8b797d55a22ad53c3814b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Wavelets.jl", "max_forks_repo_name": "JuliaPackageMirrors/Wavelets.jl", "max_forks_repo_head_hexsha": "39f076014406712adfb8b797d55a22ad53c3814b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 13.0, "max_line_length": 58, "alphanum_fraction": 0.7239819005, "num_tokens": 64}
|
// The MIT License (MIT)
//
// Copyright (c) 2015 Jonathan McCluskey and William Harding
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//
#define BOOST_TEST_DYN_LINK
#include <boost/test/unit_test.hpp>
#include <gmpxx.h>
#include <iostream>
#include "Utilities.h"
// StringToNumber should encode "Bob" as the concatenation of its ASCII
// bytes: 'B' = 0x42, 'o' = 0x6F, 'b' = 0x62 -> 0x426F62.
// (An earlier comment here claimed a trailing 0x00 null terminator; the
// expected value below contains none.)
BOOST_AUTO_TEST_CASE(utilties_test_1)
{
    std::string str = "Bob";
    mpz_class num = Utilities::StringToNumber(str);
    mpz_class expected_num("426F62", 16);
    BOOST_CHECK(expected_num == num);
}
// Round trip of test 1: the number 0x426F62 must decode back to "Bob".
BOOST_AUTO_TEST_CASE(utilties_test_2)
{
    const mpz_class encoded("426F62", 16);
    const std::string want = "Bob";
    std::string decoded = Utilities::NumberToString(encoded);
    BOOST_CHECK_EQUAL(decoded, want);
}
// Modular exponentiation: 11^13 mod 53 == 52.
BOOST_AUTO_TEST_CASE(utilties_test_3)
{
    const mpz_class expected = 52;
    const mpz_class actual = Utilities::FastExp(11, 13, 53);
    BOOST_CHECK(actual == expected);
}
// Extended Euclid: ExtendedGcd(a, b) returns (g, x, y) with
// g = gcd(a, b) and a*x + b*y == g.
BOOST_AUTO_TEST_CASE(utilties_test_4)
{
    mpz_class g;
    mpz_class s;
    mpz_class t;

    std::tie(g, s, t) = Utilities::ExtendedGcd(65, 40);
    BOOST_CHECK(g == 5);   // gcd(65, 40)
    BOOST_CHECK(s == -3);  // 65*(-3) + 40*5 == 5
    BOOST_CHECK(t == 5);

    std::tie(g, s, t) = Utilities::ExtendedGcd(1239, 735);
    BOOST_CHECK(g == 21);  // gcd(1239, 735)
    BOOST_CHECK(s == -16); // 1239*(-16) + 735*27 == 21
    BOOST_CHECK(t == 27);
}
|
{"hexsha": "b1d99ad851362d89b620f8e2a7f95acd451b79a7", "size": 2290, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/libcrypto/test/check_Utilities.cpp", "max_stars_repo_name": "ToadRedCarp/koolkash-digital-cash-protocol", "max_stars_repo_head_hexsha": "ad8b1ed8fdb79658c7d74934db53463d02c5cb42", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/libcrypto/test/check_Utilities.cpp", "max_issues_repo_name": "ToadRedCarp/koolkash-digital-cash-protocol", "max_issues_repo_head_hexsha": "ad8b1ed8fdb79658c7d74934db53463d02c5cb42", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/libcrypto/test/check_Utilities.cpp", "max_forks_repo_name": "ToadRedCarp/koolkash-digital-cash-protocol", "max_forks_repo_head_hexsha": "ad8b1ed8fdb79658c7d74934db53463d02c5cb42", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.9459459459, "max_line_length": 81, "alphanum_fraction": 0.7165938865, "num_tokens": 572}
|
import unittest
import numpy as np
import six
import chainer
from chainer import testing
from chainer_tests.dataset_tests.tabular_tests import dummy_dataset
# filter out invalid combinations of params
def _filter_params(params):
for param in params:
if param['out_mode'] is None and \
isinstance(param['key_indices'], tuple) and \
any(1 <= key_index
for key_index in param['key_indices']):
continue
yield param
@testing.parameterize(*_filter_params(testing.product({
    'in_mode': [tuple, dict, None],
    'out_mode': [tuple, dict, None],
    'indices': [None, [1, 3], slice(None, 2)],
    'key_indices': [None, (0,), (1, 0)],
    'with_batch': [False, True],
})))
class TestTransform(unittest.TestCase):
    # Exercises TabularDataset.transform / transform_batch over every valid
    # combination of input mode, output mode, example indexing and batching.

    def test_transform(self):
        dataset = dummy_dataset.DummyDataset(
            mode=self.in_mode, return_array=True, convert=True)

        def transform(*args, **kwargs):
            # The view calls back with positional args (tuple mode), keyword
            # args (dict mode), or a single value (mode None); verify the
            # calling convention before unpacking a, b, c.
            if self.in_mode is tuple:
                self.assertEqual(len(args), 3)
                self.assertEqual(len(kwargs), 0)
                a, b, c = args
            elif self.in_mode is dict:
                self.assertEqual(len(args), 0)
                self.assertEqual(len(kwargs), 3)
                a, b, c = kwargs['a'], kwargs['b'], kwargs['c']
            elif self.in_mode is None:
                self.assertEqual(len(args), 1)
                self.assertEqual(len(kwargs), 0)
                a, = args
                b, c = a, a
            # transform_batch receives whole columns (ndarrays); plain
            # transform receives scalar floats.
            if self.with_batch:
                self.assertIsInstance(a, np.ndarray)
                self.assertIsInstance(b, np.ndarray)
                self.assertIsInstance(c, np.ndarray)
            else:
                self.assertIsInstance(a, float)
                self.assertIsInstance(b, float)
                self.assertIsInstance(c, float)
            # Produce the output in the parameterized output mode.
            if self.out_mode is tuple:
                return a + b, b + c
            elif self.out_mode is dict:
                return {'alpha': a + b, 'beta': b + c}
            elif self.out_mode is None:
                return a + b + c

        # Expected raw columns straight from the dummy dataset.
        if self.in_mode is not None:
            a, b, c = dataset.data
        else:
            a, = dataset.data
            b, c = a, a
        # Build the view and the expected transformed data side by side.
        if self.out_mode is not None:
            if self.with_batch:
                view = dataset.transform_batch(('alpha', 'beta'), transform)
            else:
                view = dataset.transform(('alpha', 'beta'), transform)
            data = np.vstack((a + b, b + c))
        else:
            if self.with_batch:
                view = dataset.transform_batch('alpha', transform)
            else:
                view = dataset.transform('alpha', transform)
            data = (a + b + c)[None]
        self.assertIsInstance(view, chainer.dataset.TabularDataset)
        self.assertEqual(len(view), len(dataset))
        if self.out_mode is not None:
            self.assertEqual(view.keys, ('alpha', 'beta'))
            self.assertEqual(view.mode, self.out_mode)
        else:
            self.assertEqual(view.keys, ('alpha',))
            self.assertEqual(view.mode, self.out_mode)
        output = view.get_examples(self.indices, self.key_indices)
        # Slice the expected data the same way get_examples was asked to.
        if self.indices is not None:
            data = data[:, self.indices]
        if self.key_indices is not None:
            data = data[list(self.key_indices)]
        for out, d in six.moves.zip_longest(output, data):
            np.testing.assert_equal(out, d)
            if self.with_batch:
                self.assertIsInstance(out, np.ndarray)
            else:
                self.assertIsInstance(out, list)
        # The converter supplied by DummyDataset(convert=True) must be kept.
        self.assertEqual(view.convert(view.fetch()), 'converted')
@testing.parameterize(
    {'mode': tuple},
    {'mode': dict},
    {'mode': None},
)
class TestTransformInvalid(unittest.TestCase):
    # A transform must return the same mode on every call; these tests make
    # it alternate modes (or change the batch length) and expect ValueError.

    def setUp(self):
        self.count = 0  # number of times _transform has been invoked

    def _transform(self, a, b, c):
        # Return the configured mode on even-numbered calls and a different
        # mode on odd-numbered calls, so two consecutive fetches disagree.
        self.count += 1
        if self.count % 2 == 0:
            mode = self.mode
        else:
            if self.mode is tuple:
                mode = dict
            elif self.mode is dict:
                mode = None
            elif self.mode is None:
                mode = tuple
        if mode is tuple:
            return a,
        elif mode is dict:
            return {'a': a}
        elif mode is None:
            return a

    def test_transform_inconsistent_mode(self):
        dataset = dummy_dataset.DummyDataset()
        view = dataset.transform(('a',), self._transform)
        view.get_examples([0], None)  # first call succeeds; mode is recorded
        with self.assertRaises(ValueError):
            view.get_examples([0], None)  # second call returns another mode

    def test_transform_batch_inconsistent_mode(self):
        dataset = dummy_dataset.DummyDataset()
        view = dataset.transform_batch(('a',), self._transform)
        view.get_examples(None, None)
        with self.assertRaises(ValueError):
            view.get_examples(None, None)

    def test_transform_batch_length_changed(self):
        dataset = dummy_dataset.DummyDataset()

        def transform_batch(a, b, c):
            # Grows the batch by one element -> length mismatch must raise.
            if self.mode is tuple:
                return a + [0],
            elif self.mode is dict:
                return {'a': a + [0]}
            elif self.mode is None:
                return a + [0]

        view = dataset.transform_batch(('a',), transform_batch)
        with self.assertRaises(ValueError):
            view.get_examples(None, None)
testing.run_module(__name__, __file__)
|
{"hexsha": "96193ed1eccc313e7ec4c5347c46ccd345463e81", "size": 5484, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/chainer_tests/dataset_tests/tabular_tests/test_transform.py", "max_stars_repo_name": "zjzh/chainer", "max_stars_repo_head_hexsha": "e9da1423255c58c37be9733f51b158aa9b39dc93", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3705, "max_stars_repo_stars_event_min_datetime": "2017-06-01T07:36:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T10:46:15.000Z", "max_issues_repo_path": "tests/chainer_tests/dataset_tests/tabular_tests/test_transform.py", "max_issues_repo_name": "zjzh/chainer", "max_issues_repo_head_hexsha": "e9da1423255c58c37be9733f51b158aa9b39dc93", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5998, "max_issues_repo_issues_event_min_datetime": "2017-06-01T06:40:17.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-08T01:42:44.000Z", "max_forks_repo_path": "tests/chainer_tests/dataset_tests/tabular_tests/test_transform.py", "max_forks_repo_name": "zjzh/chainer", "max_forks_repo_head_hexsha": "e9da1423255c58c37be9733f51b158aa9b39dc93", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1150, "max_forks_repo_forks_event_min_datetime": "2017-06-02T03:39:46.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T02:29:32.000Z", "avg_line_length": 31.6994219653, "max_line_length": 76, "alphanum_fraction": 0.5516046681, "include": true, "reason": "import numpy", "num_tokens": 1154}
|
[STATEMENT]
lemma dim_poly_greater_ex_coeff: "dim_poly x > d \<Longrightarrow> \<exists>i\<ge>d. coeff x i \<noteq> 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. d < dim_poly x \<Longrightarrow> \<exists>i\<ge>d. Abstract_Linear_Poly.coeff x i \<noteq> 0
[PROOF STEP]
by (simp split: if_splits) (meson Max_in coeff_zero finite_vars less_Suc_eq_le)
|
{"llama_tokens": 148, "file": "Linear_Programming_Matrix_LinPoly", "length": 1}
|
\chapter{Practical recommendations}
\label{practical-recommendations}
% A section which is missing in something called a "Cookbook" would be
% $ practical recommendations on how to input Unicode characters. There are
% various character selection tools, shortcuts on the keyboard, the
% shapecatcher website references at several places, or the Wikipedia
% lists of glyphs and fileformat.info. Having all this in one section
% would be handy for the user. It is of course unrelated to the
% orthography profiles, but I imagine that many people will use this book
% as a primer on IPA+Unicode and actually disregard the last two chapters.
% For this group, such a summary would be useful.
This chapter is meant to be a short guide for novice users who are not interested in the programmatic aspects presented in Chapters \ref{orthography-profiles} \& \ref{implementation}. Instead, we provide links to quickly find general information about the Unicode Standard and the International Phonetic Alphabet (IPA). We target ordinary working linguists who want to know how to easily insert special characters into their digital documents and applications.
\section{Unicode}
We discussed the Unicode Consortium's approach to computationally encoding writing systems in Chapter \ref{the-unicode-approach}. The common pitfalls that we have encountered when using the Unicode Standard are discussed in detail in Chapter \ref{unicode-pitfalls}. Together these chapters provide users with an in-depth background about the hurdles they may encounter when using the Unicode Standard for encoding their data or for developing multilingual applications. For general background information about Unicode and character encodings, see these resources:
\begin{itemize}
\item \url{http://www.unicode.org/standard/WhatIsUnicode.html}
\item \url{https://en.wikipedia.org/wiki/Unicode}
\item \url{https://www.w3.org/International/articles/definitions-characters/}
\end{itemize}
% \section{Unicode character pickers}
For practical purposes, users need a way to insert special characters (i.e.\ characters that are not easily entered via their keyboards) into documents and software applications. There are a few basic approaches for inserting special characters. One way is to use software-specific functionality, when it is available. For example, Microsoft Word has an insert-special-symbol-or-character function that allows users to scroll through a table of special characters across different scripts. Special characters can be then inserted into the document by clicking on them. Another way is to install a system-wide application for special character insertion. We have long been fans of the PopChar application from Ergonis Software, which is a small program that can insert most Unicode characters (note however that the full version requires a paid subscription).\footnote{\url{http://www.ergonis.com/products/popcharx/}}
There are also web-based Unicode character pickers available through the browser that allow for the creation and insertion of special characters, which can then be copied \& pasted into documents or software applications. For example, try:
\begin{itemize}
\item \url{https://unicode-table.com/en/}
\item \url{https://r12a.github.io/pickers/}
\end{itemize}
Yet another option for special character insertion includes operating system-specific shortcuts. For example on the Mac, holding down a key on the keyboard for a second, say <u>, triggers a pop up with the options <û, ü, ù, ú, ū> which can then be inserted by keying the associated number (1--5). This method is convenient for occasionally typing accented characters, but the full range of special characters is limited and this method is burdensome for rapidly inserting many different characters. For complete access to special characters, Mac provides a Keyboard Viewer application available in the Keyboard pane of the System Preferences.
On Windows, accented characters can be inserted by using alt-key shortcuts, i.e.\ holding down the alt-key and keying in a sequence of numbers (which typically reflect the Unicode character's decimal representation). For example, \textsc{latin small letter c with cedilla} at \uni{00E7} with the decimal code 231 can be inserted by holding the alt-key and keying the sequence 0231. Again, this method is burdensome for rapidly inserting characters. For access to the full range of Unicode characters, the Character Map program comes preinstalled on all Microsoft operating systems.
There are also many third-party applications that provide custom keyboard layouts. These programs typically override keys or keystrokes on the user's keyboard allowing them to quickly enter special characters (once the layout of the new keyboard is mastered). They can be language-specific or devoted specifically to IPA. Two popular programs are:
\begin{itemize}
\item \url{https://keyman.com/}
\item \url{http://scripts.sil.org/ipa-sil_keyboard}
\end{itemize}
\section{IPA}
In Chapter \ref{the-international-phonetic-alphabet} we described in detail the history and principles of the International Phonetic Alphabet (IPA) and how it became encoded in the Unicode Standard. In Chapter \ref{ipa-meets-unicode} we describe the resulting pitfalls from their marriage. These two chapters provide a detailed overview of the challenges that users face when working with the two standards.
For general information about the IPA, the standard text is the \textit{Handbook of the International Phonetic Association: A Guide to the Use of the International Phonetic Alphabet} \citep{IPA1999}. The handbook describes in detail the principles and premises of the IPA, which we have summarized in Section \ref{IPApremises-principles}. The handbook also provides many examples of how to use the IPA. The Association also makes available information about itself online\footnote{\url{https://www.internationalphoneticassociation.org/}} and it provides the most current IPA charts.\footnote{\url{https://www.internationalphoneticassociation.org/content/ipa-chart}} Wikipedia also has a comprehensive article about the IPA.\footnote{\url{https://en.wikipedia.org/wiki/International_Phonetic_Alphabet}}
There are several good Unicode IPA character pickers available through the browser, including:
\begin{itemize}
\item \url{https://r12a.github.io/pickers/ipa/}
\item \url{https://westonruter.github.io/ipa-chart/keyboard/}
\item \url{http://ipa.typeit.org/}
\end{itemize}
\noindent Various linguistics departments also provide information about IPA fonts, software, and inserting Unicode IPA characters. Two useful resources are:
\begin{itemize}
\item \url{http://www.phon.ucl.ac.uk/resource/phonetics/}
\item \url{https://www.york.ac.uk/language/current/resources/freeware/ipa-fonts-and-software/}
\end{itemize}
Regarding fonts that display Unicode IPA correctly, many linguists turn to the IPA Unicode fonts developed by SIL International. The complete SIL font list is available online.\footnote{\url{http://scripts.sil.org/SILFontList}} There is also a page that describes IPA transcription using the SIL fonts and provides an informative discussion on deciding which font to use.\footnote{\url{http://scripts.sil.org/ipahome}} Traditionally, IPA fonts popular with linguists were created and maintained by SIL International, so it is often the case in our experience that we encounter linguistics data in legacy IPA fonts, i.e.\ pre-Unicode fonts such as SIL IPA93.\footnote{\url{http://scripts.sil.org/FontFAQ_IPA93}} SIL International does a good job of describing how to convert from legacy IPA fonts to Unicode IPA. The most popular Unicode IPA fonts are Doulos SIL and Charis SIL:
\begin{itemize}
\item \url{https://software.sil.org/doulos/}
\item \url{https://software.sil.org/charis/}
\end{itemize}
Lastly, here are some online resources that we find particularly useful for finding more information about individual Unicode characters and also for converting between encodings:
\begin{itemize}
\item \url{http://www.fileformat.info/}
\item \url{https://unicodelookup.com/}
\item \url{https://r12a.github.io/scripts/featurelist/}
\item \url{https://r12a.github.io/app-conversion/}
\end{itemize}
\section{For programmers and potential programmers}
If you have made it this far, and you are eager to know more about the technical aspects of the Unicode Standard and how they relate to software programming, we recommend two light-hearted blog posts on the topic. The classic blog post about what programmers should know about the Unicode Standard is Joel Spolsky's \textit{The Absolute Minimum Every Software Developer Absolutely, Positively Must Know About Unicode and Character Sets (No Excuses!)}.\footnote{\url{https://www.joelonsoftware.com/2003/10/08/the-absolute-minimum-every-software-developer-absolutely-positively-must-know-about-unicode-and-character-sets-no-excuses/}} A more recent blogpost, with a bit more of the technical details, is by David C. Zentgraf and is titled, \textit{What Every Programmer Absolutely, Positively Needs To Know About Encodings And Character Sets To Work With Text}.\footnote{\url{http://kunststube.net/encoding/}} This post is aimed at software developers and uses the PHP language for examples.
For users of Python, see the standard documentation on how to use Unicode in your programming applications.\footnote{\url{https://docs.python.org/3/howto/unicode.html}} For R users we recommend the \textsc{stringi} library.\footnote{\url{https://cran.r-project.org/web/packages/stringi/index.html}} For \LaTeX~users the \textsc{TIPA} package is useful for inserting IPA characters into your typeset documents. See these resources:
\begin{itemize}
\item \url{http://www.tug.org/tugboat/tb17-2/tb51rei.pdf}
\item \url{https://ctan.org/pkg/tipa}
\item \url{http://ptmartins.info/tex/tipacheatsheet.pdf}
\end{itemize}
\noindent But we find it much easier to use the Unicode-aware \hologo{XeTeX} typesetting system.\footnote{\url{http://xetex.sourceforge.net/}} Unicode characters can be directly inserted into your \hologo{TeX} documents and compiled into typeset PDF with \hologo{XeLaTeX}.
Lastly, we leave you with some Unicode humor for making it this far:
\begin{itemize}
\item \url{https://xkcd.com/380/}
\item \url{https://xkcd.com/1137/}
\item \url{http://www.commitstrip.com/en/2014/06/17/unicode-7-et-ses-nouveaux-emoji/}
\item \url{http://www.i18nguy.com/humor/unicode-haiku.html}
\end{itemize}
|
{"hexsha": "60e614050fc6933fb2cc4bd2d205f566392bbec0", "size": 10490, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "book/chapters/practical_recommendations.tex", "max_stars_repo_name": "unicode-cookbook/unicode", "max_stars_repo_head_hexsha": "f6172408352709b24122acf0aeff0ad7e8a8f6ab", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 34, "max_stars_repo_stars_event_min_datetime": "2017-02-14T09:21:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-14T19:46:29.000Z", "max_issues_repo_path": "book/chapters/practical_recommendations.tex", "max_issues_repo_name": "unicode-cookbook/unicode", "max_issues_repo_head_hexsha": "f6172408352709b24122acf0aeff0ad7e8a8f6ab", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": 41, "max_issues_repo_issues_event_min_datetime": "2017-02-02T11:24:05.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-28T10:50:53.000Z", "max_forks_repo_path": "book/chapters/practical_recommendations.tex", "max_forks_repo_name": "unicode-cookbook/unicode", "max_forks_repo_head_hexsha": "f6172408352709b24122acf0aeff0ad7e8a8f6ab", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-09-24T22:24:01.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-20T21:36:30.000Z", "avg_line_length": 97.1296296296, "max_line_length": 989, "alphanum_fraction": 0.7980934223, "num_tokens": 2391}
|
(* Title: Lightweight Java, the definition
Authors: Rok Strnisa <rok@strnisa.com>, 2006
Matthew Parkinson <matt@matthewp.com>, 2006
Maintainer:
Note: This file should _not_ be modified directly. Please see the
accompanying README file.
*)
(* generated by Ott 0.20.3 from: lj_common.ott lj_base.ott lj.ott *)
theory Lightweight_Java_Definition
imports Main "HOL-Library.Multiset"
begin
(** warning: the backend selected ignores the file structure informations *)
(** syntax *)
(* Base identifiers: indices and object ids are naturals; field, method,
   variable and class names are strings. *)
type_synonym "j" = "nat"
type_synonym "f" = "string"
type_synonym "meth" = "string"
type_synonym "var" = "string"
type_synonym "dcl" = "string"
type_synonym "oid" = "nat"
(* Fully qualified class names, class references (Object or named). *)
datatype "fqn" =
   fqn_def "dcl"
datatype "cl" =
   cl_object
 | cl_fqn "fqn"
(* Terms: a program variable or the receiver `this`. *)
datatype "x" =
   x_var "var"
 | x_this
datatype "vd" =
   vd_def "cl" "var"
type_synonym "X" = "x list"
(* Contexts are trivial in this formalization (a single constructor). *)
datatype "ctx" =
   ctx_def
type_synonym "vds" = "vd list"
(* Statements of the Lightweight Java language. *)
datatype "s" =
   s_block "s list"
 | s_ass "var" "x"
 | s_read "var" "x" "f"
 | s_write "x" "f" "x"
 | s_if "x" "x" "s" "s"
 | s_new "var" "ctx" "cl"
 | s_call "var" "x" "meth" "X"
(* Method signatures, bodies, field and method declarations, classes. *)
datatype "meth_sig" =
   meth_sig_def "cl" "meth" "vds"
datatype "meth_body" =
   meth_body_def "s list" "x"
datatype "fd" =
   fd_def "cl" "f"
datatype "meth_def" =
   meth_def_def "meth_sig" "meth_body"
type_synonym "fds" = "fd list"
type_synonym "meth_defs" = "meth_def list"
datatype "cld" =
   cld_def "dcl" "cl" "fds" "meth_defs"
type_synonym "ctxcld" = "(ctx \<times> cld)"
(* Types: the top type or a named class in a context; runtime values. *)
datatype "ty" =
   ty_top
 | ty_def "ctx" "dcl"
datatype "v" =
   v_null
 | v_oid "oid"
type_synonym "clds" = "cld list"
type_synonym "ctxclds" = "ctxcld list"
type_synonym "fs" = "f list"
type_synonym "ty_opt" = "ty option"
type_synonym "tys" = "ty list"
(* Runtime structures: variable store L and heap H (partial maps). *)
type_synonym "L" = "x \<rightharpoonup> v"
type_synonym "H" = "oid \<rightharpoonup> (ty \<times> (f \<rightharpoonup> v))"
datatype "Exception" =
   ex_npe
type_synonym "P" = "clds"
type_synonym "ctxcld_opt" = "ctxcld option"
type_synonym "nn" = "nat"
type_synonym "ctxclds_opt" = "ctxclds option"
type_synonym "fs_opt" = "fs option"
type_synonym "meths" = "meth list"
datatype "ty_opt_bot" =
   ty_opt_bot_opt "ty_opt"
 | ty_opt_bot_bot
type_synonym "meth_def_opt" = "meth_def option"
type_synonym "ctxmeth_def_opt" = "(ctx \<times> meth_def) option"
datatype "mty" =
   mty_def "tys" "ty"
(* Typing environment \<Gamma> maps terms to types. *)
type_synonym "\<Gamma>" = "x \<rightharpoonup> ty"
type_synonym "v_opt" = "v option"
(* Machine configurations: normal execution state or a raised exception. *)
datatype "config" =
   config_normal "P" "L" "H" "s list"
 | config_ex "P" "L" "H" "Exception"
type_synonym "T" = "x \<rightharpoonup> x"
(** library functions *)
(* Monotonicity facts required by the inductive-definition package below. *)
lemma [mono]:"
 (!! x. f x --> g x) ==> list_all (%b. b) (map f foo_list)-->
 list_all (%b. b) (map g foo_list) "
apply(induct_tac foo_list, auto) done
lemma [mono]: "case_prod f p = f (fst p) (snd p)" by (simp add: split_def)
(*defns class_name_def *)
inductive class_name :: "cld \<Rightarrow> dcl \<Rightarrow> bool"
where
(* defn class_name *)
class_nameI: "class_name ((cld_def dcl cl fds meth_defs)) (dcl)"
(*defns superclass_name_def *)
inductive superclass_name :: "cld \<Rightarrow> cl \<Rightarrow> bool"
where
(* defn superclass_name *)
superclass_nameI: "superclass_name ((cld_def dcl cl fds meth_defs)) (cl)"
(*defns class_fields_def *)
inductive class_fields :: "cld \<Rightarrow> fds \<Rightarrow> bool"
where
(* defn class_fields *)
class_fieldsI: "class_fields ((cld_def dcl cl fds meth_defs)) (fds)"
(*defns class_methods_def *)
inductive class_methods :: "cld \<Rightarrow> meth_defs \<Rightarrow> bool"
where
(* defn class_methods *)
class_methodsI: "class_methods ((cld_def dcl cl fds meth_defs)) (meth_defs)"
(*defns method_name_def *)
inductive method_name :: "meth_def \<Rightarrow> meth \<Rightarrow> bool"
where
(* defn method_name *)
method_nameI: "method_name ((meth_def_def (meth_sig_def cl meth vds) meth_body)) (meth)"
(*defns distinct_names_def *)
inductive distinct_names :: "P \<Rightarrow> bool"
where
(* defn distinct_names *)
dn_defI: "\<lbrakk> P = ((List.map (%((cld_XXX::cld),(dcl_XXX::dcl)).cld_XXX) cld_dcl_list)) ;
list_all (\<lambda>f. f) ((List.map (%((cld_XXX::cld),(dcl_XXX::dcl)).class_name (cld_XXX) (dcl_XXX)) cld_dcl_list)) ;
distinct ( ((List.map (%((cld_XXX::cld),(dcl_XXX::dcl)).dcl_XXX) cld_dcl_list)) ) \<rbrakk> \<Longrightarrow>
distinct_names (P)"
(*defns find_cld_def *)
(* find_cld P ctx fqn result: linear search of program P for a class
   declaration whose name matches fqn; result is Some (ctx, cld) on the
   first match, None when P is exhausted. *)
inductive find_cld :: "P \<Rightarrow> ctx \<Rightarrow> fqn \<Rightarrow> ctxcld_opt \<Rightarrow> bool"
where
(* defn find_cld *)
fc_emptyI: "find_cld ( [] ) (ctx) (fqn) ( None )"
| fc_cons_trueI: "\<lbrakk> P = ([(cld)] @ cld_list) ;
cld = (cld_def dcl cl fds meth_defs) \<rbrakk> \<Longrightarrow>
find_cld (P) (ctx) ((fqn_def dcl)) ( (Some ( ( ctx , cld ) )) )"
| fc_cons_falseI: "\<lbrakk> cld = (cld_def dcl' cl fds meth_defs) ;
(cl_fqn (fqn_def dcl)) \<noteq> (cl_fqn (fqn_def dcl')) ;
find_cld ( (cld_list) ) (ctx) ((fqn_def dcl)) (ctxcld_opt)\<rbrakk> \<Longrightarrow>
find_cld ( ([(cld)] @ cld_list) ) (ctx) ((fqn_def dcl)) (ctxcld_opt)"
(*defns find_type_def *)
(* find_type P ctx cl ty_opt: resolves a class reference to a type.
   Object resolves to ty_top; a fully-qualified name resolves to
   ty_def ctx' dcl when find_cld succeeds, None when it fails. *)
inductive find_type :: "P \<Rightarrow> ctx \<Rightarrow> cl \<Rightarrow> ty_opt \<Rightarrow> bool"
where
(* defn find_type *)
ft_objI: "find_type (P) (ctx) (cl_object) ( (Some ( ty_top )) )"
| ft_nullI: "\<lbrakk>find_cld (P) (ctx) (fqn) ( None )\<rbrakk> \<Longrightarrow>
find_type (P) (ctx) ((cl_fqn fqn)) ( None )"
| ft_dclI: "\<lbrakk>find_cld (P) (ctx) ((fqn_def dcl)) ( (Some ( ( ctx' , cld ) )) )\<rbrakk> \<Longrightarrow>
find_type (P) (ctx) ((cl_fqn (fqn_def dcl))) ( (Some ( (ty_def ctx' dcl) )) )"
(*defns path_length_def *)
(* path_length P ctx cl nn: nn is the number of superclass steps from cl
   up to Object (Object itself has length 0). *)
inductive path_length :: "P \<Rightarrow> ctx \<Rightarrow> cl \<Rightarrow> nn \<Rightarrow> bool"
where
(* defn path_length *)
pl_objI: "path_length (P) (ctx) (cl_object) ( 0 )"
| pl_fqnI: "\<lbrakk>find_cld (P) (ctx) (fqn) ( (Some ( ( ctx' , cld ) )) ) ;
superclass_name (cld) (cl) ;
path_length (P) (ctx') (cl) (nn)\<rbrakk> \<Longrightarrow>
path_length (P) (ctx) ((cl_fqn fqn)) ( ( nn + 1 ) )"
(*defns acyclic_clds_def *)
(* acyclic_clds P: every class name that resolves via find_cld has a
   finite path_length, i.e. the class hierarchy contains no cycles. *)
inductive acyclic_clds :: "P \<Rightarrow> bool"
where
(* defn acyclic_clds *)
ac_defI: "\<lbrakk> \<forall> ctx fqn . ( ( (\<exists> ctx' cld . find_cld (P) (ctx) (fqn) ( (Some ( ( ctx' , cld ) )) ) ) ) \<longrightarrow> (\<exists> nn . path_length (P) (ctx) ((cl_fqn fqn)) (nn) ) ) \<rbrakk> \<Longrightarrow>
acyclic_clds (P)"
(*defns find_path_rec_def *)
(* find_path_rec P ctx cl acc result: walks the superclass chain starting
   at cl, appending each visited (ctx, cld) pair to the accumulator acc;
   yields Some acc at Object, and None when the hierarchy is cyclic or a
   class lookup fails. *)
inductive find_path_rec :: "P \<Rightarrow> ctx \<Rightarrow> cl \<Rightarrow> ctxclds \<Rightarrow> ctxclds_opt \<Rightarrow> bool"
where
(* defn find_path_rec *)
fpr_objI: "find_path_rec (P) (ctx) (cl_object) (ctxclds) ( Some ( ctxclds ) )"
| fpr_nullI: "\<lbrakk> ( \<not> ( acyclic_clds (P) ) ) \<or> find_cld (P) (ctx) (fqn) ( None ) \<rbrakk> \<Longrightarrow>
find_path_rec (P) (ctx) ((cl_fqn fqn)) (ctxclds) ( None )"
| fpr_fqnI: "\<lbrakk> acyclic_clds (P) \<and> find_cld (P) (ctx) (fqn) ( (Some ( ( ctx' , cld ) )) ) ;
superclass_name (cld) (cl) ;
find_path_rec (P) (ctx') (cl) ( ctxclds @[ ( ctx' , cld ) ] ) (ctxclds_opt)\<rbrakk> \<Longrightarrow>
find_path_rec (P) (ctx) ((cl_fqn fqn)) (ctxclds) (ctxclds_opt)"
(*defns find_path_def *)
(* find_path P ctx cl result: find_path_rec started with an empty
   accumulator. *)
inductive find_path :: "P \<Rightarrow> ctx \<Rightarrow> cl \<Rightarrow> ctxclds_opt \<Rightarrow> bool"
where
(* defn find_path *)
fp_defI: "\<lbrakk>find_path_rec (P) (ctx) (cl) ( [] ) (ctxclds_opt)\<rbrakk> \<Longrightarrow>
find_path (P) (ctx) (cl) (ctxclds_opt)"
(*defns find_path_ty_def *)
(* find_path_ty P ty result: superclass path of a type; the path of
   ty_top (Object) is empty. *)
inductive find_path_ty :: "P \<Rightarrow> ty \<Rightarrow> ctxclds_opt \<Rightarrow> bool"
where
(* defn find_path_ty *)
fpty_objI: "find_path_ty (P) (ty_top) ( Some ( [] ) )"
| fpty_dclI: "\<lbrakk>find_path (P) (ctx) ((cl_fqn (fqn_def dcl))) (ctxclds_opt)\<rbrakk> \<Longrightarrow>
find_path_ty (P) ((ty_def ctx dcl)) (ctxclds_opt)"
(*defns fields_in_path_def *)
(* fields_in_path path fs: concatenates, front to back, the field names
   declared by every class on the path. *)
inductive fields_in_path :: "ctxclds \<Rightarrow> fs \<Rightarrow> bool"
where
(* defn fields_in_path *)
fip_emptyI: "fields_in_path ( [] ) ( [] )"
| fip_consI: "\<lbrakk>class_fields (cld) ( ((List.map (%((cl_XXX::cl),(f_XXX::f)).(fd_def cl_XXX f_XXX)) cl_f_list)) ) ;
fields_in_path ( (ctxcld_list) ) (fs) ;
fs' = ( ((List.map (%((cl_XXX::cl),(f_XXX::f)).f_XXX) cl_f_list)) @ fs ) \<rbrakk> \<Longrightarrow>
fields_in_path ( ([( ( ctx , cld ) )] @ ctxcld_list) ) (fs')"
(*defns fields_def *)
(* fields P ty fs_opt: all field names of type ty, collected along its
   superclass path; None when the path does not exist. *)
inductive fields :: "P \<Rightarrow> ty \<Rightarrow> fs_opt \<Rightarrow> bool"
where
(* defn fields *)
fields_noneI: "\<lbrakk>find_path_ty (P) (ty) ( None )\<rbrakk> \<Longrightarrow>
fields (P) (ty) ( None )"
| fields_someI: "\<lbrakk>find_path_ty (P) (ty) ( Some ( ctxclds ) ) ;
fields_in_path (ctxclds) (fs)\<rbrakk> \<Longrightarrow>
fields (P) (ty) ( Some ( fs ) )"
(*defns methods_in_path_def *)
(* methods_in_path clds meths: concatenates the method names declared by
   every class on the path, front to back. *)
inductive methods_in_path :: "clds \<Rightarrow> meths \<Rightarrow> bool"
where
(* defn methods_in_path *)
mip_emptyI: "methods_in_path ( [] ) ( [] )"
| mip_consI: "\<lbrakk>class_methods (cld) ( ((List.map (%((meth_def_XXX::meth_def),(cl_XXX::cl),(meth_XXX::meth),(vds_XXX::vds),(meth_body_XXX::meth_body)).meth_def_XXX) meth_def_cl_meth_vds_meth_body_list)) ) ;
list_all (\<lambda>f. f) ((List.map (%((meth_def_XXX::meth_def),(cl_XXX::cl),(meth_XXX::meth),(vds_XXX::vds),(meth_body_XXX::meth_body)). meth_def_XXX = (meth_def_def (meth_sig_def cl_XXX meth_XXX vds_XXX) meth_body_XXX) ) meth_def_cl_meth_vds_meth_body_list)) ;
methods_in_path ( (cld_list) ) (meths') ;
meths = ( ((List.map (%((meth_def_XXX::meth_def),(cl_XXX::cl),(meth_XXX::meth),(vds_XXX::vds),(meth_body_XXX::meth_body)).meth_XXX) meth_def_cl_meth_vds_meth_body_list)) @ meths' ) \<rbrakk> \<Longrightarrow>
methods_in_path ( ([(cld)] @ cld_list) ) (meths)"
(*defns methods_def *)
(* methods P ty meths: all method names of type ty, collected along its
   superclass path (contexts are dropped before the walk). *)
inductive methods :: "P \<Rightarrow> ty \<Rightarrow> meths \<Rightarrow> bool"
where
(* defn methods *)
methods_methodsI: "\<lbrakk>find_path_ty (P) (ty) ( Some ( ((List.map (%((ctx_XXX::ctx),(cld_XXX::cld)). ( ctx_XXX , cld_XXX ) ) ctx_cld_list)) ) ) ;
methods_in_path ( ((List.map (%((ctx_XXX::ctx),(cld_XXX::cld)).cld_XXX) ctx_cld_list)) ) (meths)\<rbrakk> \<Longrightarrow>
methods (P) (ty) (meths)"
(*defns ftype_in_fds_def *)
(* ftype_in_fds P ctx fds f result: searches a field-declaration list for
   field f; yields the resolved type on success, opt None when f is absent,
   and bottom when f is found but its declared class does not resolve. *)
inductive ftype_in_fds :: "P \<Rightarrow> ctx \<Rightarrow> fds \<Rightarrow> f \<Rightarrow> ty_opt_bot \<Rightarrow> bool"
where
(* defn ftype_in_fds *)
ftif_emptyI: "ftype_in_fds (P) (ctx) ( [] ) (f) ((ty_opt_bot_opt None ))"
| ftif_cons_botI: "\<lbrakk>find_type (P) (ctx) (cl) ( None )\<rbrakk> \<Longrightarrow>
ftype_in_fds (P) (ctx) ( ([((fd_def cl f))] @ fd_list) ) (f) (ty_opt_bot_bot)"
| ftif_cons_trueI: "\<lbrakk>find_type (P) (ctx) (cl) ( (Some ( ty )) )\<rbrakk> \<Longrightarrow>
ftype_in_fds (P) (ctx) ( ([((fd_def cl f))] @ fd_list) ) (f) ((ty_opt_bot_opt (Some ( ty )) ))"
| ftif_cons_falseI: "\<lbrakk> f \<noteq> f' ;
ftype_in_fds (P) (ctx) ( (fd_list) ) (f') (ty_opt_bot)\<rbrakk> \<Longrightarrow>
ftype_in_fds (P) (ctx) ( ([((fd_def cl f))] @ fd_list) ) (f') (ty_opt_bot)"
(*defns ftype_in_path_def *)
(* ftype_in_path P path f ty_opt: looks up field f class by class along the
   path, returning the first successfully resolved type; None on an empty
   path or when a class declares f with an unresolvable class reference. *)
inductive ftype_in_path :: "P \<Rightarrow> ctxclds \<Rightarrow> f \<Rightarrow> ty_opt \<Rightarrow> bool"
where
(* defn ftype_in_path *)
ftip_emptyI: "ftype_in_path (P) ( [] ) (f) ( None )"
| ftip_cons_botI: "\<lbrakk>class_fields (cld) (fds) ;
ftype_in_fds (P) (ctx) (fds) (f) (ty_opt_bot_bot)\<rbrakk> \<Longrightarrow>
ftype_in_path (P) ( ([( ( ctx , cld ) )] @ ctxcld_list) ) (f) ( None )"
| ftip_cons_trueI: "\<lbrakk>class_fields (cld) (fds) ;
ftype_in_fds (P) (ctx) (fds) (f) ((ty_opt_bot_opt (Some ( ty )) ))\<rbrakk> \<Longrightarrow>
ftype_in_path (P) ( ([( ( ctx , cld ) )] @ ctxcld_list) ) (f) ( (Some ( ty )) )"
| ftip_cons_falseI: "\<lbrakk>class_fields (cld) (fds) ;
ftype_in_fds (P) (ctx) (fds) (f) ((ty_opt_bot_opt None )) ;
ftype_in_path (P) ( (ctxcld_list) ) (f) (ty_opt)\<rbrakk> \<Longrightarrow>
ftype_in_path (P) ( ([( ( ctx , cld ) )] @ ctxcld_list) ) (f) (ty_opt)"
(*defns ftype_def *)
(* ftype P ty f ty': the type of field f of type ty, found along the
   superclass path of ty. *)
inductive ftype :: "P \<Rightarrow> ty \<Rightarrow> f \<Rightarrow> ty \<Rightarrow> bool"
where
(* defn ftype *)
ftypeI: "\<lbrakk>find_path_ty (P) (ty) ( Some ( ctxclds ) ) ;
ftype_in_path (P) (ctxclds) (f) ( (Some ( ty' )) )\<rbrakk> \<Longrightarrow>
ftype (P) (ty) (f) (ty')"
(*defns find_meth_def_in_list_def *)
(* find_meth_def_in_list meth_defs meth result: linear search of a
   method-definition list for a definition whose signature names meth. *)
inductive find_meth_def_in_list :: "meth_defs \<Rightarrow> meth \<Rightarrow> meth_def_opt \<Rightarrow> bool"
where
(* defn find_meth_def_in_list *)
fmdil_emptyI: "find_meth_def_in_list ( [] ) (meth) ( None )"
| fmdil_cons_trueI: "\<lbrakk> meth_def = (meth_def_def (meth_sig_def cl meth vds) meth_body) \<rbrakk> \<Longrightarrow>
find_meth_def_in_list ( ([(meth_def)] @ meth_def_list) ) (meth) ( Some ( meth_def ) )"
| fmdil_cons_falseI: "\<lbrakk> meth_def = (meth_def_def (meth_sig_def cl meth' vds) meth_body) ;
meth \<noteq> meth' ;
find_meth_def_in_list ( (meth_def_list) ) (meth) (meth_def_opt)\<rbrakk> \<Longrightarrow>
find_meth_def_in_list ( ([(meth_def)] @ meth_def_list) ) (meth) (meth_def_opt)"
(*defns find_meth_def_in_path_def *)
(* find_meth_def_in_path path meth result: first definition of meth found
   while walking the class path; pairs the definition with the context of
   the class that declares it. *)
inductive find_meth_def_in_path :: "ctxclds \<Rightarrow> meth \<Rightarrow> ctxmeth_def_opt \<Rightarrow> bool"
where
(* defn find_meth_def_in_path *)
fmdip_emptyI: "find_meth_def_in_path ( [] ) (meth) ( (None::ctxmeth_def_opt) )"
| fmdip_cons_trueI: "\<lbrakk>class_methods (cld) (meth_defs) ;
find_meth_def_in_list (meth_defs) (meth) ( Some ( meth_def ) )\<rbrakk> \<Longrightarrow>
find_meth_def_in_path ( ([( ( ctx , cld ) )] @ ctxcld_list) ) (meth) ( (Some ( ctx , meth_def )::ctxmeth_def_opt) )"
| fmdip_cons_falseI: "\<lbrakk>class_methods (cld) (meth_defs) ;
find_meth_def_in_list (meth_defs) (meth) ( None ) ;
find_meth_def_in_path ( (ctxcld_list) ) (meth) (ctxmeth_def_opt)\<rbrakk> \<Longrightarrow>
find_meth_def_in_path ( ([( ( ctx , cld ) )] @ ctxcld_list) ) (meth) (ctxmeth_def_opt)"
(*defns find_meth_def_def *)
(* find_meth_def P ty meth result: method definition lookup for a type,
   i.e. find_meth_def_in_path over the type's superclass path. *)
inductive find_meth_def :: "P \<Rightarrow> ty \<Rightarrow> meth \<Rightarrow> ctxmeth_def_opt \<Rightarrow> bool"
where
(* defn find_meth_def *)
fmd_nullI: "\<lbrakk>find_path_ty (P) (ty) ( None )\<rbrakk> \<Longrightarrow>
find_meth_def (P) (ty) (meth) ( (None::ctxmeth_def_opt) )"
| fmd_optI: "\<lbrakk>find_path_ty (P) (ty) ( Some ( ctxclds ) ) ;
find_meth_def_in_path (ctxclds) (meth) (ctxmeth_def_opt)\<rbrakk> \<Longrightarrow>
find_meth_def (P) (ty) (meth) (ctxmeth_def_opt)"
(*defns mtype_def *)
(* mtype P ty meth mty: the type of method meth of type ty — parameter
   types (resolved in the defining class's context) paired with the
   resolved return type. *)
inductive mtype :: "P \<Rightarrow> ty \<Rightarrow> meth \<Rightarrow> mty \<Rightarrow> bool"
where
(* defn mtype *)
mtypeI: "\<lbrakk>find_meth_def (P) (ty) (meth) ( (Some ( ctx , meth_def )::ctxmeth_def_opt) ) ;
meth_def = (meth_def_def (meth_sig_def cl meth ((List.map (%((cl_XXX::cl),(var_XXX::var),(ty_XXX::ty)).(vd_def cl_XXX var_XXX)) cl_var_ty_list)) ) meth_body) ;
find_type (P) (ctx) (cl) ( (Some ( ty' )) ) ;
list_all (\<lambda>f. f) ((List.map (%((cl_XXX::cl),(var_XXX::var),(ty_XXX::ty)).find_type (P) (ctx) (cl_XXX) ( (Some ( ty_XXX )) )) cl_var_ty_list)) ;
mty = (mty_def ((List.map (%((cl_XXX::cl),(var_XXX::var),(ty_XXX::ty)).ty_XXX) cl_var_ty_list)) ty') \<rbrakk> \<Longrightarrow>
mtype (P) (ty) (meth) (mty)"
(*defns sty_one_def *)
(* Subtyping, single types: every type with a valid path is a subtype of
   ty_top (Object), and a type is a subtype of each class appearing on its
   own superclass path. *)
inductive sty_one :: "P \<Rightarrow> ty \<Rightarrow> ty \<Rightarrow> bool"
where
(* defn one *)
sty_objI: "\<lbrakk>find_path_ty (P) (ty) ( Some ( ctxclds ) )\<rbrakk> \<Longrightarrow>
sty_one (P) (ty) (ty_top)"
| sty_dclI: "\<lbrakk>find_path_ty (P) (ty) ( Some ( ((List.map (%((ctx_XXX::ctx),(cld_XXX::cld),(dcl_XXX::dcl)). ( ctx_XXX , cld_XXX ) ) ctx_cld_dcl_list)) ) ) ;
list_all (\<lambda>f. f) ((List.map (%((ctx_XXX::ctx),(cld_XXX::cld),(dcl_XXX::dcl)).class_name (cld_XXX) (dcl_XXX)) ctx_cld_dcl_list)) ;
( ctx' , dcl' ) \<in> set ((List.map (%((ctx_XXX::ctx),(cld_XXX::cld),(dcl_XXX::dcl)).(ctx_XXX,dcl_XXX)) ctx_cld_dcl_list)) \<rbrakk> \<Longrightarrow>
sty_one (P) (ty) ((ty_def ctx' dcl'))"
(*defns sty_many_def *)
(* Subtyping lifted pointwise to equal-length lists of types. *)
inductive sty_many :: "P \<Rightarrow> tys \<Rightarrow> tys \<Rightarrow> bool"
where
(* defn many *)
sty_manyI: "\<lbrakk> tys = ((List.map (%((ty_XXX::ty),(ty_'::ty)).ty_XXX) ty_ty'_list)) ;
tys' = ((List.map (%((ty_XXX::ty),(ty_'::ty)).ty_') ty_ty'_list)) ;
list_all (\<lambda>f. f) ((List.map (%((ty_XXX::ty),(ty_'::ty)).sty_one (P) (ty_XXX) (ty_')) ty_ty'_list)) \<rbrakk> \<Longrightarrow>
sty_many (P) (tys) (tys')"
(*defns sty_option_def *)
(* Subtyping lifted to optional types: both sides must be Some and the
   carried types must be in sty_one. *)
inductive sty_option :: "P \<Rightarrow> ty_opt \<Rightarrow> ty_opt \<Rightarrow> bool"
where
(* defn option *)
sty_optionI: "\<lbrakk> ty_opt = (Some ( ty )) ;
ty_opt' = (Some ( ty' )) ;
sty_one (P) (ty) (ty')\<rbrakk> \<Longrightarrow>
sty_option (P) (ty_opt) (ty_opt')"
(*defns well_formedness *)
(* Mutually-inductive well-formedness judgments:
   - wf_object: a value (null or an oid present in the heap) conforms to
     an expected optional type;
   - wf_varstate: every variable in \<Gamma>'s domain maps in L to a value
     well-formed at its declared type, and L has a finite domain;
   - wf_heap: every heap object has a type whose fields all hold
     well-formed values of the fields' declared types;
   - wf_config: program, heap and variable state are well formed, and (for
     a normal configuration) all pending statements are well formed;
   - wf_stmt: statement typing rules;
   - wf_meth: a method body is well typed against its declaring class;
   - wf_class_common / wf_class: class-level checks (no self-inheritance,
     distinct fresh field names, well-typed methods, method overriding
     preserves method types);
   - wf_program: all classes well formed, names distinct, hierarchy
     acyclic. *)
inductive wf_object :: "P \<Rightarrow> H \<Rightarrow> v_opt \<Rightarrow> ty_opt \<Rightarrow> bool"
and wf_varstate :: "P \<Rightarrow> \<Gamma> \<Rightarrow> H \<Rightarrow> L \<Rightarrow> bool"
and wf_heap :: "P \<Rightarrow> H \<Rightarrow> bool"
and wf_config :: "\<Gamma> \<Rightarrow> config \<Rightarrow> bool"
and wf_stmt :: "P \<Rightarrow> \<Gamma> \<Rightarrow> s \<Rightarrow> bool"
and wf_meth :: "P \<Rightarrow> ty \<Rightarrow> meth_def \<Rightarrow> bool"
and wf_class_common :: "P \<Rightarrow> ctx \<Rightarrow> dcl \<Rightarrow> cl \<Rightarrow> fds \<Rightarrow> meth_defs \<Rightarrow> bool"
and wf_class :: "P \<Rightarrow> cld \<Rightarrow> bool"
and wf_program :: "P \<Rightarrow> bool"
where
(* defn object *)
wf_nullI: "\<lbrakk> ty_opt = (Some ( ty )) \<rbrakk> \<Longrightarrow>
wf_object (P) (H) ( Some v_null ) (ty_opt)"
| wf_objectI: "\<lbrakk>sty_option (P) ( (case H oid of
None \<Rightarrow> None
| Some tyfs \<Rightarrow> Some (fst tyfs)) ) (ty_opt)\<rbrakk> \<Longrightarrow>
wf_object (P) (H) ( Some (v_oid oid) ) (ty_opt)"
| (* defn varstate *)
wf_varstateI: "\<lbrakk> finite (dom ( L )) ;
\<forall> x \<in> dom \<Gamma> . wf_object (P) (H) ( L x ) ( \<Gamma> x ) \<rbrakk> \<Longrightarrow>
wf_varstate (P) (\<Gamma>) (H) (L)"
| (* defn heap *)
wf_heapI: "\<lbrakk> finite (dom ( H )) ;
\<forall> oid \<in> dom H . ( \<exists> ty . (case H oid of
None \<Rightarrow> None
| Some tyfs \<Rightarrow> Some (fst tyfs)) = (Some ( ty )) \<and> (\<exists> fs . fields (P) (ty) ( Some ( fs ) ) \<and> (\<forall> f \<in> set fs . \<exists> ty' . ( ftype (P) (ty) (f) (ty') \<and> wf_object (P) (H) ( (case H oid of
None \<Rightarrow> None
| Some tyfs \<Rightarrow> (snd tyfs) f ) ) ( (Some ( ty' )) ) ) ) ) ) \<rbrakk> \<Longrightarrow>
wf_heap (P) (H)"
| (* defn config *)
wf_all_exI: "\<lbrakk>wf_program (P) ;
wf_heap (P) (H) ;
wf_varstate (P) (\<Gamma>) (H) (L)\<rbrakk> \<Longrightarrow>
wf_config (\<Gamma>) ((config_ex P L H Exception))"
| wf_allI: "\<lbrakk>wf_program (P) ;
wf_heap (P) (H) ;
wf_varstate (P) (\<Gamma>) (H) (L) ;
list_all (\<lambda>f. f) ((List.map (%(s_XXX::s).wf_stmt (P) (\<Gamma>) (s_XXX)) s_list)) \<rbrakk> \<Longrightarrow>
wf_config (\<Gamma>) ((config_normal P L H (s_list)))"
| (* defn stmt *)
wf_blockI: "\<lbrakk> list_all (\<lambda>f. f) ((List.map (%(s_XXX::s).wf_stmt (P) (\<Gamma>) (s_XXX)) s_list)) \<rbrakk> \<Longrightarrow>
wf_stmt (P) (\<Gamma>) ((s_block (s_list)))"
| wf_var_assignI: "\<lbrakk>sty_option (P) ( \<Gamma> x ) ( \<Gamma> (x_var var) )\<rbrakk> \<Longrightarrow>
wf_stmt (P) (\<Gamma>) ((s_ass var x))"
| wf_field_readI: "\<lbrakk> \<Gamma> x = (Some ( ty )) ;
ftype (P) (ty) (f) (ty') ;
sty_option (P) ( (Some ( ty' )) ) ( \<Gamma> (x_var var) )\<rbrakk> \<Longrightarrow>
wf_stmt (P) (\<Gamma>) ((s_read var x f))"
| wf_field_writeI: "\<lbrakk> \<Gamma> x = (Some ( ty )) ;
ftype (P) (ty) (f) (ty') ;
sty_option (P) ( \<Gamma> y ) ( (Some ( ty' )) )\<rbrakk> \<Longrightarrow>
wf_stmt (P) (\<Gamma>) ((s_write x f y))"
| wf_ifI: "\<lbrakk> sty_option (P) ( \<Gamma> x ) ( \<Gamma> y ) \<or> sty_option (P) ( \<Gamma> y ) ( \<Gamma> x ) ;
wf_stmt (P) (\<Gamma>) (s1) ;
wf_stmt (P) (\<Gamma>) (s2)\<rbrakk> \<Longrightarrow>
wf_stmt (P) (\<Gamma>) ((s_if x y s1 s2))"
| wf_newI: "\<lbrakk>find_type (P) (ctx) (cl) ( (Some ( ty )) ) ;
sty_option (P) ( (Some ( ty )) ) ( \<Gamma> (x_var var) )\<rbrakk> \<Longrightarrow>
wf_stmt (P) (\<Gamma>) ((s_new var ctx cl))"
| wf_mcallI: "\<lbrakk> Y = ((List.map (%((y_XXX::x),(ty_XXX::ty)).y_XXX) y_ty_list)) ;
\<Gamma> x = (Some ( ty )) ;
mtype (P) (ty) (meth) ((mty_def ((List.map (%((y_XXX::x),(ty_XXX::ty)).ty_XXX) y_ty_list)) ty')) ;
list_all (\<lambda>f. f) ((List.map (%((y_XXX::x),(ty_XXX::ty)).sty_option (P) ( \<Gamma> y_XXX ) ( (Some ( ty_XXX )) )) y_ty_list)) ;
sty_option (P) ( (Some ( ty' )) ) ( \<Gamma> (x_var var) )\<rbrakk> \<Longrightarrow>
wf_stmt (P) (\<Gamma>) ((s_call var x meth Y))"
| (* defn meth *)
wf_methodI: "\<lbrakk> distinct ( ((List.map (%((cl_XXX::cl),(var_XXX::var),(ty_XXX::ty)).var_XXX) cl_var_ty_list)) ) ;
list_all (\<lambda>f. f) ((List.map (%((cl_XXX::cl),(var_XXX::var),(ty_XXX::ty)).find_type (P) (ctx) (cl_XXX) ( (Some ( ty_XXX )) )) cl_var_ty_list)) ;
\<Gamma> = ( (map_of ( ((List.map (%((cl_XXX::cl),(var_XXX::var),(ty_XXX::ty)).((x_var var_XXX),ty_XXX)) cl_var_ty_list)) )) ( x_this \<mapsto> (ty_def ctx dcl) )) ;
list_all (\<lambda>f. f) ((List.map (%(s_XXX::s).wf_stmt (P) (\<Gamma>) (s_XXX)) s_list)) ;
find_type (P) (ctx) (cl) ( (Some ( ty )) ) ;
sty_option (P) ( \<Gamma> y ) ( (Some ( ty )) )\<rbrakk> \<Longrightarrow>
wf_meth (P) ((ty_def ctx dcl)) ((meth_def_def (meth_sig_def cl meth ((List.map (%((cl_XXX::cl),(var_XXX::var),(ty_XXX::ty)).(vd_def cl_XXX var_XXX)) cl_var_ty_list)) ) (meth_body_def (s_list) y)))"
| (* defn class_common *)
wf_class_commonI: "\<lbrakk>find_type (P) (ctx) (cl) ( (Some ( ty )) ) ;
(ty_def ctx dcl) \<noteq> ty ;
distinct ( ((List.map (%((cl_XXX::cl),(f_XXX::f),(ty_XXX::ty)).f_XXX) cl_f_ty_list)) ) ;
fields (P) (ty) ( Some ( fs ) ) ;
(set ((List.map (%((cl_XXX::cl),(f_XXX::f),(ty_XXX::ty)).f_XXX) cl_f_ty_list)) ) \<inter> (set fs ) = {} ;
list_all (\<lambda>f. f) ((List.map (%((cl_XXX::cl),(f_XXX::f),(ty_XXX::ty)).find_type (P) (ctx) (cl_XXX) ( (Some ( ty_XXX )) )) cl_f_ty_list)) ;
list_all (\<lambda>f. f) ((List.map (%((meth_def_XXX::meth_def),(meth_XXX::meth)).wf_meth (P) ((ty_def ctx dcl)) (meth_def_XXX)) meth_def_meth_list)) ;
list_all (\<lambda>f. f) ((List.map (%((meth_def_XXX::meth_def),(meth_XXX::meth)).method_name (meth_def_XXX) (meth_XXX)) meth_def_meth_list)) ;
distinct ( ((List.map (%((meth_def_XXX::meth_def),(meth_XXX::meth)).meth_XXX) meth_def_meth_list)) ) ;
methods (P) (ty) ( ((List.map (%((meth_'::meth),(mty_XXX::mty),(mty_'::mty)).meth_') meth'_mty_mty'_list)) ) ;
list_all (\<lambda>f. f) ((List.map (%((meth_'::meth),(mty_XXX::mty),(mty_'::mty)).mtype (P) ((ty_def ctx dcl)) (meth_') (mty_XXX)) meth'_mty_mty'_list)) ;
list_all (\<lambda>f. f) ((List.map (%((meth_'::meth),(mty_XXX::mty),(mty_'::mty)).mtype (P) (ty) (meth_') (mty_')) meth'_mty_mty'_list)) ;
list_all (\<lambda>f. f) ((List.map (%((meth_'::meth),(mty_XXX::mty),(mty_'::mty)). meth_' \<in> set ((List.map (%((meth_def_XXX::meth_def),(meth_XXX::meth)).meth_XXX) meth_def_meth_list)) \<longrightarrow> mty_XXX = mty_' ) meth'_mty_mty'_list)) \<rbrakk> \<Longrightarrow>
wf_class_common (P) (ctx) (dcl) (cl) ( ((List.map (%((cl_XXX::cl),(f_XXX::f),(ty_XXX::ty)).(fd_def cl_XXX f_XXX)) cl_f_ty_list)) ) ( ((List.map (%((meth_def_XXX::meth_def),(meth_XXX::meth)).meth_def_XXX) meth_def_meth_list)) )"
| (* defn class *)
wf_classI: "\<lbrakk> (cld_def dcl cl fds meth_defs) \<in> set P ;
wf_class_common (P) (ctx_def) (dcl) (cl) (fds) (meth_defs)\<rbrakk> \<Longrightarrow>
wf_class (P) ((cld_def dcl cl fds meth_defs))"
| (* defn program *)
wf_programI: "\<lbrakk> P = (cld_list) ;
distinct_names (P) ;
list_all (\<lambda>f. f) ((List.map (%(cld_XXX::cld).wf_class (P) (cld_XXX)) cld_list)) ;
acyclic_clds (P)\<rbrakk> \<Longrightarrow>
wf_program (P)"
(*defns var_trans *)
(* tr_s T s s': statement translation under a variable map T. Each
   variable occurrence x is replaced by T x when defined (falling back to
   the original when T is undefined, or when the image of an assigned
   variable is x_this). Used by r_mcallI to rename method-body variables
   to fresh ones at call time. *)
inductive tr_s :: "T \<Rightarrow> s \<Rightarrow> s \<Rightarrow> bool"
where
(* defn tr_s *)
tr_s_blockI: "\<lbrakk> list_all (\<lambda>f. f) ((List.map (%((s_XXX::s),(s_'::s)).tr_s (T) (s_XXX) (s_')) s_s'_list)) \<rbrakk> \<Longrightarrow>
tr_s (T) ((s_block ((List.map (%((s_XXX::s),(s_'::s)).s_XXX) s_s'_list)))) ((s_block ((List.map (%((s_XXX::s),(s_'::s)).s_') s_s'_list))))"
| tr_s_var_assignI: "\<lbrakk> (case T (x_var var ) of
None \<Rightarrow> var | Some x' \<Rightarrow>
(case x' of x_this \<Rightarrow> var
| x_var var' \<Rightarrow> var')) = var' ;
(case T x of None \<Rightarrow> x
| Some x' \<Rightarrow> x') = x' \<rbrakk> \<Longrightarrow>
tr_s (T) ((s_ass var x)) ((s_ass var' x'))"
| tr_s_field_readI: "\<lbrakk> (case T (x_var var ) of
None \<Rightarrow> var | Some x' \<Rightarrow>
(case x' of x_this \<Rightarrow> var
| x_var var' \<Rightarrow> var')) = var' ;
(case T x of None \<Rightarrow> x
| Some x' \<Rightarrow> x') = x' \<rbrakk> \<Longrightarrow>
tr_s (T) ((s_read var x f)) ((s_read var' x' f))"
| tr_s_field_writeI: "\<lbrakk> (case T x of None \<Rightarrow> x
| Some x' \<Rightarrow> x') = x' ;
(case T y of None \<Rightarrow> y
| Some x' \<Rightarrow> x') = y' \<rbrakk> \<Longrightarrow>
tr_s (T) ((s_write x f y)) ((s_write x' f y'))"
| tr_s_ifI: "\<lbrakk> (case T x of None \<Rightarrow> x
| Some x' \<Rightarrow> x') = x' ;
(case T y of None \<Rightarrow> y
| Some x' \<Rightarrow> x') = y' ;
tr_s (T) (s1) (s1') ;
tr_s (T) (s2) (s2')\<rbrakk> \<Longrightarrow>
tr_s (T) ((s_if x y s1 s2)) ((s_if x' y' s1' s2'))"
| tr_s_newI: "\<lbrakk> (case T (x_var var ) of
None \<Rightarrow> var | Some x' \<Rightarrow>
(case x' of x_this \<Rightarrow> var
| x_var var' \<Rightarrow> var')) = var' \<rbrakk> \<Longrightarrow>
tr_s (T) ((s_new var ctx cl)) ((s_new var' ctx cl))"
| tr_s_mcallI: "\<lbrakk> (case T (x_var var ) of
None \<Rightarrow> var | Some x' \<Rightarrow>
(case x' of x_this \<Rightarrow> var
| x_var var' \<Rightarrow> var')) = var' ;
(case T x of None \<Rightarrow> x
| Some x' \<Rightarrow> x') = x' ;
list_all (\<lambda>f. f) ((List.map (%((y_XXX::x),(y_'::x)). (case T y_XXX of None \<Rightarrow> y_XXX
| Some x' \<Rightarrow> x') = y_' ) y_y'_list)) \<rbrakk> \<Longrightarrow>
tr_s (T) ((s_call var x meth ((List.map (%((y_XXX::x),(y_'::x)).y_XXX) y_y'_list)) )) ((s_call var' x' meth ((List.map (%((y_XXX::x),(y_'::x)).y_') y_y'_list)) ))"
(*defns reduction *)
(* r_stmt: small-step reduction between configurations. Field access and
   method call on a null receiver step to an exceptional configuration
   (ex_npe); object creation allocates a fresh oid with all fields null;
   method call renames formals to fresh variables (via tr_s), binds the
   receiver to a fresh x' mapped from x_this, and appends an assignment of
   the (renamed) result variable to the caller's target. *)
inductive r_stmt :: "config \<Rightarrow> config \<Rightarrow> bool"
where
(* defn stmt *)
r_blockI: "r_stmt ((config_normal P L H ([((s_block (s_list)))] @ s'_list))) ((config_normal P L H (s_list @ s'_list)))"
| r_var_assignI: "\<lbrakk> L x = Some v \<rbrakk> \<Longrightarrow>
r_stmt ((config_normal P L H ([((s_ass var x))] @ s_list))) ((config_normal P ( L ( (x_var var) \<mapsto> v )) H (s_list)))"
| r_field_read_npeI: "\<lbrakk> L x = Some v_null \<rbrakk> \<Longrightarrow>
r_stmt ((config_normal P L H ([((s_read var x f))] @ s_list))) ((config_ex P L H ex_npe))"
| r_field_readI: "\<lbrakk> L x = Some (v_oid oid) ;
(case H oid of
None \<Rightarrow> None
| Some tyfs \<Rightarrow> (snd tyfs) f ) = Some v \<rbrakk> \<Longrightarrow>
r_stmt ((config_normal P L H ([((s_read var x f))] @ s_list))) ((config_normal P ( L ( (x_var var) \<mapsto> v )) H (s_list)))"
| r_field_write_npeI: "\<lbrakk> L x = Some v_null \<rbrakk> \<Longrightarrow>
r_stmt ((config_normal P L H ([((s_write x f y))] @ s_list))) ((config_ex P L H ex_npe))"
| r_field_writeI: "\<lbrakk> L x = Some (v_oid oid) ;
L y = Some v \<rbrakk> \<Longrightarrow>
r_stmt ((config_normal P L H ([((s_write x f y))] @ s_list))) ((config_normal P L (case H oid of
None \<Rightarrow> arbitrary
| Some tyfs \<Rightarrow>
(( H ( oid \<mapsto>
(fst tyfs, snd tyfs ( f \<mapsto> v ))))::H)) (s_list)))"
| r_if_trueI: "\<lbrakk> L x = Some v ;
L y = Some w ;
v = w \<rbrakk> \<Longrightarrow>
r_stmt ((config_normal P L H ([((s_if x y s1 s2))] @ s'_list))) ((config_normal P L H ([(s1)] @ s'_list)))"
| r_if_falseI: "\<lbrakk> L x = Some v ;
L y = Some w ;
v \<noteq> w \<rbrakk> \<Longrightarrow>
r_stmt ((config_normal P L H ([((s_if x y s1 s2))] @ s'_list))) ((config_normal P L H ([(s2)] @ s'_list)))"
| r_newI: "\<lbrakk>find_type (P) (ctx) (cl) ( (Some ( ty )) ) ;
fields (P) (ty) ( Some ( (f_list) ) ) ;
oid \<notin> dom H ;
H' = (( H ( oid \<mapsto> ( ty ,
map_of ((List.map (%(f_XXX::f).(f_XXX,v_null)) f_list)) )))::H) \<rbrakk> \<Longrightarrow>
r_stmt ((config_normal P L H ([((s_new var ctx cl))] @ s_list))) ((config_normal P ( L ( (x_var var) \<mapsto> (v_oid oid) )) H' (s_list)))"
| r_mcall_npeI: "\<lbrakk> L x = Some v_null \<rbrakk> \<Longrightarrow>
r_stmt ((config_normal P L H ([((s_call var x meth (y_list) ))] @ s_list))) ((config_ex P L H ex_npe))"
| r_mcallI: "\<lbrakk> L x = Some (v_oid oid) ;
(case H oid of
None \<Rightarrow> None
| Some tyfs \<Rightarrow> Some (fst tyfs)) = (Some ( ty )) ;
find_meth_def (P) (ty) (meth) ( (Some ( ctx , (meth_def_def (meth_sig_def cl meth ((List.map (%((y_XXX::x),(cl_XXX::cl),(var_XXX::var),(var_'::var),(v_XXX::v)).(vd_def cl_XXX var_XXX)) y_cl_var_var'_v_list)) ) (meth_body_def ((List.map (%((s_''::s),(s_'::s)).s_') s''_s'_list)) y)) )::ctxmeth_def_opt) ) ;
(set ((List.map (%((y_XXX::x),(cl_XXX::cl),(var_XXX::var),(var_'::var),(v_XXX::v)).(x_var var_')) y_cl_var_var'_v_list)) ) Int (dom L ) = {} ;
distinct ( ((List.map (%((y_XXX::x),(cl_XXX::cl),(var_XXX::var),(var_'::var),(v_XXX::v)).var_') y_cl_var_var'_v_list)) ) ;
x' \<notin> dom L ;
x' \<notin> set ((List.map (%((y_XXX::x),(cl_XXX::cl),(var_XXX::var),(var_'::var),(v_XXX::v)).(x_var var_')) y_cl_var_var'_v_list)) ;
list_all (\<lambda>f. f) ((List.map (%((y_XXX::x),(cl_XXX::cl),(var_XXX::var),(var_'::var),(v_XXX::v)). L y_XXX = Some v_XXX ) y_cl_var_var'_v_list)) ;
L' = ( ( L ++ (map_of ( ((List.map (%((y_XXX::x),(cl_XXX::cl),(var_XXX::var),(var_'::var),(v_XXX::v)).((x_var var_'),v_XXX)) y_cl_var_var'_v_list)) ))) ( x' \<mapsto> (v_oid oid) )) ;
T = ( (map_of ( ((List.map (%((y_XXX::x),(cl_XXX::cl),(var_XXX::var),(var_'::var),(v_XXX::v)).((x_var var_XXX),(x_var var_'))) y_cl_var_var'_v_list)) )) ( x_this \<mapsto> x' )) ;
list_all (\<lambda>f. f) ((List.map (%((s_''::s),(s_'::s)).tr_s (T) (s_') (s_'')) s''_s'_list)) ;
(case T y of None \<Rightarrow> y
| Some x' \<Rightarrow> x') = y' \<rbrakk> \<Longrightarrow>
r_stmt ((config_normal P L H ([((s_call var x meth ((List.map (%((y_XXX::x),(cl_XXX::cl),(var_XXX::var),(var_'::var),(v_XXX::v)).y_XXX) y_cl_var_var'_v_list)) ))] @ s_list))) ((config_normal P L' H ((List.map (%((s_''::s),(s_'::s)).s_'') s''_s'_list) @ [((s_ass var y'))] @ s_list)))"
end
|
{"author": "data61", "repo": "PSL", "sha": "2a71eac0db39ad490fe4921a5ce1e4344dc43b12", "save_path": "github-repos/isabelle/data61-PSL", "path": "github-repos/isabelle/data61-PSL/PSL-2a71eac0db39ad490fe4921a5ce1e4344dc43b12/SeLFiE/Example/afp-2020-05-16/thys/LightweightJava/Lightweight_Java_Definition.thy"}
|
import matplotlib
matplotlib.use('Pdf')
import matplotlib.pyplot as plt
import numpy as np
import os.path as osp
import rllab.misc.logger as logger
import rllab_maml.plotter as plotter
import tensorflow as tf
import time
from rllab_maml.algos.base import RLAlgorithm
from sandbox_maml.rocky.tf.policies.base import Policy
from sandbox.ours.sampler.MAML_sampler.maml_batch_sampler import BatchSampler
from sandbox.ours.sampler.MAML_sampler.maml_vectorized_sampler import MAMLVectorizedSampler
from sandbox_maml.rocky.tf.spaces import Discrete
from rllab_maml.sampler.stateful_pool import singleton_pool
class BatchMAMLPolopt(RLAlgorithm):
"""
Base class for batch sampling-based policy optimization methods, with maml.
This includes various policy gradient methods like vpg, npg, ppo, trpo, etc.
"""
def __init__(
self,
env,
policy,
baseline,
scope=None,
n_itr=500,
start_itr=0,
# Note that the number of trajectories for grad upate = batch_size
# Defaults are 10 trajectories of length 500 for gradient update
batch_size=100,
max_path_length=500,
meta_batch_size = 100,
num_grad_updates=1,
discount=0.99,
gae_lambda=1,
entropy_bonus=0,
plot=False,
pause_for_plot=False,
center_adv=True,
positive_adv=False,
store_paths=False,
whole_paths=True,
fixed_horizon=False,
sampler_cls=None,
sampler_args=None,
force_batch_sampler=False,
parallel_sampler=True,
use_maml=True,
load_policy=None,
**kwargs
):
"""
:param env: Environment
:param policy: Policy
:type policy: Policy
:param baseline: Baseline
:param scope: Scope for identifying the algorithm. Must be specified if running multiple algorithms
simultaneously, each using different environments and policies
:param n_itr: Number of iterations.
:param start_itr: Starting iteration.
:param batch_size: Number of trajectories for grad update
:param max_path_length: Maximum length of a single rollout.
:param meta_batch_size: Number of tasks sampled per meta-update
:param num_grad_updates: Number of fast gradient updates
:param discount: Discount.
:param gae_lambda: Lambda used for generalized advantage estimation.
:param plot: Plot evaluation run after each iteration.
:param pause_for_plot: Whether to pause before contiuing when plotting.
:param center_adv: Whether to rescale the advantages so that they have mean 0 and standard deviation 1.
:param positive_adv: Whether to shift the advantages so that they are always positive. When used in
conjunction with center_adv the advantages will be standardized before shifting.
:param store_paths: Whether to save all paths data to the snapshot.
:return:
"""
self.env = env
self.policy = policy
self.load_policy=load_policy
self.baseline = baseline
self.scope = scope
self.n_itr = n_itr
self.start_itr = start_itr
# batch_size is the number of trajectories for one fast grad update.
# self.batch_size is the number of total transitions to collect.
self.batch_size = batch_size * max_path_length * meta_batch_size
self.max_path_length = max_path_length
self.discount = discount
self.gae_lambda = gae_lambda
self.entropy_bonus = entropy_bonus
self.plot = plot
self.pause_for_plot = pause_for_plot
self.center_adv = center_adv
self.positive_adv = positive_adv
self.store_paths = store_paths
self.whole_paths = whole_paths
self.fixed_horizon = fixed_horizon
self.meta_batch_size = meta_batch_size # number of tasks
self.num_grad_updates = num_grad_updates # number of gradient steps during training
if sampler_cls is None:
import multiprocessing
singleton_pool.initialize(n_parallel=multiprocessing.cpu_count())
if singleton_pool.n_parallel > 1 and False: # Use vectorized sampler since it is faster
sampler_cls = BatchSampler
sampler_args = dict(n_envs=self.meta_batch_size)
else:
sampler_cls = MAMLVectorizedSampler
sampler_args = dict(n_tasks=self.meta_batch_size, n_envs=self.meta_batch_size, parallel=parallel_sampler)
self.sampler = sampler_cls(self, **sampler_args)
def start_worker(self):
self.sampler.start_worker()
if self.plot:
plotter.init_plot(self.env, self.policy)
def shutdown_worker(self):
self.sampler.shutdown_worker()
def obtain_samples(self, itr, reset_args=None, log_prefix=''):
# This obtains samples using self.policy, and calling policy.get_actions(obses)
# return_dict specifies how the samples should be returned (dict separates samples
# by task)
paths = self.sampler.obtain_samples(itr, reset_args, return_dict=True, log_prefix=log_prefix)
assert type(paths) == dict
return paths
def process_samples(self, itr, paths, prefix='', log=True):
return self.sampler.process_samples(itr, paths, log=log, log_prefix=prefix)
def train(self):
# TODO - make this a util
flatten_list = lambda l: [item for sublist in l for item in sublist]
with tf.Session() as sess:
# Code for loading a previous policy. Somewhat hacky because needs to be in sess.
if self.load_policy is not None:
import joblib
self.policy = joblib.load(self.load_policy)['policy']
self.init_opt()
# initialize uninitialized vars (only initialize vars that were not loaded)
uninit_vars = []
for var in tf.global_variables():
# note - this is hacky, may be better way to do this in newer TF.
try:
sess.run(var)
except tf.errors.FailedPreconditionError:
uninit_vars.append(var)
sess.run(tf.variables_initializer(uninit_vars))
self.start_worker()
start_time = time.time()
for itr in range(self.start_itr, self.n_itr):
itr_start_time = time.time()
with logger.prefix('itr #%d | ' % itr):
logger.log("Sampling set of tasks/goals for this meta-batch...")
# sample environment configuration
env = self.env
while not ('sample_env_params' in dir(env) or 'sample_goals' in dir(env)):
env = env._wrapped_env
if 'sample_goals' in dir(env):
learner_env_params = env.sample_goals(self.meta_batch_size)
elif 'sample_env_params' in dir(env):
learner_env_params = env.sample_env_params(self.meta_batch_size)
else:
raise NotImplementedError
self.policy.switch_to_init_dist() # Switch to pre-update policy
all_samples_data, all_paths = [], []
list_sampling_time, list_inner_step_time, list_outer_step_time, list_proc_samples_time = [], [], [], []
start_total_inner_time = time.time()
for step in range(self.num_grad_updates+1):
logger.log('** Step ' + str(step) + ' **')
""" -------------------- Sampling --------------------------"""
logger.log("Obtaining samples...")
time_env_sampling_start = time.time()
paths = self.obtain_samples(itr, reset_args=learner_env_params, log_prefix=str(step))
list_sampling_time.append(time.time() - time_env_sampling_start)
all_paths.append(paths)
""" ----------------- Processing Samples ---------------------"""
logger.log("Processing samples...")
time_proc_samples_start = time.time()
samples_data = {}
for key in paths.keys(): # the keys are the tasks
# don't log because this will spam the consol with every task.
samples_data[key] = self.process_samples(itr, paths[key], log=False)
all_samples_data.append(samples_data)
list_proc_samples_time.append(time.time() - time_proc_samples_start)
# for logging purposes
self.process_samples(itr, flatten_list(paths.values()), prefix=str(step), log=True)
logger.log("Logging diagnostics...")
self.log_diagnostics(flatten_list(paths.values()), prefix=str(step))
""" ------------------- Inner Policy Update --------------------"""
time_inner_step_start = time.time()
if step < self.num_grad_updates:
logger.log("Computing policy updates...")
self.policy.compute_updated_dists(samples_data)
list_inner_step_time.append(time.time() - time_inner_step_start)
total_inner_time = time.time() - start_total_inner_time
time_maml_opt_start = time.time()
""" ------------------ Outer Policy Update ---------------------"""
logger.log("Optimizing policy...")
# This needs to take all samples_data so that it can construct graph for meta-optimization.
time_outer_step_start = time.time()
self.optimize_policy(itr, all_samples_data)
""" ------------------- Logging Stuff --------------------------"""
logger.record_tabular('Time-OuterStep', time.time() - time_outer_step_start)
logger.record_tabular('Time-TotalInner', total_inner_time)
logger.record_tabular('Time-InnerStep', np.sum(list_inner_step_time))
logger.record_tabular('Time-SampleProc', np.sum(list_proc_samples_time))
logger.record_tabular('Time-Sampling', np.sum(list_sampling_time))
logger.log("Saving snapshot...")
params = self.get_itr_snapshot(itr, all_samples_data[-1]) # , **kwargs)
if self.store_paths:
params["paths"] = all_samples_data[-1]["paths"]
logger.save_itr_params(itr, params)
logger.log("Saved")
logger.record_tabular('Time', time.time() - start_time)
logger.record_tabular('ItrTime', time.time() - itr_start_time)
logger.record_tabular('Time-MAMLSteps', time.time() - time_maml_opt_start)
logger.dump_tabular(with_prefix=False)
self.shutdown_worker()
def log_diagnostics(self, paths, prefix):
    """Delegate diagnostic logging of the sampled *paths* to the components.

    The env and policy receive the logging *prefix*; the baseline's
    ``log_diagnostics`` takes paths only.
    """
    for component in (self.env, self.policy):
        component.log_diagnostics(paths, prefix)
    self.baseline.log_diagnostics(paths)
def init_opt(self):
    """Set up the optimization procedure.

    Subclasses must override this. With a TensorFlow backend this is
    typically where variables are declared and update functions compiled.

    Raises
    ------
    NotImplementedError
        Always, in this abstract base implementation.
    """
    raise NotImplementedError
def get_itr_snapshot(self, itr, samples_data):
    """Collect the data to persist in the snapshot for iteration *itr*.

    Subclasses must override this and return a picklable mapping of
    everything worth saving for the iteration.

    Raises
    ------
    NotImplementedError
        Always, in this abstract base implementation.
    """
    raise NotImplementedError
def optimize_policy(self, itr, samples_data):
    """Perform the outer (meta) policy optimization step for iteration *itr*.

    *samples_data* carries the processed samples from all inner gradient
    updates. Subclasses must override this; added docstring for consistency
    with the sibling abstract methods (``init_opt``, ``get_itr_snapshot``).

    Raises
    ------
    NotImplementedError
        Always, in this abstract base implementation.
    """
    raise NotImplementedError
def update_plot(self):
    """Refresh the live policy plot when plotting is enabled."""
    if not self.plot:
        return
    # NOTE(review): `plotter` is presumably imported at module level — confirm.
    plotter.update_plot(self.policy, self.max_path_length)
|
{"hexsha": "6855c05c30582090154cfb0dcaa5a047d8cf4660", "size": 12256, "ext": "py", "lang": "Python", "max_stars_repo_path": "sandbox/ours/algos/MAML/batch_maml_polopt.py", "max_stars_repo_name": "jackwilkinson255/mbmpo_master", "max_stars_repo_head_hexsha": "e9e0eaf542c7895764dcb0bfee28752818124ff2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 28, "max_stars_repo_stars_event_min_datetime": "2018-11-15T14:14:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-10T01:53:43.000Z", "max_issues_repo_path": "sandbox/ours/algos/MAML/batch_maml_polopt.py", "max_issues_repo_name": "hongzimao/model_ensemble_meta_learning", "max_issues_repo_head_hexsha": "8b1351df94dfe530efaff1118022315c8d877774", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2019-05-05T23:39:01.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-15T15:28:06.000Z", "max_forks_repo_path": "sandbox/ours/algos/MAML/batch_maml_polopt.py", "max_forks_repo_name": "hongzimao/model_ensemble_meta_learning", "max_forks_repo_head_hexsha": "8b1351df94dfe530efaff1118022315c8d877774", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2018-11-15T16:47:02.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-28T14:58:01.000Z", "avg_line_length": 45.9026217228, "max_line_length": 123, "alphanum_fraction": 0.5955450392, "include": true, "reason": "import numpy", "num_tokens": 2311}
|
import Base.-, Base.+
# Wrapping arithmetic for WrappingInt32: adding a UInt32 relies on native
# unsigned overflow, so all offsets are taken modulo 2^32.
# @inline -(a::WrappingInt32, b::WrappingInt32) = a.val - b.val
@inline +(a::WrappingInt32, b::UInt32) = WrappingInt32(a.val + b)
# Subtraction is addition of the two's-complement negation of b.
@inline -(a::WrappingInt32, b::UInt32) = a + -b
# Generic Integer overloads funnel through the UInt32 versions above.
@inline +(a::WrappingInt32, b::Integer) = a + UInt32(b)
@inline -(a::WrappingInt32, b::Integer) = a + -UInt32(b)
"""
    wrap(n, isn)

Map the absolute 64-bit sequence number `n` onto a 32-bit `WrappingInt32`
relative to the initial sequence number `isn` (i.e. `isn + (n mod 2^32)`).
"""
function wrap(n::UInt, isn::WrappingInt32)
    # Truncate n to its low 32 bits. `n % UInt32` is the portable idiom:
    # the original `UInt32(n << 32 >> 32)` assumed a 64-bit `UInt` and would
    # yield 0 on platforms where `UInt` is 32 bits, because Julia defines a
    # shift by >= the bit width to be zero.
    return isn + (n % UInt32)
end
wrap(n::Integer, isn::WrappingInt32) = wrap(UInt(n), isn)
# Recover the absolute 64-bit sequence number whose low 32 bits equal
# (n - isn), choosing the candidate closest to `checkpoint`.
function unwrap(n::WrappingInt32, isn::WrappingInt32, checkpoint::Integer)
    ckpt = UInt(checkpoint)
    # Low-32-bit offset of n from the initial sequence number (always < 2^32).
    absolute_seqno_64 = UInt((n.val - isn.val))
    # If the checkpoint has not yet passed the offset, the offset is exact.
    (ckpt <= absolute_seqno_64) && return absolute_seqno_64
    size_period = 0x100000000
    # Count full 2^32 periods between the offset and the checkpoint, rounding
    # to the nearest period: a remainder >= 2^31 rounds up by one period.
    quotient = (ckpt - absolute_seqno_64) >> 32
    remainder = (ckpt - absolute_seqno_64) << 32 >> 32
    return absolute_seqno_64 + ((quotient + (remainder >= size_period ÷ 2)) << UInt32(32))
end
|
{"hexsha": "ec5805900829495357c0b58318eda7cfa07cc689", "size": 915, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/wrapping_integers.jl", "max_stars_repo_name": "AquaIndigo/JLSponge", "max_stars_repo_head_hexsha": "06b6204c40c89194762ae4c15f0526cefb689f0e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-05-09T12:24:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-05T08:49:08.000Z", "max_issues_repo_path": "src/wrapping_integers.jl", "max_issues_repo_name": "AquaIndigo/JLSponge", "max_issues_repo_head_hexsha": "06b6204c40c89194762ae4c15f0526cefb689f0e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-05-07T00:41:27.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-10T00:43:22.000Z", "max_forks_repo_path": "src/wrapping_integers.jl", "max_forks_repo_name": "AquaIndigo/JLSponge", "max_forks_repo_head_hexsha": "06b6204c40c89194762ae4c15f0526cefb689f0e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.125, "max_line_length": 90, "alphanum_fraction": 0.6644808743, "num_tokens": 312}
|
using SnoopCompile
using Test
# A fresh, never-compiled function so @snoopi has something new to record.
uncompiled(x) = x + 1
# @snoopi needs inference-timing support added in 1.2.0-DEV.573; wrapping the
# testset in include_string keeps the macro out of the parse on older versions.
if VERSION >= v"1.2.0-DEV.573"
    include_string(Main, """
    @testset "snoopi" begin
        timing_data = @snoopi uncompiled(2)
        @test any(td->td[2].def.name == :uncompiled, timing_data)
        # Ensure older methods can be tested
        a = rand(Float16, 5)
        timing_data = @snoopi sum(a)
        @test any(td->td[2].def.name == :sum, timing_data)
    end
    """)
end
# issue #26
@snoopc "/tmp/anon.log" begin
    map(x->x^2, [1,2,3])
end
# Parse the compile log back and check that anonymous-function entries do not
# leak extra precompile statements into the Base parcel.
data = SnoopCompile.read("/tmp/anon.log")
pc = SnoopCompile.parcel(reverse!(data[2]))
@test length(pc[:Base]) <= 1
#=
# Simple call
let str = "sum"
keep, pcstring, topmod = SnoopCompile.parse_call("Foo.any($str)")
@test keep
@test pcstring == "Tuple{$str}"
@test topmod == :Main
end
# Operator
let str = "Base.:*, Int, Int"
keep, pcstring, topmod = SnoopCompile.parse_call("Foo.any($str)")
@test keep
@test pcstring == "Tuple{$str}"
@test topmod == :Base
end
# Function as argument
let str = "typeof(Base.identity), Array{Bool, 1}"
keep, pcstring, topmod = SnoopCompile.parse_call("Foo.any($str, Vararg{Any, N} where N)")
@test keep
@test pcstring == "Tuple{$str, Int}"
@test topmod == :Base
end
# Anonymous function closure in a new module as argument
let func = (@eval Main module SnoopTestTemp
func = () -> (y = 2; (x -> x > y))
end).func
str = "getfield(SnoopTestTemp, Symbol(\"$(typeof(func()))\")), Array{Float32, 1}"
keep, pcstring, topmod = SnoopCompile.parse_call("Foo.any($str)")
@test keep
@test pcstring == "Tuple{$str}"
@test topmod == :SnoopTestTemp
end
# Function as a type
let str = "typeof(Base.Sort.sort!), Array{Any, 1}, Base.Sort.MergeSortAlg, Base.Order.By{typeof(Base.string)}"
keep, pcstring, topmod = SnoopCompile.parse_call("Foo.Bar.sort!($str)")
@test keep
@test pcstring == "Tuple{$str}"
@test topmod == :Base
end
=#
# Further tests live in a separate file.
include("colortypes.jl")
|
{"hexsha": "4d747fbe818a302889826fe0d75c91c5bd064d9d", "size": 1993, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "UnofficialJuliaMirror/SnoopCompile.jl-aa65fe97-06da-5843-b5b1-d5d13cad87d2", "max_stars_repo_head_hexsha": "3a8460017f1e6a538b6a860c2dcfd29f1ae75670", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "UnofficialJuliaMirror/SnoopCompile.jl-aa65fe97-06da-5843-b5b1-d5d13cad87d2", "max_issues_repo_head_hexsha": "3a8460017f1e6a538b6a860c2dcfd29f1ae75670", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "UnofficialJuliaMirror/SnoopCompile.jl-aa65fe97-06da-5843-b5b1-d5d13cad87d2", "max_forks_repo_head_hexsha": "3a8460017f1e6a538b6a860c2dcfd29f1ae75670", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.6805555556, "max_line_length": 110, "alphanum_fraction": 0.6236828901, "num_tokens": 625}
|
# -*- coding: utf-8 -*-
# Authors: Federico Raimondo <federaimondo@gmail.com>
# simplified BSD-3 license
import os.path as op
from numpy.testing import assert_array_equal
from scipy import io as sio
from mne.io import read_raw_eximia
from mne.io.tests.test_raw import _test_raw_reader
from mne.datasets.testing import data_path, requires_testing_data
testing_path = data_path(download=False)
@requires_testing_data
def test_eximia_nxe():
    """Test reading Eximia NXE files."""
    fname = op.join(testing_path, 'eximia', 'test_eximia.nxe')
    raw = read_raw_eximia(fname, preload=True)
    assert 'RawEximia' in repr(raw)
    _test_raw_reader(read_raw_eximia, fname=fname,
                     test_scaling=False,  # XXX probably a scaling problem
                     )
    # Compare against the reference MATLAB export of the same recording.
    fname_mat = op.join(testing_path, 'eximia', 'test_eximia.mat')
    mat = sio.loadmat(fname_mat)
    ref_data = mat['data']
    ref_header = mat['header']
    assert raw._data.shape == ref_data.shape
    assert ref_header['Fs'][0, 0][0, 0] == raw.info['sfreq']
    # Channel names: translate the MATLAB labels to the MNE naming scheme.
    ref_names = [entry[0][0] for entry in ref_header['label'][0, 0]]
    ref_names = [name.replace('GATE', 'GateIn').replace('TRIG', 'Trig')
                 for name in ref_names]
    assert raw.ch_names == ref_names
    # Channel types: map MATLAB's 'unknown'/'trigger' onto MNE's 'stim'.
    ref_types = [entry[0][0] for entry in ref_header['chantype'][0, 0]]
    ref_types = [ctype.replace('unknown', 'stim').replace('trigger', 'stim')
                 for ctype in ref_types]
    kind_to_type = {2: 'eeg', 3: 'stim', 202: 'eog'}
    got_types = [kind_to_type[raw.info['chs'][idx]['kind']]
                 for idx, _ in enumerate(raw.ch_names)]
    assert got_types == ref_types
    assert_array_equal(ref_data, raw._data)
|
{"hexsha": "7d9acecca2c3002b6c50e9aa68a8022c999e9f7a", "size": 1675, "ext": "py", "lang": "Python", "max_stars_repo_path": "mne/io/eximia/tests/test_eximia.py", "max_stars_repo_name": "0reza/mne-python", "max_stars_repo_head_hexsha": "da02a256423404a81929d6de278bc63d3192a280", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mne/io/eximia/tests/test_eximia.py", "max_issues_repo_name": "0reza/mne-python", "max_issues_repo_head_hexsha": "da02a256423404a81929d6de278bc63d3192a280", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mne/io/eximia/tests/test_eximia.py", "max_forks_repo_name": "0reza/mne-python", "max_forks_repo_head_hexsha": "da02a256423404a81929d6de278bc63d3192a280", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.4130434783, "max_line_length": 78, "alphanum_fraction": 0.6495522388, "include": true, "reason": "from numpy,from scipy", "num_tokens": 480}
|
import FinanceLib as Fl
import FinanceLib.FixedIncomes.MoneyMarkets as MM
import FinanceLib.FixedIncomes as FI
@testset "FinanceLib.FixedIncomes " begin
    @testset "MoneyMarkets" begin
        # T-bill discount rate and dollar discount round-trip each other.
        @test MM.tBillR(150,98_000,100_000) ≈ 0.048
        @test MM.tBillD(0.048, 150, 100_000) == 2_000
        # Yield measures for a 150-day holding period, buy 98 / sell 95 / coupon 5.
        @test MM.holdingPerYield(98, 95, 5) == 0.020408163265306145
        @test MM.effAnnYield(150, 98, 95, 5) == 0.05038831660532006
        @test MM.moneyMktYield(150, 98, 95, 5) == 0.04897959183673475
        # Time-weighted rate of return, two call signatures.
        @test MM.twrr([4,6,5.775,6.72,5.508],[1,-0.5,0.225,-0.6]) ==
            0.06159232319186159
        @test MM.twrr(1.0, [100, 112, 142.64], [0, 20.])==0.21027878787878795
    end
end
# Bond-specific tests live in their own directory.
include("Bonds/runtests.jl")
|
{"hexsha": "4295d0406b70fdd3762c4d7376838384fa6674a2", "size": 728, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/FixedIncomes/runtests.jl", "max_stars_repo_name": "n-kishaloy/FinanceLib.jl", "max_stars_repo_head_hexsha": "0c8ca56a7e366e8d411bba59f38bca6f0c9ac9d9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/FixedIncomes/runtests.jl", "max_issues_repo_name": "n-kishaloy/FinanceLib.jl", "max_issues_repo_head_hexsha": "0c8ca56a7e366e8d411bba59f38bca6f0c9ac9d9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/FixedIncomes/runtests.jl", "max_forks_repo_name": "n-kishaloy/FinanceLib.jl", "max_forks_repo_head_hexsha": "0c8ca56a7e366e8d411bba59f38bca6f0c9ac9d9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.0, "max_line_length": 77, "alphanum_fraction": 0.6483516484, "num_tokens": 298}
|
import os
import numpy as np
from sklearn import manifold
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0'
import matplotlib.pyplot as plt
from keras.layers import Input
from core.util import print_accuracy,LearningHandler
from core import Conv
import scipy.io as scio
import tensorflow as tf
import scipy.io as sio
from sklearn.cluster import KMeans
from sklearn.metrics import normalized_mutual_info_score as nmi
from keras.utils import to_categorical
import imageio
from keras.models import Model
from core.util import get_scale, print_accuracy, get_cluster_sols, LearningHandler, make_layer_list, train_gen, get_y_preds
import cv2
def run_net(data, params):
    """Run the adversarial-defense training loop on the spectral data split.

    Loads pretrained VAE/adversary weights, then alternates between measuring
    clustering accuracy on clean vs. adversarially-perturbed embeddings and
    training the defense. Stops early when the defense loss plateaus, then
    visualizes the final latent space with t-SNE and reports final accuracy/NMI.

    Parameters
    ----------
    data : dict
        ``data['spectral']['train_and_test']`` must unpack into
        (x_train_unlabeled, y_train_unlabeled, x_val, y_val, x_test, y_test).
    params : dict
        Hyper-parameters: 'img_dim', 'latent_dim', 'n_clusters', 'spec_lr',
        'spec_drop', 'spec_patience', 'batch_size'.
    """
    x_train_unlabeled, y_train_unlabeled, x_val, y_val, x_test, y_test = \
        data['spectral']['train_and_test']
    inputs_vae = Input(shape=(params['img_dim'], params['img_dim'], 1),
                       name='inputs_vae')
    ConvAE = Conv.ConvAE(inputs_vae, params)
    ConvAE.vae.load_weights('MNIST_64.h5')
    ConvAE.Advsior.load_weights('MNIST_ADV_64.h5')
    lh = LearningHandler(lr=params['spec_lr'], drop=params['spec_drop'],
                         lr_tensor=ConvAE.learning_rate,
                         patience=params['spec_patience'])
    lh.on_train_begin()
    losses_vae = np.empty((1000,))
    acc = np.empty((1000,))
    nmi1 = np.empty((1000,))
    # Fixed random perturbation inputs for the defense-training objective.
    noise = 1 * np.random.rand(13000, params['latent_dim'])
    noise1 = 1 * np.random.rand(13000, params['n_clusters'])
    for i in range(1000):
        x_val_t = ConvAE.encoder.predict(x_val)
        x_val_t1 = ConvAE.Advsior.predict(x_val)
        # Cluster assignments on clean vs. adversarially-shifted embeddings.
        x_sp = ConvAE.classfier.predict(x_val_t)
        y_sp = x_sp.argmax(axis=1)
        x_val_y = ConvAE.classfier.predict(x_val_t1 + x_val_t)
        y_sp_1 = x_val_y.argmax(axis=1)
        # Decoder reconstructions (currently unused, kept for inspection).
        x_val_1 = ConvAE.decoder.predict(x_val_t)
        x_val_2 = ConvAE.decoder.predict(x_val_t1 + x_val_t)
        accuracy = print_accuracy(y_sp, y_val, params['n_clusters'])
        nmi1[i] = accuracy
        accuracy = print_accuracy(y_sp_1, y_val, params['n_clusters'])
        losses_vae[i] = ConvAE.train_defense(x_val, noise, noise1,
                                             params['batch_size'])
        # BUG FIX: the original format string referenced an undefined name
        # `M` ("D = {}"), which raised NameError on the first iteration.
        print("1Z Epoch: {}, loss={:2f}".format(i, losses_vae[i]))
        acc[i] = accuracy
        print('NMI: ' + str(np.round(nmi(y_sp, y_val), 4)))
        print('NMI: ' + str(np.round(nmi(y_sp_1, y_val), 4)))
        # Stop once the defense loss change falls below tolerance.
        if i > 1 and np.abs(losses_vae[i] - losses_vae[i - 1]) < 0.0001:
            print('STOPPING EARLY')
            break
    # Visualize the learned latent space with a 2-D t-SNE embedding.
    x_val_t = ConvAE.encoder.predict(x_val)
    tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
    Z_tsne = tsne.fit_transform(x_val_t)
    fig = plt.figure()
    plt.scatter(Z_tsne[:, 0], Z_tsne[:, 1], s=2, c=y_val,
                cmap=plt.cm.get_cmap("jet", 10))
    plt.colorbar(ticks=range(10))
    plt.show()
    print("finished training")
    # Final evaluation straight from the VAE's soft-assignment head.
    x_val_y = ConvAE.vae.predict(x_val)[2]
    y_sp = x_val_y.argmax(axis=1)
    print_accuracy(y_sp, y_val, params['n_clusters'])
    nmi_score1 = nmi(y_sp, y_val)
    print('NMI: ' + str(np.round(nmi_score1, 4)))
def target_distribution(q):
    """Return the target distribution P that sharpens soft labels *q*.

    Squares each soft assignment, normalizes by per-cluster mass (column
    sums), then renormalizes every row to sum to one, enhancing the
    discrimination of the soft label Q.
    """
    sharpened = q ** 2 / q.sum(0)
    row_totals = sharpened.sum(1)
    return (sharpened.T / row_totals).T
|
{"hexsha": "e9c8cf417da937a175c7c56484fc11d17d08107f", "size": 3423, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/applications/ADDC.py", "max_stars_repo_name": "xdxuyang/Adversarial-Learning-for-Robust-Deep-Clustering", "max_stars_repo_head_hexsha": "90b88e0d83ad2225dbe8534cd21d63982dd8b34e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2020-11-14T14:04:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-02T02:44:38.000Z", "max_issues_repo_path": "src/applications/ADDC.py", "max_issues_repo_name": "xdxuyang/Adversarial-Learning-for-Robust-Deep-Clustering", "max_issues_repo_head_hexsha": "90b88e0d83ad2225dbe8534cd21d63982dd8b34e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-07-30T12:24:44.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-20T06:16:49.000Z", "max_forks_repo_path": "src/applications/ADDC.py", "max_forks_repo_name": "xdxuyang/Adversarial-Learning-for-Robust-Deep-Clustering", "max_forks_repo_head_hexsha": "90b88e0d83ad2225dbe8534cd21d63982dd8b34e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-12-04T06:49:28.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-04T06:49:28.000Z", "avg_line_length": 28.525, "max_line_length": 123, "alphanum_fraction": 0.6695880806, "include": true, "reason": "import numpy,import scipy", "num_tokens": 977}
|
"""Test the old numpy pickler, compatibility version."""
import random
# numpy_pickle is not a drop-in replacement of pickle, as it takes
# filenames instead of open files as arguments.
from joblib import numpy_pickle_compat
def test_z_file(tmpdir):
    """Round-trip bytes through a Zfile and verify they come back intact."""
    target = tmpdir.join('test.pkl').strpath
    payload = numpy_pickle_compat.asbytes('Foo, \n Bar, baz, \n\nfoobar')
    with open(target, 'wb') as fh:
        numpy_pickle_compat.write_zfile(fh, payload)
    with open(target, 'rb') as fh:
        recovered = numpy_pickle_compat.read_zfile(fh)
    assert payload == recovered
|
{"hexsha": "5e8319212b606dbebf27bfed32c109c00294929e", "size": 624, "ext": "py", "lang": "Python", "max_stars_repo_path": "venv/lib/python3.8/site-packages/joblib/test/test_numpy_pickle_compat.py", "max_stars_repo_name": "avrumnoor/NewsSummarizer", "max_stars_repo_head_hexsha": "a963497ef9bc62d2148aa28e624ea32955992f57", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2607, "max_stars_repo_stars_event_min_datetime": "2015-01-06T16:06:46.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T14:47:54.000Z", "max_issues_repo_path": "venv/lib/python3.8/site-packages/joblib/test/test_numpy_pickle_compat.py", "max_issues_repo_name": "avrumnoor/NewsSummarizer", "max_issues_repo_head_hexsha": "a963497ef9bc62d2148aa28e624ea32955992f57", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1091, "max_issues_repo_issues_event_min_datetime": "2015-01-20T18:01:47.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T14:08:24.000Z", "max_forks_repo_path": "venv/lib/python3.8/site-packages/joblib/test/test_numpy_pickle_compat.py", "max_forks_repo_name": "avrumnoor/NewsSummarizer", "max_forks_repo_head_hexsha": "a963497ef9bc62d2148aa28e624ea32955992f57", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 313, "max_forks_repo_forks_event_min_datetime": "2015-01-08T04:02:42.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T19:53:13.000Z", "avg_line_length": 32.8421052632, "max_line_length": 70, "alphanum_fraction": 0.7179487179, "include": true, "reason": "import numpy", "num_tokens": 152}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.