text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
module LSystem
export @lsys
export LModel, add_rule!
export LState, next, result
using MacroTools
# L-System model definition
"""
A L-system model is represented by an axiom called `axiom`
and a set of rewriting `rules`.
"""
struct LModel
    axiom   # vector wrapping the initial symbol(s); see the outer constructor below
    rules   # Dict mapping a symbol (String) to its replacement split into symbols
end
"Create a L-system model from a single `axiom` symbol (stored as a one-element vector)."
LModel(axiom) = LModel([axiom], Dict())
"""
    add_rule!(model::LModel, left::AbstractString, right::AbstractString)

Add a rewriting rule to `model`: the symbol `left` is rewritten into the
symbols of `right` (split into single-character strings). Returns `nothing`.

The two arguments may be different string types (e.g. a `String` and a
`SubString`); the previous signature `(left::T, right::T) where T` needlessly
required both to share one concrete type.
"""
function add_rule!(model::LModel, left::AbstractString, right::AbstractString)
    model.rules[left] = split(right, "")
    return nothing
end
"Pretty-print a `LModel`: its axiom followed by its rules in sorted key order."
function Base.show(io::IO, model::LModel)
    println(io, "LModel:")
    println(io, " Axiom: ", join(model.axiom))
    sorted_keys = sort(collect(keys(model.rules)))
    for key in sorted_keys
        rhs = join(model.rules[key])
        println(io, " Rule: ", key, " → ", rhs)
    end
end
# Tracking state of the system
"""
A L-system state contains a reference to the `model`, the
current iteration, and the result.
"""
struct LState
    model               # the LModel being iterated
    current_iteration   # 1-based iteration counter
    result              # current sequence of symbols
end
"Create the initial L-system state (iteration 1) from a `model`."
LState(model::LModel) = LState(model, 1, model.axiom)
"Advance to the next state and returns a new LState object."
function next(state::LState)
    new_result = []
    for el in state.result
        # A matching rule maps `el` to a vector of replacement symbols;
        # without a rule, `el` itself (a string) is the fallback.
        next_el = get(state.model.rules, el, el)
        # Broadcasted push!: a vector pushes each of its elements, while a
        # plain string is a broadcast scalar and is pushed once as-is.
        push!.(Ref(new_result), next_el)
    end
    return LState(state.model, state.current_iteration + 1, new_result)
end
"Advance the state `n` times (no-op for `n <= 0`), returning the final `LState`."
function next(state::LState, n)
    while n > 0
        state = next(state)
        n -= 1
    end
    return state
end
"Concatenate the current result into a single display string."
result(state::LState) = join(state.result)

function Base.show(io::IO, s::LState)
    print(io, "LState(", s.current_iteration, "): ", result(s))
end
# DSL implementation
"""
The @lsys macro is used to construct a L-System model object [LModel](@ref).
The domain specific language requires a single axiom and a set of rewriting rules.
For example:
```
model = @lsys begin
axiom : A
rule : A → AB
rule : B → A
end
```
"""
macro lsys(ex)
    # Rewrite each `axiom : X` / `rule : A → B` line into model-building
    # calls (see `walk` below); other expressions pass through unchanged.
    ex = MacroTools.postwalk(walk, ex)
    # Append `model` so the whole block evaluates to the constructed model.
    # Macro hygiene renames `model` consistently across the generated block,
    # so it does not leak into the caller's scope.
    push!(ex.args, :( model ))
    return ex
end
# Walk the AST tree and match expressions.
# Rewrites `axiom : X` into `model = LModel("X")` and
# `rule : A → BC` into `add_rule!(model, "A", "BC")`;
# any other expression is returned unchanged.
function walk(ex)
    match_axiom = @capture(ex, axiom : sym_)
    if match_axiom
        # `sym` is a Symbol captured by MacroTools; store it as a String
        sym_str = String(sym)
        return :( model = LModel($sym_str) )
    end
    match_rule = @capture(ex, rule : original_ → replacement_)
    if match_rule
        original_str = String(original)
        replacement_str = String(replacement)
        return :(
            add_rule!(model, $original_str, $replacement_str)
        )
    end
    return ex
end
end # module
|
{"hexsha": "1165c3a8c99f732a4a29364ff26b61192a3be6cd", "size": 2552, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/LSystem.jl", "max_stars_repo_name": "tk3369/LSystem.jl", "max_stars_repo_head_hexsha": "9953ce229c6aa2cecbad3e0fd5ffa3c8a5e7da0e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-18T03:19:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-18T03:19:18.000Z", "max_issues_repo_path": "src/LSystem.jl", "max_issues_repo_name": "tk3369/LSystem.jl", "max_issues_repo_head_hexsha": "9953ce229c6aa2cecbad3e0fd5ffa3c8a5e7da0e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/LSystem.jl", "max_forks_repo_name": "tk3369/LSystem.jl", "max_forks_repo_head_hexsha": "9953ce229c6aa2cecbad3e0fd5ffa3c8a5e7da0e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-02-18T03:19:21.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-18T03:19:21.000Z", "avg_line_length": 22.0, "max_line_length": 82, "alphanum_fraction": 0.6559561129, "num_tokens": 687}
|
[STATEMENT]
lemma aux:
"
distinct (map fst (ts1@ts2)) \<Longrightarrow>
the_default (0::val) (case map_of ts1 (k, i) of None \<Rightarrow> map_of ts2 (k, i) | Some x \<Rightarrow> Some x)
= the_default 0 (map_of ts1 (k, i)) + the_default 0 (map_of ts2 (k, i))
"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. distinct (map fst (ts1 @ ts2)) \<Longrightarrow> the_default 0 (case map_of ts1 (k, i) of None \<Rightarrow> map_of ts2 (k, i) | Some x \<Rightarrow> Some x) = the_default 0 (map_of ts1 (k, i)) + the_default 0 (map_of ts2 (k, i))
[PROOF STEP]
apply (auto split: option.splits)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x2. \<lbrakk>distinct (map fst ts1); distinct (map fst ts2); fst ` set ts1 \<inter> fst ` set ts2 = {}; ((k, i), x2) \<in> set ts1\<rbrakk> \<Longrightarrow> the_default 0 (map_of ts2 (k, i)) = 0
[PROOF STEP]
by (metis disjoint_iff_not_equal img_fst map_of_eq_None_iff the_default.simps(2))
|
{"llama_tokens": 402, "file": "VerifyThis2019_Challenge3", "length": 2}
|
import os
import numpy as np
import json
from ._base_dataset import _BaseDataset
from ..utils import TrackEvalException
from .. import utils
from .. import _timing
class YouTubeVIS(_BaseDataset):
"""Dataset class for YouTubeVIS tracking"""
@staticmethod
def get_default_dataset_config():
    """Return the default configuration dict for the YouTubeVIS dataset."""
    code_path = utils.get_code_path()
    return {
        'GT_FOLDER': os.path.join(code_path, 'data/gt/youtube_vis/'),  # Location of GT data
        'TRACKERS_FOLDER': os.path.join(code_path, 'data/trackers/youtube_vis/'),  # Trackers location
        'OUTPUT_FOLDER': None,  # Where to save eval results (if None, same as TRACKERS_FOLDER)
        'TRACKERS_TO_EVAL': None,  # Filenames of trackers to eval (if None, all in folder)
        'CLASSES_TO_EVAL': None,  # Classes to eval (if None, all classes)
        'SPLIT_TO_EVAL': 'train_sub_split',  # Valid: 'train', 'val', 'train_sub_split'
        'PRINT_CONFIG': True,  # Whether to print current config
        'OUTPUT_SUB_FOLDER': '',  # Output files saved in OUTPUT_FOLDER/tracker_name/OUTPUT_SUB_FOLDER
        'TRACKER_SUB_FOLDER': 'data',  # Tracker files in TRACKER_FOLDER/tracker_name/TRACKER_SUB_FOLDER
        'TRACKER_DISPLAY_NAMES': None,  # Names of trackers to display; if None, TRACKERS_TO_EVAL
    }
def __init__(self, config=None):
    """Initialise dataset, checking that all required files are present"""
    super().__init__()
    # Fill non-given config values with defaults
    self.config = utils.init_config(config, self.get_default_dataset_config(), self.get_name())
    # GT/tracker folders are suffixed with the split, e.g. .../youtube_vis_train_sub_split
    self.gt_fol = self.config['GT_FOLDER'] + 'youtube_vis_' + self.config['SPLIT_TO_EVAL']
    self.tracker_fol = self.config['TRACKERS_FOLDER'] + 'youtube_vis_' + self.config['SPLIT_TO_EVAL']
    self.use_super_categories = False
    self.should_classes_combine = True
    self.output_fol = self.config['OUTPUT_FOLDER']
    if self.output_fol is None:
        self.output_fol = self.tracker_fol
    self.output_sub_fol = self.config['OUTPUT_SUB_FOLDER']
    self.tracker_sub_fol = self.config['TRACKER_SUB_FOLDER']
    if not os.path.exists(self.gt_fol):
        print("GT folder not found: " + self.gt_fol)
        raise TrackEvalException("GT folder not found: " + os.path.basename(self.gt_fol))
    # GT folder must contain exactly one json annotation file
    gt_dir_files = [file for file in os.listdir(self.gt_fol) if file.endswith('.json')]
    if len(gt_dir_files) != 1:
        raise TrackEvalException(self.gt_fol + ' does not contain exactly one json file.')
    with open(os.path.join(self.gt_fol, gt_dir_files[0])) as f:
        self.gt_data = json.load(f)
    # Get classes to eval
    self.valid_classes = [cls['name'] for cls in self.gt_data['categories']]
    cls_name_to_cls_id_map = {cls['name']: cls['id'] for cls in self.gt_data['categories']}
    if self.config['CLASSES_TO_EVAL']:
        # invalid names become None so the `all` check below catches them
        self.class_list = [cls.lower() if cls.lower() in self.valid_classes else None
                           for cls in self.config['CLASSES_TO_EVAL']]
        if not all(self.class_list):
            raise TrackEvalException('Attempted to evaluate an invalid class. Only classes ' +
                                     ', '.join(self.valid_classes) + ' are valid.')
    else:
        self.class_list = [cls['name'] for cls in self.gt_data['categories']]
    self.class_name_to_class_id = {k: v for k, v in cls_name_to_cls_id_map.items() if k in self.class_list}
    # Get sequences to eval and check gt files exist
    # sequence name = first path component of the video's first frame file name
    self.seq_list = [vid['file_names'][0].split('/')[0] for vid in self.gt_data['videos']]
    self.seq_name_to_seq_id = {vid['file_names'][0].split('/')[0]: vid['id'] for vid in self.gt_data['videos']}
    self.seq_lengths = {vid['id']: len(vid['file_names']) for vid in self.gt_data['videos']}
    # encode masks and compute track areas
    self._prepare_gt_annotations()
    # Get trackers to eval
    if self.config['TRACKERS_TO_EVAL'] is None:
        self.tracker_list = os.listdir(self.tracker_fol)
    else:
        self.tracker_list = self.config['TRACKERS_TO_EVAL']
    if self.config['TRACKER_DISPLAY_NAMES'] is None:
        self.tracker_to_disp = dict(zip(self.tracker_list, self.tracker_list))
    elif (self.config['TRACKERS_TO_EVAL'] is not None) and (
            len(self.config['TRACKER_DISPLAY_NAMES']) == len(self.tracker_list)):
        self.tracker_to_disp = dict(zip(self.tracker_list, self.config['TRACKER_DISPLAY_NAMES']))
    else:
        raise TrackEvalException('List of tracker files and tracker display names do not match.')
    # counter for globally unique track IDs
    self.global_tid_counter = 0
    # load each tracker's (single) json result file up front
    self.tracker_data = dict()
    for tracker in self.tracker_list:
        tracker_dir_path = os.path.join(self.tracker_fol, tracker, self.tracker_sub_fol)
        tr_dir_files = [file for file in os.listdir(tracker_dir_path) if file.endswith('.json')]
        if len(tr_dir_files) != 1:
            raise TrackEvalException(tracker_dir_path + ' does not contain exactly one json file.')
        with open(os.path.join(tracker_dir_path, tr_dir_files[0])) as f:
            curr_data = json.load(f)
        self.tracker_data[tracker] = curr_data
def get_display_name(self, tracker):
    """Return the display name configured for `tracker` (KeyError if unknown)."""
    display_name = self.tracker_to_disp[tracker]
    return display_name
def _load_raw_file(self, tracker, seq, is_gt):
    """Load a file (gt or tracker) in the YouTubeVIS format

    If is_gt, this returns a dict which contains the fields:
    [gt_ids, gt_classes] : list (for each timestep) of 1D NDArrays (for each det).
    [gt_dets]: list (for each timestep) of lists of detections.
    [classes_to_gt_tracks]: dictionary with class values as keys and list of dictionaries (with frame indices as
    keys and corresponding segmentations as values) for each track
    [classes_to_gt_track_ids, classes_to_gt_track_areas, classes_to_gt_track_iscrowd]: dictionary with class values
    as keys and lists (for each track) as values

    if not is_gt, this returns a dict which contains the fields:
    [tracker_ids, tracker_classes, tracker_confidences] : list (for each timestep) of 1D NDArrays (for each det).
    [tracker_dets]: list (for each timestep) of lists of detections.
    [classes_to_dt_tracks]: dictionary with class values as keys and list of dictionaries (with frame indices as
    keys and corresponding segmentations as values) for each track
    [classes_to_dt_track_ids, classes_to_dt_track_areas]: dictionary with class values as keys and lists as values
    [classes_to_dt_track_scores]: dictionary with class values as keys and 1D numpy arrays as values
    """
    # select sequence tracks
    seq_id = self.seq_name_to_seq_id[seq]
    if is_gt:
        tracks = [ann for ann in self.gt_data['annotations'] if ann['video_id'] == seq_id]
    else:
        tracks = self._get_tracker_seq_tracks(tracker, seq_id)
    # Convert data to required format
    num_timesteps = self.seq_lengths[seq_id]
    data_keys = ['ids', 'classes', 'dets']
    if not is_gt:
        data_keys += ['tracker_confidences']
    raw_data = {key: [None] * num_timesteps for key in data_keys}
    for t in range(num_timesteps):
        # a track contributes to timestep t only if its segmentation there is non-empty
        raw_data['dets'][t] = [track['segmentations'][t] for track in tracks if track['segmentations'][t]]
        raw_data['ids'][t] = np.atleast_1d([track['id'] for track in tracks
                                            if track['segmentations'][t]]).astype(int)
        raw_data['classes'][t] = np.atleast_1d([track['category_id'] for track in tracks
                                                if track['segmentations'][t]]).astype(int)
        if not is_gt:
            raw_data['tracker_confidences'][t] = np.atleast_1d([track['score'] for track in tracks
                                                                if track['segmentations'][t]]).astype(float)
    # rename the generic keys to gt_/tracker_ prefixed ones
    if is_gt:
        key_map = {'ids': 'gt_ids',
                   'classes': 'gt_classes',
                   'dets': 'gt_dets'}
    else:
        key_map = {'ids': 'tracker_ids',
                   'classes': 'tracker_classes',
                   'dets': 'tracker_dets'}
    for k, v in key_map.items():
        raw_data[v] = raw_data.pop(k)
    all_cls_ids = {self.class_name_to_class_id[cls] for cls in self.class_list}
    classes_to_tracks = {cls: [track for track in tracks if track['category_id'] == cls] for cls in all_cls_ids}
    # mapping from classes to track representations and track information
    raw_data['classes_to_tracks'] = {cls: [{i: track['segmentations'][i]
                                            for i in range(len(track['segmentations']))} for track in tracks]
                                     for cls, tracks in classes_to_tracks.items()}
    raw_data['classes_to_track_ids'] = {cls: [track['id'] for track in tracks]
                                        for cls, tracks in classes_to_tracks.items()}
    raw_data['classes_to_track_areas'] = {cls: [track['area'] for track in tracks]
                                          for cls, tracks in classes_to_tracks.items()}
    if is_gt:
        raw_data['classes_to_gt_track_iscrowd'] = {cls: [track['iscrowd'] for track in tracks]
                                                   for cls, tracks in classes_to_tracks.items()}
    else:
        raw_data['classes_to_dt_track_scores'] = {cls: np.array([track['score'] for track in tracks])
                                                  for cls, tracks in classes_to_tracks.items()}
    if is_gt:
        key_map = {'classes_to_tracks': 'classes_to_gt_tracks',
                   'classes_to_track_ids': 'classes_to_gt_track_ids',
                   'classes_to_track_areas': 'classes_to_gt_track_areas'}
    else:
        key_map = {'classes_to_tracks': 'classes_to_dt_tracks',
                   'classes_to_track_ids': 'classes_to_dt_track_ids',
                   'classes_to_track_areas': 'classes_to_dt_track_areas'}
    for k, v in key_map.items():
        raw_data[v] = raw_data.pop(k)
    raw_data['num_timesteps'] = num_timesteps
    raw_data['seq'] = seq
    return raw_data
@_timing.time
def get_preprocessed_seq_data(self, raw_data, cls):
    """ Preprocess data for a single sequence for a single class ready for evaluation.

    Inputs:
        - raw_data is a dict containing the data for the sequence already read in by get_raw_seq_data().
        - cls is the class to be evaluated.
    Outputs:
        - data is a dict containing all of the information that metrics need to perform evaluation.
            It contains the following fields:
                [num_timesteps, num_gt_ids, num_tracker_ids, num_gt_dets, num_tracker_dets] : integers.
                [gt_ids, tracker_ids, tracker_confidences]: list (for each timestep) of 1D NDArrays (for each det).
                [gt_dets, tracker_dets]: list (for each timestep) of lists of detections.
                [similarity_scores]: list (for each timestep) of 2D NDArrays.
    Notes:
        General preprocessing (preproc) occurs in 4 steps. Some datasets may not use all of these steps.
            1) Extract only detections relevant for the class to be evaluated (including distractor detections).
            2) Match gt dets and tracker dets. Remove tracker dets that are matched to a gt det that is of a
                distractor class, or otherwise marked as to be removed.
            3) Remove unmatched tracker dets if they fall within a crowd ignore region or don't meet a certain
                other criteria (e.g. are too small).
            4) Remove gt dets that were only useful for preprocessing and not for actual evaluation.
        After the above preprocessing steps, this function also calculates the number of gt and tracker detections
            and unique track ids. It also relabels gt and tracker ids to be contiguous and checks that ids are
            unique within each timestep.
    YouTubeVIS:
        In YouTubeVIS, the 4 preproc steps are as follow:
            1) There are 40 classes which are evaluated separately.
            2) No matched tracker dets are removed.
            3) No unmatched tracker dets are removed.
            4) No gt dets are removed.
        Further, for TrackMAP computation track representations for the given class are accessed from a dictionary
        and the tracks from the tracker data are sorted according to the tracker confidence.
    """
    cls_id = self.class_name_to_class_id[cls]

    data_keys = ['gt_ids', 'tracker_ids', 'gt_dets', 'tracker_dets', 'similarity_scores']
    data = {key: [None] * raw_data['num_timesteps'] for key in data_keys}
    unique_gt_ids = []
    unique_tracker_ids = []
    num_gt_dets = 0
    num_tracker_dets = 0
    for t in range(raw_data['num_timesteps']):
        # Only extract relevant dets for this class for eval (cls)
        # Use builtin `bool`/`int` for casting: the `np.bool`/`np.int` aliases
        # are deprecated since NumPy 1.20 and removed in NumPy 1.24.
        gt_class_mask = np.atleast_1d(raw_data['gt_classes'][t] == cls_id)
        gt_class_mask = gt_class_mask.astype(bool)
        gt_ids = raw_data['gt_ids'][t][gt_class_mask]
        gt_dets = [raw_data['gt_dets'][t][ind] for ind in range(len(gt_class_mask)) if gt_class_mask[ind]]

        tracker_class_mask = np.atleast_1d(raw_data['tracker_classes'][t] == cls_id)
        tracker_class_mask = tracker_class_mask.astype(bool)
        tracker_ids = raw_data['tracker_ids'][t][tracker_class_mask]
        tracker_dets = [raw_data['tracker_dets'][t][ind] for ind in range(len(tracker_class_mask)) if
                        tracker_class_mask[ind]]
        similarity_scores = raw_data['similarity_scores'][t][gt_class_mask, :][:, tracker_class_mask]

        data['tracker_ids'][t] = tracker_ids
        data['tracker_dets'][t] = tracker_dets
        data['gt_ids'][t] = gt_ids
        data['gt_dets'][t] = gt_dets
        data['similarity_scores'][t] = similarity_scores

        unique_gt_ids += list(np.unique(data['gt_ids'][t]))
        unique_tracker_ids += list(np.unique(data['tracker_ids'][t]))
        num_tracker_dets += len(data['tracker_ids'][t])
        num_gt_dets += len(data['gt_ids'][t])

    # Re-label IDs such that there are no empty IDs
    if len(unique_gt_ids) > 0:
        unique_gt_ids = np.unique(unique_gt_ids)
        gt_id_map = np.nan * np.ones((np.max(unique_gt_ids) + 1))
        gt_id_map[unique_gt_ids] = np.arange(len(unique_gt_ids))
        for t in range(raw_data['num_timesteps']):
            if len(data['gt_ids'][t]) > 0:
                data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(int)
    if len(unique_tracker_ids) > 0:
        unique_tracker_ids = np.unique(unique_tracker_ids)
        tracker_id_map = np.nan * np.ones((np.max(unique_tracker_ids) + 1))
        tracker_id_map[unique_tracker_ids] = np.arange(len(unique_tracker_ids))
        for t in range(raw_data['num_timesteps']):
            if len(data['tracker_ids'][t]) > 0:
                data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(int)

    # Ensure that ids are unique per timestep.
    self._check_unique_ids(data)

    # Record overview statistics.
    data['num_tracker_dets'] = num_tracker_dets
    data['num_gt_dets'] = num_gt_dets
    data['num_tracker_ids'] = len(unique_tracker_ids)
    data['num_gt_ids'] = len(unique_gt_ids)
    data['num_timesteps'] = raw_data['num_timesteps']
    data['seq'] = raw_data['seq']

    # get track representations
    data['gt_tracks'] = raw_data['classes_to_gt_tracks'][cls_id]
    data['gt_track_ids'] = raw_data['classes_to_gt_track_ids'][cls_id]
    data['gt_track_areas'] = raw_data['classes_to_gt_track_areas'][cls_id]
    data['gt_track_iscrowd'] = raw_data['classes_to_gt_track_iscrowd'][cls_id]
    data['dt_tracks'] = raw_data['classes_to_dt_tracks'][cls_id]
    data['dt_track_ids'] = raw_data['classes_to_dt_track_ids'][cls_id]
    data['dt_track_areas'] = raw_data['classes_to_dt_track_areas'][cls_id]
    data['dt_track_scores'] = raw_data['classes_to_dt_track_scores'][cls_id]
    data['iou_type'] = 'mask'

    # sort tracker data tracks by tracker confidence scores
    if data['dt_tracks']:
        idx = np.argsort([-score for score in data['dt_track_scores']], kind="mergesort")
        data['dt_track_scores'] = [data['dt_track_scores'][i] for i in idx]
        data['dt_tracks'] = [data['dt_tracks'][i] for i in idx]
        data['dt_track_ids'] = [data['dt_track_ids'][i] for i in idx]
        data['dt_track_areas'] = [data['dt_track_areas'][i] for i in idx]
    return data
def _calculate_similarities(self, gt_dets_t, tracker_dets_t):
    """Mask-IoU similarity matrix between GT and tracker dets for one timestep."""
    return self._calculate_mask_ious(gt_dets_t, tracker_dets_t, is_encoded=True, do_ioa=False)
def _prepare_gt_annotations(self):
    """
    Prepares GT data by rle encoding segmentations and computing the average track area.
    :return: None
    """
    # only loaded when needed to reduce minimum requirements
    from pycocotools import mask as mask_utils

    for track in self.gt_data['annotations']:
        height, width = track['height'], track['width']
        # rle-encode every non-empty per-frame segmentation in place
        track['segmentations'] = [
            mask_utils.frPyObjects(seg, height, width) if seg else seg
            for seg in track['segmentations']
        ]
        # mean area over frames with a (non-zero) area; 0 when none exist
        valid_areas = [a for a in track['areas'] if a]
        track['area'] = np.array(valid_areas).mean() if valid_areas else 0
def _get_tracker_seq_tracks(self, tracker, seq_id):
    """
    Prepares tracker data for a given sequence. Extracts all annotations for given sequence ID, computes
    average track area and assigns a track ID.
    :param tracker: the given tracker
    :param seq_id: the sequence ID
    :return: the extracted tracks
    """
    # only loaded when needed to reduce minimum requirements
    from pycocotools import mask as mask_utils

    tracks = [ann for ann in self.tracker_data[tracker] if ann['video_id'] == seq_id]
    for track in tracks:
        # per-frame areas; None where the frame has no segmentation
        track['areas'] = [mask_utils.area(seg) if seg else None
                          for seg in track['segmentations']]
        # mean area over frames with a (non-zero) area; 0 when none exist
        valid_areas = [a for a in track['areas'] if a]
        track['area'] = np.array(valid_areas).mean() if valid_areas else 0
        # assign a globally unique track id
        track['id'] = self.global_tid_counter
        self.global_tid_counter += 1
    return tracks
|
{"hexsha": "6d5b54c9ed9045c5f4e2f25e4234f87a00f8e302", "size": 19626, "ext": "py", "lang": "Python", "max_stars_repo_path": "trackeval/datasets/youtube_vis.py", "max_stars_repo_name": "AlexanderSing/TrackEval", "max_stars_repo_head_hexsha": "373e643f8989445f0253af6748e9e247d6ae6322", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 325, "max_stars_repo_stars_event_min_datetime": "2021-02-25T19:00:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T14:30:42.000Z", "max_issues_repo_path": "trackeval/datasets/youtube_vis.py", "max_issues_repo_name": "AlexanderSing/TrackEval", "max_issues_repo_head_hexsha": "373e643f8989445f0253af6748e9e247d6ae6322", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 49, "max_issues_repo_issues_event_min_datetime": "2021-03-26T14:40:28.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-27T17:33:13.000Z", "max_forks_repo_path": "trackeval/datasets/youtube_vis.py", "max_forks_repo_name": "AlexanderSing/TrackEval", "max_forks_repo_head_hexsha": "373e643f8989445f0253af6748e9e247d6ae6322", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 93, "max_forks_repo_forks_event_min_datetime": "2021-02-26T09:05:37.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T11:44:01.000Z", "avg_line_length": 53.7698630137, "max_line_length": 119, "alphanum_fraction": 0.6152043208, "include": true, "reason": "import numpy", "num_tokens": 4441}
|
from ..mapping import MappedArray, AccessType
from ..indexing import is_fullslice, split_operation, slicer_sub2ind, invert_slice
from .. import volutils
from ..readers import reader_classes
from .metadata import ome_zooms, parse_unit
from nitorch.spatial import affine_default
from nitorch.core import pyutils, dtypes
from tifffile import TiffFile
from contextlib import contextmanager
import torch
import numpy as np
from warnings import warn
class TiffArray(MappedArray):
"""
MappedArray that uses `tifffile` under the hood.
"""
def __init__(self, file_like, permission='r', keep_file_open=True, **hints):
    """
    Parameters
    ----------
    file_like : str or file object
    permission : str, default='r'
        Not used by this constructor; this mapper is read-only
        (see `writable`, which returns `AccessType.No`).
    keep_file_open : bool, default=True
        Whether to keep the file handle open
    hints : keyword of the form `is_<format>=<True|False>`
        Tells the Tiff reader that a file is or isn't of a specific
        subformat. If not provided, it is guessed by the Tiff reader.
    """
    self._tiff = TiffFile(file_like, **hints)
    if not keep_file_open:
        # the handle can be transiently reopened later by `tiffobj()`
        self._tiff.close()
    self._series = 0
    self._level = 0
    # per-instance cache (shadows the shared class-level default)
    self._cache = dict()
    super().__init__()
_series: int = 0  # index of series to map
_level: int = 0   # index of pyramid level to map
# NOTE: class-level default is a shared dict; `__init__` always assigns a
# fresh per-instance `self._cache`, so the shared one is not mutated there.
_cache: dict = {}  # a cache of precomputed _shape, _spatial, etc
@property
def _shape(self):
    """Full shape of a series+level (computed once, then cached)."""
    if '_shape' in self._cache:
        return self._cache['_shape']
    with self.tiffobj() as tiff:
        full_shape = tiff.series[self.series].levels[self.level].shape
    self._cache['_shape'] = full_shape
    return full_shape
@property
def _axes(self):
    """Axes names of a series+level (computed once, then cached)."""
    if '_axes' in self._cache:
        return self._cache['_axes']
    with self.tiffobj() as tiff:
        axes_names = tiff.series[self.series].levels[self.level].axes
    self._cache['_axes'] = axes_names
    return axes_names
@property
def _spatial(self):
    """Boolean mask flagging which axes of the series+level are spatial (X/Y/Z)."""
    return [axis in 'XYZ' for axis in self._axes]
@property
def _affine(self):
    """Affine orientation matrix of a series+level"""
    # TODO: I don't know yet how we should use GeoTiff to encode
    # affine matrices. In the matrix/zooms, their voxels are ordered
    # as [x, y, z] even though their dimensions in the returned array
    # are ordered as [Z, Y, X]. If we want to keep the same convention
    # as nitorch, I need to permute the matrix/zooms.
    if '_affine' not in self._cache:
        with self.tiffobj() as tiff:
            omexml = tiff.ome_metadata
            geotags = tiff.geotiff_metadata or {}
        # zoom priority: OME metadata > GeoTiff ModelPixelScaleTag > 1.
        zooms, units, axes = ome_zooms(omexml, self.series)
        if zooms:
            # convert to mm + drop non-spatial zooms
            units = [parse_unit(u) for u in units]
            zooms = [z * (f / 1e-3) for z, (f, type) in zip(zooms, units)
                     if type == 'm']
            if 'ModelPixelScaleTag' in geotags:
                warn("Both OME and GeoTiff pixel scales are present: "
                     "{} vs {}. Using OME."
                     .format(zooms, geotags['ModelPixelScaleTag']))
        elif 'ModelPixelScaleTag' in geotags:
            zooms = geotags['ModelPixelScaleTag']
            axes = 'XYZ'
        else:
            zooms = 1.
            axes = [ax for ax in self._axes if ax in 'XYZ']
        # affine priority: full ModelTransformation > tiepoints > default
        if 'ModelTransformation' in geotags:
            aff = geotags['ModelTransformation']
            aff = torch.as_tensor(aff, dtype=torch.double).reshape(4, 4)
            self._cache['_affine'] = aff
        elif ('ModelTiepointTag' in geotags):
            # copied from tifffile
            sx, sy, sz = pyutils.make_list(zooms, n=3)
            tiepoints = torch.as_tensor(geotags['ModelTiepointTag'])
            affines = []
            for tiepoint in tiepoints:
                i, j, k, x, y, z = tiepoint
                affines.append(torch.as_tensor(
                    [[sx, 0.0, 0.0, x - i * sx],
                     [0.0, -sy, 0.0, y + j * sy],
                     [0.0, 0.0, sz, z - k * sz],
                     [0.0, 0.0, 0.0, 1.0]], dtype=torch.double))
            affines = torch.stack(affines, dim=0)
            # a single tiepoint yields a single (4, 4) matrix
            if len(tiepoints) == 1:
                affines = affines[0]
            self._cache['_affine'] = affines
        else:
            # no GeoTiff transform: build a default affine from the zooms
            zooms = pyutils.make_list(zooms, n=len(axes))
            ax2zoom = {ax: zoom for ax, zoom in zip(axes, zooms)}
            axes = [ax for ax in self._axes if ax in 'XYZ']
            shape = [shp for shp, msk in zip(self._shape, self._spatial)
                     if msk]
            zooms = [ax2zoom.get(ax, 1.) for ax in axes]
            layout = [('R' if ax == 'Z' else 'P' if ax == 'Y' else 'S')
                      for ax in axes]
            aff = affine_default(shape, zooms, layout=''.join(layout))
            self._cache['_affine'] = aff
    return self._cache['_affine']
@property
def dtype(self):
    """Data type of the mapped series+level (computed once, then cached)."""
    if 'dtype' in self._cache:
        return self._cache['dtype']
    with self.tiffobj() as tiff:
        dt = tiff.series[self.series].levels[self.level].dtype
    self._cache['dtype'] = dt
    return dt
@property
def series(self):
    """Series index (Tiff files can hold multiple series)"""
    return self._series

@series.setter
def series(self, val):
    # switching series invalidates all cached metadata, so it is only
    # allowed on a full (un-sliced) view
    if val != self.series:
        if not all(is_fullslice(self.slicer)):
            raise RuntimeError("Cannot change series in a view")
    self._series = val
    self._cache = {}
@property
def level(self):
    """Level index (Tiff files can hold multiple spatial resolutions)"""
    return self._level

@level.setter
def level(self, val):
    # switching pyramid levels invalidates all cached metadata, so it is
    # only allowed on a full (un-sliced) view
    if val != self.level:
        if not all(is_fullslice(self.slicer)):
            raise RuntimeError("Cannot change resolution level in a view")
    self._level = val
    self._cache = {}
@property
def readable(self):
    # That's not exact: pseudo partial access in-plane
    # (partial reads are emulated page-by-page; see _read_data_raw_partial)
    return AccessType.TruePartial

@property
def writable(self):
    # writing TIFFs is not supported by this mapper
    return AccessType.No
@contextmanager
def tiffobj(self):
    """Returns an *open* Tiff reader.

    Should be used in a `with` statement:
    ```python
    >>> with self.tiffobj() as tiff:
    >>>    # do stuff with `tiff`
    ```
    The handle is returned to its previous open/closed state on exit,
    even if the caller's block raises.
    """
    closed = self._tiff.filehandle.closed
    if closed:
        self._tiff.filehandle.open()
    try:
        yield self._tiff
    finally:
        # without try/finally, an exception in the caller's block would
        # leave a handle we opened here dangling open
        if closed:
            self._tiff.close()
def __del__(self):
    # make sure we close all file objects; `_tiff` may be missing if
    # `TiffFile(...)` raised inside `__init__` before it was assigned,
    # in which case the original code raised AttributeError at GC time
    tiff = getattr(self, '_tiff', None)
    if tiff is not None:
        tiff.close()
@property
def filename(self):
    """Name of the mapped file, as reported by the tifffile handle."""
    with self.tiffobj() as tiff:
        return tiff.filename
def data(self, dtype=None, device=None, casting='unsafe', rand=True,
         cutoff=None, dim=None, numpy=False):
    """Load the mapped (and possibly sliced/permuted) array into memory.

    NOTE(review): docstring added during review; semantics inferred from
    the code below -- confirm against the `MappedArray.data` contract.

    Parameters
    ----------
    dtype : dtype-like, optional
        Output data type (default: the on-disk dtype).
    device : optional
        Torch device for the returned tensor (torch path only).
    casting : str, default='unsafe'
        Casting rule forwarded to `volutils.cast`.
    rand : bool, default=True
        Add uniform noise in the quantisation interval when the source
        type is an integer type.
    cutoff, dim : optional
        Forwarded to `volutils.cutoff`.
    numpy : bool, default=False
        Return a `np.ndarray` instead of a `torch.Tensor`.
    """
    # --- sanity check before reading ---
    dtype = self.dtype if dtype is None else dtype
    dtype = dtypes.dtype(dtype)
    if not numpy and dtype.torch is None:
        raise TypeError('Data type {} does not exist in PyTorch.'
                        .format(dtype))

    # --- check that view is not empty ---
    if pyutils.prod(self.shape) == 0:
        if numpy:
            return np.zeros(self.shape, dtype=dtype.numpy)
        else:
            return torch.zeros(self.shape, dtype=dtype.torch, device=device)

    # --- read native data ---
    slicer, perm, newdim = split_operation(self.permutation, self.slicer, 'r')
    with self.tiffobj() as f:
        dat = self._read_data_raw(slicer, tiffobj=f)
    dat = dat.transpose(perm)[newdim]
    indtype = dtypes.dtype(self.dtype)

    # --- cutoff ---
    dat = volutils.cutoff(dat, cutoff, dim)

    # --- cast ---
    rand = rand and not indtype.is_floating_point
    # when noise must be added before an integer cast, go through float64
    if rand and not dtype.is_floating_point:
        tmpdtype = dtypes.float64
    else:
        tmpdtype = dtype
    dat, scale = volutils.cast(dat, tmpdtype.numpy, casting, with_scale=True)

    # --- random sample ---
    # uniform noise in the uncertainty interval
    if rand and not (scale == 1 and not dtype.is_floating_point):
        dat = volutils.addnoise(dat, scale)

    # --- final cast ---
    dat = volutils.cast(dat, dtype.numpy, 'unsafe')

    # convert to torch if needed
    if not numpy:
        dat = torch.as_tensor(dat, device=device)
    return dat
# --------------
# LOW LEVEL
# --------------
def _read_data_raw(self, slicer=None, tiffobj=None):
    """Read native data

    Dispatch to `_read_data_raw_full` or `_read_data_raw_partial`.

    Parameters
    ----------
    slicer : tuple[index_like], optional
        A tuple of indices that describe the chunk of data to read.
        If None, read everything.
    tiffobj : file object, default=`self.fileobj('image', 'r')`
        A file object (with `seek`, `read`) from which to read

    Returns
    -------
    dat : np.ndarray

    """
    if tiffobj is None:
        # re-enter with an open handle so both branches can assume one
        with self.tiffobj() as tiffobj:
            return self._read_data_raw(slicer, tiffobj)

    # load sub-array; a full slicer is equivalent to reading everything
    if slicer is None or all(is_fullslice(slicer, self._shape)):
        dat = self._read_data_raw_full(tiffobj)
    else:
        dat = self._read_data_raw_partial(slicer, tiffobj)

    return dat
def _read_data_raw_partial(self, slicer, tiffobj=None):
    """Read a chunk of data from disk

    Parameters
    ----------
    slicer : tuple[slice or int]
    tiffobj : TiffFile

    Returns
    -------
    dat : np.ndarray

    """
    if tiffobj is None:
        with self.tiffobj() as tiffobj:
            return self._read_data_raw_partial(slicer, tiffobj)

    # 1) split dimensions
    shape_feat, shape_stack, shape_page = self._shape_split(tiffobj)
    dim_feat = len(shape_feat)
    dim_stack = len(shape_stack)
    dim_page = len(shape_page)

    # 2) split slicer
    slicer_feat = slicer[:dim_feat]
    slicer_stack = slicer[dim_feat:dim_feat+dim_stack]
    slicer_page = slicer[dim_feat+dim_stack:]
    # number of kept (slice-indexed) output dimensions per section
    dim_feat_out = sum(isinstance(idx, slice) for idx in slicer_feat)
    dim_stack_out = sum(isinstance(idx, slice) for idx in slicer_stack)
    dim_page_out = sum(isinstance(idx, slice) for idx in slicer_page)

    # 3) ensure positive strides
    # (pages are read in increasing order; negative steps are re-applied
    #  afterwards in step 8)
    slicer_inv = [slice(None, None, -1) if idx.step and idx.step < 0
                  else slice(None) for idx in slicer_stack
                  if isinstance(idx, slice)]
    slicer_stack = [invert_slice(idx, shp) if isinstance(idx, slice) and
                    idx.step and idx.step < 0
                    else idx for idx, shp in zip(slicer_stack, shape_stack)]

    # 4) convert stack slice to list of linear indices
    #    (or to one slice if possible)
    index_stack = slicer_sub2ind(slicer_stack, shape_stack)

    # 5) read only pages in the substack
    dat = tiffobj.asarray(key=index_stack,
                          series=self.series,
                          level=self.level)
    dat = dat.reshape([*shape_feat, -1, *shape_page])

    # 6) apply slicers along the feature and page dimensions
    dat = dat[(*slicer_feat, slice(None), *slicer_page)]

    # 7) reshape
    dat = dat.reshape(self.shape)

    # 8) final slicers for negative strides along stack dimensions
    slicer = [slice(None)] * dim_feat_out + slicer_inv + [slice(None)] * dim_page_out
    dat = dat[tuple(slicer)]

    return dat
def _read_data_raw_full(self, tiffobj=None):
    """Read the full data from disk

    Parameters
    ----------
    tiffobj : TiffFile

    Returns
    -------
    dat : np.ndarray

    """
    if tiffobj is not None:
        return tiffobj.asarray(series=self.series, level=self.level)
    # no handle supplied: open one transiently and recurse
    with self.tiffobj() as opened:
        return self._read_data_raw_full(opened)
def _shape_split(self, tiffobj=None):
    """Split the shape into different components

    Returns
    -------
    shape_feat : tuple[int]
        Color features (belong to pages but end-up at the left-most axis)
    shape_collection : tuple[int]
        Shape of the collection of pages (usually Z, T, etc. axes)
    shape_page : tuple[int]
        Shape of one page -- with or without features (usually X, Y axes)
    """
    if tiffobj is None:
        with self.tiffobj() as opened:
            return self._shape_split(opened)
    if tiffobj.is_imagej:
        # ImageJ hyperstacks need their own metadata-driven split
        return self._shape_split_imagej(tiffobj)
    first_page = tiffobj.series[self.series].levels[self.level].pages[0]
    shape_page = tuple(first_page.shape)
    # whatever precedes one page's axes is the page-collection shape
    shape_collection = tuple(self._shape[:-len(shape_page)])
    return tuple(), shape_collection, shape_page
    def _shape_split_imagej(self, tiffobj):
        """Split the shape into different components (ImageJ format).
        This is largely copied from tifffile.

        Returns the same triple as `_shape_split`:
        (shape_feat, shape_collection, shape_page).
        """
        pages = tiffobj.pages
        pages.useframes = True
        pages.keyframe = 0
        page = pages[0]
        meta = tiffobj.imagej_metadata
        def is_virtual():
            # ImageJ virtual hyperstacks store all image metadata in the first
            # page and image data are stored contiguously before the second
            # page, if any
            if not page.is_final:
                return False
            images = meta.get('images', 0)
            if images <= 1:
                return False
            offset, count = page.is_contiguous
            # NOTE(review): `self.filehandle` — in the original tifffile code
            # this is the TiffFile's handle; confirm this class exposes a
            # `filehandle` attribute, otherwise this should likely read
            # `tiffobj.filehandle.size`.
            if (
                count != pyutils.prod(page.shape) * page.bitspersample // 8
                or offset + count * images > self.filehandle.size
            ):
                raise ValueError()
            # check that next page is stored after data
            if len(pages) > 1 and offset + count * images > pages[1].offset:
                return False
            return True
        isvirtual = is_virtual()
        if isvirtual:
            # no need to read other pages
            pages = [page]
        else:
            pages = pages[:]
        # ImageJ metadata gives the hyperstack dimensions directly.
        images = meta.get('images', len(pages))
        frames = meta.get('frames', 1)
        slices = meta.get('slices', 1)
        channels = meta.get('channels', 1)
        # compute shape of the collection of pages
        shape = []
        axes = []
        if frames > 1:
            shape.append(frames)
            axes.append('T')
        if slices > 1:
            shape.append(slices)
            axes.append('Z')
        if channels > 1 and (pyutils.prod(shape) if shape else 1) != images:
            shape.append(channels)
            axes.append('C')
        # Any leftover images become an unnamed 'I' axis.
        remain = images // (pyutils.prod(shape) if shape else 1)
        if remain > 1:
            shape.append(remain)
            axes.append('I')
        if page.axes[0] == 'S' and 'C' in axes:
            # planar storage, S == C, saved by Bio-Formats
            return tuple(), tuple(shape), tuple(page.shape[1:])
        elif page.axes[0] == 'I':
            # contiguous multiple images
            return tuple(), tuple(shape), tuple(page.shape[1:])
        elif page.axes[:2] == 'SI':
            # color-mapped contiguous multiple images
            return tuple(page.shape[0:1]), tuple(shape), tuple(page.shape[2:])
        else:
            return tuple(), tuple(shape), tuple(page.shape)
# Register this reader so the generic loader can dispatch to TiffArray.
reader_classes.append(TiffArray)
|
{"hexsha": "bc78abf7f8e2a8c907ac5c5f3f0c8027acf794de", "size": 16271, "ext": "py", "lang": "Python", "max_stars_repo_path": "nitorch/io/tiff/array.py", "max_stars_repo_name": "wyli/nitorch", "max_stars_repo_head_hexsha": "3ecd18944cf45fb9193c4c6ffc32953c4d1c71ac", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-09T21:24:47.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-09T21:24:47.000Z", "max_issues_repo_path": "nitorch/io/tiff/array.py", "max_issues_repo_name": "wyli/nitorch", "max_issues_repo_head_hexsha": "3ecd18944cf45fb9193c4c6ffc32953c4d1c71ac", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nitorch/io/tiff/array.py", "max_forks_repo_name": "wyli/nitorch", "max_forks_repo_head_hexsha": "3ecd18944cf45fb9193c4c6ffc32953c4d1c71ac", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.295010846, "max_line_length": 89, "alphanum_fraction": 0.5543605187, "include": true, "reason": "import numpy", "num_tokens": 3876}
|
from __future__ import annotations
import warnings
from scvi.dataset.dataset import (
GeneExpressionDataset,
logger,
remap_categories,
CellMeasurement,
)
import numpy as np
import pandas as pd
import scipy.sparse as sp_sparse
import os
import torch
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor, as_completed
from multiprocessing import cpu_count, Lock
import re
from tqdm.auto import tqdm
import h5py
import loompy
from inspect import getfullargspec
from typing import Dict, Tuple, Callable, Union, Iterable, List
# This pattern is used throughout the code to extract just the scvi dataloader
# class name from its str() representation (e.g.
# "<class 'scvi.dataset.cortex.CortexDataset'>" -> "CortexDataset"), avoiding
# the need for the scvi.dataloader prefix when converted to a str.
# A bit unstable: it relies on the exact __repr__ of the dataloader classes,
# so an upstream change would break it — a future github issue waiting to be
# solved.
class_regex_pattern = r"((?<=[.])[A-Za-z_0-9]+(?='>))|((?<=class ')\w+(?='>))"
class UnionDataset(GeneExpressionDataset):
"""
The UnionDataset class aims to provide a fully scVI compatible dataset concatenation API with large data support.
Its 3 main features are:
- Concatenating scVI datasets, preserving cell type labels, batch indices (if so wished), local means and vars,
and mapping the datasets onto a common gene map.
- Building of a common gene map either by loading it from a csv file of column structure
(Genes, PositionalIndex) or by building it from: datasets to load, hdf5 datasets file, loom file.
- Supporting out of memory data loading for hdf5 and loom files too big to be loaded into memory. With such it
is possible to train on datasets worth multiple hundred gigabytes, albeit the speed of which doesn't convince
(Private dataloading benchmark: loom ~400 times slower, hdf5 ~800 times slower than memory).
"""
def __init__(
self,
save_path: str,
low_memory: bool = True,
ignore_batch_annotation: bool = True,
gene_map_load_filename: str = None,
gene_map_save_filename: str = None,
data_load_filename: str = None,
data_save_filename: str = None,
):
"""
Setting the most important features of the class on init. These settings can be overwritten after instantiation.
:param save_path: str, the path, in which all of the data to load and save is stored and will be stored.
:param low_memory: bool, if true the class will load metaloader for the data attribute ``X`` that load data
on demand out of memory.
:param ignore_batch_annotation: bool (optional), if true, all batch indices are reduced to 0.
:param gene_map_load_filename: str (optional), the file, from which to load the gene map
:param gene_map_save_filename: str (optional), the file, to which a potentially later built gene map would be
saved.
:param data_load_filename: str (optional), the file, from which data should be loaded (e.g. h5 file).
:param data_save_filename: str (optional), the file, to which concatenated data should be saved. Can be easily
changed later, on method call.
"""
super().__init__()
self.save_path = save_path
self.gene_map = None
self.gene_names = []
self.gene_names_len = 0
self.gene_map_load_filename = gene_map_load_filename
self.gene_map_save_filename = gene_map_save_filename
self.data_load_filename = data_load_filename
self.data_save_filename = data_save_filename
self.dataset_to_genemap_cache = None
self.index_to_dataset_map = []
self.dataset_to_index_map = dict()
self.low_memory = low_memory
self.ignore_batch_annotation = ignore_batch_annotation
self.load_gene_map()
if data_load_filename is not None:
_, ext = os.path.splitext(data_load_filename)
if low_memory:
self._set_attributes(data_load_filename)
if ext == ".h5":
self._cache_genemap()
else:
if ext == ".h5":
self._union_from_hdf5_to_memory(in_filename=data_load_filename)
elif ext == ".loom":
self._union_from_loom_to_memory(
in_filename=data_load_filename, as_sparse=True
)
#############################
# #
# Public Interface #
# #
#############################
def build_genemap(self, data_source: str, force_build: bool = False, **kwargs):
"""
Concatenate datasets the way determined in ``data_source`` and ``data_target``. Any combination of the elements
mentioned in ``data_source`` and ``data_target`` below are possible to be used.
Kwargs needs to fit the combination precisely.
For ``data_source``:
- 'memory' will use datasets already loaded into memory.
``kwargs``:
- ``gene_datasets``: list, a collection of scvi datasets, whose gene names are being included.
- 'hdf5' will load gene names from the datasets of an hdf5 file.
`kwargs``:
- ``in_filename``: str, the hdf5 file containing the datasets to use.
- 'loom' will load the gene names from a dataset in a loom file.
`kwargs``:
- ``in_filename``: str, the loom file containing the datasets to use.
- 'scvi' will load datasets using the scvi dataloaders and then take their gene names.
``kwargs`` are:
- ``dataset_classes``: list
- ``dataset_names``: list, strings of the names of the datasets.
- ``dataset_args``: list, list of lists of further keyword arguments to provide for each dataloader.
- ``multiprocess``: bool, load the datasets in parallel or serial.
- 'self' will simply map the gene names of the currently loaded dataset.
:param data_source: str, one of "memory", "hdf5", "loom", "self", or "scvi".
:param force_build: bool, if true, the gene map will be built regardless of an existing gene map load filename.
:param kwargs: the arguments for the chosen combination method.
:return: self
"""
if self.gene_map_load_filename is not None:
if not force_build:
logger.log("Gene map filename found. Loading it instead.")
self.load_gene_map()
return
else:
logger.log(
"Gene map filename found, but 'force_build=True'. Continuing to build gene_map."
)
if data_source not in ["memory", "hdf5", "loom", "self", "scvi"]:
raise ValueError(f"Parameter 'data_source={data_source}' not supported.")
gene_map = eval(f"self._build_genemap_from_{data_source}(**kwargs)")
self.gene_map = pd.Series(
data=list(gene_map.values()), index=list(gene_map.keys())
)
self.gene_names = self.gene_map.index.values
self.gene_names_len = len(self.gene_map)
if self.gene_map_save_filename:
self.gene_map.to_csv(
os.path.join(self.save_path, self.gene_map_save_filename + ".csv"),
header=False,
)
return self
def join_datasets(self, data_source: str, data_target: str, **kwargs):
"""
Concatenate datasets the way determined in ``data_source`` and ``data_target``. Any combination of the elements
mentioned in ``data_source`` and ``data_target`` below are possible to be used.
Kwargs needs to fit the combination precisely.
For ``data_source``:
- 'memory' will concatenate datasets already loaded into memory.
- 'hdf5' will load datasets from a hdf5 file.
- 'loom' will load the dataset from a loom file.
- 'scvi' will load datasets using the scvi dataloaders.
- 'self' will simply map the currently loaded dataset onto the gene map.
For ``data_target``:
- 'memory' will store the concatenated dataset into memory.
- 'hdf5' will save the concatenated dataset into an hdf5 file.
- 'loom' will save the concatenated dataset into a loom file.
The ``kwargs`` for the various combinations are:
-- ``memory`` to ``memory``:
- ``gene_datasets``: list, a collection of scvi datasets to concatenate.
-- ``memory`` to ``hdf5``:
- ``gene_datasets``: list, a collection of scvi datasets to concatenate.
- ``out_filename``: str, the filename of the hdf5 file to load. Warns if file extension is not '.h5'.
-- ``memory`` to ``loom``:
- ``gene_datasets``: list, a collection of scvi datasets to concatenate.
- ``out_filename``: str, the filename of the loom file to load. Wanrs if file extension is not '.loom'.
-- ``hdf5`` to ``memory``:
- ``in_filename``: str, the filename of the hdf5 file to load. File extension should be '.h5'.
-- ``hdf5`` to ``loom``:
- ``in_filename``: str, the filename of the hdf5 file to load. File extension should be '.h5'.
- ``out_filename``: str, the filename of the outgoing loom file. Warns if file extension is not '.loom'.
-- ``loom`` to ``memory``:
- ``in_filename``: str, the filename of the loom file to load. File extension should be '.loom'.
- ``as_sparse``: bool, if true, will load the data into sparse matrix (default).
-- ``loom`` to ``hdf5``:
- ``in_filename``: str, the filename of the loom file to load. File extension should be '.loom'.
- ``out_filename``: str, the filename of the outgoing hdf5 file. Warns if file extension is not '.h5'.
-- ``scvi`` to ``memory``:
- ``dataset_classes``: List, list of class-initializers of scvi GeneExpression datasets
- ``dataset_names``: List, list of names complementing the dataset_classes (needed for some classes)
- ``dataset_args``: List, list of further positional arguments for when loading the datasets
-- ``scvi`` to ``hdf5``:
- ``dataset_classes``: List, list of class-initializers of scvi GeneExpression datasets
- ``dataset_names``: List, list of names complementing the dataset_classes (needed for some classes)
- ``dataset_args``: List, list of further positional arguments for when loading the datasets
- ``out_filename``: str, the filename of the outgoing hdf5 file. Warns if file extension is not '.h5'.
-- ``scvi`` to ``loom``:
- ``dataset_classes``: List, list of class-initializers of scvi GeneExpression datasets
- ``dataset_names``: List, list of names complementing the dataset_classes (needed for some classes)
- ``dataset_args``: List, list of further positional arguments for when loading the datasets
- ``out_filename``: str, the filename of the outgoing loom file. Warns if file extension is not '.loom'.
-- ``self`` to ``hdf5`` or ``loom``:
- ``out_filename``: str, the filename of the outgoing loom or hdf5 file. Warnings for file extension.
:param data_source: str, one of "memory", "hdf5", "loom", "self", or "scvi".
:param data_target: str, one of "memory", "hdf5", or "loom"
:param kwargs: the arguments for the chosen combination method.
:return: self
"""
if data_source not in ["memory", "hdf5", "loom", "self", "scvi"]:
raise ValueError(f"Parameter 'data_source={data_source}' not supported.")
if data_target not in ["memory", "hdf5", "loom"]:
raise ValueError(f"Parameter 'data_target={data_target}' not supported.")
eval(f"self._union_from_{data_source}_to_{data_target}(**kwargs)")
return self
def map_data(
self,
data: Union[np.ndarray, sp_sparse.csr_matrix],
gene_names: np.ndarray = None,
mappable_genes_indices: np.ndarray = None,
col_indices: np.ndarray = None,
) -> Union[np.ndarray, sp_sparse.lil_matrix]:
"""
Maps single cell data gene wise onto a predefined gene map.
Can take numpy arrays or scipy sparse matrices (assumes sparse matrix if not numpy).
:param data: ndarray (#cells, #genes) or scipy sparse matrix, the data to map
:param gene_names: ndarray (#genes,), gene codes to use for the mapping
:param mappable_genes_indices: ndarray (optional), the column indices of the input array, which slices the data.
A pre computed list of indices of the gene codes that can be found in the mapping (saves computational time if
the same indices are mapped repeatedly and can therefore be reused).
:param col_indices: ndarray (optional), the column indices of the output array, to which the sliced data
is being mapped.
:return: ndarray (#cells, #genes in mapping), the sliced data in the correct format of the gene map.
"""
# check for already provided source data location indices
if mappable_genes_indices is not None:
mappable_genes_indices = mappable_genes_indices.flatten()
else:
mappable_genes_indices = np.isin(
np.char.upper(gene_names), self.gene_map.index
).flatten()
# check for already provided target data location indices
if col_indices is not None:
col_indices = col_indices
else:
mappable_genes = gene_names[mappable_genes_indices]
col_indices = self.gene_map[mappable_genes].values
# actual data mapping now
if isinstance(data, np.ndarray):
data_out = np.zeros((data.shape[0], self.gene_names_len), dtype=data.dtype)
try:
data_out[:, col_indices] = data[:, mappable_genes_indices]
except Exception as e:
p = 3
else:
data_out = sp_sparse.csr_matrix(
([], ([], [])), shape=(0, self.gene_names_len), dtype=data.dtype
)
nr_rows = 5000
for i in range(0, data.shape[0], nr_rows):
data_mapped = data[i : i + nr_rows, mappable_genes_indices].toarray()
temp_mapped = np.zeros(
(data_mapped.shape[0], self.gene_names_len), dtype=data.dtype
)
temp_mapped[:, col_indices] = data_mapped
data_out = sp_sparse.vstack(
[data_out, sp_sparse.csr_matrix(temp_mapped)]
)
return data_out
    def set_gene_map_load_filename(self, filename: str = None) -> UnionDataset:
        """Set the file (without extension) the gene map is loaded from; returns self."""
        self.gene_map_load_filename = filename
        return self
def load_gene_map(self):
"""
Load the gene map from the file given in gene_map_load_filename.
"""
if self.gene_map_load_filename is not None:
self.gene_map = pd.read_csv(
os.path.join(self.save_path, self.gene_map_load_filename + ".csv"),
header=0,
index_col=0,
).sort_index()
index = self.gene_map.index.astype(str).str.upper()
self.gene_map = pd.Series(range(len(self.gene_map)), index=index)
self.gene_names = self.gene_map.index.values
self.gene_names_len = len(self.gene_names)
return self
    def set_gene_map_save_filename(self, filename: str):
        """Set the file (without extension) a built gene map is saved to; returns self."""
        # (annotation corrected from ``bool`` — a filename is a str)
        self.gene_map_save_filename = filename
        return self
    def set_memory_setting(self, low_memory: bool) -> UnionDataset:
        """
        Set the memory setting of the union object. As a side effect it also sets the appropriate collate method to the
        one of the datafile in question or to the standard (when the class handles data in memory).
        """
        if low_memory:
            self.low_memory = True
            # NOTE(review): assumes ``self.data_load_filename`` is set when
            # low_memory is requested — ``os.path.splitext(None)`` would raise
            # a TypeError here. Confirm callers guarantee this.
            filename, ext = os.path.splitext(self.data_load_filename)
            if ext == ".h5":
                self.collate_fn_base = self._collate_fn_base_h5
            elif ext == ".loom":
                self.collate_fn_base = self._collate_fn_base_loom
        else:
            self.low_memory = False
            # Fall back to the in-memory collate of the parent class.
            self.collate_fn_base = super(UnionDataset, self).collate_fn_base
        return self
    def set_data_load_filename(self, filename: str) -> UnionDataset:
        """Set the file data is loaded from; returns self."""
        self.data_load_filename = filename
        return self
    def set_data_save_filename(self, filename: str) -> UnionDataset:
        """Set the file concatenated data is saved to; returns self."""
        self.data_save_filename = filename
        return self
    def set_ignore_batch_annotation(self, ignore_batch_annotation: bool):
        """Toggle whether batch indices are collapsed to 0; returns self."""
        self.ignore_batch_annotation = ignore_batch_annotation
        return self
#############################
# #
# Internal Logic #
# #
#############################
    def _collate_fn_base_h5(
        self, attributes_and_types: Dict, indices: Iterable
    ) -> Tuple[torch.Tensor]:
        """
        Collate method specialization for loading the data from an hdf5 file.
        Unlike for the loom collate method, the indices need to be first collected by associated dataset, so that h5py
        can load all the chosen indices from each dataset at once, instead of lazily iterating over every index and
        searching for the respective dataset anew every time.
        """
        # h5py fancy indexing requires sorted, increasing indices.
        indices = np.asarray(indices)
        indices.sort()
        # Ensure the per-dataset genemap cache exists before reading.
        self._cache_genemap()
        batch = defaultdict(list)
        for attr, dtype in attributes_and_types.items():
            # NOTE(review): the second ``.astype(dtype)`` below is redundant
            # (``elems`` was already cast two lines up); harmless.
            elems = getattr(self, attr)[indices].astype(dtype)
            batch[attr].append(np.asarray(elems).astype(dtype))
        batch_out = []
        for _, elems in batch.items():
            # Stack each attribute's pieces and convert to a torch tensor.
            batch_out.append(torch.from_numpy(np.vstack(elems)))
        return tuple(batch_out)
def _collate_fn_base_loom(
self, attributes_and_types, indices
) -> Tuple[torch.Tensor]:
"""
Collate function specialization for loading the data from a loom file.
"""
indices = np.asarray(indices)
indices.sort()
batch = []
for attr, dtype in attributes_and_types.items():
elems = getattr(self, attr)[indices]
batch.append(torch.from_numpy(elems.astype(dtype)))
return tuple(batch)
    def _set_attributes(self, data_filename: str = None):
        """
        Set the attributes correctly after having concatenated datasets. This sets the data attribute ``X`` depending
        on the datafile extension, i.e. either of the metaloaders for loom or hdf5, if needed.

        :param data_filename: str (optional), file backing the union; defaults
            to ``self.data_load_filename`` when omitted (and conversely updates
            it when given).
        """
        if data_filename is None:
            data_filename = self.data_load_filename
        else:
            self.data_load_filename = data_filename
        filepath = os.path.join(self.save_path, data_filename)
        filename, ext = os.path.splitext(data_filename)
        # Re-apply the memory setting so the matching collate function is set.
        self.set_memory_setting(self.low_memory)
        if ext == ".h5":
            self._fill_index_map()
            self._cache_genemap()
            # get the info for all shapes of the datasets
            self.batch_indices, n_batches = self._load_batch_indices_from_hdf5(
                h5_filepath=filepath, attr_map=self.dataset_to_index_map
            )
            self.labels, self.cell_types = self._load_labels_from_hdf5(
                h5_filepath=filepath, attr_map=self.dataset_to_index_map
            )
            self.local_means, self.local_vars = self._load_local_means_vars_from_hdf5(
                h5_filepath=filepath, attr_map=self.dataset_to_index_map
            )
            # Out-of-memory metaloader standing in for the usual ndarray X.
            self.X = DataLoaderh5(
                "X",
                parent=self,
                h5_filepath=filepath,
                attr_map=self.index_to_dataset_map,
            )
            with h5py.File(filepath, "r") as h5_file:
                if not self.ignore_batch_annotation:
                    # Per-dataset local means/vars, concatenated in group order.
                    local_means = np.empty((0, 1))
                    local_vars = np.empty((0, 1))
                    for group_name, group in h5_file["Datasets"].items():
                        local_means = np.concatenate(
                            [local_means, group["local_means"][:]]
                        )
                        local_vars = np.concatenate(
                            [local_vars, group["local_vars"][:]]
                        )
                    self.local_means = local_means
                    self.local_vars = local_vars
                else:
                    # Batch annotation ignored: broadcast the global mean/var
                    # of the complete dataset to every cell.
                    self.local_means = (
                        np.repeat(
                            h5_file["Metadata"]["local_mean_complete_dataset"][
                                :
                            ].flatten(),
                            self.nb_cells,
                        )
                        .reshape(-1, 1)
                        .astype(np.float32)
                    )
                    self.local_vars = (
                        np.repeat(
                            h5_file["Metadata"]["local_var_complete_dataset"][
                                :
                            ].flatten(),
                            self.nb_cells,
                        )
                        .reshape(-1, 1)
                        .astype(np.float32)
                    )
        elif ext == ".loom":
            with loompy.connect(filepath) as ds:
                gene_names = ds.ra["Gene"]
                # The loom file must have been written against the same map.
                if np.any(gene_names != self.gene_names):
                    raise ValueError("Chosen gene map and dataset genes are not equal.")
                self.X = DataLoaderLoom(filepath)
                self.batch_indices = ds.ca["BatchID"]
                self.labels = ds.ca["ClusterID"]
                self.cell_types = ds.attrs["CellTypes"]
                if (self.labels > 0).all():
                    # All labels positive: shift to 0-based and drop the
                    # "undefined" placeholder cell type.
                    self.labels -= 1
                    self.cell_types = self.cell_types[self.cell_types != "undefined"]
                if self.ignore_batch_annotation:
                    # Broadcast the complete-dataset mean/var to every cell.
                    self.local_means = (
                        np.repeat(
                            ds.attrs["LocalMeanCompleteDataset"].flatten(),
                            self.nb_cells,
                        )
                        .reshape(-1, 1)
                        .astype(np.float32)
                    )
                    self.local_vars = (
                        np.repeat(
                            ds.attrs["LocalVarCompleteDataset"].flatten(), self.nb_cells
                        )
                        .reshape(-1, 1)
                        .astype(np.float32)
                    )
                else:
                    self.local_means = ds.ca["LocalMeans"]
                    self.local_vars = ds.ca["LocalVars"]
                self.name = ds.attrs["DatasetName"]
def _fill_index_map(self) -> None:
"""
If not already existing, create a mapping of each dataset to an associated index and its inverse.
This is needed to speed up later access. Its necessity is given by the fact that one wants a simple index
structure when accessing via indices, however an hdf5 file with multiple datasets would always first need to
know the dataset and then the dataset specific index to access any data.
In essence, we need a mapping of the kind
(Dataset_name: 10x_mouse_10k, index: 216) -> (index: 216)
(Dataset_name: 10x_mouse_10k, index: 429) -> (index: 429)
(Dataset_name: smartseq_m_3k, index: 216) -> (index: 10216)
(Dataset_name: smartseq_m_3k, index: 1659) -> (index: 11659)
and its inverse.
"""
if not self.index_to_dataset_map or not self.dataset_to_index_map:
with h5py.File(
os.path.join(self.save_path, self.data_load_filename), "r"
) as h5_file:
# Walk through all groups, extracting datasets
for group_name, group in h5_file["Datasets"].items():
shape = group["X"].shape
curr_index_len = len(self.index_to_dataset_map)
self.dataset_to_index_map[group_name] = [
curr_index_len + i for i in range(shape[0])
]
self.index_to_dataset_map.extend(
[(group_name, i) for i in range(shape[0])]
)
    def _cache_genemap(self, force_redo: bool = False):
        """
        Compute the mapping of the gene names of each dataset inside the hdf5 file. Useful for faster data mapping when
        loading from file later in training.

        :param force_redo: bool, if true the cache is recomputed (e.g. after a genemap change).
        """
        ext = None
        if self.data_load_filename is not None:
            _, ext = os.path.splitext(self.data_load_filename)
        # Rebuild only in low-memory mode, backed by a file, when the cache is
        # missing or explicitly invalidated.
        conditions = (
            self.low_memory
            and ext in [".h5", ".loom"]
            and (force_redo or not self.dataset_to_genemap_cache)
        )
        if conditions:
            self.dataset_to_genemap_cache = dict()
            # NOTE(review): the condition above admits ".loom" files, but the
            # file is always opened with h5py here — confirm loom-backed
            # unions never reach this branch.
            with h5py.File(
                os.path.join(self.save_path, self.data_load_filename), "r"
            ) as h5_file:
                # Walk through all groups, extracting datasets
                for group_name, group in h5_file["Datasets"].items():
                    gene_names = np.char.upper(group["gene_names"][:].astype(str))
                    # Which of this dataset's genes exist in the global map...
                    mappable_genes_indices = np.isin(gene_names, self.gene_map.index)
                    mappable_genes = gene_names[mappable_genes_indices]
                    # ...and where they land in the global gene ordering.
                    col_indices = self.gene_map[mappable_genes].values
                    col_indices.sort()
                    self.dataset_to_genemap_cache[group_name] = (
                        col_indices,
                        mappable_genes_indices.flatten(),
                    )
    def _load_dataset(
        self, ds_class, ds_args,
    ):
        """
        Helper method to load a dataset from the specified scvi class and further arguments.

        :param ds_class: object, the scvi dataloader provided as callable object.
        :param ds_args: dict, further kwargs for the dataloader. (Corrected from
            the earlier "list" description: the code below calls ``.update`` and
            ``**``-expands it, so it must be a dict.)
        :return: tuple, 1 - the dataset; 2 - the dataset class object; 3 - the dataset name
        """
        class_init_args = getfullargspec(ds_class.__init__).args
        # Inject the union's save_path whenever the dataloader accepts one.
        if "save_path" in class_init_args:
            ds_args.update({"save_path": self.save_path})
        dataset = ds_class(**ds_args)
        return dataset, ds_class, dataset.name
def _build_genemap_from_self(self):
return {
gene: pos
for (gene, pos) in zip(sorted(self.gene_names), range(len(self.gene_names)))
}
def _build_genemap_from_scvi(
self,
dataset_classes: List[GeneExpressionDataset],
dataset_names: List[str],
dataset_args: List[any] = None,
multiprocess: bool = True,
):
"""
Build the gene map by loading the datasets specified in the parameters through their respective dataloaders.
:param dataset_classes: list
:param dataset_names: list, strings of the names of the datasets.
:param dataset_args: list, list of lists of further keyword arguments to provide for each specific dataloader.
:param multiprocess: bool, load the datasets in parallel or serial.
:return: dict, the gene map as dictionary with the genes as keys and their positional number as value.
"""
if dataset_args is None:
dataset_args = [{}] * len(dataset_names)
total_genes = set()
def append_genes(dset):
nonlocal total_genes
if dset.gene_names is None:
# without gene names we can't build a proper mapping
warnings.warn(
f"Dataset {(ds_class, ds_name)} does not have gene_names as attribute. Skipping this dataset."
)
return
total_genes = total_genes.union(dataset.gene_names)
if not multiprocess:
for ds_name, ds_class, ds_args in zip(
dataset_names, dataset_classes, dataset_args
):
dataset, _, _ = self._load_dataset(ds_name, ds_class, ds_args)
append_genes(dataset)
else:
with ProcessPoolExecutor(
max_workers=min(len(dataset_names), cpu_count() // 2)
) as executor:
futures = list(
(
executor.submit(self._load_dataset, ds_name, ds_class, ds_args)
for ds_name, ds_class, ds_args in zip(
dataset_names, dataset_classes, dataset_args
)
)
)
for future in as_completed(futures):
dataset, ds_class, ds_name = future.result()
append_genes(dataset)
return {
gene: pos
for (gene, pos) in zip(sorted(total_genes), range(len(total_genes)))
}
def _build_genemap_from_hdf5(
self,
in_filename: str = None,
subselection_datasets: Union[List[str], np.ndarray] = None,
):
"""
Build the gene map by loading the gene names from a dataset inside an hdf5 file.
:param in_filename: str, the name of the hdf5 file.
:param subselection_datasets: list or ndarray, if provided, a list of dataset names that are to be considered.
:return: dict, the gene map as dictionary with the genes as keys and their positional number as value.
"""
if in_filename is None:
if self.data_load_filename is not None:
in_filename = self.data_load_filename
else:
raise ValueError("No filename to read from provided.")
total_genes = set()
with h5py.File(os.path.join(self.save_path, in_filename), "r") as h5file:
dataset_group = h5file["Datasets"]
for dataset_name, dataset_acc in dataset_group.items():
if (
subselection_datasets is not None
and dataset_name in subselection_datasets
):
total_genes = total_genes.union(
dataset_acc["gene_names"][:].astype(str)
)
return {
gene: pos
for (gene, pos) in zip(sorted(total_genes), range(len(total_genes)))
}
def _build_genemap_from_loom(
self, in_filename: str = None, gene_names_attribute_name: str = "Gene"
):
"""
Build the gene map by loading the gene names from a dataset inside a loom file.
:param in_filename: str, the name of the loom file.
:param gene_names_attribute_name: str, the accessor name of the attribute storing the gene names.
:return: dict, the gene map as dictionary with the genes as keys and their positional number as value.
"""
if in_filename is None and self.data_load_filename is not None:
in_filename = self.data_load_filename
else:
raise ValueError("No filename to read from provided.")
total_genes = set()
with loompy.connect(os.path.join(self.save_path, in_filename)) as ds:
for row_attribute_name in ds.ra:
if row_attribute_name == gene_names_attribute_name:
gene_names = np.char.upper(
ds.ra[gene_names_attribute_name].astype(str)
)
total_genes = total_genes.union(gene_names)
return {
gene: pos
for (gene, pos) in zip(sorted(total_genes), range(len(total_genes)))
}
@staticmethod
def _build_genemap_from_memory(gene_datasets: List[GeneExpressionDataset]):
"""
Build the gene map from datasets already loaded into memory.
:param gene_datasets: list, all datasets, that are meant to be used for the gene map.
:return: dict, the gene map as dictionary with the genes as keys and their positional number as value.
"""
total_genes = set()
for dataset in gene_datasets:
gene_names = np.char.upper(dataset.gene_names.astype(str))
total_genes = total_genes.union(gene_names)
return {
gene: pos
for (gene, pos) in zip(sorted(total_genes), range(len(total_genes)))
}
    @staticmethod
    def _dataset_class_str(dataset_class):
        # Extract the bare class name (e.g. "CortexDataset") from the class'
        # str() representation via the module-level ``class_regex_pattern``.
        # NOTE(review): ``re.search`` returns None when the pattern does not
        # match, making ``.group()`` raise AttributeError — confirm all
        # dataset classes produce a matching repr.
        return re.search(class_regex_pattern, str(dataset_class)).group()
    def _write_dataset_to_hdf5(
        self, out_filename, dataset, dataset_class, dataset_name
    ):
        """
        Method to append a dataset onto a previously created hdf5 file.

        :param out_filename: str, the filename of the hdf5 file (must already
            contain a "Datasets" group).
        :param dataset: GeneExpressionDataset, the dataset to write into the hdf5 file.
        :param dataset_class: object, the dataloader class this dataset is from
        :param dataset_name: str, the filename that characterizes the data (e.g. the data name from 10x datasets).
        """
        # Variable-length string dtype so gene names / cell types round-trip.
        string_dt = h5py.special_dtype(vlen=str)
        with h5py.File(os.path.join(self.save_path, out_filename), "a") as h5file:
            data_group = h5file["Datasets"]
            dataset_class_str = self._dataset_class_str(dataset_class)
            # grab the necessary data parts:
            # aside from the data itself (X), the gene_names, local means, local_vars, batch_indices and labels
            # there are no guaranteed attributes of each dataset. Thus for now these will be the ones we
            # work with
            X = dataset.X
            gene_names = dataset.gene_names
            local_means = dataset.local_means
            local_vars = dataset.local_vars
            batch_indices = dataset.batch_indices
            labels = dataset.labels
            # Build the group for the dataset, under which the data is going to be stored
            # We will store the above mentioned data in the following scheme:
            # -- Datasets
            # ---- 1st Dataset CLASS and NAME
            # ------ X
            # ------ gene_names
            # ------ local_means
            # ------ local_vars
            # ------ batch_indices
            # ------ labels
            # ------ (cell_types)
            # ---- 2nd Dataset CLASS and NAME
            # ------ ...
            # -- Metadata
            # ---- metadata1
            # ---- metadata2
            # ---- ...
            group_name = f"{dataset_class_str}_{dataset_name}"
            if group_name in data_group:
                # Name collision: probe suffixes _0, _1, ... until a free
                # group name is found, then warn about the rename.
                i = 0
                while True:
                    if f"{group_name}_{i}" not in data_group:
                        logger.warning(
                            f"Dataset group name {group_name} already exists. "
                            f"Appending suffix '_{i}'."
                        )
                        group_name = f"{group_name}_{i}"
                        break
                    i += 1
            dataset_h5_g = data_group.create_group(group_name)
            if isinstance(X, (sp_sparse.csr_matrix, sp_sparse.csc_matrix)):
                # Sparse input: write densified row chunks into a compressed
                # dataset to bound peak memory.
                dset = dataset_h5_g.create_dataset(
                    "X",
                    shape=(X.shape[0], len(dataset.gene_names)),
                    compression="lzf",
                    dtype=np.float32,
                )
                nr_rows = 5000
                for start in tqdm(
                    range(0, len(dataset), nr_rows),
                    desc="Writing sparse matrix iteratively to file",
                ):
                    sl = slice(start, min(start + nr_rows, X.shape[0]))
                    dset[sl, :] = X[sl, :].toarray().astype(np.float32)
            else:
                dataset_h5_g.create_dataset("X", data=X)
            dataset_h5_g.create_dataset(
                "gene_names", data=gene_names.astype(np.dtype("S")), dtype=string_dt
            )
            dataset_h5_g.create_dataset("local_means", data=local_means)
            dataset_h5_g.create_dataset("local_vars", data=local_vars)
            dataset_h5_g.create_dataset("batch_indices", data=batch_indices)
            dataset_h5_g.create_dataset("labels", data=labels)
            # cell_types is optional on scvi datasets; only store when present.
            if hasattr(dataset, "cell_types"):
                cell_types = dataset.cell_types
                dataset_h5_g.create_dataset(
                    "cell_types", data=cell_types.astype(np.dtype("S")), dtype=string_dt
                )
        return
def _write_dataset_to_loom(
self, dataset_ptr, dataset, dataset_name=None, dataset_class=None
):
"""
Method to write a dataset onto an opened loom file.
:param dataset_ptr: loom file pointer, the reference to the loom file, onto which the dataset should be written.
:param dataset: GeneExpressionDataset, the dataset to write.
:param dataset_name: str, the name of the current dataset to append to the total name
:param dataset_class: object, the class identifier, that signifies which dataloader this dataset came from.
"""
if dataset_name is None:
dataset_class_str = self._dataset_class_str(dataset_class)
dataset_ptr.attrs.DatasetName += f"_{dataset_class_str}"
else:
dataset_ptr.attrs.DatasetName += f"_{dataset_name}"
# grab the necessary data parts:
# aside from the data itself (X), the gene_names, local means, local_vars, batch_indices and labels
# there are no guaranteed attributes of each dataset.
X, gene_names, batch_indices, labels, local_means, local_vars = (
dataset.X,
dataset.gene_names,
dataset.batch_indices,
dataset.labels,
dataset.local_means,
dataset.local_means,
)
if not all(dataset.cell_types == "undefined"):
known_cts = [ct for ct in dataset_ptr.attrs.CellTypes]
cts = dataset.cell_types
labels = cts[labels]
# append cell type only if unknown
for ct in cts:
if ct not in known_cts:
known_cts.append(ct)
# remap from ["endothelial_cell", "B_cell", "B_cell", ...] to [3, 5, 5, ...]
for cat_from, cat_to in zip(cts, [known_cts.index(ct) for ct in cts]):
labels[labels == cat_from] = cat_to
labels = labels.astype(np.uint16)
dataset_ptr.attrs.CellTypes = known_cts
if "BatchID" in dataset_ptr.col_attrs:
max_batch_idx = dataset_ptr.ca["BatchID"].max() + 1
batch_indices = batch_indices + max_batch_idx
if isinstance(X, sp_sparse.csc_matrix):
X = X.tocsr()
if isinstance(X, sp_sparse.csr_matrix):
nr_rows = 5000
mappable_genes_indices = np.isin(
np.char.upper(gene_names), self.gene_map.index
)
mappable_genes = gene_names[mappable_genes_indices]
col_indices = self.gene_map[mappable_genes].values
for start in range(0, len(dataset), nr_rows):
select = slice(start, min(start + nr_rows, X.shape[0]))
X_batch = (
self.map_data(
data=X[select, :].toarray(),
col_indices=col_indices,
mappable_genes_indices=mappable_genes_indices,
)
.astype(np.int32)
.transpose()
)
dataset_ptr.add_columns(
X_batch,
col_attrs={
"ClusterID": labels[select],
"BatchID": batch_indices[select],
"LocalMeans": local_means[select],
"LocalVars": local_vars[select],
},
row_attrs={"Gene": self.gene_names},
)
else:
dataset_ptr.add_columns(
self.map_data(data=X, gene_names=gene_names).transpose(),
col_attrs={
"ClusterID": labels,
"BatchID": batch_indices,
"LocalMeans": local_means,
"LocalVars": local_vars,
},
row_attrs={"Gene": self.gene_names},
)
return
def _union_from_memory_to_hdf5(
    self, gene_datasets: List[GeneExpressionDataset], out_filename=None
):
    """
    Combines multiple unlabelled gene_datasets based on a mapping of gene names. Stores the final
    dataset onto a hdf5 file with filename ``out_filename``.

    :param out_filename: str, name of the file to which to write; defaults to
        ``self.data_save_filename``.
    :param gene_datasets: List, list of already loaded datasets of class ``GeneExpressionDataset``.
    :raises ValueError: if neither ``out_filename`` nor ``self.data_save_filename`` is set.
    """
    if out_filename is None:
        if self.data_save_filename is not None:
            out_filename = self.data_save_filename
        else:
            raise ValueError("No filename to write to provided.")
    filename, ext = os.path.splitext(out_filename)
    if ext != ".h5":
        # logger.warning: 'warn' is deprecated in the stdlib logging module
        logger.warning(
            f"Chosen file type is 'hdf5'. Yet provided ending is: '{ext}' versus expected ending: '.h5'."
        )
    with h5py.File(os.path.join(self.save_path, out_filename), "w") as h5file:
        # just opening the file to overwrite any existing content and enabling sub-function to simply append
        h5file.create_group("Datasets")
        h5file.create_group("Metadata")
    counts = []
    datasets_pbar = tqdm(gene_datasets)
    for dataset in datasets_pbar:
        dataset_class_str = self._dataset_class_str(type(dataset))
        datasets_pbar.set_description(
            f"Writing dataset {dataset_class_str, dataset.name} to h5 file"
        )
        self._write_dataset_to_hdf5(
            out_filename, dataset, type(dataset), dataset.name
        )
        counts.append(np.array(dataset.X.sum(axis=1)).flatten())
    # bugfix: concatenate the (generally unequal-length) per-dataset count vectors
    # first, then take the log; np.log over the ragged list relied on deprecated
    # ragged-array casting (cf. the loom path, which logs after concatenating).
    log_counts = np.log(np.concatenate(counts))
    total_lm = np.mean(log_counts).reshape(-1, 1).astype(np.float32)
    total_lv = np.var(log_counts).reshape(-1, 1).astype(np.float32)
    with h5py.File(os.path.join(self.save_path, out_filename), "a") as h5file:
        g = h5file["Metadata"]
        g.create_dataset("local_mean_complete_dataset", data=total_lm)
        g.create_dataset("local_var_complete_dataset", data=total_lv)
    self._set_attributes(out_filename)
    return
def _union_from_memory_to_loom(
    self, gene_datasets: List[GeneExpressionDataset], out_filename=None
):
    """
    Combines multiple unlabelled gene_datasets based on a mapping of gene names. Stores the final
    dataset onto a loom file with filename ``out_filename``.

    :param out_filename: str, name of the file to which to write; defaults to
        ``self.data_save_filename``.
    :param gene_datasets: List, list of already loaded datasets of class ``GeneExpressionDataset``.
    :raises ValueError: if neither ``out_filename`` nor ``self.data_save_filename`` is set.
    """
    if out_filename is None:
        if self.data_save_filename is not None:
            out_filename = self.data_save_filename
        else:
            raise ValueError("No filename to write to provided.")
    filename, ext = os.path.splitext(out_filename)
    if ext != ".loom":
        # fixed: logger.warn is deprecated in favour of logger.warning
        logger.warning(
            f"Chosen file type is 'loom'. Yet provided ending is: '{ext}' versus expected ending: '.loom'."
        )
    file = os.path.join(self.save_path, out_filename)
    counts = []
    with loompy.new(file) as dsout:
        dsout.attrs.CellTypes = ["undefined"]
        dsout.attrs.DatasetName = ""
        datasets_pbar = tqdm(gene_datasets)
        for dataset in datasets_pbar:
            dataset_class_str = self._dataset_class_str(type(dataset))
            datasets_pbar.set_description(
                f"Writing dataset '{dataset_class_str} - {dataset.name}' to loom file"
            )
            self._write_dataset_to_loom(dsout, dataset, type(dataset))
            counts.append(np.array(dataset.X.sum(axis=1)).flatten())
        cts = dsout.attrs.CellTypes[:]
        labels = dsout.ca["ClusterID"][:]
        if (labels > 0).all():
            labels = cts[labels]
            # there are no undefined cell types in the data. Removing this label.
            cts = np.sort(cts[cts != "undefined"])
        else:
            labels = cts[labels]
            cts.sort()
        labels, _ = remap_categories(original_categories=labels, mapping_from=cts)
        dsout.attrs.CellTypes = cts
        dsout.ca["ClusterID"] = labels
        # library-size statistics of the complete, batch-agnostic dataset
        log_counts = np.log(np.concatenate(counts))
        total_lm = np.mean(log_counts).reshape(-1, 1).astype(np.float32)
        total_lv = np.var(log_counts).reshape(-1, 1).astype(np.float32)
        dsout.attrs.LocalMeanCompleteDataset = total_lm
        dsout.attrs.LocalVarCompleteDataset = total_lv
    self._set_attributes(out_filename)
    return
def _union_from_memory_to_memory(
    self, gene_datasets: List[GeneExpressionDataset], shared_batches=False
):
    """
    Concatenate multiple already loaded, unlabelled gene_datasets (matched via the
    gene-name mapping) and populate this object directly in memory.

    :param gene_datasets: List, the loaded data sets of (inherited) class GeneExpressionDataset to concatenate.
    :param shared_batches: bool, whether the batch indices are shared between the datasets.
    """
    containers = defaultdict(list)
    batch_offset = 0
    for ds in tqdm(gene_datasets, desc="Concatenating datasets"):
        batch_offset = self._extract_content(
            ds, containers, batch_offset, shared_batches
        )
    all_labels = np.concatenate(containers["labels"])
    containers["local_means"] = np.concatenate(containers["local_means"])
    containers["local_vars"] = np.concatenate(containers["local_vars"])
    containers["cell_types"] = np.sort(np.unique(all_labels))
    containers["labels"], _ = remap_categories(all_labels, containers["cell_types"])
    # stack before populating so the per-dataset matrices can be released
    containers["X"] = sp_sparse.vstack(containers["X"])
    self.populate_from_data(**containers, gene_names=self.gene_names)
    logger.info(
        f"Joined {len(gene_datasets)} datasets to one of shape {self.nb_cells} x {self.gene_names_len}."
    )
    return
def _union_from_self_to_hdf5(self, out_filename):
"""
Simplifier for when one wants to write the loaded data to a hdf5 file.
:param out_filename: str, the filename of the hdf5 file to write.
"""
self._union_from_memory_to_hdf5(out_filename=out_filename, gene_datasets=[self])
return
def _union_from_self_to_loom(self, out_filename):
"""
Simplifier for when one wants to write the loaded data to a loom file.
:param out_filename: str, the filename of the loom file to write.
"""
self._union_from_memory_to_loom(out_filename=out_filename, gene_datasets=[self])
return
def _union_from_scvi_to_memory(
    self, dataset_classes=None, dataset_args=None, shared_batches=False
):
    """
    Loads scvi gene_datasets from the specified dataloaders and combines them based on a
    mapping of gene names. Loads the concatenated dataset into memory only.

    :param dataset_classes: List, list of class-initializers of scvi GeneExpression datasets.
    :param dataset_args: List, list of further positional arguments for when loading the datasets.
    :param shared_batches: bool, whether the batch_indices are shared or not for the datasets.
    """
    containers = defaultdict(list)
    n_batch_offset = 0
    if dataset_args is None:
        dataset_args = [{}] * len(dataset_classes)
    extraction_lock = Lock()
    with ThreadPoolExecutor() as executor:
        futures = [
            executor.submit(self._load_dataset, ds_class, ds_args)
            for ds_class, ds_args in zip(dataset_classes, dataset_args)
        ]
        for future in as_completed(futures):
            # _load_dataset returns a tuple whose first element is the dataset
            dataset = future.result()[0]
            # bugfix: use the lock as a context manager so it is released even if
            # _extract_content raises; the manual acquire/release leaked it on error
            with extraction_lock:
                n_batch_offset = self._extract_content(
                    dataset, containers, n_batch_offset, shared_batches
                )
    labels = np.concatenate(containers["labels"])
    containers["local_means"] = np.concatenate(containers["local_means"])
    containers["local_vars"] = np.concatenate(containers["local_vars"])
    containers["cell_types"] = np.sort(np.unique(labels))
    containers["labels"], _ = remap_categories(labels, containers["cell_types"])
    containers["X"] = sp_sparse.vstack(
        containers["X"]
    )  # done before the populate data file, in order to release unstacked memory of X
    self.populate_from_data(**containers, gene_names=self.gene_names)
def _union_from_scvi_to_hdf5(
    self, dataset_classes, dataset_args=None, out_filename=None
):
    """
    Combines multiple unlabelled gene_datasets based on a mapping of gene names. Stores the final
    dataset onto a Hdf5 file with filename ``out_filename``.

    :param dataset_classes: List, list of class-initializers of scvi GeneExpression datasets.
    :param dataset_args: List, list of further positional arguments for when loading the datasets.
    :param out_filename: str, name of the file to which to write; defaults to
        ``self.data_save_filename``.
    :raises ValueError: if neither ``out_filename`` nor ``self.data_save_filename`` is set.
    """
    if out_filename is None:
        if self.data_save_filename is not None:
            out_filename = self.data_save_filename
        else:
            raise ValueError("No filename to write to provided.")
    self._check_extension(out_filename, ".h5")
    with h5py.File(os.path.join(self.save_path, out_filename), "w") as h5file:
        # just opening the file to overwrite any existing content and enabling sub-function to simply append
        h5file.create_group("Datasets")
        h5file.create_group("Metadata")
    if dataset_args is None:
        dataset_args = [{}] * len(dataset_classes)
    write_lock = Lock()
    with ThreadPoolExecutor() as executor:
        futures = [
            executor.submit(self._load_dataset, ds_class, ds_args)
            for ds_class, ds_args in zip(dataset_classes, dataset_args)
        ]
        for future in as_completed(futures):
            dataset, dataset_class, dataset_fname = future.result()
            # bugfix: the lock is now a context manager so it is released even if
            # the write raises; datasets load in parallel, writes are serialized
            with write_lock:
                self._write_dataset_to_hdf5(
                    out_filename, dataset, dataset_class, dataset_fname
                )
    print(f"conversion completed to file '{out_filename}'")
    self._set_attributes(out_filename)
def _union_from_scvi_to_loom(
    self, dataset_classes, dataset_args=None, out_filename=None
):
    """
    Combines multiple unlabelled gene_datasets based on a mapping of gene names. Stores the final
    dataset onto a loom file with filename ``out_filename``.

    :param dataset_classes: List, list of class-initializers of scvi GeneExpression datasets.
    :param dataset_args: List, list of further positional arguments for when loading the datasets.
    :param out_filename: str, name of the file to which to write; defaults to
        ``self.data_save_filename``.
    :raises ValueError: if neither ``out_filename`` nor ``self.data_save_filename`` is set.
    """
    if out_filename is None:
        if self.data_save_filename is not None:
            out_filename = self.data_save_filename
        else:
            raise ValueError("No filename to write to provided.")
    self._check_extension(out_filename, ".loom")
    with loompy.new(os.path.join(self.save_path, out_filename)) as dsout:
        dsout.attrs.CellTypes = ["undefined"]
        dsout.attrs.DatasetName = ""
        if dataset_args is None:
            dataset_args = [{}] * len(dataset_classes)
        counts = []
        write_lock = Lock()
        with ThreadPoolExecutor() as executor:
            futures = [
                executor.submit(self._load_dataset, ds_class, ds_args)
                for ds_class, ds_args in zip(dataset_classes, dataset_args)
            ]
            futures_pbar = tqdm(as_completed(futures))
            for future in futures_pbar:
                # waiting on the result needs no lock; only the loom write does
                dataset, dataset_class, dataset_fname = future.result()
                dataset_class_str = self._dataset_class_str(type(dataset))
                futures_pbar.set_description(
                    f"Writing dataset '{dataset_class_str} - {dataset.name}' to loom file"
                )
                # bugfix: context manager guarantees release of the lock even if
                # the write raises (manual acquire/release leaked it on error)
                with write_lock:
                    self._write_dataset_to_loom(dsout, dataset, type(dataset))
                    counts.append(np.array(dataset.X.sum(axis=1)).flatten())
        # library-size statistics of the complete, batch-agnostic dataset
        log_counts = np.log(np.concatenate(counts))
        total_lm = np.mean(log_counts).reshape(-1, 1).astype(np.float32)
        total_lv = np.var(log_counts).reshape(-1, 1).astype(np.float32)
        dsout.attrs.LocalMeanCompleteDataset = total_lm
        dsout.attrs.LocalVarCompleteDataset = total_lv
        cts = dsout.attrs.CellTypes[:]
        labels = dsout.ca["ClusterID"][:]
        if (labels > 0).all():
            labels = cts[labels]
            # there are no undefined cell types in the data. Removing this label.
            cts = np.sort(cts[cts != "undefined"])
        else:
            labels = cts[labels]
            cts.sort()
        labels, _ = remap_categories(original_categories=labels, mapping_from=cts)
        dsout.attrs.CellTypes = cts
        dsout.ca["ClusterID"] = labels
    print(f"conversion completed to file '{out_filename}'")
    self._set_attributes(out_filename)
def _union_from_hdf5_to_memory(
    self,
    in_filename=None,
    datasets_subselection: List[str] = None,
    shared_batches=False,
):
    """
    Concatenate all (or a subselection of) datasets in an hdf5 file to memory.

    :param in_filename: str, the filename of the hdf5 file to read; defaults to
        ``self.data_load_filename``.
    :param datasets_subselection: list, dataset (group) names to consider; None selects all.
    :param shared_batches: bool, if True all batch annotation is considered to be from the
        same experiment; otherwise each dataset's batch indices are offset to stay distinct.
    :raises ValueError: if neither ``in_filename`` nor ``self.data_load_filename`` is set.
    """
    if in_filename is None:
        if self.data_load_filename is not None:
            in_filename = self.data_load_filename
        else:
            raise ValueError("No filename to read from provided.")

    def subselection_check(dset_name):
        # no subselection given -> every dataset group qualifies
        return datasets_subselection is None or dset_name in datasets_subselection

    containers = defaultdict(list)
    n_batch_offset = 0
    with h5py.File(os.path.join(self.save_path, in_filename), "r") as h5file:
        for group_name, group in h5file["Datasets"].items():
            if not subselection_check(group_name):
                continue
            containers["X"].append(
                sp_sparse.csr_matrix(
                    self.map_data(
                        group["X"][:],
                        np.char.upper(group["gene_names"][:].astype(str)),
                    )
                )
            )
            containers["local_means"].append(group["local_means"][:])
            containers["local_vars"].append(group["local_vars"][:])
            bis = group["batch_indices"][:]
            if not shared_batches:
                bis += n_batch_offset
                # bugfix: 'bis' already includes the previous offset, so the next
                # offset is simply its max + 1; '+=' double-counted the old offset
                # and left gaps in the batch numbering (cf. the hdf5-to-loom path).
                n_batch_offset = bis.max() + 1
            containers["batch_indices"].append(bis)
            if "cell_types" in group:
                # store cell-type names instead of per-dataset integer codes
                containers["labels"].append(
                    group["cell_types"][:][group["labels"][:]]
                )
            else:
                containers["labels"].append(np.repeat("undefined", len(bis)))
    labels = np.concatenate(containers["labels"])
    containers["local_means"] = np.concatenate(containers["local_means"])
    containers["local_vars"] = np.concatenate(containers["local_vars"])
    containers["cell_types"] = np.sort(np.unique(labels))
    containers["labels"], _ = remap_categories(labels, containers["cell_types"])
    containers["X"] = sp_sparse.vstack(containers["X"])
    self.populate_from_data(**containers, gene_names=self.gene_names)
    return
def _union_from_hdf5_to_loom(
    self,
    in_filename=None,
    out_filename=None,
    datasets_subselection: List[str] = None,
    shared_batches=False,
):
    """
    Concatenate all datasets mentioned in ``datasets_subselection`` in an hdf5 file to a loom file.

    :param in_filename: str, the filename of the hdf5 file to read.
    :param out_filename: str, the filename of the loom file to write to.
    :param datasets_subselection: list, a list of dataset names which are to be considered for concatenation.
    :param shared_batches: bool, if true all batch annotation is considered to be from the same experiment.
        Otherwise each new dataset will see its annotation data consecutively increased to create distinction.
    """
    if in_filename is None:
        if self.data_load_filename is not None:
            in_filename = self.data_load_filename
        else:
            raise ValueError("No filename to read from provided.")
    if out_filename is None:
        if self.data_save_filename is not None:
            out_filename = self.data_save_filename
        else:
            raise ValueError("No filename to write to provided.")
    subselection_check: Callable[
        [str], bool
    ] = lambda dset_name: dset_name in datasets_subselection
    if datasets_subselection is None:
        # no subselection given -> accept every dataset group
        subselection_check = lambda dset_name: True
    labels = []
    cell_types = {"undefined"}
    with h5py.File(
        os.path.join(self.save_path, in_filename), "r"
    ) as h5file, loompy.new(os.path.join(self.save_path, out_filename)) as loomfile:
        loomfile.attrs.CellTypes = []
        loomfile.attrs.DatasetName = ""
        dataset_group = h5file["Datasets"]
        # first pass: collect the cell-type names of every selected dataset so one
        # global, sorted vocabulary can be fixed before any data is written
        for group_name, group in dataset_group.items():
            if subselection_check(group_name):
                if "cell_types" in group:
                    cts = group["cell_types"][:]
                    labels.append(cts[group["labels"][:]])
                    cell_types = cell_types.union(cts)
                else:
                    labels.append(np.repeat("undefined", group["X"].shape[0]))
        labels = np.concatenate(labels)
        if (labels != "undefined").all():
            # every cell has a concrete type; drop the placeholder from the vocabulary
            cell_types.remove("undefined")
        cell_types = sorted(cell_types)
        labels, _ = remap_categories(labels, mapping_from=cell_types)
        start_idx = 0
        n_batch_offset = 0
        # second pass: load each selected dataset, map its genes, populate self with
        # it and append it to the loom file; start_idx walks through the global
        # 'labels' array in the same group order as the first pass
        for group_name, group in dataset_group.items():
            if subselection_check(group_name):
                X = sp_sparse.csr_matrix(
                    self.map_data(
                        group["X"][:],
                        np.char.upper(group["gene_names"][:].astype(str)),
                    )
                )
                lbs = labels[start_idx : start_idx + X.shape[0]]
                local_means = group["local_means"][:]
                local_vars = group["local_vars"][:]
                batch_indices = group["batch_indices"][:]
                if not shared_batches:
                    # offset this dataset's batches past all previously seen ones
                    batch_indices += n_batch_offset
                    n_batch_offset = batch_indices.max() + 1
                start_idx = start_idx + X.shape[0]
                self.populate_from_data(
                    X=X,
                    batch_indices=batch_indices,
                    labels=lbs,
                    gene_names=self.gene_names,
                    cell_types=cell_types,
                    local_means=local_means,
                    local_vars=local_vars,
                )
                self._write_dataset_to_loom(loomfile, self, dataset_name=group_name)
    # NOTE(review): attributes are (re)set from the hdf5 *input* file here, while the
    # memory-to-loom path uses out_filename — confirm this asymmetry is intended.
    self._set_attributes(in_filename)
    return
def _union_from_loom_to_memory(
    self,
    in_filename=None,
    as_sparse=True,
    gene_names_attribute_name="Gene",
    batch_indices_attribute_name="BatchID",
    local_means_attribute_name="LocalMeans",
    local_vars_attribute_name="LocalVars",
    labels_attribute_name="ClusterID",
    cell_types_attribute_name="CellTypes",
    total_local_mean_attribute_name="LocalMeanCompleteDataset",
    total_local_var_attribute_name="LocalVarCompleteDataset",
    dataset_name_attribute_name="DatasetName",
):
    """
    Loads a loom file completely into memory. If ``as_sparse`` is true, the data will be loaded into a scipy sparse
    matrix, otherwise a dense numpy array.
    The loading code has been taken mostly from the loom dataloader class in ``loom.py``.

    :param in_filename: str, filename of the loom file; defaults to ``self.data_load_filename``.
    :param as_sparse: bool, flag whether to use sparse matrices or numpy arrays.
    :param gene_names_attribute_name: str, the identifier for the gene names attribute within the loom file.
    :param batch_indices_attribute_name: str, the identifier for the batch indices attribute within the loom file.
    :param local_means_attribute_name: str, the identifier for the local means attribute within the loom file.
    :param local_vars_attribute_name: str, the identifier for the local vars attribute within the loom file.
    :param labels_attribute_name: str, the identifier for the label attribute within the loom file.
    :param cell_types_attribute_name: str, the identifier for the cell types attribute within the loom file.
    :param total_local_mean_attribute_name: str, the identifier for the attribute storing the local_mean of
        each data entry if all data were from a single batch; used when batch annotation is ignored.
    :param total_local_var_attribute_name: str, the counterpart of @total_local_mean for the local variance.
    :param dataset_name_attribute_name: str, the identifier for the dataset name attribute within the loom file.
    :raises ValueError: if neither ``in_filename`` nor ``self.data_load_filename`` is set.
    """
    if in_filename is None:
        if self.data_load_filename is not None:
            in_filename = self.data_load_filename
        else:
            raise ValueError("No filename to read from provided.")
    batch_indices = None
    labels = None
    cell_types = None
    gene_names = None
    local_means = None
    local_vars = None
    name = None
    # bugfix: these two dicts were referenced before assignment inside the attribute
    # loops below, raising NameError for loom files carrying extra ra/ca attributes
    gene_attributes_dict = None
    cell_attributes_dict = None
    with loompy.connect(os.path.join(self.save_path, in_filename)) as ds:
        for row_attribute_name in ds.ra:
            if row_attribute_name == gene_names_attribute_name:
                gene_names = np.char.upper(
                    ds.ra[gene_names_attribute_name].astype(str)
                )
            else:
                # lazily create the dict for any additional row attributes
                gene_attributes_dict = (
                    gene_attributes_dict if gene_attributes_dict is not None else {}
                )
                gene_attributes_dict[row_attribute_name] = ds.ra[row_attribute_name]
        for column_attribute_name in ds.ca:
            if column_attribute_name == batch_indices_attribute_name:
                batch_indices = ds.ca[batch_indices_attribute_name][:].astype(int)
            elif column_attribute_name == labels_attribute_name:
                labels = ds.ca[labels_attribute_name][:].astype(int)
            elif column_attribute_name == local_means_attribute_name:
                if not self.ignore_batch_annotation:
                    local_means = ds.ca[local_means_attribute_name]
            elif column_attribute_name == local_vars_attribute_name:
                if not self.ignore_batch_annotation:
                    local_vars = ds.ca[local_vars_attribute_name]
            else:
                cell_attributes_dict = (
                    cell_attributes_dict if cell_attributes_dict is not None else {}
                )
                cell_attributes_dict[column_attribute_name] = ds.ca[
                    column_attribute_name
                ][:]
        global_attributes_dict = None
        for global_attribute_name in ds.attrs:
            if global_attribute_name == cell_types_attribute_name:
                cell_types = ds.attrs[cell_types_attribute_name].astype(str)
            elif global_attribute_name == total_local_mean_attribute_name:
                if self.ignore_batch_annotation:
                    local_means = ds.attrs[total_local_mean_attribute_name]
            elif global_attribute_name == total_local_var_attribute_name:
                if self.ignore_batch_annotation:
                    local_vars = ds.attrs[total_local_var_attribute_name]
            elif global_attribute_name == dataset_name_attribute_name:
                name = ds.attrs[dataset_name_attribute_name].astype(str)
            else:
                global_attributes_dict = (
                    global_attributes_dict
                    if global_attributes_dict is not None
                    else {}
                )
                global_attributes_dict[global_attribute_name] = ds.attrs[
                    global_attribute_name
                ]
        if global_attributes_dict is not None:
            self.global_attributes_dict = global_attributes_dict
        if as_sparse:
            shape = ds.shape
            nr_rows = 5000
            # start from an empty csr matrix and stack transposed (cells x genes)
            # row blocks onto it to bound peak memory usage
            X = sp_sparse.csr_matrix(
                ([], ([], [])), shape=(0, shape[0]), dtype=np.float32
            )
            for i in tqdm(
                range(0, shape[1], nr_rows),
                desc="Loading from file to memory iteratively",
            ):
                X = sp_sparse.vstack(
                    [X, sp_sparse.csr_matrix(ds[:, i : i + nr_rows].T)]
                )
        else:
            X = ds[:, :].T
        self.populate_from_data(
            X=X,
            gene_names=gene_names,
            batch_indices=batch_indices,
            cell_types=cell_types,
            labels=labels,
            # NOTE(review): repeating is only correct for the (1, 1)-shaped totals
            # read from ds.attrs (ignore_batch_annotation case); if per-cell values
            # came from ds.ca this inflates them — confirm intended usage.
            local_means=np.repeat(local_means, X.shape[0], axis=0),
            local_vars=np.repeat(local_vars, X.shape[0], axis=0),
            name=name,
        )
    return
def _union_from_loom_to_hdf5(
    self,
    in_filename=None,
    out_filename=None,
    as_sparse=True,
    gene_names_attribute_name="Gene",
    batch_indices_attribute_name="BatchID",
    local_means_attribute_name="LocalMeans",
    local_vars_attribute_name="LocalVars",
    labels_attribute_name="ClusterID",
    cell_types_attribute_name="CellTypes",
    total_local_mean_attribute_name="LocalMeanCompleteDataset",
    total_local_var_attribute_name="LocalVarCompleteDataset",
    dataset_name_attribute_name="DatasetName",
):
    """
    Converts a loom file into an hdf5 file, copying data and annotation attributes over.
    The loading code has been taken mostly from the loom dataloader class in ``loom.py``.

    :param in_filename: str, filename of the loom file; defaults to ``self.data_load_filename``.
    :param out_filename: str, filename of the hdf5 file to write; defaults to
        ``self.data_save_filename``.
    :param as_sparse: bool, flag whether to use sparse matrices or numpy arrays.
    :param gene_names_attribute_name: str, the identifier for the gene names attribute within the loom file.
    :param batch_indices_attribute_name: str, the identifier for the batch indices attribute within the loom file.
    :param local_means_attribute_name: str, the identifier for the local means attribute within the loom file.
    :param local_vars_attribute_name: str, the identifier for the local vars attribute within the loom file.
    :param labels_attribute_name: str, the identifier for the label attribute within the loom file.
    :param cell_types_attribute_name: str, the identifier for the cell types attribute within the loom file.
    :param total_local_mean_attribute_name: str, the identifier for the attribute storing the local_mean of
        each data entry if all data were from a single batch; used when batch annotation is ignored.
    :param total_local_var_attribute_name: str, the counterpart of @total_local_mean for the local variance.
    :param dataset_name_attribute_name: str, the identifier for the dataset name attribute within the loom file.
    :raises ValueError: if an input or output filename is neither passed nor configured.
    """
    if in_filename is None:
        if self.data_load_filename is not None:
            in_filename = self.data_load_filename
        else:
            raise ValueError("No filename to read from provided.")
    if out_filename is None:
        if self.data_save_filename is not None:
            out_filename = self.data_save_filename
        else:
            raise ValueError("No filename to save to provided.")
    batch_indices = None
    labels = None
    cell_types = None
    gene_names = None
    local_means = None
    local_vars = None
    # bugfix: these two dicts were referenced before assignment inside the attribute
    # loops below, raising NameError for loom files carrying extra ra/ca attributes
    gene_attributes_dict = None
    cell_attributes_dict = None
    string_dt = h5py.special_dtype(vlen=str)
    with loompy.connect(os.path.join(self.save_path, in_filename)) as ds, h5py.File(
        os.path.join(self.save_path, out_filename), "w"
    ) as h5file:
        h5file.create_group("Datasets")
        h5file.create_group("Metadata")
        data_group = h5file["Datasets"]
        try:
            name = ds.attrs[dataset_name_attribute_name].astype(str)
        except KeyError:
            # no dataset name stored in the loom file; fall back to a placeholder
            name = "UNKNOWN_NAME"
        dataset_group = data_group.create_group(name)
        # X is stored transposed relative to loom (cells x genes), chunk-compressed
        ds_out = dataset_group.create_dataset(
            "X",
            shape=(ds.shape[1], ds.shape[0]),
            compression="lzf",
            dtype=np.float32,
        )
        for row_attribute_name in ds.ra:
            if row_attribute_name == gene_names_attribute_name:
                gene_names = np.char.upper(
                    ds.ra[gene_names_attribute_name].astype(str)
                )
            else:
                gene_attributes_dict = (
                    gene_attributes_dict if gene_attributes_dict is not None else {}
                )
                gene_attributes_dict[row_attribute_name] = ds.ra[row_attribute_name]
        for column_attribute_name in ds.ca:
            if column_attribute_name == batch_indices_attribute_name:
                batch_indices = ds.ca[batch_indices_attribute_name][:].astype(int)
            elif column_attribute_name == labels_attribute_name:
                labels = ds.ca[labels_attribute_name][:].astype(int)
            elif column_attribute_name == local_means_attribute_name:
                if not self.ignore_batch_annotation:
                    local_means = ds.ca[local_means_attribute_name]
            elif column_attribute_name == local_vars_attribute_name:
                if not self.ignore_batch_annotation:
                    local_vars = ds.ca[local_vars_attribute_name]
            else:
                cell_attributes_dict = (
                    cell_attributes_dict if cell_attributes_dict is not None else {}
                )
                cell_attributes_dict[column_attribute_name] = ds.ca[
                    column_attribute_name
                ][:]
        global_attributes_dict = None
        for global_attribute_name in ds.attrs:
            if global_attribute_name == cell_types_attribute_name:
                cell_types = ds.attrs[cell_types_attribute_name].astype(str)
            elif global_attribute_name == total_local_mean_attribute_name:
                if self.ignore_batch_annotation:
                    local_means = ds.attrs[total_local_mean_attribute_name]
            elif global_attribute_name == total_local_var_attribute_name:
                if self.ignore_batch_annotation:
                    local_vars = ds.attrs[total_local_var_attribute_name]
            else:
                global_attributes_dict = (
                    global_attributes_dict
                    if global_attributes_dict is not None
                    else {}
                )
                global_attributes_dict[global_attribute_name] = ds.attrs[
                    global_attribute_name
                ]
        dataset_group.create_dataset(
            "gene_names", data=gene_names.astype(np.dtype("S")), dtype=string_dt
        )
        dataset_group.create_dataset("local_means", data=local_means)
        dataset_group.create_dataset("local_vars", data=local_vars)
        dataset_group.create_dataset("batch_indices", data=batch_indices)
        dataset_group.create_dataset("labels", data=labels)
        dataset_group.create_dataset(
            "cell_types", data=cell_types.astype(np.dtype("S")), dtype=string_dt
        )
        if global_attributes_dict is not None:
            self.global_attributes_dict = global_attributes_dict
        shape = ds.shape
        nr_rows = 5000
        # copy the matrix in column blocks (loom is genes x cells) to bound memory
        for i in tqdm(
            range(0, shape[1], nr_rows),
            desc="Loading from loom and writing to hdf5 iteratively",
        ):
            ds_out[i : i + nr_rows, :] = ds[:, i : i + nr_rows].astype(np.float32).T
    self._set_attributes(in_filename)
    return
#############################
# #
# utils #
# #
#############################
@staticmethod
def _check_extension(out_filename, expected):
_, ext = os.path.splitext(out_filename)
if ext != expected:
logger.warn(f"Provided file type is '{ext}', but expected: '{expected}'.")
def _compute_library_size(self, data, batch_size=None):
if self.low_memory:
logger.warn(
"Library size computation ignored in low memory mode. "
"Ensure you have loaded the local means and vars!"
)
else:
sum_counts = data.sum(axis=1)
log_counts = np.log(sum_counts)
m = np.mean(log_counts)
v = np.var(log_counts)
return (
np.array(m).astype(np.float32).reshape(-1, 1),
np.array(v).reshape(-1, 1).astype(np.float32),
)
def _extract_content(
self, dataset, containers, n_batch_offset, shared_batches
) -> int:
"""
Helper function to extract the main information contained in an scvi dataset to be put into an incoming
storage container.
This container is updated in-place.
The extracted variables are:
- X
- gene_names
- local_means
- local_vars
- batch_indices
- labels
However, the labels are not integers referring to a list of cell types, but rather the cell types themselves.
:return: int, the updated batch_offset counter
"""
containers["X"].append(
sp_sparse.csr_matrix(
self.map_data(dataset.X, np.char.upper(dataset.gene_names))
)
)
containers["local_means"].append(dataset.local_means)
containers["local_vars"].append(dataset.local_vars)
bis = dataset.batch_indices.copy()
if not shared_batches:
bis += n_batch_offset
n_batch_offset += bis.max() + 1
containers["batch_indices"].append(bis)
if dataset.cell_types is not None:
containers["labels"].append(dataset.cell_types[dataset.labels])
else:
containers["labels"].append(np.repeat("undefined", len(bis)))
return n_batch_offset
@staticmethod
def _compute_nr_datapoints_in_hdf5(h5_filepath):
    """
    Return the total number of cells (rows of X) across all dataset groups in the file.

    :param h5_filepath: str, path of the hdf5 file (dataset groups live under "Datasets").
    """
    with h5py.File(h5_filepath, "r") as h5file:
        return sum(
            group["X"].shape[0] for group in h5file["Datasets"].values()
        )
@staticmethod
def _load_local_means_vars_from_hdf5(
    h5_filepath, attr_map,
):
    """
    Gather the per-cell local means and variances from every dataset group in the hdf5 file.

    :param h5_filepath: str, path of the hdf5 file.
    :param attr_map: Mapping, group name -> global cell indices of that dataset.
    :return: tuple of float32 arrays (local_means, local_vars) covering all cells.
    """
    total_cells = UnionDataset._compute_nr_datapoints_in_hdf5(h5_filepath)
    means_out = np.zeros(total_cells, dtype=np.float32)
    vars_out = np.zeros(total_cells, dtype=np.float32)
    with h5py.File(h5_filepath, "r") as h5file:
        for name, group in h5file["Datasets"].items():
            # scatter this dataset's values into its global cell positions
            indices = np.array(attr_map[name])
            means_out[indices] = group["local_means"][:].flatten()
            vars_out[indices] = group["local_vars"][:].flatten()
    return means_out, vars_out
@staticmethod
def _load_batch_indices_from_hdf5(h5_filepath, attr_map):
    """
    Gather the batch indices from every dataset group, offsetting each dataset's batches
    so they do not overlap with those of previously read datasets.

    :param h5_filepath: str, path of the hdf5 file.
    :param attr_map: Mapping, group name -> global cell indices of that dataset.
    :return: tuple (batch_indices, n_batches).
    """
    total_cells = UnionDataset._compute_nr_datapoints_in_hdf5(h5_filepath)
    batch_indices = np.zeros(total_cells, dtype=np.int64)
    with h5py.File(h5_filepath, "r") as h5file:
        offset = 0
        for name, group in h5file["Datasets"].items():
            raw_bis = group["batch_indices"][:].flatten()
            batch_indices[attr_map[name]] = raw_bis + offset
            # raw indices start at 0 per dataset, so advance by their count of batches
            offset += raw_bis.max() + 1
    n_batches = len(np.unique(batch_indices).astype(np.int64))
    return batch_indices, n_batches
@staticmethod
def _load_labels_from_hdf5(h5_filepath, attr_map):
    """
    Load the labels of all dataset groups in the hdf5 file and remap them onto a single
    shared, sorted cell-type vocabulary.

    :param h5_filepath: str, path of the hdf5 file.
    :param attr_map: Mapping, group name -> global cell indices of that dataset.
    :return: tuple (labels, cell_types); labels is an (n_cells, 1) integer array indexing
        into the sorted ``cell_types`` array.
    """
    labels = np.zeros(
        UnionDataset._compute_nr_datapoints_in_hdf5(h5_filepath), dtype=np.int64
    )
    known_cts = ["undefined"]  # known cell types
    with h5py.File(h5_filepath, "r") as h5file:
        for group_name, group in h5file["Datasets"].items():
            lbs = group["labels"][:]
            if lbs.sum() == 0:
                # all labels are 0 -> whole group is 'undefined'; nothing to remap
                continue
            cts = group["cell_types"][:]
            # NOTE(review): cts read from hdf5 may come back as bytes while known_cts
            # holds str — confirm datasets were written with the vlen-str dtype.
            lbs = cts[lbs]
            # append cell type only if unknown
            for ct in cts:
                if ct not in known_cts:
                    known_cts.append(ct)
            # remap from ["endothelial_cell", "B_cell", "B_cell", ...] to [3, 5, 3, ...]
            for cat_from, cat_to in zip(cts, [known_cts.index(ct) for ct in cts]):
                lbs[lbs == cat_from] = cat_to
            # scatter this dataset's labels into their global cell positions
            this_dataset_indices = np.array(attr_map[group_name])
            labels[this_dataset_indices] = lbs.flatten()
    if (labels > 0).all():
        # there are no undefined cell types
        known_cts.remove("undefined")
        labels -= 1  # reduce the label of all cells by 1
    # translate integer codes to names, then remap against the sorted vocabulary
    labels = np.array(known_cts)[labels]
    known_cts = np.sort(known_cts)
    labels, _ = remap_categories(labels, mapping_from=known_cts)
    labels = labels.reshape(-1, 1)
    cell_types = np.array(known_cts)
    return labels, cell_types
#############################
# #
# Base methods override #
# #
#############################
def compute_library_size_batch(self):
    """
    Computes the library size per batch. Overrides base method with a method that practically avoids computing the
    library size for the low memory setting, because computing library size for data stemming from out of memory is
    incredibly slow.
    More a hotfix than a smart solution.
    """
    # In low-memory mode the whole computation is skipped (X is an
    # out-of-memory loader and row scans are prohibitively slow).
    if not self.low_memory:
        self.local_means = np.zeros((self.nb_cells, 1))
        self.local_vars = np.zeros((self.nb_cells, 1))
        for i_batch in range(self.n_batches):
            # Cells belonging to this batch.
            idx_batch = np.squeeze(self.batch_indices == i_batch)
            (
                self.local_means[idx_batch],
                self.local_vars[idx_batch],
            ) = self._compute_library_size(
                self.X[idx_batch], batch_size=len(idx_batch)
            )
        self.cell_attribute_names.update(["local_means", "local_vars"])
def populate_from_data(
    self,
    X: Union[np.ndarray, sp_sparse.csr_matrix],
    Ys: List[CellMeasurement] = None,
    batch_indices: Union[List[int], np.ndarray, sp_sparse.csr_matrix] = None,
    labels: Union[List[int], np.ndarray, sp_sparse.csr_matrix] = None,
    gene_names: Union[List[str], np.ndarray] = None,
    cell_types: Union[List[str], np.ndarray] = None,
    cell_attributes_dict: Dict[str, Union[List, np.ndarray]] = None,
    gene_attributes_dict: Dict[str, Union[List, np.ndarray]] = None,
    remap_attributes: bool = True,
    **kwargs,
):
    """
    Base class method override to allow the setting of extra features such as local_means via kwargs.

    NOTE(review): the incoming ``gene_names`` parameter is ignored in favor of
    ``self.gene_names``, and ``batch_indices`` is passed through
    ``np.concatenate`` (so it must be a sequence of per-dataset arrays) —
    confirm both with the callers.
    """
    super().populate_from_data(
        X=X,
        Ys=Ys,
        batch_indices=np.concatenate(batch_indices),
        labels=labels,
        gene_names=self.gene_names,
        cell_types=cell_types,
        cell_attributes_dict=cell_attributes_dict,
        gene_attributes_dict=gene_attributes_dict,
        remap_attributes=remap_attributes,
    )
    # Any extra keyword (e.g. local_means/local_vars) becomes an attribute.
    for kwarg, value in kwargs.items():
        setattr(self, kwarg, value)
    return self
@property
def nb_genes(self) -> int:
    # Gene count is tracked separately from X because X may be an
    # out-of-memory loader whose column count is unknown (shape[1] is None).
    return self.gene_names_len

@nb_genes.setter
def nb_genes(self, nb_genes: int):
    self.gene_names_len = nb_genes
@property
def batch_indices(self) -> np.ndarray:
    return self._batch_indices

@batch_indices.setter
def batch_indices(self, batch_indices):
    """Sets batch indices and the number of batches."""
    if not self.ignore_batch_annotation:
        # Stored as a (n_cells, 1) uint16 column vector.
        batch_indices = np.asarray(batch_indices, dtype=np.uint16).reshape(-1, 1)
        self.n_batches = len(np.unique(batch_indices))
        self._batch_indices = batch_indices
    else:
        logger.info("Union dataset is set to ignore batch annotation.")
        # All cells are collapsed into batch 0.
        self._batch_indices = np.zeros((len(batch_indices), 1), dtype=np.int64)
        # NOTE(review): n_batches is still derived from the *incoming*
        # indices even though the stored ones are all zero — looks like it
        # should be 1 here; confirm before changing.
        self.n_batches = len(np.unique(batch_indices))
@property
def labels(self) -> np.ndarray:
    return self._labels

@labels.setter
def labels(self, labels: Union[List[int], np.ndarray]):
    """Sets labels and the number of labels"""
    # Stored as a (n_cells, 1) uint16 column vector.
    labels = np.asarray(labels, dtype=np.uint16).reshape(-1, 1)
    self.n_labels = len(np.unique(labels))
    self._labels = labels
@property
def X(self):
    return self._X

@X.setter
def X(
    self, X: Union[np.ndarray, sp_sparse.csr_matrix, DataLoaderh5, DataLoaderLoom]
):
    """Sets the data attribute ``X`` without recomputing the library size."""
    # Accepts in-memory matrices as well as the lazy out-of-memory loaders;
    # only a 2-D shape is required.
    n_dim = len(X.shape)
    if n_dim != 2:
        raise ValueError(
            "Gene expression data should be 2-dimensional not {}-dimensional.".format(
                n_dim
            )
        )
    self._X = X
class DataLoaderh5:
    """
    Lazy, row-indexable accessor for one per-cell attribute (e.g. "X") that is
    split across the dataset groups of a single HDF5 file. Loaded rows are
    mapped onto the parent's common gene set.
    """

    def __init__(self, attr, parent, h5_filepath=None, attr_map=None):
        # attr: name of the HDF5 dataset to read inside each group.
        # parent: owner object providing map_data() and dataset_to_genemap_cache.
        # attr_map: global cell index -> (dataset group name, local row index).
        self.attr = attr
        self.parent = parent
        self.h5_filepath = h5_filepath
        self.attr_map = attr_map
        self.shape = 0
        # Total cell count = sum of X rows over all dataset groups.
        with h5py.File(self.h5_filepath, "r") as h5file:
            for group_name, group in h5file["Datasets"].items():
                self.shape += group["X"].shape[0]
        # Column count is unknown until gene mapping, hence None.
        self.shape = (self.shape, None)

    def __len__(self):
        return self.shape[0]

    def __getitem__(self, idx: Union[List, np.ndarray]):
        # Boolean masks are converted to integer positions first.
        if np.array(idx).dtype == bool:
            idx = np.arange(len(self.attr_map))[idx]
        data = self.load_data(idx)
        # NOTE(review): load_data already vstacks; this second vstack is a
        # no-op on a 2-D array — confirm before removing.
        return np.vstack(data)

    def load_data(self, indices, map_data=True):
        """
        Load the requested global rows, grouped per source dataset, and
        (optionally) map each chunk onto the parent's common gene set.
        """
        datasets_to_indices = defaultdict(list)
        data = []
        # Bucket global indices by the dataset group they came from.
        for i in np.atleast_1d(indices):
            dset, i = self.attr_map[i]
            datasets_to_indices[dset].append(i)
        with h5py.File(self.h5_filepath, "r") as h5_file:
            datasets = h5_file["Datasets"]
            for ds_specifier, indices in datasets_to_indices.items():
                group = datasets[ds_specifier]
                loaded_data = group[self.attr][indices]
                # Cached column mapping of this dataset's genes onto the
                # union gene set.
                col_indices, mappable_gene_ind = self.parent.dataset_to_genemap_cache[
                    ds_specifier
                ]
                if map_data:
                    loaded_data = self.parent.map_data(
                        loaded_data,
                        mappable_genes_indices=mappable_gene_ind,
                        col_indices=col_indices,
                    )
                data.append(loaded_data)
        return np.vstack(data)
class DataLoaderLoom:
    """
    Lazy row accessor over a loom file. The loom layout is genes x cells, so
    the exposed shape is transposed to cells x genes.
    """

    def __init__(
        self, loom_filepath=None,
    ):
        self.loom_filepath = loom_filepath
        with loompy.connect(self.loom_filepath) as loom_file:
            n_genes, n_cells = loom_file.shape
            self.shape = (n_cells, n_genes)
            self.dtype = loom_file[0, 0].dtype

    def __len__(self):
        return self.shape[0]

    def __getitem__(self, idx: Union[List, np.ndarray]):
        # Slice cells (loom columns) and transpose to cells x genes.
        with loompy.connect(self.loom_filepath) as loom_file:
            return loom_file[:, idx].transpose()
|
{"hexsha": "623b10dad9c55b19953cc3871452e573b31ab068", "size": 87839, "ext": "py", "lang": "Python", "max_stars_repo_path": "scvi/dataset/union.py", "max_stars_repo_name": "maichmueller/scVI", "max_stars_repo_head_hexsha": "95fe13a36f83e2b6390fc47e96a2d17ed1fa318d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scvi/dataset/union.py", "max_issues_repo_name": "maichmueller/scVI", "max_issues_repo_head_hexsha": "95fe13a36f83e2b6390fc47e96a2d17ed1fa318d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scvi/dataset/union.py", "max_forks_repo_name": "maichmueller/scVI", "max_forks_repo_head_hexsha": "95fe13a36f83e2b6390fc47e96a2d17ed1fa318d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.2960161372, "max_line_length": 120, "alphanum_fraction": 0.5843759606, "include": true, "reason": "import numpy,import scipy", "num_tokens": 17926}
|
# Based on PWSCF documentation (version 6.2)
"""
    gen_lattice_cubic(a)

Return the 3×3 lattice matrix of a simple cubic cell with lattice constant
`a`; the primitive vectors are stored as columns.
"""
function gen_lattice_cubic( a::Float64 )
    # v1 = a(1,0,0), v2 = a(0,1,0), v3 = a(0,0,1), stored column-wise
    return hcat( a*[1,0,0], a*[0,1,0], a*[0,0,1] )
end
gen_lattice_sc(a::Float64) = gen_lattice_cubic(a)
"""
    gen_lattice_fcc(a)

Return the 3×3 lattice matrix (columns = primitive vectors) of a
face-centered cubic cell with cubic lattice constant `a` (PWSCF ibrav=2).
"""
function gen_lattice_fcc( a::Float64 )
    h = 0.5*a
    # v1 = (a/2)(-1,0,1), v2 = (a/2)(0,1,1), v3 = (a/2)(-1,1,0)
    return hcat( h*[-1,0,1], h*[0,1,1], h*[-1,1,0] )
end
"""
    gen_lattice_bcc(a)

Return the 3×3 lattice matrix (columns = primitive vectors) of a
body-centered cubic cell with cubic lattice constant `a` (PWSCF ibrav=3).
"""
function gen_lattice_bcc( a::Float64 )
    h = 0.5*a
    # v1 = (a/2)(1,1,1), v2 = (a/2)(-1,1,1), v3 = (a/2)(-1,-1,1)
    return hcat( h*[1,1,1], h*[-1,1,1], h*[-1,-1,1] )
end
# more symmetric axis:
"""
    gen_lattice_bcc_v2(a)

Body-centered cubic cell with the more symmetric choice of axes
(PWSCF ibrav=-3); columns of the returned 3×3 matrix are the primitive vectors.
"""
function gen_lattice_bcc_v2( a::Float64 )
    h = 0.5*a
    # v1 = (a/2)(-1,1,1), v2 = (a/2)(1,-1,1), v3 = (a/2)(1,1,-1)
    return hcat( h*[-1,1,1], h*[1,-1,1], h*[1,1,-1] )
end
# also for trigonal P
"""
    gen_lattice_hexagonal(a, c)

Hexagonal (and trigonal P) cell with in-plane constant `a` and height `c`;
columns of the returned 3×3 matrix are the primitive vectors.
"""
function gen_lattice_hexagonal( a::Float64, c::Float64 )
    # v1 = a(1,0,0), v2 = a(-1/2, sqrt(3)/2, 0), v3 = (0,0,c)
    return hcat( a*[1,0,0], a*[-1/2, sqrt(3)/2, 0], [0,0,c] )
end
# 5 Trigonal R, 3fold axis c celldm(4)=cos(gamma)
# The crystallographic vectors form a three-fold star around
# the z-axis, the primitive cell is a simple rhombohedron:
# v1 = a(tx,-ty,tz), v2 = a(0,2ty,tz), v3 = a(-tx,-ty,tz)
# where c=cos(gamma) is the cosine of the angle gamma between
# any pair of crystallographic vectors, tx, ty, tz are:
# tx=sqrt((1-c)/2), ty=sqrt((1-c)/6), tz=sqrt((1+2c)/3)
"""
    gen_lattice_trigonal(a, gamma_degree)

Trigonal R cell with the three-fold axis along z (PWSCF ibrav=5).
`gamma_degree` is the angle (degrees) between any pair of lattice vectors;
columns of the returned 3×3 matrix are the primitive vectors.
"""
function gen_lattice_trigonal( a::Float64, gamma_degree::Float64 )
    cosg = cos( gamma_degree*pi/180 )
    tx = sqrt( (1 - cosg)/2 )
    ty = sqrt( (1 - cosg)/6 )
    tz = sqrt( (1 + 2*cosg)/3 )
    # The three columns form a three-fold star around the z axis.
    return hcat( a*[tx, -ty, tz], a*[0, 2*ty, tz], a*[-tx, -ty, tz] )
end
# -5 Trigonal R, 3fold axis <111> celldm(4)=cos(gamma)
# The crystallographic vectors form a three-fold star around
# <111>. Defining a' = a/sqrt(3) :
# v1 = a' (u,v,v), v2 = a' (v,u,v), v3 = a' (v,v,u)
# where u and v are defined as
# u = tz - 2*sqrt(2)*ty, v = tz + sqrt(2)*ty
# and tx, ty, tz as for case ibrav=5
# Note: if you prefer x,y,z as axis in the cubic limit,
# set u = tz + 2*sqrt(2)*ty, v = tz - sqrt(2)*ty
# See also the note in Modules/latgen.f90
"""
    gen_lattice_trigonal_v2(a, gamma_degree)

Trigonal R cell with the three-fold axis along <111> (PWSCF ibrav=-5).
`gamma_degree` is the angle (degrees) between any pair of lattice vectors;
columns of the returned 3×3 matrix are the primitive vectors.
"""
function gen_lattice_trigonal_v2( a::Float64, gamma_degree::Float64 )
    cosg = cos( gamma_degree*pi/180 )
    ty = sqrt( (1 - cosg)/6 )
    tz = sqrt( (1 + 2*cosg)/3 )
    # With a' = a/sqrt(3): v1 = a'(u,v,v), v2 = a'(v,u,v), v3 = a'(v,v,u)
    u = tz - 2*sqrt(2)*ty
    v = tz + sqrt(2)*ty
    ap = a/sqrt(3)
    return hcat( ap*[u, v, v], ap*[v, u, v], ap*[v, v, u] )
end
#Tetragonal P (st) celldm(3)=c/a
#v1 = a(1,0,0), v2 = a(0,1,0), v3 = a(0,0,c/a)
"""
    gen_lattice_tetragonal_P(a, c)

Simple tetragonal (st) cell (PWSCF ibrav=6); columns of the returned 3×3
matrix are the primitive vectors.
"""
function gen_lattice_tetragonal_P( a::Float64, c::Float64)
    # v1 = a(1,0,0), v2 = a(0,1,0), v3 = (0,0,c)
    return hcat( a*[1,0,0], a*[0,1,0], [0,0,c] )
end
#7 Tetragonal I (bct) celldm(3)=c/a
#v1=(a/2)(1,-1,c/a), v2=(a/2)(1,1,c/a), v3=(a/2)(-1,-1,c/a)
"""
    gen_lattice_tetragonal_I(a, c)

Body-centered tetragonal (bct) cell (PWSCF ibrav=7); columns of the returned
3×3 matrix are the primitive vectors.
"""
function gen_lattice_tetragonal_I( a::Float64, c::Float64 )
    # v1 = (1/2)(a,-a,c), v2 = (1/2)(a,a,c), v3 = (1/2)(-a,-a,c)
    return hcat( 0.5*[a, -a, c], 0.5*[a, a, c], 0.5*[-a, -a, c] )
end
# 8 Orthorhombic P celldm(2)=b/a
# celldm(3)=c/a
# v1 = (a,0,0), v2 = (0,b,0), v3 = (0,0,c)
"""
    gen_lattice_orthorhombic_P(a, b, c)

Simple orthorhombic cell (PWSCF ibrav=8); columns of the returned 3×3 matrix
are the primitive vectors (a,0,0), (0,b,0), (0,0,c).
"""
function gen_lattice_orthorhombic_P( a::Float64, b::Float64, c::Float64 )
    return hcat( [a, 0, 0], [0, b, 0], [0, 0, c] )
end
# 12 Monoclinic P, unique axis c celldm(2)=b/a
# celldm(3)=c/a,
# celldm(4)=cos(ab)
# v1=(a,0,0), v2=(b*cos(gamma),b*sin(gamma),0), v3 = (0,0,c)
# where gamma is the angle between axis a and b.
"""
    gen_lattice_monoclinic_P(a, b, c, gamma_degree)

Monoclinic P cell with unique axis c (PWSCF ibrav=12); `gamma_degree` is the
angle (degrees) between axes a and b. Columns of the returned 3×3 matrix are
the primitive vectors.
"""
function gen_lattice_monoclinic_P( a::Float64, b::Float64, c::Float64, gamma_degree::Float64 )
    g = gamma_degree*pi/180
    # v1 = (a,0,0), v2 = (b cos γ, b sin γ, 0), v3 = (0,0,c)
    return hcat( [a, 0, 0], [b*cos(g), b*sin(g), 0], [0, 0, c] )
end
# 14 Triclinic celldm(2)= b/a,
# celldm(3)= c/a,
# celldm(4)= cos(bc),
# celldm(5)= cos(ac),
# celldm(6)= cos(ab)
# v1 = (a, 0, 0),
# v2 = (b*cos(gamma), b*sin(gamma), 0)
# v3 = (c*cos(beta), c*(cos(alpha)-cos(beta)cos(gamma))/sin(gamma),
# c*sqrt( 1 + 2*cos(alpha)cos(beta)cos(gamma)
# - cos(alpha)^2-cos(beta)^2-cos(gamma)^2 )/sin(gamma) )
# where alpha is the angle between axis b and c
# beta is the angle between axis a and c
# gamma is the angle between axis a and b
"""
    gen_lattice_triclinic(a, b, c, alpha_degree, beta_degree, gamma_degree)

Triclinic cell (PWSCF ibrav=14) from lattice constants `a`, `b`, `c` and
angles (degrees): alpha between b and c, beta between a and c, gamma between
a and b. Columns of the returned 3×3 matrix are the primitive vectors.

Throws `ArgumentError` when the angles cannot form a valid cell.
"""
function gen_lattice_triclinic(a::Float64, b::Float64, c::Float64,
    alpha_degree::Float64, beta_degree::Float64, gamma_degree::Float64)
    # Sanity check kept from the original implementation, but a thrown
    # exception replaces the original println + exit(), which killed the
    # whole process from library code.
    if alpha_degree + beta_degree + gamma_degree <= 180.0
        throw(ArgumentError("gen_lattice_triclinic: sum of angles must be larger than 180°"))
    end
    alpha = alpha_degree*pi/180
    beta = beta_degree*pi/180
    gamma = gamma_degree*pi/180
    #
    v1 = [a, 0, 0]
    v2 = [b*cos(gamma), b*sin(gamma), 0]
    # FIX: the original wrote `cos(beta)cos(gamma)` (missing `*`), which is a
    # syntax error in Julia — juxtaposition multiplication only works with a
    # numeric literal on the left.
    t1 = c*(cos(alpha) - cos(beta)*cos(gamma))/sin(gamma)
    sq_arg = 1 + 2*cos(alpha)*cos(beta)*cos(gamma) -
             cos(alpha)^2 - cos(beta)^2 - cos(gamma)^2
    # Guard the square root: a negative argument means the three angles do
    # not define a realizable cell.
    sq_arg >= 0 || throw(ArgumentError("gen_lattice_triclinic: angles do not define a valid cell"))
    t2 = c*sqrt(sq_arg)/sin(gamma)
    v3 = [c*cos(beta), t1, t2]
    #
    LL = zeros(3,3)
    LL[:,1] = v1
    LL[:,2] = v2
    LL[:,3] = v3
    return LL
end
|
{"hexsha": "99bda8549fc5726626fcf2718d1a163b6b54ddc4", "size": 6096, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "PW/common/gen_lattice_pwscf.jl", "max_stars_repo_name": "f-fathurrahman/ffr-ElectronicStructure.jl", "max_stars_repo_head_hexsha": "35dca9831bfc6a3e49bb0f3a5872558ffce4b211", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2018-01-03T02:19:05.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-29T13:30:20.000Z", "max_issues_repo_path": "PW/common/gen_lattice_pwscf.jl", "max_issues_repo_name": "f-fathurrahman/ffr-ElectronicStructure.jl", "max_issues_repo_head_hexsha": "35dca9831bfc6a3e49bb0f3a5872558ffce4b211", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PW/common/gen_lattice_pwscf.jl", "max_forks_repo_name": "f-fathurrahman/ffr-ElectronicStructure.jl", "max_forks_repo_head_hexsha": "35dca9831bfc6a3e49bb0f3a5872558ffce4b211", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-03-23T06:58:47.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-03T00:54:28.000Z", "avg_line_length": 27.0933333333, "max_line_length": 98, "alphanum_fraction": 0.4886811024, "num_tokens": 2416}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
from statistics import mean, variance
import numpy as np
import math
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
def plotCD(fig, data, reg1, reg2, log):
    # Plot the two fitted Cobb-Douglas production surfaces (constant vs
    # variable returns to scale) as 3-D wireframes over a K-L grid, with the
    # observed (K, L, Y) points scattered on top.
    # reg1: [b0, b1] of the restricted model; reg2: [b0, b1, b2] of the full
    # model. When log is False the intercepts are in log space and must be
    # exponentiated back.
    interval = (max(data["K"]) - min(data["K"])) // 30
    interval2 = (max(data["L"]) - min(data["L"])) // 30
    x = np.arange(min(data["K"]), max(data["K"]), interval)
    y = np.arange(min(data["L"]), max(data["L"]), interval2)
    x, y = np.meshgrid(x, y)
    fig.suptitle("Производственная функция Кобба-Дугласа")
    # Constant returns: exponents b1 and (1 - b1) sum to one.
    z1 = (math.exp(reg1[0]) if not log else reg1[0]) * x ** reg1[1] * y ** (1 - reg1[1])
    # Variable returns: independent exponents b1 and b2.
    z2 = (math.exp(reg2[0]) if not log else reg2[0]) * x ** reg2[1] * y ** reg2[2]
    z = [z1, z2]
    for i in range(2):
        ax = fig.add_subplot(1, 2, i + 1, projection="3d")
        ax.plot_wireframe(
            x,
            y,
            z[i],
            antialiased=False,
            rstride=2,
            cstride=2,
            color="green" if i == 0 else "blue",
            linewidth=1,
        )
        ax.set_title(
            "Постоянная отдача от масштаба"
            if i == 0
            else "Доходность с переменным масштабом",
            fontweight="bold",
        )
        ax.set_xlabel("K", fontweight="bold")
        ax.set_ylabel("L", fontweight="bold")
        ax.set_zlabel("Y", fontweight="bold")
        ax.scatter(
            data["K"], data["L"], data["Y"], c="red", linewidth=0, antialiased=False
        )
    plt.show()
def getData(file, log, d=";"):
    """
    Read a delimited data file into columns Y, K, L and (optionally) P.

    :param file: path to a text file with a one-line header; decimal commas
        are accepted and normalized to dots.
    :param log: if False the values are log-transformed on read (for the
        linearized Cobb-Douglas regression); if True they are read as-is.
    :param d: column delimiter, ";" by default.
    :return: dict with keys "Y", "K", "L", "P" mapping to lists of floats
        ("P" stays empty when the file has only three columns).
    """
    data = {"Y": [], "K": [], "L": [], "P": []}
    with open(file, "r", newline="") as csvfile:
        freader = csv.reader(csvfile, delimiter=d)
        next(freader)  # skip the header row
        for row in freader:
            if not log:
                # FIX: np.float was deprecated in NumPy 1.20 and removed in
                # 1.24 — use the builtin float instead.
                row = [np.log(float(n.replace(",", "."))) for n in row]
            else:
                row = [float(n.replace(",", ".")) for n in row]
            data["Y"].append(row[0])
            data["K"].append(row[1])
            data["L"].append(row[2])
            if len(row) > 3:
                data["P"].append(row[3])
    return data
class RegressionModel:
    """
    Ordinary-least-squares regression with one to three regressors, plus the
    usual diagnostics (R², adjusted R², F, t, Durbin-Watson, Jarque-Bera).
    Used here to fit (log-linearized) Cobb-Douglas production functions.
    """

    # NOTE(review): these are *class-level* defaults; the mutable ones ([])
    # are shared across instances until shadowed by instance assignment in
    # __init__/CD — confirm this is intentional.
    y = 0
    x1 = []
    x2 = None
    x3 = None
    residuals = []
    file = ""
    log = False
    model = []
    cond = 0

    def __init__(self, y, x1, x2=None, x3=None):
        # Up to three regressors; x2/x3 stay None for the smaller models.
        self.y = y
        self.x1 = x1
        self.x2 = x2
        self.x3 = x3

    def cov(self, a, b):
        """
        Sample covariance of two equal-length sequences.
        """
        cov = 0.0
        for i in range(len(a)):
            cov += (a[i] - mean(a)) * (b[i] - mean(b))
        return cov / (len(a) - 1)

    def se(self, y, x1, residuals, x2=None, x3=None):
        """
        Standard errors of the regression coefficients.

        Simple regression (x2 is None) uses the textbook closed form;
        otherwise the diagonal of MSE * (X'X)^-1 is used.
        """
        se = []
        SSr = sum([(res) ** 2 for res in residuals])
        MSE = SSr / (len(y) - 3)
        if x2 is None:
            s = (sum([res ** 2 for res in residuals]) / (len(y) - 2)) ** 0.5
            SSX = sum([(x - mean(x1)) ** 2 for x in x1])
            xsq = [x ** 2 for x in x1]
            se.append(s * (sum(xsq) / (len(y) * SSX)) ** 0.5)
            se.append(s / (SSX) ** 0.5)
            return se
        elif x3 is None:
            mat = np.column_stack((np.array(np.ones(len(y))), np.array(x1), np.array(x2)))
        else:
            mat = np.column_stack(
                (np.array(np.ones(len(y))), np.array(x1), np.array(x2), np.array(x3))
            )
        # pinv guards against a singular X'X (collinear regressors).
        mat = np.linalg.pinv(np.matmul(mat.transpose(), mat))
        se = [(d * MSE) ** 0.5 for d in mat.diagonal()]
        return se

    def getRes(self, y, x1, b0, b1, x2=None, b2=None, x3=None, b3=None):
        """
        Residuals and fitted values of the computed regression.
        """
        res = []
        yp = []
        if x2 is None:
            for i in range(len(y)):
                yp.append(b0 + b1 * x1[i])
                res.append(y[i] - yp[i])
        elif x3 is None:
            for i in range(len(y)):
                yp.append(b0 + b1 * x1[i] + b2 * x2[i])
                res.append(y[i] - yp[i])
        else:
            for i in range(len(y)):
                yp.append(b0 + b1 * x1[i] + b2 * x2[i] + b3 * x3[i])
                res.append(y[i] - yp[i])
        return res, yp

    def r2(self, y, residuals, ym):
        """
        Coefficient of determination (R²).
        """
        SSr = sum([res ** 2 for res in residuals])
        SSt = sum([(yi - ym) ** 2 for yi in y])
        return 1 - (SSr / SSt) if SSt != 0 else 1

    def r2_adj(self, y, R2, fac):
        """
        Adjusted coefficient of determination for `fac` regressors.
        """
        return 1 - (1 - R2) * ((len(y) - 1) / (len(y) - fac - 1))

    def f(self, y, yp, R2, fac):
        """
        F statistic of overall regression significance.
        """
        SSE = 0.0
        SSM = 0.0
        for i in range(len(y)):
            SSE += (y[i] - yp[i]) ** 2
            SSM += (yp[i] - mean(y)) ** 2
        return (SSM / (fac)) / (SSE / (len(y) - fac - 1)) if SSE != 0 else math.inf

    def t(self, coeff, se):
        """
        t statistics of the coefficients (zero standard errors are skipped).
        """
        t_stat = []
        for i in range(len(coeff)):
            if se[i] == 0:
                continue
            t_stat.append(coeff[i] / se[i])
        return t_stat

    def dw(self, residuals):
        """
        Durbin-Watson statistic (residual autocorrelation).
        """
        sumr = 0.0
        rsq = sum([res ** 2 for res in residuals])
        for i in range(1, len(residuals)):
            sumr += (residuals[i] - residuals[i - 1]) ** 2
        return sumr / rsq if rsq != 0 else 0

    def jb(self, y, residuals):
        """
        Jarque-Bera normality test from residual skewness and kurtosis.
        """
        m3 = sum([res ** 3 for res in residuals]) / len(y)
        sig3 = (sum([res ** 2 for res in residuals]) / len(y)) ** 1.5
        m4 = sum([res ** 4 for res in residuals]) / len(y)
        sig4 = (sum([res ** 2 for res in residuals]) / len(y)) ** 2
        S = m3 / sig3 if sig3 != 0 else 0
        # NOTE(review): the kurtosis guard tests sig3 rather than sig4 —
        # both are zero together, but confirm this was intended.
        C = m4 / sig4 if sig3 != 0 else 0
        jb_stat = len(y) * ((S ** 2) / 6 + ((C - 3) ** 2) / 24)
        return jb_stat

    def regr(self, y, x1, x2=None, x3=None):
        """
        Compute the regression coefficients (closed form for one regressor,
        normal equations otherwise).
        """
        if x2 is None:
            b1 = self.cov(x1, y) / variance(x1)
            b0 = mean(y) - b1 * mean(x1)
            coeff = [b0, b1]
            return coeff
        elif x3 is None:
            X = np.column_stack((np.array(np.ones(len(y))), np.array(x1), np.array(x2)))
        else:
            X = np.column_stack(
                (np.array(np.ones(len(y))), np.array(x1), np.array(x2), np.array(x3))
            )
        Y = np.column_stack(np.array(y))
        # Normal equations: B = (X'X)^-1 X'y.
        A = np.linalg.inv(np.matmul(X.transpose(), X))
        B = np.matmul(X.transpose(), Y.transpose())
        coeff = np.matmul(A, B)
        # Condition number of X'X is kept as a multicollinearity indicator.
        self.cond = np.linalg.cond(np.matmul(X.transpose(), X))
        coeff = np.squeeze(np.array(coeff))
        return coeff

    def CD(self):
        """
        Fit the regression, compute all diagnostics, print a summary table
        and return the statistics as a dict.
        """
        y = self.y
        x1 = self.x1
        x2 = self.x2
        x3 = self.x3
        model = self.regr(y, x1, x2, x3)
        if len(model) == 3:
            res, yp = self.getRes(y, x1, model[0], model[1], x2, model[2])
        elif len(model) == 2:
            res, yp = self.getRes(y, x1, model[0], model[1])
        else:
            res, yp = self.getRes(y, x1, model[0], model[1], x2, model[2], x3, model[3])
        R2 = self.r2(y, res, mean(y))
        R2_adj = self.r2_adj(y, R2, len(model) - 1)
        dw_test = self.dw(res)
        F = self.f(y, yp, R2, len(model) - 1)
        SE = self.se(y, x1, res, x2, x3)
        t_stat = self.t(model, SE)
        jb_test = self.jb(y, res)
        self.model = model
        res = {
            "coefs": model,
            "standard error": SE,
            "t-statistic": t_stat,
            "Determination coefficient": R2,
            "Determination coefficient (close-fitting)": R2_adj,
            "Test F": F,
            "Durbin-Watson statistic": dw_test,
            "Jarque-Bera test": jb_test,
            "Condition number of X ^ tX": self.cond,
        }
        names_stat = [
            "coefs",
            "standard error",
            "t-statistic",
            "Determination coefficient",
            "Determination coefficient (close-fitting)",
            "Test F",
            "Durbin-Watson statistic",
            "Jarque-Bera test",
            "Condition number of X ^ tX",
        ]
        print(
            "{0}\n{1:^103}\n{2}".format("=" * 103, "Regression Summary", "=" * 103)
        )
        for i in range(len(names_stat)):
            print("{0:40} {1:}".format(names_stat[i], res[names_stat[i]]))
        print("\n")
        return res
def model():
    """
    CLI interface: read "dataless.csv", fit the Cobb-Douglas regressions,
    print their summaries and (for two-factor data) plot the fitted
    surfaces. Any failure is reported instead of raised.
    """
    try:
        file = "dataless.csv"
        log = False
        data = getData(file, log)
        fig = plt.figure()
        if len(data["P"]) != 0:
            # Three-factor data: restricted model (differenced by P) and the
            # unrestricted one.
            reg3 = RegressionModel(
                [a - b for a, b in zip(data["Y"], data["P"])],
                [a - b for a, b in zip(data["K"], data["P"])],
                [a - b for a, b in zip(data["L"], data["P"])],
            )
            reg4 = RegressionModel(data["Y"], data["K"], data["L"], data["P"])
            reg3.CD()
            reg4.CD()
            # FIX: the original fell through to plotCD(..., reg1.model,
            # reg2.model, ...) here, where reg1/reg2 are unbound — the
            # resulting NameError was silently printed by the except below.
            # plotCD only supports the two-factor models, so plotting is
            # skipped for this branch.
        else:
            reg1 = RegressionModel(
                [a - b for a, b in zip(data["Y"], data["L"])],
                [a - b for a, b in zip(data["K"], data["L"])],
            )
            reg2 = RegressionModel(data["Y"], data["K"], data["L"])
            reg1.CD()
            reg2.CD()
            plotCD(fig, getData(file, True), reg1.model, reg2.model, log)
    except Exception as err:
        # Best-effort CLI: report the error instead of crashing.
        print(err, "\n")


model()
|
{"hexsha": "26d704cbf147d023f33e37747bda6b17df8e640b", "size": 10157, "ext": "py", "lang": "Python", "max_stars_repo_path": "CobbDouglas.py", "max_stars_repo_name": "barklan/cobb-dude", "max_stars_repo_head_hexsha": "5d1d2d3ee9032558b33b84ba857d7cc2279c4a1a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "CobbDouglas.py", "max_issues_repo_name": "barklan/cobb-dude", "max_issues_repo_head_hexsha": "5d1d2d3ee9032558b33b84ba857d7cc2279c4a1a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "CobbDouglas.py", "max_forks_repo_name": "barklan/cobb-dude", "max_forks_repo_head_hexsha": "5d1d2d3ee9032558b33b84ba857d7cc2279c4a1a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.4504792332, "max_line_length": 91, "alphanum_fraction": 0.4475731023, "include": true, "reason": "import numpy", "num_tokens": 3001}
|
import numpy as np
from lib.Activations import Activation_Softmax
class Loss:
    """
    Base class for losses. Subclasses implement ``forward`` (per-sample
    losses); this class handles batch averaging, epoch-level accumulation and
    the optional L1/L2 regularization penalty over the trainable layers.
    """

    def remember_trainable_layers(self, trainable_layers):
        # Stored so regularization_loss() can walk the layers' weights/biases.
        self.trainable_layers = trainable_layers

    def calculate(self, output, y, *, include_regularization=False):
        """
        Mean loss over one batch; also folds the batch into the running
        accumulators (reset them with new_pass()).
        """
        sample_losses = self.forward(output, y)
        data_loss = np.mean(sample_losses)
        self.accumulated_sum += np.sum(sample_losses)
        self.accumulated_count += len(sample_losses)
        if not include_regularization:
            return data_loss
        # this will break some older things
        return data_loss, self.regularization_loss()

    # this will break some older things
    def regularization_loss(self):
        """Sum of L1/L2 penalties over all remembered trainable layers."""
        regularization_loss = 0
        # NOTE(review): the "regulaizer" spelling below must match the
        # attribute names on the layer objects — confirm before renaming.
        for layer in self.trainable_layers:
            if layer.weight_regulaizer_l1 > 0:
                regularization_loss += layer.weight_regulaizer_l1 * (
                    np.sum(np.abs(layer.weights))
                )
            if layer.weight_regulaizer_l2 > 0:
                regularization_loss += layer.weight_regulaizer_l2 * (
                    np.sum(layer.weights * layer.weights)
                )
            if layer.bias_regularizer_l1 > 0:
                regularization_loss += layer.bias_regularizer_l1 * (
                    np.sum(np.abs(layer.biases))
                )
            if layer.bias_regularizer_l2 > 0:
                regularization_loss += layer.bias_regularizer_l2 * (
                    np.sum(layer.biases * layer.biases)
                )
        return regularization_loss

    def calculate_accumulated(self, *, include_regularization=False):
        # Mean loss over everything seen since the last new_pass().
        data_loss = self.accumulated_sum / self.accumulated_count
        if not include_regularization:
            return data_loss
        return data_loss, self.regularization_loss()

    def new_pass(self):
        # Reset the epoch accumulators.
        self.accumulated_sum = 0
        self.accumulated_count = 0

    def forward(self, y_pred, y_true):
        # Subclasses must override; the base returns a sentinel value.
        print("This method must be overridden")
        return -1
class Loss_CategoricalCrossentropy(Loss):
    """Categorical cross-entropy for sparse or one-hot encoded targets."""

    def forward(self, y_pred, y_true):
        # Clip so log() never sees 0 (and symmetrically at the other end).
        n_samples = len(y_pred)
        clipped = np.clip(y_pred, 1e-7, 1 - 1e-7)
        if len(y_true.shape) == 1:
            # Sparse integer labels: pick the predicted prob of the true class.
            confidences = clipped[range(n_samples), y_true]
        elif len(y_true.shape) == 2:
            # One-hot rows: mask out all but the true class.
            confidences = np.sum(clipped * y_true, axis=1)
        return -np.log(confidences)

    def backward(self, dvalues, y_true):
        n_samples = len(dvalues)
        n_labels = len(dvalues[0])
        if len(y_true.shape) == 1:
            # Convert sparse labels to one-hot rows.
            y_true = np.eye(n_labels)[y_true]
        # Derivative of CE wrt the inputs, normalized by batch size.
        self.dinputs = (-y_true / dvalues) / n_samples
class Activation_Softmax_Loss_CategoricalCrossentropy:
    """
    Combined softmax + categorical cross-entropy backward step using the
    simplified gradient (predicted probabilities minus true one-hot).
    """

    def backward(self, dvalues, y_true):
        """
        :param dvalues: softmax outputs, shape (samples, classes)
        :param y_true: class targets, either sparse ints of shape (samples,)
            or one-hot of shape (samples, classes)
        """
        samples = len(dvalues)
        # FIX: the original tested len(y_true) == 2 (the number of samples)
        # instead of the number of dimensions, so one-hot targets were only
        # detected for batches of exactly two samples.
        if len(y_true.shape) == 2:
            y_true = np.argmax(y_true, axis=1)
        self.dinputs = dvalues.copy()
        # Same as dvalues - y_true_onehot: chained derivative of softmax + CE.
        self.dinputs[range(samples), y_true] -= 1
        # Normalize so gradient magnitude is independent of batch size.
        self.dinputs = self.dinputs / samples
class Loss_BinaryCrossentropy(Loss):
    """Binary cross-entropy over one or more independent output neurons."""

    def forward(self, y_pred, y_true):
        # Clip both ends so neither log() can receive 0.
        clipped = np.clip(y_pred, 1e-7, 1 - 1e-7)
        per_output = -(
            y_true * np.log(clipped) + (1 - y_true) * np.log(1 - clipped)
        )
        # Average across the output neurons of each sample.
        return np.mean(per_output, axis=1)

    def backward(self, dvalues, y_true):
        n_samples = len(dvalues)
        n_outputs = len(dvalues[0])
        clipped = np.clip(dvalues, 1e-7, 1 - 1e-7)
        # Gradient of BCE wrt inputs, normalized per output and per sample.
        grad = -(y_true / clipped - (1 - y_true) / (1 - clipped)) / n_outputs
        self.dinputs = grad / n_samples
class Loss_MeanSquaredError(Loss):
    """Mean squared error (L2) loss."""

    def forward(self, y_pred, y_true):
        # Per-sample mean of squared differences over the output dimension.
        return np.mean((y_true - y_pred) ** 2, axis=-1)

    def backward(self, dvalues, y_true):
        n_samples = len(dvalues)
        n_outputs = len(dvalues[0])
        # d/dy_pred of (y_true - y_pred)^2, normalized per output and sample.
        self.dinputs = (-2 * (y_true - dvalues) / n_outputs) / n_samples
class Loss_MeanAbsoluteError(Loss):
    """Mean absolute error (L1) loss."""

    def forward(self, y_pred, y_true):
        # Per-sample mean of absolute differences over the output dimension.
        return np.mean(np.abs(y_true - y_pred), axis=-1)

    def backward(self, dvalues, y_true):
        n_samples = len(dvalues)
        n_outputs = len(dvalues[0])
        # Subgradient of |y_true - y_pred|, normalized per output and sample.
        self.dinputs = (np.sign(y_true - dvalues) / n_outputs) / n_samples
|
{"hexsha": "0ea298350ef6c8cfdf86193784229f99f10da684", "size": 4804, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/Losses.py", "max_stars_repo_name": "QuicksandDesignStudio/neural-network", "max_stars_repo_head_hexsha": "2bc6c533e1fb7dc11d43762d5fc62caaa670b55b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lib/Losses.py", "max_issues_repo_name": "QuicksandDesignStudio/neural-network", "max_issues_repo_head_hexsha": "2bc6c533e1fb7dc11d43762d5fc62caaa670b55b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/Losses.py", "max_forks_repo_name": "QuicksandDesignStudio/neural-network", "max_forks_repo_head_hexsha": "2bc6c533e1fb7dc11d43762d5fc62caaa670b55b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.8309859155, "max_line_length": 111, "alphanum_fraction": 0.6205245629, "include": true, "reason": "import numpy", "num_tokens": 1144}
|
import random
import os
import glob
import numpy as np
np.random.seed(0)
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from torchsat.transforms import transforms_cls
from skimage import io
from skimage.transform import rescale
class RandomApply(object):
    """Apply randomly a list of transformations with a given probability

    Args:
        transforms (list or tuple): list of transformations
        p (float): probability
    """

    def __init__(self, transforms, p=0.5):
        # FIX: the original called super(object, self).__init__(self, transforms),
        # which raises TypeError (object.__init__ takes no extra arguments)
        # and never stored the transforms.
        super(RandomApply, self).__init__()
        self.transforms = transforms
        self.p = p

    def __call__(self, img):
        # Skip the whole pipeline when the random draw exceeds p.
        if self.p < random.random():
            return img
        # Otherwise apply every transform in order.
        for t in self.transforms:
            img = t(img)
        return img
def resize_bands(img, size=120):
    # Rescale one band raster by size / img.shape[0] (square target when the
    # input is square). NOTE(review): anti_aliasing=False keeps raw pixel
    # statistics — confirm this is intentional.
    return np.array(rescale(img, size/img.shape[0], anti_aliasing=False))
def load_patch(patch_dir):
    # Read the 12 band rasters of one patch (Sentinel-2-style band naming)
    # from files "<patch>_<band>.tif" and stack them into a single
    # (H, W, 12) array; every band is resized to a common grid first,
    # presumably because native band resolutions differ — confirm.
    bands = ['B01', 'B02', 'B03', 'B04', 'B05', 'B06', 'B07', 'B08', 'B8A', 'B09', 'B11', 'B12']
    patch_name = os.path.basename(patch_dir)
    patch = [io.imread(os.path.join(patch_dir, f'{patch_name}_{band}.tif')) for band in bands]
    patch = np.stack([resize_bands(xx) for xx in patch], axis=2)
    return patch
class TileDataloader(Dataset):
    """
    Dataset over a directory of patch folders: each item is the transformed
    stacked band array of one patch (see load_patch).
    """

    def __init__(self, tile_dir, transform):
        self.tile_dir = tile_dir
        # One entry per patch subdirectory/file under tile_dir.
        self.tile_files = glob.glob(os.path.join(self.tile_dir, '*'))
        self.transform = transform
        # NOTE(review): this band list duplicates the one in load_patch and
        # is otherwise unused here — confirm before removing.
        self.bands = ['B01', 'B02', 'B03', 'B04', 'B05', 'B06', 'B07', 'B08', 'B8A', 'B09', 'B11', 'B12']

    def __len__(self):
        return len(self.tile_files)

    def __getitem__(self, idx):
        sample = load_patch(os.path.join(self.tile_dir, str(os.path.basename(self.tile_files[idx]))))
        sample = self.transform(sample)
        return sample
class DataSetWrapper(object):
    """
    Builds the SimCLR train/validation DataLoaders: a TileDataloader wrapped
    with the two-view SimCLRDataTransform, split by a random index shuffle.
    """

    def __init__(self, batch_size, num_workers, valid_size, input_shape, s, input_dir):
        self.input_dir = input_dir
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.valid_size = valid_size
        self.s = s
        # NOTE(review): eval() on a config string is unsafe if the config is
        # untrusted — consider ast.literal_eval; left as-is here.
        self.input_shape = eval(input_shape)

    def get_data_loaders(self):
        data_augment = self._get_simclr_pipeline_transform()
        # train_dataset = datasets.STL10('./data',
        #                                split='train+unlabeled',
        #                                download=True,
        #                                transform=SimCLRDataTransform(data_augment))
        train_dataset = TileDataloader(tile_dir = self.input_dir,
                                       transform = SimCLRDataTransform(data_augment))
        train_loader, valid_loader = self.get_train_validation_data_loaders(train_dataset)
        return train_loader, valid_loader

    def _get_simclr_pipeline_transform(self):
        # Classification-style augmentation pipeline from torchsat.
        gray = transforms_cls.ToGray()
        # Mising Color Jitter (and others check paper, probably need to be handcrafted).
        data_transforms = transforms_cls.Compose([transforms_cls.RandomResizedCrop(crop_size=int(self.input_shape[0]*.8), target_size=self.input_shape[0]),
                                                  transforms_cls.RandomHorizontalFlip(), # Missing Color Jitter
                                                  #RandomApply([gray], p=0.8), #Should be random w some probability
                                                  transforms_cls.GaussianBlur(kernel_size=13),
                                                  transforms_cls.ToTensor()])
        return data_transforms

    def get_train_validation_data_loaders(self, train_dataset):
        # obtain training indices that will be used for validation
        num_train = len(train_dataset)
        indices = list(range(num_train))
        np.random.shuffle(indices)
        split = int(np.floor(self.valid_size * num_train))
        train_idx, valid_idx = indices[split:], indices[:split]
        # define samplers for obtaining training and validation batches
        train_sampler = SubsetRandomSampler(train_idx)
        valid_sampler = SubsetRandomSampler(valid_idx)
        train_loader = DataLoader(train_dataset,
                                  batch_size=self.batch_size,
                                  sampler=train_sampler,
                                  num_workers=self.num_workers,
                                  drop_last=True,
                                  shuffle=False)
        valid_loader = DataLoader(train_dataset,
                                  batch_size=self.batch_size,
                                  sampler=valid_sampler,
                                  num_workers=self.num_workers,
                                  drop_last=True)
        return train_loader, valid_loader
class SimCLRDataTransform(object):
    """Produce two independently-augmented views of the same sample (a SimCLR positive pair)."""

    def __init__(self, transform):
        self.transform = transform

    def __call__(self, sample):
        # Apply the (stochastic) transform twice to get the two views.
        return self.transform(sample), self.transform(sample)
|
{"hexsha": "f0b632aed5906ff90074f175742094c7a9afde02", "size": 5004, "ext": "py", "lang": "Python", "max_stars_repo_path": "data_aug/dataset_wrapper.py", "max_stars_repo_name": "ezekielbarnett/SimCLR", "max_stars_repo_head_hexsha": "89f9e36f18b0da264a4faf833981198797b4a94b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "data_aug/dataset_wrapper.py", "max_issues_repo_name": "ezekielbarnett/SimCLR", "max_issues_repo_head_hexsha": "89f9e36f18b0da264a4faf833981198797b4a94b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data_aug/dataset_wrapper.py", "max_forks_repo_name": "ezekielbarnett/SimCLR", "max_forks_repo_head_hexsha": "89f9e36f18b0da264a4faf833981198797b4a94b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.9090909091, "max_line_length": 155, "alphanum_fraction": 0.603117506, "include": true, "reason": "import numpy", "num_tokens": 1036}
|
// Ogonek
//
// Written in 2012-2013 by Martinho Fernandes <martinho.fernandes@gmail.com>
//
// To the extent possible under law, the author(s) have dedicated all copyright and related
// and neighboring rights to this software to the public domain worldwide. This software is
// distributed without any warranty.
//
// You should have received a copy of the CC0 Public Domain Dedication along with this software.
// If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
// Encoding/decoding iterators
#ifndef OGONEK_ENCODING_ITERATOR_HPP
#define OGONEK_ENCODING_ITERATOR_HPP
#include <ogonek/traits.h++>
#include <ogonek/types.h++>
#include <ogonek/error.h++>
#include <ogonek/detail/constants.h++>
#include <ogonek/detail/ranges.h++>
#include <ogonek/detail/container/partial_array.h++>
#include <ogonek/detail/container/encoded_character.h++>
#include <boost/iterator/iterator_facade.hpp>
#include <boost/range/iterator_range.hpp>
#include <boost/range/sub_range.hpp>
#include <algorithm>
#include <array>
#include <initializer_list>
#include <cstddef>
namespace ogonek {
namespace detail {
class decoding_iterator_access;
} // namespace detail
    // Forward iterator that lazily encodes code points read from [first, last)
    // into code units of EncodingForm, delegating invalid input to ErrorHandler.
    template <typename EncodingForm, typename Iterator, typename ErrorHandler>
    struct encoding_iterator
    : boost::iterator_facade<
        encoding_iterator<EncodingForm, Iterator, ErrorHandler>,
        CodeUnit<EncodingForm>,
        std::forward_iterator_tag, // TODO
        CodeUnit<EncodingForm>
    > {
    public:
        // Eagerly encodes the first character so dereference() is immediately valid.
        encoding_iterator(Iterator first, Iterator last)
        : first(std::move(first)), last(std::move(last)) { encode_next(); }
        // Current code unit of the most recently encoded character (returned by value).
        CodeUnit<EncodingForm> dereference() const {
            return encoded[current];
        }
        // Equal when at the same source position and offset, or when both are
        // fully exhausted (source depleted and sentinel offset reached).
        bool equal(encoding_iterator const& that) const {
            return (first == that.first && current == that.current)
                || (first == last && that.first == that.last
                    && current == depleted && that.current == depleted);
        }
        // Step to the next code unit; once the current encoded character is
        // consumed, pull and encode the next code point from the source.
        void increment() {
            ++current;
            if(current == encoded.size()) {
                encode_next();
            }
        }
    private:
        // Sentinel offset marking an exhausted iterator (-1 wraps to SIZE_MAX).
        static constexpr std::size_t depleted = -1;
        // Encode the next source code point into `encoded`, or mark depletion.
        void encode_next() {
            if(first != last) {
                auto u = *first++;
                encoded = encode_validated(u, ErrorHandler{});
                current = 0;
            } else {
                current = depleted;
            }
        }
        // Fast path: caller vouches for validity, skip range/surrogate checks.
        detail::encoded_character<EncodingForm> encode_validated(code_point u, assume_valid_t) {
            return EncodingForm::encode_one(u, state, assume_valid);
        }
        // Checked path: out-of-range or surrogate code points go through the
        // error handler; everything else is encoded normally.
        template <typename ErrorHandler1>
        detail::encoded_character<EncodingForm> encode_validated(code_point u, ErrorHandler1) {
            if(u > detail::last_code_point || detail::is_surrogate(u)) {
                return ErrorHandler1::template apply_encode<EncodingForm>(u, state);
            } else {
                return EncodingForm::encode_one(u, state, ErrorHandler1{});
            }
        }
        Iterator first, last;                                // remaining source range
        EncodingState<EncodingForm> state {};                // stateful-encoding carry-over
        detail::encoded_character<EncodingForm> encoded {};  // code units of current character
        std::size_t current;                                 // offset into `encoded`, or `depleted`
    };
    // Forward iterator that lazily decodes code units from [first, last) into
    // code points, delegating malformed input to ErrorHandler.
    template <typename EncodingForm, typename Iterator, typename ErrorHandler>
    struct decoding_iterator
    : boost::iterator_facade<
        decoding_iterator<EncodingForm, Iterator, ErrorHandler>,
        code_point,
        std::forward_iterator_tag, // TODO
        code_point
    > {
    public:
        decoding_iterator(Iterator first, Iterator last)
        : first(std::move(first)), last(std::move(last)) {}
        // Decode (but do not consume) the code point at the current position;
        // works on a copy of the state so dereference stays non-mutating.
        code_point dereference() const {
            code_point u;
            auto s = state;
            EncodingForm::decode_one(boost::sub_range<range>(first, last), u, s, ErrorHandler{});
            return u;
        }
        // NOTE(review): position-only comparison; the second clause makes any
        // two exhausted iterators equal — presumably intentional for sentinel
        // end iterators, confirm against callers.
        bool equal(decoding_iterator const& that) const {
            return first == that.first || (first == last && that.first == that.last);
        }
        // Consume one encoded character: advance `first` past it and update
        // the persistent decoding state.
        void increment() {
            code_point dummy;
            first = EncodingForm::decode_one(boost::sub_range<range>(first, last), dummy, state, ErrorHandler{}).begin();
        }
    private:
        using range = boost::iterator_range<Iterator>;
        friend class detail::decoding_iterator_access;
        Iterator first, last;                 // remaining encoded range
        EncodingState<EncodingForm> state {}; // stateful-decoding carry-over
    };
} // namespace ogonek
#endif // OGONEK_ENCODING_ITERATOR_HPP
|
{"hexsha": "4d71b6f285911bac410ea4daee4ff81c2be2075d", "size": 4535, "ext": "h++", "lang": "C++", "max_stars_repo_path": "include/ogonek/encoding/iterator.h++", "max_stars_repo_name": "libogonek/ogonek", "max_stars_repo_head_hexsha": "46b7edbf6b7ff89892f5ba25494749b442e771b3", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 25.0, "max_stars_repo_stars_event_min_datetime": "2016-10-21T12:37:23.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-22T05:46:46.000Z", "max_issues_repo_path": "include/ogonek/encoding/iterator.h++", "max_issues_repo_name": "libogonek/ogonek", "max_issues_repo_head_hexsha": "46b7edbf6b7ff89892f5ba25494749b442e771b3", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/ogonek/encoding/iterator.h++", "max_forks_repo_name": "libogonek/ogonek", "max_forks_repo_head_hexsha": "46b7edbf6b7ff89892f5ba25494749b442e771b3", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2016-09-05T10:23:18.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-09T19:37:37.000Z", "avg_line_length": 33.5925925926, "max_line_length": 121, "alphanum_fraction": 0.6302094818, "num_tokens": 957}
|
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
from torchvision import datasets, transforms
import os
import argparse
import pdb
import copy
import numpy as np
from torch.optim import lr_scheduler
import logging
from utils import *
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
READ_CKPT=True
class Net(nn.Module):
    """LeNet-style CNN for 28x28 single-channel images, emitting log-probabilities.

    Layer attribute names are part of the checkpoint format ("emnist_lenet.pt")
    and must not be renamed.
    """
    def __init__(self, num_classes):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        self.dropout1 = nn.Dropout2d(0.25)
        self.dropout2 = nn.Dropout2d(0.5)
        self.fc1 = nn.Linear(9216, 128)
        self.fc2 = nn.Linear(128, num_classes)

    def forward(self, x):
        # Two conv+ReLU stages, a single 2x2 max-pool, then dropout.
        features = F.relu(self.conv2(F.relu(self.conv1(x))))
        features = self.dropout1(F.max_pool2d(features, 2))
        # Flatten everything but the batch dimension for the classifier head.
        flat = torch.flatten(features, 1)
        hidden = self.dropout2(F.relu(self.fc1(flat)))
        # Log-softmax pairs with F.nll_loss in train()/test().
        return F.log_softmax(self.fc2(hidden), dim=1)
# Module-level loss object; NOTE(review): train()/test() below call
# F.nll_loss directly, so this is only used where re-assigned locally.
criterion = nn.CrossEntropyLoss()
def train(args, model, device, train_loader, optimizer, epoch):
    """Run one epoch of NLL-loss training over `train_loader`.

    Args:
        args: CLI namespace; only `log_interval` is read here.
        model: network whose forward returns log-probabilities.
        device: torch device to move each batch onto.
        train_loader: iterable of (data, target) batches.
        optimizer: optimizer over `model`'s parameters.
        epoch: epoch number, used only in log messages.
    """
    model.train()
    for step, (inputs, labels) in enumerate(train_loader):
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()
        log_probs = model(inputs)
        batch_loss = F.nll_loss(log_probs, labels)
        batch_loss.backward()
        optimizer.step()
        # Progress report every `log_interval` batches (including batch 0).
        if step % args.log_interval == 0:
            logger.info('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, step * len(inputs), len(train_loader.dataset),
                100. * step / len(train_loader), batch_loss.item()))
def test(args, model, device, test_loader, mode="raw-task"):
    """Evaluate `model` on `test_loader`, logging per-class and overall accuracy.

    Args:
        args: CLI namespace (kept for interface compatibility).
        model: network whose forward returns log-probabilities.
        device: torch device to move each batch onto.
        test_loader: DataLoader over the evaluation set.
        mode: "raw-task" (digit labels "0".."9") or "targetted-task"
            (FashionMNIST class names, the backdoor target task).
    """
    class_correct = list(0. for i in range(10))
    class_total = list(0. for i in range(10))
    if mode == "raw-task":
        classes = [str(i) for i in range(10)]
    elif mode == "targetted-task":
        classes = ["T-shirt/top",
                   "Trouser",
                   "Pullover",
                   "Dress",
                   "Coat",
                   "Sandal",
                   "Shirt",
                   "Sneaker",
                   "Bag",
                   "Ankle boot"]
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            _, predicted = torch.max(output, 1)
            # Keep the per-sample correctness mask 1-D; the former `.squeeze()`
            # produced a 0-dim tensor for a batch of size 1, breaking indexing.
            c = (predicted == target)
            test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
            pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
            # BUGFIX: iterate over the actual batch size, not args.test_batch_size;
            # the final batch may be smaller and previously raised an IndexError.
            for image_index in range(target.size(0)):
                label = target[image_index]
                class_correct[label] += c[image_index].item()
                class_total[label] += 1
    test_loss /= len(test_loader.dataset)
    if mode == "raw-task":
        for i in range(10):
            # Skip classes absent from the test set (avoids division by zero).
            if class_total[i] > 0:
                logger.info('Accuracy of %5s : %.2f %%' % (
                    classes[i], 100 * class_correct[i] / class_total[i]))
        logger.info('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
            test_loss, correct, len(test_loader.dataset),
            100. * correct / len(test_loader.dataset)))
    elif mode == "targetted-task":
        # TODO (hwang): need to modify this for future use
        for i in range(10):
            if class_total[i] > 0:
                logger.info('Accuracy of %5s : %.2f %%' % (
                    classes[i], 100 * class_correct[i] / class_total[i]))
def calc_norm_diff(gs_model, vanilla_model, epoch, fl_round, mode="bad"):
    """Log the l2 distance between the parameter vectors of two models.

    Args:
        gs_model: model whose drift from `vanilla_model` is measured.
        vanilla_model: reference (global) model with identical architecture.
        epoch: local epoch number, used only in the log message.
        fl_round: FL round number, used only in the log message.
        mode: log label — "bad" (adversary vs global), "normal"
            (benign client vs global) or "avg" (aggregate vs global).
    """
    norm_diff = 0
    # Walk both parameter sequences in lockstep; the previous version rebuilt
    # list(model.parameters()) on every iteration (accidental O(n^2)).
    for gs_param, vanilla_param in zip(gs_model.parameters(), vanilla_model.parameters()):
        norm_diff += torch.norm(gs_param - vanilla_param) ** 2
    norm_diff = torch.sqrt(norm_diff).item()
    if mode == "bad":
        #pdb.set_trace()
        logger.info("===> ND `|w_bad-w_g|` in local epoch: {} | FL round: {} |, is {}".format(epoch, fl_round, norm_diff))
    elif mode == "normal":
        logger.info("===> ND `|w_normal-w_g|` in local epoch: {} | FL round: {} |, is {}".format(epoch, fl_round, norm_diff))
    elif mode == "avg":
        logger.info("===> ND `|w_avg-w_g|` in local epoch: {} | FL round: {} |, is {}".format(epoch, fl_round, norm_diff))
def fed_avg_aggregator(net_list, net_freq):
    """FedAvg: weighted average of client models into a fresh global model.

    Args:
        net_list: client models with identical architecture; by convention the
            adversary (if any) is at index 0.
        net_freq: per-client weights (data fractions); should sum to 1.

    Returns:
        A new `Net` on the module-level `device` holding the weighted average.

    Note: relies on the module-level `device` set by the main script.
    """
    #net_avg = VGG('VGG11').to(device)
    net_avg = Net(num_classes=10).to(device)
    # Iterate all models' parameter tensors in lockstep; the previous version
    # rebuilt list(net.parameters()) per (tensor, client) pair — O(n^2).
    param_groups = zip(*(net.parameters() for net in net_list))
    for avg_param, client_params in zip(net_avg.parameters(), param_groups):
        accumulated = torch.zeros(avg_param.size()).to(device)
        for freq, client_param in zip(net_freq, client_params):
            # we assume the adv model always comes to the beginning
            accumulated = accumulated + freq * client_param.data
        avg_param.data = accumulated
    return net_avg
if __name__ == "__main__":
    # NOTE(review): GPU index 2 is hard-coded when CUDA is available — confirm
    # this matches the target machine.
    device = 'cuda:2' if torch.cuda.is_available() else 'cpu'
    logger.info('==> Building model..')
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size', type=int, default=32, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=14, metavar='N',
                        help='number of epochs to train (default: 14)')
    parser.add_argument('--lr', type=float, default=1.0, metavar='LR',
                        help='learning rate (default: 1.0)')
    parser.add_argument('--gamma', type=float, default=0.99, metavar='M',
                        help='Learning rate step gamma (default: 0.7)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=100, metavar='N',
                        help='how many batches to wait before logging training status')
    # NOTE(review): `type=float or int` evaluates to `float`; the int case is
    # handled manually below.
    parser.add_argument('--fraction', type=float or int, default=10,
                        help='how many fraction of poisoned data inserted')
    parser.add_argument('--local_train_period', type=int, default=1,
                        help='number of local training epochs')
    #parser.add_argument('--save-model', action='store_true', default=False,
    #                    help='For Saving the current Model')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    torch.manual_seed(args.seed)
    import copy
    # the hyper-params are inspired by the paper "Can you really backdoor FL?" (https://arxiv.org/pdf/1911.07963.pdf)
    ### Hyper-params for poisoned attack:
    # --fraction < 1 is treated as a ratio, otherwise as an absolute count;
    # it only selects which pre-built poisoned dataset file is loaded below.
    if args.fraction < 1:
        fraction=args.fraction #0.1 #10
    else:
        fraction=int(args.fraction)
    num_nets = 3383
    part_nets_per_round = 30
    num_dp_cifar10 = 5e4
    num_dp_adversary = 55e3
    partition_strategy = "homo"
    # Maps client index -> indices of its local EMNIST data points.
    net_dataidx_map = partition_data(
        'emnist', './data', partition_strategy,
        num_nets, 0.5)
    # rounds of fl to conduct
    ## some hyper-params here:
    fl_round = 100
    local_training_period = args.local_train_period #5 #1
    adversarial_local_training_period = 5
    #lr = 0.0005
    args_lr = 0.01
    attacking_fl_rounds = [1]
    #attacking_range = np.arange(30)
    # TODO(hwang): we need to generate this per FL round
    # load poisoned dataset:
    with open("poisoned_dataset_fraction_{}".format(fraction), "rb") as saved_data_file:
        poisoned_emnist_dataset = torch.load(saved_data_file)
    num_dps_poisoned_dataset = poisoned_emnist_dataset.data.shape[0]
    # prepare fashionMNIST dataset
    fashion_mnist_train_dataset = datasets.FashionMNIST('./data', train=True, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ]))
    fashion_mnist_test_dataset = datasets.FashionMNIST('./data', train=False, transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ]))
    # prepare EMNIST dataset
    emnist_train_dataset = datasets.EMNIST('./data', split="digits", train=True, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ]))
    emnist_test_dataset = datasets.EMNIST('./data', split="digits", train=False, transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ]))
    poisoned_emnist_train_loader = torch.utils.data.DataLoader(poisoned_emnist_dataset,
                       batch_size=args.batch_size, shuffle=True, **kwargs)
    vanilla_train_loader = torch.utils.data.DataLoader(emnist_train_dataset,
                       batch_size=args.batch_size, shuffle=True, **kwargs)
    vanilla_emnist_test_loader = torch.utils.data.DataLoader(emnist_test_dataset,
                       batch_size=args.test_batch_size, shuffle=False, **kwargs)
    targetted_task_test_loader = torch.utils.data.DataLoader(fashion_mnist_test_dataset,
                       batch_size=args.test_batch_size, shuffle=False, **kwargs)
    # Start from a pre-trained checkpoint when READ_CKPT is set, otherwise from
    # a freshly initialized network.
    if READ_CKPT:
        net_avg = Net(num_classes=10).to(device)
        with open("emnist_lenet.pt", "rb") as ckpt_file:
            ckpt_state_dict = torch.load(ckpt_file)
            net_avg.load_state_dict(ckpt_state_dict)
        logger.info("Loading checkpoint file successfully ...")
    else:
        net_avg = Net(num_classes=10).to(device)
    logger.info("Test the model performance on the entire task before FL process ... ")
    test(args, net_avg, device, vanilla_emnist_test_loader, mode="raw-task")
    test(args, net_avg, device, targetted_task_test_loader, mode="targetted-task")
    # let's remain a copy of the global model for measuring the norm distance:
    vanilla_model = copy.deepcopy(net_avg)
    # let's conduct multi-round training
    for flr in range(1, fl_round+1):
        if flr in attacking_fl_rounds:
            # randomly select participating clients
            # in this current version, we sample `part_nets_per_round-1` per FL round since we assume attacker will always participates
            selected_node_indices = np.random.choice(num_nets, size=part_nets_per_round-1, replace=False)
            num_data_points = [len(net_dataidx_map[i]) for i in selected_node_indices]
            total_num_dps_per_round = sum(num_data_points) + num_dps_poisoned_dataset
            net_freq = [num_dps_poisoned_dataset/ total_num_dps_per_round] + [num_data_points[i]/total_num_dps_per_round for i in range(part_nets_per_round-1)]
            logger.info("Net freq: {}, FL round: {} with adversary".format(net_freq, flr))
            #pdb.set_trace()
            # we need to reconstruct the net list at the beginning
            net_list = [copy.deepcopy(net_avg) for _ in range(part_nets_per_round)]
            logger.info("################## Starting fl round: {}".format(flr))
            # # start the FL process
            for net_idx, net in enumerate(net_list):
                if net_idx == 0:
                    pass
                else:
                    # NOTE(review): indexes net_dataidx_map by loop position
                    # (0..part_nets_per_round-1), not by the sampled
                    # selected_node_indices — looks like a bug, confirm intent.
                    dataidxs = net_dataidx_map[net_idx]
                    train_dl_local, _ = get_dataloader('emnist', './data', args.batch_size,
                                            args.test_batch_size, dataidxs) # also get the data loader
                logger.info("@@@@@@@@ Working on client: {}".format(net_idx))
                #logger.info("Before local training, the performance of model ...")
                #test(args, net, device, vanilla_emnist_test_loader, mode="raw-task")
                criterion = nn.CrossEntropyLoss()
                optimizer = optim.SGD(net.parameters(), lr=args_lr, momentum=0.9, weight_decay=1e-4) # epoch, net, train_loader, optimizer, criterion
                if net_idx == 0:
                    for e in range(1, adversarial_local_training_period+1):
                        # we always assume net index 0 is adversary
                        train(args, net, device, poisoned_emnist_train_loader, optimizer, e)
                    # at here we can check the distance between w_bad and w_g i.e. `\|w_bad - w_g\|_2`
                    #def calc_norm_diff(gs_model, vanilla_model, epoch, fl_round, mode="bad"):
                    # `e` here is the loop variable left over from the loop above.
                    calc_norm_diff(gs_model=net, vanilla_model=vanilla_model, epoch=e, fl_round=flr, mode="bad")
                else:
                    for e in range(1, local_training_period+1):
                        train(args, net, device, train_dl_local, optimizer, e)
                    # at here we can check the distance between w_normal and w_g i.e. `\|w_bad - w_g\|_2`
                    #calc_norm_diff(gs_model, vanilla_model, epoch)
                    calc_norm_diff(gs_model=net, vanilla_model=vanilla_model, epoch=e, fl_round=flr, mode="normal")
                #for e in range(1, local_training_period+1):
                #    if net_idx == 0:
                #        # we always assume net index 0 is adversary
                #        train(args, net, device, poisoned_emnist_train_loader, optimizer, e)
                #    else:
                #        train(args, net, device, train_dl_local, optimizer, e)
        else:
            # Benign round: all participants are honest clients.
            selected_node_indices = np.random.choice(num_nets, size=part_nets_per_round, replace=False)
            num_data_points = [len(net_dataidx_map[i]) for i in selected_node_indices]
            total_num_dps_per_round = sum(num_data_points)
            net_freq = [num_data_points[i]/total_num_dps_per_round for i in range(part_nets_per_round)]
            logger.info("Net freq: {}, FL round: {} without adversary".format(net_freq, flr))
            # we need to reconstruct the net list at the beginning
            net_list = [copy.deepcopy(net_avg) for _ in range(part_nets_per_round)]
            logger.info("################## Starting fl round: {}".format(flr))
            # # start the FL process
            for net_idx, net in enumerate(net_list):
                dataidxs = net_dataidx_map[net_idx]
                train_dl_local, _ = get_dataloader('emnist', './data', args.batch_size,
                                        args.test_batch_size, dataidxs) # also get the data loader
                logger.info("@@@@@@@@ Working on client: {}".format(net_idx))
                criterion = nn.CrossEntropyLoss()
                optimizer = optim.SGD(net.parameters(), lr=args_lr, momentum=0.9, weight_decay=1e-4) # epoch, net, train_loader, optimizer, criterion
                for e in range(1, local_training_period+1):
                    train(args, net, device, train_dl_local, optimizer, e)
        # after local training periods
        net_avg = fed_avg_aggregator(net_list, net_freq)
        calc_norm_diff(gs_model=net_avg, vanilla_model=vanilla_model, epoch=0, fl_round=flr, mode="avg")
        logger.info("Measuring the accuracy of the averaged global model, FL round: {} ...".format(flr))
        test(args, net_avg, device, vanilla_emnist_test_loader, mode="raw-task")
        test(args, net_avg, device, targetted_task_test_loader, mode="targetted-task")
|
{"hexsha": "dc006a71f6c96c0d4be65444d9e9aa8c5b3f14f8", "size": 16423, "ext": "py", "lang": "Python", "max_stars_repo_path": "backup/simulated_averaging_april22.py", "max_stars_repo_name": "SanaAwan5/edgecase_backdoors", "max_stars_repo_head_hexsha": "c892024242e45557fa94363ecadc355a9250bca0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2021-05-08T07:49:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T16:18:05.000Z", "max_issues_repo_path": "backup/simulated_averaging_april22.py", "max_issues_repo_name": "SanaAwan5/edgecase_backdoors", "max_issues_repo_head_hexsha": "c892024242e45557fa94363ecadc355a9250bca0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-05-19T14:58:53.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-13T15:53:19.000Z", "max_forks_repo_path": "backup/simulated_averaging_april22.py", "max_forks_repo_name": "SanaAwan5/edgecase_backdoors", "max_forks_repo_head_hexsha": "c892024242e45557fa94363ecadc355a9250bca0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-05-22T08:56:20.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T06:50:21.000Z", "avg_line_length": 45.3674033149, "max_line_length": 159, "alphanum_fraction": 0.6017171041, "include": true, "reason": "import numpy", "num_tokens": 3825}
|
# Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Power flow data for 9 bus, 3 generator case.
Modifications:
1. Add 3 new lines to complicate the network
2. twice the loads
Additional data:
1. Add 3 columns for gen data
"""
from numpy import array
def case9():
    """Power flow data for 9 bus, 3 generator case.
    Please see L{caseformat} for details on the case file format.
    Based on data from Joe H. Chow's book, p. 70.
    @return: Power flow data for 9 bus, 3 generator case.
    """
    ppc = {"version": '2'}
    ##-----  Power Flow Data  -----##
    ## system MVA base
    ppc["baseMVA"] = 100.0
    ## bus data
    # bus_i type Pd Qd Gs Bs area Vm Va baseKV zone Vmax Vmin
    #! ratio_ls_max - maximal ratio of load shedding
    #! w_d - load shedding cost, the unit is $/MW
    # NOTE(review): rows carry 2 extra columns (ratio_ls_max, w_d) beyond the
    # standard 13-column MATPOWER bus format.
    ppc["bus"] = array([
        [1, 3, 0, 0, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9, 0.8, 10],
        [2, 2, 0, 0, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9, 0.8, 10],
        [3, 2, 0, 0, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9, 0.8, 10],
        [4, 1, 0, 0, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9, 0.8, 10],
        [5, 1, 90, 30, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9, 0.8, 10],
        [6, 1, 0, 0, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9, 0.8, 10],
        [7, 1, 100, 35, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9, 0.8, 8],
        [8, 1, 0, 0, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9, 0.8, 10],
        [9, 1, 125, 50, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9, 0.8, 6]
    ])
    ## generator data
    # bus, Pg, Qg, Qmax, Qmin, Vg, mBase, status, Pmax, Pmin, Pc1, Pc2,
    # Qc1min, Qc1max, Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30, ramp_q, apf
    #! gen_type - 0 for conventional generators and 1 for VRE-based generators
    #! phi_v_min - minimal power factor of inverters
    #! s_v_max - MVA rating of inverters
    #! r_+2 - upward ramp rate for the second stage
    #! r_-2 - downward ramp rate for the second stage, r_-2 = r_+2 if the generator does not participate into regulation
    #! r_+3 - upward ramp rate for the second stage
    #! r_-3 - downward ramp rate for the second stage, r_-2 = r_+2 if the generator does not participate into regulation
    ppc["gen"] = array([
        [1, 0, 0, 300, -300, 1, 100, 1, 250, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.95, 250*1.5, 20, 40, 30, 60],
        [2, 163, 0, 300, -300, 1, 100, 1, 300, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0.95, 300*1.5, 20, 40, 30, 60],
        [3, 85, 0, 300, -300, 1, 100, 1, 270, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0.95, 270*1.5, 0, 0, 30, 60]
    ])
    ## branch data
    # fbus, tbus, r, x, b, rateA, rateB, rateC, ratio, angle, status, angmin, angmax
    #! w_s - cost of line switching in the third-stage corrective control
    # NOTE(review): last 3 rows (5-7, 5-9, 7-9) and rows 4-6, 4-8 are the lines
    # added per the module header's "Modifications" note.
    ppc["branch"] = array([
        [1, 4, 0, 0.0576, 0, 250*3, 250, 250, 1, 0, 1, -30, 30, 0.1],
        [4, 5, 0.017, 0.092, 0.158, 250, 250, 250, 0, 0, 1, -30, 30, 0.1],
        [5, 6, 0.039, 0.17, 0.358, 150, 150, 150, 0, 0, 1, -30, 30, 0.1],
        [3, 6, 0, 0.0586, 0, 300*3, 300, 300, 1, 0, 1, -30, 30, 0.1],
        [6, 7, 0.0119, 0.1008, 0.209, 150, 150, 150, 0, 0, 1, -30, 30, 0.1],
        [7, 8, 0.0085, 0.072, 0.149, 250, 250, 250, 0, 0, 1, -30, 30, 0.1],
        [8, 2, 0, 0.0625, 0, 250*3, 250, 250, 1, 0, 1, -30, 30, 0.1],
        [8, 9, 0.032, 0.161, 0.306, 250, 250, 250, 0, 0, 1, -30, 30, 0.1],
        [9, 4, 0.01, 0.085, 0.176, 250, 250, 250, 0, 0, 1, -30, 30, 0.1],
        [5, 7, 0.002, 0.02, 0.04 ,50, 50, 50, 0, 0, 1, -30, 30, 0.1],
        [5, 9, 0.002, 0.02, 0.04 ,50, 50, 50, 0, 0, 1, -30, 30, 0.1],
        [7, 9, 0.002, 0.02, 0.04 ,50, 50, 50, 0, 0, 1, -30, 30, 0.1],
        [4, 6, 0.02, 0.2, 0.4 , 300, 300, 300, 0, 0, 1, -30, 30, 0.1],
        [4, 8, 0.02, 0.2, 0.4 , 300, 300, 300, 0, 0, 1, -30, 30, 0.1]
    ])
    ##-----  OPF Data  -----##
    ## area data
    # area refbus
    ppc["areas"] = array([
        [1, 5]
    ])
    ## generator cost data
    # 1 startup shutdown n x1 y1 ... xn yn
    # 2 startup shutdown n c(n-1) ... c0
    #! w_+2
    #! w_-2
    #! w_+3
    #! w_-3
    ppc["gencost"] = array([
        [2, 1500, 0, 3, 0.11, 5, 150, 10, 10, 1, 1],
        [2, 2000, 0, 3, 0.085, 1.2, 600, 1, 1, 1, 1],
        [2, 3000, 0, 3, 0.1225, 1, 335, 1, 1, 1, 1]
    ])
    return ppc
|
{"hexsha": "f88cfa25a30adfa3008406fdd55bd8a13a5a0257", "size": 4512, "ext": "py", "lang": "Python", "max_stars_repo_path": "Uncertainty/data/case-9-modified/case9.py", "max_stars_repo_name": "thanever/SOC", "max_stars_repo_head_hexsha": "9f30d1a9c7610a68de9c178a1170bdf1c8ca11d4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Uncertainty/data/case-9-modified/case9.py", "max_issues_repo_name": "thanever/SOC", "max_issues_repo_head_hexsha": "9f30d1a9c7610a68de9c178a1170bdf1c8ca11d4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Uncertainty/data/case-9-modified/case9.py", "max_forks_repo_name": "thanever/SOC", "max_forks_repo_head_hexsha": "9f30d1a9c7610a68de9c178a1170bdf1c8ca11d4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.3846153846, "max_line_length": 127, "alphanum_fraction": 0.4833776596, "include": true, "reason": "from numpy", "num_tokens": 2361}
|
import numpy as np
import random,sys
import scipy
from scipy.spatial.distance import pdist,squareform,cdist
#from scipy.spatial import distance_matrix
import matplotlib.pyplot as plt
import scipy
### "for loop" version
### faster than "matrix version"
### because only need to consider points within h_k
### for loop version
### run this cell to overwrite the previous matrix version
### because this version is faster
def adaptive_cluster(data, gap_par = 0.5, n0=None,debug=False,assign_outliers = 'nearest_cluster'):
'''
data:: a numeric numpy array
gap_par: the lambda parameter used to test the gap
n0: the initial neighbors for each data point.
debug: for debug
assign_outliers: nearest_cluster, assign outliers to nearest cluster. new_cluster, assign outliers to a new cluster
'''
weight_matrix_history = []
(n_points,n_features) = data.shape
#distance_matrix = scipy.spatial.distance_matrix(data,data)
## faster version
distance_matrix = scipy.spatial.distance.cdist(data, data, 'euclidean')
#print('distance_matrix.shape',distance_matrix.shape)
weight_matrix = np.zeros(shape=(n_points,n_points))
weight_matrix_history.append((0,weight_matrix))
#print('weight_matrix.shape',weight_matrix.shape)
#plot_weight_matrix(weight_matrix)
### sort the distance matrix
sorted_distance_idx_matrix = np.argsort(distance_matrix,axis=1)
sorted_distance_matrix = np.sort(distance_matrix,axis=1)
#print('sorted_distance_matrix.shape',sorted_distance_matrix.shape)
#print('sorted_distance_idx_matrix.shape',sorted_distance_idx_matrix.shape)
### number of neighbors
if n0 is None:
n0 = 2*n_features+2
### h0 is the the radius such that the point has n0 neighbors
h0 = sorted_distance_matrix[:,n0]
#print('h0.shape',h0.shape)
### max(h0(Xi),h0(Xj))
#max_h0 = np.reshape([np.maximum(h0[i],h0[j]) for i in range(n_points) for j in range(n_points)],newshape=(n_points,n_points))
#print('max_h0.shape',max_h0.shape)
### weight_matrix
#weight_matrix = (distance_matrix <= max_h0).astype('int')
### faster version
h0_matrix = np.tile(h0, (n_points, 1))
h0_matrix_T = h0_matrix.T
h0_matrix_max = np.maximum(h0_matrix,h0_matrix_T)
weight_matrix = (distance_matrix<=h0_matrix_max).astype('int')
#print('weight_matrix.shape',weight_matrix.shape)
#plot_weight_matrix(weight_matrix)
#################################################################
### find h sequence
a = 1.4142135623730951
b = 1.95
#gap_par = -1
max_distance = np.max(sorted_distance_matrix)
### h0 is a vector, each data point has n0 neighbors
### max(h0) makes sure that each data point has at least n0 neighbors
h_array = np.array([np.max(h0)])
#n_matrix = np.repeat(n0, n_points)
#n_matrix = n_matrix[:,np.newaxis]
k = 0
weight_matrix_history.append((h_array[k],weight_matrix.copy()))
while h_array[k] <= max_distance:
### upper bound of n(Xi,h_k+1)
### given radius h_array[k], how many neighbors for each data point
### -1 removes its self from counting
n_upper = a * np.array([np.sum(sorted_distance_matrix[i,:]<=h_array[k])-1 for i in np.arange(n_points)])
n_upper = (np.floor(n_upper)).astype('int')
### when h is big, the n_upper may be > n_points
n_upper = np.clip(n_upper, a_min=None,a_max=(n_points-1))
#print(n_upper)
### n_upper can decide the h_upper
h_upper_by_n_upper = np.min(np.array([sorted_distance_matrix[i,n_upper[i]] for i in np.arange(n_points)]))
### upper bound of h_k+1
h_upper = b*h_array[k]
### must satisfy both conditions
min_h_upper = np.minimum(h_upper_by_n_upper,h_upper)
#print(k,min_h_upper)
### append to the h_array
### just make sure h is not > max_distance
if min_h_upper <= max_distance:
if min_h_upper <= h_array[k]: break
#print(k,'h',min_h_upper)
h_array = np.append(h_array,min_h_upper)
k = k + 1
#################################################################
### check if those h satisfy the conditions
if debug:
for k in range(1,len(h_array)):
if h_array[k] <= b*h_array[k-1]:
continue
print('k',k,h_array[k],h_array[k-1],b*h_array[k-1],end=',')
print(h_array[k]/h_array[k-1])
else:
print('h error')
for k in range(1,len(h_array)):
for i in range(n_points):
n1 = np.sum(sorted_distance_matrix[i,:]<=h_array[k-1])-1
n2 = np.sum(sorted_distance_matrix[i,:]<=h_array[k])-1
if n2<=a*n1 and n1>=n0 and n2>=n0:
continue
print('n',k,n1,n2,a*n1,end=',')
print(n2/n1)
else:
print('n error')
#################################################################
beta_a = (n_features+1.0)/2.0
beta_b = 0.5
beta_function = scipy.special.beta(beta_a,beta_b)
np.seterr(divide='ignore', invalid='ignore')
print('h_k',h_array[0])
for k in range(1,len(h_array)):
print('h_k',h_array[k])
#t_matrix = distance_matrix/h_array[k-1]
#beta_x_matrix = 1.0-(t_matrix**2)/4.0
#incomplete_beta_function_matrix = scipy.special.betainc(beta_a,beta_b,beta_x_matrix)
#q_matrix = incomplete_beta_function_matrix / (2*beta_function-incomplete_beta_function_matrix)
for i in range(n_points):
weight_matrix[i,i] = 1
for j in range(i,n_points):
#if weight_matrix[i,j] == 1:
# continue
#if i == j:
# weight_matrix[i,j] = 1
# continue
#if i > j:
# weight_matrix[i,j] = weight_matrix[j,i]
# continue
if distance_matrix[i,j] <= h_array[k] and h_array[k-1] >= h0[i] and h_array[k-1] >= h0[j]:
#### caclulate overlap
N_overlap = np.dot(weight_matrix[i,:],weight_matrix[j,:])
#### caclulate complement
#N_complement = np.zeros(shape=(n_points,n_points))
if k>1:
ind1 = (distance_matrix[j,:] > h_array[k-1]) + 0.0
ind2 = (distance_matrix[i,:] > h_array[k-1]) + 0.0
else:
ind1 = (distance_matrix[j,:] > h0_matrix_max[i,j]) + 0.0
ind2 = (distance_matrix[i,:] > h0_matrix_max[i,j]) + 0.0
N_complement = np.dot(weight_matrix[i,:],ind1) + np.dot(weight_matrix[j,:],ind2)
#### caclulate union
N_union = N_overlap + N_complement
#### theta
theta = N_overlap / N_union
#### q
t = distance_matrix[i,j]/h_array[k-1]
beta_x = 1.0-(t**2)/4.0
incomplete_beta_function = scipy.special.betainc(beta_a,beta_b,beta_x)
q = incomplete_beta_function / (2*beta_function-incomplete_beta_function)
#q = q_matrix[i,j]
T1 = N_union
#### this may raise warnings about log(0) or log(nan)
#### this is fine, since I used the whole matrix here
#### some of the points are out of the h(k) radius
#### we will mask those points in the later step
T2 = theta*np.log(theta/q)+(1.0-theta)*np.log((1.0-theta)/(1.0-q))
#### when N_overlap is 0, theta is 0, this leands to T is nan
#### replace those nan with 0 in T
#T2 = np.where(theta==0.0,0.0,T2)
#T2 = np.where(theta==1.0,0.0,T2)
#T3 = ((theta<=q).astype('int')-(theta>q).astype('int'))
### faster version
if theta<=q:
T = T1 * T2
else:
T = - (T1 * T2)
#T = T1 * T2 * T3
####
####
#weight_matrix[i,j] = (distance_matrix[i,j]<=h_array[k]) * (T<=gap_par) + 0.0
weight_matrix[i,j] = (T<=gap_par) + 0.0
#### be careful with those boundary points
#### theta=0 means no overlap at all
#### theta=1 means completely overlap
#### needs special treatment for them
if theta==0: weight_matrix[i,j] = 0
if theta==1: weight_matrix[i,j] = 1
####
weight_matrix[j,i] = weight_matrix[i,j]
weight_matrix_history.append((h_array[k],weight_matrix.copy()))
### reset to default
np.seterr(divide='warn', invalid='warn')
### calculate S
S = np.sum(weight_matrix)
### extract clusters from weight matrix
labels = (np.zeros(shape=weight_matrix.shape[0]))
labels.fill(np.nan)
cluster_ind = 0
for i in range(len(labels)):
for j in range(len(labels)):
if i == j:continue
if weight_matrix[i,j] == 1:
if np.isnan(labels[i]) and np.isnan(labels[j]):
labels[i] = cluster_ind
labels[j] = cluster_ind
cluster_ind = cluster_ind + 1
elif not np.isnan(labels[i]) and np.isnan(labels[j]):
labels[j] = labels[i]
elif np.isnan(labels[i]) and not np.isnan(labels[j]):
labels[i] = labels[j]
elif not np.isnan(labels[i]) and not np.isnan(labels[j]):
continue
else:
print(i,j,labels[i],labels[j])
print('cluster assignment error')
### some points may not belong to any cluster
### assign those points to the nearest cluster
### or they can be ignored (by default, those points will have np.nan as labels)
### thus those points can be considered as outliers
if assign_outliers == 'nearest_cluster':
if np.sum(np.isnan(labels))>0:
nan_ind = np.argwhere(np.isnan(labels)).flatten()
for i in nan_ind:
dist = distance_matrix[i,:].copy()
dist[i] = np.max(dist)
nearest_ind = np.argmin(dist)
labels[i] = labels[nearest_ind]
#print(dist)
#print(i,nearest_ind)
elif assign_outliers == 'new_cluster':
if np.sum(np.isnan(labels))>0:
nan_ind = np.argwhere(np.isnan(labels)).flatten()
outlier_label = np.nanmax(np.unique(labels)) + 1
for i in nan_ind:
labels[i] = outlier_label
else:
print('assign_outliers parameter is not correct')
return({"S":S,"weight_matrix":weight_matrix,
"cluster_label":labels,
"weight_matrix_history":weight_matrix_history,
})
def k_means(data, n_clusters=3, n_init=20, max_iter=100, kernel=None,
            verbose=False,sigma = 1.0,use_kmean_controid=False):
    '''
    Run (kernel) k-means clustering with multiple random restarts and
    return the best run plus the per-initialization history.

    data: a numeric numpy array
    n_clusters: number of clusters
    n_init: number of different initializations to run kmeans
    max_iter: number of max iterations
    kernel: "None", regular k means; "gaussian", k means with gaussian kernel
    verbose: output detailed information
    sigma: the sigma parameter in the gaussian kernel
    use_kmean_controid: for kenel K means, use the best controids from K means as initialization points.

    Returns a dict with keys 'best_iter', 'best_sse', 'best_controids',
    'best_cluster_label', 'controid_history', 'cluster_label_history',
    'sse_history'.  NOTE: the 'controid' spelling (sic) is part of the
    public dict API (the recursive call below indexes 'best_controids'),
    so it is deliberately left unchanged.
    '''
    ### may not be efficient in terms of memory use
    ### no need to save whole history
    ### get whole hitory for debugging purpose
    controid_history = {}
    cluster_label_history = {}
    sse_history = np.zeros(shape=(n_init,1))
    ### start k-means
    n_points = data.shape[0]
    ### calculate the kernel matrix
    if kernel == 'gaussian':
        ### 'sqeuclidean': squared Euclidean distance
        ### K[i,j] = exp(-||x_i - x_j||^2 / (2*sigma^2))
        kernel_matrix = np.exp(-0.5/(sigma**2)*squareform(pdist(data,'sqeuclidean')))
    ### repeat k-means n_init times
    ### return the best one
    ### silence div-by-zero / 0/0 warnings from empty clusters; restored below
    np.seterr(divide='ignore', invalid='ignore')
    for i_init in range(n_init):
        if verbose: print('Random seed',i_init)
        #### set random seed (one deterministic seed per restart)
        np.random.seed(i_init)
        #### generate initial cluster labels
        cluster_labels = np.random.choice(range(n_clusters),size=n_points, replace=True)
        #### generate initial centroids
        #### randomly choose n_clusters points from the data as centroids
        if use_kmean_controid:
            #### run one K means
            #### bootstrap kernel k-means from the best plain k-means centroids
            print('Use best K means centroid')
            km_result = k_means(data, n_clusters, n_init=20, max_iter=100, kernel=None)
            centroids = km_result['best_controids']
        else:
            #### randomly choose n_clusters points from the data as centroids
            centroids = data[np.random.choice(np.arange(n_points), n_clusters, replace=False),:]
        for i_iter in range(max_iter):
            if verbose: print('Iteration',i_iter,end=', ')
            distance_to_centroids = np.zeros(shape=(data.shape[0],n_clusters))
            ######
            if kernel is None:
                distance_to_centroids = scipy.spatial.distance.cdist(data, centroids, 'euclidean')
            ######
            elif kernel == 'gaussian':
                #### feature-space distance assembled from kernel entries:
                #### dist1 = k(x,x); dist2 = 2*mean_k k(x, cluster members);
                #### dist3 = mean of within-cluster kernel values
                dist1 = np.diag(kernel_matrix)
                cluster_ind_matrix = np.zeros(shape=(data.shape[0],n_clusters))
                for i_centroid in range(n_clusters):
                    cluster_ind_matrix[:,i_centroid] = (cluster_labels == i_centroid) + 0.0
                    kth_cluster_ind = (cluster_labels == i_centroid) + 0.0
                    kth_cluster_matrix = np.outer(kth_cluster_ind,kth_cluster_ind)
                    dist2 = 2.0*np.sum(np.tile(kth_cluster_ind,(n_points,1))*kernel_matrix,axis=1)/np.sum(kth_cluster_ind)
                    dist3 = np.sum(kth_cluster_matrix*kernel_matrix)/np.sum(kth_cluster_matrix)
                    #print(dist1.shape,dist2.shape,dist3.shape,)
                    ### ord=2 is L2 distance
                    ### axis=1 is to calculate norm along columns
                    distance_to_centroids[:,i_centroid] = dist1-dist2+dist3
                    #break
            else:
                sys.exit('Kernel parameter is not correct!')
            #print(distance_to_centroids)
            ### assign the cluster labels
            cluster_labels = np.argmin(distance_to_centroids,axis=1)
            sse = np.sum((np.min(distance_to_centroids,axis=1))**2)
            if verbose: print('SSE',sse)
            ### re-calculate centroids
            previous_centroids = centroids
            centroids = np.array([data[cluster_labels == i_centroid].mean(axis = 0) for i_centroid in range(n_clusters)])
            ### if centroids don't change
            ### stop the iteration
            if np.all(previous_centroids == centroids):
                if verbose: print('Centroids do not change',i_iter)
                break
            #break
        controid_history[i_init] = centroids
        cluster_label_history[i_init] = cluster_labels
        sse_history[i_init] = sse
        #break
    ### restore default floating-point warning behavior
    np.seterr(divide='warn', invalid='warn')
    ### find the best initializations (lowest SSE across restarts)
    best_iter = np.argmin(sse_history)
    best_sse = sse_history[best_iter]
    best_controids = controid_history[best_iter]
    best_cluster_label = cluster_label_history[best_iter]
    return {'best_iter':best_iter,
            'best_sse':best_sse,
            'best_controids':best_controids,
            'best_cluster_label':best_cluster_label,
            'controid_history':controid_history,
            'cluster_label_history':cluster_label_history,
            'sse_history':sse_history,
            }
def plot_weight_matrix(weight_matrix):
    """Display `weight_matrix` as a matplotlib image; blocks until the window is closed."""
    plt.imshow(weight_matrix)
    plt.show()
|
{"hexsha": "264003e14799f041d8920a3bc23352775eb6833c", "size": 16290, "ext": "py", "lang": "Python", "max_stars_repo_path": "kmean_clustering/kmean_clustering.py", "max_stars_repo_name": "chvlyl/kernel_kmeans_and_adaptive_clustering", "max_stars_repo_head_hexsha": "0dd95158abc566b1975672016eaa327df8505267", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2019-07-25T11:13:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T07:52:53.000Z", "max_issues_repo_path": "kmean_clustering/kmean_clustering.py", "max_issues_repo_name": "chvlyl/kernel_kmeans_and_adaptive_clustering", "max_issues_repo_head_hexsha": "0dd95158abc566b1975672016eaa327df8505267", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kmean_clustering/kmean_clustering.py", "max_forks_repo_name": "chvlyl/kernel_kmeans_and_adaptive_clustering", "max_forks_repo_head_hexsha": "0dd95158abc566b1975672016eaa327df8505267", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-02-02T17:09:16.000Z", "max_forks_repo_forks_event_max_datetime": "2019-02-02T17:09:16.000Z", "avg_line_length": 45.6302521008, "max_line_length": 130, "alphanum_fraction": 0.5686924494, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 3818}
|
import json
import os
from collections import defaultdict
import cv2
import numpy as np
import torchvision.transforms as tf
from models.utils import draw_umich_gaussian, gaussian_radius, line_gaussian
from PIL import Image
from shapely.geometry import Polygon
from torch.utils import data
class SUNRGBD(data.Dataset):
    """SUN RGB-D room-layout dataset.

    Loads (or builds and caches) an S3D-style layout annotation file and, per
    sample, produces the training targets used by the detector: plane center
    heatmaps, box sizes/offsets, 3D plane parameters, line heatmaps, per-pixel
    plane-parameter maps and inverse-depth maps.
    """

    def __init__(self, config, phase='train', split='all'):
        # config: provides max_objs, downsample, colorjitter (see usages below)
        # phase: which annotation file to load ('train'/... -> sunrgb_s3d_{phase}.json)
        # split: 'all', 'nyu' (NYUdata subset) or 'other'
        self.config = config
        self.phase = phase
        self.split = split
        self.max_objs = config.max_objs
        self.colorjitter = tf.ColorJitter(
            brightness=0.5, contrast=0.5, saturation=0.5, hue=0.5)
        # ImageNet mean/std normalization
        self.transforms = tf.Compose([
            tf.ToTensor(),
            tf.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
        # Load the cached S3D-format annotations, or convert once and cache.
        if os.path.isfile(f'data/SUNRGBD/sunrgb_s3d_{phase}.json'):
            with open(f'data/SUNRGBD/sunrgb_s3d_{phase}.json') as f:
                self.anno = json.load(f)
        else:
            self.anno = self.convert_suntos3d()
            with open(f'data/SUNRGBD/sunrgb_s3d_{phase}.json', 'w') as f:
                json.dump(self.anno, f)
        # extract NYU dataset
        # (the 5th path component of file_name identifies the sub-dataset)
        self.anno_nyu = []
        self.anno_other = []
        for i in self.anno:
            im_name = i['file_name']
            cato = im_name.split('/')[4]
            if cato == 'NYUdata':
                self.anno_nyu.append(i)
            else:
                self.anno_other.append(i)
        self.anno = {'all': self.anno,
                     'nyu': self.anno_nyu, 'other': self.anno_other}

    def __getitem__(self, index):
        """Build the full training-target dict for sample `index` of the split."""
        sample = self.anno[self.split][index]
        img_name = sample['file_name']
        # intrinsics.txt lives two directories above the image file
        intri_name = os.path.join(*img_name.split('/')[:-2], 'intrinsics.txt')
        with open(intri_name, 'r') as f:
            K = [[float(x) for x in y.rstrip(' ').lstrip(' ').split(' ')]
                 for y in f.readlines()]
        K = np.array(K).reshape([3, 3])
        self.K = np.array(K).astype(np.float32)
        self.K_inv = np.linalg.inv(K).astype(np.float32)
        img = Image.open(img_name)  # RGB
        if self.phase == 'train' and self.config.colorjitter:
            img = self.colorjitter(img)
        img = np.array(img)
        # pad/crop to a fixed 480x640 canvas
        img, inh, inw = self.padimage(img)
        img = self.transforms(img)
        layout = sample['layout']
        pparams = []      # per-instance 3D plane parameters
        labels = []       # per-instance class: 0=wall, 1=floor, 2=ceiling/other
        segs = -1 * np.ones([inh, inw])  # instance-id segmentation (-1 = background)
        i = 0
        endpoints = []    # layout line endpoints [x0, y0, x1, y1]
        for _, pp in enumerate(layout):
            if pp['category'] == 1:  # wall
                polygon = Polygon(np.array(pp['polygon'][0]))
                area = polygon.area
                # skip tiny polygons (noise)
                if area > 1000:
                    cout = np.array(pp['polygon'][0]).astype(np.int32)
                    cv2.fillPoly(segs, [cout], color=i)
                    pparams.append([*pp['plane_param']])
                    labels.append(0)
                    i = i + 1
            else:
                for v in pp['polygon']:
                    cout = np.array(v)
                    if len(cout) <= 2:
                        continue
                    polygon = Polygon(cout)
                    if polygon.area > 1000:
                        cout = cout.astype(np.int32)
                        cv2.fillPoly(segs, [cout], color=i)
                        pparams.append([*pp['plane_param']])
                        if pp['category'] == 2:  # floor
                            labels.append(1)
                        else:
                            labels.append(2)
                        i = i + 1
            if pp['attr'] == 1 or pp['attr'] == 2:  # oc line or intersection line
                xy = np.array(pp['endpoints'])
                if len(xy) > 0:
                    endpoints.append([xy[0, 0], xy[0, 1], xy[1, 0], xy[1, 1]])
        # plane detection gt and instance plane params #center map, wh, offset, instance param
        oh, ow = inh // self.config.downsample, inw // self.config.downsample
        hm = np.zeros((3, oh, ow), dtype=np.float32)
        wh = np.zeros((self.max_objs, 2), dtype=np.float32)
        reg = np.zeros((self.max_objs, 2), dtype=np.float32)
        params3d = np.zeros((self.max_objs, 4), dtype=np.float32)
        ind = np.zeros((self.max_objs), dtype=np.int64)
        reg_mask = np.zeros((self.max_objs), dtype=np.uint8)
        for i, (label, param) in enumerate(zip(labels, pparams)):
            yx = np.where(segs == i)
            if len(yx[0]) == 0:
                continue
            # tight bounding box of instance i, scaled to output resolution
            box = np.array([np.min(yx[1]), np.min(yx[0]), np.max(
                yx[1]), np.max(yx[0])], dtype=np.float32)
            box /= self.config.downsample
            h = box[3] - box[1]
            w = box[2] - box[0]
            radius = gaussian_radius((np.ceil(h), np.ceil(w)))
            radius = max(0, int(radius))
            ct = np.array(
                [(box[0] + box[2]) / 2, (box[1] + box[3]) / 2], dtype=np.float32)
            ct_int = ct.astype(np.int32)
            # splat a gaussian at the center into the class channel
            draw_umich_gaussian(hm[label], ct_int, radius)
            wh[i] = 1. * w, 1. * h
            ind[i] = ct_int[1] * ow + ct_int[0]   # flattened center index
            reg[i] = ct - ct_int                  # sub-pixel center offset
            reg_mask[i] = 1
            params3d[i, :3] = param[:3]
            params3d[i, 3] = param[3]  # 1. / param[3]
        ret = {'img': img, 'plane_hm': hm, 'reg_mask': reg_mask, 'ind': ind, 'plane_wh': wh,
               'plane_offset': reg,
               'params3d': params3d}
        # line detection gt # line map, alpha, offset map
        line_hm = np.zeros((3, oh, ow), dtype=np.float32)
        for line in endpoints:
            line = np.array(line) / self.config.downsample
            line = np.reshape(line, [2, 2])
            line_gaussian(line_hm, line, 2)
        ret['line_hm'] = line_hm[0:1]
        ret['line_alpha'] = line_hm[1:2]
        ret['line_offset'] = line_hm[2:3]
        # plane param map gt # pixel-wise plane param
        plane_params = np.zeros((4, oh, ow), dtype=np.float32)
        plane_params_input = np.zeros((4, inh, inw), dtype=np.float32)
        oseg = cv2.resize(segs, (ow, oh), interpolation=cv2.INTER_NEAREST)
        for i, param in enumerate(pparams):
            param = np.array(param)
            plane_params[:3, oseg == i] = param[:3, np.newaxis]  # normal
            # 1. / param[3] # offset 1/d
            plane_params[3, oseg == i] = param[3]
            plane_params_input[:3, segs == i] = param[:3, np.newaxis]
            plane_params_input[3, segs == i] = param[3]
        ret['plane_params'] = plane_params
        # param params for depth loss
        # coordinate map
        x = np.arange(ow * 4)
        y = np.arange(oh * 4)
        xx, yy = np.meshgrid(x, y)
        xymap = np.stack([xx, yy], axis=2).astype(np.float32)
        oxymap = cv2.resize(xymap, (ow, oh), interpolation=cv2.INTER_LINEAR)
        # homogeneous pixel coordinates [x, y, 1] at output resolution
        oxy1map = np.concatenate([oxymap, np.ones_like(
            oxymap[:, :, :1])], axis=-1).astype(np.float32)
        inverdepth = self.inverdepth(plane_params, self.K_inv, oxy1map)
        # depthmap = cv2.resize(cv2.imread(os.path.join(dirs, 'depth.png'), cv2.IMREAD_UNCHANGED), (ow, oh), interpolation=cv2.INTER_LINEAR)
        # cv2.imwrite('./depth.png', 1/inverdepth*100)
        ret['odepth'] = inverdepth
        ret['oseg'] = oseg
        ret['oxy1map'] = oxy1map
        # reconstructure # camera intri, plane label, segs, depth
        ret['intri'] = self.K  # np.array(anno['intri']).T
        ret['intri_inv'] = self.K_inv
        # evaluate gt
        ret['iseg'] = segs
        ixymap = cv2.resize(xymap, (inw, inh), interpolation=cv2.INTER_LINEAR)
        ixy1map = np.concatenate([ixymap, np.ones_like(
            ixymap[:, :, :1])], axis=-1).astype(np.float32)
        inverdepth_input = self.inverdepth(
            plane_params_input, self.K_inv, ixy1map)
        ret['ixy1map'] = ixy1map
        ret['idepth'] = inverdepth_input
        return ret

    def __len__(self):
        """Number of samples in the active split."""
        return len(self.anno[self.split])

    def padimage(self, image):
        """Place `image` on a fixed 480x640x3 zero canvas (crop if larger).

        Returns (padded_image, 480, 640).
        """
        outsize = [480, 640, 3]
        h, w = image.shape[0], image.shape[1]
        cx = min(w, 640)
        cy = min(h, 480)
        padimage = np.zeros(outsize, dtype=np.uint8)
        padimage[:cy, :cx] = image[:cy, :cx]
        return padimage, outsize[0], outsize[1]

    def inverdepth(self, param, K_inv, xy1map):
        """Per-pixel inverse depth from per-pixel plane params.

        param: (4, H, W) map of [normal (3), offset] per pixel.
        K_inv: 3x3 inverse camera intrinsics.
        xy1map: (H, W, 3) homogeneous pixel coordinates.
        """
        n_d = param[:3] / np.clip(param[3], 1e-8, 1e8)  # meter n*1/d
        n_d = np.transpose(n_d, [1, 2, 0])
        inverdepth = -1 * np.sum(np.dot(n_d, K_inv) * xy1map, axis=2)
        return inverdepth

    def convert_suntos3d(self):
        """Convert the raw SUNRGBD COCO-style annotation file to S3D layout format.

        Each output sample has 'file_name' and a 'layout' list of plane dicts
        with attr (0 = no line, 1 = occlusion line, 2 = intersection line),
        endpoints, polygon, plane_param and category.
        """
        adr = f'data/SUNRGBD/sunrgbd_{self.phase}.json'
        data = json.load(open(adr))
        imgs = data['images']
        annos = data['annotations']
        imgid2anno = defaultdict(list)
        id2imgid = defaultdict(list)
        for i in range(len(annos)):
            imgid2anno[annos[i]['image_id']].append(annos[i])
        for i in range(len(imgs)):
            id2imgid[i] = imgs[i]['id']
        annotations = []
        for i in range(len(imgs)):
            im_name = imgs[i]['file_name'][6:]
            im_name = os.path.join('data', 'SUNRGBD', im_name)
            im = cv2.imread(im_name)
            h, w, _ = im.shape
            anno = imgid2anno[id2imgid[i]]
            sample = {}
            sample['file_name'] = im_name
            sample['layout'] = []
            for j, an in enumerate(anno):
                # seg = np.array(an['segmentation']).reshape(-1, 2)
                seg = [np.array(x).reshape(-1, 2) for x in an['segmentation']]
                line = np.array(an['inter_line']).reshape(-1,
                                                          2).astype(np.int32)
                param = np.array(an['plane_param'])
                category_id = an['category_id']
                if np.all(line == 0) or j == 0:  # no line first wall or ceiling or floor
                    sample['layout'].append({
                        'attr': 0,
                        'endpoints': [],
                        'polygon': [x.tolist() for x in seg],
                        'plane_param': param.tolist(),
                        'category': category_id
                    })
                    continue
                if len(line) == 4:  # oc line
                    # two candidate segments; keep the longer one and order
                    # its endpoints top-to-bottom (smaller y first)
                    left = line[0:2]
                    right = line[2:]
                    len_l = np.sum((left[0] - left[1])**2)
                    len_r = np.sum((right[0] - right[1])**2)
                    if len_l < len_r:
                        endpoints = right if right[0,
                                                   1] < right[1, 1] else right[[1, 0]]
                    else:
                        endpoints = left if left[0,
                                                 1] < left[1, 1] else left[[1, 0]]
                    sample['layout'].append({
                        'attr': 1,
                        'endpoints': endpoints.tolist(),
                        'polygon': [x.tolist() for x in seg],
                        'plane_param': param.tolist(),
                        'category': category_id
                    })
                    continue
                # intersection line: order endpoints top-to-bottom
                line = line if line[0, 1] < line[1, 1] else line[[1, 0]]
                sample['layout'].append({
                    'attr': 2,
                    'endpoints': line.tolist(),
                    'polygon': [x.tolist() for x in seg],
                    'plane_param': param.tolist(),
                    'category': category_id
                })
            annotations.append(sample)
        return annotations
|
{"hexsha": "3770de3f2f701ecc83ad00ca7e8efea6c5bd4b57", "size": 11895, "ext": "py", "lang": "Python", "max_stars_repo_path": "datasets/sunrgbd.py", "max_stars_repo_name": "litsunshine/NonCuboidRoom", "max_stars_repo_head_hexsha": "c782222b951c622d80cae5f3217424dc2cbe6ef5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 54, "max_stars_repo_stars_event_min_datetime": "2021-04-16T09:06:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T14:58:09.000Z", "max_issues_repo_path": "datasets/sunrgbd.py", "max_issues_repo_name": "litsunshine/NonCuboidRoom", "max_issues_repo_head_hexsha": "c782222b951c622d80cae5f3217424dc2cbe6ef5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2021-05-23T22:45:52.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-20T01:47:08.000Z", "max_forks_repo_path": "datasets/sunrgbd.py", "max_forks_repo_name": "litsunshine/NonCuboidRoom", "max_forks_repo_head_hexsha": "c782222b951c622d80cae5f3217424dc2cbe6ef5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-07-16T01:42:54.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-04T01:25:21.000Z", "avg_line_length": 42.3309608541, "max_line_length": 141, "alphanum_fraction": 0.4810424548, "include": true, "reason": "import numpy", "num_tokens": 3074}
|
#!/usr/bin/env python3
"""
Visualize a detector output on the CS6 validation set.
The val set GT annotations are in an FDDB/WIDER-style txt file format.
A symlink 'data/CS6' should point to the CS6 data root location
(on Gypsum this is in /mnt/nfs/scratch1/arunirc/data/CS6/CS6/CS6.0.01/CS6).
Usage (on slurm cluster):
srun --mem 10000 python tools/face/viz_detector_cs6.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import distutils.util
import os
import sys
import pprint
import subprocess
from collections import defaultdict
from six.moves import xrange
import os.path as osp
import time
# Use a non-interactive backend
import matplotlib
matplotlib.use('Agg')
import numpy as np
import cv2
import skvideo
import skvideo.io
import sys
sys.path.append('./tools')
import torch
import torch.nn as nn
from torch.autograd import Variable
import _init_paths
import nn as mynn
from core.config import cfg, cfg_from_file, cfg_from_list, assert_and_infer_cfg
from core.test import im_detect_bbox
from modeling.model_builder import Generalized_RCNN
import datasets.dummy_datasets as datasets
import utils.boxes as box_utils # for NMS
import utils.misc as misc_utils
import utils.net as net_utils
import utils.vis as vis_utils
import utils.face_utils as face_utils
from utils.detectron_weight_helper import load_detectron_weight
# set random seeds for reproducible runs
import random  # BUGFIX: `random.seed` below was called but `random` was never imported

np.random.seed(999)
random.seed(999)
torch.cuda.manual_seed_all(999)
torch.manual_seed(999)
torch.backends.cudnn.deterministic = True


# --- Quick settings ---
GT_FILE = 'data/CS6_annot/annot-format-GT/cs6_gt_annot_val-easy.txt'
OUT_DIR = 'Outputs/visualizations/'
NUM_IMG = 100
DET_NAME = 'baseline-cs6'
CFG_PATH = 'configs/wider_face/e2e_faster_rcnn_R-50-C4_1x.yaml'
WT_PATH = 'Outputs/e2e_faster_rcnn_R-50-C4_1x/Jul30-15-51-27_node097_step/ckpt/model_step79999.pth'
CONF_THRESH = 0.25
NMS_THRESH = 0.15
DATA_DIR = 'data/CS6_annot'
def parse_args():
    """Build the CLI for CS6 visualization and parse sys.argv.

    All defaults come from the module-level quick-settings constants.
    """
    parser = argparse.ArgumentParser(description='Creating CS6 ground truth data')
    parser.add_argument('--det_name', help='detector name', default=DET_NAME)
    parser.add_argument('--cfg', dest='cfg_file', default=CFG_PATH,
                        help='cfg model file (/path/to/model_prototxt)')
    parser.add_argument('--load_ckpt', default=WT_PATH,
                        help='checkpoints weights model file (/path/to/model_weights.pkl)')
    parser.add_argument('--thresh', dest='thresh', type=float, default=CONF_THRESH,
                        help='Threshold on class score (default: 0.5)')
    parser.add_argument('--output_dir', default=OUT_DIR,
                        help='directory for saving outputs')
    parser.add_argument('--gt_file', default=GT_FILE,
                        help='Name of dataset file in FDDB-format')
    parser.add_argument('--imdir', default=DATA_DIR,
                        help="root directory for loading dataset images")
    parser.add_argument('--data-dir', dest='data_dir', default=DATA_DIR,
                        help='Path to CS6 annotations folder')
    parser.add_argument('--num_im', type=int, default=NUM_IMG,
                        help='Number of images to visualize per video')
    return parser.parse_args()
_GREEN = (18, 127, 15)


# ------------------------------------------------------------------------------
def draw_detection_list(im, dets):
# ------------------------------------------------------------------------------
    """ Draw detected bounding boxes on a copy of image and return it.

    Args:
        im: BGR image array (as loaded by cv2.imread).
        dets: detections, one per row: [x0 y0 w h conf_score].
            A single 1-D detection is also accepted.

    Returns:
        Copy of `im` with green boxes and "<index>: <score>" labels drawn.
    """
    im_det = im.copy()
    # BUGFIX: work on a copy -- the original converted [x,y,w,h] to
    # [x0,y0,x1,y1] directly in the caller's array (in-place mutation).
    dets = np.array(dets, dtype=np.float64)
    if dets.ndim == 1:
        dets = dets[np.newaxis, :]  # handle single detection case

    # format into [xmin, ymin, xmax, ymax]
    dets[:, 2] = dets[:, 2] + dets[:, 0]
    dets[:, 3] = dets[:, 3] + dets[:, 1]

    for i, det in enumerate(dets):
        bbox = det[:4]
        conf_score = det[4]
        x0, y0, x1, y1 = [int(x) for x in bbox]
        cv2.rectangle(im_det, (x0, y0), (x1, y1), _GREEN, thickness=2)
        disp_str = '%d: %.2f' % (i, conf_score)
        face_utils._draw_string(im_det, (x0, y0), disp_str)
    return im_det
if __name__ == '__main__':

    args = parse_args()

    # Load the FDDB/WIDER-style txt annotations.
    # BUGFIX: was `args.det_file`, an attribute parse_args() never defines
    # (the only file argument is --gt_file) -- crashed with AttributeError.
    det_dict = face_utils.parse_wider_gt(args.gt_file)
    out_dir = osp.join(args.output_dir,
                       osp.splitext(osp.basename(args.gt_file))[0],
                       args.det_name)
    if not osp.exists(out_dir):
        os.makedirs(out_dir, exist_ok=True)

    i = 0
    for (image_name, dets) in det_dict.items():
        if len(dets) == 0:
            continue
        print(image_name)
        im = cv2.imread(osp.join(args.imdir, image_name))
        # BUGFIX: cv2.imread returns None on failure, so the old
        # `assert im.size > 0` raised AttributeError instead of reporting.
        if im is None or im.size == 0:
            raise IOError('Could not read image: %s' % image_name)
        im_det = draw_detection_list(im, np.array(dets))
        out_path = osp.join(out_dir, image_name.replace('/', '_'))
        cv2.imwrite(out_path, im_det)
        i += 1
        if i == args.num_im:
            break

    print('Done visualizing')
    print('Results folder: %s' % out_dir)
|
{"hexsha": "158319cc51b6bfa9c8824295a0463bd652f37315", "size": 5215, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/face/viz_detector_cs6.py", "max_stars_repo_name": "AruniRC/detectron-self-train", "max_stars_repo_head_hexsha": "a5d0edc51aeab92b953948ef2401294e87efb719", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 128, "max_stars_repo_stars_event_min_datetime": "2019-04-12T17:06:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-26T10:24:43.000Z", "max_issues_repo_path": "tools/face/viz_detector_cs6.py", "max_issues_repo_name": "AruniRC/detectron-self-train", "max_issues_repo_head_hexsha": "a5d0edc51aeab92b953948ef2401294e87efb719", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 15, "max_issues_repo_issues_event_min_datetime": "2019-06-12T03:55:48.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-12T07:09:53.000Z", "max_forks_repo_path": "tools/face/viz_detector_cs6.py", "max_forks_repo_name": "AruniRC/detectron-self-train", "max_forks_repo_head_hexsha": "a5d0edc51aeab92b953948ef2401294e87efb719", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 24, "max_forks_repo_forks_event_min_datetime": "2019-04-12T17:06:30.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-12T12:38:20.000Z", "avg_line_length": 25.4390243902, "max_line_length": 99, "alphanum_fraction": 0.6379674017, "include": true, "reason": "import numpy", "num_tokens": 1339}
|
import os
from shutil import *
import random, math
import scipy.misc
import numpy as np
import tensorflow as tf
def clear_duplicated_layers(layers):
    """Collapse consecutive layers sharing the same ``.name``.

    Keeps the first occurrence of each run of equal names; order is preserved.
    """
    deduped = [layers[0]]
    for candidate in layers[1:]:
        if candidate.name != deduped[-1].name:
            deduped.append(candidate)
    return deduped
def allocate_gpu(gpu_id=-1, maxLoad=0.1, maxMem=0.5, order='memory'):
    """Select a GPU and expose only it via CUDA_VISIBLE_DEVICES.

    Args:
        gpu_id: explicit GPU index; -1 means auto-select via GPUtil.
        maxLoad: max compute load for a GPU to count as available.
        maxMem: max memory utilization for a GPU to count as available.
        order: GPUtil ordering criterion for candidates.

    Returns:
        The selected GPU id (falls back to 0 if auto-selection fails).
    """
    if gpu_id == -1:
        try:
            import common.GPUtil as GPUtil
            gpu_id = GPUtil.getFirstAvailable(order=order, maxLoad=maxLoad, maxMemory=maxMem)[0]
        except Exception:
            # Best-effort fallback (GPUtil missing or no GPU available).
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            gpu_id = 0
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    return gpu_id
def ini_model(sess):
    """Run the TF1-style global-variables initializer in session `sess`."""
    sess.run(tf.global_variables_initializer())
def save_model(saver, sess, checkpoint_dir, step=None):
    """Save the TF session to a checkpoint under `checkpoint_dir` (created if needed).

    Args:
        saver: tf.train.Saver instance.
        sess: active tf.Session.
        checkpoint_dir: directory to write checkpoint files into.
        step: optional global step appended to the checkpoint filename.
    """
    makedirs(checkpoint_dir)
    model_name = "model"
    # BUGFIX: use os.path.join -- plain concatenation produced e.g.
    # "ckptmodel" when checkpoint_dir lacked a trailing separator.
    saver.save(sess, os.path.join(checkpoint_dir, model_name), global_step=step)
def load_model(saver, sess, checkpoint_dir):
    """Restore the latest checkpoint from `checkpoint_dir` into `sess`.

    Returns:
        True if a checkpoint was found and restored, False otherwise.
    """
    print(" [*] Reading checkpoints...")
    ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        return True
    else:
        return False
from functools import reduce
import operator
def prod(iterable):
    """Product of all elements of `iterable`; 1 for an empty iterable."""
    result = 1
    for factor in iterable:
        result = result * factor
    return result
def sigmoid(x):
    """Logistic function 1/(1+exp(-x)); works elementwise on numpy arrays."""
    return 1.0 / (1.0 + np.exp(-x))
def mean(x):
    """Mean of `x` as a Python float; 0.0 if np.mean raises on the input.

    Narrowed from a bare `except:` to `except Exception:` so that
    KeyboardInterrupt/SystemExit propagate.
    """
    try:
        return np.mean(x).__float__()
    except Exception:
        return 0.
def std(x):
    """Population standard deviation of `x` as a float; 0.0 if np.std raises.

    Narrowed from a bare `except:` to `except Exception:` so that
    KeyboardInterrupt/SystemExit propagate.
    """
    try:
        return np.std(x).__float__()
    except Exception:
        return 0.
def copydir(src, dst):
    """Copy directory tree `src` to `dst`, replacing `dst` if it already exists."""
    if os.path.exists(dst):
        removedirs(dst)
    copytree(src, dst)
def remove(path):
    """Delete file `path` if it exists; silently do nothing otherwise."""
    if os.path.exists(path):
        os.remove(path)
def makedirs(path):
    """Create directory `path` (including parents) if it does not already exist."""
    if not os.path.exists(path):
        os.makedirs(path)
def removedirs(path):
    """Recursively delete directory `path` if it exists (shutil.rmtree)."""
    if os.path.exists(path):
        rmtree(path)
def str_flags(flags):
    """Format a flags object as one 'key:value' line per flag, sorted by key.

    NOTE(review): accesses `flags.get(key)._value`, a private attribute --
    this matches TF/absl FLAGS internals; confirm against the flags library
    actually in use.
    """
    p = ''
    for key in np.sort(list(flags.keys())):
        p += str(key) + ':' + str(flags.get(key)._value) + '\n'
    return p
def rampup(step, rampup_length):
    """Sigmoid ramp (TF graph op) from ~0 to exactly 1 over `rampup_length` steps.

    p is the clipped linear progress in [0, 1]; dividing by sigmoid(5.0)
    rescales so the ramp hits 1.0 at p == 1 (sigmoid(10*(1-0.5)) == sigmoid(5)).
    """
    p = tf.minimum(1.0, tf.cast(step, tf.float32) / rampup_length)
    return tf.nn.sigmoid(10.0*(p-0.5)) / sigmoid(5.0)
def save_images(images, size, path):
    """Tile a batch of NHWC images (values in [-1, 1]) into a grid and save to `path`.

    Grayscale (1-channel) batches are replicated to 3 channels first.
    NOTE(review): relies on scipy.misc.toimage, which was removed in
    SciPy >= 1.2 -- requires an old SciPy (with Pillow) to run.
    """
    if images.shape[3] == 1:
        images = np.concatenate([images, images, images], 3)
    images = np.clip(images, -1.0, 1.0)
    return scipy.misc.toimage(merge(images, size), cmin=-1, cmax=1).save(path)
def imread(path, is_grayscale=False):
    """Read an image file into a float numpy array (2-D when grayscale).

    NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2; this code
    assumes an old SciPy with PIL installed.
    """
    if (is_grayscale):
        return scipy.misc.imread(path, flatten=True).astype(np.float)
    else:
        return scipy.misc.imread(path).astype(np.float)
def imresize(image, resize=1):
    """Upscale an (H, W, C) image by an integer factor via pixel replication.

    Args:
        image: array of shape (H, W, C).
        resize: integer factor; each source pixel becomes a resize x resize block.

    Returns:
        float64 array of shape (H*resize, W*resize, C).
    """
    # Vectorized nearest-neighbor upsampling; replaces the original
    # O(H*W*resize^2) Python double loop. astype(np.float64) keeps the old
    # return dtype (the loop filled a default-float np.zeros buffer).
    return np.repeat(np.repeat(image, resize, axis=0), resize, axis=1).astype(np.float64)
def merge(images, size, resize=3):
    """Tile a batch of NHWC images into a single size[0] x size[1] grid image.

    Each image is upscaled by integer factor `resize` (pixel replication via
    `imresize`) before placement.  Requires size[0]*size[1] == batch size.
    """
    h, w = images.shape[1] * resize, images.shape[2] * resize
    img = np.zeros((h * size[0], w * size[1], images.shape[3]))
    assert size[0] * size[1] == images.shape[0]
    for idx, image in enumerate(images):
        i = idx % size[1]   # grid column
        j = idx // size[1]  # grid row
        img[j * h:j * h + h, i * w:i * w + w, :] = imresize(image, resize)
    return img
def center_crop(x, crop_h, crop_w=None, resize_w=64):
    """Center-crop `x` to (crop_h, crop_w) and resize to (resize_w, resize_w).

    crop_w defaults to crop_h (square crop); crop_h == 0 selects the largest
    centered square that fits.
    NOTE(review): uses scipy.misc.imresize, removed in SciPy >= 1.2.
    """
    h, w = x.shape[:2]
    if crop_w is None:
        crop_w = crop_h
    if crop_h == 0:
        crop_h = crop_w = min(h, w)
    j = int(round((h - crop_h) / 2.))
    i = int(round((w - crop_w) / 2.))
    return scipy.misc.imresize(x[j:j + crop_h, i:i + crop_w], [resize_w, resize_w])
def batch_resize(images, newHeight, newWidth):
    """Bilinearly resize a batch of NHWC images to (newHeight, newWidth, 3).

    1-channel inputs are replicated to 3 channels before resizing.
    NOTE(review): uses scipy.misc.imresize, removed in SciPy >= 1.2.
    """
    images_resized = np.zeros([images.shape[0], newHeight, newWidth, 3])
    for idx, image in enumerate(images):
        if (images.shape[3] == 1):
            image = np.concatenate([image, image, image], 2)
        images_resized[idx] = scipy.misc.imresize(image, [newHeight, newWidth], 'bilinear')
    return images_resized
def clip_truncated_normal(mean, stddev, shape, minval=None, maxval=None):
    """Sample normal(mean, stddev) values of `shape`, clipped to [minval, maxval].

    Defaults clip at mean +/- 2*stddev (an approximate truncated normal).
    """
    # `is None` instead of `== None`: identity is the correct idiom and avoids
    # invoking __eq__ on array-like bound arguments.
    if minval is None:
        minval = mean - 2 * stddev
    if maxval is None:
        maxval = mean + 2 * stddev
    return np.clip(np.random.normal(mean, stddev, shape), minval, maxval)
def collect(X, x, len):
    """Append `x` to accumulator `X`, keeping only the last `len` entries.

    numpy arrays are stacked along axis 0 (a 1-D `x` is promoted to a row);
    anything else is collected into a plain list.
    """
    if isinstance(x, np.ndarray):
        if x.ndim == 1:
            x = x[np.newaxis, :]  # promote a single sample to a row
        if X is None:
            return x
        return np.concatenate([X, x], 0)[-len:]
    if X is None:
        return [x]
    return (X + [x])[-len:]
def get_name(layer_name, cts):
    """Return a unique name '<layer_name>_<k>' and bump the counter in `cts`."""
    count = cts.setdefault(layer_name, 0)
    cts[layer_name] = count + 1
    return layer_name + '_' + str(count)
def shuffle_datas(datas):
    """Return the rows of `datas` in a random order (fancy-indexed copy)."""
    order = np.random.permutation(datas.shape[0])
    return datas[order]
def shuffle_datas_and_labels(datas, labels):
    """Shuffle `datas` and `labels` with one shared random permutation."""
    order = np.random.permutation(datas.shape[0])
    return datas[order], labels[order]
def data_gen_random(data, num_sample):
    """Infinite generator yielding `num_sample` rows drawn uniformly with replacement."""
    while True:
        total = len(data)
        picks = np.random.choice(total, num_sample, replace=True, p=total * [1 / total])
        yield data[picks]
def data_gen_epoch(datas, batch_size, func=None, epoch=None):
    """Generator yielding shuffled minibatches of `datas`, epoch by epoch.

    datas: numpy array of samples (first axis is the sample axis).
    batch_size: rows per yielded batch.
    func: optional transform applied to each batch before yielding.
    epoch: stop after this many epochs; None means loop forever.
    """
    cur_epoch = 0
    # Duplicate small datasets until one "epoch" spans at least 100 batches.
    while len(datas) < 100 * batch_size:
        datas = np.concatenate([datas, datas], axis=0)
    while True:
        # in-place shuffle at the start of each epoch
        np.random.shuffle(datas)
        for i in range(len(datas) // batch_size):
            if func is None:
                yield datas[i * batch_size:(i + 1) * batch_size]
            else:
                yield func(datas[i * batch_size:(i + 1) * batch_size])
        cur_epoch += 1
        if epoch is not None:
            if cur_epoch >= epoch:
                break
def labeled_data_gen_random(data, labels, num_sample):
    """Infinite generator yielding aligned (data, labels) batches, drawn uniformly with replacement."""
    while True:
        total = len(data)
        picks = np.random.choice(total, num_sample, replace=True, p=total * [1 / total])
        yield data[picks], labels[picks]
def labeled_data_gen_epoch(datas, labels, batch_size, func=None, epoch=None):
    """Generator yielding shuffled (data, label) minibatches, epoch by epoch.

    func: optional transform applied to the data half of each batch only.
    epoch: stop after this many epochs; None means loop forever.
    """
    cur_epoch = 0
    while True:
        # Shuffle datas and labels identically by replaying the same RNG
        # state for both in-place shuffles.
        rng_state = np.random.get_state()
        np.random.shuffle(datas)
        np.random.set_state(rng_state)
        np.random.shuffle(labels)
        for i in range(len(datas) // batch_size):
            if func is None:
                yield (datas[i * batch_size:(i + 1) * batch_size], labels[i * batch_size:(i + 1) * batch_size])
            else:
                yield (func(datas[i * batch_size:(i + 1) * batch_size]), labels[i * batch_size:(i + 1) * batch_size])
        cur_epoch += 1
        if epoch is not None:
            if cur_epoch >= epoch:
                break
def random_augment_image_nchw(image, pad=4, data_format="NCHW"):
    """Random pad-and-crop plus random horizontal flip for ONE image.

    Works internally in CHW layout; NHWC inputs are transposed in and back
    out.  The image is zero-padded by `pad` on each spatial side, a random
    HxW window is cropped back out, and the width axis is flipped with
    probability 1/2.
    """
    if data_format=="NHWC":
        image = np.transpose(image, [2,0,1])
    init_shape = image.shape
    new_shape = [init_shape[0],
                 init_shape[1] + pad * 2,
                 init_shape[2] + pad * 2]
    zeros_padded = np.zeros(new_shape)
    zeros_padded[:, pad:init_shape[1] + pad, pad:init_shape[2] + pad] = image
    # random top-left corner of the crop window (each in [0, 2*pad))
    init_x = np.random.randint(0, pad * 2)
    init_y = np.random.randint(0, pad * 2)
    cropped = zeros_padded[:,
              init_x: init_x + init_shape[1],
              init_y: init_y + init_shape[2]]
    flip = random.getrandbits(1)
    if flip:
        cropped = cropped[:, :, ::-1]  # flip along the width axis
    if data_format=="NHWC":
        cropped = np.transpose(cropped, [1,2,0])
    return cropped
def random_augment_image_nhwc(image, pad=4, data_format="NHWC"):
    """Random pad-and-crop plus random horizontal flip for ONE image (HWC layout).

    NCHW inputs are transposed in and back out.  Mirrors
    random_augment_image_nchw but operates natively on HWC arrays.
    """
    if data_format=="NCHW":
        image = np.transpose(image, [1,2,0])
    init_shape = image.shape
    new_shape = [init_shape[0] + pad * 2,
                 init_shape[1] + pad * 2,
                 init_shape[2]]
    zeros_padded = np.zeros(new_shape)
    zeros_padded[pad:init_shape[0] + pad, pad:init_shape[1] + pad, :] = image
    # random top-left corner of the crop window (each in [0, 2*pad))
    init_x = np.random.randint(0, pad * 2)
    init_y = np.random.randint(0, pad * 2)
    cropped = zeros_padded[
              init_x: init_x + init_shape[0],
              init_y: init_y + init_shape[1],
              :]
    flip = random.getrandbits(1)
    if flip:
        cropped = cropped[:, ::-1, :]  # flip along the width axis
    if data_format=="NCHW":
        cropped = np.transpose(cropped, [2,0,1])
    return cropped
def random_augment_all_images(initial_images, pad=4, data_format="NCHW"):
    """Apply random_augment_image_nchw independently to every image in the batch."""
    augmented = np.zeros(initial_images.shape)
    for idx, single_image in enumerate(initial_images):
        augmented[idx] = random_augment_image_nchw(single_image, pad=pad, data_format=data_format)
    return augmented
def softmax(x):
    """Row-wise softmax with max-subtraction for numerical stability."""
    shifted = np.exp(x - np.max(x, axis=1, keepdims=True))
    return shifted / np.sum(shifted, axis=1, keepdims=True)
|
{"hexsha": "f1fb45f3687e0efe26e0857ab4340fd3e5eb8587", "size": 8894, "ext": "py", "lang": "Python", "max_stars_repo_path": "author_code_base/WGAN/common/utils.py", "max_stars_repo_name": "MichaelKonobeev/adashift", "max_stars_repo_head_hexsha": "bf86b021d42e922078a39246770f0f875300a6f3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2018-11-23T08:07:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T11:40:25.000Z", "max_issues_repo_path": "author_code_base/MulitiLayer_MNIST/common/utils.py", "max_issues_repo_name": "MichaelKonobeev/adashift", "max_issues_repo_head_hexsha": "bf86b021d42e922078a39246770f0f875300a6f3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-06-02T13:31:44.000Z", "max_issues_repo_issues_event_max_datetime": "2019-06-02T13:31:44.000Z", "max_forks_repo_path": "author_code_base/MulitiLayer_MNIST/common/utils.py", "max_forks_repo_name": "MichaelKonobeev/adashift", "max_forks_repo_head_hexsha": "bf86b021d42e922078a39246770f0f875300a6f3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-05-30T02:34:36.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-27T18:59:32.000Z", "avg_line_length": 28.5064102564, "max_line_length": 117, "alphanum_fraction": 0.6153586688, "include": true, "reason": "import numpy,import scipy", "num_tokens": 2407}
|
import dash
import dash_bootstrap_components as dbc
from dash import dcc
from dash import html
from dash import dash_table
from dash.dependencies import Input, Output, State
import plotly.express as px
import plotly.graph_objects as go
import pandas as pd
import numpy as np
import base64
# Path (relative to the working directory) of the cover image embedded below.
image_filename = 'cover.png'
def b64_image(image_filename):
    """Read an image file from disk and return it as a base64 PNG data URI."""
    with open(image_filename, 'rb') as handle:
        raw = handle.read()
    encoded = base64.b64encode(raw).decode('utf-8')
    return 'data:image/png;base64,' + encoded
# Dash component rendering the cover image inline via the data URI above.
image_layout = html.Img(src=b64_image(image_filename))
|
{"hexsha": "64e0eda8eebca0be9d214d8b594a50791e8d4f0c", "size": 554, "ext": "py", "lang": "Python", "max_stars_repo_path": "image.py", "max_stars_repo_name": "daxinniu/data1050_dash_app", "max_stars_repo_head_hexsha": "451fe05ae56b6f7d1585d6f0e0526395b9b7f16d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "image.py", "max_issues_repo_name": "daxinniu/data1050_dash_app", "max_issues_repo_head_hexsha": "451fe05ae56b6f7d1585d6f0e0526395b9b7f16d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "image.py", "max_forks_repo_name": "daxinniu/data1050_dash_app", "max_forks_repo_head_hexsha": "451fe05ae56b6f7d1585d6f0e0526395b9b7f16d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-12-08T16:06:53.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-10T06:38:23.000Z", "avg_line_length": 24.0869565217, "max_line_length": 77, "alphanum_fraction": 0.7689530686, "include": true, "reason": "import numpy", "num_tokens": 134}
|
import os
import pickle
import numpy as np
import json
def sortbylength(X, y) :
    """Return (X, y) reordered so the sequences in X go from shortest to longest."""
    order = np.argsort([len(seq) for seq in X])
    sorted_X = [X[j] for j in order]
    sorted_y = [y[j] for j in order]
    return sorted_X, sorted_y
def filterbylength(X, y, min_length = None, max_length = None) :
    """Drop (x, y) pairs whose sequence length (excluding the two boundary
    tokens) falls outside the open interval (min_length, max_length).

    A bound left as None defaults to the min/max observed trimmed length, so
    by default the strictly-shortest and strictly-longest sequences are removed.
    """
    trimmed_lens = [len(seq) - 2 for seq in X]
    lo = min_length if min_length is not None else min(trimmed_lens)
    hi = max_length if max_length is not None else max(trimmed_lens)
    keep = [i for i in range(len(X)) if lo + 2 < len(X[i]) < hi + 2]
    return [X[i] for i in keep], [y[i] for i in keep]
def set_balanced_pos_weight(dataset) :
    """Set dataset.pos_weight to [#negatives / #positives], i.e. len(y)/sum(y) - 1,
    computed from the binary training labels in dataset.train_data.y."""
    labels = np.array(dataset.train_data.y)
    dataset.pos_weight = [len(labels) / sum(labels) - 1]
class DataHolder() :
    """Light-weight container bundling inputs, labels and (optionally) the
    reference ("gold") attentions and predictions for one data split."""
    def __init__(self, X, y, y_attn=None, true_pred=None) :
        self.X, self.y = X, y
        self.gold_attns, self.true_pred = y_attn, true_pred
        # Names of the fields above, for generic iteration/serialisation.
        self.attributes = ['X', 'y', 'gold_attns', 'true_pred']
class Dataset() :
    """Binary-classification dataset loaded from a pickled vectorizer.

    Loads train/test token sequences and labels from `path`, filters and
    sorts them by length, and — when `args.pre_loaded_attn` or
    `args.adversarial` is set — attaches pre-computed ("gold") attentions
    and predictions read from `args.gold_label_dir`.
    """
    def __init__(self, name, path, min_length=None, max_length=None, args=None) :
        self.name = name
        # Resolve `path` relative to args.data_dir when provided.
        if args is not None and hasattr(args, 'data_dir') :
            path = os.path.join(args.data_dir, path)
        # Close the pickle handle promptly instead of leaking it.
        with open(path, 'rb') as fh :
            self.vec = pickle.load(fh)
        X, Xt = self.vec.seq_text['train'], self.vec.seq_text['test'] # these are lists (of lists) of num. insts-length (NOT PADDED)
        y, yt = self.vec.label['train'], self.vec.label['test']
        X, y = filterbylength(X, y, min_length=min_length, max_length=max_length)
        Xt, yt = filterbylength(Xt, yt, min_length=min_length, max_length=max_length)
        Xt, yt = sortbylength(Xt, yt)
        # BUG FIX: the original read args.pre_loaded_attn unconditionally and
        # crashed with AttributeError when args was None (the None case now
        # falls through to the plain DataHolder branch below).
        if args is not None and (args.pre_loaded_attn or args.adversarial) :
            def _load_json(p) :
                # Read a JSON file and close the handle promptly.
                with open(p, 'r') as jfh :
                    return json.load(jfh)
            # these are lists of lists, with some residual padding
            y_attn = _load_json(os.path.join(args.gold_label_dir, 'train_attentions_best_epoch.json'))
            yt_attn = _load_json(os.path.join(args.gold_label_dir, 'test_attentions_best_epoch.json'))
            true_pred = _load_json(os.path.join(args.gold_label_dir, 'train_predictions_best_epoch.json'))
            true_pred_t = _load_json(os.path.join(args.gold_label_dir, 'test_predictions_best_epoch.json'))
            true_pred = [e[0] for e in true_pred]
            true_pred_t = [e[0] for e in true_pred_t] #these are lists of num. insts-length
            # Trim residual padding from the static attentions: keep non-zero
            # entries and re-add the two boundary zeros.
            new_attns = []
            for e, a in zip(X, y_attn):
                tmp = [0] + [el for el in a if el != 0] + [0]
                assert len(tmp) == len(e)
                new_attns.append(tmp)
            y_attn = new_attns
            # do the same for test
            new_attns = []
            for e, a in zip(Xt, yt_attn):
                tmp = [0] + [el for el in a if el != 0] + [0]
                assert len(tmp) == len(e)
                new_attns.append(tmp)
            yt_attn = new_attns
            self.train_data = DataHolder(X, y, y_attn, true_pred)
            self.test_data = DataHolder(Xt, yt, yt_attn, true_pred_t)
        else :
            self.train_data = DataHolder(X, y)
            self.test_data = DataHolder(Xt, yt)
        if args is not None and hasattr(args, 'hidden_size') :
            self.hidden_size = args.hidden_size
        self.output_size = 1
        self.save_on_metric = 'roc_auc'
        self.keys_to_use = {
            'roc_auc' : 'roc_auc',
            'pr_auc' : 'pr_auc'
        }
        self.bsize = 32
        if args is not None and hasattr(args, 'output_dir') :
            self.basepath = args.output_dir
########################################## Dataset Loaders ################################################################################
def SST_dataset(args=None) :
    """Load the SST sentiment dataset and set its balanced pos_weight."""
    ds = Dataset(name='sst', path='preprocess/SST/vec_sst.p', min_length=5, args=args)
    set_balanced_pos_weight(ds)
    return ds
def IMDB_dataset(args=None) :
    """Load the IMDB sentiment dataset and set its balanced pos_weight."""
    ds = Dataset(name='imdb', path='preprocess/IMDB/vec_imdb.p', min_length=6, args=args)
    set_balanced_pos_weight(ds)
    return ds
def News20_dataset(args=None) :
    """Load the 20-Newsgroups (sports) dataset and set its balanced pos_weight."""
    ds = Dataset(name='20News_sports', path='preprocess/20News/vec_20news_sports.p', min_length=6, max_length=500, args=args)
    set_balanced_pos_weight(ds)
    return ds
def ADR_dataset(args=None) :
    """Load the ADR tweets dataset and set its balanced pos_weight."""
    ds = Dataset(name='tweet', path='preprocess/Tweets/vec_adr.p', min_length=5, max_length=100, args=args)
    set_balanced_pos_weight(ds)
    return ds
def Anemia_dataset(args=None) :
    """Load the MIMIC anemia dataset and set its balanced pos_weight."""
    ds = Dataset(name='anemia', path='preprocess/MIMIC/vec_anemia.p', max_length=4000, args=args)
    set_balanced_pos_weight(ds)
    return ds
def Diabetes_dataset(args=None) :
    """Load the MIMIC diabetes dataset and set its balanced pos_weight."""
    ds = Dataset(name='diabetes', path='preprocess/MIMIC/vec_diabetes.p', min_length=6, max_length=4000, args=args)
    set_balanced_pos_weight(ds)
    return ds
def AGNews_dataset(args=None) :
    """Load the AG News dataset and set its balanced pos_weight."""
    ds = Dataset(name='agnews', path='preprocess/ag_news/vec_agnews.p', args=args)
    set_balanced_pos_weight(ds)
    return ds
# Registry mapping a dataset key (as selected at run time) to its loader
# function; each loader returns a fully-initialised Dataset.
datasets = {
    "sst" : SST_dataset,
    "imdb" : IMDB_dataset,
    "20News_sports" : News20_dataset,
    "tweet" : ADR_dataset ,
    "Anemia" : Anemia_dataset,
    "Diabetes" : Diabetes_dataset,
    "AgNews" : AGNews_dataset
}
|
{"hexsha": "d13949a9f039ffee1ffea702b7c1eec6f1f4010f", "size": 5385, "ext": "py", "lang": "Python", "max_stars_repo_path": "Trainers/DatasetBC.py", "max_stars_repo_name": "wenting-zhao/cs6741_replication", "max_stars_repo_head_hexsha": "fbd8275793c5d2b097458c68bc3bba00144665aa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Trainers/DatasetBC.py", "max_issues_repo_name": "wenting-zhao/cs6741_replication", "max_issues_repo_head_hexsha": "fbd8275793c5d2b097458c68bc3bba00144665aa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Trainers/DatasetBC.py", "max_forks_repo_name": "wenting-zhao/cs6741_replication", "max_forks_repo_head_hexsha": "fbd8275793c5d2b097458c68bc3bba00144665aa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.1379310345, "max_line_length": 139, "alphanum_fraction": 0.6128133705, "include": true, "reason": "import numpy", "num_tokens": 1443}
|
import data.rat
open function
namespace mth1001
section composite
-- `q₁ : ℕ → ℤ` adds three, coercing the natural-number input to an integer.
def q₁ (x : ℕ) : ℤ := x + 3
-- `q₂ : ℤ → ℚ` doubles its input, coercing the integer to a rational.
def q₂ (x : ℤ) : ℚ := 2 * x
/-
When a function `f` takes values from a type (or set) `α` and returns values in a type (or set) `β`,
we write that the *domain* of `f` is `α` and the *codomain* of `f` is `β`. This is denoted
`f : α → β`.
-/
/-
Given `f : α → β` and `g : β → γ`, the *composite* of `g` and `f`, denoted `g ∘ f` is the function
`g ∘ f : α → γ` with the property that `(g ∘ f) x = g (f x)`, for every `x : α`.
-/
-- With `q₁` and `q₂` as above, `q₁ : ℕ → ℤ` and `q₂ : Z → ℚ`. So `q₂ ∘ q₁ : ℕ → ℚ`.
#check q₁
#check q₂
#check q₂ ∘ q₁
-- We verify, that `(q₂ ∘ q₁) 5 = q₂ (q₁ 5)`.
#eval (q₂ ∘ q₁) 5
#eval q₂ (q₁ 5)
/-
With the above functions, `q₁ ∘ q₂` is *not defined* as the codomain of `q₂` differs from the
domain of `q₁`.
-/
/-
If all the domains and codomains of two functions, say `p₁` and `p₂` are equal, then it makes sense
to consider both composites. However, `p₂ ∘ p₁` will not (in general) be equal to `p₁ ∘ p₂`.
-/
-- `p₁, p₂ : ℤ → ℤ` share domain and codomain, so both composites exist
-- (and, as the #eval lines below show, they differ).
def p₁ (x : ℤ) : ℤ := 3 * x
def p₂ (y : ℤ) : ℤ := y + 4
#eval (p₂ ∘ p₁) 6 -- `(p₂ ∘ p₁) 6 = p₂ (p₁ 6) = p₂ (3*6) = p₂ 18 = 18 + 4 = 22`, but
#eval (p₁ ∘ p₂) 6 -- `(p₁ ∘ p₂) 6 = p₁ (p₂ 6) = p₁ (6 + 4) = p₁ 10 = 3 * 10 = 30`.
/-
We'll prove that the composite of two injective functions is injective.
-/
variable {α : Type*}
variable {β : Type*}
variable {γ : Type*}
-- The composite of two injective functions is injective:
-- from `(g ∘ f) a₁ = (g ∘ f) a₂` we peel off `g` (by `h₂`) and then `f` (by `h₁`).
theorem injective_comp {f : α → β} {g : β → γ} (h₁ : injective f) (h₂ : injective g) :
  injective (g ∘ f) :=
begin
  unfold injective at *, -- We use the definition of injective.
  intros a₁ a₂ h, -- Assume `a₁ a₂ : α`. Assume `h : (g ∘ f) a₁ = (g ∘ f) a₂`.
  have h₄ : f a₁ = f a₂,
    from h₂ h, -- By injectivity of `g`, applied to `h`, we have `h₄ : f a₁ = f a₂`.
  show a₁ = a₂, from h₁ h₄, -- We show `a₁ = a₂` by injectivity of `f`, applied to `h₄`.
end
/-
We'll prove that the composite of two surjective functions is surjective. The proof is
more involved than the corresponding injectivity result.
-/
-- The composite of two surjective functions is surjective.
-- Exercise hint: given `c : γ`, use `h₂` to obtain `b : β` with `g b = c`,
-- then `h₁` to obtain `a : α` with `f a = b`, and conclude `(g ∘ f) a = c`.
theorem surjective_comp {f : α → β} {g : β → γ} (h₁ : surjective f) (h₂ : surjective g) :
  surjective (g ∘ f) :=
begin
  unfold surjective at *, -- We use the definition of surjective.
  intro c, -- Assume `c : γ`. It suffices to show `∃ a : α, (g ∘ f) a = c`.
  sorry
end
-- Exercise 145:
-- From these two results, we have that the composite of two bijective functions is bijective.
-- Exercise hint: `bijective` unfolds to `injective ∧ surjective`; split the
-- hypotheses with `cases` and combine `injective_comp` and `surjective_comp`.
theorem bijective_comp {f : α → β} {g : β → γ} (h₁ : bijective f) (h₂ : bijective g) :
  bijective (g ∘ f) :=
begin
  sorry
end
end composite
end mth1001
|
{"author": "gihanmarasingha", "repo": "mth1001_tutorial", "sha": "bb277eebd5013766e1418365b91416b406275130", "save_path": "github-repos/lean/gihanmarasingha-mth1001_tutorial", "path": "github-repos/lean/gihanmarasingha-mth1001_tutorial/mth1001_tutorial-bb277eebd5013766e1418365b91416b406275130/src/exercises/src_32_composite.lean"}
|
import tensorflow as tf
import numpy as np
import sys
import random
class GruRNN(object):
    """
    GRU-based recurrent language model on the TensorFlow 1.x static-graph API
    (placeholders, tf.scan, Session). The GRU cell is written out by hand
    (update/reset gates inside a tf.scan step) rather than using tf.nn.rnn_cell.
    """
    def __init__(self, num_classes, state_size, learning_rate=0.1, model_name='gru_rnn_model', ckpt_path='./ckpt/gru/'):
        """
        Build the computation graph.

        Args:
            num_classes: vocabulary size; also the embedding row count and
                the output-layer width.
            state_size: GRU hidden-state width (also used as embedding width).
            learning_rate: Adagrad learning rate.
            model_name: checkpoint file prefix.
            ckpt_path: directory where checkpoints are written/restored.
        """
        self.num_classes = num_classes
        self.state_size = state_size
        self.learning_rate = learning_rate
        self.model_name = model_name
        self.ckpt_path = ckpt_path
        # build graph
        sys.stdout.write('\nBuilding Graph...')
        tf.reset_default_graph()
        # inputs: token ids [batch, time] and flattened targets [batch*time]
        self.xs_ = tf.placeholder(shape=[None, None], dtype=tf.int32)
        self.ys_ = tf.placeholder(shape=[None], dtype=tf.int32)
        # embeddings
        embs = tf.get_variable('emb', [self.num_classes, self.state_size])
        rnn_inputs = tf.nn.embedding_lookup(embs, self.xs_)
        # initial hidden state
        self.init_state = tf.placeholder(shape=[None, self.state_size], dtype=tf.float32, name='initial_state')
        # initializer and params: W holds the 3 state->state matrices,
        # U the 3 input->state matrices (update, reset, candidate).
        xav_init = tf.contrib.layers.xavier_initializer
        w = tf.get_variable('W', shape=[3, self.state_size, self.state_size], initializer=xav_init())
        u = tf.get_variable('U', shape=[3, self.state_size, self.state_size], initializer=xav_init())
        # b = tf.get_variable('b', shape=[self.state_size], initializer=tf.constant_initializer(0.0))
        def __step__(st_1, x):
            # One GRU step: st_1 is the previous hidden state, x the input
            # embedding slice for the current timestep.
            # update gate
            z = tf.sigmoid(tf.matmul(x, u[0]) + tf.matmul(st_1, w[0]))
            # reset gate
            r = tf.sigmoid(tf.matmul(x, u[1]) + tf.matmul(st_1, w[1]))
            # intermediate
            h = tf.tanh(tf.matmul(x, u[2]) + tf.matmul((r * st_1), w[2]))
            # new state
            st = (1 - z) * h + (z * st_1)
            return st
        # Scan over time: inputs transposed to [time, batch, state].
        states = tf.scan(__step__, tf.transpose(rnn_inputs, [1, 0, 2]), initializer=self.init_state)
        # predictions
        v = tf.get_variable('V', shape=[self.state_size, self.num_classes], initializer=xav_init())
        bo = tf.get_variable('bo', shape=[self.num_classes], initializer=tf.constant_initializer(0.0))
        # transpose and flatten states to 2d matrix for matmult with V
        states = tf.reshape(tf.transpose(states, [1, 0, 2]), [-1, self.state_size])
        logits = tf.add(tf.matmul(states, v), bo)
        # get last state
        # NOTE(review): after the reshape this is the last row of the flattened
        # (batch*time) matrix — correct for batch size 1 as used by generate();
        # confirm intent before using it with larger batches.
        self.last_state = states[-1]
        # predictions
        self.predictions = tf.nn.softmax(logits)
        # optimization
        self.loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=self.ys_))
        self.train_op = tf.train.AdagradOptimizer(learning_rate=self.learning_rate).minimize(self.loss)
        sys.stdout.write(' Done...\n')
    def train(self, train_set, epochs=50, steps_per_epoch=1000):
        """
        Train from the generator `train_set`, which must yield (xs, ys)
        batches via __next__(). Prints the mean loss per epoch and saves a
        checkpoint when training finishes or is interrupted with Ctrl-C.
        """
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            train_loss = 0
            epoch = 0
            try:
                for epoch in range(epochs):
                    for step in range(steps_per_epoch):
                        xs, ys = train_set.__next__()
                        batch_size = xs.shape[0]
                        feed_dict = {self.xs_: xs, self.ys_: ys.flatten(),
                                     self.init_state: np.zeros([batch_size, self.state_size])}
                        _, train_loss_ = sess.run([self.train_op, self.loss], feed_dict=feed_dict)
                        train_loss += train_loss_
                    print('[{}] loss : {}'.format(epoch, train_loss / steps_per_epoch))
                    train_loss = 0
            except KeyboardInterrupt:
                print('interrupted by user at ' + str(epoch))
            # Save a checkpoint whether training completed or was interrupted.
            saver = tf.train.Saver()
            saver.save(sess, self.ckpt_path + self.model_name, global_step=epoch)
    def generate(self, idx2w, w2idx, num_words=100, separator=' '):
        """
        Sample `num_words` tokens from the model, starting from a uniformly
        random word, after restoring the latest checkpoint from ckpt_path.

        Args:
            idx2w: index -> word sequence.
            w2idx: word -> index mapping.
            num_words: number of tokens to sample.
            separator: string used to join the sampled words.

        Returns:
            The sampled words joined by `separator`.
        """
        random_init_word = random.choice(idx2w)
        current_word = w2idx[random_init_word]
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            # restore session
            ckpt = tf.train.get_checkpoint_state(self.ckpt_path)
            saver = tf.train.Saver()
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            words = [current_word]  # generate operation
            state = None
            state_ = None
            # enter the loop; after the first step, feed the previous hidden
            # state back in instead of zeros
            for i in range(num_words):
                if state:
                    feed_dict = {self.xs_: np.array([current_word]).reshape([1, 1]), self.init_state: state_}
                else:
                    feed_dict = {self.xs_: np.array([current_word]).reshape([1, 1]),
                                 self.init_state: np.zeros([1, self.state_size])}
                # forward propagation
                preds, state_ = sess.run([self.predictions, self.last_state], feed_dict=feed_dict)
                state = True  # set flag to true
                # set new word: sample from the predicted distribution
                current_word = np.random.choice(preds.shape[-1], 1, p=np.squeeze(preds))[0]
                words.append(current_word)  # add to list of words
            return separator.join([idx2w[w] for w in words])
|
{"hexsha": "54abfacce7ecc043bfc284fee0ef51570beb9ee9", "size": 5236, "ext": "py", "lang": "Python", "max_stars_repo_path": "rnn_from_scratch/rnn_units/gru_rnn.py", "max_stars_repo_name": "IsaacChanghau/AmusingPythonCodes", "max_stars_repo_head_hexsha": "013ecaaafe62696866b47b0910e1db00cca9ea37", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 26, "max_stars_repo_stars_event_min_datetime": "2018-01-30T09:02:39.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-09T05:14:53.000Z", "max_issues_repo_path": "rnn_from_scratch/rnn_units/gru_rnn.py", "max_issues_repo_name": "IsaacChanghau/AmusingPythonCodes", "max_issues_repo_head_hexsha": "013ecaaafe62696866b47b0910e1db00cca9ea37", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-12-28T04:09:45.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-28T04:09:45.000Z", "max_forks_repo_path": "rnn_from_scratch/rnn_units/gru_rnn.py", "max_forks_repo_name": "IsaacChanghau/AmusingPythonCodes", "max_forks_repo_head_hexsha": "013ecaaafe62696866b47b0910e1db00cca9ea37", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 26, "max_forks_repo_forks_event_min_datetime": "2017-09-29T08:59:35.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-15T03:23:02.000Z", "avg_line_length": 49.3962264151, "max_line_length": 120, "alphanum_fraction": 0.5861344538, "include": true, "reason": "import numpy", "num_tokens": 1214}
|
import numpy as np
def coll_func(x):
    """Analytic helper F(x) used in the theoretical ring-disk collection
    efficiency.

    NOTE(review): matches the standard F(x) form from the Albery/Bruckenstein
    treatment — confirm against the cited electrochemistry reference.
    """
    cbrt = x ** (1 / 3)
    log_term = (np.sqrt(3) / (4 * np.pi)) * np.log((cbrt + 1) ** 3 / (x + 1))
    atan_term = (3 / (2 * np.pi)) * np.arctan((2 * cbrt - 1) / np.sqrt(3))
    return 0.25 + log_term + atan_term
def WE_SA_collection_eff(TYPE="PINE"):
    """Return geometry and collection efficiency for a ring-disk electrode.

    Args:
        TYPE: electrode identifier; one of "ALS", "PINE", "PINE-ring".

    Returns:
        dict with keys 'Electrode_Type', 'CollEff', 'Disk_cm2' and
        'Ring_cm2' (areas in cm^2; radii derived from mm diameters).

    Raises:
        ValueError: if TYPE is not a recognised electrode type.
    """
    if TYPE not in ("ALS", "PINE", "PINE-ring"):
        # BUG FIX: an unknown TYPE previously fell through to the formula
        # below with r1/r2/r3 undefined and raised a confusing NameError.
        raise ValueError("Unknown electrode TYPE: %r" % (TYPE,))
    coll_eff = []
    if TYPE == "ALS":
        # r1 = disk radius, r2 = ring inner radius, r3 = ring outer radius,
        # each in cm (0.1 * diameter_mm * 0.5).
        r1, r2, r3, coll_eff = 0.1 * 4 * 0.5, 0.1 * 5 * 0.5, 0.1 * 7 * 0.5, []
        SAdisk, SAring = np.pi * (r1 ** 2), np.pi * (r3 ** 2 - r2 ** 2)
    if TYPE == "PINE":
        r1, r2, r3 = 0.1 * 5.5 * 0.5, 0.1 * 6.5 * 0.5, 0.1 * 8.5 * 0.5
        coll_eff = 0.38  # manufacturer-specified collection efficiency
        SAdisk, SAring = np.pi * (r1 ** 2), np.pi * (r3 ** 2 - r2 ** 2)
    if TYPE == "PINE-ring":
        # NOTE(review): identical geometry and CollEff to "PINE" — confirm
        # this alias is intentional.
        r1, r2, r3 = 0.1 * 5.5 * 0.5, 0.1 * 6.5 * 0.5, 0.1 * 8.5 * 0.5
        coll_eff = 0.38
        SAdisk, SAring = np.pi * (r1 ** 2), np.pi * (r3 ** 2 - r2 ** 2)
    # SA = np.pi*(r3**2-r2**2)
    if coll_eff == []:
        # No manufacturer value (ALS): compute the theoretical collection
        # efficiency from the radius ratios.
        # NOTE(review): the r3 ** 2 terms below use the absolute radius rather
        # than (r3 / r1) ** 2 — verify against the reference formula.
        a, b = (r2 / r1) ** 3 - 1, (r3 / r1) ** 3 - (r2 / r1) ** 3
        c = a / b
        coll_eff = (
            1
            - r3 ** 2
            + b ** (2 / 3)
            - coll_func(c)
            - b ** (2 / 3) * coll_func(a)
            + r3 ** 2 * coll_func(c * r3 ** 3)
        )
    # r1 = disk, r2 = ring ID, r3 = ring OD (derived from mm diameters above)
    # print('%s According to manufacturer: disk(dia:%.2f cm %.4f cm2), ring (%.4f cm2)' %(TYPE,r1*2,SAdisk,SAring))
    return {
        "Electrode_Type": TYPE,
        "CollEff": coll_eff,
        "Disk_cm2": np.round(SAdisk, 4),
        "Ring_cm2": np.round(SAring, 4),
    }
if __name__ == "__main__":
    # Smoke test: print the default (PINE) electrode geometry and efficiency.
    print(WE_SA_collection_eff())
|
{"hexsha": "befeebe1547b72570f64e9bfaae32ee37b66db85", "size": 1600, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/elchempy/experiments/EC_conditions/electrode.py", "max_stars_repo_name": "MyPyDavid/ECpy", "max_stars_repo_head_hexsha": "b74842b64eca86d2181067fdb22bfa8fa4b2c8bb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2022-01-04T09:06:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-05T08:24:01.000Z", "max_issues_repo_path": "src/elchempy/experiments/EC_conditions/electrode.py", "max_issues_repo_name": "MyPyDavid/ECpy", "max_issues_repo_head_hexsha": "b74842b64eca86d2181067fdb22bfa8fa4b2c8bb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/elchempy/experiments/EC_conditions/electrode.py", "max_forks_repo_name": "MyPyDavid/ECpy", "max_forks_repo_head_hexsha": "b74842b64eca86d2181067fdb22bfa8fa4b2c8bb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-05T12:17:49.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-05T12:17:49.000Z", "avg_line_length": 32.6530612245, "max_line_length": 120, "alphanum_fraction": 0.416875, "include": true, "reason": "import numpy", "num_tokens": 699}
|
import bpy
import mathutils as mut
import numpy as np
import operator
from collections import deque
from constants import C, ORIGIN, CustomError, D, EASE_IN_OUT, PI, WHITE,\
OBJECT_COUNTER, BLACK
from externals.blender_utils import selectOnly, computeQuaternion
from externals.bezier_interpolation import interpolate, getInterpolatedColors
from externals.iterable_utils import mag
class Blobject(object):
"""
The Blobject is the super-class for all of the subclass objects defined below.
It's designed to provide transform, shift, rotate (TSR) and other methods common
to all of these objects. Any object that wishes to make use of its own TSR, etc.
methods should have its own implementation of T, S, R, or whatever and it will
completely override the Blobject definition. Each object should really make use of
its own __init__ however, since each object has a different instantiation process.
"""
def __init__(self):
"""
Blobject constructor - called as a first for every object that inherits from
Blobject.
"""
self.normal = (0, 0, 1)
self.origin = ORIGIN
self.name = []
self.tampered = []
self.texNames = []
self.isTransparent = False
self.currentAlpha = 1
    def transform(self, newNormal=ORIGIN, x=None, y=None, z=None):
        """
        Rotate every Blender object of this Blobject so its stored normal
        points along `newNormal`, then record `newNormal` as the new normal.

        Args:
            newNormal (tuple, optional): target direction; must be non-zero.
            x, y, z (lambda, optional): accepted for signature parity with
                init_transform/update_transform; not used in this method.

        Raises:
            CustomError: if newNormal is left at the zero default.
        """
        # error checking
        if newNormal == ORIGIN:
            raise CustomError(
                "Calling transform() requires a new non-zero direction tuple to be passed in as newNormal"
            )
        selectOnly(self.name)
        for name in self.name:
            obj = D.objects[name]
            # define new vector
            ogAxis = mut.Vector(self.normal)
            newAxis = mut.Vector(newNormal)
            # determine the quaternion carrying ogAxis onto newAxis
            q1 = computeQuaternion(ogAxis, newAxis)
            # perform quaternion rotation
            obj.rotation_mode = "QUATERNION"
            obj.rotation_quaternion = q1 @ obj.rotation_quaternion
        # reset normal
        self.normal = newNormal
    def init_transform(
        self, t0=0, tf=1, rate=EASE_IN_OUT, newNormal=ORIGIN, x=None, y=None, z=None
    ):
        """
        Build the interpolation stack for an animated transform: one
        intermediate normal per frame between t0 and tf. The values are
        reversed so the chronologically-first one comes off the stack first
        (presumably popped once per frame by update_transform — confirm
        against the animation driver).

        Args:
            t0 (int, optional): start time of the animation in seconds.
            tf (int, optional): end time of the animation in seconds.
            rate (tuple, optional): Bezier easing curve. Defaults to EASE_IN_OUT.
            newNormal (tuple, optional): target normal; must be non-zero.
            x, y, z (lambda, optional): custom per-axis paths; if any is None,
                a straight-line constant-velocity path between the normals
                is used instead.

        Returns:
            deque: interpolated (x, y, z) normals.

        Raises:
            CustomError: if newNormal is left at the zero default.
        """
        # error checking
        if newNormal == ORIGIN:
            raise CustomError(
                "Calling transform() requires a new non-zero direction tuple to be passed in as newNormal"
            )
        t = interpolate(t0, tf, rate)
        t.pop(0)
        # if no lambdas specified, create a default linear scaling between normals
        ogAxis = mut.Vector(self.normal)
        newAxis = mut.Vector(newNormal)
        v = (newAxis - ogAxis) / (tf - t0)
        if x == None or y == None or z == None:
            def x(t):
                return ogAxis[0] + (t - t0) * v[0]
            def y(t):
                return ogAxis[1] + (t - t0) * v[1]
            def z(t):
                return ogAxis[2] + (t - t0) * v[2]
        stack = deque()
        t.reverse()
        for tj in t:
            stack.append((x(tj), y(tj), z(tj)))
        return stack
def update_transform(self, val, newNormal=ORIGIN, x=None, y=None, z=None):
"""
Every "update_..." method defines how the animation evolves in time. Tiny
chunks of rotation/shift are popped off the interpolation stack from
"init_..." and used to update each individual frame.
Args:
val: The value popped off of the interpolation stack. Usually, it is simply
used to call the original function, but sometimes special constraints
are required, hence the need for an "update_..." function separate
from the original.
Raises:
CustomError: val must be passed in from the interpolation stack.
"""
# error checking
if newNormal == ORIGIN:
raise CustomError(
"Calling transform() requires a new non-zero direction tuple to be passed in as newNormal"
)
if val is None:
raise CustomError(
"val must be specified and passed into update_transform()"
)
self.transform(val)
def rotate(self, axis=(0, 0, 1), angle=0, angleDeg=False):
"""
Rotates about an axis via some angle, by computing the relevant quaternion
associated with such a rotation and appending it to the object's current
quaternion.
Args:
axis (tuple, optional): Axis about which rotation occurs. Defaults to
(0, 0, 1).
angle (float, optional): Angle of rotation in radians (or degrees if angleDeg
is True). Defaults to 0.
angleDeg (bool, optional): Defines whether angles are in degrees. Defaults
to False.
Raises:
CustomError: requires non-zero rotation axis
CustomError: indeterminate quaternion error usually occurs in flipping
180 degrees. There are separate checks for this for example in the
Vector() functions.
"""
# error checking
if axis == ORIGIN:
raise CustomError(
"Calling rotate() requires a reasonable rotation axis to be passed in as a tuple"
)
if angleDeg:
angle = angle * PI / 180
# change axis to be normalized
axis = tuple([i/mag(axis) for i in axis])
# determine quaternion
q = mut.Quaternion(axis, angle)
# check for indeterminacy of q
if q.magnitude == 0:
raise CustomError(
"Indeterminate Quaternion Rotation: make use of another rotation to interpolate between antiparallel states"
)
q.normalize()
selectOnly(self.name)
i = -1
for name in self.name:
i += 1
# only rotate if untampered
if len(self.tampered) > 0 and self.tampered[i]:
continue
for name in self.texNames:
self.colorSubprocess(name, WHITE)
obj = D.objects[name]
# perform quaternion rotation
obj.rotation_mode = "QUATERNION"
obj.rotation_quaternion = q @ obj.rotation_quaternion
# reset normal
oldVec = mut.Vector(self.normal)
newVec = q @ oldVec
self.normal = newVec[:]
def init_rotate(
self, t0=0, tf=1, rate=EASE_IN_OUT, axis=(0, 0, 1), angle=0, angleDeg=False
):
# error checking
if axis == ORIGIN:
raise CustomError(
"Calling init_rotate() requires a new non-zero axis to be passed in as newNormal"
)
t = interpolate(t0, tf, rate)
t.pop(0)
diffs = np.diff(interpolate(0, angle, rate, numIntervals=len(t))).tolist()
stack = deque()
diffs.reverse()
for smallAngle in diffs:
stack.append(smallAngle)
return stack
def update_rotate(self, val, axis=(0, 0, 1), angle=0, angleDeg=False):
# error checking
if axis == ORIGIN:
raise CustomError(
"Calling rotate() requires a new non-zero direction tuple to be passed in as newNormal"
)
if val is None:
raise CustomError(
"val must be specified and passed into update_transform()"
)
self.rotate(axis, val, angleDeg)
    def shift(self, x=0, y=0, z=0, xLam=None, yLam=None, zLam=None):
        """
        Translate the object by (x, y, z) via Blender's translate operator and
        advance self.origin by the same amount.

        Args:
            x (int, optional): Amount to shift in x-direction. Defaults to 0.
            y (int, optional): Amount to shift in y-direction. Defaults to 0.
            z (int, optional): Amount to shift in z-direction. Defaults to 0.
            xLam, yLam, zLam (lambda, optional): accepted for signature parity
                with init_shift/update_shift; not used in this method.
        """
        selectOnly(self.name)
        # shift the object
        bpy.ops.transform.translate(
            value=(x, y, z),
            orient_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1)),
            mirror=True,
        )
        # keep the tracked origin in sync with the Blender-side translation
        self.origin = tuple(map(operator.add, self.origin, (x, y, z)))
def init_shift(
self,
t0=0,
tf=1,
rate=EASE_IN_OUT,
x=0,
y=0,
z=0,
xLam=None,
yLam=None,
zLam=None,
):
t = interpolate(t0, tf, rate)
t.pop(0)
stack = deque()
# do a simple linear shift...
if xLam == None or yLam == None or zLam == None:
xDiffs = np.diff(interpolate(0, x, rate, numIntervals=len(t))).tolist()
yDiffs = np.diff(interpolate(0, y, rate, numIntervals=len(t))).tolist()
zDiffs = np.diff(interpolate(0, z, rate, numIntervals=len(t))).tolist()
xDiffs.reverse()
yDiffs.reverse()
zDiffs.reverse()
for xVal, yVal, zVal in zip(xDiffs, yDiffs, zDiffs):
stack.append((xVal, yVal, zVal))
return stack
# ... unless lambdas specified, in which case, apply the shift along the lambda
else:
t.reverse()
for tj in t:
stack.append((xLam(tj), yLam(tj), zLam(tj)))
return stack
def update_shift(self, val, x=0, y=0, z=0, xLam=None, yLam=None, zLam=None):
if val is None:
raise CustomError(
"val must be specified and passed into update_transform()"
)
if xLam != None and yLam != None and zLam != None:
shifties = [vali - origi for vali, origi in zip(val, self.origin)]
self.shift(shifties[0], shifties[1], shifties[2])
else:
self.shift(val[0], val[1], val[2])
    def colorSubprocess(self, someName, theColor):
        """
        Apply an emission color to one named Blender object, creating and
        attaching a material (named after the object) when necessary.

        Args:
            someName (str): the name of the subpart of the object to be colored.
            theColor (tuple): tuple of length 4 which defines the RGBA color.
        """
        selectOnly([someName])
        C.view_layer.objects.active = D.objects[someName]
        ob = C.active_object
        mat = D.materials.get(someName)
        # if no material, make it
        if mat is None:
            mat = D.materials.new(name=someName)
        # assign mat to object
        if ob.data.materials:
            # assign to first material slot
            ob.data.materials[0] = mat
        else:
            # no slots
            ob.data.materials.append(mat)
        # use nodes
        C.object.active_material.use_nodes = True
        mat = C.object.material_slots[C.active_object.name]
        # Drive the color through the Principled BSDF "Emission" input.
        mat.material.node_tree.nodes["Principled BSDF"].inputs[
            "Emission"
        ].default_value = theColor
def color(self, theColor=WHITE, ignoreTampered=False):
"""
General coloring of an object.
Args:
theColor (tuple, optional): tuple of length 4 that defines RGBA of color.
Defaults to WHITE.
ignoreTampered (bool, optional): necessary for Tex coloring. Defaults to
False.
"""
if hasattr(self, "texNames"):
for name in self.texNames:
self.colorSubprocess(name, theColor)
i = -1
for leName in self.name:
i += 1
if len(self.tampered) > 0 and self.tampered[i] and not ignoreTampered:
continue
self.colorSubprocess(leName, theColor)
self.objColor = theColor
def init_color(
self, t0=0, tf=1, rate=EASE_IN_OUT, theColor=WHITE, ignoreTampered=False
):
colors = getInterpolatedColors(t0, tf, self.getColor(), theColor, rate)
stack = deque()
colors.reverse()
for var in colors:
stack.append(var[1])
return stack
def update_color(self, val, theColor=WHITE, ignoreTampered=False):
if val is None:
raise CustomError("val must be specified and passed into update_color()")
self.color(val)
def fade(self, color=(-1, -1, -1, 1), ignoreTampered=False):
"""Fade from current color to color
Args:
color (tuple, optional): Color to be faded into. Defaults to
(-1, -1, -1, 1).
ignoreTampered (bool, optional): Necessary for proper Tex coloring.
Defaults to False.
"""
if color == (-1, -1, -1, 1):
color = self.getOppositeColor()
# wrapper for color
theColor = color
self.color(theColor, ignoreTampered)
def init_fade(
self, t0=0, tf=2, rate=EASE_IN_OUT, color=(-1, -1, -1, 1), ignoreTampered=False
):
if color == (-1, -1, -1, 1):
color = self.getOppositeColor()
return self.init_color(t0, tf, rate, color)
    def update_fade(self, val, color=(-1, -1, -1, 1), ignoreTampered=False):
        # Apply one interpolated color popped from the init_fade stack.
        self.update_color(val)
def fadeShift(self, color=(-1, -1, -1, -1), x=0, y=0, z=0):
"""
Literally what the name implies: fades into a color while shifting by
(x, y, z).
Args:
color (tuple, optional): Color to fade into. Defaults to
(-1, -1, -1, -1).
x (int, optional): Value to shift in the x-direction. Defaults to 0.
y (int, optional): Value to shift in the y-direction. Defaults to 0.
z (int, optional): Value to shift in the z-direction. Defaults to 0.
Raises:
CustomError: requires color to be a tuple
"""
# error checking for color
if type(color) is not tuple:
raise CustomError("fadeShift() error: color must be a tuple")
self.fade(color)
self.shift(x, y, z)
def init_fadeShift(
self, t0=0, tf=2, rate=EASE_IN_OUT, color=(-1, -1, -1, 1), x=0, y=0, z=0
):
# error checking for color
if type(color) is not tuple:
raise CustomError("fadeShift() error: color must be a tuple")
if color == (-1, -1, -1, 1):
color = self.getOppositeColor()
colors = getInterpolatedColors(t0, tf, self.getColor(), color, rate)
colors.reverse()
t = interpolate(t0, tf, rate)
t.pop(0)
# do a simple linear shift...
xDiffs = np.diff(interpolate(0, x, rate, numIntervals=len(t))).tolist()
yDiffs = np.diff(interpolate(0, y, rate, numIntervals=len(t))).tolist()
zDiffs = np.diff(interpolate(0, z, rate, numIntervals=len(t))).tolist()
xDiffs.reverse()
yDiffs.reverse()
zDiffs.reverse()
stack = deque()
# append fade and shift vals to stack
for var, xVal, yVal, zVal in zip(colors, xDiffs, yDiffs, zDiffs):
stack.append([var[1], (xVal, yVal, zVal)])
return stack
    def update_fadeShift(self, val, color=(-1, -1, -1, 1), x=0, y=0, z=0):
        # val holds [interpolated color, (dx, dy, dz)] from init_fadeShift.
        self.fade(val[0])
        self.shift(*val[1])
    # Directional convenience wrappers: fade while shifting left/right along
    # the x axis or up/down along the y axis by `dist`. Each comes with
    # matching init_/update_ variants delegating to the fadeShift family.
    def fadeLeft(self, color=(-1, -1, -1, -1), dist=0):
        self.fadeShift(color, -dist)
    def init_fadeLeft(
        self, t0=0, tf=2, rate=EASE_IN_OUT, color=(-1, -1, -1, -1), dist=0
    ):
        return self.init_fadeShift(t0, tf, rate, color, -dist)
    def update_fadeLeft(self, val, color=(-1, -1, -1, -1), dist=0):
        self.update_fadeShift(val, color, -dist)
    def fadeRight(self, color=(-1, -1, -1, -1), dist=0):
        self.fadeShift(color, dist)
    def init_fadeRight(
        self, t0=0, tf=2, rate=EASE_IN_OUT, color=(-1, -1, -1, -1), dist=0
    ):
        return self.init_fadeShift(t0, tf, rate, color, dist)
    def update_fadeRight(self, val, color=(-1, -1, -1, -1), dist=0):
        self.update_fadeShift(val, color, dist)
    def fadeUp(self, color=(-1, -1, -1, -1), dist=0):
        self.fadeShift(color, 0, dist)
    def init_fadeUp(self, t0=0, tf=2, rate=EASE_IN_OUT, color=(-1, -1, -1, -1), dist=0):
        return self.init_fadeShift(t0, tf, rate, color, 0, dist)
    def update_fadeUp(self, val, color=(-1, -1, -1, -1), dist=0):
        self.update_fadeShift(val, color, 0, dist)
    def fadeDown(self, color=(-1, -1, -1, -1), dist=0):
        self.fadeShift(color, 0, -dist)
    def init_fadeDown(
        self, t0=0, tf=2, rate=EASE_IN_OUT, color=(-1, -1, -1, -1), dist=0
    ):
        return self.init_fadeShift(t0, tf, rate, color, 0, -dist)
    def update_fadeDown(self, val, color=(-1, -1, -1, -1), dist=0):
        self.update_fadeShift(val, color, 0, -dist)
def transparent(self, alpha=1, ignoreTampered=False):
"""
Makes an object transparent by some amount alpha. Alpha of 1 is totally
opaque and alpha of 0 is totally transparent, i.e. completely dark and
undetectable in the black scene. Balance between 0 and 1 is key.
Args:
alpha (int, optional): Alpha transparency. Defaults to 1.
ignoreTampered (bool, optional): Necessary for Tex coloring. Defaults
to False.
"""
if hasattr(self, "texNames"):
for name in self.texNames:
self.transparentSubprocess(name, alpha)
i = -1
for leName in self.name:
i += 1
if len(self.tampered) > 0 and self.tampered[i] and not ignoreTampered:
continue
self.transparentSubprocess(leName, alpha)
self.isTransparent = True
self.currentAlpha = alpha
def init_transparent(
self, t0=0, tf=2, rate=EASE_IN_OUT, alpha=1, ignoreTampered=False
):
timeVals = interpolate(t0, tf, rate)
timeVals.pop(0)
alphaValues = interpolate(self.currentAlpha, alpha, rate, len(timeVals))
alphaValues.pop(0)
stack = deque()
alphaValues.reverse()
for val in alphaValues:
stack.append(val)
return stack
def update_transparent(self, val, alpha=1, ignoreTampered=False):
self.transparent(val, ignoreTampered)
    def transparentSubprocess(self, someName, alpha):
        """Applies a transparency to a subpart of an object.
        Args:
            someName (str): the name of the subpart in Blender UI to which
            transparency must be applied.
            alpha (int): alpha value between 0 and 1, with 0 being totally
            transparent and 1 being totally opaque.
        """
        selectOnly([someName])
        # make this subpart the active object so C.object/C.active_object
        # refer to it below
        C.view_layer.objects.active = D.objects[someName]
        ob = C.active_object
        mat = D.materials.get(someName)
        # if no material, make it
        if mat is None:
            mat = D.materials.new(name=someName)
        # assign mat to object
        if ob.data.materials:
            # assign to first material slot
            ob.data.materials[0] = mat
        else:
            # no slots
            ob.data.materials.append(mat)
        # use nodes
        C.object.active_material.use_nodes = True
        # rebind mat to the material slot wrapper (not the raw material)
        mat = C.object.material_slots[C.active_object.name]
        # BLEND lets the alpha channel actually render as transparency
        C.object.active_material.blend_method = "BLEND"
        # print(mat.material.node_tree.nodes['Principled BSDF'].inputs['Alpha'].default_value)
        mat.material.node_tree.nodes["Principled BSDF"].inputs[
            "Alpha"
        ].default_value = alpha
    def changeOriginTo(self, x=0, y=0, z=0):
        """
        Changes the origin of an object to any (x, y, z) in space without actually
        shifting/reorienting the object. Considering rotate() rotates an object about
        its origin, this function is *very* useful.
        Args:
            x (int, optional): x-value of new origin. Defaults to 0.
            y (int, optional): y-value of new origin. Defaults to 0.
            z (int, optional): z-value of new origin. Defaults to 0.
        """
        # shift cursor to location
        C.scene.cursor.location = (x, y, z)
        # NOTE(review): other methods call selectOnly([name]) with a list; a
        # bare string is passed here - confirm selectOnly accepts both forms.
        for name in self.name:
            selectOnly(name)
            # shift object origin to cursor
            bpy.ops.object.origin_set(type="ORIGIN_CURSOR", center="MEDIAN")
        # do it for texNames too
        # NOTE(review): unlike transparent(), there is no hasattr(self,
        # "texNames") guard - objects without texNames would raise
        # AttributeError here; verify this is intended.
        for name in self.texNames:
            selectOnly(name)
            # shift object origin to cursor
            bpy.ops.object.origin_set(type="ORIGIN_CURSOR", center="MEDIAN")
        # send 3D cursor back to origin
        C.scene.cursor.location = ORIGIN
def delete(self):
"""
Deletes an object from the UI and Blender memory. Necessary to free up
space, as having many objects in the UI is the primary thing that slows
down scripts in my experience.
"""
# delete object's components in UI
for stringy in self.name:
D.objects.remove(D.objects[stringy], do_unlink=True)
# find any dangling collections and delete them too
for c in D.collections:
if len(c.objects.values()) == 0:
D.collections.remove(c)
# delete dangling meshes, materials, lights, textures, curves, cameras, images
for block in D.meshes:
if block.users == 0:
D.meshes.remove(block)
for block in D.materials:
if block.users == 0:
D.materials.remove(block)
for block in D.lights:
if block.users == 0:
D.lights.remove(block)
for block in D.textures:
if block.users == 0:
D.textures.remove(block)
for block in D.images:
if block.users == 0:
D.images.remove(block)
for block in D.curves:
if block.users == 0:
D.curves.remove(block)
for block in D.cameras:
if block.users == 0:
D.cameras.remove(block)
# remove materials again - in case curves had materials
for block in D.materials:
if block.users == 0:
D.materials.remove(block)
    def createID(self, str):
        """
        ID of an object - based on the global OBJECT_COUNTER, which counts upward
        to infinity.
        Args:
            str (str): unused; shadows the builtin `str` within this method.
        Returns:
            int: the numerical ID of the object - also happens to be the global
            OBJECT_COUNTER.
        """
        # NOTE(review): the `str` parameter is never used and shadows the
        # builtin `str` - consider renaming/removing once callers are audited.
        # a simple counter shall suffice.
        global OBJECT_COUNTER
        OBJECT_COUNTER += 1
        return OBJECT_COUNTER
def stringID(self, i):
"""
String representation of an object's ID with 8 digits. I've never had
anywhere remotely near 99 million objects generated in a script, so 8
digits is plenty for me, but in principle, you can always change the 8
to be a larger number... just not too large...
Args:
i (int): the ID for which a string is generated
Returns:
str: string representation of ID. Example: 34 becomes ".00000034"
"""
return "." + str(i).zfill(8)
def getOppositeColor(self):
"""
Needed for determining the difference between light (any color) and
dark (totally black).
Returns:
tuple: opposite color of object.
"""
if self.getColor() == BLACK:
return WHITE
else:
return BLACK
def getColor(self):
"""
Returns the current color of the object.
Returns:
tuple: current color of object.
"""
if hasattr(self, "objColor"):
return self.objColor
for leName in self.name:
selectOnly([leName])
C.view_layer.objects.active = D.objects[leName]
mat = D.materials.get(leName)
# if no material, it's (probably?) black
if mat is None:
return BLACK
else:
mat = C.object.material_slots[C.active_object.name]
leColor = (
mat.material.node_tree.nodes["Principled BSDF"]
.inputs["Emission"]
.default_value
)
return tuple(leColor[i] for i in (0, 1, 2, 3))
|
{"hexsha": "3543d22c1543ff8e9db0a97b6dc874552e6eca38", "size": 25892, "ext": "py", "lang": "Python", "max_stars_repo_path": "peeps/blobjects/blobject.py", "max_stars_repo_name": "hmomin/peeps", "max_stars_repo_head_hexsha": "885a48262872c9fa51616ae1987463936fd0f743", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-10-06T19:46:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-30T19:05:08.000Z", "max_issues_repo_path": "peeps/blobjects/blobject.py", "max_issues_repo_name": "hmomin/peeps", "max_issues_repo_head_hexsha": "885a48262872c9fa51616ae1987463936fd0f743", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "peeps/blobjects/blobject.py", "max_forks_repo_name": "hmomin/peeps", "max_forks_repo_head_hexsha": "885a48262872c9fa51616ae1987463936fd0f743", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.2303030303, "max_line_length": 124, "alphanum_fraction": 0.5743859107, "include": true, "reason": "import numpy", "num_tokens": 6214}
|
import torch
from abs_models import utils as u
import numpy as np
def squared_L2_loss(a, b, axes, keepdim=True):
    """Sum of squared differences between `a` and `b` over `axes`."""
    squared_diff = (a - b) ** 2
    return u.tsum(squared_diff, axes=axes, keepdim=keepdim)
def KLD(mu_latent_q, sig_q=1., dim=-3):
    """KL divergence term of a diagonal Gaussian q against the unit prior.

    :param mu_latent_q: latent means; must have shape (..., n_latent, ...)
        with the latent axis at position `dim`
    :param sig_q: scalar standard deviation of q
    :param dim: position of the latent axis
    :return: KLD summed over the latent axis (axis kept via keepdim)
    """
    per_latent = 1 - mu_latent_q ** 2 + u.tlog(sig_q) - sig_q ** 2
    return -0.5 * torch.sum(per_latent, dim=dim, keepdim=True)
def ELBOs(x_rec: torch.Tensor, samples_latent: torch.Tensor, x_orig: torch.Tensor,
          beta=1, dist_fct=squared_L2_loss):
    """Per-sample (beta-)ELBO, normalised by the number of pixels.

    :param x_rec: reconstructions, shape (..., n_channels, nx, ny)
    :param samples_latent: latent samples, shape (..., n_latent, 1, 1)
    :param x_orig: originals, shape (..., n_channels, nx, ny)
    :param beta: weight of the KL term
    :param dist_fct: reconstruction distance, defaults to squared_L2_loss
    :return: (rec_loss + beta * KLD) / (n_channels * nx * ny)
    """
    n_ch, nx, ny = x_rec.shape[-3:]
    rec_loss = dist_fct(x_orig, x_rec, axes=[-1, -2, -3])
    kld = KLD(samples_latent, sig_q=1.)
    return (rec_loss + beta * kld) / (n_ch * nx * ny)
def ELBOs2(x, rec_x, samples_latent, beta):
    """Loss used during inference to calculate the logits.

    Compares every sample in `x` against every sample in `rec_x` using the
    expansion ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, which is much faster
    and more memory efficient than broadcasting the full difference tensor.
    Operates only on the last three dimensions of `x` and `rec_x`.
    """
    input_size = int(np.prod(x.shape[-3:]))
    assert len(x.shape) == 4 and len(rec_x.shape) == 4
    assert x.shape[-3:] == rec_x.shape[-3:]
    flat_x = x.reshape(x.shape[0], input_size)
    flat_y = rec_x.reshape(rec_x.shape[0], input_size)
    x2 = torch.norm(flat_x, p=2, dim=-1, keepdim=True).pow(2)  # (bs, 1)
    y2 = torch.norm(flat_y, p=2, dim=-1, keepdim=True).pow(2)  # (nsamples, 1)
    # y2 could be cached, but computing it is so fast it doesn't matter
    L2squared = (x2 + y2.t() - 2 * torch.mm(flat_x, flat_y.t())) / input_size
    # the KLD sum runs over the latents, not over the input size
    kld = KLD(samples_latent, sig_q=1.)[None, :, 0, 0, 0] / input_size
    return L2squared + beta * kld
|
{"hexsha": "853ff048cef441826c21e5187119df5284223302", "size": 2324, "ext": "py", "lang": "Python", "max_stars_repo_path": "abs_models/loss_functions.py", "max_stars_repo_name": "akashkumar25/AnalysisBySynthesis", "max_stars_repo_head_hexsha": "daa5a3df5c5fe2f809b6e2aa755b15ed3e07c3ff", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 59, "max_stars_repo_stars_event_min_datetime": "2018-10-23T13:27:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-19T15:50:59.000Z", "max_issues_repo_path": "Chapter08/Testing Adversarial-Robustness of Neural Networks/abs_models/loss_functions.py", "max_issues_repo_name": "DandyAndy22/Machine-Learning-for-Cybersecurity-Cookbook", "max_issues_repo_head_hexsha": "dd1095173e86522d52949e82c43fdea1bb7a5fdd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2018-10-24T02:43:36.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-03T04:48:22.000Z", "max_forks_repo_path": "Chapter08/Testing Adversarial-Robustness of Neural Networks/abs_models/loss_functions.py", "max_forks_repo_name": "DandyAndy22/Machine-Learning-for-Cybersecurity-Cookbook", "max_forks_repo_head_hexsha": "dd1095173e86522d52949e82c43fdea1bb7a5fdd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 22, "max_forks_repo_forks_event_min_datetime": "2018-10-23T13:31:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-04T20:47:42.000Z", "avg_line_length": 34.1764705882, "max_line_length": 82, "alphanum_fraction": 0.6303786575, "include": true, "reason": "import numpy", "num_tokens": 738}
|
"""
Clase "Decaimiento radiactivo"
Luis Eduardo Sánchez González
Facultad de Ciencias Físico Matemáticas
Física Computacional
sáb 01 may 2021 10:12:14 CDT
Repositorio: https://github.com/Luis2501/Fisica-Computacional-1
"""
from random import random
import numpy as np
class Radioactive_Decay:
    """Monte Carlo simulation of radioactive decay.

    Starting from `N0` nuclei, each surviving nucleus decays with probability
    `p` at every one of `t` time steps.
    """

    def __init__(self, N0, t, p):
        self.N0 = N0
        self.t = t
        self.p = p

    def __call__(self):
        """Run one simulation; return the nucleus count at each time step."""
        remaining = self.N0
        self.N = np.zeros(self.t)
        self.N[0] = remaining
        for step in range(self.t - 1):
            # each still-alive nucleus decays independently with probability p
            for _ in range(remaining):
                if random() < self.p:
                    remaining -= 1
            self.N[step + 1] = remaining
        return self.N

    def decay_mean(self, M):
        """Average the decay curve over `M` independent simulations."""
        try:
            total = np.zeros(self.t)
            for _ in range(M):
                total += self()
            return (1 / M) * total
        except ValueError:
            print("Decay failed")
|
{"hexsha": "4de1b2032e7cc5180712f94077f5f2e386fe2aab", "size": 776, "ext": "py", "lang": "Python", "max_stars_repo_path": "Procesos aleatorios/Radioactive_Decay.py", "max_stars_repo_name": "Luis2501/Fisica-Computacional-1", "max_stars_repo_head_hexsha": "502481ad900cf7dabdb7f9a74b0f68c60876c5ce", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Procesos aleatorios/Radioactive_Decay.py", "max_issues_repo_name": "Luis2501/Fisica-Computacional-1", "max_issues_repo_head_hexsha": "502481ad900cf7dabdb7f9a74b0f68c60876c5ce", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Procesos aleatorios/Radioactive_Decay.py", "max_forks_repo_name": "Luis2501/Fisica-Computacional-1", "max_forks_repo_head_hexsha": "502481ad900cf7dabdb7f9a74b0f68c60876c5ce", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 13.8571428571, "max_line_length": 63, "alphanum_fraction": 0.6275773196, "include": true, "reason": "import numpy", "num_tokens": 270}
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 15 11:24:15 2021
@author: Christian Pfister
https://cpfister.com
https://github.com/christianpfister43?tab=repositories
Schuldenuhr: https://www.gold.de/staatsverschuldung-deutschland/
"""
import numpy as np
from PIL import ImageGrab
import cv2
import os
#%% set your custom paths and parameters here!
"""
parameters here work for me for:
    https://www.gold.de/staatsverschuldung-deutschland/
    google chrome
    res: 1920 * 1080
you will need to adapt these for your system, and problem
"""
im_path = './data'
number_of_digits = 13  # German debt currently has 13 digits!
# width and height of the digits
w = 20
h = 28
# offset where the digits begin on the screen
x_0 = 800
y_0 = 613
# width of the sliding window
delta_x = w
# width of the "." that separates bundles of 3 digits
dot_width = 11
#%% loop over all digits
# NOTE(review): digit_array is allocated but never filled in this script -
# presumably meant for a later step; confirm before removing.
digit_array = [[] for n in range(number_of_digits)]
dot_offset = 0
for n in range (number_of_digits):
    # after digits 2, 5, 8 and 11 a thousands separator "." appears on screen
    if (n==1)|(n==4)|(n==7)|(n==10):
        dot_offset+= dot_width # to jump over "." every 3 digits
        # depending on your problem this will need adaption, e.g. jump over a "." and a "," can be different
    # grab image from screen and transform to numpy array
    screen_cap = ImageGrab.grab(bbox=(x_0+n*delta_x+dot_offset,y_0,x_0+n*delta_x+w+dot_offset,y_0+h))
    screen_cap_array = np.array(screen_cap.getdata(), dtype='uint8')\
        .reshape((screen_cap.size[1],screen_cap.size[0],3))
    # cv2.imshow(f'window_{n}', printscreen_numpy)
    # padding the image-border with white and resize to 28*28 pixels
    # this helped me for recognizing the digits later
    im = cv2.copyMakeBorder(screen_cap_array.copy(),3,3,3,3,cv2.BORDER_CONSTANT,value=[255,255,255])
    im = cv2.resize(im, (28,28))
    # save image of digit
    cv2.imwrite(f'{im_path}/digit_{n}.png',im)
|
{"hexsha": "894ba525fc76b93ec5a0b50dcd0042cae65d7f6d", "size": 1866, "ext": "py", "lang": "Python", "max_stars_repo_path": "screen_parsing_schuldenuhr.py", "max_stars_repo_name": "christianpfister43/Reading-a-Live-Ticker", "max_stars_repo_head_hexsha": "46eef58c69dbcd805c32646b38b56efecbde5d4e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "screen_parsing_schuldenuhr.py", "max_issues_repo_name": "christianpfister43/Reading-a-Live-Ticker", "max_issues_repo_head_hexsha": "46eef58c69dbcd805c32646b38b56efecbde5d4e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "screen_parsing_schuldenuhr.py", "max_forks_repo_name": "christianpfister43/Reading-a-Live-Ticker", "max_forks_repo_head_hexsha": "46eef58c69dbcd805c32646b38b56efecbde5d4e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.5901639344, "max_line_length": 108, "alphanum_fraction": 0.7025723473, "include": true, "reason": "import numpy", "num_tokens": 545}
|
#app.py
from flask import Flask, flash, request, redirect, url_for, render_template
import urllib.request
from werkzeug.utils import secure_filename
import cv2
import pytesseract
import numpy as np
app = Flask(__name__)
UPLOAD_FOLDER = 'static/uploads/'
# NOTE(review): hard-coded secret key - load a secure random value from
# configuration/environment before deploying this app.
app.secret_key = "secret key"
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# cap request body size at 16 MiB
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
def allowed_file(filename):
    """Return True iff `filename` has an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
@app.route('/')
def home():
    """Render the upload form."""
    return render_template('test.html')
@app.route('/', methods=['POST'])
def upload_image():
    """Accept an uploaded ID-card image, align it against a template with ORB
    feature matching, OCR the regions of interest with Tesseract, and render
    the extracted text back to the user."""
    if 'file' not in request.files:
        flash('No file part')
        return redirect(request.url)
    file = request.files['file']
    if file.filename == '':
        flash('No image selected for uploading')
        return redirect(request.url)
    if file and allowed_file(file.filename):
        filename = secure_filename(file.filename)
        import os
        file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
        # keep the best 25% of feature matches
        per = 25
        # regions of interest on the aligned template: [top-left, bottom-right,
        # field type, field label]
        roi = [[(410, 236), (768, 282), 'text', 'Số'],
               [(300, 302), (830, 344), 'text', '/Họ tên'],
               [(584, 352), (770, 388), 'text', '/Ngày sinh'],
               [(480, 380), (566, 420), 'text', '/Giới tính'],
               [(838, 384), (984, 422), 'text', '/Quốc tịch'],
               [(300, 458), (936, 494), 'text', '/Quê quán'],
               [(700, 496), (910, 534), 'text', '/Địa chỉ'],
               [(300, 526), (962, 562), 'text', '']]
        # NOTE(review): Windows-specific hard-coded Tesseract path - make
        # configurable before deploying elsewhere.
        pytesseract.pytesseract.tesseract_cmd = 'D:\\Tesseract-OCR\\tesseract.exe'
        # template image the upload is aligned against
        imgQ = cv2.imread('cc.jpg')
        h, w, c = imgQ.shape
        orb = cv2.ORB_create(5000)
        kp1, des1 = orb.detectAndCompute(imgQ, None)
        # imgKp1 = cv2.drawKeypoints(imgQ,kp1,None)
        # NOTE(review): this re-processes every file in the uploads folder on
        # each request, not just the new upload - confirm that is intended.
        path = 'static\\uploads'
        myPicList = os.listdir(path)
        print(myPicList)
        for j, y in enumerate(myPicList):
            img = cv2.imread(path + '/' + y)
            # cv2.imshow('y', img)
            kp2, des2 = orb.detectAndCompute(img, None)
            bf = cv2.BFMatcher(cv2.NORM_HAMMING)
            matches = bf.match(des2, des1)
            # NOTE(review): sorted() returns a new list that is discarded
            # here, so `good` is taken from the UNSORTED matches - this looks
            # like a bug; likely intended: matches.sort(key=...).
            sorted(matches, key=lambda x: x.distance)
            good = matches[:int(len(matches) * (per / 100))]
            imgMatch = cv2.drawMatches(img, kp2, imgQ, kp1, good[:50], None, flags=2)
            cv2.imshow('y1', imgMatch)
            srcPoints = np.float32([kp2[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
            dstPoints = np.float32([kp1[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
            # homography warps the upload into the template's coordinates
            M, _ = cv2.findHomography(srcPoints, dstPoints, cv2.RANSAC, 5.0)
            imgScan = cv2.warpPerspective(img, M, (w, h))
            # cv2.imshow('y', imgScan)
            imgShow = imgScan.copy()
            imgMask = np.zeros_like(imgShow)
            myData = []
            for x, r in enumerate(roi):
                cv2.rectangle(imgMask, ((r[0][0]), r[0][1]), ((r[1][0]), r[1][1]), (0, 255, 00), cv2.FILLED)
                imgShow = cv2.addWeighted(imgShow, 0.99, imgMask, 0.1, 0)
                imgCrop = imgScan[r[0][1]:r[1][1], r[0][0]:r[1][0]]
                # cv2.imshow(str(x), imgCrop)
                if r[2] == 'text':
                    txt = f'{r[3]} : {pytesseract.image_to_string(imgCrop, lang="vie")}'
                    print(txt)
                    myData.append(pytesseract.image_to_string(imgCrop, lang="vie"))
                    # NOTE(review): appending means dich.txt accumulates text
                    # across requests; the flash below shows all of it.
                    with open('dich.txt', 'a', encoding='utf-8') as f:
                        f.writelines(txt)
        # cv2.imshow('y', imgShow)
        # # cv2.imshow('output1',imgKp1)
        # cv2.imshow('output', imgQ)
        # cv2.waitKey(0)
        # NOTE(review): f1 is never closed - consider a `with` block.
        f1 = open('dich.txt', 'r', encoding='UTF-8')
        data1 = f1.read()
        flash(data1)
        return render_template('test.html', filename=filename)
    else:
        flash('Allowed image types are - png, jpg, jpeg, gif')
        return redirect(request.url)
@app.route('/display/<filename>')
def display_image(filename):
    """Permanently redirect to the uploaded file inside the static folder."""
    return redirect(url_for('static', filename='uploads/' + filename), code=301)
if __name__ == "__main__":
    # debug=True enables the reloader/debugger - development use only
    app.run(debug=True)
|
{"hexsha": "d2cb862ffe38e6d5c023e5a68319d44cf77f72d8", "size": 4489, "ext": "py", "lang": "Python", "max_stars_repo_path": "test.py", "max_stars_repo_name": "longtp12/opencv4nodejs", "max_stars_repo_head_hexsha": "05952d1119c1dd9ccf696434bcc5cdf73f06b627", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test.py", "max_issues_repo_name": "longtp12/opencv4nodejs", "max_issues_repo_head_hexsha": "05952d1119c1dd9ccf696434bcc5cdf73f06b627", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test.py", "max_forks_repo_name": "longtp12/opencv4nodejs", "max_forks_repo_head_hexsha": "05952d1119c1dd9ccf696434bcc5cdf73f06b627", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.0075757576, "max_line_length": 109, "alphanum_fraction": 0.5373134328, "include": true, "reason": "import numpy", "num_tokens": 1228}
|
chapter\<open>Preliminaries\<close>
text\<open>In this chapter, we introduce the preliminaries, including a three-valued logic, variables,
arithmetic expressions and guard expressions.\<close>
section\<open>Three-Valued Logic\<close>
text\<open>Because our EFSMs are dynamically typed, we cannot rely on conventional Boolean logic when
evaluating expressions. For example, we may end up in the situation where we need to evaluate
the guard $r_1 > 5$. This is fine if $r_1$ holds a numeric value, but if $r_1$ evaluates to a
string, this causes problems. We cannot simply evaluate to \emph{false} because then the negation
would evaluate to \emph{true.} Instead, we need a three-valued logic such that we can meaningfully
evaluate nonsensical guards.
The \texttt{trilean} datatype is used to implement three-valued Bochvar logic
\cite{bochvar1981}. Here we prove that the logic is an idempotent semiring, define a partial order,
and prove some other useful lemmas.\<close>
theory Trilean
imports Main
begin
(* The three truth values of Bochvar three-valued logic. *)
datatype trilean = true | false | invalid
(* trilean forms a semiring with disjunction as + and conjunction as *. *)
instantiation trilean :: semiring begin
(* Conjunction: invalid absorbs everything; otherwise Boolean "and". *)
fun times_trilean :: "trilean \<Rightarrow> trilean \<Rightarrow> trilean" where
"times_trilean _ invalid = invalid" |
"times_trilean invalid _ = invalid" |
"times_trilean true true = true" |
"times_trilean _ false = false" |
"times_trilean false _ = false"
(* Disjunction: invalid absorbs everything; otherwise Boolean "or". *)
fun plus_trilean :: "trilean \<Rightarrow> trilean \<Rightarrow> trilean" where
"plus_trilean invalid _ = invalid" |
"plus_trilean _ invalid = invalid" |
"plus_trilean true _ = true" |
"plus_trilean _ true = true" |
"plus_trilean false false = false"
(* Readable infix aliases for the semiring operations. *)
abbreviation maybe_and :: "trilean \<Rightarrow> trilean \<Rightarrow> trilean" (infixl "\<and>?" 70) where
"maybe_and x y \<equiv> x * y"
abbreviation maybe_or :: "trilean \<Rightarrow> trilean \<Rightarrow> trilean" (infixl "\<or>?" 65) where
"maybe_or x y \<equiv> x + y"
(* Associativity of \<or>? by induction on the defining equations. *)
lemma plus_trilean_assoc:
"a \<or>? b \<or>? c = a \<or>? (b \<or>? c)"
proof(induct a b arbitrary: c rule: plus_trilean.induct)
case (1 uu)
then show ?case
by simp
next
case "2_1"
then show ?case
by simp
next
case "2_2"
then show ?case
by simp
next
case "3_1"
then show ?case
by (metis plus_trilean.simps(2) plus_trilean.simps(4) trilean.exhaust)
next
case "3_2"
then show ?case
by (metis plus_trilean.simps(3) plus_trilean.simps(5) plus_trilean.simps(6) plus_trilean.simps(7) trilean.exhaust)
next
case 4
then show ?case
by (metis plus_trilean.simps(2) plus_trilean.simps(3) plus_trilean.simps(4) plus_trilean.simps(5) plus_trilean.simps(6) trilean.exhaust)
next
case 5
then show ?case
by (metis plus_trilean.simps(6) plus_trilean.simps(7) trilean.exhaust)
qed
(* Commutativity of \<or>?. *)
lemma plus_trilean_commutative: "a \<or>? b = b \<or>? a"
proof(induct a b rule: plus_trilean.induct)
case (1 uu)
then show ?case
by (metis plus_trilean.simps(1) plus_trilean.simps(2) plus_trilean.simps(3) trilean.exhaust)
next
case "2_1"
then show ?case
by simp
next
case "2_2"
then show ?case
by simp
next
case "3_1"
then show ?case
by simp
next
case "3_2"
then show ?case
by simp
next
case 4
then show ?case
by simp
next
case 5
then show ?case
by simp
qed
(* Commutativity of \<and>?. *)
lemma times_trilean_commutative: "a \<and>? b = b \<and>? a"
by (metis (mono_tags) times_trilean.simps trilean.distinct(5) trilean.exhaust)
(* Associativity of \<and>?. *)
lemma times_trilean_assoc:
"a \<and>? b \<and>? c = a \<and>? (b \<and>? c)"
proof(induct a b arbitrary: c rule: plus_trilean.induct)
case (1 uu)
then show ?case
by (metis (mono_tags, lifting) times_trilean.simps(1) times_trilean_commutative)
next
case "2_1"
then show ?case
by (metis (mono_tags, lifting) times_trilean.simps(1) times_trilean_commutative)
next
case "2_2"
then show ?case
by (metis (mono_tags, lifting) times_trilean.simps(1) times_trilean_commutative)
next
case "3_1"
then show ?case
by (metis times_trilean.simps(1) times_trilean.simps(4) times_trilean.simps(5) trilean.exhaust)
next
case "3_2"
then show ?case
by (metis times_trilean.simps(1) times_trilean.simps(5) times_trilean.simps(6) times_trilean.simps(7) trilean.exhaust)
next
case 4
then show ?case
by (metis times_trilean.simps(1) times_trilean.simps(4) times_trilean.simps(5) times_trilean.simps(7) trilean.exhaust)
next
case 5
then show ?case
by (metis (full_types) times_trilean.simps(1) times_trilean.simps(6) times_trilean.simps(7) trilean.exhaust)
qed
(* Distributivity of \<and>? over \<or>? (from the right). *)
lemma trilean_distributivity_1:
"(a \<or>? b) \<and>? c = a \<and>? c \<or>? b \<and>? c"
proof(induct a b rule: times_trilean.induct)
case (1 uu)
then show ?case
by (metis (mono_tags, lifting) plus_trilean.simps(1) plus_trilean_commutative times_trilean.simps(1) times_trilean_commutative)
next
case "2_1"
then show ?case
by (metis (mono_tags, lifting) plus_trilean.simps(1) times_trilean.simps(1) times_trilean_commutative)
next
case "2_2"
then show ?case
by (metis (mono_tags, lifting) plus_trilean.simps(1) times_trilean.simps(1) times_trilean_commutative)
next
case 3
then show ?case
apply simp
by (metis (no_types, hide_lams) plus_trilean.simps(1) plus_trilean.simps(4) plus_trilean.simps(7) times_trilean.simps(1) times_trilean.simps(4) times_trilean.simps(5) trilean.exhaust)
next
case "4_1"
then show ?case
apply simp
by (metis (no_types, hide_lams) plus_trilean.simps(1) plus_trilean.simps(5) plus_trilean.simps(7) times_trilean.simps(1) times_trilean.simps(4) times_trilean.simps(5) times_trilean.simps(6) times_trilean.simps(7) trilean.exhaust)
next
case "4_2"
then show ?case
apply simp
by (metis (no_types, hide_lams) plus_trilean.simps(1) plus_trilean.simps(7) times_trilean.simps(1) times_trilean.simps(6) times_trilean.simps(7) trilean.exhaust)
next
case 5
then show ?case
apply simp
by (metis (no_types, hide_lams) plus_trilean.simps(1) plus_trilean.simps(6) plus_trilean.simps(7) times_trilean.simps(1) times_trilean.simps(4) times_trilean.simps(5) times_trilean.simps(6) times_trilean.simps(7) trilean.exhaust)
qed
(* Discharge the semiring class obligations with the lemmas above. *)
instance
apply standard
apply (simp add: plus_trilean_assoc)
apply (simp add: plus_trilean_commutative)
apply (simp add: times_trilean_assoc)
apply (simp add: trilean_distributivity_1)
using times_trilean_commutative trilean_distributivity_1 by auto
end
(* Both operations are idempotent, so the semiring is idempotent. *)
lemma maybe_or_idempotent: "a \<or>? a = a"
apply (cases a)
by auto
lemma maybe_and_idempotent: "a \<and>? a = a"
apply (cases a)
by auto
(* Partial order induced by +: a \<le> b iff a + b = b. *)
instantiation trilean :: ord begin
definition less_eq_trilean :: "trilean \<Rightarrow> trilean \<Rightarrow> bool" where
"less_eq_trilean a b = (a + b = b)"
definition less_trilean :: "trilean \<Rightarrow> trilean \<Rightarrow> bool" where
"less_trilean a b = (a \<le> b \<and> a \<noteq> b)"
declare less_trilean_def less_eq_trilean_def [simp]
instance
by standard
end
(* Negation flips true/false and preserves invalid. *)
instantiation trilean :: uminus begin
fun maybe_not :: "trilean \<Rightarrow> trilean" ("\<not>? _" [60] 60) where
"\<not>? true = false" |
"\<not>? false = true" |
"\<not>? invalid = invalid"
instance
by standard
end
(* Unit laws and basic simplification facts. *)
lemma maybe_and_one: "true \<and>? x = x"
by (cases x, auto)
lemma maybe_or_zero: "false \<or>? x = x"
by (cases x, auto)
lemma maybe_double_negation: "\<not>? \<not>? x = x"
by (cases x, auto)
lemma maybe_negate_true: "(\<not>? x = true) = (x = false)"
by (cases x, auto)
lemma maybe_negate_false: "(\<not>? x = false) = (x = true)"
by (cases x, auto)
lemma maybe_and_true: "(x \<and>? y = true) = (x = true \<and> y = true)"
using times_trilean.elims by blast
lemma maybe_and_not_true:
"(x \<and>? y \<noteq> true) = (x \<noteq> true \<or> y \<noteq> true)"
by (simp add: maybe_and_true)
lemma negate_valid: "(\<not>? x \<noteq> invalid) = (x \<noteq> invalid)"
by (metis maybe_double_negation maybe_not.simps(3))
(* Validity propagation: both operations are strict in invalid. *)
lemma maybe_and_valid:
"x \<and>? y \<noteq> invalid \<Longrightarrow> x \<noteq> invalid \<and> y \<noteq> invalid"
using times_trilean.elims by blast
lemma maybe_or_valid:
"x \<or>? y \<noteq> invalid \<Longrightarrow> x \<noteq> invalid \<and> y \<noteq> invalid"
using plus_trilean.elims by blast
lemma maybe_or_false:
"(x \<or>? y = false) = (x = false \<and> y = false)"
using plus_trilean.elims by blast
lemma maybe_or_true:
"(x \<or>? y = true) = ((x = true \<or> y = true) \<and> x \<noteq> invalid \<and> y \<noteq> invalid)"
using plus_trilean.elims by blast
lemma maybe_not_invalid: "(\<not>? x = invalid) = (x = invalid)"
by (metis maybe_double_negation maybe_not.simps(3))
lemma maybe_or_invalid:
"(x \<or>? y = invalid) = (x = invalid \<or> y = invalid)"
using plus_trilean.elims by blast
lemma maybe_and_invalid:
"(x \<and>? y = invalid) = (x = invalid \<or> y = invalid)"
using times_trilean.elims by blast
lemma maybe_and_false:
"(x \<and>? y = false) = ((x = false \<or> y = false) \<and> x \<noteq> invalid \<and> y \<noteq> invalid)"
using times_trilean.elims by blast
lemma invalid_maybe_and: "invalid \<and>? x = invalid"
using maybe_and_valid by blast
lemma maybe_not_eq: "(\<not>? x = \<not>? y) = (x = y)"
by (metis maybe_double_negation)
(* De Morgan's laws hold in Bochvar logic. *)
lemma de_morgans_1:
"\<not>? (a \<or>? b) = (\<not>?a) \<and>? (\<not>?b)"
by (metis (no_types, hide_lams) add.commute invalid_maybe_and maybe_and_idempotent maybe_and_one maybe_not.elims maybe_not.simps(1) maybe_not.simps(3) maybe_not_invalid maybe_or_zero plus_trilean.simps(1) plus_trilean.simps(4) times_trilean.simps(1) times_trilean_commutative trilean.exhaust trilean.simps(6))
lemma de_morgans_2:
"\<not>? (a \<and>? b) = (\<not>?a) \<or>? (\<not>?b)"
by (metis de_morgans_1 maybe_double_negation)
lemma not_true: "(x \<noteq> true) = (x = false \<or> x = invalid)"
by (metis (no_types, lifting) maybe_not.cases trilean.distinct(1) trilean.distinct(3))
lemma pull_negation: "(x = \<not>? y) = (\<not>? x = y)"
using maybe_double_negation by auto
(* Commutativity facts needed to fold the operations over finite sets. *)
lemma comp_fun_commute_maybe_or: "comp_fun_commute maybe_or"
apply standard
apply (simp add: comp_def)
apply (rule ext)
by (simp add: add.left_commute)
lemma comp_fun_commute_maybe_and: "comp_fun_commute maybe_and"
apply standard
apply (simp add: comp_def)
apply (rule ext)
by (metis add.left_commute de_morgans_2 maybe_not_eq)
end
|
{"author": "jmafoster1", "repo": "efsm-isabelle", "sha": "fde322562b98c9b4618c112e36a6ac5b9a056610", "save_path": "github-repos/isabelle/jmafoster1-efsm-isabelle", "path": "github-repos/isabelle/jmafoster1-efsm-isabelle/efsm-isabelle-fde322562b98c9b4618c112e36a6ac5b9a056610/Trilean.thy"}
|
"""
4차원 데이터를 2차원으로 변환한 후에 max pooling 구현
"""
import numpy as np
from common.util import im2col
if __name__ == '__main__':
np.random.seed(116)
# 가상의 이미지 데이터(c,h,w) = (3,4,4) 1개를 난수로 생성 -> (1,3,4,4)
x = np.random.randint(10, size=(1, 3, 4, 4))
print(x, 'shape:', x.shape)
# 4차원 데이터를 2차원 ndarray로 변환
col = im2col(x, filter_h=2, filter_w=2, stride=2, pad=0)
print(col, 'shape:', col.shape) # 4*12
# max pooling : 채널별로 최댓값을 찾음
# 채널별 최댓값을 쉽게 찾기 위해 2차원 배열의 Shape을 변환
col = col.reshape(-1, 2 * 2) # (-1, fh*fw)
print(col, 'shape:', col.shape)
# 각 행(row)에서 최댓값을 찾음.
out = np.max(col, axis=1)
print(out, 'shape:', out.shape)
# 1차원 pooling의 결과를 4차원으로 변환: (n, oh, ow, c) → (n, c, oh, ow)
out = out.reshape(1, 2, 2, 3)
print(out)
out = out.transpose(0, 3, 1, 2)
|
{"hexsha": "66b3b4c5839e4b3791679cbcec1b4a2288ee9657", "size": 830, "ext": "py", "lang": "Python", "max_stars_repo_path": "ch07/ex11_pooling.py", "max_stars_repo_name": "lee-hyeonseung/lab_dl", "max_stars_repo_head_hexsha": "b8906247b6e0e2586f538081e2efaf47dac34972", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-01-08T09:14:46.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-08T09:14:46.000Z", "max_issues_repo_path": "ch07/ex11_pooling.py", "max_issues_repo_name": "lee-hyeonseung/lab_dl", "max_issues_repo_head_hexsha": "b8906247b6e0e2586f538081e2efaf47dac34972", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ch07/ex11_pooling.py", "max_forks_repo_name": "lee-hyeonseung/lab_dl", "max_forks_repo_head_hexsha": "b8906247b6e0e2586f538081e2efaf47dac34972", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.9375, "max_line_length": 64, "alphanum_fraction": 0.5686746988, "include": true, "reason": "import numpy", "num_tokens": 395}
|
import os
import subprocess
import click
import numpy as np
import fitsio
import esutil.numpy_util
import sep
from lsst.daf.persistence import Butler
from sxdes import run_sep
from ssi_tools.layout_utils import make_hexgrid_for_tract
from fsi_tools.matching import do_balrogesque_matching
from desc_dc2_dm_data import REPOS
# Enlarge SEP's internal pixel stack so detection on full patches cannot
# overflow it (default is much smaller).
sep.set_extract_pixstack(1_000_000)

# this list is hard coded - the gen 2 butler doesn't have a method for introspection
# Valid DC2 Run2.2i tract ids; used to validate the --tract CLI argument.
DC2_TRACTS = set(
    [
        2723, 2730, 2897, 2904, 3076, 3083, 3259, 3266, 3445, 3452, 3635, 3642, 3830,
        3837, 4028, 4035, 4230, 4428, 4435, 4636, 4643, 4851, 4858, 5069, 2724, 2731,
        2898, 2905, 3077, 3084, 3260, 3267, 3446, 3453, 3636, 3643, 3831, 4022, 4029,
        4224, 4231, 4429, 4436, 4637, 4644, 4852, 4859, 5070, 2725, 2732, 2899, 2906,
        3078, 3085, 3261, 3268, 3447, 3454, 3637, 3825, 3832, 4023, 4030, 4225, 4232,
        4430, 4437, 4638, 4645, 4853, 4860, 5071, 2726, 2733, 2900, 2907, 3079, 3086,
        3262, 3441, 3448, 3631, 3638, 3826, 3833, 4024, 4031, 4226, 4233, 4431, 4438,
        4639, 4646, 4854, 5065, 5072, 3451, 2727, 2734, 2901, 2908, 3080, 3256, 3263,
        3442, 3449, 3632, 3639, 3827, 3834, 4025, 4032, 4227, 4234, 4432, 4439, 4640,
        4647, 4855, 5066, 5073, 2728, 2735, 2902, 3074, 3081, 3257, 3264, 3443, 3450,
        3633, 3640, 3828, 3835, 4026, 4033, 4228, 4235, 4433, 4440, 4641, 4648, 4856,
        5067, 5074, 2729, 2896, 2903, 3075, 3082, 3258, 3265, 3444, 3634, 3641, 3829,
        3836, 4027, 4034, 4229, 4236, 4434, 4441, 4642, 4850, 4857, 5068,
    ]
)

# DC2 truth catalog to use as injected sources
DC2_TRUTH_CAT = (
    "/global/cfs/cdirs/lsst/groups/fake-source-injection/DC2/catalogs/"
    "cosmoDC2_v1.1.4_small_fsi_catalog.fits"
)

# Scratch-space butler repo that insertFakes.py writes into.
OUTPUT_BUTLER = os.path.expandvars(os.path.join("$SCRATCH", "butler_coadd_sep"))
# Directory (relative to the CWD) for the input/output FITS catalogs.
OUTPUT_DIR = "ssi_cats"
def _run_sep_and_add_radec(ti, img, zp, err=None, minerr=None):
    """Run SEP detection on an image and append ra/dec/mag_auto columns.

    Parameters
    ----------
    ti : tract info object providing ``getWcs()`` for pixel->sky conversion.
    img : an LSST exposure-like object (with ``.image.array`` and
        ``.variance.array``) or a plain 2-D numpy array of pixel values.
    zp : float, magnitude zero point used to compute ``mag_auto``.
    err : optional 2-D error map; if None it is derived from the exposure's
        variance plane (requires an exposure-like ``img``).
    minerr : optional floor applied elementwise to ``err``.

    Returns
    -------
    (cat, seg) : the SEP catalog (with ra/dec/mag_auto added) and seg map.
    """
    if err is None:
        err = np.sqrt(img.variance.array.copy())
    # BUG FIX: this extraction used to run unconditionally, which crashed when
    # a caller passed a bare numpy array (e.g. the truth-image difference).
    if hasattr(img, "image"):
        img = img.image.array.copy()

    if minerr is not None:
        # impose a floor on the error map so SEP's threshold is nonzero
        msk = err < minerr
        err[msk] = minerr

    cat, seg = run_sep(
        img,
        err,
    )
    cat = esutil.numpy_util.add_fields(
        cat,
        [("ra", "f8"), ("dec", "f8"), ("mag_auto", "f8")]
    )
    wcs = ti.getWcs()
    cat["ra"], cat["dec"] = wcs.pixelToSkyArray(cat["x"], cat["y"], degrees=True)
    cat["mag_auto"] = zp - 2.5*np.log10(cat["flux_auto"])
    return cat, seg
@click.command()
@click.option(
    '--tract', type=int, default=None, help='the tract to process', required=True
)
@click.option(
    '--patch', type=int, default=None, help='the patch to process', required=True
)
@click.option('--seed', type=int, default=None, help='seed for the RNG', required=True)
def main(tract, patch, seed):
    """Run SSI on a DC2 tract and patch.

    Pipeline: validate tract/patch, build an injection catalog on a hex grid
    cut to the patch, run the stack's insertFakes.py into a scratch butler,
    detect with SEP on the original and injected coadds, match balrog-style,
    and write all catalogs to a single FITS file.
    """
    # first we need to extract the tract and patch from the butler in order to
    # setup the source catalog
    butler = Butler(REPOS["2.2i_dr6_wfd"])
    skymap = butler.get("deepCoadd_skyMap")

    if tract not in DC2_TRACTS:
        raise RuntimeError("Tract %d is not valid for DC2!" % tract)
    ti = skymap[tract]

    if patch < 0 or patch >= len(ti):
        raise RuntimeError(
            "patch %d is not valid for tract %d (has only %d patches)!" % (
                # BUG FIX: this used to read len(tract), which raises
                # TypeError since tract is an int; the patch count is len(ti)
                patch, tract, len(ti)
            )
        )

    # now we are making the truth catalog
    # - we cut to things brighter than mag 25 to avoid injecting gobs of faint things
    # - we cut the injection catalog to the patch boundaries in order to avoid drawing
    #   extra stuff
    # - we have to write the tract sources to disk for the stack task
    grid = make_hexgrid_for_tract(ti, rng=seed)
    srcs = fitsio.read(DC2_TRUTH_CAT)
    msk = srcs["rmagVar"] <= 25
    srcs = srcs[msk]

    # draw sources (with replacement) to populate every grid point
    rng = np.random.RandomState(seed=seed)
    inds = rng.choice(len(srcs), size=len(grid), replace=True)
    tract_sources = srcs[inds].copy()
    tract_sources["raJ2000"] = np.deg2rad(grid["ra"])
    tract_sources["decJ2000"] = np.deg2rad(grid["dec"])

    # keep only grid points inside this patch's outer bounding box
    pi = ti[patch]
    msk = pi.getOuterBBox().contains(grid["x"], grid["y"])
    tract_sources = tract_sources[msk]

    subprocess.run("mkdir -p " + OUTPUT_DIR, shell=True, check=True)
    ssi_src_file = os.path.join(
        OUTPUT_DIR, "ssi_input_tract%d_patch%d.fits" % (tract, patch)
    )
    fitsio.write(
        ssi_src_file,
        tract_sources,
        clobber=True,
    )

    # now we need to run the SSI
    # for this we need to define an output butler area
    subprocess.run("mkdir -p " + OUTPUT_BUTLER, shell=True, check=True)

    cmd = """\
insertFakes.py \
/global/cfs/cdirs/lsst/production/DC2_ImSim/Run2.2i/desc_dm_drp/v19.0.0-v1\
/rerun/run2.2i-coadd-wfd-dr6-v1 \
--output %s/ \
--id tract=%d patch=%s \
filter=r -c fakeType=%s \
--clobber-config --no-versions
""" % (OUTPUT_BUTLER, tract, "%d,%d" % pi.getIndex(), ssi_src_file)
    subprocess.run(cmd, shell=True, check=True)

    # from here we have images with the sources on disk
    # we are going to read them back in, make a few catalogs, and output the data
    output_butler = Butler(OUTPUT_BUTLER)
    bbox = pi.getOuterBBox()
    coaddId = {
        'tract': ti.getId(),
        'patch': "%d,%d" % pi.getIndex(),
        'filter': 'r'
    }
    image = output_butler.get(
        "deepCoadd_sub", bbox=bbox, immediate=True, dataId=coaddId
    )
    fake_image = output_butler.get(
        "fakes_deepCoadd_sub", bbox=bbox, immediate=True, dataId=coaddId
    )
    # zero point for converting instrumental fluxes to magnitudes
    zp = 2.5*np.log10(image.getPhotoCalib().getInstFluxAtZeroMagnitude())

    orig_det_cat, orig_det_seg = _run_sep_and_add_radec(ti, image, zp)
    ssi_det_cat, ssi_det_seg = _run_sep_and_add_radec(ti, fake_image, zp)
    # detect on the injected-minus-original difference image with a flat
    # error floor (mean sky sigma) so the truth detections are well defined
    ssi_truth_cat, ssi_truth_seg = _run_sep_and_add_radec(
        ti,
        (fake_image.image.array - image.image.array).copy(),
        zp,
        np.zeros_like(np.sqrt(fake_image.variance.array.copy())),
        minerr=np.mean(np.sqrt(fake_image.variance.array.copy())),
    )

    # balrog-style matching of injected detections to originals and truth
    match_flag, match_index = do_balrogesque_matching(
        ssi_det_cat, orig_det_cat, ssi_truth_cat, "flux_auto",
    )
    ssi_det_cat = esutil.numpy_util.add_fields(
        ssi_det_cat,
        [("match_flag", "i4"), ("match_index", "i8")]
    )
    ssi_det_cat["match_flag"] = match_flag
    ssi_det_cat["match_index"] = match_index

    output_fname = "ssi_data_tract%d_patch%d.fits" % (tract, patch)
    with fitsio.FITS(
        os.path.join(OUTPUT_DIR, output_fname), "rw", clobber=True
    ) as fits:
        fits.write(orig_det_cat, extname="orig_cat")
        fits.write(ssi_det_cat, extname="ssi_cat")
        fits.write(ssi_truth_cat, extname="truth_cat")
|
{"hexsha": "b8a95685409aa11e505ea51cdc0549a0c037b275", "size": 6766, "ext": "py", "lang": "Python", "max_stars_repo_path": "2020_07_16_coadd_sep/process_tract_patch.py", "max_stars_repo_name": "LSSTDESC/ssi-cosmodc2", "max_stars_repo_head_hexsha": "678e4f708018e492f2232b8e5cd3a7845341c5f0", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "2020_07_16_coadd_sep/process_tract_patch.py", "max_issues_repo_name": "LSSTDESC/ssi-cosmodc2", "max_issues_repo_head_hexsha": "678e4f708018e492f2232b8e5cd3a7845341c5f0", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-07-23T17:32:44.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-03T19:21:13.000Z", "max_forks_repo_path": "2020_07_16_coadd_sep/process_tract_patch.py", "max_forks_repo_name": "LSSTDESC/ssi-cosmodc2", "max_forks_repo_head_hexsha": "678e4f708018e492f2232b8e5cd3a7845341c5f0", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-01T16:49:50.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-01T16:49:50.000Z", "avg_line_length": 34.8762886598, "max_line_length": 87, "alphanum_fraction": 0.6477978126, "include": true, "reason": "import numpy", "num_tokens": 2313}
|
[STATEMENT]
lemma analz_insert_MPair [simp]:
"analz (insert \<lbrace>X,Y\<rbrace> H) =
insert \<lbrace>X,Y\<rbrace> (analz (insert X (insert Y H)))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. analz (insert \<lbrace>X, Y\<rbrace> H) = insert \<lbrace>X, Y\<rbrace> (analz (insert X (insert Y H)))
[PROOF STEP]
apply (rule equalityI)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. analz (insert \<lbrace>X, Y\<rbrace> H) \<subseteq> insert \<lbrace>X, Y\<rbrace> (analz (insert X (insert Y H)))
2. insert \<lbrace>X, Y\<rbrace> (analz (insert X (insert Y H))) \<subseteq> analz (insert \<lbrace>X, Y\<rbrace> H)
[PROOF STEP]
apply (rule subsetI)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>x. x \<in> analz (insert \<lbrace>X, Y\<rbrace> H) \<Longrightarrow> x \<in> insert \<lbrace>X, Y\<rbrace> (analz (insert X (insert Y H)))
2. insert \<lbrace>X, Y\<rbrace> (analz (insert X (insert Y H))) \<subseteq> analz (insert \<lbrace>X, Y\<rbrace> H)
[PROOF STEP]
apply (erule analz.induct, auto)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x. x \<in> analz (insert X (insert Y H)) \<Longrightarrow> x \<in> analz (insert \<lbrace>X, Y\<rbrace> H)
[PROOF STEP]
apply (erule analz.induct)
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. \<And>x Xa. Xa \<in> insert X (insert Y H) \<Longrightarrow> Xa \<in> analz (insert \<lbrace>X, Y\<rbrace> H)
2. \<And>x Xa Ya. \<lbrakk>\<lbrace>Xa, Ya\<rbrace> \<in> analz (insert X (insert Y H)); \<lbrace>Xa, Ya\<rbrace> \<in> analz (insert \<lbrace>X, Y\<rbrace> H)\<rbrakk> \<Longrightarrow> Xa \<in> analz (insert \<lbrace>X, Y\<rbrace> H)
3. \<And>x Xa Ya. \<lbrakk>\<lbrace>Xa, Ya\<rbrace> \<in> analz (insert X (insert Y H)); \<lbrace>Xa, Ya\<rbrace> \<in> analz (insert \<lbrace>X, Y\<rbrace> H)\<rbrakk> \<Longrightarrow> Ya \<in> analz (insert \<lbrace>X, Y\<rbrace> H)
4. \<And>x K Xa. \<lbrakk>Crypt K Xa \<in> analz (insert X (insert Y H)); Crypt K Xa \<in> analz (insert \<lbrace>X, Y\<rbrace> H); Key (invKey K) \<in> analz (insert X (insert Y H)); Key (invKey K) \<in> analz (insert \<lbrace>X, Y\<rbrace> H)\<rbrakk> \<Longrightarrow> Xa \<in> analz (insert \<lbrace>X, Y\<rbrace> H)
[PROOF STEP]
apply (blast intro: analz.Fst analz.Snd)+
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 975, "file": null, "length": 6}
|
# -*- coding: utf-8 -*-
"""
============================================================================
Authors:
Edwin Alvarez-Mamani and Jose Luis Soncco-Alvarez*
*Department of Informatics
Universidad Nacional de San Antonio Abad del Cusco (UNSAAC) - Perú
============================================================================
"""
# Python: 3.8.x
"""
Script for evaluate best topology (static and dinamic) about convergente
"""
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter # useful for `logit` scale
import numpy as np
from utils import topology, dataset
print("******* START *******")
dataset_topology = [
[9, 5, 2, 3, 7, 8, 4, 1, 6], # d 20
[], # d 21
[5, 7, 2, 6, 8, 9, 3, 1, 4], # d 22
[9, 5, 1, 2, 8, 3, 4, 6, 7], # d 23
[8, 7, 1, 5, 2, 4, 3, 6, 9], # d 24
[7, 8, 1, 5, 6, 4, 2, 3, 9], # d 25
[9, 8, 4, 5, 1, 2, 6, 7, 3], # d 26
[7, 4, 1, 2, 8, 9, 3, 5, 6], # d 27
[8, 6, 3, 4, 5, 7, 1, 2, 9], # d 28
[] # d 29
]
data = {}
for index, index_topology in enumerate([0, 1, 2, 3, 4, 5, 6, 7, 8]): # change [0, 1, 2, 3, 4, 5, 6, 7, 8]
rankig = [] # rankig by metric
# load data for plot
for index_dataset in [20, 22, 23, 24, 25, 26, 27, 28]: # change [0, ..., 29]
rankig.append(dataset_topology[index_dataset - 20][index])
data[topology[index_topology]] = np.sum(rankig)
names = list(data.keys())
values = list(data.values())
fig, axs = plt.subplots(1, 1, figsize=(9, 3), sharey=True)
axs.bar(names, values)
fig.suptitle("Best Topology")
# naming the x axis
plt.xlabel("Topology")
# naming the y axis
plt.ylabel("Rankig metric")
# giving a title to my graph
# plt.title("Best Topology")
# add grid
plt.grid()
plt.show()
print("******* END *******")
# Run:
# python2 graphic_convergence_topology_v2.py
|
{"hexsha": "7ba507e2f7051b1a2e1f3de2b40561ee6c497dac", "size": 1797, "ext": "py", "lang": "Python", "max_stars_repo_path": "graphic_convergence_topology_v2.py", "max_stars_repo_name": "win7/parallel_social_spider_optimization", "max_stars_repo_head_hexsha": "9dbad144e4242fef2ff6aacc8e72376e14b03a61", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-10-02T15:49:18.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-02T15:49:18.000Z", "max_issues_repo_path": "graphic_convergence_topology_v2.py", "max_issues_repo_name": "win7/parallel_social_spider_optimization", "max_issues_repo_head_hexsha": "9dbad144e4242fef2ff6aacc8e72376e14b03a61", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "graphic_convergence_topology_v2.py", "max_forks_repo_name": "win7/parallel_social_spider_optimization", "max_forks_repo_head_hexsha": "9dbad144e4242fef2ff6aacc8e72376e14b03a61", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.6461538462, "max_line_length": 107, "alphanum_fraction": 0.5459098497, "include": true, "reason": "import numpy", "num_tokens": 700}
|
The address(rifle range, 38.5361, 121.7508) behind King Hall hasn't been a rifle range for years. It currently houses some of the business offices for Facilities Management. There is an almost completely faded RIFLE RANGE sign above the door (you'll have to look very closely!)
At one point this was the ROTC rifle range and held their armory. It has been remodeled to the point where the main clue for its previous life, aside from its name, is its thick roof.
Where, exactly, was this? users/JoePomidor
20080626 21:21:41 nbsp I took Basic Riflery here in Winter Quarter 1996 as a PE class. It was pretty fun. We shot .22caliber rifles, IIRC, and I seem to remember there being six or eight lanes. Where does ROTC practice now? Users/MattJurach
|
{"hexsha": "d02c6cab441888b5d59a88672313e38a9be75de2", "size": 754, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Rifle_Range.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Rifle_Range.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Rifle_Range.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 62.8333333333, "max_line_length": 274, "alphanum_fraction": 0.7785145889, "num_tokens": 199}
|
+incdir+./
+incdir+../../
FPU_F32_ADD.sv
FPU_F32_DIV.sv
FPU_F32_MUL.sv
FPU_F32_to_INT.sv
FPU_INT_to_F32.sv
top.sv
|
{"hexsha": "9b080d07b2bc847003b4de20278cb2829101ce35", "size": 115, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "Meitner/System/HDL/DUTs/FPU/top.f", "max_stars_repo_name": "testdrive-profiling-master/profiles", "max_stars_repo_head_hexsha": "6e3854874366530f4e7ae130000000812eda5ff7", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Meitner/System/HDL/DUTs/FPU/top.f", "max_issues_repo_name": "testdrive-profiling-master/profiles", "max_issues_repo_head_hexsha": "6e3854874366530f4e7ae130000000812eda5ff7", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Meitner/System/HDL/DUTs/FPU/top.f", "max_forks_repo_name": "testdrive-profiling-master/profiles", "max_forks_repo_head_hexsha": "6e3854874366530f4e7ae130000000812eda5ff7", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 11.5, "max_line_length": 17, "alphanum_fraction": 0.7652173913, "num_tokens": 58}
|
[STATEMENT]
lemma mapCollect_const[simp]:
"m \<noteq> Map.empty \<Longrightarrow> {e | k\<mapsto>v\<in>m} = {e}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. m \<noteq> Map.empty \<Longrightarrow> {e |k\<mapsto>v\<in>m} = {e}
[PROOF STEP]
unfolding mapCollect_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. m \<noteq> Map.empty \<Longrightarrow> {uu_. \<exists>k v. uu_ = e \<and> m k = Some v} = {e}
[PROOF STEP]
by auto
|
{"llama_tokens": 186, "file": "Launchbury_AList-Utils", "length": 2}
|
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import division
from __future__ import print_function
import numpy as np
import os
from os.path import join
import torch
import pandas as pd
import scipy.sparse as sp
from scipy.sparse import coo_matrix
from torch.utils.data import Dataset
import sklearn
import logging
from core.utils import data_utils
from core.utils import settings
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s') # include timestamp
class PairedSubgraphDataset(Dataset):
    """Paired ego-network dataset for graph matching.

    Loads pre-computed adjacency matrices, node embeddings, labels, vertex
    ids, and vertex types from ``file_dir`` and serves one
    (graph, label, vertices, vertex_types) tuple per sample.
    """

    def __init__(self, file_dir, seed, shuffle):
        self.file_dir = file_dir

        # load subgraphs
        logger.info('loading adjs...')
        self.graphs = np.load(join(file_dir, 'adjacency_matrix.npy'))
        logger.info('adjs loaded')

        # add self-loops, binarize, then store compactly as unsigned bytes
        identity = np.identity(self.graphs.shape[1]).astype(np.bool_)
        self.graphs += identity
        self.graphs[self.graphs != 0] = 1.0
        self.graphs = self.graphs.astype(np.dtype('B'))
        logger.info('graph processed.')
        self.ego_size = self.graphs.shape[1]

        # load node features
        node_to_vec = data_utils.load_vectors(file_dir, 'entity_node_emb.vec')
        logger.info("input node features loaded!")

        # load labels
        self.labels = np.load(os.path.join(file_dir, "label.npy"))
        # BUG FIX: np.long was deprecated and then removed in NumPy 1.24;
        # int64 is the dtype torch expects for class labels
        self.labels = self.labels.astype(np.int64)
        logger.info("labels loaded!")

        # load vertices
        self.vertices = np.load(join(file_dir, 'vertex_id.npy'))
        logger.info('vertices loaded')

        # load vertex types
        self.vertex_types = np.load(os.path.join(file_dir, 'vertex_types.npy'))
        logger.info('vertex types loaded')

        if shuffle:
            # shuffle all per-sample arrays in lockstep for reproducibility
            self.graphs, self.labels, self.vertices, self.vertex_types = \
                sklearn.utils.shuffle(
                    self.graphs, self.labels, self.vertices, self.vertex_types,
                    random_state=seed
                )

        # build a global id -> contiguous index map over all vertex ids
        logger.info('constructing node map...')
        self.all_nodes = set(self.vertices.flatten())
        self.all_nodes_list = list(self.all_nodes)
        self.n_nodes = len(self.all_nodes)
        logger.info('all node count %d', self.n_nodes)
        self.id2idx = {item: i for i, item in enumerate(self.all_nodes_list)}
        # BUG FIX: np.long -> np.int64 (removed alias), see note above
        self.vertices = np.array(list(map(self.id2idx.get, self.vertices.flatten())),
                                 dtype=np.int64).reshape(self.vertices.shape)  # convert to idx

        # order node features; nodes without a pretrained embedding get a
        # random normal vector instead
        self.node_feature_dim = len(node_to_vec[list(node_to_vec.keys())[0]])
        logger.info('input node features dim %d', self.node_feature_dim)
        vertex_features = np.zeros((self.n_nodes, self.node_feature_dim))
        n_hit_emb = 0
        for i, eid in enumerate(self.all_nodes_list):
            if i % 10000 == 0:
                logger.info('construct node %d features, n_hit_emb %d', i, n_hit_emb)
            if eid in node_to_vec:
                vertex_features[i] = node_to_vec[eid]
                n_hit_emb += 1
            else:
                vertex_features[i] = np.random.normal(size=(self.node_feature_dim, ))
        self.node_features = torch.FloatTensor(vertex_features)
        self.vertex_types = torch.FloatTensor(self.vertex_types)

        self.N = len(self.graphs)
        logger.info("%d pair ego networks loaded, each with size %d" % (self.N, self.graphs.shape[1]))

    def get_embedding(self):
        """Return the (n_nodes, dim) float tensor of node input features."""
        return self.node_features

    def get_node_input_feature_dim(self):
        """Return the dimensionality of the input node features."""
        return self.node_feature_dim

    def __len__(self):
        return self.N

    def __getitem__(self, idx):
        return self.graphs[idx], self.labels[idx], self.vertices[idx], self.vertex_types[idx]
if __name__ == '__main__':
    # Smoke test: build the dataset from the configured author-data directory.
    dataset = PairedSubgraphDataset(file_dir=settings.AUTHOR_DATA_DIR, seed=42, shuffle=True)
|
{"hexsha": "c79941be7c198e2be99c933c77a5d0e59104cff6", "size": 3949, "ext": "py", "lang": "Python", "max_stars_repo_path": "core/gat/data_loader.py", "max_stars_repo_name": "awesome-archive/OAG", "max_stars_repo_head_hexsha": "551a237e8aa1fd6642b6c89f0fdb545104c09712", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 50, "max_stars_repo_stars_event_min_datetime": "2019-08-02T05:46:55.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T02:01:52.000Z", "max_issues_repo_path": "core/gat/data_loader.py", "max_issues_repo_name": "awesome-archive/OAG", "max_issues_repo_head_hexsha": "551a237e8aa1fd6642b6c89f0fdb545104c09712", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-08-14T07:51:49.000Z", "max_issues_repo_issues_event_max_datetime": "2019-08-16T07:22:24.000Z", "max_forks_repo_path": "core/gat/data_loader.py", "max_forks_repo_name": "awesome-archive/OAG", "max_forks_repo_head_hexsha": "551a237e8aa1fd6642b6c89f0fdb545104c09712", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 15, "max_forks_repo_forks_event_min_datetime": "2019-07-30T07:32:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-09T13:28:29.000Z", "avg_line_length": 34.6403508772, "max_line_length": 102, "alphanum_fraction": 0.6520638136, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 873}
|
// file: val3_fstream_socket.cpp, style: indent -kr -ci2 -cli2 -i2 -l130 -nut <file>
//
// License http://opensource.org/licenses/BSD-3-Clause
// Copyright (c) 2016 14U2g4ocMy5aB2cY4cmCtbXD6qyNQzujuA (serves donations as well)
// All rights reserved.
//
// assembles string that flows over topic to robot control in VAL3-format;
//
// C++11: snprintf, return values are moved
//
// $ val3_fstream_socket::val3_fstream_socket(<1:sin_addr> <2:sin_port> <3:be_verbose:0/1(int)>)
// <1:groupIP> server-side socket to connect to.
// <2:socketPort> server-side port.
//
#include <arpa/inet.h>
#include <fcntl.h>
#include <netdb.h>
#include <signal.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>
#include <cerrno>
#include <cmath>
#include <iomanip>
#include <sstream>
#include <string>
#include <vector>
// boost
#include <boost/algorithm/string.hpp>
// ros
#include <ros/console.h>
#include <ros/ros.h>
#include <sensor_msgs/JointState.h>
#include <std_msgs/Float32MultiArray.h>
#include <std_msgs/String.h>
// local
#include "val3_fstream_socket.h"
#include "custom_defines.h"
// constructor
// Default-constructs the wrapper; the socket itself is opened later via
// manageSocket()/setup_socket().
val3_fstream_socket::val3_fstream_socket()
{
}

// close socket upon destruction
// NOTE(review): manageSocket compares i against (0 || 21 || 31), i.e. i == 1,
// so mode 0 never actually closes fd_ — confirm/fix in manageSocket.
val3_fstream_socket::~val3_fstream_socket()
{
  manageSocket(0);
}
// pre-set socket configurations to setup_socket()
// Dispatch socket lifecycle actions: 0/21/31 close, 22 connect to the robot's
// CS8C controller, 32 connect to the emulator.
void val3_fstream_socket::manageSocket(int i)
{
  // Roboter ip : 10.10.5.11
  // Roboter ip : 131.188.112.73
  // Roboter port 5653 (selm 10210)
  // Roboter Emulator port 5660
  // Pentagon: 131.188.112.158
  // Blackjack: 131.188.112.46
  // Lupus: 131.188.112.159
  // Lux (ubu.): 131.188.112.146
  // BUG FIX: was `i == (0 || 21 || 31)`, which evaluates to `i == 1` and
  // therefore never closed the socket (including from the destructor).
  if (i == 0 || i == 21 || i == 31)
  {
    // just close
    close(fd_);
  }
  else if (i == 22)
  {
    // connect to robots CS8C; close any still-valid descriptor first
    // BUG FIX: F_GETfd_ was a rename typo of the fcntl command F_GETFD
    if (fcntl(fd_, F_GETFD) != -1 || errno != EBADF)
    {
      close(fd_);
    }
    val3_fstream_socket::setup_socket("10.10.5.11", 5653);
  }
  else if (i == 32)
  {
    // connect to emulator; close any still-valid descriptor first
    if (fcntl(fd_, F_GETFD) != -1 || errno != EBADF)
    {
      close(fd_);
    }
    val3_fstream_socket::setup_socket("131.188.112.146", 5660);
  }
}
// connect to partner socket
// Open a TCP stream socket and connect it to the given dotted-quad IP and
// port; on success, send the initialization handshake via prepareCS8C().
// Each failure path prints the errno text and returns, leaving fd_ as-is.
void val3_fstream_socket::setup_socket(const char *numDotIP, int port)
{
  // stream socket: p2p, lossless, ordered
  fd_ = socket(PF_INET, SOCK_STREAM, 0);
  if (fd_ == -1)
  {
    printf(ACRED "val3_fstream_socket::setup_socket: %s" ACRESET "\n", strerror(errno));
    return;
  }
  // specific interface [optional]
  // char *devname = "enp9s0";
  // if (setsockopt(fd_, SOL_SOCKET, SO_BINDTODEVICE, devname, strlen(devname)) == -1) {
  //   printf(ACRED "val3_fstream_socket::setup_socket::setsockopt: %f" ACRESET "\n", strerror(errno));
  //   return;
  // }
  // info struct on the server
  // NOTE(review): gethostbyname is obsolete (removed from POSIX.1-2008);
  // consider getaddrinfo — confirm before changing robot-facing code.
  struct hostent *host;
  struct sockaddr_in addr;
  host = gethostbyname(numDotIP);
  if (host == 0)
  {
    printf(ACRED "val3_fstream_socket::setup_socket, gethostbyname: %s" ACRESET "\n", strerror(errno));
    return;
  }
  // initialize struct members (htons: host -> network byte order)
  addr.sin_family = AF_INET;
  addr.sin_port = htons(port);
  addr.sin_addr = *(struct in_addr *)host->h_addr;
  // NOTE(review): this and the "connected" printf below lack a trailing \n
  printf("val3_fstream_socket::setup_socket: connecting to %s:%d", inet_ntoa(addr.sin_addr), port);
  if (connect(fd_, (struct sockaddr *)&addr, sizeof(addr)) == -1)
  {
    printf(ACRED "val3_fstream_socket::setup_socket, connect: %s" ACRESET "\n", strerror(errno));
    return;
  }
  // output socket is connected
  printf("val3_fstream_socket::setup_socket: connected to %s:%d", inet_ntoa(addr.sin_addr), port);
  prepareCS8C();
}
// send keyword and motion descriptor to CS8C
// Handshake with the CS8C controller: send the "enable" keyword, then the
// default motion descriptor; each send is followed by a blocking read of the
// robot's reply, which is echoed to stdout.
void val3_fstream_socket::prepareCS8C()
{
  char buffer[buffer_size_];
  int n = snprintf(buffer, sizeof(buffer), "enable");
  // truncation check
  if (buffer_size_ <= n || n < 0)
  {
    printf(ACRED "val3_fstream_socket::setup_socket, snprintf: %s" ACRESET "\n", strerror(errno));
  }
  if (send(fd_, buffer, strlen(buffer), 0) == -1)
  {
    printf(ACRED "val3_fstream_socket::setup_socket, send: %s" ACRESET "\n", strerror(errno));
    return;
  }
  printf("val3_fstream_socket::readFstreamBuffer:'%s'\n", readFstreamBuffer().c_str());
  // send motion descriptor (preset 0 = defaults from genmDesc/genConfig)
  int mDescType = 0;
  int configType = 0;
  genmDesc(mDesci, mDescType);
  genConfig(configi, configType);
  // NOTE(review): mDesci.vel is sent twice and mDesci.accel never — the first
  // slot is presumably accel (sendMovej does transmit accel); confirm against
  // the VAL3-side mdesc() parser before changing.
  n = snprintf(buffer, sizeof(buffer), "mdesc(%i,%i,%i,%i,%i,%s,%i,%i)", mDesci.vel, mDesci.vel, mDesci.decel,
               mDesci.tvel, mDesci.rvel, mDesci.blend.c_str(), mDesci.leave, mDesci.reach);
  // truncation check
  if (buffer_size_ <= n || n < 0)
  {
    printf(ACRED "val3_fstream_socket::setup_socket, snprintf: %s" ACRESET "\n", strerror(errno));
  }
  if (send(fd_, buffer, strlen(buffer), 0) == -1)
  {
    printf(ACRED "val3_fstream_socket::setup_socket, send: %s" ACRESET "\n", strerror(errno));
    return;
  }
  printf("val3_fstream_socket::readFstreamBuffer:'%s'\n", readFstreamBuffer().c_str());
}
// initialize motion descriptor
void val3_fstream_socket::genmDesc(mDesc &m, int i)
{
switch (i)
{
default:
m.accel = 100;
m.vel = 100;
m.decel = 100;
m.tvel = 9999;
m.rvel = 9999;
m.blend = "joint";
m.leave = 50;
m.reach = 50;
ROS_WARN("val3_fstream_socket::genmDesc: defaults.");
}
}
// initialize configuration
void val3_fstream_socket::genConfig(config &c, int i)
{
switch (i)
{
default:
c.shoulder = 0;
c.elbow = 0;
c.wrist = 0;
ROS_WARN("val3_fstream_socket::genConfig: defaults.");
}
}
// convert quaternion to euler angles of xyz sequence
// see github.com/ItsmeJulian/quat2eul
// Convert a quaternion (qx, qy, qz, qw) in-place to Euler angles of the
// x-y-z sequence; the results land in (qx, qy, qz) and qw is zeroed.
// see github.com/ItsmeJulian/quat2eul
void val3_fstream_socket::quat2eul(double &qx, double &qy, double &qz, double &qw)
{
  const double angle_x = atan2(2 * (qx * qw - qy * qz), (qw * qw - qx * qx - qy * qy + qz * qz));
  const double angle_y = asin(2 * (qx * qz + qy * qw));
  const double angle_z = atan2(2 * (qz * qw - qx * qy), (qw * qw + qx * qx - qy * qy - qz * qz));
  qx = angle_x;
  qy = angle_y;
  qz = angle_z;
  qw = 0.;
}
// Publish a VAL3 "movej" command built from a Cartesian pose; position is
// converted m -> mm and the quaternion to xyz Euler angles in-place.
void val3_fstream_socket::sendMovej(double x, double y, double z, double qx, double qy, double qz, double qw)
{
  quat2eul(qx, qy, qz, qw);

  std::stringstream cmd;
  cmd << "movej( " << x * 1e3 << ", " << y * 1e3 << ", " << z * 1e3;
  cmd << ", " << qx << ", " << qy << ", " << qz << ", ";
  cmd << configi.shoulder << ", " << configi.elbow << ", " << configi.wrist << ",";
  cmd << mDesci.vel << ":" << mDesci.accel << ":" << mDesci.decel << ":" << mDesci.tvel;
  cmd << ":" << mDesci.rvel << ":" << mDesci.blend << ":" << mDesci.leave << ":" << mDesci.reach << " )";

  std_msgs::String msg_string;
  msg_string.data = cmd.str();
  pub.publish(msg_string);
}
// send joint angles to robot
// Send six joint angles (and the current motion descriptor) to the robot as
// an "mJ(...)" command, then echo the robot's reply.
// NOTE(review): as in prepareCS8C, mDesci.vel appears twice and mDesci.accel
// never — the first slot is presumably accel; confirm against the VAL3 side.
void val3_fstream_socket::sendMJ(std::vector<double> j)
{
  char buffer[buffer_size_];
  int n = snprintf(buffer, sizeof(buffer), "mJ(%f,%f,%f,%f,%f,%f, %i,%i,%i,%i,%i,%s,%i,%i)", j[0], j[1], j[2], j[3],
                   j[4], j[5], mDesci.vel, mDesci.vel, mDesci.decel, mDesci.tvel, mDesci.rvel, mDesci.blend.c_str(),
                   mDesci.leave, mDesci.reach);
  // truncation check
  if (buffer_size_ <= n || n < 0)
  {
    printf(ACRED "val3_fstream_socket::setup_socket, snprintf: %s" ACRESET "\n", strerror(errno));
  }
  if (send(fd_, buffer, strlen(buffer), 0) == -1)
  {
    printf(ACRED "val3_fstream_socket::setup_socket, send: %s" ACRESET "\n", strerror(errno));
    return;
  }
  printf("val3_fstream_socket::readFstreamBuffer:'%s'\n", readFstreamBuffer().c_str());
}
// send trivial string
void val3_fstream_socket::sendString(std::string str, bool readBuff)
{
char buffer[buffer_size_];
int n = snprintf(buffer, sizeof(buffer), "%s", str.c_str());
// truncation check
if (buffer_size_ <= n || n < 0)
{
printf(ACRED "val3_fstream_socket::setup_socket, snprintf: %s" ACRESET "\n", strerror(errno));
}
if (send(fd_, buffer, strlen(buffer), 0) == -1)
{
printf(ACRED "val3_fstream_socket::setup_socket, send: %s" ACRESET "\n", strerror(errno));
return;
}
if (readBuff == true)
{
printf("val3_fstream_socket::readFstreamBuffer:'%s'\n", readFstreamBuffer().c_str());
}
}
// get joint angles in rad from 6DOF-robot
// Query the robot's six joint angles via the "jpos" command and return them
// in radians (the robot replies with comma-separated degrees).
std::vector<double> val3_fstream_socket::getJPos()
{
  // removed an unused local char buffer that was never written or read
  sendString("jpos", false);

  // reply format: "j1,j2,...,j6" in degrees
  std::string reply = readFstreamBuffer();
  std::vector<std::string> fields;
  boost::split(fields, reply, boost::is_any_of(","));

  std::vector<double> joints(fields.size());
  std::size_t idx = 0;
  for (const std::string &field : fields)
  {
    std::istringstream iss(field);
    iss >> joints[idx];
    joints[idx] *= (M_PI / 180);  // degrees -> radians
    ++idx;
  }
  return joints;
}
// read incoming
// Block on recv() and return exactly the bytes received as a std::string.
// Returns an empty string on error.
std::string val3_fstream_socket::readFstreamBuffer()
{
  char response[buffer_size_];
  int byteReceived = recv(fd_, response, sizeof(response), 0);
  if (byteReceived == -1)
  {
    printf(ACRED "val3_fstream_socket::readFstreamBuffer, recv: %s" ACRESET "\n", strerror(errno));
    return "";
  }
  // BUG FIX: recv() does not NUL-terminate, so returning the raw char array
  // read past the received data; construct from the exact byte count instead.
  return std::string(response, byteReceived);
}
// EOF
|
{"hexsha": "22bfc95bb32aa243e7015d01401e992a0ba00cd1", "size": 8957, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "dotnect_platform/src/gen_val3_class.cpp", "max_stars_repo_name": "vwas2/Dotnet_stack", "max_stars_repo_head_hexsha": "77edf5eb3dbea98c1a7c43868b435d862e8058d9", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dotnect_platform/src/gen_val3_class.cpp", "max_issues_repo_name": "vwas2/Dotnet_stack", "max_issues_repo_head_hexsha": "77edf5eb3dbea98c1a7c43868b435d862e8058d9", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dotnect_platform/src/gen_val3_class.cpp", "max_forks_repo_name": "vwas2/Dotnet_stack", "max_forks_repo_head_hexsha": "77edf5eb3dbea98c1a7c43868b435d862e8058d9", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.0570469799, "max_line_length": 118, "alphanum_fraction": 0.6410628559, "num_tokens": 2883}
|
'''
Created on 12 Aug 2020
@author: Tobias Pielok
'''
import numpy as np
from sklearn.decomposition import TruncatedSVD
from scipy.linalg import expm
from scipy.linalg import logm
from typing import List, Tuple
def svd_dmd(ts: np.array, r: int) -> Tuple[np.array, np.array]:
'''
Returns the SVD-DMD of ts.
Implementation of algorithm 1 of Lu, H. and Tartakovsky, D. M. Predictive Accuracy of Dynamic Mode Decomposition. 2019. eprint: arXiv:1905.01587.
:param ts: PxN time series matrix of P timesteps consisting of N-1 features
:param r: dimension of the low-rank space
'''
u, s, vh = np.linalg.svd(ts[:-1, :-1].transpose(), full_matrices=False)
d = min(r, ts.shape[1])
u = u[:, :d]
vh = vh[:d, :]
s = s[:d]
S = u.transpose() @ ts[1:, :-1].transpose() @ \
vh.transpose() @ np.diag(s ** -1)[:d, :d]
eigval, eigvec = np.linalg.eig(S)
Phi = ts[1:,:-1].transpose() @ vh.transpose() @ np.diag(s ** -1) @ eigvec
return Phi, np.diag(eigval)
def dmd_predict(x0: np.array, T: np.array, A: np.array, num_pred: int, timescale: float = 1) -> np.array:
    '''
    Predict uniformly spaced in time via

        x_i = T * A^i * T^-1 * x0

    and return the prediction as a (num_pred)x(N+1) time series
    (with an added time dimension in the last column).

    :param x0: Nx1 initial value of the prediction
    :param T: Nxr projection matrix
    :param A: rxr low-rank prediction matrix
    :param num_pred: number of predictions to be made
    :param timescale: timestep size of the predictions
    '''
    # Project the initial state into the low-rank space.
    b0 = np.linalg.pinv(T) @ x0
    out = np.zeros((num_pred, len(x0) + 1))
    # Last column carries the (uniformly spaced) time stamps.
    out[:, -1] = np.arange(num_pred) * timescale
    for step in range(num_pred):
        # Propagate `step` steps in the low-rank space, then lift back.
        out[step, :-1] = np.real(T @ (np.linalg.matrix_power(A, step) @ b0))
    return out
def grid_search_svd_dmd(ts_centered: np.array, im_dims: List[int], steps: List[int]) -> Tuple[np.array, np.array, int, int, float]:
    '''
    Return the projection and prediction matrices of the best SVD-DMD found
    via grid search on the centered training data.
    Also return the hyperparameters of the best SVD-DMD and its evaluation value.

    :param ts_centered: PxN time series matrix of P timesteps consisting of N-1 features
    :param im_dims: search space of low-rank dimensions
    :param steps: search space of number of steps to be included in estimation step
    '''
    x0 = ts_centered[0, :-1]
    num_preds = ts_centered.shape[0]
    best_value = float('inf')
    best_Phi = best_eigval = best_i = best_t = None
    for dim in im_dims:
        for step in steps:
            # Fit on the first `step` timesteps; evaluate on the full series.
            Phi, eigval = svd_dmd(ts_centered[:step, :], dim)
            mse = np.mean((ts_centered[:, :-1] -
                           dmd_predict(x0, Phi, eigval, num_preds, 1)[:, :-1]) ** 2)
            # Accept only improving candidates whose continuous-time
            # eigenvalues all have negative real part (stability).
            if best_value > mse and (np.real(np.log(np.diag(eigval))) < 0).all():
                best_value = mse
                best_Phi, best_eigval, best_i, best_t = Phi, eigval, dim, step
            # Progress report: current best after each evaluated candidate.
            print(np.round(best_value, 3), best_i, best_t)
    return best_Phi, best_eigval, best_i, best_t, best_value
def random_search_svd_dmd(ts_centered: np.array, im_dims: List[int], steps: List[int], num_tries: int) -> Tuple[np.array, np.array, int, int, float]:
    '''
    Return the projection and prediction matrices of the best SVD-DMD found
    via random search on the centered training data.
    Also return the hyperparameters of the best SVD-DMD and its evaluation value.

    :param ts_centered: PxN time series matrix of P timesteps consisting of N-1 features
    :param im_dims: search space of low-rank dimensions
    :param steps: search space of number of steps to be included in estimation step
    :param num_tries: number of evaluations done by random search
    '''
    x0 = ts_centered[0, :-1]
    num_preds = ts_centered.shape[0]
    best_value = float('inf')
    best_Phi = best_eigval = best_i = best_t = None
    for _ in range(num_tries):
        # Sample a candidate configuration uniformly at random
        # (dimension first, then step count, as in the original ordering).
        dim = np.random.choice(im_dims)
        step = np.random.choice(steps)
        Phi, eigval = svd_dmd(ts_centered[:step, :], dim)
        mse = np.mean((ts_centered[:, :-1] -
                       dmd_predict(x0, Phi, eigval, num_preds, 1)[:, :-1]) ** 2)
        # Accept only improving candidates whose continuous-time eigenvalues
        # all have negative real part (stability).
        if best_value > mse and (np.real(np.log(np.diag(eigval))) < 0).all():
            best_value = mse
            best_Phi, best_eigval, best_i, best_t = Phi, eigval, dim, step
        # Progress report: current best after each evaluation.
        print(np.round(best_value, 3), best_i, best_t)
    return best_Phi, best_eigval, best_i, best_t, best_value
def dmd_real(Phi: np.array, eigval: np.array) -> Tuple[np.array, np.array]:
    '''
    Returns the real SVD-DMD representation as described in Tobias Pielok,
    Residual Enhanced Probabilistic Koopman-based Representation Learning,
    Master's thesis.

    :param Phi: Nxr complex projection matrix
    :param eigval: rxr complex low-rank prediction matrix
    :return: (Phi_r, eigval_r) real-valued projection and prediction matrices
    '''
    D = eigval.shape[0]
    # Start from a scaled complex identity; conjugate eigenvalue pairs get a
    # 2x2 mixing block substituted in below.
    T_star = np.diag(np.repeat(1/np.sqrt(2) + 0j, D))
    is_cmpl = is_complex(np.diag(eigval))
    # 2x2 block mapping a conjugate pair onto its real/imaginary combination.
    T_22_star = 1/np.sqrt(2) * np.array([[1, 1], [1j, -1j]])
    for i in range(D-1):
        if (is_cmpl[i]):
            # assumes the conjugate partner is adjacent at i+1 -- TODO confirm
            is_cmpl[i+1] = False  # already processed here
            T_star[i:(i+2), i:(i+2)] = T_22_star
    # Extra global 1/sqrt(2) scale; since T = pinv(T_star), any scalar factor
    # cancels in the similarity transform T_star @ eigval @ T below.
    T_star = 1/np.sqrt(2) * T_star
    T = np.linalg.pinv(T_star)
    # Similarity-transform the eigenvalue matrix into the real representation.
    eigval_r = np.real(T_star @ eigval @ T)
    Phi_r = np.real(Phi @ T)
    # Sanity check: the transformed projection should be numerically real;
    # report the largest leftover imaginary component otherwise.
    if (np.max(np.imag(Phi @ T)) > 10 ** -9):
        print("Err:", np.max(np.imag(Phi @ T)))
    return Phi_r, eigval_r
def softplus(x):
    """Softplus transform, log(1 + exp(x)), applied elementwise."""
    return np.log(1 + np.exp(x))
def softplus_inverse(x):
    """Approximate inverse of softplus: log(exp(x) - 1), with the 1 nudged
    down to 0.9999999 so that x == 0 does not produce log(0)."""
    shifted = np.exp(x) - 0.9999999
    return np.log(shifted)
def vec_to_K(params: np.array, use_softplus: bool = False) -> np.array:
    '''
    Return the prediction matrix of the associated parameter vector using the
    parametrization of Pan, S. and Duraisamy, K. Physics-Informed
    Probabilistic Learning of Linear Embeddings of Non-linear Dynamics With
    Guaranteed Stability. 2019. eprint: arXiv:1906.03663.

    The result is tridiagonal: diagonal -sigma_i^2 (guaranteeing stability),
    with antisymmetric off-diagonals +/- ceta_i.

    :param params: Mx1 parameter vector (r sigma values, then r-1 ceta values)
    :param use_softplus: flag whether the sigma parameters must be
                         softplus-transformed
    '''
    r = int((len(params) + 1) / 2)
    raw = softplus(params[0:r]) if use_softplus else params[0:r]
    sigma = -(raw ** 2)
    off_diag = params[r:2 * r - 1]
    K = np.diag(sigma)
    for idx in range(r - 1):
        K[idx, idx + 1] = off_diag[idx]
        K[idx + 1, idx] = -off_diag[idx]
    return K
def K_to_vec(K: np.array, timescale: float = 1.0, use_softplus_inv: bool = False):
    '''
    Return the parameter vector of the associated prediction matrix using the
    parametrization of Pan, S. and Duraisamy, K. Physics-Informed
    Probabilistic Learning of Linear Embeddings of Non-linear Dynamics With
    Guaranteed Stability. 2019. eprint: arXiv:1906.03663.

    Inverse of `vec_to_K` (up to the timescale factor).

    :param K: rxr low-rank prediction matrix
    :param timescale: scale time 'dimension' by this value
    :param use_softplus_inv: flag whether the sigma parameters are
                             softplus-transformed
    '''
    # sigma parameters recovered from the (negated, rescaled) diagonal.
    diag_part = np.sqrt(-np.diag(K) / timescale)
    if use_softplus_inv:
        diag_part = softplus_inverse(diag_part)
    # ceta parameters live on the first superdiagonal.
    return np.hstack([diag_part, np.diag(K, 1) / timescale])
def is_complex(x):
    """Elementwise True where the imaginary part of ``x`` exceeds the
    1e-9 magnitude tolerance."""
    imag_magnitude = np.abs(np.imag(x))
    return imag_magnitude > 10 ** -9
def getK(A: np.array) -> Tuple[np.array, np.array]:
    '''
    Returns the transformation and prediction matrices [K, T] of a (complex)
    low-rank prediction matrix using the parametrization of
    Pan, S. and Duraisamy, K. Physics-Informed Probabilistic Learning of
    Linear Embeddings of Non-linear Dynamics With Guaranteed Stability. 2019.
    eprint: arXiv:1906.03663, s.t.

        T * exp(K) * T^-1 = A.

    :param A: rxr (complex) low-rank prediction matrix
    :return: (K, T) with K from `vec_to_K` and T a real change of basis
    '''
    # Continuous-time eigenvalues: if A = exp(K) in the eigenbasis, then
    # log of A's eigenvalues gives K's eigenvalues.
    A_eval, A_evec = np.linalg.eig(A)
    logA_eval = np.log(A_eval)
    # sigma parameters: sqrt of the negated real parts (decay rates).
    sig = [np.sqrt(-np.real(logA_eval[i])) for i in range(len(logA_eval))]
    # ceta parameters: magnitudes of the imaginary parts (rotation rates).
    cet = [np.abs(np.imag(logA_eval[i])) for i in range(len(logA_eval))]
    # Keep one ceta per complex-conjugate pair and zero out the partner;
    # purely real eigenvalues get ceta = 0. Assumes conjugate pairs are
    # adjacent in the eigenvalue ordering -- TODO confirm.
    set_zero = False
    for i in range(len(cet)):
        if (is_complex(logA_eval[i])):
            if (not set_zero):
                set_zero = True
            else:
                cet[i] = 0
                set_zero = False
        else:
            cet[i] = 0
    # vec_to_K expects only r-1 off-diagonal parameters.
    cet = cet[:-1]
    K = vec_to_K(np.hstack([sig, cet]))
    # Choose T so that T @ expm(K) @ T^-1 reproduces A (match eigenbases).
    expK_eval, expK_evec = np.linalg.eig(expm(K))
    return K, np.real(A_evec @ np.linalg.inv(expK_evec))
def extend_mats(T: np.array, K: np.array, diff: int) -> Tuple[np.array, np.array]:
    '''
    Return the SVD-DMD matrices extended by `diff` zero columns and rows
    [T_e, K_e], s.t. T_e in Nx(r + diff), K_e in (r + diff)x(r + diff).

    :param T: Nxr projection matrix
    :param K: rxr low-rank prediction matrix
    :param diff: number of zero columns and rows to be added
    '''
    # Append zero columns to the projection matrix.
    zero_cols = np.zeros((T.shape[0], diff))
    T_ext = np.hstack([T, zero_cols])
    # Zero-pad the prediction matrix on its right and bottom edges.
    K_ext = np.pad(K, ((0, diff), (0, diff)), 'constant', constant_values=(0, 0))
    return T_ext, K_ext
|
{"hexsha": "ca1c26917c6f99c0beb01addb70f57a24e66c439", "size": 9278, "ext": "py", "lang": "Python", "max_stars_repo_path": "PKRL/svd_dmd.py", "max_stars_repo_name": "pkmtum/Probabilistic_Koopman_Learning", "max_stars_repo_head_hexsha": "e91730533e53f897221c3b7a62116a20f45eb59b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-09-14T10:50:53.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-02T21:06:53.000Z", "max_issues_repo_path": "PKRL/svd_dmd.py", "max_issues_repo_name": "pkmtum/Probabilistic_Koopman_Learning", "max_issues_repo_head_hexsha": "e91730533e53f897221c3b7a62116a20f45eb59b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PKRL/svd_dmd.py", "max_forks_repo_name": "pkmtum/Probabilistic_Koopman_Learning", "max_forks_repo_head_hexsha": "e91730533e53f897221c3b7a62116a20f45eb59b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-09-14T13:38:42.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-14T13:38:42.000Z", "avg_line_length": 36.3843137255, "max_line_length": 177, "alphanum_fraction": 0.6138176331, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2658}
|
*DECK DGMRES
      SUBROUTINE DGMRES(N, B, X, NELT, IA, JA, A, ISYM, MATVEC, MSOLVE,
     $     ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT, SB, SX,
     $     RGWK, LRGW, IGWK, LIGW, RWORK, IWORK )
C***BEGIN PROLOGUE DGMRES
C***DATE WRITTEN 890404 (YYMMDD)
C***REVISION DATE 890404 (YYMMDD)
C***CATEGORY NO. D2A4, D2B4
C***KEYWORDS LIBRARY=SLATEC(SLAP),
C TYPE=DOUBLE PRECISION(DGMRES-D),
C Non-Symmetric Linear system, Sparse,
C Iterative Precondition, Generalized Minimum Residual
C***AUTHOR Brown, Peter, (LLNL), brown@lll-crg.llnl.gov
C Hindmarsh, Alan, (LLNL), alanh@lll-crg.llnl.gov
C Seager, Mark K., (LLNL), seager@lll-crg.llnl.gov
C Lawrence Livermore National Laboratory
C PO BOX 808, L-300
C Livermore, CA 94550 (415) 423-3141
C***PURPOSE Preconditioned GMRES iterative sparse Ax=b solver.
C This routine uses the generalized minimum residual
C (GMRES) method with preconditioning to solve
C non-symmetric linear systems of the form: A*x = b.
C***DESCRIPTION
C *Usage:
C INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX
C INTEGER IERR, IUNIT, LRGW, LIGW, IGWK(LIGW)
C INTEGER IWORK(USER DEFINED)
C DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, SB(N), SX(N)
C DOUBLE PRECISION RGWK(LRGW), RWORK(USER DEFINED)
C EXTERNAL MATVEC, MSOLVE
C
C CALL DGMRES(N, B, X, NELT, IA, JA, A, ISYM, MATVEC, MSOLVE,
C $ ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT, SB, SX,
C $ RGWK, LRGW, IGWK, LIGW, RWORK, IWORK)
C
C *Arguments:
C N :IN Integer.
C Order of the Matrix.
C B :IN Double Precision B(N).
C Right-hand side vector.
C X :INOUT Double Precision X(N).
C On input X is your initial guess for the solution vector.
C On output X is the final approximate solution.
C NELT :IN Integer.
C Number of Non-Zeros stored in A.
C IA :IN Integer IA(NELT).
C JA :IN Integer JA(NELT).
C A :IN Double Precision A(NELT).
C These arrays contain the matrix data structure for A.
C It could take any form. See "Description", below
C for more late breaking details...
C ISYM :IN Integer.
C Flag to indicate symmetric storage format.
C If ISYM=0, all nonzero entries of the matrix are stored.
C If ISYM=1, the matrix is symmetric, and only the upper
C or lower triangle of the matrix is stored.
C MATVEC :EXT External.
C Name of a routine which performs the matrix vector multiply
C Y = A*X given A and X. The name of the MATVEC routine must
C be declared external in the calling program. The calling
C sequence to MATVEC is:
C CALL MATVEC( N, X, Y, NELT, IA, JA, A, ISYM )
C where N is the number of unknowns, Y is the product A*X
C upon return, X is an input vector, and NELT is the number of
C non-zeros in the SLAP IA, JA, A storage for the matrix A.
C ISYM is a flag which, if non-zero, denotes that A is
C symmetric and only the lower or upper triangle is stored.
C MSOLVE :EXT External.
C Name of the routine which solves a linear system Mz = r for
C z given r with the preconditioning matrix M (M is supplied via
C RWORK and IWORK arrays. The name of the MSOLVE routine must
C be declared external in the calling program. The calling
C sequence to MSLOVE is:
C CALL MSOLVE(N, R, Z, NELT, IA, JA, A, ISYM, RWORK, IWORK)
C Where N is the number of unknowns, R is the right-hand side
C vector, and z is the solution upon return. RWORK is a
C double precision
C array that can be used to pass necessary preconditioning
C information and/or workspace to MSOLVE. IWORK is an integer
C work array for the same purpose as RWORK.
C ITOL :IN Integer.
C Flag to indicate the type of convergence criterion used.
C ITOL=0 Means the iteration stops when the test described
C below on the residual RL is satisfied. This is
C the "Natural Stopping Criteria" for this routine.
C Other values of ITOL cause extra, otherwise
C unnecessary, computation per iteration and are
C therefore much less efficient. See ISDGMR (the
C stop test routine) for more information.
C ITOL=1 Means the iteration stops when the first test
C described below on the residual RL is satisfied,
C and there is either right or no preconditioning
C being used.
C ITOL=2 Implies that the user is using left
C preconditioning, and the second stopping criterion
C below is used.
C ITOL=3 Means the iteration stops when the third test
C described below on Minv*Residual is satisfied, and
C there is either left or no preconditioning begin
C used.
C ITOL=11 is often useful for checking and comparing
C different routines. For this case, the user must
C supply the "exact" solution or a very accurate
C approximation (one with an error much less than
C TOL) through a common block,
C COMMON /SOLBLK/ SOLN(1)
C if ITOL=11, iteration stops when the 2-norm of the
C difference between the iterative approximation and
C the user-supplied solution divided by the 2-norm
C of the user-supplied solution is less than TOL.
C Note that this requires the user to set up the
C "COMMON /SOLBLK/ SOLN(LENGTH)" in the calling
C routine. The routine with this declaration should
C be loaded before the stop test so that the correct
C length is used by the loader. This procedure is
C not standard Fortran and may not work correctly on
C your system (although it has worked on every
C system the authors have tried). If ITOL is not 11
C then this common block is indeed standard Fortran.
C TOL :INOUT Double Precision.
C Convergence criterion, as described below. If TOL is set
C to zero on input, then a default value of 500*(the smallest
C positive magnitude, machine epsilon) is used.
C ITMAX :DUMMY Integer.
C Maximum number of iterations in most SLAP routines. In
C this routine this does not make sense. The maximum number
C of iterations here is given by ITMAX = MAXL*(NRMAX+1).
C See IGWK for definitions of MAXL and NRMAX.
C ITER :OUT Integer.
C Number of iterations required to reach convergence, or
C ITMAX if convergence criterion could not be achieved in
C ITMAX iterations.
C ERR :OUT Double Precision.
C Error estimate of error in final approximate solution, as
C defined by ITOL. Letting norm() denote the Euclidean
C norm, ERR is defined as follows..
C
C If ITOL=0, then ERR = norm(SB*(B-A*X(L)))/norm(SB*B),
C for right or no preconditioning, and
C ERR = norm(SB*(M-inverse)*(B-A*X(L)))/
C norm(SB*(M-inverse)*B),
C for left preconditioning.
C If ITOL=1, then ERR = norm(SB*(B-A*X(L)))/norm(SB*B),
C since right or no preconditioning
C being used.
C If ITOL=2, then ERR = norm(SB*(M-inverse)*(B-A*X(L)))/
C norm(SB*(M-inverse)*B),
C since left preconditioning is being
C used.
C If ITOL=3, then ERR = Max |(Minv*(B-A*X(L)))(i)/x(i)|
C i=1,n
C If ITOL=11, then ERR = norm(SB*(X(L)-SOLN))/norm(SB*SOLN).
C IERR :OUT Integer.
C Return error flag.
C IERR = 0 => All went well.
C IERR = 1 => Insufficient storage allocated for
C RGWK or IGWK.
C IERR = 2 => Routine Dgmres failed to reduce the norm
C of the current residual on its last call,
C and so the iteration has stalled. In
C this case, X equals the last computed
C approximation. The user must either
C increase MAXL, or choose a different
C initial guess.
C IERR =-1 => Insufficient length for RGWK array.
C IGWK(6) contains the required minimum
C length of the RGWK array.
C IERR =-2 => Inconsistent ITOL and JPRE values.
C For IERR <= 2, RGWK(1) = RHOL, which is the norm on the
C left-hand-side of the relevant stopping test defined
C below associated with the residual for the current
C approximation X(L).
C IUNIT :IN Integer.
C Unit number on which to write the error at each iteration,
C if this is desired for monitoring convergence. If unit
C number is 0, no writing will occur.
C SB :IN Double Precision SB(N).
C Array of length N containing scale factors for the right
C hand side vector B. If JSCAL.eq.0 (see below), SB need
C not be supplied.
C SX :IN Double Precision SX(N).
C Array of length N containing scale factors for the solution
C vector X. If JSCAL.eq.0 (see below), SX need not be
C supplied. SB and SX can be the same array in the calling
C program if desired.
C RGWK :INOUT Double Precision RGWK(LRGW).
C Double Precision array of size at least
C 1 + N*(MAXL+6) + MAXL*(MAXL+3)
C used for work space by DGMRES. See below for definition of
C MAXL.
C On return, RGWK(1) = RHOL. See IERR for definition of RHOL.
C LRGW :IN Integer.
C Length of the double precision workspace, RGWK.
C LRGW > 1 + N*(MAXL+6) + MAXL*(MAXL+3).
C For the default values, RGWK has size at least 131 + 16*N.
C IGWK :INOUT Integer IGWK(LIGW).
C The following IGWK parameters should be set by the user
C before calling this routine.
C IGWK(1) = MAXL. Maximum dimension of Krylov subspace in
C which X - X0 is to be found (where, X0 is the initial
C guess). The default value of MAXL is 10.
C IGWK(2) = KMP. Maximum number of previous Krylov basis
C vectors to which each new basis vector is made orthogonal.
C The default value of KMP is MAXL.
C IGWK(3) = JSCAL. Flag indicating whether the scaling
C arrays SB and SX are to be used.
C JSCAL = 0 => SB and SX are not used and the algorithm
C will perform as if all SB(I) = 1 and SX(I) = 1.
C JSCAL = 1 => Only SX is used, and the algorithm
C performs as if all SB(I) = 1.
C JSCAL = 2 => Only SB is used, and the algorithm
C performs as if all SX(I) = 1.
C JSCAL = 3 => Both SB and SX are used.
C IGWK(4) = JPRE. Flag indicating whether preconditioning
C is being used.
C JPRE = 0 => There is no preconditioning.
C JPRE > 0 => There is preconditioning on the right
C only, and the solver will call routine MSOLVE.
C JPRE < 0 => There is preconditioning on the left
C only, and the solver will call routine MSOLVE.
C IGWK(5) = NRMAX. Maximum number of restarts of the
C Krylov iteration. The default value of NRMAX = 10.
C if IWORK(5) = -1, then no restarts are performed (in
C this case, NRMAX is set to zero internally).
C The following IWORK parameters are diagnostic information
C made available to the user after this routine completes.
C IGWK(6) = MLWK. Required minimum length of RGWK array.
C IGWK(7) = NMS. The total number of calls to MSOLVE.
C LIGW :IN Integer.
C Length of the integer workspace, IGWK. LIGW >= 20.
C
C *Description:
C DGMRES solves a linear system A*X = B rewritten in the form:
C
C (SB*A*(M-inverse)*(SX-inverse))*(SX*M*X) = SB*B,
C
C with right preconditioning, or
C
C (SB*(M-inverse)*A*(SX-inverse))*(SX*X) = SB*(M-inverse)*B,
C
C with left preconditioning, where A is an N-by-N double
C precision matrix,
C X and B are N-vectors, SB and SX are diagonal scaling
C matrices, and M is a preconditioning matrix. It uses
C preconditioned Krylov subpace methods based on the
C generalized minimum residual method (GMRES). This routine
C optionally performs either the full orthogonalization
C version of the GMRES algorithm or an incomplete variant of
C it. Both versions use restarting of the linear iteration by
C default, although the user can disable this feature.
C
C The GMRES algorithm generates a sequence of approximations
C X(L) to the true solution of the above linear system. The
C convergence criteria for stopping the iteration is based on
C the size of the scaled norm of the residual R(L) = B -
C A*X(L). The actual stopping test is either:
C
C norm(SB*(B-A*X(L))) .le. TOL*norm(SB*B),
C
C for right preconditioning, or
C
C norm(SB*(M-inverse)*(B-A*X(L))) .le.
C TOL*norm(SB*(M-inverse)*B),
C
C for left preconditioning, where norm() denotes the euclidean
C norm, and TOL is a positive scalar less than one input by
C the user. If TOL equals zero when DGMRES is called, then a
C default value of 500*(the smallest positive magnitude,
C machine epsilon) is used. If the scaling arrays SB and SX
C are used, then ideally they should be chosen so that the
C vectors SX*X(or SX*M*X) and SB*B have all their components
C approximately equal to one in magnitude. If one wants to
C use the same scaling in X and B, then SB and SX can be the
C same array in the calling program.
C
C The following is a list of the other routines and their
C functions used by DGMRES:
C DPIGMR Contains the main iteration loop for GMRES.
C DORTH Orthogonalizes a new vector against older basis vects.
C DHEQR Computes a QR decomposition of a Hessenberg matrix.
C DHELS Solves a Hessenberg least-squares system, using QR
C factors.
C DRLCAL Computes the scaled residual RL.
C DXLCAL Computes the solution XL.
C ISDGMR User-replaceable stopping routine.
C
C This routine does not care what matrix data structure is
C used for A and M. It simply calls the MATVEC and MSOLVE
C routines, with the arguments as described above. The user
C could write any type of structure and the appropriate MATVEC
C and MSOLVE routines. It is assumed that A is stored in the
C IA, JA, A arrays in some fashion and that M (or INV(M)) is
C stored in IWORK and RWORK in some fashion. The SLAP
C routines DSDCG and DSICCG are examples of this procedure.
C
C Two examples of matrix data structures are the: 1) SLAP
C Triad format and 2) SLAP Column format.
C
C =================== S L A P Triad format ===================
C This routine requires that the matrix A be stored in the
C SLAP Triad format. In this format only the non-zeros are
C stored. They may appear in *ANY* order. The user supplies
C three arrays of length NELT, where NELT is the number of
C non-zeros in the matrix: (IA(NELT), JA(NELT), A(NELT)). For
C each non-zero the user puts the row and column index of that
C matrix element in the IA and JA arrays. The value of the
C non-zero matrix element is placed in the corresponding
C location of the A array. This is an extremely easy data
C structure to generate. On the other hand it is not too
C efficient on vector computers for the iterative solution of
C linear systems. Hence, SLAP changes this input data
C structure to the SLAP Column format for the iteration (but
C does not change it back).
C
C Here is an example of the SLAP Triad storage format for a
C 5x5 Matrix. Recall that the entries may appear in any order.
C
C 5x5 Matrix SLAP Triad format for 5x5 matrix on left.
C 1 2 3 4 5 6 7 8 9 10 11
C |11 12 0 0 15| A: 51 12 11 33 15 53 55 22 35 44 21
C |21 22 0 0 0| IA: 5 1 1 3 1 5 5 2 3 4 2
C | 0 0 33 0 35| JA: 1 2 1 3 5 3 5 2 5 4 1
C | 0 0 0 44 0|
C |51 0 53 0 55|
C
C =================== S L A P Column format ==================
C This routine requires that the matrix A be stored in the
C SLAP Column format. In this format the non-zeros are stored
C counting down columns (except for the diagonal entry, which
C must appear first in each "column") and are stored in the
C double precision array A. In other words, for each column
C in the matrix put the diagonal entry in A. Then put in the
C other non-zero elements going down the column (except the
C diagonal) in order. The IA array holds the row index for
C each non-zero. The JA array holds the offsets into the IA,
C A arrays for the beginning of each column. That is,
C IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
C ICOL-th column in IA and A. IA(JA(ICOL+1)-1),
C A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
C Note that we always have JA(N+1) = NELT+1, where N is the
C number of columns in the matrix and NELT is the number of
C non-zeros in the matrix.
C
C Here is an example of the SLAP Column storage format for a
C 5x5 Matrix (in the A and IA arrays '|' denotes the end of a
C column):
C
C 5x5 Matrix SLAP Column format for 5x5 matrix on left.
C 1 2 3 4 5 6 7 8 9 10 11
C |11 12 0 0 15| A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
C |21 22 0 0 0| IA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
C | 0 0 33 0 35| JA: 1 4 6 8 9 12
C | 0 0 0 44 0|
C |51 0 53 0 55|
C
C *Precision: Double Precision
C***REFERENCES 1. Peter N. Brown and A. C. Hindmarsh,
C "Reduced Storage Matrix Methods In Stiff ODE
C Systems," LLNL report UCRL-95088, Rev. 1,
C June 1987.
C***ROUTINES CALLED DPIGMR, DORTH, DHEQR, DHELS, DRCAL, DXLCAL,
C ISDGMR, DNRM2, DDOT, DAXPY, DSCAL, IDAMAX, D1MACH.
C***END PROLOGUE DGMRES
C The following is for optimized compilation on LLNL/LTSS Crays.
CLLL. OPTIMIZE
      IMPLICIT DOUBLE PRECISION(A-H,O-Z)
      INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, ITOL, ITMAX, ITER
      INTEGER IERR, IUNIT, LRGW, LIGW, IGWK(LIGW)
      INTEGER IWORK(*)
      DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, SB(N), SX(N)
      DOUBLE PRECISION RGWK(LRGW), RWORK(*)
      EXTERNAL MATVEC, MSOLVE, D1MACH
      INTEGER JPRE, KMP, MAXL, NMS, MAXLP1, NMSL, NRSTS, NRMAX
      INTEGER I, IFLAG, LR, LDL, LHES, LGMR, LQ, LV, LW
      DOUBLE PRECISION BNRM, RHOL, SUM
C NOTE(review): JSCAL, LXL, LZ, LZM1 are implicitly typed INTEGER via
C the I-N default; DNRM2 is implicitly DOUBLE PRECISION via IMPLICIT.
C
C***FIRST EXECUTABLE STATEMENT DGMRES
      IERR = 0
C ------------------------------------------------------------------
C Load method parameters with user values or defaults.
C ------------------------------------------------------------------
      MAXL = IGWK(1)
      IF (MAXL .EQ. 0) MAXL = 10
      IF (MAXL .GT. N) MAXL = N
      KMP = IGWK(2)
      IF (KMP .EQ. 0) KMP = MAXL
      IF (KMP .GT. MAXL) KMP = MAXL
      JSCAL = IGWK(3)
      JPRE = IGWK(4)
C Check for consistent values of ITOL and JPRE.
      IF( ITOL.EQ.1 .AND. JPRE.LT.0 ) GOTO 650
      IF( ITOL.EQ.2 .AND. JPRE.GE.0 ) GOTO 650
      NRMAX = IGWK(5)
      IF( NRMAX.EQ.0 ) NRMAX = 10
C If NRMAX .eq. -1, then set NRMAX = 0 to turn off restarting.
      IF( NRMAX.EQ.-1 ) NRMAX = 0
C If input value of TOL is zero, set it to its default value.
C NOTE(review): D1MACH(3) is the SLATEC machine constant used for the
C default tolerance; see TOL in the prologue.
      IF( TOL.EQ.0.0D0 ) TOL = 500.0*D1MACH(3)
C
C Initialize counters.
      ITER = 0
      NMS = 0
      NRSTS = 0
C ------------------------------------------------------------------
C Form work array segment pointers.
C Layout of RGWK: V (Krylov basis, N*(MAXL+1)) | R (residual, N+1) |
C HES (Hessenberg, MAXL*(MAXL+1)) | Q (Givens rotations, 2*MAXL) |
C DL (N) | W (N) | XL (N) | Z (N).
C ------------------------------------------------------------------
      MAXLP1 = MAXL + 1
      LV = 1
      LR = LV + N*MAXLP1
      LHES = LR + N + 1
      LQ = LHES + MAXL*MAXLP1
      LDL = LQ + 2*MAXL
      LW = LDL + N
      LXL = LW + N
      LZ = LXL + N
C
C Load igwk(6) with required minimum length of the rgwk array.
      IGWK(6) = LZ + N - 1
      IF( LZ+N-1.GT.LRGW ) GOTO 640
C ------------------------------------------------------------------
C Calculate scaled-preconditioned norm of RHS vector b.
C ------------------------------------------------------------------
      IF (JPRE .LT. 0) THEN
         CALL MSOLVE(N, B, RGWK(LR), NELT, IA, JA, A, ISYM,
     $        RWORK, IWORK)
         NMS = NMS + 1
      ELSE
         CALL DCOPY(N, B, 1, RGWK(LR), 1)
      ENDIF
      IF( JSCAL.EQ.2 .OR. JSCAL.EQ.3 ) THEN
         SUM = 0.D0
         DO 10 I = 1,N
            SUM = SUM + (RGWK(LR-1+I)*SB(I))**2
 10      CONTINUE
         BNRM = DSQRT(SUM)
      ELSE
         BNRM = DNRM2(N,RGWK(LR),1)
      ENDIF
C ------------------------------------------------------------------
C Calculate initial residual.
C ------------------------------------------------------------------
      CALL MATVEC(N, X, RGWK(LR), NELT, IA, JA, A, ISYM)
      DO 50 I = 1,N
         RGWK(LR-1+I) = B(I) - RGWK(LR-1+I)
 50   CONTINUE
C ------------------------------------------------------------------
C If performing restarting, then load the residual into the
C correct location in the Rgwk array.
C ------------------------------------------------------------------
 100  CONTINUE
      IF( NRSTS.GT.NRMAX ) GOTO 610
      IF( NRSTS.GT.0 ) THEN
C Copy the curr residual to different loc in the Rgwk array.
         CALL DCOPY(N, RGWK(LDL), 1, RGWK(LR), 1)
      ENDIF
C ------------------------------------------------------------------
C Use the DPIGMR algorithm to solve the linear system A*Z = R.
C ------------------------------------------------------------------
      CALL DPIGMR(N, RGWK(LR), SB, SX, JSCAL, MAXL, MAXLP1, KMP,
     $     NRSTS, JPRE, MATVEC, MSOLVE, NMSL, RGWK(LZ), RGWK(LV),
     $     RGWK(LHES), RGWK(LQ), LGMR, RWORK, IWORK, RGWK(LW),
     $     RGWK(LDL), RHOL, NRMAX, B, BNRM, X, RGWK(LXL), ITOL,
     $     TOL, NELT, IA, JA, A, ISYM, IUNIT, IFLAG, ERR)
      ITER = ITER + LGMR
      NMS = NMS + NMSL
C
C Increment X by the current approximate solution Z of A*Z = R.
C
      LZM1 = LZ - 1
      DO 110 I = 1,N
         X(I) = X(I) + RGWK(LZM1+I)
 110  CONTINUE
C IFLAG=0: converged; IFLAG=1: restart needed; IFLAG=2: stalled.
      IF( IFLAG.EQ.0 ) GOTO 600
      IF( IFLAG.EQ.1 ) THEN
         NRSTS = NRSTS + 1
         GOTO 100
      ENDIF
      IF( IFLAG.EQ.2 ) GOTO 620
C ------------------------------------------------------------------
C All returns are made through this section.
C ------------------------------------------------------------------
C The iteration has converged.
C
 600  CONTINUE
      IGWK(7) = NMS
      RGWK(1) = RHOL
      IERR = 0
      RETURN
C
C Max number((NRMAX+1)*MAXL) of linear iterations performed.
 610  CONTINUE
      IGWK(7) = NMS
      RGWK(1) = RHOL
      IERR = 1
      RETURN
C
C GMRES failed to reduce last residual in MAXL iterations.
C The iteration has stalled.
 620  CONTINUE
      IGWK(7) = NMS
      RGWK(1) = RHOL
      IERR = 2
      RETURN
C Error return. Insufficient length for Rgwk array.
 640  CONTINUE
      ERR = TOL
      IERR = -1
      RETURN
C Error return. Inconsistent ITOL and JPRE values.
 650  CONTINUE
      ERR = TOL
      IERR = -2
      RETURN
C------------- LAST LINE OF DGMRES FOLLOWS ----------------------------
      END
*DECK DSDGMR
      SUBROUTINE DSDGMR(N, B, X, NELT, IA, JA, A, ISYM, NSAVE,
     $ ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT, RWORK, LENW,
     $ IWORK, LENIW )
C***BEGIN PROLOGUE DSDGMR
C***DATE WRITTEN 890404 (YYMMDD)
C***REVISION DATE 890404 (YYMMDD)
C***CATEGORY NO. D2A4, D2B4
C***KEYWORDS LIBRARY=SLATEC(SLAP),
C TYPE=DOUBLE PRECISION(DSDGMR-D),
C Non-Symmetric Linear system, Sparse,
C Iterative Precondition, Generalized Minimum Residual
C***AUTHOR Brown, Peter, (LLNL), brown@lll-crg.llnl.gov
C Hindmarsh, Alan, (LLNL), alanh@lll-crg.llnl.gov
C Seager, Mark K., (LLNL), seager@lll-crg.llnl.gov
C Lawrence Livermore National Laboratory
C PO BOX 808, L-300
C Livermore, CA 94550 (415) 423-3141
C***PURPOSE Diagonally scaled GMRES iterative sparse Ax=b solver.
C This routine uses the generalized minimum residual
C (GMRES) method with diagonal scaling to solve possibly
C non-symmetric linear systems of the form: A*x = b.
C***DESCRIPTION
C *Usage:
C INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, NSAVE
C INTEGER ITOL, ITMAX, IERR, IUNIT, LENW, IWORK(LENIW), LENIW
C DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR
C DOUBLE PRECISION RWORK(LENW)
C EXTERNAL MATVEC, MSOLVE
C
C CALL DSDGMR(N, B, X, NELT, IA, JA, A, ISYM, NSAVE,
C $ ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT,
C $ RWORK, LENW, IWORK, LENIW)
C
C *Arguments:
C N :IN Integer.
C Order of the Matrix.
C B :IN Double Precision B(N).
C Right-hand side vector.
C X :INOUT Double Precision X(N).
C On input X is your initial guess for solution vector.
C On output X is the final approximate solution.
C NELT :IN Integer.
C Number of Non-Zeros stored in A.
C IA :IN Integer IA(NELT).
C JA :IN Integer JA(NELT).
C A :IN Double Precision A(NELT).
C These arrays should hold the matrix A in either the SLAP
C Triad format or the SLAP Column format. See "Description",
C below. If the SLAP Triad format is chosen it is changed
C internally to the SLAP Column format.
C ISYM :IN Integer.
C Flag to indicate symmetric storage format.
C If ISYM=0, all nonzero entries of the matrix are stored.
C If ISYM=1, the matrix is symmetric, and only the upper
C or lower triangle of the matrix is stored.
C NSAVE :IN Integer.
C Number of direction vectors to save and orthogonalize against.
C Must be greater than 1.
C ITOL :IN Integer.
C Flag to indicate the type of convergence criterion used.
C ITOL=0 Means the iteration stops when the test described
C below on the residual RL is satisfied. This is
C the "Natural Stopping Criteria" for this routine.
C Other values of ITOL cause extra, otherwise
C unnecessary, computation per iteration and are
C therefore much less efficient. See ISDGMR (the
C stop test routine) for more information.
C ITOL=1 Means the iteration stops when the first test
C described below on the residual RL is satisfied,
C and there is either right or no preconditioning
C being used.
C ITOL=2 Implies that the user is using left
C preconditioning, and the second stopping criterion
C below is used.
C ITOL=3 Means the iteration stops when the third test
C described below on Minv*Residual is satisfied, and
C there is either left or no preconditioning being
C used.
C ITOL=11 is often useful for checking and comparing
C different routines. For this case, the user must
C supply the "exact" solution or a very accurate
C approximation (one with an error much less than
C TOL) through a common block,
C COMMON /SOLBLK/ SOLN(1)
C if ITOL=11, iteration stops when the 2-norm of the
C difference between the iterative approximation and
C the user-supplied solution divided by the 2-norm
C of the user-supplied solution is less than TOL.
C Note that this requires the user to set up the
C "COMMON /SOLBLK/ SOLN(LENGTH)" in the calling
C routine. The routine with this declaration should
C be loaded before the stop test so that the correct
C length is used by the loader. This procedure is
C not standard Fortran and may not work correctly on
C your system (although it has worked on every
C system the authors have tried). If ITOL is not 11
C then this common block is indeed standard Fortran.
C TOL :INOUT Double Precision.
C Convergence criterion, as described below. If TOL is set
C to zero on input, then a default value of 500*(the smallest
C positive magnitude, machine epsilon) is used.
C ITMAX :IN Integer.
C Maximum number of iterations. This routine uses the default
C of NRMAX = ITMAX/NSAVE to determine when each restart
C should occur. See the description of NRMAX and MAXL in
C DGMRES for a full and frightfully interesting discussion of
C this topic.
C ITER :OUT Integer.
C Number of iterations required to reach convergence, or
C ITMAX+1 if convergence criterion could not be achieved in
C ITMAX iterations.
C ERR :OUT Double Precision.
C Error estimate of error in final approximate solution, as
C defined by ITOL. Letting norm() denote the Euclidean
C norm, ERR is defined as follows...
C If ITOL=0, then ERR = norm(SB*(B-A*X(L)))/norm(SB*B),
C for right or no preconditioning, and
C ERR = norm(SB*(M-inverse)*(B-A*X(L)))/
C norm(SB*(M-inverse)*B),
C for left preconditioning.
C If ITOL=1, then ERR = norm(SB*(B-A*X(L)))/norm(SB*B),
C since right or no preconditioning
C being used.
C If ITOL=2, then ERR = norm(SB*(M-inverse)*(B-A*X(L)))/
C norm(SB*(M-inverse)*B),
C since left preconditioning is being
C used.
C If ITOL=3, then ERR = Max |(Minv*(B-A*X(L)))(i)/x(i)|
C i=1,n
C If ITOL=11, then ERR = norm(SB*(X(L)-SOLN))/norm(SB*SOLN).
C IERR :OUT Integer.
C Return error flag.
C IERR = 0 => All went well.
C IERR = 1 => Insufficient storage allocated for
C RGWK or IGWK.
C IERR = 2 => Routine DPIGMR failed to reduce the norm
C of the current residual on its last call,
C and so the iteration has stalled. In
C this case, X equals the last computed
C approximation. The user must either
C increase MAXL, or choose a different
C initial guess.
C IERR = 3 => Illegal NSAVE value (NSAVE .le. 1); no
C iteration was performed.
C IERR =-1 => Insufficient length for RGWK array.
C IGWK(6) contains the required minimum
C length of the RGWK array.
C IERR =-2 => Inconsistent ITOL and JPRE values.
C For IERR <= 2, RGWK(1) = RHOL, which is the norm on the
C left-hand-side of the relevant stopping test defined
C below associated with the residual for the current
C approximation X(L).
C IUNIT :IN Integer.
C Unit number on which to write the error at each iteration,
C if this is desired for monitoring convergence. If unit
C number is 0, no writing will occur.
C RWORK :WORK Double Precision RWORK(LENW).
C Double Precision array of size LENW.
C LENW :IN Integer.
C Length of the double precision workspace, RWORK.
C LENW >= 1 + N*(NSAVE+7) + NSAVE*(NSAVE+3).
C For the recommended values of NSAVE (10), RWORK has size at
C least 131 + 17*N.
C IWORK :INOUT Integer IWORK(USER DEFINED >= 30).
C Used to hold pointers into the RWORK array.
C Upon return the following locations of IWORK hold information
C which may be of use to the user:
C IWORK(9) Amount of Integer workspace actually used.
C IWORK(10) Amount of Double Precision workspace actually used.
C LENIW :IN Integer.
C Length of the integer workspace IWORK. LENIW >= 30.
C
C *Description:
C DSDGMR solves a linear system A*X = B rewritten in the form:
C
C (SB*A*(M-inverse)*(SX-inverse))*(SX*M*X) = SB*B,
C
C with right preconditioning, or
C
C (SB*(M-inverse)*A*(SX-inverse))*(SX*X) = SB*(M-inverse)*B,
C
C with left preconditioning, where A is an n-by-n double
C precision matrix,
C X and B are N-vectors, SB and SX are diagonal scaling
C matrices, and M is the diagonal of A. It uses
C preconditioned Krylov subspace methods based on the
C generalized minimum residual method (GMRES). This routine
C is a driver routine which assumes a SLAP matrix data
C structure and sets up the necessary information to do
C diagonal preconditioning and calls the main GMRES routine
C DGMRES for the solution of the linear system. DGMRES
C optionally performs either the full orthogonalization
C version of the GMRES algorithm or an incomplete variant of
C it. Both versions use restarting of the linear iteration by
C default, although the user can disable this feature.
C
C The GMRES algorithm generates a sequence of approximations
C X(L) to the true solution of the above linear system. The
C convergence criteria for stopping the iteration is based on
C the size of the scaled norm of the residual R(L) = B -
C A*X(L). The actual stopping test is either:
C
C norm(SB*(B-A*X(L))) .le. TOL*norm(SB*B),
C
C for right preconditioning, or
C
C norm(SB*(M-inverse)*(B-A*X(L))) .le.
C TOL*norm(SB*(M-inverse)*B),
C
C for left preconditioning, where norm() denotes the euclidean
C norm, and TOL is a positive scalar less than one input by
C the user. If TOL equals zero when DSDGMR is called, then a
C default value of 500*(the smallest positive magnitude,
C machine epsilon) is used. If the scaling arrays SB and SX
C are used, then ideally they should be chosen so that the
C vectors SX*X(or SX*M*X) and SB*B have all their components
C approximately equal to one in magnitude. If one wants to
C use the same scaling in X and B, then SB and SX can be the
C same array in the calling program.
C
C The following is a list of the other routines and their
C functions used by GMRES:
C DGMRES Contains the matrix structure independent driver
C routine for GMRES.
C DPIGMR Contains the main iteration loop for GMRES.
C DORTH Orthogonalizes a new vector against older basis vects.
C DHEQR Computes a QR decomposition of a Hessenberg matrix.
C DHELS Solves a Hessenberg least-squares system, using QR
C factors.
C RLCALC Computes the scaled residual RL.
C XLCALC Computes the solution XL.
C ISDGMR User-replaceable stopping routine.
C
C The Sparse Linear Algebra Package (SLAP) utilizes two matrix
C data structures: 1) the SLAP Triad format or 2) the SLAP
C Column format. The user can hand this routine either
C of these data structures and SLAP will figure out which one
C is being used and act accordingly.
C
C =================== S L A P Triad format ===================
C This routine requires that the matrix A be stored in the
C SLAP Triad format. In this format only the non-zeros are
C stored. They may appear in *ANY* order. The user supplies
C three arrays of length NELT, where NELT is the number of
C non-zeros in the matrix: (IA(NELT), JA(NELT), A(NELT)). For
C each non-zero the user puts the row and column index of that
C matrix element in the IA and JA arrays. The value of the
C non-zero matrix element is placed in the corresponding
C location of the A array. This is an extremely easy data
C structure to generate. On the other hand it is not too
C efficient on vector computers for the iterative solution of
C linear systems. Hence, SLAP changes this input data
C structure to the SLAP Column format for the iteration (but
C does not change it back).
C
C Here is an example of the SLAP Triad storage format for a
C 5x5 Matrix. Recall that the entries may appear in any order.
C
C 5x5 Matrix SLAP Triad format for 5x5 matrix on left.
C 1 2 3 4 5 6 7 8 9 10 11
C |11 12 0 0 15| A: 51 12 11 33 15 53 55 22 35 44 21
C |21 22 0 0 0| IA: 5 1 1 3 1 5 5 2 3 4 2
C | 0 0 33 0 35| JA: 1 2 1 3 5 3 5 2 5 4 1
C | 0 0 0 44 0|
C |51 0 53 0 55|
C
C =================== S L A P Column format ==================
C This routine requires that the matrix A be stored in the
C SLAP Column format. In this format the non-zeros are stored
C counting down columns (except for the diagonal entry, which
C must appear first in each "column") and are stored in the
C double precision array A. In other words, for each column
C in the matrix put the diagonal entry in A. Then put in the
C other non-zero elements going down the column (except the
C diagonal) in order. The IA array holds the row index for
C each non-zero. The JA array holds the offsets into the IA,
C A arrays for the beginning of each column. That is,
C IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
C ICOL-th column in IA and A. IA(JA(ICOL+1)-1),
C A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
C Note that we always have JA(N+1) = NELT+1, where N is the
C number of columns in the matrix and NELT is the number of
C non-zeros in the matrix.
C
C Here is an example of the SLAP Column storage format for a
C 5x5 Matrix (in the A and IA arrays '|' denotes the end of a
C column):
C
C 5x5 Matrix SLAP Column format for 5x5 matrix on left.
C 1 2 3 4 5 6 7 8 9 10 11
C |11 12 0 0 15| A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
C |21 22 0 0 0| IA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
C | 0 0 33 0 35| JA: 1 4 6 8 9 12
C | 0 0 0 44 0|
C |51 0 53 0 55|
C
C *Precision: Double Precision
C *Side Effects:
C The SLAP Triad format (IA, JA, A) is modified internally to be
C the SLAP Column format. See above.
C***REFERENCES 1. Peter N. Brown and A. C. Hindmarsh,
C "Reduced Storage Matrix Methods In Stiff ODE
C Systems," LLNL report UCRL-95088, Rev. 1,
C June 1987.
C***ROUTINES CALLED DS2Y, DCHKW, DSDS, DGMRES
C***END PROLOGUE DSDGMR
C The following is for optimized compilation on LLNL/LTSS Crays.
CLLL. OPTIMIZE
      IMPLICIT DOUBLE PRECISION(A-H,O-Z)
      INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, NSAVE, ITOL
      INTEGER ITMAX, ITER, IERR, IUNIT, LENW, LENIW, IWORK(LENIW)
      DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, RWORK(LENW)
      EXTERNAL DSMV, DSDI
      PARAMETER (LOCRB=1, LOCIB=11)
C
C Change the SLAP input matrix IA, JA, A to SLAP-Column format.
C***FIRST EXECUTABLE STATEMENT DSDGMR
      IERR = 0
      ERR = 0.0
C Reject illegal NSAVE values up front (the workspace layout
C below assumes NSAVE .ge. 2).
      IF( NSAVE.LE.1 ) THEN
         IERR = 3
         RETURN
      ENDIF
      CALL DS2Y( N, NELT, IA, JA, A, ISYM )
C
C Set up the workspace. We assume MAXL=KMP=NSAVE.
C Compute the inverse of the diagonal of the matrix.
      LOCIGW = LOCIB
      LOCIW = LOCIGW + 20
C
      LOCDIN = LOCRB
      LOCRGW = LOCDIN + N
      LOCW = LOCRGW + 1+N*(NSAVE+6)+NSAVE*(NSAVE+3)
C
C Record the pointer to the inverse diagonal (used by DSDI) and
C the total integer/real workspace actually required.
      IWORK(4) = LOCDIN
      IWORK(9) = LOCIW
      IWORK(10) = LOCW
C
C Check the workspace allocations.
      CALL DCHKW( 'DSDGMR', LOCIW, LENIW, LOCW, LENW, IERR, ITER, ERR )
      IF( IERR.NE.0 ) RETURN
C
C DSDS stores 1/diag(A) in RWORK(LOCDIN..LOCDIN+N-1).
      CALL DSDS(N, NELT, IA, JA, A, ISYM, RWORK(LOCDIN))
C
C Perform the Diagonally Scaled Generalized Minimum
C Residual iteration algorithm. The following DGMRES
C defaults are used MAXL = KMP = NSAVE, JSCAL = 0,
C JPRE = -1, NRMAX = ITMAX/NSAVE
      IWORK(LOCIGW ) = NSAVE
      IWORK(LOCIGW+1) = NSAVE
      IWORK(LOCIGW+2) = 0
      IWORK(LOCIGW+3) = -1
      IWORK(LOCIGW+4) = ITMAX/NSAVE
C Force the natural stopping criterion (ITOL=0) inside DGMRES.
      MYITOL = 0
C
      CALL DGMRES( N, B, X, NELT, IA, JA, A, ISYM, DSMV, DSDI,
     $ MYITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT, RWORK, RWORK,
     $ RWORK(LOCRGW), LENW-LOCRGW, IWORK(LOCIGW), 20,
     $ RWORK, IWORK )
C
      IF( ITER.GT.ITMAX ) IERR = 2
      RETURN
C------------- LAST LINE OF DSDGMR FOLLOWS ----------------------------
      END
*DECK DSLUGM
      SUBROUTINE DSLUGM(N, B, X, NELT, IA, JA, A, ISYM, NSAVE,
     $ ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT, RWORK, LENW,
     $ IWORK, LENIW )
C***BEGIN PROLOGUE DSLUGM
C***DATE WRITTEN 890404 (YYMMDD)
C***REVISION DATE 890404 (YYMMDD)
C***CATEGORY NO. D2A4, D2B4
C***KEYWORDS LIBRARY=SLATEC(SLAP),
C TYPE=DOUBLE PRECISION(DSLUGM-D),
C Non-Symmetric Linear system, Sparse,
C Iterative Precondition, Generalized Minimum Residual
C***AUTHOR Brown, Peter, (LLNL), brown@lll-crg.llnl.gov
C Hindmarsh, Alan, (LLNL), alanh@lll-crg.llnl.gov
C Seager, Mark K., (LLNL), seager@lll-crg.llnl.gov
C Lawrence Livermore National Laboratory
C PO BOX 808, L-300
C Livermore, CA 94550 (415) 423-3141
C***PURPOSE Incomplete LU GMRES iterative sparse Ax=b solver.
C This routine uses the generalized minimum residual
C (GMRES) method with incomplete LU factorization for
C preconditioning to solve possibly non-symmetric linear
C systems of the form: Ax = b.
C***DESCRIPTION
C *Usage:
C INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, NSAVE
C INTEGER ITOL, ITMAX, IERR, IUNIT, LENW, IWORK(LENIW), LENIW
C DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, SB(N), SX(N)
C DOUBLE PRECISION RWORK(LENW)
C EXTERNAL MATVEC, MSOLVE
C
C CALL DSLUGM(N, B, X, NELT, IA, JA, A, ISYM, NSAVE,
C $ ITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT,
C $ RWORK, LENW, IWORK, LENIW)
C
C *Arguments:
C N :IN Integer.
C Order of the Matrix.
C B :IN Double Precision B(N).
C Right-hand side vector.
C X :INOUT Double Precision X(N).
C On input X is your initial guess for solution vector.
C On output X is the final approximate solution.
C NELT :IN Integer.
C Number of Non-Zeros stored in A.
C IA :IN Integer IA(NELT).
C JA :IN Integer JA(NELT).
C A :IN Double Precision A(NELT).
C These arrays should hold the matrix A in either the SLAP
C Triad format or the SLAP Column format. See "Description",
C below. If the SLAP Triad format is chosen it is changed
C internally to the SLAP Column format.
C ISYM :IN Integer.
C Flag to indicate symmetric storage format.
C If ISYM=0, all nonzero entries of the matrix are stored.
C If ISYM=1, the matrix is symmetric, and only the upper
C or lower triangle of the matrix is stored.
C NSAVE :IN Integer.
C Number of direction vectors to save and orthogonalize against.
C Must be greater than 1.
C ITOL :IN Integer.
C Flag to indicate the type of convergence criterion used.
C ITOL=0 Means the iteration stops when the test described
C below on the residual RL is satisfied. This is
C the "Natural Stopping Criteria" for this routine.
C Other values of ITOL cause extra, otherwise
C unnecessary, computation per iteration and are
C therefore much less efficient. See ISDGMR (the
C stop test routine) for more information.
C ITOL=1 Means the iteration stops when the first test
C described below on the residual RL is satisfied,
C and there is either right or no preconditioning
C being used.
C ITOL=2 Implies that the user is using left
C preconditioning, and the second stopping criterion
C below is used.
C ITOL=3 Means the iteration stops when the third test
C described below on Minv*Residual is satisfied, and
C there is either left or no preconditioning being
C used.
C ITOL=11 is often useful for checking and comparing
C different routines. For this case, the user must
C supply the "exact" solution or a very accurate
C approximation (one with an error much less than
C TOL) through a common block,
C COMMON /SOLBLK/ SOLN(1)
C if ITOL=11, iteration stops when the 2-norm of the
C difference between the iterative approximation and
C the user-supplied solution divided by the 2-norm
C of the user-supplied solution is less than TOL.
C Note that this requires the user to set up the
C "COMMON /SOLBLK/ SOLN(LENGTH)" in the calling
C routine. The routine with this declaration should
C be loaded before the stop test so that the correct
C length is used by the loader. This procedure is
C not standard Fortran and may not work correctly on
C your system (although it has worked on every
C system the authors have tried). If ITOL is not 11
C then this common block is indeed standard Fortran.
C TOL :INOUT Double Precision.
C Convergence criterion, as described below. If TOL is set
C to zero on input, then a default value of 500*(the smallest
C positive magnitude, machine epsilon) is used.
C ITMAX :IN Integer.
C Maximum number of iterations. This routine uses the default
C of NRMAX = ITMAX/NSAVE to determine when each restart
C should occur. See the description of NRMAX and MAXL in
C DGMRES for a full and frightfully interesting discussion of
C this topic.
C ITER :OUT Integer.
C Number of iterations required to reach convergence, or
C ITMAX+1 if convergence criterion could not be achieved in
C ITMAX iterations.
C ERR :OUT Double Precision.
C Error estimate of error in final approximate solution, as
C defined by ITOL. Letting norm() denote the Euclidean
C norm, ERR is defined as follows...
C If ITOL=0, then ERR = norm(SB*(B-A*X(L)))/norm(SB*B),
C for right or no preconditioning, and
C ERR = norm(SB*(M-inverse)*(B-A*X(L)))/
C norm(SB*(M-inverse)*B),
C for left preconditioning.
C If ITOL=1, then ERR = norm(SB*(B-A*X(L)))/norm(SB*B),
C since right or no preconditioning
C being used.
C If ITOL=2, then ERR = norm(SB*(M-inverse)*(B-A*X(L)))/
C norm(SB*(M-inverse)*B),
C since left preconditioning is being
C used.
C If ITOL=3, then ERR = Max |(Minv*(B-A*X(L)))(i)/x(i)|
C i=1,n
C If ITOL=11, then ERR = norm(SB*(X(L)-SOLN))/norm(SB*SOLN).
C IERR :OUT Integer.
C Return error flag.
C IERR = 0 => All went well.
C IERR = 1 => Insufficient storage allocated for
C RGWK or IGWK.
C IERR = 2 => Routine DPIGMR failed to reduce the norm
C of the current residual on its last call,
C and so the iteration has stalled. In
C this case, X equals the last computed
C approximation. The user must either
C increase MAXL, or choose a different
C initial guess.
C IERR = 3 => Illegal NSAVE value (NSAVE .le. 1); no
C iteration was performed.
C IERR =-1 => Insufficient length for RGWK array.
C IGWK(6) contains the required minimum
C length of the RGWK array.
C IERR =-2 => Inconsistent ITOL and JPRE values.
C For IERR <= 2, RGWK(1) = RHOL, which is the norm on the
C left-hand-side of the relevant stopping test defined
C below associated with the residual for the current
C approximation X(L).
C IUNIT :IN Integer.
C Unit number on which to write the error at each iteration,
C if this is desired for monitoring convergence. If unit
C number is 0, no writing will occur.
C RWORK :WORK Double Precision RWORK(LENW).
C Double Precision array of size LENW.
C LENW :IN Integer.
C Length of the double precision workspace, RWORK.
C LENW >= 1 + N*(NSAVE+7) + NSAVE*(NSAVE+3)+NEL+NU.
C For the recommended values, RWORK
C has size at least 131 + 17*N + NEL + NU. Where NEL is the
C number of non- zeros in the lower triangle of the matrix
C (including the diagonal). NU is the number of nonzeros in
C the upper triangle of the matrix (including the diagonal).
C IWORK :INOUT Integer IWORK(LENIW).
C Used to hold pointers into the RWORK array.
C Upon return the following locations of IWORK hold information
C which may be of use to the user:
C IWORK(9) Amount of Integer workspace actually used.
C IWORK(10) Amount of Double Precision workspace actually used.
C LENIW :IN Integer.
C Length of the integer workspace, IWORK.
C LENIW >= NEL+NU+4*N+32.
C
C *Description:
C DSLUGM solves a linear system A*X = B rewritten in the form:
C
C (SB*A*(M-inverse)*(SX-inverse))*(SX*M*X) = SB*B,
C
C with right preconditioning, or
C
C (SB*(M-inverse)*A*(SX-inverse))*(SX*X) = SB*(M-inverse)*B,
C
C with left preconditioning, where A is an n-by-n double
C precision matrix,
C X and B are N-vectors, SB and SX are diagonal scaling
C matrices, and M is the Incomplete LU factorization of A. It
C uses preconditioned Krylov subspace methods based on the
C generalized minimum residual method (GMRES). This routine
C is a driver routine which assumes a SLAP matrix data
C structure and sets up the necessary information to do
C incomplete LU preconditioning and calls the main GMRES routine
C DGMRES for the solution of the linear system. DGMRES
C optionally performs either the full orthogonalization
C version of the GMRES algorithm or an incomplete variant of
C it. Both versions use restarting of the linear iteration by
C default, although the user can disable this feature.
C
C The GMRES algorithm generates a sequence of approximations
C X(L) to the true solution of the above linear system. The
C convergence criteria for stopping the iteration is based on
C the size of the scaled norm of the residual R(L) = B -
C A*X(L). The actual stopping test is either:
C
C norm(SB*(B-A*X(L))) .le. TOL*norm(SB*B),
C
C for right preconditioning, or
C
C norm(SB*(M-inverse)*(B-A*X(L))) .le.
C TOL*norm(SB*(M-inverse)*B),
C
C for left preconditioning, where norm() denotes the euclidean
C norm, and TOL is a positive scalar less than one input by
C the user. If TOL equals zero when DSLUGM is called, then a
C default value of 500*(the smallest positive magnitude,
C machine epsilon) is used. If the scaling arrays SB and SX
C are used, then ideally they should be chosen so that the
C vectors SX*X(or SX*M*X) and SB*B have all their components
C approximately equal to one in magnitude. If one wants to
C use the same scaling in X and B, then SB and SX can be the
C same array in the calling program.
C
C The following is a list of the other routines and their
C functions used by GMRES:
C DGMRES Contains the matrix structure independent driver
C routine for GMRES.
C DPIGMR Contains the main iteration loop for GMRES.
C DORTH Orthogonalizes a new vector against older basis vects.
C DHEQR Computes a QR decomposition of a Hessenberg matrix.
C DHELS Solves a Hessenberg least-squares system, using QR
C factors.
C RLCALC Computes the scaled residual RL.
C XLCALC Computes the solution XL.
C ISDGMR User-replaceable stopping routine.
C
C The Sparse Linear Algebra Package (SLAP) utilizes two matrix
C data structures: 1) the SLAP Triad format or 2) the SLAP
C Column format. The user can hand this routine either
C of these data structures and SLAP will figure out which one
C is being used and act accordingly.
C
C =================== S L A P Triad format ===================
C This routine requires that the matrix A be stored in the
C SLAP Triad format. In this format only the non-zeros are
C stored. They may appear in *ANY* order. The user supplies
C three arrays of length NELT, where NELT is the number of
C non-zeros in the matrix: (IA(NELT), JA(NELT), A(NELT)). For
C each non-zero the user puts the row and column index of that
C matrix element in the IA and JA arrays. The value of the
C non-zero matrix element is placed in the corresponding
C location of the A array. This is an extremely easy data
C structure to generate. On the other hand it is not too
C efficient on vector computers for the iterative solution of
C linear systems. Hence, SLAP changes this input data
C structure to the SLAP Column format for the iteration (but
C does not change it back).
C
C Here is an example of the SLAP Triad storage format for a
C 5x5 Matrix. Recall that the entries may appear in any order.
C
C 5x5 Matrix SLAP Triad format for 5x5 matrix on left.
C 1 2 3 4 5 6 7 8 9 10 11
C |11 12 0 0 15| A: 51 12 11 33 15 53 55 22 35 44 21
C |21 22 0 0 0| IA: 5 1 1 3 1 5 5 2 3 4 2
C | 0 0 33 0 35| JA: 1 2 1 3 5 3 5 2 5 4 1
C | 0 0 0 44 0|
C |51 0 53 0 55|
C
C =================== S L A P Column format ==================
C This routine requires that the matrix A be stored in the
C SLAP Column format. In this format the non-zeros are stored
C counting down columns (except for the diagonal entry, which
C must appear first in each "column") and are stored in the
C double precision array A. In other words, for each column
C in the matrix put the diagonal entry in A. Then put in the
C other non-zero elements going down the column (except the
C diagonal) in order. The IA array holds the row index for
C each non-zero. The JA array holds the offsets into the IA,
C A arrays for the beginning of each column. That is,
C IA(JA(ICOL)), A(JA(ICOL)) points to the beginning of the
C ICOL-th column in IA and A. IA(JA(ICOL+1)-1),
C A(JA(ICOL+1)-1) points to the end of the ICOL-th column.
C Note that we always have JA(N+1) = NELT+1, where N is the
C number of columns in the matrix and NELT is the number of
C non-zeros in the matrix.
C
C Here is an example of the SLAP Column storage format for a
C 5x5 Matrix (in the A and IA arrays '|' denotes the end of a
C column):
C
C 5x5 Matrix SLAP Column format for 5x5 matrix on left.
C 1 2 3 4 5 6 7 8 9 10 11
C |11 12 0 0 15| A: 11 21 51 | 22 12 | 33 53 | 44 | 55 15 35
C |21 22 0 0 0| IA: 1 2 5 | 2 1 | 3 5 | 4 | 5 1 3
C | 0 0 33 0 35| JA: 1 4 6 8 9 12
C | 0 0 0 44 0|
C |51 0 53 0 55|
C
C *Precision: Double Precision
C *Side Effects:
C The SLAP Triad format (IA, JA, A) is modified internally to be
C the SLAP Column format. See above.
C***REFERENCES 1. Peter N. Brown and A. C. Hindmarsh,
C "Reduced Storage Matrix Methods In Stiff ODE
C Systems," LLNL report UCRL-95088, Rev. 1,
C June 1987.
C***ROUTINES CALLED DS2Y, DCHKW, DSILUS, DGMRES, DSMV, DSLUI
C***END PROLOGUE DSLUGM
C The following is for optimized compilation on LLNL/LTSS Crays.
CLLL. OPTIMIZE
      IMPLICIT DOUBLE PRECISION(A-H,O-Z)
      INTEGER N, NELT, IA(NELT), JA(NELT), ISYM, NSAVE, ITOL
      INTEGER ITMAX, ITER, IERR, IUNIT, LENW, LENIW, IWORK(LENIW)
      DOUBLE PRECISION B(N), X(N), A(NELT), TOL, ERR, RWORK(LENW)
      EXTERNAL DSMV, DSLUI
      PARAMETER (LOCRB=1, LOCIB=11)
C
C Change the SLAP input matrix IA, JA, A to SLAP-Column format.
C***FIRST EXECUTABLE STATEMENT DSLUGM
      IERR = 0
      ERR = 0.0
C Reject illegal NSAVE values up front (the workspace layout
C below assumes NSAVE .ge. 2).
      IF( NSAVE.LE.1 ) THEN
         IERR = 3
         RETURN
      ENDIF
      CALL DS2Y( N, NELT, IA, JA, A, ISYM )
C
C Count number of Non-Zero elements preconditioner ILU matrix.
C Then set up the work arrays. We assume MAXL=KMP=NSAVE.
C NL counts strictly-lower-triangle entries, NU the upper
C triangle plus diagonal; a symmetric-storage entry (ISYM.NE.0)
C below the diagonal is counted in both.
      NL = 0
      NU = 0
      DO 20 ICOL = 1, N
C Don't count diagonal.
         JBGN = JA(ICOL)+1
         JEND = JA(ICOL+1)-1
         IF( JBGN.LE.JEND ) THEN
CVD$ NOVECTOR
            DO 10 J = JBGN, JEND
               IF( IA(J).GT.ICOL ) THEN
                  NL = NL + 1
                  IF( ISYM.NE.0 ) NU = NU + 1
               ELSE
                  NU = NU + 1
               ENDIF
 10         CONTINUE
         ENDIF
 20   CONTINUE
C
C Integer workspace layout: DGMRES control block, then the
C row/column index arrays for the L and U factors.
      LOCIGW = LOCIB
      LOCIL = LOCIGW + 20
      LOCJL = LOCIL + N+1
      LOCIU = LOCJL + NL
      LOCJU = LOCIU + NU
      LOCNR = LOCJU + N+1
      LOCNC = LOCNR + N
      LOCIW = LOCNC + N
C
C Real workspace layout: L factor, inverse diagonal, U factor,
C then the DGMRES real work array RGWK.
      LOCL = LOCRB
      LOCDIN = LOCL + NL
      LOCU = LOCDIN + N
      LOCRGW = LOCU + NU
      LOCW = LOCRGW + 1+N*(NSAVE+6)+NSAVE*(NSAVE+3)
C
C Check the workspace allocations.
      CALL DCHKW( 'DSLUGM', LOCIW, LENIW, LOCW, LENW, IERR, ITER, ERR )
      IF( IERR.NE.0 ) RETURN
C
C Publish the factor pointers (read back by DSLUI) and the
C amounts of workspace actually used.
      IWORK(1) = LOCIL
      IWORK(2) = LOCJL
      IWORK(3) = LOCIU
      IWORK(4) = LOCJU
      IWORK(5) = LOCL
      IWORK(6) = LOCDIN
      IWORK(7) = LOCU
      IWORK(9) = LOCIW
      IWORK(10) = LOCW
C
C Compute the Incomplete LU decomposition.
      CALL DSILUS( N, NELT, IA, JA, A, ISYM, NL, IWORK(LOCIL),
     $ IWORK(LOCJL), RWORK(LOCL), RWORK(LOCDIN), NU, IWORK(LOCIU),
     $ IWORK(LOCJU), RWORK(LOCU), IWORK(LOCNR), IWORK(LOCNC) )
C
C Perform the Incomplete LU Preconditioned Generalized Minimum
C Residual iteration algorithm. The following DGMRES
C defaults are used MAXL = KMP = NSAVE, JSCAL = 0,
C JPRE = -1, NRMAX = ITMAX/NSAVE
      IWORK(LOCIGW ) = NSAVE
      IWORK(LOCIGW+1) = NSAVE
      IWORK(LOCIGW+2) = 0
      IWORK(LOCIGW+3) = -1
      IWORK(LOCIGW+4) = ITMAX/NSAVE
C Force the natural stopping criterion (ITOL=0) inside DGMRES.
      MYITOL = 0
C
      CALL DGMRES( N, B, X, NELT, IA, JA, A, ISYM, DSMV, DSLUI,
     $ MYITOL, TOL, ITMAX, ITER, ERR, IERR, IUNIT, RWORK, RWORK,
     $ RWORK(LOCRGW), LENW-LOCRGW, IWORK(LOCIGW), 20,
     $ RWORK, IWORK )
C
      IF( ITER.GT.ITMAX ) IERR = 2
      RETURN
C------------- LAST LINE OF DSLUGM FOLLOWS ----------------------------
      END
*DECK DHELS
SUBROUTINE DHELS(A, LDA, N, Q, B)
C***BEGIN PROLOGUE DHEQR
C***DATE WRITTEN 890404 (YYMMDD)
C***REVISION DATE 890404 (YYMMDD)
C***CATEGORY NO. D2A4, D2B4
C***KEYWORDS LIBRARY=SLATEC(SLAP),
C TYPE=DOUBLE PRECISION(DHEQR-D),
C Non-Symmetric Linear system, Sparse,
C Iterative Precondition, Generalized Minimum Residual
C***AUTHOR Brown, Peter, (LLNL), brown@lll-crg.llnl.gov
C Hindmarsh, Alan, (LLNL), alanh@lll-crg.llnl.gov
C Seager, Mark K., (LLNL), seager@lll-crg.llnl.gov
C Lawrence Livermore National Laboratory
C PO BOX 808, L-300
C Livermore, CA 94550 (415) 423-3141
C***PURPOSE Internal routine for DGMRES.
C***DESCRIPTION
C This routine is extraced from the LINPACK routine SGESL with
C changes due to the fact that A is an upper Hessenberg
C matrix.
C
C DHELS solves the least squares problem:
C
C MIN(B-A*X,B-A*X)
C
C using the factors computed by DHEQR.
C
C *Usage:
C INTEGER LDA, N
C DOUBLE PRECISION A(LDA,1), B(1), Q(1)
C
C CALL DHELS(A, LDA, N, Q, B)
C
C *Arguments:
C A :IN Double Precision A(LDA,N)
C The output from DHEQR which contains the upper
C triangular factor R in the QR decomposition of A.
C LDA :IN Integer
C The leading dimension of the array A.
C N :IN Integer
C A is originally an (N+1) by N matrix.
C Q :IN Double Precision Q(2*N)
C The coefficients of the N givens rotations
C used in the QR factorization of A.
C B :INOUT Double Precision B(N+1)
C On input, B is the right hand side vector.
C On output, B is the solution vector X.
C *See Also:
C DGMRES
C
C***ROUTINES CALLED DAXPY
C***END PROLOGUE DHEQR
C The following is for optimized compilation on LLNL/LTSS Crays.
CLLL. OPTIMIZE
IMPLICIT DOUBLE PRECISION(A-H,O-Z)
INTEGER LDA, N
DOUBLE PRECISION A(LDA,1), B(1), Q(1)
C
C Local Variables.
C
INTEGER IQ, K, KB, KP1
DOUBLE PRECISION C, S, T, T1, T2
C
C minimize(B-A*X,B-A*X). First form Q*B.
C
DO 20 K = 1, N
KP1 = K + 1
IQ = 2*(K-1) + 1
C = Q(IQ)
S = Q(IQ+1)
T1 = B(K)
T2 = B(KP1)
B(K) = C*T1 - S*T2
B(KP1) = S*T1 + C*T2
20 CONTINUE
C
C Now solve R*X = Q*B.
C
DO 40 KB = 1, N
K = N + 1 - KB
B(K) = B(K)/A(K,K)
T = -B(K)
CALL DAXPY(K-1, T, A(1,K), 1, B(1), 1)
40 CONTINUE
RETURN
C------------- LAST LINE OF DHELS FOLLOWS ----------------------------
END
*DECK DHEQR
      SUBROUTINE DHEQR(A, LDA, N, Q, INFO, IJOB)
C***BEGIN PROLOGUE DHEQR
C***DATE WRITTEN 890404 (YYMMDD)
C***REVISION DATE 890404 (YYMMDD)
C***CATEGORY NO. D2A4, D2B4
C***KEYWORDS LIBRARY=SLATEC(SLAP),
C             TYPE=DOUBLE PRECISION(DHEQR-D),
C             Non-Symmetric Linear system, Sparse,
C             Iterative Precondition, Generalized Minimum Residual
C***AUTHOR Brown, Peter, (LLNL), brown@lll-crg.llnl.gov
C          Hindmarsh, Alan, (LLNL), alanh@lll-crg.llnl.gov
C          Seager, Mark K., (LLNL), seager@lll-crg.llnl.gov
C          Lawrence Livermore National Laboratory
C          PO BOX 808, L-300
C          Livermore, CA 94550 (415) 423-3141
C***PURPOSE Internal routine for DGMRES.
C***DESCRIPTION
C        This routine performs a QR decomposition of an upper
C        Hessenberg matrix A using Givens rotations. There are two
C        options available: 1) Performing a fresh decomposition 2)
C        updating the QR factors by adding a row and a column to the
C        matrix A.
C
C *Usage:
C      INTEGER LDA, N, INFO, IJOB
C      DOUBLE PRECISION A(LDA,1), Q(1)
C
C      CALL DHEQR(A, LDA, N, Q, INFO, IJOB)
C
C *Arguments:
C A      :INOUT  Double Precision A(LDA,N)
C         On input, the matrix to be decomposed.
C         On output, the upper triangular matrix R.
C         The factorization can be written Q*A = R, where
C         Q is a product of Givens rotations and R is upper
C         triangular.
C LDA    :IN     Integer
C         The leading dimension of the array A.
C N      :IN     Integer
C         A is an (N+1) by N Hessenberg matrix.
C IJOB   :IN     Integer
C         = 1     means that a fresh decomposition of the
C                 matrix A is desired.
C         .ge. 2  means that the current decomposition of A
C                 will be updated by the addition of a row
C                 and a column.
C Q      :OUT    Double Precision Q(2*N)
C         The factors c and s of each Givens rotation used
C         in decomposing A.
C         Rotation K is stored as Q(2*K-1) = c, Q(2*K) = s.
C INFO   :OUT    Integer
C         = 0 normal value.
C         = K if A(K,K) .eq. 0.0 . This is not an error
C           condition for this subroutine, but it does
C           indicate that DHELS will divide by zero
C           if called.
C
C *See Also:
C         DGMRES
C
C***ROUTINES CALLED (NONE)
C***END PROLOGUE DHEQR
C The following is for optimized compilation on LLNL/LTSS Crays.
CLLL. OPTIMIZE
      IMPLICIT DOUBLE PRECISION(A-H,O-Z)
      INTEGER LDA, N, INFO, IJOB
      DOUBLE PRECISION A(LDA,*), Q(*)
C
C         Local Variables.
C
      INTEGER I, IQ, J, K, KM1, KP1, NM1
      DOUBLE PRECISION C, S, T, T1, T2
C
C***FIRST EXECUTABLE STATEMENT DHEQR
C         IJOB .ge. 2 selects the rank-one update path at label 70.
      IF (IJOB .GT. 1) GO TO 70
C -------------------------------------------------------------------
C         A new factorization is desired.
C -------------------------------------------------------------------
C         QR decomposition without pivoting.
C
      INFO = 0
      DO 60 K = 1, N
         KM1 = K - 1
         KP1 = K + 1
C
C           Compute K-th column of R.
C           First, multiply the K-th column of a by the previous
C           K-1 Givens rotations.
C
         IF (KM1 .LT. 1) GO TO 20
         DO 10 J = 1, KM1
            I = 2*(J-1) + 1
            T1 = A(J,K)
            T2 = A(J+1,K)
            C = Q(I)
            S = Q(I+1)
            A(J,K) = C*T1 - S*T2
            A(J+1,K) = S*T1 + C*T2
 10      CONTINUE
C
C           Compute Givens components C and S.
C           The branch on which of |T1|, |T2| is larger avoids
C           overflow in the intermediate quotient T.
C
 20      CONTINUE
         IQ = 2*KM1 + 1
         T1 = A(K,K)
         T2 = A(KP1,K)
         IF( T2.EQ.0.0D0 ) THEN
            C = 1.0D0
            S = 0.0D0
         ELSEIF( ABS(T2).GE.ABS(T1) ) THEN
            T = T1/T2
            S = -1.0D0/DSQRT(1.0D0+T*T)
            C = -S*T
         ELSE
            T = T2/T1
            C = 1.0D0/DSQRT(1.0D0+T*T)
            S = -C*T
         ENDIF
         Q(IQ) = C
         Q(IQ+1) = S
         A(K,K) = C*T1 - S*T2
C           A zero diagonal is recorded (not fatal here, see INFO doc).
         IF( A(K,K).EQ.0.0D0 ) INFO = K
 60   CONTINUE
      RETURN
C -------------------------------------------------------------------
C         The old factorization of a will be updated. A row and a
C         column has been added to the matrix A. N by N-1 is now
C         the old size of the matrix.
C -------------------------------------------------------------------
 70   CONTINUE
      NM1 = N - 1
C -------------------------------------------------------------------
C         Multiply the new column by the N previous Givens rotations.
C -------------------------------------------------------------------
      DO 100 K = 1,NM1
         I = 2*(K-1) + 1
         T1 = A(K,N)
         T2 = A(K+1,N)
         C = Q(I)
         S = Q(I+1)
         A(K,N) = C*T1 - S*T2
         A(K+1,N) = S*T1 + C*T2
 100  CONTINUE
C -------------------------------------------------------------------
C         Complete update of decomposition by forming last Givens
C         rotation, and multiplying it times the column
C         vector(A(N,N),A(NP1,N)).
C -------------------------------------------------------------------
      INFO = 0
      T1 = A(N,N)
      T2 = A(N+1,N)
      IF ( T2.EQ.0.0D0 ) THEN
         C = 1.0D0
         S = 0.0D0
      ELSEIF( ABS(T2).GE.ABS(T1) ) THEN
         T = T1/T2
         S = -1.0D0/DSQRT(1.0D0+T*T)
         C = -S*T
      ELSE
         T = T2/T1
         C = 1.0D0/DSQRT(1.0D0+T*T)
         S = -C*T
      ENDIF
      IQ = 2*N - 1
      Q(IQ) = C
      Q(IQ+1) = S
      A(N,N) = C*T1 - S*T2
      IF (A(N,N) .EQ. 0.0D0) INFO = N
      RETURN
C------------- LAST LINE OF DHEQR FOLLOWS ----------------------------
      END
*DECK DORTH
      SUBROUTINE DORTH(VNEW, V, HES, N, LL, LDHES, KMP, SNORMW)
C***BEGIN PROLOGUE DORTH
C***DATE WRITTEN 890404 (YYMMDD)
C***REVISION DATE 890404 (YYMMDD)
C***CATEGORY NO. D2A4, D2B4
C***KEYWORDS LIBRARY=SLATEC(SLAP),
C             TYPE=DOUBLE PRECISION(DORTH-D),
C             Non-Symmetric Linear system, Sparse,
C             Iterative Precondition, Generalized Minimum Residual
C***AUTHOR Brown, Peter, (LLNL), brown@lll-crg.llnl.gov
C          Hindmarsh, Alan, (LLNL), alanh@lll-crg.llnl.gov
C          Seager, Mark K., (LLNL), seager@lll-crg.llnl.gov
C          Lawrence Livermore National Laboratory
C          PO BOX 808, L-300
C          Livermore, CA 94550 (415) 423-3141
C***PURPOSE Internal routine for DGMRES.
C***DESCRIPTION
C        This routine orthogonalizes the vector VNEW against the
C        previous KMP vectors in the V array. It uses a modified
C        Gram-Schmidt orthogonalization procedure with conditional
C        reorthogonalization.
C
C *Usage:
C      INTEGER N, LL, LDHES, KMP
C      DOUBLE PRECISION VNEW, V, HES, SNORMW
C      DIMENSION VNEW(1), V(N,1), HES(LDHES,1)
C
C      CALL DORTH(VNEW, V, HES, N, LL, LDHES, KMP, SNORMW)
C
C *Arguments:
C VNEW   :INOUT  Double Precision VNEW(N)
C         On input, the vector of length n containing a scaled
C         product of the jacobian and the vector v(*,ll).
C         On output, the new vector orthogonal to v(*,i0) to v(*,ll),
C         where i0 = max(1, ll-kmp+1).
C V      :IN     Double Precision V(N,1)
C         The n x ll array containing the previous ll
C         orthogonal vectors v(*,1) to v(*,ll).
C HES    :INOUT  Double Precision HES(LDHES,1)
C         On input, an LL x LL upper hessenberg matrix containing,
C         in HES(I,K), K.lt.LL, the scaled inner products of
C         A*V(*,K) and V(*,i).
C         On return, column LL of HES is filled in with
C         the scaled inner products of A*V(*,LL) and V(*,i).
C LDHES  :IN     Integer
C         The leading dimension of the HES array.
C N      :IN     Integer
C         The order of the matrix A, and the length of VNEW.
C LL     :IN     Integer
C         The current order of the matrix HES.
C KMP    :IN     Integer
C         The number of previous vectors the new vector VNEW
C         must be made orthogonal to (KMP .le. MAXL).
C SNORMW :OUT    DOUBLE PRECISION
C         Scalar containing the l-2 norm of VNEW.
C
C *See Also:
C         DGMRES
C
C***ROUTINES CALLED DAXPY
C***END PROLOGUE DORTH
C The following is for optimized compilation on LLNL/LTSS Crays.
CLLL. OPTIMIZE
      IMPLICIT DOUBLE PRECISION(A-H,O-Z)
      INTEGER N, LL, LDHES, KMP
      DOUBLE PRECISION VNEW, V, HES, SNORMW
      DIMENSION VNEW(1), V(N,1), HES(LDHES,1)
C
C         Internal variables.
C         NOTE(review): DNRM2 and DDOT are external BLAS functions;
C         they are typed DOUBLE PRECISION by the IMPLICIT statement.
C
      INTEGER I, I0
      DOUBLE PRECISION ARG, SUMDSQ, TEM, VNRM
C
C         Get norm of unaltered VNEW for later use.
C***FIRST EXECUTABLE STATEMENT DORTH
      VNRM = DNRM2(N, VNEW, 1)
C -------------------------------------------------------------------
C         Perform the modified Gram-Schmidt procedure on VNEW =A*V(LL).
C         Scaled inner products give new column of HES.
C         Projections of earlier vectors are subtracted from VNEW.
C -------------------------------------------------------------------
      I0 = MAX0(1,LL-KMP+1)
      DO 10 I = I0,LL
         HES(I,LL) = DDOT(N, V(1,I), 1, VNEW, 1)
         TEM = -HES(I,LL)
         CALL DAXPY(N, TEM, V(1,I), 1, VNEW, 1)
 10   CONTINUE
C -------------------------------------------------------------------
C         Compute SNORMW = norm of VNEW. If VNEW is small compared
C         to its input value (in norm), then reorthogonalize VNEW to
C         V(*,1) through V(*,LL). Correct if relative correction
C         exceeds 1000*(unit roundoff). Finally, correct SNORMW using
C         the dot products involved.
C         The .NE. test below is a deliberate floating-point trick:
C         it succeeds (no reorthogonalization) only when SNORMW is
C         not negligible relative to VNRM in working precision.
C -------------------------------------------------------------------
      SNORMW = DNRM2(N, VNEW, 1)
      IF (VNRM + 0.001D0*SNORMW .NE. VNRM) RETURN
      SUMDSQ = 0.0D0
      DO 30 I = I0,LL
C           TEM = -(correction); skip components too small to matter.
         TEM = -DDOT(N, V(1,I), 1, VNEW, 1)
         IF (HES(I,LL) + 0.001D0*TEM .EQ. HES(I,LL)) GO TO 30
         HES(I,LL) = HES(I,LL) - TEM
         CALL DAXPY(N, TEM, V(1,I), 1, VNEW, 1)
         SUMDSQ = SUMDSQ + TEM**2
 30   CONTINUE
      IF (SUMDSQ .EQ. 0.0D0) RETURN
C         Downdate the norm; MAX guards against a negative argument
C         caused by roundoff before the square root.
      ARG = MAX(0.0D0,SNORMW**2 - SUMDSQ)
      SNORMW = DSQRT(ARG)
C
      RETURN
C------------- LAST LINE OF DORTH FOLLOWS ----------------------------
      END
*DECK DPIGMR
      SUBROUTINE DPIGMR(N, R0, SR, SZ, JSCAL, MAXL, MAXLP1, KMP,
     $     NRSTS, JPRE, MATVEC, MSOLVE, NMSL, Z, V, HES, Q, LGMR,
     $     RPAR, IPAR, WK, DL, RHOL, NRMAX, B, BNRM, X, XL,
     $     ITOL, TOL, NELT, IA, JA, A, ISYM, IUNIT, IFLAG, ERR)
C***BEGIN PROLOGUE DPIGMR
C***DATE WRITTEN 890404 (YYMMDD)
C***REVISION DATE 890404 (YYMMDD)
C***CATEGORY NO. D2A4, D2B4
C***KEYWORDS LIBRARY=SLATEC(SLAP),
C             TYPE=DOUBLE PRECISION(DPIGMR-D),
C             Non-Symmetric Linear system, Sparse,
C             Iterative Precondition, Generalized Minimum Residual
C***AUTHOR Brown, Peter, (LLNL), brown@lll-crg.llnl.gov
C          Hindmarsh, Alan, (LLNL), alanh@lll-crg.llnl.gov
C          Seager, Mark K., (LLNL), seager@lll-crg.llnl.gov
C          Lawrence Livermore National Laboratory
C          PO BOX 808, L-300
C          Livermore, CA 94550 (415) 423-3141
C***PURPOSE Internal routine for DGMRES.
C***DESCRIPTION
C        This routine solves the linear system A * Z = R0 using a
C        scaled preconditioned version of the generalized minimum
C        residual method. An initial guess of Z = 0 is assumed.
C
C *Usage:
C      EXTERNAL MATVEC, MSOLVE
C      INTEGER N,MAXL,MAXLP1,KMP,JPRE,NMSL,LGMR,IPAR,IFLAG,JSCAL,NRSTS
C      INTEGER NRMAX,ITOL,NELT,ISYM
C      DOUBLE PRECISION R0,SR,SZ,Z,V,HES,Q,RPAR,WK,DL,RHOL,BNRM,TOL,
C     $     A,B,X, R0(1), SR(1), SZ(1), Z(1), V(N,1),
C     $     HES(MAXLP1,1), Q(1), RPAR(1), IPAR(1), WK(1), DL(1),
C     $     IA(NELT), JA(NELT), A(NELT), B(1), X(1), XL(1)
C
C      CALL DPIGMR(N, R0, SR, SZ, JSCAL, MAXL, MAXLP1, KMP,
C     $     NRSTS, JPRE, MATVEC, MSOLVE, NMSL, Z, V, HES, Q, LGMR,
C     $     RPAR, IPAR, WK, DL, RHOL, NRMAX, B, BNRM, X, XL,
C     $     ITOL, TOL, NELT, IA, JA, A, ISYM, IUNIT, IFLAG, ERR)
C
C *Arguments:
C R0     :IN     Double Precision R0(N)
C         R0 = the right hand side of the system A*Z = R0.
C         R0 is also used as work space when computing
C         the final approximation.
C         (R0 is the same as V(*,MAXL+1) in the call to DPIGMR.)
C SR     :IN     Double Precision SR(N)
C         SR is a vector of length N containing the nonzero
C         elements of the diagonal scaling matrix for R0.
C SZ     :IN     Double Precision SZ(N)
C         SZ is a vector of length N containing the nonzero
C         elements of the diagonal scaling matrix for Z.
C JSCAL  :IN     Integer
C         A flag indicating whether arrays SR and SZ are used.
C         JSCAL=0 means SR and SZ are not used and the
C                 algorithm will perform as if all
C                 SR(i) = 1 and SZ(i) = 1.
C         JSCAL=1 means only SZ is used, and the algorithm
C                 performs as if all SR(i) = 1.
C         JSCAL=2 means only SR is used, and the algorithm
C                 performs as if all SZ(i) = 1.
C         JSCAL=3 means both SR and SZ are used.
C N      :IN     Integer
C         The order of the matrix A, and the lengths
C         of the vectors SR, SZ, R0 and Z.
C MAXL   :IN     Integer
C         The maximum allowable order of the matrix H.
C MAXLP1 :IN     Integer
C         MAXPL1 = MAXL + 1, used for dynamic dimensioning of HES.
C KMP    :IN     Integer
C         The number of previous vectors the new vector VNEW
C         must be made orthogonal to. (KMP .le. MAXL)
C NRSTS  :IN     Integer
C         Counter for the number of restarts on the current
C         call to DGMRES. If NRSTS .gt. 0, then the residual
C         R0 is already scaled, and so scaling of it is
C         not necessary.
C JPRE   :IN     Integer
C         Preconditioner type flag.
C WK     :IN     Double Precision WK(N)
C         A double precision work array of length N used by routine
C         MATVEC
C         and MSOLVE.
C DL     :INOUT  Double Precision DL(N)
C         On input, a double precision work array of length N used for
C         calculation of the residual norm RHO when the method is
C         incomplete (KMP.lt.MAXL), and/or when using restarting.
C         On output, the scaled residual vector RL. It is only loaded
C         when performing restarts of the Krylov iteration.
C NRMAX  :IN     Integer
C         The maximum number of restarts of the Krylov iteration.
C         NRMAX .gt. 0 means restarting is active, while
C         NRMAX = 0 means restarting is not being used.
C B      :IN     Double Precision B(N)
C         The right hand side of the linear system A*X = B.
C BNRM   :IN     Double Precision
C         The scaled norm of b.
C X      :IN     Double Precision X(N)
C         The current approximate solution as of the last
C         restart.
C XL     :IN     Double Precision XL(N)
C         An array of length N used to hold the approximate
C         solution X(L) when ITOL=11.
C ITOL   :IN     Integer
C         A flag to indicate the type of convergence criterion
C         used. see the driver for its description.
C TOL    :IN     Double Precision
C         The tolerance on residuals R0-A*Z in scaled norm.
C NELT   :IN     Integer
C         The length of arrays IA, JA and A.
C IA     :IN     Integer IA(NELT)
C         An integer array of length NELT containing matrix data.
C         It is passed directly to the MATVEC and MSOLVE routines.
C JA     :IN     Integer JA(NELT)
C         An integer array of length NELT containing matrix data.
C         It is passed directly to the MATVEC and MSOLVE routines.
C A      :IN     Double Precision A(NELT)
C         A double precision array of length NELT containing matrix
C         data. It is passed directly to the MATVEC and MSOLVE routines.
C ISYM   :IN     Integer
C         A flag to indicate symmetric matrix storage.
C         If ISYM=0, all nonzero entries of the matrix are
C         stored. If ISYM=1, the matrix is symmetric and
C         only the upper or lower triangular part is stored.
C IUNIT  :IN     Integer
C         The i/o unit number for writing intermediate residual
C         norm values.
C Z      :OUT    Double Precision Z(N)
C         The final computed approximation to the solution
C         of the system A*Z = R0.
C LGMR   :OUT    Integer
C         The number of iterations performed and
C         the current order of the upper hessenberg
C         matrix HES.
C RPAR   :IN     Double Precision RPAR(*)
C         Double Precision work space passed directly to the MSOLVE
C         routine.
C IPAR   :IN     Integer IPAR(*)
C         Integer work space passed directly to the MSOLVE
C         routine.
C NMSL   :OUT    Integer
C         The number of calls to MSOLVE.
C V      :OUT    Double Precision V(N,MAXLP1)
C         The N by (LGMR+1) array containing the LGMR
C         orthogonal vectors V(*,1) to V(*,LGMR).
C HES    :OUT    Double Precision HES(MAXLP1,MAXL)
C         The upper triangular factor of the QR decomposition
C         of the (LGMR+1) by LGMR upper Hessenberg matrix whose
C         entries are the scaled inner-products of A*V(*,I)
C         and V(*,K).
C Q      :OUT    Double Precision Q(2*MAXL)
C         A double precision array of length 2*MAXL containing the
C         components of the Givens rotations used in the QR
C         decomposition of HES. It is loaded in DHEQR and used in
C         DHELS.
C RHOL   :OUT    Double Precision
C         A double precision scalar containing the norm of the final
C         residual.
C IFLAG  :OUT    Integer
C         An integer error flag..
C         0 means convergence in LGMR iterations, LGMR.le.MAXL.
C         1 means the convergence test did not pass in MAXL
C           iterations, but the residual norm is .lt. norm(R0),
C           and so Z is computed.
C         2 means the convergence test did not pass in MAXL
C           iterations, residual .ge. norm(R0), and Z = 0.
C ERR    :OUT    Double Precision.
C         Error estimate of error in final approximate solution, as
C         defined by ITOL.
C
C *See Also:
C         DGMRES
C
C***ROUTINES CALLED ISDGMR, MATVEC, MSOLVE, DORTH, DRLCAL, DHELS,
C                   DHEQR, DXLCAL, DAXPY, DCOPY, DSCAL,
C***END PROLOGUE DPIGMR
C The following is for optimized compilation on LLNL/LTSS Crays.
CLLL. OPTIMIZE
      IMPLICIT DOUBLE PRECISION(A-H,O-Z)
      EXTERNAL MATVEC, MSOLVE
      INTEGER N,MAXL,MAXLP1,KMP,JPRE,NMSL,LGMR,IFLAG,JSCAL,NRSTS
      INTEGER NRMAX,ITOL,NELT,ISYM
      DOUBLE PRECISION RHOL, BNRM, TOL
      DOUBLE PRECISION R0(*), SR(*), SZ(*), Z(*), V(N,*)
      DOUBLE PRECISION HES(MAXLP1,*), Q(*), RPAR(*), WK(*), DL(*)
      DOUBLE PRECISION A(NELT), B(*), X(*), XL(*)
      INTEGER IPAR(*), IA(NELT), JA(NELT)
C
C         Local variables.
C         NOTE(review): ITMAX, ITER and IUNIT begin with I-N and are
C         default INTEGER; ERR and the BLAS functions DNRM2/ISDGMR
C         follow the IMPLICIT statement / default typing rules.
C
      INTEGER I, INFO, IP1, I2, J, K, LL, LLP1
      DOUBLE PRECISION R0NRM,C,DLNRM,PROD,RHO,S,SNORMW,TEM
C
C         Zero out the z array.
C***FIRST EXECUTABLE STATEMENT DPIGMR
      DO 5 I = 1,N
         Z(I) = 0.0D0
 5    CONTINUE
C
      IFLAG = 0
      LGMR = 0
      NMSL = 0
C         Load ITMAX, the maximum number of iterations.
      ITMAX =(NRMAX+1)*MAXL
C -------------------------------------------------------------------
C         The initial residual is the vector R0.
C         Apply left precon. if JPRE < 0 and this is not a restart.
C         Apply scaling to R0 if JSCAL = 2 or 3.
C -------------------------------------------------------------------
      IF ((JPRE .LT. 0) .AND.(NRSTS .EQ. 0)) THEN
         CALL DCOPY(N, R0, 1, WK, 1)
         CALL MSOLVE(N, WK, R0, NELT, IA, JA, A, ISYM, RPAR, IPAR)
         NMSL = NMSL + 1
      ENDIF
      IF (((JSCAL.EQ.2) .OR.(JSCAL.EQ.3)) .AND.(NRSTS.EQ.0)) THEN
         DO 10 I = 1,N
            V(I,1) = R0(I)*SR(I)
 10      CONTINUE
      ELSE
         DO 20 I = 1,N
            V(I,1) = R0(I)
 20      CONTINUE
      ENDIF
      R0NRM = DNRM2(N, V, 1)
      ITER = NRSTS*MAXL
C
C         Call stopping routine ISDGMR.
C
      IF (ISDGMR(N, B, X, XL, NELT, IA, JA, A, ISYM, MSOLVE,
     $     NMSL, ITOL, TOL, ITMAX, ITER, ERR, IUNIT, V(1,1), Z, WK,
     $     RPAR, IPAR, R0NRM, BNRM, SR, SZ, JSCAL,
     $     KMP, LGMR, MAXL, MAXLP1, V, Q, SNORMW, PROD, R0NRM,
     $     HES, JPRE) .NE. 0) RETURN
C         Normalize V(*,1) so the Krylov basis starts with a unit
C         vector.
      TEM = 1.0D0/R0NRM
      CALL DSCAL(N, TEM, V(1,1), 1)
C
C         Zero out the HES array.
C
      DO 50 J = 1,MAXL
         DO 40 I = 1,MAXLP1
            HES(I,J) = 0.0D0
 40      CONTINUE
 50   CONTINUE
C -------------------------------------------------------------------
C         Main loop to compute the vectors V(*,2) to V(*,MAXL).
C         The running product PROD is needed for the convergence test.
C -------------------------------------------------------------------
      PROD = 1.0D0
      DO 90 LL = 1,MAXL
         LGMR = LL
C -------------------------------------------------------------------
C         Unscale the current V(LL) and store in WK. Call routine
C         MSOLVE to compute (M-inverse)*WK, where M is the
C         preconditioner matrix. Save the answer in Z. Call routine
C         MATVEC to compute VNEW = A*Z, where A is the system
C         matrix. Save the answer in V(LL+1). Scale V(LL+1). Call
C         routine DORTH to orthogonalize the new vector VNEW =
C         V(*,LL+1). Call routine DHEQR to update the factors of HES.
C -------------------------------------------------------------------
         IF ((JSCAL .EQ. 1) .OR.(JSCAL .EQ. 3)) THEN
            DO 60 I = 1,N
               WK(I) = V(I,LL)/SZ(I)
 60         CONTINUE
         ELSE
            CALL DCOPY(N, V(1,LL), 1, WK, 1)
         ENDIF
         IF (JPRE .GT. 0) THEN
C             Right preconditioning: solve M*Z = WK, then apply A.
            CALL MSOLVE(N, WK, Z, NELT, IA, JA, A, ISYM, RPAR, IPAR)
            NMSL = NMSL + 1
            CALL MATVEC(N, Z, V(1,LL+1), NELT, IA, JA, A, ISYM)
         ELSE
            CALL MATVEC(N, WK, V(1,LL+1), NELT, IA, JA, A, ISYM)
         ENDIF
         IF (JPRE .LT. 0) THEN
C             Left preconditioning: apply M-inverse after A.
            CALL DCOPY(N, V(1,LL+1), 1, WK, 1)
            CALL MSOLVE(N,WK,V(1,LL+1),NELT,IA,JA,A,ISYM,RPAR,IPAR)
            NMSL = NMSL + 1
         ENDIF
         IF ((JSCAL .EQ. 2) .OR.(JSCAL .EQ. 3)) THEN
            DO 65 I = 1,N
               V(I,LL+1) = V(I,LL+1)*SR(I)
 65         CONTINUE
         ENDIF
         CALL DORTH(V(1,LL+1), V, HES, N, LL, MAXLP1, KMP, SNORMW)
         HES(LL+1,LL) = SNORMW
         CALL DHEQR(HES, MAXLP1, LL, Q, INFO, LL)
C           INFO = LL means HES(LL,LL) = 0; DHELS would divide by
C           zero, so bail out to the failure exit at label 120.
         IF (INFO .EQ. LL) GO TO 120
C -------------------------------------------------------------------
C         Update RHO, the estimate of the norm of the residual R0-A*ZL.
C         If KMP < MAXL, then the vectors V(*,1),...,V(*,LL+1) are not
C         necessarily orthogonal for LL > KMP. The vector DL must then
C         be computed, and its norm used in the calculation of RHO.
C -------------------------------------------------------------------
C         Q(2*LL) is the sine of the LL-th Givens rotation (see DHEQR).
         PROD = PROD*Q(2*LL)
         RHO = ABS(PROD*R0NRM)
         IF ((LL.GT.KMP) .AND.(KMP.LT.MAXL)) THEN
            IF (LL .EQ. KMP+1) THEN
C               First time past KMP: build DL from scratch.
               CALL DCOPY(N, V(1,1), 1, DL, 1)
               DO 75 I = 1,KMP
                  IP1 = I + 1
                  I2 = I*2
                  S = Q(I2)
                  C = Q(I2-1)
                  DO 70 K = 1,N
                     DL(K) = S*DL(K) + C*V(K,IP1)
 70               CONTINUE
 75            CONTINUE
            ENDIF
            S = Q(2*LL)
            C = Q(2*LL-1)/SNORMW
            LLP1 = LL + 1
            DO 80 K = 1,N
               DL(K) = S*DL(K) + C*V(K,LLP1)
 80         CONTINUE
            DLNRM = DNRM2(N, DL, 1)
            RHO = RHO*DLNRM
         ENDIF
         RHOL = RHO
C -------------------------------------------------------------------
C         Test for convergence. If passed, compute approximation ZL.
C         If failed and LL < MAXL, then continue iterating.
C -------------------------------------------------------------------
         ITER = NRSTS*MAXL + LGMR
         IF (ISDGMR(N, B, X, XL, NELT, IA, JA, A, ISYM, MSOLVE,
     $        NMSL, ITOL, TOL, ITMAX, ITER, ERR, IUNIT, DL, Z, WK,
     $        RPAR, IPAR, RHOL, BNRM, SR, SZ, JSCAL,
     $        KMP, LGMR, MAXL, MAXLP1, V, Q, SNORMW, PROD, R0NRM,
     $        HES, JPRE) .NE. 0) GO TO 200
         IF (LL .EQ. MAXL) GO TO 100
C -------------------------------------------------------------------
C         Rescale so that the norm of V(1,LL+1) is one.
C -------------------------------------------------------------------
         TEM = 1.0D0/SNORMW
         CALL DSCAL(N, TEM, V(1,LL+1), 1)
 90   CONTINUE
 100  CONTINUE
      IF (RHO .LT. R0NRM) GO TO 150
 120  CONTINUE
      IFLAG = 2
C
C         Load approximate solution with zero.
C
      DO 130 I = 1,N
         Z(I) = 0.D0
 130  CONTINUE
      RETURN
 150  IFLAG = 1
C
C         Tolerance not met, but residual norm reduced.
C
      IF (NRMAX .GT. 0) THEN
C
C        If performing restarting (NRMAX > 0) calculate the residual
C        vector RL and store it in the DL array. If the incomplete
C        version is being used (KMP < MAXL) then DL has already been
C        calculated up to a scaling factor. Use DRLCAL to calculate
C        the scaled residual vector.
C
         CALL DRLCAL(N, KMP, MAXL, MAXL, V, Q, DL, SNORMW, PROD,
     $        R0NRM)
      ENDIF
C -------------------------------------------------------------------
C         Compute the approximation ZL to the solution. Since the
C         vector Z was used as work space, and the initial guess
C         of the linear iteration is zero, Z must be reset to zero.
C -------------------------------------------------------------------
 200  CONTINUE
      LL = LGMR
      LLP1 = LL + 1
C         R0(1:LLP1) is reused as the right hand side of the
C         least-squares problem solved by DHELS.
      DO 210 K = 1,LLP1
         R0(K) = 0.0D0
 210  CONTINUE
      R0(1) = R0NRM
      CALL DHELS(HES, MAXLP1, LL, Q, R0)
      DO 220 K = 1,N
         Z(K) = 0.0D0
 220  CONTINUE
C         Z = sum of R0(I)*V(*,I): the solution in the Krylov basis.
      DO 230 I = 1,LL
         CALL DAXPY(N, R0(I), V(1,I), 1, Z, 1)
 230  CONTINUE
      IF ((JSCAL .EQ. 1) .OR.(JSCAL .EQ. 3)) THEN
         DO 240 I = 1,N
            Z(I) = Z(I)/SZ(I)
 240     CONTINUE
      ENDIF
      IF (JPRE .GT. 0) THEN
         CALL DCOPY(N, Z, 1, WK, 1)
         CALL MSOLVE(N, WK, Z, NELT, IA, JA, A, ISYM, RPAR, IPAR)
         NMSL = NMSL + 1
      ENDIF
      RETURN
C------------- LAST LINE OF DPIGMR FOLLOWS ----------------------------
      END
*DECK DRLCAL
      SUBROUTINE DRLCAL(N, KMP, LL, MAXL, V, Q, RL, SNORMW, PROD,
     $     R0NRM)
C***BEGIN PROLOGUE DRLCAL
C***DATE WRITTEN 890404 (YYMMDD)
C***REVISION DATE 890404 (YYMMDD)
C***CATEGORY NO. D2A4, D2B4
C***KEYWORDS LIBRARY=SLATEC(SLAP),
C             TYPE=DOUBLE PRECISION(DRLCAL-D),
C             Non-Symmetric Linear system, Sparse,
C             Iterative Precondition, Generalized Minimum Residual
C***AUTHOR Brown, Peter, (LLNL), brown@lll-crg.llnl.gov
C          Hindmarsh, Alan, (LLNL), alanh@lll-crg.llnl.gov
C          Seager, Mark K., (LLNL), seager@lll-crg.llnl.gov
C          Lawrence Livermore National Laboratory
C          PO BOX 808, L-300
C          Livermore, CA 94550 (415) 423-3141
C***PURPOSE Internal routine for DGMRES.
C***DESCRIPTION
C        This routine calculates the scaled residual RL from the
C        V(I)'s.
C *Usage:
C      INTEGER N, KMP, LL, MAXL
C      DOUBLE PRECISION SNORMW
C      DOUBLE PRECISION V(N,1), Q(1), RL(N)
C
C      CALL DRLCAL(N, KMP, LL, MAXL, V, Q, RL, SNORMW, PROD,
C     $     R0NRM)
C
C *Arguments:
C N      :IN     Integer
C         The order of the matrix A, and the lengths
C         of the vectors SR, SZ, R0 and Z.
C KMP    :IN     Integer
C         The number of previous V vectors the new vector VNEW
C         must be made orthogonal to. (KMP .le. MAXL)
C LL     :IN     Integer
C         The current dimension of the Krylov subspace.
C MAXL   :IN     Integer
C         The maximum dimension of the Krylov subspace.
C Q      :IN     Double Precision Q(2*MAXL)
C         A double precision array of length 2*MAXL containing the
C         components of the Givens rotations used in the QR
C         decomposition of HES. It is loaded in DHEQR and used in
C         DHELS.
C PROD   :IN     Double Precision
C         The product s1*s2*...*sl = the product of the sines of the
C         givens rotations used in the QR factorization of
C         the hessenberg matrix HES.
C R0NRM  :IN     Double Precision
C         The scaled norm of initial residual R0.
C RL     :OUT    Double Precision RL(N)
C         The residual vector RL. This is either SB*(B-A*XL) if
C         not preconditioning or preconditioning on the right,
C         or SB*(M-inverse)*(B-A*XL) if preconditioning on the
C         left.
C
C *See Also:
C         DGMRES
C
C***ROUTINES CALLED DCOPY, DSCAL
C***END PROLOGUE DRLCAL
C The following is for optimized compilation on LLNL/LTSS Crays.
CLLL. OPTIMIZE
      IMPLICIT DOUBLE PRECISION(A-H,O-Z)
      INTEGER N, KMP, LL, MAXL
      DOUBLE PRECISION SNORMW
      DOUBLE PRECISION V(N,*), Q(*), RL(N)
C
C         Internal Variables.
C         NOTE(review): LLM1, LLP1 are default INTEGER and S, C, TEM,
C         PROD, R0NRM are DOUBLE PRECISION via the IMPLICIT statement.
C
      INTEGER I, IP1, I2, K
C
C***FIRST EXECUTABLE STATEMENT DRLCAL
      IF (KMP .EQ. MAXL) THEN
C
C         Complete (KMP = MAXL) case: calculate RL from scratch by
C         accumulating the Givens rotations into V(*,1).
C         Start by copying V(*,1) into RL.
C
         CALL DCOPY(N, V(1,1), 1, RL, 1)
         LLM1 = LL - 1
         DO 20 I = 1,LLM1
            IP1 = I + 1
            I2 = I*2
            S = Q(I2)
            C = Q(I2-1)
            DO 10 K = 1,N
               RL(K) = S*RL(K) + C*V(K,IP1)
 10         CONTINUE
 20      CONTINUE
C           Last rotation: the cosine is divided by SNORMW because
C           V(*,LL+1) has not been normalized yet.
         S = Q(2*LL)
         C = Q(2*LL-1)/SNORMW
         LLP1 = LL + 1
         DO 30 K = 1,N
            RL(K) = S*RL(K) + C*V(K,LLP1)
 30      CONTINUE
      ENDIF
C
C         When KMP < MAXL, RL vector already partially calculated.
C         Scale RL by R0NRM*PROD to obtain the residual RL.
C
      TEM = R0NRM*PROD
      CALL DSCAL(N, TEM, RL, 1)
      RETURN
C------------- LAST LINE OF DRLCAL FOLLOWS ----------------------------
      END
*DECK DXLCAL
      SUBROUTINE DXLCAL(N, LGMR, X, XL, ZL, HES, MAXLP1, Q, V, R0NRM,
     $     WK, SZ, JSCAL, JPRE, MSOLVE, NMSL, RPAR, IPAR,
     $     NELT, IA, JA, A, ISYM)
C***BEGIN PROLOGUE DXLCAL
C***DATE WRITTEN 890404 (YYMMDD)
C***REVISION DATE 890404 (YYMMDD)
C***CATEGORY NO. D2A4, D2B4
C***KEYWORDS LIBRARY=SLATEC(SLAP),
C             TYPE=DOUBLE PRECISION(DXLCAL-D),
C             Non-Symmetric Linear system, Sparse,
C             Iterative Precondition, Generalized Minimum Residual
C***AUTHOR Brown, Peter, (LLNL), brown@lll-crg.llnl.gov
C          Hindmarsh, Alan, (LLNL), alanh@lll-crg.llnl.gov
C          Seager, Mark K., (LLNL), seager@lll-crg.llnl.gov
C          Lawrence Livermore National Laboratory
C          PO BOX 808, L-300
C          Livermore, CA 94550 (415) 423-3141
C***PURPOSE Internal routine for DGMRES.
C***DESCRIPTION
C        This routine computes the solution XL, the current DGMRES
C        iterate, given the V(I)'s and the QR factorization of the
C        Hessenberg matrix HES. This routine is only called when
C        ITOL=11.
C
C *Usage:
C      EXTERNAL MSOLVE
C      DOUBLE PRECISION R0NRM
C      DOUBLE PRECISION X(N), XL(N), ZL(N), HES(MAXLP1,1), Q(1)
C      DOUBLE PRECISION V(N,1), WK(N), SZ(1), RPAR(1)
C      DOUBLE PRECISION A(NELT)
C      INTEGER N, LGMR, MAXLP1, JSCAL, JPRE, IPAR, NMSL, NELT, ISYM
C      INTEGER IPAR(1), IA(NELT), JA(NELT)
C
C      CALL DXLCAL(N, LGMR, X, XL, ZL, HES, MAXLP1, Q, V, R0NRM,
C     $     WK, SZ, JSCAL, JPRE, MSOLVE, NMSL, RPAR, IPAR,
C     $     NELT, IA, JA, A, ISYM)
C
C *Arguments:
C N      :IN     Integer
C         The order of the matrix A, and the lengths
C         of the vectors SR, SZ, R0 and Z.
C LGMR   :IN     Integer
C         The number of iterations performed and
C         the current order of the upper Hessenberg
C         matrix HES.
C X      :IN     Double Precision X(N)
C         The current approximate solution as of the last restart.
C ZL     :IN     Double Precision ZL(N)
C         An array of length N used to hold the approximate
C         solution Z(L).
C SZ     :IN     Double Precision SZ(N)
C         A vector of length N containing the nonzero
C         elements of the diagonal scaling matrix for Z.
C JSCAL  :IN     Integer
C         A flag indicating whether arrays SR and SZ are used.
C         JSCAL=0 means SR and SZ are not used and the
C                 algorithm will perform as if all
C                 SR(i) = 1 and SZ(i) = 1.
C         JSCAL=1 means only SZ is used, and the algorithm
C                 performs as if all SR(i) = 1.
C         JSCAL=2 means only SR is used, and the algorithm
C                 performs as if all SZ(i) = 1.
C         JSCAL=3 means both SR and SZ are used.
C MAXLP1 :IN     Integer
C         MAXLP1 = MAXL + 1, used for dynamic dimensioning of HES.
C         MAXL is the maximum allowable order of the matrix HES.
C JPRE   :IN     Integer
C         The preconditioner type flag.
C WK     :IN     Double Precision WK(N)
C         A double precision work array of length N.
C NMSL   :IN     Integer
C         The number of calls to MSOLVE.
C V      :IN     Double Precision V(N,MAXLP1)
C         The N by(LGMR+1) array containing the LGMR
C         orthogonal vectors V(*,1) to V(*,LGMR).
C HES    :IN     Double Precision HES(MAXLP1,MAXL)
C         The upper triangular factor of the QR decomposition
C         of the (LGMR+1) by LGMR upper Hessenberg matrix whose
C         entries are the scaled inner-products of A*V(*,i) and V(*,k).
C Q      :IN     Double Precision Q(2*MAXL)
C         A double precision array of length 2*MAXL containing the
C         components of the givens rotations used in the QR
C         decomposition of HES. It is loaded in DHEQR.
C R0NRM  :IN     Double Precision
C         The scaled norm of the initial residual for the
C         current call to DPIGMR.
C RPAR   :IN     Double Precision RPAR(*)
C         Double Precision work space passed directly to the MSOLVE
C         routine.
C IPAR   :IN     Integer IPAR(*)
C         Integer work space passed directly to the MSOLVE
C         routine.
C NELT   :IN     Integer
C         The length of arrays IA, JA and A.
C IA     :IN     Integer IA(NELT)
C         An integer array of length NELT containing matrix data.
C         It is passed directly to the MATVEC and MSOLVE routines.
C JA     :IN     Integer JA(NELT)
C         An integer array of length NELT containing matrix data.
C         It is passed directly to the MATVEC and MSOLVE routines.
C A      :IN     Double Precision A(NELT)
C         A double precision array of length NELT containing matrix
C         data.
C         It is passed directly to the MATVEC and MSOLVE routines.
C ISYM   :IN     Integer
C         A flag to indicate symmetric matrix storage.
C         If ISYM=0, all nonzero entries of the matrix are
C         stored. If ISYM=1, the matrix is symmetric and
C         only the upper or lower triangular part is stored.
C XL     :OUT    Double Precision XL(N)
C         An array of length N used to hold the approximate
C         solution X(L).
C         Warning: XL and ZL are the same array in the calling routine.
C
C *See Also:
C         DGMRES
C
C***ROUTINES CALLED MSOLVE, DHELS, DAXPY, DCOPY, DSCAL
C***END PROLOGUE DXLCAL
C The following is for optimized compilation on LLNL/LTSS Crays.
CLLL. OPTIMIZE
      IMPLICIT DOUBLE PRECISION(A-H,O-Z)
      EXTERNAL MSOLVE
      INTEGER N, LGMR, MAXLP1, JSCAL, JPRE, IPAR(*), NMSL, NELT
      INTEGER IA(NELT), JA(NELT), ISYM
      DOUBLE PRECISION R0NRM, X(N), XL(N), ZL(N), HES(MAXLP1,*)
      DOUBLE PRECISION Q(*), V(N,*), WK(N), SZ(*), RPAR(*), A(NELT)
C
C         Internal variables.
C
      INTEGER I, K, LL, LLP1
C
C***FIRST EXECUTABLE STATEMENT DXLCAL
      LL = LGMR
      LLP1 = LL + 1
C         WK(1:LLP1) is loaded with the least-squares right hand side
C         (R0NRM, 0, ..., 0) and overwritten by DHELS with the
C         coefficients of ZL in the Krylov basis.
      DO 10 K = 1,LLP1
         WK(K) = 0.0D0
 10   CONTINUE
      WK(1) = R0NRM
      CALL DHELS(HES, MAXLP1, LL, Q, WK)
      DO 20 K = 1,N
         ZL(K) = 0.0D0
 20   CONTINUE
C         ZL = sum of WK(I)*V(*,I).
      DO 30 I = 1,LL
         CALL DAXPY(N, WK(I), V(1,I), 1, ZL, 1)
 30   CONTINUE
C         Undo the Z scaling (JSCAL = 1 or 3 means SZ was applied).
      IF ((JSCAL .EQ. 1) .OR.(JSCAL .EQ. 3)) THEN
         DO 40 K = 1,N
            ZL(K) = ZL(K)/SZ(K)
 40      CONTINUE
      ENDIF
C         Right preconditioning (JPRE > 0): ZL = (M-inverse)*ZL.
      IF (JPRE .GT. 0) THEN
         CALL DCOPY(N, ZL, 1, WK, 1)
         CALL MSOLVE(N, WK, ZL, NELT, IA, JA, A, ISYM, RPAR, IPAR)
         NMSL = NMSL + 1
      ENDIF
C         Calculate XL from X and ZL.
      DO 50 K = 1,N
         XL(K) = X(K) + ZL(K)
 50   CONTINUE
      RETURN
C------------- LAST LINE OF DXLCAL FOLLOWS ----------------------------
      END
*DECK ISDGMR
FUNCTION ISDGMR(N, B, X, XL, NELT, IA, JA, A, ISYM, MSOLVE,
$ NMSL, ITOL, TOL, ITMAX, ITER, ERR, IUNIT, R, Z, DZ,
$ RWORK, IWORK, RNRM, BNRM, SB, SX, JSCAL,
$ KMP, LGMR, MAXL, MAXLP1, V, Q, SNORMW, PROD, R0NRM,
$ HES, JPRE)
C***BEGIN PROLOGUE ISDGMR
C***DATE WRITTEN 890404 (YYMMDD)
C***REVISION DATE 890404 (YYMMDD)
C***CATEGORY NO. D2A4
C***KEYWORDS LIBRARY=SLATEC(SLAP),
C TYPE=INTEGER(ISDGMR-I)
C Linear system, Sparse, Stop Test, GMRES
C***AUTHOR Brown, Peter, (LLNL), brown@lll-crg.llnl.gov
C Hindmarsh, Alan, (LLNL), alanh@lll-crg.llnl.gov
C Seager, Mark K., (LLNL), seager@lll-crg.llnl.gov
C Lawrence Livermore National Laboratory
C PO BOX 808, L-300
C Livermore, CA 94550 (415) 423-3141
C***PURPOSE Generalized Minimum Residual Stop Test.
C This routine calculates the stop test for the Generalized
C Minimum RESidual (GMRES) iteration scheme. It returns a
C nonzero if the error estimate (the type of which is
C determined by ITOL) is less than the user specified
C         tolerance TOL.
C***DESCRIPTION
C *Usage:
C INTEGER KMP, LGMR, MAXL, MAXLP1, JPRE, NMSL
C DOUBLE PRECISION DXNRM, RNRM, R0NRM, SNORMW, SOLNRM, PROD
C DOUBLE PRECISION B(1), X(1), IA(1), JA(1), A(1), R(1), Z(1)
C DOUBLE PRECISION DZ(1), RWORK(1), IWORK(1), SB(1), SX(1)
C DOUBLE PRECISION Q(1), V(N,1), HES(MAXLP1,MAXL), XL(1)
C EXTERNAL MSOLVE
C
C IF (ISDGMR(N, B, X, XL, NELT, IA, JA, A, ISYM, MSOLVE,
C $ NMSL, ITOL, TOL, ITMAX, ITER, ERR, IUNIT, R, Z, DZ,
C $ RWORK, IWORK, RNRM, BNRM, SB, SX, JSCAL,
C $ KMP, LGMR, MAXL, MAXLP1, V, Q, SNORMW, PROD, R0NRM,
C $ HES, JPRE) .NE. 0) THEN ITERATION DONE
C
C *Arguments:
C N :IN Integer.
C Order of the Matrix.
C B :IN Double Precision B(N).
C Right-hand-side vector.
C X :IN Double Precision X(N).
C Approximate solution vector as of the last restart.
C XL :OUT Double Precision XL(N)
C An array of length N used to hold the approximate
C solution as of the current iteration. Only computed by
C this routine when ITOL=11.
C NELT :IN Integer.
C Number of Non-Zeros stored in A.
C IA :IN Integer IA(NELT).
C JA :IN Integer JA(NELT).
C A :IN Double Precision A(NELT).
C These arrays contain the matrix data structure for A.
C It could take any form. See "Description", in the DGMRES,
C DSLUGM and DSDGMR routines for more late breaking details...
C ISYM :IN Integer.
C Flag to indicate symmetric storage format.
C If ISYM=0, all nonzero entries of the matrix are stored.
C If ISYM=1, the matrix is symmetric, and only the upper
C or lower triangle of the matrix is stored.
C MSOLVE :EXT External.
C Name of a routine which solves a linear system Mz = r for z
C given r with the preconditioning matrix M (M is supplied via
C RWORK and IWORK arrays. The name of the MSOLVE routine must
C be declared external in the calling program. The calling
C         sequence to MSOLVE is:
C CALL MSOLVE(N, R, Z, NELT, IA, JA, A, ISYM, RWORK, IWORK)
C Where N is the number of unknowns, R is the right-hand side
C vector, and z is the solution upon return. RWORK is a
C double precision
C array that can be used to pass necessary preconditioning
C information and/or workspace to MSOLVE. IWORK is an integer
C work array for the same purpose as RWORK.
C NMSL :INOUT Integer.
C A counter for the number of calls to MSOLVE.
C ITOL :IN Integer.
C Flag to indicate the type of convergence criterion used.
C ITOL=0 Means the iteration stops when the test described
C below on the residual RL is satisfied. This is
C the "Natural Stopping Criteria" for this routine.
C Other values of ITOL cause extra, otherwise
C unnecessary, computation per iteration and are
C therefore much less efficient. See ISDGMR (the
C stop test routine) for more information.
C ITOL=1 Means the iteration stops when the first test
C described below on the residual RL is satisfied,
C and there is either right or no preconditioning
C being used.
C ITOL=2 Implies that the user is using left
C preconditioning, and the second stopping criterion
C below is used.
C ITOL=3 Means the iteration stops when the third test
C described below on Minv*Residual is satisfied, and
C                 there is either left or no preconditioning being
C used.
C ITOL=11 is often useful for checking and comparing
C different routines. For this case, the user must
C supply the "exact" solution or a very accurate
C approximation (one with an error much less than
C TOL) through a common block,
C COMMON /SOLBLK/ SOLN(1)
C if ITOL=11, iteration stops when the 2-norm of the
C difference between the iterative approximation and
C the user-supplied solution divided by the 2-norm
C of the user-supplied solution is less than TOL.
C Note that this requires the user to set up the
C "COMMON /SOLBLK/ SOLN(LENGTH)" in the calling
C routine. The routine with this declaration should
C be loaded before the stop test so that the correct
C length is used by the loader. This procedure is
C not standard Fortran and may not work correctly on
C your system (although it has worked on every
C system the authors have tried). If ITOL is not 11
C then this common block is indeed standard Fortran.
C TOL :IN Double Precision.
C Convergence criterion, as described above.
C ITMAX :IN Integer.
C Maximum number of iterations.
C ITER :IN Integer.
C The iteration for which to check for convergence.
C ERR :OUT Double Precision.
C Error estimate of error in final approximate solution, as
C defined by ITOL. Letting norm() denote the Euclidean
C norm, ERR is defined as follows..
C
C If ITOL=0, then ERR = norm(SB*(B-A*X(L)))/norm(SB*B),
C for right or no preconditioning, and
C ERR = norm(SB*(M-inverse)*(B-A*X(L)))/
C norm(SB*(M-inverse)*B),
C for left preconditioning.
C If ITOL=1, then ERR = norm(SB*(B-A*X(L)))/norm(SB*B),
C since right or no preconditioning
C being used.
C If ITOL=2, then ERR = norm(SB*(M-inverse)*(B-A*X(L)))/
C norm(SB*(M-inverse)*B),
C since left preconditioning is being
C used.
C If ITOL=3, then ERR = Max |(Minv*(B-A*X(L)))(i)/x(i)|
C i=1,n
C If ITOL=11, then ERR = norm(SB*(X(L)-SOLN))/norm(SB*SOLN).
C IUNIT :IN Integer.
C Unit number on which to write the error at each iteration,
C if this is desired for monitoring convergence. If unit
C number is 0, no writing will occur.
C R :INOUT Double Precision R(N).
C Work array used in calling routine. It contains
C information necessary to compute the residual RL = B-A*XL.
C Z :WORK Double Precision Z(N).
C         Workspace used to hold the pseudo-residual M z = r.
C DZ :WORK Double Precision DZ(N).
C Workspace used to hold temporary vector(s).
C RWORK :WORK Double Precision RWORK(USER DEFINABLE).
C Double Precision array that can be used by MSOLVE.
C IWORK :WORK Integer IWORK(USER DEFINABLE).
C Integer array that can be used by MSOLVE.
C RNRM :IN Double Precision.
C Norm of the current residual. Type of norm depends on ITOL.
C BNRM :IN Double Precision.
C Norm of the right hand side. Type of norm depends on ITOL.
C SB :IN Double Precision SB(N).
C Scaling vector for B.
C SX :IN Double Precision SX(N).
C Scaling vector for X.
C JSCAL :IN Integer.
C Flag indicating if scaling arrays SB and SX are being
C used in the calling routine DPIGMR.
C JSCAL=0 means SB and SX are not used and the
C algorithm will perform as if all
C SB(i) = 1 and SX(i) = 1.
C JSCAL=1 means only SX is used, and the algorithm
C performs as if all SB(i) = 1.
C JSCAL=2 means only SB is used, and the algorithm
C performs as if all SX(i) = 1.
C JSCAL=3 means both SB and SX are used.
C KMP :IN Integer
C The number of previous vectors the new vector VNEW
C must be made orthogonal to. (KMP .le. MAXL)
C LGMR :IN Integer
C The number of GMRES iterations performed on the current call
C to DPIGMR (i.e., # iterations since the last restart) and
C the current order of the upper hessenberg
C matrix HES.
C MAXL :IN Integer
C The maximum allowable order of the matrix H.
C MAXLP1 :IN Integer
C MAXPL1 = MAXL + 1, used for dynamic dimensioning of HES.
C V :IN Double Precision V(N,MAXLP1)
C The N by (LGMR+1) array containing the LGMR
C orthogonal vectors V(*,1) to V(*,LGMR).
C Q :IN Double Precision Q(2*MAXL)
C A double precision array of length 2*MAXL containing the
C components of the Givens rotations used in the QR
C decomposition
C of HES.
C SNORMW :IN Double Precision
C A scalar containing the scaled norm of VNEW before it
C is renormalized in DPIGMR.
C PROD :IN Double Precision
C The product s1*s2*...*sl = the product of the sines of the
C givens rotations used in the QR factorization of
C the hessenberg matrix HES.
C R0NRM :IN Double Precision
C The scaled norm of initial residual R0.
C HES :IN Double Precision HES(MAXLP1,MAXL)
C The upper triangular factor of the QR decomposition
C of the (LGMR+1) by LGMR upper Hessenberg matrix whose
C entries are the scaled inner-products of A*V(*,I)
C and V(*,K).
C JPRE :IN Integer
C Preconditioner type flag.
C
C *Description
C When using the GMRES solver, the preferred value for ITOL
C is 0. This is due to the fact that when ITOL=0 the norm of
C the residual required in the stopping test is obtained for
C free, since this value is already calculated in the GMRES
C algorithm. The variable RNRM contains the appropriate
C norm, which is equal to norm(SB*(RL - A*XL)) when right or
C no preconditioning is being performed, and equal to
C norm(SB*Minv*(RL - A*XL)) when using left preconditioning.
C Here, norm() is the Euclidean norm. Nonzero values of ITOL
C require additional work to calculate the actual scaled
C residual or its scaled/preconditioned form, and/or the
C approximate solution XL. Hence, these values of ITOL will
C not be as efficient as ITOL=0.
C
C***ROUTINES CALLED MSOLVE, DNRM2, DCOPY,
C***END PROLOG ISDGMR
IMPLICIT DOUBLE PRECISION(A-H,O-Z)
INTEGER KMP, LGMR, MAXL, MAXLP1, JPRE, NMSL
DOUBLE PRECISION DXNRM, RNRM, R0NRM, SNORMW, SOLNRM, PROD
DOUBLE PRECISION B(*), X(*), IA(*), JA(*), A(*), R(*), Z(*), DZ(*)
DOUBLE PRECISION RWORK(*), IWORK(*), SB(*), SX(*), Q(*), V(N,*)
DOUBLE PRECISION HES(MAXLP1,MAXL), XL(*)
EXTERNAL MSOLVE
COMMON /SOLBLK/ SOLN(1)
SAVE SOLNRM
C
C***FIRST EXECUTABLE STATEMENT ISDGMR
ISDGMR = 0
IF ( ITOL.EQ.0 ) THEN
C
C Use input from DPIGMR to determine if stop conditions are met.
C
ERR = RNRM/BNRM
ENDIF
IF ( (ITOL.GT.0) .AND. (ITOL.LE.3) ) THEN
C
C Use DRLCAL to calculate the scaled residual vector.
C Store answer in R.
C
IF ( LGMR.NE.0 ) CALL DRLCAL(N, KMP, LGMR, MAXL, V, Q, R,
$ SNORMW, PROD, R0NRM)
IF ( ITOL.LE.2 ) THEN
C err = ||Residual||/||RightHandSide||(2-Norms).
ERR = DNRM2(N, R, 1)/BNRM
C
C Unscale R by R0NRM*PROD when KMP < MAXL.
C
IF ( (KMP.LT.MAXL) .AND. (LGMR.NE.0) ) THEN
TEM = 1.0D0/(R0NRM*PROD)
CALL DSCAL(N, TEM, R, 1)
ENDIF
ELSEIF ( ITOL.EQ.3 ) THEN
C err = Max |(Minv*Residual)(i)/x(i)|
C When jpre .lt. 0, r already contains Minv*Residual.
IF ( JPRE.GT.0 ) THEN
CALL MSOLVE(N, R, DZ, NELT, IA, JA, A, ISYM, RWORK,
$ IWORK)
NMSL = NMSL + 1
ENDIF
C
C Unscale R by R0NRM*PROD when KMP < MAXL.
C
IF ( (KMP.LT.MAXL) .AND. (LGMR.NE.0) ) THEN
TEM = 1.0D0/(R0NRM*PROD)
CALL DSCAL(N, TEM, R, 1)
ENDIF
C
FUZZ = D1MACH(1)
IELMAX = 1
RATMAX = ABS(DZ(1))/MAX(ABS(X(1)),FUZZ)
DO 25 I = 2, N
RAT = ABS(DZ(I))/MAX(ABS(X(I)),FUZZ)
IF( RAT.GT.RATMAX ) THEN
IELMAX = I
RATMAX = RAT
ENDIF
25 CONTINUE
ERR = RATMAX
IF( RATMAX.LE.TOL ) ISDGMR = 1
IF( IUNIT.GT.0 ) WRITE(IUNIT,1020) ITER, IELMAX, RATMAX
RETURN
ENDIF
ENDIF
IF ( ITOL.EQ.11 ) THEN
C
C Use DXLCAL to calculate the approximate solution XL.
C
IF ( (LGMR.NE.0) .AND. (ITER.GT.0) ) THEN
CALL DXLCAL(N, LGMR, X, XL, XL, HES, MAXLP1, Q, V, R0NRM,
$ DZ, SX, JSCAL, JPRE, MSOLVE, NMSL, RWORK, IWORK,
$ NELT, IA, JA, A, ISYM)
ELSEIF ( ITER.EQ.0 ) THEN
C Copy X to XL to check if initial guess is good enough.
CALL DCOPY(N, X, 1, XL, 1)
ELSE
C Return since this is the first call to DPIGMR on a restart.
RETURN
ENDIF
C
IF ((JSCAL .EQ. 0) .OR.(JSCAL .EQ. 2)) THEN
C err = ||x-TrueSolution||/||TrueSolution||(2-Norms).
IF ( ITER.EQ.0 ) SOLNRM = DNRM2(N, SOLN, 1)
DO 30 I = 1, N
DZ(I) = XL(I) - SOLN(I)
30 CONTINUE
ERR = DNRM2(N, DZ, 1)/SOLNRM
ELSE
IF (ITER .EQ. 0) THEN
SOLNRM = 0.D0
DO 40 I = 1,N
SOLNRM = SOLNRM + (SX(I)*SOLN(I))**2
40 CONTINUE
SOLNRM = DSQRT(SOLNRM)
ENDIF
DXNRM = 0.D0
DO 50 I = 1,N
DXNRM = DXNRM + (SX(I)*(XL(I)-SOLN(I)))**2
50 CONTINUE
DXNRM = DSQRT(DXNRM)
C err = ||SX*(x-TrueSolution)||/||SX*TrueSolution|| (2-Norms).
ERR = DXNRM/SOLNRM
ENDIF
ENDIF
C
IF( IUNIT.NE.0 ) THEN
IF( ITER.EQ.0 ) THEN
WRITE(IUNIT,1000) N, ITOL, MAXL, KMP
ENDIF
WRITE(IUNIT,1010) ITER, RNRM/BNRM, ERR
ENDIF
IF ( ERR.LE.TOL ) ISDGMR = 1
C
RETURN
1000 FORMAT(' Generalized Minimum Residual(',I3,I3,') for ',
$ 'N, ITOL = ',I5, I5,
$ /' ITER',' Natral Err Est',' Error Estimate')
1010 FORMAT(1X,I4,1X,E16.7,1X,E16.7)
1020 FORMAT(1X,' ITER = ',I5, ' IELMAX = ',I5,
$ ' |R(IELMAX)/X(IELMAX)| = ',E12.5)
C------------- LAST LINE OF ISDGMR FOLLOWS ----------------------------
END
|
{"hexsha": "b159b60d497a9d0e2039e2d11ae407d924bf626e", "size": 118170, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "models/glc/cism/glimmer-cism/libglimmer-solve/SLAP/dgmres.f", "max_stars_repo_name": "fmyuan/clm-microbe", "max_stars_repo_head_hexsha": "9faee9ed7d6c092c4a9e4a207f32cbffab78b85c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2019-03-12T01:58:26.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-16T03:08:25.000Z", "max_issues_repo_path": "models/glc/cism/glimmer-cism/libglimmer-solve/SLAP/dgmres.f", "max_issues_repo_name": "fmyuan/clm-microbe", "max_issues_repo_head_hexsha": "9faee9ed7d6c092c4a9e4a207f32cbffab78b85c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-01-21T01:51:13.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-21T01:51:13.000Z", "max_forks_repo_path": "models/glc/cism/glimmer-cism/libglimmer-solve/SLAP/dgmres.f", "max_forks_repo_name": "email-clm/CLM-Microbe", "max_forks_repo_head_hexsha": "711c87faec2c1bfe2cea1a7ebd07e4373e82a184", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2016-03-08T21:04:52.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-16T03:29:35.000Z", "avg_line_length": 44.2252994012, "max_line_length": 72, "alphanum_fraction": 0.5658119658, "num_tokens": 35483}
|
import pandas as pd
import numpy as np
from sklearn.model_selection import GridSearchCV
class modelSelection:
    """Run GridSearchCV over a family of estimators and summarize results.

    Attributes:
        models: Mapping of estimator name -> unfitted estimator instance.
        params: Mapping of estimator name -> parameter grid for GridSearchCV.
        keys: Estimator names (the keys of ``models``).
        grid_searches: name -> fitted GridSearchCV (populated by ``fit``).
        best_params: name -> best parameter dict (populated by ``fit``).
        best_model: name -> estimator refit on all data (populated by ``fit_all``).
    """

    def __init__(self, models, params):
        # Fail fast if any estimator is missing its parameter grid.
        if not set(models.keys()).issubset(set(params.keys())):
            missing_params = list(set(models.keys()) - set(params.keys()))
            raise ValueError("Some estimators are missing parameters: %s" % missing_params)
        self.models = models
        self.params = params
        self.keys = models.keys()
        self.grid_searches = {}
        self.best_params = {}
        self.best_model = {}

    def fit(self, X, y, cv=5, n_jobs=-1, verbose=1, scoring=None, refit=False):
        """Run GridSearchCV for every estimator and record its best parameters.

        Args:
            X, y: Training data passed straight to ``GridSearchCV.fit``.
            cv: Cross-validation strategy (int fold count or splitter).
            n_jobs, verbose, scoring, refit: Forwarded to GridSearchCV.
        """
        for key in self.keys:
            print("Running GridSearchCV for %s." % key)
            model = self.models[key]
            params = self.params[key]
            gs = GridSearchCV(model, params, cv=cv, n_jobs=n_jobs,
                              verbose=verbose, scoring=scoring, refit=refit,
                              return_train_score=True)
            gs.fit(X, y)
            self.grid_searches[key] = gs
            self.best_params[key] = gs.best_params_

    def fit_all(self, X, y):
        """Refit each estimator on all data with its best params (requires ``fit``)."""
        for key in self.keys:
            print("Fitting all data for %s." % key)
            model = self.models[key]
            params = self.best_params[key]
            model.set_params(**params)
            model.fit(X, y)
            self.best_model[key] = model

    def score_summary(self, sort_by='mean_score'):
        """Build a DataFrame of per-candidate CV scores, sorted descending by ``sort_by``.

        Returns:
            pandas.DataFrame with one row per (estimator, parameter combination),
            score-summary columns first and parameter columns after.
        """
        def row(key, scores, params):
            d = {
                'estimator': key,
                'min_score': min(scores),
                'max_score': max(scores),
                'mean_score': np.mean(scores),
                'std_score': np.std(scores),
            }
            return pd.Series({**params, **d})

        rows = []
        for k in self.grid_searches:
            print(k)
            results = self.grid_searches[k].cv_results_
            params = results['params']
            # Count the splits from the result keys rather than reading
            # ``gs.cv``: cv may be a splitter object instead of an int.
            n_splits = sum(1 for key in results
                           if key.startswith('split') and key.endswith('_test_score'))
            scores = []
            for i in range(n_splits):
                r = results["split{}_test_score".format(i)]
                scores.append(r.reshape(len(params), 1))
            all_scores = np.hstack(scores)
            for p, s in zip(params, all_scores):
                rows.append(row(k, s, p))
        df = pd.concat(rows, axis=1).T.sort_values([sort_by], ascending=False)
        columns = ['estimator', 'min_score', 'mean_score', 'max_score', 'std_score']
        columns = columns + [c for c in df.columns if c not in columns]
        return df[columns]
|
{"hexsha": "d0cb214e861d0e7c72e5c382f774b6ca9a76f0c9", "size": 2615, "ext": "py", "lang": "Python", "max_stars_repo_path": "ml_libs/model_selection.py", "max_stars_repo_name": "SamTube405/MCAS", "max_stars_repo_head_hexsha": "bc6500ad509f798fa3d60f7c3f436e6b30eda5f8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ml_libs/model_selection.py", "max_issues_repo_name": "SamTube405/MCAS", "max_issues_repo_head_hexsha": "bc6500ad509f798fa3d60f7c3f436e6b30eda5f8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ml_libs/model_selection.py", "max_forks_repo_name": "SamTube405/MCAS", "max_forks_repo_head_hexsha": "bc6500ad509f798fa3d60f7c3f436e6b30eda5f8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.3571428571, "max_line_length": 91, "alphanum_fraction": 0.5411089866, "include": true, "reason": "import numpy", "num_tokens": 581}
|
# Build script for the gmmmc package: compiles the Cython/C++ extension
# and declares package metadata for setuptools.
from setuptools import setup
from setuptools import Extension
import numpy as np
import os
from Cython.Build import cythonize
# Cython translation unit implementing the fast GMM likelihood kernels.
sourcefiles = ['gmmmc/fastgmm/fast_likelihood.pyx']
# C++ extension module. NumPy headers are required for the ndarray C API;
# -fopenmp enables the parallelized likelihood loops at both compile and
# link time.
# NOTE(review): '-lc++' is a linker flag but appears in extra_compile_args
# rather than extra_link_args -- confirm it is intentional.
ext_modules = [Extension("fast_likelihood",
                         sourcefiles,
                         include_dirs = [np.get_include()],
                         extra_compile_args=['-O3', '-fopenmp', '-lc++'],
                         extra_link_args=['-fopenmp'],
                         language='c++')]
# Package metadata; cythonize() generates the C++ sources for ext_modules
# at build time.
setup(
    name='gmmmc',
    version='0.2.3.4',
    packages=['gmmmc', 'gmmmc.priors', 'gmmmc.fastgmm', 'gmmmc.proposals'],
    url='http://github.com/jeremy-ma/gmmmc',
    license='',
    author='Jeremy Ma',
    author_email='jeremy.ma@student.unsw.edu.au',
    description='Functions for drawing Monte Carlo samples from GMM parameter space',
    download_url='https://github.com/jeremy-ma/gmmmc/tarball/0.2',
    keywords = ['gmm', 'monte carlo', 'speech'],
    ext_modules= cythonize(ext_modules),
    install_requires=[
        'numpy',
        'scipy',
        'scikit-learn',
        'Cython'
    ]
)
|
{"hexsha": "0658e0552ad62024207a5ef200283e241ecebc62", "size": 1120, "ext": "py", "lang": "Python", "max_stars_repo_path": "setup.py", "max_stars_repo_name": "jeremy-ma/gmmmc", "max_stars_repo_head_hexsha": "fe2c58d5263e78ed360a84fcb85a5d6e08da0d70", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "setup.py", "max_issues_repo_name": "jeremy-ma/gmmmc", "max_issues_repo_head_hexsha": "fe2c58d5263e78ed360a84fcb85a5d6e08da0d70", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-05-12T16:34:04.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-12T16:34:04.000Z", "max_forks_repo_path": "setup.py", "max_forks_repo_name": "jeremy-ma/gmmmc", "max_forks_repo_head_hexsha": "fe2c58d5263e78ed360a84fcb85a5d6e08da0d70", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2016-11-07T06:38:31.000Z", "max_forks_repo_forks_event_max_datetime": "2016-11-07T06:38:31.000Z", "avg_line_length": 32.9411764706, "max_line_length": 85, "alphanum_fraction": 0.5866071429, "include": true, "reason": "import numpy", "num_tokens": 280}
|
[STATEMENT]
lemma r01_binary_expression_ex1:
assumes "0 < r" "r < 1"
shows "\<exists>i. r01_binary_expansion' r i = 1"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>i. r01_binary_expansion' r i = 1
[PROOF STEP]
proof (rule ccontr)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<nexists>i. r01_binary_expansion' r i = 1 \<Longrightarrow> False
[PROOF STEP]
assume "\<not> (\<exists>i. r01_binary_expansion' r i = 1)"
[PROOF STATE]
proof (state)
this:
\<nexists>i. r01_binary_expansion' r i = 1
goal (1 subgoal):
1. \<nexists>i. r01_binary_expansion' r i = 1 \<Longrightarrow> False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<nexists>i. r01_binary_expansion' r i = 1
[PROOF STEP]
have "\<And>i. r01_binary_expansion' r i = 0"
[PROOF STATE]
proof (prove)
using this:
\<nexists>i. r01_binary_expansion' r i = 1
goal (1 subgoal):
1. \<And>i. r01_binary_expansion' r i = 0
[PROOF STEP]
using real01_binary_expansion'_0or1[of r]
[PROOF STATE]
proof (prove)
using this:
\<nexists>i. r01_binary_expansion' r i = 1
r01_binary_expansion' r ?n \<in> {0, 1}
goal (1 subgoal):
1. \<And>i. r01_binary_expansion' r i = 0
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
r01_binary_expansion' r ?i = 0
goal (1 subgoal):
1. \<nexists>i. r01_binary_expansion' r i = 1 \<Longrightarrow> False
[PROOF STEP]
hence 1:"r01_binary_expression r = (\<lambda>n. \<Sum>i=0..n. 0)"
[PROOF STATE]
proof (prove)
using this:
r01_binary_expansion' r ?i = 0
goal (1 subgoal):
1. r01_binary_expression r = (\<lambda>n. \<Sum>i = 0..n. 0)
[PROOF STEP]
by(auto simp add: r01_binary_expression_def r01_binary_sum_def)
[PROOF STATE]
proof (state)
this:
r01_binary_expression r = (\<lambda>n. \<Sum>i = 0..n. 0)
goal (1 subgoal):
1. \<nexists>i. r01_binary_expansion' r i = 1 \<Longrightarrow> False
[PROOF STEP]
hence "LIMSEQ (r01_binary_expression r) 0"
[PROOF STATE]
proof (prove)
using this:
r01_binary_expression r = (\<lambda>n. \<Sum>i = 0..n. 0)
goal (1 subgoal):
1. r01_binary_expression r \<longlonglongrightarrow> 0
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
r01_binary_expression r \<longlonglongrightarrow> 0
goal (1 subgoal):
1. \<nexists>i. r01_binary_expansion' r i = 1 \<Longrightarrow> False
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
r01_binary_expression r \<longlonglongrightarrow> 0
goal (1 subgoal):
1. \<nexists>i. r01_binary_expansion' r i = 1 \<Longrightarrow> False
[PROOF STEP]
have "LIMSEQ (r01_binary_expression r) r"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. r01_binary_expression r \<longlonglongrightarrow> r
[PROOF STEP]
using r01_binary_expression_converges_to_r[of r] assms
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>0 < r; r < 1\<rbrakk> \<Longrightarrow> r01_binary_expression r \<longlonglongrightarrow> r
0 < r
r < 1
goal (1 subgoal):
1. r01_binary_expression r \<longlonglongrightarrow> r
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
r01_binary_expression r \<longlonglongrightarrow> r
goal (1 subgoal):
1. \<nexists>i. r01_binary_expansion' r i = 1 \<Longrightarrow> False
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
r01_binary_expression r \<longlonglongrightarrow> 0
r01_binary_expression r \<longlonglongrightarrow> r
[PROOF STEP]
have "r = 0"
[PROOF STATE]
proof (prove)
using this:
r01_binary_expression r \<longlonglongrightarrow> 0
r01_binary_expression r \<longlonglongrightarrow> r
goal (1 subgoal):
1. r = 0
[PROOF STEP]
using LIMSEQ_unique
[PROOF STATE]
proof (prove)
using this:
r01_binary_expression r \<longlonglongrightarrow> 0
r01_binary_expression r \<longlonglongrightarrow> r
\<lbrakk>?X \<longlonglongrightarrow> ?a; ?X \<longlonglongrightarrow> ?b\<rbrakk> \<Longrightarrow> ?a = ?b
goal (1 subgoal):
1. r = 0
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
r = 0
goal (1 subgoal):
1. \<nexists>i. r01_binary_expansion' r i = 1 \<Longrightarrow> False
[PROOF STEP]
thus False
[PROOF STATE]
proof (prove)
using this:
r = 0
goal (1 subgoal):
1. False
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
r = 0
0 < r
r < 1
goal (1 subgoal):
1. False
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
False
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1827, "file": "Quasi_Borel_Spaces_StandardBorel", "length": 22}
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pytest
import numpy as np
from tvm.contrib.hexagon.build import HexagonLauncher
from .conftest import requires_hexagon_toolchain
# use pytest -sv to observe gtest output
# use --gtest_args to pass arguments to gtest
# for example to run all "foo" tests twice and observe gtest output run
# pytest -sv <this file> --gtest_args="--gtest_filter=*foo* --gtest_repeat=2"
@requires_hexagon_toolchain
@pytest.mark.skipif(
    os.environ.get("HEXAGON_GTEST") is None,
    reason="Test requires environment variable HEXAGON_GTEST set with a path to a Hexagon gtest version normally located at /path/to/hexagon/sdk/utils/googletest/gtest",
)
def test_run_unit_tests(hexagon_session, gtest_args):
    """Run the on-device gtest suite over RPC and fail on a nonzero exit code."""
    # The RPC function only exists when the runtime was built with gtest support.
    try:
        run_tests = hexagon_session._rpc.get_function("hexagon.run_unit_tests")
    except:
        print(
            "Test requires TVM Runtime to be built with a Hexagon gtest version using Hexagon API cmake flag -DUSE_HEXAGON_GTEST=${HEXAGON_GTEST}"
        )
        raise
    # First line of the returned text is the gtest exit code; the rest is
    # the captured gtest console output.
    raw_output = run_tests(gtest_args)
    gtest_error_code = int(raw_output.splitlines()[0])
    gtest_output = raw_output.split("\n", 1)[-1]
    print(gtest_output)
    np.testing.assert_equal(gtest_error_code, 0)
|
{"hexsha": "3a383d30e5f4bc184854dcc81164eb47316c0826", "size": 2037, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/python/contrib/test_hexagon/test_run_unit_tests.py", "max_stars_repo_name": "LEA0317/incubator-tvm", "max_stars_repo_head_hexsha": "de21c8f2ef507587fdcc99b851404de5aeeb5a16", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/python/contrib/test_hexagon/test_run_unit_tests.py", "max_issues_repo_name": "LEA0317/incubator-tvm", "max_issues_repo_head_hexsha": "de21c8f2ef507587fdcc99b851404de5aeeb5a16", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/python/contrib/test_hexagon/test_run_unit_tests.py", "max_forks_repo_name": "LEA0317/incubator-tvm", "max_forks_repo_head_hexsha": "de21c8f2ef507587fdcc99b851404de5aeeb5a16", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.4375, "max_line_length": 169, "alphanum_fraction": 0.7604320079, "include": true, "reason": "import numpy", "num_tokens": 491}
|
"""Information Retrieval metrics
Useful Resources:
http://www.cs.utexas.edu/~mooney/ir-course/slides/Evaluation.ppt
http://www.nii.ac.jp/TechReports/05-014E.pdf
http://www.stanford.edu/class/cs276/handouts/EvaluationNew-handout-6-per.pdf
http://hal.archives-ouvertes.fr/docs/00/72/67/60/PDF/07-busa-fekete.pdf
Learning to Rank for Information Retrieval (Tie-Yan Liu)
"""
import numpy as np
def mean_reciprocal_rank(rs):
    """Mean of the reciprocal ranks of the first relevant item in each list.

    Relevance is binary: any nonzero entry counts as relevant. A list
    containing no relevant item contributes 0 to the mean.

    Args:
        rs: Iterator of relevance score lists/arrays in rank order
            (first element is the first-ranked item).

    Returns:
        Mean reciprocal rank as a float.
    """
    reciprocals = []
    for scores in rs:
        hit_positions = np.asarray(scores).nonzero()[0]
        reciprocals.append(1.0 / (hit_positions[0] + 1) if hit_positions.size else 0.0)
    return np.mean(reciprocals)
def r_precision(r):
    """Precision measured at the rank of the last relevant document.

    Relevance is binary: any nonzero score counts as relevant. Returns 0
    when no item is relevant.

    Args:
        r: Relevance scores (list or numpy array) in rank order
            (first element is the first-ranked item).

    Returns:
        R-Precision as a float.
    """
    relevant = np.asarray(r) != 0
    hit_positions = np.flatnonzero(relevant)
    if hit_positions.size == 0:
        return 0.
    cutoff = hit_positions[-1] + 1
    return np.mean(relevant[:cutoff])
def precision_at_k(r, k):
    """Precision of the top-``k`` ranked items.

    Relevance is binary (nonzero is relevant).

    Args:
        r: Relevance scores (list or numpy array) in rank order
            (first element is the first-ranked item).
        k: Cut-off rank; must be at least 1.

    Returns:
        Precision @ k as a float.

    Raises:
        ValueError: If fewer than ``k`` relevance scores are provided.
    """
    assert k >= 1
    top_k = np.asarray(r)[:k] != 0
    if top_k.size != k:
        raise ValueError('Relevance score length < k')
    return np.mean(top_k)
def average_precision(r):
    """Average precision (area under the precision-recall curve).

    Relevance is binary (nonzero is relevant). The score is the mean of
    precision@k over every rank k at which a relevant item appears;
    returns 0 when nothing is relevant.

    Args:
        r: Relevance scores (list or numpy array) in rank order
            (first element is the first-ranked item).

    Returns:
        Average precision as a float.
    """
    relevant = np.asarray(r) != 0
    precisions = [precision_at_k(relevant, rank + 1)
                  for rank in range(relevant.size) if relevant[rank]]
    if not precisions:
        return 0.
    return np.mean(precisions)
def mean_average_precision(rs):
    """Mean of the average precision over several ranked lists.

    Relevance is binary (nonzero is relevant).

    Args:
        rs: Iterator of relevance score lists/arrays in rank order
            (first element is the first-ranked item).

    Returns:
        Mean average precision as a float.
    """
    ap_scores = [average_precision(scores) for scores in rs]
    return np.mean(ap_scores)
def dcg_at_k(r, k, method=0):
    """Discounted cumulative gain (DCG) at rank ``k``.

    Relevance may be any positive real values (binary also works). The
    formulas follow
    https://en.wikipedia.org/wiki/Discounted_cumulative_gain#Discounted_Cumulative_Gain
    (note that the discount is log2(i + 1), not log2(i)); both methods
    return the same result for binary relevance.

    Args:
        r: Relevance scores (list or numpy array) in rank order
            (first element is the most relevant item).
        k: Number of top-ranked results to consider.
        method: If 0, sum rel_i / log2(i + 1).
            If 1, sum (2^rel_i - 1) / log2(i + 1).

    Returns:
        Discounted cumulative gain as a float; 0. when ``r`` is empty
        or ``k`` is not positive.

    Raises:
        ValueError: If ``method`` is neither 0 nor 1 (and ``r[:k]`` is
            nonempty).
    """
    # np.asfarray was removed in NumPy 2.0; asarray with an explicit float
    # dtype is the forward-compatible equivalent.
    r = np.asarray(r, dtype=float)[:k]
    if r.size == 0:
        return 0.
    discounts = np.log2(np.arange(2, r.size + 2))
    if method == 0:
        return np.sum(r / discounts)
    if method == 1:
        return np.sum((np.power(2, r) - 1) / discounts)
    raise ValueError('method must be 0 or 1.')
def ndcg_at_k(r, k, method=0):
    """Normalized discounted cumulative gain (NDCG) at rank ``k``.

    Relevance may be any positive real values (binary also works). The
    score is DCG@k divided by the ideal DCG@k obtained by sorting the
    scores in decreasing order; returns 0 when the ideal DCG is 0.

    Args:
        r: Relevance scores (list or numpy array) in rank order
            (first element is the most relevant item).
        k: Number of top-ranked results to consider.
        method: If 0, sum rel_i / log2(i + 1).
            If 1, sum (2^rel_i - 1) / log2(i + 1).

    Returns:
        Normalized discounted cumulative gain as a float in [0, 1].
    """
    ideal_dcg = dcg_at_k(sorted(r, reverse=True), k, method)
    if not ideal_dcg:
        return 0.
    return dcg_at_k(r, k, method) / ideal_dcg
if __name__ == "__main__":
    # Run the embedded doctest examples when executed as a script.
    import doctest
    doctest.testmod()
|
{"hexsha": "f130e393d95c7597cb84ec5c5fd03d2bf8a6c3f9", "size": 6532, "ext": "py", "lang": "Python", "max_stars_repo_path": "ninjia/preprocess/metrics/rank_metrics.py", "max_stars_repo_name": "taohu88/ninjia", "max_stars_repo_head_hexsha": "43e68534aa3a446b237c5dce757c02b41b2e923b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ninjia/preprocess/metrics/rank_metrics.py", "max_issues_repo_name": "taohu88/ninjia", "max_issues_repo_head_hexsha": "43e68534aa3a446b237c5dce757c02b41b2e923b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ninjia/preprocess/metrics/rank_metrics.py", "max_forks_repo_name": "taohu88/ninjia", "max_forks_repo_head_hexsha": "43e68534aa3a446b237c5dce757c02b41b2e923b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.1037344398, "max_line_length": 105, "alphanum_fraction": 0.5892529088, "include": true, "reason": "import numpy", "num_tokens": 2096}
|
# coding: utf-8
import sys, os
sys.path.append(os.pardir)
import pickle
import numpy as np
from collections import OrderedDict
from common.layers import *
from common.gradient import numerical_gradient
from common.util import *
def he_stdev(node_num):
    """Return the He-initialization standard deviation sqrt(2 / node_num).

    Suitable for weight initialization in layers with ReLU activations,
    where ``node_num`` is the number of input nodes to the layer.
    """
    return np.sqrt(2.0 / node_num)
class ConvNet:
    """Three convolution stages followed by a fully-connected classifier.

    < Model structure >
        conv - relu - pool -
        conv - relu - pool -
        conv - relu - pool -
        affine - softmax

    Parameters
    ----------
    input_dim : (channel, height, width) of one input sample, e.g. (3, 28, 28)
    conv_param : dict with keys 'filter_num', 'filter_size', 'pad', 'stride'
    pool_size : pooling window edge length (pooling stride is fixed at 2)
    hidden_size : kept for interface compatibility; not used by this model
    output_size : number of output classes
    weight_init_std : std-dev of the final affine layer's gaussian init
    """
    def __init__(self, input_dim=(3, 28, 28),
                 conv_param=None,
                 pool_size=2,
                 hidden_size=100, output_size=10, weight_init_std=0.01):
        # BUGFIX: the original used a mutable dict as the default value of
        # `conv_param`, shared across all calls; a None sentinel with the same
        # effective default is backward compatible and safe.
        if conv_param is None:
            conv_param = {'filter_num': 30, 'filter_size': 5, 'pad': 0, 'stride': 1}
        print("\n***** Network *****")
        print(" Input : "+ str(input_dim))
        print(" Output : (1, "+ str(output_size)+")\n")
        # extract parameters from conv_param dictionary
        filter_num = conv_param['filter_num']
        filter_size = conv_param['filter_size']
        filter_pad = conv_param['pad'] # padding
        filter_stride = conv_param['stride']
        input_size = input_dim[1]
        # output_height = ((input_size - filter_size + 2*filter_pad) / filter_stride) + 1
        # deep learning from scratch p.234
        # Layer1
        layer1_filter_num = filter_num*1
        conv1_output_size = int(conv_out_size(input_size=input_size, filter_size=filter_size, filter_pad=filter_pad, filter_stride=filter_stride))
        pool1_output_size = int(pool_out_size(conv_output_size=conv1_output_size, filter_pad=filter_pad, pool_size=pool_size))
        # Layer2
        layer2_filter_num = filter_num*2
        conv2_output_size = int(conv_out_size(input_size=pool1_output_size, filter_size=filter_size, filter_pad=filter_pad, filter_stride=filter_stride))
        pool2_output_size = int(pool_out_size(conv_output_size=conv2_output_size, filter_pad=filter_pad, pool_size=pool_size))
        # Layer3
        layer3_filter_num = filter_num*1
        conv3_output_size = int(conv_out_size(input_size=pool2_output_size, filter_size=filter_size, filter_pad=filter_pad, filter_stride=filter_stride))
        pool3_output_size = int(pool_out_size(conv_output_size=conv3_output_size, filter_pad=filter_pad, pool_size=pool_size))
        # Layer4: flattened input size of the affine layer
        layer4_input_size = int(layer3_filter_num * pool3_output_size * pool3_output_size)
        # weight initialisation: He init for conv layers, plain gaussian for affine
        self.params = {}
        # Layer1
        # layer1_filter_num: number of weight filters (output tensor)
        # input_dim[0]: channels (input tensor)
        # filter_size: weight width and height
        print(" Layer1: conv - relu - pool")
        print(" filter: %d x %d | input_dim: %d | output_dim: %d" %(filter_size, filter_size, input_dim[0], layer1_filter_num))
        self.params['W1'] = np.random.randn(layer1_filter_num, input_dim[0], filter_size, filter_size) * he_stdev(input_dim[0])
        self.params['b1'] = np.zeros(layer1_filter_num)
        # Layer2
        print("\n Layer2: conv - relu - pool")
        print(" filter: %d x %d | input_dim: %d | output_dim: %d" %(filter_size, filter_size, layer1_filter_num, layer2_filter_num))
        self.params['W2'] = np.random.randn(layer2_filter_num, layer1_filter_num, filter_size, filter_size) * he_stdev(layer1_filter_num)
        self.params['b2'] = np.zeros(layer2_filter_num)
        # Layer3
        print("\n Layer3: conv - relu - pool")
        print(" filter: %d x %d | input_dim: %d | output_dim: %d" %(filter_size, filter_size, layer2_filter_num, layer3_filter_num))
        self.params['W3'] = np.random.randn(layer3_filter_num, layer2_filter_num, filter_size, filter_size) * he_stdev(layer2_filter_num)
        self.params['b3'] = np.zeros(layer3_filter_num)
        # Layer4: fully connected
        print("\n Layer4: affine")
        print(" input: %d | output: %d" %(layer4_input_size, output_size))
        self.params['W4'] = weight_init_std * np.random.randn(layer4_input_size, output_size)
        self.params['b4'] = np.zeros(output_size)
        # Last layer is softmax (with loss)
        print("\n Last layer: softmax")
        # define layers in forward order
        self.layers = OrderedDict()
        # Layer1
        self.layers['Conv1'] = Convolution(self.params['W1'], self.params['b1'], conv_param['stride'], conv_param['pad'])
        self.layers['Relu1'] = Relu()
        self.layers['Pool1'] = Pooling(pool_h=pool_size, pool_w=pool_size, stride=2)
        # Layer2
        self.layers['Conv2'] = Convolution(self.params['W2'], self.params['b2'], conv_param['stride'], conv_param['pad'])
        self.layers['Relu2'] = Relu()
        self.layers['Pool2'] = Pooling(pool_h=pool_size, pool_w=pool_size, stride=2)
        # Layer3
        self.layers['Conv3'] = Convolution(self.params['W3'], self.params['b3'], conv_param['stride'], conv_param['pad'])
        self.layers['Relu3'] = Relu()
        self.layers['Pool3'] = Pooling(pool_h=pool_size, pool_w=pool_size, stride=2)
        # Layer4
        self.layers['Affine1'] = Affine(self.params['W4'], self.params['b4'])
        # Last Layer
        self.last_layer = SoftmaxWithLoss()

    def predict(self, x):
        """Forward pass through every layer except the final softmax-with-loss.
        Used by both loss() and accuracy()."""
        for layer in self.layers.values():
            x = layer.forward(x)
        return x

    def loss(self, x, t):
        """Cross-entropy loss for input data `x` against labels `t`."""
        y = self.predict(x)
        return self.last_layer.forward(y, t)

    def accuracy(self, x, t, batch_size=100):
        """Classification accuracy of `x` against labels `t`, evaluated in
        mini-batches of `batch_size` to bound memory use."""
        if t.ndim != 1 : t = np.argmax(t, axis=1)
        acc = 0.0
        # BUGFIX: the original iterated int(x.shape[0] / batch_size) full
        # batches only, silently dropping the trailing partial batch while
        # still dividing by the full sample count; step slicing covers all.
        for i in range(0, x.shape[0], batch_size):
            tx = x[i:i+batch_size]
            tt = t[i:i+batch_size]
            y = self.predict(tx)
            y = np.argmax(y, axis=1)
            acc += np.sum(y == tt)
        return acc / x.shape[0]

    def numerical_gradient(self, x, t):
        """Gradients via numerical differentiation (slow; for gradient checks).
        x: input data, t: input labels."""
        loss_w = lambda w: self.loss(x, t)
        grads = {}
        # BUGFIX: the original iterated (1, 2, 3) only and therefore never
        # produced W4/b4, unlike gradient(); include the affine layer too.
        for idx in (1, 2, 3, 4):
            grads['W' + str(idx)] = numerical_gradient(loss_w, self.params['W' + str(idx)])
            grads['b' + str(idx)] = numerical_gradient(loss_w, self.params['b' + str(idx)])
        return grads

    def gradient(self, x, t):
        """Gradients via backpropagation; returns a dict with W1..W4 / b1..b4."""
        # forward
        self.loss(x, t)
        # backward, starting from the softmax-with-loss layer
        dout = 1
        dout = self.last_layer.backward(dout)
        layers = list(self.layers.values())
        layers.reverse() # traverse in reverse order
        # each layer's backward() stores its own dW/db as a side effect
        for layer in layers:
            dout = layer.backward(dout)
        # collect gradients from the parameterised layers
        grads = {}
        # Layer1
        grads['W1'] = self.layers['Conv1'].dW
        grads['b1'] = self.layers['Conv1'].db
        # Layer2
        grads['W2'] = self.layers['Conv2'].dW
        grads['b2'] = self.layers['Conv2'].db
        # Layer3
        grads['W3'] = self.layers['Conv3'].dW
        grads['b3'] = self.layers['Conv3'].db
        # Layer4
        grads['W4'] = self.layers['Affine1'].dW
        grads['b4'] = self.layers['Affine1'].db
        return grads

    def save_params(self, file_name="params.pkl"):
        """Pickle a shallow copy of the parameter dict to `file_name`."""
        params = {}
        for key, val in self.params.items():
            params[key] = val
        with open(file_name, 'wb') as f:
            pickle.dump(params, f)

    def load_params(self, file_name="params.pkl"):
        """Load pickled parameters from `file_name` and push them into the
        corresponding layer objects."""
        with open(file_name, 'rb') as f:
            params = pickle.load(f)
        for key, val in params.items():
            self.params[key] = val
        # Re-point each parameterised layer at the freshly loaded arrays;
        # Relu/Pool layers hold no parameters and need no update.
        for i, key in enumerate(['Conv1', 'Conv2', 'Conv3', 'Affine1']):
            self.layers[key].W = self.params['W' + str(i+1)]
            self.layers[key].b = self.params['b' + str(i+1)]
|
{"hexsha": "5a6104c2633437c2cb141d711bc611dcddca702d", "size": 9402, "ext": "py", "lang": "Python", "max_stars_repo_path": "LEGACY/custom_convnet.py", "max_stars_repo_name": "YeongHyeon/Convolution_Neural_Network", "max_stars_repo_head_hexsha": "11b75f011078c741a5ba1b935c3ede1397b7b46d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 81, "max_stars_repo_stars_event_min_datetime": "2017-06-01T05:01:38.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-10T22:52:48.000Z", "max_issues_repo_path": "LEGACY/custom_convnet.py", "max_issues_repo_name": "YeongHyeon/Convolution_Neural_Network", "max_issues_repo_head_hexsha": "11b75f011078c741a5ba1b935c3ede1397b7b46d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-02-02T12:16:57.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-03T08:48:54.000Z", "max_forks_repo_path": "LEGACY/custom_convnet.py", "max_forks_repo_name": "YeongHyeon/Convolution_Neural_Network", "max_forks_repo_head_hexsha": "11b75f011078c741a5ba1b935c3ede1397b7b46d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 18, "max_forks_repo_forks_event_min_datetime": "2017-06-02T08:15:12.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-16T03:10:27.000Z", "avg_line_length": 40.0085106383, "max_line_length": 153, "alphanum_fraction": 0.6193363114, "include": true, "reason": "import numpy", "num_tokens": 2439}
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import os.path as osp
from glob import glob
import re
import argparse
import collections
import tensorflow.compat.v1 as tf
#import tensorflow as tf
import cv2
import json
import numpy as np
import datetime
import tflite_runtime.interpreter as tflite
CURRENT_DIR = osp.dirname(__file__)
sys.path.append(osp.join(CURRENT_DIR, '..'))
from utils.misc_utils import sort_nicely
# Axis-aligned bounding box; whether (x, y) is the top-left corner or the
# center depends on context (see convert_bbox_format below).
Rectangle = collections.namedtuple('Rectangle', ['x', 'y', 'width', 'height'])
def get_center(x):
    """Center coordinate of a 0-indexed axis of length `x`, i.e. (x - 1) / 2."""
    return 0.5 * (x - 1.0)
def convert_bbox_format(bbox, to):
    """Translate a Rectangle between anchor conventions.

    `to` selects the target convention: 'top-left-based' shifts (x, y) from
    the box center to the top-left corner, 'center-based' does the reverse.
    Raises ValueError for any other value. Width and height are unchanged.
    """
    x = bbox.x
    y = bbox.y
    w = bbox.width
    h = bbox.height
    half_w = get_center(w)
    half_h = get_center(h)
    if to == 'top-left-based':
        x, y = x - half_w, y - half_h
    elif to == 'center-based':
        x, y = x + half_w, y + half_h
    else:
        raise ValueError("Bbox format: {} was not recognized".format(to))
    return Rectangle(x, y, w, h)
class TargetState(object):
    """Represent the target state."""

    def __init__(self, bbox, search_pos, scale_idx):
        # (cx, cy, w, h) in the original image
        self.bbox = bbox
        # target center position in the search image
        self.search_pos = search_pos
        # scale index in the searched scales
        self.scale_idx = scale_idx
class SiameseTracking():
    """SiamFC-style single-object tracker with three inference back-ends:

    * a frozen TensorFlow graph (``.pb``) covering the whole network,
    * a single TFLite model covering the whole network, or
    * three separate TFLite models (template / search feature extractors
      plus a cross-correlation head), optionally EdgeTPU-delegated.

    The back-end is chosen from `separate_mode` and the model file names.
    """

    def __init__(self, config_filepath, separate_mode, whole_model, template_model, search_model, cross_model):
        self.separate_mode = separate_mode
        self.whole_lite = False
        self.template_image_quant = False
        self.search_image_quant = False
        self.num_scales = 1
        # whole model
        if not separate_mode:
            if ".pb" in whole_model:
                print("load frozen graph model")
                self.graph = tf.Graph()
                with tf.gfile.GFile(whole_model, 'rb') as f:
                    graph_def = tf.GraphDef()
                    graph_def.ParseFromString(f.read())
                    # the batch size of the search-image placeholder tells us
                    # how many scales the model was exported with
                    for n in graph_def.node:
                        # BUGFIX: the original `n.op in ('Placeholder')` was a
                        # substring test on a string, not a tuple membership.
                        if n.op == 'Placeholder':
                            if n.name == "input_image":
                                self.num_scales = n.attr['shape'].shape.dim[0].size
                with self.graph.as_default():
                    tf.import_graph_def(graph_def)
                self.graph.finalize()
                self.sess = tf.Session(graph = self.graph)
            elif ".tflite" in whole_model:
                self.whole_lite = True
                print("load tensorflow lite model")
                experimental_delegates = []
                if "full_quant" in whole_model:
                    self.template_image_quant = True
                    self.search_image_quant = True
                    print("this model is fully quantized")
                if "edgetpu" in whole_model:
                    print("this model is for edgetpu")
                    experimental_delegates.append(tflite.load_delegate('libedgetpu.so.1'))
                self.interpreter = tflite.Interpreter(model_path=whole_model, experimental_delegates=experimental_delegates)
                self.interpreter.allocate_tensors()
                self.lite_input_details = self.interpreter.get_input_details()
                self.lite_output_details = self.interpreter.get_output_details()
                # read the scale count from the search-image input's batch size
                for detail in self.lite_input_details:  # renamed from `input` (shadowed the builtin)
                    if detail['name'] == "input_image":
                        self.num_scales = detail['shape'][0]
            else:
                raise ValueError("unsupported whole mode")
        else:
            # separate models: template / search feature extractors + cross-correlation
            experimental_delegates = []
            if 'edgetpu' in template_model:
                experimental_delegates.append(tflite.load_delegate('libedgetpu.so.1'))
            self.interpreter_template = tflite.Interpreter(template_model, experimental_delegates=experimental_delegates)
            experimental_delegates = []
            if 'edgetpu' in search_model:
                experimental_delegates.append(tflite.load_delegate('libedgetpu.so.1'))
            self.interpreter_search = tflite.Interpreter(search_model, experimental_delegates=experimental_delegates)
            experimental_delegates = []
            if 'edgetpu' in cross_model:
                experimental_delegates.append(tflite.load_delegate('libedgetpu.so.1'))
            self.interpreter_cross = tflite.Interpreter(cross_model, experimental_delegates=experimental_delegates)
            self.interpreter_template.allocate_tensors()
            self.interpreter_search.allocate_tensors()
            self.interpreter_cross.allocate_tensors()
            self.template_input_details = self.interpreter_template.get_input_details()
            self.template_output_details = self.interpreter_template.get_output_details()
            self.search_input_details = self.interpreter_search.get_input_details()
            self.search_output_details = self.interpreter_search.get_output_details()
            self.cross_input_details = self.interpreter_cross.get_input_details()
            self.cross_output_details = self.interpreter_cross.get_output_details()
            # de-quantization parameters for the template embedding
            self.embed_z_scale = 1.0
            self.embed_z_offset = 0.0
            if 'full_quant' in template_model:
                # NOTE(review): this reads search_output_details, not
                # template_output_details — looks suspicious; confirm intended.
                self.embed_z_scale = self.search_output_details[0]['quantization_parameters']['scales'][0]
                self.embed_z_offset = self.search_output_details[0]['quantization_parameters']['zero_points'][0]
                self.template_image_quant = True
            # de-quantization parameters for the search embedding
            self.embed_x_scale = 1.0
            self.embed_x_offset = 0.0
            if 'full_quant' in search_model:
                self.embed_x_scale = self.search_output_details[0]['quantization_parameters']['scales'][0]
                self.embed_x_offset = self.search_output_details[0]['quantization_parameters']['zero_points'][0]
                self.search_image_quant = True
            if self.search_input_details[0]['shape'][0] != 1:
                raise ValueError("the search input for seperate model should of one batch for any scale tracking")
            # re-quantization factors for the cross-correlation inputs/output;
            # NOTE(review): these are never updated from the model, so the
            # re-quantization branch in inference() is currently dead code.
            self.cross_input_template_scale = 1.0
            self.cross_input_template_offset = 0.0
            self.cross_input_search_scale = 1.0
            self.cross_input_search_offset = 0.0
            self.cross_output_scale = 1.0
            self.cross_output_offset = 0.0
            self.num_scales = self.cross_input_details[1]['shape'][0]
        self.embed_z = None
        self.update_template = False
        with open(osp.join(config_filepath, 'model_config.json'), 'r') as f:
            self.model_config = json.load(f)
        with open(osp.join(config_filepath, 'track_config.json'), 'r') as f:
            self.track_config = json.load(f)
        self.search_image_size = self.track_config['x_image_size'] # search image size
        self.template_image_size = self.model_config['z_image_size'] # template image size
        self.search_center = np.array([get_center(self.search_image_size),
                                       get_center(self.search_image_size)])
        self.window_influence = self.track_config['window_influence']
        self.current_target_state = None
        self.window = None # Cosine window
        self.original_target_height = 0
        self.original_target_width = 0
        # symmetric scale factors around 1.0, e.g. step**-1, step**0, step**1
        scales = np.arange(self.num_scales) - get_center(self.num_scales)
        assert np.sum(scales) == 0, 'scales should be symmetric'
        self.search_factors = [self.track_config['scale_step'] ** x for x in scales]

    def update_template_image(self, input_image, target_bbox):
        """Crop the template patch around `target_bbox` and (re)initialise the
        tracking state.

        Returns the template patch as uint8 for quantized models, float32
        otherwise.
        """
        bbox = convert_bbox_format(target_bbox, 'center-based')
        self.original_target_height = bbox.height
        self.original_target_width = bbox.width
        search_images, _ = self.crop_search_image(input_image, bbox)
        # Given the fixed ratio between template image (127) and input image (255) => 1:2,
        # the template is the central crop of the middle-scale search patch.
        top = int(round(self.search_center[1] - get_center(self.template_image_size)))
        bottom = int(top + self.template_image_size)
        left = int(round(self.search_center[0] - get_center(self.template_image_size)))
        right = int(left + self.template_image_size)
        template_image = search_images[int(get_center(self.num_scales))][top:bottom, left:right]
        # Update the current_target_state
        self.current_target_state = TargetState(bbox=bbox,
                                                search_pos=self.search_center,
                                                scale_idx=int(get_center(self.num_scales)))
        # force the template embedding to be recomputed on the next inference
        self.update_template = False
        if self.template_image_quant:
            return template_image.astype(np.uint8)
        else:
            return template_image.astype(np.float32)

    def crop_search_image(self, input_image, target_bbox):
        """Crop one search patch per scale factor, centred on `target_bbox`.

        Out-of-frame regions are padded with the per-channel mean colour.
        Returns (search_images, search_resize_rates), both indexed by scale.
        """
        target_yx = np.array([target_bbox.y, target_bbox.x])
        target_size = np.array([target_bbox.height, target_bbox.width])
        avg_chan = (np.average(input_image, axis=(0, 1))).astype(np.uint8)
        # TODO: to understand the effect of this factor (too much margin from the template image, why?)
        #canonical_size = np.max(target_size) not good for ball, why ?
        canonical_size = np.sqrt(np.prod(target_size + 0.5 * np.sum(target_size)))
        search_window_size = self.search_image_size / self.template_image_size * canonical_size
        search_resize_rate = self.search_image_size / search_window_size
        print("search_window_size: {}".format(search_window_size))
        search_images = []
        search_resize_rates = []
        for factor in self.search_factors:
            scaled_search_window_size = factor * search_window_size
            topleft = (target_yx - get_center(scaled_search_window_size))
            bottomright = (target_yx + get_center(scaled_search_window_size))
            # start from a mean-colour canvas so out-of-frame areas are padded
            search_image = np.ones((int(scaled_search_window_size), int(scaled_search_window_size), 3))
            for i in range(len(avg_chan)):
                search_image[:,:,i] = search_image[:,:,i] * avg_chan[i]
            bottomright = bottomright.astype(np.int32)
            topleft = topleft.astype(np.int32)
            init_x = 0
            init_y = 0
            # clamp the crop to the frame, remembering the paste offset
            if topleft[0] < 0: # top
                init_y = -topleft[0]
                topleft[0] = 0
                print ("top violate")
            if topleft[1] < 0: # left
                init_x = -topleft[1]
                topleft[1] = 0
                print ("left violate")
            if bottomright[0] >= input_image.shape[0]: # bottom
                bottomright[0] = input_image.shape[0] - 1
            if bottomright[1] >= input_image.shape[1]: # right
                bottomright[1] = input_image.shape[1] - 1
            search_image[init_y: init_y + bottomright[0] - topleft[0], init_x: init_x + bottomright[1] - topleft[1],:] = input_image[topleft[0]:bottomright[0], topleft[1]:bottomright[1],:]
            search_image = cv2.resize(search_image, (self.search_image_size, self.search_image_size))
            if self.search_image_quant:
                search_images.append(search_image.astype(np.uint8))
            else:
                search_images.append(search_image.astype(np.float32))
            search_resize_rates.append(search_resize_rate/factor)
        return search_images, search_resize_rates

    def inference(self, template_image, input_image):
        """Run one tracking step on `input_image`.

        Crops multi-scale search patches around the current target estimate,
        evaluates the similarity network on the configured back-end, picks the
        best scale, and updates `self.current_target_state`.

        Returns a dict with 'search_image', 'response',
        'current_target_state' and 'raw_output_data' (normalised to 0..255).
        """
        search_images, search_resize_rates = self.crop_search_image(input_image, self.current_target_state.bbox)
        response = None
        raw_output_data = None
        if self.separate_mode: # three separate TFLite models
            if not self.update_template:
                # the template embedding only needs to be computed once per template
                dt1 = datetime.datetime.now()
                self.interpreter_template.set_tensor(self.template_input_details[0]['index'], template_image)
                self.interpreter_template.invoke()
                dt2 = datetime.datetime.now()
                print("template inference du: {}".format(dt2.timestamp() - dt1.timestamp()))
                self.embed_z = self.embed_z_scale * (self.interpreter_template.get_tensor(self.template_output_details[0]['index']).astype(np.float32) - self.embed_z_offset)
                self.update_template = True
            # the search extractor accepts one batch, so run it once per scale
            embed_x_list = []
            for i in range(self.num_scales):
                dt1 = datetime.datetime.now()
                self.interpreter_search.set_tensor(self.search_input_details[0]['index'], np.expand_dims(search_images[i], 0))
                self.interpreter_search.invoke()
                dt2 = datetime.datetime.now()
                print("search inference du for {}th batch: {}".format(i, dt2.timestamp() - dt1.timestamp()))
                embed_x_list.append(self.embed_x_scale * (self.interpreter_search.get_tensor(self.search_output_details[0]['index']).astype(np.float32) - self.embed_x_offset))
            embed_x = np.concatenate(embed_x_list, 0)
            print("embed x : {}".format(embed_x.shape))
            # re-quantize the embeddings for a quantized cross-correlation model
            # NOTE(review): both factors stay 1.0 (see __init__), so this branch
            # is currently dead code — confirm before relying on it.
            if self.cross_input_template_scale != 1 and self.cross_input_search_scale != 1:
                self.embed_z = (self.embed_z / self.cross_input_template_scale + self.cross_input_template_offset).astype(np.uint8)
                embed_x = (embed_x / self.cross_input_search_scale + self.cross_input_search_offset).astype(np.uint8)
            dt1 = datetime.datetime.now()
            self.interpreter_cross.set_tensor(self.cross_input_details[0]['index'], self.embed_z)
            self.interpreter_cross.set_tensor(self.cross_input_details[1]['index'], embed_x)
            self.interpreter_cross.invoke()
            dt2 = datetime.datetime.now()
            print("cross inference du: {}".format(dt2.timestamp() - dt1.timestamp()))
            raw_output_data = self.cross_output_scale * (self.interpreter_cross.get_tensor(self.cross_output_details[0]['index']) - self.cross_output_offset)
            # upsample the raw correlation map for sub-stride localisation
            response = np.empty((self.num_scales, self.track_config['upsample_factor'] * raw_output_data.shape[1], self.track_config['upsample_factor'] * raw_output_data.shape[1]))
            for i in range(self.num_scales):
                response[i] = cv2.resize(np.squeeze(raw_output_data, -1)[i], dsize=None, fx=self.track_config['upsample_factor'], fy=self.track_config['upsample_factor'], interpolation=cv2.INTER_CUBIC)
        else:
            if self.whole_lite == True:
                # https://www.tensorflow.org/lite/guide/inference
                self.interpreter.set_tensor(self.lite_input_details[0]['index'], template_image)
                self.interpreter.set_tensor(self.lite_input_details[1]['index'], search_images)
                dt1 = datetime.datetime.now()
                self.interpreter.invoke()
                dt2 = datetime.datetime.now()
                print("inference du: {}".format(dt2.timestamp() - dt1.timestamp()))
                raw_output_data = self.interpreter.get_tensor(self.lite_output_details[0]['index'])
                # upsample the raw correlation map for sub-stride localisation
                response = np.empty((self.num_scales, self.track_config['upsample_factor'] * raw_output_data.shape[1], self.track_config['upsample_factor'] * raw_output_data.shape[1]))
                for i in range(self.num_scales):
                    response[i] = cv2.resize(np.squeeze(raw_output_data, -1)[i], dsize=None, fx=self.track_config['upsample_factor'], fy=self.track_config['upsample_factor'], interpolation=cv2.INTER_CUBIC)
            else:
                # frozen graph: the graph performs the upsampling internally
                output_tensor = self.graph.get_tensor_by_name("import/upsample/final_result:0")
                dt1 = datetime.datetime.now()
                response = self.sess.run(output_tensor, feed_dict = {"import/template_image:0": template_image, "import/input_image:0": search_images})
                dt2 = datetime.datetime.now()
                # BUGFIX: raw_output_data was never assigned on this path, so
                # the normalisation below raised NameError for .pb models; keep
                # the pre-squeeze output (already upsampled on this back-end).
                raw_output_data = response
                response = np.squeeze(response, -1)
                print("inference du: {}".format(dt2.timestamp() - dt1.timestamp()))
        # Choose the scale whose response map has the highest (penalized) peak
        best_scale = 0
        if self.num_scales > 1:
            response_max = np.max(response, axis=(1, 2))
            penalties = self.track_config['scale_penalty'] * np.ones(self.num_scales)
            current_scale_idx = int(get_center(self.num_scales))
            penalties[current_scale_idx] = 1.0 # no penalty for keeping the current scale
            response_penalized = response_max * penalties
            best_scale = np.argmax(response_penalized)
            print(best_scale)
        response = np.squeeze(response[best_scale])
        with np.errstate(all='raise'): # Raise error if something goes wrong
            response = response - np.min(response)
            response = response / np.sum(response)
        # Cosine (Hann) window, built once, penalises large displacements
        if self.window is None:
            window = np.dot(np.expand_dims(np.hanning(response.shape[1]), 1),
                            np.expand_dims(np.hanning(response.shape[1]), 0))
            self.window = window / np.sum(window) # normalize window
        response = (1 - self.window_influence) * response + self.window_influence * self.window
        # Find maximum response
        r_max, c_max = np.unravel_index(response.argmax(), response.shape)
        p_coor = np.array([r_max, c_max])
        # displacement in the upsampled response map ...
        disp_instance_final = p_coor - get_center(response.shape[1])
        upsample_factor = self.track_config['upsample_factor']
        # ... in the feature map ...
        disp_instance_feat = disp_instance_final / upsample_factor
        # ... clipped to the valid radius (avoid empty position) ...
        r_radius = int(response.shape[1] / upsample_factor / 2)
        disp_instance_feat = np.maximum(np.minimum(disp_instance_feat, r_radius), -r_radius)
        # ... in instance input ...
        disp_instance_input = disp_instance_feat * self.model_config['embed_config']['stride']
        # ... in the original frame coordinates
        disp_instance_frame = disp_instance_input / search_resize_rates[best_scale]
        # Position within frame in frame coordinates
        y = self.current_target_state.bbox.y
        x = self.current_target_state.bbox.x
        y += disp_instance_frame[0]
        x += disp_instance_frame[1]
        # Target scale damping and saturation
        target_scale = self.current_target_state.bbox.height / self.original_target_height
        search_factor = self.search_factors[best_scale]
        scale_damp = self.track_config['scale_damp'] # damping factor for scale update
        target_scale *= ((1 - scale_damp) * 1.0 + scale_damp * search_factor)
        target_scale = np.maximum(0.2, np.minimum(5.0, target_scale)) # heuristic
        # Book keeping: update the tracked state
        height = self.original_target_height * target_scale
        width = self.original_target_width * target_scale
        self.current_target_state.bbox = Rectangle(x, y, width, height)
        self.current_target_state.scale_idx = best_scale
        self.current_target_state.search_pos = self.search_center + disp_instance_input
        # normalize response and raw output to 0..255 for visualisation
        res_max = np.max(response)
        res_min = np.min(response)
        response = (response - res_min) / (res_max - res_min) * 255
        raw_output_max = np.max(np.squeeze(raw_output_data, -1)[best_scale])
        raw_output_min = np.min(np.squeeze(raw_output_data, -1)[best_scale])
        best_raw_output = (np.squeeze(raw_output_data, -1)[best_scale] - raw_output_min) / (raw_output_max - raw_output_min) * 255
        # BUGFIX: the original indexed search_images with best_scale_index,
        # which was initialised to 0 and never updated; use the selected scale.
        outputs = {'search_image': search_images[best_scale], 'response': response, 'current_target_state': self.current_target_state, 'raw_output_data': best_raw_output}
        return outputs
if __name__ == "__main__":
    # Command-line driver: load a model (whole .pb / whole .tflite / three
    # separate .tflite models), initialise the tracker from the first
    # ground-truth box, then track through every frame of the sequence.
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('--models_dir', dest='models_dir', action="store",
                        default='Logs/SiamFC/track_model_checkpoints/SiamFC-3s-color-pretrained/models', type=str)
    parser.add_argument('--config', dest='config_filepath', action="store",
                        help='the path of tracking config for inference', default='Logs/SiamFC/track_model_checkpoints/SiamFC-3s-color-pretrained', type=str)
    parser.add_argument('--images', dest='image_filepath', action="store",
                        help='the path of iamges to do inference', default='assets/drone', type=str)
    parser.add_argument('--headless', dest='headless', action="store_true")
    parser.add_argument('--whole_model', dest='whole_model', action="store",
                        help='the path of inference model for whole sequence', default='whole_model_scale1.pb', type=str)
    parser.add_argument('--search_model', dest='search_model', action="store",
                        default='search_image_feature_extractor_full_quant_scale1_edgetpu.tflite', type=str)
    parser.add_argument('--template_model', dest='template_model', action="store",
                        default='template_image_feature_extractor_scale1.tflite', type=str)
    parser.add_argument('--cross_model', dest='cross_model', action="store",
                        default='cross_correlation_scale1.tflite', type=str)
    parser.add_argument('--separate_mode', dest='separate_mode', action="store_true")
    args, _ = parser.parse_known_args()
    # first line of the ground-truth file holds the initial box as "x,y,w,h"
    first_line = open(args.image_filepath + '/groundtruth_rect.txt').readline()
    bbox = [int(v) for v in first_line.strip().split(',')]
    init_bbox = Rectangle(bbox[0], bbox[1], bbox[2], bbox[3]) # 0-index in python
    tracker = SiameseTracking(args.config_filepath,
                              args.separate_mode,
                              osp.join(args.models_dir, args.whole_model),
                              osp.join(args.models_dir, args.template_model),
                              osp.join(args.models_dir, args.search_model),
                              osp.join(args.models_dir, args.cross_model))
    filenames = sort_nicely(glob(args.image_filepath + '/img/*.jpg'))
    # initialise the template patch from the first frame
    first_image = cv2.imread(filenames[0])
    template_image = tracker.update_template_image(first_image, init_bbox)
    if not args.headless:
        cv2.imshow('template_image',template_image.astype(np.uint8))
    # track through the remaining frames (frame 0 provided the template)
    for i, filename in enumerate(filenames):
        if i > 0:
            input_image = cv2.imread(filenames[i])
            dt1 = datetime.datetime.now()
            outputs = tracker.inference(template_image, input_image)
            dt2 = datetime.datetime.now()
            print("tracking du: {}".format(dt2.timestamp() - dt1.timestamp()))
            # visualize: draw the tracked box on the frame
            search_image = outputs['search_image'].astype(np.uint8)
            bbox_search = convert_bbox_format(outputs['current_target_state'].bbox, 'top-left-based')
            input_image = cv2.rectangle(input_image,(int(bbox_search.x), int(bbox_search.y)),(int(bbox_search.x+bbox_search.width), int(bbox_search.y+bbox_search.height)),(0,255,0),2)
            if not args.headless:
                cv2.imshow('search_image',search_image)
                cv2.imshow('raw_image', input_image)
                cv2.imshow('response', outputs['raw_output_data'].astype(np.uint8))
                # block until a key press so each frame can be inspected
                k = cv2.waitKey(0)
                if k == 27: # wait for ESC key to exit
                    sys.exit()
|
{"hexsha": "83922bac802398ab2dffc8871dfcfe96f12579a5", "size": 22333, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/tracking.py", "max_stars_repo_name": "tongtybj/SiamFC-TensorFlow", "max_stars_repo_head_hexsha": "b885c62132fb6203e820570a2263e6a401af5d26", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/tracking.py", "max_issues_repo_name": "tongtybj/SiamFC-TensorFlow", "max_issues_repo_head_hexsha": "b885c62132fb6203e820570a2263e6a401af5d26", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/tracking.py", "max_forks_repo_name": "tongtybj/SiamFC-TensorFlow", "max_forks_repo_head_hexsha": "b885c62132fb6203e820570a2263e6a401af5d26", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.8583162218, "max_line_length": 195, "alphanum_fraction": 0.6987417723, "include": true, "reason": "import numpy", "num_tokens": 5189}
|
# ------------------------------------------------------------------------------
# Copyright 2020 Forschungszentrum Jülich GmbH and Aix-Marseille Université
# "Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements; and to You under the Apache License,
# Version 2.0. "
#
# Forschungszentrum Jülich
# Institute: Institute for Advanced Simulation (IAS)
# Section: Jülich Supercomputing Centre (JSC)
# Division: High Performance Computing in Neuroscience
# Laboratory: Simulation Laboratory Neuroscience
# Team: Multi-scale Simulation and Design
# ------------------------------------------------------------------------------
#TODO raw copy paste of both rate to spike implementations (by Lionel)
#TODO "choose" one
import numpy as np
from quantities import ms, Hz
from neo.core import AnalogSignal
# BUGFIX: the original continued this import onto a second line after a bare
# trailing comma, which is a SyntaxError; parenthesise the imported names.
from elephant.spike_train_generation import (inhomogeneous_poisson_process,
                                             homogeneous_poisson_process)
from EBRAINS_ConfigManager.global_configurations_manager.xml_parsers.default_directories_enum import DefaultDirectories
# NOTE Former rate_to_spikes_transformer.py subclass
class RatetoSpikes():
    '''Transforms the rates into spikes train.'''

    def __init__(self, param, configurations_manager, log_settings):
        self._log_settings = log_settings
        self._configurations_manager = configurations_manager
        self.__logger = self._configurations_manager.load_log_configurations(
            name="Transformer--Rate_to_Spikes",
            log_configurations=self._log_settings,
            target_directory=DefaultDirectories.SIMULATION_RESULTS)
        # number of spike generators (sequence; the first entry is used)
        self.nb_spike_generator = param['nb_neurons']
        self.path = param['path'] + "/transformation/"
        # optional recording buffers for spikes / rates
        self.save_spike = bool(param['save_spikes'])
        self.save_rate = bool(param['save_rate'])
        if self.save_spike:
            self.save_spike_buf = None
        if self.save_rate:
            self.save_rate_buf = None
        # number of synapses (scales the Poisson rate)
        self.nb_synapse = int(param["nb_brain_synapses"])
        self.__logger.info("Initialised")

    def transform(self, count, time_step, rate):
        """Turn a rate signal into one spike train per spike generator.

        Parameters
        ----------
        count : int
            counter identifying which transformation step of the simulation
            this call belongs to
        time_step : sequence of two ints
            start and end of the time interval covered by `rate`
        rate : numpy array
            firing rates over the interval (modified in place by scaling)

        Returns
        -------
        list of numpy arrays, one sorted, rounded spike train per generator
        """
        # scale by the synapse count (property of the Poisson process), then
        # nudge away from exact zero and force non-negative rates
        rate *= self.nb_synapse
        rate += 1e-12
        rate = np.abs(rate)
        signal = AnalogSignal(rate * Hz, t_start=(time_step[0] + 0.1) * ms,
                              sampling_period=(time_step[1] - time_step[0]) / rate.shape[-1] * ms)
        self.__logger.debug(f"rate: {rate}, signal: {signal}, time_step: {time_step}")
        # draw one inhomogeneous-Poisson spike train per generator
        return [
            np.around(np.sort(inhomogeneous_poisson_process(signal, as_array=True)), decimals=1)
            for _ in range(self.nb_spike_generator[0])
        ]
#NOTE former analyzer_rate_to_spike.py subclass
class AnalyzerRateToSpikes():
    '''Implements the abstract base class for analyzing the data.'''

    def __init__(self, configurations_manager=None, log_settings=None):
        # TODO Discuss whether the logging is not necessary
        try:
            self._log_settings = log_settings
            self._configurations_manager = configurations_manager
            self.__logger = self._configurations_manager.load_log_configurations(
                name="Analyzer--Rate_to_Spikes",
                log_configurations=self._log_settings,
                target_directory=DefaultDirectories.SIMULATION_RESULTS)
            self.__logger.info("initialized")
        except Exception:
            # continue without a logger (e.g. both arguments defaulted to
            # None, making load_log_configurations fail)
            pass

    def analyze(self, data, time_start, time_stop, variation=False):
        '''
        Wrapper for computing the spikes from rate. It generate the spike
        train with homogenous or inhomogenous Poisson generator.

        Parameters
        ----------
        data : Any
            an array or a float of quantities to be analyzed.
        time_start: int
            time to start the computation (spike train)
        time_stop: int
            time to stop the computation (spike train)
        variation : bool
            boolean for variation of rate; when True an inhomogeneous
            Poisson process is used, otherwise a homogeneous one.

        Returns
        ------
        result: numpy array
            the resultant one or multiple spike trains.
        '''
        return self.__rates_to_spikes(data, time_start, time_stop, variation)

    def __rates_to_spikes(self, rates, t_start, t_stop, variation):
        """helper function to compute the spikes from rate.

        NOTE(review): ``rates``, ``t_start`` and ``t_stop`` are presumably
        ``quantities`` values (Hz / ms) as required by AnalogSignal and the
        elephant generators -- confirm against callers.
        """
        if variation:
            # the case where the variation of the rate is include
            # We generate the inhomogenous poisson
            if len(rates.shape) == 1:
                # the case where we have only one rate
                # sampling_period spreads the samples evenly over the interval
                signal = AnalogSignal(rates, t_start=t_start, sampling_period=(t_stop-t_start)/rates.shape[-1])
                result = [inhomogeneous_poisson_process(signal,as_array=True)]
                return np.array(result)
            else :
                # the case where we have multiple rates
                result = []
                for rate in rates:
                    signal = AnalogSignal(rate, t_start=t_start, sampling_period=(t_stop - t_start) / rates.shape[-1])
                    result.append(inhomogeneous_poisson_process(signal,as_array=True))
                return np.array(result)
        else:
            # the case we have only the rate
            # We generate the homogenous poisson
            if len(rates.shape) ==0:
                # the case where we have only one rate (0-d scalar)
                result = np.array([homogeneous_poisson_process(rate=rates, t_start=t_start, t_stop=t_stop, as_array=True)])
            else:
                # the case where we have multiple rates
                result = []
                for rate in rates:
                    result.append(homogeneous_poisson_process(rate=rate, t_start=t_start, t_stop=t_stop, as_array=True))
            return np.array(result)
|
{"hexsha": "c4f9cbcebdf4f540e137b9c4dc56d8d0e9468f80", "size": 6868, "ext": "py", "lang": "Python", "max_stars_repo_path": "refactored_modular/wrapper/elephant_wrapper_files/Rate_to_spike.py", "max_stars_repo_name": "mfahdaz/EBRAINS-InterscaleHUB", "max_stars_repo_head_hexsha": "c8c3952bfdf5f5cd913b7b078514b9a80829bd4d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "refactored_modular/wrapper/elephant_wrapper_files/Rate_to_spike.py", "max_issues_repo_name": "mfahdaz/EBRAINS-InterscaleHUB", "max_issues_repo_head_hexsha": "c8c3952bfdf5f5cd913b7b078514b9a80829bd4d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "refactored_modular/wrapper/elephant_wrapper_files/Rate_to_spike.py", "max_forks_repo_name": "mfahdaz/EBRAINS-InterscaleHUB", "max_forks_repo_head_hexsha": "c8c3952bfdf5f5cd913b7b078514b9a80829bd4d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.6242424242, "max_line_length": 123, "alphanum_fraction": 0.6093476995, "include": true, "reason": "import numpy", "num_tokens": 1378}
|
theory proof_insert
imports
  LLRB_SET
  LLRB_IMP
begin

(* Correctness of insertion into a left-leaning red-black tree (LLRB):
   functional correctness w.r.t. sorted lists (bst_insert), preservation of
   the black-height invariant (invh_insert) and of the colour invariant
   (invc_insert), combined in llrb_insert. *)

subsection \<open>proof of bst_insert\<close>

(* paint only recolours the root node, so the inorder traversal is unchanged *)
lemma bst_paint: "inorder(paint c t) = inorder t"
by(induct t)
auto

lemma bst_rightredB:
"inorder (rightredB l a r) = inorder l @ a # inorder r"
by(cases "(l, a, r)" rule: rightredB.cases) auto

(* the balancing operations preserve the inorder sequence *)
lemma bst_baliR:
"inorder(baliR l a r) = inorder l @ a # inorder r"
by(cases "(l,a,r)" rule: baliR.cases)
(auto simp: bst_rightredB)

lemma bst_baliL:
"inorder(baliL l a r) = inorder l @ a # inorder r"
by(cases "(l,a,r)"rule: baliL.cases)
(auto simp: bst_rightredB)

lemma bst_ins:
"sorted(inorder t) \<Longrightarrow> inorder(ins x t) = ins_list x (inorder t)"
by(induction x t rule: ins.induct)
(auto simp: ins_list_simps bst_baliL bst_baliR)

(* functional correctness: insert behaves like sorted-list insertion *)
theorem bst_insert:
"sorted(inorder t) \<Longrightarrow> inorder(insert x t) = ins_list x (inorder t)"
by(auto simp: insert_def bst_ins bst_paint)

subsection \<open>proof of invh_insert\<close>

(* f1/f2: auxiliary black-height facts about rightredB, used below *)
lemma f1:
"bheight t2 = 0 \<Longrightarrow> bheight (rightredB \<langle>\<rangle> a t2) = Suc 0"
apply (induct t2)
apply auto
by (metis (full_types) rightredB.simps(2) Suc_eq_plus1 add_is_0 bheight.simps(1) bheight.simps(2) color.exhaust zero_neq_one)

lemma f2:
"Suc (bheight v) = bheight t2 \<Longrightarrow> bheight (rightredB (B v vc vb) a t2) = Suc (bheight t2)"
apply (induct t2)
apply auto
by (metis (full_types) Suc_eq_plus1 bheight.simps(2) color.exhaust rightredB.simps(1))

lemma bheight_rightredB:
"bheight l = bheight r \<Longrightarrow> bheight (rightredB l a r) = Suc (bheight l)"
apply(induct l a r rule: baliL.induct)
apply auto
apply (simp add: f1)
apply (simp add: f2)
apply (smt (verit, del_insts) One_nat_def add_0 bheight.simps(1) bheight.simps(2) color.exhaust f2 rightredB.simps(5))
apply (smt (verit) One_nat_def add.commute add_0 add_Suc_shift bheight.simps(2) color.exhaust f2 rightredB.simps(5))
apply (simp add: f2)
apply (metis (full_types) Suc_eq_plus1 bheight.simps(2) color.exhaust f2 rightredB.simps(5))
apply (metis (full_types) Suc_eq_plus1 bheight.simps(2) color.exhaust f2 rightredB.simps(5))
by (simp add: f2)

(* balancing increments the black height by exactly one *)
lemma bheight_baliL:
"bheight l = bheight r \<Longrightarrow> bheight (baliL l a r) = Suc (bheight l)"
by(induct l a r rule: baliL.induct)
(auto simp: bheight_rightredB)

lemma bheight_baliR:
"bheight l = bheight r \<Longrightarrow> bheight (baliR l a r) = Suc (bheight l)"
by(induct l a r rule: baliR.induct)
(auto simp: bheight_rightredB)

lemma invh_rightredB:
"\<lbrakk> invh l; invh r; bheight l = bheight r \<rbrakk> \<Longrightarrow> invh (rightredB l a r)"
by(cases "(l,a,r)"rule: rightredB.cases)
auto

lemma invh_baliL:
"\<lbrakk> invh l; invh r; bheight l = bheight r \<rbrakk> \<Longrightarrow> invh (baliL l a r)"
apply(induct l a r rule: baliL.induct)
by(auto simp: bheight_rightredB invh_rightredB)

lemma invh_baliR:
"\<lbrakk> invh l; invh r; bheight l = bheight r \<rbrakk> \<Longrightarrow> invh (baliR l a r)"
by(induct l a r rule: baliR.induct)
(auto simp: bheight_rightredB invh_rightredB)

lemma invh_bheight_rightredB:
"\<lbrakk> invh l; invh r; bheight l = bheight r \<rbrakk> \<Longrightarrow> invh (rightredB l a r) \<and> bheight (rightredB l a r) = Suc (bheight r)"
apply (induct l a r rule: rightredB.induct)
by simp+

lemma paint2: "paint c2 (paint c1 t) = paint c2 t"
by(cases t)
auto

lemma invh_paint: "invh t \<Longrightarrow> invh (paint c t)"
by(cases t)
auto

(* ins preserves the black-height invariant and the black height itself *)
lemma invh_ins: "invh t \<Longrightarrow> invh (ins x t) \<and> bheight (ins x t) = bheight t"
by(induct x t rule: ins.induct)
(auto simp: invh_baliL invh_baliR bheight_baliL bheight_baliR)

theorem invh_insert: "llrb t \<Longrightarrow> invh (insert x t)"
by (auto simp: insert_def invh_ins invh_paint llrb_def)

subsection \<open>proof of invc_insert\<close>

(* colours are a two-element type: not-Black is Red and vice versa *)
lemma neq_Black[simp]: "(c \<noteq> Black) = (c = Red)"
by(cases c) auto

lemma neq_Red[simp]: "(c \<noteq> Red) = (c = Black)"
by(cases c) auto

(* invc2: colour invariant after forcing the root Black *)
abbreviation invc2 :: "'a llrb \<Rightarrow> bool" where
"invc2 t \<equiv> invc(paint Black t)"

lemma invc2I: "invc t \<Longrightarrow> invc2 t"
apply(cases t rule: search_tree2_cases)
apply simp+
using neq_Black by blast

lemma color_rightredB:"color (rightredB l a r) = Black"
apply (induct l a r rule: rightredB.induct)
by auto

lemma invc_rightredB:"invc l \<Longrightarrow> invc r \<Longrightarrow> invc (rightredB l a r)"
apply (induct l a r rule: rightredB.induct)
by auto

(* invc3: like invc but allows a red root with black children *)
fun invc3 :: "'a llrb \<Rightarrow> bool" where
"invc3 Leaf = True" |
"invc3 (Node l (a,c) r) = ((c = Red \<longrightarrow> color l = Black \<and> color r = Black) \<and> invc l \<and> invc r)"

(* ins_right_red: normalise a red node with a red right child *)
fun ins_right_red :: "'a llrb \<Rightarrow> 'a llrb" where
"ins_right_red (Node l (a, Red) r) = (if(color l = Black \<and> color r = Red \<and> invc r \<and> invc3 l) then rightredB l a r else (Node l (a, Black) r))" |
"ins_right_red Leaf = Leaf"|
"ins_right_red (B v vc vb) = (B v vc vb)"

abbreviation invc4 :: "'a llrb \<Rightarrow> bool" where
"invc4 t \<equiv> invc(ins_right_red t)"

(* invc_red: colour invariant modulo a possibly-red root fixed by ins_right_red *)
fun invc_red :: "'a llrb \<Rightarrow> bool" where
"invc_red Leaf = True" |
"invc_red (Node l (a,c) r) = (invc4 (Node l (a,c) r) \<and> invc l \<and> invc r)"

lemma invc4I:"invc t \<Longrightarrow> invc4 t"
apply(cases t rule: search_tree2_cases)
apply simp
by (metis (full_types) ins_right_red.simps(1) ins_right_red.simps(3) invc.simps(2) neq_Black)

lemma invc_redI:"invc t \<Longrightarrow> invc_red t"
apply(cases t rule: search_tree2_cases)
apply simp
by (metis (full_types) ins_right_red.simps(1) ins_right_red.simps(3) invc.simps(2) invc_red.simps(2) neq_Black)

(* the balancing operations repair the relaxed colour invariant *)
lemma invc_baliR1: "\<lbrakk>invc l; invc_red r\<rbrakk> \<Longrightarrow> invc_red (baliR l a r)"
apply (induct l a r rule: baliR.induct)
by (auto simp: invc_redI invc_rightredB)

lemma invc_baliR2: "\<lbrakk>invc l; invc_red r\<rbrakk> \<Longrightarrow> invc (baliR l a r)"
apply (induct l a r rule: baliR.induct)
apply auto
by (auto simp: invc_rightredB color_rightredB)

lemma invc_baliR3: "\<lbrakk>invc_red l; invc r\<rbrakk> \<Longrightarrow> invc_red (baliL l a r)"
apply (induct l a r rule: baliL.induct)
by(auto simp: invc_redI invc_rightredB)

lemma invc_baliR4: "\<lbrakk>invc_red l; invc r\<rbrakk> \<Longrightarrow> invc (baliL l a r)"
apply (induct l a r rule: baliL.induct)
by(auto simp: invc_rightredB color_rightredB)

lemma color_paint_Black: "color (paint Black t) = Black"
by(cases t) auto

lemma invc3I: "invc t \<Longrightarrow> invc3 t"
apply(cases t rule: search_tree2_cases)
by simp+

lemma invc_ins: "invc t \<longrightarrow> invc_red (ins x t) \<and> (color t = Black \<longrightarrow> invc (ins x t))"
apply(induct x t rule: ins.induct)
by(auto simp: invc_baliR1 invc_baliR2 invc3I invc_baliR3 invc_baliR4 invc_rightredB)

lemma invc2_ins:"invc t \<and> invh t \<and> color t = Black \<Longrightarrow> invc2 (ins x t)"
by (simp add: invc2I invc_ins)

theorem invc_insert: "llrb t \<Longrightarrow> invc (insert x t)"
by(simp add: llrb_def insert_def invc2_ins)

subsection \<open>proof of insert\<close>

(* main result: insertion preserves the full LLRB invariant *)
theorem llrb_insert: "llrb t \<Longrightarrow> llrb (insert x t)"
by(metis invc_insert invh_insert llrb_def color_paint_Black insert_def)

end
|
{"author": "Criank", "repo": "LLRB_PROOF_NEW", "sha": "2991cfdeee0ef0ce6b2992c393ab61443885781b", "save_path": "github-repos/isabelle/Criank-LLRB_PROOF_NEW", "path": "github-repos/isabelle/Criank-LLRB_PROOF_NEW/LLRB_PROOF_NEW-2991cfdeee0ef0ce6b2992c393ab61443885781b/proof_insert.thy"}
|
# Hierarchical clustering of a (phishing) data set: agglomerative (agnes)
# and divisive (diana) clustering, evaluated against the known labels.
library(base)
library(caret)
library(cluster)
library(dummies)
library(e1071)
library(factoextra)
library(modules)
library(RSNNS)
library(rstudioapi)
library(stats)
library(tidyverse)
library(utils)
# NOTE(review): eval_hc() below calls dendextend::cutree, but dendextend is
# never attached -- presumably installed and referenced by namespace; confirm.

# Work relative to this script's location (RStudio only).
base::setwd(base::dirname(rstudioapi::getActiveDocumentContext()$path))

start <- base::Sys.time()
base::set.seed(0xACDC)

data <- modules::use('data')$get_data$get_data()
# Column 47 holds the class label; columns 2..46 are the feature columns.
target_column <- 47

# One-hot encode the categorical features, then scale everything to [0, 1].
data_dm <-
  dummies::dummy.data.frame(data = data[, 2:46], sep = '_')
data_norm <-
  base::as.data.frame(RSNNS::normalizeData(data_dm, type = '0_1'))
base::names(data_norm) <- base::names(data)[2:46]

# First pass clusters the *variables*: transpose so rows are features.
data_norm <- base::t(data_norm)

distance <- stats::dist(data_norm, method = 'euclidean')
base::plot(factoextra::fviz_dist(
  distance,
  gradient = base::list(low = '#00AFBB',
                        mid = 'white', high = '#FC4E07')
))

# Pick the linkage method with the highest agglomerative coefficient (ac).
methods <- base::c('average', 'single', 'complete', 'ward')
base::names(methods) <- methods
hc_methods <-
  purrr::map_dbl(methods, function(x) {
    cluster::agnes(data_norm, method = x)$ac
  })
base::print(hc_methods)

best_ac_method <- base::names(base::which.max(hc_methods))
hc_agl <- cluster::agnes(data_norm, method = best_ac_method)
cluster::pltree(
  hc_agl,
  cex = 0.6,
  hang = -1,
  main = base::paste('Dendrogram of agnes - ', best_ac_method)
)
stats::rect.hclust(hc_agl, k = 2, border = base::c('#00AFBB', '#FC4E07'))

# Second pass clusters the *observations*: transpose back and repeat the
# linkage-method selection on the original orientation.
data_norm <- base::t(data_norm)

methods <- base::c('average', 'single', 'complete', 'ward')
base::names(methods) <- methods
hc_methods <-
  purrr::map_dbl(methods, function(x) {
    cluster::agnes(data_norm, method = x)$ac
  })
base::print(hc_methods)

best_ac_method <- base::names(base::which.max(hc_methods))
hc_agl <- cluster::agnes(data_norm, method = best_ac_method)

# Divisive clustering for comparison (dc = divisive coefficient).
hc_div <- cluster::diana(data_norm)
base::print(hc_div$dc)

# Cut a clustering into 2 groups and compare against the global `labels`
# vector via a confusion matrix; also plots the dendrogram.
# NOTE(review): reads the globals `data_norm` and `labels` defined below.
eval_hc <- function(hc) {
  cut <- dendextend::cutree(stats::as.hclust(hc), k = 2)
  assoc <-
    base::as.data.frame(data_norm) %>% dplyr::mutate(cluster = cut)
  cm <-
    caret::confusionMatrix(base::as.factor(labels),
                           base::as.factor(assoc$cluster))
  base::print(cm$table)
  base::print(cm$overall['Accuracy'])
  base::plot(stats::as.hclust(hc), cex = 0.6)
  stats::rect.hclust(stats::as.hclust(hc), k = 3, border = 2:5)
}

# The two clusterings assign the 'legitimate' class to opposite cluster ids,
# hence the swapped 1/2 encodings below.
labels <-
  base::ifelse(data[, target_column] == 'legitimate', 1, 2)
eval_hc(hc_agl)

labels <-
  base::ifelse(data[, target_column] == 'legitimate', 2, 1)
eval_hc(hc_div)

base::print(base::Sys.time() - start)
base::rm(start)
|
{"hexsha": "bad03cca8624bb7e89420fc1d26c703cc67b6ac9", "size": 2497, "ext": "r", "lang": "R", "max_stars_repo_path": "src/clustering.r", "max_stars_repo_name": "7Rocky/phishing-detection", "max_stars_repo_head_hexsha": "65f96bbf5751ee3ae1eab8f029797030678e956f", "max_stars_repo_licenses": ["0BSD"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-13T22:09:13.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-13T22:09:13.000Z", "max_issues_repo_path": "src/clustering.r", "max_issues_repo_name": "7Rocky/phishing-detection", "max_issues_repo_head_hexsha": "65f96bbf5751ee3ae1eab8f029797030678e956f", "max_issues_repo_licenses": ["0BSD"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/clustering.r", "max_forks_repo_name": "7Rocky/phishing-detection", "max_forks_repo_head_hexsha": "65f96bbf5751ee3ae1eab8f029797030678e956f", "max_forks_repo_licenses": ["0BSD"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-06-30T01:56:53.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-30T01:56:53.000Z", "avg_line_length": 23.780952381, "max_line_length": 73, "alphanum_fraction": 0.6692030437, "num_tokens": 768}
|
# Python modules
# 3rd party modules
import numpy as np
def cross_correlate(x, y, lag, covariance=False):
    """
    Calculate the cross correlation Pxy(lag) or cross covariance
    Rxy(lag) of two data sets x and y as a function of the lag.

    x: a numpy array of type integer, float or complex.
    y: a numpy array of type integer, float or complex; same length as x.
    lag: a numpy array, in the interval [-(n-2), (n-2)], of integers that
       gives the absolute distance(s) between indexed members of x.
    covariance: bool, flag; if set the sample cross covariance is returned
       (normalized by N) instead of the normalized correlation.

    Returns a numpy array of the same length as ``lag``; floating point
    (or complex, when either input is complex) regardless of input dtype.

    Raises ValueError if either input has fewer than two elements or the
    lengths differ.

    Reference: INTRODUCTION TO STATISTICAL TIME SERIES
               Wayne A. Fuller ISBN 0-471-28715-6
    """
    nx = len(x)
    ny = len(y)

    if nx < 2 or ny < 2:
        raise ValueError("x and y arrays must contain two or more values.")
    if nx != ny:
        raise ValueError("x and y arrays must be same length.")

    # Deviations from the mean. True division promotes integer inputs to
    # float here, so the products below are floating point.
    xd = x - np.sum(x) / nx
    yd = y - np.sum(y) / ny

    nlag = len(lag)
    # Accumulate in a float (or complex) dtype. Using x.dtype directly was a
    # bug: integer inputs truncated the sums and made the in-place divisions
    # below raise a casting error.
    corr = np.zeros(nlag, dtype=np.result_type(x, y, np.float64))

    for k in range(nlag):
        # Reverse the variables for negative lags.
        if lag[k] > 0:
            corr[k] = np.sum(xd[0:nx - lag[k]] * yd[lag[k]:])
        else:
            corr[k] = np.sum(yd[0:nx + lag[k]] * xd[-lag[k]:])

    # Divide by N for covariance, or divide by variance for correlation.
    if covariance:
        corr /= nx
    else:
        corr /= np.sqrt(np.sum(xd * xd) * np.sum(yd * yd))

    return corr
|
{"hexsha": "e94aa4f39abe210a32aed47e3d34261f3ffe6c58", "size": 1512, "ext": "py", "lang": "Python", "max_stars_repo_path": "vespa/analysis/algos/cross_correlate.py", "max_stars_repo_name": "vespa-mrs/vespa", "max_stars_repo_head_hexsha": "6d3e84a206ec427ac1304e70c7fadf817432956b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "vespa/analysis/algos/cross_correlate.py", "max_issues_repo_name": "vespa-mrs/vespa", "max_issues_repo_head_hexsha": "6d3e84a206ec427ac1304e70c7fadf817432956b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-04-17T13:58:31.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-20T14:19:57.000Z", "max_forks_repo_path": "vespa/analysis/algos/cross_correlate.py", "max_forks_repo_name": "vespa-mrs/vespa", "max_forks_repo_head_hexsha": "6d3e84a206ec427ac1304e70c7fadf817432956b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-06-05T16:34:57.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-19T16:13:22.000Z", "avg_line_length": 28.5283018868, "max_line_length": 78, "alphanum_fraction": 0.5958994709, "include": true, "reason": "import numpy", "num_tokens": 422}
|
# CENG 487 Assignment4 by
# Arif Burak Demiray
# December 2021
from OpenGL.GL import *
from OpenGL.GLUT.fonts import GLUT_BITMAP_9_BY_15
from OpenGL.raw.GLUT import glutBitmapCharacter
from numpy import character
def gluPrintText(text: 'list[character]', position_y: int = 0) -> None:
    """
    Render ``text`` as 9x15 bitmap characters near the lower-left corner of
    the window, shifted up by ``position_y`` pixels.
    """
    # Draw in white at a fixed window-space position.
    glColor3f(1, 1, 1)
    glWindowPos2d(20, 20 + position_y)
    for ch in text:
        glutBitmapCharacter(GLUT_BITMAP_9_BY_15, ord(ch))
|
{"hexsha": "034e2e62144fe7c7512d48ea2ec8171a132b5fb3", "size": 504, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/glu_helper.py", "max_stars_repo_name": "arifBurakDemiray/computer-graphics", "max_stars_repo_head_hexsha": "acf4781f92e325b12d986974c448b0e3520af431", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lib/glu_helper.py", "max_issues_repo_name": "arifBurakDemiray/computer-graphics", "max_issues_repo_head_hexsha": "acf4781f92e325b12d986974c448b0e3520af431", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/glu_helper.py", "max_forks_repo_name": "arifBurakDemiray/computer-graphics", "max_forks_repo_head_hexsha": "acf4781f92e325b12d986974c448b0e3520af431", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.5263157895, "max_line_length": 71, "alphanum_fraction": 0.7202380952, "include": true, "reason": "from numpy", "num_tokens": 148}
|
import numpy as np
import cv2
from .vector.vector2d import Vector2D
from .features import Features
class FacialLandmarks68Index(object):
    """Named indices into a 68-point facial landmark array.

    Each constant is the row index of the corresponding point in a (68, 2)
    landmark array (68-point annotation convention -- assumed from the
    names and values, TODO confirm against the upstream detector).
    """
    POINT_OF_SIGHT = 27
    RIGHT_EYE_CORNER = 36
    LEFT_EYE_CORNER = 45
    NOSE = 30
    MOUTH_UP = 51  # was assigned twice; duplicate removed
    MOUTH_DOWN = 57
    RIGHT_MOUTH_CORNER = 48
    LEFT_MOUTH_CORNER = 54
    RIGHT_EAR = 0
    LEFT_EAR = 16
    CHIN = 8
class FacialLandmarks(Features):
    """Represents a 68 2D point facial landmarks"""

    def __init__(self, landmarks, image_width, image_height):
        """FacialLandmarks constructor

        landmarks: indexable (68, 2) array of pixel coordinates.
        image_width / image_height: source image size, used to normalize
            coordinates in to_array().
        """
        self.data = landmarks
        self.name = "facial_landmarks"
        self.confidence = .8  # fixed default confidence for this feature type
        self.dimensions = (68, 2)
        self.image_width = image_width
        self.image_height = image_height

    def get_point(self, index):
        """Returns the 2D point specified by the given index"""
        return Vector2D(int(self.data[index][0]),
                        int(self.data[index][1]))

    def draw(self, image, color, thickness):
        """Draws the facial landmarks

        Connects consecutive landmark points with line segments; the skipped
        indices presumably end one facial contour each (jaw, brows, nose,
        eyes, mouth) so separate features are not joined -- TODO confirm
        against the 68-point layout used upstream.
        """
        for idx in range(0, 67):
            if idx == 16 or idx == 21 or idx == 26 or idx == 30 or idx == 35 \
                    or idx == 41 or idx == 47 or idx == 66:
                pass
            else:
                point1 = self.get_point(idx)
                point2 = self.get_point(idx+1)
                cv2.line(image, (point1.x, point1.y),
                         (point2.x, point2.y), color, thickness)

    def to_array(self):
        """Return the landmarks normalized by image size, flattened to a
        float32 vector of length 136 (x then y per point)."""
        features = np.zeros((68, 2), dtype=np.float32)
        features[:, 0] = self.data[:, 0]/float(self.image_width)
        features[:, 1] = self.data[:, 1]/float(self.image_height)
        return features.flatten()

    def head_pose_points(self):
        """Return the six reference points (nose, chin, eye corners, mouth
        corners) as a (6, 2) numpy array of pixel coordinates."""
        nose = self.get_point(FacialLandmarks68Index.NOSE).to_array()
        chin = self.get_point(FacialLandmarks68Index.CHIN).to_array()
        left_eye_corner = self.get_point(FacialLandmarks68Index.LEFT_EYE_CORNER).to_array()
        right_eye_corner = self.get_point(FacialLandmarks68Index.RIGHT_EYE_CORNER).to_array()
        left_mouth_corner = self.get_point(FacialLandmarks68Index.LEFT_MOUTH_CORNER).to_array()
        right_mouth_corner = self.get_point(FacialLandmarks68Index.RIGHT_MOUTH_CORNER).to_array()
        return np.array([nose, chin, left_eye_corner, right_eye_corner, left_mouth_corner, right_mouth_corner])
|
{"hexsha": "92dfe4de6351572eecb2a9d1c075cabc8fc5011f", "size": 2409, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/pyuwds3/types/landmarks.py", "max_stars_repo_name": "uwds3/uwds3", "max_stars_repo_head_hexsha": "3ec70111d63db0c8d97d9f1e0110b7fe9ad56179", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/pyuwds3/types/landmarks.py", "max_issues_repo_name": "uwds3/uwds3", "max_issues_repo_head_hexsha": "3ec70111d63db0c8d97d9f1e0110b7fe9ad56179", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/pyuwds3/types/landmarks.py", "max_forks_repo_name": "uwds3/uwds3", "max_forks_repo_head_hexsha": "3ec70111d63db0c8d97d9f1e0110b7fe9ad56179", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.640625, "max_line_length": 111, "alphanum_fraction": 0.6367787464, "include": true, "reason": "import numpy", "num_tokens": 630}
|
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 8 13:54:17 2019
@author: wmy
"""
import scipy
import tensorflow as tf
from keras.datasets import mnist
from keras import backend as K
from keras import layers
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D, MaxPooling2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
from PIL import Image
import pydot
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
from utils import CASPEALR1DataLoader
from model import SiameseNetwork
class FaceRecognition(object):
    """Siamese-network face verification: training, evaluation and pairwise
    prediction on the CAS-PEAL-R1 data set."""

    def __init__(self, input_shape=(240, 180, 1), name=None):
        """Build the data loader and compile the siamese network.

        input_shape: (height, width, channels) of the input images.
        name: optional identifier for this instance (not used internally).
        """
        self.input_shape = input_shape
        self.image_height, self.image_width, _ = input_shape
        self.data_loader = CASPEALR1DataLoader(image_height=self.image_height, \
                                               image_width=self.image_width)
        self.siamese_network = SiameseNetwork(input_shape)
        self.siamese_network.compile(loss='binary_crossentropy', \
                                     optimizer=Adam(lr=0.001), metrics=['accuracy'])
        self.name = name
        pass

    def prepare(self):
        """Write the data-set info files needed by the loader (one-off setup)."""
        self.data_loader.write_infos()
        pass

    def train(self, epochs=1000, batch_size=3, load_pretrained=False, seed=0):
        """Train on (anchor, positive, negative) triplets.

        Each batch is doubled into matching pairs (anchor/positive, label 1)
        and non-matching pairs (anchor/negative, label 0). Weights are
        checkpointed every 250 batches and every 10 epochs.
        """
        if load_pretrained:
            self.siamese_network.load_weights('./weights/siamese_network_weights.h5')
            print('Info: weights loaded.')
            pass
        for epoch in range(epochs):
            # new shuffle seed per epoch so batch composition varies
            seed += 1
            for batch_i, (anchor_images, positive_images, negative_images) \
                in enumerate(self.data_loader.load_batches(batch_size, seed=seed)):
                images_A = np.concatenate((anchor_images, anchor_images), axis=0)
                images_B = np.concatenate((positive_images, negative_images), axis=0)
                y_true = np.concatenate((np.ones((batch_size, 1)), np.zeros((batch_size, 1))), axis=0)
                loss, accuracy = self.siamese_network.train_on_batch([images_A, images_B], y_true)
                print('[epoch: {0:}/{1:}][batch: {2:}/{3:}][loss: {4:}][accuracy: {5:}]'.format(epoch+1, \
                      epochs, batch_i+1, self.data_loader.n_batches, loss, accuracy))
                if (batch_i+1)%250==0:
                    self.siamese_network.save_weights('./weights/siamese_network_weights.h5')
                    print('Info: weights saved.')
                    pass
                pass
            if (epoch+1)%10==0:
                self.siamese_network.save_weights('./weights/siamese_network_weights.h5')
                print('Info: weights saved.')
                pass
            pass
        pass

    def accuracy(self, batch_size=3, seed=0):
        """Evaluate pair-classification accuracy over the whole data set,
        printing a running accuracy after each batch and the final total."""
        self.siamese_network.load_weights('./weights/siamese_network_weights.h5')
        print('Info: weights loaded.')
        num_true = 0
        for batch_i, (anchor_images, positive_images, negative_images) \
            in enumerate(self.data_loader.load_batches(batch_size, seed=seed)):
            images_A = np.concatenate((anchor_images, anchor_images), axis=0)
            images_B = np.concatenate((positive_images, negative_images), axis=0)
            y_true = np.concatenate((np.ones((batch_size, 1)), np.zeros((batch_size, 1))), axis=0)
            loss, accuracy = self.siamese_network.evaluate([images_A, images_B], y_true)
            # + 0.5 rounds the per-batch accuracy to a correct-pair count
            num_true += int(accuracy * batch_size * 2 + 0.5)
            num_sum = (batch_i+1) * batch_size * 2
            accuracy = num_true / num_sum
            print('[after batch: {0:}][accuracy: {1:}]'.format(batch_i+1, accuracy))
            pass
        num_sum = self.data_loader.n_batches * batch_size * 2
        total_accuracy = num_true / num_sum
        print('total accuracy: {0:}'.format(total_accuracy))
        pass

    def predict(self, image_A_path, image_B_path, have_loaded_weights=False):
        """Return 1 if the two images are judged to show the same person,
        else 0. Images are scaled from [0, 255] to [-1, 1] before inference."""
        if have_loaded_weights==False:
            self.siamese_network.load_weights('./weights/siamese_network_weights.h5')
            print('Info: weights loaded.')
            pass
        images_A, images_B = [], []
        image_A = self.data_loader.imread(image_A_path)
        images_A.append(image_A)
        images_A = np.array(images_A)/127.5 - 1.0
        image_B = self.data_loader.imread(image_B_path)
        images_B.append(image_B)
        images_B = np.array(images_B)/127.5 - 1.0
        predictions = self.siamese_network.predict([images_A, images_B])
        prediction = np.squeeze(predictions)
        # 0.5 threshold on the sigmoid similarity score
        if prediction >= 0.5:
            return 1
        else:
            return 0
        pass
    pass
|
{"hexsha": "ae73957711a35d054184c1f5823e4a6a2c522bf3", "size": 5090, "ext": "py", "lang": "Python", "max_stars_repo_path": "facerec.py", "max_stars_repo_name": "wmylxmj/Face-Recognition", "max_stars_repo_head_hexsha": "ee461d179d4c25ec0292ce280738f60352e99be7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2019-04-08T04:23:13.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-24T03:37:19.000Z", "max_issues_repo_path": "facerec.py", "max_issues_repo_name": "wmylxmj/Face-Recognition-Siamese-Network", "max_issues_repo_head_hexsha": "ee461d179d4c25ec0292ce280738f60352e99be7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-11-12T05:47:19.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-12T05:47:19.000Z", "max_forks_repo_path": "facerec.py", "max_forks_repo_name": "wmylxmj/Face-Recognition-Siamese-Network", "max_forks_repo_head_hexsha": "ee461d179d4c25ec0292ce280738f60352e99be7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-04-17T11:32:51.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-27T07:04:10.000Z", "avg_line_length": 43.5042735043, "max_line_length": 107, "alphanum_fraction": 0.6206286837, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1169}
|
#define BOOST_TEST_DYN_LINK
#define BOOST_TEST_MODULE order_book_tests
#include <boost/test/unit_test.hpp>
#include "order_book.h"
// Buy orders are kept sorted by price, best (highest) bid first.
BOOST_AUTO_TEST_CASE( test_buy_ordering )
{
    ae::order_book book;
    book.insert(ae::order("A", "AUDUSD", 100, 10));
    book.insert(ae::order("A", "AUDUSD", 100, 7));
    book.insert(ae::order("A", "AUDUSD", 100, 11));
    book.insert(ae::order("A", "AUDUSD", 100, 9));
    BOOST_REQUIRE_EQUAL(4, book.buy_orders().size());
    BOOST_REQUIRE_EQUAL(0, book.sell_orders().size());
    // highest price at the front, lowest at the back
    BOOST_REQUIRE_EQUAL(11, book.buy_orders().begin()->price());
    BOOST_REQUIRE_EQUAL(7, book.buy_orders().rbegin()->price());
}
// Buy orders at the same price keep insertion (time) order; the differing
// quantities identify each order.
BOOST_AUTO_TEST_CASE( test_buy_ordering_same_price )
{
    ae::order_book book;
    book.insert(ae::order("A", "AUDUSD", 100, 10));
    book.insert(ae::order("A", "AUDUSD", 101, 10));
    book.insert(ae::order("A", "AUDUSD", 102, 10));
    book.insert(ae::order("A", "AUDUSD", 103, 10));
    BOOST_REQUIRE_EQUAL(4, book.buy_orders().size());
    BOOST_REQUIRE_EQUAL(0, book.sell_orders().size());
    BOOST_REQUIRE_EQUAL(100, book.buy_orders().front().quantity());
    book.buy_orders().pop_front();
    BOOST_REQUIRE_EQUAL(101, book.buy_orders().front().quantity());
    book.buy_orders().pop_front();
    BOOST_REQUIRE_EQUAL(102, book.buy_orders().front().quantity());
    book.buy_orders().pop_front();
    BOOST_REQUIRE_EQUAL(103, book.buy_orders().front().quantity());
    book.buy_orders().pop_front();
}
// Sell orders (negative quantity) are kept sorted by price, best (lowest)
// offer first.
BOOST_AUTO_TEST_CASE( test_sell_ordering )
{
    ae::order_book book;
    book.insert(ae::order("A", "AUDUSD", -100, 10));
    book.insert(ae::order("A", "AUDUSD", -100, 7));
    book.insert(ae::order("A", "AUDUSD", -100, 11));
    book.insert(ae::order("A", "AUDUSD", -100, 9));
    BOOST_REQUIRE_EQUAL(0, book.buy_orders().size());
    BOOST_REQUIRE_EQUAL(4, book.sell_orders().size());
    // lowest price at the front, highest at the back
    BOOST_REQUIRE_EQUAL(7, book.sell_orders().begin()->price());
    BOOST_REQUIRE_EQUAL(11, book.sell_orders().rbegin()->price());
}
// Sell orders at the same price keep insertion (time) order; the differing
// quantities identify each order.
BOOST_AUTO_TEST_CASE( test_sell_ordering_same_price )
{
    ae::order_book book;
    book.insert(ae::order("A", "AUDUSD", -100, 10));
    book.insert(ae::order("A", "AUDUSD", -101, 10));
    book.insert(ae::order("A", "AUDUSD", -102, 10));
    book.insert(ae::order("A", "AUDUSD", -103, 10));
    BOOST_REQUIRE_EQUAL(0, book.buy_orders().size());
    BOOST_REQUIRE_EQUAL(4, book.sell_orders().size());
    BOOST_REQUIRE_EQUAL(100, book.sell_orders().front().quantity());
    book.sell_orders().pop_front();
    BOOST_REQUIRE_EQUAL(101, book.sell_orders().front().quantity());
    book.sell_orders().pop_front();
    BOOST_REQUIRE_EQUAL(102, book.sell_orders().front().quantity());
    book.sell_orders().pop_front();
    BOOST_REQUIRE_EQUAL(103, book.sell_orders().front().quantity());
    book.sell_orders().pop_front();
}
// A buy of 100 @ 1.11 crosses both resting sells; matching consumes the
// best-priced sell (50 @ 1.1) first, then the remainder at 1.11.
BOOST_AUTO_TEST_CASE( test_first_price_entered_selected )
{
    ae::order_book book;
    book.insert(ae::order("B", "EURUSD", -100, 1.11));
    book.insert(ae::order("F", "EURUSD", -50, 1.1));
    book.insert(ae::order("D", "EURUSD", 100, 1.11));
    ae::trade_collection trades;
    book.match(trades);
    BOOST_REQUIRE_EQUAL(2, trades.size());
    auto trade = trades[0];
    BOOST_REQUIRE_EQUAL(1.1, trade.match_price());
    BOOST_REQUIRE_EQUAL(50, trade.match_quantity());
    trade = trades[1];
    BOOST_REQUIRE_EQUAL(1.11, trade.match_price());
    BOOST_REQUIRE_EQUAL(50, trade.match_quantity());
}
|
{"hexsha": "198564f26c8a04b89c4f8302fc688deceda7fb92", "size": 3394, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "part_1/cpp_solution/order_book_tests.cpp", "max_stars_repo_name": "jessmorecroft/Exchange", "max_stars_repo_head_hexsha": "7b190b22304c6381db22b7722f0ce4518de3df2c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2020-06-24T20:10:25.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-09T21:43:06.000Z", "max_issues_repo_path": "part_1/cpp_solution/order_book_tests.cpp", "max_issues_repo_name": "jessmorecroft/Exchange", "max_issues_repo_head_hexsha": "7b190b22304c6381db22b7722f0ce4518de3df2c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9.0, "max_issues_repo_issues_event_min_datetime": "2019-06-15T20:01:48.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-05T03:40:29.000Z", "max_forks_repo_path": "part_1/cpp_solution/order_book_tests.cpp", "max_forks_repo_name": "jessmorecroft/Exchange", "max_forks_repo_head_hexsha": "7b190b22304c6381db22b7722f0ce4518de3df2c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8.0, "max_forks_repo_forks_event_min_datetime": "2017-06-15T02:09:23.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-11T10:11:38.000Z", "avg_line_length": 30.3035714286, "max_line_length": 68, "alphanum_fraction": 0.6744254567, "num_tokens": 946}
|
<a href="https://colab.research.google.com/github/mella30/Deep-Learning-with-Tensorflow-2/blob/main/Course3-Probabilistic_Deep_Learning_with_Tensorflow2/week4_KL_divergence.ipynb" target="_parent"></a>
# Kullback-Leibler divergence
This reading will review the definition of the Kullback-Leibler (or KL) divergence, look at some of its important properties, and see how it can be computed in practice with TensorFlow Probability.
```python
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
print("TF version:", tf.__version__)
print("TFP version:", tfp.__version__)
# Additional packages for the reading
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Ellipse
```
TF version: 2.6.0
TFP version: 0.14.1
## Introduction
As you have already seen, the KL divergence is used in variational inference to score the dissimilarity between two distributions. In this reading, we will examine KL divergence more closely. We will see the definition of the KL divergence and some important properties, as well as how it can be computed using `tfd.kl_divergence` and Monte Carlo estimation.
## Definition of the Kullback-Leibler divergence
Given two probability density or mass functions $q(x)$ and $p(x)$, the Kullback-Leibler divergence between them is defined as
\begin{equation}
D_{KL}\big[q \ || \ p\big] =\begin{cases}
\text{E}_{X\sim q}\big[
\log q(X) - \log p(X)\big] &\text{if } p(x) = 0 \implies q(x) = 0,\\
\infty &\text{otherwise.}
\end{cases}
\end{equation}
The condition $p(x) = 0 \implies q(x) = 0$ - _absolute continuity_ - ensures that the $\log$ in the expectation is well-defined for all $x$ in the support of $q$.
As was mentioned, the KL divergence is a score for the disagreement of two distributions in their placement of probability mass. A smaller score indicates a greater degree of agreement.
## Properties
The Kullback-Leibler divergence is asymmetric. In general,
\begin{equation}
D_{KL}\big[q \ || \ p\big] \neq D_{KL}\big[p \ || \ q \big]
\end{equation}
In variational inference, $q$ is the approximating distribution, while $p$ is the distribution being approximated. The other KL divergence - $D_{KL}[p \ || \ q ]$ - is also sometimes used as a loss function, for reasons that will become clear later in this reading.
### Gibbs' inequality
A crucial property of the KL divergence is that for all $q$ and $p$,
\begin{equation}
D_{KL}\big[q \ || \ p\big] \geq 0,
\end{equation}
with equality if and only if $q(x) = p(x)$ almost everywhere. This property is very useful when we are trying to learn a $q$ that is similar to a $p$: if $D_{KL}[q \ || \ p] = 0$, then we know that $q$ is identical to $p$.
### What causes KL divergence to increase?
As an example, take $q(x)$ and $p(x)$ to be probability mass functions, and let $\mathcal{X}$ be $q$'s support. Provided $q$ is absolutely continuous with respect to $p$, we have
\begin{equation}
D_{KL}\big[q \ || \ p\big] = \sum_{x \in \mathcal{X}} q(x) \log \frac{q(x)}{p(x)}.
\end{equation}
Values of $x$ that $p$ assigns mass to but $q$ does not do not feature in this sum. Superficially, this may suggest that divergence is not increased if $q$ fails to place mass where $p$ does. However, $q$ is a probability mass function, so will inevitably place more mass than $p$ at some other value(s) of $x$. At those other locations, $\log q(x)/p(x) > 0$, so the divergence is increased.
On the other hand, if $q$ places probability mass where $p$ does not, then $D_{KL}\big[q \ || \ p\big]$ is $+\infty$ - the KL divergence severely penalizes $q$ for locating probability mass where $p$ does not!
From this combination of effects, we can conclude that
\begin{equation}
\text{support}(q) \subseteq \text{support}(p) \implies D_{KL}\big[ q \ || \ p \big] < \infty,
\end{equation}
while
\begin{equation}
\text{support}(p) \subset \text{support}(q) \implies D_{KL}\big[ q \ || \ p \big] = \infty
\end{equation}
Consequently, the KL divergence favours distributions $q$ that have a support contained in the target distribution's (i.e. $p$'s).
The diagram below illustrates how the KL divergence is affected by the support of two bivariate density functions $q$ and $p$. The hatched regions indicate the support of either function.
```python
_, axs = plt.subplots(1, 2, sharex=True, sharey=True, figsize=(11, 5))
delta = 45.0 # degrees
q_ell_inf = Ellipse((0, 0), 2, 1.5, 45, ec='blue', fc='none',
alpha=0.5, label='q(x)', hatch='/')
q_ell_fin = Ellipse((0, 0), 0.5, 0.75, 45, ec='blue', fc='none',
alpha=0.5, label='q(x)', hatch='/')
p_ell_inf = Ellipse((0, 0), 1, 1, 45, ec='red', fc='none',
alpha=0.5, label='p(x)', hatch='\\')
p_ell_fin = Ellipse((0, 0), 1, 1, 45, ec='red', fc='none',
alpha=0.5, label='p(x)', hatch='\\')
# KL divergence is infinite
for ell in [q_ell_inf, p_ell_inf]:
axs[0].add_artist(ell)
axs[0].legend([q_ell_inf, p_ell_inf], ['Support of q', 'Support of p'], loc='lower right')
axs[0].get_xaxis().set_ticks([])
axs[0].get_yaxis().set_ticks([])
# KL divergence is finite
for ell in [q_ell_fin, p_ell_fin]:
axs[1].add_artist(ell)
axs[1].legend([q_ell_fin, p_ell_fin], ['Support of q', 'Support of p'], loc='lower right')
axs[1].get_xaxis().set_ticks([])
axs[1].get_yaxis().set_ticks([])
axs[0].set_title(r'$D_{KL}[q \ || \ p] = +\infty$')
axs[1].set_title(r'$D_{KL}[q \ || \ p]$ is finite but non-zero')
plt.xlim(-1, 1)
plt.ylim(-1, 1);
```
## Computing KL divergence in TensorFlow
For some choices of $q$ and $p$, the KL divergence can be evaluated to a closed-form expression.
`tfd.kl_divergence` computes the KL divergence between two distributions analytically, provided the divergence in question has been implemented in the TensorFlow Probability library.
Below is an example that uses `tfd.kl_divergence` to compute $D_{KL}\big[q \ || \ p \big]$ when $q$ and $p$ are univariate normal distributions.
```python
# Simple example
mu_q = 0.
sigma_q = 1.
mu_p = 0.
sigma_p = 0.5
distribution_q = tfd.Normal(loc=mu_q, scale=sigma_q)
distribution_p = tfd.Normal(loc=mu_p, scale=sigma_p)
tfd.kl_divergence(distribution_q, distribution_p) # D_{KL}[q || p]
```
<tf.Tensor: shape=(), dtype=float32, numpy=0.8068528>
Let's check this value. The KL divergence between two univariate normal distributions can be derived directly from the definition of the KL divergence as
\begin{equation}
D_{KL}\big[ q \ || \ p\big] = \frac{1}{2}\bigg(\frac{\sigma_q^2}{\sigma_p^2} + \frac{(\mu_q - \mu_p)^2}{\sigma_p^2} + 2\log \frac{\sigma_p}{\sigma_q} - 1\bigg)
\end{equation}
The value of this function should be equal to that returned by `kl_divergence(distribution_q, distribution_p)`.
```python
# Analytical expression for KL divergence between two univariate Normals
0.5*( (sigma_q/sigma_p)**2 + ((mu_q - mu_p)/sigma_p)**2 + 2*np.log(sigma_p/sigma_q) - 1)
```
0.8068528194400546
Sure enough, it is.
If a batch of distributions is passed to `kl_divergence`, then a batch of divergences will be returned. `kl_divergence` also supports broadcasting.
```python
# Batch example with broadcasting
distributions_q = tfd.Normal(loc=[0., 1.], scale=1.)
distribution_p = tfd.Normal(loc=0., scale=0.5)
```
```python
# Notice the batch_shape
distributions_q
```
<tfp.distributions.Normal 'Normal' batch_shape=[2] event_shape=[] dtype=float32>
```python
# [D_{KL}[q_1 || p], D_{KL}[q_2 || p]
tfd.kl_divergence(distributions_q, distribution_p)
```
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([0.8068528, 2.8068528], dtype=float32)>
`kl_divergence` provides a convenient way of computing the KL divergence for many TensorFlow distributions. As a rule of thumb, it will evaluate successfully provided you pass in two distributions of the same parametric family.
```python
# An example with another distribution
beta_q = tfd.Beta(concentration1=12, concentration0=3)
beta_p = tfd.Beta(concentration1=9, concentration0=3)
tfd.kl_divergence(beta_q, beta_p)
```
WARNING:tensorflow:@custom_gradient grad_fn has 'variables' in signature, but no ResourceVariables were used on the forward pass.
<tf.Tensor: shape=(), dtype=float32, numpy=0.09615421>
```python
# An example with a multivariate distribution
cov_q = np.array([[1., 0.5], [0.5, 1.]])
cov_p = np.array([[1., 0.], [0., 1.]])
mvtnormal_q = tfd.MultivariateNormalTriL(loc=[0., 0.], scale_tril=tf.linalg.cholesky(cov_q))
mvtnormal_p = tfd.MultivariateNormalTriL(loc=[0., 0.], scale_tril=tf.linalg.cholesky(cov_p))
tfd.kl_divergence(mvtnormal_q, mvtnormal_p)
```
<tf.Tensor: shape=(), dtype=float64, numpy=0.14384103622589053>
To see a complete list of distributions for which a KL method is defined, refer to `help(tfd.kl_divergence)`.
If you pass `kl_divergence` a pair of distributions for which a KL divergence method is not implemented, an error will be raised:
```python
# uniform_q and beta_p are both uniform distributions with support [0, 1]
uniform_q = tfd.Uniform(low=0., high=1.)
beta_p = tfd.Beta(concentration1=0., concentration0=0.)
```
```python
# kl_divergence has no method for computing their divergence
try:
tfd.kl_divergence(uniform_q, beta_p)
except Exception as e:
print(e)
```
No KL(distribution_a || distribution_b) registered for distribution_a type Uniform and distribution_b type Beta
### When `kl_divergence` fails
If you do not have a closed-form expression for your KL divergence, and it is not implemented in `tfd.kl_divergence`, then you can make a Monte Carlo estimate of it. Simply sample $n$ values $x_1, \ldots, x_n$ from $q$, then evaluate the estimate
\begin{equation}
\frac{1}{n}\sum_{i=1}^n \log\big[q(x_i)\big] - \log\big[p(x_i)\big]
\end{equation}
In general, the Monte Carlo estimator is unbiased and its variance is inversely proportional to $n$.
To show how the variance of the Monte Carlo estimator varies with $n$, let's attempt to estimate $D_{KL}\big[q \ || \ p\big]$ when $q$ and $p$ are univariate normal distributions. We'll make many estimates for several values of $n$, then plot their absolute error as a function of $n$.
We'll start by evaluating the exact value $D_{KL}\big[q \ || \ p\big]$ using `kl_divergence`. Bear in mind that the Monte Carlo estimate will only be useful in situations where this is not possible!
```python
# Evaluate the exact KL divergence
distribution_q = tfd.Normal(loc=0., scale=1.)
distribution_p = tfd.Normal(loc=0., scale=0.5)
exact_kl_divergence = tfd.kl_divergence(distribution_q, distribution_p).numpy() # D_{KL}[q || p]
exact_kl_divergence
```
0.8068528
Next, we'll define a function for making a Monte Carlo estimate for a given $q$, $p$, and $n$.
```python
# Function to estimate the KL divergence with Monte Carlo samples
def monte_carlo_estimate_of_kl_divergence(n, q_sampler, q_density, p_density):
'''
Computes a Monte Carlo estimate of D_{KL}[q || p] using
n samples from q_sampler.
q_sampler is a function that receives a positive integer
and returns as many samples from q.
Given samples x_1, ..., x_n from q_sampler, the Monte Carlo
estimate is
\frac{1}{n}\sum_{i=1}^n \log(q(x_i)) - \log(p(x_i))
where q and p are density/mass functions.
'''
x = q_sampler(n)
KL_estimate = np.mean(np.log(q_density(x)) - np.log(p_density(x)))
return(KL_estimate)
```
The code below shows how this function can be used to make a single estimate.
```python
# Single MC estimate
n = 1000 # number of samples used in MC estimate
q_sampler = distribution_q.sample
q_density = distribution_q.prob
p_density = distribution_p.prob
monte_carlo_estimate_of_kl_divergence(n, q_sampler, q_density, p_density)
```
0.7580066
To see how the estimator's variance decreases with increasing $n$, let's evaluate a few hundred estimates for each point in a grid of $n$ values.
```python
# Create a grid of 8 points
n_grid = 10**np.arange(1, 8)
samples_per_grid_point = 100 # Number of MC estimates to make for each value of n
```
```python
# Array to store results
kl_estimates = np.zeros(shape=[samples_per_grid_point, len(n_grid), 2])
```
```python
# Make 100 MC estimates for each value of n, store the results in kl_estimates
for sample_num in range(samples_per_grid_point):
for grid_num, n in enumerate(n_grid):
kl_estimates[sample_num, grid_num, 0] = n
kl_estimates[sample_num, grid_num, 1] = monte_carlo_estimate_of_kl_divergence(n,
q_sampler, q_density, p_density)
```
```python
# Compute RMSE of estimates (this is approximately equal to the standard deviation of the MC estimator)
rmse_of_kl_estimates = np.sqrt(np.mean((kl_estimates[:, :, 1] -
exact_kl_divergence)**2, axis=0))
```
```python
# Compute absolute error of the MC estimates
abs_error_of_kl_estimates = abs(kl_estimates[:, :, 1].flatten() - exact_kl_divergence)
```
```python
# Plot the results
_, ax = plt.subplots(1, 1, figsize=(15, 5))
plt.xlabel(r'Number of samples in Monte Carlo estimate, $n$')
ax.scatter(kl_estimates[:, :, 0],
abs_error_of_kl_estimates,
marker='.', color='red',
alpha=0.1, label='Absolute error of Monte Carlo estimates')
ax.plot(n_grid, rmse_of_kl_estimates, color='k', label='RMSE of Monte Carlo estimates')
ax.set_xscale('log'); ax.set_yscale('log'); ax.set_ylim([1e-6, 10])
ax.legend();
```
You can see that the gradient of the estimates' RMSE, an estimate of the MC estimator's standard deviation, with respect to $n$ is $-\frac{1}{2}$. This is unsurprising: the estimator's variance is inversely proportional to $n$, so its log standard deviation is a linear function of $\log n$ with gradient $-\frac{1}{2}$. As $n$ increases, the Monte Carlo estimates approach the exact value of the KL divergence.
### Summary
You should now feel confident about how the Kullback-Leibler divergence is motivated and defined, what its key properties and why it is used in variational inference, and how it can be computed or estimated in TensorFlow.
### Further reading and resources
* TensorFlow documentation on `tfd.kl_divergence`: https://www.tensorflow.org/probability/api_docs/python/tfp/distributions/kl_divergence
## Appendix
#### Information gain, relative entropy, and Bayesian inference
This section provides further context for the Kullback-Leibler divergence. It is not essential, but it will give you a more complete understanding of what the divergence measures.
The Kullback-Leibler divergence has its origins in information theory. The Shannon entropy, defined as
\begin{equation}
H(P) := E_{X \sim P(x)}[-\log P(X) ]
\end{equation}
is the greatest lower bound on the average number of nats ($\log 2$ nats are equal to $1$ bit) required to losslessly encode an observation sampled from $P(x)$. This is an informal statement of a result known as the _source coding theorem_. $-\log P(x)$ is the number of nats used to encode $x$ in the lossless encoding scheme.
Say that a lossless compression algorithm instead encodes observations using a scheme that would be optimal for distribution $Q(x)$. Then the average number of nats required to encode an observation sampled from $P(x)$ would be
\begin{equation}
H(P, Q) := E_{X \sim P(x)}[-\log Q(X)]
\end{equation}
This quantity is referred to as the _cross-entropy_ between $P$ and $Q$. Since $H(P)$ is the minimum average information for encoding observations from $P(x)$ by definition, it follows that $H(P, Q) \geq H(P)$.
The Kullback-Leibler divergence is defined as the average additional information required to encode observations from $P(x)$ using an optimal code for $Q(x)$:
\begin{align}
D_{KL}(P \ || \ Q) &:= E_{X \sim P(x)}[-\log Q(X)] - E_{X \sim P(x)}[-\log P(X)] \\
&= H(P, Q) - H(P)
\end{align}
The KL divergence therefore tells us how inefficient the optimal coding scheme for $Q$ is when applied to data source $P$.
That KL divergence is the difference between a cross-entropy and a Shannon entropy sheds light on why the KL divergence has another moniker - _relative entropy_.
Alternatively, we might consider encoding observations in the context of Bayesian inference. Let $P(y)$ be the prior and $P(y|x)$ be the posterior. Then the Kullback-Leibler divergence
\begin{equation}
D_{KL}(P(y|x) \ || \ P(y)) = E_{Y \sim P(y|x)}[-\log P(Y)] - E_{Y \sim P(y|x)}[-\log P(Y|x)]
\end{equation}
is the average number of bits saved if observations are encoded using an optimal code for the posterior rather than the prior. In this sense, the KL divergence tells us how much information is gained by conditioning on $X$.
|
{"hexsha": "81c303fe26569349ade5fb9663ed60be0c92e3dd", "size": 100221, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "Course3-Probabilistic_Deep_Learning_with_Tensorflow2/week4_KL_divergence.ipynb", "max_stars_repo_name": "mella30/Probabilistic-Deep-Learning-with-TensorFlow-2", "max_stars_repo_head_hexsha": "e9748316547d7f433632f4735990306d6e15da72", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Course3-Probabilistic_Deep_Learning_with_Tensorflow2/week4_KL_divergence.ipynb", "max_issues_repo_name": "mella30/Probabilistic-Deep-Learning-with-TensorFlow-2", "max_issues_repo_head_hexsha": "e9748316547d7f433632f4735990306d6e15da72", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Course3-Probabilistic_Deep_Learning_with_Tensorflow2/week4_KL_divergence.ipynb", "max_forks_repo_name": "mella30/Probabilistic-Deep-Learning-with-TensorFlow-2", "max_forks_repo_head_hexsha": "e9748316547d7f433632f4735990306d6e15da72", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 111.4805339266, "max_line_length": 34090, "alphanum_fraction": 0.8183215095, "converted": true, "num_tokens": 4761}
|
import gurobipy as gp
from gurobipy import GRB
from mosek.fusion import *
import time,sys
import numpy as np
import pandas as pd
from scipy.linalg import sqrtm
from DimacsReader import *
def save(name, finished, value, relax, soltime, iteration, inner, xsol):
    """Write a human-readable summary of one algorithm run to
    ../output/Application2/<name>/InnerOuterApproxAlgo.txt.

    # Arguments
    - name: instance name, used to build the output path.
    - finished: True if the algorithm converged before the time limit.
    - relax: objective of the relaxation; only written when at least one
      iteration ran and the time limit was hit.
    - soltime: total solution time (seconds).
    - iteration: number of inner/outer iterations performed.
    - inner: fraction of total time spent in the lower-level solver.
    - xsol: upper-level solution vector.
    """
    # BUG FIX: the parameter was previously spelled 'innner' while the body
    # read 'inner', raising NameError on every call; now consistently 'inner'.
    # 'with' guarantees the file handle is closed even if a write fails.
    with open("../output/Application2/"+name+"/InnerOuterApproxAlgo.txt", "w+") as f:
        if finished:
            f.write("Finished before time limit.\n")
        else:
            f.write("Time limit reached.\n")
        f.write("Obj: "+str(value)+"\n")
        # The relaxation bound is only meaningful after at least one iteration
        # and only reported when the run was cut off by the time limit.
        if iteration > 0 and not finished:
            f.write("Obj relaxation: "+str(relax)+"\n")
        f.write("SolTime: "+str(soltime)+"\n")
        f.write("It. number: "+str(iteration)+"\n")
        f.write("Percent. inner: "+str(inner)+"\n")
        f.write("\nUpper level solution: "+str(xsol)+"\n")
def main_app2(name_dimacs,name,mu,timelimit=18000):
    """Run the inner/outer approximation algorithm on one Application-2 instance.

    Loads the DIMACS graph `name_dimacs` and the precomputed data arrays for
    `name`, solves the single-level restriction first, and stops immediately
    if the lower-level matrix Q2 + diag(diagonalQ2x * x) is positive definite
    (sufficient global-optimality condition). Otherwise it alternates between
    the `master` problem and the lower-level subproblem until convergence or
    until `timelimit` seconds have elapsed. Results and per-iteration logs are
    written under ../output/Application2/<name>/ via `save` and a CSV file.

    # Arguments
    - name_dimacs: file name of the graph inside ../DIMACS/.
    - name: instance name; selects ../Application2_data/<name>/ inputs and
      the output directory.
    - mu: initial penalty weight on the distance term of the master problem.
    - timelimit: wall-clock budget in seconds (default 18000).
    """
    #Logs
    ValueLogRes, ValueLogRel, EpsLogs, MasterTimeLogs, LLTimeLogs = [],[],[],[],[]
    #Reading graph file
    f = DimacsReader("../DIMACS/"+name_dimacs)
    M = f.M
    n = f.n
    # Precomputed quadratic data for both players (Q2/q2 use the "_fix" files).
    Q1= np.load("../Application2_data/"+name+"/bigQ1.npy")
    Q2= np.load("../Application2_data/"+name+"/bigQ2_fix.npy")
    q1= np.load("../Application2_data/"+name+"/q1.npy")
    q2= np.load("../Application2_data/"+name+"/q2_fix.npy")
    diagonalQ2x = np.load("../Application2_data/"+name+"/diagQ2x.npy")
    Mcheck = np.load("../Application2_data/"+name+"/M.npy")
    # Sanity checks: the stored M matches the DIMACS matrix and the data
    # matrices are symmetric (up to numerical tolerance).
    assert(np.linalg.norm(M-Mcheck)<1E-6)
    assert(np.linalg.norm(M-M.T)<1E-6)
    assert(np.linalg.norm(Q1-Q1.T)<1E-6)
    assert(np.linalg.norm(Q2-Q2.T)<1E-6)
    print("We are solving instance:", name)
    """Solve the restriction. If sufficient condition of GOPT is satisfied, stop"""
    t0 = time.time()
    xres, zres, obj = restriction(M,n,Q1,Q2,q1,q2,diagonalQ2x)
    x = xres
    mastertime = time.time() - t0
    mastertime_tot = mastertime
    LLtime_tot = 0
    obj_relax=0
    #we check if the matrix Q2 is PD (i.e. sufficient condition satisfied):
    if min(np.linalg.eig(Q2+np.diag(diagonalQ2x*xres))[0])>1E-6: #the matrix is positive definite
        running = False
        print("The matrix is positive definite.")
    else:
        running = True
    # Log the initial restriction solve (no relaxation bound yet).
    ValueLogRes.append(obj)
    ValueLogRel.append(-np.inf)
    EpsLogs.append(0)
    MasterTimeLogs.append(mastertime)
    LLTimeLogs.append(0)
    """If not, we run the inner/outer approximation algorithm """
    iteration = 0
    # Stronger penalty weight, switched on once restriction and relaxation
    # objectives are within 0.1% of each other (see end of loop body).
    mu2 = mu * 100
    Qxk_list, qxk_list, vxk_list, yklist = [],[],[],[]
    while running and (time.time()-t0 < timelimit):
        print("Iteration number {0}".format(iteration+1))
        t1 = time.time()
        #we solve the master problem
        x,z,xrelax,crelax,obj,obj_relax,dist = master(M,n,Q1,Q2,q1,q2,diagonalQ2x,Qxk_list,qxk_list,np.array(vxk_list),yklist,mu)
        mastertime = time.time() - t1
        mastertime_tot = mastertime_tot + mastertime
        # Lower-level quadratic data evaluated at the relaxed point xrelax.
        Qrelax = Q2+np.diag(diagonalQ2x*xrelax)
        brelax = q2 + (M.T)@xrelax
        # Time budget handed to the subproblem solver (at least 10 seconds).
        tl = 10+max(0,timelimit-(time.time()-t0))
        #we solve the inner problem
        t1 = time.time()
        yrelax,epsrel = solve_subproblem_App2(n,Qrelax,brelax,crelax,tl) #we get epsrel = (v+crelax)
        LLtime = time.time() - t1
        LLtime_tot = LLtime_tot + LLtime
        # Store the cut data generated at xrelax for the next master solve.
        Qxk_list.append(Qrelax)
        qxk_list.append(brelax)
        vxk_list.append(epsrel-crelax)#we solve the LL problem involving h(xrelax)=crelax, thus, to obtain v, we have to subtract from the LLobj h(xrelax)
        yklist.append(yrelax)
        #Logs
        ValueLogRes.append(obj)
        ValueLogRel.append(obj_relax)
        EpsLogs.append(epsrel)
        MasterTimeLogs.append(mastertime)
        LLTimeLogs.append(LLtime)
        print("ObjRes, ObjRel, Average = {0},{1},{2}".format(obj,obj_relax,0.5*obj+0.5*obj_relax))
        print("Distance term (check) = {0}".format(dist))
        print("Epsilon term (check) = {0}".format(epsrel))
        # Stop when the lower-level gap is (numerically) nonnegative and the
        # restriction/relaxation copies of the upper-level point coincide.
        if epsrel>-1E-6 and dist<1E-6:
            running=False
        # When the two objectives agree within 0.1%, tighten the penalty.
        if abs(obj-obj_relax)/abs(obj)<0.001:
            mu = mu2
        iteration+=1
    soltime = time.time() - t0
    # Fraction of total solver time spent in the lower-level subproblem.
    percentLL = LLtime_tot/(mastertime_tot+LLtime_tot)
    save(name,not(running),obj,obj_relax,soltime,iteration,percentLL, x)
    # Dump the per-iteration logs to CSV.
    df = pd.DataFrame()
    df['MasterObjRes'],df['MasterObjRel'],df["Epsilon"],df["MasterTime"],df['LLTime'] = ValueLogRes,ValueLogRel, EpsLogs, MasterTimeLogs, LLTimeLogs
    df.to_csv("../output/Application2/"+name+"/InnerOuterApproxAlgo.csv")
def restriction(M,n,Q1,Q2,q1,q2,diagonalQ2x):
    """Solve the single-level (SDP) restriction with MOSEK Fusion.

    Builds a conic model with the upper-level variables (x on the simplex,
    scalar z, epigraph variable t for 0.5 x'Q1 x), the lower-level dual
    multipliers (lam, lam2, alpha, beta) and an (n+1)x(n+1) PSD matrix whose
    blocks are pinned to the problem data by equality constraints.

    Returns the tuple (xres, zres, objective) where objective is
    zres + 0.5*xres'Q1 xres + q1'xres.

    NOTE(review): the exact derivation of the dual/PSD constraints comes from
    the accompanying paper, not visible here — verify against the reference.
    """
    with Model("App2") as model:
        A = -np.eye(n)
        #Upper level var
        z = model.variable("z", 1, Domain.unbounded())
        x = model.variable("x", n, Domain.greaterThan(0.0))
        #LL variables
        lam = model.variable("lambda", Domain.unbounded()) #lagrangian multiplier related to the equality constraint (simplex)
        lam2 = model.variable("lambda2", n, Domain.greaterThan(0.0)) #lagrangian multiplier related to the nonnegativity of y
        alpha = model.variable("alpha", Domain.greaterThan(0.0))
        beta = model.variable("beta", Domain.unbounded())
        #Vars for PSD constraint
        PSDVar = model.variable(Domain.inPSDCone(n+1))
        # Blocks of the PSD variable: top-left n x n, last column, bottom-right scalar.
        PSDVar_main = PSDVar.slice([0,0], [n,n])
        PSDVar_vec = Var.flatten(PSDVar.slice([0,n], [n,n+1]))
        PSDVar_offset = PSDVar.slice([n,n], [n+1,n+1])
        #other auxiliary variables
        t = model.variable("t", 1, Domain.unbounded()) #upper level variable
        P1 = sqrtm(Q1) #necessary for the following constraint
        ##t >= 0.5 x^TQ_1x iif t >= 0.5 ||P_1 x ||^2 iif (t,1, P_1x) \in RotatedCone(n+2)
        ## This constraint is necessary saturated at the optimum, thus we have t = 0.5 x^TQ_1x
        model.constraint(Expr.vstack(t,1, Expr.mul(P1,x)), Domain.inRotatedQCone(n+2))
        z_and_player1_cost = Expr.add(z, Expr.add(t,Expr.dot(q1,x))) #upper level objective function
        #Objective
        model.objective( "objfunct", ObjectiveSense.Minimize, z_and_player1_cost )
        #Simplex constraint for x
        model.constraint( Expr.sum(x), Domain.equalsTo(1) )
        # -z + lambda1 + 2 alpha + beta \leq 0
        sum_of_duals = Expr.add(lam,Expr.add(Expr.mul(2,alpha),beta))
        model.constraint(Expr.add(Expr.mul(-1,z),sum_of_duals),Domain.lessThan(0.0))
        #Constraints to define the several parts of the PSD matrix
        # Q2x = 0.5 * diag(diagonalQ2x * x), built entry-wise as a sum of sparse matrices.
        Q2x = Expr.add([Expr.mul(x.index(i),Matrix.sparse(n, n, [i], [i], [0.5*diagonalQ2x[i]])) for i in range(n)])
        model.constraint(Expr.sub(Expr.add(Expr.add(0.5*Q2,Q2x), Expr.mul(alpha,np.eye(n))), PSDVar_main), Domain.equalsTo(0,n,n) )
        model.constraint(Expr.sub(Expr.add(Expr.add(0.5*q2, Expr.add(Expr.mul(0.5*M.T,x),Expr.mul(lam,0.5*np.ones(n)))),Expr.mul(lam2,0.5*A)), PSDVar_vec), Domain.equalsTo(0,n) )
        model.constraint(Expr.sub(Expr.add(beta, alpha), PSDVar_offset), Domain.equalsTo(0) )
        # Solve
        model.solve()
        soltime = model.getSolverDoubleInfo("optimizerTime")
        #Get results
        xres = x.level()
        tres = t.level()[0]
        zres = z.level()[0]
        # Consistency checks: t is saturated (t = 0.5 x'Q1 x) and the PSD
        # corner equals alpha + beta as enforced above.
        assert(abs(tres-0.5*xres.dot(Q1).dot(xres))<1E-7)
        assert(abs(PSDVar.level()[-1] - (alpha.level()[0]+beta.level()[0]))<1E-7)
        return xres, zres , zres + tres + xres.dot(q1)
def master(M,n,Q1,Q2,q1,q2,diagonalQ2x,Qxk_list,qxk_list, vxk_vector,yklist,mu):
    """Solve the master problem of the inner/outer approximation algorithm.

    Couples two copies of the upper-level point: a restriction copy (x, z, t)
    and a relaxation copy (xrelax, zrelax, trelax), both on the simplex. The
    objective minimizes the sum of both players' costs plus mu times a
    distance term that penalizes the squared gap between the copies. The
    K accumulated cuts (Qxk_list, qxk_list, vxk_vector, yklist) enter through
    the eta-weighted PSD constraint (restriction side) and through linear
    valid inequalities at the stored points yk (relaxation side).

    Returns (xsol, zsol, xrelaxsol, zrelaxsol, obj_restriction,
    obj_relaxation, dist) where dist = ||x - xrelax||^2 + (z - zrelax)^2.
    """
    K = len(yklist)
    with Model("App2") as model:
        A = -np.eye(n)
        #Upper level var
        z = model.variable("z", 1, Domain.unbounded())
        x = model.variable("x", n, Domain.greaterThan(0.0))
        zrelax = model.variable("zrelax", 1, Domain.unbounded())
        xrelax = model.variable("xrelax", n, Domain.greaterThan(0.0))
        distance_term = model.variable("dist", 2, Domain.unbounded())
        #LL variables
        lam = model.variable("lambda", Domain.unbounded()) #lagrangian multiplier related to the equality constraint (simplex)
        lam2 = model.variable("lambda2", n, Domain.greaterThan(0.0)) #lagrangian multiplier related to the nonnegativity of y
        alpha = model.variable("alpha", Domain.greaterThan(0.0))
        beta = model.variable("beta", Domain.unbounded())
        # Nonnegative weights for the K accumulated cuts.
        eta = model.variable("eta", K, Domain.greaterThan(0.0))
        #Vars for PSD constraint
        PSDVar = model.variable(Domain.inPSDCone(n+1))
        # Blocks of the PSD variable: top-left n x n, last column, bottom-right scalar.
        PSDVar_main = PSDVar.slice([0,0], [n,n])
        PSDVar_vec = Var.flatten(PSDVar.slice([0,n], [n,n+1]))
        PSDVar_offset = PSDVar.slice([n,n], [n+1,n+1])
        #other auxiliary variables
        t = model.variable("t", 1, Domain.unbounded()) #upper level variable
        trelax = model.variable("trelax", 1, Domain.unbounded()) #upper level variable
        P1 = sqrtm(Q1) #necessary for the following constraints
        ## t >= 0.5 x^TQ_1x iif t >= 0.5 ||P_1 x ||^2 iif (t,1, P_1x) \in RotatedCone(n+2)
        ## This constraint is necessary saturated at the optimum, thus we have t = 0.5 x^TQ_1x
        model.constraint(Expr.vstack(t,1, Expr.mul(P1,x)), Domain.inRotatedQCone(n+2))
        model.constraint(Expr.vstack(trelax,1, Expr.mul(P1,xrelax)), Domain.inRotatedQCone(n+2))
        z_and_player1_cost = Expr.add(z, Expr.add(t,Expr.dot(q1,x))) #upper level objective function
        z_and_player1_cost_relax = Expr.add(zrelax, Expr.add(trelax,Expr.dot(q1,xrelax))) #upper level objective function
        #Objective
        # Rotated-cone epigraphs: distance_term[0] >= 0.5||x - xrelax||^2,
        # distance_term[1] >= 0.5(z - zrelax)^2.
        model.constraint( Expr.vstack(1.0,Expr.vstack(distance_term.index(0), Expr.sub(x, xrelax))), Domain.inRotatedQCone() ) #distance_term first component
        model.constraint( Expr.vstack(1.0,Expr.vstack(distance_term.index(1), Expr.sub(z, zrelax))), Domain.inRotatedQCone() ) #distance_term second component
        model.objective( "objfunct", ObjectiveSense.Minimize, Expr.add(z_and_player1_cost,Expr.add(z_and_player1_cost_relax,Expr.mul(mu, Expr.sum(distance_term)))) )
        #Simplex constraint for x
        model.constraint( Expr.sum(x), Domain.equalsTo(1) )
        model.constraint( Expr.sum(xrelax), Domain.equalsTo(1) )
        # -z -\eta^t v + lambda1 + 2 alpha + beta \leq 0
        sum_of_duals = Expr.add(lam,Expr.add(Expr.mul(2,alpha),beta))
        model.constraint(Expr.add(Expr.mul(-1,Expr.add(Expr.dot(eta,vxk_vector),z)),sum_of_duals),Domain.lessThan(0.0))
        # Q2x = 0.5 * diag(diagonalQ2x * x), built entry-wise as a sum of sparse matrices.
        Q2x = Expr.add([Expr.mul(x.index(i),Matrix.sparse(n, n, [i], [i], [0.5*diagonalQ2x[i]])) for i in range(n)])
        #Constraints to define the several parts of the PSD matrix
        if K>=1:
            # Eta-weighted combination of the stored cut matrices/vectors.
            combiliMat = np.zeros((n,n))
            combiliVect = np.zeros(n)
            for i in range(K):
                combiliMat = Expr.add(combiliMat, Expr.mul(eta.index(i),Qxk_list[i]))
                combiliVect = Expr.add(combiliVect, Expr.mul(eta.index(i),qxk_list[i]))
            model.constraint(Expr.sub(Expr.add(Expr.sub(Expr.add(0.5*Q2,Q2x),Expr.mul(0.5,combiliMat)), Expr.mul(alpha,np.eye(n))), PSDVar_main), Domain.equalsTo(0,n,n) )
            model.constraint(Expr.sub(Expr.add(Expr.add(Expr.sub(0.5*q2,Expr.mul(0.5,combiliVect)), Expr.add(Expr.mul(0.5*M.T,x),Expr.mul(lam,0.5*np.ones(n)))),Expr.mul(lam2,0.5*A)), PSDVar_vec), Domain.equalsTo(0,n) )
            model.constraint(Expr.sub(Expr.add(beta, alpha), PSDVar_offset), Domain.equalsTo(0) )
        else:
            # No cuts yet: same PSD structure as in `restriction`.
            model.constraint(Expr.sub(Expr.add(Expr.add(0.5*Q2,Q2x), Expr.mul(alpha,np.eye(n))), PSDVar_main), Domain.equalsTo(0,n,n) )
            model.constraint(Expr.sub(Expr.add(Expr.add(0.5*q2, Expr.add(Expr.mul(0.5*M.T,x),Expr.mul(lam,0.5*np.ones(n)))),Expr.mul(lam2,0.5*A)), PSDVar_vec), Domain.equalsTo(0,n) )
            model.constraint(Expr.sub(Expr.add(beta, alpha), PSDVar_offset), Domain.equalsTo(0) )
        Q2x_relax = Expr.add([Expr.mul(xrelax.index(i),Matrix.sparse(n, n, [i], [i], [0.5*diagonalQ2x[i]])) for i in range(n)])
        quad = Expr.add(0.5*Q2,Q2x_relax)
        # One valid inequality per stored lower-level point yk:
        # <quad, yk yk'> + yk'(q2 + M xrelax) + zrelax >= 0.
        for k in range(K):
            y = yklist[k]
            Y = (y.reshape(n,1).dot(y.reshape(1,n))).reshape(n**2)
            froeb_prod = Expr.dot(Expr.flatten(quad),Y.flatten())
            scal_prod = Expr.dot(y,Expr.add(q2,Expr.mul(M,xrelax)))
            model.constraint(Expr.add(Expr.add(froeb_prod,scal_prod),zrelax), Domain.greaterThan(0))
        #Solve
        # Accept any solution status (the model may only be near-optimal).
        model.acceptedSolutionStatus(AccSolutionStatus.Anything)
        model.writeTask("App2.ptf") # Save problem in readable format
        model.solve()
        soltime = model.getSolverDoubleInfo("optimizerTime")
        #Get results
        xsol,zsol,xrelaxsol,zrelaxsol = x.level(), z.level()[0],xrelax.level(),zrelax.level()[0]
        dist = np.linalg.norm(xsol-xrelaxsol,2)**2 + (zsol-zrelaxsol)**2
        return xsol,zsol,xrelaxsol,zrelaxsol,zsol+0.5*(xsol@Q1@xsol)+q1@xsol,zrelaxsol+0.5*(xrelaxsol@Q1@xrelaxsol)+q1@xrelaxsol,dist
def solve_subproblem_App2(n,Q,b,c,tl):
    """Solve the lower-level QP  min_y  0.5*y'Qy + b'y + c  over the simplex.

    Builds a Gurobi model with y in [0,1]^n constrained to sum to one,
    allows a non-convex quadratic objective (NonConvex=2), and stops
    after at most `tl` seconds.

    Returns:
        (y.X, objVal): the best point found and its objective value.
    """
    model = gp.Model("LL problem")
    model.Params.LogToConsole = 0
    # Decision variable: a point of the n-dimensional probability simplex.
    y = model.addMVar(n, lb = 0.0, ub = 1.0, name="y")
    model.addConstr(np.ones(n)@y==1)
    # NonConvex=2 lets Gurobi handle an indefinite Q; cap the solve time.
    model.setParam('NonConvex', 2)
    model.setParam('TimeLimit', tl)
    model.setObjective(y@(0.5*Q)@y+ b@y +c, GRB.MINIMIZE)
    model.optimize()
    return y.X, model.objVal
# if name[-2]=='t':
# running=False
# else:
|
{"hexsha": "d5d4bf048809b4c6a14acd2499d2055e1895744b", "size": 13531, "ext": "py", "lang": "Python", "max_stars_repo_path": "AllPythonFiles/Application2_InnerOuterApproxAlgo.py", "max_stars_repo_name": "aoustry/SIP-with-QP-LL", "max_stars_repo_head_hexsha": "afa7b18f6dcb98c04db4786ec5fcdf0607824191", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "AllPythonFiles/Application2_InnerOuterApproxAlgo.py", "max_issues_repo_name": "aoustry/SIP-with-QP-LL", "max_issues_repo_head_hexsha": "afa7b18f6dcb98c04db4786ec5fcdf0607824191", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "AllPythonFiles/Application2_InnerOuterApproxAlgo.py", "max_forks_repo_name": "aoustry/SIP-with-QP-LL", "max_forks_repo_head_hexsha": "afa7b18f6dcb98c04db4786ec5fcdf0607824191", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-28T13:26:10.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-28T13:26:10.000Z", "avg_line_length": 49.0253623188, "max_line_length": 219, "alphanum_fraction": 0.6255265686, "include": true, "reason": "import numpy,from scipy", "num_tokens": 4104}
|
*     MB05OD EXAMPLE PROGRAM TEXT
*     Copyright (c) 2002-2020 NICONET e.V.
*
*     Driver: reads a matrix A and a scalar DELTA from unit NIN, calls
*     the SLICOT routine MB05OD to overwrite A with exp(A*DELTA), and
*     prints the result plus the estimated number of accurate digits.
*
*     .. Parameters ..
      INTEGER          NIN, NOUT
      PARAMETER        ( NIN = 5, NOUT = 6 )
      INTEGER          NMAX
      PARAMETER        ( NMAX = 20 )
      INTEGER          LDA
      PARAMETER        ( LDA = NMAX )
*     NDIAG selects the order of the diagonal Pade approximant used by
*     MB05OD; LDWORK is the workspace size it needs for that choice.
      INTEGER          NDIAG
      PARAMETER        ( NDIAG = 9 )
      INTEGER          LDWORK
      PARAMETER        ( LDWORK = NMAX*( 2*NMAX+NDIAG+1 )+NDIAG )
*     .. Local Scalars ..
      DOUBLE PRECISION DELTA
      INTEGER          I, IDIG, INFO, IWARN, J, MDIG, N
      CHARACTER*1      BALANC
*     .. Local Arrays ..
      DOUBLE PRECISION A(LDA,NMAX), DWORK(LDWORK)
      INTEGER          IWORK(NMAX)
*     .. External Subroutines ..
      EXTERNAL         MB05OD
*     .. Executable Statements ..
*
      WRITE ( NOUT, FMT = 99999 )
*     Skip the heading in the data file and read the data.
      READ ( NIN, FMT = '()' )
      READ ( NIN, FMT = * ) N, DELTA, BALANC
      IF ( N.LE.0 .OR. N.GT.NMAX ) THEN
         WRITE ( NOUT, FMT = 99994 ) N
      ELSE
         READ ( NIN, FMT = * ) ( ( A(I,J), J = 1,N ), I = 1,N )
*        Find the exponential of the real defective matrix A*DELTA.
         CALL MB05OD( BALANC, N, NDIAG, DELTA, A, LDA, MDIG, IDIG,
     $                IWORK, DWORK, LDWORK, IWARN, INFO )
*
         IF ( INFO.NE.0 ) THEN
            WRITE ( NOUT, FMT = 99998 ) INFO
         ELSE
*           A nonzero IWARN is only a warning: report it, then still
*           print the computed exponential.
            IF ( IWARN.NE.0 )
     $         WRITE ( NOUT, FMT = 99993 ) IWARN
            WRITE ( NOUT, FMT = 99997 )
            DO 20 I = 1, N
               WRITE ( NOUT, FMT = 99996 ) ( A(I,J), J = 1,N )
   20       CONTINUE
*           MDIG/IDIG are the accuracy estimates returned by MB05OD.
            WRITE ( NOUT, FMT = 99995 ) MDIG, IDIG
         END IF
      END IF
      STOP
*
99999 FORMAT (' MB05OD EXAMPLE PROGRAM RESULTS',/1X)
99998 FORMAT (' INFO on exit from MB05OD = ',I2)
99997 FORMAT (' The solution matrix E = exp(A*DELTA) is ')
99996 FORMAT (20(1X,F8.4))
99995 FORMAT (/' Minimal number of accurate digits in the norm of E =',
     $       I4,/' Number of accurate digits in the norm of E',/' ',
     $       ' at 95 per cent confidence interval =',I4)
99994 FORMAT (/' N is out of range.',/' N = ',I5)
99993 FORMAT (' IWARN on exit from MB05OD = ',I2)
      END
|
{"hexsha": "fd72a67eba3e843308081b286487324d86b9a41e", "size": 2264, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "examples/TMB05OD.f", "max_stars_repo_name": "bnavigator/SLICOT-Reference", "max_stars_repo_head_hexsha": "7b96b6470ee0eaf75519a612d15d5e3e2857407d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2020-11-10T23:47:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T04:34:43.000Z", "max_issues_repo_path": "examples/TMB05OD.f", "max_issues_repo_name": "bnavigator/SLICOT-Reference", "max_issues_repo_head_hexsha": "7b96b6470ee0eaf75519a612d15d5e3e2857407d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-02-07T22:26:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T11:01:07.000Z", "max_forks_repo_path": "examples/TMB05OD.f", "max_forks_repo_name": "bnavigator/SLICOT-Reference", "max_forks_repo_head_hexsha": "7b96b6470ee0eaf75519a612d15d5e3e2857407d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2020-11-26T11:06:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T00:37:21.000Z", "avg_line_length": 36.5161290323, "max_line_length": 72, "alphanum_fraction": 0.5083922261, "num_tokens": 721}
|
[STATEMENT]
lemma plus_pres_lens_indep' [simp]:
"\<lbrakk> X \<bowtie> Y; X \<bowtie> Z \<rbrakk> \<Longrightarrow> X \<bowtie> Y +\<^sub>L Z"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>X \<bowtie> Y; X \<bowtie> Z\<rbrakk> \<Longrightarrow> X \<bowtie> Y +\<^sub>L Z
[PROOF STEP]
by (auto intro: lens_indep_sym plus_pres_lens_indep)
|
{"llama_tokens": 159, "file": "Optics_Lens_Algebra", "length": 1}
|
------------------------------------------------------------------------
-- Indexed applicative functors
------------------------------------------------------------------------

-- Note that currently the applicative functor laws are not included
-- here.

module Category.Applicative.Indexed where

open import Data.Function
open import Data.Product
open import Category.Functor

-- IFun I is the shape of an I-indexed computation type: F i j A is a
-- computation that moves the index from i to j while producing an A.
IFun : Set → Set₁
IFun I = I → I → Set → Set

-- Raw (law-free) indexed applicative: pure preserves the index, and
-- _⊛_ chains the index transitions of its two arguments (i→j, j→k).
record RawIApplicative {I : Set} (F : IFun I) : Set₁ where
  infixl 4 _⊛_ _<⊛_ _⊛>_
  infix 4 _⊗_

  field
    pure : ∀ {i A} → A → F i i A
    _⊛_ : ∀ {i j k A B} → F i j (A → B) → F j k A → F i k B

  -- Every indexed applicative is a raw functor: lift the function
  -- with pure, then apply.
  rawFunctor : ∀ {i j} → RawFunctor (F i j)
  rawFunctor = record
    { _<$>_ = λ g x → pure g ⊛ x
    }

  private
    open module RF {i j : I} =
      RawFunctor (rawFunctor {i = i} {j = j})
      public

  -- Sequence two computations, keeping only the first result.
  _<⊛_ : ∀ {i j k A B} → F i j A → F j k B → F i k A
  x <⊛ y = const <$> x ⊛ y

  -- Sequence two computations, keeping only the second result.
  _⊛>_ : ∀ {i j k A B} → F i j A → F j k B → F i k B
  x ⊛> y = flip const <$> x ⊛ y

  -- Sequence two computations, pairing both results.
  _⊗_ : ∀ {i j k A B} → F i j A → F j k B → F i k (A × B)
  x ⊗ y = (_,_) <$> x ⊛ y
|
{"hexsha": "ba21606725126670a2d8eacc4992bda2531c2a5c", "size": 1099, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "vendor/stdlib/src/Category/Applicative/Indexed.agda", "max_stars_repo_name": "isabella232/Lemmachine", "max_stars_repo_head_hexsha": "8ef786b40e4a9ab274c6103dc697dcb658cf3db3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 56, "max_stars_repo_stars_event_min_datetime": "2015-01-20T02:11:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-21T17:02:19.000Z", "max_issues_repo_path": "vendor/stdlib/src/Category/Applicative/Indexed.agda", "max_issues_repo_name": "larrytheliquid/Lemmachine", "max_issues_repo_head_hexsha": "8ef786b40e4a9ab274c6103dc697dcb658cf3db3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-12T12:17:51.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T12:17:51.000Z", "max_forks_repo_path": "vendor/stdlib/src/Category/Applicative/Indexed.agda", "max_forks_repo_name": "isabella232/Lemmachine", "max_forks_repo_head_hexsha": "8ef786b40e4a9ab274c6103dc697dcb658cf3db3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2015-07-21T16:37:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-12T11:54:10.000Z", "avg_line_length": 25.5581395349, "max_line_length": 72, "alphanum_fraction": 0.4567788899, "num_tokens": 401}
|
function [X,J,dXdx,dXdxI]=JacobiDG2(DG,F,Topo,Param)
% JacobiDG2  Metric terms for one quadrilateral DG element.
%   Maps the reference nodes (DG.xwX, DG.xwY) bilinearly onto the
%   quadrilateral with corner points F.P(:,1..4), remaps the second
%   coordinate through the terrain function Topo, and returns:
%     X     - node coordinates, (nX,nY,3)
%     J     - Jacobian determinant at each node, (nX,nY)
%     dXdx  - Jacobian of the mapping, (nX,nY,2,2)
%     dXdxI - inverse Jacobian scaled by J (the adjugate), (nX,nY,2,2)
ksi=DG.xwX;
eta=DG.xwY;
nX=DG.OrdPolyX+1;
nY=DG.OrdPolyY+1;
X=zeros(nX,nY,3);
dXdx=zeros(nX,nY,2,2);
dXdxI=zeros(nX,nY,2,2);
J=zeros(nX,nY);
for j=1:nY
  for i=1:nX
    % Bilinear blend of the four corner points of the element.
    X(i,j,1:2)=0.25*((1-ksi(i))*(1-eta(j))*F.P(1:2,1)...
      +(1+ksi(i))*(1-eta(j))*F.P(1:2,2)...
      +(1+ksi(i))*(1+eta(j))*F.P(1:2,3)...
      +(1-ksi(i))*(1+eta(j))*F.P(1:2,4));
    % Terrain map: overwrites the second coordinate (and a derivative
    % slot) given the bilinear coordinates just computed.
    % NOTE(review): dXdx(i,j,2,2) set here is overwritten by the
    % spectral derivative below -- confirm Topo's output is unneeded.
    [X(i,j,2),dXdx(i,j,2,2)]=Topo(X(i,j,1),X(i,j,2),Param);
  end
end
% Derivatives of the mapping via the differentiation matrices:
% DG.DSX acts along the first index, DG.DSY' along the second.
dXdx(:,:,1,1)=DG.DSX*X(:,:,1);
dXdx(:,:,2,1)=DG.DSX*X(:,:,2);
dXdx(:,:,1,2)=X(:,:,1)*DG.DSY';
dXdx(:,:,2,2)=X(:,:,2)*DG.DSY';
% Pointwise determinant and adjugate (inv * det) of the 2x2 Jacobian.
for j=1:nY
  for i=1:nX
    J(i,j)=det(reshape(dXdx(i,j,:,:),2,2));
    dXdxI(i,j,:,:)=inv(reshape(dXdx(i,j,:,:),2,2))*J(i,j);
  end
end
end
|
{"hexsha": "9881329194f562f31bb52ca34199db37ab814257", "size": 784, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Grid/JacobiDG2.jl", "max_stars_repo_name": "CliMA/CGDycore.jl", "max_stars_repo_head_hexsha": "77297631f8db7775f19daee2d7ac75bc810d9c11", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-05T07:09:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-05T07:09:16.000Z", "max_issues_repo_path": "src/Grid/JacobiDG2.jl", "max_issues_repo_name": "CliMA/CGDycore.jl", "max_issues_repo_head_hexsha": "77297631f8db7775f19daee2d7ac75bc810d9c11", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Grid/JacobiDG2.jl", "max_forks_repo_name": "CliMA/CGDycore.jl", "max_forks_repo_head_hexsha": "77297631f8db7775f19daee2d7ac75bc810d9c11", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.5, "max_line_length": 59, "alphanum_fraction": 0.4783163265, "num_tokens": 387}
|
#
# file: GWO.py
#
# Grey wolf optimization
#
# RTK, 23-Dec-2019
# Last update: 26-May-2020
#
################################################################
import numpy as np
################################################################
# GWO
#
class GWO:
    """Grey wolf optimization.

    Maintains a swarm of candidate positions and the three best wolves
    found so far (alpha <= beta <= delta by objective value).  Each step
    moves every particle toward a randomized average of the three
    leaders while the exploration parameter ``a`` decays linearly from
    ``eta`` to zero over ``max_iter`` iterations.
    """

    #-----------------------------------------------------------
    #  __init__
    #
    def __init__(self, obj,       # the objective function (subclass Objective)
                 eta=2.0,         # scale factor for a
                 npart=10,        # number of particles in the swarm (> 3)
                 ndim=3,          # number of dimensions in the swarm
                 max_iter=200,    # maximum number of steps
                 tol=None,        # tolerance (done if no done object and gbest < tol)
                 init=None,       # swarm initialization object (subclass Initializer)
                 done=None,       # custom Done object (subclass Done)
                 bounds=None):    # swarm bounds object
        """Store the configuration; Initialize() builds the swarm."""
        self.obj = obj
        self.npart = npart
        self.ndim = ndim
        self.max_iter = max_iter
        self.init = init
        self.done = done
        self.bounds = bounds
        self.tol = tol
        self.eta = eta
        self.initialized = False

    #-----------------------------------------------------------
    #  Results
    #
    def Results(self):
        """Return a dict with the current results, or None if the
        swarm has not been initialized yet."""
        if not self.initialized:
            return None
        return {
            "npart": self.npart,            # number of particles
            "ndim": self.ndim,              # number of dimensions
            "max_iter": self.max_iter,      # maximum possible iterations
            "iterations": self.iterations,  # iterations actually performed
            "tol": self.tol,                # tolerance value, if any
            "gbest": self.gbest,            # sequence of global best function values
            "gpos": self.gpos,              # global best positions
            "gidx": self.gidx,              # particle id of global best
            "giter": self.giter,            # iteration number of global best
            "pos": self.pos,                # current particle positions
            "vpos": self.vpos,              # and objective function values
        }

    #-----------------------------------------------------------
    #  Initialize
    #
    def Initialize(self):
        """Set up the swarm: positions, objective values, global-best
        history, and the initial alpha/beta/delta leaders."""
        self.initialized = True
        self.iterations = 0
        self.pos = self.init.InitializeSwarm()  # initial swarm positions
        self.vpos = np.zeros(self.npart)
        for i in range(self.npart):
            self.vpos[i] = self.obj.Evaluate(self.pos[i])

        #  Swarm best history (appended to whenever a new best appears)
        idx = np.argmin(self.vpos)
        self.gidx = [idx]
        self.gbest = [self.vpos[idx]]
        self.gpos = [self.pos[idx].copy()]
        self.giter = [0]

        #  1st, 2nd, and 3rd best positions become the leaders
        order = np.argsort(self.vpos)
        self.alpha = self.pos[order[0]].copy()
        self.valpha = self.vpos[order[0]]
        self.beta = self.pos[order[1]].copy()
        self.vbeta = self.vpos[order[1]]
        self.delta = self.pos[order[2]].copy()
        self.vdelta = self.vpos[order[2]]

    #-----------------------------------------------------------
    #  Done
    #
    def Done(self):
        """Check if we are done: delegate to the custom Done object if
        given, otherwise stop on max_iter (and tol, when set)."""
        if self.done is None:
            if self.tol is None:
                return (self.iterations == self.max_iter)
            else:
                return (self.gbest[-1] < self.tol) or (self.iterations == self.max_iter)
        else:
            return self.done.Done(self.gbest,
                                  gpos=self.gpos,
                                  pos=self.pos,
                                  max_iter=self.max_iter,
                                  iteration=self.iterations)

    #-----------------------------------------------------------
    #  Step
    #
    def Step(self):
        """Do one swarm step."""
        #  a decays linearly from eta to zero (default eta is 2)
        a = self.eta - self.eta * (self.iterations / self.max_iter)

        #  Move every particle toward a randomized mix of the leaders
        for i in range(self.npart):
            A = 2 * a * np.random.random(self.ndim) - a
            C = 2 * np.random.random(self.ndim)
            Dalpha = np.abs(C * self.alpha - self.pos[i])
            X1 = self.alpha - A * Dalpha

            A = 2 * a * np.random.random(self.ndim) - a
            C = 2 * np.random.random(self.ndim)
            Dbeta = np.abs(C * self.beta - self.pos[i])
            X2 = self.beta - A * Dbeta

            A = 2 * a * np.random.random(self.ndim) - a
            C = 2 * np.random.random(self.ndim)
            Ddelta = np.abs(C * self.delta - self.pos[i])
            X3 = self.delta - A * Ddelta

            self.pos[i, :] = (X1 + X2 + X3) / 3.0

        #  Keep in bounds
        if self.bounds is not None:
            self.pos = self.bounds.Limits(self.pos)

        #  Get objective function values and check for new leaders
        for i in range(self.npart):
            self.vpos[i] = self.obj.Evaluate(self.pos[i])

            #  new alpha?  demote alpha -> beta and beta -> delta
            if self.vpos[i] < self.valpha:
                self.vdelta = self.vbeta
                self.delta = self.beta.copy()
                self.vbeta = self.valpha
                self.beta = self.alpha.copy()
                self.valpha = self.vpos[i]
                self.alpha = self.pos[i].copy()

            #  new beta?  demote beta -> delta
            if (self.vpos[i] > self.valpha) and (self.vpos[i] < self.vbeta):
                self.vdelta = self.vbeta
                self.delta = self.beta.copy()
                self.vbeta = self.vpos[i]
                self.beta = self.pos[i].copy()

            #  new delta?  (bug fix: the original tested vpos < vbeta
            #  here, so a value between beta and delta could never
            #  replace delta directly)
            if (self.vpos[i] > self.vbeta) and (self.vpos[i] < self.vdelta):
                self.vdelta = self.vpos[i]
                self.delta = self.pos[i].copy()

            #  is alpha new swarm best?
            if self.valpha < self.gbest[-1]:
                self.gidx.append(i)
                self.gbest.append(self.valpha)
                self.gpos.append(self.alpha.copy())
                self.giter.append(self.iterations)

        self.iterations += 1

    #-----------------------------------------------------------
    #  Optimize
    #
    def Optimize(self):
        """Run a full optimization and return (best value, best position)."""
        self.Initialize()
        while not self.Done():
            self.Step()
        return self.gbest[-1], self.gpos[-1]
# end GWO.py
|
{"hexsha": "745f8ba64ad21ed255b54c25435a65a5bac03fdf", "size": 6728, "ext": "py", "lang": "Python", "max_stars_repo_path": "appendix/GWO.py", "max_stars_repo_name": "rkneusel9/StrangeCodeBook", "max_stars_repo_head_hexsha": "70ed93396885a5cbf2f4d774d9aa30feca83e46d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-01-11T17:14:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-07T10:22:32.000Z", "max_issues_repo_path": "appendix/GWO.py", "max_issues_repo_name": "rkneusel9/StrangeCodeBook", "max_issues_repo_head_hexsha": "70ed93396885a5cbf2f4d774d9aa30feca83e46d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "appendix/GWO.py", "max_forks_repo_name": "rkneusel9/StrangeCodeBook", "max_forks_repo_head_hexsha": "70ed93396885a5cbf2f4d774d9aa30feca83e46d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-24T01:11:49.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-24T01:11:49.000Z", "avg_line_length": 32.8195121951, "max_line_length": 109, "alphanum_fraction": 0.4619500595, "include": true, "reason": "import numpy", "num_tokens": 1522}
|
theory State_Monad_EX
imports
  Main
  "State_Monad_HL"
begin

(* Program state: three integer variables x, y, z. *)
record S1 =
  x_S1:: int
  y_S1:: int
  z_S1:: int

(* update functions *)
(* x_S1u s v yields s with the x field replaced by v; likewise for
   y and z.  These are the update functions handed to put/assign. *)
definition x_S1u:: "S1 \<Rightarrow> int \<Rightarrow> S1" where "x_S1u s v = s \<lparr> x_S1 := v \<rparr>"
definition y_S1u:: "S1 \<Rightarrow> int \<Rightarrow> S1" where "y_S1u s v = s \<lparr> y_S1 := v \<rparr>"
definition z_S1u:: "S1 \<Rightarrow> int \<Rightarrow> S1" where "z_S1u s v = s \<lparr> z_S1 := v \<rparr>"

(* Weakest-precondition style rules for put on each field. *)
theorem put_x_rule: "spec (\<lambda>x. p () (x \<lparr> x_S1 := v \<rparr>)) (put x_S1u v) p"
  by (simp add: spec_def put_def get_state_def put_state_def x_S1u_def)

theorem put_y_rule: "spec (\<lambda>x. p () (x \<lparr> y_S1 := v \<rparr>)) (put y_S1u v) p"
  by (simp add: spec_def put_def get_state_def put_state_def y_S1u_def)

theorem put_z_rule: "spec (\<lambda>x. p () (x \<lparr> z_S1 := v \<rparr>)) (put z_S1u v) p"
  by (simp add: spec_def put_def get_state_def put_state_def z_S1u_def)

(* simple programs *)
(* Zero-initializers for each variable; p0 states all three are 0. *)
definition setx0:: "(S1, unit) state" where "setx0 = put x_S1u 0"
definition sety0:: "(S1, unit) state" where "sety0 = put y_S1u 0"
definition setz0:: "(S1, unit) state" where "setz0 = put z_S1u 0"
definition p0:: "S1 \<Rightarrow> bool" where "p0 s = (x_S1 s = 0 \<and> y_S1 s = 0 \<and> z_S1 s = 0)"
definition init0:: "(S1, unit) state" where
  "init0 = do { setx0; sety0; setz0 }"

(* init0 establishes p0 from any start state: step through the
   sequence with seq_rule and intermediate assertions. *)
lemma "spec TT init0 (GG p0)"
  apply(simp add: init0_def)
  apply(intro seq_rule[of _ _ "\<lambda>_ s. x_S1 s = 0"])
  apply(simp add: TT_def spec_def setx0_def put_def get_state_def put_state_def x_S1u_def)
  apply (intro allI)
  apply(intro seq_rule[of _ _ "\<lambda>_ s. x_S1 s = 0 \<and> y_S1 s = 0"])
  apply(simp add: spec_def sety0_def put_def get_state_def put_state_def y_S1u_def)
  by(simp add: spec_def setz0_def put_def get_state_def put_state_def GG_def p0_def z_S1u_def)

(* let0 copies x into y and then into z; q1 is the intermediate
   assertion after the first assignment, q0 the final relation. *)
definition let0:: "(S1, unit) state" where "let0 = do { assign y_S1u x_S1; assign z_S1u x_S1 }"
definition q0:: "S1 \<Rightarrow> bool" where "q0 s = (x_S1 s = y_S1 s \<and> x_S1 s = z_S1 s)"
definition q1:: "unit \<Rightarrow> S1 \<Rightarrow> bool" where "q1 _ s = (x_S1 s = y_S1 s)"

lemma "spec TT let0 (GG q0)"
  apply (simp add: let0_def)
  apply (intro seq_rule[of _ _ "q1"])
  apply (simp add: spec_def q1_def assign_def return_def get_def put_def get_state_def put_state_def y_S1u_def)
  by (simp add: spec_def GG_def q1_def q0_def assign_def get_def return_def put_def get_state_def put_state_def z_S1u_def)

(* Conditional program: under the precondition x > 0 the guard c < 0
   is false, so the else branch runs and copies x into z (q2). *)
definition ifc0:: "(S1, unit) state" where "ifc0 = do { c \<leftarrow> get x_S1; if c < 0 then assign y_S1u x_S1 else assign z_S1u x_S1 }"
definition q2:: "S1 \<Rightarrow> bool" where "q2 s = (x_S1 s = z_S1 s)"

lemma "spec (\<lambda>s. x_S1 s > 0) ifc0 (GG q2)"
  apply (simp add: ifc0_def)
  apply (intro get_rule)
  apply (intro allI)
  apply (intro cond_rule)
  apply (simp add: spec_def assign_def get_def get_state_def return_def put_def put_state_def GG_def q2_def)
  by (simp add: spec_def assign_def get_def get_state_def return_def put_def put_state_def GG_def q2_def z_S1u_def)

(* dec0 saves x into z, then decrements x.  p3 fixes the initial
   value N; q3 says x dropped below N while z holds the old value. *)
definition dec0:: "(S1, unit) state" where "dec0 = do { assign z_S1u x_S1; assign x_S1u (\<lambda>x. x_S1 x - 1) }"
definition p3:: "int \<Rightarrow> S1 \<Rightarrow> bool" where "p3 N s = (x_S1 s = N)"
definition q3:: "int \<Rightarrow> S1 \<Rightarrow> bool" where "q3 N s = (x_S1 s < N \<and> z_S1 s = N)"

lemma "spec (p3 N) dec0 (GG (q3 N))"
  apply (simp add: dec0_def)
  apply(intro seq_rule[of _ _ "\<lambda>_ s. x_S1 s = N \<and> z_S1 s = N"])
  apply (simp add: spec_def assign_def put_def get_def get_state_def put_state_def return_def z_S1u_def p3_def)
  by (simp add: spec_def assign_def put_def get_def get_state_def put_state_def return_def x_S1u_def GG_def q3_def)

end
|
{"author": "SimplisticCode", "repo": "Tarjan-Isabelle", "sha": "ecd72ef5fc352075e6037965cc30844b7db4bacc", "save_path": "github-repos/isabelle/SimplisticCode-Tarjan-Isabelle", "path": "github-repos/isabelle/SimplisticCode-Tarjan-Isabelle/Tarjan-Isabelle-ecd72ef5fc352075e6037965cc30844b7db4bacc/Monad_Play Around/State_Monad_EX.thy"}
|
#!/usr/bin/python3.6
# Wrapper script: reads a problem instance (a CSV matrix D) named on the
# command line, stages it in shared memory, and runs the pruning-paper
# P-set search from pyrankability on a local dask cluster, printing the
# result as JSON to stdout.
import sys
import json
import numpy as np
import math
from dask.distributed import Client
import shutil
problem_instance_file = sys.argv[1]
# Load D; below it is only used for its row count (percent-skipped figure)
D = np.genfromtxt (problem_instance_file, delimiter=",")
# Stage the instance in shared memory so workers on this host read it cheaply
shutil.copyfile(problem_instance_file, '/dev/shm/D.csv')
# Now compute our solution
import pyrankability
import argparse
# NOTE(review): duplicate import -- pyrankability is already imported two
# lines above, and argparse appears unused in this script
import pyrankability
# Assumes a dask scheduler is already listening on the default local port
client = Client("127.0.0.1:8786")
k,P,skipped = pyrankability.pruning_paper_dask.find_P("/dev/shm/D.csv",4,100,
    bilp_method="orig",prune_history=True,check_and_recurse=False,client=client)
# Emit k, the set P, and skip statistics as a single JSON document
print(pyrankability.common.as_json(k,P,{"skipped":skipped,"percent_skipped": skipped*100./math.factorial(D.shape[0])}))
|
{"hexsha": "bb98aeaf52b932f7be3f8ed55032d3ab90b68cc8", "size": 732, "ext": "py", "lang": "Python", "max_stars_repo_path": "evaluation/scripts/pruning_paper_wrapper_dask.py", "max_stars_repo_name": "IGARDS/ranklib", "max_stars_repo_head_hexsha": "1acd8c0bd4d4045b55e6c5bd6cbb2fbe080c7479", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-04-30T16:40:07.000Z", "max_stars_repo_stars_event_max_datetime": "2018-04-30T16:40:07.000Z", "max_issues_repo_path": "evaluation/scripts/pruning_paper_wrapper_dask.py", "max_issues_repo_name": "IGARDS/ranklib", "max_issues_repo_head_hexsha": "1acd8c0bd4d4045b55e6c5bd6cbb2fbe080c7479", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2018-04-30T18:50:15.000Z", "max_issues_repo_issues_event_max_datetime": "2018-04-30T18:51:52.000Z", "max_forks_repo_path": "evaluation/scripts/pruning_paper_wrapper_dask.py", "max_forks_repo_name": "IGARDS/ranklib", "max_forks_repo_head_hexsha": "1acd8c0bd4d4045b55e6c5bd6cbb2fbe080c7479", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.1538461538, "max_line_length": 130, "alphanum_fraction": 0.7254098361, "include": true, "reason": "import numpy", "num_tokens": 191}
|
// ---------------------------------------------------------------------------|
// Boost Test Framework
// ---------------------------------------------------------------------------|
#include <boost/test/unit_test.hpp>
// ---------------------------------------------------------------------------|
// Standard includes
// ---------------------------------------------------------------------------|
#include <cstdint>
#include <string>
#include <vector>
#include <map>
#include <iostream>
#include <sstream>
// ---------------------------------------------------------------------------|
// Yuma Test Harness includes
// ---------------------------------------------------------------------------|
#include "test/support/fixtures/simple-container-module-fixture.h"
#include "test/support/misc-util/log-utils.h"
#include "test/support/nc-query-util/nc-query-test-engine.h"
#include "test/support/nc-session/abstract-nc-session-factory.h"
// ---------------------------------------------------------------------------|
namespace YumaTest {
BOOST_FIXTURE_TEST_SUITE( confirmed_commit_slt_tests, SimpleContainerModuleFixture )
// Happy path: a confirmed-commit that is confirmed by a plain <commit>
// before its timeout must make the changes permanent.
BOOST_AUTO_TEST_CASE( slt_confirmed_commit_success )
{
    DisplayTestDescrption(
        "Demonstrate population of simple container and successful "
        "confirmed-commit operation.",
        "Procedure: \n"
        "\t 1 - Create the top level container for the module\n"
        "\t 2 - Populate the database with 3 key/value pairs\n"
        "\t 3 - Check all values are in the candidate\n"
        "\t 4 - Check all values are not in the running\n"
        "\t 5 - Confirmed-Commit the operation\n"
        "\t 6 - Check all values are in the running\n"
        "\t 7 - Check all values are in the candidate\n"
        "\t 8 - Commit the operation before the timeout\n"
        "\t 9 - Check all values are in the running\n"
        "\t10 - Check all values are in the candidate\n"
        "\t11 - Delete all entries from the candidate\n"
        "\t12 - Check all values are not in the candidate\n"
        "\t13 - Check all values are in the running\n"
        "\t14 - Commit the operation\n"
        "\t15 - Check all values are not in the candidate\n"
        "\t16 - Check all values are not in the running\n"
        );

    // RAII Vector of database locks
    vector< unique_ptr< NCDbScopedLock > > locks = getFullLock( primarySession_ );

    createMainContainer( primarySession_ );

    // set some values
    populateDatabase( 3 );

    // check the entries exist
    checkEntries( primarySession_ );

    // confirmed-commit with a 1 second timeout
    confirmedCommitChanges (primarySession_, 1);

    // check the entries exist
    checkEntries( primarySession_ );

    // commit the changes, confirming the pending confirmed-commit
    commitChanges( primarySession_ );

    // sleep for 2 seconds -- past the original timeout; no rollback
    // should occur because the commit above confirmed the changes
    sleep( 2 );

    // check the entries still exist
    checkEntries( primarySession_ );

    // remove all entries and verify cleanup survives a commit
    deleteMainContainer( primarySession_ );
    checkEntries( primarySession_ );
    commitChanges( primarySession_ );
    checkEntries( primarySession_ );
}
// Timeout path: a confirmed-commit that is never confirmed must be
// rolled back once its timeout expires.
BOOST_AUTO_TEST_CASE( slt_confirmed_commit_timeout )
{
    DisplayTestDescrption(
        "Demonstrate rollback of simple container following "
        "confirmed-commit operation timeout.",
        "Procedure: \n"
        "\t 1 - Create the top level container for the module\n"
        "\t 2 - Populate the database with 3 key/value pairs\n"
        "\t 3 - Check all values are in the candidate\n"
        "\t 4 - Check all values are not in the running\n"
        "\t 5 - Confirmed-Commit the operation\n"
        "\t 6 - Check all values are in the running\n"
        "\t 7 - Check all values are in the candidate\n"
        "\t 8 - Allow the timeout to occur\n"
        "\t 9 - Check all values are not in the candidate\n"
        "\t10 - Check all values are not in the running\n"
        );

    // RAII Vector of database locks
    vector< unique_ptr< NCDbScopedLock > > locks = getFullLock( primarySession_ );

    createMainContainer( primarySession_ );

    // set some values
    populateDatabase( 3 );

    // check the entries exist
    checkEntries( primarySession_ );

    // confirmed-commit with a 1 second timeout
    confirmedCommitChanges (primarySession_, 1);

    // check the entries exist
    checkEntries( primarySession_ );

    // sleep for 2 seconds to cause rollback
    sleep( 2 );
    rollbackChanges( primarySession_);

    // check the entries no longer exist
    checkEntries( primarySession_ );
}
// Extension path: a second confirmed-commit extends the pending
// timeout; the changes must survive the original deadline and roll
// back only when the extended timeout expires.
BOOST_AUTO_TEST_CASE( slt_confirmed_commit_extend_timeout )
{
    DisplayTestDescrption(
        "Demonstrate rollback of simple container following "
        "extended confirmed-commit operation timeout.",
        "Procedure: \n"
        "\t 1 - Create the top level container for the module\n"
        "\t 2 - Populate the database with 3 key/value pairs\n"
        "\t 3 - Check all values are in the candidate\n"
        "\t 4 - Check all values are not in the running\n"
        "\t 5 - Confirmed-Commit the operation\n"
        "\t 6 - Check all values are in the running\n"
        "\t 7 - Check all values are in the candidate\n"
        "\t 8 - Confirmed-Commit to extend the timeout\n"
        "\t 8 - Allow initial timeout period to pass\n"
        "\t 6 - Check all values are in the running\n"
        "\t 7 - Check all values are in the candidate\n"
        "\t 8 - Allow the timeout to occur\n"
        "\t 9 - Check all values are not in the candidate\n"
        "\t10 - Check all values are not in the running\n"
        );

    // RAII Vector of database locks
    vector< unique_ptr< NCDbScopedLock > > locks = getFullLock( primarySession_ );

    createMainContainer( primarySession_ );

    // set some values
    populateDatabase( 3 );

    // check the entries exist
    checkEntries( primarySession_ );

    // confirmed-commit with a 2 second timeout
    confirmedCommitChanges (primarySession_, 2);

    // check the entries exist
    checkEntries( primarySession_ );

    sleep( 1 );

    // confirmed-commit again to extend the timeout to 4 seconds
    confirmedCommitChanges (primarySession_, 4, true);

    // go beyond initial timeout
    sleep( 3 );

    // check the entries still exist
    checkEntries( primarySession_ );

    // sleep for 2 seconds to cause rollback
    sleep( 2 );
    rollbackChanges( primarySession_);

    // check the entries no longer exist
    checkEntries( primarySession_ );

    // FIXME: if all entries are not removed the next tests will fail!
    // FIXME: this indicates that the rollback has not worked
    // FIXME: correctly
    // deleteMainContainer( primarySession_ );
    // checkEntries( primarySession_ );
    // commitChanges( primarySession_ );
    // checkEntries( primarySession_ );
}
// ---------------------------------------------------------------------------|
BOOST_AUTO_TEST_SUITE_END()
} // namespace YumaTest
|
{"hexsha": "7d6cf41f56338b8d8417c61fee0db8b8970c34a3", "size": 7067, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "OpenYuma/netconf/test/test-suites/system/simple-edit-tests-confirmed-commit.cpp", "max_stars_repo_name": "5GExchange/escape", "max_stars_repo_head_hexsha": "eb35d460597a0386b18dd5b6a5f62a3f30eed5fa", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2016-11-16T16:26:16.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-26T17:20:28.000Z", "max_issues_repo_path": "OpenYuma/netconf/test/test-suites/system/simple-edit-tests-confirmed-commit.cpp", "max_issues_repo_name": "5GExchange/escape", "max_issues_repo_head_hexsha": "eb35d460597a0386b18dd5b6a5f62a3f30eed5fa", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3.0, "max_issues_repo_issues_event_min_datetime": "2017-04-20T11:29:17.000Z", "max_issues_repo_issues_event_max_datetime": "2017-11-06T17:12:12.000Z", "max_forks_repo_path": "OpenYuma/netconf/test/test-suites/system/simple-edit-tests-confirmed-commit.cpp", "max_forks_repo_name": "5GExchange/escape", "max_forks_repo_head_hexsha": "eb35d460597a0386b18dd5b6a5f62a3f30eed5fa", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 10.0, "max_forks_repo_forks_event_min_datetime": "2017-03-27T13:58:52.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-24T22:42:51.000Z", "avg_line_length": 34.9851485149, "max_line_length": 84, "alphanum_fraction": 0.5821423518, "num_tokens": 1511}
|
[STATEMENT]
lemma resCasesB[consumes 2, case_names Open Res]:
fixes x :: name
and P :: pi
and a :: name
and y :: name
and RP' :: pi
assumes Trans: "<\<nu>y>P \<longmapsto> a<\<nu>x> \<prec> RP'"
and xineqy: "x \<noteq> y"
and rcOpen: "\<And>P'. \<lbrakk>P \<longmapsto>(OutputR a y) \<prec> P'; a \<noteq> y\<rbrakk> \<Longrightarrow> F ([(x, y)] \<bullet> P')"
and rcResB: "\<And>P'. \<lbrakk>P \<longmapsto>a<\<nu>x> \<prec> P'; y \<noteq> a\<rbrakk> \<Longrightarrow> F (<\<nu>y>P')"
shows "F RP'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. F RP'
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. F RP'
[PROOF STEP]
from Trans
[PROOF STATE]
proof (chain)
picking this:
<\<nu>y>P \<longmapsto> a<\<nu>x> \<prec> RP'
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
<\<nu>y>P \<longmapsto> a<\<nu>x> \<prec> RP'
goal (1 subgoal):
1. F RP'
[PROOF STEP]
proof(induct rule: resCasesB', auto)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>Pa aa b P'. \<lbrakk><\<nu>y>P = <\<nu>b>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>b> \<prec> P'; Pa \<longmapsto> aa[b] \<prec> P'; aa \<noteq> b; <\<nu>b>Pa \<longmapsto> aa<\<nu>b> \<prec> P'\<rbrakk> \<Longrightarrow> F RP'
2. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
fix Pa Pa' aa b
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>Pa aa b P'. \<lbrakk><\<nu>y>P = <\<nu>b>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>b> \<prec> P'; Pa \<longmapsto> aa[b] \<prec> P'; aa \<noteq> b; <\<nu>b>Pa \<longmapsto> aa<\<nu>b> \<prec> P'\<rbrakk> \<Longrightarrow> F RP'
2. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
assume PTrans: "Pa \<longmapsto> (aa::name)[b] \<prec> Pa'"
[PROOF STATE]
proof (state)
this:
Pa \<longmapsto> aa[b] \<prec> Pa'
goal (2 subgoals):
1. \<And>Pa aa b P'. \<lbrakk><\<nu>y>P = <\<nu>b>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>b> \<prec> P'; Pa \<longmapsto> aa[b] \<prec> P'; aa \<noteq> b; <\<nu>b>Pa \<longmapsto> aa<\<nu>b> \<prec> P'\<rbrakk> \<Longrightarrow> F RP'
2. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
assume aaineqb: "aa\<noteq>b"
[PROOF STATE]
proof (state)
this:
aa \<noteq> b
goal (2 subgoals):
1. \<And>Pa aa b P'. \<lbrakk><\<nu>y>P = <\<nu>b>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>b> \<prec> P'; Pa \<longmapsto> aa[b] \<prec> P'; aa \<noteq> b; <\<nu>b>Pa \<longmapsto> aa<\<nu>b> \<prec> P'\<rbrakk> \<Longrightarrow> F RP'
2. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
assume TermEq: "<\<nu>y>P = <\<nu>b>Pa"
[PROOF STATE]
proof (state)
this:
<\<nu>y>P = <\<nu>b>Pa
goal (2 subgoals):
1. \<And>Pa aa b P'. \<lbrakk><\<nu>y>P = <\<nu>b>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>b> \<prec> P'; Pa \<longmapsto> aa[b] \<prec> P'; aa \<noteq> b; <\<nu>b>Pa \<longmapsto> aa<\<nu>b> \<prec> P'\<rbrakk> \<Longrightarrow> F RP'
2. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
assume ResEq: "a<\<nu>x> \<prec> RP' = aa<\<nu>b> \<prec> Pa'"
[PROOF STATE]
proof (state)
this:
a<\<nu>x> \<prec> RP' = aa<\<nu>b> \<prec> Pa'
goal (2 subgoals):
1. \<And>Pa aa b P'. \<lbrakk><\<nu>y>P = <\<nu>b>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>b> \<prec> P'; Pa \<longmapsto> aa[b] \<prec> P'; aa \<noteq> b; <\<nu>b>Pa \<longmapsto> aa<\<nu>b> \<prec> P'\<rbrakk> \<Longrightarrow> F RP'
2. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
have "\<exists>(c::name). c \<sharp> (x, a, aa, y, Pa, Pa', b)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>c. c \<sharp> (x, a, aa, y, Pa, Pa', b)
[PROOF STEP]
by(blast intro: name_exists_fresh)
[PROOF STATE]
proof (state)
this:
\<exists>c. c \<sharp> (x, a, aa, y, Pa, Pa', b)
goal (2 subgoals):
1. \<And>Pa aa b P'. \<lbrakk><\<nu>y>P = <\<nu>b>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>b> \<prec> P'; Pa \<longmapsto> aa[b] \<prec> P'; aa \<noteq> b; <\<nu>b>Pa \<longmapsto> aa<\<nu>b> \<prec> P'\<rbrakk> \<Longrightarrow> F RP'
2. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<exists>c. c \<sharp> (x, a, aa, y, Pa, Pa', b)
[PROOF STEP]
obtain c where cineqx: "c\<noteq>x" and cFresha: "c \<sharp> a" and cineqy: "c \<noteq> y" and cineqaa: "c \<noteq> aa" and cFreshPa: "c \<sharp> Pa" and cFreshPa': "c \<sharp> Pa'" and cineqb: "c \<noteq> b"
[PROOF STATE]
proof (prove)
using this:
\<exists>c. c \<sharp> (x, a, aa, y, Pa, Pa', b)
goal (1 subgoal):
1. (\<And>c. \<lbrakk>c \<noteq> x; c \<sharp> a; c \<noteq> y; c \<noteq> aa; c \<sharp> Pa; c \<sharp> Pa'; c \<noteq> b\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by(force simp add: fresh_prod name_fresh)
[PROOF STATE]
proof (state)
this:
c \<noteq> x
c \<sharp> a
c \<noteq> y
c \<noteq> aa
c \<sharp> Pa
c \<sharp> Pa'
c \<noteq> b
goal (2 subgoals):
1. \<And>Pa aa b P'. \<lbrakk><\<nu>y>P = <\<nu>b>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>b> \<prec> P'; Pa \<longmapsto> aa[b] \<prec> P'; aa \<noteq> b; <\<nu>b>Pa \<longmapsto> aa<\<nu>b> \<prec> P'\<rbrakk> \<Longrightarrow> F RP'
2. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
from cFreshPa
[PROOF STATE]
proof (chain)
picking this:
c \<sharp> Pa
[PROOF STEP]
have "<\<nu>b>Pa = <\<nu>c>([(b, c)] \<bullet> Pa)"
[PROOF STATE]
proof (prove)
using this:
c \<sharp> Pa
goal (1 subgoal):
1. <\<nu>b>Pa = <\<nu>c>([(b, c)] \<bullet> Pa)
[PROOF STEP]
by(rule alphaRes)
[PROOF STATE]
proof (state)
this:
<\<nu>b>Pa = <\<nu>c>([(b, c)] \<bullet> Pa)
goal (2 subgoals):
1. \<And>Pa aa b P'. \<lbrakk><\<nu>y>P = <\<nu>b>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>b> \<prec> P'; Pa \<longmapsto> aa[b] \<prec> P'; aa \<noteq> b; <\<nu>b>Pa \<longmapsto> aa<\<nu>b> \<prec> P'\<rbrakk> \<Longrightarrow> F RP'
2. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
with cineqy TermEq
[PROOF STATE]
proof (chain)
picking this:
c \<noteq> y
<\<nu>y>P = <\<nu>b>Pa
<\<nu>b>Pa = <\<nu>c>([(b, c)] \<bullet> Pa)
[PROOF STEP]
have PEq: "P = [(y, c)] \<bullet> [(b, c)] \<bullet> Pa" and yFreshPa: "y \<sharp> [(b, c)] \<bullet> Pa"
[PROOF STATE]
proof (prove)
using this:
c \<noteq> y
<\<nu>y>P = <\<nu>b>Pa
<\<nu>b>Pa = <\<nu>c>([(b, c)] \<bullet> Pa)
goal (1 subgoal):
1. P = [(y, c)] \<bullet> [(b, c)] \<bullet> Pa &&& y \<sharp> [(b, c)] \<bullet> Pa
[PROOF STEP]
by(simp add: pi.inject name_abs_eq)+
[PROOF STATE]
proof (state)
this:
P = [(y, c)] \<bullet> [(b, c)] \<bullet> Pa
y \<sharp> [(b, c)] \<bullet> Pa
goal (2 subgoals):
1. \<And>Pa aa b P'. \<lbrakk><\<nu>y>P = <\<nu>b>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>b> \<prec> P'; Pa \<longmapsto> aa[b] \<prec> P'; aa \<noteq> b; <\<nu>b>Pa \<longmapsto> aa<\<nu>b> \<prec> P'\<rbrakk> \<Longrightarrow> F RP'
2. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
from PTrans
[PROOF STATE]
proof (chain)
picking this:
Pa \<longmapsto> aa[b] \<prec> Pa'
[PROOF STEP]
have "([(b, c)] \<bullet> Pa) \<longmapsto> ([(b, c)] \<bullet> (aa[b] \<prec> Pa'))"
[PROOF STATE]
proof (prove)
using this:
Pa \<longmapsto> aa[b] \<prec> Pa'
goal (1 subgoal):
1. [(b, c)] \<bullet> Pa \<longmapsto> [(b, c)] \<bullet> aa[b] \<prec> Pa'
[PROOF STEP]
by(rule TransitionsEarly.eqvt)
[PROOF STATE]
proof (state)
this:
[(b, c)] \<bullet> Pa \<longmapsto> [(b, c)] \<bullet> aa[b] \<prec> Pa'
goal (2 subgoals):
1. \<And>Pa aa b P'. \<lbrakk><\<nu>y>P = <\<nu>b>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>b> \<prec> P'; Pa \<longmapsto> aa[b] \<prec> P'; aa \<noteq> b; <\<nu>b>Pa \<longmapsto> aa<\<nu>b> \<prec> P'\<rbrakk> \<Longrightarrow> F RP'
2. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
with aaineqb cineqaa
[PROOF STATE]
proof (chain)
picking this:
aa \<noteq> b
c \<noteq> aa
[(b, c)] \<bullet> Pa \<longmapsto> [(b, c)] \<bullet> aa[b] \<prec> Pa'
[PROOF STEP]
have L1: "([(b, c)] \<bullet> Pa) \<longmapsto> aa[c] \<prec> [(b, c)] \<bullet> Pa'"
[PROOF STATE]
proof (prove)
using this:
aa \<noteq> b
c \<noteq> aa
[(b, c)] \<bullet> Pa \<longmapsto> [(b, c)] \<bullet> aa[b] \<prec> Pa'
goal (1 subgoal):
1. [(b, c)] \<bullet> Pa \<longmapsto> aa[c] \<prec> [(b, c)] \<bullet> Pa'
[PROOF STEP]
by(simp add: name_calc)
[PROOF STATE]
proof (state)
this:
[(b, c)] \<bullet> Pa \<longmapsto> aa[c] \<prec> [(b, c)] \<bullet> Pa'
goal (2 subgoals):
1. \<And>Pa aa b P'. \<lbrakk><\<nu>y>P = <\<nu>b>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>b> \<prec> P'; Pa \<longmapsto> aa[b] \<prec> P'; aa \<noteq> b; <\<nu>b>Pa \<longmapsto> aa<\<nu>b> \<prec> P'\<rbrakk> \<Longrightarrow> F RP'
2. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
with yFreshPa
[PROOF STATE]
proof (chain)
picking this:
y \<sharp> [(b, c)] \<bullet> Pa
[(b, c)] \<bullet> Pa \<longmapsto> aa[c] \<prec> [(b, c)] \<bullet> Pa'
[PROOF STEP]
have yineqaa: "y \<noteq> aa"
[PROOF STATE]
proof (prove)
using this:
y \<sharp> [(b, c)] \<bullet> Pa
[(b, c)] \<bullet> Pa \<longmapsto> aa[c] \<prec> [(b, c)] \<bullet> Pa'
goal (1 subgoal):
1. y \<noteq> aa
[PROOF STEP]
by(force dest: freshAction)
[PROOF STATE]
proof (state)
this:
y \<noteq> aa
goal (2 subgoals):
1. \<And>Pa aa b P'. \<lbrakk><\<nu>y>P = <\<nu>b>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>b> \<prec> P'; Pa \<longmapsto> aa[b] \<prec> P'; aa \<noteq> b; <\<nu>b>Pa \<longmapsto> aa<\<nu>b> \<prec> P'\<rbrakk> \<Longrightarrow> F RP'
2. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
from L1 yFreshPa cineqy
[PROOF STATE]
proof (chain)
picking this:
[(b, c)] \<bullet> Pa \<longmapsto> aa[c] \<prec> [(b, c)] \<bullet> Pa'
y \<sharp> [(b, c)] \<bullet> Pa
c \<noteq> y
[PROOF STEP]
have yFreshPa': "y \<sharp> [(b, c)] \<bullet> Pa'"
[PROOF STATE]
proof (prove)
using this:
[(b, c)] \<bullet> Pa \<longmapsto> aa[c] \<prec> [(b, c)] \<bullet> Pa'
y \<sharp> [(b, c)] \<bullet> Pa
c \<noteq> y
goal (1 subgoal):
1. y \<sharp> [(b, c)] \<bullet> Pa'
[PROOF STEP]
by(force intro: freshTransition)
[PROOF STATE]
proof (state)
this:
y \<sharp> [(b, c)] \<bullet> Pa'
goal (2 subgoals):
1. \<And>Pa aa b P'. \<lbrakk><\<nu>y>P = <\<nu>b>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>b> \<prec> P'; Pa \<longmapsto> aa[b] \<prec> P'; aa \<noteq> b; <\<nu>b>Pa \<longmapsto> aa<\<nu>b> \<prec> P'\<rbrakk> \<Longrightarrow> F RP'
2. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
from L1
[PROOF STATE]
proof (chain)
picking this:
[(b, c)] \<bullet> Pa \<longmapsto> aa[c] \<prec> [(b, c)] \<bullet> Pa'
[PROOF STEP]
have "([(y, c)] \<bullet> [(b, c)] \<bullet> Pa) \<longmapsto> [(y, c)] \<bullet> (aa[c] \<prec> [(b, c)] \<bullet> Pa')"
[PROOF STATE]
proof (prove)
using this:
[(b, c)] \<bullet> Pa \<longmapsto> aa[c] \<prec> [(b, c)] \<bullet> Pa'
goal (1 subgoal):
1. [(y, c)] \<bullet> [(b, c)] \<bullet> Pa \<longmapsto> [(y, c)] \<bullet> aa[c] \<prec> [(b, c)] \<bullet> Pa'
[PROOF STEP]
by(rule TransitionsEarly.eqvt)
[PROOF STATE]
proof (state)
this:
[(y, c)] \<bullet> [(b, c)] \<bullet> Pa \<longmapsto> [(y, c)] \<bullet> aa[c] \<prec> [(b, c)] \<bullet> Pa'
goal (2 subgoals):
1. \<And>Pa aa b P'. \<lbrakk><\<nu>y>P = <\<nu>b>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>b> \<prec> P'; Pa \<longmapsto> aa[b] \<prec> P'; aa \<noteq> b; <\<nu>b>Pa \<longmapsto> aa<\<nu>b> \<prec> P'\<rbrakk> \<Longrightarrow> F RP'
2. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
with cineqaa yineqaa cineqy PEq
[PROOF STATE]
proof (chain)
picking this:
c \<noteq> aa
y \<noteq> aa
c \<noteq> y
P = [(y, c)] \<bullet> [(b, c)] \<bullet> Pa
[(y, c)] \<bullet> [(b, c)] \<bullet> Pa \<longmapsto> [(y, c)] \<bullet> aa[c] \<prec> [(b, c)] \<bullet> Pa'
[PROOF STEP]
have PTrans: "P \<longmapsto> aa[y] \<prec> [(y, c)] \<bullet> [(b, c)] \<bullet> Pa'"
[PROOF STATE]
proof (prove)
using this:
c \<noteq> aa
y \<noteq> aa
c \<noteq> y
P = [(y, c)] \<bullet> [(b, c)] \<bullet> Pa
[(y, c)] \<bullet> [(b, c)] \<bullet> Pa \<longmapsto> [(y, c)] \<bullet> aa[c] \<prec> [(b, c)] \<bullet> Pa'
goal (1 subgoal):
1. P \<longmapsto> aa[y] \<prec> [(y, c)] \<bullet> [(b, c)] \<bullet> Pa'
[PROOF STEP]
by(simp add: name_calc)
[PROOF STATE]
proof (state)
this:
P \<longmapsto> aa[y] \<prec> [(y, c)] \<bullet> [(b, c)] \<bullet> Pa'
goal (2 subgoals):
1. \<And>Pa aa b P'. \<lbrakk><\<nu>y>P = <\<nu>b>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>b> \<prec> P'; Pa \<longmapsto> aa[b] \<prec> P'; aa \<noteq> b; <\<nu>b>Pa \<longmapsto> aa<\<nu>b> \<prec> P'\<rbrakk> \<Longrightarrow> F RP'
2. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
P \<longmapsto> aa[y] \<prec> [(y, c)] \<bullet> [(b, c)] \<bullet> Pa'
goal (2 subgoals):
1. \<And>Pa aa b P'. \<lbrakk><\<nu>y>P = <\<nu>b>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>b> \<prec> P'; Pa \<longmapsto> aa[b] \<prec> P'; aa \<noteq> b; <\<nu>b>Pa \<longmapsto> aa<\<nu>b> \<prec> P'\<rbrakk> \<Longrightarrow> F RP'
2. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
from cFreshPa'
[PROOF STATE]
proof (chain)
picking this:
c \<sharp> Pa'
[PROOF STEP]
have "aa<\<nu>b> \<prec> Pa' = aa<\<nu>c> \<prec> ([(b, c)] \<bullet> Pa')"
[PROOF STATE]
proof (prove)
using this:
c \<sharp> Pa'
goal (1 subgoal):
1. aa<\<nu>b> \<prec> Pa' = aa<\<nu>c> \<prec> ([(b, c)] \<bullet> Pa')
[PROOF STEP]
by(rule alphaBoundOutput)
[PROOF STATE]
proof (state)
this:
aa<\<nu>b> \<prec> Pa' = aa<\<nu>c> \<prec> ([(b, c)] \<bullet> Pa')
goal (2 subgoals):
1. \<And>Pa aa b P'. \<lbrakk><\<nu>y>P = <\<nu>b>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>b> \<prec> P'; Pa \<longmapsto> aa[b] \<prec> P'; aa \<noteq> b; <\<nu>b>Pa \<longmapsto> aa<\<nu>b> \<prec> P'\<rbrakk> \<Longrightarrow> F RP'
2. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
with ResEq cineqx
[PROOF STATE]
proof (chain)
picking this:
a<\<nu>x> \<prec> RP' = aa<\<nu>b> \<prec> Pa'
c \<noteq> x
aa<\<nu>b> \<prec> Pa' = aa<\<nu>c> \<prec> ([(b, c)] \<bullet> Pa')
[PROOF STEP]
have ResEq': "RP' = [(x, c)] \<bullet> [(b, c)] \<bullet> Pa'" and "x \<sharp> [(b, c)] \<bullet> Pa'"
[PROOF STATE]
proof (prove)
using this:
a<\<nu>x> \<prec> RP' = aa<\<nu>b> \<prec> Pa'
c \<noteq> x
aa<\<nu>b> \<prec> Pa' = aa<\<nu>c> \<prec> ([(b, c)] \<bullet> Pa')
goal (1 subgoal):
1. RP' = [(x, c)] \<bullet> [(b, c)] \<bullet> Pa' &&& x \<sharp> [(b, c)] \<bullet> Pa'
[PROOF STEP]
by(simp add: residual.inject name_abs_eq)+
[PROOF STATE]
proof (state)
this:
RP' = [(x, c)] \<bullet> [(b, c)] \<bullet> Pa'
x \<sharp> [(b, c)] \<bullet> Pa'
goal (2 subgoals):
1. \<And>Pa aa b P'. \<lbrakk><\<nu>y>P = <\<nu>b>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>b> \<prec> P'; Pa \<longmapsto> aa[b] \<prec> P'; aa \<noteq> b; <\<nu>b>Pa \<longmapsto> aa<\<nu>b> \<prec> P'\<rbrakk> \<Longrightarrow> F RP'
2. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
with xineqy cineqy cineqx yFreshPa'
[PROOF STATE]
proof (chain)
picking this:
x \<noteq> y
c \<noteq> y
c \<noteq> x
y \<sharp> [(b, c)] \<bullet> Pa'
RP' = [(x, c)] \<bullet> [(b, c)] \<bullet> Pa'
x \<sharp> [(b, c)] \<bullet> Pa'
[PROOF STEP]
have "RP' = [(x, y)] \<bullet> [(y, c)] \<bullet> [(b, c)] \<bullet> Pa'"
[PROOF STATE]
proof (prove)
using this:
x \<noteq> y
c \<noteq> y
c \<noteq> x
y \<sharp> [(b, c)] \<bullet> Pa'
RP' = [(x, c)] \<bullet> [(b, c)] \<bullet> Pa'
x \<sharp> [(b, c)] \<bullet> Pa'
goal (1 subgoal):
1. RP' = [(x, y)] \<bullet> [(y, c)] \<bullet> [(b, c)] \<bullet> Pa'
[PROOF STEP]
by(subst pt_perm_compose[OF pt_name_inst, OF at_name_inst], simp add: name_calc name_fresh_fresh)
[PROOF STATE]
proof (state)
this:
RP' = [(x, y)] \<bullet> [(y, c)] \<bullet> [(b, c)] \<bullet> Pa'
goal (2 subgoals):
1. \<And>Pa aa b P'. \<lbrakk><\<nu>y>P = <\<nu>b>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>b> \<prec> P'; Pa \<longmapsto> aa[b] \<prec> P'; aa \<noteq> b; <\<nu>b>Pa \<longmapsto> aa<\<nu>b> \<prec> P'\<rbrakk> \<Longrightarrow> F RP'
2. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
RP' = [(x, y)] \<bullet> [(y, c)] \<bullet> [(b, c)] \<bullet> Pa'
goal (2 subgoals):
1. \<And>Pa aa b P'. \<lbrakk><\<nu>y>P = <\<nu>b>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>b> \<prec> P'; Pa \<longmapsto> aa[b] \<prec> P'; aa \<noteq> b; <\<nu>b>Pa \<longmapsto> aa<\<nu>b> \<prec> P'\<rbrakk> \<Longrightarrow> F RP'
2. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
from ResEq
[PROOF STATE]
proof (chain)
picking this:
a<\<nu>x> \<prec> RP' = aa<\<nu>b> \<prec> Pa'
[PROOF STEP]
have "a=aa"
[PROOF STATE]
proof (prove)
using this:
a<\<nu>x> \<prec> RP' = aa<\<nu>b> \<prec> Pa'
goal (1 subgoal):
1. a = aa
[PROOF STEP]
by(simp add: residual.inject)
[PROOF STATE]
proof (state)
this:
a = aa
goal (2 subgoals):
1. \<And>Pa aa b P'. \<lbrakk><\<nu>y>P = <\<nu>b>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>b> \<prec> P'; Pa \<longmapsto> aa[b] \<prec> P'; aa \<noteq> b; <\<nu>b>Pa \<longmapsto> aa<\<nu>b> \<prec> P'\<rbrakk> \<Longrightarrow> F RP'
2. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
P \<longmapsto> aa[y] \<prec> [(y, c)] \<bullet> [(b, c)] \<bullet> Pa'
RP' = [(x, y)] \<bullet> [(y, c)] \<bullet> [(b, c)] \<bullet> Pa'
a = aa
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
P \<longmapsto> aa[y] \<prec> [(y, c)] \<bullet> [(b, c)] \<bullet> Pa'
RP' = [(x, y)] \<bullet> [(y, c)] \<bullet> [(b, c)] \<bullet> Pa'
a = aa
goal (1 subgoal):
1. F RP'
[PROOF STEP]
using yineqaa rcOpen
[PROOF STATE]
proof (prove)
using this:
P \<longmapsto> aa[y] \<prec> [(y, c)] \<bullet> [(b, c)] \<bullet> Pa'
RP' = [(x, y)] \<bullet> [(y, c)] \<bullet> [(b, c)] \<bullet> Pa'
a = aa
y \<noteq> aa
\<lbrakk>P \<longmapsto> a[y] \<prec> ?P'; a \<noteq> y\<rbrakk> \<Longrightarrow> F ([(x, y)] \<bullet> ?P')
goal (1 subgoal):
1. F RP'
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
F RP'
goal (1 subgoal):
1. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
fix Pa Pa' aa xa ya
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
assume PTrans: "Pa \<longmapsto> aa<\<nu>xa> \<prec> Pa'"
[PROOF STATE]
proof (state)
this:
Pa \<longmapsto> aa<\<nu>xa> \<prec> Pa'
goal (1 subgoal):
1. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
assume yaFreshaa: "(ya::name) \<noteq> aa"
[PROOF STATE]
proof (state)
this:
ya \<noteq> aa
goal (1 subgoal):
1. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
assume yaineqxa: "ya \<noteq> xa"
[PROOF STATE]
proof (state)
this:
ya \<noteq> xa
goal (1 subgoal):
1. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
assume EqTrans: "<\<nu>y>P = <\<nu>ya>Pa"
[PROOF STATE]
proof (state)
this:
<\<nu>y>P = <\<nu>ya>Pa
goal (1 subgoal):
1. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
assume EqRes: "a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>Pa')"
[PROOF STATE]
proof (state)
this:
a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>Pa')
goal (1 subgoal):
1. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
hence aeqaa: "a = aa"
[PROOF STATE]
proof (prove)
using this:
a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>Pa')
goal (1 subgoal):
1. a = aa
[PROOF STEP]
by(simp add: residual.inject)
[PROOF STATE]
proof (state)
this:
a = aa
goal (1 subgoal):
1. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
with yaFreshaa
[PROOF STATE]
proof (chain)
picking this:
ya \<noteq> aa
a = aa
[PROOF STEP]
have yaFresha: "ya \<sharp> a"
[PROOF STATE]
proof (prove)
using this:
ya \<noteq> aa
a = aa
goal (1 subgoal):
1. ya \<sharp> a
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
ya \<sharp> a
goal (1 subgoal):
1. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
have "\<exists>(c::name). c \<sharp> (Pa', y, xa, ya, x, Pa, aa)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>c. c \<sharp> (Pa', y, xa, ya, x, Pa, aa)
[PROOF STEP]
by(blast intro: name_exists_fresh)
[PROOF STATE]
proof (state)
this:
\<exists>c. c \<sharp> (Pa', y, xa, ya, x, Pa, aa)
goal (1 subgoal):
1. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<exists>c. c \<sharp> (Pa', y, xa, ya, x, Pa, aa)
[PROOF STEP]
obtain c where cFreshPa': "c \<sharp> Pa'" and cineqy: "c \<noteq> y" and cineqxa: "c \<noteq> xa" and cineqya: "c \<noteq> ya" and cineqx: "c \<noteq> x" and cFreshP: "c \<sharp> Pa" and cFreshaa: "c \<sharp> aa"
[PROOF STATE]
proof (prove)
using this:
\<exists>c. c \<sharp> (Pa', y, xa, ya, x, Pa, aa)
goal (1 subgoal):
1. (\<And>c. \<lbrakk>c \<sharp> Pa'; c \<noteq> y; c \<noteq> xa; c \<noteq> ya; c \<noteq> x; c \<sharp> Pa; c \<sharp> aa\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by(force simp add: fresh_prod name_fresh)
[PROOF STATE]
proof (state)
this:
c \<sharp> Pa'
c \<noteq> y
c \<noteq> xa
c \<noteq> ya
c \<noteq> x
c \<sharp> Pa
c \<sharp> aa
goal (1 subgoal):
1. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
have "\<exists>(d::name). d \<sharp> (Pa, a, x, Pa', c, xa, ya, y)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>d. d \<sharp> (Pa, a, x, Pa', c, xa, ya, y)
[PROOF STEP]
by(blast intro: name_exists_fresh)
[PROOF STATE]
proof (state)
this:
\<exists>d. d \<sharp> (Pa, a, x, Pa', c, xa, ya, y)
goal (1 subgoal):
1. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<exists>d. d \<sharp> (Pa, a, x, Pa', c, xa, ya, y)
[PROOF STEP]
obtain d where dFreshPa: "d \<sharp> Pa" and dFresha: "d \<sharp> a" and dineqx: "d \<noteq> x" and dFreshPa': "d \<sharp> Pa'" and dineqc: "d\<noteq>c" and dineqxa: "d \<noteq> xa" and dineqya: "d \<noteq> ya" and dineqy: "d \<noteq> y"
[PROOF STATE]
proof (prove)
using this:
\<exists>d. d \<sharp> (Pa, a, x, Pa', c, xa, ya, y)
goal (1 subgoal):
1. (\<And>d. \<lbrakk>d \<sharp> Pa; d \<sharp> a; d \<noteq> x; d \<sharp> Pa'; d \<noteq> c; d \<noteq> xa; d \<noteq> ya; d \<noteq> y\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by(force simp add: fresh_prod name_fresh)
[PROOF STATE]
proof (state)
this:
d \<sharp> Pa
d \<sharp> a
d \<noteq> x
d \<sharp> Pa'
d \<noteq> c
d \<noteq> xa
d \<noteq> ya
d \<noteq> y
goal (1 subgoal):
1. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
from dFreshPa
[PROOF STATE]
proof (chain)
picking this:
d \<sharp> Pa
[PROOF STEP]
have "<\<nu>ya>Pa = <\<nu>d>([(ya, d)] \<bullet> Pa)"
[PROOF STATE]
proof (prove)
using this:
d \<sharp> Pa
goal (1 subgoal):
1. <\<nu>ya>Pa = <\<nu>d>([(ya, d)] \<bullet> Pa)
[PROOF STEP]
by(rule alphaRes)
[PROOF STATE]
proof (state)
this:
<\<nu>ya>Pa = <\<nu>d>([(ya, d)] \<bullet> Pa)
goal (1 subgoal):
1. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
with EqTrans dineqy
[PROOF STATE]
proof (chain)
picking this:
<\<nu>y>P = <\<nu>ya>Pa
d \<noteq> y
<\<nu>ya>Pa = <\<nu>d>([(ya, d)] \<bullet> Pa)
[PROOF STEP]
have PEq: "P = [(y, d)] \<bullet> [(ya, d)] \<bullet> Pa"
and yFreshPa: "y \<sharp> [(ya, d)] \<bullet> Pa"
[PROOF STATE]
proof (prove)
using this:
<\<nu>y>P = <\<nu>ya>Pa
d \<noteq> y
<\<nu>ya>Pa = <\<nu>d>([(ya, d)] \<bullet> Pa)
goal (1 subgoal):
1. P = [(y, d)] \<bullet> [(ya, d)] \<bullet> Pa &&& y \<sharp> [(ya, d)] \<bullet> Pa
[PROOF STEP]
by(simp add: pi.inject name_abs_eq)+
[PROOF STATE]
proof (state)
this:
P = [(y, d)] \<bullet> [(ya, d)] \<bullet> Pa
y \<sharp> [(ya, d)] \<bullet> Pa
goal (1 subgoal):
1. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
from dFreshPa'
[PROOF STATE]
proof (chain)
picking this:
d \<sharp> Pa'
[PROOF STEP]
have L1: "<\<nu>ya>Pa' = <\<nu>d>([(ya, d)] \<bullet> Pa')"
[PROOF STATE]
proof (prove)
using this:
d \<sharp> Pa'
goal (1 subgoal):
1. <\<nu>ya>Pa' = <\<nu>d>([(ya, d)] \<bullet> Pa')
[PROOF STEP]
by(rule alphaRes)
[PROOF STATE]
proof (state)
this:
<\<nu>ya>Pa' = <\<nu>d>([(ya, d)] \<bullet> Pa')
goal (1 subgoal):
1. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
from cFreshPa' dineqc cineqya
[PROOF STATE]
proof (chain)
picking this:
c \<sharp> Pa'
d \<noteq> c
c \<noteq> ya
[PROOF STEP]
have "c \<sharp> <\<nu>d>([(ya, d)] \<bullet> Pa')"
[PROOF STATE]
proof (prove)
using this:
c \<sharp> Pa'
d \<noteq> c
c \<noteq> ya
goal (1 subgoal):
1. c \<sharp> <\<nu>d>([(ya, d)] \<bullet> Pa')
[PROOF STEP]
by(simp add: name_fresh_abs name_calc name_fresh_left)
[PROOF STATE]
proof (state)
this:
c \<sharp> <\<nu>d>([(ya, d)] \<bullet> Pa')
goal (1 subgoal):
1. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
hence "aa<\<nu>xa> \<prec> (<\<nu>d>([(ya, d)] \<bullet> Pa')) = aa<\<nu>c> \<prec> ([(xa, c)] \<bullet> <\<nu>d>([(ya, d)] \<bullet> Pa'))" (is "?LHS = _")
[PROOF STATE]
proof (prove)
using this:
c \<sharp> <\<nu>d>([(ya, d)] \<bullet> Pa')
goal (1 subgoal):
1. aa<\<nu>xa> \<prec> (<\<nu>d>([(ya, d)] \<bullet> Pa')) = aa<\<nu>c> \<prec> ([(xa, c)] \<bullet> <\<nu>d>([(ya, d)] \<bullet> Pa'))
[PROOF STEP]
by(rule alphaBoundOutput)
[PROOF STATE]
proof (state)
this:
aa<\<nu>xa> \<prec> (<\<nu>d>([(ya, d)] \<bullet> Pa')) = aa<\<nu>c> \<prec> ([(xa, c)] \<bullet> <\<nu>d>([(ya, d)] \<bullet> Pa'))
goal (1 subgoal):
1. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
with dineqxa dineqc
[PROOF STATE]
proof (chain)
picking this:
d \<noteq> xa
d \<noteq> c
aa<\<nu>xa> \<prec> (<\<nu>d>([(ya, d)] \<bullet> Pa')) = aa<\<nu>c> \<prec> ([(xa, c)] \<bullet> <\<nu>d>([(ya, d)] \<bullet> Pa'))
[PROOF STEP]
have "?LHS = aa<\<nu>c> \<prec> (<\<nu>d>([(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa'))"
[PROOF STATE]
proof (prove)
using this:
d \<noteq> xa
d \<noteq> c
aa<\<nu>xa> \<prec> (<\<nu>d>([(ya, d)] \<bullet> Pa')) = aa<\<nu>c> \<prec> ([(xa, c)] \<bullet> <\<nu>d>([(ya, d)] \<bullet> Pa'))
goal (1 subgoal):
1. aa<\<nu>xa> \<prec> (<\<nu>d>([(ya, d)] \<bullet> Pa')) = aa<\<nu>c> \<prec> (<\<nu>d>([(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa'))
[PROOF STEP]
by(simp add: name_calc)
[PROOF STATE]
proof (state)
this:
aa<\<nu>xa> \<prec> (<\<nu>d>([(ya, d)] \<bullet> Pa')) = aa<\<nu>c> \<prec> (<\<nu>d>([(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa'))
goal (1 subgoal):
1. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
with L1 EqRes cineqx dineqc dineqx
[PROOF STATE]
proof (chain)
picking this:
<\<nu>ya>Pa' = <\<nu>d>([(ya, d)] \<bullet> Pa')
a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>Pa')
c \<noteq> x
d \<noteq> c
d \<noteq> x
aa<\<nu>xa> \<prec> (<\<nu>d>([(ya, d)] \<bullet> Pa')) = aa<\<nu>c> \<prec> (<\<nu>d>([(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa'))
[PROOF STEP]
have
RP'Eq: "RP' = <\<nu>d>([(x, c)] \<bullet> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa')"
and xFreshPa': "x \<sharp> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa'"
[PROOF STATE]
proof (prove)
using this:
<\<nu>ya>Pa' = <\<nu>d>([(ya, d)] \<bullet> Pa')
a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>Pa')
c \<noteq> x
d \<noteq> c
d \<noteq> x
aa<\<nu>xa> \<prec> (<\<nu>d>([(ya, d)] \<bullet> Pa')) = aa<\<nu>c> \<prec> (<\<nu>d>([(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa'))
goal (1 subgoal):
1. RP' = <\<nu>d>([(x, c)] \<bullet> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa') &&& x \<sharp> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa'
[PROOF STEP]
by(simp add: residual.inject name_abs_eq name_fresh_abs name_calc)+
[PROOF STATE]
proof (state)
this:
RP' = <\<nu>d>([(x, c)] \<bullet> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa')
x \<sharp> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa'
goal (1 subgoal):
1. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
from PTrans aeqaa
[PROOF STATE]
proof (chain)
picking this:
Pa \<longmapsto> aa<\<nu>xa> \<prec> Pa'
a = aa
[PROOF STEP]
have "([(ya, d)] \<bullet> Pa) \<longmapsto> [(ya, d)] \<bullet> (a<\<nu>xa> \<prec> Pa')"
[PROOF STATE]
proof (prove)
using this:
Pa \<longmapsto> aa<\<nu>xa> \<prec> Pa'
a = aa
goal (1 subgoal):
1. [(ya, d)] \<bullet> Pa \<longmapsto> [(ya, d)] \<bullet> a<\<nu>xa> \<prec> Pa'
[PROOF STEP]
by(blast intro: TransitionsEarly.eqvt)
[PROOF STATE]
proof (state)
this:
[(ya, d)] \<bullet> Pa \<longmapsto> [(ya, d)] \<bullet> a<\<nu>xa> \<prec> Pa'
goal (1 subgoal):
1. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
with yaineqxa yaFresha dineqxa dFresha
[PROOF STATE]
proof (chain)
picking this:
ya \<noteq> xa
ya \<sharp> a
d \<noteq> xa
d \<sharp> a
[(ya, d)] \<bullet> Pa \<longmapsto> [(ya, d)] \<bullet> a<\<nu>xa> \<prec> Pa'
[PROOF STEP]
have L1:
"([(ya, d)] \<bullet> Pa) \<longmapsto> a<\<nu>xa> \<prec> ([(ya, d)] \<bullet> Pa')"
[PROOF STATE]
proof (prove)
using this:
ya \<noteq> xa
ya \<sharp> a
d \<noteq> xa
d \<sharp> a
[(ya, d)] \<bullet> Pa \<longmapsto> [(ya, d)] \<bullet> a<\<nu>xa> \<prec> Pa'
goal (1 subgoal):
1. [(ya, d)] \<bullet> Pa \<longmapsto> a<\<nu>xa> \<prec> ([(ya, d)] \<bullet> Pa')
[PROOF STEP]
by(simp add: name_calc name_fresh_fresh)
[PROOF STATE]
proof (state)
this:
[(ya, d)] \<bullet> Pa \<longmapsto> a<\<nu>xa> \<prec> ([(ya, d)] \<bullet> Pa')
goal (1 subgoal):
1. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
with yFreshPa
[PROOF STATE]
proof (chain)
picking this:
y \<sharp> [(ya, d)] \<bullet> Pa
[(ya, d)] \<bullet> Pa \<longmapsto> a<\<nu>xa> \<prec> ([(ya, d)] \<bullet> Pa')
[PROOF STEP]
have yineqa: "y \<noteq> a"
[PROOF STATE]
proof (prove)
using this:
y \<sharp> [(ya, d)] \<bullet> Pa
[(ya, d)] \<bullet> Pa \<longmapsto> a<\<nu>xa> \<prec> ([(ya, d)] \<bullet> Pa')
goal (1 subgoal):
1. y \<noteq> a
[PROOF STEP]
by(force dest: freshAction)
[PROOF STATE]
proof (state)
this:
y \<noteq> a
goal (1 subgoal):
1. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
from dineqc cineqya cFreshPa'
[PROOF STATE]
proof (chain)
picking this:
d \<noteq> c
c \<noteq> ya
c \<sharp> Pa'
[PROOF STEP]
have "c \<sharp> [(ya, d)] \<bullet> Pa'"
[PROOF STATE]
proof (prove)
using this:
d \<noteq> c
c \<noteq> ya
c \<sharp> Pa'
goal (1 subgoal):
1. c \<sharp> [(ya, d)] \<bullet> Pa'
[PROOF STEP]
by(simp add: name_fresh_left name_calc)
[PROOF STATE]
proof (state)
this:
c \<sharp> [(ya, d)] \<bullet> Pa'
goal (1 subgoal):
1. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
hence "a<\<nu>xa> \<prec> ([(ya, d)] \<bullet> Pa') = a<\<nu>c> \<prec> ([(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa')" (is "?LHS = _")
[PROOF STATE]
proof (prove)
using this:
c \<sharp> [(ya, d)] \<bullet> Pa'
goal (1 subgoal):
1. a<\<nu>xa> \<prec> ([(ya, d)] \<bullet> Pa') = a<\<nu>c> \<prec> ([(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa')
[PROOF STEP]
by(rule alphaBoundOutput)
[PROOF STATE]
proof (state)
this:
a<\<nu>xa> \<prec> ([(ya, d)] \<bullet> Pa') = a<\<nu>c> \<prec> ([(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa')
goal (1 subgoal):
1. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
with xFreshPa'
[PROOF STATE]
proof (chain)
picking this:
x \<sharp> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa'
a<\<nu>xa> \<prec> ([(ya, d)] \<bullet> Pa') = a<\<nu>c> \<prec> ([(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa')
[PROOF STEP]
have L2: "?LHS = a<\<nu>x> \<prec> ([(c, x)] \<bullet> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa')"
[PROOF STATE]
proof (prove)
using this:
x \<sharp> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa'
a<\<nu>xa> \<prec> ([(ya, d)] \<bullet> Pa') = a<\<nu>c> \<prec> ([(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa')
goal (1 subgoal):
1. a<\<nu>xa> \<prec> ([(ya, d)] \<bullet> Pa') = a<\<nu>x> \<prec> ([(c, x)] \<bullet> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa')
[PROOF STEP]
by(simp add: alphaBoundOutput)
[PROOF STATE]
proof (state)
this:
a<\<nu>xa> \<prec> ([(ya, d)] \<bullet> Pa') = a<\<nu>x> \<prec> ([(c, x)] \<bullet> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa')
goal (1 subgoal):
1. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
with L1 PEq
[PROOF STATE]
proof (chain)
picking this:
[(ya, d)] \<bullet> Pa \<longmapsto> a<\<nu>xa> \<prec> ([(ya, d)] \<bullet> Pa')
P = [(y, d)] \<bullet> [(ya, d)] \<bullet> Pa
a<\<nu>xa> \<prec> ([(ya, d)] \<bullet> Pa') = a<\<nu>x> \<prec> ([(c, x)] \<bullet> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa')
[PROOF STEP]
have "P \<longmapsto> [(y, d)] \<bullet> (a<\<nu>x> \<prec> ([(c, x)] \<bullet> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa'))"
[PROOF STATE]
proof (prove)
using this:
[(ya, d)] \<bullet> Pa \<longmapsto> a<\<nu>xa> \<prec> ([(ya, d)] \<bullet> Pa')
P = [(y, d)] \<bullet> [(ya, d)] \<bullet> Pa
a<\<nu>xa> \<prec> ([(ya, d)] \<bullet> Pa') = a<\<nu>x> \<prec> ([(c, x)] \<bullet> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa')
goal (1 subgoal):
1. P \<longmapsto> [(y, d)] \<bullet> a<\<nu>x> \<prec> ([(c, x)] \<bullet> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa')
[PROOF STEP]
by(force intro: TransitionsEarly.eqvt simp del: residual.perm)
[PROOF STATE]
proof (state)
this:
P \<longmapsto> [(y, d)] \<bullet> a<\<nu>x> \<prec> ([(c, x)] \<bullet> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa')
goal (1 subgoal):
1. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
with yineqa dFresha xineqy dineqx
[PROOF STATE]
proof (chain)
picking this:
y \<noteq> a
d \<sharp> a
x \<noteq> y
d \<noteq> x
P \<longmapsto> [(y, d)] \<bullet> a<\<nu>x> \<prec> ([(c, x)] \<bullet> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa')
[PROOF STEP]
have Trans: "P \<longmapsto> a<\<nu>x> \<prec> ([(y, d)] \<bullet> [(c, x)] \<bullet> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa')"
[PROOF STATE]
proof (prove)
using this:
y \<noteq> a
d \<sharp> a
x \<noteq> y
d \<noteq> x
P \<longmapsto> [(y, d)] \<bullet> a<\<nu>x> \<prec> ([(c, x)] \<bullet> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa')
goal (1 subgoal):
1. P \<longmapsto> a<\<nu>x> \<prec> ([(y, d)] \<bullet> [(c, x)] \<bullet> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa')
[PROOF STEP]
by(simp add: name_calc name_fresh_fresh)
[PROOF STATE]
proof (state)
this:
P \<longmapsto> a<\<nu>x> \<prec> ([(y, d)] \<bullet> [(c, x)] \<bullet> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa')
goal (1 subgoal):
1. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
from L1 L2 yFreshPa xineqy
[PROOF STATE]
proof (chain)
picking this:
[(ya, d)] \<bullet> Pa \<longmapsto> a<\<nu>xa> \<prec> ([(ya, d)] \<bullet> Pa')
a<\<nu>xa> \<prec> ([(ya, d)] \<bullet> Pa') = a<\<nu>x> \<prec> ([(c, x)] \<bullet> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa')
y \<sharp> [(ya, d)] \<bullet> Pa
x \<noteq> y
[PROOF STEP]
have "y \<sharp> [(c, x)] \<bullet> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa'"
[PROOF STATE]
proof (prove)
using this:
[(ya, d)] \<bullet> Pa \<longmapsto> a<\<nu>xa> \<prec> ([(ya, d)] \<bullet> Pa')
a<\<nu>xa> \<prec> ([(ya, d)] \<bullet> Pa') = a<\<nu>x> \<prec> ([(c, x)] \<bullet> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa')
y \<sharp> [(ya, d)] \<bullet> Pa
x \<noteq> y
goal (1 subgoal):
1. y \<sharp> [(c, x)] \<bullet> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa'
[PROOF STEP]
by(force intro: freshTransition)
[PROOF STATE]
proof (state)
this:
y \<sharp> [(c, x)] \<bullet> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa'
goal (1 subgoal):
1. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
with RP'Eq
[PROOF STATE]
proof (chain)
picking this:
RP' = <\<nu>d>([(x, c)] \<bullet> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa')
y \<sharp> [(c, x)] \<bullet> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa'
[PROOF STEP]
have "RP' = <\<nu>y>([(y, d)] \<bullet> [(c, x)] \<bullet> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa')"
[PROOF STATE]
proof (prove)
using this:
RP' = <\<nu>d>([(x, c)] \<bullet> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa')
y \<sharp> [(c, x)] \<bullet> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa'
goal (1 subgoal):
1. RP' = <\<nu>y>([(y, d)] \<bullet> [(c, x)] \<bullet> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa')
[PROOF STEP]
by(simp add: alphaRes name_swap)
[PROOF STATE]
proof (state)
this:
RP' = <\<nu>y>([(y, d)] \<bullet> [(c, x)] \<bullet> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa')
goal (1 subgoal):
1. \<And>Pa aa xa P' ya. \<lbrakk><\<nu>y>P = <\<nu>ya>Pa; a<\<nu>x> \<prec> RP' = aa<\<nu>xa> \<prec> (<\<nu>ya>P'); Pa \<longmapsto> aa<\<nu>xa> \<prec> P'; ya \<noteq> aa; ya \<noteq> xa; xa \<sharp> Pa; xa \<noteq> aa; <\<nu>ya>Pa \<longmapsto> aa<\<nu>xa> \<prec> (<\<nu>ya>P')\<rbrakk> \<Longrightarrow> F RP'
[PROOF STEP]
with Trans yineqa
[PROOF STATE]
proof (chain)
picking this:
P \<longmapsto> a<\<nu>x> \<prec> ([(y, d)] \<bullet> [(c, x)] \<bullet> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa')
y \<noteq> a
RP' = <\<nu>y>([(y, d)] \<bullet> [(c, x)] \<bullet> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa')
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
P \<longmapsto> a<\<nu>x> \<prec> ([(y, d)] \<bullet> [(c, x)] \<bullet> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa')
y \<noteq> a
RP' = <\<nu>y>([(y, d)] \<bullet> [(c, x)] \<bullet> [(xa, c)] \<bullet> [(ya, d)] \<bullet> Pa')
goal (1 subgoal):
1. F RP'
[PROOF STEP]
by(blast intro: rcResB)
[PROOF STATE]
proof (state)
this:
F RP'
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
F RP'
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 24273, "file": "Pi_Calculus_Early_Semantics", "length": 132}
|
% AUTHORSHIP
% Primary Developer: Stephen Meehan <swmeehan@stanford.edu>
% Math Lead & Secondary Developer: Connor Meehan <connor.gw.meehan@gmail.com>
% Bioinformatics Lead: Wayne Moore <wmoore@stanford.edu>
% Provided by the Herzenberg Lab at Stanford University
% License: BSD 3 clause
%
classdef CellBasics
%CellBasics Static utilities for MATLAB cell arrays: tab-delimited export,
%   grouping of consecutive row runs, predicate search, and conversion of
%   Java collections/arrays into cell arrays.
    methods(Static)
        function tabLines=ToTabLines(data, labels, removeHtml)
            %ToTabLines Convert a 2-D cell array into a java.util.ArrayList
            %   of tab-terminated lines, one line per row.
            %   data       - cell matrix of char and/or numeric values
            %   labels     - optional header row prepended to data
            %   removeHtml - if true (default) strip XML/HTML markup from
            %                char values via the lab's Java helper
            if nargin<3
                removeHtml=true;
            end
            if removeHtml
                J=edu.stanford.facs.swing.Basics;
            end
            tabLines=java.util.ArrayList;
            if nargin>1
                data=[labels; data];
            end
            % BUG FIX: take the size AFTER the label row is prepended.
            % Previously "rows" was the pre-concatenation row count, so the
            % last data row was silently dropped whenever labels were given.
            [rows,cols]=size(data);
            for row=1:rows
                line='';
                for col=1:cols
                    value=data{row, col};
                    if removeHtml && ischar(value)
                        value=J.RemoveXml(value);
                    end
                    if isnumeric(value)
                        value=num2str(value);
                    end
                    % every cell, including the last, is followed by a tab
                    line=sprintf('%s%s\t', line, value);
                end
                tabLines.add(line);
            end
        end

        function groups=GetRowGroups(sortedRows)
            %GetRowGroups Split rows (sorted ascending by column 1) into
            %   runs of consecutive values: a new group starts whenever
            %   column 1 jumps by more than 1 between adjacent rows.
            %   Returns a cell array of sub-matrices; {} for empty input.
            groups={};
            R=size(sortedRows, 1);
            if R>0
                lastStartingR=1;
                for r=2:R
                    if sortedRows(r,1)>sortedRows(r-1,1)+1
                        groups{end+1}=sortedRows(lastStartingR:r-1,:); %#ok<AGROW>
                        lastStartingR=r;
                    end
                end
                % BUG FIX: the trailing group used to be appended OUTSIDE
                % the R>0 guard and sliced with the loop variable r, which
                % raised "undefined variable" errors for empty input
                % (lastStartingR unset) and single-row input (r unset,
                % since "for r=2:1" never executes).
                groups{end+1}=sortedRows(lastStartingR:R,:); %#ok<AGROW>
            end
        end

        function rowIdxs=Find(c, colIdxs, fnc, first, rowStartIdx)
            %Find Scan cell matrix c over the columns in colIdxs, calling
            %   fnc(value, rowIdx, colIdx) and collecting matches.
            %   first       - if true (default) stop at the first match
            %   rowStartIdx - first row to scan (default 1)
            %   Returns a vector of row indices when a single column is
            %   searched, otherwise an N-by-2 matrix of [row col] pairs.
            if nargin<5
                rowStartIdx=1;
                if nargin<4
                    first=true;
                end
            end
            N=size(c,1);
            C=size(c,2);
            colIdxsN=length(colIdxs);
            rowIdxs=[];
            for i=rowStartIdx:N
                for j=1:colIdxsN
                    colIdx=colIdxs(j);
                    if colIdx>=1 && colIdx<=C % out-of-range columns are skipped
                        o=c{i,colIdx};
                        if fnc(o, i, colIdx)
                            if colIdxsN==1
                                rowIdxs(end+1)=i; %#ok<AGROW>
                            else
                                rowIdxs(end+1,:)=[i colIdx]; %#ok<AGROW>
                            end
                            if first
                                return;
                            end
                        end
                    end
                end
            end
        end

        function yes=equals(o, arg)
            %equals Element-equality predicate suitable for use with Find.
            yes=o==arg;
        end

        function [found, idx]=Contains(items, object)
            %Contains Linear search of a cell array using isequal.
            %   Returns found=true with the 1-based index of the first
            %   match, otherwise found=false and idx=0.
            %   (Parameter renamed from "cell" so it no longer shadows the
            %   built-in cell function; callers are positional, unaffected.)
            found=false;
            N_=length(items);
            for idx=1:N_
                if isequal(object, items{idx})
                    found=true;
                    return;
                end
            end
            idx=0;
        end

        function c=Java(collectionOrArray)
            %Java Convert a java.util Collection, or any indexable array,
            %   into a 1-by-N cell array.
            try
                % Java collection path: walk it with its own iterator.
                N=collectionOrArray.size;
                c=cell(1,N);
                it=collectionOrArray.iterator;
                for i=1:N
                    c{i}=it.next;
                end
            catch
                % Not a collection (no size/iterator): plain indexing.
                N=length(collectionOrArray);
                c=cell(1,N);
                for i=1:N
                    c{i}=collectionOrArray(i);
                end
            end
        end

        function u=UniqueNumbers(cellNums)
            %UniqueNumbers Sorted unique values from a numeric array or a
            %   cell array of numeric vectors.
            if isnumeric(cellNums)
                u=unique(cellNums);
            else
                u=[];
                N=length(cellNums);
                for i=1:N
                    % NOTE(review): the transpose assumes each element is a
                    % column vector (making it a row for horzcat) — confirm
                    % with callers.
                    u=unique([u cellNums{i}']);
                end
            end
        end
    end
end
|
{"author": "canlab", "repo": "CanlabCore", "sha": "af242e120f0480c4feaeea90471c015a14f1f60e", "save_path": "github-repos/MATLAB/canlab-CanlabCore", "path": "github-repos/MATLAB/canlab-CanlabCore/CanlabCore-af242e120f0480c4feaeea90471c015a14f1f60e/CanlabCore/External/umap/util/CellBasics.m"}
|
(* This file is generated by Why3's Coq driver *)
(* Beware! Only edit allowed sections below *)
Require Import BuiltIn.
Require BuiltIn.
Require HighOrd.
Require int.Int.
Require int.Abs.
Require int.EuclideanDivision.
Require list.List.
Require list.Length.
Require list.Mem.
Require map.Map.
Require bool.Bool.
Require list.Append.
(* ==== Abstract syntax of the IMP-like source language ====
   Generated by Why3's Coq driver; the WhyType axioms/instances mirror
   Why3's type-class requirements and must not be edited by hand. *)
(* Why3 assumption *)
Definition unit := unit.

(* Why3 assumption *)
(* Program variables: integers wrapped in a constructor. *)
Inductive id :=
  | Id : Z -> id.
Axiom id_WhyType : WhyType id.
Existing Instance id_WhyType.

(* Why3 assumption *)
(* A program state maps variable identifiers to integer values. *)
Definition state := (map.Map.map id Z).

(* Why3 assumption *)
(* Arithmetic expressions: literals, variables, +, -, *. *)
Inductive aexpr :=
  | Anum : Z -> aexpr
  | Avar : id -> aexpr
  | Aadd : aexpr -> aexpr -> aexpr
  | Asub : aexpr -> aexpr -> aexpr
  | Amul : aexpr -> aexpr -> aexpr.
Axiom aexpr_WhyType : WhyType aexpr.
Existing Instance aexpr_WhyType.

(* Why3 assumption *)
(* Boolean expressions: constants, conjunction, negation, =, <=. *)
Inductive bexpr :=
  | Btrue : bexpr
  | Bfalse : bexpr
  | Band : bexpr -> bexpr -> bexpr
  | Bnot : bexpr -> bexpr
  | Beq : aexpr -> aexpr -> bexpr
  | Ble : aexpr -> aexpr -> bexpr.
Axiom bexpr_WhyType : WhyType bexpr.
Existing Instance bexpr_WhyType.

(* Why3 assumption *)
(* Commands: skip, assignment, sequence, conditional, while loop. *)
Inductive com :=
  | Cskip : com
  | Cassign : id -> aexpr -> com
  | Cseq : com -> com -> com
  | Cif : bexpr -> com -> com -> com
  | Cwhile : bexpr -> com -> com.
Axiom com_WhyType : WhyType com.
Existing Instance com_WhyType.
(* ==== Evaluation semantics of the source language ==== *)
(* Why3 assumption *)
(* Total recursive evaluator for arithmetic expressions in state st. *)
Fixpoint aeval (st:(map.Map.map id Z)) (e:aexpr) {struct e}: Z :=
  match e with
  | (Anum n) => n
  | (Avar x) => (map.Map.get st x)
  | (Aadd e1 e2) => ((aeval st e1) + (aeval st e2))%Z
  | (Asub e1 e2) => ((aeval st e1) - (aeval st e2))%Z
  | (Amul e1 e2) => ((aeval st e1) * (aeval st e2))%Z
  end.

(* Boolean evaluation is axiomatized (not a Fixpoint): beval_def states
   that beval returns true exactly when the propositional reading of the
   expression holds, and false otherwise. *)
Parameter beval: (map.Map.map id Z) -> bexpr -> bool.
Axiom beval_def : forall (st:(map.Map.map id Z)) (b:bexpr),
  (match b with
  | Btrue => True
  | Bfalse => False
  | (Bnot b') => ((Init.Datatypes.negb (beval st b')) = true)
  | (Band b1 b2) => ((Init.Datatypes.andb (beval st b1) (beval st
      b2)) = true)
  | (Beq a1 a2) => ((aeval st a1) = (aeval st a2))
  | (Ble a1 a2) => ((aeval st a1) <= (aeval st a2))%Z
  end -> ((beval st b) = true)) /\
  ((~ match b with
  | Btrue => True
  | Bfalse => False
  | (Bnot b') => ((Init.Datatypes.negb (beval st b')) = true)
  | (Band b1 b2) => ((Init.Datatypes.andb (beval st b1) (beval st
      b2)) = true)
  | (Beq a1 a2) => ((aeval st a1) = (aeval st a2))
  | (Ble a1 a2) => ((aeval st a1) <= (aeval st a2))%Z
  end) -> ((beval st b) = false)).

(* Why3 assumption *)
(* Big-step operational semantics: ceval m0 c m1 relates an initial state
   m0 and a command c to a final state m1. *)
Inductive ceval: (map.Map.map id Z) -> com -> (map.Map.map id Z) -> Prop :=
  | E_Skip : forall (m:(map.Map.map id Z)), (ceval m Cskip m)
  | E_Ass : forall (m:(map.Map.map id Z)) (a:aexpr) (x:id), (ceval m
      (Cassign x a) (map.Map.set m x (aeval m a)))
  | E_Seq : forall (cmd1:com) (cmd2:com) (m0:(map.Map.map id Z))
      (m1:(map.Map.map id Z)) (m2:(map.Map.map id Z)), (ceval m0 cmd1 m1) ->
      ((ceval m1 cmd2 m2) -> (ceval m0 (Cseq cmd1 cmd2) m2))
  | E_IfTrue : forall (m0:(map.Map.map id Z)) (m1:(map.Map.map id Z))
      (cond:bexpr) (cmd1:com) (cmd2:com), ((beval m0 cond) = true) -> ((ceval
      m0 cmd1 m1) -> (ceval m0 (Cif cond cmd1 cmd2) m1))
  | E_IfFalse : forall (m0:(map.Map.map id Z)) (m1:(map.Map.map id Z))
      (cond:bexpr) (cmd1:com) (cmd2:com), (~ ((beval m0 cond) = true)) ->
      ((ceval m0 cmd2 m1) -> (ceval m0 (Cif cond cmd1 cmd2) m1))
  | E_WhileEnd : forall (cond:bexpr) (m:(map.Map.map id Z)) (body:com),
      (~ ((beval m cond) = true)) -> (ceval m (Cwhile cond body) m)
  | E_WhileLoop : forall (mi:(map.Map.map id Z)) (mj:(map.Map.map id Z))
      (mf:(map.Map.map id Z)) (cond:bexpr) (body:com), ((beval mi
      cond) = true) -> ((ceval mi body mj) -> ((ceval mj (Cwhile cond body)
      mf) -> (ceval mi (Cwhile cond body) mf))).

(* Evaluation is deterministic: at most one final state per (state, command). *)
Axiom ceval_deterministic_aux : forall (c:com) (mi:(map.Map.map id Z))
  (mf1:(map.Map.map id Z)), (ceval mi c mf1) -> forall (mf2:(map.Map.map id
  Z)), (ceval mi c mf2) -> (mf1 = mf2).
Axiom ceval_deterministic : forall (c:com) (mi:(map.Map.map id Z))
  (mf1:(map.Map.map id Z)) (mf2:(map.Map.map id Z)), (ceval mi c mf1) ->
  ((ceval mi c mf2) -> (mf1 = mf2)).
(* ==== Target virtual machine: states, instruction set, code layout ==== *)
(* Why3 assumption *)
Definition pos := Z.
(* Why3 assumption *)
Definition stack := (list Z).
(* Why3 assumption *)
(* VM configuration: program counter, operand stack, variable store. *)
Inductive machine_state :=
  | VMS : Z -> (list Z) -> (map.Map.map id Z) -> machine_state.
Axiom machine_state_WhyType : WhyType machine_state.
Existing Instance machine_state_WhyType.
(* Why3 assumption *)
(* Branch offsets are integers, relative to the instruction after the branch. *)
Definition ofs := Z.
(* Why3 assumption *)
(* Stack-machine instruction set: push constant/variable, pop into a
   variable, unconditional branch, arithmetic, conditional branches, halt. *)
Inductive instr :=
  | Iconst : Z -> instr
  | Ivar : id -> instr
  | Isetvar : id -> instr
  | Ibranch : Z -> instr
  | Iadd : instr
  | Isub : instr
  | Imul : instr
  | Ibeq : Z -> instr
  | Ibne : Z -> instr
  | Ible : Z -> instr
  | Ibgt : Z -> instr
  | Ihalt : instr.
Axiom instr_WhyType : WhyType instr.
Existing Instance instr_WhyType.
(* Why3 assumption *)
Definition code := (list instr).
(* Why3 assumption *)
(* codeseq_at c p c2 holds when instruction sequence c2 occurs inside the
   program c starting at position p. *)
Inductive codeseq_at: (list instr) -> Z -> (list instr) -> Prop :=
  | codeseq_at_intro : forall (c1:(list instr)) (c2:(list instr))
      (c3:(list instr)), (codeseq_at
      (Init.Datatypes.app (Init.Datatypes.app c1 c2) c3)
      (list.Length.length c1) c2).
(* The occurrence relation distributes over appended code sequences. *)
Axiom codeseq_at_app_right : forall (c:(list instr)) (c1:(list instr))
  (c2:(list instr)) (p:Z), (codeseq_at c p (Init.Datatypes.app c1 c2)) ->
  (codeseq_at c (p + (list.Length.length c1))%Z c2).
Axiom codeseq_at_app_left : forall (c:(list instr)) (c1:(list instr))
  (c2:(list instr)) (p:Z), (codeseq_at c p (Init.Datatypes.app c1 c2)) ->
  (codeseq_at c p c1).
(* Singleton code sequences — smart constructors used by the compiler. *)
(* Why3 assumption *)
Definition iconst (n:Z): (list instr) :=
  (Init.Datatypes.cons (Iconst n) Init.Datatypes.nil).
(* Why3 assumption *)
Definition ivar (x:id): (list instr) :=
  (Init.Datatypes.cons (Ivar x) Init.Datatypes.nil).
(* Why3 assumption *)
Definition isetvar (x:id): (list instr) :=
  (Init.Datatypes.cons (Isetvar x) Init.Datatypes.nil).
(* Why3 assumption *)
Definition ibeq (ofs1:Z): (list instr) :=
  (Init.Datatypes.cons (Ibeq ofs1) Init.Datatypes.nil).
(* Why3 assumption *)
Definition ible (ofs1:Z): (list instr) :=
  (Init.Datatypes.cons (Ible ofs1) Init.Datatypes.nil).
(* Why3 assumption *)
Definition ibne (ofs1:Z): (list instr) :=
  (Init.Datatypes.cons (Ibne ofs1) Init.Datatypes.nil).
(* Why3 assumption *)
Definition ibgt (ofs1:Z): (list instr) :=
  (Init.Datatypes.cons (Ibgt ofs1) Init.Datatypes.nil).
(* Why3 assumption *)
Definition ibranch (ofs1:Z): (list instr) :=
  (Init.Datatypes.cons (Ibranch ofs1) Init.Datatypes.nil).
(* ==== Small-step semantics of the VM ====
   transition c ms1 ms2: one instruction of program c executed in state
   ms1 yields ms2. Binary ops pop n2 (top) then n1; conditional branches
   either fall through to p+1 or jump to p+1+ofs. *)
(* Why3 assumption *)
Inductive transition: (list instr) -> machine_state -> machine_state ->
  Prop :=
  | trans_const : forall (c:(list instr)) (p:Z) (n:Z), (codeseq_at c p
      (iconst n)) -> forall (s:(list Z)) (m:(map.Map.map id Z)), (transition
      c (VMS p s m) (VMS (p + 1%Z)%Z (Init.Datatypes.cons n s) m))
  | trans_var : forall (c:(list instr)) (p:Z) (x:id), (codeseq_at c p
      (ivar x)) -> forall (s:(list Z)) (m:(map.Map.map id Z)), (transition c
      (VMS p s m) (VMS (p + 1%Z)%Z (Init.Datatypes.cons (map.Map.get m x) s)
      m))
  | trans_set_var : forall (c:(list instr)) (p:Z) (x:id), (codeseq_at c p
      (isetvar x)) -> forall (n:Z) (s:(list Z)) (m:(map.Map.map id Z)),
      (transition c (VMS p (Init.Datatypes.cons n s) m) (VMS (p + 1%Z)%Z s
      (map.Map.set m x n)))
  (* Arithmetic: pop two operands, push n1 op n2. *)
  | trans_add : forall (c:(list instr)) (p:Z), (codeseq_at c p
      (Init.Datatypes.cons Iadd Init.Datatypes.nil)) -> forall (n1:Z) (n2:Z)
      (s:(list Z)) (m:(map.Map.map id Z)), (transition c (VMS p
      (Init.Datatypes.cons n2 (Init.Datatypes.cons n1 s)) m) (VMS (p + 1%Z)%Z
      (Init.Datatypes.cons (n1 + n2)%Z s) m))
  | trans_sub : forall (c:(list instr)) (p:Z), (codeseq_at c p
      (Init.Datatypes.cons Isub Init.Datatypes.nil)) -> forall (n1:Z) (n2:Z)
      (s:(list Z)) (m:(map.Map.map id Z)), (transition c (VMS p
      (Init.Datatypes.cons n2 (Init.Datatypes.cons n1 s)) m) (VMS (p + 1%Z)%Z
      (Init.Datatypes.cons (n1 - n2)%Z s) m))
  | trans_mul : forall (c:(list instr)) (p:Z), (codeseq_at c p
      (Init.Datatypes.cons Imul Init.Datatypes.nil)) -> forall (n1:Z) (n2:Z)
      (s:(list Z)) (m:(map.Map.map id Z)), (transition c (VMS p
      (Init.Datatypes.cons n2 (Init.Datatypes.cons n1 s)) m) (VMS (p + 1%Z)%Z
      (Init.Datatypes.cons (n1 * n2)%Z s) m))
  (* Conditional branches: Ibeq jumps when n1 = n2, Ibne when n1 <> n2,
     Ible when n1 <= n2, Ibgt when ~(n1 <= n2); otherwise fall through. *)
  | trans_beq : forall (c:(list instr)) (p1:Z) (ofs1:Z), (codeseq_at c p1
      (ibeq ofs1)) -> forall (s:(list Z)) (m:(map.Map.map id Z)) (n1:Z)
      (n2:Z), (n1 = n2) -> (transition c (VMS p1
      (Init.Datatypes.cons n2 (Init.Datatypes.cons n1 s)) m)
      (VMS ((p1 + 1%Z)%Z + ofs1)%Z s m))
  | trans_beq1 : forall (c:(list instr)) (p1:Z) (ofs1:Z), (codeseq_at c p1
      (ibeq ofs1)) -> forall (s:(list Z)) (m:(map.Map.map id Z)) (n1:Z)
      (n2:Z), (~ (n1 = n2)) -> (transition c (VMS p1
      (Init.Datatypes.cons n2 (Init.Datatypes.cons n1 s)) m)
      (VMS (p1 + 1%Z)%Z s m))
  | trans_bne : forall (c:(list instr)) (p1:Z) (ofs1:Z), (codeseq_at c p1
      (ibne ofs1)) -> forall (s:(list Z)) (m:(map.Map.map id Z)) (n1:Z)
      (n2:Z), (n1 = n2) -> (transition c (VMS p1
      (Init.Datatypes.cons n2 (Init.Datatypes.cons n1 s)) m)
      (VMS (p1 + 1%Z)%Z s m))
  | trans_bne1 : forall (c:(list instr)) (p1:Z) (ofs1:Z), (codeseq_at c p1
      (ibne ofs1)) -> forall (s:(list Z)) (m:(map.Map.map id Z)) (n1:Z)
      (n2:Z), (~ (n1 = n2)) -> (transition c (VMS p1
      (Init.Datatypes.cons n2 (Init.Datatypes.cons n1 s)) m)
      (VMS ((p1 + 1%Z)%Z + ofs1)%Z s m))
  | trans_ble : forall (c:(list instr)) (p1:Z) (ofs1:Z), (codeseq_at c p1
      (ible ofs1)) -> forall (s:(list Z)) (m:(map.Map.map id Z)) (n1:Z)
      (n2:Z), (n1 <= n2)%Z -> (transition c (VMS p1
      (Init.Datatypes.cons n2 (Init.Datatypes.cons n1 s)) m)
      (VMS ((p1 + 1%Z)%Z + ofs1)%Z s m))
  | trans_ble1 : forall (c:(list instr)) (p1:Z) (ofs1:Z), (codeseq_at c p1
      (ible ofs1)) -> forall (s:(list Z)) (m:(map.Map.map id Z)) (n1:Z)
      (n2:Z), (~ (n1 <= n2)%Z) -> (transition c (VMS p1
      (Init.Datatypes.cons n2 (Init.Datatypes.cons n1 s)) m)
      (VMS (p1 + 1%Z)%Z s m))
  | trans_bgt : forall (c:(list instr)) (p1:Z) (ofs1:Z), (codeseq_at c p1
      (ibgt ofs1)) -> forall (s:(list Z)) (m:(map.Map.map id Z)) (n1:Z)
      (n2:Z), (n1 <= n2)%Z -> (transition c (VMS p1
      (Init.Datatypes.cons n2 (Init.Datatypes.cons n1 s)) m)
      (VMS (p1 + 1%Z)%Z s m))
  | trans_bgt1 : forall (c:(list instr)) (p1:Z) (ofs1:Z), (codeseq_at c p1
      (ibgt ofs1)) -> forall (s:(list Z)) (m:(map.Map.map id Z)) (n1:Z)
      (n2:Z), (~ (n1 <= n2)%Z) -> (transition c (VMS p1
      (Init.Datatypes.cons n2 (Init.Datatypes.cons n1 s)) m)
      (VMS ((p1 + 1%Z)%Z + ofs1)%Z s m))
  | trans_branch : forall (c:(list instr)) (p:Z) (ofs1:Z), (codeseq_at c p
      (ibranch ofs1)) -> forall (s:(list Z)) (m:(map.Map.map id Z)),
      (transition c (VMS p s m) (VMS ((p + 1%Z)%Z + ofs1)%Z s m)).
(* Why3 assumption *)
(* Reflexive-transitive closure of the one-step transition relation. *)
Inductive transition_star: (list instr) -> machine_state -> machine_state ->
  Prop :=
  | Refl : forall (p:(list instr)) (x:machine_state), (transition_star p x x)
  | Step : forall (p:(list instr)) (x:machine_state) (y:machine_state)
      (z:machine_state), (transition p x y) -> ((transition_star p y z) ->
      (transition_star p x z)).
Axiom transition_star_one : forall (p:(list instr)) (s1:machine_state)
  (s2:machine_state), (transition p s1 s2) -> (transition_star p s1 s2).
Axiom transition_star_transitive : forall (p:(list instr)) (s1:machine_state)
  (s2:machine_state) (s3:machine_state), (transition_star p s1 s2) ->
  ((transition_star p s2 s3) -> (transition_star p s1 s3)).
(* Why3 assumption *)
(* The VM terminates from store mi with store mf: execution from pc 0 with
   an empty stack reaches an Ihalt instruction with an empty stack. *)
Definition vm_terminates (c:(list instr)) (mi:(map.Map.map id Z))
  (mf:(map.Map.map id Z)): Prop := exists p:Z, (codeseq_at c p
  (Init.Datatypes.cons Ihalt Init.Datatypes.nil)) /\ (transition_star c
  (VMS 0%Z Init.Datatypes.nil mi) (VMS p Init.Datatypes.nil mf)).
(* Why3 assumption *)
Definition fst {a:Type} {a_WT:WhyType a} {b:Type} {b_WT:WhyType b} (p:(a*
b)%type): a := match p with
| (x, _) => x
end.
(* Why3 assumption *)
Definition snd {a:Type} {a_WT:WhyType a} {b:Type} {b_WT:WhyType b} (p:(a*
b)%type): b := match p with
| (_, y) => y
end.
(* Why3 assumption *)
Definition pred := (machine_state -> bool).
(* Why3 assumption *)
Definition rel := (machine_state -> (machine_state -> bool)).
(* Why3 assumption *)
Definition pre (a:Type) := (a -> (Z -> (machine_state -> bool))).
(* Why3 assumption *)
Definition post (a:Type) := (a -> (Z -> (machine_state -> (machine_state ->
bool)))).
(* Why3 assumption *)
(* Hoare-logic annotated fragment: instruction list + precondition +
   postcondition. *)
Inductive hl
(a:Type) :=
| mk_hl : (list instr) -> (a -> (Z -> (machine_state -> bool))) -> (a ->
(Z -> (machine_state -> (machine_state -> bool)))) -> hl a.
Axiom hl_WhyType : forall (a:Type) {a_WT:WhyType a}, WhyType (hl a).
Existing Instance hl_WhyType.
Implicit Arguments mk_hl [[a]].
(* Why3 assumption *)
(* Postcondition component of an [hl] triple. *)
Definition post1 {a:Type} {a_WT:WhyType a} (v:(hl a)): (a -> (Z ->
(machine_state -> (machine_state -> bool)))) :=
match v with
| (mk_hl x x1 x2) => x2
end.
(* Why3 assumption *)
(* Precondition component of an [hl] triple. *)
Definition pre1 {a:Type} {a_WT:WhyType a} (v:(hl a)): (a -> (Z ->
(machine_state -> bool))) := match v with
| (mk_hl x x1 x2) => x1
end.
(* Why3 assumption *)
(* Code component of an [hl] triple. *)
Definition code1 {a:Type} {a_WT:WhyType a} (v:(hl a)): (list instr) :=
match v with
| (mk_hl x x1 x2) => x
end.
(* Why3 assumption *)
(* Type of weakest-precondition transformers. *)
Definition wp_trans (a:Type) := (a -> (Z -> ((machine_state -> bool) ->
(machine_state -> bool)))).
(* Why3 assumption *)
(* Fragment annotated with its weakest-precondition transformer. *)
Inductive wp
(a:Type) :=
| mk_wp : (list instr) -> (a -> (Z -> ((machine_state -> bool) ->
(machine_state -> bool)))) -> wp a.
Axiom wp_WhyType : forall (a:Type) {a_WT:WhyType a}, WhyType (wp a).
Existing Instance wp_WhyType.
Implicit Arguments mk_wp [[a]].
(* Why3 assumption *)
(* Transformer component of a [wp] pair. *)
Definition wp1 {a:Type} {a_WT:WhyType a} (v:(wp a)): (a -> (Z ->
((machine_state -> bool) -> (machine_state -> bool)))) :=
match v with
| (mk_wp x x1) => x1
end.
(* Why3 assumption *)
(* Code component of a [wp] pair. *)
Definition wcode {a:Type} {a_WT:WhyType a} (v:(wp a)): (list instr) :=
match v with
| (mk_wp x x1) => x
end.
(* Why3 assumption *)
(* The transition [ms1 -> ms2] is valid inside any larger program [c_glob]
   that contains the fragment [c] at position [p]. *)
Definition contextual_irrelevance (c:(list instr)) (p:Z) (ms1:machine_state)
(ms2:machine_state): Prop := forall (c_glob:(list instr)), (codeseq_at
c_glob p c) -> (transition_star c_glob ms1 ms2).
(* Why3 assumption *)
(* Total correctness of an [hl] fragment: from any state satisfying the
   precondition, some state satisfying the postcondition is reachable, in
   any enclosing program. *)
Definition hl_correctness {a:Type} {a_WT:WhyType a} (cs:(hl a)): Prop :=
forall (x:a) (p:Z) (ms:machine_state), (((((pre1 cs) x) p) ms) = true) ->
exists ms':machine_state, ((((((post1 cs) x) p) ms) ms') = true) /\
(contextual_irrelevance (code1 cs) p ms ms').
(* Why3 assumption *)
(* Correctness of a wp-annotated fragment, for every target predicate. *)
Definition wp_correctness {a:Type} {a_WT:WhyType a} (code2:(wp a)): Prop :=
forall (x:a) (p:Z) (post2:(machine_state -> bool)) (ms:machine_state),
((((((wp1 code2) x) p) post2) ms) = true) -> exists ms':machine_state,
((post2 ms') = true) /\ (contextual_irrelevance (wcode code2) p ms ms').
(* Why3 assumption *)
(* Sequential composition of transformers: apply [w1], feeding it the goal
   obtained by running [w2] shifted by [l1] (the length of the first
   fragment); [w2] also receives the intermediate state as extra logical
   data. *)
Definition seq_wp {a:Type} {a_WT:WhyType a} (l1:Z) (w1:(a -> (Z ->
((machine_state -> bool) -> (machine_state -> bool))))) (w2:((a*
machine_state)%type -> (Z -> ((machine_state -> bool) -> (machine_state ->
bool))))): (a -> (Z -> ((machine_state -> bool) -> (machine_state ->
bool)))) := fun (x:a) (p:Z) (q:(machine_state -> bool))
(ms:machine_state) => ((((w1 x) p) (((w2 (x, ms)) (p + l1)%Z) q)) ms).
(* Unfolding lemma for [seq_wp]. *)
Axiom seq_wp_lemma : forall {a:Type} {a_WT:WhyType a}, forall (l1:Z)
(w1:(a -> (Z -> ((machine_state -> bool) -> (machine_state -> bool)))))
(w2:((a* machine_state)%type -> (Z -> ((machine_state -> bool) ->
(machine_state -> bool))))) (x:a) (p:Z) (q:(machine_state -> bool))
(ms:machine_state), ((((((seq_wp l1 w1 w2) x) p) q) ms) = ((((w1 x) p)
(((w2 (x, ms)) (p + l1)%Z) q)) ms)).
(* Conditional transformer: when [cond] holds apply [w], otherwise the goal
   [q] must already hold on the current state. *)
Parameter fork_wp: forall {a:Type} {a_WT:WhyType a}, (a -> (Z ->
((machine_state -> bool) -> (machine_state -> bool)))) -> (a -> (Z ->
(machine_state -> bool))) -> (a -> (Z -> ((machine_state -> bool) ->
(machine_state -> bool)))).
Axiom fork_wp_def : forall {a:Type} {a_WT:WhyType a}, forall (w:(a -> (Z ->
((machine_state -> bool) -> (machine_state -> bool))))) (cond:(a -> (Z ->
(machine_state -> bool)))) (x:a) (p:Z) (q:(machine_state -> bool))
(ms:machine_state), ((((((fork_wp w cond) x) p) q) ms) = true) <->
(((~ ((((cond x) p) ms) = true)) -> ((q ms) = true)) /\ (((((cond x) p)
ms) = true) -> (((((w x) p) q) ms) = true))).
(* Same statement as [fork_wp_def], kept as a separate rewriting lemma. *)
Axiom fork_wp_lemma : forall {a:Type} {a_WT:WhyType a}, forall (w:(a -> (Z ->
((machine_state -> bool) -> (machine_state -> bool))))) (cond:(a -> (Z ->
(machine_state -> bool)))) (x:a) (p:Z) (q:(machine_state -> bool))
(ms:machine_state), ((((((fork_wp w cond) x) p) q) ms) = true) <->
(((~ ((((cond x) p) ms) = true)) -> ((q ms) = true)) /\ (((((cond x) p)
ms) = true) -> (((((w x) p) q) ms) = true))).
(* Turn a Hoare pre/post pair into a wp transformer: the precondition must
   hold, and every state allowed by the postcondition must satisfy the
   goal. *)
Parameter towp_wp: forall {a:Type} {a_WT:WhyType a}, (a -> (Z ->
(machine_state -> bool))) -> (a -> (Z -> (machine_state ->
(machine_state -> bool)))) -> (a -> (Z -> ((machine_state -> bool) ->
(machine_state -> bool)))).
Axiom towp_wp_def : forall {a:Type} {a_WT:WhyType a}, forall (pr:(a -> (Z ->
(machine_state -> bool)))) (ps:(a -> (Z -> (machine_state ->
(machine_state -> bool))))) (x:a) (p:Z) (q:(machine_state -> bool))
(ms:machine_state), ((((((towp_wp pr ps) x) p) q) ms) = true) <-> (((((pr
x) p) ms) = true) /\ forall (ms':machine_state), (((((ps x) p) ms)
ms') = true) -> ((q ms') = true)).
(* Same statement as [towp_wp_def], kept as a separate rewriting lemma. *)
Axiom towp_wp_lemma : forall {a:Type} {a_WT:WhyType a}, forall (pr:(a ->
(Z -> (machine_state -> bool)))) (ps:(a -> (Z -> (machine_state ->
(machine_state -> bool))))) (x:a) (p:Z) (q:(machine_state -> bool))
(ms:machine_state), ((((((towp_wp pr ps) x) p) q) ms) = true) <-> (((((pr
x) p) ms) = true) /\ forall (ms':machine_state), (((((ps x) p) ms)
ms') = true) -> ((q ms') = true)).
(* Precondition that only constrains the current program counter. *)
Parameter trivial_pre: forall {a:Type} {a_WT:WhyType a}, (a -> (Z ->
(machine_state -> bool))).
Axiom trivial_pre_def : forall {a:Type} {a_WT:WhyType a}, forall (us:a) (p:Z)
(ms:machine_state), (((((trivial_pre : (a -> (Z -> (machine_state ->
bool)))) us) p) ms) = true) <-> match ms with
| (VMS p' _ _) => (p = p')
end.
(* Why3 assumption *)
(* Accessibility predicate for a boolean relation [r]: every [r]-predecessor
   of [x] is itself accessible.  Used as the termination argument for
   loops. *)
Inductive acc {a:Type} {a_WT:WhyType a}: (a -> (a -> bool)) -> a -> Prop :=
| Acc : forall (r:(a -> (a -> bool))) (x:a), (forall (y:a), (((r y)
x) = true) -> (acc r y)) -> (acc r x).
(* One loop iteration either re-establishes the invariant while decreasing
   the variant, or exits into the postcondition. *)
Parameter loop_progress: forall {a:Type} {a_WT:WhyType a}, (a -> (Z ->
(machine_state -> bool))) -> (a -> (Z -> (machine_state -> bool))) -> (a ->
(Z -> (machine_state -> (machine_state -> bool)))) -> (a -> (Z ->
(machine_state -> (machine_state -> bool)))).
Axiom loop_progress_def : forall {a:Type} {a_WT:WhyType a}, forall (inv:(a ->
(Z -> (machine_state -> bool)))) (post2:(a -> (Z -> (machine_state ->
bool)))) (var:(a -> (Z -> (machine_state -> (machine_state -> bool)))))
(x:a) (p:Z) (ms:machine_state) (ms':machine_state), ((((((loop_progress inv
post2 var) x) p) ms) ms') = true) <-> ((((((inv x) p) ms') = true) /\
(((((var x) p) ms') ms) = true)) \/ ((((post2 x) p) ms') = true)).
(* Why3 assumption *)
(* Promote a state predicate to a relation that ignores the initial state. *)
Definition forget_old {a:Type} {a_WT:WhyType a} (post2:(a -> (Z ->
(machine_state -> bool)))): (a -> (Z -> (machine_state -> (machine_state ->
bool)))) := fun (x:a) (p:Z) (us:machine_state) => ((post2 x) p).
(* --- Specifications of individual VM instructions.  Each [i*_post]
   parameter/axiom pair characterizes the instruction's postcondition; each
   [i*_fun] gives the same action as a function on machine states. --- *)
(* Postcondition of an instruction acting as the pure function [f]. *)
Parameter ifun_post: forall {a:Type} {a_WT:WhyType a}, (machine_state ->
machine_state) -> (a -> (Z -> (machine_state -> (machine_state -> bool)))).
Axiom ifun_post_def : forall {a:Type} {a_WT:WhyType a},
forall (f:(machine_state -> machine_state)) (us:a) (us1:Z)
(ms:machine_state) (ms':machine_state), ((((((ifun_post f: (a -> (Z ->
(machine_state -> (machine_state -> bool))))) us) us1) ms) ms') = true) <->
(ms' = (f ms)).
(* iconst n: push [n] onto the stack and advance the pc by one. *)
Parameter iconst_post: forall {a:Type} {a_WT:WhyType a}, Z -> (a -> (Z ->
(machine_state -> (machine_state -> bool)))).
Axiom iconst_post_def : forall {a:Type} {a_WT:WhyType a}, forall (n:Z) (us:a)
(p:Z) (ms:machine_state) (ms':machine_state), ((((((iconst_post n: (a ->
(Z -> (machine_state -> (machine_state -> bool))))) us) p) ms)
ms') = true) <-> forall (s:(list Z)) (m:(map.Map.map id Z)), (ms = (VMS p s
m)) -> (ms' = (VMS (p + 1%Z)%Z (Init.Datatypes.cons n s) m)).
(* Why3 assumption *)
Definition iconst_fun (n:Z): (machine_state -> machine_state) :=
fun (ms:machine_state) =>
match ms with
| (VMS p s m) => (VMS (p + 1%Z)%Z (Init.Datatypes.cons n s) m)
end.
(* ivar x: push the value of variable [x] read from memory. *)
Parameter ivar_post: forall {a:Type} {a_WT:WhyType a}, id -> (a -> (Z ->
(machine_state -> (machine_state -> bool)))).
Axiom ivar_post_def : forall {a:Type} {a_WT:WhyType a}, forall (x:id) (us:a)
(p:Z) (ms:machine_state) (ms':machine_state), ((((((ivar_post x: (a ->
(Z -> (machine_state -> (machine_state -> bool))))) us) p) ms)
ms') = true) <-> forall (s:(list Z)) (m:(map.Map.map id Z)), (ms = (VMS p s
m)) -> (ms' = (VMS (p + 1%Z)%Z (Init.Datatypes.cons (map.Map.get m x) s)
m)).
(* Why3 assumption *)
Definition ivar_fun (x:id): (machine_state -> machine_state) :=
fun (ms:machine_state) =>
match ms with
| (VMS p s m) => (VMS (p + 1%Z)%Z (Init.Datatypes.cons (map.Map.get m x) s)
m)
end.
(* Why3 assumption *)
(* Binary integer operation on the two topmost stack values. *)
Definition binop := (Z -> (Z -> Z)).
(* Precondition: the stack holds at least two values. *)
Parameter ibinop_pre: forall {a:Type} {a_WT:WhyType a}, (a -> (Z ->
(machine_state -> bool))).
Axiom ibinop_pre_def : forall {a:Type} {a_WT:WhyType a}, forall (us:a) (p:Z)
(ms:machine_state), (((((ibinop_pre : (a -> (Z -> (machine_state ->
bool)))) us) p) ms) = true) <-> exists n1:Z, exists n2:Z,
exists s:(list Z), exists m:(map.Map.map id Z), (ms = (VMS p
(Init.Datatypes.cons n2 (Init.Datatypes.cons n1 s)) m)).
(* ibinop op: pop [n2] then [n1], push [op n1 n2], advance the pc. *)
Parameter ibinop_post: forall {a:Type} {a_WT:WhyType a}, (Z -> (Z -> Z)) ->
(a -> (Z -> (machine_state -> (machine_state -> bool)))).
Axiom ibinop_post_def : forall {a:Type} {a_WT:WhyType a}, forall (op:(Z ->
(Z -> Z))) (us:a) (p:Z) (ms:machine_state) (ms':machine_state),
((((((ibinop_post op: (a -> (Z -> (machine_state -> (machine_state ->
bool))))) us) p) ms) ms') = true) <-> forall (n1:Z) (n2:Z) (s:(list Z))
(m:(map.Map.map id Z)), (ms = (VMS p
(Init.Datatypes.cons n2 (Init.Datatypes.cons n1 s)) m)) ->
(ms' = (VMS (p + 1%Z)%Z (Init.Datatypes.cons ((op n1) n2) s) m)).
(* Why3 assumption *)
(* Functional version; states with fewer than two stack values are left
   unchanged. *)
Definition ibinop_fun (op:(Z -> (Z -> Z))): (machine_state ->
machine_state) := fun (ms:machine_state) =>
match ms with
| (VMS p (Init.Datatypes.cons n2 (Init.Datatypes.cons n1 s)) m) =>
(VMS (p + 1%Z)%Z (Init.Datatypes.cons ((op n1) n2) s) m)
| _ => ms
end.
(* Why3 assumption *)
(* Concrete binary operations. *)
Definition plus: (Z -> (Z -> Z)) := fun (x:Z) (y:Z) => (x + y)%Z.
(* Why3 assumption *)
Definition sub: (Z -> (Z -> Z)) := fun (x:Z) (y:Z) => (x - y)%Z.
(* Why3 assumption *)
Definition mul: (Z -> (Z -> Z)) := fun (x:Z) (y:Z) => (x * y)%Z.
(* Empty fragment: the machine state is unchanged. *)
Parameter inil_post: forall {a:Type} {a_WT:WhyType a}, (a -> (Z ->
(machine_state -> (machine_state -> bool)))).
Axiom inil_post_def : forall {a:Type} {a_WT:WhyType a}, forall (us:a) (us1:Z)
(ms:machine_state) (ms':machine_state), ((((((inil_post : (a -> (Z ->
(machine_state -> (machine_state -> bool))))) us) us1) ms) ms') = true) <->
(ms = ms').
(* ibranch ofs: unconditional relative jump of [ofs1] past the next pc. *)
Parameter ibranch_post: forall {a:Type} {a_WT:WhyType a}, Z -> (a -> (Z ->
(machine_state -> (machine_state -> bool)))).
Axiom ibranch_post_def : forall {a:Type} {a_WT:WhyType a}, forall (ofs1:Z)
(us:a) (p:Z) (ms:machine_state) (ms':machine_state),
((((((ibranch_post ofs1: (a -> (Z -> (machine_state -> (machine_state ->
bool))))) us) p) ms) ms') = true) <-> forall (s:(list Z)) (m:(map.Map.map
id Z)), (ms = (VMS p s m)) -> (ms' = (VMS ((p + 1%Z)%Z + ofs1)%Z s m)).
(* Why3 assumption *)
Definition ibranch_fun (ofs1:Z): (machine_state -> machine_state) :=
fun (ms:machine_state) =>
match ms with
| (VMS p s m) => (VMS ((p + 1%Z)%Z + ofs1)%Z s m)
end.
(* Why3 assumption *)
(* Boolean condition on two stack values. *)
Definition cond := (Z -> (Z -> bool)).
(* icjump cond ofs: pop two values; jump by [ofs1] when the condition holds,
   otherwise fall through to the next instruction. *)
Parameter icjump_post: forall {a:Type} {a_WT:WhyType a}, (Z -> (Z ->
bool)) -> Z -> (a -> (Z -> (machine_state -> (machine_state -> bool)))).
Axiom icjump_post_def : forall {a:Type} {a_WT:WhyType a}, forall (cond1:(Z ->
(Z -> bool))) (ofs1:Z) (us:a) (p:Z) (ms:machine_state) (ms':machine_state),
((((((icjump_post cond1 ofs1: (a -> (Z -> (machine_state ->
(machine_state -> bool))))) us) p) ms) ms') = true) <-> forall (n1:Z)
(n2:Z) (s:(list Z)) (m:(map.Map.map id Z)), (ms = (VMS p
(Init.Datatypes.cons n2 (Init.Datatypes.cons n1 s)) m)) -> (((((cond1 n1)
n2) = true) -> (ms' = (VMS ((p + ofs1)%Z + 1%Z)%Z s m))) /\ ((~ (((cond1
n1) n2) = true)) -> (ms' = (VMS (p + 1%Z)%Z s m)))).
Parameter icjump_fun: (Z -> (Z -> bool)) -> Z -> (machine_state ->
machine_state).
Axiom icjump_fun_def : forall (cond1:(Z -> (Z -> bool))) (ofs1:Z)
(ms:machine_state),
match ms with
| (VMS p (Init.Datatypes.cons n2 (Init.Datatypes.cons n1 s)) m) =>
((((cond1 n1) n2) = true) -> (((icjump_fun cond1 ofs1)
ms) = (VMS ((p + ofs1)%Z + 1%Z)%Z s m))) /\ ((~ (((cond1 n1)
n2) = true)) -> (((icjump_fun cond1 ofs1) ms) = (VMS (p + 1%Z)%Z s m)))
| _ => (((icjump_fun cond1 ofs1) ms) = ms)
end.
(* Boolean comparison operators used by conditional jumps. *)
Parameter beq: (Z -> (Z -> bool)).
Axiom beq_def : forall (x:Z) (y:Z), (((beq x) y) = true) <-> (x = y).
Parameter bne: (Z -> (Z -> bool)).
Axiom bne_def : forall (x:Z) (y:Z), (((bne x) y) = true) <-> ~ (x = y).
Parameter ble: (Z -> (Z -> bool)).
Axiom ble_def : forall (x:Z) (y:Z), (((ble x) y) = true) <-> (x <= y)%Z.
Parameter bgt: (Z -> (Z -> bool)).
Axiom bgt_def : forall (x:Z) (y:Z), (((bgt x) y) = true) <-> (y < x)%Z.
(* isetvar x: pop one value and store it in variable [x]. *)
Parameter isetvar_pre: forall {a:Type} {a_WT:WhyType a}, (a -> (Z ->
(machine_state -> bool))).
Axiom isetvar_pre_def : forall {a:Type} {a_WT:WhyType a}, forall (us:a) (p:Z)
(ms:machine_state), (((((isetvar_pre : (a -> (Z -> (machine_state ->
bool)))) us) p) ms) = true) <-> exists n:Z, exists s:(list Z),
exists m:(map.Map.map id Z), (ms = (VMS p (Init.Datatypes.cons n s) m)).
Parameter isetvar_post: forall {a:Type} {a_WT:WhyType a}, id -> (a -> (Z ->
(machine_state -> (machine_state -> bool)))).
Axiom isetvar_post_def : forall {a:Type} {a_WT:WhyType a}, forall (x:id)
(us:a) (p:Z) (ms:machine_state) (ms':machine_state),
((((((isetvar_post x: (a -> (Z -> (machine_state -> (machine_state ->
bool))))) us) p) ms) ms') = true) <-> forall (s:(list Z)) (n:Z)
(m:(map.Map.map id Z)), (ms = (VMS p (Init.Datatypes.cons n s) m)) ->
(ms' = (VMS (p + 1%Z)%Z s (map.Map.set m x n))).
(* Why3 assumption *)
Definition isetvar_fun (x:id): (machine_state -> machine_state) :=
fun (ms:machine_state) =>
match ms with
| (VMS p (Init.Datatypes.cons n s) m) => (VMS (p + 1%Z)%Z s (map.Map.set m
x n))
| _ => ms
end.
(* Compiled arithmetic expression of code length [len]: advances the pc by
   [len] and pushes the value [aeval m a1]; memory unchanged. *)
Parameter aexpr_post: forall {a:Type} {a_WT:WhyType a}, aexpr -> Z -> (a ->
(Z -> (machine_state -> (machine_state -> bool)))).
Axiom aexpr_post_def : forall {a:Type} {a_WT:WhyType a}, forall (a1:aexpr)
(len:Z) (us:a) (p:Z) (ms:machine_state) (ms':machine_state),
((((((aexpr_post a1 len: (a -> (Z -> (machine_state -> (machine_state ->
bool))))) us) p) ms) ms') = true) <->
match ms with
| (VMS _ s m) => (ms' = (VMS (p + len)%Z (Init.Datatypes.cons (aeval m
a1) s) m))
end.
(* Compiled boolean expression: ends at [p + out_t] when [beval m b] equals
   [cond1] and at [p + out_f] otherwise; stack and memory are preserved. *)
Parameter bexpr_post: forall {a:Type} {a_WT:WhyType a}, bexpr -> bool -> Z ->
Z -> (a -> (Z -> (machine_state -> (machine_state -> bool)))).
Axiom bexpr_post_def : forall {a:Type} {a_WT:WhyType a}, forall (b:bexpr)
(cond1:bool) (out_t:Z) (out_f:Z) (us:a) (p:Z) (ms:machine_state)
(ms':machine_state), (((((((bexpr_post b cond1 out_t out_f: (a -> (Z ->
(machine_state -> (machine_state -> bool))))) us) p) ms) ms') = true) ->
match ms with
| (VMS _ s m) => (((beval m b) = cond1) -> (ms' = (VMS (p + out_t)%Z s
m))) /\ ((~ ((beval m b) = cond1)) -> (ms' = (VMS (p + out_f)%Z s m)))
end) /\
(match ms with
| (VMS _ s m) => (((beval m b) = cond1) /\ (ms' = (VMS (p + out_t)%Z s
m))) \/ ((~ ((beval m b) = cond1)) /\ (ms' = (VMS (p + out_f)%Z s m)))
end -> ((((((bexpr_post b cond1 out_t out_f: (a -> (Z -> (machine_state ->
(machine_state -> bool))))) us) p) ms) ms') = true)).
(* Holds when [b1] evaluates to [cond1] in the current memory. *)
Parameter exec_cond: forall {a:Type} {a_WT:WhyType a}, bexpr -> bool -> (a ->
(Z -> (machine_state -> bool))).
Axiom exec_cond_def : forall {a:Type} {a_WT:WhyType a}, forall (b1:bexpr)
(cond1:bool) (us:a) (us1:Z) (ms:machine_state), (((((exec_cond b1
cond1: (a -> (Z -> (machine_state -> bool)))) us) us1) ms) = true) <->
match ms with
| (VMS _ _ m) => ((beval m b1) = cond1)
end.
(* Compiled command precondition: the pc matches and the source command
   terminates ([ceval]) from the current memory. *)
Parameter com_pre: forall {a:Type} {a_WT:WhyType a}, com -> (a -> (Z ->
(machine_state -> bool))).
Axiom com_pre_def : forall {a:Type} {a_WT:WhyType a}, forall (cmd:com) (us:a)
(p:Z) (ms:machine_state), (((((com_pre cmd: (a -> (Z -> (machine_state ->
bool)))) us) p) ms) = true) <->
match ms with
| (VMS p' _ m) => (p = p') /\ exists m':(map.Map.map id Z), (ceval m cmd
m')
end.
(* Compiled command postcondition: pc advanced by [len], stack preserved,
   memories related by the big-step semantics [ceval]. *)
Parameter com_post: forall {a:Type} {a_WT:WhyType a}, com -> Z -> (a -> (Z ->
(machine_state -> (machine_state -> bool)))).
Axiom com_post_def : forall {a:Type} {a_WT:WhyType a}, forall (cmd:com)
(len:Z) (us:a) (us1:Z) (ms:machine_state) (ms':machine_state),
((((((com_post cmd len: (a -> (Z -> (machine_state -> (machine_state ->
bool))))) us) us1) ms) ms') = true) <->
match ms with
| (VMS p s m) =>
match ms' with
| (VMS p' s' m') => (p' = (p + len)%Z) /\ ((s' = s) /\ (ceval m cmd
m'))
end
end.
(* As [exec_cond], but reads the memory of the saved (old) state carried in
   the logical data. *)
Parameter exec_cond_old: forall {a:Type} {a_WT:WhyType a}, bexpr -> bool ->
((a* machine_state)%type -> (Z -> (machine_state -> bool))).
Axiom exec_cond_old_def : forall {a:Type} {a_WT:WhyType a}, forall (b1:bexpr)
(cond1:bool) (x:(a* machine_state)%type) (us:Z) (us1:machine_state),
(((((exec_cond_old b1 cond1: ((a* machine_state)%type -> (Z ->
(machine_state -> bool)))) x) us) us1) = true) <->
match (snd x) with
| (VMS _ _ m) => ((beval m b1) = cond1)
end.
(* While-loop invariant: the pc and stack match the state saved at loop
   entry, and both the saved memory and the current memory evaluate the
   loop to a common final memory [mf]. *)
Parameter loop_invariant: forall {a:Type} {a_WT:WhyType a}, com -> ((a*
machine_state)%type -> (Z -> (machine_state -> bool))).
Axiom loop_invariant_def : forall {a:Type} {a_WT:WhyType a}, forall (c:com)
(x:(a* machine_state)%type) (p:Z) (msi:machine_state),
(((((loop_invariant c: ((a* machine_state)%type -> (Z -> (machine_state ->
bool)))) x) p) msi) = true) <->
match (snd x) with
| (VMS _ s0 m0) =>
match msi with
| (VMS pi si mi) => (pi = p) /\ ((s0 = si) /\ exists mf:(map.Map.map id
Z), (ceval m0 c mf) /\ (ceval mi c mf))
end
end.
(* While-loop postcondition: pc advanced by [len] past the loop, stack
   preserved, memory obtained by evaluating [c] from the saved memory. *)
Parameter loop_post: forall {a:Type} {a_WT:WhyType a}, com -> Z -> ((a*
machine_state)%type -> (Z -> (machine_state -> bool))).
Axiom loop_post_def : forall {a:Type} {a_WT:WhyType a}, forall (c:com)
(len:Z) (x:(a* machine_state)%type) (p:Z) (msf:machine_state),
(((((loop_post c len: ((a* machine_state)%type -> (Z -> (machine_state ->
bool)))) x) p) msf) = true) <->
match (snd x) with
| (VMS _ s0 m0) =>
match msf with
| (VMS pf sf mf) => (pf = (p + len)%Z) /\ ((s0 = sf) /\ (ceval m0 c
mf))
end
end.
(* Loop variant relation: [msj] precedes [msi] when one more iteration of
   the body [c], with the test still true in [mi], maps [mi] to [mj] while
   keeping the pc and stack unchanged. *)
Parameter loop_variant: forall {a:Type} {a_WT:WhyType a}, com -> bexpr ->
(a -> (Z -> (machine_state -> (machine_state -> bool)))).
Axiom loop_variant_def : forall {a:Type} {a_WT:WhyType a}, forall (c:com)
(test:bexpr) (us:a) (us1:Z) (msj:machine_state) (msi:machine_state),
((((((loop_variant c test: (a -> (Z -> (machine_state -> (machine_state ->
bool))))) us) us1) msj) msi) = true) <->
match msj with
| (VMS pj sj mj) =>
match msi with
| (VMS pi si mi) => (pj = pi) /\ ((sj = si) /\ ((ceval mi c mj) /\
((beval mi test) = true)))
end
end.
(* Tactic shorthands that discharge the current goal with an external prover
   through the Why3 plugin.  NOTE: both end in [admit], so any proof that
   uses them must be closed with [Admitted]. *)
Require Import Why3.
Ltac ae := why3 "Alt-Ergo,1.01," timelimit 5; admit.
Ltac cvc := why3 "CVC4,1.4," timelimit 5; admit.
(* Why3 goal *)
(* Verification condition generated for the [Cwhile] case of the compiler:
   assuming the compiled test and body fragments are correct (hypotheses on
   code_test/code_body, their wp-compositions and the back-branch [o]), the
   loop-variant relation is accessible ([acc]) from every state satisfying
   the loop invariant — i.e. the compiled loop terminates. *)
Theorem WP_parameter_compile_com : forall {a:Type} {a_WT:WhyType a},
forall (cmd:com), forall (x:bexpr) (x1:com), (cmd = (Cwhile x x1)) ->
forall (code_body:(list instr)) (code_body1:(((a* machine_state)%type*
machine_state)%type -> (Z -> (machine_state -> bool)))) (code_body2:(((a*
machine_state)%type* machine_state)%type -> (Z -> (machine_state ->
(machine_state -> bool))))), let code_body3 := (mk_hl code_body code_body1
code_body2) in ((((code_body1 = (com_pre x1: (((a* machine_state)%type*
machine_state)%type -> (Z -> (machine_state -> bool))))) /\ (hl_correctness
code_body3)) /\ (code_body2 = (com_post x1
(list.Length.length code_body): (((a* machine_state)%type*
machine_state)%type -> (Z -> (machine_state -> (machine_state ->
bool))))))) -> let body_length :=
((list.Length.length code_body) + 1%Z)%Z in forall (code_test:(list instr))
(code_test1:((a* machine_state)%type -> (Z -> (machine_state -> bool))))
(code_test2:((a* machine_state)%type -> (Z -> (machine_state ->
(machine_state -> bool))))), let code_test3 := (mk_hl code_test code_test1
code_test2) in ((((code_test1 = (trivial_pre : ((a* machine_state)%type ->
(Z -> (machine_state -> bool))))) /\ (hl_correctness code_test3)) /\
(code_test2 = (bexpr_post x false
((list.Length.length code_test) + body_length)%Z
(list.Length.length code_test): ((a* machine_state)%type -> (Z ->
(machine_state -> (machine_state -> bool))))))) -> let ofs1 :=
((list.Length.length code_test) + body_length)%Z in forall (o:(list instr))
(o1:((((a* machine_state)%type* machine_state)%type* machine_state)%type ->
(Z -> (machine_state -> bool)))) (o2:((((a* machine_state)%type*
machine_state)%type* machine_state)%type -> (Z -> (machine_state ->
(machine_state -> bool))))), let o3 := (mk_hl o o1 o2) in
((((o1 = (trivial_pre : ((((a* machine_state)%type* machine_state)%type*
machine_state)%type -> (Z -> (machine_state -> bool))))) /\
(o2 = (ibranch_post (-ofs1)%Z: ((((a* machine_state)%type*
machine_state)%type* machine_state)%type -> (Z -> (machine_state ->
(machine_state -> bool))))))) /\ (((list.Length.length o) = 1%Z) /\
(hl_correctness o3))) -> ((hl_correctness o3) -> forall (o4:(list instr))
(o5:((((a* machine_state)%type* machine_state)%type* machine_state)%type ->
(Z -> ((machine_state -> bool) -> (machine_state -> bool))))), let o6 :=
(mk_wp o4 o5) in ((((list.Length.length o4) = (list.Length.length o)) /\
((o5 = (towp_wp o1 o2)) /\ (wp_correctness o6))) -> ((hl_correctness
code_body3) -> forall (o7:(list instr)) (o8:(((a* machine_state)%type*
machine_state)%type -> (Z -> ((machine_state -> bool) -> (machine_state ->
bool))))), let o9 := (mk_wp o7 o8) in
((((list.Length.length o7) = (list.Length.length code_body)) /\
((o8 = (towp_wp code_body1 code_body2)) /\ (wp_correctness o9))) ->
(((wp_correctness o9) /\ (wp_correctness o6)) -> forall (o10:(list instr))
(o11:(((a* machine_state)%type* machine_state)%type -> (Z ->
((machine_state -> bool) -> (machine_state -> bool))))), let o12 :=
(mk_wp o10 o11) in
((((list.Length.length o10) = ((list.Length.length o7) + (list.Length.length o4))%Z) /\
((o11 = (seq_wp (list.Length.length o7) o8 o5)) /\ (wp_correctness
o12))) -> ((wp_correctness o12) -> forall (o13:(list instr)) (o14:(((a*
machine_state)%type* machine_state)%type -> (Z -> ((machine_state ->
bool) -> (machine_state -> bool))))), let o15 := (mk_wp o13 o14) in
(((o14 = (fork_wp o11 (exec_cond x true: (((a* machine_state)%type*
machine_state)%type -> (Z -> (machine_state -> bool)))))) /\
(((list.Length.length o13) = (list.Length.length o10)) /\ (wp_correctness
o15))) -> ((hl_correctness code_test3) -> forall (o16:(list instr))
(o17:((a* machine_state)%type -> (Z -> ((machine_state -> bool) ->
(machine_state -> bool))))), let o18 := (mk_wp o16 o17) in
((((list.Length.length o16) = (list.Length.length code_test)) /\
((o17 = (towp_wp code_test1 code_test2)) /\ (wp_correctness o18))) ->
(((wp_correctness o18) /\ (wp_correctness o15)) ->
forall (wp_while:(list instr)) (wp_while1:((a* machine_state)%type -> (Z ->
((machine_state -> bool) -> (machine_state -> bool))))), let wp_while2 :=
(mk_wp wp_while wp_while1) in
((((list.Length.length wp_while) = ((list.Length.length o16) + (list.Length.length o13))%Z) /\
((wp_while1 = (seq_wp (list.Length.length o16) o17 o14)) /\ (wp_correctness
wp_while2))) -> let inv := (loop_invariant cmd: ((a* machine_state)%type ->
(Z -> (machine_state -> bool)))) in let var := (loop_variant x1 x: ((a*
machine_state)%type -> (Z -> (machine_state -> (machine_state ->
bool))))) in let o19 := (loop_progress inv (loop_post cmd ofs1: ((a*
machine_state)%type -> (Z -> (machine_state -> bool)))) var) in
(((wp_correctness wp_while2) /\ forall (x2:(a* machine_state)%type) (p:Z)
(ms:machine_state), ((((inv x2) p) ms) = true) -> (((((wp_while1 x2) p)
(((o19 x2) p) ms)) ms) = true)) -> forall (hl_while:(list instr))
(hl_while1:((a* machine_state)%type -> (Z -> (machine_state -> bool))))
(hl_while2:((a* machine_state)%type -> (Z -> (machine_state ->
(machine_state -> bool))))), (((hl_while1 = inv) /\ (hl_while2 = o19)) /\
(((list.Length.length hl_while) = (list.Length.length wp_while)) /\
(hl_correctness (mk_hl hl_while hl_while1 hl_while2)))) -> forall (x2:(a*
machine_state)%type) (p:Z) (ms:machine_state), ((((inv x2) p)
ms) = true) -> (acc ((var x2) p) ms))))))))))))))))).
(* Why3 intros a a_WT cmd x x1 h1 code_body code_body1 code_body2 code_body3
((h2,h3),h4) body_length code_test code_test1 code_test2 code_test3
((h5,h6),h7) ofs1 o o1 o2 o3 ((h8,h9),(h10,h11)) h12 o4 o5 o6
(h13,(h14,h15)) h16 o7 o8 o9 (h17,(h18,h19)) (h20,h21) o10 o11 o12
(h22,(h23,h24)) h25 o13 o14 o15 (h26,(h27,h28)) h29 o16 o17 o18
(h30,(h31,h32)) (h33,h34) wp_while wp_while1 wp_while2
(h35,(h36,h37)) inv var o19 (h38,h39) hl_while hl_while1 hl_while2
((h40,h41),(h42,h43)) x2 p ms h44. *)
intros a a_WT cmd x x1 h1 code_body code_body1 code_body2 code_body3
((h2,h3),h4) body_length code_test code_test1 code_test2 code_test3
((h5,h6),h7) ofs o o1 o2 o3 ((h8,h9),(h10,h11)) h12 o4 o5 o6 (h13,(h14,h15))
h16 o7 o8 o9 (h17,(h18,h19)) (h20,h21) o10 o11 o12 (h22,(h23,h24)) h25 o13
o14 o15 (h26,(h27,h28)) h29 o16 o17 o18 (h30,(h31,h32)) (h33,h34) wp_while
wp_while1 wp_while2 (h35,(h36,h37)) inv var o19 (h38,h39) hl_while hl_while1
hl_while2 ((h40,h41),(h42,h43)) x2 p ms h44.
(* Unfold the loop invariant and decompose the saved/current states. *)
apply loop_invariant_def in h44.
destruct x2.
destruct m.
destruct ms.
simpl in *.
destruct h44.
destruct H0.
(* [mf] is the common final memory promised by the invariant; [T] is the
   big-step evaluation from the current memory. *)
destruct H1 as [mf [ P T]].
(* Induct on the big-step evaluation of the while loop; [discriminate]
   removes the constructors that cannot produce a [Cwhile]. *)
induction T; try discriminate.
(* Two cases remain.  Each is closed via the [Acc] constructor, unfolding
   the variant relation, and the external provers. *)
apply Acc.
intros.
apply loop_variant_def in H2.
(* NOTE(review): presumably the loop-exit case — no variant-smaller state
   can exist, hence the contradiction discharged by CVC4. *)
exfalso. cvc.
apply Acc.
intros.
(* One iteration ran: identify the predecessor state with the memory [mj]
   produced by the body, then reuse the induction hypothesis on the
   remaining iterations. *)
replace y with (VMS z0 l0 mj).
apply IHT2; trivial.
apply loop_variant_def in H2.
destruct y.
assert (body = x1) by ae.
(* The body's evaluation is deterministic, so the two memories coincide. *)
assert (m0 = mj).
eapply ceval_deterministic.
2: exact T1.
ae. ae.
(* Closed with [Admitted] because the [ae]/[cvc] tactics end in [admit]. *)
Admitted.
|
{"author": "williameriksson", "repo": "compiler_construction", "sha": "f71ab5ab1af29c1cb5ebc2b3bc3d1dc6bba609b9", "save_path": "github-repos/coq/williameriksson-compiler_construction", "path": "github-repos/coq/williameriksson-compiler_construction/compiler_construction-f71ab5ab1af29c1cb5ebc2b3bc3d1dc6bba609b9/why3/compiler/compiler_Compile_com_WP_parameter_compile_com_4.v"}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 12 08:27:36 2019
@author: yaz
"""
from numpy import *
from scipy.integrate import odeint
from scipy.optimize import curve_fit, least_squares
class moments:
def __init__(self, a=None, b=None, la=None, alpha_a=None, alpha_i=None, sigma=None, beta=None, gamma=None):
# species
self.ua = 0
self.ui = 1
self.wa = 2
self.wi = 3
self.xa = 4
self.xi = 5
self.ya = 6
self.yi = 7
self.uu = 8
self.ww = 9
self.xx = 10
self.yy = 11
self.uw = 12
self.ux = 13
self.uy = 14
self.wy = 15
self.n_species = 16
# solution
self.t = None
self.x = None
self.x0 = zeros(self.n_species)
self.K = None
self.p = None
# parameters
if not (a is None or b is None or la is None or alpha_a is None or alpha_i is None or sigma is None or beta is None or gamma is None):
self.set_params(a, b, la, alpha_a, alpha_i, sigma, beta, gamma)
def ode_moments(self, x, t):
dx = zeros(len(x))
# parameters
a = self.a
b = self.b
la = self.la
aa = self.aa
ai = self.ai
si = self.si
be = self.be
ga = self.ga
# first moments
dx[self.ua] = la*aa - be*x[self.ua] + a*(x[self.ui]-x[self.ua])
dx[self.ui] = la*ai - be*x[self.ui] - b*(x[self.ui]-x[self.ua])
dx[self.wa] = (1-la)*aa - be*x[self.wa] + a*(x[self.wi]-x[self.wa])
dx[self.wi] = (1-la)*ai - be*x[self.wi] - b*(x[self.wi]-x[self.wa])
dx[self.xa] = be*(1-si)*x[self.ua] - ga*x[self.xa] + a*(x[self.xi]-x[self.xa])
dx[self.xi] = be*(1-si)*x[self.ui] - ga*x[self.xi] - b*(x[self.xi]-x[self.xa])
dx[self.ya] = be*si*x[self.ua] + be*x[self.wa] - ga*x[self.ya] + a*(x[self.yi]-x[self.ya])
dx[self.yi] = be*si*x[self.ui] + be*x[self.wi] - ga*x[self.yi] - b*(x[self.yi]-x[self.ya])
# second moments
dx[self.uu] = 2*la*self.fbar(aa*x[self.ua], ai*x[self.ui]) - 2*be*x[self.uu]
dx[self.ww] = 2*(1-la)*self.fbar(self.aa*x[self.wa], ai*x[self.wi]) - 2*be*x[self.ww]
dx[self.xx] = 2*be*(1-si)*x[self.ux] - 2*ga*x[self.xx]
dx[self.yy] = 2*si*be*x[self.uy] + 2*be*x[self.wy] - 2*ga*x[self.yy]
dx[self.uw] = la*self.fbar(aa*x[self.wa], ai*x[self.wi]) + (1-la)*self.fbar(aa*x[self.ua], ai*x[self.ui]) - 2*be*x[self.uw]
dx[self.ux] = la*self.fbar(aa*x[self.xa], ai*x[self.xi]) + be*(1-si)*x[self.uu] - (be+ga)*x[self.ux]
dx[self.uy] = la*self.fbar(aa*x[self.ya], ai*x[self.yi]) + si*be*x[self.uu] + be*x[self.uw] - (be+ga)*x[self.uy]
dx[self.wy] = (1-la)*self.fbar(aa*x[self.ya], ai*x[self.yi]) + si*be*x[self.uw] + be*x[self.ww] - (be+ga)*x[self.wy]
return dx
def integrate(self, t, x0 = None):
if x0 is None:
x0 = self.x0
else:
self.x0 = x0
sol = odeint(self.ode_moments, x0, t)
self.x = sol
self.t = t
return sol
def fbar(self, x_a, x_i):
return self.b/(self.a + self.b) * x_a + self.a/(self.a + self.b) * x_i
def set_params(self, a, b, la, alpha_a, alpha_i, sigma, beta, gamma):
self.a = a
self.b = b
self.la = la
self.aa = alpha_a
self.ai = alpha_i
self.si = sigma
self.be = beta
self.ga = gamma
# reset solutions
self.t = None
self.x = None
self.K = None
self.p = None
def get_all_central_moments(self):
ret = zeros((8, len(self.t)))
ret[0] = self.get_nu()
ret[1] = self.get_nw()
ret[2] = self.get_nx()
ret[3] = self.get_ny()
ret[4] = self.get_var_nu()
ret[5] = self.get_var_nw()
ret[6] = self.get_var_nx()
ret[7] = self.get_var_ny()
return ret
def get_nosplice_central_moments(self):
ret = zeros((4, len(self.t)))
ret[0] = self.get_n_labeled()
ret[1] = self.get_n_unlabeled()
ret[2] = self.get_var_labeled()
ret[3] = self.get_var_unlabeled()
return ret
def get_central_moments(self, keys=None):
if keys is None:
ret = self.get_all_centeral_moments()
else:
ret = zeros((len(keys)*2, len(self.t)))
i = 0
if 'ul' in keys:
ret[i] = self.get_nu()
ret[i+1] = self.get_var_nu()
i += 2
if 'uu' in keys:
ret[i] = self.get_nw()
ret[i+1] = self.get_var_nw()
i += 2
if 'sl' in keys:
ret[i] = self.get_nx()
ret[i+1] = self.get_var_nx()
i += 2
if 'su' in keys:
ret[i] = self.get_ny()
ret[i+1] = self.get_var_ny()
i += 2
return ret
def get_nu(self):
return self.fbar(self.x[:, self.ua], self.x[:, self.ui])
def get_nw(self):
return self.fbar(self.x[:, self.wa], self.x[:, self.wi])
def get_nx(self):
return self.fbar(self.x[:, self.xa], self.x[:, self.xi])
def get_ny(self):
return self.fbar(self.x[:, self.ya], self.x[:, self.yi])
def get_n_labeled(self):
return self.get_nu() + self.get_nx()
def get_n_unlabeled(self):
return self.get_nw() + self.get_ny()
def get_var_nu(self):
c = self.get_nu()
return self.x[:, self.uu] + c - c**2
def get_var_nw(self):
c = self.get_nw()
return self.x[:, self.ww] + c - c**2
def get_var_nx(self):
c = self.get_nx()
return self.x[:, self.xx] + c - c**2
def get_var_ny(self):
c = self.get_ny()
return self.x[:, self.yy] + c - c**2
def get_cov_ux(self):
cu = self.get_nu()
cx = self.get_nx()
return self.x[:, self.ux] - cu * cx
def get_cov_wy(self):
cw = self.get_nw()
cy = self.get_ny()
return self.x[:, self.wy] - cw * cy
def get_var_labeled(self):
return self.get_var_nu() + self.get_var_nx() + 2*self.get_cov_ux()
def get_var_unlabeled(self):
return self.get_var_nw() + self.get_var_ny() + 2*self.get_cov_wy()
def computeKnp(self):
# parameters
a = self.a
b = self.b
la = self.la
aa = self.aa
ai = self.ai
si = self.si
be = self.be
ga = self.ga
K = zeros((self.n_species, self.n_species))
# E1
K[self.ua, self.ua] = -be - a
K[self.ua, self.ui] = a
K[self.ui, self.ua] = b
K[self.ui, self.ui] = -be - b
K[self.wa, self.wa] = -be - a
K[self.wa, self.wi] = a
K[self.wi, self.wa] = b
K[self.wi, self.wi] = -be - b
# E2
K[self.xa, self.xa] = -ga - a
K[self.xa, self.xi] = a
K[self.xi, self.xa] = b
K[self.xi, self.xi] = -ga - b
K[self.ya, self.ya] = -ga - a
K[self.ya, self.yi] = a
K[self.yi, self.ya] = b
K[self.yi, self.yi] = -ga - b
# E3
K[self.uu, self.uu] = -2*be
K[self.ww, self.ww] = -2*be
K[self.xx, self.xx] = -2*ga
K[self.yy, self.yy] = -2*ga
# E4
K[self.uw, self.uw] = -2*be
K[self.ux, self.ux] = -be - ga
K[self.uy, self.uy] = -be - ga
K[self.wy, self.wy] = -be - ga
K[self.uy, self.uw] = be
K[self.wy, self.uw] = si * be
# F21
K[self.xa, self.ua] = (1-si)*be
K[self.xi, self.ui] = (1-si)*be
K[self.ya, self.wa] = be
K[self.ya, self.ua] = si * be
K[self.yi, self.wi] = be
K[self.yi, self.ui] = si * be
# F31
K[self.uu, self.ua] = 2 * la * aa * b / (a + b)
K[self.uu, self.ui] = 2 * la * ai * a / (a + b)
K[self.ww, self.wa] = 2 * (1-la) * aa * b / (a + b)
K[self.ww, self.wi] = 2 * (1-la) * ai * a / (a + b)
# F34
K[self.xx, self.ux] = 2*(1-si)*be
K[self.yy, self.uy] = 2*si*be
K[self.yy, self.wy] = 2*be
# F41
K[self.uw, self.ua] = (1-la)*aa*b/(a+b)
K[self.uw, self.ui] = (1-la)*ai*a/(a+b)
K[self.uw, self.wa] = la*aa*b/(a+b)
K[self.uw, self.wi] = la*ai*a/(a+b)
# F42
K[self.ux, self.xa] = la*aa*b/(a+b)
K[self.ux, self.xi] = la*ai*a/(a+b)
K[self.uy, self.ya] = la*aa*b/(a+b)
K[self.uy, self.yi] = la*ai*a/(a+b)
K[self.wy, self.ya] = (1-la)*aa*b/(a+b)
K[self.wy, self.yi] = (1-la)*ai*a/(a+b)
# F43
K[self.ux, self.uu] = (1-si)*be
K[self.uy, self.uu] = si*be
K[self.wy, self.ww] = be
p = zeros(self.n_species)
p[self.ua] = la * aa
p[self.ui] = la * ai
p[self.wa] = (1-la) * aa
p[self.wi] = (1-la) * ai
return K, p
def solve(self, t, x0=None):
    """Evaluate the closed-form solution of dx/dt = K x + p.

    Uses the eigendecomposition of K:
    x(t) = e^{K (t - t0)} (x0 + x_ss) - x_ss, with x_ss = K^{-1} p.

    Parameters
    ----------
    t : array-like of time points; t[0] is taken as the initial time.
    x0 : optional initial state vector. Defaults to self.x0; when given,
        it also replaces self.x0.

    Returns
    -------
    ndarray, shape (len(t), n_species); also cached on self.x / self.t.
    """
    t0 = t[0]
    if x0 is None:
        x0 = self.x0
    else:
        self.x0 = x0
    # Build and cache the system matrices on first use.
    if self.K is None or self.p is None:
        self.K, self.p = self.computeKnp()
    K, p = self.K, self.p
    x_ss = linalg.solve(K, p)
    shifted0 = x0 + x_ss
    eigvals, U = linalg.eig(K)
    V = linalg.inv(U)
    # The system matrix is real; discard spurious imaginary parts.
    eigvals, U, V = map(real, (eigvals, U, V))
    growth = exp(eigvals)
    x = zeros((len(t), self.n_species))
    x[0] = x0
    for k in range(1, len(t)):
        # growth ** dt == exp(eigvals * dt): propagate in the eigenbasis.
        x[k] = U.dot(diag(growth ** (t[k] - t0))).dot(V).dot(shifted0) - x_ss
    self.x = x
    self.t = t
    return x
class moments_simple:
    """Reduced four-species first-moment model.

    The state vector is ordered [u, w, x, y] (indices 0-3) and evolves as
    the linear system dx/dt = K x + p, solved in closed form by `solve`.
    """

    def __init__(self, a=None, b=None, la=None, alpha_a=None, alpha_i=None, sigma=None, beta=None, gamma=None):
        """Create the model; parameters are applied only when ALL are given,
        otherwise call `set_params` later."""
        # fixed layout of the state vector
        self._u = 0
        self._w = 1
        self._x = 2
        self._y = 3
        self.n_species = 4
        # cached solution
        self.t = None
        self.x = None
        self.x0 = zeros(self.n_species)
        # cached system matrices (built lazily by solve)
        self.K = None
        self.p = None
        # configure parameters only when every value was supplied
        params = (a, b, la, alpha_a, alpha_i, sigma, beta, gamma)
        if all(v is not None for v in params):
            self.set_params(*params)

    def set_initial_condition(self, nu0, nw0, nx0, ny0):
        """Store and return the initial state vector [u, w, x, y]."""
        state = zeros(self.n_species)
        state[self._u] = nu0
        state[self._w] = nw0
        state[self._x] = nx0
        state[self._y] = ny0
        self.x0 = state
        return state

    def get_x_velocity(self, nu0, nx0):
        """Instantaneous d<x>/dt for the given u and x levels."""
        return self.be * (1 - self.si) * nu0 - self.ga * nx0

    def get_y_velocity(self, nu0, nw0, ny0):
        """Instantaneous d<y>/dt for the given u, w and y levels."""
        return self.be * self.si * nu0 + self.be * nw0 - self.ga * ny0

    def fbar(self, x_a, x_i):
        """Weighted average b/(a+b)*x_a + a/(a+b)*x_i of the two states."""
        denom = self.a + self.b
        return self.b / denom * x_a + self.a / denom * x_i

    def set_params(self, a, b, la, alpha_a, alpha_i, sigma, beta, gamma):
        """Set all model parameters and invalidate any cached results."""
        self.a = a
        self.b = b
        self.la = la
        self.aa = alpha_a
        self.ai = alpha_i
        self.si = sigma
        self.be = beta
        self.ga = gamma
        # reset cached solution and system matrices
        self.t = None
        self.x = None
        self.K = None
        self.p = None

    def get_total(self):
        """Total count per time point (row sums of the solution matrix)."""
        return sum(self.x, 1)

    def computeKnp(self):
        """Build the state matrix K and source vector p of the system."""
        la, aa, ai = self.la, self.aa, self.ai
        si, be, ga = self.si, self.be, self.ga
        K = zeros((self.n_species, self.n_species))
        # decay terms on the diagonal
        K[self._u, self._u] = -be
        K[self._w, self._w] = -be
        K[self._x, self._x] = -ga
        K[self._y, self._y] = -ga
        # conversion fluxes (off-diagonal): u feeds x and y (split by si),
        # w feeds y
        K[self._x, self._u] = be*(1-si)
        K[self._y, self._u] = si*be
        K[self._y, self._w] = be
        # constant production drives only u and w, split by la
        p = zeros(self.n_species)
        p[self._u] = la * self.fbar(aa, ai)
        p[self._w] = (1-la) * self.fbar(aa, ai)
        return K, p

    def solve(self, t, x0=None):
        """Closed-form solution x(t) = e^{K(t-t0)}(x0 + x_ss) - x_ss.

        Parameters
        ----------
        t : array-like of time points; t[0] is the initial time.
        x0 : optional initial state; defaults to (and updates) self.x0.

        Returns
        -------
        ndarray, shape (len(t), n_species); also cached on self.x / self.t.
        """
        t0 = t[0]
        if x0 is None:
            x0 = self.x0
        else:
            self.x0 = x0
        # build and cache the system matrices on first use
        if self.K is None or self.p is None:
            self.K, self.p = self.computeKnp()
        K, p = self.K, self.p
        x_ss = linalg.solve(K, p)
        shifted0 = x0 + x_ss
        eigvals, U = linalg.eig(K)
        V = linalg.inv(U)
        eigvals, U, V = map(real, (eigvals, U, V))
        growth = exp(eigvals)
        x = zeros((len(t), self.n_species))
        x[0] = x0
        for k in range(1, len(t)):
            x[k] = U.dot(diag(growth ** (t[k] - t0))).dot(V).dot(shifted0) - x_ss
        self.x = x
        self.t = t
        return x
class estimation:
    """Parameter estimation for the `moments` simulator.

    Fits model parameters to log-normalized moment data by non-linear
    least squares. Initial guesses are drawn from `ranges` (one
    (low, high) pair per parameter) either by Latin hypercube sampling
    or independent uniform draws.
    """

    def __init__(self, ranges, x0=None):
        """
        Parameters
        ----------
        ranges : sequence of (low, high) bounds, one per model parameter.
        x0 : optional initial condition forwarded to the simulator.
        """
        self.ranges = ranges
        self.n_params = len(ranges)
        self.simulator = moments()
        if x0 is not None:
            self.simulator.x0 = x0

    def sample_p0(self, samples=1, method='lhs'):
        """Draw `samples` initial parameter vectors within `ranges`.

        method='lhs' uses Latin hypercube sampling; any other value falls
        back to independent uniform draws per parameter.
        """
        ret = zeros((samples, self.n_params))
        if method == 'lhs':
            ret = self._lhsclassic(samples)
            # rescale the unit-cube sample into the parameter ranges
            for i in range(self.n_params):
                ret[:, i] = ret[:, i] * (self.ranges[i][1] - self.ranges[i][0]) + self.ranges[i][0]
        else:
            for n in range(samples):
                for i in range(self.n_params):
                    r = random.rand()
                    ret[n, i] = r * (self.ranges[i][1] - self.ranges[i][0]) + self.ranges[i][0]
        return ret

    def _lhsclassic(self, samples):
        """Classic Latin hypercube sample in the unit cube (from pyDOE)."""
        # Generate the intervals
        cut = linspace(0, 1, samples + 1)
        # Fill points uniformly in each interval
        u = random.rand(samples, self.n_params)
        a = cut[:samples]
        b = cut[1:samples + 1]
        rdpoints = zeros_like(u)
        for j in range(self.n_params):
            rdpoints[:, j] = u[:, j] * (b - a) + a
        # Make the random pairings
        H = zeros_like(rdpoints)
        for j in range(self.n_params):
            order = random.permutation(range(samples))
            H[:, j] = rdpoints[order, j]
        return H

    def get_bound(self, index):
        """Vector of the `index`-th bound (0=lower, 1=upper) of each range."""
        ret = zeros(self.n_params)
        for i in range(self.n_params):
            ret[i] = self.ranges[i][index]
        return ret

    def normalize_data(self, X):
        """log10(X + 1) transform, applied to data and model output alike."""
        return log10(X + 1)

    def f_curve_fit(self, t, *params):
        """Model function for scipy.optimize.curve_fit (flattened output)."""
        self.simulator.set_params(*params)
        self.simulator.integrate(t, self.simulator.x0)
        ret = self.simulator.get_all_central_moments()
        ret = self.normalize_data(ret)
        return ret.flatten()

    def f_lsq(self, params, t, x_data_norm, method='analytical', experiment_type=None):
        """Residual function for scipy.optimize.least_squares.

        Raises
        ------
        ValueError
            For an unrecognized `method` or `experiment_type`. (The
            original code silently skipped the simulation for an unknown
            method and hit an unbound local for an unknown experiment
            type.)
        """
        self.simulator.set_params(*params)
        if method == 'numerical':
            self.simulator.integrate(t, self.simulator.x0)
        elif method == 'analytical':
            self.simulator.solve(t, self.simulator.x0)
        else:
            raise ValueError("unknown method: {}".format(method))
        if experiment_type is None:
            ret = self.simulator.get_all_central_moments()
        elif experiment_type == 'nosplice':
            ret = self.simulator.get_nosplice_central_moments()
        elif experiment_type == 'label':
            ret = self.simulator.get_central_moments(['ul', 'sl'])
        else:
            raise ValueError("unknown experiment_type: {}".format(experiment_type))
        ret = self.normalize_data(ret).flatten()
        # NaNs in the model output are zeroed before forming residuals
        ret[isnan(ret)] = 0
        return ret - x_data_norm

    def fit(self, t, x_data, p0=None, bounds=None):
        """Single-start fit via curve_fit; returns (popt, pcov)."""
        if p0 is None:
            p0 = self.sample_p0()
        x_data_norm = self.normalize_data(x_data)
        if bounds is None:
            bounds = (self.get_bound(0), self.get_bound(1))
        popt, pcov = curve_fit(self.f_curve_fit, t, x_data_norm.flatten(), p0=p0, bounds=bounds)
        return popt, pcov

    def fit_lsq(self, t, x_data, p0=None, n_p0=1, bounds=None, sample_method='lhs', method='analytical', experiment_type=None):
        """Multi-start least-squares fit; returns (best_params, best_cost)."""
        if p0 is None:
            p0 = self.sample_p0(n_p0, sample_method)
        else:
            if p0.ndim == 1:
                p0 = [p0]
            n_p0 = len(p0)
        x_data_norm = self.normalize_data(x_data)
        if bounds is None:
            bounds = (self.get_bound(0), self.get_bound(1))
        costs = zeros(n_p0)
        X = []
        for i in range(n_p0):
            ret = least_squares(lambda p: self.f_lsq(p, t, x_data_norm.flatten(), method, experiment_type), p0[i], bounds=bounds)
            costs[i] = ret.cost
            X.append(ret.x)
        i_min = argmin(costs)
        return X[i_min], costs[i_min]
|
{"hexsha": "6a091c476a5d0a3c29ab6665a563916be15f319e", "size": 16800, "ext": "py", "lang": "Python", "max_stars_repo_path": "dynamo/tools/utils_moments_deprecated.py", "max_stars_repo_name": "softbear/dynamo-release", "max_stars_repo_head_hexsha": "18b64d257c755ccb3aedd7877d9d39f8c40f46fa", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-10-02T19:38:55.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-02T19:38:55.000Z", "max_issues_repo_path": "dynamo/tools/utils_moments_deprecated.py", "max_issues_repo_name": "softbear/dynamo-release", "max_issues_repo_head_hexsha": "18b64d257c755ccb3aedd7877d9d39f8c40f46fa", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dynamo/tools/utils_moments_deprecated.py", "max_forks_repo_name": "softbear/dynamo-release", "max_forks_repo_head_hexsha": "18b64d257c755ccb3aedd7877d9d39f8c40f46fa", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.6569343066, "max_line_length": 142, "alphanum_fraction": 0.4952380952, "include": true, "reason": "from numpy,from scipy", "num_tokens": 5349}
|
import numpy as np
from PIL import Image
def matched_tiling(img, block_size, target_shape, overlap_size):
    """Synthesize an image of `target_shape` by quilting blocks of `img`.

    Blocks of `block_size` are placed on a grid with `overlap_size`
    overlap. Each block is selected by `matched_crop` to match the
    already placed neighbours (wrapped in an `Anchor`) and blended along
    a minimum-error seam by `Anchor.stitch`.

    Parameters
    ----------
    img : ndarray, source texture to sample blocks from.
    block_size, overlap_size : array-like [rows, cols] block/overlap sizes.
    target_shape : shape of the uint8 output image.
    """
    new_block_size = block_size - overlap_size
    n_blocks = (np.ceil(np.true_divide(target_shape[0:2], new_block_size))).astype('uint32')
    output = np.zeros(target_shape, 'uint8')
    print("Total blocks to build: " + str(n_blocks[0] * n_blocks[1]))
    print("Building...")
    for i in range(0, n_blocks[0]):
        row1 = i * new_block_size[0]
        # BUGFIX: rows must be clamped by the output height
        # (target_shape[0]); the original clamped by the width
        # (target_shape[1]).
        row2 = min((i + 1) * new_block_size[0] + overlap_size[0], target_shape[0])
        if row2 - row1 < overlap_size[0]:
            continue  # remaining strip too thin to stitch
        for j in range(0, n_blocks[1]):
            col1 = j * new_block_size[1]
            col2 = min((j + 1) * new_block_size[1] + overlap_size[1], target_shape[1])
            if col2 - col1 < overlap_size[1]:
                continue
            total_n = i * n_blocks[1] + j + 1
            print('Building block ' + str(total_n))
            anchor = Anchor(output[row1:row2, col1:col2], row1, col1, overlap_size)
            patch = matched_crop(img, np.asarray([row2 - row1, col2 - col1]), anchor)
            output[row1:row2, col1:col2] = anchor.stitch(np.array(patch))
    return output
class Anchor(object):
    """Seam-matching context for one output block during quilting.

    Wraps the already-written region of the output image together with
    its grid position, scores candidate patches against the overlap
    strips (`calc_error`), and blends an accepted patch along a
    minimum-error seam (`stitch`).
    """

    def __init__(self, data, row_index, col_index, overlap_size):
        self.__data = data                  # current output content of the block
        self.__row_index = row_index        # top row of the block in the output
        self.__col_index = col_index        # left column of the block in the output
        self.__overlap_size = overlap_size  # [rows, cols] of the overlap strips

    def __calc_pixel_error(self, block1, block2):
        """Per-pixel luminance-weighted RGB error between two blocks."""
        # BUGFIX: cast to float before differencing -- with uint8 inputs
        # both the difference and the square wrapped around (modulo 256).
        diff = block1.astype(np.float64) - block2.astype(np.float64)
        r, g, b = self.__split_channels(diff * diff)
        r *= 0.30
        g *= 0.59
        b *= 0.11
        return np.sqrt(r + g + b)

    def calc_error(self, patch):
        """Total overlap error of `patch` against the placed neighbours."""
        if self.__row_index == 0 and self.__col_index == 0:
            return 0.0  # first block of the grid: nothing to match against
        total_error = 0.0
        # calculate vertical strip error (includes overlap region)
        if self.__col_index != 0:
            b1 = self.__data[:, 0: self.__overlap_size[1]]
            b2 = patch[:, 0: self.__overlap_size[1]]
            total_error += np.sum(self.__calc_pixel_error(b1, b2))
        # calculate horizontal strip error (excludes the corner region,
        # since it is already included above)
        if self.__row_index != 0:
            b1 = self.__data[0: self.__overlap_size[0], self.__overlap_size[1]:]
            b2 = patch[: self.__overlap_size[0], self.__overlap_size[1]:]
            total_error += np.sum(self.__calc_pixel_error(b1, b2))
        return total_error

    def stitch(self, patch):
        """Blend `patch` into the block along minimum-error seams.

        Builds dynamic-programming cost surfaces over the vertical and/or
        horizontal overlap strips, then copies the existing output pixels
        on the anchor side of the cheapest seam into `patch`.
        """
        if self.__row_index == 0 and self.__col_index == 0:
            return patch
        vertical_error, vertical_offset = None, None
        horizontal_error, horizontal_offset = None, None
        if self.__col_index != 0:
            b1 = self.__data[:, 0: self.__overlap_size[1]]
            b2 = patch[:, 0: self.__overlap_size[1]]
            vertical_error = self.__calc_pixel_error(b1, b2)
            error_shape = vertical_error.shape
            vertical_offset = np.zeros(error_shape, np.int8)
            # accumulate seam costs; offset stores the column step
            # (-1 / 0 / +1) towards the cheapest continuation
            for i in range(error_shape[0] - 2, -1, -1):
                for j in range(0, error_shape[1]):
                    fix_offset = -1
                    s_i = j - 1
                    if s_i < 0:
                        s_i = 0
                        fix_offset = 0
                    e_i = j + 2
                    if e_i > error_shape[1]:
                        e_i = error_shape[1]
                    # NOTE(review): this reads row i-1 (above) even though
                    # the sweep runs bottom-up; kept as-is -- verify the
                    # intended DP direction before changing.
                    temp = vertical_error[i - 1, s_i:e_i]
                    vertical_error[i, j] += np.min(temp)
                    vertical_offset[i, j] = np.argmin(temp) + fix_offset
        if self.__row_index != 0:
            b1 = self.__data[: self.__overlap_size[0], :]
            b2 = patch[: self.__overlap_size[0], :]
            horizontal_error = self.__calc_pixel_error(b1, b2)
            error_shape = horizontal_error.shape
            horizontal_offset = np.zeros(error_shape, np.int8)
            for i in range(error_shape[1] - 2, -1, -1):
                for j in range(0, error_shape[0]):
                    fix_offset = -1
                    s_i = j - 1
                    if s_i < 0:
                        s_i = 0
                        fix_offset = 0
                    e_i = j + 2
                    # BUGFIX: j indexes rows here, so clamp by the row
                    # count (error_shape[0]); the original clamped by the
                    # column count, copied from the vertical case.
                    if e_i > error_shape[0]:
                        e_i = error_shape[0]
                    temp = horizontal_error[s_i:e_i, i - 1]
                    horizontal_error[j, i] += np.min(temp)
                    horizontal_offset[j, i] = np.argmin(temp) + fix_offset
        if self.__row_index == 0:
            # only a vertical seam (top row of the output grid)
            min_j = np.argmin(vertical_error[0, :])
            patch[0, :min_j] = self.__data[0, :min_j]
            for i in range(1, vertical_error.shape[0]):
                min_j += vertical_offset[i - 1, min_j]
                patch[i, 0:min_j] = self.__data[i, 0:min_j]
        elif self.__col_index == 0:
            # only a horizontal seam (left column of the output grid)
            min_i = np.argmin(horizontal_error[:, 0])
            patch[0:min_i, 0] = self.__data[0:min_i, 0]
            for j in range(1, horizontal_error.shape[1]):
                min_i += horizontal_offset[min_i, j - 1]
                patch[0:min_i, j] = self.__data[0:min_i, j]
        else:
            # both seams: start from the cheapest cell of the corner region
            error = vertical_error[0:self.__overlap_size[0], :] + horizontal_error[:, 0: self.__overlap_size[1]]
            # BUGFIX: recover 2-D indices of the flat argmin via
            # unravel_index; the original `flat / shape[0]` is a float in
            # Python 3 (crashes when used as an index) and divides by the
            # wrong axis for non-square overlaps.
            flat_min = np.argmin(error)
            minI, minJ = (int(v) for v in np.unravel_index(flat_min, error.shape))
            min_i, min_j = minI, minJ
            for j in range(minJ, horizontal_error.shape[1]):
                min_i += horizontal_offset[min_i, j - 1]
                patch[0:min_i, j] = self.__data[0:min_i, j]
            for i in range(minI, vertical_error.shape[0]):
                min_j += vertical_offset[i - 1, min_j]
                patch[i, 0:min_j] = self.__data[i, 0:min_j]
        return patch

    @staticmethod
    def __split_channels(array):
        """Return the R, G, B planes of an (H, W, 3) array."""
        r = array[:, :, 0]
        g = array[:, :, 1]
        b = array[:, :, 2]
        return r, g, b
def matched_crop(img, block_size, anchor):
    """Crop a `block_size` patch from `img` that best matches `anchor`.

    Parameters
    ----------
    img : ndarray, source image to crop from.
    block_size : array-like [m, n], size of the rectangle to cut.
    anchor : object exposing calc_error(patch); target to match against.

    Scans every candidate top-left position, scores each patch with
    `anchor.calc_error`, then picks uniformly at random among all
    candidates whose error is within 10% of the minimum.
    """
    img_size = (np.asarray(img.shape))[0:2]
    max_size = img_size - block_size
    # inf sentinels: positions never scored (early exit) can never fall
    # under the acceptance threshold. The original used ones(), which
    # could wrongly qualify unscanned cells.
    error = np.full(max_size, np.inf)
    min_error = np.inf
    found_perfect = False
    for i in range(0, max_size[0]):
        for j in range(0, max_size[1]):
            patch = img[i:i + block_size[0], j:j + block_size[1]]
            curr_error = anchor.calc_error(patch)
            error[i, j] = curr_error
            if curr_error < min_error:
                min_error = curr_error
                if min_error == 0.0:
                    found_perfect = True
                    break
        # BUGFIX: the original `break` only left the inner loop, so the
        # scan continued over the remaining rows; propagate the
        # perfect-match early exit to the outer loop as well.
        if found_perfect:
            break
    threshold = min_error * 1.1
    mask = (error <= threshold).nonzero()
    possible = len(mask[0])
    to_take = np.random.randint(0, possible)
    row_index = (mask[0])[to_take]
    col_index = (mask[1])[to_take]
    return img[row_index:row_index + block_size[0], col_index:col_index + block_size[1]]
|
{"hexsha": "cfc89ff984c71f6eded4721e22b3dfbbdd61f4af", "size": 7110, "ext": "py", "lang": "Python", "max_stars_repo_path": "quilt/tile/matched_tiling.py", "max_stars_repo_name": "deep110/Quilter", "max_stars_repo_head_hexsha": "f5e8da986558acb7d2689c6e6400cc26c844684e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-04-12T10:11:14.000Z", "max_stars_repo_stars_event_max_datetime": "2017-04-12T10:11:14.000Z", "max_issues_repo_path": "quilt/tile/matched_tiling.py", "max_issues_repo_name": "deep110/Quilter", "max_issues_repo_head_hexsha": "f5e8da986558acb7d2689c6e6400cc26c844684e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "quilt/tile/matched_tiling.py", "max_forks_repo_name": "deep110/Quilter", "max_forks_repo_head_hexsha": "f5e8da986558acb7d2689c6e6400cc26c844684e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.9090909091, "max_line_length": 113, "alphanum_fraction": 0.5278481013, "include": true, "reason": "import numpy", "num_tokens": 1833}
|
#
# This file is a part of MolecularGraph.jl
# Licensed under the MIT License http://opensource.org/licenses/MIT
#
@testset "graph.triangle" begin

@testset "triangles" begin
    # A path graph has no 3-cycles at all.
    path5 = pathgraph(5)
    @test isempty(triangles(path5))
    # A single triangle on vertices 1-3 is reported as the set {1, 2, 3}.
    onetri = plaingraph(5, [(1, 2), (2, 3), (3, 1)])
    @test issetequal(collect(triangles(onetri))[1], 1:3)
    # Two vertex-disjoint triangles embedded in a larger graph.
    twotri = plaingraph(8, [
        (1, 2), (2, 3), (1, 3), (3, 4), (4, 5),
        (5, 6), (4, 6), (5, 7), (7, 8), (8, 6)
    ])
    @test length(triangles(twotri)) == 2
    # Two triangles sharing the hub vertex 5.
    hubtri = plaingraph(10, [
        (1, 2), (2, 3), (3, 4), (3, 5), (5, 6),
        (6, 7), (7, 5), (5, 8), (8, 9), (9, 5), (5, 10)
    ])
    @test length(triangles(hubtri)) == 2
end

end # graph.triangle
|
{"hexsha": "26a8f067461a2d93052510d5ab5281819ac8950e", "size": 729, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/graph/triangle.jl", "max_stars_repo_name": "hhaensel/MolecularGraph.jl", "max_stars_repo_head_hexsha": "c54ccdf09274e36ed3d866604f99b497a39bfaf5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 126, "max_stars_repo_stars_event_min_datetime": "2019-01-28T06:54:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T08:45:46.000Z", "max_issues_repo_path": "test/graph/triangle.jl", "max_issues_repo_name": "timholy/MolecularGraph.jl", "max_issues_repo_head_hexsha": "90d6f6175f30023ffce92d9bbc386f8659866508", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 64, "max_issues_repo_issues_event_min_datetime": "2019-04-19T03:33:52.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T23:34:44.000Z", "max_forks_repo_path": "test/graph/triangle.jl", "max_forks_repo_name": "timholy/MolecularGraph.jl", "max_forks_repo_head_hexsha": "90d6f6175f30023ffce92d9bbc386f8659866508", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 30, "max_forks_repo_forks_event_min_datetime": "2019-02-07T04:08:52.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-22T03:33:20.000Z", "avg_line_length": 28.0384615385, "max_line_length": 67, "alphanum_fraction": 0.5294924554, "num_tokens": 303}
|
import os
import cv2
import json
import sys
import numpy as np
from dataset_utils.utils import FolderVideoReader
from dataset_utils.diamond_accumulator import Accumulator
# Morphological structuring elements shared by the vanishing-point
# detection routines below.
element_small = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))    # used to erode noise out of the foreground mask
element_big = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))    # used to dilate/consolidate foreground blobs and shrink the ROI mask
element_loc_max = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))  # window for local-maximum detection on the gradient magnitude
def get_vp1(cap, mask, debug=False):
    """Estimate the first vanishing point from sparse optical flow.

    Tracks Shi-Tomasi corners (cv2.goodFeaturesToTrack) between
    consecutive frames with pyramidal Lucas-Kanade flow, keeps tracks
    that moved more than 2 px, and accumulates their motion lines in a
    diamond-space `Accumulator`; the accumulator peak is returned.

    Parameters
    ----------
    cap : video source with a `read()` method returning (ret, frame).
    mask : uint8 ROI mask; also defines the accumulator frame size.
    debug : when True, visualizes the tracks with cv2.imshow.

    Returns
    -------
    The accumulated vanishing point (at most 1000 frames are processed).
    """
    lk_params = dict(winSize=(31, 31), maxLevel=4, criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
    feature_params = dict(maxCorners=20, qualityLevel=0.4, minDistance=10, blockSize=11)
    ret, prev_frame = cap.read()
    # prev_frame = cv2.bitwise_and(prev_frame, prev_frame, mask=mask)
    prev_gray = cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)
    acc = Accumulator(size=256, debug=debug, height=mask.shape[0], width=mask.shape[1])
    cnt = 0
    while ret and cnt < 1000:
        ret, next_frame = cap.read()
        if not ret or next_frame is None:
            continue
        # next_frame = cv2.bitwise_and(next_frame, next_frame, mask=mask)
        cnt += 1
        next_gray = cv2.cvtColor(next_frame, cv2.COLOR_BGR2GRAY)
        p0 = cv2.goodFeaturesToTrack(prev_gray, mask=mask, **feature_params)
        p1, st, err = cv2.calcOpticalFlowPyrLK(prev_gray, next_gray, p0, None, **lk_params)
        # keep only successfully tracked points (st == 1)
        good_p1 = p1[st == 1]
        good_p0 = p0[st == 1]
        # drop near-static tracks: displacement must exceed 2 px
        n = np.linalg.norm(good_p1 - good_p0, axis=1)
        p = np.concatenate([good_p0, good_p1], axis=1)
        p = p[n > 2]
        if debug:
            for i in range(good_p0.shape[0]):
                next_frame = cv2.line(next_frame, (int(good_p0[i, 0]), int(good_p0[i, 1])),
                                      (int(good_p1[i, 0]), int(good_p1[i, 1])), (255, 0, 0), 1)
                next_frame = cv2.circle(next_frame, (int(good_p0[i, 0]), int(good_p0[i, 1])), 3, (0, 0, 255))
            cv2.imshow("Found", next_frame)
            cv2.waitKey(1)
        # each (x0, y0, x1, y1) track defines a line voted into the accumulator
        acc.accumulate_xy_lines(p)
        prev_gray = next_gray
        if cnt % 100 == 0:
            vp = acc.get_vp()
            print("VP so far: {}".format(vp))
    vp = acc.get_vp()
    return vp
def get_vp2(vp1, cap, mask, skip=10, debug=False):
    """Estimate the second vanishing point given the first one.

    Isolates moving objects with MOG2 background subtraction, finds
    strong gradient local maxima ("edgelet seeds") on them, estimates
    each seed's dominant gradient direction from a 9x9 window via SVD,
    discards edgelets whose direction is nearly orthogonal to the ray
    towards `vp1`, and accumulates the remaining edgelet lines in a
    diamond-space accumulator conditioned on `vp1` and the principal
    point (taken at the image center).

    Parameters
    ----------
    vp1 : first vanishing point (x, y), as returned by `get_vp1`.
    cap : opened video source with a `read()` method.
    mask : uint8 ROI mask; zero pixels are ignored.
    skip : number of frames consumed per processed frame.
    debug : when True, draws accepted/rejected edgelets per frame.
    """
    pp = [mask.shape[1] / 2 + 0.5, mask.shape[0] / 2 + 0.5]
    # shrink the ROI so seeds near the mask border are excluded
    mask_reduced = cv2.erode(mask, element_big)
    back_sub = cv2.createBackgroundSubtractorMOG2()
    acc = Accumulator(size=256, debug=debug, height=mask.shape[0], width=mask.shape[1])
    # start negative: the first 10 processed frames only warm up the
    # background model (cnt > 0 gates the edgelet extraction)
    cnt = -10
    ret = True
    while ret and cnt < 1000:
        for _ in range(skip):
            ret, frame = cap.read()
        if frame is None:
            continue
        cnt += 1
        frame = cv2.bitwise_and(frame, frame, mask=mask)
        fg_mask = back_sub.apply(frame)
        # keep only best pts
        fg_mask = np.where(fg_mask > 127, 1.0, 0)
        fg_mask = cv2.erode(fg_mask, element_small)
        fg_mask = cv2.dilate(fg_mask, element_big)
        if cnt > 0:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            sobel_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=5)
            sobel_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=5)
            mag = np.sqrt(sobel_x ** 2 + sobel_y ** 2)
            # mag = mag / np.max(mag)
            # apply mask to image
            # gray = cv2.bitwise_and(mag, mag, mask=255*fg_mask.astype(np.uint8))
            # local maxima of the gradient magnitude, with plateaus removed
            mag_dilated = cv2.dilate(mag, element_loc_max)
            local_maxima = cv2.compare(mag, mag_dilated, cmpop=cv2.CMP_GE)
            # remove plateaus
            l_m_eroded = cv2.erode(local_maxima, element_loc_max)
            non_plateaus = cv2.compare(local_maxima, l_m_eroded, cmpop=cv2.CMP_GT)
            local_maxima_cleaned = cv2.bitwise_and(non_plateaus, local_maxima)
            # seeds: strong (mag > 450) local maxima on moving foreground,
            # away from the ROI border
            seeds = np.logical_and(np.logical_and(np.logical_and(local_maxima_cleaned > 0, fg_mask > 0), mag > 450),
                                   mask_reduced > 0)
            seeds_idx = np.argwhere(seeds)
            a_list = []
            b_list = []
            c_list = []
            p = []
            for seed in seeds_idx:
                i, j = seed
                # skip seeds whose 9x9 gradient window would leave the frame
                if 5 > i or i > frame.shape[0] - 5 or 5 > j or j > frame.shape[1] - 5:
                    continue
                window_x = sobel_x[i - 4:i + 5, j - 4:j + 5]
                window_y = sobel_y[i - 4:i + 5, j - 4:j + 5]
                matrix = np.column_stack([np.reshape(window_x, [81, 1]), np.reshape(window_y, [81, 1])])
                u, s, v = np.linalg.svd(matrix.T @ matrix)
                # q: anisotropy of the gradient field; d: dominant direction
                q = s[0] / s[1]
                d = u[:, 0]
                if q < 300:
                    continue  # not a clean straight edgelet
                # angle between d and the ray from the seed towards vp1
                # (seed is (row, col); np.flip gives (x, y))
                n_vp = vp1 - np.flip(seed)
                dot_product = np.dot(n_vp / np.linalg.norm(n_vp), d / np.linalg.norm(d))
                angle = np.arccos(dot_product)
                # reject edgelets whose direction is nearly orthogonal to
                # the vp1 ray (angle in ~(58.5°, 112.5°))
                if 0.325 * np.pi < angle < 0.625 * np.pi:
                    if debug:
                        frame = cv2.line(frame, (int(seed[1] - 10 * d[1]), int(seed[0] + 10 * d[0])),
                                         (int(seed[1] + 10 * d[1]), int(seed[0] - 10 * d[0])), (0, 0, 255), 1)
                        frame = cv2.line(frame, (int(vp1[0]), int(vp1[1])),
                                         (int(seed[1]), int(seed[0])), (0, 255, 255), 1)
                    continue
                # line in a*x + b*y + c = 0 form with (a, b) = d
                a_list.append(d[0])
                b_list.append(d[1])
                # NOTE(review): hard-coded 1920/1080 normalization -- this
                # presumably assumes Full-HD frames; confirm for other
                # input resolutions.
                c_list.append(- d[0] * seed[1] / 1920 - d[1] * seed[0] / 1080)
                # p.append([seed[1] - d[1], seed[0] + d[0], seed[1] + d[1], seed[0] - d[0]])
                if debug:
                    d = d / np.linalg.norm(d)
                    # print("Edgelet s:{}, q:{}, d:{}".format(seed, q, d))
                    frame = cv2.line(frame, (int(seed[1] - 10 * d[1]), int(seed[0] + 10 * d[0])),
                                     (int(seed[1] + 10 * d[1]), int(seed[0] - 10 * d[0])), (0, 255, 0), 1)
            acc.accumulate_abc_lines(np.array(a_list), np.array(b_list), np.array(c_list))
            # acc.accumulate_xy_lines(np.array(p))
            if debug:
                cv2.imshow("Found", frame)
                cv2.waitKey(1)
            if cnt % 100 == 0:
                vp = acc.get_conditional_vp(vp1, pp)
                print("VP so far: {}".format(vp))
    vp = acc.get_conditional_vp(vp1, pp)
    return vp
def calib_video(video_path, calib_path=None, debug=False, out_path=None):
    """Calibrate a traffic camera (two vanishing points) from a video.

    Parameters
    ----------
    video_path : path to a video file, or a directory of frames (then
        read with FolderVideoReader).
    calib_path : optional JSON file containing an existing
        'camera_calibration' entry; its vp1/vp2 are only printed for
        comparison.
    debug : forwarded to the VP detectors (enables visualization).
    out_path : optional path; when given, writes {'cars': [],
        'camera_calibration': {'vp1', 'vp2', 'pp'}} as JSON.
    """
    print('Calibrating for video: {}'.format(video_path))
    if os.path.isdir(video_path):
        cap = FolderVideoReader(video_path)
        video_dir = video_path
    else:
        cap = cv2.VideoCapture(video_path)
        video_dir = os.path.dirname(video_path)
    # use an optional ROI mask stored next to the video; otherwise a
    # full-frame mask (note: this branch consumes the first frame of cap)
    if os.path.exists(os.path.join(video_dir, 'video_mask.png')):
        mask = cv2.imread(os.path.join(video_dir, 'video_mask.png'), 0)
    else:
        ret, img = cap.read()
        mask = 255 * np.ones(img.shape[:2], dtype=np.uint8)
    if calib_path is not None:
        with open(calib_path, 'r+') as file:
            structure = json.load(file)
        camera_calibration = structure['camera_calibration']
        vp1_test, vp2_test = camera_calibration["vp1"], camera_calibration["vp2"]
        print("Test VP: {}, {}".format(vp1_test, vp2_test))
    vp1 = get_vp1(cap, mask, debug=debug)
    print("Detected vp1: {}".format(vp1))
    # rewind before the second pass
    cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
    vp2 = get_vp2(vp1, cap, mask, debug=debug, skip=3)
    print("Detected vp2: {}".format(vp2))
    if out_path is not None:
        # principal point assumed at the frame center
        pp = [mask.shape[1] / 2 + 0.5, mask.shape[0] / 2 + 0.5]
        camera_calibration = {'vp1': vp1, 'vp2': vp2, 'pp': pp}
        json_structure = {'cars': [], 'camera_calibration': camera_calibration}
        with open(out_path, 'w') as file:
            json.dump(json_structure, file)
if __name__ == "__main__":
    # --- BrnoCompSpeed batch (kept commented for reference) ---------------
    # vid_path = 'D:/Skola/PhD/data/2016-ITS-BrnoCompSpeed/dataset'
    # results_path = 'D:/Skola/PhD/data/2016-ITS-BrnoCompSpeed/results/'
    #
    # vid_list = []
    # calib_list = []
    # for i in range(4, 7):
    #     dir_list = ['session{}_center'.format(i), 'session{}_left'.format(i), 'session{}_right'.format(i)]
    #     # dir_list = ['session{}_left'.format(i), 'session{}_right'.format(i)]
    #     vid_list.extend([os.path.join(vid_path, d, 'video.avi') for d in dir_list])
    #     calib_list.extend([os.path.join(results_path, d, 'system_SochorCVIU_Edgelets_BBScale_Reg.json') for d in dir_list])
    #
    # for v, c in zip(vid_list, calib_list):
    #     test_video(v, c)
    # --- DETRAC sequences: calibrate each and write calib.json ------------
    vid_dir = 'D:/Skola/PhD/data/DETRAC/Insight-MVT_Annotation_Test/'
    vids = ['MVI_39031', 'MVI_39051', 'MVI_39211', 'MVI_39271', 'MVI_39371', 'MVI_39501', 'MVI_39511', 'MVI_40742',
            'MVI_40743', 'MVI_40863', 'MVI_40864']
    # vids = ['MVI_40742', 'MVI_40743']
    vid_list = [os.path.join(vid_dir, v) for v in vids]
    calib_list = [os.path.join(vid_dir, v, 'calib.json') for v in vids]
    for vid, calib in zip(vid_list, calib_list):
        calib_video(vid, debug=False, out_path=calib)
|
{"hexsha": "1017a5ab9c6c3072022a666724e5a7f73099a6a5", "size": 9067, "ext": "py", "lang": "Python", "max_stars_repo_path": "dataset_utils/calib.py", "max_stars_repo_name": "kocurvik/retinanet_traffic_3D", "max_stars_repo_head_hexsha": "592ceac767750c65bb3d6678b36e6880a7bb0403", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2021-04-06T00:50:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T03:27:02.000Z", "max_issues_repo_path": "dataset_utils/calib.py", "max_issues_repo_name": "kocurvik/retinanet_traffic_3D", "max_issues_repo_head_hexsha": "592ceac767750c65bb3d6678b36e6880a7bb0403", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2021-07-13T12:47:41.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-05T15:08:51.000Z", "max_forks_repo_path": "dataset_utils/calib.py", "max_forks_repo_name": "kocurvik/retinanet_traffic_3D", "max_forks_repo_head_hexsha": "592ceac767750c65bb3d6678b36e6880a7bb0403", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-07-15T12:22:06.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-01T03:12:36.000Z", "avg_line_length": 39.9427312775, "max_line_length": 125, "alphanum_fraction": 0.5587294585, "include": true, "reason": "import numpy", "num_tokens": 2731}
|
import numpy as np
import matplotlib.pyplot as plt
from typing import List, Tuple
from pegasusio import read_input, UnimodalData
from . import estimate_background_probs, demultiplex
def down_sampling(rna_gt: UnimodalData, hto_gt: UnimodalData, probs: List[float], n_threads: int = 1):
    """Measure demultiplexing consistency under hashtag down-sampling.

    For every keep-probability in `probs`, binomially thins the hashtag
    count matrix, re-runs background estimation and demultiplexing, and
    compares the resulting assignments against the ground-truth ones.

    Returns
    -------
    (fracs, accuracy) : two lists, one entry per probability -- the
    retained fraction of hashtag counts and the fraction of cells whose
    assignment matches the ground truth.
    """
    # binomial thinning per count; the +1e-4 guards against float round-down
    thin = np.vectorize(lambda c, keep: np.random.binomial(int(c + 1e-4), keep, size=1)[0])
    n_cells = rna_gt.shape[0]
    total_hto = hto_gt.X.sum()
    fracs = []
    accuracy = []
    for keep_prob in probs:
        rna = rna_gt.copy()
        hto = hto_gt.copy()
        hto.X.data = thin(hto.X.data, keep_prob)
        # drop cells that lost all hashtag counts
        nonempty = hto.X.sum(axis=1).A1 > 0
        hto = hto[nonempty, ].copy(deep=False)
        fracs.append(hto.X.sum() / total_hto)
        estimate_background_probs(hto)
        demultiplex(rna, hto, n_threads=n_threads)
        assigned = rna.obs["assignment"].values.astype("str")
        truth = rna_gt.obs["assignment"].values.astype("str")
        accuracy.append(sum(assigned == truth) / n_cells)
    return fracs, accuracy
def plot_down_sampling(
    demuxEM_res_file: str,
    out_file: str,
    probs: List[float] = [i / 10.0 for i in range(9, 0, -1)],
    n_threads: int = 1,
    dpi: int = 500,
    figsize: Tuple[float, float] = None,
):
    """Plot demultiplexing consistency vs. retained hashtag fraction.

    Loads the RNA and hashing modalities from `demuxEM_res_file`, runs
    `down_sampling` over the keep-probabilities in `probs` (default 0.9
    down to 0.1), and saves the consistency curve to `out_file`.

    Note: `probs` uses a mutable default list -- safe here since it is
    never modified in place.
    """
    data = read_input(demuxEM_res_file)
    rna_gt = data.get_data(modality = "rna")
    hto_gt = data.get_data(modality = "hashing")
    fracs, accuracy = down_sampling(rna_gt, hto_gt, probs, n_threads=n_threads)
    plt.plot(fracs, accuracy, ".-")
    ax = plt.gca()
    # x axis runs from all counts kept (1.0) down to none (0.0)
    ax.set_xlim(1.0, 0.0)
    ax.set_ylim(0.79, 1.01)
    # format y ticks as percentages
    vals = ax.get_yticks()
    ax.set_yticklabels(["{:.0%}".format(v) for v in vals])
    ax.set_xlabel("Fraction of hashtag UMIs")
    ax.set_ylabel("Consistency")
    if figsize is not None:
        plt.gcf().set_size_inches(*figsize)
    plt.savefig(out_file, dpi=dpi)
    plt.close()
|
{"hexsha": "733e7fb1b6a05c4503424d445da432ca0b669457", "size": 1927, "ext": "py", "lang": "Python", "max_stars_repo_path": "demuxEM/tools/down_sampling.py", "max_stars_repo_name": "slowkow/demuxEM", "max_stars_repo_head_hexsha": "6c2b851b25dc30633e5abfa57d687e31b25a7f16", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-17T08:39:36.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-17T08:39:36.000Z", "max_issues_repo_path": "demuxEM/tools/down_sampling.py", "max_issues_repo_name": "slowkow/demuxEM", "max_issues_repo_head_hexsha": "6c2b851b25dc30633e5abfa57d687e31b25a7f16", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2020-07-12T16:40:18.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-05T04:29:40.000Z", "max_forks_repo_path": "demuxEM/tools/down_sampling.py", "max_forks_repo_name": "slowkow/demuxEM", "max_forks_repo_head_hexsha": "6c2b851b25dc30633e5abfa57d687e31b25a7f16", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-07-12T15:09:23.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-12T15:09:23.000Z", "avg_line_length": 29.6461538462, "max_line_length": 102, "alphanum_fraction": 0.624805397, "include": true, "reason": "import numpy", "num_tokens": 572}
|
import numpy as np
from pycompss.api.api import compss_wait_on
from pycompss.api.constraint import constraint
from pycompss.api.parameter import COLLECTION_IN, COLLECTION_OUT, \
Type, Depth
from pycompss.api.task import task
from scipy.sparse import issparse
from scipy.sparse import vstack as vstack_sparse
from sklearn.base import BaseEstimator
from sklearn.utils import validation
from dislib.cluster.dbscan.classes import Region
from dislib.data.array import Array
class DBSCAN(BaseEstimator):
""" Perform DBSCAN clustering.
This algorithm requires data to be arranged in a multidimensional grid.
The fit method re-arranges input data before running the
clustering algorithm. See ``fit()`` for more details.
Parameters
----------
eps : float, optional (default=0.5)
The maximum distance between two samples for them to be considered as
in the same neighborhood.
min_samples : int, optional (default=5)
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
n_regions : int, optional (default=1)
Number of regions per dimension in which to divide the feature space.
The total number of regions generated is equal to ``n_regions`` ^
``len(dimensions)``.
dimensions : iterable, optional (default=None)
Integer indices of the dimensions of the feature space that should be
divided. If None, all dimensions are divided.
max_samples : int, optional (default=None)
Setting max_samples to an integer results in the paralellization of
the computation of distances inside each region of the grid. That
is, each region is processed using various parallel tasks, where each
task finds the neighbours of max_samples samples.
This can be used to balance the load in scenarios where samples are not
evenly distributed in the feature space.
Attributes
----------
n_clusters : int
Number of clusters found. Accessing this member performs a
synchronization.
Examples
--------
>>> from dislib.cluster import DBSCAN
>>> import dislib as ds
>>> import numpy as np
>>>
>>>
>>> if __name__ == '__main__':
>>> arr = np.array([[1, 2], [2, 2], [2, 3], [8, 7], [8, 8], [25, 80]])
>>> x = ds.array(arr, block_size=(2, 2))
>>> dbscan = DBSCAN(eps=3, min_samples=2)
>>> y = dbscan.fit_predict(x)
>>> print(y.collect())
"""
def __init__(self, eps=0.5, min_samples=5, n_regions=1,
dimensions=None, max_samples=None):
assert n_regions >= 1, \
"Number of regions must be greater or equal to 1."
self.eps = eps
self.min_samples = min_samples
self.n_regions = n_regions
self.dimensions = dimensions
self.max_samples = max_samples
def fit(self, x, y=None):
""" Perform DBSCAN clustering on x.
Samples are initially rearranged in a multidimensional grid with
``n_regions`` regions per dimension in ``dimensions``. All regions
in a specific dimension have the same size.
Parameters
----------
x : ds-array
Input data.
y : ignored
Not used, present here for API consistency by convention.
Returns
-------
self : DBSCAN
"""
assert self.n_regions >= 1, \
"Number of regions must be greater or equal to 1."
self._subset_sizes = []
self._sorting = []
self._components = None
self._labels = None
n_features = x.shape[1]
sparse = x._sparse
self._dimensions = self.dimensions
if self.dimensions is None:
self._dimensions = range(n_features)
n_dims = len(self._dimensions)
arranged_data, indices, sizes = _arrange_samples(x, self.n_regions,
self._dimensions)
grid = np.empty((self.n_regions,) * n_dims, dtype=object)
region_widths = self._compute_region_widths(x)[self._dimensions]
sizes = compss_wait_on(sizes)
# Create regions
for subset_idx, region_id in enumerate(np.ndindex(grid.shape)):
subset = arranged_data[subset_idx]
subset_size = sizes[subset_idx]
grid[region_id] = Region(region_id, subset, subset_size,
self.eps, sparse)
# Set region neighbours
distances = np.ceil(self.eps / region_widths)
for region_id in np.ndindex(grid.shape):
self._add_neighbours(grid[region_id], grid, distances)
# Run dbscan on each region
for region_id in np.ndindex(grid.shape):
region = grid[region_id]
region.partial_dbscan(self.min_samples, self.max_samples)
# Compute label equivalences between different regions
equiv_list = []
for region_id in np.ndindex(grid.shape):
equiv_list.append(grid[region_id].get_equivalences())
equivalences = _merge_dicts(*equiv_list)
# Compute connected components
self._components = _get_connected_components(equivalences)
# Update region labels according to equivalences
final_labels = []
for subset_idx, region_id in enumerate(np.ndindex(grid.shape)):
region = grid[region_id]
region.update_labels(self._components)
final_labels.append(region.labels)
label_blocks = _rearrange_labels(final_labels, indices, x._n_blocks[0])
self._labels = Array(blocks=label_blocks,
top_left_shape=(x._top_left_shape[0], 1),
reg_shape=(x._reg_shape[0], 1),
shape=(x._shape[0], 1), sparse=False)
return self
def fit_predict(self, x):
    """ Run DBSCAN clustering on ``x`` and return the cluster label of
    each sample.

    Parameters
    ----------
    x : ds-array
        Input data.

    Returns
    -------
    y : ds-array, shape=(n_samples , 1)
        Cluster labels.
    """
    # fit() returns self and stores the computed labels on the estimator.
    return self.fit(x)._labels
@staticmethod
def _add_neighbours(region, grid, distances):
    # Link ``region`` to every other grid cell whose index offset lies
    # within ``distances`` along every dimension.
    own_index = np.array(region.id)
    for cell_index in np.ndindex(grid.shape):
        if cell_index == region.id:
            continue
        offset = np.abs(own_index - np.array(cell_index))
        if np.all(offset <= distances):
            region.add_neighbour(grid[cell_index])
@property
def n_clusters(self):
    # Number of connected components found by fit(); check_is_fitted
    # raises if fit() has not been run yet.
    validation.check_is_fitted(self, '_components')
    components = compss_wait_on(self._components)
    self._components = components
    return len(components)
def _compute_region_widths(self, x):
    # Width of one grid region per feature: (max - min) / n_regions.
    low = x.min().collect()
    high = x.max().collect()
    if issparse(low):
        low = low.toarray()
        high = high.toarray()
    widths = (high - low) / self.n_regions
    return widths.reshape(-1, )
def _arrange_samples(x, n_regions, dimensions=None):
    """ Distribute the samples of ``x`` over an n-dimensional grid.

    Each selected dimension of the feature space is cut into ``n_regions``
    equally sized intervals (based on the per-feature minimum and maximum
    of ``x``), and every sample is routed to the grid cell it falls into.

    Parameters
    ----------
    x : ds-array
        Input data.
    n_regions : int
        Number of regions per dimension in which to split the feature space.
    dimensions : iterable, optional (default=None)
        Integer indices of the dimensions to split. If None, all dimensions
        are split.

    Returns
    -------
    grid_data : list
        One nd-array (future) per region holding that region's samples,
        merged across all row blocks of ``x``.
    sorting : list of lists
        sorting[i][j] holds the row-local indices of the samples of row
        block ``i`` that landed in region ``j``.
    sizes : list
        Sizes (futures) of the arrays in grid_data.
    """
    if dimensions is None:
        dimensions = range(x.shape[1])
    n_dims = len(dimensions)
    # min() and max() calls have synchronization points
    low = x.min()
    high = x.max()
    bins = _generate_bins(low._blocks, high._blocks, dimensions, n_regions)
    return _arrange_data(x, (n_regions,) * n_dims, bins, dimensions,
                         n_regions ** n_dims)
def _arrange_data(x, g_shape, bins, dims, total_regions):
    """Split every row block of ``x`` into regions and merge per region."""
    per_block_regions = []
    per_block_indices = []
    for row in x._iterator(axis=0):
        regions = [object() for _ in range(total_regions)]
        row_indices = [object() for _ in range(total_regions)]
        # _arrange_block fills (in place) one nd-array of samples per
        # region and the row-local indices of the samples routed to each
        # region.
        _arrange_block(row._blocks, bins, dims, g_shape, regions,
                       row_indices)
        per_block_regions.append(regions)
        per_block_indices.append(row_indices)
    # column i of this object matrix holds region i's pieces, one per
    # row block of x
    region_matrix = np.asarray(per_block_regions)
    merged_samples = []
    sizes = []
    for col in range(region_matrix.shape[1]):
        # merge the pieces of one region into a single nd-array (convert
        # back to list because collections do not support np.arrays)
        samples, size = _merge_samples(region_matrix[:, col].tolist(),
                                       x._sparse)
        merged_samples.append(samples)
        sizes.append(size)
    # merged_samples holds one nd-array per region with that region's data
    return merged_samples, per_block_indices, sizes
def _rearrange_labels(labels, indices, n_blocks):
    """
    Send the computed labels back to the row-block layout of the
    original data.
    """
    index_arr = np.asarray(indices)
    rearranged = []
    for region_idx, region_labels in enumerate(labels):
        row_blocks = [object() for _ in range(n_blocks)]
        # row_blocks[j] receives region ``region_idx``'s labels that
        # belong to original row block j
        _rearrange_region(region_labels,
                          index_arr[:, region_idx].tolist(), row_blocks)
        rearranged.append(row_blocks)
    rearranged_arr = np.asarray(rearranged)
    out_blocks = []
    # merge and sort the rearranged labels to build the final label blocks
    for blk in range(rearranged_arr.shape[1]):
        merged = _merge_labels(rearranged_arr[:, blk].tolist(),
                               indices[blk])
        out_blocks.append([merged])
    return out_blocks
@constraint(computing_units="${ComputingUnits}")
@task(mn={Type: COLLECTION_IN, Depth: 2},
      mx={Type: COLLECTION_IN, Depth: 2},
      returns=1)
def _generate_bins(mn, mx, dimensions, n_regions):
    # For every requested dimension, build n_regions equal-width bins
    # spanning [min, max] of that feature (n_regions + 1 edges).
    low = Array._merge_blocks(mn)[0]
    high = Array._merge_blocks(mx)[0]
    if issparse(low):
        low = low.toarray()[0]
        high = high.toarray()[0]
    return [np.linspace(low[dim], high[dim], n_regions + 1)
            for dim in dimensions]
@constraint(computing_units="${ComputingUnits}")
@task(blocks={Type: COLLECTION_IN, Depth: 2},
      samples_list={Type: COLLECTION_OUT},
      indices={Type: COLLECTION_OUT})
def _arrange_block(blocks, bins, dimensions, shape, samples_list, indices):
    """Split one row block of samples into grid regions (in place).

    Results are written into the output collections: ``samples_list[i]``
    receives the samples of region ``i`` and ``indices[i]`` their
    row-local positions within this block.
    """
    x = Array._merge_blocks(blocks)
    # number of bins per dimension (the grid is n_bins wide in every dim)
    n_bins = shape[0]
    region_indices = list()
    # find the samples that belong to each region iterating over each dimension
    for dim_bins, dim in zip(bins, dimensions):
        col = x[:, dim]
        if issparse(col):
            col = col.toarray().flatten()
        # digitize a dimension of all the samples into the corresponding bins
        # region_idx represents the region index at dimension dim of each
        # sample
        region_idx = np.digitize(col, dim_bins) - 1
        # clamp values on/above the last edge into the last bin
        region_idx[region_idx >= n_bins] = n_bins - 1
        region_indices.append(region_idx)
    # idx_arr is an nd-array of shape (n_dimensions, n_samples), where each
    # column represents the region indices of each sample (i.e., the region
    # where the sample should go)
    idx_arr = np.asarray(region_indices)
    # apply np.ravel_multi_index to each column of idx_arr to get a 1-D index
    # that represents each region in the output list
    out_idx = np.apply_along_axis(np.ravel_multi_index, 0, idx_arr, dims=shape)
    for i in range(len(samples_list)):
        # insert all the samples that belong to a region to the corresponding
        # place in the output list.
        sample_indices = np.where(out_idx == i)
        samples_list[i] = x[sample_indices]
        # sorting contains which samples go to which region
        indices[i] = sample_indices
@constraint(computing_units="${ComputingUnits}")
@task(indices=COLLECTION_IN,
      blocks=COLLECTION_OUT)
def _rearrange_region(labels, indices, blocks):
    """Redistribute this region's labels into per-row-block arrays.

    ``indices[i]`` holds the row-local indices of original row block ``i``
    that landed in this region; ``labels`` is laid out in the same order,
    so consecutive slices of it belong to consecutive row blocks.
    """
    offset = 0
    for i, ind in enumerate(indices):
        count = len(ind[0])
        blocks[i] = labels[offset:offset + count].reshape(-1, 1)
        offset += count
@constraint(computing_units="${ComputingUnits}")
@task(samples_list={Type: COLLECTION_IN}, returns=2)
def _merge_samples(samples_list, sparse):
    # Stack the per-row-block pieces of one region into a single array
    # and report its number of rows.
    stack = vstack_sparse if sparse else np.vstack
    samples = stack(samples_list)
    return samples, samples.shape[0]
@constraint(computing_units="${ComputingUnits}")
@task(labels_list=COLLECTION_IN, indices=COLLECTION_IN, returns=1)
def _merge_labels(labels_list, indices):
    stacked = np.vstack(labels_list)
    # original position of each label within this row block
    original_pos = np.hstack(np.asarray(indices).flatten())
    return np.take(stacked, original_pos).reshape(-1, 1)
@constraint(computing_units="${ComputingUnits}")
@task(returns=1)
def _merge_dicts(*dicts):
    # Later dicts overwrite duplicate keys, mirroring dict.update order.
    merged = {}
    for mapping in dicts:
        merged.update(mapping)
    return merged
@constraint(computing_units="${ComputingUnits}")
@task(returns=1)
def _get_connected_components(equiv):
    """Group equivalent cluster labels into connected components.

    ``equiv`` maps a label to the set of labels it is equivalent to; the
    graph is made symmetric first, then traversed to collect one list of
    labels per connected component.
    """
    # Add inverse equivalences
    # NOTE(review): assumes every neighbour also appears as a key of
    # ``equiv`` (otherwise equiv[neigh] raises KeyError) -- TODO confirm
    # against the producer of the equivalence dicts.
    for node, neighs in equiv.items():
        for neigh in neighs:
            equiv[neigh].add(node)
    visited = set()
    connected = []
    for node, neighbours in equiv.items():
        if node in visited:
            continue
        # start a new component rooted at this unvisited node; the helper
        # appends every label reachable from it to connected[-1]
        connected.append([node])
        _visit_neighbours(equiv, neighbours, visited, connected)
    return connected
def _visit_neighbours(equiv, neighbours, visited, connected):
to_visit = list(neighbours)
while len(to_visit) > 0:
neighbour = to_visit.pop()
if neighbour in visited:
continue
visited.add(neighbour)
connected[-1].append(neighbour)
if neighbour in equiv:
to_visit.extend(equiv[neighbour])
|
{"hexsha": "2178c258c3f808eda5c604bf765786d181f164e6", "size": 15311, "ext": "py", "lang": "Python", "max_stars_repo_path": "dislib/cluster/dbscan/base.py", "max_stars_repo_name": "alexbarcelo/dislib", "max_stars_repo_head_hexsha": "989f81f235ae30b17410a8d805df258c7d931b38", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 36, "max_stars_repo_stars_event_min_datetime": "2018-10-22T19:21:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T12:10:01.000Z", "max_issues_repo_path": "dislib/cluster/dbscan/base.py", "max_issues_repo_name": "alexbarcelo/dislib", "max_issues_repo_head_hexsha": "989f81f235ae30b17410a8d805df258c7d931b38", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 329, "max_issues_repo_issues_event_min_datetime": "2018-11-22T18:04:57.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-18T01:26:55.000Z", "max_forks_repo_path": "dislib/cluster/dbscan/base.py", "max_forks_repo_name": "alexbarcelo/dislib", "max_forks_repo_head_hexsha": "989f81f235ae30b17410a8d805df258c7d931b38", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 21, "max_forks_repo_forks_event_min_datetime": "2019-01-10T11:46:39.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-17T12:59:45.000Z", "avg_line_length": 32.7858672377, "max_line_length": 79, "alphanum_fraction": 0.6439161387, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3482}
|
import argparse
from datetime import datetime
from dateutil.relativedelta import relativedelta
import json
import numpy as np
import os
import pandas as pd
import scipy.stats
from subprocess import call
import sys
# Command-line interface: four positional arguments plus two optional
# flags controlling incident/prevalent disease classification.
parser = argparse.ArgumentParser(
    description="""Join existing demographic and health data file with new Hospital Episode Statistics data
    for subsequent health analyses""",
    add_help=False
)
# required args
parser.add_argument('inCSV', type=str,
                    help="""CSV file containing current data
                    If the path contains spaces, it must be enclosed in
                    quote marks (e.g. "/data/dph.../data sets/")""")
parser.add_argument('hesCSV', type=str, help="hospital episode statistics csv")
parser.add_argument('outCSV', type=str, help="""output for analysis csv""")
parser.add_argument('diseaseJSON', type=str, default="icdGroups.json", help="""target ICD10/ICD9 groups json""")
# optional args
parser.add_argument('--incident_prevalent', type=bool, default = False, help="""Should columns for incident and prevalent disease be added?""")
parser.add_argument('--date_column', type=str, default = 'endTime', help="""Name of date column in 'inCSV'""")
# parse arguments
# Fewer than the 4 required positionals -> show usage and exit with error.
if len(sys.argv) < 5:
    parser.print_help()
    sys.exit(-1)
args = parser.parse_args()
'''
Add date parsers
'''
# HES dates arrive as day/month/year; unparseable values become NaT
hes_format_parser = lambda x: pd.to_datetime(x, format = "%d/%m/%Y", errors="coerce")
'''
Read file of current data
'''
print('read ' + args.inCSV)
dAll = pd.read_csv(args.inCSV)
# 'eid' is the participant identifier used to join the two data sets
if ('eid' not in list(dAll.columns)):
    sys.exit('inCSV must contain a participant ID column under \'eid\'')
if args.incident_prevalent:
    # incident/prevalent classification needs a reference date per row
    if (str(args.date_column) not in list(dAll.columns)):
        sys.exit('Date column needs to be a column of inCSV in order to define incident and prevalent disease.')
    #print(dAll[args.date_column])
    dAll[args.date_column] = pd.to_datetime(dAll[args.date_column], format = "%Y-%m-%d %H:%M:%S", errors = "coerce")
    print(dAll[args.date_column].head())
dAll = dAll.set_index('eid')
'''
Read HES file
'''
print('read and clean ' + args.hesCSV)
dHES = pd.read_csv(args.hesCSV, parse_dates=['epistart','disdate'], date_parser= hes_format_parser)
dHES = dHES[dHES['eid'].isin(dAll.index)] # restrict to participants in dAll
print(len(dHES), 'len dataframe')
# diseaseJSON maps disease names to ICD-10/ICD-9 code patterns
diseaseList = json.loads(open(args.diseaseJSON).read())
def cleanHESstr(s):
    """Sanitise a disease name so it can be used as a column label:
    trim whitespace, drop '&', apostrophes and commas, and replace the
    remaining spaces with dashes."""
    cleaned = s.strip()
    # order matters: spaces created by earlier removals become dashes
    for old, new in (('&', ''), ("'", ''), (' ', '-'), (',', '')):
        cleaned = cleaned.replace(old, new)
    return cleaned
print('Finding participants with: ')
# some episodes lack an episode start date; fall back to the discharge date
dHES.loc[dHES['epistart'].isnull(), 'epistart'] = dHES['disdate']
# check for history of specific diseases
for outcome in diseaseList:
    outcomeName = cleanHESstr(outcome['disease'])
    if outcome['level'] == "all":
        # match the target codes in any diagnosis position
        e = dHES[['eid','epistart']]\
            [(dHES['diag_icd10'].str.contains(outcome['icd10'], na=False)) | \
            (dHES['diag_icd9'].str.contains(outcome['icd9'], na=False)) ]
    if outcome['level'] == "primary":
        # restrict the match to primary diagnoses (level == 1)
        dHESPrimary = dHES[dHES['level'] == 1]
        e = dHESPrimary[['eid','epistart']]\
            [(dHESPrimary['diag_icd10'].str.contains(outcome['icd10'], na=False)) | \
            (dHESPrimary['diag_icd9'].str.contains(outcome['icd9'], na=False)) ]
    # earliest matching episode date per participant, as a one-column frame
    outcomePts = e[['epistart']].groupby(e['eid']).min()
    outcomePts.columns = [outcomeName]
    print(outcomeName)
    dAll = dAll.join(outcomePts)
    if args.incident_prevalent:
        # incident: first episode strictly after the reference date
        dAll[outcomeName + "-incident"] = 0
        dAll.loc[(dAll[outcomeName] > dAll[args.date_column]) & (~dAll[outcomeName].isnull()), outcomeName + '-incident'] = 1
        # prevalent: first episode on or before the reference date
        dAll[outcomeName + "-prevalent"] = 0
        dAll.loc[(dAll[outcomeName] <= dAll[args.date_column]) & (~dAll[outcomeName].isnull()), outcomeName + '-prevalent'] = 1
    print(outcomeName, ', n = ', len(dAll[~dAll[outcomeName].isnull()]))
'''
Write final output file...
'''
print('write final cleaned file to ' + args.outCSV)
dAll.to_csv(args.outCSV)
print('finished')
|
{"hexsha": "a1e68197e8bcd47daf235463656e275554fc3754", "size": 4020, "ext": "py", "lang": "Python", "max_stars_repo_path": "addNewHES.py", "max_stars_repo_name": "kasbohm/ukb_download_and_prep_template", "max_stars_repo_head_hexsha": "ef8c0171cb0f61d52b4dacf5edf2270ec42a48a1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-09-07T14:34:28.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-20T12:37:31.000Z", "max_issues_repo_path": "addNewHES.py", "max_issues_repo_name": "kasbohm/ukb_download_and_prep_template", "max_issues_repo_head_hexsha": "ef8c0171cb0f61d52b4dacf5edf2270ec42a48a1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-03-05T09:52:57.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-27T10:52:21.000Z", "max_forks_repo_path": "addNewHES.py", "max_forks_repo_name": "kasbohm/ukb_download_and_prep_template", "max_forks_repo_head_hexsha": "ef8c0171cb0f61d52b4dacf5edf2270ec42a48a1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-03-17T09:01:18.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-03T21:49:31.000Z", "avg_line_length": 37.9245283019, "max_line_length": 143, "alphanum_fraction": 0.6654228856, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1077}
|
! Calculate PDF of a scalar field.
subroutine calc_pdf(dist_mf, lo, hi, ng, num_bins, bin_edges, bin_count, bin_x_sum)
  use, intrinsic :: iso_c_binding
  implicit none
  ! Scalar field on the box [lo, hi] surrounded by ng ghost cells.
  integer(c_int), intent(in) :: lo(3), hi(3), ng
  real(c_double), intent(in) :: dist_mf (lo(1)-ng:hi(1)+ng, lo(2)-ng:hi(2)+ng, lo(3)-ng:hi(3)+ng, 1)
  integer(c_int), intent(in) :: num_bins
  ! Bin boundaries; bin jj spans [bin_edges(jj), bin_edges(jj+1)).
  ! NOTE(review): edges are assumed sorted ascending -- TODO confirm with caller.
  real(c_double), intent(in) :: bin_edges (num_bins+1)
  ! Outputs: per-bin sample count and per-bin sum of field values.
  integer(c_int), intent(out) :: bin_count (num_bins)
  real(c_double), intent(out) :: bin_x_sum (num_bins)
  real(c_double) :: x_min, x_max
  real(c_double) :: xi
  integer(c_int) :: i, j, k
  integer(c_int) :: bin_index, jj
  x_min = minval(bin_edges)
  x_max = maxval(bin_edges)
  bin_count (:) = 0
  bin_x_sum (:) = 0.0d0
  ! Only the valid (non-ghost) region contributes to the PDF.
  do k = lo(3), hi(3)
    do j = lo(2), hi(2)
      do i = lo(1), hi(1)
        xi = dist_mf(i, j, k, 1)
        ! Values outside the binned range are discarded entirely.
        if (xi < x_min .or. xi > x_max) cycle
        ! Find the bin each value belongs in.
        bin_index = -1
        do jj = 1, num_bins
          if (xi >= bin_edges(jj) .and. xi < bin_edges(jj+1)) then
            bin_index = jj
            exit
          end if
        end do
        ! If value exactly equals the largest bin edge, put it in the largest bin.
        if (bin_index == -1) bin_index = num_bins
        bin_count (bin_index) = bin_count (bin_index) + 1
        bin_x_sum (bin_index) = bin_x_sum (bin_index) + xi
      end do
    end do
  end do
end subroutine calc_pdf
|
{"hexsha": "b48a7b0f0052b1a3e1be79cb01beb33a60af1903", "size": 1528, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "calc_pdf.f90", "max_stars_repo_name": "bcfriesen/gimlet", "max_stars_repo_head_hexsha": "153771cfb5bf810e4f1ca9a8ec4549935a51320a", "max_stars_repo_licenses": ["BSD-3-Clause-LBNL"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-12-14T16:19:12.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-14T16:19:12.000Z", "max_issues_repo_path": "calc_pdf.f90", "max_issues_repo_name": "bcfriesen/gimlet", "max_issues_repo_head_hexsha": "153771cfb5bf810e4f1ca9a8ec4549935a51320a", "max_issues_repo_licenses": ["BSD-3-Clause-LBNL"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "calc_pdf.f90", "max_forks_repo_name": "bcfriesen/gimlet", "max_forks_repo_head_hexsha": "153771cfb5bf810e4f1ca9a8ec4549935a51320a", "max_forks_repo_licenses": ["BSD-3-Clause-LBNL"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.8301886792, "max_line_length": 102, "alphanum_fraction": 0.5667539267, "num_tokens": 484}
|
# Latent class models with one (loglinear independence) to three classes
data(election)
# Twelve candidate-evaluation items (six per candidate) as manifest
# variables; '~1' means an intercept-only model with no covariates.
f <- cbind(MORALG,CARESG,KNOWG,LEADG,DISHONG,INTELG,
MORALB,CARESB,KNOWB,LEADB,DISHONB,INTELB)~1
nes1 <- poLCA(f,election,nclass=1) # log-likelihood: -18647.31
nes2 <- poLCA(f,election,nclass=2) # log-likelihood: -17344.92
nes3 <- poLCA(f,election,nclass=3) # log-likelihood: -16714.66
# Three-class model with a single covariate (party)
f2a <- cbind(MORALG,CARESG,KNOWG,LEADG,DISHONG,INTELG,
MORALB,CARESB,KNOWB,LEADB,DISHONB,INTELB)~PARTY
# nrep=5 re-runs the estimation to reduce the risk of a local maximum
nes2a <- poLCA(f2a,election,nclass=3,nrep=5) # log-likelihood: -16222.32
# Design matrix over the 7-point party-ID scale (intercept + PARTY value);
# the multinomial logit transform below gives class-membership probabilities.
pidmat <- cbind(1,c(1:7))
exb <- exp(pidmat %*% nes2a$coeff)
matplot(c(1:7),(cbind(1,exb)/(1+rowSums(exb))),ylim=c(0,1),type="l",
main="Party ID as a predictor of candidate affinity class",
xlab="Party ID: strong Democratic (1) to strong Republican (7)",
ylab="Probability of latent class membership",lwd=2,col=1)
# Manually placed labels for the three latent classes
text(5.9,0.35,"Other")
text(5.4,0.7,"Bush affinity")
text(1.8,0.6,"Gore affinity")
|
{"hexsha": "151747b7a21aaaa70a90372b80052e62b87e04e2", "size": 1047, "ext": "r", "lang": "R", "max_stars_repo_path": "poLCA/poLCA_election_demo.r", "max_stars_repo_name": "matthew9602/R-demo", "max_stars_repo_head_hexsha": "301e343750b6e6874985d6db9770379443ff3e86", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "poLCA/poLCA_election_demo.r", "max_issues_repo_name": "matthew9602/R-demo", "max_issues_repo_head_hexsha": "301e343750b6e6874985d6db9770379443ff3e86", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "poLCA/poLCA_election_demo.r", "max_forks_repo_name": "matthew9602/R-demo", "max_forks_repo_head_hexsha": "301e343750b6e6874985d6db9770379443ff3e86", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.8571428571, "max_line_length": 72, "alphanum_fraction": 0.6991404011, "num_tokens": 404}
|
import torch
import torch.optim.lr_scheduler as lr_scheduler
from utils import lr_scheduler_ext, stacked_dict
from torch import nn
import pickle
import numpy as np
from collections import defaultdict
import importlib
from utils import WrappedSummaryWriter
import time
def debug_gradients_tbx(logger, config, net, epoch):
    """Log histograms of the network's parameters and/or their gradients
    to tensorboard, controlled by the ``debug`` section of ``config``.
    With neither ``weights`` nor ``gradients`` enabled this is a no-op.
    """
    options = config.get('debug', dict())
    # bins='doane' because bins='auto' causes tensorboard to implode
    # occasionally
    if options.get('weights', False):
        for pname, pvalue in net.named_parameters():
            logger.add_histogram('params/{}'.format(pname),
                                 pvalue.clone().detach().cpu().numpy(),
                                 epoch, bins='doane')
    if options.get('gradients', False):
        for pname, pvalue in net.named_parameters():
            logger.add_histogram('grads/{}'.format(pname),
                                 pvalue.grad.clone().detach().cpu().numpy(),
                                 epoch, bins='doane')
def debug_gradients(config, bi, net):
    """Interactively plot weights/gradients with matplotlib every
    ``config['mpl_debug']['step']`` batches; a no-op when 'mpl_debug'
    is absent or empty. Blocks on ``plt.show()``, so this is meant for
    manual debugging sessions only.
    """
    debug = config.get('mpl_debug', dict())
    if len(debug) == 0:
        return
    if bi % debug['step'] == 0:
        # this should be fine as a conditional import ...
        from utils import tensor_plot_helper, colorbar
        import matplotlib.pyplot as plt
        # shrink all font sizes so many small subplots stay readable
        params = {
            'legend.fontsize': 'xx-small',
            'axes.labelsize': 'xx-small',
            'axes.titlesize': 'xx-small',
            'xtick.labelsize': 'xx-small',
            'ytick.labelsize': 'xx-small'
        }
        plt.rcParams.update(params)
        named_parameters = list(net.named_parameters())
        if debug['what'] == 'tensors':
            # one figure per parameter: weight and gradient side by side
            for name, parameter in named_parameters:
                weight = parameter.data.cpu().numpy()
                grad = parameter.grad.data.cpu().numpy()
                fig, axes = plt.subplots(ncols=2)
                fig.suptitle(name)
                axes[0].set_title('{} w'.format(name))
                axes[1].set_title('{} g'.format(name))
                im = tensor_plot_helper(fig, axes[0], weight, 'seismic', symm=True, preference='vertical')
                colorbar(im)
                im = tensor_plot_helper(fig, axes[1], grad, 'seismic', symm=True, preference='vertical')
                colorbar(im)
                plt.show()
        elif debug['what'] == 'histograms':
            # single figure: weight histograms (row 0) over gradient
            # histograms (row 1), one column per parameter
            fig, axes = plt.subplots(ncols=len(named_parameters), nrows=2)
            axes[0, 0].set_ylabel('w')
            axes[1, 0].set_ylabel('g')
            grad_magnitudes = []
            names = []
            for i, (name, parameter) in enumerate(named_parameters):
                names.append(name)
                weight = parameter.data.cpu().numpy()
                grad = parameter.grad.data.cpu().numpy()
                # L2 norm of the full gradient tensor
                grad_magnitudes.append(np.sqrt(np.sum(grad ** 2)))
                axes[0, i].set_title(name)
                axes[0, i].hist(weight.flatten(), bins='auto', histtype='step', density=True)
                axes[1, i].hist(grad.flatten(), bins='auto', histtype='step', density=True)
            # second figure: per-parameter gradient magnitudes as stems
            fig, ax = plt.subplots()
            ax.set_title('gradient magnitudes')
            lll = len(grad_magnitudes)
            xs = np.arange(lll)
            ax.vlines(xs, np.zeros(lll), grad_magnitudes)
            ax.set_xticks(xs)
            ax.set_xticklabels(names, rotation=90)
            plt.show()
def recursive_detach(din):
    """Return a copy of ``din`` in which every tensor value, at any
    nesting depth, is replaced by a detached numpy array on the CPU;
    nested dicts are converted recursively."""
    return {
        key: recursive_detach(value) if isinstance(value, dict)
        else value.detach().cpu().numpy()
        for key, value in din.items()
    }
class Run(object):
    """Orchestrates training, evaluation, learning-rate search,
    checkpointing and inference for a single experiment configuration.

    The network, optimizer and LR-scheduler classes are resolved
    dynamically from ``config``; diagnostics go to tensorboard under
    ``runs/<run_id>/tensorboard`` (created lazily on first logger use).
    """

    def __init__(self, config, use_cuda=False, instantiate_net=True):
        self.config = config
        self.net = None
        if instantiate_net:
            self.instantiate_net(config)
        # per-split lists of recorded measures
        self.measures = dict(
            train=defaultdict(list),
            valid=defaultdict(list)
        )
        if use_cuda:
            print('using cuda')
            self.cuda()
        else:
            print('using cpu')
            self.cpu()
        # advanced to 0 on the first call to train_one_epoch()
        self.epoch = -1

    def __getattr__(self, name):
        # Lazily create the tensorboard writer on first access so that
        # merely constructing a Run does not touch the filesystem.
        if name == 'logger':
            log_dir = 'runs/{}/tensorboard'.format(self.config['run_id'])
            self.logger = WrappedSummaryWriter(log_dir)
            return self.logger
        # BUGFIX: previously every unknown attribute fell through here
        # without raising, so lookups silently succeeded and broke
        # hasattr()/AttributeError semantics for the whole object.
        raise AttributeError(name)

    def cuda(self):
        """Move the network (if any) to the GPU and remember the device."""
        # guard for instantiate_net=False, where no network exists yet
        if self.net is not None:
            self.net.cuda()
        self.CUDA = True

    def cpu(self):
        """Move the network (if any) to the CPU and remember the device."""
        if self.net is not None:
            self.net.cpu()
        self.CUDA = False

    def instantiate_net(self, config):
        """Build network, optimizer and LR scheduler from ``config``."""
        net_module = importlib.import_module(config['modules']['net']['name'])
        self.net = net_module.Net(self.config)
        data_parallel = self.config.get('data_parallel')
        print('using data parallel {}'.format(data_parallel))
        if data_parallel is not None:
            # DataParallel hides custom attributes of the wrapped module;
            # keep the loss-function accessor reachable on the wrapper.
            temp_lf = self.net.get_loss_function
            self.net = nn.DataParallel(self.net, **data_parallel)
            self.net.get_loss_function = temp_lf
        print(self.net)
        print('n_params', np.sum([np.prod(p.size()) for p in self.net.parameters()]))
        optimizer_class = getattr(torch.optim, config['optimizer']['name'])
        self.optimizer = optimizer_class(self.net.parameters(), **config['optimizer']['params'])
        # look the scheduler up in torch first, then in local extensions
        if hasattr(lr_scheduler, config['scheduler']['name']):
            scheduler_class = getattr(lr_scheduler, config['scheduler']['name'])
            self.scheduler = scheduler_class(self.optimizer, **config['scheduler']['params'])
        elif hasattr(lr_scheduler_ext, config['scheduler']['name']):
            scheduler_class = getattr(lr_scheduler_ext, config['scheduler']['name'])
            self.scheduler = scheduler_class(self.optimizer, **config['scheduler']['params'])
        else:
            raise RuntimeError('unknown LR scheduler {}'.format(config['scheduler']))

    def set_dataloaders(self, dataloaders):
        """Attach the dict of train/valid/test dataloaders."""
        self.dataloaders = dataloaders

    def advance(self):
        """Train one epoch, then validate (with checkpointing).

        Returns True when training was aborted (NaN/Inf loss).
        """
        abort = self.train_one_epoch(self.dataloaders['train'])
        if not abort:
            self.evaluate(self.dataloaders['valid'], 'valid', checkpoint=True)
        return abort

    def test(self):
        """Evaluate on the test split without checkpointing."""
        self.evaluate(self.dataloaders['test'], 'test', checkpoint=False)

    def train_one_epoch(self, loader):
        """Run one training epoch over ``loader``.

        Returns True if training must be aborted because the smoothed
        loss became NaN/Inf, False otherwise.
        """
        self.epoch += 1
        print('epoch', self.epoch)
        self.net.train()
        train_loss_function = self.net.get_train_loss_function()
        # exponential moving average of the batch loss; starts biased
        # towards 1. for the first few batches
        smoothed_loss = 1.
        t_elapsed = 0
        n_batches = float(len(loader))
        count = 0
        total_count = 0
        abort = False
        for batch in loader:
            t_start = time.time()
            if self.CUDA:
                for key in batch.keys():
                    batch[key] = batch[key].cuda()
            # zero out gradients !
            self.optimizer.zero_grad()
            predictions = self.net.forward(batch)
            loss = train_loss_function(predictions, batch)
            loss.backward()
            self.optimizer.step()
            count += 1
            total_count += 1
            smoothed_loss = smoothed_loss * 0.9 + loss.cpu().item() * 0.1
            # bail if NaN or Inf is encountered
            if np.isnan(smoothed_loss) or np.isinf(smoothed_loss):
                print('encountered NaN/Inf in smoothed_loss "{}"'.format(smoothed_loss))
                abort = True
                break
            t_end = time.time()
            t_elapsed += (t_end - t_start)
            # progress report roughly once a minute
            if t_elapsed > 60:
                batches_per_second = count / t_elapsed
                t_rest = ((n_batches - total_count) / batches_per_second) / 3600.
                print('bps {:4.2f} eta {:4.2f} [h]'.format(batches_per_second, t_rest))
                t_elapsed = 0
                count = 0
            debug_gradients(self.config, total_count, self.net)
        # visu ############################################################
        debug_gradients_tbx(self.logger, self.config, self.net, self.epoch)
        ###################################################################
        self.logger.add_scalar('train/loss', smoothed_loss, global_step=self.epoch)
        # always recorded, if present, to keep track of lr-scheduler
        for param_group in self.optimizer.param_groups:
            if 'lr' in param_group:
                self.logger.add_scalar(
                    'train/lr',
                    param_group['lr'],
                    global_step=self.epoch
                )
            if 'momentum' in param_group:
                self.logger.add_scalar(
                    'train/momentum',
                    param_group['momentum'],
                    global_step=self.epoch
                )
        return abort

    def find_learnrate(self, init_value=1e-8, final_value=10., exp_avg=0.98):
        """Learning-rate range test: exponentially sweep the learning
        rate from ``init_value`` to ``final_value`` over the training
        loader, recording the bias-corrected smoothed loss, and plot
        loss vs. learning rate (log scale)."""
        data_loader = self.dataloaders['train']
        max_num = 4096
        num = min(max_num, len(data_loader) - 1)
        # multiplicative step so num steps span [init_value, final_value]
        mult = (final_value / init_value) ** (1 / num)
        lr = init_value
        train_loss_function = self.net.get_train_loss_function()
        self.optimizer.param_groups[0]['lr'] = lr
        avg_loss = 0.
        best_loss = 0.
        batch_num = 0
        losses = []
        learning_rates = []
        for batch in data_loader:
            batch_num += 1
            print('flr batch_num', batch_num)
            if self.CUDA:
                for key in batch.keys():
                    batch[key] = batch[key].cuda()
            self.optimizer.zero_grad()
            prediction = self.net.forward(batch)
            loss = train_loss_function(prediction, batch)
            dloss = loss.detach().cpu().item()
            if np.isinf(dloss) or np.isnan(dloss):
                print('encountered "{}" in loss at batch "{}"'.format(dloss, batch_num))
                break
            # compute the smoothed loss (bias-corrected exponential average)
            avg_loss = exp_avg * avg_loss + (1 - exp_avg) * loss.item()
            smoothed_loss = avg_loss / (1 - exp_avg ** batch_num)
            # stop if the loss is exploding
            if batch_num > 1 and smoothed_loss > 10 * best_loss:
                print('loss exploding, aborting')
                break
            if batch_num > max_num:
                print('max number of batches reached')
                break
            # record the best loss
            if smoothed_loss < best_loss or batch_num == 1:
                best_loss = smoothed_loss
            # store the values
            losses.append(smoothed_loss)
            learning_rates.append(lr)
            # do the SGD step
            loss.backward()
            self.optimizer.step()
            # update the lr for the next step
            lr *= mult
            self.optimizer.param_groups[0]['lr'] = lr
        import matplotlib.pyplot as plt
        fig, ax = plt.subplots()
        ax.semilogx(learning_rates, losses)
        ax.set_xlabel('learning rate (log scale)')
        ax.set_ylabel('loss')
        plt.show()

    def evaluate_loader(self, loader):
        """Run inference over ``loader`` without gradients.

        Returns (predictions, batches): both stacked dicts, predictions
        detached to CPU numpy, batches kept as the loader provided them.
        """
        print('evaluating loader')
        self.net.eval()
        predictions = []
        batches = []
        with torch.no_grad():
            for cpu_batch in loader:
                batch = dict()
                if self.CUDA:
                    for key in cpu_batch.keys():
                        batch[key] = cpu_batch[key].cuda()
                else:
                    batch = cpu_batch
                gpu_prediction = self.net.forward(batch)
                cpu_prediction = recursive_detach(gpu_prediction)
                predictions.append(cpu_prediction)
                batches.append(cpu_batch)
        predictions = stacked_dict(predictions)
        batches = stacked_dict(batches)
        return predictions, batches

    def evaluate(self, loaders, name, checkpoint=True):
        """Evaluate one loader or a list of loaders under split ``name``,
        let the net aggregate/log results, and optionally save the
        checkpoint file(s) the net requests."""
        # if we indeed get a list of loaders, run inference / evaluate each individually
        if isinstance(loaders, list):
            predictions = []
            batches = []
            for loader in loaders:
                # FIXME: this metadata fishing will not work with
                # datasets which does not have this field!
                loader_predictions, loader_batches = self.evaluate_loader(loader)
                predictions.append(dict(
                    metadata=loader.dataset.metadata,
                    predictions=loader_predictions
                ))
                batches.append(dict(
                    metadata=loader.dataset.metadata,
                    batches=loader_batches
                ))
        else:
            predictions, batches = self.evaluate_loader(loaders)
        checkpoint_filenames = self.net.evaluate_aggregate_checkpoint(
            name,
            predictions,
            batches,
            self.logger,
            self.epoch,
            scheduler=self.scheduler
        )
        if checkpoint and checkpoint_filenames is not None:
            if isinstance(checkpoint_filenames, list):
                for checkpoint_filename in checkpoint_filenames:
                    self.save(checkpoint_filename)
            else:
                self.save(checkpoint_filenames)

    def process(self, dataloader_module, infile, outfile):
        """Run inference over ``infile`` (loaded sequentially via
        ``dataloader_module``) and dump predictions+batches to
        ``outfile`` with torch.save."""
        self.net.eval()
        loader = dataloader_module.get_loader_for_file(self.config, infile, 'SequentialSampler')
        predictions = []
        batches = []
        with torch.no_grad():
            for batch in loader:
                if self.CUDA:
                    for key in batch.keys():
                        batch[key] = batch[key].cuda()
                prediction = self.net.forward(batch)
                predictions.append(prediction)
                batches.append(batch)
        predictions = stacked_dict(predictions)
        batches = stacked_dict(batches)
        torch.save(
            dict(predictions=predictions, batches=batches),
            outfile,
            pickle_module=pickle,
            pickle_protocol=pickle.HIGHEST_PROTOCOL
        )

    def save(self, filename):
        """Write epoch counter plus net/optimizer state to ``filename``."""
        state = dict()
        state['epoch'] = self.epoch
        state['net_state_dict'] = self.net.state_dict()
        state['optimizer_state_dict'] = self.optimizer.state_dict()
        torch.save(state, filename, pickle_module=pickle, pickle_protocol=pickle.HIGHEST_PROTOCOL)

    def load(self, filename):
        """Restore net/optimizer state and epoch counter from ``filename``
        (always mapped to CPU storage)."""
        state = torch.load(filename, map_location=lambda storage, loc: storage)
        # BUGFIX: the old check `if self.instantiate_net:` tested a bound
        # method, which is always truthy; test for an actual network
        # instead so Runs built with instantiate_net=False don't crash.
        if self.net is not None:
            self.net.load_state_dict(state['net_state_dict'])
            self.optimizer.load_state_dict(state['optimizer_state_dict'])
        self.epoch = state['epoch']
|
{"hexsha": "aff2689b5f0f4242bc6754ef03d72261fb53de9d", "size": 14614, "ext": "py", "lang": "Python", "max_stars_repo_path": "runners/default_runner.py", "max_stars_repo_name": "VickyChing/ICASSP19", "max_stars_repo_head_hexsha": "247b0b9d738679f5472dd74f52bd1933b871e87f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2019-04-27T14:18:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-15T22:58:41.000Z", "max_issues_repo_path": "runners/default_runner.py", "max_issues_repo_name": "VickyChing/ICASSP19", "max_issues_repo_head_hexsha": "247b0b9d738679f5472dd74f52bd1933b871e87f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-10-02T11:10:27.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T09:10:52.000Z", "max_forks_repo_path": "runners/default_runner.py", "max_forks_repo_name": "rainerkelz/ICASSP19", "max_forks_repo_head_hexsha": "15b756692cca80e7b16cdbdf28d40f43db2c03b6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-12-01T02:42:01.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-09T04:01:46.000Z", "avg_line_length": 36.904040404, "max_line_length": 179, "alphanum_fraction": 0.5611742165, "include": true, "reason": "import numpy", "num_tokens": 2983}
|
#!/usr/bin/env python
# *-----------------------------------------------------------------------*
# | |
# | Copyright (c) 2013 by Paul Scherrer Institute (http://www.psi.ch) |
# | |
# | Author Thierry Zamofing (thierry.zamofing@psi.ch) |
# *-----------------------------------------------------------------------*
"""
implements a grid view classe to show 'excel-like'-tables of a hdf5 dataset.
"""
import wx, h5py, os
import wx.grid
import numpy as np
from . import utilities as ut
class DlgFormatSetup(wx.Dialog):
  """Modal dialog to edit the printf-style format string used for grid cells."""
  def __init__(self, parent, fmt):
    # fmt: the current format string, shown in the editable text field
    wx.Dialog.__init__(self, parent, -1, "Format Setup")
    txtFmt = wx.StaticText(self, -1, "format")
    self.edFmt = edFmt = wx.TextCtrl(self, -1, fmt, style=wx.TE_PROCESS_ENTER)
    txtPredef = wx.StaticText(self, -1, "predefined")
    preDefLst = ("default", "0x%x", "%f+%fi")
    self.cbPredef = cbPredef = wx.ComboBox(self, -1, choices=preDefLst, style=wx.CB_READONLY)
    # cbPredef.SetSelection(0)
    # two-column layout: labels on the left, controls on the right
    sizer = wx.BoxSizer(wx.VERTICAL)
    fgs = wx.FlexGridSizer(4, 2, 5, 5)
    fgs.Add(txtFmt, 0, wx.ALIGN_RIGHT)
    fgs.Add(edFmt, 0, wx.EXPAND)
    fgs.Add(txtPredef, 0, wx.ALIGN_RIGHT)
    fgs.Add(cbPredef, 0, wx.EXPAND)
    sizer.Add(fgs, 0, wx.EXPAND | wx.ALL, 5)
    edFmt.SetFocus()
    # standard OK/Cancel button row
    btns = self.CreateButtonSizer(wx.OK | wx.CANCEL)
    sizer.Add(btns, 0, wx.EXPAND | wx.ALL, 5)
    self.Bind(wx.EVT_COMBOBOX, self.OnModify, cbPredef)
    self.SetSizer(sizer)
    sizer.Fit(self)
  def OnModify(self, event):
    # Copy the chosen predefined format into the text field; combo index 0
    # ("default") clears the field instead.
    # print 'OnModify'
    parent = self.GetParent()
    # event.EventObject.Value
    # self.cbPredef.Value
    if event.Int:
      self.edFmt.Value = event.GetString()
    else:
      self.edFmt.Value = ""
# http://wxpython-users.1045709.n5.nabble.com/filling-a-wxgrid-td2348720.html
class Table1DArray(wx.grid.PyGridTableBase):
  """Grid table adapter for a 1D array dataset: one value per row, single column."""
  def __init__(self, data):
    wx.grid.PyGridTableBase.__init__(self)
    # ut.StopWatch.Log('DBG 1')
    self.data = data  # 1D dataset (h5py Dataset or ndarray)
    # ut.StopWatch.Log('DBG 2')
  def GetRowLabelValue(self, idx):
    # rows are labelled by their numeric index
    return idx
  def GetColLabelValue(self, idx):
    # single unnamed column
    return ""
  def GetNumberRows(self):
    # ut.StopWatch.Log('GetNumberRows')
    return self.data.shape[0]
  def GetNumberCols(self):
    # ut.StopWatch.Log('GetNumberCols')
    return 1
  def GetValue(self, row, col):
    # ut.StopWatch.Log('GetValue %d %d'%(row,col))
    return self.data[row]
class Table2DArray(wx.grid.PyGridTableBase):
  """Grid table adapter showing a 2D slice (``self.view``) of an n-D dataset."""

  def __init__(self, data):
    wx.grid.PyGridTableBase.__init__(self)
    # full dataset; the 2D slice to display is assigned later as self.view
    self.data = data

  def GetRowLabelValue(self, idx):
    # rows labelled by numeric index
    return idx

  def GetColLabelValue(self, idx):
    # columns labelled by numeric index
    return idx

  def GetNumberRows(self):
    return self.view.shape[0]

  def GetNumberCols(self):
    return self.view.shape[1]

  def GetValue(self, row, col):
    # Apply the optional cell format string; when no format is configured
    # (self.cellFormat absent) the raw value is returned instead.
    cell = self.view[row][col]
    fmt = getattr(self, 'cellFormat', None)
    if fmt is None:
      return cell
    return fmt % cell
class Table1DCompound(wx.grid.PyGridTableBase):
  """Grid table adapter for a 1D compound (record) dataset: one column per field."""

  def __init__(self, data):
    wx.grid.PyGridTableBase.__init__(self)
    # 1D record array; columns come from the compound dtype fields
    self.data = data

  def GetRowLabelValue(self, idx):
    # rows labelled by numeric index
    return idx

  def GetColLabelValue(self, idx):
    # column headers are the field names of the compound dtype
    return self.data.dtype.names[idx]

  def GetNumberRows(self):
    return self.data.shape[0]

  def GetNumberCols(self):
    return len(self.data.dtype.names)

  def GetValue(self, row, col):
    # Format the cell when a format string is configured, else return the raw
    # value (self.cellFormat is an optional attribute set via the format dialog).
    cell = self.data[row][col]
    fmt = getattr(self, 'cellFormat', None)
    return cell if fmt is None else fmt % cell
class Table2DCompound(wx.grid.PyGridTableBase):
  """Grid table adapter for a 2D compound dataset slice held in ``self.view``."""

  def __init__(self, data):
    wx.grid.PyGridTableBase.__init__(self)
    # full dataset; the 2D slice to display is assigned later as self.view
    self.data = data

  def GetRowLabelValue(self, idx):
    # rows labelled by numeric index
    return idx

  def GetColLabelValue(self, idx):
    # columns labelled by numeric index
    return idx

  def GetNumberRows(self):
    return self.view.shape[0]

  def GetNumberCols(self):
    return self.view.shape[1]

  def GetValue(self, row, col):
    # Apply the optional cell format string; fall back to the raw value when
    # no format has been configured.
    cell = self.view[row][col]
    fmt = getattr(self, 'cellFormat', None)
    if fmt is None:
      return cell
    return fmt % cell
class Grid(wx.grid.Grid):
  """wx.grid.Grid configured with compact fonts/sizes for dataset browsing."""
  def __init__(self, parent, data):
    # data is accepted but not stored here; the table adapter is attached
    # later via SetTable() -- TODO confirm no other use of `data` is intended
    wx.grid.Grid.__init__(self, parent, -1)
    self.SetDefaultColSize(50)
    self.SetDefaultRowSize(17)
    # small 8pt fonts for labels and cells to fit more data on screen
    font = self.GetLabelFont()
    font.PointSize = 8
    self.SetLabelFont(font)
    font = self.GetDefaultCellFont()
    font.PointSize = 8
    self.SetDefaultCellFont(font)
    self.SetDefaultCellAlignment(wx.ALIGN_RIGHT, wx.ALIGN_CENTRE)
    # self.SetDefaultCellAlignment(wx.ALIGN_CENTRE,wx.ALIGN_CENTRE)
    # self.SetDefaultRenderer
  @staticmethod
  def OnSetView(usrData, value, msg):
    # Axis-slider callback: recompute the 2D slice shown in the grid when a
    # slider moves. usrData is the SliderGroup; its widget parents lead back
    # to the HdfGridFrame that owns the grid and the slider list.
    gridFrm = usrData.slider.Parent.Parent
    grid = gridFrm.grid
    tbl = grid.GetTable()
    data = tbl.data
    sl = ut.GetSlice(tbl.idxXY, data.shape, gridFrm.wxAxCtrlLst)
    # tbl.view = tbl.data[value,...]
    tbl.view = tbl.data[sl]
    grid.ClearGrid()
    pass
class HdfGridFrame(wx.Frame):
  """Top-level frame showing an HDF5 dataset (or numpy array) in a grid.

  For n-D data the last two axes are displayed; one slider per remaining
  axis selects the 2D slice.
  """
  def __init__(self, parent, lbl, hid):
    # parent: parent window (may be None)
    # lbl:    label shown in the title bar
    # hid:    h5py.h5d.DatasetID or numpy ndarray to display
    wx.Frame.__init__(self, parent, title="HDFGridView: " + lbl, size=wx.Size(750, 650))
    imgDir = ut.Path.GetImage()
    icon = wx.Icon(os.path.join(imgDir, "h5pyViewer.ico"), wx.BITMAP_TYPE_ICO)
    self.SetIcon(icon)
    pan = wx.Panel(self, -1)
    t = type(hid)
    if t == h5py.h5d.DatasetID:
      data = h5py.Dataset(hid)
    elif t == np.ndarray:
      data = hid
    else:
      raise TypeError
    grid = Grid(pan, data)
    sizer = wx.BoxSizer(wx.VERTICAL)
    sizer.Add(grid, 1, wx.EXPAND)
    wxAxCtrlLst = []
    l = len(data.shape)
    if l == 1:
      # 1D data: compound dtypes get one column per field.
      # NOTE(review): hid.get_type() only exists for h5py IDs; an ndarray
      # input would raise AttributeError here -- confirm intended usage.
      if type(hid.get_type()) == h5py.h5t.TypeCompoundID:
        tbl = Table1DCompound(data)
      else:
        tbl = Table1DArray(data)
    else:
      # n-D data: grid shows the last two axes, one slider per extra axis
      idxXY = (l - 2, l - 1)
      # idxXY=(l-1,l-2)
      for idx, l in enumerate(data.shape):
        if idx in idxXY:
          continue
        wxAxCtrl = ut.SliderGroup(pan, label="Axis:%d" % idx, range=(0, l - 1))
        wxAxCtrl.idx = idx
        wxAxCtrlLst.append(wxAxCtrl)
        sizer.Add(wxAxCtrl.sizer, 0, wx.EXPAND | wx.ALIGN_CENTER | wx.ALL, border=5)
        wxAxCtrl.SetCallback(Grid.OnSetView, wxAxCtrl)
      sl = ut.GetSlice(idxXY, data.shape, wxAxCtrlLst)
      # BUGFIX: both branches previously created Table2DArray, leaving
      # Table2DCompound unused; select the compound adapter for compound types.
      if type(hid.get_type()) == h5py.h5t.TypeCompoundID:
        tbl = Table2DCompound(data)
      else:
        tbl = Table2DArray(data)
      tbl.idxXY = idxXY
      # transpose the slice when the display axes are in descending order
      if idxXY[0] < idxXY[1]:
        tbl.view = tbl.data[sl]
      else:
        tbl.view = tbl.data[sl].T
    self.wxAxCtrlLst = wxAxCtrlLst
    grid.SetTable(tbl, True)
    # AutoSize must be called after SetTable, but takes lot of time on big tables!
    if tbl.GetNumberCols() * tbl.GetNumberRows() < 50 * 50:
      grid.AutoSizeColumns(True)
      grid.AutoSizeRows(True)
    self.grid = grid
    pan.SetSizer(sizer)
    pan.Layout()
    self.Centre()
    self.BuildMenu()
    grid.Bind(wx.grid.EVT_GRID_CMD_COL_SIZE, self.OnColSize)
  def OnColSize(self, event):
    """Shift+column-resize applies the new width to all columns."""
    if event.ShiftDown():
      col = event.RowOrCol
      sz = self.grid.GetColSize(col)
      print("OnColSize", col, sz)
      self.grid.SetDefaultColSize(sz, True)
      self.grid.ForceRefresh()
  def OnSetFormat(self, event):
    """Open the format dialog and apply/clear the table's cell format string."""
    print("OnSetFormat")
    fmt = getattr(self.grid.Table, "cellFormat", "")
    dlg = DlgFormatSetup(self, fmt)
    if dlg.ShowModal() == wx.ID_OK:
      tbl = self.grid.Table
      v = dlg.edFmt.Value
      if v:
        tbl.cellFormat = v
      elif hasattr(tbl, 'cellFormat'):
        # BUGFIX: deleting an attribute that was never set raised AttributeError
        del tbl.cellFormat
      self.grid.ForceRefresh()
    dlg.Destroy()
  def BuildMenu(self):
    """Create the menu bar (Edit -> Setup Format) and the status bar."""
    mnBar = wx.MenuBar()
    # -------- Edit Menu --------
    mn = wx.Menu()
    mnItem = mn.Append(wx.ID_ANY, "Setup Format", "Setup the format of the cells")
    self.Bind(wx.EVT_MENU, self.OnSetFormat, mnItem)
    self.mnIDxAxis = mnItem.GetId()
    mnBar.Append(mn, "&Edit")
    self.SetMenuBar(mnBar)
    self.CreateStatusBar()
if __name__ == "__main__":
  import sys
  import argparse # since python 2.7
  def GetParser():
    """Build the argument parser for the standalone viewer (--hdfFile, --elem)."""
    cmd = "\n ./" + os.path.basename(sys.argv[0]) + " "
    exampleCmd = (
      ("compound n*m", "/scratch/detectorData/Ptychography/tst/initial_conditions_S01375.h5", "objects/object_0",),
      ("compound n", "/scratch/detectorData/e14472/scan_00033.hdf5", "entry/data/spec",),
      ("array l*m*n", "/scratch/detectorData/e14472/scan_00033.hdf5", "entry/data/pilatus_1",),
      ("array n", "/scratch/detectorData/e14472/scan_00033.hdf5", "entry/data/pilatus_1_info",),
    )
    epilog = "Examples:" + "".join(["\n # " + s[0] + " #" + cmd + "--hdfFile %s --elem %s" % s[1:] for s in exampleCmd])
    parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description=__doc__, epilog=epilog,)
    parser.add_argument("--hdfFile", required=True, help="the hdf5 to show")
    parser.add_argument("--elem", required=True, help="the path to the element in the hdf5 file")
    # BUGFIX: removed unreachable parse_args()/return lines that followed this return
    return parser
  class App(wx.App):
    def OnInit(self):
      """Open the HDF5 file/element named on the command line and show the grid."""
      parser = GetParser()
      # parser=GetParser(False) # debug with exampleCmd
      args = parser.parse_args()
      try:
        self.fid = fid = h5py.h5f.open(args.hdfFile)
      except IOError as e:
        sys.stderr.write("Unable to open File: " + args.hdfFile + "\n")
        parser.print_usage(sys.stderr)
        return True
      try:
        hid = h5py.h5o.open(fid, args.elem)
      except KeyError as e:
        sys.stderr.write("Unable to open Object: " + args.elem + "\n")
        parser.print_usage(sys.stderr)
        return True
      frame = HdfGridFrame(None, args.elem, hid)
      frame.Show()
      self.SetTopWindow(frame)
      return True
    def OnExit(self):
      # close the HDF5 file handle when the application shuts down
      self.fid.close()
  ut.StopWatch.Start()
  app = App()
  app.MainLoop()
|
{"hexsha": "e989c8c431717e66566a1044b537800d87cbb54c", "size": 10962, "ext": "py", "lang": "Python", "max_stars_repo_path": "h5pyViewerLib/hdfGrid.py", "max_stars_repo_name": "Eothred/h5pyViewer", "max_stars_repo_head_hexsha": "bba7ad3e1b7cf0896332afb8ebc064fe46ada70f", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-03-22T14:34:12.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-30T14:46:46.000Z", "max_issues_repo_path": "h5pyViewerLib/hdfGrid.py", "max_issues_repo_name": "Eothred/h5pyViewer", "max_issues_repo_head_hexsha": "bba7ad3e1b7cf0896332afb8ebc064fe46ada70f", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "h5pyViewerLib/hdfGrid.py", "max_forks_repo_name": "Eothred/h5pyViewer", "max_forks_repo_head_hexsha": "bba7ad3e1b7cf0896332afb8ebc064fe46ada70f", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-07-18T12:11:22.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-12T10:55:26.000Z", "avg_line_length": 32.146627566, "max_line_length": 131, "alphanum_fraction": 0.5618500274, "include": true, "reason": "import numpy", "num_tokens": 2779}
|
import os
import pickle
import torch
import numpy as np
from math import ceil
from model_vc import Generator
# --- conversion configuration ---
# checkpoint and input/output locations
ckpt_path = 'logs_dir/autovc_one_hot146000.ckpt'
conversion_list_path = 'conversion_list.txt'
data_dir = '../AutoVC_hujk17/full_106_spmel_nosli'
speaker_id_dict_path = '../AutoVC_hujk17/full_106_spmel_nosli/speaker_seen_unseen.txt'
# Generator hyper-parameters (must match the trained checkpoint)
dim_neck = 32
dim_emb = 256
dim_pre = 512
freq = 32
# for the speaker look-up table: 102 speakers in the data, 128 used as upper bound
speaker_num =128
def pad_seq(x, base=freq):
    """Zero-pad a (T, n_mels) spectrogram along time so T is a multiple of `base`.

    Returns the padded array and the number of padded frames.
    """
    # frames needed to reach the next multiple of `base` (0 when already aligned)
    pad_amount = (-x.shape[0]) % base
    return np.pad(x, ((0, pad_amount), (0, 0)), 'constant'), pad_amount
def text2dict(file):
    """Map each speaker name to its line index in `file`.

    Each line looks like ``name|...``; only the first '|'-separated field is
    used as the key. Returns a dict {speaker_name: line_index}.
    """
    speaker_id_dict = {}
    # BUGFIX: the file handle from open(...).readlines() was never closed;
    # use a context manager (iterating the handle yields the same lines).
    with open(file, 'r') as fh:
        for i, name in enumerate(fh):
            name = name.strip().split('|')[0]
            speaker_id_dict[name] = i
    # print(speaker_id_dict)
    return speaker_id_dict
def main():
    """Run AutoVC voice conversion for every task in the conversion list and
    pickle the converted mel spectrograms to results.pkl."""
    # init model
    device = 'cuda:0'
    G = Generator(dim_neck=dim_neck, dim_emb=dim_emb, dim_pre=dim_pre, freq=freq, speaker_num=speaker_num).eval().to(device)
    g_checkpoint = torch.load(ckpt_path)
    G.load_state_dict(g_checkpoint['model'])
    # init speaker name -> id
    speaker_id_dict = text2dict(speaker_id_dict_path)
    # each task line looks like: p228/p228_077.npy|p228|p227
    # p228/p228_077.npy|p228|p227
    f = open(conversion_list_path, 'r').readlines()
    tasks = [i.strip() for i in f]
    spect_vc = []
    for task in tasks:
        task = task.split('|')
        assert len(task) == 3
        mel_path = task[0]  # relative path of the source mel spectrogram
        s_name = task[1]    # source speaker name
        t_name = task[2]    # target speaker name
        # process from string -> data: mel, s, t
        mel = np.load(os.path.join(data_dir, mel_path))
        mel, len_pad = pad_seq(mel)  # pad time axis to a multiple of freq
        s_id = speaker_id_dict[s_name]
        t_id = speaker_id_dict[t_name]
        # process from data -> batch tensor: mel, s, t (batch dimension of 1)
        mel = torch.from_numpy(mel[np.newaxis, :, :]).to(device)
        s_id = torch.from_numpy(np.asarray([s_id])).to(device)
        t_id = torch.from_numpy(np.asarray([t_id])).to(device)
        print('speaker model out----------', s_id.size())
        with torch.no_grad():
            # x_identic_psnt is the postnet output -- assumed shape
            # (1, T, n_mels); TODO confirm against model_vc.Generator
            _, x_identic_psnt, _ = G(mel, s_id, t_id)
        print('mel size:', x_identic_psnt.size())
        # strip the padding frames that pad_seq added
        if len_pad == 0:
            # uttr_trg = x_identic_psnt[0, 0, :, :].cpu().numpy()
            x_identic_psnt = x_identic_psnt[0, :, :].cpu().numpy()
        else:
            # uttr_trg = x_identic_psnt[0, 0, :-len_pad, :].cpu().numpy()
            x_identic_psnt = x_identic_psnt[0, :-len_pad, :].cpu().numpy()
        spect_vc.append( ('{}x{}'.format(s_name, t_name), x_identic_psnt) )
    # persist all conversions as a list of (name, mel) pairs
    with open('results.pkl', 'wb') as handle:
        pickle.dump(spect_vc, handle)
# Entry point: run the conversion pipeline when executed as a script.
if __name__ == "__main__":
    main()
|
{"hexsha": "231659671eade845fbbf25bc538f110331da32c6", "size": 2882, "ext": "py", "lang": "Python", "max_stars_repo_path": "conversion.py", "max_stars_repo_name": "ruclion/AutoVC_one_hot", "max_stars_repo_head_hexsha": "db400a224b2e4544a480e0ab3f62b51b570d378c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "conversion.py", "max_issues_repo_name": "ruclion/AutoVC_one_hot", "max_issues_repo_head_hexsha": "db400a224b2e4544a480e0ab3f62b51b570d378c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-04-13T06:52:43.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-13T06:52:43.000Z", "max_forks_repo_path": "conversion.py", "max_forks_repo_name": "ruclion/AutoVC_one_hot", "max_forks_repo_head_hexsha": "db400a224b2e4544a480e0ab3f62b51b570d378c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.0208333333, "max_line_length": 125, "alphanum_fraction": 0.5940319223, "include": true, "reason": "import numpy", "num_tokens": 821}
|
# Return the index of the maximum entry of a given vector transformed by f.
Base.argmax(f::Any, v::AbsVec) = argmax(f, v, 1:length(v))

# Among specified indices, return the index of the maximum entry of a given vector
# transformed by f.  Returns 0 when indv is empty.
function Base.argmax(f::Any, v::AbsVec, indv::AbsVecInteger)
    ind = 0  # return 0 if indv is empty
    val = -Inf
    for n = indv
        fn = f(v[n])  # evaluate f only once per entry (was computed twice before)
        if fn ≥ val  # ≥ rather than > to prevent returning 0 when v has -Inf
            val = fn
            ind = n
        end
    end

    return ind
end
# Return the index of the minimum entry of a given vector transformed by f
Base.argmin(f::Any, v::AbsVec) = argmin(f, v, 1:length(v))

# Among specified indices, return the index of the minimum entry of a given vector
# transformed by f.  Returns 0 when indv is empty.
function Base.argmin(f::Any, v::AbsVec, indv::AbsVecInteger)
    ind = 0  # return 0 if indv is empty
    val = Inf
    for n = indv
        fn = f(v[n])  # evaluate f only once per entry (was computed twice before)
        if fn ≤ val  # ≤ rather than < to prevent returning 0 when v has +Inf (comment previously said -Inf)
            val = fn
            ind = n
        end
    end

    return ind
end
|
{"hexsha": "f3093db6640b4a3e4808222bdc1d85989ae2f4dd", "size": 1081, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/util.jl", "max_stars_repo_name": "wsshin/SALTBase.jl", "max_stars_repo_head_hexsha": "7e649196ebe80045e17a3227280011fb3fab1cb3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-11-19T08:27:33.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-27T18:37:29.000Z", "max_issues_repo_path": "src/util.jl", "max_issues_repo_name": "wsshin/SALTBase.jl", "max_issues_repo_head_hexsha": "7e649196ebe80045e17a3227280011fb3fab1cb3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-08-06T14:46:11.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-04T16:10:27.000Z", "max_forks_repo_path": "src/util.jl", "max_forks_repo_name": "wsshin/SALTBase.jl", "max_forks_repo_head_hexsha": "7e649196ebe80045e17a3227280011fb3fab1cb3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.2162162162, "max_line_length": 82, "alphanum_fraction": 0.6188714154, "num_tokens": 316}
|
import gc
import os
from argparse import Namespace
from timeit import default_timer as timer
from typing import Union
import numpy as np
import pandas as pd
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from termcolor import colored
from mimic import log
from mimic.run_epochs import run_epochs
from mimic.utils.exceptions import NaNInLatent, CudaOutOfMemory
from mimic.utils.experiment import MimicExperiment
from mimic.utils.filehandling import create_dir_structure, create_dir_structure_testing, get_config_path, \
get_method
from mimic.utils.flags import parser
from mimic.utils.flags import setup_flags
from mimic.utils.utils import get_gpu_memory
class Main:
    """Driver that configures and launches training, restarting on NaN losses
    and retrying with a smaller batch size when the GPU runs out of memory."""

    def __init__(self, flags: Namespace, testing=False):
        """
        flags:   parsed command line flags (config_path may point to a json config)
        testing: forwarded to setup_flags to select the test configuration
        """
        flags = setup_flags(flags, testing)
        flags = get_method(flags)
        print(colored(f"running on {flags.device} with text {flags.text_encoding} encoding "
                      f'with method {flags.method}, batch size: {flags.batch_size} and img size {flags.img_size}, '
                      f'fixed_image_extractor: {flags.fixed_image_extractor}', 'blue'))
        self.flags = create_dir_structure(flags)
        # because of bad initialisation, the vae might return nan values. If this is the case it is best to restart the
        # experiment.
        self.max_tries = 10  # maximum restarts of the experiment due to nan values
        self.current_tries = 0
        self.start_time = 0
        self.exp = None

    def setup_distributed(self):
        """Configure world size and per-process batch size for multi-GPU runs."""
        self.flags.world_size = torch.cuda.device_count()
        log.info(f'setting up distributed computing with world size {self.flags.world_size}')
        self.flags.distributed = self.flags.world_size > 1
        # split the global batch across the participating processes
        self.flags.batch_size = int(self.flags.batch_size / self.flags.world_size)

    def run_epochs(self) -> Union[bool, str]:
        """
        Wrapper of mimic.run_epochs.run_epochs that checks if the workflow was completed and starts it over otherwise.
        returns
            bool: true if run_epochs finishes, False if an error occurs
            string: "cuda_out_of_memory" if GPU runs out of memory
        """
        print(colored(f'current free GPU memory: {get_gpu_memory()}', 'red'))
        self.start_time = timer()
        # need to reinitialize MimicExperiment after each retry
        self.exp = MimicExperiment(self.flags)
        create_dir_structure_testing(self.exp)
        # NOTE(review): stored on Main, not on the experiment -- confirm this is
        # intended (looks like it may have been meant for self.exp).
        self.expnumber_restarts = self.current_tries
        try:
            if self.flags.distributed:
                self.setup_distributed()
                mp.spawn(run_epochs, nprocs=self.flags.world_size, args=(self.exp,), join=True)
            else:
                run_epochs(self.flags.device, self.exp)
        except NaNInLatent as e:
            print(e)
            return False
        except CudaOutOfMemory as e:
            print(e)
            return 'cuda_out_of_memory'
        self.exp.update_experiments_dataframe({'experiment_duration': (timer() - self.start_time) // 60})
        return True

    def restart(self) -> None:
        """
        Clears old dir_structure and creates new one, deletes corresponding row in the experiment dataframe.
        """
        exp_df = pd.read_csv('experiments_dataframe.csv')
        # BUGFIX: DataFrame.drop returns a new frame (it is not in-place); the
        # result was previously discarded, so the row was never removed.
        exp_df = exp_df.drop(exp_df.index[exp_df['str_experiment'] == self.flags.str_experiment])
        exp_df.to_csv('experiments_dataframe.csv', index=False)
        if self.exp.tb_logger:
            self.exp.tb_logger.writer.close()
        if self.flags.distributed:
            dist.destroy_process_group()
        torch.cuda.empty_cache()
        gc.collect()
        # NOTE(review): shells out to `rm -r`; dir_experiment_run is produced by
        # this program, but verify it can never contain spaces/metacharacters.
        command = f'rm -r {self.flags.dir_experiment_run}'
        print(command)
        os.system(command)
        self.flags = create_dir_structure(self.flags)

    def main(self):
        """
        Runs "run_epochs" until it returns True. If "run_epochs" fails because of full GPU memory,
        the batch size is reduced and the workflow is started again.
        If during the training, the model returns NaNs, bad initialization is
        assumed and the workflow is started again.
        """
        success = False
        while not success and self.current_tries < self.max_tries:
            success = self.run_epochs()
            if not success:
                # NaN failure: counts towards the retry limit
                self.current_tries += 1
                log.info(f'******** RESTARTING EXPERIMENT FOR THE {self.current_tries} TIME ********')
            if success == 'cuda_out_of_memory':
                # OOM: shrink the batch by 20% and retry (does not count as a NaN retry)
                old_bs = self.flags.batch_size
                self.flags.batch_size = int(np.floor(self.flags.batch_size * 0.8))
                log.info(f'******** GPU ran out of memory with batch size {old_bs}, '
                         f'trying again with batch size: {self.flags.batch_size} ********')
                success = False
            if not success:
                self.restart()
# Entry point: parse flags, resolve the config path and launch training.
if __name__ == '__main__':
    FLAGS: Namespace = parser.parse_args()
    # resolve the json config path from the parsed flags
    FLAGS.config_path = get_config_path(FLAGS)
    main = Main(FLAGS)
    try:
        main.main()
    except KeyboardInterrupt:
        import logging
        log.info("Aborted. Bye-bye.")
        logging.shutdown()
|
{"hexsha": "7b823463d44bcd38071e619783e3f837afd284a3", "size": 5288, "ext": "py", "lang": "Python", "max_stars_repo_path": "mimic/main_mimic.py", "max_stars_repo_name": "Jimmy2027/MoPoE-MIMIC", "max_stars_repo_head_hexsha": "d167719b0dc7ba002b7421eb82a83e47d2437795", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-30T07:56:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-30T07:56:46.000Z", "max_issues_repo_path": "mimic/main_mimic.py", "max_issues_repo_name": "Jimmy2027/MoPoE-MIMIC", "max_issues_repo_head_hexsha": "d167719b0dc7ba002b7421eb82a83e47d2437795", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mimic/main_mimic.py", "max_forks_repo_name": "Jimmy2027/MoPoE-MIMIC", "max_forks_repo_head_hexsha": "d167719b0dc7ba002b7421eb82a83e47d2437795", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.3188405797, "max_line_length": 119, "alphanum_fraction": 0.6461800303, "include": true, "reason": "import numpy", "num_tokens": 1105}
|
#include <boost/circular_buffer.hpp>
#include <iostream>
#include <fstream>
#include <sstream>
#include <algorithm>
#include <range/v3/algorithm.hpp>
#include <range/v3/numeric.hpp>
#include <range/v3/view.hpp>
namespace views = ranges::views;
// Advent of Code 2017 day 2 (part 1): read whitespace-separated integers per
// line from the file named in argv[1]; sum, over all lines, the difference
// between each line's maximum and minimum value; print the total.
int main(int argc, char **argv)
{
  if (argc > 1) {
    std::ifstream ifs(argv[1]);
    std::string line;
    int n = 0;
    while (std::getline(ifs, line)) {
      std::istringstream iss(line);
      std::istream_iterator<int> b(iss);
      std::istream_iterator<int> e;
      std::vector<int> v;
      std::copy(b, e, std::back_inserter(v));
      // BUGFIX: minmax_element on an empty range returns end iterators;
      // dereferencing them is undefined behaviour. Skip blank lines.
      if (v.empty())
        continue;
      auto [pmin, pmax] = std::minmax_element(v.begin(), v.end());
      n += (*pmax - *pmin);
    }
    std::cout << n << std::endl;
  }
}
|
{"hexsha": "4e458759d26deebe795d4939bb6305e76105278c", "size": 728, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "aoc2017/aoc170201.cpp", "max_stars_repo_name": "jiayuehua/adventOfCode", "max_stars_repo_head_hexsha": "fd47ddefd286fe94db204a9850110f8d1d74d15b", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "aoc2017/aoc170201.cpp", "max_issues_repo_name": "jiayuehua/adventOfCode", "max_issues_repo_head_hexsha": "fd47ddefd286fe94db204a9850110f8d1d74d15b", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "aoc2017/aoc170201.cpp", "max_forks_repo_name": "jiayuehua/adventOfCode", "max_forks_repo_head_hexsha": "fd47ddefd286fe94db204a9850110f8d1d74d15b", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.0, "max_line_length": 66, "alphanum_fraction": 0.614010989, "num_tokens": 208}
|
import re
import csv
import string
import numpy as np
from nltk.corpus import wordnet
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import WordPunctTokenizer
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
def ModelBuilding(sms_data,sms_labels):
    """
    This is an example pipeline to building a text classifier.
    1. sampling
    2. TfidfVectorizer conversion
    3. building a naive_bayes model
    4. print the accuracy and other metrics
    5. print most relevant features
    """
    # sampling steps
    train_set_size = int(round(len(sms_data)*0.70))
    # i chose this threshold for 70:30 train and test split.
    print('The training set size for this classifier is ' + str(train_set_size) + '\n')
    # NOTE(review): the test split starts at train_set_size+1, so the sample at
    # index train_set_size is used by neither split -- confirm intended.
    x_train = np.array([''.join(el) for el in sms_data[0:train_set_size]])
    y_train = np.array([el for el in sms_labels[0:train_set_size]])
    x_test = np.array([''.join(el) for el in sms_data[train_set_size+1:len(sms_data)]])
    y_test = np.array([el for el in sms_labels[train_set_size+1:len(sms_labels)]])
    # We are building a TFIDF vectorizer here.
    vectorizer = TfidfVectorizer(min_df=2, ngram_range=(1, 2), stop_words='english', strip_accents='unicode', norm='l2')
    X_train = vectorizer.fit_transform(x_train)
    X_test = vectorizer.transform(x_test)
    # Naive Bayes.
    clf = MultinomialNB().fit(X_train, y_train)
    y_nb_predicted = clf.predict(X_test)
    print(y_nb_predicted)
    print(' \nConfusion_matrix:')
    cm = confusion_matrix(y_test, y_nb_predicted)
    print(cm)
    print('\nHere is the classification report:')
    print(classification_report(y_test, y_nb_predicted))
    # print the top features
    # NOTE(review): clf.coef_ and vectorizer.get_feature_names() are deprecated
    # in recent scikit-learn (feature_log_prob_ / get_feature_names_out) --
    # verify against the pinned sklearn version before upgrading.
    coefs = clf.coef_
    coefs_with_fns = sorted(zip(coefs[0], vectorizer.get_feature_names()))
    n = 10
    # pair the n lowest-weight features with the n highest-weight ones
    top = zip(coefs_with_fns[:n], coefs_with_fns[:-(n + 1):-1])
    for (coef_1, fn_1), (coef_2, fn_2) in top:
        print('\t%.4f\t%-15s\t\t%.4f\t%-15s' % (coef_1, fn_1, coef_2, fn_2))
def PreProcessing(text):
    """Clean raw SMS text for classification.

    Pipeline: strip punctuation and extra spaces, keep alphabetic tokens,
    drop English stopwords and words shorter than three letters, lowercase,
    keep only WordNet-recognized words, lemmatize. Returns the cleaned text
    joined by spaces, or None when no token survives.
    """
    # text = text.decode("utf8")
    # punctuation -> spaces, then squeeze runs of spaces and split into tokens
    cleaned = re.sub(' +', ' ', punctuation(text))
    words = cleaned.split(" ")
    # alphabetic tokens only (drops numbers and mixed tokens)
    words = [w for w in words if w.isalpha()]
    # drop English stopwords (checked before lowercasing, as in the original pipeline)
    stop = stopwords.words('english')
    words = [w for w in words if w not in stop]
    # keep words of at least three letters, lowercased
    words = [w.lower() for w in words if len(w) >= 3]
    # keep only tokens recognized by WordNet
    words = KeepRealWords(words)
    # lemmatize each surviving token
    lmtzr = WordNetLemmatizer()
    words = [lmtzr.lemmatize(w) for w in words]
    if not words:
        return None
    return " ".join(words)
def KeepRealWords(text):
    """Filter a list of strings down to WordPunct tokens known to WordNet."""
    wpt = WordPunctTokenizer()
    recognized = []
    for s in text:
        # empty strings simply yield no tokens
        for t in wpt.tokenize(s):
            if wordnet.synsets(t):
                recognized.append(t)  # only keep recognized words
    return recognized
def punctuation(text):
    """Replace every ASCII punctuation character in `text` with a single space."""
    table = str.maketrans({ch: ' ' for ch in string.punctuation})
    return text.translate(table)
def main():
    """Load the SMS spam corpus, preprocess each message, then train and
    evaluate the classifier."""
    import os  # local import: os is not imported at module level
    sms_data = []
    sms_labels = []
    # BUGFIX: the path was written with a bare backslash ('corpus\S...'),
    # which is an invalid escape sequence and breaks on non-Windows systems;
    # build it portably. `with` guarantees the file is closed.
    with open(os.path.join('corpus', 'SMSSpamCollection.txt'), encoding="utf8") as smsdata:
        csv_reader = csv.reader(smsdata, delimiter='\t')
        for line in csv_reader:
            sms_text = PreProcessing(line[1])
            if sms_text is not None:
                # label in the first column, cleaned text second
                sms_labels.append(line[0])
                sms_data.append(sms_text)
    # we are calling the model building function here
    ModelBuilding(sms_data, sms_labels)
# Run the full pipeline when executed as a script.
if __name__ == '__main__':
    main()
|
{"hexsha": "e18dafc0048a48d570760a126b94555c114b9962", "size": 4437, "ext": "py", "lang": "Python", "max_stars_repo_path": "NLP programmes in Python/8.Classification/classification.py", "max_stars_repo_name": "AlexandrosPlessias/NLP-Greek-Presentations", "max_stars_repo_head_hexsha": "4ae9d635a777f24bae5238b9f195bd17d00040ea", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "NLP programmes in Python/8.Classification/classification.py", "max_issues_repo_name": "AlexandrosPlessias/NLP-Greek-Presentations", "max_issues_repo_head_hexsha": "4ae9d635a777f24bae5238b9f195bd17d00040ea", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "NLP programmes in Python/8.Classification/classification.py", "max_forks_repo_name": "AlexandrosPlessias/NLP-Greek-Presentations", "max_forks_repo_head_hexsha": "4ae9d635a777f24bae5238b9f195bd17d00040ea", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.027972028, "max_line_length": 124, "alphanum_fraction": 0.6556231688, "include": true, "reason": "import numpy", "num_tokens": 1128}
|
#!/usr/bin/env python3
import time
import argparse
import numpy as np
import gym
import gym_minigrid
from gym_minigrid.wrappers import *
from gym_minigrid.window import Window
def redraw(img):
    """Show a frame in the window: the full grid by default, or the given
    observation image when --agent_view is active."""
    frame = img if args.agent_view else env.render('rgb_array', tile_size=args.tile_size)
    window.show_img(frame)
def reset():
    """Reset the environment (re-seeding when --seed was given) and redraw."""
    if args.seed != -1:
        env.seed(args.seed)
    observation = env.reset()
    if hasattr(env, 'mission'):
        # mission-based environments expose a textual goal
        print('Mission: %s' % env.mission)
        window.set_caption(env.mission)
    redraw(observation)
def step(action):
    """Execute `action`, print progress, then redraw (or reset on episode end)."""
    observation, reward, finished, _info = env.step(action)
    print('step=%s, reward=%.2f' % (env.step_count, reward))
    if not finished:
        redraw(observation)
        return
    print('done!')
    reset()
def key_handler(event):
    """Translate keyboard events into window commands or environment actions."""
    print('pressed', event.key)
    key = event.key
    # window-level commands
    if key == 'escape':
        window.close()
        return
    if key == 'backspace':
        reset()
        return
    # keys mapped directly onto attributes of env.actions
    bindings = {
        'left': 'left',
        'right': 'right',
        'up': 'forward',
        ' ': 'toggle',          # spacebar
        'pageup': 'pickup',
        'pagedown': 'drop',
        'enter': 'done',
        '1': 'timetravel_5',    # time travel
    }
    if key in bindings:
        step(getattr(env.actions, bindings[key]))
        return
    # further time-travel bindings ('2' -> timetravel_8, '3' -> timetravel_10)
    # existed here but are disabled
# ---- command line interface ----
parser = argparse.ArgumentParser()
parser.add_argument(
    "--env",
    help="gym environment to load",
    default='MiniGrid-MultiRoom-N6-v0'
)
parser.add_argument(
    "--seed",
    type=int,
    help="random seed to generate the environment with",
    default=-1
)
parser.add_argument(
    "--tile_size",
    type=int,
    help="size at which to render tiles",
    default=32
)
parser.add_argument(
    '--agent_view',
    default=False,
    help="draw the agent sees (partially observable view)",
    action='store_true'
)
args = parser.parse_args()
# build the environment; optionally wrap it to expose the agent's partial view
env = gym.make(args.env)
if args.agent_view:
    env = RGBImgPartialObsWrapper(env)
    env = ImgObsWrapper(env)
# interactive window driven by the keyboard handler above
window = Window('gym_minigrid - ' + args.env)
window.reg_key_handler(key_handler)
reset()
# Blocking event loop
window.show(block=True)
|
{"hexsha": "134fd71ba4e529544dbd99ecdb6a649ed2f7d2c5", "size": 2563, "ext": "py", "lang": "Python", "max_stars_repo_path": "manual_control.py", "max_stars_repo_name": "utnnproject/gym-minigrid", "max_stars_repo_head_hexsha": "9e8f9c12964dd36a3c940783d510be525a17e5a8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "manual_control.py", "max_issues_repo_name": "utnnproject/gym-minigrid", "max_issues_repo_head_hexsha": "9e8f9c12964dd36a3c940783d510be525a17e5a8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "manual_control.py", "max_forks_repo_name": "utnnproject/gym-minigrid", "max_forks_repo_head_hexsha": "9e8f9c12964dd36a3c940783d510be525a17e5a8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.3412698413, "max_line_length": 63, "alphanum_fraction": 0.6063207179, "include": true, "reason": "import numpy", "num_tokens": 634}
|
import numpy as np
import matplotlib.pyplot as plt
# set the data
x_data = np.linspace(0, 10)
y_data_1 = np.sin(x_data)
y_data_2 = np.cos(x_data)
y_data_3 = [i / 2 for i in y_data_1]  # half-amplitude sine
y_data_4 = [j / 2 for j in y_data_2]  # half-amplitude cosine
# make the plot
# demo of shared axes in a mixed 2x3 / 1x3 subplot layout:
# ax1/ax2 share y, ax4 shares x with ax1, ax5 shares x with ax2 and y with ax4;
# ax3 spans the full right column
ax1 = plt.subplot(2,3,1)
plt.plot(x_data, y_data_1)
plt.setp(ax1.get_xticklabels(), visible=False)  # hide x labels on the top row
ax2 = plt.subplot(2,3,2, sharey=ax1)
plt.plot(x_data, y_data_1)
plt.plot(x_data, y_data_2)
plt.setp(ax2.get_xticklabels(), visible=False)
plt.setp(ax2.get_yticklabels(), visible=False)  # y labels shown only on ax1
ax3 = plt.subplot(1,3,3)
plt.plot(x_data)
ax4 = plt.subplot(2,3,4, sharex=ax1)
plt.plot(x_data, y_data_1)
plt.plot(x_data, y_data_2)
plt.plot(x_data, y_data_3)
ax5 = plt.subplot(2,3,5, sharex=ax2, sharey=ax4)
plt.plot(x_data, y_data_1)
plt.plot(x_data, y_data_2)
plt.plot(x_data, y_data_3)
plt.plot(x_data, y_data_4)
plt.setp(ax5.get_yticklabels(), visible=False)
plt.show()
|
{"hexsha": "8ad964cd01c819c8f213f80fb9f8c5f411aaaac9", "size": 903, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/numpy/test2.py", "max_stars_repo_name": "lcary/tmp", "max_stars_repo_head_hexsha": "1ea8e06bc25d13f5be6a0ac578d3302ee2134a77", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/numpy/test2.py", "max_issues_repo_name": "lcary/tmp", "max_issues_repo_head_hexsha": "1ea8e06bc25d13f5be6a0ac578d3302ee2134a77", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/numpy/test2.py", "max_forks_repo_name": "lcary/tmp", "max_forks_repo_head_hexsha": "1ea8e06bc25d13f5be6a0ac578d3302ee2134a77", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.1538461538, "max_line_length": 48, "alphanum_fraction": 0.7331118494, "include": true, "reason": "import numpy", "num_tokens": 329}
|
import math
import numpy as np
import pandas as pd
from scipy.special import expit
import torch
def softmax(x):
    """Numerically stable softmax: exponentiate max-shifted scores, normalise."""
    shifted = np.asarray(x) - np.max(x)  # shift so the largest score is 0
    exps = np.exp(shifted)
    return exps / np.sum(exps)
def accuracy(y_pred, y_true, thresh):
    """Count detection outcomes by thresholding predicted confidences.

    Regroups the raw prediction tensor so its last two axes are
    (anchor, vector), thresholds the sigmoid of the confidence channel
    (index 4) against `thresh`, and compares with the ground-truth
    confidence channel of `y_true`.

    Returns a (tp, fp, fn) tuple of scalar tensors.
    """
    # Regroup the flat anchor axis: (B, Y, X, 5*V, 1) -> (B, Y, X, 5, V).
    grouped = y_pred.unsqueeze(4)
    grouped = torch.cat(torch.chunk(grouped, 5, dim=3), dim=4)
    grouped = grouped.transpose(4, 3)
    conf_pred = torch.sigmoid(grouped[..., 4])
    conf_true = y_true[..., 4]
    pred_pos = torch.ge(conf_pred, thresh)
    pred_neg = torch.lt(conf_pred, thresh)
    truth_pos = torch.ge(conf_true, thresh)
    truth_neg = torch.lt(conf_true, thresh)
    tp = torch.sum(pred_pos & truth_pos)
    fp = torch.sum(pred_pos & truth_neg)
    fn = torch.sum(pred_neg & truth_pos)
    return tp, fp, fn
def pred_to_box(pred_in, filenmz, ankbox, thresh):
    """Convert raw network output to a DataFrame of above-threshold detections.

    Args:
        pred_in: torch tensor of shape (batch, gridy, gridx, 5*vecsize)
            holding raw YOLO-style predictions (5 anchors per cell).
        filenmz: sequence of file names, one per batch element.
        ankbox: anchor sizes, indexable as ankbox[a, 0] / ankbox[a, 1]
            (width, height) -- assumed in grid units; TODO confirm.
        thresh: confidence threshold applied to sigmoid(raw confidence).

    Returns:
        pandas DataFrame with columns
        ['filen', 'xc', 'yc', 'wid', 'hei', 'conf', 'class1', ...],
        one row per detection whose confidence exceeds `thresh`.
    """
    # Reshape (B, Y, X, 5*V) -> (B, Y, X, 5, V): split the flat anchor
    # dimension into (anchor, vector) pairs.
    pred_in = pred_in.unsqueeze(4)
    pred_in = torch.chunk(pred_in, 5, dim=3)
    pred_in = torch.cat(pred_in, dim=4)
    pred_in = pred_in.transpose(3, 4)
    n_bat, boxsy, boxsx, ankz, vecsize = pred_in.shape
    nclass = vecsize - 5  # vector layout: [x, y, w, h, conf, class...]
    colnamez = ['filen', 'xc', 'yc', 'wid', 'hei', 'conf']
    for cl in range(nclass):
        colnamez.append('class' + str(cl + 1))
    confz = torch.sigmoid(pred_in[..., 4])
    rows = []  # collect one small frame per detection; concat once at the end
    for bt in range(n_bat):
        for by in range(boxsy):
            for bx in range(boxsx):
                for ak in range(ankz):
                    if confz[bt, by, bx, ak] > thresh:
                        # Decode centre relative to the whole image.
                        xc_out = (expit(pred_in[bt, by, bx, ak, 0].tolist()) + bx) / boxsx
                        yc_out = (expit(pred_in[bt, by, bx, ak, 1].tolist()) + by) / boxsy
                        # Width/height: exponential scaled by the anchor box.
                        wid_out = np.exp(pred_in[bt, by, bx, ak, 2].tolist()) * ankbox[ak, 0] / boxsx
                        hei_out = np.exp(pred_in[bt, by, bx, ak, 3].tolist()) * ankbox[ak, 1] / boxsy
                        cnf_out = expit(pred_in[bt, by, bx, ak, 4].tolist())
                        clz_out = softmax(pred_in[bt, by, bx, ak, 5:].tolist())
                        vec_out = [xc_out, yc_out, wid_out, hei_out, cnf_out]
                        vec_out.extend(clz_out.tolist())
                        vec_out = np.reshape(vec_out, (1, vecsize))
                        vec_out = pd.DataFrame(vec_out, columns=colnamez[1:])
                        vec_out.insert(0, 'filen', np.repeat(filenmz[bt], vec_out.shape[0]))
                        rows.append(vec_out)
    if rows:
        # pd.concat replaces DataFrame.append (removed in pandas 2.0) and
        # avoids quadratic copying from repeated per-row appends.
        boxes_out = pd.concat(rows, ignore_index=True)
    else:
        boxes_out = pd.DataFrame(columns=colnamez)
    return boxes_out
def calc_iou_centwh(box1, box2):
    """Intersection-over-union of two boxes in centre/width/height form.

    Each box must expose attributes xc, yc, wid, hei.
    """
    # Convert centre/size to corner coordinates.
    left1, right1 = box1.xc - box1.wid / 2, box1.xc + box1.wid / 2
    bot1, top1 = box1.yc - box1.hei / 2, box1.yc + box1.hei / 2
    left2, right2 = box2.xc - box2.wid / 2, box2.xc + box2.wid / 2
    bot2, top2 = box2.yc - box2.hei / 2, box2.yc + box2.hei / 2
    # Overlap extents clamp to zero when the boxes are disjoint.
    overlap_w = max(min(right1, right2) - max(left1, left2), 0)
    overlap_h = max(min(top1, top2) - max(bot1, bot2), 0)
    inter = overlap_w * overlap_h
    union = box1.wid * box1.hei + box2.wid * box2.hei - inter
    return inter / union
def accuracyiou(ypred, bndbxs, filenmz, ankbox, confthr, iouthr):
    """Match detections to truth boxes by IOU and count true positives.

    Args:
        ypred: raw network output tensor (passed through to pred_to_box).
        bndbxs: torch tensor of truths, shape (batch, maxboxes, 5) holding
            [class, xc, yc, wid, hei]; all-zero rows are padding.
        filenmz: file name per batch element.
        ankbox: anchor-box sizes (passed through to pred_to_box).
        confthr: confidence threshold for detections.
        iouthr: IOU threshold for a detection to count as a true positive.

    Returns:
        (predbox, tot_true, tot_tps): the detections DataFrame annotated
        with 'iou' and 'tp' columns, the total number of real (non-padding)
        truth boxes, and the total number of true-positive detections.
    """
    # Convert net output to detections; predbox has columns
    # [filen, xc, yc, wid, hei, conf, class1, ...].
    predbox = pred_to_box(ypred, filenmz, ankbox, confthr)
    # Build one truth DataFrame covering every image in the batch.
    bndbxs = bndbxs.numpy()
    truth_frames = []
    for fl in range(bndbxs.shape[0]):
        frame = pd.DataFrame(bndbxs[fl, :, :],
                             columns=["class", "xc", "yc", "wid", "hei"])
        frame['filen'] = filenmz[fl]
        truth_frames.append(frame)
    # pd.concat replaces DataFrame.append (removed in pandas 2.0).
    bndbxs_out = pd.concat(truth_frames, ignore_index=True)
    iouz = []
    bbxz = []
    # For each detection, find the truth box it overlaps most.
    for pb in range(predbox.shape[0]):
        iou_max = 0
        bb_ind = math.nan
        for bb in range(bndbxs_out.shape[0]):
            predb = predbox.iloc[pb]
            bndb = bndbxs_out.iloc[bb]
            # Skip zero-padding rows and truths from other images.
            if bndb.xc * bndb.yc > 0 and bndb.filen == predb.filen:
                iou = calc_iou_centwh(predb, bndb)
                if iou > iou_max:
                    iou_max = iou
                    bb_ind = bb
        # iou_max stays 0 and bb_ind stays nan when nothing overlaps.
        iouz.append(iou_max)
        bbxz.append(bb_ind)
    # One slot per detection; set to 1 where a detection is the best match
    # for some truth with IOU above the threshold.
    tps = np.repeat(0, len(iouz))
    bbxz = np.array(bbxz)
    iouz = np.array(iouz)
    tot_true = 0
    for img in range(bndbxs.shape[0]):
        # Count the real (non-padding) truth boxes for this image.
        bbxz_img = bndbxs_out[bndbxs_out.filen == filenmz[img]]
        bbxz_area = bbxz_img.xc * bbxz_img.yc
        tot_bbx = np.sum(bbxz_area > 0)
        tot_true += tot_bbx
        if predbox.shape[0] > 0:
            # Restrict to detections from this image.
            predz_mask = np.array(predbox.filen == filenmz[img])
            # NOTE(review): bbxz stores positional indices into the
            # concatenated truth frame, but this loop compares them against
            # 0..tot_bbx-1 only; for multi-image batches matches against
            # later images' truths may be missed -- confirm intent.
            for bb in range(tot_bbx):
                # Detections assigned to this truth within this image.
                fin_mask = np.logical_and(predz_mask, bbxz == bb)
                if np.sum(np.array(fin_mask, dtype=np.int32)) > 0:
                    max_iou = np.max(iouz[fin_mask])
                    if max_iou > iouthr:
                        # The best-overlapping detection is the true positive.
                        # dtype=int: np.int was removed in NumPy 1.24.
                        maxiou_mask = np.logical_and(
                            np.array(iouz == max_iou, dtype=int), fin_mask)
                        tps += maxiou_mask
    tot_tps = np.sum(tps)
    predbox["iou"] = iouz
    # dtype=bool: np.bool was removed in NumPy 1.24.  (A dead reassignment
    # of tps before the return was removed; it was never read.)
    predbox["tp"] = np.array(tps, dtype=bool)
    return predbox, tot_true, tot_tps
|
{"hexsha": "ade55143f79e11f2c69bad6db4ea407764131768", "size": 7841, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/yolo/yolo_accuracy.py", "max_stars_repo_name": "CMFell/phd_cnn_code", "max_stars_repo_head_hexsha": "cb343bc379f5b06241cead64089a41ae5a6fe167", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/yolo/yolo_accuracy.py", "max_issues_repo_name": "CMFell/phd_cnn_code", "max_issues_repo_head_hexsha": "cb343bc379f5b06241cead64089a41ae5a6fe167", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/yolo/yolo_accuracy.py", "max_forks_repo_name": "CMFell/phd_cnn_code", "max_forks_repo_head_hexsha": "cb343bc379f5b06241cead64089a41ae5a6fe167", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.205, "max_line_length": 103, "alphanum_fraction": 0.5733962505, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2330}
|
From Coq Require Import ZArith Reals Psatz.
From Coq Require Import Arith.Arith.
Require Import real_lemmas real_model.
From Coquelicot Require Import Coquelicot.
Set Bullet Behavior "Strict Subproofs".
Require Import Interval.Tactic.
Import Coq.Logic.FunctionalExtensionality.
Open Scope R_scope.
(* the function f is k times differentiable in the interval [a,b] *)
(* i.e. every derivative of order n <= k exists at every x in [a,b]. *)
Definition k_differentiable f k a b:=
forall x, a <= x <= b -> forall n:nat, (n<=k)%nat -> ex_derive_n f n x
.
(* f is infinitely differentiable: derivatives of every order exist at
   every real point. *)
Definition smooth_fun (f: R -> R): Prop :=
forall (x: R) (n: nat),
ex_derive_n f n x
.
(* Potential-gradient term of the harmonic oscillator: dU/dq = ω^2 * q. *)
Definition dUdq x ω := ω ^ 2 * x.
(* the continuous system of equations for the simple harmonic oscillator *)
(* p and q are smooth, with q' = p and p' = -ω^2 q (momentum/position form). *)
Definition Harmonic_oscillator_system (ω : R) (p q : R -> R) :=
smooth_fun p /\ smooth_fun q /\
forall t: R,
Derive_n q 1 t = p t /\
Derive_n p 1 t = - dUdq (q t) ω.
(* A harmonic-oscillator solution is (at least) 4-times differentiable in p
   and 3-times in q on any interval, since both components are smooth. *)
Lemma HOS_implies_k_diff:
forall p q ω t h,
Harmonic_oscillator_system ω p q ->
k_differentiable p 4 t (t + h) /\
k_differentiable q 3 t (t + h) .
Proof.
intros.
unfold Harmonic_oscillator_system in H.
destruct H as (C & D).
split; unfold smooth_fun in *;
unfold k_differentiable in *; intros.
-apply C.
-apply D.
Qed.
(* Energy conservation, differential form: along any trajectory of the
   oscillator the derivative of p^2 + ω^2 q^2 vanishes everywhere. *)
Lemma system_implies_cons_e_aux p q ω:
0 < ω ->
Harmonic_oscillator_system ω p q ->
forall t, Derive (fun t => p t ^ 2 + ω ^ 2 * q t ^ 2) t = 0.
Proof.
intros.
destruct H0 as (A & B & C).
rewrite Derive_plus.
-
rewrite Derive_pow.
rewrite Derive_scal.
rewrite Derive_pow.
simpl.
pose proof (C t) as Hp.
destruct Hp as (Hp1 & Hp2).
rewrite <- Hp1.
assert (- Derive_n p 1 t * 1/ ω ^ 2 = q t).
rewrite Hp2. unfold dUdq. field_simplify; try field; try nra.
rewrite <- H0.
replace (Derive_n q 1 t) with (Derive q t); auto.
replace (Derive_n p 1 t) with (Derive p t); auto.
field_simplify; try field; try nra.
all: (
specialize (A t 1%nat);
specialize (B t 1%nat);
try apply A; try apply B).
-
apply ex_derive_pow.
specialize (A t 1%nat);
specialize (B t 1%nat);
try apply A; try apply B.
-
apply ex_derive_scal.
apply ex_derive_pow.
specialize (A t 1%nat);
specialize (B t 1%nat);
try apply A; try apply B.
Qed.
(* Energy conservation, integral form: p^2 + ω^2 q^2 is constant in time.
   Proved via the mean value theorem and the differential form above. *)
Lemma system_implies_cons_e p q ω t0:
0 < ω ->
Harmonic_oscillator_system ω p q ->
forall t, (p t ^ 2 + ω ^ 2 * q t ^ 2) = (p t0 ^ 2 + ω ^ 2 * q t0 ^ 2).
Proof.
intros.
pose proof (Req_dec t t0) as Heq; destruct Heq.
-
destruct H0 as (A & B & C).
subst; auto.
-
pose proof system_implies_cons_e_aux p q ω H H0.
pose proof MVT_gen (fun t : R => p t ^ 2 + ω ^ 2 * q t ^ 2) t0 t
(Derive (fun t : R => p t ^ 2 + ω ^ 2 * q t ^ 2)) as MVT.
simpl in MVT.
simpl.
apply Rminus_diag_uniq.
replace (fun t : R => p t * (p t * 1) + ω * (ω * 1) * (q t * (q t * 1))) with
(fun t : R => (p t ^ 2 + (ω * q t) ^ 2 )) in MVT by (apply functional_extensionality => z; field; nra).
replace ( p t * (p t * 1) + ω * (ω * 1) * (q t * (q t * 1)) -
(p t0 * (p t0 * 1) + ω * (ω * 1) * (q t0 * (q t0 * 1))))
with
( p t ^ 2 + ω ^ 2 * q t ^ 2 - (p t0 ^ 2 + ω ^ 2 * q t0 ^ 2)) in MVT by field.
destruct H0 as (A & B & C).
assert (exists c : R,
Rmin t0 t <= c <= Rmax t0 t /\
p t ^ 2 + ω ^ 2 * q t ^ 2 - (p t0 ^ 2 + ω ^ 2 * q t0 ^ 2) =
Derive (fun t1 : R => p t1 ^ 2 + (ω * q t1) ^ 2) c * (t - t0)).
apply MVT.
intros.
apply Derive_correct.
apply (@ex_derive_plus R_AbsRing R_NormedModule);
apply ex_derive_pow.
specialize (A x 1%nat);
apply A.
apply ex_derive_scal.
specialize (B x 1%nat);
apply B.
intros.
apply derivable_continuous_pt.
apply ex_derive_Reals_0.
apply (@ex_derive_plus R_AbsRing R_NormedModule);
apply ex_derive_pow.
specialize (A x 1%nat); apply A.
apply ex_derive_scal.
specialize (B x 1%nat);
apply B.
destruct H0 as ( c & H3 & H4).
simpl in H4.
subst.
rewrite H4.
specialize (H2 c).
simpl in H2.
apply Rmult_eq_0_compat_r.
rewrite <- H2.
apply Derive_ext => ts; field.
Qed.
(* Energy conservation restated as invariance of the norm of (p, ω q). *)
Lemma system_implies_cons_e' p q ω t0 t:
0 < ω ->
Harmonic_oscillator_system ω p q ->
∥ (p t, ω * q t) ∥ = ∥ (p t0, ω * q t0) ∥.
Proof.
intros.
pose proof (system_implies_cons_e _ _ _ t0 H H0 t).
unfold Rprod_norm. f_equal. simpl. lra.
Qed.
(* relating derivatives of the continuous system for future rewrites *)
(* Closed forms for higher derivatives, obtained by repeatedly applying
   q' = p and p' = -ω^2 q:
   q'' = p', q''' = -ω^2 p, p'' = q''', p''' = ω^4 q, p'''' = ω^4 p. *)
Lemma Harm_sys_derive_eq p q ω:
0 < ω ->
Harmonic_oscillator_system ω p q ->
forall t,
Derive_n q 2 t = Derive_n p 1 t /\
Derive_n q 3 t = - ω^2 * p t /\
Derive_n p 2 t = Derive_n q 3 t /\
Derive_n p 3 t = ω^4 * q t /\
Derive_n p 4 t = ω^4 * p t.
Proof.
intros * Hω H t.
pose (t0 := t). clearbody t0.
generalize H; intros [_ [_ AB]].
destruct (AB t) as [A B].
assert (C :=system_implies_cons_e' _ _ _ t0 t Hω H).
clear AB.
do 2 apply proj2 in H.
assert (forall t, Derive_n q 2 t = Derive_n p 1 t).
- intros; replace (Derive_n q 2 t1) with
(Derive_n (Derive_n q 1) 1 t1) by auto.
apply Derive_n_ext; intros.
apply H; auto.
-
assert ((Derive_n (fun y : R => - dUdq (q y) ω) 1 t) =
(Derive_n (Derive_n q 1) 2 t )).
+
replace (Derive_n (Derive_n q 1) 2 t) with
(Derive_n (Derive_n q 2) 1 t) by auto.
symmetry.
apply Derive_n_ext. intros.
rewrite H0.
apply H.
+ split; auto; split.
* replace (Derive_n q 3 t) with
(Derive_n (fun y : R => - dUdq (q y) ω) 1 t).
rewrite <- A.
rewrite <- Ropp_mult_distr_l.
rewrite <- Derive_n_scal_l.
rewrite Derive_n_opp.
unfold dUdq; auto.
* split.
--
unfold dUdq in *.
replace (Derive_n q 3 t) with
(Derive_n (fun y : R => - dUdq (q y) ω) 1 t).
rewrite Coquelicot.Derive_nS.
replace (Derive q) with (Derive_n q 1); auto.
unfold dUdq.
apply Derive_n_ext. apply H.
-- split.
++
unfold dUdq in *.
replace ( ω ^ 4 * q t) with
( -ω ^ 2 *(-ω ^ 2 * q t)) by nra.
rewrite <- Ropp_mult_distr_l.
rewrite <- Ropp_mult_distr_l.
rewrite <- B.
replace (Derive_n p 3 t) with (Derive_n (Derive_n p 2) 1 t) by auto.
rewrite Ropp_mult_distr_l.
rewrite <- Derive_n_scal_l.
apply Derive_n_ext.
intros.
destruct (H t1) as ( J & K).
rewrite <- J.
replace (Derive_n p 2 t1) with (Derive_n (Derive_n p 1) 1 t1) by auto.
rewrite <- Derive_n_scal_l.
apply Derive_n_ext.
intros. specialize (H t2).
rewrite <- Ropp_mult_distr_l.
apply H.
++ rewrite <- A.
replace (Derive_n p 4 t) with
(Derive_n (Derive_n p 3) 1 t) by auto.
rewrite <- Derive_n_scal_l.
apply Derive_n_ext.
intros.
replace (ω ^ 4 * q t1) with
(- ω ^ 2 * Derive_n q 2 t1).
rewrite <- Derive_n_scal_l.
rewrite Coquelicot.Derive_nS.
apply Derive_n_ext.
intros.
replace (- ω ^ 2 * q t2) with
( Derive_n q 2 t2).
rewrite Coquelicot.Derive_nS.
replace (Derive p) with (Derive_n p 1); auto.
apply Derive_n_ext.
intros.
specialize (H t3).
symmetry; replace (Derive q t3) with (Derive_n q 1 t3) by auto.
apply H.
specialize (H0 t2).
rewrite H0.
specialize (H t2).
rewrite <- Ropp_mult_distr_l.
unfold dUdq in H; apply H.
specialize (H t1).
unfold dUdq in H.
replace (ω ^ 4 * q t1) with
( -ω ^ 2 *(-ω ^ 2 * q t1)) by nra.
destruct H as ( _ & K).
repeat rewrite <- Ropp_mult_distr_l.
rewrite <- K.
f_equal. f_equal.
apply H0.
Qed.
Close Scope R_scope.
|
{"author": "VeriNum", "repo": "VerifiedLeapfrog", "sha": "c8d07f86747bd9e44f4cb02f19a691cc895c1279", "save_path": "github-repos/coq/VeriNum-VerifiedLeapfrog", "path": "github-repos/coq/VeriNum-VerifiedLeapfrog/VerifiedLeapfrog-c8d07f86747bd9e44f4cb02f19a691cc895c1279/leapfrog_project/harmonic_oscillator_system.v"}
|
[STATEMENT]
lemma adjoint_add:
fixes A B :: "'a::conjugatable_field mat"
assumes "A \<in> carrier_mat n m" "B \<in> carrier_mat n m"
shows "adjoint (A + B) = adjoint A + adjoint B"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. adjoint (A + B) = adjoint A + adjoint B
[PROOF STEP]
apply (rule eq_matI)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>i j. \<lbrakk>i < dim_row (adjoint A + adjoint B); j < dim_col (adjoint A + adjoint B)\<rbrakk> \<Longrightarrow> adjoint (A + B) $$ (i, j) = (adjoint A + adjoint B) $$ (i, j)
2. dim_row (adjoint (A + B)) = dim_row (adjoint A + adjoint B)
3. dim_col (adjoint (A + B)) = dim_col (adjoint A + adjoint B)
[PROOF STEP]
using assms conjugatable_ring_class.conjugate_dist_add
[PROOF STATE]
proof (prove)
using this:
A \<in> carrier_mat n m
B \<in> carrier_mat n m
conjugate (?a + ?b) = conjugate ?a + conjugate ?b
goal (3 subgoals):
1. \<And>i j. \<lbrakk>i < dim_row (adjoint A + adjoint B); j < dim_col (adjoint A + adjoint B)\<rbrakk> \<Longrightarrow> adjoint (A + B) $$ (i, j) = (adjoint A + adjoint B) $$ (i, j)
2. dim_row (adjoint (A + B)) = dim_row (adjoint A + adjoint B)
3. dim_col (adjoint (A + B)) = dim_col (adjoint A + adjoint B)
[PROOF STEP]
by( auto simp add: adjoint_eval)
|
{"llama_tokens": 540, "file": "QHLProver_Complex_Matrix", "length": 3}
|
\<^marker>\<open>creator "Maximilian P. L. Haslbeck"\<close>
theory ERT_Of_IID_Loop_Classic
imports PGCL_With_State IID_Loops
begin
text \<open>This theory is OBSOLETE!
It also tries to prove Theorem 4 from @{cite batzESOP18} and follows the paper
more closely than the proof in Prove_Rule.\<close>
subsection "Misc"
(* Reindex a sum: sum over i in {0..<n} of f(Suc i) equals sum of f over {1..<Suc n}. *)
lemma sum_shift_index_Suc: "(\<Sum>i = 0..<n. f (Suc i)) = sum f {1..<Suc n} "
proof -
have "(sum (\<lambda>i. f (Suc i)) {0..<n})
= (sum ((\<lambda>i. f i) o (\<lambda>n. n+1)) {0..<n})"
by auto
also have "\<dots> = (sum ((\<lambda>i. f i)) ((\<lambda>n. n+1) ` {0..<n}))"
apply (rule sum.reindex[symmetric]) by auto
also have "... = (sum (\<lambda>i. f i) {1..<Suc n})" by simp
finally show ?thesis .
qed
(* For a monotone f, dropping the first term does not change the supremum. *)
lemma fixes f :: "nat \<Rightarrow> ennreal"
assumes "mono f"
shows SUP_shift_mono: "(\<Squnion>n. f (n+1) ) = (\<Squnion>n. f n)"
proof -
from assms have "mono (\<lambda>n. f (n+1))"
unfolding mono_def by auto
have *: "f ` UNIV = {f 0} \<union> (\<lambda>n. f (n+1)) ` UNIV"
apply (auto simp: image_iff) by (metis not0_implies_Suc)
have **: "Sup ((\<lambda>n. f (n+1)) ` UNIV) = (\<Squnion>n. f (n+1) )"
by simp
have k: "\<And>n. f 0 \<le> f (n+1)" using assms mono_def by auto
have "(\<Squnion>n. f n) = Sup ({f 0} \<union> (\<lambda>n. f (n+1)) ` UNIV)" apply(subst *) by simp
also have "\<dots> = Sup {f 0} \<squnion> Sup ((\<lambda>n. f (n+1)) ` UNIV)" by auto
also have "\<dots> = f 0 \<squnion> (\<Squnion>n. f (n+1) )" apply(subst **) by simp
also have "\<dots> = (\<Squnion>n. f (n+1) )" using k
by (meson Sup_upper2 UNIV_I antisym image_eqI le_sup_iff order_refl)
finally show ?thesis by simp
qed
(* The constant-one ennreal series diverges: suminf (1 ^ i) = top. *)
lemma sum1: "suminf ((^) (1::ennreal)) = \<top>"
proof -
have i: "((^) (1::ennreal)) = (%_. 1)" by auto
have ii: "(%_. 1::ennreal) sums \<infinity> \<longleftrightarrow> (\<lambda>x. ennreal (real x)) \<longlonglongrightarrow> \<infinity>"
unfolding sums_def unfolding tendsto_def by (auto simp: ennreal_of_nat_eq_real_of_nat)
have iii: "(\<lambda>x. ennreal (real x)) \<longlonglongrightarrow> \<infinity>"
using ennreal_of_nat_eq_real_of_nat of_nat_tendsto_top_ennreal by auto
have *: "(%_. 1::ennreal) sums \<infinity>" using ii iii by simp
show ?thesis unfolding i using * sums_iff by auto
qed
(* Scaling an increasing sequence by a nonnegative constant keeps it increasing. *)
lemma incseqmult: "incseq (\<lambda>i. f i) \<Longrightarrow> (c::ennreal)\<ge>0 \<Longrightarrow> incseq (\<lambda>i. c * f i)"
by (auto simp add: mult_left_mono mono_def)
(* Geometric series in ennreal for c < 1, by transfer from the real-valued
   geometric series (c < 1 forces c to be a finite real in [0,1)). *)
lemma geometric_sums_ennreal: "(c::ennreal) < 1 \<Longrightarrow> (\<lambda>i. c ^ i) sums (1 / (1 - c))"
proof -
assume "(c::ennreal) < 1"
then obtain r where r: "c=ennreal r" "r\<ge>0" "r<1"
by (metis ennreal_cases ennreal_less_one_iff not_le top.extremum)
have 1: "(\<lambda>i. ennreal ((^) r i)) = (\<lambda>i. c ^ i)"
by (auto simp add: r(1) ennreal_power r(2))
have 2: "ennreal (1 / (1 - r)) = (1 / (1 - c))"
by (metis diff_gt_0_iff_gt divide_ennreal ennreal_1 ennreal_minus r(1) r(2) r(3) zero_le_one)
have R: "(^) r sums (1 / (1 - r)) "
apply(rule geometric_sums) using r by auto
have "(\<lambda>i. ennreal ((^) r i)) sums ennreal (1 / (1 - r))"
apply(subst sums_ennreal) using r R by auto
then show "(\<lambda>i. c ^ i) sums (1 / (1 - c))"
unfolding r 1 2 by simp
qed
(* Closed form of the ennreal geometric series for c < 1. *)
lemma suminf_geometric_ennreal: "(c::ennreal) < 1 \<Longrightarrow> suminf (\<lambda>i. c ^ i) = (1 / (1 - c))"
by (rule sums_unique[symmetric]) (rule geometric_sums_ennreal)
subsection "Lemmas about Sup"
(* Pointwise multiplication by a fixed expectation g commutes with countable suprema. *)
lemma fixes g ::exp and f :: "nat \<Rightarrow> exp"
shows SUP_mult_left_ennreal_fun: "(\<Squnion>i. g * (f i) ) = g * (\<Squnion>i. (f i))"
by(auto simp: SUP_mult_left_ennreal SUP_image)
(* A supremum over a nonempty index set distributes into the then-branch of a
   pointwise if, since the else-branch does not depend on the index. *)
lemma Sup_If:
fixes g :: "nat \<Rightarrow> exp" and f :: "exp"
assumes "A \<noteq> {}"
shows "(\<Squnion>n\<in>A. (\<lambda>s. if b s then g n s else f s)) = (\<lambda>s. if b s then (\<Squnion>n\<in>A. g n s) else f s)"
proof -
have "\<And>s. A \<noteq> {} \<Longrightarrow> (\<Squnion>n\<in>A. f s) = f s" by auto
have "(\<Squnion>n\<in>A. (\<lambda>s. if b s then g n s else f s)) = (\<lambda>s. (\<Squnion>n\<in>A. if b s then g n s else f s))"
by (auto simp: SUP_image)
also have "\<dots> = (\<lambda>s. if b s then (\<Squnion>n\<in>A. g n s) else f s)"
apply(rule ext) using assms by auto
finally show ?thesis .
qed
(* UNPROVEN (discharged by sorry): if wp of C maps the indicator of b to 1
   at s, then any expectation masked by the negation of b is mapped to 0 at s.
   The commented-out induction below is a partial attempt; the cited paper
   argues via the program semantics instead. *)
lemma wp_missing: "wp (compile C) (\<lambda>s. if b s then 1 else 0) s = 1
\<Longrightarrow> wp (compile C) (\<lambda>s. if b s then 0 else f s) s = 0"
(* proof in appendix A4. of Batz Paper argues via the semantics
of the program C
*) (*
proof (induct C arbitrary: s)
case (Assign x1 x2)
then show ?case sorry
next
case (Seq C1 C2)
have "wp (compile (C1;; C2)) (\<lambda>s. if b s then 1 else 0) s = 1" apply auto
sorry
have a: "\<And>x. wp (compile C2) (\<lambda>s. if b s then 1 else 0) x = 1"
proof (rule ccontr)
fix x
assume " wp (compile C2) (\<lambda>s. if b s then 1 else 0) x \<noteq> 1"
with wp_le1 have "wp (compile C2) (\<lambda>s. if b s then 1 else 0) x < 1"
by (simp add: order.not_eq_order_implies_strict)
with Seq(3) mono_wp have "False" (* no !*)
qed
have 2: "wp (compile C2) (\<lambda>s. if b s then 0 else f s) = 0"
apply(rule) apply simp apply(rule Seq(2)) by (fact a)
show ?case by(auto simp: 2 wp0)
next
case (While x1 C)
then show ?case sorry
qed auto *) sorry
subsection "Proof of the new Prove Rule"
(* Key step for the loop unrolling: for an f-iid loop body C, one wp
   application over the truncated geometric sum factors into a product of
   the guard-masked expectation and a shifted geometric sum. *)
lemma assumes "fiid C b f" and "n>0"
shows lemma2a: "
wp (compile C) (\<lambda>s. if b s
then wp (compile C) (\<lambda>s. if \<not>b s then f s else 0) s
* sum (\<lambda>i. (wp (compile C) (\<lambda>s. if b s then 1 else 0) s) ^ i) {0..<n-1}
else 0) =
wp (compile C) (\<lambda>s. if \<not>b s then f s else 0) * (%s. sum (\<lambda>i. (wp (compile C) (\<lambda>s. if b s then 1 else 0) s) ^ i) {1..<n}) "
(is "?L = ?R")
proof -
let ?W = "wp (compile C) (\<lambda>s. if b s then 1 else 0)"
let ?wnf = "wp (compile C) (\<lambda>s. if \<not>b s then f s else 0)"
{
fix s i (* here is the meat of it! *)
have t: "((\<lambda>s. if \<^bold>\<not> b s then 1 else 0) * f) = (\<lambda>s. if b s then 0 else f s)" by auto
from assms(1) have a: "unaffected ?W C"
and b: "unaffected (wp (compile C) (\<lambda>s. if b s then 0 else f s)) C"
unfolding fiid_def lift2_def t by auto
have 2: " (wp (compile C) (%s. if b s
then wp (compile C) (\<lambda>s. if \<not>b s then f s else 0) s * ((?W s) ^ i) else 0) ) s
= wp (compile C) (lift2 b * (\<lambda>s. wp (compile C) (\<lambda>s. if \<not>b s then f s else 0) s * ?W s ^ i)) s"
(is "?A = ?B")
by(simp add: lift2_fold)
also have "\<dots> = wp (compile C) ( (lift2 b * ?wnf) * (\<lambda>s. ?W s ^ i)) s"
proof -
have k: "\<And>i. (lift2 b * (\<lambda>s. ?wnf s * ?W s ^ i))
= ( (lift2 b * ?wnf) * (\<lambda>s. ?W s ^ i))" by (auto simp add: mult.assoc)
show ?thesis by(simp only: k)
qed
also have "\<dots> = (wp (compile C) (lift2 b * wp (compile C) (\<lambda>s. if \<not>b s then f s else 0)) *
(\<lambda>s. (wp (compile C) (\<lambda>s. if b s then 1 else 0) s) ^ i)) s"
by (simp add: a scale_unaffected_expectations_iter)
also have "\<dots> = (?W * wp (compile C) (\<lambda>s. if \<not>b s then f s else 0) * (\<lambda>s. (?W s) ^ i)) s"
proof -
(* second important step *)
have l: "(\<lambda>s. if \<not>b s then f s else 0) = (\<lambda>s. if b s then 0 else f s)" by auto
show ?thesis by(simp only: l lift2_def scale_unaffected_expectations_right[OF b])
qed
also have "\<dots> = (wp (compile C) (\<lambda>s. if \<not>b s then f s else 0) * ?W * (\<lambda>s. (?W s) ^ i)) s" by (auto simp add: mult.commute)
also have "\<dots> = (wp (compile C) (\<lambda>s. if \<not>b s then f s else 0) * (\<lambda>s. (?W s) ^ (i+1))) s" (is "_ = ?D")
by (auto simp add: mult.assoc)
finally have "?A=?D" .
} note 2=this
{ fix n :: nat and s
assume "n>0"
then obtain n' where n': "n-1 = n'" "n = Suc n'"
using Suc_pred' by blast
have "(\<Sum>i = 0..<n - 1. ?W s ^ (i + 1)) = sum ((^) (?W s)) {1..<n} "
apply(simp only: n' diff_Suc_1 Suc_eq_plus1[symmetric]) by(rule sum_shift_index_Suc)
} note 3=this
have "(\<lambda>s. if b s then ?wnf s * sum (\<lambda>i. ?W s ^ i) {0..<n-1} else 0)
= (\<lambda>s. sum (\<lambda>i. if b s then ?wnf s * ?W s ^ i else 0) {0..<n-1})" (is "_=?L'")
by (auto simp add: sum_distrib_left)
then
have "?L = wp (compile C) ?L'" by simp
also have "\<dots> = (\<lambda>s. sum (\<lambda>i. wp (compile C) (%s. if b s
then ?wnf s * ?W s ^ i else 0) s) {0..<n-1})"
by(rule wp_linearSum)
also have "\<dots> = (\<lambda>s. sum (\<lambda>i. (?wnf * (\<lambda>s. ?W s ^ (i+1)) ) s) {0..<n-1})"
by(simp only: 2)
also have "\<dots> = (\<lambda>s. ?wnf s * (sum (\<lambda>i. ?W s ^ (i+1)) {0..<n-1}))"
by(auto simp: sum_distrib_left)
also have "\<dots> = (\<lambda>s. ?wnf s * (sum (\<lambda>i. ?W s ^ i) {1..<n}))"
by (simp only: 3[OF assms(2)])
finally show ?thesis by auto
qed
(* Peel off the first term of a sum: sum over {Suc n..<m} plus g n gives the sum over {n..<m}. *)
lemma sum_extract: "n<m \<Longrightarrow> sum (g::nat\<Rightarrow>ennreal) {Suc n..<m} + g n = sum g {n..<m}"
apply(simp only: sum_head_upt_Suc) by auto
(* Closed form for the n-th iterate of the characteristic functional of an
   f-iid loop: a truncated geometric series in the loop-reentry probability
   (induction on n, using lemma2a for the step). *)
lemma lemma2: fixes C :: spgcl
assumes "fiid C b f"
shows "1\<le>n \<Longrightarrow>
((chara b C f) ^^ n) (\<bottom>::exp) =
(\<lambda>s. if b s
then wp (compile C) (\<lambda>s. if \<not>b s then f s else 0) s
* sum (\<lambda>i. (wp (compile C) (\<lambda>s. if b s then 1 else 0) s) ^ i) {0..<n-1}
else f s)"
proof(induction n rule: Nat.dec_induct)
case base
have t: "(\<lambda>a. 0) = (0::exp)" by auto
show ?case
by (auto simp add: chara_def bot0 t wp0 intro!: sum.empty)
next
case (step n)
let ?C = "chara b C f"
let ?W = "wp (compile C) (\<lambda>s. if b s then 1 else 0)"
let ?F = "wp (compile C) (\<lambda>s. if \<not> b s then f s else 0)"
let ?I = "%s. ?F s * sum ((^) (?W s)) {0..<n - 1}"
have i: "(\<lambda>s. if b s then ?I s else f s) = lift2 b * ?I + lift2 (\<lambda>s. \<not> b s) * f"
unfolding lift2_def by auto
have "(?C ^^ Suc n) \<bottom> = ?C ((?C ^^ n) \<bottom>)" by auto
also have "\<dots> = ?C ((\<lambda>s. if b s then ?I s else f s))" by(simp only: step)
also have "\<dots> = lift2 b * (wp (compile C) (lift2 b * ?I) + wp (compile C) (lift2 (\<lambda>s. \<not> b s) * f))
+ lift2 (\<lambda>s. \<not> b s) * f" (is "?D=?E") by (simp add: chara_alt i wp_linear')
also
have 1: "wp (compile C) (lift2 b * ?I) = wp (compile C) (\<lambda>s. if \<not> b s then f s else 0)
* (\<lambda>s. sum ((^) (wp (compile C) (\<lambda>s. if b s then 1 else 0) s)) {1..<n})" (is "_=?WI")
unfolding lift2_fold[symmetric] apply(rule lemma2a) using step assms by auto
have 2: "(wp (compile C) (lift2 (\<lambda>s. \<not> b s) * f))
* (\<lambda>s. (wp (compile C) (\<lambda>s. if b s then 1 else 0) s) ^ 0) = wp (compile C) (lift2 (\<lambda>s. \<not> b s) * f)"
by auto
have "?E = lift2 b * (?WI + (wp (compile C) (lift2 (\<lambda>s. \<not> b s) * f))
* (\<lambda>s. (wp (compile C) (\<lambda>s. if b s then 1 else 0) s) ^ 0)) +
lift2 (\<lambda>s. \<not> b s) * f" by (simp only: 1 2)
also have "\<dots> = lift2 b * (?F * ((\<lambda>s. sum ((^) (?W s)) {1..<n}) + (\<lambda>s. ?W s ^ 0)))
+ lift2 (\<lambda>s. \<not> b s) * f" (is "_=?g")
by (simp add: distrib_left lift2_fold[symmetric])
also
from step have z: "((\<lambda>s. sum ((^) (?W s)) {1..<n}) + (\<lambda>s. ?W s ^ 0)
= (\<lambda>s. sum ((^) (?W s)) {0..<n}))"
by (auto simp del: power.power_0 intro!: ext sum_extract)
have "?g = lift2 b * (?F * ((\<lambda>s. sum ((^) (?W s)) {0..<n}))) + lift2 (\<lambda>s. \<not> b s) * f"
by(simp only: z)
also have "\<dots> = (\<lambda>s. if b s then ?F s * sum ((^) (?W s)) {0..<Suc n - 1} else f s)"
by (auto simp: lift2_def)
finally show ?case .
qed
(* Weakest preexpectation of an f-iid while loop in closed form: the
   guard-masked expectation divided by (1 - loop-reentry probability),
   obtained as the supremum of the iterates from lemma2 and the geometric
   series. Depends on the sorry'd wp_missing for the reentry-probability-1
   case. *)
lemma assumes "fiid C b f"
shows thm3: (* weakest-preexpectation of f-iid loops *)
"wp (compile (While b C)) f =
(\<lambda>s. if b s
then (wp (compile C) (\<lambda>s. if b s then 0 else f s) s) / (1 - wp (compile C) (\<lambda>s. if b s then 1 else 0) s)
else f s)"
proof -
let ?I = "(wp (compile C) (\<lambda>s. if b s then 1 else 0))"
let ?Li = "(%n s. wp (compile C) (\<lambda>s. if \<not>b s then f s else 0) s
* sum (\<lambda>i. (?I s) ^ i) {0..<n-1})"
let ?L = "(%n. (\<lambda>s. if b s
then ?Li n s
else f s))"
have ttt: "{1..} = {x::nat. x>0}" by auto
have tt: "(\<lambda>i::nat. ((chara b C f) ^^ i) \<bottom>) ` {1..}
= ?L ` {1..}" using lemma2[OF assms] by force
have "(chara b C f ^^ 0) \<bottom> = 0" unfolding bot0 by simp
have t: "({0::nat}\<union>{1..}) = UNIV" by auto
have "wp (compile (While b C)) f
= (\<Squnion>i. ((chara b C f) ^^ i) \<bottom>)" using wp_sup by auto (* by definition 2 *)
also have "\<dots> = SUPREMUM ({0}\<union>{1..}) (\<lambda>i::nat. ((chara b C f) ^^ i) \<bottom>)"
unfolding t by auto
also have "\<dots> = (\<Squnion>i\<in>{1..}. (chara b C f ^^ i) \<bottom>)" by auto
also have "\<dots> = (\<Squnion>n\<in>{1..}. ?L n)" unfolding tt by simp
also have "\<dots> = (\<Squnion>n\<in>{x. 0 < x}. ?L n)" unfolding ttt by auto
also have "\<dots> = (\<lambda>s. if b s
then wp (compile C) (\<lambda>s. if \<not>b s then f s else 0) s
* suminf ((^) (wp (compile C) (\<lambda>s. if b s then 1 else 0) s))
else f s)" (* pull the limit inwards *)
proof -
have 1: "(\<Squnion>n\<in>{x. 0 < x}. ?L n) = (\<lambda>s. if b s then (\<Squnion>n\<in>{x. 0 < x}. ?Li n s) else f s)"
apply (rule Sup_If) by auto
have 2: "\<And>s. (\<Squnion>n\<in>{x. 0 < x}. ?Li n s) = wp (compile C) (\<lambda>s. if \<not>b s then f s else 0) s
* (\<Squnion>n\<in>{x. 0 < x}. sum (\<lambda>i. (?I s) ^ i) {0..<n-1})"
by (simp add: SUP_mult_left_ennreal)
{ fix s
have *: "(\<lambda>n. sum (\<lambda>i. (?I s) ^ i) {0..<n-1}) ` {x::nat. 0 < x}
= (\<lambda>n. sum (\<lambda>i. (?I s) ^ i) {0..<n}) ` UNIV"
using image_iff by fastforce
have t: "\<And>n. {0::nat..<n} = {..<n}" by auto
have "(\<Squnion>n\<in>{x. 0 < x}. sum (\<lambda>i. (?I s) ^ i) {0..<n-1})
= suminf (\<lambda>i. (?I s) ^ i)"
unfolding * by(simp only: t suminf_eq_SUP)
} note 3 = this
show ?thesis unfolding 1 2 3 by simp
qed
also have "\<dots> = (\<lambda>s. if b s
then (wp (compile C) (\<lambda>s. if b s then 0 else f s) s) / (1 - wp (compile C) (\<lambda>s. if b s then 1 else 0) s)
else f s)"
apply(rule ext) subgoal for s
proof (cases "b s")
case b: True
have kk: "(\<lambda>s. if \<not> b s then f s else 0) = (\<lambda>s. if b s then 0 else f s)" by auto
show ?thesis
proof (cases "?I s < 1")
case True (* use closed form of geometric series *)
have k: "(\<Sum>n. (?I s) ^ n) = 1 / (1 - ?I s)" using True
by (rule suminf_geometric_ennreal)
from b True show ?thesis by (auto simp: k kk ennreal_times_divide)
next
case False
with wp_le1 have i: "?I s = 1"
by (metis (mono_tags, lifting) eq_refl linorder_cases not_le zero_le_one)
then have n: "wp (compile C) (\<lambda>s. if b s then 0 else f s) s = 0"
by(rule wp_missing)
from b show ?thesis by (simp add: kk i n)
qed
qed simp
done
finally show ?thesis .
qed
(* Sanity checks for the ennreal division-by-zero conventions used in thm3. *)
lemma "(0::ennreal) / 0 = 0" by simp
lemma "(1::ennreal) / 0 = \<infinity>" by simp
(* Lemma 4: closed form for the n-th Kleene iterate (n \<ge> 1) of the
   characteristic runtime functional "charaErt b C 0" applied to the
   bottom expectation.  On states satisfying the guard b, the iterate is
   1 plus the one-round body runtime scaled by a partial geometric sum
   in the "stay in the loop" probability wp (compile C) (lift2 b), plus
   a second partial geometric sum; off the guard it is just 1.
   Assumptions: the guard is independent of the body's effect for post 0
   (fiid), the body terminates almost surely (wp (compile C) 1 = 1), and
   the body's expected runtime for post 0 is unaffected by the body. *)
lemma lemma4:
  assumes
    "fiid C b 0"
    and
    "wp (compile C) 1 = 1"
    and
    "unaffected (ert (compile C) 0) C"
  shows
    "n\<ge>1 \<Longrightarrow> ((charaErt b C 0) ^^ n) (\<bottom>::exp) = (\<lambda>s. 1 + (if b s then ert (compile C) 0 s * sum (\<lambda>i. wp (compile C) (lift2 b) s ^ i) {0..<n}
              + sum (\<lambda>i. wp (compile C) (lift2 b) s ^ i) {0..<n-1}
          else 0))"
  (* Induction upwards from the base n = 1 via Nat.dec_induct. *)
proof(induction n rule: Nat.dec_induct)
  case base
  have t: "(\<lambda>a. 0) = (0::exp)" by auto
  show ?case
    by (auto simp add: charaErt_def t bot0)
next
  case (step n)
  (* Abbreviations: ?C = characteristic functional, ?W = probability of
     staying in the loop after one body run, ?E = expected runtime of
     one body run (post 0), ?I = the induction hypothesis' closed form
     for the n-th iterate. *)
  let ?C = "charaErt b C 0"
  let ?W = "wp (compile C) (lift2 b)"
  let ?E= "ert (compile C) 0"
  let ?I = "(\<lambda>s. 1 + (if b s then ?E s * sum ((^) (?W s)) {0..<n} + sum ((^) (?W s)) {0..<n- 1} else 0))"
  have "unaffected (wp (compile C) (\<lambda>s. if b s then 1 else 0)) C"
    using assms by (simp add: lift2_def fiid_def)
  (* Unaffectedness lets powers of ?W be pulled out of wp applications. *)
  then have pullout: "\<And>c s i. wp (compile C) (c * (%s. (?W s) ^ i)) s
            = (wp (compile C) c * (%s. (?W s) ^ i)) s"
    by (auto simp: scale_unaffected_expectations_iter lift2_def)
  (* eq6: push wp through the guarded, ?E-scaled partial geometric sum;
     the summation index shifts from {0..<n} to {1..<Suc n}. *)
  { fix n
    have 1: "lift2 b * ?E * (\<lambda>s. sum ((^) (?W s)) {0..<n})
          = (\<lambda>s. sum (\<lambda>i. lift2 b s * ?E s * (?W s) ^ i) {0..<n})"
      by (auto simp: sum_distrib_left)
    have 2: "\<And>i. (%s. lift2 b s * ?E s * (?W s) ^ i) = ((lift2 b * ?E) * (%s. (?W s) ^ i))"
      by (auto simp add: mult.assoc)
    have 3: "wp (compile C) (lift2 b * ?E) = wp (compile C) (lift2 b) * ?E"
      apply(rule scale_unaffected_expectations_right) by (fact assms(3))
    have 4: "\<And>s. (\<lambda>i. (wp (compile C) (lift2 b) * ?E * (%s. (?W s) ^ i)) s)
          = (\<lambda>i. ?E s * ((%s. (?W s) ^ Suc i)) s)"
      by (auto simp add: mult.commute mult.left_commute)
    have "wp (compile C) (lift2 b * ?E * (\<lambda>s. sum ((^) (?W s)) {0..<n}))
        = wp (compile C) (\<lambda>s. sum (\<lambda>i. lift2 b s * ?E s * (?W s) ^ i) {0..<n})"
      by(simp only: 1)
    also have "\<dots> = (%s. sum (\<lambda>i. wp (compile C) (%s. lift2 b s * ?E s * (?W s) ^ i) s) {0..<n})"
      by(rule wp_linearSum)
    also have "\<dots> = (%s. sum (\<lambda>i. wp (compile C) ((lift2 b * ?E) * (%s. (?W s) ^ i)) s) {0..<n})"
      by(simp only: 2)
    also have "\<dots> = (%s. sum (\<lambda>i. (wp (compile C) (lift2 b * ?E) * (%s. (?W s) ^ i)) s) {0..<n})"
      by(simp only: pullout)
    also have "\<dots> = (%s. sum (\<lambda>i. (wp (compile C) (lift2 b) * ?E * (%s. (?W s) ^ i)) s) {0..<n})"
      by(simp only: 3)
    also have "\<dots> = (%s. sum (\<lambda>i. ?E s * ((%s. (?W s) ^ Suc i)) s) {0..<n})"
      by(simp only: 4)
    also have "\<dots> = ?E * (\<lambda>s. sum (\<lambda>i. (?W s) ^ Suc i) {0..<n} )"
      by(auto simp add: sum_distrib_left)
    also have "\<dots> = ?E * (\<lambda>s. sum (\<lambda>i. (?W s) ^ i) {1..<Suc n} )"
      by (simp only: sum_shift_index_Suc)
    finally have "wp (compile C) (lift2 b * ?E * (\<lambda>s. sum ((^) (?W s)) {0..<n}))
        = ?E * (%s. sum ((^) (?W s)) {1..<Suc n})" .
  } note eq6=this
  (* eq7: the same index shift for the unscaled partial geometric sum. *)
  { fix n
    have 1: "(lift2 b * (\<lambda>s. sum ((^) (?W s)) {0..<n}))
          = ((\<lambda>s. sum (\<lambda>i. lift2 b s * (?W s) ^ i) {0..<n}))"
      by (auto simp: sum_distrib_left)
    have 2: "\<And>s. (\<lambda>i. (?W * (%s. (?W s) ^ i)) s) = (\<lambda>i. (?W s) ^ Suc i)"
      apply rule by auto
    have "wp (compile C) (lift2 b * (\<lambda>s. sum ((^) (?W s)) {0..<n}))
        = wp (compile C) ((\<lambda>s. sum (\<lambda>i. lift2 b s * (?W s) ^ i) {0..<n}))"
      by(simp only: 1)
    also have "\<dots> = (%s. sum (\<lambda>i. wp (compile C) (%s. lift2 b s * (?W s) ^ i) s) {0..<n})"
      by(rule wp_linearSum)
    also have "\<dots> = (%s. sum (\<lambda>i. wp (compile C) (lift2 b * (\<lambda>s. (?W s) ^ i)) s) {0..<n})"
      by (auto simp add: times_fun_def)
    also have "\<dots> = (%s. sum (\<lambda>i. (?W * (%s. (?W s) ^ i)) s) {0..<n})"
      by (simp only: pullout)
    also have "\<dots> = (%s. sum (\<lambda>i. (?W s) ^ Suc i) {0..<n})"
      by (simp only: 2)
    also have "\<dots> = (%s. sum (\<lambda>i. (?W s) ^ i) {1..<Suc n})"
      by (simp only: sum_shift_index_Suc)
    finally have "wp (compile C) (lift2 b * (\<lambda>s. sum ((^) (?W s)) {0..<n}))
        = (%s. sum ((^) (?W s)) {1..<Suc n})" .
  } note eq7=this
  (* Step case: unfold one application of ?C on the closed form given by
     the induction hypothesis and re-associate the sums. *)
  have "(?C ^^ Suc n) \<bottom> = ?C ((?C ^^ n) \<bottom>)" by auto
  also have "\<dots> = ?C ?I" by(simp only: step)
  also have "\<dots> = 1 + (lift2 b) * ert (compile C) ?I"
    unfolding charaErt_alt by simp
  (* Split the body's ert on ?I into runtime part plus wp part. *)
  also have "\<dots> = 1 + (lift2 b) * (ert (compile C) 0 + wp (compile C) ?I)"
    using decompose_ert by metis (* paper has different order of the last two steps, but does not matter! *)
  also have "\<dots> = 1 + lift2 b * (ert (compile C) 0
      + wp (compile C) (\<lambda>s. 1 + (lift2 b * (\<lambda>s. ert (compile C) 0 s * sum ((^) (wp (compile C) (lift2 b) s)) {0..<n}
              + sum ((^) (wp (compile C) (lift2 b) s)) {0..<n - 1})) s))"
    unfolding lift2_def apply clarsimp
  proof -
    have "1 + (\<lambda>f. if b f then 1 else 0) * (ert (compile C) 0 + wp (compile C) (\<lambda>f. 1 + (if b f then ert (compile C) 0 f * sum ((^) (wp (compile C) (\<lambda>f. if b f then 1 else 0) f)) {0..<n} + sum ((^) (wp (compile C) (\<lambda>f. if b f then 1 else 0) f)) {0..<n - 1} else 0))) = 1 + (\<lambda>f. if b f then 1 else 0) * (ert (compile C) 0 + wp (compile C) (\<lambda>f. 1 + (if b f then 1 else 0) * (ert (compile C) 0 f * sum ((^) (wp (compile C) (\<lambda>f. if b f then 1 else 0) f)) {0..<n} + sum ((^) (wp (compile C) (\<lambda>f. if b f then 1 else 0) f)) {0..<n - Suc 0}))) \<or> (\<forall>f. 1 + (if b f then ert (compile C) 0 f * sum ((^) (wp (compile C) (\<lambda>f. if b f then 1 else 0) f)) {0..<n} + sum ((^) (wp (compile C) (\<lambda>f. if b f then 1 else 0) f)) {0..<n - 1} else 0) = 1 + (if b f then 1 else 0) * (ert (compile C) 0 f * sum ((^) (wp (compile C) (\<lambda>f. if b f then 1 else 0) f)) {0..<n} + sum ((^) (wp (compile C) (\<lambda>f. if b f then 1 else 0) f)) {0..<n - Suc 0}))"
      by simp
    then show "1 + (\<lambda>f. if b f then 1 else 0) * (ert (compile C) 0 + wp (compile C) (\<lambda>f. 1 + (if b f then ert (compile C) 0 f * sum ((^) (wp (compile C) (\<lambda>f. if b f then 1 else 0) f)) {0..<n} + sum ((^) (wp (compile C) (\<lambda>f. if b f then 1 else 0) f)) {0..<n - 1} else 0))) = 1 + (\<lambda>f. if b f then 1 else 0) * (ert (compile C) 0 + wp (compile C) (\<lambda>f. 1 + (if b f then 1 else 0) * (ert (compile C) 0 f * sum ((^) (wp (compile C) (\<lambda>f. if b f then 1 else 0) f)) {0..<n} + sum ((^) (wp (compile C) (\<lambda>f. if b f then 1 else 0) f)) {0..<n - Suc 0})))"
      by presburger
  qed (*refine *)
  also have "\<dots> = 1 + lift2 b * (ert (compile C) 0 + wp (compile C) (\<lambda>s. 1 + (lift2 b * (\<lambda>s. ert (compile C) 0 s * sum ((^) (wp (compile C) (lift2 b) s)) {0..<n}) + lift2 b * (%s. sum ((^) (wp (compile C) (lift2 b) s)) {0..<n - 1})) s))"
    by (simp add: distrib_left)
  also
  let ?A = "lift2 b * ert (compile C) 0 * (%s. sum ((^) (wp (compile C) (lift2 b) s)) {0..<n})"
  let ?B = "(lift2 b * (%s. sum ((^) (wp (compile C) (lift2 b) s)) {0..<n - 1}))"
  have "\<dots> = 1 + lift2 b * (ert (compile C) 0 + wp (compile C) ((\<lambda>s. 1) + ?A + ?B))" (is "_= 1 + lift2 b * ?R")
  proof -
    have t: "(lift2 b * (\<lambda>s. ert (compile C) 0 s * sum ((^) (?W s)) {0..<n}) + lift2 b * (\<lambda>s. sum ((^) (?W s)) {0..<n - 1}))
        = (lift2 b * ert (compile C) 0 * (\<lambda>s. sum ((^) (?W s)) {0..<n}) + lift2 b * (\<lambda>s. sum ((^) (?W s)) {0..<n - 1}))"
      apply(rule) by (simp add: mult.assoc)
    have tt: "(\<lambda>s. 1 + (lift2 b * (\<lambda>s. ert (compile C) 0 s * sum ((^) (?W s)) {0..<n}) + lift2 b * (\<lambda>s. sum ((^) (?W s)) {0..<n - 1})) s)
        = ((\<lambda>s. 1) + lift2 b * ert (compile C) 0 * (\<lambda>s. sum ((^) (?W s)) {0..<n}) + lift2 b * (\<lambda>s. sum ((^) (?W s)) {0..<n - 1}))"
      apply(simp only: t) by auto
    show ?thesis by(simp only: tt)
  qed
  also
  (* Apply linearity of wp, almost-sure termination (assms(2)), and the
     index-shift facts eq6/eq7; then absorb the ^0 terms to turn the
     shifted sums back into sums starting at 0. *)
  { have "wp (compile C) ((\<lambda>s. 1) + ?A + ?B)
        = wp (compile C) 1 + wp (compile C) ?A + wp (compile C) ?B"
      apply (simp only: wp_linear') by (metis one_fun_apply) \<comment> linearity
    also have "\<dots> = 1 + ?E * (%s. sum ((^) (?W s)) {1..<Suc n}) + (%s. sum ((^) (?W s)) {1..<n})"
      apply(simp only: assms(2) eq6 eq7) using step by simp
    finally have k: "wp (compile C) ((\<lambda>s. 1) + ?A + ?B) = 1 + ?E * (%s. sum ((^) (?W s)) {1..<Suc n}) + (%s. sum ((^) (?W s)) {1..<n})"
      .
    have "?R =
        (?E + ?E * (%s. sum ((^) (?W s)) {1..<Suc n})) + (1 + (%s. sum ((^) (?W s)) {1..<n}))"
      apply(simp only: k) by auto
    also have "\<dots> = (?E * (1 + (%s. sum ((^) (?W s)) {Suc 0..<Suc n}))) + (1 + (%s. sum ((^) (?W s)) {Suc 0..<n}))"
      by (simp add: distrib_left)
    also have "\<dots> = (?E * ((%s. ?W s ^ 0 + sum ((^) (?W s)) {Suc 0..<Suc n}))) + ((%s. ?W s ^ 0 + sum ((^) (?W s)) {Suc 0..<n}))"
      by auto
    also have "\<dots> = (?E * ((%s. sum ((^) (?W s)) {0..<Suc n}))) + ((%s. sum ((^) (?W s)) {0..<n}))"
      using step by(simp only: sum_head_upt_Suc[symmetric])
    finally have "?R = (?E * ((%s. sum ((^) (?W s)) {0..<Suc n}))) + ((%s. sum ((^) (?W s)) {0..<n}))" .
  } note k=this
  have "1 + lift2 b * ?R = 1 + lift2 b * ((?E * ((%s. sum ((^) (?W s)) {0..<Suc n}))) + ((%s. sum ((^) (?W s)) {0..<n})))"
    by(simp only: k)
  also have "\<dots> = (\<lambda>s. 1 + (if b s then ert (compile C) 0 s * sum ((^) (wp (compile C) (lift2 b) s)) {0..<Suc n} + sum ((^) (wp (compile C) (lift2 b) s)) {0..<Suc n - 1} else 0))"
    by (auto simp: lift2_def)
  finally show ?case .
qed
(* "Hauptnenner" (German: common denominator): in ennreal the two
   fractions over the same denominator combine unconditionally, even
   when the denominator is 0 or \<infinity>. Used to merge the runtime and
   wp fractions in thm4. *)
lemma hauptnenner: "(eb::ennreal) / ea + e / ea = (eb + e) / ea"
  by (metis (no_types) comm_semiring_class.distrib ennreal_times_divide mult.right_neutral)
(* Theorem 4: closed-form expected runtime of an f-i.i.d. loop.
   If the guard/body pair is f-independent-and-identically-distributed
   (fiid), the body terminates almost surely, and the body's runtime for
   post 0 is unaffected by the body, then the ert of "while b do C" with
   post-expectation f is: 1 plus, on guard states, the expected cost of
   the remaining execution divided by the probability (1 - ?I s) of
   leaving the loop — using the ennreal division conventions checked by
   the two sanity lemmas above when that probability is 0. *)
theorem thm4: assumes
  "fiid C b f" and
  "wp (compile C) 1 = 1" (* loop body terminates almost-surely *)
  "unaffected (ert (compile C) 0) C"
shows "ert (compile (While b C)) f = (%s. 1 + (if b s then (1 + ert (compile C) (\<lambda>s. if b s then 0 else f s) s)
                    /
                  (1- (wp (compile C) (\<lambda>s. if b s then 1 else 0)) s)
          else f s))" (is "?E f = ?R f")
proof -
  have k: "(\<lambda>s. if b s then 0 else 0 s) = 0" "\<And>a. 0 a = 0" by auto
  (* fiid for the given f specializes to fiid for post-expectation 0. *)
  have 2: "fiid C b 0"
    apply (auto simp: unaffected_def Vars_def fiid_def k wp0)
    using assms(1) fiid_def unaffaccted_fun_upd by fastforce
  let ?I = "(wp (compile C) (\<lambda>s. if b s then 1 else 0))"
  (* Step 1: the theorem for post-expectation 0, via the closed form of
     the iterates (lemma4) and the geometric series. *)
  have 1: "?E 0 = ?R 0"
  proof -
    (* analogous to thm3 *)
    let ?L = "(\<lambda>n s. 1 + (if b s then ert (compile C) 0 s * sum (\<lambda>i. wp (compile C) (lift2 b) s ^ i) {0..<n}
              + sum (\<lambda>i. wp (compile C) (lift2 b) s ^ i) {0..<n-1}
          else 0))"
    have t: "({0::nat}\<union>{1..}) = UNIV" by auto
    have tt: "(\<lambda>i::nat. ((charaErt b C 0) ^^ i) \<bottom>) ` {1..}
        = ?L ` {1..}" using lemma4[OF 2 assms(2,3)] by force
    have ttt: "{1..} = {x::nat. x>0}" by auto
    thm ert_sup
    (* The ert of the loop is the sup of the iterates (Kleene); the i=0
       iterate is dropped since the sup over i \<ge> 1 dominates it. *)
    have "ert (compile (While b C)) 0 = (\<Squnion>i. (charaErt b C 0 ^^ i) \<bottom>)" apply(subst ert_sup) by auto
    also have "\<dots> = SUPREMUM ({0}\<union>{1..}) (\<lambda>i::nat. ((charaErt b C 0) ^^ i) \<bottom>)"
      unfolding t by auto
    also have "\<dots> = (\<Squnion>i\<in>{1..}. ((charaErt b C 0) ^^ i) \<bottom>)"
      by auto
    also have "\<dots> = (\<Squnion>n\<in>{1..}. ?L n)" unfolding tt by simp
    also have "\<dots> = (\<Squnion>n\<in>{x. 0 < x}. ?L n)" unfolding ttt by auto
    also have "\<dots> = (\<lambda>s. 1 + (if b s then ert (compile C) 0 s * suminf (\<lambda>i. ?I s ^ i)
              + suminf (\<lambda>i. ?I s ^ i)
          else 0))" (is "(\<Squnion>n\<in>{x. 0 < x}. ?f n) = _")
    proof -
      (* pull the limit inwards *)
      let ?W = "wp (compile C) (lift2 b)"
      let ?if = "(\<lambda>n s. 1 + ert (compile C) 0 s * sum ((^) (?W s)) {0..<n} + sum ((^) (?W s)) {0..<n-1})"
      let ?f' = "(%n. (\<lambda>s. (if b s then ?if n s else 1)))"
      have 1: "\<And>n. ?f n = ?f' n"
        by auto
      have 2: "(\<Squnion>n\<in>{x. 0 < x}. ?f' n) = (\<lambda>s. if b s then (\<Squnion>n\<in>{x. 0 < x}. ?if n s) else 1)"
        apply (rule Sup_If) by auto
      (* 3: the constant 1 can be pulled out of the SUP. *)
      { fix s
        have "(\<Squnion>n\<in>{x. 0 < x}. ?if n s) = (\<Squnion>n\<in>{x. 0 < x}. 1 + (ert (compile C) 0 s * sum ((^) (wp (compile C) (lift2 b) s)) {0..<n} + sum ((^) (wp (compile C) (lift2 b) s)) {0..<n -1}))"
          by (simp add: add.assoc)
        also have "\<dots> = 1 + (\<Squnion>n\<in>{x. 0 < x}. (ert (compile C) 0 s * sum ((^) (wp (compile C) (lift2 b) s)) {0..<n} + sum ((^) (wp (compile C) (lift2 b) s)) {0..<n - 1}))"
          apply(subst ennreal_SUP_add_right) by auto
        finally have "(\<Squnion>n\<in>{x. 0 < x}. ?if n s) = 1 + (\<Squnion>n\<in>{x. 0 < x}. (ert (compile C) 0 s * sum ((^) (wp (compile C) (lift2 b) s)) {0..<n} + sum ((^) (wp (compile C) (lift2 b) s)) {0..<n - 1}))"  .
      } note 3=this
      (* 4: reindex the SUP over {x. 0 < x} as a SUP over all of nat. *)
      { fix s
        {
          fix f :: "nat \<Rightarrow> ennreal"
          have "f ` {x. 0 < x} = (f o Suc) ` UNIV"
            apply(rule) apply (auto simp: image_iff) by (metis Suc_pred')
        } note k=this
        have t: "(\<lambda>n. (ert (compile C) 0 s * sum ((^) (wp (compile C) (lift2 b) s)) {0..<n} + sum ((^) (wp (compile C) (lift2 b) s)) {0..<n - 1})) ` {x. 0 < x}
            = (\<lambda>n. (ert (compile C) 0 s * sum ((^) (wp (compile C) (lift2 b) s)) {0..<n+1} + sum ((^) (wp (compile C) (lift2 b) s)) {0..<n})) `UNIV"
          apply(subst k) by auto
        have "(\<Squnion>n\<in>{x. 0 < x}. (ert (compile C) 0 s * sum ((^) (wp (compile C) (lift2 b) s)) {0..<n} + sum ((^) (wp (compile C) (lift2 b) s)) {0..<n - 1}))
            = (\<Squnion>n\<in>UNIV. (ert (compile C) 0 s * sum ((^) (wp (compile C) (lift2 b) s)) {0..<n+1} + sum ((^) (wp (compile C) (lift2 b) s)) {0..<n}))"
          by(simp only: t)
      } note 4=this
      (* 5: split the SUP of the sum into a sum of SUPs and recognize
         both as infinite geometric series (suminf). *)
      { fix s
        have n: "\<And>x::nat. {0..<x} = {..<x}" by auto
        have i: "(\<Squnion>n. ert (compile C) 0 s * sum ((^) (wp (compile C) (\<lambda>s. if b s then 1 else 0) s)) {..<n + 1})
            = ert (compile C) 0 s * (\<Squnion>n. sum ((^) (wp (compile C) (\<lambda>s. if b s then 1 else 0) s)) {..<n + 1})"
          by (simp only: SUP_mult_left_ennreal)
        have ii: "(\<Squnion>n. sum ((^) (wp (compile C) (\<lambda>s. if b s then 1 else 0) s)) {..<n + 1})
            = (\<Squnion>n. sum ((^) (wp (compile C) (\<lambda>s. if b s then 1 else 0) s)) {..<n})"
          apply(rule SUP_shift_mono) by (auto intro: incseq_SucI)
        have " (\<Squnion>n. ert (compile C) 0 s * sum ((^) (wp (compile C) (lift2 b) s)) {0..<n + 1} +
            sum ((^) (wp (compile C) (lift2 b) s)) {0..<n})
            = (ert (compile C) 0 s * suminf ((^) (wp (compile C) (\<lambda>s. if b s then 1 else 0) s)) +
                suminf ((^) (wp (compile C) (\<lambda>s. if b s then 1 else 0) s)))"
          apply(subst ennreal_SUP_add)
          subgoal by(auto simp: incseq_SucI intro: incseqmult)
          subgoal by(auto simp: n incseq_sumI)
          by(simp only: i suminf_eq_SUP lift2_def n ii)
      } note 5=this
      show ?thesis unfolding 1 2 3 4 5 by auto
    qed
    (* Evaluate the geometric series: if ?I s < 1 it sums to
       1/(1 - ?I s); if ?I s = 1 both sides are infinite. *)
    also have "\<dots> = (%s. 1 + (if b s then (1 + ert (compile C) 0 s) / (1- ?I s) else 0))"
      apply(rule ext)
      subgoal for s
      proof (cases "b s")
        case True
        note b=this
        have kk: "(\<lambda>s. if \<not> b s then f s else 0) = (\<lambda>s. if b s then 0 else f s)" by auto
        term ?I
        show ?thesis
        proof (cases "?I s < 1")
          case True
          have geom_series: "(\<Sum>n. (?I s) ^ n) = 1 / (1 - ?I s)" using True
            by (rule suminf_geometric_ennreal)
          show ?thesis unfolding geom_series using True
            apply (simp add: ennreal_times_divide)
            by (metis (no_types, lifting) add.commute comm_semiring_class.distrib ennreal_times_divide mult.right_neutral)
        next
          case False
          with wp_le1 have 1: "?I s = 1"
            by (metis (mono_tags, lifting) eq_refl linorder_cases not_le zero_le_one)
          show ?thesis by(simp add: 1 b sum1)
        qed
      next
        case False then show ?thesis by auto
      qed
      done
    finally show ?thesis unfolding k(2) by auto
  qed
  let ?I = "(wp (compile C) (\<lambda>s. if b s then 1 else 0))"
  let ?F = "wp (compile C) (\<lambda>s. if b s then 0 else f s)"
  (* Step 2: merge the two fractions over the common denominator; when
     ?I s = 1 the denominator is 0 and both sides degenerate
     consistently by the ennreal division conventions. *)
  { fix s
    have "(1 + ert (compile C) 0 s) / (1- ?I s) + ?F s / (1 - ?I s)
        = (1 + ert (compile C) 0 s + ?F s) / (1 - ?I s)"
    proof (cases "?I s < 1")
      case True
      then show ?thesis using hauptnenner by blast
    next
      case False
      with wp_le1 have "?I s = 1"
        by (metis (mono_tags, lifting) eq_refl linorder_cases not_le zero_le_one)
      then show ?thesis by auto
    qed
  } note l=this
  (* Step 3: decompose ert f = ert 0 + wp f, use step 1 for the runtime
     part and thm3 for the wp part, then recombine with l. *)
  have "?E f = ?E 0 + wp (compile (While b C)) f" by(rule decompose_ert)
  also have "\<dots> = ?R 0
      + (\<lambda>s. if b s then wp (compile C) (\<lambda>s. if b s then 0 else f s) s / (1 - wp (compile C) (\<lambda>s. if b s then 1 else 0) s) else f s)"
    by(simp only: 1 thm3[OF assms(1)])
  also
  have "\<dots> = (%s. 1 + (if b s then (1 + ert (compile C) 0 s) / (1- ?I s)
                  + ?F s / (1 - ?I s)
          else f s))" unfolding k(2) by auto
  also have "\<dots> = (%s. 1 + (if b s then (1 + ert (compile C) 0 s + ?F s) / (1 - ?I s)
          else f s))"
    by (auto simp add: l)
  also have "\<dots> = ?R f" apply(rule ext) apply simp
    using decompose_ert by (metis (no_types, lifting) add.assoc plus_fun_apply)
  finally show ?thesis .
qed
end
|
{"author": "maxhaslbeck", "repo": "verERT", "sha": "193188292620a60005e528a78247323eb53084bc", "save_path": "github-repos/isabelle/maxhaslbeck-verERT", "path": "github-repos/isabelle/maxhaslbeck-verERT/verERT-193188292620a60005e528a78247323eb53084bc/ERT_Of_IID_Loop_Classic.thy"}
|
using Rubin
using Tests
using Elliptic
using HypergeometricFunctions
using Polylogarithms
using SpecialFunctions
@test integrate((c+d*x)^4*sin(a+b*x), x) == :(-1*b^-1*(c+d*x)^4*cos(a+b*x)+-24*b^-5*d^4*cos(a+b*x)+-24*b^-4*d^3*(c+d*x)*sin(a+b*x)+4*d*b^-2*(c+d*x)^3*sin(a+b*x)+12*b^-3*d^2*(c+d*x)^2*cos(a+b*x))
@test integrate((c+d*x)^3*sin(a+b*x), x) == :(-1*b^-1*(c+d*x)^3*cos(a+b*x)+-6*b^-4*d^3*sin(a+b*x)+3*d*b^-2*(c+d*x)^2*sin(a+b*x)+6*b^-3*d^2*(c+d*x)*cos(a+b*x))
@test integrate((c+d*x)^2*sin(a+b*x), x) == :(-1*b^-1*(c+d*x)^2*cos(a+b*x)+2*b^-3*d^2*cos(a+b*x)+2*d*b^-2*(c+d*x)*sin(a+b*x))
@test integrate((c+d*x)*sin(a+b*x), x) == :(d*b^-2*sin(a+b*x)+-1*b^-1*(c+d*x)*cos(a+b*x))
@test integrate((c+d*x)^-1*sin(a+b*x), x) == :(d^-1*cos(a+-1*b*c*d^-1)*sinintegral(b*x+b*c*d^-1)+d^-1*cosintegral(b*x+b*c*d^-1)*sin(a+-1*b*c*d^-1))
@test integrate((c+d*x)^-2*sin(a+b*x), x) == :(-1*d^-1*(c+d*x)^-1*sin(a+b*x)+b*d^-2*cos(a+-1*b*c*d^-1)*cosintegral(b*x+b*c*d^-1)+-1*b*d^-2*sin(a+-1*b*c*d^-1)*sinintegral(b*x+b*c*d^-1))
@test integrate((c+d*x)^-3*sin(a+b*x), x) == :(-1//2*d^-1*(c+d*x)^-2*sin(a+b*x)+-1//2*b*d^-2*(c+d*x)^-1*cos(a+b*x)+-1//2*b^2*d^-3*cos(a+-1*b*c*d^-1)*sinintegral(b*x+b*c*d^-1)+-1//2*b^2*d^-3*cosintegral(b*x+b*c*d^-1)*sin(a+-1*b*c*d^-1))
@test integrate((c+d*x)^4*sin(a+b*x)^2, x) == :(1//10*d^-1*(c+d*x)^5+-1//2*d*b^-2*(c+d*x)^3+3//4*x*b^-4*d^4+d*b^-2*(c+d*x)^3*sin(a+b*x)^2+-3//2*b^-4*d^3*sin(a+b*x)^2*(c+d*x)+-3//4*b^-5*d^4*cos(a+b*x)*sin(a+b*x)+-1//2*b^-1*(c+d*x)^4*cos(a+b*x)*sin(a+b*x)+3//2*b^-3*d^2*(c+d*x)^2*cos(a+b*x)*sin(a+b*x))
@test integrate((c+d*x)^3*sin(a+b*x)^2, x) == :(1//8*d^-1*(c+d*x)^4+-3//8*b^-4*d^3*sin(a+b*x)^2+-3//8*b^-2*d^3*x^2+-3//4*c*x*b^-2*d^2+-1//2*b^-1*(c+d*x)^3*cos(a+b*x)*sin(a+b*x)+3//4*d*b^-2*(c+d*x)^2*sin(a+b*x)^2+3//4*b^-3*d^2*(c+d*x)*cos(a+b*x)*sin(a+b*x))
@test integrate((c+d*x)^2*sin(a+b*x)^2, x) == :(1//6*d^-1*(c+d*x)^3+-1//4*x*b^-2*d^2+(1/2)*d*b^-2*sin(a+b*x)^2*(c+d*x)+-1//2*b^-1*(c+d*x)^2*cos(a+b*x)*sin(a+b*x)+1//4*b^-3*d^2*cos(a+b*x)*sin(a+b*x))
@test integrate(sin(a+b*x)^2*(c+d*x), x) == :((1/2)*c*x+1//4*d*x^2+1//4*d*b^-2*sin(a+b*x)^2+-1//2*b^-1*(c+d*x)*cos(a+b*x)*sin(a+b*x))
@test integrate((c+d*x)^-1*sin(a+b*x)^2, x) == :((1/2)*d^-1*log(c+d*x)+(1/2)*d^-1*sin(2a+-2*b*c*d^-1)*sinintegral(2*b*x+2*b*c*d^-1)+-1//2*d^-1*cos(2a+-2*b*c*d^-1)*cosintegral(2*b*x+2*b*c*d^-1))
@test integrate((c+d*x)^-2*sin(a+b*x)^2, x) == :(-1*d^-1*(c+d*x)^-1*sin(a+b*x)^2+b*d^-2*cos(2a+-2*b*c*d^-1)*sinintegral(2*b*x+2*b*c*d^-1)+b*d^-2*cosintegral(2*b*x+2*b*c*d^-1)*sin(2a+-2*b*c*d^-1))
@test integrate((c+d*x)^-3*sin(a+b*x)^2, x) == :(-1//2*d^-1*(c+d*x)^-2*sin(a+b*x)^2+b^2*d^-3*cos(2a+-2*b*c*d^-1)*cosintegral(2*b*x+2*b*c*d^-1)+-1*b^2*d^-3*sin(2a+-2*b*c*d^-1)*sinintegral(2*b*x+2*b*c*d^-1)+-1*b*d^-2*(c+d*x)^-1*cos(a+b*x)*sin(a+b*x))
@test integrate((c+d*x)^-4*sin(a+b*x)^2, x) == :(-1//3*b^2*d^-3*(c+d*x)^-1+-1//3*d^-1*(c+d*x)^-3*sin(a+b*x)^2+-2//3*b^3*d^-4*cos(2a+-2*b*c*d^-1)*sinintegral(2*b*x+2*b*c*d^-1)+-2//3*b^3*d^-4*cosintegral(2*b*x+2*b*c*d^-1)*sin(2a+-2*b*c*d^-1)+2//3*b^2*d^-3*(c+d*x)^-1*sin(a+b*x)^2+-1//3*b*d^-2*(c+d*x)^-2*cos(a+b*x)*sin(a+b*x))
@test integrate((c+d*x)^4*sin(a+b*x)^3, x) == :(-488//27*b^-5*d^4*cos(a+b*x)+-2//3*b^-1*(c+d*x)^4*cos(a+b*x)+8//81*b^-5*d^4*cos(a+b*x)^3+-160//9*b^-4*d^3*(c+d*x)*sin(a+b*x)+-8//27*b^-4*d^3*sin(a+b*x)^3*(c+d*x)+-1//3*b^-1*(c+d*x)^4*sin(a+b*x)^2*cos(a+b*x)+4//9*d*b^-2*(c+d*x)^3*sin(a+b*x)^3+8//3*d*b^-2*(c+d*x)^3*sin(a+b*x)+80//9*b^-3*d^2*(c+d*x)^2*cos(a+b*x)+4//9*b^-3*d^2*(c+d*x)^2*sin(a+b*x)^2*cos(a+b*x))
@test integrate((c+d*x)^3*sin(a+b*x)^3, x) == :(-40//9*b^-4*d^3*sin(a+b*x)+-2//3*b^-1*(c+d*x)^3*cos(a+b*x)+-2//27*b^-4*d^3*sin(a+b*x)^3+2*d*b^-2*(c+d*x)^2*sin(a+b*x)+-1//3*b^-1*(c+d*x)^3*sin(a+b*x)^2*cos(a+b*x)+1//3*d*b^-2*(c+d*x)^2*sin(a+b*x)^3+40//9*b^-3*d^2*(c+d*x)*cos(a+b*x)+2//9*b^-3*d^2*sin(a+b*x)^2*(c+d*x)*cos(a+b*x))
@test integrate((c+d*x)^2*sin(a+b*x)^3, x) == :(-2//3*b^-1*(c+d*x)^2*cos(a+b*x)+-2//27*b^-3*d^2*cos(a+b*x)^3+14//9*b^-3*d^2*cos(a+b*x)+-1//3*b^-1*(c+d*x)^2*sin(a+b*x)^2*cos(a+b*x)+2//9*d*b^-2*sin(a+b*x)^3*(c+d*x)+4//3*d*b^-2*(c+d*x)*sin(a+b*x))
@test integrate(sin(a+b*x)^3*(c+d*x), x) == :(-1//3*b^-1*(2c+2*d*x)*cos(a+b*x)+1//9*d*b^-2*sin(a+b*x)^3+2//3*d*b^-2*sin(a+b*x)+-1//3*b^-1*sin(a+b*x)^2*(c+d*x)*cos(a+b*x))
@test integrate((c+d*x)^-1*sin(a+b*x)^3, x) == :(-1//4*d^-1*cos(3a+-3*b*c*d^-1)*sinintegral(3*b*x+3*b*c*d^-1)+-1//4*d^-1*cosintegral(3*b*x+3*b*c*d^-1)*sin(3a+-3*b*c*d^-1)+3//4*d^-1*cos(a+-1*b*c*d^-1)*sinintegral(b*x+b*c*d^-1)+3//4*d^-1*cosintegral(b*x+b*c*d^-1)*sin(a+-1*b*c*d^-1))
@test integrate((c+d*x)^-2*sin(a+b*x)^3, x) == :(-1*d^-1*(c+d*x)^-1*sin(a+b*x)^3+-3//4*b*d^-2*cos(3a+-3*b*c*d^-1)*cosintegral(3*b*x+3*b*c*d^-1)+-3//4*b*d^-2*sin(a+-1*b*c*d^-1)*sinintegral(b*x+b*c*d^-1)+3//4*b*d^-2*cos(a+-1*b*c*d^-1)*cosintegral(b*x+b*c*d^-1)+3//4*b*d^-2*sin(3a+-3*b*c*d^-1)*sinintegral(3*b*x+3*b*c*d^-1))
@test integrate((c+d*x)^-3*sin(a+b*x)^3, x) == :(-1//2*d^-1*(c+d*x)^-2*sin(a+b*x)^3+-3//8*b^2*d^-3*cos(a+-1*b*c*d^-1)*sinintegral(b*x+b*c*d^-1)+-3//8*b^2*d^-3*cosintegral(b*x+b*c*d^-1)*sin(a+-1*b*c*d^-1)+9//8*b^2*d^-3*cos(3a+-3*b*c*d^-1)*sinintegral(3*b*x+3*b*c*d^-1)+9//8*b^2*d^-3*cosintegral(3*b*x+3*b*c*d^-1)*sin(3a+-3*b*c*d^-1)+-3//2*b*d^-2*(c+d*x)^-1*sin(a+b*x)^2*cos(a+b*x))
@test integrate((c+d*x)^3*csc(a+b*x), x) == :(-2*b^-1*(c+d*x)^3*arctanh(exp(im*(a+b*x)))+-6*im*b^-4*d^3*Polylogarithms.polylog(4,-1*exp(im*(a+b*x)))+-6*b^-3*d^2*(c+d*x)*Polylogarithms.polylog(3,-1*exp(im*(a+b*x)))+6*im*b^-4*d^3*Polylogarithms.polylog(4,exp(im*(a+b*x)))+6*b^-3*d^2*(c+d*x)*Polylogarithms.polylog(3,exp(im*(a+b*x)))+-3*im*d*b^-2*(c+d*x)^2*Polylogarithms.polylog(2,exp(im*(a+b*x)))+3*im*d*b^-2*(c+d*x)^2*Polylogarithms.polylog(2,-1*exp(im*(a+b*x))))
@test integrate((c+d*x)^2*csc(a+b*x), x) == :(-2*b^-1*(c+d*x)^2*arctanh(exp(im*(a+b*x)))+-2*b^-3*d^2*Polylogarithms.polylog(3,-1*exp(im*(a+b*x)))+2*b^-3*d^2*Polylogarithms.polylog(3,exp(im*(a+b*x)))+-2*im*d*b^-2*(c+d*x)*Polylogarithms.polylog(2,exp(im*(a+b*x)))+2*im*d*b^-2*(c+d*x)*Polylogarithms.polylog(2,-1*exp(im*(a+b*x))))
@test integrate((c+d*x)*csc(a+b*x), x) == :(-1*b^-1*(2c+2*d*x)*arctanh(exp(im*(a+b*x)))+im*d*b^-2*Polylogarithms.polylog(2,-1*exp(im*(a+b*x)))+-1*im*d*b^-2*Polylogarithms.polylog(2,exp(im*(a+b*x))))
@test integrate((c+d*x)^3*csc(a+b*x)^2, x) == :(-1*im*b^-1*(c+d*x)^3+-1*b^-1*(c+d*x)^3*cot(a+b*x)+3//2*b^-4*d^3*Polylogarithms.polylog(3,exp(2*im*(a+b*x)))+3*d*b^-2*(c+d*x)^2*log(1+-1*exp(2*im*(a+b*x)))+-3*im*b^-3*d^2*(c+d*x)*Polylogarithms.polylog(2,exp(2*im*(a+b*x))))
@test integrate((c+d*x)^2*csc(a+b*x)^2, x) == :(-1*im*b^-1*(c+d*x)^2+-1*b^-1*(c+d*x)^2*cot(a+b*x)+-1*im*b^-3*d^2*Polylogarithms.polylog(2,exp(2*im*(a+b*x)))+2*d*b^-2*(c+d*x)*log(1+-1*exp(2*im*(a+b*x))))
@test integrate(csc(a+b*x)^2*(c+d*x), x) == :(d*b^-2*log(sin(a+b*x))+-1*b^-1*(c+d*x)*cot(a+b*x))
@test integrate((c+d*x)^3*csc(a+b*x)^3, x) == :(-1*b^-1*(c+d*x)^3*arctanh(exp(im*(a+b*x)))+-6*b^-3*d^2*(c+d*x)*arctanh(exp(im*(a+b*x)))+-3*im*b^-4*d^3*Polylogarithms.polylog(2,exp(im*(a+b*x)))+-3*im*b^-4*d^3*Polylogarithms.polylog(4,-1*exp(im*(a+b*x)))+-3*b^-3*d^2*(c+d*x)*Polylogarithms.polylog(3,-1*exp(im*(a+b*x)))+3*im*b^-4*d^3*Polylogarithms.polylog(2,-1*exp(im*(a+b*x)))+3*im*b^-4*d^3*Polylogarithms.polylog(4,exp(im*(a+b*x)))+3*b^-3*d^2*(c+d*x)*Polylogarithms.polylog(3,exp(im*(a+b*x)))+-3//2*d*b^-2*(c+d*x)^2*csc(a+b*x)+-1//2*b^-1*(c+d*x)^3*cot(a+b*x)*csc(a+b*x)+-3//2*im*d*b^-2*(c+d*x)^2*Polylogarithms.polylog(2,exp(im*(a+b*x)))+3//2*im*d*b^-2*(c+d*x)^2*Polylogarithms.polylog(2,-1*exp(im*(a+b*x))))
@test integrate((c+d*x)^2*csc(a+b*x)^3, x) == :(b^-3*d^2*Polylogarithms.polylog(3,exp(im*(a+b*x)))+-1*b^-1*(c+d*x)^2*arctanh(exp(im*(a+b*x)))+-1*b^-3*d^2*Polylogarithms.polylog(3,-1*exp(im*(a+b*x)))+-1*b^-3*d^2*arctanh(cos(a+b*x))+-1*d*b^-2*(c+d*x)*csc(a+b*x)+-1//2*b^-1*(c+d*x)^2*cot(a+b*x)*csc(a+b*x)+im*d*b^-2*(c+d*x)*Polylogarithms.polylog(2,-1*exp(im*(a+b*x)))+-1*im*d*b^-2*(c+d*x)*Polylogarithms.polylog(2,exp(im*(a+b*x))))
@test integrate(csc(a+b*x)^3*(c+d*x), x) == :(-1*b^-1*(c+d*x)*arctanh(exp(im*(a+b*x)))+-1//2*d*b^-2*csc(a+b*x)+(1/2)*im*d*b^-2*Polylogarithms.polylog(2,-1*exp(im*(a+b*x)))+-1//2*im*d*b^-2*Polylogarithms.polylog(2,exp(im*(a+b*x)))+-1//2*b^-1*(c+d*x)*cot(a+b*x)*csc(a+b*x))
@test integrate((c+d*x)^5//2*sin(a+b*x), x) == :(-1*b^-1*(c+d*x)^5//2*cos(a+b*x)+5//2*d*b^-2*(c+d*x)^3//2*sin(a+b*x)+15//4*b^-3*d^2*(c+d*x)^(1/2)*cos(a+b*x)+-15//8*2^(1/2)*pi^(1/2)*b^-7//2*d^5//2*cos(a+-1*b*c*d^-1)*fresnelc(2^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))+15//8*2^(1/2)*pi^(1/2)*b^-7//2*d^5//2*fresnels(2^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))*sin(a+-1*b*c*d^-1))
@test integrate((c+d*x)^3//2*sin(a+b*x), x) == :(-1*b^-1*(c+d*x)^3//2*cos(a+b*x)+3//2*d*b^-2*(c+d*x)^(1/2)*sin(a+b*x)+-3//4*2^(1/2)*pi^(1/2)*b^-5//2*d^3//2*cos(a+-1*b*c*d^-1)*fresnels(2^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))+-3//4*2^(1/2)*pi^(1/2)*b^-5//2*d^3//2*fresnelc(2^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))*sin(a+-1*b*c*d^-1))
@test integrate((c+d*x)^(1/2)*sin(a+b*x), x) == :(-1*b^-1*(c+d*x)^(1/2)*cos(a+b*x)+(1/2)*2^(1/2)*pi^(1/2)*b^-3//2*d^(1/2)*cos(a+-1*b*c*d^-1)*fresnelc(2^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))+-1//2*2^(1/2)*pi^(1/2)*b^-3//2*d^(1/2)*fresnels(2^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))*sin(a+-1*b*c*d^-1))
@test integrate((c+d*x)^-1//2*sin(a+b*x), x) == :(2^(1/2)*pi^(1/2)*b^-1//2*d^-1//2*cos(a+-1*b*c*d^-1)*fresnels(2^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))+2^(1/2)*pi^(1/2)*b^-1//2*d^-1//2*fresnelc(2^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))*sin(a+-1*b*c*d^-1))
@test integrate((c+d*x)^-3//2*sin(a+b*x), x) == :(-2*d^-1*(c+d*x)^-1//2*sin(a+b*x)+-2*2^(1/2)*pi^(1/2)*b^(1/2)*d^-3//2*fresnels(2^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))*sin(a+-1*b*c*d^-1)+2*2^(1/2)*pi^(1/2)*b^(1/2)*d^-3//2*cos(a+-1*b*c*d^-1)*fresnelc(2^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2)))
@test integrate((c+d*x)^-5//2*sin(a+b*x), x) == :(-2//3*d^-1*(c+d*x)^-3//2*sin(a+b*x)+-4//3*b*d^-2*(c+d*x)^-1//2*cos(a+b*x)+-4//3*2^(1/2)*pi^(1/2)*b^3//2*d^-5//2*cos(a+-1*b*c*d^-1)*fresnels(2^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))+-4//3*2^(1/2)*pi^(1/2)*b^3//2*d^-5//2*fresnelc(2^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))*sin(a+-1*b*c*d^-1))
@test integrate((c+d*x)^-7//2*sin(a+b*x), x) == :(-2//5*d^-1*(c+d*x)^-5//2*sin(a+b*x)+-4//15*b*d^-2*(c+d*x)^-3//2*cos(a+b*x)+8//15*b^2*d^-3*(c+d*x)^-1//2*sin(a+b*x)+-8//15*2^(1/2)*pi^(1/2)*b^5//2*d^-7//2*cos(a+-1*b*c*d^-1)*fresnelc(2^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))+8//15*2^(1/2)*pi^(1/2)*b^5//2*d^-7//2*fresnels(2^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))*sin(a+-1*b*c*d^-1))
@test integrate((c+d*x)^5//2*sin(a+b*x)^2, x) == :(1//7*d^-1*(c+d*x)^7//2+-5//16*d*b^-2*(c+d*x)^3//2+-1//2*b^-1*(c+d*x)^5//2*cos(a+b*x)*sin(a+b*x)+5//8*d*b^-2*(c+d*x)^3//2*sin(a+b*x)^2+15//64*b^-3*d^2*(c+d*x)^(1/2)*sin(2a+2*b*x)+-15//128*pi^(1/2)*b^-7//2*d^5//2*cos(2a+-2*b*c*d^-1)*fresnels(2*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))+-15//128*pi^(1/2)*b^-7//2*d^5//2*fresnelc(2*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))*sin(2a+-2*b*c*d^-1))
@test integrate((c+d*x)^3//2*sin(a+b*x)^2, x) == :(1//5*d^-1*(c+d*x)^5//2+-3//16*d*b^-2*(c+d*x)^(1/2)+-1//2*b^-1*(c+d*x)^3//2*cos(a+b*x)*sin(a+b*x)+3//8*d*b^-2*(c+d*x)^(1/2)*sin(a+b*x)^2+-3//32*pi^(1/2)*b^-5//2*d^3//2*fresnels(2*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))*sin(2a+-2*b*c*d^-1)+3//32*pi^(1/2)*b^-5//2*d^3//2*cos(2a+-2*b*c*d^-1)*fresnelc(2*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2)))
@test integrate((c+d*x)^(1/2)*sin(a+b*x)^2, x) == :(1//3*d^-1*(c+d*x)^3//2+-1//4*b^-1*(c+d*x)^(1/2)*sin(2a+2*b*x)+1//8*pi^(1/2)*b^-3//2*d^(1/2)*cos(2a+-2*b*c*d^-1)*fresnels(2*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))+1//8*pi^(1/2)*b^-3//2*d^(1/2)*fresnelc(2*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))*sin(2a+-2*b*c*d^-1))
@test integrate((c+d*x)^-1//2*sin(a+b*x)^2, x) == :(d^-1*(c+d*x)^(1/2)+(1/2)*pi^(1/2)*b^-1//2*d^-1//2*fresnels(2*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))*sin(2a+-2*b*c*d^-1)+-1//2*pi^(1/2)*b^-1//2*d^-1//2*cos(2a+-2*b*c*d^-1)*fresnelc(2*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2)))
@test integrate((c+d*x)^-3//2*sin(a+b*x)^2, x) == :(-2*d^-1*(c+d*x)^-1//2*sin(a+b*x)^2+2*pi^(1/2)*b^(1/2)*d^-3//2*cos(2a+-2*b*c*d^-1)*fresnels(2*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))+2*pi^(1/2)*b^(1/2)*d^-3//2*fresnelc(2*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))*sin(2a+-2*b*c*d^-1))
@test integrate((c+d*x)^-5//2*sin(a+b*x)^2, x) == :(-2//3*d^-1*(c+d*x)^-3//2*sin(a+b*x)^2+-8//3*b*d^-2*(c+d*x)^-1//2*cos(a+b*x)*sin(a+b*x)+-8//3*pi^(1/2)*b^3//2*d^-5//2*fresnels(2*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))*sin(2a+-2*b*c*d^-1)+8//3*pi^(1/2)*b^3//2*d^-5//2*cos(2a+-2*b*c*d^-1)*fresnelc(2*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2)))
@test integrate((c+d*x)^-7//2*sin(a+b*x)^2, x) == :(-16//15*b^2*d^-3*(c+d*x)^-1//2+-2//5*d^-1*(c+d*x)^-5//2*sin(a+b*x)^2+32//15*b^2*d^-3*(c+d*x)^-1//2*sin(a+b*x)^2+-32//15*pi^(1/2)*b^5//2*d^-7//2*cos(2a+-2*b*c*d^-1)*fresnels(2*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))+-32//15*pi^(1/2)*b^5//2*d^-7//2*fresnelc(2*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))*sin(2a+-2*b*c*d^-1)+-8//15*b*d^-2*(c+d*x)^-3//2*cos(a+b*x)*sin(a+b*x))
@test integrate((c+d*x)^-9//2*sin(a+b*x)^2, x) == :(-16//105*b^2*d^-3*(c+d*x)^-3//2+-2//7*d^-1*(c+d*x)^-7//2*sin(a+b*x)^2+32//105*b^2*d^-3*(c+d*x)^-3//2*sin(a+b*x)^2+-128//105*pi^(1/2)*b^7//2*d^-9//2*cos(2a+-2*b*c*d^-1)*fresnelc(2*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))+-8//35*b*d^-2*(c+d*x)^-5//2*cos(a+b*x)*sin(a+b*x)+128//105*pi^(1/2)*b^7//2*d^-9//2*fresnels(2*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))*sin(2a+-2*b*c*d^-1)+128//105*b^3*d^-4*(c+d*x)^-1//2*cos(a+b*x)*sin(a+b*x))
@test integrate((c+d*x)^5//2*sin(a+b*x)^3, x) == :(-2//3*b^-1*(c+d*x)^5//2*cos(a+b*x)+-5//144*b^-3*d^2*(c+d*x)^(1/2)*cos(3a+3*b*x)+-1//3*b^-1*(c+d*x)^5//2*sin(a+b*x)^2*cos(a+b*x)+5//3*d*b^-2*(c+d*x)^3//2*sin(a+b*x)+5//18*d*b^-2*(c+d*x)^3//2*sin(a+b*x)^3+45//16*b^-3*d^2*(c+d*x)^(1/2)*cos(a+b*x)+-45//32*2^(1/2)*pi^(1/2)*b^-7//2*d^5//2*cos(a+-1*b*c*d^-1)*fresnelc(2^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))+-5//864*6^(1/2)*pi^(1/2)*b^-7//2*d^5//2*fresnels(6^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))*sin(3a+-3*b*c*d^-1)+5//864*6^(1/2)*pi^(1/2)*b^-7//2*d^5//2*cos(3a+-3*b*c*d^-1)*fresnelc(6^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))+45//32*2^(1/2)*pi^(1/2)*b^-7//2*d^5//2*fresnels(2^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))*sin(a+-1*b*c*d^-1))
# --- ∫ (c+d*x)^(k/2) * sin(a+b*x)^3 dx for half-integer k from 3/2 down to -7/2.
# Expected antiderivatives involve the Fresnel integrals fresnels/fresnelc
# (with phase-shifted arguments a - b*c/d), as produced by the rule engine.
# NOTE(review): expected expressions are machine-generated exact trees; the
# `==` comparison is structural, so they must not be reformatted.
@test integrate((c+d*x)^3//2*sin(a+b*x)^3, x) == :(-2//3*b^-1*(c+d*x)^3//2*cos(a+b*x)+d*b^-2*(c+d*x)^(1/2)*sin(a+b*x)+-1//3*b^-1*(c+d*x)^3//2*sin(a+b*x)^2*cos(a+b*x)+1//6*d*b^-2*(c+d*x)^(1/2)*sin(a+b*x)^3+-9//16*2^(1/2)*pi^(1/2)*b^-5//2*d^3//2*cos(a+-1*b*c*d^-1)*fresnels(2^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))+-9//16*2^(1/2)*pi^(1/2)*b^-5//2*d^3//2*fresnelc(2^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))*sin(a+-1*b*c*d^-1)+1//144*6^(1/2)*pi^(1/2)*b^-5//2*d^3//2*cos(3a+-3*b*c*d^-1)*fresnels(6^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))+1//144*6^(1/2)*pi^(1/2)*b^-5//2*d^3//2*fresnelc(6^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))*sin(3a+-3*b*c*d^-1))
@test integrate((c+d*x)^(1/2)*sin(a+b*x)^3, x) == :(-3//4*b^-1*(c+d*x)^(1/2)*cos(a+b*x)+1//12*b^-1*(c+d*x)^(1/2)*cos(3a+3*b*x)+-3//8*2^(1/2)*pi^(1/2)*b^-3//2*d^(1/2)*fresnels(2^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))*sin(a+-1*b*c*d^-1)+-1//72*6^(1/2)*pi^(1/2)*b^-3//2*d^(1/2)*cos(3a+-3*b*c*d^-1)*fresnelc(6^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))+1//72*6^(1/2)*pi^(1/2)*b^-3//2*d^(1/2)*fresnels(6^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))*sin(3a+-3*b*c*d^-1)+3//8*2^(1/2)*pi^(1/2)*b^-3//2*d^(1/2)*cos(a+-1*b*c*d^-1)*fresnelc(2^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2)))
@test integrate((c+d*x)^-1//2*sin(a+b*x)^3, x) == :(-1//12*6^(1/2)*pi^(1/2)*b^-1//2*d^-1//2*cos(3a+-3*b*c*d^-1)*fresnels(6^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))+-1//12*6^(1/2)*pi^(1/2)*b^-1//2*d^-1//2*fresnelc(6^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))*sin(3a+-3*b*c*d^-1)+3//4*2^(1/2)*pi^(1/2)*b^-1//2*d^-1//2*cos(a+-1*b*c*d^-1)*fresnels(2^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))+3//4*2^(1/2)*pi^(1/2)*b^-1//2*d^-1//2*fresnelc(2^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))*sin(a+-1*b*c*d^-1))
@test integrate((c+d*x)^-3//2*sin(a+b*x)^3, x) == :(-2*d^-1*(c+d*x)^-1//2*sin(a+b*x)^3+(1/2)*6^(1/2)*pi^(1/2)*b^(1/2)*d^-3//2*fresnels(6^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))*sin(3a+-3*b*c*d^-1)+-3//2*2^(1/2)*pi^(1/2)*b^(1/2)*d^-3//2*fresnels(2^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))*sin(a+-1*b*c*d^-1)+-1//2*6^(1/2)*pi^(1/2)*b^(1/2)*d^-3//2*cos(3a+-3*b*c*d^-1)*fresnelc(6^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))+3//2*2^(1/2)*pi^(1/2)*b^(1/2)*d^-3//2*cos(a+-1*b*c*d^-1)*fresnelc(2^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2)))
@test integrate((c+d*x)^-5//2*sin(a+b*x)^3, x) == :(-2//3*d^-1*(c+d*x)^-3//2*sin(a+b*x)^3+-4*b*d^-2*(c+d*x)^-1//2*sin(a+b*x)^2*cos(a+b*x)+6^(1/2)*pi^(1/2)*b^3//2*d^-5//2*cos(3a+-3*b*c*d^-1)*fresnels(6^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))+6^(1/2)*pi^(1/2)*b^3//2*d^-5//2*fresnelc(6^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))*sin(3a+-3*b*c*d^-1)+-1*2^(1/2)*pi^(1/2)*b^3//2*d^-5//2*cos(a+-1*b*c*d^-1)*fresnels(2^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))+-1*2^(1/2)*pi^(1/2)*b^3//2*d^-5//2*fresnelc(2^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))*sin(a+-1*b*c*d^-1))
@test integrate((c+d*x)^-7//2*sin(a+b*x)^3, x) == :(-2//5*d^-1*(c+d*x)^-5//2*sin(a+b*x)^3+-16//5*b^2*d^-3*(c+d*x)^-1//2*sin(a+b*x)+24//5*b^2*d^-3*(c+d*x)^-1//2*sin(a+b*x)^3+-4//5*b*d^-2*(c+d*x)^-3//2*sin(a+b*x)^2*cos(a+b*x)+-6//5*6^(1/2)*pi^(1/2)*b^5//2*d^-7//2*fresnels(6^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))*sin(3a+-3*b*c*d^-1)+-2//5*2^(1/2)*pi^(1/2)*b^5//2*d^-7//2*cos(a+-1*b*c*d^-1)*fresnelc(2^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))+2//5*2^(1/2)*pi^(1/2)*b^5//2*d^-7//2*fresnels(2^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2))*sin(a+-1*b*c*d^-1)+6//5*6^(1/2)*pi^(1/2)*b^5//2*d^-7//2*cos(3a+-3*b*c*d^-1)*fresnelc(6^(1/2)*pi^-1//2*b^(1/2)*d^-1//2*(c+d*x)^(1/2)))
# --- ∫ (d*x)^(k/2) * sin(f*x) dx for k = 3, 1, -1, -3, -5.
# Results again mix elementary terms with Fresnel integrals of
# sqrt(2/pi)*sqrt(f/d)*sqrt(d*x).
@test integrate((d*x)^3//2*sin(f*x), x) == :(-1*f^-1*(d*x)^3//2*cos(f*x)+3//2*d*f^-2*(d*x)^(1/2)*sin(f*x)+-3//4*2^(1/2)*pi^(1/2)*d^3//2*f^-5//2*fresnels(2^(1/2)*pi^-1//2*d^-1//2*f^(1/2)*(d*x)^(1/2)))
@test integrate((d*x)^(1/2)*sin(f*x), x) == :(-1*f^-1*(d*x)^(1/2)*cos(f*x)+(1/2)*2^(1/2)*pi^(1/2)*d^(1/2)*f^-3//2*fresnelc(2^(1/2)*pi^-1//2*d^-1//2*f^(1/2)*(d*x)^(1/2)))
@test integrate((d*x)^-1//2*sin(f*x), x) == :(2^(1/2)*pi^(1/2)*d^-1//2*f^-1//2*fresnels(2^(1/2)*pi^-1//2*d^-1//2*f^(1/2)*(d*x)^(1/2)))
@test integrate((d*x)^-3//2*sin(f*x), x) == :(-2*d^-1*(d*x)^-1//2*sin(f*x)+2*2^(1/2)*pi^(1/2)*d^-3//2*f^(1/2)*fresnelc(2^(1/2)*pi^-1//2*d^-1//2*f^(1/2)*(d*x)^(1/2)))
@test integrate((d*x)^-5//2*sin(f*x), x) == :(-2//3*d^-1*(d*x)^-3//2*sin(f*x)+-4//3*f*d^-2*(d*x)^-1//2*cos(f*x)+-4//3*2^(1/2)*pi^(1/2)*d^-5//2*f^3//2*fresnels(2^(1/2)*pi^-1//2*d^-1//2*f^(1/2)*(d*x)^(1/2)))
# --- Linear combinations x^n*sin(e+f*x)^(k/2) chosen so the result is
# elementary (plus one incomplete elliptic integral Elliptic.E in the x^2 case).
@test integrate(x*sin(e+f*x)^(1/2)+x*sin(e+f*x)^-3//2, x) == :(4*f^-2*sin(e+f*x)^(1/2)+-2*x*f^-1*sin(e+f*x)^-1//2*cos(e+f*x))
@test integrate(x^2*sin(e+f*x)^(1/2)+x^2*sin(e+f*x)^-3//2, x) == :(-16*f^-3*Elliptic.E((1/2)*e+-1//4*pi+(1/2)*f*x,2)+8*x*f^-2*sin(e+f*x)^(1/2)+-2*f^-1*x^2*sin(e+f*x)^-1//2*cos(e+f*x))
@test integrate(x*sin(e+f*x)^-5//2+-1//3*x*sin(e+f*x)^-1//2, x) == :(-4//3*f^-2*sin(e+f*x)^-1//2+-2//3*x*f^-1*sin(e+f*x)^-3//2*cos(e+f*x))
@test integrate(x*sin(e+f*x)^-7//2+3//5*x*sin(e+f*x)^(1/2), x) == :(-4//15*f^-2*sin(e+f*x)^-3//2+12//5*f^-2*sin(e+f*x)^(1/2)+-6//5*x*f^-1*sin(e+f*x)^-1//2*cos(e+f*x)+-2//5*x*f^-1*sin(e+f*x)^-5//2*cos(e+f*x))
# --- Symbolic exponent m: ∫ (c+d*x)^m * sin(...)^p dx and ∫ x^(k+m)*sin(...)^p dx.
# Expected results use the two-argument (upper incomplete) gamma from
# SpecialFunctions, SpecialFunctions.gamma(s, z), with imaginary arguments,
# i.e. the complex-exponential reduction of sin.
@test integrate((c+d*x)^m*sin(a+b*x)^3, x) == :(-3//8*b^-1*(im*b*d^-1*(c+d*x))^(-1m)*(c+d*x)^m*SpecialFunctions.gamma(1+m,im*b*d^-1*(c+d*x))*exp(-1*im*(a+-1*b*c*d^-1))+-3//8*b^-1*(-1*im*b*d^-1*(c+d*x))^(-1m)*(c+d*x)^m*SpecialFunctions.gamma(1+m,-1*im*b*d^-1*(c+d*x))*exp(im*(a+-1*b*c*d^-1))+1//8*3^(-1+-1m)*b^-1*(im*b*d^-1*(c+d*x))^(-1m)*(c+d*x)^m*SpecialFunctions.gamma(1+m,3*im*b*d^-1*(c+d*x))*exp(-3*im*(a+-1*b*c*d^-1))+1//8*3^(-1+-1m)*b^-1*(-1*im*b*d^-1*(c+d*x))^(-1m)*(c+d*x)^m*SpecialFunctions.gamma(1+m,-3*im*b*d^-1*(c+d*x))*exp(3*im*(a+-1*b*c*d^-1)))
@test integrate((c+d*x)^m*sin(a+b*x)^2, x) == :((1/2)*d^-1*(1+m)^-1*(c+d*x)^(1+m)+im*2^(-3+-1m)*b^-1*(-1*im*b*d^-1*(c+d*x))^(-1m)*(c+d*x)^m*SpecialFunctions.gamma(1+m,-2*im*b*d^-1*(c+d*x))*exp(2*im*(a+-1*b*c*d^-1))+-1*im*2^(-3+-1m)*b^-1*(im*b*d^-1*(c+d*x))^(-1m)*(c+d*x)^m*SpecialFunctions.gamma(1+m,2*im*b*d^-1*(c+d*x))*exp(-2*im*(a+-1*b*c*d^-1)))
@test integrate((c+d*x)^m*sin(a+b*x), x) == :(-1//2*b^-1*(im*b*d^-1*(c+d*x))^(-1m)*(c+d*x)^m*SpecialFunctions.gamma(1+m,im*b*d^-1*(c+d*x))*exp(-1*im*(a+-1*b*c*d^-1))+-1//2*b^-1*(-1*im*b*d^-1*(c+d*x))^(-1m)*(c+d*x)^m*SpecialFunctions.gamma(1+m,-1*im*b*d^-1*(c+d*x))*exp(im*(a+-1*b*c*d^-1)))
@test integrate(x^(3+m)*sin(a+b*x), x) == :((1/2)*im*b^-4*x^m*(-1*im*b*x)^(-1m)*SpecialFunctions.gamma(4+m,-1*im*b*x)*exp(im*a)+-1//2*im*b^-4*x^m*(im*b*x)^(-1m)*SpecialFunctions.gamma(4+m,im*b*x)*exp(-1*im*a))
@test integrate(x^(2+m)*sin(a+b*x), x) == :((1/2)*b^-3*x^m*(im*b*x)^(-1m)*SpecialFunctions.gamma(3+m,im*b*x)*exp(-1*im*a)+(1/2)*b^-3*x^m*(-1*im*b*x)^(-1m)*SpecialFunctions.gamma(3+m,-1*im*b*x)*exp(im*a))
@test integrate(x^(1+m)*sin(a+b*x), x) == :((1/2)*im*b^-2*x^m*(im*b*x)^(-1m)*SpecialFunctions.gamma(2+m,im*b*x)*exp(-1*im*a)+-1//2*im*b^-2*x^m*(-1*im*b*x)^(-1m)*SpecialFunctions.gamma(2+m,-1*im*b*x)*exp(im*a))
@test integrate(x^m*sin(a+b*x), x) == :(-1//2*b^-1*x^m*(im*b*x)^(-1m)*SpecialFunctions.gamma(1+m,im*b*x)*exp(-1*im*a)+-1//2*b^-1*x^m*(-1*im*b*x)^(-1m)*SpecialFunctions.gamma(1+m,-1*im*b*x)*exp(im*a))
@test integrate(x^(-1+m)*sin(a+b*x), x) == :((1/2)*im*x^m*(-1*im*b*x)^(-1m)*SpecialFunctions.gamma(m,-1*im*b*x)*exp(im*a)+-1//2*im*x^m*(im*b*x)^(-1m)*SpecialFunctions.gamma(m,im*b*x)*exp(-1*im*a))
@test integrate(x^(-2+m)*sin(a+b*x), x) == :((1/2)*b*x^m*(im*b*x)^(-1m)*SpecialFunctions.gamma(-1+m,im*b*x)*exp(-1*im*a)+(1/2)*b*x^m*(-1*im*b*x)^(-1m)*SpecialFunctions.gamma(-1+m,-1*im*b*x)*exp(im*a))
@test integrate(x^(-3+m)*sin(a+b*x), x) == :((1/2)*im*b^2*x^m*(im*b*x)^(-1m)*SpecialFunctions.gamma(-2+m,im*b*x)*exp(-1*im*a)+-1//2*im*b^2*x^m*(-1*im*b*x)^(-1m)*SpecialFunctions.gamma(-2+m,-1*im*b*x)*exp(im*a))
@test integrate(x^(3+m)*sin(a+b*x)^2, x) == :(x^(4+m)*(8+2m)^-1+2^(-6+-1m)*b^-4*x^m*(im*b*x)^(-1m)*SpecialFunctions.gamma(4+m,2*im*b*x)*exp(-2*im*a)+2^(-6+-1m)*b^-4*x^m*(-1*im*b*x)^(-1m)*SpecialFunctions.gamma(4+m,-2*im*b*x)*exp(2*im*a))
@test integrate(x^(2+m)*sin(a+b*x)^2, x) == :(x^(3+m)*(6+2m)^-1+im*2^(-5+-1m)*b^-3*x^m*(im*b*x)^(-1m)*SpecialFunctions.gamma(3+m,2*im*b*x)*exp(-2*im*a)+-1*im*2^(-5+-1m)*b^-3*x^m*(-1*im*b*x)^(-1m)*SpecialFunctions.gamma(3+m,-2*im*b*x)*exp(2*im*a))
@test integrate(x^(1+m)*sin(a+b*x)^2, x) == :(x^(2+m)*(4+2m)^-1+-1*2^(-4+-1m)*b^-2*x^m*(im*b*x)^(-1m)*SpecialFunctions.gamma(2+m,2*im*b*x)*exp(-2*im*a)+-1*2^(-4+-1m)*b^-2*x^m*(-1*im*b*x)^(-1m)*SpecialFunctions.gamma(2+m,-2*im*b*x)*exp(2*im*a))
@test integrate(x^m*sin(a+b*x)^2, x) == :(x^(1+m)*(2+2m)^-1+im*2^(-3+-1m)*b^-1*x^m*(-1*im*b*x)^(-1m)*SpecialFunctions.gamma(1+m,-2*im*b*x)*exp(2*im*a)+-1*im*2^(-3+-1m)*b^-1*x^m*(im*b*x)^(-1m)*SpecialFunctions.gamma(1+m,2*im*b*x)*exp(-2*im*a))
@test integrate(x^(-1+m)*sin(a+b*x)^2, x) == :((1/2)*m^-1*x^m+2^(-2+-1m)*x^m*(im*b*x)^(-1m)*SpecialFunctions.gamma(m,2*im*b*x)*exp(-2*im*a)+2^(-2+-1m)*x^m*(-1*im*b*x)^(-1m)*SpecialFunctions.gamma(m,-2*im*b*x)*exp(2*im*a))
@test integrate(x^(-2+m)*sin(a+b*x)^2, x) == :(-1*x^(-1+m)*(2+-2m)^-1+im*b*2^(-1+-1m)*x^m*(im*b*x)^(-1m)*SpecialFunctions.gamma(-1+m,2*im*b*x)*exp(-2*im*a)+-1*im*b*2^(-1+-1m)*x^m*(-1*im*b*x)^(-1m)*SpecialFunctions.gamma(-1+m,-2*im*b*x)*exp(2*im*a))
@test integrate(x^(-3+m)*sin(a+b*x)^2, x) == :(-1*x^(-2+m)*(4+-2m)^-1+-1*2^(-1m)*b^2*x^m*(im*b*x)^(-1m)*SpecialFunctions.gamma(-2+m,2*im*b*x)*exp(-2*im*a)+-1*2^(-1m)*b^2*x^m*(-1*im*b*x)^(-1m)*SpecialFunctions.gamma(-2+m,-2*im*b*x)*exp(2*im*a))
# --- Combinations of x^n*csc(e+f*x)^(k/2) whose antiderivatives are elementary
# (one case needs the incomplete elliptic integral Elliptic.F).
@test integrate(x*csc(e+f*x)^-3//2+-1//3*x*csc(e+f*x)^(1/2), x) == :(4//9*f^-2*csc(e+f*x)^-3//2+-2//3*x*f^-1*csc(e+f*x)^-1//2*cos(e+f*x))
@test integrate(x^2*csc(e+f*x)^-3//2+-1//3*x^2*csc(e+f*x)^(1/2), x) == :(8//9*x*f^-2*csc(e+f*x)^-3//2+16//27*f^-3*csc(e+f*x)^-1//2*cos(e+f*x)+-16//27*f^-3*csc(e+f*x)^(1/2)*sin(e+f*x)^(1/2)*Elliptic.F((1/2)*e+-1//4*pi+(1/2)*f*x,2)+-2//3*f^-1*x^2*csc(e+f*x)^-1//2*cos(e+f*x))
@test integrate(x*csc(e+f*x)^-5//2+-3//5*x*csc(e+f*x)^-1//2, x) == :(4//25*f^-2*csc(e+f*x)^-5//2+-2//5*x*f^-1*csc(e+f*x)^-3//2*cos(e+f*x))
@test integrate(x*csc(e+f*x)^-7//2+-5//21*x*csc(e+f*x)^(1/2), x) == :(4//49*f^-2*csc(e+f*x)^-7//2+20//63*f^-2*csc(e+f*x)^-3//2+-10//21*x*f^-1*csc(e+f*x)^-1//2*cos(e+f*x)+-2//7*x*f^-1*csc(e+f*x)^-5//2*cos(e+f*x))
# --- ∫ (c+d*x)^n * (a + a*sin(e+f*x))^p dx for p = 1, 2 and n from 3 down to -3.
# Positive n give elementary results; negative n bring in the sine/cosine
# integrals sinintegral/cosintegral with shifted argument f*x + c*f/d.
@test integrate((c+d*x)^3*(a+a*sin(e+f*x)), x) == :(1//4*a*d^-1*(c+d*x)^4+-1*a*f^-1*(c+d*x)^3*cos(e+f*x)+-6*a*d^3*f^-4*sin(e+f*x)+3*a*d*f^-2*(c+d*x)^2*sin(e+f*x)+6*a*d^2*f^-3*(c+d*x)*cos(e+f*x))
@test integrate((c+d*x)^2*(a+a*sin(e+f*x)), x) == :(1//3*a*d^-1*(c+d*x)^3+-1*a*f^-1*(c+d*x)^2*cos(e+f*x)+2*a*d^2*f^-3*cos(e+f*x)+2*a*d*f^-2*(c+d*x)*sin(e+f*x))
@test integrate((a+a*sin(e+f*x))*(c+d*x), x) == :((1/2)*a*d^-1*(c+d*x)^2+a*d*f^-2*sin(e+f*x)+-1*a*f^-1*(c+d*x)*cos(e+f*x))
@test integrate((c+d*x)^-1*(a+a*sin(e+f*x)), x) == :(a*d^-1*log(c+d*x)+a*d^-1*cos(-1*e+c*f*d^-1)*sinintegral(f*x+c*f*d^-1)+-1*a*d^-1*cosintegral(f*x+c*f*d^-1)*sin(-1*e+c*f*d^-1))
@test integrate((c+d*x)^-2*(a+a*sin(e+f*x)), x) == :(-1*a*d^-1*(c+d*x)^-1+-1*a*d^-1*(c+d*x)^-1*sin(e+f*x)+a*f*d^-2*cos(-1*e+c*f*d^-1)*cosintegral(f*x+c*f*d^-1)+a*f*d^-2*sin(-1*e+c*f*d^-1)*sinintegral(f*x+c*f*d^-1))
@test integrate((c+d*x)^-3*(a+a*sin(e+f*x)), x) == :(-1//2*a*d^-1*(c+d*x)^-2+-1//2*a*d^-1*(c+d*x)^-2*sin(e+f*x)+(1/2)*a*d^-3*f^2*cosintegral(f*x+c*f*d^-1)*sin(-1*e+c*f*d^-1)+-1//2*a*f*d^-2*(c+d*x)^-1*cos(e+f*x)+-1//2*a*d^-3*f^2*cos(-1*e+c*f*d^-1)*sinintegral(f*x+c*f*d^-1))
@test integrate((a+a*sin(e+f*x))^2*(c+d*x)^3, x) == :(3//8*a^2*d^-1*(c+d*x)^4+-12*a^2*d^3*f^-4*sin(e+f*x)+-2*a^2*f^-1*(c+d*x)^3*cos(e+f*x)+-3//8*a^2*d^3*f^-4*sin(e+f*x)^2+-3//8*a^2*d^3*f^-2*x^2+6*d*a^2*f^-2*(c+d*x)^2*sin(e+f*x)+12*a^2*d^2*f^-3*(c+d*x)*cos(e+f*x)+-3//4*c*x*a^2*d^2*f^-2+-1//2*a^2*f^-1*(c+d*x)^3*cos(e+f*x)*sin(e+f*x)+3//4*d*a^2*f^-2*(c+d*x)^2*sin(e+f*x)^2+3//4*a^2*d^2*f^-3*(c+d*x)*cos(e+f*x)*sin(e+f*x))
@test integrate((a+a*sin(e+f*x))^2*(c+d*x)^2, x) == :((1/2)*a^2*d^-1*(c+d*x)^3+-2*a^2*f^-1*(c+d*x)^2*cos(e+f*x)+4*a^2*d^2*f^-3*cos(e+f*x)+-1//4*x*a^2*d^2*f^-2+(1/2)*d*a^2*f^-2*sin(e+f*x)^2*(c+d*x)+4*d*a^2*f^-2*(c+d*x)*sin(e+f*x)+-1//2*a^2*f^-1*(c+d*x)^2*cos(e+f*x)*sin(e+f*x)+1//4*a^2*d^2*f^-3*cos(e+f*x)*sin(e+f*x))
@test integrate((a+a*sin(e+f*x))^2*(c+d*x), x) == :((1/2)*c*x*a^2+(1/2)*a^2*d^-1*(c+d*x)^2+1//4*d*a^2*x^2+-2*a^2*f^-1*(c+d*x)*cos(e+f*x)+2*d*a^2*f^-2*sin(e+f*x)+1//4*d*a^2*f^-2*sin(e+f*x)^2+-1//2*a^2*f^-1*(c+d*x)*cos(e+f*x)*sin(e+f*x))
@test integrate((a+a*sin(e+f*x))^2*(c+d*x)^-1, x) == :(3//2*a^2*d^-1*log(c+d*x)+-2*a^2*d^-1*cosintegral(f*x+c*f*d^-1)*sin(-1*e+c*f*d^-1)+2*a^2*d^-1*cos(-1*e+c*f*d^-1)*sinintegral(f*x+c*f*d^-1)+-1//2*a^2*d^-1*cos(-2*e+2*c*f*d^-1)*cosintegral(2*f*x+2*c*f*d^-1)+-1//2*a^2*d^-1*sin(-2*e+2*c*f*d^-1)*sinintegral(2*f*x+2*c*f*d^-1))
@test integrate((a+a*sin(e+f*x))^2*(c+d*x)^-2, x) == :(-4*a^2*d^-1*(c+d*x)^-1*sin((1/2)*e+1//4*pi+(1/2)*f*x)^4+f*a^2*d^-2*cos(-2*e+2*c*f*d^-1)*sinintegral(2*f*x+2*c*f*d^-1)+-1*f*a^2*d^-2*cosintegral(2*f*x+2*c*f*d^-1)*sin(-2*e+2*c*f*d^-1)+2*f*a^2*d^-2*cos(-1*e+c*f*d^-1)*cosintegral(f*x+c*f*d^-1)+2*f*a^2*d^-2*sin(-1*e+c*f*d^-1)*sinintegral(f*x+c*f*d^-1))
@test integrate((a+a*sin(e+f*x))^2*(c+d*x)^-3, x) == :(-2*a^2*d^-1*(c+d*x)^-2*sin((1/2)*e+1//4*pi+(1/2)*f*x)^4+a^2*d^-3*f^2*cos(-2*e+2*c*f*d^-1)*cosintegral(2*f*x+2*c*f*d^-1)+a^2*d^-3*f^2*cosintegral(f*x+c*f*d^-1)*sin(-1*e+c*f*d^-1)+a^2*d^-3*f^2*sin(-2*e+2*c*f*d^-1)*sinintegral(2*f*x+2*c*f*d^-1)+-1*a^2*d^-3*f^2*cos(-1*e+c*f*d^-1)*sinintegral(f*x+c*f*d^-1)+-4*f*a^2*d^-2*(c+d*x)^-1*sin((1/2)*e+1//4*pi+(1/2)*f*x)^3*cos((1/2)*e+1//4*pi+(1/2)*f*x))
# --- ∫ (c+d*x)^n / (a ± a*sin(e+f*x))^p dx for p = 1, 2.
# Results use half-angle cot/tan/csc terms plus Polylogarithms.polylog of
# ±im*exp(im*(e+f*x)).
@test integrate((a+a*sin(e+f*x))^-1*(c+d*x)^3, x) == :(-1*im*a^-1*f^-1*(c+d*x)^3+-1*a^-1*f^-1*(c+d*x)^3*cot((1/2)*e+1//4*pi+(1/2)*f*x)+12*a^-1*d^3*f^-4*Polylogarithms.polylog(3,im*exp(im*(e+f*x)))+6*d*a^-1*f^-2*(c+d*x)^2*log(1+-1*im*exp(im*(e+f*x)))+-12*im*a^-1*d^2*f^-3*(c+d*x)*Polylogarithms.polylog(2,im*exp(im*(e+f*x))))
@test integrate((a+a*sin(e+f*x))^-1*(c+d*x)^2, x) == :(-1*im*a^-1*f^-1*(c+d*x)^2+-1*a^-1*f^-1*(c+d*x)^2*cot((1/2)*e+1//4*pi+(1/2)*f*x)+-4*im*a^-1*d^2*f^-3*Polylogarithms.polylog(2,im*exp(im*(e+f*x)))+4*d*a^-1*f^-2*(c+d*x)*log(1+-1*im*exp(im*(e+f*x))))
@test integrate((a+a*sin(e+f*x))^-1*(c+d*x), x) == :(-1*a^-1*f^-1*(c+d*x)*cot((1/2)*e+1//4*pi+(1/2)*f*x)+2*d*a^-1*f^-2*log(sin((1/2)*e+1//4*pi+(1/2)*f*x)))
@test integrate((a+a*sin(e+f*x))^-2*(c+d*x)^3, x) == :(4*a^-2*d^3*f^-4*Polylogarithms.polylog(3,im*exp(im*(e+f*x)))+4*a^-2*d^3*f^-4*log(sin((1/2)*e+1//4*pi+(1/2)*f*x))+-1//3*im*a^-2*f^-1*(c+d*x)^3+-1//3*a^-2*f^-1*(c+d*x)^3*cot((1/2)*e+1//4*pi+(1/2)*f*x)+-2*a^-2*d^2*f^-3*(c+d*x)*cot((1/2)*e+1//4*pi+(1/2)*f*x)+2*d*a^-2*f^-2*(c+d*x)^2*log(1+-1*im*exp(im*(e+f*x)))+-1//2*d*a^-2*f^-2*(c+d*x)^2*csc((1/2)*e+1//4*pi+(1/2)*f*x)^2+-1//6*a^-2*f^-1*(c+d*x)^3*csc((1/2)*e+1//4*pi+(1/2)*f*x)^2*cot((1/2)*e+1//4*pi+(1/2)*f*x)+-4*im*a^-2*d^2*f^-3*(c+d*x)*Polylogarithms.polylog(2,im*exp(im*(e+f*x))))
@test integrate((a+a*sin(e+f*x))^-2*(c+d*x)^2, x) == :(-2//3*a^-2*d^2*f^-3*cot((1/2)*e+1//4*pi+(1/2)*f*x)+-1//3*im*a^-2*f^-1*(c+d*x)^2+-1//3*a^-2*f^-1*(c+d*x)^2*cot((1/2)*e+1//4*pi+(1/2)*f*x)+-4//3*im*a^-2*d^2*f^-3*Polylogarithms.polylog(2,im*exp(im*(e+f*x)))+-1//3*d*a^-2*f^-2*csc((1/2)*e+1//4*pi+(1/2)*f*x)^2*(c+d*x)+-1//6*a^-2*f^-1*(c+d*x)^2*csc((1/2)*e+1//4*pi+(1/2)*f*x)^2*cot((1/2)*e+1//4*pi+(1/2)*f*x)+4//3*d*a^-2*f^-2*(c+d*x)*log(1+-1*im*exp(im*(e+f*x))))
@test integrate((a+a*sin(e+f*x))^-2*(c+d*x), x) == :(-1//3*a^-2*f^-1*(c+d*x)*cot((1/2)*e+1//4*pi+(1/2)*f*x)+-1//6*d*a^-2*f^-2*csc((1/2)*e+1//4*pi+(1/2)*f*x)^2+2//3*d*a^-2*f^-2*log(sin((1/2)*e+1//4*pi+(1/2)*f*x))+-1//6*a^-2*f^-1*csc((1/2)*e+1//4*pi+(1/2)*f*x)^2*(c+d*x)*cot((1/2)*e+1//4*pi+(1/2)*f*x))
@test integrate((a+-1*a*sin(e+f*x))^-1*(c+d*x)^3, x) == :(a^-1*f^-1*(c+d*x)^3*tan((1/2)*e+1//4*pi+(1/2)*f*x)+-1*im*a^-1*f^-1*(c+d*x)^3+12*a^-1*d^3*f^-4*Polylogarithms.polylog(3,-1*im*exp(im*(e+f*x)))+6*d*a^-1*f^-2*(c+d*x)^2*log(1+im*exp(im*(e+f*x)))+-12*im*a^-1*d^2*f^-3*(c+d*x)*Polylogarithms.polylog(2,-1*im*exp(im*(e+f*x))))
@test integrate((a+-1*a*sin(e+f*x))^-1*(c+d*x)^2, x) == :(a^-1*f^-1*(c+d*x)^2*tan((1/2)*e+1//4*pi+(1/2)*f*x)+-1*im*a^-1*f^-1*(c+d*x)^2+-4*im*a^-1*d^2*f^-3*Polylogarithms.polylog(2,-1*im*exp(im*(e+f*x)))+4*d*a^-1*f^-2*(c+d*x)*log(1+im*exp(im*(e+f*x))))
@test integrate((a+-1*a*sin(e+f*x))^-1*(c+d*x), x) == :(a^-1*f^-1*(c+d*x)*tan((1/2)*e+1//4*pi+(1/2)*f*x)+2*d*a^-1*f^-2*log(cos((1/2)*e+1//4*pi+(1/2)*f*x)))
# --- ∫ x^n * sqrt(a + a*sin(c+d*x)) dx for n = 3..-3.
# Uses half-angle cot factors; negative n bring in sinintegral/cosintegral
# of (1/2)*d*x.
@test integrate(x^3*(a+a*sin(c+d*x))^(1/2), x) == :(-96*d^-4*(a+a*sin(c+d*x))^(1/2)+12*d^-2*x^2*(a+a*sin(c+d*x))^(1/2)+-2*d^-1*x^3*(a+a*sin(c+d*x))^(1/2)*cot((1/2)*c+1//4*pi+(1/2)*d*x)+48*x*d^-3*(a+a*sin(c+d*x))^(1/2)*cot((1/2)*c+1//4*pi+(1/2)*d*x))
@test integrate(x^2*(a+a*sin(c+d*x))^(1/2), x) == :(8*x*d^-2*(a+a*sin(c+d*x))^(1/2)+16*d^-3*(a+a*sin(c+d*x))^(1/2)*cot((1/2)*c+1//4*pi+(1/2)*d*x)+-2*d^-1*x^2*(a+a*sin(c+d*x))^(1/2)*cot((1/2)*c+1//4*pi+(1/2)*d*x))
@test integrate(x*(a+a*sin(c+d*x))^(1/2), x) == :(4*d^-2*(a+a*sin(c+d*x))^(1/2)+-2*x*d^-1*(a+a*sin(c+d*x))^(1/2)*cot((1/2)*c+1//4*pi+(1/2)*d*x))
@test integrate(x^-1*(a+a*sin(c+d*x))^(1/2), x) == :((a+a*sin(c+d*x))^(1/2)*cos((1/2)*c+1//4*pi)*csc((1/2)*c+1//4*pi+(1/2)*d*x)*sinintegral((1/2)*d*x)+(a+a*sin(c+d*x))^(1/2)*cosintegral((1/2)*d*x)*csc((1/2)*c+1//4*pi+(1/2)*d*x)*sin((1/2)*c+1//4*pi))
@test integrate(x^-2*(a+a*sin(c+d*x))^(1/2), x) == :(-1*x^-1*(a+a*sin(c+d*x))^(1/2)+(1/2)*d*(a+a*sin(c+d*x))^(1/2)*cos((1/2)*c+1//4*pi)*cosintegral((1/2)*d*x)*csc((1/2)*c+1//4*pi+(1/2)*d*x)+-1//2*d*(a+a*sin(c+d*x))^(1/2)*csc((1/2)*c+1//4*pi+(1/2)*d*x)*sin((1/2)*c+1//4*pi)*sinintegral((1/2)*d*x))
@test integrate(x^-3*(a+a*sin(c+d*x))^(1/2), x) == :(-1//2*x^-2*(a+a*sin(c+d*x))^(1/2)+-1//4*d*x^-1*(a+a*sin(c+d*x))^(1/2)*cot((1/2)*c+1//4*pi+(1/2)*d*x)+-1//8*d^2*(a+a*sin(c+d*x))^(1/2)*cos((1/2)*c+1//4*pi)*csc((1/2)*c+1//4*pi+(1/2)*d*x)*sinintegral((1/2)*d*x)+-1//8*d^2*(a+a*sin(c+d*x))^(1/2)*cosintegral((1/2)*d*x)*csc((1/2)*c+1//4*pi+(1/2)*d*x)*sin((1/2)*c+1//4*pi))
# --- ∫ x^n * (a + a*sin(e+f*x))^(3/2) dx for n = 3..-3; negative n add
# Si/Ci terms at frequencies (1/2)*f*x and (3/2)*f*x.
@test integrate(x^3*(a+a*sin(e+f*x))^3//2, x) == :(-1280//9*a*f^-4*(a+a*sin(e+f*x))^(1/2)+16*a*f^-2*x^2*(a+a*sin(e+f*x))^(1/2)+-64//27*a*f^-4*(a+a*sin(e+f*x))^(1/2)*sin((1/2)*e+1//4*pi+(1/2)*f*x)^2+-8//3*a*f^-1*x^3*(a+a*sin(e+f*x))^(1/2)*cot((1/2)*e+1//4*pi+(1/2)*f*x)+8//3*a*f^-2*x^2*(a+a*sin(e+f*x))^(1/2)*sin((1/2)*e+1//4*pi+(1/2)*f*x)^2+640//9*a*x*f^-3*(a+a*sin(e+f*x))^(1/2)*cot((1/2)*e+1//4*pi+(1/2)*f*x)+-4//3*a*f^-1*x^3*(a+a*sin(e+f*x))^(1/2)*cos((1/2)*e+1//4*pi+(1/2)*f*x)*sin((1/2)*e+1//4*pi+(1/2)*f*x)+32//9*a*x*f^-3*(a+a*sin(e+f*x))^(1/2)*cos((1/2)*e+1//4*pi+(1/2)*f*x)*sin((1/2)*e+1//4*pi+(1/2)*f*x))
@test integrate(x^2*(a+a*sin(e+f*x))^3//2, x) == :(32//3*a*x*f^-2*(a+a*sin(e+f*x))^(1/2)+224//9*a*f^-3*(a+a*sin(e+f*x))^(1/2)*cot((1/2)*e+1//4*pi+(1/2)*f*x)+-32//27*a*f^-3*(a+a*sin(e+f*x))^(1/2)*cos((1/2)*e+1//4*pi+(1/2)*f*x)^2*cot((1/2)*e+1//4*pi+(1/2)*f*x)+-8//3*a*f^-1*x^2*(a+a*sin(e+f*x))^(1/2)*cot((1/2)*e+1//4*pi+(1/2)*f*x)+16//9*a*x*f^-2*(a+a*sin(e+f*x))^(1/2)*sin((1/2)*e+1//4*pi+(1/2)*f*x)^2+-4//3*a*f^-1*x^2*(a+a*sin(e+f*x))^(1/2)*cos((1/2)*e+1//4*pi+(1/2)*f*x)*sin((1/2)*e+1//4*pi+(1/2)*f*x))
@test integrate(x*(a+a*sin(e+f*x))^3//2, x) == :(16//3*a*f^-2*(a+a*sin(e+f*x))^(1/2)+8//9*a*f^-2*(a+a*sin(e+f*x))^(1/2)*sin((1/2)*e+1//4*pi+(1/2)*f*x)^2+-8//3*a*x*f^-1*(a+a*sin(e+f*x))^(1/2)*cot((1/2)*e+1//4*pi+(1/2)*f*x)+-4//3*a*x*f^-1*(a+a*sin(e+f*x))^(1/2)*cos((1/2)*e+1//4*pi+(1/2)*f*x)*sin((1/2)*e+1//4*pi+(1/2)*f*x))
@test integrate(x^-1*(a+a*sin(e+f*x))^3//2, x) == :((1/2)*a*(a+a*sin(e+f*x))^(1/2)*csc((1/2)*e+1//4*pi+(1/2)*f*x)*sin(1//4*pi+3//2*e)*sinintegral(3//2*f*x)+-1//2*a*(a+a*sin(e+f*x))^(1/2)*cos(1//4*pi+3//2*e)*cosintegral(3//2*f*x)*csc((1/2)*e+1//4*pi+(1/2)*f*x)+3//2*a*(a+a*sin(e+f*x))^(1/2)*cos((1/2)*e+1//4*pi)*csc((1/2)*e+1//4*pi+(1/2)*f*x)*sinintegral((1/2)*f*x)+3//2*a*(a+a*sin(e+f*x))^(1/2)*cosintegral((1/2)*f*x)*csc((1/2)*e+1//4*pi+(1/2)*f*x)*sin((1/2)*e+1//4*pi))
@test integrate(x^-2*(a+a*sin(e+f*x))^3//2, x) == :(-2*a*x^-1*(a+a*sin(e+f*x))^(1/2)*sin((1/2)*e+1//4*pi+(1/2)*f*x)^2+-3//4*a*f*(a+a*sin(e+f*x))^(1/2)*csc((1/2)*e+1//4*pi+(1/2)*f*x)*sin((1/2)*e+1//4*pi)*sinintegral((1/2)*f*x)+3//4*a*f*(a+a*sin(e+f*x))^(1/2)*cos((1/2)*e+1//4*pi)*cosintegral((1/2)*f*x)*csc((1/2)*e+1//4*pi+(1/2)*f*x)+3//4*a*f*(a+a*sin(e+f*x))^(1/2)*cos(1//4*pi+3//2*e)*csc((1/2)*e+1//4*pi+(1/2)*f*x)*sinintegral(3//2*f*x)+3//4*a*f*(a+a*sin(e+f*x))^(1/2)*cosintegral(3//2*f*x)*csc((1/2)*e+1//4*pi+(1/2)*f*x)*sin(1//4*pi+3//2*e))
@test integrate(x^-3*(a+a*sin(e+f*x))^3//2, x) == :(-1*a*x^-2*(a+a*sin(e+f*x))^(1/2)*sin((1/2)*e+1//4*pi+(1/2)*f*x)^2+-9//16*a*f^2*(a+a*sin(e+f*x))^(1/2)*csc((1/2)*e+1//4*pi+(1/2)*f*x)*sin(1//4*pi+3//2*e)*sinintegral(3//2*f*x)+-3//2*a*f*x^-1*(a+a*sin(e+f*x))^(1/2)*cos((1/2)*e+1//4*pi+(1/2)*f*x)*sin((1/2)*e+1//4*pi+(1/2)*f*x)+-3//16*a*f^2*(a+a*sin(e+f*x))^(1/2)*cos((1/2)*e+1//4*pi)*csc((1/2)*e+1//4*pi+(1/2)*f*x)*sinintegral((1/2)*f*x)+-3//16*a*f^2*(a+a*sin(e+f*x))^(1/2)*cosintegral((1/2)*f*x)*csc((1/2)*e+1//4*pi+(1/2)*f*x)*sin((1/2)*e+1//4*pi)+9//16*a*f^2*(a+a*sin(e+f*x))^(1/2)*cos(1//4*pi+3//2*e)*cosintegral(3//2*f*x)*csc((1/2)*e+1//4*pi+(1/2)*f*x))
# --- ∫ x^n * (a + a*sin)^( -1/2 ) and ^( -3/2 ) dx; expected results use
# arctanh and Polylogarithms.polylog of ±exp(1//4*im*(pi+2c+2*d*x)).
@test integrate(x^3*(a+a*sin(c+d*x))^-1//2, x) == :(-96*im*d^-4*(a+a*sin(c+d*x))^-1//2*Polylogarithms.polylog(4,-1*exp(1//4*im*(pi+2c+2*d*x)))*sin((1/2)*c+1//4*pi+(1/2)*d*x)+-48*x*d^-3*(a+a*sin(c+d*x))^-1//2*Polylogarithms.polylog(3,-1*exp(1//4*im*(pi+2c+2*d*x)))*sin((1/2)*c+1//4*pi+(1/2)*d*x)+-4*d^-1*x^3*(a+a*sin(c+d*x))^-1//2*arctanh(exp(1//4*im*(pi+2c+2*d*x)))*sin((1/2)*c+1//4*pi+(1/2)*d*x)+48*x*d^-3*(a+a*sin(c+d*x))^-1//2*Polylogarithms.polylog(3,exp(1//4*im*(pi+2c+2*d*x)))*sin((1/2)*c+1//4*pi+(1/2)*d*x)+96*im*d^-4*(a+a*sin(c+d*x))^-1//2*Polylogarithms.polylog(4,exp(1//4*im*(pi+2c+2*d*x)))*sin((1/2)*c+1//4*pi+(1/2)*d*x)+-12*im*d^-2*x^2*(a+a*sin(c+d*x))^-1//2*Polylogarithms.polylog(2,exp(1//4*im*(pi+2c+2*d*x)))*sin((1/2)*c+1//4*pi+(1/2)*d*x)+12*im*d^-2*x^2*(a+a*sin(c+d*x))^-1//2*Polylogarithms.polylog(2,-1*exp(1//4*im*(pi+2c+2*d*x)))*sin((1/2)*c+1//4*pi+(1/2)*d*x))
@test integrate(x^2*(a+a*sin(c+d*x))^-1//2, x) == :(-16*d^-3*(a+a*sin(c+d*x))^-1//2*Polylogarithms.polylog(3,-1*exp(1//4*im*(pi+2c+2*d*x)))*sin((1/2)*c+1//4*pi+(1/2)*d*x)+16*d^-3*(a+a*sin(c+d*x))^-1//2*Polylogarithms.polylog(3,exp(1//4*im*(pi+2c+2*d*x)))*sin((1/2)*c+1//4*pi+(1/2)*d*x)+-4*d^-1*x^2*(a+a*sin(c+d*x))^-1//2*arctanh(exp(1//4*im*(pi+2c+2*d*x)))*sin((1/2)*c+1//4*pi+(1/2)*d*x)+-8*im*x*d^-2*(a+a*sin(c+d*x))^-1//2*Polylogarithms.polylog(2,exp(1//4*im*(pi+2c+2*d*x)))*sin((1/2)*c+1//4*pi+(1/2)*d*x)+8*im*x*d^-2*(a+a*sin(c+d*x))^-1//2*Polylogarithms.polylog(2,-1*exp(1//4*im*(pi+2c+2*d*x)))*sin((1/2)*c+1//4*pi+(1/2)*d*x))
@test integrate(x*(a+a*sin(c+d*x))^-1//2, x) == :(-4*im*d^-2*(a+a*sin(c+d*x))^-1//2*Polylogarithms.polylog(2,exp(1//4*im*(pi+2c+2*d*x)))*sin((1/2)*c+1//4*pi+(1/2)*d*x)+-4*x*d^-1*(a+a*sin(c+d*x))^-1//2*arctanh(exp(1//4*im*(pi+2c+2*d*x)))*sin((1/2)*c+1//4*pi+(1/2)*d*x)+4*im*d^-2*(a+a*sin(c+d*x))^-1//2*Polylogarithms.polylog(2,-1*exp(1//4*im*(pi+2c+2*d*x)))*sin((1/2)*c+1//4*pi+(1/2)*d*x))
@test integrate(x^3*(a+a*sin(e+f*x))^-3//2, x) == :(-3*a^-1*f^-2*x^2*(a+a*sin(e+f*x))^-1//2+-1//2*a^-1*f^-1*x^3*(a+a*sin(e+f*x))^-1//2*cot((1/2)*e+1//4*pi+(1/2)*f*x)+-1*a^-1*f^-1*x^3*(a+a*sin(e+f*x))^-1//2*arctanh(exp(1//4*im*(pi+2*e+2*f*x)))*sin((1/2)*e+1//4*pi+(1/2)*f*x)+-24*im*a^-1*f^-4*(a+a*sin(e+f*x))^-1//2*Polylogarithms.polylog(2,exp(1//4*im*(pi+2*e+2*f*x)))*sin((1/2)*e+1//4*pi+(1/2)*f*x)+-24*im*a^-1*f^-4*(a+a*sin(e+f*x))^-1//2*Polylogarithms.polylog(4,-1*exp(1//4*im*(pi+2*e+2*f*x)))*sin((1/2)*e+1//4*pi+(1/2)*f*x)+-24*x*a^-1*f^-3*(a+a*sin(e+f*x))^-1//2*arctanh(exp(1//4*im*(pi+2*e+2*f*x)))*sin((1/2)*e+1//4*pi+(1/2)*f*x)+-12*x*a^-1*f^-3*(a+a*sin(e+f*x))^-1//2*Polylogarithms.polylog(3,-1*exp(1//4*im*(pi+2*e+2*f*x)))*sin((1/2)*e+1//4*pi+(1/2)*f*x)+12*x*a^-1*f^-3*(a+a*sin(e+f*x))^-1//2*Polylogarithms.polylog(3,exp(1//4*im*(pi+2*e+2*f*x)))*sin((1/2)*e+1//4*pi+(1/2)*f*x)+24*im*a^-1*f^-4*(a+a*sin(e+f*x))^-1//2*Polylogarithms.polylog(2,-1*exp(1//4*im*(pi+2*e+2*f*x)))*sin((1/2)*e+1//4*pi+(1/2)*f*x)+24*im*a^-1*f^-4*(a+a*sin(e+f*x))^-1//2*Polylogarithms.polylog(4,exp(1//4*im*(pi+2*e+2*f*x)))*sin((1/2)*e+1//4*pi+(1/2)*f*x)+-3*im*a^-1*f^-2*x^2*(a+a*sin(e+f*x))^-1//2*Polylogarithms.polylog(2,exp(1//4*im*(pi+2*e+2*f*x)))*sin((1/2)*e+1//4*pi+(1/2)*f*x)+3*im*a^-1*f^-2*x^2*(a+a*sin(e+f*x))^-1//2*Polylogarithms.polylog(2,-1*exp(1//4*im*(pi+2*e+2*f*x)))*sin((1/2)*e+1//4*pi+(1/2)*f*x))
@test integrate(x^2*(a+a*sin(e+f*x))^-3//2, x) == :(-2*x*a^-1*f^-2*(a+a*sin(e+f*x))^-1//2+-4*a^-1*f^-3*(a+a*sin(e+f*x))^-1//2*Polylogarithms.polylog(3,-1*exp(1//4*im*(pi+2*e+2*f*x)))*sin((1/2)*e+1//4*pi+(1/2)*f*x)+-4*a^-1*f^-3*(a+a*sin(e+f*x))^-1//2*arctanh(cos((1/2)*e+1//4*pi+(1/2)*f*x))*sin((1/2)*e+1//4*pi+(1/2)*f*x)+4*a^-1*f^-3*(a+a*sin(e+f*x))^-1//2*Polylogarithms.polylog(3,exp(1//4*im*(pi+2*e+2*f*x)))*sin((1/2)*e+1//4*pi+(1/2)*f*x)+-1//2*a^-1*f^-1*x^2*(a+a*sin(e+f*x))^-1//2*cot((1/2)*e+1//4*pi+(1/2)*f*x)+-1*a^-1*f^-1*x^2*(a+a*sin(e+f*x))^-1//2*arctanh(exp(1//4*im*(pi+2*e+2*f*x)))*sin((1/2)*e+1//4*pi+(1/2)*f*x)+-2*im*x*a^-1*f^-2*(a+a*sin(e+f*x))^-1//2*Polylogarithms.polylog(2,exp(1//4*im*(pi+2*e+2*f*x)))*sin((1/2)*e+1//4*pi+(1/2)*f*x)+2*im*x*a^-1*f^-2*(a+a*sin(e+f*x))^-1//2*Polylogarithms.polylog(2,-1*exp(1//4*im*(pi+2*e+2*f*x)))*sin((1/2)*e+1//4*pi+(1/2)*f*x))
@test integrate(x*(a+a*sin(e+f*x))^-3//2, x) == :(-1*a^-1*f^-2*(a+a*sin(e+f*x))^-1//2+-1//2*x*a^-1*f^-1*(a+a*sin(e+f*x))^-1//2*cot((1/2)*e+1//4*pi+(1/2)*f*x)+im*a^-1*f^-2*(a+a*sin(e+f*x))^-1//2*Polylogarithms.polylog(2,-1*exp(1//4*im*(pi+2*e+2*f*x)))*sin((1/2)*e+1//4*pi+(1/2)*f*x)+-1*im*a^-1*f^-2*(a+a*sin(e+f*x))^-1//2*Polylogarithms.polylog(2,exp(1//4*im*(pi+2*e+2*f*x)))*sin((1/2)*e+1//4*pi+(1/2)*f*x)+-1*x*a^-1*f^-1*(a+a*sin(e+f*x))^-1//2*arctanh(exp(1//4*im*(pi+2*e+2*f*x)))*sin((1/2)*e+1//4*pi+(1/2)*f*x))
# --- ∫ (a + a*sin(e+f*x))^p * (c+d*x)^m dx for p = 3, 2, 1 with symbolic m;
# upper incomplete gamma (SpecialFunctions.gamma(1+m, ·)) at 1x, 2x, 3x
# frequencies.
@test integrate((a+a*sin(e+f*x))^3*(c+d*x)^m, x) == :(5//2*a^3*d^-1*(1+m)^-1*(c+d*x)^(1+m)+-15//8*a^3*f^-1*(im*f*d^-1*(c+d*x))^(-1m)*(c+d*x)^m*SpecialFunctions.gamma(1+m,im*f*d^-1*(c+d*x))*exp(-1*im*(e+-1*c*f*d^-1))+-15//8*a^3*f^-1*(-1*im*f*d^-1*(c+d*x))^(-1m)*(c+d*x)^m*SpecialFunctions.gamma(1+m,-1*im*f*d^-1*(c+d*x))*exp(im*(e+-1*c*f*d^-1))+1//8*3^(-1+-1m)*a^3*f^-1*(im*f*d^-1*(c+d*x))^(-1m)*(c+d*x)^m*SpecialFunctions.gamma(1+m,3*im*f*d^-1*(c+d*x))*exp(-3*im*(e+-1*c*f*d^-1))+1//8*3^(-1+-1m)*a^3*f^-1*(-1*im*f*d^-1*(c+d*x))^(-1m)*(c+d*x)^m*SpecialFunctions.gamma(1+m,-3*im*f*d^-1*(c+d*x))*exp(3*im*(e+-1*c*f*d^-1))+-3*im*2^(-3+-1m)*a^3*f^-1*(im*f*d^-1*(c+d*x))^(-1m)*(c+d*x)^m*SpecialFunctions.gamma(1+m,2*im*f*d^-1*(c+d*x))*exp(-2*im*(e+-1*c*f*d^-1))+3*im*2^(-3+-1m)*a^3*f^-1*(-1*im*f*d^-1*(c+d*x))^(-1m)*(c+d*x)^m*SpecialFunctions.gamma(1+m,-2*im*f*d^-1*(c+d*x))*exp(2*im*(e+-1*c*f*d^-1)))
@test integrate((a+a*sin(e+f*x))^2*(c+d*x)^m, x) == :(3//2*a^2*d^-1*(1+m)^-1*(c+d*x)^(1+m)+-1*a^2*f^-1*(im*f*d^-1*(c+d*x))^(-1m)*(c+d*x)^m*SpecialFunctions.gamma(1+m,im*f*d^-1*(c+d*x))*exp(-1*im*(e+-1*c*f*d^-1))+-1*a^2*f^-1*(-1*im*f*d^-1*(c+d*x))^(-1m)*(c+d*x)^m*SpecialFunctions.gamma(1+m,-1*im*f*d^-1*(c+d*x))*exp(im*(e+-1*c*f*d^-1))+im*2^(-3+-1m)*a^2*f^-1*(-1*im*f*d^-1*(c+d*x))^(-1m)*(c+d*x)^m*SpecialFunctions.gamma(1+m,-2*im*f*d^-1*(c+d*x))*exp(2*im*(e+-1*c*f*d^-1))+-1*im*2^(-3+-1m)*a^2*f^-1*(im*f*d^-1*(c+d*x))^(-1m)*(c+d*x)^m*SpecialFunctions.gamma(1+m,2*im*f*d^-1*(c+d*x))*exp(-2*im*(e+-1*c*f*d^-1)))
@test integrate((c+d*x)^m*(a+a*sin(e+f*x)), x) == :(a*d^-1*(1+m)^-1*(c+d*x)^(1+m)+-1//2*a*f^-1*(im*f*d^-1*(c+d*x))^(-1m)*(c+d*x)^m*SpecialFunctions.gamma(1+m,im*f*d^-1*(c+d*x))*exp(-1*im*(e+-1*c*f*d^-1))+-1//2*a*f^-1*(-1*im*f*d^-1*(c+d*x))^(-1m)*(c+d*x)^m*SpecialFunctions.gamma(1+m,-1*im*f*d^-1*(c+d*x))*exp(im*(e+-1*c*f*d^-1)))
# --- General coefficients: ∫ (c+d*x)^n * (a + b*sin(e+f*x))^p dx for
# p = 1, 2 and n = 3..-3 (Si/Ci terms appear for negative n).
@test integrate((c+d*x)^3*(a+b*sin(e+f*x)), x) == :(1//4*a*d^-1*(c+d*x)^4+-1*b*f^-1*(c+d*x)^3*cos(e+f*x)+-6*b*d^3*f^-4*sin(e+f*x)+3*b*d*f^-2*(c+d*x)^2*sin(e+f*x)+6*b*d^2*f^-3*(c+d*x)*cos(e+f*x))
@test integrate((c+d*x)^2*(a+b*sin(e+f*x)), x) == :(1//3*a*d^-1*(c+d*x)^3+-1*b*f^-1*(c+d*x)^2*cos(e+f*x)+2*b*d^2*f^-3*cos(e+f*x)+2*b*d*f^-2*(c+d*x)*sin(e+f*x))
@test integrate((a+b*sin(e+f*x))*(c+d*x), x) == :((1/2)*a*d^-1*(c+d*x)^2+b*d*f^-2*sin(e+f*x)+-1*b*f^-1*(c+d*x)*cos(e+f*x))
@test integrate((c+d*x)^-1*(a+b*sin(e+f*x)), x) == :(a*d^-1*log(c+d*x)+b*d^-1*cos(-1*e+c*f*d^-1)*sinintegral(f*x+c*f*d^-1)+-1*b*d^-1*cosintegral(f*x+c*f*d^-1)*sin(-1*e+c*f*d^-1))
@test integrate((c+d*x)^-2*(a+b*sin(e+f*x)), x) == :(-1*a*d^-1*(c+d*x)^-1+-1*b*d^-1*(c+d*x)^-1*sin(e+f*x)+b*f*d^-2*cos(-1*e+c*f*d^-1)*cosintegral(f*x+c*f*d^-1)+b*f*d^-2*sin(-1*e+c*f*d^-1)*sinintegral(f*x+c*f*d^-1))
@test integrate((c+d*x)^-3*(a+b*sin(e+f*x)), x) == :(-1//2*a*d^-1*(c+d*x)^-2+-1//2*b*d^-1*(c+d*x)^-2*sin(e+f*x)+(1/2)*b*d^-3*f^2*cosintegral(f*x+c*f*d^-1)*sin(-1*e+c*f*d^-1)+-1//2*b*f*d^-2*(c+d*x)^-1*cos(e+f*x)+-1//2*b*d^-3*f^2*cos(-1*e+c*f*d^-1)*sinintegral(f*x+c*f*d^-1))
@test integrate((a+b*sin(e+f*x))^2*(c+d*x)^3, x) == :(1//4*a^2*d^-1*(c+d*x)^4+1//8*b^2*d^-1*(c+d*x)^4+-3//8*b^2*d^3*f^-4*sin(e+f*x)^2+-3//8*b^2*d^3*f^-2*x^2+-12*a*b*d^3*f^-4*sin(e+f*x)+-2*a*b*f^-1*(c+d*x)^3*cos(e+f*x)+-3//4*c*x*b^2*d^2*f^-2+-1//2*b^2*f^-1*(c+d*x)^3*cos(e+f*x)*sin(e+f*x)+3//4*d*b^2*f^-2*(c+d*x)^2*sin(e+f*x)^2+6*a*b*d*f^-2*(c+d*x)^2*sin(e+f*x)+12*a*b*d^2*f^-3*(c+d*x)*cos(e+f*x)+3//4*b^2*d^2*f^-3*(c+d*x)*cos(e+f*x)*sin(e+f*x))
@test integrate((a+b*sin(e+f*x))^2*(c+d*x)^2, x) == :(1//3*a^2*d^-1*(c+d*x)^3+1//6*b^2*d^-1*(c+d*x)^3+-1//4*x*b^2*d^2*f^-2+(1/2)*d*b^2*f^-2*sin(e+f*x)^2*(c+d*x)+-2*a*b*f^-1*(c+d*x)^2*cos(e+f*x)+4*a*b*d^2*f^-3*cos(e+f*x)+-1//2*b^2*f^-1*(c+d*x)^2*cos(e+f*x)*sin(e+f*x)+1//4*b^2*d^2*f^-3*cos(e+f*x)*sin(e+f*x)+4*a*b*d*f^-2*(c+d*x)*sin(e+f*x))
@test integrate((a+b*sin(e+f*x))^2*(c+d*x), x) == :((1/2)*c*x*b^2+(1/2)*a^2*d^-1*(c+d*x)^2+1//4*d*b^2*x^2+1//4*d*b^2*f^-2*sin(e+f*x)^2+-2*a*b*f^-1*(c+d*x)*cos(e+f*x)+2*a*b*d*f^-2*sin(e+f*x)+-1//2*b^2*f^-1*(c+d*x)*cos(e+f*x)*sin(e+f*x))
@test integrate((a+b*sin(e+f*x))^2*(c+d*x)^-1, x) == :(a^2*d^-1*log(c+d*x)+(1/2)*b^2*d^-1*log(c+d*x)+-1//2*b^2*d^-1*cos(-2*e+2*c*f*d^-1)*cosintegral(2*f*x+2*c*f*d^-1)+-1//2*b^2*d^-1*sin(-2*e+2*c*f*d^-1)*sinintegral(2*f*x+2*c*f*d^-1)+-2*a*b*d^-1*cosintegral(f*x+c*f*d^-1)*sin(-1*e+c*f*d^-1)+2*a*b*d^-1*cos(-1*e+c*f*d^-1)*sinintegral(f*x+c*f*d^-1))
@test integrate((a+b*sin(e+f*x))^2*(c+d*x)^-2, x) == :(-1*a^2*d^-1*(c+d*x)^-1+-1*b^2*d^-1*(c+d*x)^-1*sin(e+f*x)^2+f*b^2*d^-2*cos(-2*e+2*c*f*d^-1)*sinintegral(2*f*x+2*c*f*d^-1)+-1*f*b^2*d^-2*cosintegral(2*f*x+2*c*f*d^-1)*sin(-2*e+2*c*f*d^-1)+-2*a*b*d^-1*(c+d*x)^-1*sin(e+f*x)+2*a*b*f*d^-2*cos(-1*e+c*f*d^-1)*cosintegral(f*x+c*f*d^-1)+2*a*b*f*d^-2*sin(-1*e+c*f*d^-1)*sinintegral(f*x+c*f*d^-1))
@test integrate((a+b*sin(e+f*x))^2*(c+d*x)^-3, x) == :(-1//2*a^2*d^-1*(c+d*x)^-2+-1//2*b^2*d^-1*(c+d*x)^-2*sin(e+f*x)^2+b^2*d^-3*f^2*cos(-2*e+2*c*f*d^-1)*cosintegral(2*f*x+2*c*f*d^-1)+b^2*d^-3*f^2*sin(-2*e+2*c*f*d^-1)*sinintegral(2*f*x+2*c*f*d^-1)+-1*a*b*d^-1*(c+d*x)^-2*sin(e+f*x)+a*b*d^-3*f^2*cosintegral(f*x+c*f*d^-1)*sin(-1*e+c*f*d^-1)+-1*a*b*f*d^-2*(c+d*x)^-1*cos(e+f*x)+-1*a*b*d^-3*f^2*cos(-1*e+c*f*d^-1)*sinintegral(f*x+c*f*d^-1)+-1*f*b^2*d^-2*(c+d*x)^-1*cos(e+f*x)*sin(e+f*x))
@test integrate((a+b*sin(e+f*x))^-1*(c+d*x)^3, x) == :(-6*d^3*f^-4*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(4,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+6*d^3*f^-4*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(4,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+im*f^-1*(c+d*x)^3*(a^2+-1*b^2)^-1//2*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+-1*im*f^-1*(c+d*x)^3*(a^2+-1*b^2)^-1//2*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+-3*d*f^-2*(c+d*x)^2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+3*d*f^-2*(c+d*x)^2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+-6*im*d^2*f^-3*(a^2+-1*b^2)^-1//2*(c+d*x)*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+6*im*d^2*f^-3*(a^2+-1*b^2)^-1//2*(c+d*x)*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x))))
@test integrate((a+b*sin(e+f*x))^-1*(c+d*x)^2, x) == :(im*f^-1*(c+d*x)^2*(a^2+-1*b^2)^-1//2*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+-1*im*f^-1*(c+d*x)^2*(a^2+-1*b^2)^-1//2*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+-2*im*d^2*f^-3*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+-2*d*f^-2*(a^2+-1*b^2)^-1//2*(c+d*x)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+2*im*d^2*f^-3*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+2*d*f^-2*(a^2+-1*b^2)^-1//2*(c+d*x)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x))))
# Regression tests: ∫ (c+d*x)^n / (a+b*sin(e+f*x))^p dx for p ∈ {1,2}.
# Each expected value is a quoted Expr compared with `==`, so the antiderivative
# must match the reference output token-for-token (not merely be mathematically
# equivalent). Results are expressed via logs and Polylogarithms.polylog of
# im*b*(a ± sqrt(a^2-b^2))^-1 * exp(im*(e+f*x)) — presumably from partial
# fractions over the exponential form of sin; the structure is as generated,
# do not simplify.
@test integrate((a+b*sin(e+f*x))^-1*(c+d*x), x) == :(d*f^-2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+-1*d*f^-2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+im*f^-1*(a^2+-1*b^2)^-1//2*(c+d*x)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+-1*im*f^-1*(a^2+-1*b^2)^-1//2*(c+d*x)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x))))
@test integrate((a+b*sin(e+f*x))^-2*(c+d*x)^3, x) == :(im*f^-1*(c+d*x)^3*(a^2+-1*b^2)^-1+-6*d^3*f^-4*(a^2+-1*b^2)^-1*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+-6*d^3*f^-4*(a^2+-1*b^2)^-1*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+-6*a*d^3*f^-4*(a^2+-1*b^2)^-3//2*Polylogarithms.polylog(4,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+-3*d*f^-2*(c+d*x)^2*(a^2+-1*b^2)^-1*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+-3*d*f^-2*(c+d*x)^2*(a^2+-1*b^2)^-1*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+6*a*d^3*f^-4*(a^2+-1*b^2)^-3//2*Polylogarithms.polylog(4,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+im*a*f^-1*(c+d*x)^3*(a^2+-1*b^2)^-3//2*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+b*f^-1*(a+b*sin(e+f*x))^-1*(c+d*x)^3*(a^2+-1*b^2)^-1*cos(e+f*x)+-1*im*a*f^-1*(c+d*x)^3*(a^2+-1*b^2)^-3//2*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+-3*a*d*f^-2*(c+d*x)^2*(a^2+-1*b^2)^-3//2*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+3*a*d*f^-2*(c+d*x)^2*(a^2+-1*b^2)^-3//2*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+6*im*d^2*f^-3*(a^2+-1*b^2)^-1*(c+d*x)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+6*im*d^2*f^-3*(a^2+-1*b^2)^-1*(c+d*x)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+-6*im*a*d^2*f^-3*(a^2+-1*b^2)^-3//2*(c+d*x)*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+6*im*a*d^2*f^-3*(a^2+-1*b^2)^-3//2*(c+d*x)*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x))))
@test integrate((a+b*sin(e+f*x))^-2*(c+d*x)^2, x) == :(im*f^-1*(c+d*x)^2*(a^2+-1*b^2)^-1+-2*d*f^-2*(a^2+-1*b^2)^-1*(c+d*x)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+-2*d*f^-2*(a^2+-1*b^2)^-1*(c+d*x)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+2*im*d^2*f^-3*(a^2+-1*b^2)^-1*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+2*im*d^2*f^-3*(a^2+-1*b^2)^-1*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+im*a*f^-1*(c+d*x)^2*(a^2+-1*b^2)^-3//2*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+b*f^-1*(a+b*sin(e+f*x))^-1*(c+d*x)^2*(a^2+-1*b^2)^-1*cos(e+f*x)+-1*im*a*f^-1*(c+d*x)^2*(a^2+-1*b^2)^-3//2*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+-2*im*a*d^2*f^-3*(a^2+-1*b^2)^-3//2*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+-2*a*d*f^-2*(a^2+-1*b^2)^-3//2*(c+d*x)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+2*im*a*d^2*f^-3*(a^2+-1*b^2)^-3//2*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+2*a*d*f^-2*(a^2+-1*b^2)^-3//2*(c+d*x)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x))))
@test integrate((a+b*sin(e+f*x))^-2*(c+d*x), x) == :(-1*d*f^-2*(a^2+-1*b^2)^-1*log(a+b*sin(e+f*x))+a*d*f^-2*(a^2+-1*b^2)^-3//2*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+-1*a*d*f^-2*(a^2+-1*b^2)^-3//2*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+im*a*f^-1*(a^2+-1*b^2)^-3//2*(c+d*x)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x)))+b*f^-1*(a+b*sin(e+f*x))^-1*(a^2+-1*b^2)^-1*(c+d*x)*cos(e+f*x)+-1*im*a*f^-1*(a^2+-1*b^2)^-3//2*(c+d*x)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(e+f*x))))
# Regression tests: ∫ (a+b*sin(e+f*x))^p * (c+d*x)^m dx for p ∈ {1,2,3} with a
# symbolic exponent m. Expected results use the two-argument (upper incomplete)
# SpecialFunctions.gamma(1+m, ±k*im*f*d^-1*(c+d*x)) for each harmonic k of the
# expanded sin power. Quoted-Expr equality: keep reference expressions verbatim.
@test integrate((a+b*sin(e+f*x))^3*(c+d*x)^m, x) == :(a^3*d^-1*(1+m)^-1*(c+d*x)^(1+m)+3//2*a*b^2*d^-1*(1+m)^-1*(c+d*x)^(1+m)+-3//8*b^3*f^-1*(im*f*d^-1*(c+d*x))^(-1m)*(c+d*x)^m*SpecialFunctions.gamma(1+m,im*f*d^-1*(c+d*x))*exp(-1*im*(e+-1*c*f*d^-1))+-3//8*b^3*f^-1*(-1*im*f*d^-1*(c+d*x))^(-1m)*(c+d*x)^m*SpecialFunctions.gamma(1+m,-1*im*f*d^-1*(c+d*x))*exp(im*(e+-1*c*f*d^-1))+-3//2*b*a^2*f^-1*(im*f*d^-1*(c+d*x))^(-1m)*(c+d*x)^m*SpecialFunctions.gamma(1+m,im*f*d^-1*(c+d*x))*exp(-1*im*(e+-1*c*f*d^-1))+-3//2*b*a^2*f^-1*(-1*im*f*d^-1*(c+d*x))^(-1m)*(c+d*x)^m*SpecialFunctions.gamma(1+m,-1*im*f*d^-1*(c+d*x))*exp(im*(e+-1*c*f*d^-1))+1//8*3^(-1+-1m)*b^3*f^-1*(im*f*d^-1*(c+d*x))^(-1m)*(c+d*x)^m*SpecialFunctions.gamma(1+m,3*im*f*d^-1*(c+d*x))*exp(-3*im*(e+-1*c*f*d^-1))+1//8*3^(-1+-1m)*b^3*f^-1*(-1*im*f*d^-1*(c+d*x))^(-1m)*(c+d*x)^m*SpecialFunctions.gamma(1+m,-3*im*f*d^-1*(c+d*x))*exp(3*im*(e+-1*c*f*d^-1))+-3*im*a*2^(-3+-1m)*b^2*f^-1*(im*f*d^-1*(c+d*x))^(-1m)*(c+d*x)^m*SpecialFunctions.gamma(1+m,2*im*f*d^-1*(c+d*x))*exp(-2*im*(e+-1*c*f*d^-1))+3*im*a*2^(-3+-1m)*b^2*f^-1*(-1*im*f*d^-1*(c+d*x))^(-1m)*(c+d*x)^m*SpecialFunctions.gamma(1+m,-2*im*f*d^-1*(c+d*x))*exp(2*im*(e+-1*c*f*d^-1)))
@test integrate((a+b*sin(e+f*x))^2*(c+d*x)^m, x) == :(a^2*d^-1*(1+m)^-1*(c+d*x)^(1+m)+(1/2)*b^2*d^-1*(1+m)^-1*(c+d*x)^(1+m)+-1*a*b*f^-1*(im*f*d^-1*(c+d*x))^(-1m)*(c+d*x)^m*SpecialFunctions.gamma(1+m,im*f*d^-1*(c+d*x))*exp(-1*im*(e+-1*c*f*d^-1))+-1*a*b*f^-1*(-1*im*f*d^-1*(c+d*x))^(-1m)*(c+d*x)^m*SpecialFunctions.gamma(1+m,-1*im*f*d^-1*(c+d*x))*exp(im*(e+-1*c*f*d^-1))+im*2^(-3+-1m)*b^2*f^-1*(-1*im*f*d^-1*(c+d*x))^(-1m)*(c+d*x)^m*SpecialFunctions.gamma(1+m,-2*im*f*d^-1*(c+d*x))*exp(2*im*(e+-1*c*f*d^-1))+-1*im*2^(-3+-1m)*b^2*f^-1*(im*f*d^-1*(c+d*x))^(-1m)*(c+d*x)^m*SpecialFunctions.gamma(1+m,2*im*f*d^-1*(c+d*x))*exp(-2*im*(e+-1*c*f*d^-1)))
@test integrate((c+d*x)^m*(a+b*sin(e+f*x)), x) == :(a*d^-1*(1+m)^-1*(c+d*x)^(1+m)+-1//2*b*f^-1*(im*f*d^-1*(c+d*x))^(-1m)*(c+d*x)^m*SpecialFunctions.gamma(1+m,im*f*d^-1*(c+d*x))*exp(-1*im*(e+-1*c*f*d^-1))+-1//2*b*f^-1*(-1*im*f*d^-1*(c+d*x))^(-1m)*(c+d*x)^m*SpecialFunctions.gamma(1+m,-1*im*f*d^-1*(c+d*x))*exp(im*(e+-1*c*f*d^-1)))
# Regression tests: degenerate case b == a, i.e. ∫ sin^n or csc^n times
# (e+f*x)^p over (a+a*sin(c+d*x)). Here a^2-b^2 == 0, so the references use
# half-angle cotangents cot((1/2)*c+1//4*pi+(1/2)*d*x), arctanh, logs and
# polylogs of exp(im*(c+d*x)) instead of the sqrt(a^2-b^2) branch terms seen
# in the general-b tests below. Quoted-Expr equality: keep verbatim.
@test integrate((a+a*sin(c+d*x))^-1*(e+f*x)^3*sin(c+d*x), x) == :(1//4*a^-1*f^-1*(e+f*x)^4+im*a^-1*d^-1*(e+f*x)^3+a^-1*d^-1*(e+f*x)^3*cot((1/2)*c+1//4*pi+(1/2)*d*x)+-12*a^-1*d^-4*f^3*Polylogarithms.polylog(3,im*exp(im*(c+d*x)))+-6*f*a^-1*d^-2*(e+f*x)^2*log(1+-1*im*exp(im*(c+d*x)))+12*im*a^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(2,im*exp(im*(c+d*x))))
@test integrate((a+a*sin(c+d*x))^-1*(e+f*x)^2*sin(c+d*x), x) == :(1//3*a^-1*f^-1*(e+f*x)^3+im*a^-1*d^-1*(e+f*x)^2+a^-1*d^-1*(e+f*x)^2*cot((1/2)*c+1//4*pi+(1/2)*d*x)+-4*f*a^-1*d^-2*(e+f*x)*log(1+-1*im*exp(im*(c+d*x)))+4*im*a^-1*d^-3*f^2*Polylogarithms.polylog(2,im*exp(im*(c+d*x))))
@test integrate((a+a*sin(c+d*x))^-1*(e+f*x)*sin(c+d*x), x) == :(e*x*a^-1+(1/2)*f*a^-1*x^2+a^-1*d^-1*(e+f*x)*cot((1/2)*c+1//4*pi+(1/2)*d*x)+-2*f*a^-1*d^-2*log(sin((1/2)*c+1//4*pi+(1/2)*d*x)))
@test integrate((a+a*sin(c+d*x))^-1*sin(c+d*x), x) == :(x*a^-1+d^-1*(a+a*sin(c+d*x))^-1*cos(c+d*x))
@test integrate((a+a*sin(c+d*x))^-1*(e+f*x)^3*sin(c+d*x)^2, x) == :(-1//4*a^-1*f^-1*(e+f*x)^4+-1*im*a^-1*d^-1*(e+f*x)^3+-1*a^-1*d^-1*(e+f*x)^3*cos(c+d*x)+-1*a^-1*d^-1*(e+f*x)^3*cot((1/2)*c+1//4*pi+(1/2)*d*x)+-6*a^-1*d^-4*f^3*sin(c+d*x)+12*a^-1*d^-4*f^3*Polylogarithms.polylog(3,im*exp(im*(c+d*x)))+3*f*a^-1*d^-2*(e+f*x)^2*sin(c+d*x)+6*f*a^-1*d^-2*(e+f*x)^2*log(1+-1*im*exp(im*(c+d*x)))+6*a^-1*d^-3*f^2*(e+f*x)*cos(c+d*x)+-12*im*a^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(2,im*exp(im*(c+d*x))))
@test integrate((a+a*sin(c+d*x))^-1*(e+f*x)^2*sin(c+d*x)^2, x) == :(-1//3*a^-1*f^-1*(e+f*x)^3+-1*im*a^-1*d^-1*(e+f*x)^2+-1*a^-1*d^-1*(e+f*x)^2*cos(c+d*x)+-1*a^-1*d^-1*(e+f*x)^2*cot((1/2)*c+1//4*pi+(1/2)*d*x)+2*a^-1*d^-3*f^2*cos(c+d*x)+-4*im*a^-1*d^-3*f^2*Polylogarithms.polylog(2,im*exp(im*(c+d*x)))+2*f*a^-1*d^-2*(e+f*x)*sin(c+d*x)+4*f*a^-1*d^-2*(e+f*x)*log(1+-1*im*exp(im*(c+d*x))))
@test integrate((a+a*sin(c+d*x))^-1*sin(c+d*x)^2*(e+f*x), x) == :(-1*e*x*a^-1+-1//2*f*a^-1*x^2+f*a^-1*d^-2*sin(c+d*x)+-1*a^-1*d^-1*(e+f*x)*cos(c+d*x)+-1*a^-1*d^-1*(e+f*x)*cot((1/2)*c+1//4*pi+(1/2)*d*x)+2*f*a^-1*d^-2*log(sin((1/2)*c+1//4*pi+(1/2)*d*x)))
@test integrate((a+a*sin(c+d*x))^-1*sin(c+d*x)^2, x) == :(-1*x*a^-1+-1*a^-1*d^-1*cos(c+d*x)+-1*a^-1*d^-1*(1+sin(c+d*x))^-1*cos(c+d*x))
@test integrate((a+a*sin(c+d*x))^-1*(e+f*x)^3*sin(c+d*x)^3, x) == :(3//8*a^-1*f^-1*(e+f*x)^4+im*a^-1*d^-1*(e+f*x)^3+a^-1*d^-1*(e+f*x)^3*cos(c+d*x)+a^-1*d^-1*(e+f*x)^3*cot((1/2)*c+1//4*pi+(1/2)*d*x)+-12*a^-1*d^-4*f^3*Polylogarithms.polylog(3,im*exp(im*(c+d*x)))+6*a^-1*d^-4*f^3*sin(c+d*x)+-3//8*a^-1*d^-4*f^3*sin(c+d*x)^2+-3//8*a^-1*d^-2*f^3*x^2+-6*f*a^-1*d^-2*(e+f*x)^2*log(1+-1*im*exp(im*(c+d*x)))+-6*a^-1*d^-3*f^2*(e+f*x)*cos(c+d*x)+-3*f*a^-1*d^-2*(e+f*x)^2*sin(c+d*x)+-3//4*e*x*a^-1*d^-2*f^2+-1//2*a^-1*d^-1*(e+f*x)^3*cos(c+d*x)*sin(c+d*x)+3//4*f*a^-1*d^-2*(e+f*x)^2*sin(c+d*x)^2+12*im*a^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(2,im*exp(im*(c+d*x)))+3//4*a^-1*d^-3*f^2*(e+f*x)*cos(c+d*x)*sin(c+d*x))
@test integrate((a+a*sin(c+d*x))^-1*(e+f*x)^2*sin(c+d*x)^3, x) == :((1/2)*a^-1*f^-1*(e+f*x)^3+im*a^-1*d^-1*(e+f*x)^2+a^-1*d^-1*(e+f*x)^2*cos(c+d*x)+a^-1*d^-1*(e+f*x)^2*cot((1/2)*c+1//4*pi+(1/2)*d*x)+-2*a^-1*d^-3*f^2*cos(c+d*x)+-1//4*x*a^-1*d^-2*f^2+(1/2)*f*a^-1*d^-2*sin(c+d*x)^2*(e+f*x)+-4*f*a^-1*d^-2*(e+f*x)*log(1+-1*im*exp(im*(c+d*x)))+-2*f*a^-1*d^-2*(e+f*x)*sin(c+d*x)+4*im*a^-1*d^-3*f^2*Polylogarithms.polylog(2,im*exp(im*(c+d*x)))+-1//2*a^-1*d^-1*(e+f*x)^2*cos(c+d*x)*sin(c+d*x)+1//4*a^-1*d^-3*f^2*cos(c+d*x)*sin(c+d*x))
@test integrate((a+a*sin(c+d*x))^-1*sin(c+d*x)^3*(e+f*x), x) == :(3//2*e*x*a^-1+3//4*f*a^-1*x^2+a^-1*d^-1*(e+f*x)*cos(c+d*x)+a^-1*d^-1*(e+f*x)*cot((1/2)*c+1//4*pi+(1/2)*d*x)+-1*f*a^-1*d^-2*sin(c+d*x)+-2*f*a^-1*d^-2*log(sin((1/2)*c+1//4*pi+(1/2)*d*x))+1//4*f*a^-1*d^-2*sin(c+d*x)^2+-1//2*a^-1*d^-1*(e+f*x)*cos(c+d*x)*sin(c+d*x))
@test integrate((a+a*sin(c+d*x))^-1*sin(c+d*x)^3, x) == :(3//2*x*a^-1+2*a^-1*d^-1*cos(c+d*x)+d^-1*(a+a*sin(c+d*x))^-1*sin(c+d*x)^2*cos(c+d*x)+-3//2*a^-1*d^-1*cos(c+d*x)*sin(c+d*x))
# csc powers: same b == a family with csc(c+d*x)^n in the integrand.
@test integrate((a+a*sin(c+d*x))^-1*(e+f*x)^3*csc(c+d*x), x) == :(im*a^-1*d^-1*(e+f*x)^3+a^-1*d^-1*(e+f*x)^3*cot((1/2)*c+1//4*pi+(1/2)*d*x)+-12*a^-1*d^-4*f^3*Polylogarithms.polylog(3,im*exp(im*(c+d*x)))+-2*a^-1*d^-1*(e+f*x)^3*arctanh(exp(im*(c+d*x)))+-6*im*a^-1*d^-4*f^3*Polylogarithms.polylog(4,-1*exp(im*(c+d*x)))+-6*f*a^-1*d^-2*(e+f*x)^2*log(1+-1*im*exp(im*(c+d*x)))+-6*a^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(3,-1*exp(im*(c+d*x)))+6*im*a^-1*d^-4*f^3*Polylogarithms.polylog(4,exp(im*(c+d*x)))+6*a^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(3,exp(im*(c+d*x)))+-3*im*f*a^-1*d^-2*(e+f*x)^2*Polylogarithms.polylog(2,exp(im*(c+d*x)))+3*im*f*a^-1*d^-2*(e+f*x)^2*Polylogarithms.polylog(2,-1*exp(im*(c+d*x)))+12*im*a^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(2,im*exp(im*(c+d*x))))
@test integrate((a+a*sin(c+d*x))^-1*(e+f*x)^2*csc(c+d*x), x) == :(im*a^-1*d^-1*(e+f*x)^2+a^-1*d^-1*(e+f*x)^2*cot((1/2)*c+1//4*pi+(1/2)*d*x)+-2*a^-1*d^-1*(e+f*x)^2*arctanh(exp(im*(c+d*x)))+-2*a^-1*d^-3*f^2*Polylogarithms.polylog(3,-1*exp(im*(c+d*x)))+2*a^-1*d^-3*f^2*Polylogarithms.polylog(3,exp(im*(c+d*x)))+-4*f*a^-1*d^-2*(e+f*x)*log(1+-1*im*exp(im*(c+d*x)))+4*im*a^-1*d^-3*f^2*Polylogarithms.polylog(2,im*exp(im*(c+d*x)))+-2*im*f*a^-1*d^-2*(e+f*x)*Polylogarithms.polylog(2,exp(im*(c+d*x)))+2*im*f*a^-1*d^-2*(e+f*x)*Polylogarithms.polylog(2,-1*exp(im*(c+d*x))))
@test integrate((a+a*sin(c+d*x))^-1*(e+f*x)*csc(c+d*x), x) == :(a^-1*d^-1*(e+f*x)*cot((1/2)*c+1//4*pi+(1/2)*d*x)+a^-1*d^-1*(-2*e+-2*f*x)*arctanh(exp(im*(c+d*x)))+-2*f*a^-1*d^-2*log(sin((1/2)*c+1//4*pi+(1/2)*d*x))+im*f*a^-1*d^-2*Polylogarithms.polylog(2,-1*exp(im*(c+d*x)))+-1*im*f*a^-1*d^-2*Polylogarithms.polylog(2,exp(im*(c+d*x))))
@test integrate((a+a*sin(c+d*x))^-1*csc(c+d*x), x) == :(d^-1*(a+a*sin(c+d*x))^-1*cos(c+d*x)+-1*a^-1*d^-1*arctanh(cos(c+d*x)))
@test integrate((a+a*sin(c+d*x))^-1*(e+f*x)^3*csc(c+d*x)^2, x) == :(-1*a^-1*d^-1*(e+f*x)^3*cot(c+d*x)+-1*a^-1*d^-1*(e+f*x)^3*cot((1/2)*c+1//4*pi+(1/2)*d*x)+-2*im*a^-1*d^-1*(e+f*x)^3+2*a^-1*d^-1*(e+f*x)^3*arctanh(exp(im*(c+d*x)))+12*a^-1*d^-4*f^3*Polylogarithms.polylog(3,im*exp(im*(c+d*x)))+3//2*a^-1*d^-4*f^3*Polylogarithms.polylog(3,exp(2*im*(c+d*x)))+-6*im*a^-1*d^-4*f^3*Polylogarithms.polylog(4,exp(im*(c+d*x)))+-6*a^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(3,exp(im*(c+d*x)))+3*f*a^-1*d^-2*(e+f*x)^2*log(1+-1*exp(2*im*(c+d*x)))+6*im*a^-1*d^-4*f^3*Polylogarithms.polylog(4,-1*exp(im*(c+d*x)))+6*f*a^-1*d^-2*(e+f*x)^2*log(1+-1*im*exp(im*(c+d*x)))+6*a^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(3,-1*exp(im*(c+d*x)))+-12*im*a^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(2,im*exp(im*(c+d*x)))+-3*im*f*a^-1*d^-2*(e+f*x)^2*Polylogarithms.polylog(2,-1*exp(im*(c+d*x)))+-3*im*a^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(2,exp(2*im*(c+d*x)))+3*im*f*a^-1*d^-2*(e+f*x)^2*Polylogarithms.polylog(2,exp(im*(c+d*x))))
@test integrate((a+a*sin(c+d*x))^-1*(e+f*x)^2*csc(c+d*x)^2, x) == :(-1*a^-1*d^-1*(e+f*x)^2*cot(c+d*x)+-1*a^-1*d^-1*(e+f*x)^2*cot((1/2)*c+1//4*pi+(1/2)*d*x)+-2*im*a^-1*d^-1*(e+f*x)^2+-2*a^-1*d^-3*f^2*Polylogarithms.polylog(3,exp(im*(c+d*x)))+2*a^-1*d^-1*(e+f*x)^2*arctanh(exp(im*(c+d*x)))+2*a^-1*d^-3*f^2*Polylogarithms.polylog(3,-1*exp(im*(c+d*x)))+-1*im*a^-1*d^-3*f^2*Polylogarithms.polylog(2,exp(2*im*(c+d*x)))+-4*im*a^-1*d^-3*f^2*Polylogarithms.polylog(2,im*exp(im*(c+d*x)))+2*f*a^-1*d^-2*(e+f*x)*log(1+-1*exp(2*im*(c+d*x)))+4*f*a^-1*d^-2*(e+f*x)*log(1+-1*im*exp(im*(c+d*x)))+-2*im*f*a^-1*d^-2*(e+f*x)*Polylogarithms.polylog(2,-1*exp(im*(c+d*x)))+2*im*f*a^-1*d^-2*(e+f*x)*Polylogarithms.polylog(2,exp(im*(c+d*x))))
@test integrate((a+a*sin(c+d*x))^-1*csc(c+d*x)^2*(e+f*x), x) == :(f*a^-1*d^-2*log(sin(c+d*x))+a^-1*d^-1*(2*e+2*f*x)*arctanh(exp(im*(c+d*x)))+-1*a^-1*d^-1*(e+f*x)*cot(c+d*x)+-1*a^-1*d^-1*(e+f*x)*cot((1/2)*c+1//4*pi+(1/2)*d*x)+2*f*a^-1*d^-2*log(sin((1/2)*c+1//4*pi+(1/2)*d*x))+im*f*a^-1*d^-2*Polylogarithms.polylog(2,exp(im*(c+d*x)))+-1*im*f*a^-1*d^-2*Polylogarithms.polylog(2,-1*exp(im*(c+d*x))))
@test integrate((a+a*sin(c+d*x))^-1*csc(c+d*x)^2, x) == :(a^-1*d^-1*arctanh(cos(c+d*x))+d^-1*(a+a*sin(c+d*x))^-1*cot(c+d*x)+-2*a^-1*d^-1*cot(c+d*x))
@test integrate((a+a*sin(c+d*x))^-1*(e+f*x)^3*csc(c+d*x)^3, x) == :(a^-1*d^-1*(e+f*x)^3*cot(c+d*x)+a^-1*d^-1*(e+f*x)^3*cot((1/2)*c+1//4*pi+(1/2)*d*x)+-12*a^-1*d^-4*f^3*Polylogarithms.polylog(3,im*exp(im*(c+d*x)))+-3*a^-1*d^-1*(e+f*x)^3*arctanh(exp(im*(c+d*x)))+2*im*a^-1*d^-1*(e+f*x)^3+-3//2*a^-1*d^-4*f^3*Polylogarithms.polylog(3,exp(2*im*(c+d*x)))+-9*im*a^-1*d^-4*f^3*Polylogarithms.polylog(4,-1*exp(im*(c+d*x)))+-9*a^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(3,-1*exp(im*(c+d*x)))+-6*f*a^-1*d^-2*(e+f*x)^2*log(1+-1*im*exp(im*(c+d*x)))+-6*a^-1*d^-3*f^2*(e+f*x)*arctanh(exp(im*(c+d*x)))+-3*im*a^-1*d^-4*f^3*Polylogarithms.polylog(2,exp(im*(c+d*x)))+-3*f*a^-1*d^-2*(e+f*x)^2*log(1+-1*exp(2*im*(c+d*x)))+3*im*a^-1*d^-4*f^3*Polylogarithms.polylog(2,-1*exp(im*(c+d*x)))+9*im*a^-1*d^-4*f^3*Polylogarithms.polylog(4,exp(im*(c+d*x)))+9*a^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(3,exp(im*(c+d*x)))+-3//2*f*a^-1*d^-2*(e+f*x)^2*csc(c+d*x)+-1//2*a^-1*d^-1*(e+f*x)^3*cot(c+d*x)*csc(c+d*x)+3*im*a^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(2,exp(2*im*(c+d*x)))+12*im*a^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(2,im*exp(im*(c+d*x)))+-9//2*im*f*a^-1*d^-2*(e+f*x)^2*Polylogarithms.polylog(2,exp(im*(c+d*x)))+9//2*im*f*a^-1*d^-2*(e+f*x)^2*Polylogarithms.polylog(2,-1*exp(im*(c+d*x))))
@test integrate((a+a*sin(c+d*x))^-1*(e+f*x)^2*csc(c+d*x)^3, x) == :(a^-1*d^-1*(e+f*x)^2*cot(c+d*x)+a^-1*d^-1*(e+f*x)^2*cot((1/2)*c+1//4*pi+(1/2)*d*x)+-1*a^-1*d^-3*f^2*arctanh(cos(c+d*x))+-3*a^-1*d^-1*(e+f*x)^2*arctanh(exp(im*(c+d*x)))+-3*a^-1*d^-3*f^2*Polylogarithms.polylog(3,-1*exp(im*(c+d*x)))+2*im*a^-1*d^-1*(e+f*x)^2+3*a^-1*d^-3*f^2*Polylogarithms.polylog(3,exp(im*(c+d*x)))+im*a^-1*d^-3*f^2*Polylogarithms.polylog(2,exp(2*im*(c+d*x)))+-1*f*a^-1*d^-2*(e+f*x)*csc(c+d*x)+-4*f*a^-1*d^-2*(e+f*x)*log(1+-1*im*exp(im*(c+d*x)))+-2*f*a^-1*d^-2*(e+f*x)*log(1+-1*exp(2*im*(c+d*x)))+4*im*a^-1*d^-3*f^2*Polylogarithms.polylog(2,im*exp(im*(c+d*x)))+-1//2*a^-1*d^-1*(e+f*x)^2*cot(c+d*x)*csc(c+d*x)+-3*im*f*a^-1*d^-2*(e+f*x)*Polylogarithms.polylog(2,exp(im*(c+d*x)))+3*im*f*a^-1*d^-2*(e+f*x)*Polylogarithms.polylog(2,-1*exp(im*(c+d*x))))
@test integrate((a+a*sin(c+d*x))^-1*csc(c+d*x)^3*(e+f*x), x) == :(a^-1*d^-1*(e+f*x)*cot(c+d*x)+a^-1*d^-1*(e+f*x)*cot((1/2)*c+1//4*pi+(1/2)*d*x)+a^-1*d^-1*(-3*e+-3*f*x)*arctanh(exp(im*(c+d*x)))+-1*f*a^-1*d^-2*log(sin(c+d*x))+-2*f*a^-1*d^-2*log(sin((1/2)*c+1//4*pi+(1/2)*d*x))+-1//2*f*a^-1*d^-2*csc(c+d*x)+-3//2*im*f*a^-1*d^-2*Polylogarithms.polylog(2,exp(im*(c+d*x)))+-1//2*a^-1*d^-1*(e+f*x)*cot(c+d*x)*csc(c+d*x)+3//2*im*f*a^-1*d^-2*Polylogarithms.polylog(2,-1*exp(im*(c+d*x))))
@test integrate((a+a*sin(c+d*x))^-1*csc(c+d*x)^3, x) == :(2*a^-1*d^-1*cot(c+d*x)+-3//2*a^-1*d^-1*arctanh(cos(c+d*x))+d^-1*(a+a*sin(c+d*x))^-1*cot(c+d*x)*csc(c+d*x)+-3//2*a^-1*d^-1*cot(c+d*x)*csc(c+d*x))
# Regression tests: general b ≠ a case, ∫ sin^n or csc^n times (e+f*x)^p over
# (a+b*sin(c+d*x))^q for q ∈ {1,2}. References reintroduce the two conjugate
# branch factors im*b*(a ± sqrt(a^2-b^2))^-1*exp(im*(c+d*x)) inside log/polylog,
# plus arctan of (a^2-b^2)^-1//2*(b+a*tan(...)) for the polynomial-free cases.
# Quoted-Expr equality with `==`: keep each reference expression verbatim.
@test integrate((a+b*sin(c+d*x))^-1*(e+f*x)^3*sin(c+d*x), x) == :(1//4*b^-1*f^-1*(e+f*x)^4+-6*a*b^-1*d^-4*f^3*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(4,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+6*a*b^-1*d^-4*f^3*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(4,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+im*a*b^-1*d^-1*(e+f*x)^3*(a^2+-1*b^2)^-1//2*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*a*b^-1*d^-1*(e+f*x)^3*(a^2+-1*b^2)^-1//2*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3*a*f*b^-1*d^-2*(e+f*x)^2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3*a*f*b^-1*d^-2*(e+f*x)^2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-6*im*a*b^-1*d^-3*f^2*(a^2+-1*b^2)^-1//2*(e+f*x)*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+6*im*a*b^-1*d^-3*f^2*(a^2+-1*b^2)^-1//2*(e+f*x)*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*(e+f*x)^2*sin(c+d*x), x) == :(1//3*b^-1*f^-1*(e+f*x)^3+im*a*b^-1*d^-1*(e+f*x)^2*(a^2+-1*b^2)^-1//2*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*a*b^-1*d^-1*(e+f*x)^2*(a^2+-1*b^2)^-1//2*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-2*im*a*b^-1*d^-3*f^2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-2*a*f*b^-1*d^-2*(a^2+-1*b^2)^-1//2*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+2*im*a*b^-1*d^-3*f^2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+2*a*f*b^-1*d^-2*(a^2+-1*b^2)^-1//2*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*(e+f*x)*sin(c+d*x), x) == :(e*x*b^-1+(1/2)*f*b^-1*x^2+a*f*b^-1*d^-2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*a*f*b^-1*d^-2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+im*a*b^-1*d^-1*(a^2+-1*b^2)^-1//2*(e+f*x)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*a*b^-1*d^-1*(a^2+-1*b^2)^-1//2*(e+f*x)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*sin(c+d*x), x) == :(x*b^-1+-2*a*b^-1*d^-1*(a^2+-1*b^2)^-1//2*arctan((a^2+-1*b^2)^-1//2*(b+a*tan((1/2)*c+(1/2)*d*x))))
@test integrate((a+b*sin(c+d*x))^-1*(e+f*x)^3*sin(c+d*x)^2, x) == :(-1*b^-1*d^-1*(e+f*x)^3*cos(c+d*x)+-6*b^-1*d^-4*f^3*sin(c+d*x)+-1//4*a*b^-2*f^-1*(e+f*x)^4+3*f*b^-1*d^-2*(e+f*x)^2*sin(c+d*x)+6*b^-1*d^-3*f^2*(e+f*x)*cos(c+d*x)+-6*a^2*b^-2*d^-4*f^3*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(4,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+6*a^2*b^-2*d^-4*f^3*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(4,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+im*a^2*b^-2*d^-1*(e+f*x)^3*(a^2+-1*b^2)^-1//2*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*a^2*b^-2*d^-1*(e+f*x)^3*(a^2+-1*b^2)^-1//2*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3*f*a^2*b^-2*d^-2*(e+f*x)^2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3*f*a^2*b^-2*d^-2*(e+f*x)^2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-6*im*a^2*b^-2*d^-3*f^2*(a^2+-1*b^2)^-1//2*(e+f*x)*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+6*im*a^2*b^-2*d^-3*f^2*(a^2+-1*b^2)^-1//2*(e+f*x)*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*(e+f*x)^2*sin(c+d*x)^2, x) == :(-1*b^-1*d^-1*(e+f*x)^2*cos(c+d*x)+2*b^-1*d^-3*f^2*cos(c+d*x)+-1//3*a*b^-2*f^-1*(e+f*x)^3+2*f*b^-1*d^-2*(e+f*x)*sin(c+d*x)+im*a^2*b^-2*d^-1*(e+f*x)^2*(a^2+-1*b^2)^-1//2*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*a^2*b^-2*d^-1*(e+f*x)^2*(a^2+-1*b^2)^-1//2*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-2*im*a^2*b^-2*d^-3*f^2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-2*f*a^2*b^-2*d^-2*(a^2+-1*b^2)^-1//2*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+2*im*a^2*b^-2*d^-3*f^2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+2*f*a^2*b^-2*d^-2*(a^2+-1*b^2)^-1//2*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*sin(c+d*x)^2*(e+f*x), x) == :(f*b^-1*d^-2*sin(c+d*x)+-1*a*e*x*b^-2+-1*b^-1*d^-1*(e+f*x)*cos(c+d*x)+-1//2*a*f*b^-2*x^2+f*a^2*b^-2*d^-2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*f*a^2*b^-2*d^-2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+im*a^2*b^-2*d^-1*(a^2+-1*b^2)^-1//2*(e+f*x)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*a^2*b^-2*d^-1*(a^2+-1*b^2)^-1//2*(e+f*x)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*sin(c+d*x)^2, x) == :(-1*a*x*b^-2+-1*b^-1*d^-1*cos(c+d*x)+2*a^2*b^-2*d^-1*(a^2+-1*b^2)^-1//2*arctan((a^2+-1*b^2)^-1//2*(b+a*tan((1/2)*c+(1/2)*d*x))))
@test integrate((a+b*sin(c+d*x))^-1*(e+f*x)^3*sin(c+d*x)^3, x) == :(1//8*b^-1*f^-1*(e+f*x)^4+-3//8*b^-1*d^-4*f^3*sin(c+d*x)^2+-3//8*b^-1*d^-2*f^3*x^2+1//4*a^2*b^-3*f^-1*(e+f*x)^4+a*b^-2*d^-1*(e+f*x)^3*cos(c+d*x)+6*a*b^-2*d^-4*f^3*sin(c+d*x)+-3//4*e*x*b^-1*d^-2*f^2+-1//2*b^-1*d^-1*(e+f*x)^3*cos(c+d*x)*sin(c+d*x)+3//4*f*b^-1*d^-2*(e+f*x)^2*sin(c+d*x)^2+-6*a*b^-2*d^-3*f^2*(e+f*x)*cos(c+d*x)+-6*a^3*b^-3*d^-4*f^3*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(4,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3*a*f*b^-2*d^-2*(e+f*x)^2*sin(c+d*x)+6*a^3*b^-3*d^-4*f^3*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(4,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3//4*b^-1*d^-3*f^2*(e+f*x)*cos(c+d*x)*sin(c+d*x)+im*a^3*b^-3*d^-1*(e+f*x)^3*(a^2+-1*b^2)^-1//2*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*a^3*b^-3*d^-1*(e+f*x)^3*(a^2+-1*b^2)^-1//2*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3*f*a^3*b^-3*d^-2*(e+f*x)^2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3*f*a^3*b^-3*d^-2*(e+f*x)^2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-6*im*a^3*b^-3*d^-3*f^2*(a^2+-1*b^2)^-1//2*(e+f*x)*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+6*im*a^3*b^-3*d^-3*f^2*(a^2+-1*b^2)^-1//2*(e+f*x)*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*(e+f*x)^2*sin(c+d*x)^3, x) == :(1//6*b^-1*f^-1*(e+f*x)^3+-1//4*x*b^-1*d^-2*f^2+1//3*a^2*b^-3*f^-1*(e+f*x)^3+a*b^-2*d^-1*(e+f*x)^2*cos(c+d*x)+(1/2)*f*b^-1*d^-2*sin(c+d*x)^2*(e+f*x)+-2*a*b^-2*d^-3*f^2*cos(c+d*x)+-1//2*b^-1*d^-1*(e+f*x)^2*cos(c+d*x)*sin(c+d*x)+1//4*b^-1*d^-3*f^2*cos(c+d*x)*sin(c+d*x)+-2*a*f*b^-2*d^-2*(e+f*x)*sin(c+d*x)+im*a^3*b^-3*d^-1*(e+f*x)^2*(a^2+-1*b^2)^-1//2*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*a^3*b^-3*d^-1*(e+f*x)^2*(a^2+-1*b^2)^-1//2*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-2*im*a^3*b^-3*d^-3*f^2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-2*f*a^3*b^-3*d^-2*(a^2+-1*b^2)^-1//2*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+2*im*a^3*b^-3*d^-3*f^2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+2*f*a^3*b^-3*d^-2*(a^2+-1*b^2)^-1//2*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*sin(c+d*x)^3*(e+f*x), x) == :((1/2)*e*x*b^-1+1//4*f*b^-1*x^2+e*x*a^2*b^-3+(1/2)*f*a^2*b^-3*x^2+1//4*f*b^-1*d^-2*sin(c+d*x)^2+a*b^-2*d^-1*(e+f*x)*cos(c+d*x)+-1*a*f*b^-2*d^-2*sin(c+d*x)+-1//2*b^-1*d^-1*(e+f*x)*cos(c+d*x)*sin(c+d*x)+f*a^3*b^-3*d^-2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*f*a^3*b^-3*d^-2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+im*a^3*b^-3*d^-1*(a^2+-1*b^2)^-1//2*(e+f*x)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*a^3*b^-3*d^-1*(a^2+-1*b^2)^-1//2*(e+f*x)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*sin(c+d*x)^3, x) == :((1/2)*x*b^-3*(b^2+2*a^2)+a*b^-2*d^-1*cos(c+d*x)+-1//2*b^-1*d^-1*cos(c+d*x)*sin(c+d*x)+-2*a^3*b^-3*d^-1*(a^2+-1*b^2)^-1//2*arctan((a^2+-1*b^2)^-1//2*(b+a*tan((1/2)*c+(1/2)*d*x))))
# csc powers over the general a+b*sin denominator.
@test integrate((a+b*sin(c+d*x))^-1*(e+f*x)^3*csc(c+d*x), x) == :(-2*a^-1*d^-1*(e+f*x)^3*arctanh(exp(im*(c+d*x)))+-6*im*a^-1*d^-4*f^3*Polylogarithms.polylog(4,-1*exp(im*(c+d*x)))+-6*a^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(3,-1*exp(im*(c+d*x)))+6*im*a^-1*d^-4*f^3*Polylogarithms.polylog(4,exp(im*(c+d*x)))+6*a^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(3,exp(im*(c+d*x)))+-6*b*a^-1*d^-4*f^3*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(4,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3*im*f*a^-1*d^-2*(e+f*x)^2*Polylogarithms.polylog(2,exp(im*(c+d*x)))+3*im*f*a^-1*d^-2*(e+f*x)^2*Polylogarithms.polylog(2,-1*exp(im*(c+d*x)))+6*b*a^-1*d^-4*f^3*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(4,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+im*b*a^-1*d^-1*(e+f*x)^3*(a^2+-1*b^2)^-1//2*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*b*a^-1*d^-1*(e+f*x)^3*(a^2+-1*b^2)^-1//2*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3*b*f*a^-1*d^-2*(e+f*x)^2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3*b*f*a^-1*d^-2*(e+f*x)^2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-6*im*b*a^-1*d^-3*f^2*(a^2+-1*b^2)^-1//2*(e+f*x)*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+6*im*b*a^-1*d^-3*f^2*(a^2+-1*b^2)^-1//2*(e+f*x)*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*(e+f*x)^2*csc(c+d*x), x) == :(-2*a^-1*d^-1*(e+f*x)^2*arctanh(exp(im*(c+d*x)))+-2*a^-1*d^-3*f^2*Polylogarithms.polylog(3,-1*exp(im*(c+d*x)))+2*a^-1*d^-3*f^2*Polylogarithms.polylog(3,exp(im*(c+d*x)))+-2*im*f*a^-1*d^-2*(e+f*x)*Polylogarithms.polylog(2,exp(im*(c+d*x)))+2*im*f*a^-1*d^-2*(e+f*x)*Polylogarithms.polylog(2,-1*exp(im*(c+d*x)))+im*b*a^-1*d^-1*(e+f*x)^2*(a^2+-1*b^2)^-1//2*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*b*a^-1*d^-1*(e+f*x)^2*(a^2+-1*b^2)^-1//2*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-2*im*b*a^-1*d^-3*f^2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-2*b*f*a^-1*d^-2*(a^2+-1*b^2)^-1//2*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+2*im*b*a^-1*d^-3*f^2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+2*b*f*a^-1*d^-2*(a^2+-1*b^2)^-1//2*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*(e+f*x)*csc(c+d*x), x) == :(a^-1*d^-1*(-2*e+-2*f*x)*arctanh(exp(im*(c+d*x)))+im*f*a^-1*d^-2*Polylogarithms.polylog(2,-1*exp(im*(c+d*x)))+-1*im*f*a^-1*d^-2*Polylogarithms.polylog(2,exp(im*(c+d*x)))+b*f*a^-1*d^-2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*b*f*a^-1*d^-2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+im*b*a^-1*d^-1*(a^2+-1*b^2)^-1//2*(e+f*x)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*b*a^-1*d^-1*(a^2+-1*b^2)^-1//2*(e+f*x)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*csc(c+d*x), x) == :(-1*a^-1*d^-1*arctanh(cos(c+d*x))+-2*b*a^-1*d^-1*(a^2+-1*b^2)^-1//2*arctan((a^2+-1*b^2)^-1//2*(b+a*tan((1/2)*c+(1/2)*d*x))))
@test integrate((a+b*sin(c+d*x))^-1*(e+f*x)^3*csc(c+d*x)^2, x) == :(-1*im*a^-1*d^-1*(e+f*x)^3+-1*a^-1*d^-1*(e+f*x)^3*cot(c+d*x)+3//2*a^-1*d^-4*f^3*Polylogarithms.polylog(3,exp(2*im*(c+d*x)))+2*b*a^-2*d^-1*(e+f*x)^3*arctanh(exp(im*(c+d*x)))+3*f*a^-1*d^-2*(e+f*x)^2*log(1+-1*exp(2*im*(c+d*x)))+-6*im*b*a^-2*d^-4*f^3*Polylogarithms.polylog(4,exp(im*(c+d*x)))+-6*b*a^-2*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(3,exp(im*(c+d*x)))+-6*a^-2*b^2*d^-4*f^3*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(4,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3*im*a^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(2,exp(2*im*(c+d*x)))+6*im*b*a^-2*d^-4*f^3*Polylogarithms.polylog(4,-1*exp(im*(c+d*x)))+6*b*a^-2*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(3,-1*exp(im*(c+d*x)))+6*a^-2*b^2*d^-4*f^3*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(4,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+im*a^-2*b^2*d^-1*(e+f*x)^3*(a^2+-1*b^2)^-1//2*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*a^-2*b^2*d^-1*(e+f*x)^3*(a^2+-1*b^2)^-1//2*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3*im*b*f*a^-2*d^-2*(e+f*x)^2*Polylogarithms.polylog(2,-1*exp(im*(c+d*x)))+-3*f*a^-2*b^2*d^-2*(e+f*x)^2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3*im*b*f*a^-2*d^-2*(e+f*x)^2*Polylogarithms.polylog(2,exp(im*(c+d*x)))+3*f*a^-2*b^2*d^-2*(e+f*x)^2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-6*im*a^-2*b^2*d^-3*f^2*(a^2+-1*b^2)^-1//2*(e+f*x)*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+6*im*a^-2*b^2*d^-3*f^2*(a^2+-1*b^2)^-1//2*(e+f*x)*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*(e+f*x)^2*csc(c+d*x)^2, x) == :(-1*im*a^-1*d^-1*(e+f*x)^2+-1*a^-1*d^-1*(e+f*x)^2*cot(c+d*x)+-1*im*a^-1*d^-3*f^2*Polylogarithms.polylog(2,exp(2*im*(c+d*x)))+-2*b*a^-2*d^-3*f^2*Polylogarithms.polylog(3,exp(im*(c+d*x)))+2*b*a^-2*d^-1*(e+f*x)^2*arctanh(exp(im*(c+d*x)))+2*b*a^-2*d^-3*f^2*Polylogarithms.polylog(3,-1*exp(im*(c+d*x)))+2*f*a^-1*d^-2*(e+f*x)*log(1+-1*exp(2*im*(c+d*x)))+im*a^-2*b^2*d^-1*(e+f*x)^2*(a^2+-1*b^2)^-1//2*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*a^-2*b^2*d^-1*(e+f*x)^2*(a^2+-1*b^2)^-1//2*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-2*im*b*f*a^-2*d^-2*(e+f*x)*Polylogarithms.polylog(2,-1*exp(im*(c+d*x)))+-2*im*a^-2*b^2*d^-3*f^2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-2*f*a^-2*b^2*d^-2*(a^2+-1*b^2)^-1//2*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+2*im*b*f*a^-2*d^-2*(e+f*x)*Polylogarithms.polylog(2,exp(im*(c+d*x)))+2*im*a^-2*b^2*d^-3*f^2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+2*f*a^-2*b^2*d^-2*(a^2+-1*b^2)^-1//2*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*csc(c+d*x)^2*(e+f*x), x) == :(f*a^-1*d^-2*log(sin(c+d*x))+-1*a^-1*d^-1*(e+f*x)*cot(c+d*x)+2*b*a^-2*d^-1*(e+f*x)*arctanh(exp(im*(c+d*x)))+im*b*f*a^-2*d^-2*Polylogarithms.polylog(2,exp(im*(c+d*x)))+f*a^-2*b^2*d^-2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*b*f*a^-2*d^-2*Polylogarithms.polylog(2,-1*exp(im*(c+d*x)))+-1*f*a^-2*b^2*d^-2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+im*a^-2*b^2*d^-1*(a^2+-1*b^2)^-1//2*(e+f*x)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*a^-2*b^2*d^-1*(a^2+-1*b^2)^-1//2*(e+f*x)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*csc(c+d*x)^2, x) == :(-1*a^-1*d^-1*cot(c+d*x)+b*a^-2*d^-1*arctanh(cos(c+d*x))+2*a^-2*b^2*d^-1*(a^2+-1*b^2)^-1//2*arctan((a^2+-1*b^2)^-1//2*(b+a*tan((1/2)*c+(1/2)*d*x))))
# (a+b*sin)^-2 cases with a polynomial factor and sin in the numerator.
@test integrate((a+b*sin(c+d*x))^-2*(e+f*x)*sin(c+d*x), x) == :(f*b^-1*d^-2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*f*b^-1*d^-2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+im*b^-1*d^-1*(a^2+-1*b^2)^-1//2*(e+f*x)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+a*f*b^-1*d^-2*(a^2+-1*b^2)^-1*log(a+b*sin(c+d*x))+f*a^2*b^-1*d^-2*(a^2+-1*b^2)^-3//2*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*b^-1*d^-1*(a^2+-1*b^2)^-1//2*(e+f*x)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*a*d^-1*(a+b*sin(c+d*x))^-1*(a^2+-1*b^2)^-1*(e+f*x)*cos(c+d*x)+-1*f*a^2*b^-1*d^-2*(a^2+-1*b^2)^-3//2*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+im*a^2*b^-1*d^-1*(a^2+-1*b^2)^-3//2*(e+f*x)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*a^2*b^-1*d^-1*(a^2+-1*b^2)^-3//2*(e+f*x)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-2*(e+f*x)^2*sin(c+d*x), x) == :(im*b^-1*d^-1*(e+f*x)^2*(a^2+-1*b^2)^-1//2*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*a*b^-1*d^-1*(e+f*x)^2*(a^2+-1*b^2)^-1+-1*im*b^-1*d^-1*(e+f*x)^2*(a^2+-1*b^2)^-1//2*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*a*d^-1*(a+b*sin(c+d*x))^-1*(e+f*x)^2*(a^2+-1*b^2)^-1*cos(c+d*x)+-2*im*b^-1*d^-3*f^2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-2*f*b^-1*d^-2*(a^2+-1*b^2)^-1//2*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+2*im*b^-1*d^-3*f^2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+2*f*b^-1*d^-2*(a^2+-1*b^2)^-1//2*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+im*a^2*b^-1*d^-1*(e+f*x)^2*(a^2+-1*b^2)^-3//2*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*a^2*b^-1*d^-1*(e+f*x)^2*(a^2+-1*b^2)^-3//2*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-2*im*a*b^-1*d^-3*f^2*(a^2+-1*b^2)^-1*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-2*im*a*b^-1*d^-3*f^2*(a^2+-1*b^2)^-1*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-2*im*a^2*b^-1*d^-3*f^2*(a^2+-1*b^2)^-3//2*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-2*f*a^2*b^-1*d^-2*(a^2+-1*b^2)^-3//2*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+2*im*a^2*b^-1*d^-3*f^2*(a^2+-1*b^2)^-3//2*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+2*a*f*b^-1*d^-2*(a^2+-1*b^2)^-1*(e+f*x)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+2*a*f*b^-1*d^-2*(a^2+-1*b^2)^-1*(e+f*x)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+2*f*a^2*b^-1*d^-2*(a^2+-1*b^2)^-3//2*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-2*(e+f*x)^3*sin(c+d*x), x) == :(-6*b^-1*d^-4*f^3*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(4,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+6*b^-1*d^-4*f^3*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(4,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+im*b^-1*d^-1*(e+f*x)^3*(a^2+-1*b^2)^-1//2*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*a*b^-1*d^-1*(e+f*x)^3*(a^2+-1*b^2)^-1+-1*im*b^-1*d^-1*(e+f*x)^3*(a^2+-1*b^2)^-1//2*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*a*d^-1*(a+b*sin(c+d*x))^-1*(e+f*x)^3*(a^2+-1*b^2)^-1*cos(c+d*x)+-6*a^2*b^-1*d^-4*f^3*(a^2+-1*b^2)^-3//2*Polylogarithms.polylog(4,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3*f*b^-1*d^-2*(e+f*x)^2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3*f*b^-1*d^-2*(e+f*x)^2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+6*a*b^-1*d^-4*f^3*(a^2+-1*b^2)^-1*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+6*a*b^-1*d^-4*f^3*(a^2+-1*b^2)^-1*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+6*a^2*b^-1*d^-4*f^3*(a^2+-1*b^2)^-3//2*Polylogarithms.polylog(4,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+im*a^2*b^-1*d^-1*(e+f*x)^3*(a^2+-1*b^2)^-3//2*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*a^2*b^-1*d^-1*(e+f*x)^3*(a^2+-1*b^2)^-3//2*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-6*im*b^-1*d^-3*f^2*(a^2+-1*b^2)^-1//2*(e+f*x)*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3*f*a^2*b^-1*d^-2*(e+f*x)^2*(a^2+-1*b^2)^-3//2*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3*a*f*b^-1*d^-2*(e+f*x)^2*(a^2+-1*b^2)^-1*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3*a*f*b^-1*d^-2*(e+f*x)^2*(a^2+-1*b^2)^-1*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3*f*a^2*b^-1*d^-2*(e+f*x)^2*(a^2+-1*b^2)^-3//2*Polylogarithms
.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+6*im*b^-1*d^-3*f^2*(a^2+-1*b^2)^-1//2*(e+f*x)*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-6*im*a*b^-1*d^-3*f^2*(a^2+-1*b^2)^-1*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-6*im*a*b^-1*d^-3*f^2*(a^2+-1*b^2)^-1*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-6*im*a^2*b^-1*d^-3*f^2*(a^2+-1*b^2)^-3//2*(e+f*x)*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+6*im*a^2*b^-1*d^-3*f^2*(a^2+-1*b^2)^-3//2*(e+f*x)*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
# Regression tests: ∫ (a + b·sin(c+d·x))^-3 · (e+f·x)^k · sin(c+d·x) dx for k = 1, 2, 3.
# Structural (`==`) comparison against quoted reference Exprs; the k=2 and k=3 statements
# each span multiple physical lines (the Expr is split mid-token) — preserve the breaks.
# NOTE(review): machine-generated reference expressions — do not hand-edit.
@test integrate((a+b*sin(c+d*x))^-3*(e+f*x)*sin(c+d*x), x) == :(d^-1*(a+b*sin(c+d*x))^-1*(a^2+-1*b^2)^-1*(e+f*x)*cos(c+d*x)+-1*f*b^-1*d^-2*(a^2+-1*b^2)^-1*log(a+b*sin(c+d*x))+-1*a*d^-1*(a+b*sin(c+d*x))^-2*(-2*b^2+2*a^2)^-1*(e+f*x)*cos(c+d*x)+-3//2*a*f*b^-1*d^-2*(a^2+-1*b^2)^-3//2*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3//2*f*a^3*b^-1*d^-2*(a^2+-1*b^2)^-5//2*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3//2*a^2*d^-1*(a+b*sin(c+d*x))^-1*(a^2+-1*b^2)^-2*(e+f*x)*cos(c+d*x)+-1//2*a*f*b^-1*d^-2*(a+b*sin(c+d*x))^-1*(a^2+-1*b^2)^-1+3//2*a*f*b^-1*d^-2*(a^2+-1*b^2)^-3//2*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3//2*f*a^2*b^-1*d^-2*(a^2+-1*b^2)^-2*log(a+b*sin(c+d*x))+3//2*f*a^3*b^-1*d^-2*(a^2+-1*b^2)^-5//2*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3//2*im*a*b^-1*d^-1*(a^2+-1*b^2)^-3//2*(e+f*x)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3//2*im*a^3*b^-1*d^-1*(a^2+-1*b^2)^-5//2*(e+f*x)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3//2*im*a*b^-1*d^-1*(a^2+-1*b^2)^-3//2*(e+f*x)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3//2*im*a^3*b^-1*d^-1*(a^2+-1*b^2)^-5//2*(e+f*x)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-3*(e+f*x)^2*sin(c+d*x), x) == :(im*b^-1*d^-1*(e+f*x)^2*(a^2+-1*b^2)^-1+d^-1*(a+b*sin(c+d*x))^-1*(e+f*x)^2*(a^2+-1*b^2)^-1*cos(c+d*x)+-1*a*d^-1*(a+b*sin(c+d*x))^-2*(e+f*x)^2*(-2*b^2+2*a^2)^-1*cos(c+d*x)+-2*f*b^-1*d^-2*(a^2+-1*b^2)^-1*(e+f*x)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-2*f*b^-1*d^-2*(a^2+-1*b^2)^-1*(e+f*x)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+2*im*b^-1*d^-3*f^2*(a^2+-1*b^2)^-1*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+2*im*b^-1*d^-3*f^2*(a^2+-1*b^2)^-1*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+2*a*b^-1*d^-3*f^2*(a^2+-1*b^2)^-3//2*arctan((a^2+-1*b^2)^-1//2*(b+a*tan((1/2)*c+(1/2)*d*x)))+-3//2*im*a^2*b^-1*d^-1*(e+f*x)^2*(a^2+-1*b^2)^-2+-3//2*a^2*d^-1*(a+b*sin(c+d*x))^-1*(e+f*x)^2*(a^2+-1*b^2)^-2*cos(c+d*x)+-1*a*f*b^-1*d^-2*(a+b*sin(c+d*x))^-1*(a^2+-1*b^2)^-1*(e+f*x)+-3*im*a*b^-1*d^-3*f^2*(a^2+-1*b^2)^-3//2*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3*im*a^2*b^-1*d^-3*f^2*(a^2+-1*b^2)^-2*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3*im*a^2*b^-1*d^-3*f^2*(a^2+-1*b^2)^-2*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3*im*a^3*b^-1*d^-3*f^2*(a^2+-1*b^2)^-5//2*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3*a*f*b^-1*d^-2*(a^2+-1*b^2)^-3//2*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3*f*a^3*b^-1*d^-2*(a^2+-1*b^2)^-5//2*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3*im*a*b^-1*d^-3*f^2*(a^2+-1*b^2)^-3//2*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3*im*a^3*b^-1*d^-3*f^2*(a^2+-1*b^2)^-5//2*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3*a*f*b^-1*d^-2*(a^2+-1*b^2)^-3//2*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3*f*a^2*b^-1*d^-2*(a^2+-1*b
^2)^-2*(e+f*x)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3*f*a^2*b^-1*d^-2*(a^2+-1*b^2)^-2*(e+f*x)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3*f*a^3*b^-1*d^-2*(a^2+-1*b^2)^-5//2*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3//2*im*a*b^-1*d^-1*(e+f*x)^2*(a^2+-1*b^2)^-3//2*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3//2*im*a^3*b^-1*d^-1*(e+f*x)^2*(a^2+-1*b^2)^-5//2*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3//2*im*a*b^-1*d^-1*(e+f*x)^2*(a^2+-1*b^2)^-3//2*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3//2*im*a^3*b^-1*d^-1*(e+f*x)^2*(a^2+-1*b^2)^-5//2*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-3*(e+f*x)^3*sin(c+d*x), x) == :(im*b^-1*d^-1*(e+f*x)^3*(a^2+-1*b^2)^-1+d^-1*(a+b*sin(c+d*x))^-1*(e+f*x)^3*(a^2+-1*b^2)^-1*cos(c+d*x)+-6*b^-1*d^-4*f^3*(a^2+-1*b^2)^-1*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-6*b^-1*d^-4*f^3*(a^2+-1*b^2)^-1*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*a*d^-1*(a+b*sin(c+d*x))^-2*(e+f*x)^3*(-2*b^2+2*a^2)^-1*cos(c+d*x)+-9*a*b^-1*d^-4*f^3*(a^2+-1*b^2)^-3//2*Polylogarithms.polylog(4,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-9*a^3*b^-1*d^-4*f^3*(a^2+-1*b^2)^-5//2*Polylogarithms.polylog(4,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3*a*b^-1*d^-4*f^3*(a^2+-1*b^2)^-3//2*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3*f*b^-1*d^-2*(e+f*x)^2*(a^2+-1*b^2)^-1*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3*f*b^-1*d^-2*(e+f*x)^2*(a^2+-1*b^2)^-1*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3*a*b^-1*d^-4*f^3*(a^2+-1*b^2)^-3//2*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+9*a*b^-1*d^-4*f^3*(a^2+-1*b^2)^-3//2*Polylogarithms.polylog(4,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+9*a^2*b^-1*d^-4*f^3*(a^2+-1*b^2)^-2*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+9*a^2*b^-1*d^-4*f^3*(a^2+-1*b^2)^-2*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+9*a^3*b^-1*d^-4*f^3*(a^2+-1*b^2)^-5//2*Polylogarithms.polylog(4,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3//2*im*a^2*b^-1*d^-1*(e+f*x)^3*(a^2+-1*b^2)^-2+-3//2*a^2*d^-1*(a+b*sin(c+d*x))^-1*(e+f*x)^3*(a^2+-1*b^2)^-2*cos(c+d*x)+6*im*b^-1*d^-3*f^2*(a^2+-1*b^2)^-1*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+6*im*b^-1*d^-3*f^2*(a^2+-1*b^2)^-1*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-9//2*a*f*b^-1*d^-2*(e+f*x)^2*(a^2+-1*b^2)^-3//2*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)
^(1/2))^-1*exp(im*(c+d*x)))+-9//2*f*a^3*b^-1*d^-2*(e+f*x)^2*(a^2+-1*b^2)^-5//2*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3//2*im*a*b^-1*d^-1*(e+f*x)^3*(a^2+-1*b^2)^-3//2*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3//2*im*a^3*b^-1*d^-1*(e+f*x)^3*(a^2+-1*b^2)^-5//2*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3//2*a*f*b^-1*d^-2*(a+b*sin(c+d*x))^-1*(e+f*x)^2*(a^2+-1*b^2)^-1+3//2*im*a*b^-1*d^-1*(e+f*x)^3*(a^2+-1*b^2)^-3//2*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3//2*im*a^3*b^-1*d^-1*(e+f*x)^3*(a^2+-1*b^2)^-5//2*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+9//2*a*f*b^-1*d^-2*(e+f*x)^2*(a^2+-1*b^2)^-3//2*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+9//2*f*a^2*b^-1*d^-2*(e+f*x)^2*(a^2+-1*b^2)^-2*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+9//2*f*a^2*b^-1*d^-2*(e+f*x)^2*(a^2+-1*b^2)^-2*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+9//2*f*a^3*b^-1*d^-2*(e+f*x)^2*(a^2+-1*b^2)^-5//2*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-9*im*a*b^-1*d^-3*f^2*(a^2+-1*b^2)^-3//2*(e+f*x)*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-9*im*a^2*b^-1*d^-3*f^2*(a^2+-1*b^2)^-2*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-9*im*a^2*b^-1*d^-3*f^2*(a^2+-1*b^2)^-2*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-9*im*a^3*b^-1*d^-3*f^2*(a^2+-1*b^2)^-5//2*(e+f*x)*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3*im*a*b^-1*d^-3*f^2*(a^2+-1*b^2)^-3//2*(e+f*x)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3*im*a*b^-1*d^-3*f^2*(a^2+-1*b^2)^-3//2*(e+f*x)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+9*im*a*b^-1*d^-3*f^2*(a^2+-1*b^2)^-3//2*(e+f*x)*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+9*im*a^3*b^-1*d^-3*f^2*(a^2+-1*b^2)^-5//2*(e+f*x)*Polylogar
ithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
# Regression tests for the degenerate b = a case:
# ∫ (a + a·sin(c+d·x))^-1 · (e+f·x)^k · cos(c+d·x)^p dx,
# first with p = 1 for k = 3, 2, 1, 0, then p = 2 for k = 3, 2, 1, 0, -1, -2.
# The k = -1, -2 cases bring in sinintegral/cosintegral (Si/Ci-style) terms.
# NOTE(review): machine-generated reference expressions — do not hand-edit.
@test integrate((a+a*sin(c+d*x))^-1*(e+f*x)^3*cos(c+d*x), x) == :(2*a^-1*d^-1*(e+f*x)^3*log(1+-1*im*exp(im*(c+d*x)))+-1//4*im*a^-1*f^-1*(e+f*x)^4+12*im*a^-1*d^-4*f^3*Polylogarithms.polylog(4,im*exp(im*(c+d*x)))+12*a^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(3,im*exp(im*(c+d*x)))+-6*im*f*a^-1*d^-2*(e+f*x)^2*Polylogarithms.polylog(2,im*exp(im*(c+d*x))))
@test integrate((a+a*sin(c+d*x))^-1*(e+f*x)^2*cos(c+d*x), x) == :(2*a^-1*d^-1*(e+f*x)^2*log(1+-1*im*exp(im*(c+d*x)))+4*a^-1*d^-3*f^2*Polylogarithms.polylog(3,im*exp(im*(c+d*x)))+-1//3*im*a^-1*f^-1*(e+f*x)^3+-4*im*f*a^-1*d^-2*(e+f*x)*Polylogarithms.polylog(2,im*exp(im*(c+d*x))))
@test integrate((a+a*sin(c+d*x))^-1*(e+f*x)*cos(c+d*x), x) == :(a^-1*d^-1*(2*e+2*f*x)*log(1+-1*im*exp(im*(c+d*x)))+-1//2*im*a^-1*f^-1*(e+f*x)^2+-2*im*f*a^-1*d^-2*Polylogarithms.polylog(2,im*exp(im*(c+d*x))))
@test integrate((a+a*sin(c+d*x))^-1*cos(c+d*x), x) == :(a^-1*d^-1*log(1+sin(c+d*x)))
@test integrate((a+a*sin(c+d*x))^-1*(e+f*x)^3*cos(c+d*x)^2, x) == :(1//4*a^-1*f^-1*(e+f*x)^4+a^-1*d^-1*(e+f*x)^3*cos(c+d*x)+6*a^-1*d^-4*f^3*sin(c+d*x)+-6*a^-1*d^-3*f^2*(e+f*x)*cos(c+d*x)+-3*f*a^-1*d^-2*(e+f*x)^2*sin(c+d*x))
@test integrate((a+a*sin(c+d*x))^-1*(e+f*x)^2*cos(c+d*x)^2, x) == :(1//3*a^-1*f^-1*(e+f*x)^3+a^-1*d^-1*(e+f*x)^2*cos(c+d*x)+-2*a^-1*d^-3*f^2*cos(c+d*x)+-2*f*a^-1*d^-2*(e+f*x)*sin(c+d*x))
@test integrate((a+a*sin(c+d*x))^-1*cos(c+d*x)^2*(e+f*x), x) == :(e*x*a^-1+(1/2)*f*a^-1*x^2+a^-1*d^-1*(e+f*x)*cos(c+d*x)+-1*f*a^-1*d^-2*sin(c+d*x))
@test integrate((a+a*sin(c+d*x))^-1*cos(c+d*x)^2, x) == :(x*a^-1+a^-1*d^-1*cos(c+d*x))
@test integrate((a+a*sin(c+d*x))^-1*(e+f*x)^-1*cos(c+d*x)^2, x) == :(a^-1*f^-1*log(e+f*x)+-1*a^-1*f^-1*cos(c+-1*d*e*f^-1)*sinintegral(d*x+d*e*f^-1)+-1*a^-1*f^-1*cosintegral(d*x+d*e*f^-1)*sin(c+-1*d*e*f^-1))
@test integrate((a+a*sin(c+d*x))^-1*(e+f*x)^-2*cos(c+d*x)^2, x) == :(-1*a^-1*f^-1*(e+f*x)^-1+a^-1*f^-1*(e+f*x)^-1*sin(c+d*x)+d*a^-1*f^-2*sin(c+-1*d*e*f^-1)*sinintegral(d*x+d*e*f^-1)+-1*d*a^-1*f^-2*cos(c+-1*d*e*f^-1)*cosintegral(d*x+d*e*f^-1))
# Regression tests: ∫ (a + a·sin(c+d·x))^-1 · (e+f·x)^k · cos(c+d·x)^3 dx
# for k = 3, 2, 1, 0, -1, -2. The negative-k cases again use sinintegral/cosintegral.
# NOTE(review): machine-generated reference expressions — do not hand-edit.
@test integrate((a+a*sin(c+d*x))^-1*(e+f*x)^3*cos(c+d*x)^3, x) == :(1//4*a^-1*d^-1*(e+f*x)^3+a^-1*d^-1*(e+f*x)^3*sin(c+d*x)+-6*a^-1*d^-4*f^3*cos(c+d*x)+-3//8*x*a^-1*d^-3*f^3+-1//2*a^-1*d^-1*(e+f*x)^3*sin(c+d*x)^2+-6*a^-1*d^-3*f^2*(e+f*x)*sin(c+d*x)+3*f*a^-1*d^-2*(e+f*x)^2*cos(c+d*x)+3//4*a^-1*d^-3*f^2*sin(c+d*x)^2*(e+f*x)+3//8*a^-1*d^-4*f^3*cos(c+d*x)*sin(c+d*x)+-3//4*f*a^-1*d^-2*(e+f*x)^2*cos(c+d*x)*sin(c+d*x))
@test integrate((a+a*sin(c+d*x))^-1*(e+f*x)^2*cos(c+d*x)^3, x) == :(a^-1*d^-1*(e+f*x)^2*sin(c+d*x)+-2*a^-1*d^-3*f^2*sin(c+d*x)+-1//2*a^-1*d^-1*(e+f*x)^2*sin(c+d*x)^2+1//4*a^-1*d^-1*f^2*x^2+1//4*a^-1*d^-3*f^2*sin(c+d*x)^2+(1/2)*e*f*x*a^-1*d^-1+2*f*a^-1*d^-2*(e+f*x)*cos(c+d*x)+-1//2*f*a^-1*d^-2*(e+f*x)*cos(c+d*x)*sin(c+d*x))
@test integrate((a+a*sin(c+d*x))^-1*cos(c+d*x)^3*(e+f*x), x) == :(f*a^-1*d^-2*cos(c+d*x)+a^-1*d^-1*(e+f*x)*sin(c+d*x)+-1//2*a^-1*d^-1*sin(c+d*x)^2*(e+f*x)+1//4*f*x*a^-1*d^-1+-1//4*f*a^-1*d^-2*cos(c+d*x)*sin(c+d*x))
@test integrate((a+a*sin(c+d*x))^-1*cos(c+d*x)^3, x) == :(a^-1*d^-1*sin(c+d*x)+-1//2*a^-1*d^-1*sin(c+d*x)^2)
@test integrate((a+a*sin(c+d*x))^-1*(e+f*x)^-1*cos(c+d*x)^3, x) == :(a^-1*f^-1*cos(c+-1*d*e*f^-1)*cosintegral(d*x+d*e*f^-1)+-1*a^-1*f^-1*sin(c+-1*d*e*f^-1)*sinintegral(d*x+d*e*f^-1)+-1//2*a^-1*f^-1*cos(2c+-2*d*e*f^-1)*sinintegral(2*d*x+2*d*e*f^-1)+-1//2*a^-1*f^-1*cosintegral(2*d*x+2*d*e*f^-1)*sin(2c+-2*d*e*f^-1))
@test integrate((a+a*sin(c+d*x))^-1*(e+f*x)^-2*cos(c+d*x)^3, x) == :((1/2)*a^-1*f^-1*(e+f*x)^-1*sin(2c+2*d*x)+-1*a^-1*f^-1*(e+f*x)^-1*cos(c+d*x)+d*a^-1*f^-2*sin(2c+-2*d*e*f^-1)*sinintegral(2*d*x+2*d*e*f^-1)+-1*d*a^-1*f^-2*cos(c+-1*d*e*f^-1)*sinintegral(d*x+d*e*f^-1)+-1*d*a^-1*f^-2*cos(2c+-2*d*e*f^-1)*cosintegral(2*d*x+2*d*e*f^-1)+-1*d*a^-1*f^-2*cosintegral(d*x+d*e*f^-1)*sin(c+-1*d*e*f^-1))
# Regression tests: ∫ (a + a·sin(c+d·x))^-1 · (e+f·x)^k · sec(c+d·x)^p dx
# for p = 1, 2, 3 and k = 3, 2, 1, 0 in each group. Expected antiderivatives mix
# sec/tan terms with arctan(exp(...)), arctanh(sin(...)), log and polylog pieces.
# NOTE(review): machine-generated reference expressions — do not hand-edit.
@test integrate((a+a*sin(c+d*x))^-1*(e+f*x)^3*sec(c+d*x), x) == :(-1//2*a^-1*d^-1*(e+f*x)^3*sec(c+d*x)^2+(1/2)*a^-1*d^-1*(e+f*x)^3*sec(c+d*x)*tan(c+d*x)+-1*im*a^-1*d^-1*(e+f*x)^3*arctan(exp(im*(c+d*x)))+-3*im*a^-1*d^-4*f^3*Polylogarithms.polylog(2,im*exp(im*(c+d*x)))+-3*im*a^-1*d^-4*f^3*Polylogarithms.polylog(4,-1*im*exp(im*(c+d*x)))+-3*a^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(3,-1*im*exp(im*(c+d*x)))+3*im*a^-1*d^-4*f^3*Polylogarithms.polylog(2,-1*im*exp(im*(c+d*x)))+3*im*a^-1*d^-4*f^3*Polylogarithms.polylog(4,im*exp(im*(c+d*x)))+3*a^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(3,im*exp(im*(c+d*x)))+3*a^-1*d^-3*f^2*(e+f*x)*log(1+exp(2*im*(c+d*x)))+-3//2*im*f*a^-1*d^-2*(e+f*x)^2+-3//2*im*a^-1*d^-4*f^3*Polylogarithms.polylog(2,-1*exp(2*im*(c+d*x)))+-3//2*f*a^-1*d^-2*(e+f*x)^2*sec(c+d*x)+3//2*f*a^-1*d^-2*(e+f*x)^2*tan(c+d*x)+-6*im*a^-1*d^-3*f^2*(e+f*x)*arctan(exp(im*(c+d*x)))+-3//2*im*f*a^-1*d^-2*(e+f*x)^2*Polylogarithms.polylog(2,im*exp(im*(c+d*x)))+3//2*im*f*a^-1*d^-2*(e+f*x)^2*Polylogarithms.polylog(2,-1*im*exp(im*(c+d*x))))
@test integrate((a+a*sin(c+d*x))^-1*(e+f*x)^2*sec(c+d*x), x) == :(a^-1*d^-3*f^2*Polylogarithms.polylog(3,im*exp(im*(c+d*x)))+a^-1*d^-3*f^2*arctanh(sin(c+d*x))+a^-1*d^-3*f^2*log(cos(c+d*x))+-1*a^-1*d^-3*f^2*Polylogarithms.polylog(3,-1*im*exp(im*(c+d*x)))+-1//2*a^-1*d^-1*(e+f*x)^2*sec(c+d*x)^2+f*a^-1*d^-2*(e+f*x)*tan(c+d*x)+(1/2)*a^-1*d^-1*(e+f*x)^2*sec(c+d*x)*tan(c+d*x)+-1*im*a^-1*d^-1*(e+f*x)^2*arctan(exp(im*(c+d*x)))+-1*f*a^-1*d^-2*(e+f*x)*sec(c+d*x)+im*f*a^-1*d^-2*(e+f*x)*Polylogarithms.polylog(2,-1*im*exp(im*(c+d*x)))+-1*im*f*a^-1*d^-2*(e+f*x)*Polylogarithms.polylog(2,im*exp(im*(c+d*x))))
@test integrate((a+a*sin(c+d*x))^-1*(e+f*x)*sec(c+d*x), x) == :((1/2)*f*a^-1*d^-2*tan(c+d*x)+-1//2*f*a^-1*d^-2*sec(c+d*x)+-1//2*a^-1*d^-1*sec(c+d*x)^2*(e+f*x)+(1/2)*im*f*a^-1*d^-2*Polylogarithms.polylog(2,-1*im*exp(im*(c+d*x)))+(1/2)*a^-1*d^-1*(e+f*x)*sec(c+d*x)*tan(c+d*x)+-1*im*a^-1*d^-1*(e+f*x)*arctan(exp(im*(c+d*x)))+-1//2*im*f*a^-1*d^-2*Polylogarithms.polylog(2,im*exp(im*(c+d*x))))
@test integrate((a+a*sin(c+d*x))^-1*sec(c+d*x), x) == :(-1//2*d^-1*(a+a*sin(c+d*x))^-1+(1/2)*a^-1*d^-1*arctanh(sin(c+d*x)))
@test integrate((a+a*sin(c+d*x))^-1*(e+f*x)^3*sec(c+d*x)^2, x) == :(a^-1*d^-4*f^3*Polylogarithms.polylog(3,-1*exp(2*im*(c+d*x)))+a^-1*d^-4*f^3*Polylogarithms.polylog(3,im*exp(im*(c+d*x)))+a^-1*d^-4*f^3*arctanh(sin(c+d*x))+a^-1*d^-4*f^3*log(cos(c+d*x))+-1*a^-1*d^-4*f^3*Polylogarithms.polylog(3,-1*im*exp(im*(c+d*x)))+-2//3*im*a^-1*d^-1*(e+f*x)^3+-1//3*a^-1*d^-1*(e+f*x)^3*sec(c+d*x)^3+2//3*a^-1*d^-1*(e+f*x)^3*tan(c+d*x)+a^-1*d^-3*f^2*(e+f*x)*tan(c+d*x)+-1*a^-1*d^-3*f^2*(e+f*x)*sec(c+d*x)+2*f*a^-1*d^-2*(e+f*x)^2*log(1+exp(2*im*(c+d*x)))+-1//2*f*a^-1*d^-2*(e+f*x)^2*sec(c+d*x)^2+1//3*a^-1*d^-1*(e+f*x)^3*sec(c+d*x)^2*tan(c+d*x)+im*a^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(2,-1*im*exp(im*(c+d*x)))+(1/2)*f*a^-1*d^-2*(e+f*x)^2*sec(c+d*x)*tan(c+d*x)+-1*im*f*a^-1*d^-2*(e+f*x)^2*arctan(exp(im*(c+d*x)))+-1*im*a^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(2,im*exp(im*(c+d*x)))+-2*im*a^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(2,-1*exp(2*im*(c+d*x))))
@test integrate((a+a*sin(c+d*x))^-1*(e+f*x)^2*sec(c+d*x)^2, x) == :(-2//3*im*a^-1*d^-1*(e+f*x)^2+-1//3*a^-1*d^-1*(e+f*x)^2*sec(c+d*x)^3+-1//3*a^-1*d^-3*f^2*sec(c+d*x)+1//3*a^-1*d^-3*f^2*tan(c+d*x)+2//3*a^-1*d^-1*(e+f*x)^2*tan(c+d*x)+-2//3*im*a^-1*d^-3*f^2*Polylogarithms.polylog(2,-1*exp(2*im*(c+d*x)))+-1//3*im*a^-1*d^-3*f^2*Polylogarithms.polylog(2,im*exp(im*(c+d*x)))+-1//3*f*a^-1*d^-2*sec(c+d*x)^2*(e+f*x)+1//3*im*a^-1*d^-3*f^2*Polylogarithms.polylog(2,-1*im*exp(im*(c+d*x)))+1//3*a^-1*d^-1*(e+f*x)^2*sec(c+d*x)^2*tan(c+d*x)+4//3*f*a^-1*d^-2*(e+f*x)*log(1+exp(2*im*(c+d*x)))+-2//3*im*f*a^-1*d^-2*(e+f*x)*arctan(exp(im*(c+d*x)))+1//3*f*a^-1*d^-2*(e+f*x)*sec(c+d*x)*tan(c+d*x))
@test integrate((a+a*sin(c+d*x))^-1*sec(c+d*x)^2*(e+f*x), x) == :(-1//3*a^-1*d^-1*sec(c+d*x)^3*(e+f*x)+-1//6*f*a^-1*d^-2*sec(c+d*x)^2+1//3*a^-1*d^-1*(2*e+2*f*x)*tan(c+d*x)+1//6*f*a^-1*d^-2*arctanh(sin(c+d*x))+2//3*f*a^-1*d^-2*log(cos(c+d*x))+1//3*a^-1*d^-1*sec(c+d*x)^2*(e+f*x)*tan(c+d*x)+1//6*f*a^-1*d^-2*sec(c+d*x)*tan(c+d*x))
@test integrate((a+a*sin(c+d*x))^-1*sec(c+d*x)^2, x) == :(-1//3*d^-1*(a+a*sin(c+d*x))^-1*sec(c+d*x)+2//3*a^-1*d^-1*tan(c+d*x))
@test integrate((a+a*sin(c+d*x))^-1*(e+f*x)^3*sec(c+d*x)^3, x) == :(-1//4*a^-1*d^-1*(e+f*x)^3*sec(c+d*x)^4+-1//4*a^-1*d^-4*f^3*sec(c+d*x)+1//4*a^-1*d^-4*f^3*tan(c+d*x)+a^-1*d^-3*f^2*(e+f*x)*log(1+exp(2*im*(c+d*x)))+(1/2)*f*a^-1*d^-2*(e+f*x)^2*tan(c+d*x)+-9//4*im*a^-1*d^-4*f^3*Polylogarithms.polylog(4,-1*im*exp(im*(c+d*x)))+-9//4*a^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(3,-1*im*exp(im*(c+d*x)))+-9//8*f*a^-1*d^-2*(e+f*x)^2*sec(c+d*x)+-5//2*im*a^-1*d^-4*f^3*Polylogarithms.polylog(2,im*exp(im*(c+d*x)))+-3//4*im*a^-1*d^-1*(e+f*x)^3*arctan(exp(im*(c+d*x)))+-1//2*im*f*a^-1*d^-2*(e+f*x)^2+-1//2*im*a^-1*d^-4*f^3*Polylogarithms.polylog(2,-1*exp(2*im*(c+d*x)))+-1//4*f*a^-1*d^-2*(e+f*x)^2*sec(c+d*x)^3+-1//4*a^-1*d^-3*f^2*sec(c+d*x)^2*(e+f*x)+1//4*a^-1*d^-1*(e+f*x)^3*sec(c+d*x)^3*tan(c+d*x)+3//8*a^-1*d^-1*(e+f*x)^3*sec(c+d*x)*tan(c+d*x)+5//2*im*a^-1*d^-4*f^3*Polylogarithms.polylog(2,-1*im*exp(im*(c+d*x)))+9//4*im*a^-1*d^-4*f^3*Polylogarithms.polylog(4,im*exp(im*(c+d*x)))+9//4*a^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(3,im*exp(im*(c+d*x)))+-5*im*a^-1*d^-3*f^2*(e+f*x)*arctan(exp(im*(c+d*x)))+-9//8*im*f*a^-1*d^-2*(e+f*x)^2*Polylogarithms.polylog(2,im*exp(im*(c+d*x)))+1//4*f*a^-1*d^-2*(e+f*x)^2*sec(c+d*x)^2*tan(c+d*x)+1//4*a^-1*d^-3*f^2*(e+f*x)*sec(c+d*x)*tan(c+d*x)+9//8*im*f*a^-1*d^-2*(e+f*x)^2*Polylogarithms.polylog(2,-1*im*exp(im*(c+d*x))))
@test integrate((a+a*sin(c+d*x))^-1*(e+f*x)^2*sec(c+d*x)^3, x) == :(-3//4*a^-1*d^-3*f^2*Polylogarithms.polylog(3,-1*im*exp(im*(c+d*x)))+-1//4*a^-1*d^-1*(e+f*x)^2*sec(c+d*x)^4+-1//12*a^-1*d^-3*f^2*sec(c+d*x)^2+1//3*a^-1*d^-3*f^2*log(cos(c+d*x))+3//4*a^-1*d^-3*f^2*Polylogarithms.polylog(3,im*exp(im*(c+d*x)))+5//6*a^-1*d^-3*f^2*arctanh(sin(c+d*x))+-3//4*im*a^-1*d^-1*(e+f*x)^2*arctan(exp(im*(c+d*x)))+-3//4*f*a^-1*d^-2*(e+f*x)*sec(c+d*x)+-1//6*f*a^-1*d^-2*sec(c+d*x)^3*(e+f*x)+1//3*f*a^-1*d^-2*(e+f*x)*tan(c+d*x)+1//4*a^-1*d^-1*(e+f*x)^2*sec(c+d*x)^3*tan(c+d*x)+1//12*a^-1*d^-3*f^2*sec(c+d*x)*tan(c+d*x)+3//8*a^-1*d^-1*(e+f*x)^2*sec(c+d*x)*tan(c+d*x)+-3//4*im*f*a^-1*d^-2*(e+f*x)*Polylogarithms.polylog(2,im*exp(im*(c+d*x)))+1//6*f*a^-1*d^-2*sec(c+d*x)^2*(e+f*x)*tan(c+d*x)+3//4*im*f*a^-1*d^-2*(e+f*x)*Polylogarithms.polylog(2,-1*im*exp(im*(c+d*x))))
@test integrate((a+a*sin(c+d*x))^-1*sec(c+d*x)^3*(e+f*x), x) == :(-3//8*f*a^-1*d^-2*sec(c+d*x)+-1//4*a^-1*d^-1*sec(c+d*x)^4*(e+f*x)+-1//12*f*a^-1*d^-2*sec(c+d*x)^3+1//4*f*a^-1*d^-2*tan(c+d*x)+1//12*f*a^-1*d^-2*tan(c+d*x)^3+-3//4*im*a^-1*d^-1*(e+f*x)*arctan(exp(im*(c+d*x)))+-3//8*im*f*a^-1*d^-2*Polylogarithms.polylog(2,im*exp(im*(c+d*x)))+1//4*a^-1*d^-1*sec(c+d*x)^3*(e+f*x)*tan(c+d*x)+1//8*a^-1*d^-1*(3*e+3*f*x)*sec(c+d*x)*tan(c+d*x)+3//8*im*f*a^-1*d^-2*Polylogarithms.polylog(2,-1*im*exp(im*(c+d*x))))
@test integrate((a+a*sin(c+d*x))^-1*sec(c+d*x)^3, x) == :(-1//4*d^-1*(a+a*sin(c+d*x))^-1+1//8*d^-1*(a+-1*a*sin(c+d*x))^-1+-1//8*a*d^-1*(a+a*sin(c+d*x))^-2+3//8*a^-1*d^-1*arctanh(sin(c+d*x)))
# Regression tests with a symbolic exponent m:
# ∫ (a + a·sin(c+d·x))^-1 · (e+f·x)^m · cos(c+d·x)^p dx for p = 4, 3, 2.
# Expected results are expressed via incomplete gamma functions
# (SpecialFunctions.gamma(1+m, ...)) with complex arguments.
# NOTE(review): machine-generated reference expressions — do not hand-edit.
@test integrate((a+a*sin(c+d*x))^-1*(e+f*x)^m*cos(c+d*x)^4, x) == :((1/2)*a^-1*f^-1*(1+m)^-1*(e+f*x)^(1+m)+1//8*a^-1*d^-1*(im*d*f^-1*(e+f*x))^(-1m)*(e+f*x)^m*SpecialFunctions.gamma(1+m,im*d*f^-1*(e+f*x))*exp(-1*im*(c+-1*d*e*f^-1))+1//8*a^-1*d^-1*(-1*im*d*f^-1*(e+f*x))^(-1m)*(e+f*x)^m*SpecialFunctions.gamma(1+m,-1*im*d*f^-1*(e+f*x))*exp(im*(c+-1*d*e*f^-1))+1//8*3^(-1+-1m)*a^-1*d^-1*(im*d*f^-1*(e+f*x))^(-1m)*(e+f*x)^m*SpecialFunctions.gamma(1+m,3*im*d*f^-1*(e+f*x))*exp(-3*im*(c+-1*d*e*f^-1))+1//8*3^(-1+-1m)*a^-1*d^-1*(-1*im*d*f^-1*(e+f*x))^(-1m)*(e+f*x)^m*SpecialFunctions.gamma(1+m,-3*im*d*f^-1*(e+f*x))*exp(3*im*(c+-1*d*e*f^-1))+im*2^(-3+-1m)*a^-1*d^-1*(im*d*f^-1*(e+f*x))^(-1m)*(e+f*x)^m*SpecialFunctions.gamma(1+m,2*im*d*f^-1*(e+f*x))*exp(-2*im*(c+-1*d*e*f^-1))+-1*im*2^(-3+-1m)*a^-1*d^-1*(-1*im*d*f^-1*(e+f*x))^(-1m)*(e+f*x)^m*SpecialFunctions.gamma(1+m,-2*im*d*f^-1*(e+f*x))*exp(2*im*(c+-1*d*e*f^-1)))
@test integrate((a+a*sin(c+d*x))^-1*(e+f*x)^m*cos(c+d*x)^3, x) == :(2^(-3+-1m)*a^-1*d^-1*(im*d*f^-1*(e+f*x))^(-1m)*(e+f*x)^m*SpecialFunctions.gamma(1+m,2*im*d*f^-1*(e+f*x))*exp(-2*im*(c+-1*d*e*f^-1))+2^(-3+-1m)*a^-1*d^-1*(-1*im*d*f^-1*(e+f*x))^(-1m)*(e+f*x)^m*SpecialFunctions.gamma(1+m,-2*im*d*f^-1*(e+f*x))*exp(2*im*(c+-1*d*e*f^-1))+(1/2)*im*a^-1*d^-1*(im*d*f^-1*(e+f*x))^(-1m)*(e+f*x)^m*SpecialFunctions.gamma(1+m,im*d*f^-1*(e+f*x))*exp(-1*im*(c+-1*d*e*f^-1))+-1//2*im*a^-1*d^-1*(-1*im*d*f^-1*(e+f*x))^(-1m)*(e+f*x)^m*SpecialFunctions.gamma(1+m,-1*im*d*f^-1*(e+f*x))*exp(im*(c+-1*d*e*f^-1)))
@test integrate((a+a*sin(c+d*x))^-1*(e+f*x)^m*cos(c+d*x)^2, x) == :(a^-1*f^-1*(1+m)^-1*(e+f*x)^(1+m)+(1/2)*a^-1*d^-1*(im*d*f^-1*(e+f*x))^(-1m)*(e+f*x)^m*SpecialFunctions.gamma(1+m,im*d*f^-1*(e+f*x))*exp(-1*im*(c+-1*d*e*f^-1))+(1/2)*a^-1*d^-1*(-1*im*d*f^-1*(e+f*x))^(-1m)*(e+f*x)^m*SpecialFunctions.gamma(1+m,-1*im*d*f^-1*(e+f*x))*exp(im*(c+-1*d*e*f^-1)))
# Regression tests for the general a ≠ b case:
# ∫ (a + b·sin(c+d·x))^-1 · (e+f·x)^k · cos(c+d·x)^p dx for p = 1, 2, 3 and k = 3, 2, 1, 0.
# NOTE(review): machine-generated reference expressions — do not hand-edit.
@test integrate((a+b*sin(c+d*x))^-1*(e+f*x)^3*cos(c+d*x), x) == :(b^-1*d^-1*(e+f*x)^3*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+b^-1*d^-1*(e+f*x)^3*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1//4*im*b^-1*f^-1*(e+f*x)^4+6*im*b^-1*d^-4*f^3*Polylogarithms.polylog(4,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+6*im*b^-1*d^-4*f^3*Polylogarithms.polylog(4,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+6*b^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+6*b^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3*im*f*b^-1*d^-2*(e+f*x)^2*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3*im*f*b^-1*d^-2*(e+f*x)^2*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*(e+f*x)^2*cos(c+d*x), x) == :(b^-1*d^-1*(e+f*x)^2*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+b^-1*d^-1*(e+f*x)^2*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+2*b^-1*d^-3*f^2*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+2*b^-1*d^-3*f^2*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1//3*im*b^-1*f^-1*(e+f*x)^3+-2*im*f*b^-1*d^-2*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-2*im*f*b^-1*d^-2*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*(e+f*x)*cos(c+d*x), x) == :(b^-1*d^-1*(e+f*x)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+b^-1*d^-1*(e+f*x)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1//2*im*b^-1*f^-1*(e+f*x)^2+-1*im*f*b^-1*d^-2*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*f*b^-1*d^-2*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*cos(c+d*x), x) == :(b^-1*d^-1*log(a+b*sin(c+d*x)))
@test integrate((a+b*sin(c+d*x))^-1*(e+f*x)^3*cos(c+d*x)^2, x) == :(b^-1*d^-1*(e+f*x)^3*cos(c+d*x)+6*b^-1*d^-4*f^3*sin(c+d*x)+1//4*a*b^-2*f^-1*(e+f*x)^4+-6*b^-1*d^-3*f^2*(e+f*x)*cos(c+d*x)+-6*b^-2*d^-4*f^3*(a^2+-1*b^2)^(1/2)*Polylogarithms.polylog(4,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3*f*b^-1*d^-2*(e+f*x)^2*sin(c+d*x)+6*b^-2*d^-4*f^3*(a^2+-1*b^2)^(1/2)*Polylogarithms.polylog(4,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+im*b^-2*d^-1*(e+f*x)^3*(a^2+-1*b^2)^(1/2)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*b^-2*d^-1*(e+f*x)^3*(a^2+-1*b^2)^(1/2)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3*f*b^-2*d^-2*(e+f*x)^2*(a^2+-1*b^2)^(1/2)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3*f*b^-2*d^-2*(e+f*x)^2*(a^2+-1*b^2)^(1/2)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-6*im*b^-2*d^-3*f^2*(a^2+-1*b^2)^(1/2)*(e+f*x)*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+6*im*b^-2*d^-3*f^2*(a^2+-1*b^2)^(1/2)*(e+f*x)*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*(e+f*x)^2*cos(c+d*x)^2, x) == :(b^-1*d^-1*(e+f*x)^2*cos(c+d*x)+-2*b^-1*d^-3*f^2*cos(c+d*x)+1//3*a*b^-2*f^-1*(e+f*x)^3+-2*f*b^-1*d^-2*(e+f*x)*sin(c+d*x)+im*b^-2*d^-1*(e+f*x)^2*(a^2+-1*b^2)^(1/2)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*b^-2*d^-1*(e+f*x)^2*(a^2+-1*b^2)^(1/2)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-2*im*b^-2*d^-3*f^2*(a^2+-1*b^2)^(1/2)*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-2*f*b^-2*d^-2*(a^2+-1*b^2)^(1/2)*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+2*im*b^-2*d^-3*f^2*(a^2+-1*b^2)^(1/2)*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+2*f*b^-2*d^-2*(a^2+-1*b^2)^(1/2)*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*cos(c+d*x)^2*(e+f*x), x) == :(a*e*x*b^-2+b^-1*d^-1*(e+f*x)*cos(c+d*x)+(1/2)*a*f*b^-2*x^2+-1*f*b^-1*d^-2*sin(c+d*x)+f*b^-2*d^-2*(a^2+-1*b^2)^(1/2)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*f*b^-2*d^-2*(a^2+-1*b^2)^(1/2)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+im*b^-2*d^-1*(a^2+-1*b^2)^(1/2)*(e+f*x)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*b^-2*d^-1*(a^2+-1*b^2)^(1/2)*(e+f*x)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*cos(c+d*x)^2, x) == :(a*x*b^-2+b^-1*d^-1*cos(c+d*x)+-2*b^-2*d^-1*(a^2+-1*b^2)^(1/2)*arctan((a^2+-1*b^2)^-1//2*(b+a*tan((1/2)*c+(1/2)*d*x))))
@test integrate((a+b*sin(c+d*x))^-1*(e+f*x)^3*cos(c+d*x)^3, x) == :(1//4*b^-1*d^-1*(e+f*x)^3+-3//8*x*b^-1*d^-3*f^3+-1//2*b^-1*d^-1*(e+f*x)^3*sin(c+d*x)^2+a*b^-2*d^-1*(e+f*x)^3*sin(c+d*x)+-1*b^-3*d^-1*(e+f*x)^3*(a^2+-1*b^2)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*b^-3*d^-1*(e+f*x)^3*(a^2+-1*b^2)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-6*a*b^-2*d^-4*f^3*cos(c+d*x)+1//4*im*b^-3*f^-1*(e+f*x)^4*(a^2+-1*b^2)+3//4*b^-1*d^-3*f^2*sin(c+d*x)^2*(e+f*x)+3//8*b^-1*d^-4*f^3*cos(c+d*x)*sin(c+d*x)+-1*b^-3*d^-3*f^2*(e+f*x)*(-6*b^2+6*a^2)*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*b^-3*d^-3*f^2*(e+f*x)*(-6*b^2+6*a^2)*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-6*im*b^-3*d^-4*f^3*(a^2+-1*b^2)*Polylogarithms.polylog(4,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-6*im*b^-3*d^-4*f^3*(a^2+-1*b^2)*Polylogarithms.polylog(4,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-6*a*b^-2*d^-3*f^2*(e+f*x)*sin(c+d*x)+3*a*f*b^-2*d^-2*(e+f*x)^2*cos(c+d*x)+-3//4*f*b^-1*d^-2*(e+f*x)^2*cos(c+d*x)*sin(c+d*x)+3*im*f*b^-3*d^-2*(e+f*x)^2*(a^2+-1*b^2)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3*im*f*b^-3*d^-2*(e+f*x)^2*(a^2+-1*b^2)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*(e+f*x)^2*cos(c+d*x)^3, x) == :(-1//2*b^-1*d^-1*(e+f*x)^2*sin(c+d*x)^2+1//4*b^-1*d^-1*f^2*x^2+1//4*b^-1*d^-3*f^2*sin(c+d*x)^2+a*b^-2*d^-1*(e+f*x)^2*sin(c+d*x)+(1/2)*e*f*x*b^-1*d^-1+-1*b^-3*d^-1*(e+f*x)^2*(a^2+-1*b^2)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*b^-3*d^-1*(e+f*x)^2*(a^2+-1*b^2)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*b^-3*d^-3*f^2*(-2*b^2+2*a^2)*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*b^-3*d^-3*f^2*(-2*b^2+2*a^2)*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-2*a*b^-2*d^-3*f^2*sin(c+d*x)+1//3*im*b^-3*f^-1*(e+f*x)^3*(a^2+-1*b^2)+2*a*f*b^-2*d^-2*(e+f*x)*cos(c+d*x)+-1//2*f*b^-1*d^-2*(e+f*x)*cos(c+d*x)*sin(c+d*x)+2*im*f*b^-3*d^-2*(e+f*x)*(a^2+-1*b^2)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+2*im*f*b^-3*d^-2*(e+f*x)*(a^2+-1*b^2)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*cos(c+d*x)^3*(e+f*x), x) == :(-1//2*b^-1*d^-1*sin(c+d*x)^2*(e+f*x)+1//4*f*x*b^-1*d^-1+a*f*b^-2*d^-2*cos(c+d*x)+a*b^-2*d^-1*(e+f*x)*sin(c+d*x)+(1/2)*im*b^-3*f^-1*(e+f*x)^2*(a^2+-1*b^2)+-1*b^-3*d^-1*(e+f*x)*(a^2+-1*b^2)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*b^-3*d^-1*(e+f*x)*(a^2+-1*b^2)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1//4*f*b^-1*d^-2*cos(c+d*x)*sin(c+d*x)+im*f*b^-3*d^-2*(a^2+-1*b^2)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+im*f*b^-3*d^-2*(a^2+-1*b^2)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*cos(c+d*x)^3, x) == :(-1//2*b^-1*d^-1*sin(c+d*x)^2+a*b^-2*d^-1*sin(c+d*x)+-1*b^-3*d^-1*(a^2+-1*b^2)*log(a+b*sin(c+d*x)))
# Regression tests: ∫ (a + b·sin(c+d·x))^-1 · (e+f·x)^k · sec(c+d·x) dx for k = 3, 2, 1.
# NOTE(review): machine-generated reference expressions — do not hand-edit.
@test integrate((a+b*sin(c+d*x))^-1*(e+f*x)^3*sec(c+d*x), x) == :(b*d^-1*(e+f*x)^3*(a^2+-1*b^2)^-1*log(1+exp(2*im*(c+d*x)))+-1*b*d^-1*(e+f*x)^3*(a^2+-1*b^2)^-1*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*b*d^-1*(e+f*x)^3*(a^2+-1*b^2)^-1*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-6*im*a*d^-4*f^3*(a^2+-1*b^2)^-1*Polylogarithms.polylog(4,-1*im*exp(im*(c+d*x)))+-6*im*b*d^-4*f^3*(a^2+-1*b^2)^-1*Polylogarithms.polylog(4,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-6*im*b*d^-4*f^3*(a^2+-1*b^2)^-1*Polylogarithms.polylog(4,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-6*a*d^-3*f^2*(a^2+-1*b^2)^-1*(e+f*x)*Polylogarithms.polylog(3,-1*im*exp(im*(c+d*x)))+-6*b*d^-3*f^2*(a^2+-1*b^2)^-1*(e+f*x)*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-6*b*d^-3*f^2*(a^2+-1*b^2)^-1*(e+f*x)*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-2*im*a*d^-1*(e+f*x)^3*(a^2+-1*b^2)^-1*arctan(exp(im*(c+d*x)))+3*im*b*d^-4*f^3*(-4*b^2+4*a^2)^-1*Polylogarithms.polylog(4,-1*exp(2*im*(c+d*x)))+3*b*d^-3*f^2*(-2*b^2+2*a^2)^-1*(e+f*x)*Polylogarithms.polylog(3,-1*exp(2*im*(c+d*x)))+6*im*a*d^-4*f^3*(a^2+-1*b^2)^-1*Polylogarithms.polylog(4,im*exp(im*(c+d*x)))+6*a*d^-3*f^2*(a^2+-1*b^2)^-1*(e+f*x)*Polylogarithms.polylog(3,im*exp(im*(c+d*x)))+-3*im*a*f*d^-2*(e+f*x)^2*(a^2+-1*b^2)^-1*Polylogarithms.polylog(2,im*exp(im*(c+d*x)))+-3*im*b*f*d^-2*(e+f*x)^2*(-2*b^2+2*a^2)^-1*Polylogarithms.polylog(2,-1*exp(2*im*(c+d*x)))+3*im*a*f*d^-2*(e+f*x)^2*(a^2+-1*b^2)^-1*Polylogarithms.polylog(2,-1*im*exp(im*(c+d*x)))+3*im*b*f*d^-2*(e+f*x)^2*(a^2+-1*b^2)^-1*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3*im*b*f*d^-2*(e+f*x)^2*(a^2+-1*b^2)^-1*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*(e+f*x)^2*sec(c+d*x), x) == :(b*d^-1*(e+f*x)^2*(a^2+-1*b^2)^-1*log(1+exp(2*im*(c+d*x)))+b*d^-3*f^2*(-2*b^2+2*a^2)^-1*Polylogarithms.polylog(3,-1*exp(2*im*(c+d*x)))+-1*b*d^-1*(e+f*x)^2*(a^2+-1*b^2)^-1*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*b*d^-1*(e+f*x)^2*(a^2+-1*b^2)^-1*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-2*a*d^-3*f^2*(a^2+-1*b^2)^-1*Polylogarithms.polylog(3,-1*im*exp(im*(c+d*x)))+-2*b*d^-3*f^2*(a^2+-1*b^2)^-1*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-2*b*d^-3*f^2*(a^2+-1*b^2)^-1*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+2*a*d^-3*f^2*(a^2+-1*b^2)^-1*Polylogarithms.polylog(3,im*exp(im*(c+d*x)))+-2*im*a*d^-1*(e+f*x)^2*(a^2+-1*b^2)^-1*arctan(exp(im*(c+d*x)))+-1*im*b*f*d^-2*(a^2+-1*b^2)^-1*(e+f*x)*Polylogarithms.polylog(2,-1*exp(2*im*(c+d*x)))+-2*im*a*f*d^-2*(a^2+-1*b^2)^-1*(e+f*x)*Polylogarithms.polylog(2,im*exp(im*(c+d*x)))+2*im*a*f*d^-2*(a^2+-1*b^2)^-1*(e+f*x)*Polylogarithms.polylog(2,-1*im*exp(im*(c+d*x)))+2*im*b*f*d^-2*(a^2+-1*b^2)^-1*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+2*im*b*f*d^-2*(a^2+-1*b^2)^-1*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*(e+f*x)*sec(c+d*x), x) == :(b*d^-1*(a^2+-1*b^2)^-1*(e+f*x)*log(1+exp(2*im*(c+d*x)))+-1*b*d^-1*(a^2+-1*b^2)^-1*(e+f*x)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*b*d^-1*(a^2+-1*b^2)^-1*(e+f*x)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+im*a*f*d^-2*(a^2+-1*b^2)^-1*Polylogarithms.polylog(2,-1*im*exp(im*(c+d*x)))+im*b*f*d^-2*(a^2+-1*b^2)^-1*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+im*b*f*d^-2*(a^2+-1*b^2)^-1*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*a*f*d^-2*(a^2+-1*b^2)^-1*Polylogarithms.polylog(2,im*exp(im*(c+d*x)))+-1*im*b*f*d^-2*(-2*b^2+2*a^2)^-1*Polylogarithms.polylog(2,-1*exp(2*im*(c+d*x)))+-2*im*a*d^-1*(a^2+-1*b^2)^-1*(e+f*x)*arctan(exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*sec(c+d*x), x) == :(d^-1*(-2b+2a)^-1*log(1+sin(c+d*x))+-1*d^-1*(2a+2b)^-1*log(1+-1*sin(c+d*x))+-1*b*d^-1*(a^2+-1*b^2)^-1*log(a+b*sin(c+d*x)))
@test integrate((a+b*sin(c+d*x))^-1*sec(c+d*x)^2, x) == :(-1*d^-1*(a^2+-1*b^2)^-1*(b+-1*a*sin(c+d*x))*sec(c+d*x)+-2*b^2*d^-1*(a^2+-1*b^2)^-3//2*arctan((a^2+-1*b^2)^-1//2*(b+a*tan((1/2)*c+(1/2)*d*x))))
@test integrate((a+b*sin(c+d*x))^-2*(e+f*x)*cos(c+d*x), x) == :(-1*b^-1*d^-1*(a+b*sin(c+d*x))^-1*(e+f*x)+2*f*b^-1*d^-2*(a^2+-1*b^2)^-1//2*arctan((a^2+-1*b^2)^-1//2*(b+a*tan((1/2)*c+(1/2)*d*x))))
@test integrate((a+b*sin(c+d*x))^-2*(e+f*x)^2*cos(c+d*x), x) == :(-1*b^-1*d^-1*(a+b*sin(c+d*x))^-1*(e+f*x)^2+-2*b^-1*d^-3*f^2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+2*b^-1*d^-3*f^2*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-2*im*f*b^-1*d^-2*(a^2+-1*b^2)^-1//2*(e+f*x)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+2*im*f*b^-1*d^-2*(a^2+-1*b^2)^-1//2*(e+f*x)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-2*(e+f*x)^3*cos(c+d*x), x) == :(-1*b^-1*d^-1*(a+b*sin(c+d*x))^-1*(e+f*x)^3+-6*im*b^-1*d^-4*f^3*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-6*b^-1*d^-3*f^2*(a^2+-1*b^2)^-1//2*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+6*im*b^-1*d^-4*f^3*(a^2+-1*b^2)^-1//2*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+6*b^-1*d^-3*f^2*(a^2+-1*b^2)^-1//2*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3*im*f*b^-1*d^-2*(e+f*x)^2*(a^2+-1*b^2)^-1//2*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3*im*f*b^-1*d^-2*(e+f*x)^2*(a^2+-1*b^2)^-1//2*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-3*(e+f*x)*cos(c+d*x), x) == :(-1//2*b^-1*d^-1*(a+b*sin(c+d*x))^-2*(e+f*x)+f*d^-2*(a+b*sin(c+d*x))^-1*(-2*b^2+2*a^2)^-1*cos(c+d*x)+a*f*b^-1*d^-2*(a^2+-1*b^2)^-3//2*arctan((a^2+-1*b^2)^-1//2*(b+a*tan((1/2)*c+(1/2)*d*x))))
@test integrate((a+b*sin(c+d*x))^-3*(e+f*x)^2*cos(c+d*x), x) == :(-1//2*b^-1*d^-1*(a+b*sin(c+d*x))^-2*(e+f*x)^2+-1*b^-1*d^-3*f^2*(a^2+-1*b^2)^-1*log(a+b*sin(c+d*x))+a*b^-1*d^-3*f^2*(a^2+-1*b^2)^-3//2*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+f*d^-2*(a+b*sin(c+d*x))^-1*(a^2+-1*b^2)^-1*(e+f*x)*cos(c+d*x)+-1*a*b^-1*d^-3*f^2*(a^2+-1*b^2)^-3//2*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+im*a*f*b^-1*d^-2*(a^2+-1*b^2)^-3//2*(e+f*x)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*a*f*b^-1*d^-2*(a^2+-1*b^2)^-3//2*(e+f*x)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-3*(e+f*x)^3*cos(c+d*x), x) == :(-1//2*b^-1*d^-1*(a+b*sin(c+d*x))^-2*(e+f*x)^3+-3*b^-1*d^-3*f^2*(a^2+-1*b^2)^-1*(e+f*x)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3*b^-1*d^-3*f^2*(a^2+-1*b^2)^-1*(e+f*x)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3*im*b^-1*d^-4*f^3*(a^2+-1*b^2)^-1*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3*im*b^-1*d^-4*f^3*(a^2+-1*b^2)^-1*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3*f*d^-2*(a+b*sin(c+d*x))^-1*(e+f*x)^2*(-2*b^2+2*a^2)^-1*cos(c+d*x)+3//2*im*f*b^-1*d^-2*(e+f*x)^2*(a^2+-1*b^2)^-1+-3*im*a*b^-1*d^-4*f^3*(a^2+-1*b^2)^-3//2*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3*a*b^-1*d^-3*f^2*(a^2+-1*b^2)^-3//2*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3*im*a*b^-1*d^-4*f^3*(a^2+-1*b^2)^-3//2*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3*a*b^-1*d^-3*f^2*(a^2+-1*b^2)^-3//2*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3//2*im*a*f*b^-1*d^-2*(e+f*x)^2*(a^2+-1*b^2)^-3//2*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3//2*im*a*f*b^-1*d^-2*(e+f*x)^2*(a^2+-1*b^2)^-3//2*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*(e+f*x)^3*cos(c+d*x)*cot(c+d*x), x) == :(-1//4*b^-1*f^-1*(e+f*x)^4+-2*a^-1*d^-1*(e+f*x)^3*arctanh(exp(im*(c+d*x)))+-6*im*a^-1*d^-4*f^3*Polylogarithms.polylog(4,-1*exp(im*(c+d*x)))+-6*a^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(3,-1*exp(im*(c+d*x)))+6*im*a^-1*d^-4*f^3*Polylogarithms.polylog(4,exp(im*(c+d*x)))+6*a^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(3,exp(im*(c+d*x)))+-6*a^-1*b^-1*d^-4*f^3*(a^2+-1*b^2)^(1/2)*Polylogarithms.polylog(4,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3*im*f*a^-1*d^-2*(e+f*x)^2*Polylogarithms.polylog(2,exp(im*(c+d*x)))+3*im*f*a^-1*d^-2*(e+f*x)^2*Polylogarithms.polylog(2,-1*exp(im*(c+d*x)))+6*a^-1*b^-1*d^-4*f^3*(a^2+-1*b^2)^(1/2)*Polylogarithms.polylog(4,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+im*a^-1*b^-1*d^-1*(e+f*x)^3*(a^2+-1*b^2)^(1/2)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*a^-1*b^-1*d^-1*(e+f*x)^3*(a^2+-1*b^2)^(1/2)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3*f*a^-1*b^-1*d^-2*(e+f*x)^2*(a^2+-1*b^2)^(1/2)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3*f*a^-1*b^-1*d^-2*(e+f*x)^2*(a^2+-1*b^2)^(1/2)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-6*im*a^-1*b^-1*d^-3*f^2*(a^2+-1*b^2)^(1/2)*(e+f*x)*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+6*im*a^-1*b^-1*d^-3*f^2*(a^2+-1*b^2)^(1/2)*(e+f*x)*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*(e+f*x)^2*cos(c+d*x)*cot(c+d*x), x) == :(-1//3*b^-1*f^-1*(e+f*x)^3+-2*a^-1*d^-1*(e+f*x)^2*arctanh(exp(im*(c+d*x)))+-2*a^-1*d^-3*f^2*Polylogarithms.polylog(3,-1*exp(im*(c+d*x)))+2*a^-1*d^-3*f^2*Polylogarithms.polylog(3,exp(im*(c+d*x)))+-2*im*f*a^-1*d^-2*(e+f*x)*Polylogarithms.polylog(2,exp(im*(c+d*x)))+2*im*f*a^-1*d^-2*(e+f*x)*Polylogarithms.polylog(2,-1*exp(im*(c+d*x)))+im*a^-1*b^-1*d^-1*(e+f*x)^2*(a^2+-1*b^2)^(1/2)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*a^-1*b^-1*d^-1*(e+f*x)^2*(a^2+-1*b^2)^(1/2)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-2*im*a^-1*b^-1*d^-3*f^2*(a^2+-1*b^2)^(1/2)*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-2*f*a^-1*b^-1*d^-2*(a^2+-1*b^2)^(1/2)*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+2*im*a^-1*b^-1*d^-3*f^2*(a^2+-1*b^2)^(1/2)*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+2*f*a^-1*b^-1*d^-2*(a^2+-1*b^2)^(1/2)*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*(e+f*x)*cos(c+d*x)*cot(c+d*x), x) == :(-1*e*x*b^-1+-1//2*f*b^-1*x^2+-1*a^-1*d^-1*(2*e+2*f*x)*arctanh(exp(im*(c+d*x)))+im*f*a^-1*d^-2*Polylogarithms.polylog(2,-1*exp(im*(c+d*x)))+-1*im*f*a^-1*d^-2*Polylogarithms.polylog(2,exp(im*(c+d*x)))+f*a^-1*b^-1*d^-2*(a^2+-1*b^2)^(1/2)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*f*a^-1*b^-1*d^-2*(a^2+-1*b^2)^(1/2)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+im*a^-1*b^-1*d^-1*(a^2+-1*b^2)^(1/2)*(e+f*x)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*a^-1*b^-1*d^-1*(a^2+-1*b^2)^(1/2)*(e+f*x)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*cos(c+d*x)*cot(c+d*x), x) == :(-1*x*b^-1+-1*a^-1*d^-1*arctanh(cos(c+d*x))+2*a^-1*b^-1*d^-1*(a^2+-1*b^2)^(1/2)*arctan((a^2+-1*b^2)^-1//2*(b+a*tan((1/2)*c+(1/2)*d*x))))
@test integrate((a+b*sin(c+d*x))^-1*(e+f*x)^3*cos(c+d*x)^2*cot(c+d*x), x) == :(a^-1*d^-1*(e+f*x)^3*log(1+-1*exp(2*im*(c+d*x)))+-1*b^-1*d^-1*(e+f*x)^3*sin(c+d*x)+6*b^-1*d^-4*f^3*cos(c+d*x)+-1//4*im*a^-1*f^-1*(e+f*x)^4+-3*f*b^-1*d^-2*(e+f*x)^2*cos(c+d*x)+6*b^-1*d^-3*f^2*(e+f*x)*sin(c+d*x)+3//2*a^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(3,exp(2*im*(c+d*x)))+3//4*im*a^-1*d^-4*f^3*Polylogarithms.polylog(4,exp(2*im*(c+d*x)))+a^-1*b^-2*d^-1*(e+f*x)^3*(a^2+-1*b^2)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+a^-1*b^-2*d^-1*(e+f*x)^3*(a^2+-1*b^2)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3//2*im*f*a^-1*d^-2*(e+f*x)^2*Polylogarithms.polylog(2,exp(2*im*(c+d*x)))+-1//4*im*a^-1*b^-2*f^-1*(e+f*x)^4*(a^2+-1*b^2)+a^-1*b^-2*d^-3*f^2*(e+f*x)*(-6*b^2+6*a^2)*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+a^-1*b^-2*d^-3*f^2*(e+f*x)*(-6*b^2+6*a^2)*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+6*im*a^-1*b^-2*d^-4*f^3*(a^2+-1*b^2)*Polylogarithms.polylog(4,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+6*im*a^-1*b^-2*d^-4*f^3*(a^2+-1*b^2)*Polylogarithms.polylog(4,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3*im*f*a^-1*b^-2*d^-2*(e+f*x)^2*(a^2+-1*b^2)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3*im*f*a^-1*b^-2*d^-2*(e+f*x)^2*(a^2+-1*b^2)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*(e+f*x)^2*cos(c+d*x)^2*cot(c+d*x), x) == :(a^-1*d^-1*(e+f*x)^2*log(1+-1*exp(2*im*(c+d*x)))+(1/2)*a^-1*d^-3*f^2*Polylogarithms.polylog(3,exp(2*im*(c+d*x)))+-1*b^-1*d^-1*(e+f*x)^2*sin(c+d*x)+2*b^-1*d^-3*f^2*sin(c+d*x)+-1//3*im*a^-1*f^-1*(e+f*x)^3+-2*f*b^-1*d^-2*(e+f*x)*cos(c+d*x)+a^-1*b^-2*d^-1*(e+f*x)^2*(a^2+-1*b^2)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+a^-1*b^-2*d^-1*(e+f*x)^2*(a^2+-1*b^2)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+a^-1*b^-2*d^-3*f^2*(-2*b^2+2*a^2)*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+a^-1*b^-2*d^-3*f^2*(-2*b^2+2*a^2)*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*f*a^-1*d^-2*(e+f*x)*Polylogarithms.polylog(2,exp(2*im*(c+d*x)))+-1//3*im*a^-1*b^-2*f^-1*(e+f*x)^3*(a^2+-1*b^2)+-2*im*f*a^-1*b^-2*d^-2*(e+f*x)*(a^2+-1*b^2)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-2*im*f*a^-1*b^-2*d^-2*(e+f*x)*(a^2+-1*b^2)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*cos(c+d*x)^2*(e+f*x)*cot(c+d*x), x) == :(a^-1*d^-1*(e+f*x)*log(1+-1*exp(2*im*(c+d*x)))+-1*f*b^-1*d^-2*cos(c+d*x)+-1*b^-1*d^-1*(e+f*x)*sin(c+d*x)+-1//2*im*a^-1*f^-1*(e+f*x)^2+-1//2*im*f*a^-1*d^-2*Polylogarithms.polylog(2,exp(2*im*(c+d*x)))+a^-1*b^-2*d^-1*(e+f*x)*(a^2+-1*b^2)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+a^-1*b^-2*d^-1*(e+f*x)*(a^2+-1*b^2)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1//2*im*a^-1*b^-2*f^-1*(e+f*x)^2*(a^2+-1*b^2)+-1*im*f*a^-1*b^-2*d^-2*(a^2+-1*b^2)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*f*a^-1*b^-2*d^-2*(a^2+-1*b^2)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*cos(c+d*x)^2*cot(c+d*x), x) == :(a^-1*d^-1*log(sin(c+d*x))+-1*b^-1*d^-1*sin(c+d*x)+a^-1*b^-2*d^-1*(a^2+-1*b^2)*log(a+b*sin(c+d*x)))
@test integrate((a+b*sin(c+d*x))^-1*(e+f*x)^3*cos(c+d*x)^3*cot(c+d*x), x) == :(-1//8*b^-1*f^-1*(e+f*x)^4+a^-1*d^-1*(e+f*x)^3*cos(c+d*x)+-2*a^-1*d^-1*(e+f*x)^3*arctanh(exp(im*(c+d*x)))+6*a^-1*d^-4*f^3*sin(c+d*x)+1//4*b^-3*f^-1*(e+f*x)^4*(a^2+-1*b^2)+3//8*b^-1*d^-4*f^3*cos(c+d*x)^2+3//8*b^-1*d^-2*f^3*x^2+-6*im*a^-1*d^-4*f^3*Polylogarithms.polylog(4,-1*exp(im*(c+d*x)))+-6*a^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(3,-1*exp(im*(c+d*x)))+-6*a^-1*d^-3*f^2*(e+f*x)*cos(c+d*x)+-3*f*a^-1*d^-2*(e+f*x)^2*sin(c+d*x)+6*im*a^-1*d^-4*f^3*Polylogarithms.polylog(4,exp(im*(c+d*x)))+6*a^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(3,exp(im*(c+d*x)))+-3//4*f*b^-1*d^-2*(e+f*x)^2*cos(c+d*x)^2+-1//2*b^-1*d^-1*(e+f*x)^3*cos(c+d*x)*sin(c+d*x)+3//4*e*x*b^-1*d^-2*f^2+a^-1*b^-2*d^-1*(e+f*x)^3*(a^2+-1*b^2)*cos(c+d*x)+a^-1*b^-2*d^-4*f^3*(-6*b^2+6*a^2)*sin(c+d*x)+-6*a^-1*b^-3*d^-4*f^3*(a^2+-1*b^2)^3//2*Polylogarithms.polylog(4,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3*im*f*a^-1*d^-2*(e+f*x)^2*Polylogarithms.polylog(2,exp(im*(c+d*x)))+3*im*f*a^-1*d^-2*(e+f*x)^2*Polylogarithms.polylog(2,-1*exp(im*(c+d*x)))+6*a^-1*b^-3*d^-4*f^3*(a^2+-1*b^2)^3//2*Polylogarithms.polylog(4,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3//4*b^-1*d^-3*f^2*(e+f*x)*cos(c+d*x)*sin(c+d*x)+im*a^-1*b^-3*d^-1*(e+f*x)^3*(a^2+-1*b^2)^3//2*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*a^-1*b^-3*d^-1*(e+f*x)^3*(a^2+-1*b^2)^3//2*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*f*a^-1*b^-2*d^-2*(e+f*x)^2*(-3*b^2+3*a^2)*sin(c+d*x)+-1*a^-1*b^-2*d^-3*f^2*(e+f*x)*(-6*b^2+6*a^2)*cos(c+d*x)+-3*f*a^-1*b^-3*d^-2*(e+f*x)^2*(a^2+-1*b^2)^3//2*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3*f*a^-1*b^-3*d^-2*(e+f*x)^2*(a^2+-1*b^2)^3//2*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-6*im*a^-1*b^-3*d^-3*f^2*(a^2+-1*b^2)^3//2*(e+f*x)*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+6*im*a^-1*b^-3*d^-3*f^2*(a^2+-1*b^2)^3//2*(e+
f*x)*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*(e+f*x)^2*cos(c+d*x)^3*cot(c+d*x), x) == :(-1//6*b^-1*f^-1*(e+f*x)^3+a^-1*d^-1*(e+f*x)^2*cos(c+d*x)+-2*a^-1*d^-1*(e+f*x)^2*arctanh(exp(im*(c+d*x)))+-2*a^-1*d^-3*f^2*Polylogarithms.polylog(3,-1*exp(im*(c+d*x)))+-2*a^-1*d^-3*f^2*cos(c+d*x)+2*a^-1*d^-3*f^2*Polylogarithms.polylog(3,exp(im*(c+d*x)))+1//3*b^-3*f^-1*(e+f*x)^3*(a^2+-1*b^2)+1//4*x*b^-1*d^-2*f^2+-2*f*a^-1*d^-2*(e+f*x)*sin(c+d*x)+-1//2*f*b^-1*d^-2*cos(c+d*x)^2*(e+f*x)+-1//2*b^-1*d^-1*(e+f*x)^2*cos(c+d*x)*sin(c+d*x)+1//4*b^-1*d^-3*f^2*cos(c+d*x)*sin(c+d*x)+a^-1*b^-2*d^-1*(e+f*x)^2*(a^2+-1*b^2)*cos(c+d*x)+-1*a^-1*b^-2*d^-3*f^2*(-2*b^2+2*a^2)*cos(c+d*x)+-2*im*f*a^-1*d^-2*(e+f*x)*Polylogarithms.polylog(2,exp(im*(c+d*x)))+2*im*f*a^-1*d^-2*(e+f*x)*Polylogarithms.polylog(2,-1*exp(im*(c+d*x)))+im*a^-1*b^-3*d^-1*(e+f*x)^2*(a^2+-1*b^2)^3//2*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*a^-1*b^-3*d^-1*(e+f*x)^2*(a^2+-1*b^2)^3//2*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*f*a^-1*b^-2*d^-2*(e+f*x)*(-2*b^2+2*a^2)*sin(c+d*x)+-2*im*a^-1*b^-3*d^-3*f^2*(a^2+-1*b^2)^3//2*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-2*f*a^-1*b^-3*d^-2*(a^2+-1*b^2)^3//2*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+2*im*a^-1*b^-3*d^-3*f^2*(a^2+-1*b^2)^3//2*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+2*f*a^-1*b^-3*d^-2*(a^2+-1*b^2)^3//2*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*cos(c+d*x)^3*(e+f*x)*cot(c+d*x), x) == :(-1//2*e*x*b^-1+-1//4*f*b^-1*x^2+e*x*b^-3*(a^2+-1*b^2)+a^-1*d^-1*(e+f*x)*cos(c+d*x)+(1/2)*f*b^-3*x^2*(a^2+-1*b^2)+-1*f*a^-1*d^-2*sin(c+d*x)+-1*a^-1*d^-1*(2*e+2*f*x)*arctanh(exp(im*(c+d*x)))+-1//4*f*b^-1*d^-2*cos(c+d*x)^2+im*f*a^-1*d^-2*Polylogarithms.polylog(2,-1*exp(im*(c+d*x)))+-1*im*f*a^-1*d^-2*Polylogarithms.polylog(2,exp(im*(c+d*x)))+-1//2*b^-1*d^-1*(e+f*x)*cos(c+d*x)*sin(c+d*x)+f*a^-1*b^-3*d^-2*(a^2+-1*b^2)^3//2*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+a^-1*b^-2*d^-1*(e+f*x)*(a^2+-1*b^2)*cos(c+d*x)+-1*f*a^-1*b^-3*d^-2*(a^2+-1*b^2)^3//2*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*f*a^-1*b^-2*d^-2*(a^2+-1*b^2)*sin(c+d*x)+im*a^-1*b^-3*d^-1*(a^2+-1*b^2)^3//2*(e+f*x)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*a^-1*b^-3*d^-1*(a^2+-1*b^2)^3//2*(e+f*x)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*cos(c+d*x)^3*cot(c+d*x), x) == :((1/2)*x*b^-3*(-3*b^2+2*a^2)+-1*a^-1*d^-1*arctanh(cos(c+d*x))+a*b^-2*d^-1*cos(c+d*x)+-1//2*b^-1*d^-1*cos(c+d*x)*sin(c+d*x)+-2*a^-1*b^-3*d^-1*(a^2+-1*b^2)^3//2*arctan((a^2+-1*b^2)^-1//2*(b+a*tan((1/2)*c+(1/2)*d*x))))
@test integrate((a+b*sin(c+d*x))^-1*(e+f*x)^3*cot(c+d*x)^2*cos(c+d*x), x) == :(-1*a^-1*d^-1*(e+f*x)^3*csc(c+d*x)+-6*a^-1*d^-4*f^3*Polylogarithms.polylog(3,-1*exp(im*(c+d*x)))+6*a^-1*d^-4*f^3*Polylogarithms.polylog(3,exp(im*(c+d*x)))+-1*b*a^-2*d^-1*(e+f*x)^3*log(1+-1*exp(2*im*(c+d*x)))+-6*f*a^-1*d^-2*(e+f*x)^2*arctanh(exp(im*(c+d*x)))+1//4*im*b*a^-2*f^-1*(e+f*x)^4+-1*a^-2*b^-1*d^-1*(e+f*x)^3*(a^2+-1*b^2)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*a^-2*b^-1*d^-1*(e+f*x)^3*(a^2+-1*b^2)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-6*im*a^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(2,exp(im*(c+d*x)))+6*im*a^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(2,-1*exp(im*(c+d*x)))+-3//2*b*a^-2*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(3,exp(2*im*(c+d*x)))+-3//4*im*b*a^-2*d^-4*f^3*Polylogarithms.polylog(4,exp(2*im*(c+d*x)))+1//4*im*a^-2*b^-1*f^-1*(e+f*x)^4*(a^2+-1*b^2)+-1*a^-2*b^-1*d^-3*f^2*(e+f*x)*(-6*b^2+6*a^2)*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*a^-2*b^-1*d^-3*f^2*(e+f*x)*(-6*b^2+6*a^2)*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-6*im*a^-2*b^-1*d^-4*f^3*(a^2+-1*b^2)*Polylogarithms.polylog(4,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-6*im*a^-2*b^-1*d^-4*f^3*(a^2+-1*b^2)*Polylogarithms.polylog(4,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3//2*im*b*f*a^-2*d^-2*(e+f*x)^2*Polylogarithms.polylog(2,exp(2*im*(c+d*x)))+3*im*f*a^-2*b^-1*d^-2*(e+f*x)^2*(a^2+-1*b^2)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3*im*f*a^-2*b^-1*d^-2*(e+f*x)^2*(a^2+-1*b^2)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*(e+f*x)^2*cot(c+d*x)^2*cos(c+d*x), x) == :(-1*a^-1*d^-1*(e+f*x)^2*csc(c+d*x)+-1*b*a^-2*d^-1*(e+f*x)^2*log(1+-1*exp(2*im*(c+d*x)))+-4*f*a^-1*d^-2*(e+f*x)*arctanh(exp(im*(c+d*x)))+-2*im*a^-1*d^-3*f^2*Polylogarithms.polylog(2,exp(im*(c+d*x)))+2*im*a^-1*d^-3*f^2*Polylogarithms.polylog(2,-1*exp(im*(c+d*x)))+-1//2*b*a^-2*d^-3*f^2*Polylogarithms.polylog(3,exp(2*im*(c+d*x)))+1//3*im*b*a^-2*f^-1*(e+f*x)^3+-1*a^-2*b^-1*d^-1*(e+f*x)^2*(a^2+-1*b^2)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*a^-2*b^-1*d^-1*(e+f*x)^2*(a^2+-1*b^2)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*a^-2*b^-1*d^-3*f^2*(-2*b^2+2*a^2)*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*a^-2*b^-1*d^-3*f^2*(-2*b^2+2*a^2)*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+1//3*im*a^-2*b^-1*f^-1*(e+f*x)^3*(a^2+-1*b^2)+im*b*f*a^-2*d^-2*(e+f*x)*Polylogarithms.polylog(2,exp(2*im*(c+d*x)))+2*im*f*a^-2*b^-1*d^-2*(e+f*x)*(a^2+-1*b^2)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+2*im*f*a^-2*b^-1*d^-2*(e+f*x)*(a^2+-1*b^2)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*cot(c+d*x)^2*(e+f*x)*cos(c+d*x), x) == :(-1*f*a^-1*d^-2*arctanh(cos(c+d*x))+-1*a^-1*d^-1*(e+f*x)*csc(c+d*x)+(1/2)*im*b*a^-2*f^-1*(e+f*x)^2+-1*b*a^-2*d^-1*(e+f*x)*log(1+-1*exp(2*im*(c+d*x)))+(1/2)*im*b*f*a^-2*d^-2*Polylogarithms.polylog(2,exp(2*im*(c+d*x)))+(1/2)*im*a^-2*b^-1*f^-1*(e+f*x)^2*(a^2+-1*b^2)+-1*a^-2*b^-1*d^-1*(e+f*x)*(a^2+-1*b^2)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*a^-2*b^-1*d^-1*(e+f*x)*(a^2+-1*b^2)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+im*f*a^-2*b^-1*d^-2*(a^2+-1*b^2)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+im*f*a^-2*b^-1*d^-2*(a^2+-1*b^2)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*cot(c+d*x)^2*cos(c+d*x), x) == :(-1*a^-1*d^-1*csc(c+d*x)+-1*b*a^-2*d^-1*log(sin(c+d*x))+-1*b^-1*d^-1*(1+-1*a^-2*b^2)*log(a+b*sin(c+d*x)))
@test integrate((a+b*sin(c+d*x))^-1*(e+f*x)^3*cos(c+d*x)^2*cot(c+d*x)^2, x) == :(-1//4*a^-1*f^-1*(e+f*x)^4+-1*im*a^-1*d^-1*(e+f*x)^3+-1*a^-1*d^-1*(e+f*x)^3*cot(c+d*x)+3//2*a^-1*d^-4*f^3*Polylogarithms.polylog(3,exp(2*im*(c+d*x)))+-1*b*a^-2*d^-1*(e+f*x)^3*cos(c+d*x)+-6*b*a^-2*d^-4*f^3*sin(c+d*x)+2*b*a^-2*d^-1*(e+f*x)^3*arctanh(exp(im*(c+d*x)))+3*f*a^-1*d^-2*(e+f*x)^2*log(1+-1*exp(2*im*(c+d*x)))+-1//4*a^-1*b^-2*f^-1*(e+f*x)^4*(a^2+-1*b^2)+-1*a^-2*b^-1*d^-1*(e+f*x)^3*(a^2+-1*b^2)*cos(c+d*x)+-1*a^-2*b^-1*d^-4*f^3*(-6*b^2+6*a^2)*sin(c+d*x)+-6*im*b*a^-2*d^-4*f^3*Polylogarithms.polylog(4,exp(im*(c+d*x)))+-6*b*a^-2*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(3,exp(im*(c+d*x)))+-6*a^-2*b^-2*d^-4*f^3*(a^2+-1*b^2)^3//2*Polylogarithms.polylog(4,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3*im*a^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(2,exp(2*im*(c+d*x)))+3*b*f*a^-2*d^-2*(e+f*x)^2*sin(c+d*x)+6*im*b*a^-2*d^-4*f^3*Polylogarithms.polylog(4,-1*exp(im*(c+d*x)))+6*b*a^-2*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(3,-1*exp(im*(c+d*x)))+6*b*a^-2*d^-3*f^2*(e+f*x)*cos(c+d*x)+6*a^-2*b^-2*d^-4*f^3*(a^2+-1*b^2)^3//2*Polylogarithms.polylog(4,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+im*a^-2*b^-2*d^-1*(e+f*x)^3*(a^2+-1*b^2)^3//2*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+f*a^-2*b^-1*d^-2*(e+f*x)^2*(-3*b^2+3*a^2)*sin(c+d*x)+a^-2*b^-1*d^-3*f^2*(e+f*x)*(-6*b^2+6*a^2)*cos(c+d*x)+-1*im*a^-2*b^-2*d^-1*(e+f*x)^3*(a^2+-1*b^2)^3//2*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3*im*b*f*a^-2*d^-2*(e+f*x)^2*Polylogarithms.polylog(2,-1*exp(im*(c+d*x)))+-3*f*a^-2*b^-2*d^-2*(e+f*x)^2*(a^2+-1*b^2)^3//2*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+3*im*b*f*a^-2*d^-2*(e+f*x)^2*Polylogarithms.polylog(2,exp(im*(c+d*x)))+3*f*a^-2*b^-2*d^-2*(e+f*x)^2*(a^2+-1*b^2)^3//2*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-6*im*a^-2*b^-2*d^-3*f^2*(a^2+-1*b^2)^3//2*(e+f*x)*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^
-1*exp(im*(c+d*x)))+6*im*a^-2*b^-2*d^-3*f^2*(a^2+-1*b^2)^3//2*(e+f*x)*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*(e+f*x)^2*cos(c+d*x)^2*cot(c+d*x)^2, x) == :(-1//3*a^-1*f^-1*(e+f*x)^3+-1*im*a^-1*d^-1*(e+f*x)^2+-1*a^-1*d^-1*(e+f*x)^2*cot(c+d*x)+-1*im*a^-1*d^-3*f^2*Polylogarithms.polylog(2,exp(2*im*(c+d*x)))+-1*b*a^-2*d^-1*(e+f*x)^2*cos(c+d*x)+-2*b*a^-2*d^-3*f^2*Polylogarithms.polylog(3,exp(im*(c+d*x)))+2*b*a^-2*d^-1*(e+f*x)^2*arctanh(exp(im*(c+d*x)))+2*b*a^-2*d^-3*f^2*Polylogarithms.polylog(3,-1*exp(im*(c+d*x)))+2*b*a^-2*d^-3*f^2*cos(c+d*x)+2*f*a^-1*d^-2*(e+f*x)*log(1+-1*exp(2*im*(c+d*x)))+-1//3*a^-1*b^-2*f^-1*(e+f*x)^3*(a^2+-1*b^2)+a^-2*b^-1*d^-3*f^2*(-2*b^2+2*a^2)*cos(c+d*x)+-1*a^-2*b^-1*d^-1*(e+f*x)^2*(a^2+-1*b^2)*cos(c+d*x)+2*b*f*a^-2*d^-2*(e+f*x)*sin(c+d*x)+im*a^-2*b^-2*d^-1*(e+f*x)^2*(a^2+-1*b^2)^3//2*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+f*a^-2*b^-1*d^-2*(e+f*x)*(-2*b^2+2*a^2)*sin(c+d*x)+-1*im*a^-2*b^-2*d^-1*(e+f*x)^2*(a^2+-1*b^2)^3//2*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-2*im*b*f*a^-2*d^-2*(e+f*x)*Polylogarithms.polylog(2,-1*exp(im*(c+d*x)))+-2*im*a^-2*b^-2*d^-3*f^2*(a^2+-1*b^2)^3//2*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-2*f*a^-2*b^-2*d^-2*(a^2+-1*b^2)^3//2*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+2*im*b*f*a^-2*d^-2*(e+f*x)*Polylogarithms.polylog(2,exp(im*(c+d*x)))+2*im*a^-2*b^-2*d^-3*f^2*(a^2+-1*b^2)^3//2*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+2*f*a^-2*b^-2*d^-2*(a^2+-1*b^2)^3//2*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*cos(c+d*x)^2*cot(c+d*x)^2*(e+f*x), x) == :(-1*e*x*a^-1+-1//2*f*a^-1*x^2+e*x*a^-1*(1+-1*a^2*b^-2)+f*a^-1*d^-2*log(sin(c+d*x))+(1/2)*f*a^-1*x^2*(1+-1*a^2*b^-2)+-1*a^-1*d^-1*(e+f*x)*cot(c+d*x)+b*f*a^-2*d^-2*sin(c+d*x)+-1*b*a^-2*d^-1*(e+f*x)*cos(c+d*x)+2*b*a^-2*d^-1*(e+f*x)*arctanh(exp(im*(c+d*x)))+im*b*f*a^-2*d^-2*Polylogarithms.polylog(2,exp(im*(c+d*x)))+f*a^-2*b^-1*d^-2*(a^2+-1*b^2)*sin(c+d*x)+f*a^-2*b^-2*d^-2*(a^2+-1*b^2)^3//2*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*b*f*a^-2*d^-2*Polylogarithms.polylog(2,-1*exp(im*(c+d*x)))+-1*f*a^-2*b^-2*d^-2*(a^2+-1*b^2)^3//2*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*a^-2*b^-1*d^-1*(e+f*x)*(a^2+-1*b^2)*cos(c+d*x)+im*a^-2*b^-2*d^-1*(a^2+-1*b^2)^3//2*(e+f*x)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*a^-2*b^-2*d^-1*(a^2+-1*b^2)^3//2*(e+f*x)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
@test integrate((a+b*sin(c+d*x))^-1*cos(c+d*x)^2*cot(c+d*x)^2, x) == :(-1*a*x*b^-2+-1*a^-1*d^-1*cot(c+d*x)+-1*b^-1*d^-1*cos(c+d*x)+b*a^-2*d^-1*arctanh(cos(c+d*x))+2*a^-2*b^-2*d^-1*(a^2+-1*b^2)^3//2*arctan((a^2+-1*b^2)^-1//2*(b+a*tan((1/2)*c+(1/2)*d*x))))
@test integrate((a+b*sin(c+d*x))^-1*(e+f*x)^3*cos(c+d*x)^3*cot(c+d*x)^2, x) == :(-1*a^-1*d^-1*(e+f*x)^3*csc(c+d*x)+-1*a^-1*d^-1*(e+f*x)^3*sin(c+d*x)+-6*a^-1*d^-4*f^3*Polylogarithms.polylog(3,-1*exp(im*(c+d*x)))+6*a^-1*d^-4*f^3*Polylogarithms.polylog(3,exp(im*(c+d*x)))+6*a^-1*d^-4*f^3*cos(c+d*x)+-1//4*b*a^-2*d^-1*(e+f*x)^3+(1/2)*b*a^-2*d^-1*(e+f*x)^3*sin(c+d*x)^2+-1*b*a^-2*d^-1*(e+f*x)^3*log(1+-1*exp(2*im*(c+d*x)))+-6*f*a^-1*d^-2*(e+f*x)^2*arctanh(exp(im*(c+d*x)))+-3*f*a^-1*d^-2*(e+f*x)^2*cos(c+d*x)+6*a^-1*d^-3*f^2*(e+f*x)*sin(c+d*x)+-1//4*a^-2*b^-1*d^-1*(e+f*x)^3*(a^2+-1*b^2)+1//4*im*b*a^-2*f^-1*(e+f*x)^4+3//8*b*x*a^-2*d^-3*f^3+a^-1*b^-2*d^-4*f^3*(-6*b^2+6*a^2)*cos(c+d*x)+a^-2*b^-3*d^-1*(e+f*x)^3*(a^2+-1*b^2)^2*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+a^-2*b^-3*d^-1*(e+f*x)^3*(a^2+-1*b^2)^2*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+(1/2)*a^-2*b^-1*d^-1*(e+f*x)^3*sin(c+d*x)^2*(a^2+-1*b^2)+-1*a^-1*b^-2*d^-1*(e+f*x)^3*(a^2+-1*b^2)*sin(c+d*x)+-6*im*a^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(2,exp(im*(c+d*x)))+6*im*a^-1*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(2,-1*exp(im*(c+d*x)))+-3//2*b*a^-2*d^-3*f^2*(e+f*x)*Polylogarithms.polylog(3,exp(2*im*(c+d*x)))+-3//4*im*b*a^-2*d^-4*f^3*Polylogarithms.polylog(4,exp(2*im*(c+d*x)))+-3//4*b*a^-2*d^-3*f^2*sin(c+d*x)^2*(e+f*x)+-3//8*b*a^-2*d^-4*f^3*cos(c+d*x)*sin(c+d*x)+-1//4*im*a^-2*b^-3*f^-1*(e+f*x)^4*(a^2+-1*b^2)^2+1//8*x*a^-2*b^-1*d^-3*f^3*(-3*b^2+3*a^2)+a^-1*b^-2*d^-3*f^2*(e+f*x)*(-6*b^2+6*a^2)*sin(c+d*x)+-1*f*a^-1*b^-2*d^-2*(e+f*x)^2*(-3*b^2+3*a^2)*cos(c+d*x)+6*im*a^-2*b^-3*d^-4*f^3*(a^2+-1*b^2)^2*Polylogarithms.polylog(4,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+6*im*a^-2*b^-3*d^-4*f^3*(a^2+-1*b^2)^2*Polylogarithms.polylog(4,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+6*a^-2*b^-3*d^-3*f^2*(a^2+-1*b^2)^2*(e+f*x)*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+6*a^-2*b^-3*d^-3*f^2*(a^2+-1*b^2)^2*(e+f*x)*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2)
)^-1*exp(im*(c+d*x)))+-1//4*a^-2*b^-1*d^-3*f^2*sin(c+d*x)^2*(e+f*x)*(-3*b^2+3*a^2)+-1//8*a^-2*b^-1*d^-4*f^3*(-3*b^2+3*a^2)*cos(c+d*x)*sin(c+d*x)+3//2*im*b*f*a^-2*d^-2*(e+f*x)^2*Polylogarithms.polylog(2,exp(2*im*(c+d*x)))+3//4*b*f*a^-2*d^-2*(e+f*x)^2*cos(c+d*x)*sin(c+d*x)+-3*im*f*a^-2*b^-3*d^-2*(e+f*x)^2*(a^2+-1*b^2)^2*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-3*im*f*a^-2*b^-3*d^-2*(e+f*x)^2*(a^2+-1*b^2)^2*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+1//4*f*a^-2*b^-1*d^-2*(e+f*x)^2*(-3*b^2+3*a^2)*cos(c+d*x)*sin(c+d*x))
# Regression tests for antiderivatives of the form
#   ∫ (e + f x)^m cos^3(c + d x) cot^2(c + d x) / (a + b sin(c + d x)) dx,  m = 2, 1, 0.
# Each expected result is a quoted Julia expression (`:(...)`) that must match the
# integrator's output verbatim, term for term — including `Polylogarithms.polylog`
# calls and the exact rational/imaginary coefficient forms (`1//2` vs `(1/2)`).
@test integrate((a+b*sin(c+d*x))^-1*(e+f*x)^2*cos(c+d*x)^3*cot(c+d*x)^2, x) == :(-1*a^-1*d^-1*(e+f*x)^2*csc(c+d*x)+-1*a^-1*d^-1*(e+f*x)^2*sin(c+d*x)+2*a^-1*d^-3*f^2*sin(c+d*x)+(1/2)*b*a^-2*d^-1*(e+f*x)^2*sin(c+d*x)^2+-1*b*a^-2*d^-1*(e+f*x)^2*log(1+-1*exp(2*im*(c+d*x)))+-4*f*a^-1*d^-2*(e+f*x)*arctanh(exp(im*(c+d*x)))+-2*im*a^-1*d^-3*f^2*Polylogarithms.polylog(2,exp(im*(c+d*x)))+-2*f*a^-1*d^-2*(e+f*x)*cos(c+d*x)+2*im*a^-1*d^-3*f^2*Polylogarithms.polylog(2,-1*exp(im*(c+d*x)))+-1//2*b*a^-2*d^-3*f^2*Polylogarithms.polylog(3,exp(2*im*(c+d*x)))+-1//4*b*a^-2*d^-1*f^2*x^2+-1//4*b*a^-2*d^-3*f^2*sin(c+d*x)^2+1//3*im*b*a^-2*f^-1*(e+f*x)^3+a^-1*b^-2*d^-3*f^2*(-2*b^2+2*a^2)*sin(c+d*x)+a^-2*b^-3*d^-1*(e+f*x)^2*(a^2+-1*b^2)^2*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+a^-2*b^-3*d^-1*(e+f*x)^2*(a^2+-1*b^2)^2*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+(1/2)*a^-2*b^-1*d^-1*(e+f*x)^2*sin(c+d*x)^2*(a^2+-1*b^2)+-1*a^-1*b^-2*d^-1*(e+f*x)^2*(a^2+-1*b^2)*sin(c+d*x)+2*a^-2*b^-3*d^-3*f^2*(a^2+-1*b^2)^2*Polylogarithms.polylog(3,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+2*a^-2*b^-3*d^-3*f^2*(a^2+-1*b^2)^2*Polylogarithms.polylog(3,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1//2*b*e*f*x*a^-2*d^-1+-1//3*im*a^-2*b^-3*f^-1*(e+f*x)^3*(a^2+-1*b^2)^2+-1//4*a^-2*b^-1*d^-1*f^2*x^2*(a^2+-1*b^2)+-1//4*a^-2*b^-1*d^-3*f^2*sin(c+d*x)^2*(a^2+-1*b^2)+im*b*f*a^-2*d^-2*(e+f*x)*Polylogarithms.polylog(2,exp(2*im*(c+d*x)))+(1/2)*b*f*a^-2*d^-2*(e+f*x)*cos(c+d*x)*sin(c+d*x)+-1*f*a^-1*b^-2*d^-2*(e+f*x)*(-2*b^2+2*a^2)*cos(c+d*x)+-1//2*e*f*x*a^-2*b^-1*d^-1*(a^2+-1*b^2)+(1/2)*f*a^-2*b^-1*d^-2*(e+f*x)*(a^2+-1*b^2)*cos(c+d*x)*sin(c+d*x)+-2*im*f*a^-2*b^-3*d^-2*(a^2+-1*b^2)^2*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-2*im*f*a^-2*b^-3*d^-2*(a^2+-1*b^2)^2*(e+f*x)*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x))))
# Same integrand with the linear factor (e + f x) to the first power.
@test integrate((a+b*sin(c+d*x))^-1*cos(c+d*x)^3*cot(c+d*x)^2*(e+f*x), x) == :(-1*f*a^-1*d^-2*arctanh(cos(c+d*x))+-1*f*a^-1*d^-2*cos(c+d*x)+-1*a^-1*d^-1*(e+f*x)*csc(c+d*x)+-1*a^-1*d^-1*(e+f*x)*sin(c+d*x)+(1/2)*im*b*a^-2*f^-1*(e+f*x)^2+(1/2)*b*a^-2*d^-1*sin(c+d*x)^2*(e+f*x)+-1*b*a^-2*d^-1*(e+f*x)*log(1+-1*exp(2*im*(c+d*x)))+-1//4*b*f*x*a^-2*d^-1+a^-2*b^-3*d^-1*(a^2+-1*b^2)^2*(e+f*x)*log(1+-1*im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+a^-2*b^-3*d^-1*(a^2+-1*b^2)^2*(e+f*x)*log(1+-1*im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+(1/2)*im*b*f*a^-2*d^-2*Polylogarithms.polylog(2,exp(2*im*(c+d*x)))+(1/2)*a^-2*b^-1*d^-1*sin(c+d*x)^2*(e+f*x)*(a^2+-1*b^2)+-1*f*a^-1*b^-2*d^-2*(a^2+-1*b^2)*cos(c+d*x)+-1*a^-1*b^-2*d^-1*(e+f*x)*(a^2+-1*b^2)*sin(c+d*x)+-1//2*im*a^-2*b^-3*f^-1*(e+f*x)^2*(a^2+-1*b^2)^2+-1//4*f*x*a^-2*b^-1*d^-1*(a^2+-1*b^2)+1//4*b*f*a^-2*d^-2*cos(c+d*x)*sin(c+d*x)+-1*im*f*a^-2*b^-3*d^-2*(a^2+-1*b^2)^2*Polylogarithms.polylog(2,im*b*(a+(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+-1*im*f*a^-2*b^-3*d^-2*(a^2+-1*b^2)^2*Polylogarithms.polylog(2,im*b*(a+-1*(a^2+-1*b^2)^(1/2))^-1*exp(im*(c+d*x)))+1//4*f*a^-2*b^-1*d^-2*(a^2+-1*b^2)*cos(c+d*x)*sin(c+d*x))
# Base case m = 0: no polynomial factor, elementary antiderivative.
@test integrate((a+b*sin(c+d*x))^-1*cos(c+d*x)^3*cot(c+d*x)^2, x) == :((1/2)*b^-1*d^-1*sin(c+d*x)^2+-1*a^-1*d^-1*csc(c+d*x)+-1*a*b^-2*d^-1*sin(c+d*x)+-1*b*a^-2*d^-1*log(sin(c+d*x))+a^-2*b^-3*d^-1*(a^2+-1*b^2)^2*log(a+b*sin(c+d*x)))
|
{"hexsha": "45a5b45220e77c32f15003c89658efabe5f3d48b", "size": 148325, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/4 Trig functions/4.1 Sine/4.1.10 (c+d x)^m (a+b sin)^n.jl", "max_stars_repo_name": "bradeneliason/Rubin.jl", "max_stars_repo_head_hexsha": "4becf75a5b265aced3ecd4e2bc8e7473c68c4840", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-13T03:00:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-13T03:00:21.000Z", "max_issues_repo_path": "test/4 Trig functions/4.1 Sine/4.1.10 (c+d x)^m (a+b sin)^n.jl", "max_issues_repo_name": "bradeneliason/Rubin.jl", "max_issues_repo_head_hexsha": "4becf75a5b265aced3ecd4e2bc8e7473c68c4840", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/4 Trig functions/4.1 Sine/4.1.10 (c+d x)^m (a+b sin)^n.jl", "max_forks_repo_name": "bradeneliason/Rubin.jl", "max_forks_repo_head_hexsha": "4becf75a5b265aced3ecd4e2bc8e7473c68c4840", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 525.975177305, "max_line_length": 4067, "alphanum_fraction": 0.4949941008, "num_tokens": 82729}
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import multiprocessing as mp
import os
from ... import _init_paths
import cv2
#import detectron2.data.transforms as T
from PIL import Image
from numpy import asarray
import numpy as np
import torch
#from detectron2.checkpoint import DetectionCheckpointer
#from detectron2.config import get_cfg
#from detectron2.data import MetadataCatalog
#from detectron2.data.detection_utils import read_image
#from detectron2.modeling import build_model
#from detectron2.utils.logger import setup_logger
from lib.model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from roi_data_layer.roidb import combined_roidb
from roi_data_layer.roibatchLoader import roibatchLoader
from grad_cam import GradCAM, GradCamPlusPlus
from skimage import io
from torch import nn
# constants
WINDOW_NAME = "COCO detections"
def setup_cfg(args):
    """Build a frozen detectron2-style config from parsed CLI arguments.

    NOTE(review): ``get_cfg`` comes from the detectron2 import that is
    commented out at the top of this file, and ``main`` never calls this
    helper — confirm the import before using it.
    """
    config = get_cfg()
    config.merge_from_file(args.config_file)
    config.merge_from_list(args.opts)
    # Apply one confidence threshold to every builtin model head.
    threshold = args.confidence_threshold
    config.MODEL.RETINANET.SCORE_THRESH_TEST = threshold
    config.MODEL.ROI_HEADS.SCORE_THRESH_TEST = threshold
    config.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = threshold
    config.freeze()
    return config
def get_last_conv_name(net):
    """Return the name of the last ``nn.Conv2d`` module in ``net``.

    Walks ``net.named_modules()`` in registration order and keeps the name
    of every Conv2d seen, so the final one wins.

    :param net: any ``torch.nn.Module``
    :return: module name string, or ``None`` if ``net`` has no Conv2d
    """
    last_conv = None
    for module_name, module in net.named_modules():
        if isinstance(module, nn.Conv2d):
            last_conv = module_name
    return last_conv
class GuidedBackPropagation(object):
    """Guided backpropagation: rectify gradients at every ReLU of a network.

    On construction, a backward hook is attached to each ReLU module so that
    the gradient flowing back through it is clamped at zero, and the network
    is switched to eval mode.
    """

    def __init__(self, net):
        self.net = net
        # Hook every ReLU so negative incoming gradients are zeroed.
        for _, module in self.net.named_modules():
            if isinstance(module, nn.ReLU):
                module.register_backward_hook(self.backward_hook)
        self.net.eval()

    @classmethod
    def backward_hook(cls, module, grad_in, grad_out):
        """Replace the first input gradient by its positive part.

        :param module: the hooked ReLU module
        :param grad_in: tuple of gradients w.r.t. the module's inputs
        :param grad_out: tuple of gradients w.r.t. the module's outputs
        :return: tuple(new_grad_in,)
        """
        return (torch.clamp(grad_in[0], min=0.0),)

    def __call__(self, inputs, index=0):
        """Backpropagate one detection score to the input image.

        :param inputs: {"image": [C,H,W], "height": height, "width": width}
        :param index: index of the detection instance to explain
        :return: gradient on the input image tensor, shape [3,H,W]
        """
        self.net.zero_grad()
        predictions = self.net.inference([inputs])
        target_score = predictions[0]['instances'].scores[index]
        target_score.backward()
        return inputs['image'].grad  # [3,H,W]
def norm_image(image):
    """Rescale an image so its values span the full uint8 range [0, 255].

    :param image: [H,W] or [H,W,C] array, any numeric dtype
    :return: uint8 array of the same shape
    """
    # Work on a float copy. The original version did `image -= np.max(np.min(image), 0)`,
    # which passes 0 as an *axis* to a scalar (an error in modern NumPy) and
    # mutated integer arrays in place with float results; the intent was
    # `max(min, 0)`. Also guard against division by zero on constant images.
    image = np.asarray(image, dtype=np.float32).copy()
    image -= max(float(np.min(image)), 0.0)
    peak = float(np.max(image))
    if peak > 0.0:
        image /= peak
    image *= 255.
    return np.uint8(image)
def gen_cam(image, mask):
    """Overlay a CAM mask onto an image crop.

    :param image: [H,W,C] RGB crop the mask corresponds to
    :param mask: [H,W] activation map with values in [0, 1]
    :return: tuple(cam, heatmap) — ``cam`` is the uint8 overlay image,
             ``heatmap`` the float RGB colour map in [0, 1]
    """
    # Colour the mask with a JET colour map, then convert BGR -> RGB.
    heatmap = cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET)
    heatmap = np.float32(heatmap) / 255
    heatmap = heatmap[..., ::-1]  # bgr to rgb
    # Bug fix: this line was commented out, leaving `cam` undefined and
    # making the function raise NameError on every call.
    cam = heatmap + np.float32(image)
    return norm_image(cam), heatmap
def gen_gb(grad):
    """Rearrange a guided-backpropagation gradient from [3,H,W] to [H,W,3].

    :param grad: gradient array/tensor with channels first
    :return: the same data with channels last
    """
    return np.transpose(grad, (1, 2, 0))
def save_image(image_dicts, input_image_name, network='frcnn', output_dir='./results'):
    """Write every image in ``image_dicts`` to ``output_dir`` as a JPEG.

    Output files are named ``<stem>-<network>-<key>.jpg`` where ``stem`` is
    ``input_image_name`` without its extension.

    :param image_dicts: mapping from a tag (e.g. 'heatmap') to an image array
    :param input_image_name: source image file name; extension is dropped
    :param network: network tag embedded in the output file name
    :param output_dir: directory the JPEGs are written into
    """
    stem = os.path.splitext(input_image_name)[0]
    for tag, picture in image_dicts.items():
        target = os.path.join(output_dir, '{}-{}-{}.jpg'.format(stem, network, tag))
        io.imsave(target, picture)
def get_parser():
    """Build the command-line parser for the demo.

    Defines ``--config-file``, ``--input``, ``--output``,
    ``--confidence-threshold`` (default 0.8) and a trailing ``--opts`` list
    of 'KEY VALUE' config overrides.
    """
    parser = argparse.ArgumentParser(description="jwyang demo for builtin models")
    parser.add_argument("--config-file",
                        default="configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml",
                        metavar="FILE",
                        help="path to config file")
    parser.add_argument("--input", help="A list of space separated input images")
    parser.add_argument("--output",
                        help="A file or directory to save output visualizations. "
                        "If not given, will show output in an OpenCV window.")
    parser.add_argument("--confidence-threshold",
                        type=float,
                        default=0.8,
                        help="Minimum score for instance predictions to be shown")
    parser.add_argument("--opts",
                        help="Modify config options using the command-line 'KEY VALUE' pairs",
                        default=[],
                        nargs=argparse.REMAINDER)
    return parser
def main(args):
    """Run Grad-CAM and Grad-CAM++ on one input image with a Faster R-CNN model.

    Pipeline: select dataset/imdb configuration from ``args``, load a trained
    checkpoint, compute Grad-CAM / Grad-CAM++ masks through the last conv
    layer, and save the prediction crop plus heatmaps via ``save_image``.

    NOTE(review): ``get_parser`` does not define ``cuda``, ``dataset``,
    ``net``, ``large_scale``, ``load_dir``, ``checksession``, ``checkepoch``,
    ``checkpoint`` or ``class_agnostic``, and ``vgg16``/``resnet``/``GradCAM``
    imports must come from elsewhere in the project — confirm upstream.
    """
    import pprint  # stdlib; not imported at module level

    print(cfg)
    if torch.cuda.is_available() and not args.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")
    np.random.seed(cfg.RNG_SEED)

    # Map the dataset name to imdb names and anchor settings.
    if args.dataset == "pascal_voc":
        args.imdb_name = "voc_2007_trainval"
        args.imdbval_name = "voc_2007_test"
        args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
    elif args.dataset == "pascal_voc_0712":
        args.imdb_name = "voc_2007_trainval+voc_2012_trainval"
        args.imdbval_name = "voc_2007_test"
        args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
    elif args.dataset == "coco":
        args.imdb_name = "coco_2014_train+coco_2014_valminusminival"
        args.imdbval_name = "coco_2014_minival"
        args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
    elif args.dataset == "imagenet":
        args.imdb_name = "imagenet_train"
        args.imdbval_name = "imagenet_val"
        args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
    elif args.dataset == "vg":
        args.imdb_name = "vg_150-50-50_minitrain"
        args.imdbval_name = "vg_150-50-50_minival"
        args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']

    args.cfg_file = "./../../cfgs/{}_ls.yml".format(args.net) if args.large_scale else "cfgs/{}.yml".format(args.net)
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)
    print('Using config:')
    # Bug fix: `pprint` was used without being imported anywhere in the file.
    pprint.pprint(cfg)

    cfg.TRAIN.USE_FLIPPED = False
    imdb, roidb, ratio_list, ratio_index = combined_roidb(args.imdbval_name, False)
    imdb.competition_mode(on=True)
    print('{:d} roidb entries'.format(len(roidb)))

    input_dir = args.load_dir + "/" + args.net + "/" + args.dataset
    if not os.path.exists(input_dir):
        raise Exception('There is no input directory for loading network from ' + input_dir)
    load_name = os.path.join(input_dir,
                             'faster_rcnn_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))

    # Initialize the network.
    if args.net == 'vgg16':
        fasterRCNN = vgg16(imdb.classes, pretrained=False, class_agnostic=args.class_agnostic)
    elif args.net == 'res101':
        fasterRCNN = resnet(imdb.classes, 101, pretrained=False, class_agnostic=args.class_agnostic)
    elif args.net == 'res50':
        fasterRCNN = resnet(imdb.classes, 50, pretrained=False, class_agnostic=args.class_agnostic)
    elif args.net == 'res152':
        fasterRCNN = resnet(imdb.classes, 152, pretrained=False, class_agnostic=args.class_agnostic)
    else:
        print("network is not defined")
        import pdb  # bug fix: pdb was used without being imported
        pdb.set_trace()
    fasterRCNN.create_architecture()

    # Load the trained weights.
    print("load checkpoint %s" % (load_name))
    checkpoint = torch.load(load_name)
    fasterRCNN.load_state_dict(checkpoint['model'])
    if 'pooling_mode' in checkpoint.keys():
        cfg.POOLING_MODE = checkpoint['pooling_mode']
    print('load model successfully!')

    # Load the input image.
    path = os.path.expanduser(args.input)
    original_image = asarray(Image.open(path))
    height, width = original_image.shape[:2]
    image = roibatchLoader(roidb, ratio_list, ratio_index, 1,
                           imdb.num_classes, training=False, normalize=False)
    # Bug fix: the DataLoader was built from the undefined name `dataset` and
    # iterated through the undefined name `dataloader`.
    images = torch.utils.data.DataLoader(image, batch_size=1,
                                         shuffle=False, num_workers=0,
                                         pin_memory=True)
    images_iter = iter(images)
    # NOTE(review): `image` here is the roibatchLoader dataset object, not a
    # CHW tensor; the Grad-CAM wrapper likely expects a tensor (e.g. from
    # `next(images_iter)`) — confirm against GradCAM's implementation.
    inputs = {"image": image, "height": height, "width": width}

    # Grad-CAM
    layer_name = get_last_conv_name(fasterRCNN)
    grad_cam = GradCAM(fasterRCNN, layer_name)
    mask, box, class_id = grad_cam(inputs)  # cam mask
    grad_cam.remove_handlers()

    image_dict = {}
    img = original_image[..., ::-1]
    x1, y1, x2, y2 = box
    image_dict['predict_box'] = img[y1:y2, x1:x2]
    image_cam, image_dict['heatmap'] = gen_cam(img[y1:y2, x1:x2], mask)

    # Grad-CAM++ (bug fix: was constructed from the undefined name `model`).
    grad_cam_plus_plus = GradCamPlusPlus(fasterRCNN, layer_name)
    mask_plus_plus = grad_cam_plus_plus(inputs)  # cam mask
    _, image_dict['heatmap++'] = gen_cam(img[y1:y2, x1:x2], mask_plus_plus)
    grad_cam_plus_plus.remove_handlers()

    # Class label (bug fix: `MetadataCatalog` belonged to the commented-out
    # detectron2 imports; the imdb already carries the class names).
    label = imdb.classes[class_id]
    print("label:{}".format(label))

    save_image(image_dict, os.path.basename(path))
if __name__ == "__main__":
"""
Usage:export KMP_DUPLICATE_LIB_OK=TRUE
python detection/demo.py --config-file detection/faster_rcnn_R_50_C4.yaml \
--input ./examples/pic1.jpg \
--opts MODEL.WEIGHTS /Users/yizuotian/pretrained_model/model_final_b1acc2.pkl MODEL.DEVICE cpu
"""
mp.set_start_method("spawn", force=True)
arguments = get_parser().parse_args()
main(arguments)
|
{"hexsha": "71eefe8812dec00a8d1d2bb73415aecb0f32156d", "size": 10659, "ext": "py", "lang": "Python", "max_stars_repo_path": "Grad-CAM.pytorch/detection/demo.py", "max_stars_repo_name": "jasonlai777/Faster-R-CNN", "max_stars_repo_head_hexsha": "b5c0c18a9b5faabd4b6ef23346aff85104df7356", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Grad-CAM.pytorch/detection/demo.py", "max_issues_repo_name": "jasonlai777/Faster-R-CNN", "max_issues_repo_head_hexsha": "b5c0c18a9b5faabd4b6ef23346aff85104df7356", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Grad-CAM.pytorch/detection/demo.py", "max_forks_repo_name": "jasonlai777/Faster-R-CNN", "max_forks_repo_head_hexsha": "b5c0c18a9b5faabd4b6ef23346aff85104df7356", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.5188679245, "max_line_length": 117, "alphanum_fraction": 0.6577540107, "include": true, "reason": "import numpy,from numpy", "num_tokens": 2842}
|
# --------------
# Import Libraries
import os
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings('ignore')

# Code starts here
# NOTE(review): `path` is injected by the hosting exercise platform; it is
# not defined anywhere in this file.
df = pd.read_csv(path)
# Normalize column names: strip whitespace, lowercase, spaces -> underscores.
df.columns = df.columns.str.strip().str.lower().str.replace(' ', '_')
# The CSV encodes missing values as the literal string 'NaN'.
df = df.replace('NaN', np.nan)
print(df.head())
# Code ends here


# --------------
from sklearn.model_selection import train_test_split

df.set_index(keys='serial_number', inplace=True, drop=True)
# Code starts
df.established_date = pd.to_datetime(df.established_date)
df.acquired_date = pd.to_datetime(df.acquired_date)
# Target is the 2016 deposits column; everything else is a feature.
y = df['2016_deposits']
X = df.drop('2016_deposits', axis=1)
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.25, random_state=3)
# Code ends here


# --------------
time_col = ['established_date', 'acquired_date']
# Code starts here
# Convert each datetime column into fractional "years since" that date.
for col_name in time_col:
    new_col_name = 'since_' + col_name
    # Bug fix: `pd.datetime` was deprecated in pandas 1.0 and removed in 2.0;
    # `pd.Timestamp.now()` is the supported equivalent.
    X_train[new_col_name] = pd.Timestamp.now() - X_train[col_name]
    X_val[new_col_name] = pd.Timestamp.now() - X_val[col_name]
    X_train[new_col_name] = X_train[new_col_name].apply(lambda x: float(x.days) / 365)
    X_val[new_col_name] = X_val[new_col_name].apply(lambda x: float(x.days) / 365)
X_train = X_train.drop(time_col, axis=1)
X_val = X_val.drop(time_col, axis=1)
print(X_train.head())
# Code ends here


# --------------
from sklearn.preprocessing import LabelEncoder

# Object-dtype (categorical) columns of the training features.
cat = X_train.select_dtypes(include='O').columns.tolist()
# Code starts here
X_train = X_train.fillna(0)
X_val = X_val.fillna(0)
le = LabelEncoder()
for x in cat:
    X_train[x] = le.fit_transform(X_train[x])
    # NOTE(review): refitting the encoder on the validation split can assign
    # different integer codes to the same category; ideally fit on train and
    # only `transform` here (handling unseen labels) — kept as-is to match
    # the exercise's expected output.
    X_val[x] = le.fit_transform(X_val[x])
# One hot encoding
X_train_temp = pd.get_dummies(data=X_train, columns=cat)
X_val_temp = pd.get_dummies(data=X_val, columns=cat)
# Code ends here


# --------------
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_squared_error

# Code starts here
# NOTE(review): both models train on the label-encoded X_train rather than
# the one-hot X_train_temp built above — confirm which encoding is intended.
dt = DecisionTreeRegressor(random_state=5)
dt.fit(X_train, y_train)
accuracy = dt.score(X_val, y_val)  # R^2 on the validation split
y_pred = dt.predict(X_val)
rmse = np.sqrt(mean_squared_error(y_val, y_pred))
print(accuracy, rmse)


# --------------
from xgboost import XGBRegressor

# Code starts here
xgb = XGBRegressor(max_depth=50, learning_rate=0.83, n_estimators=100)
xgb.fit(X_train, y_train)
accuracy = xgb.score(X_val, y_val)
y_pred = xgb.predict(X_val)
rmse = np.sqrt(mean_squared_error(y_val, y_pred))
print(accuracy, rmse)
# Code ends here
|
{"hexsha": "c3fa61ffe21e2f2f25c051882e00220742044aed", "size": 2452, "ext": "py", "lang": "Python", "max_stars_repo_path": "code.py", "max_stars_repo_name": "Jyo172/ga-learner-dsmp-repo-cash-deposit-prediction", "max_stars_repo_head_hexsha": "16b6f2aabc31f131b624a5f06b8c0302f21860a9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code.py", "max_issues_repo_name": "Jyo172/ga-learner-dsmp-repo-cash-deposit-prediction", "max_issues_repo_head_hexsha": "16b6f2aabc31f131b624a5f06b8c0302f21860a9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code.py", "max_forks_repo_name": "Jyo172/ga-learner-dsmp-repo-cash-deposit-prediction", "max_forks_repo_head_hexsha": "16b6f2aabc31f131b624a5f06b8c0302f21860a9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.0204081633, "max_line_length": 82, "alphanum_fraction": 0.7336867863, "include": true, "reason": "import numpy", "num_tokens": 652}
|
\documentclass[a4paper]{article}
\input{temp}
\setcounter{section}{-1}
\begin{document}
\title{Representation Theory}
\maketitle
\newpage
\tableofcontents
\newpage
\section{Introduction}
Representation theory is the theory of how \emph{groups} act as groups of linear transformations on \emph{vector spaces}.
Here the groups are either \emph{finite}, or \emph{compact topological groups} (infinite), for example, $SU(n)$ and $O(n)$. The vector spaces we consider are finite dimensional, and usually over $\C$. Actions are \emph{linear} (see below).
Some books: James-Liebeck (CUP); Alperin-Bell (Springer); Charles Thomas, \emph{Representations of finite and Lie groups}; Online notes: SM, Teleman; P.Webb \emph{A course in finite group representation theory} (CUP); Charlie Curtis, \emph{Pioneers of representation theory} (history).
\newpage
\section{Group actions}
Throughout this course, if not specified otherwise:\\
$\bullet$ $F$ is a field, usually $\C$, $\R$ or $\Q$. When the field is one of these, we are discussing \emph{ordinary representation theory}. Sometimes $F=F_p$ or $\bar{F}_p$ (algebraic closure, see Galois Theory), in which case the theory is called \emph{modular representation theory};\\
$\bullet$ $V$ is a vector space over $F$, always finite dimensional;\\
$GL(V) =\{\theta : V \to V, \theta$ linear, invertible$\}$, i.e. $\det \theta \neq 0$.
Recall from Linear Algebra:\\
If $\dim_F V = n < \infty$, choose basis $e_1,...,e_n$ over $F$, so we can identify it with $F^n$. Then $\theta \in GL(V)$ corresponds to an $n \times n$ matrix $A_\theta = (a_{ij})$, where $\theta(e_j) = \sum_i a_{ij} e_i$. In fact, we have $A_\theta \in GL_n(F)$, the general linear group.
(1.1) $GL(V) \cong GL_n(F)$ as groups by $\theta \to A_\theta$ ($A_{\theta_1 \theta_2} = A_{\theta_1} A_{\theta_2}$ and bijection).\\
Choosing different basis gives different isomorphism to $GL_n(F)$, but:
(1.2) Matrices $A_1,A_2$ represent the same element of $GL(V)$ w.r.t different bases iff they are conjugate (similar), i.e. $\exists X \in GL_n(F)$ s.t. $A_2 =XA_1 X^{-1}$.
Recall that $\tr(A) = \sum_i a_{ii}$ where $A = (a_{ij})$, the \emph{trace} of $A$.
(1.3) $\tr(XAX^{-1}) = \tr(A)$, hence we can define $\tr(\theta) = \tr(A_{\theta})$ independent of basis.
(1.4) Let $\alpha \in GL(V)$ where $V$ in f.d. over $\C$, with $\alpha^m = \iota$ for some $m$ (here $\iota$ is the identity map). Then $\alpha$ is diagonalisable.
Recall $End V$ is the set of all linear maps $V \to V$, e.g. $End(F^n) = M_n(F)$, the $n \times n$ matrices.
(1.5) \emph{Proposition.} Take $V$ f.d. over $\C$, $\alpha \in End(V)$. Then $\alpha$ is diagonalisable iff there exists a polynomial $f$ with distinct linear factors with $f(\alpha) = 0$. For example, in (1.4), where $\alpha^m = \iota$, we take $f = X^m - 1 = \prod_{j=0}^{m-1} (X-\omega^j)$ where $\omega = e^{2\pi i/m}$ is the ($m^{th}$) root of unity. In fact we have:
(1.4)* A finite family of commuting separately diagonalisable automorphisms of a $\C$-vector space can be simultaneously diagonalised (useful in abelian groups).
Recall from Group Theory:\\
(1.6) The symmetric group, $S_n = Sym(X)$ on the set $X = \{1,...,n\}$ is the set of all permutations of $X$. $|S_n| = n!$. The alternating group $A_n$ on $X$ is the set of products of an even number of transpositions (2-cycles). $|A_n| = \frac{n!}{2}$.
(1.7) Cyclic groups of order $m$: $C_m = <x:x^m = 1>$. For example, $(\Z/m\Z, +)$; also, the group of $m^{th}$ roots of unity in $\C$ (inside $GL_1(\C)$ = $\C^*$, the multiplicative group of $\C$). We also have the group of rotations, centre $O$ of regular $m-$gon in $\R^2$ (inside $GL_2(\R)$).
(1.8) Dihedral groups $D_{2m}$ of order $2m = <x,y: x^m = y^2 = 1, yxy^{-1} = x^{-1}>$. Think of this as the set of rotations and reflections preserving a regular $m$-gon.
(1.9) Quaternion group, $Q_8 = <x,y|x^4 = 1, y^2 = x^2, yxy^{-1} = x^{-1}>$ of order $8$. For example, in $GL_2(\C)$, put $i={{i\ 0} \choose {0 \ i}}, j = {{0 \ 1} \choose {-1 \ 0}}, k = {{0 \ i} \choose {i \ 0}}$, then $Q_8 = \{\pm I_2, \pm i, \pm j, \pm k\}$.
(1.10) The conjugacy class (ccls) of $g \in G$ is $\mathcal{C}_G(g) = \{xgx^{-1} : x \in G\}$. Then $|\mathcal{C}_G (g) | = |G:C_G(g)|$, where $C_G(g) = \{x \in G : xg = gx\}$, the centraliser of $g \in G$.
(1.11) Let $G$ be a group, $X$ be a set. $G$ acts on $X$ if there exists a map $\cdot: G \times X \to X$ by $(g,x) \to g\cdot x$ for $g \in G$, $x \in X$, s.t. $1 \cdot x = x$ for all $x \in X$, $(gh) \cdot x = g \cdot (h\cdot x)$ for all $g,h \in G, x \in X$.
(1.12) Given an action of $G$ on $X$, we obtain a homomorphism $\theta: G \to Sym(X)$, called the \emph{permutation representation} of $G$.
\begin{proof}
For $g \in G$, the function $\theta_g: X \to X$ by $x \to gx$ is a permutation on $X$, with inverse $\theta_{g^{-1}}$. Moreover, $\forall g_1,g_2 \in G$, $\theta_{g_1 g_2} = \theta_{g_1} \theta_{g_2}$ since $(g_1g_2) x = g_1(g_2 x)$ for $x \in X$.
\end{proof}
\newpage
\section{Basic Definitions}
\subsection{Representations}
Let $G$ be finite, $F$ be a field, usually $\C$.
\begin{defi} (2.1)\\
Let $V$ be a f.d. vector space over $F$. A (linear, in some books) \emph{representation} of $G$ on $V$ is a group homomorphism
\begin{equation*}
\begin{aligned}
\rho = \rho_V: & G &\to GL(V)
\end{aligned}
\end{equation*}
Write $\rho_g$ for the image $\rho_V(g)$; so for each $g \in G$, $\rho_g \in GL(V)$, and $\rho_{g_1 g_2} = \rho_{g_1} \rho_{g_2}$, and $(\rho_g)^{-1} = \rho_{g^{-1}}$.\\
The \emph{dimension} (or \emph{degree}) of $\rho$ is $\dim_F V$.
\end{defi}
(2.2) Recall $\ker \rho \triangleleft G$ (kernel is a normal subgroup), and $G/\ker \rho \cong \rho(G) \leq GL(V)$ (1st isomorphism theorem). We say $\rho$ is \emph{faithful} if $\ker \rho = 1$.
An alternative (and equivalent) approach is to observe that a representation of $G$ on $V$ is "the same as" a \emph{linear action} of $G$:
\begin{defi} (2.3)\\
$G$ \emph{acts linearly} on $V$ if there exists a \emph{linear action}
\begin{equation*}
\begin{aligned}
G \times V &\to V\\
(g,v) &\to gv
\end{aligned}
\end{equation*}
By linear action we mean: (action) $(g_1 g_2) v = g_1(g_2 v)$, $1v=v$ $\forall g_1,g_2 \in G, v \in V$, and (linear) $g(v_1+v_2) = gv_1+gv_2$, $g(\lambda v) = \lambda gv$ $\forall g \in G, v_1,v_2 \in V, \lambda \in F$.\\
Now if $G$ acts linearly on $V$, the map
\begin{equation*}
\begin{aligned}
G &\to GL(V)\\
g &\to \rho_g
\end{aligned}
\end{equation*}
with $\rho_g:v \to gv$ is a representation of $G$. Conversely, given a representation $\rho: G \to GL(V)$, we have a linear action of $G$ on $V$ via $g \cdot v := \rho(g) v$ $\forall v \in V, g \in G$.
\end{defi}
(2.4) In (2.3) we also say that $V$ is a $G$-space or that $V$ is a $G$-module. In fact if we define the \emph{group algebra} $FG$, or $F[G]$, to be $\{ \sum \alpha_j g: \alpha_j \in F\}$ with natural addition and multiplication, then $V$ is actually a $FG$-module (in the sense from GRM).
(2.5) $R$ is a \emph{matrix representation} of $G$ of degree $n$ if $R$ is a homomorphism $G \to GL_n (F)$. Given representation $\rho:G \to GL(V)$ with $\dim_F V=n$, fix basis $B$; we get matrix representation
\begin{equation*}
\begin{aligned}
G &\to GL_n(F)\\
g &\to [\rho(g)]_B
\end{aligned}
\end{equation*}
Conversely, given matrix representation $R:G \to GL_n(F)$, we get representation
\begin{equation*}
\begin{aligned}
\rho: G &\to GL(F^n)\\
g &\to \rho_g
\end{aligned}
\end{equation*}
via $\rho_g(v) = R_g v$ where $R_g$ is the matrix of $g$.
\begin{eg} (2.6)\\
Given any group $G$, take $V=F$ the 1-dimensional space, and
\begin{equation*}
\begin{aligned}
\rho:G &\to GL(F)\\
g &\to (id: F \to F)
\end{aligned}
\end{equation*}
is known as the trivial representation of $G$. So $\deg \rho = 1$ ($\dim_F F = 1$).
\end{eg}
\begin{eg} (2.7)\\
Let $G=C_4 = <x:x^4=1>$. Let $n=2$, and $F=\C$. Note that any $R:x \to X$ will determine $x^j \to X^j$ as it is a homomorphism, and also we need $X^4 = I$. So we can take $X$ to be diagonal matrix -- any such with diagonal entries a root to $x^4=1$, i.e. $\{\pm 1,\pm i\}$, or if $X$ is not diagonal then it will be similar to a diagonal matrix by (1.4) ($X^4=I$).
\end{eg}
\subsection{Equivalent representations}
\begin{defi} (2.8)\\
Fix $G,F$. Let $V,V'$ be $F$-spaces, and $\rho:G \to GL(V)$, $\rho': G \to GL(V')$ which are representations of $G$. The linear map $\phi: V \to V'$ is a $G$-homomorphism if $$\phi\rho(g) = \rho'(g)\phi \forall g \in G (*)$$ We can understand this more by the following diagram:
\includegraphics[scale=0.5]{image/Rep_01.png}
We say $\phi$ \emph{intertwines} $\rho,\rho'$. Write $Hom_G(V,V')$ for the $F$-space of all these.\\
$\phi$ is a $G$-isomorphism if it is also bijective; if such $\phi$ exists, $\rho,\rho'$ are isomorphic/equivalent representations. If $\phi$ is a $G$-isomorphism, we can write $(*)$ as $\rho' = \phi\rho\phi^{-1}$.
\end{defi}
\begin{lemma} (2.9)\\
The relation "being isomorphic" is an equivalent relation on the set of all representations of $G$ (over $F$).
\end{lemma}
\begin{rem} (2.10)\\
If $\rho,\rho'$ are isomorphic representations, they have the same dimension.
The converse may be false: $C_4$ has four non-isomorphic 1-dimensional representations: if $\omega = e^{2\pi i/4}$ then they are $\rho_j(x^i) = \omega^{ij}$ ($0 \leq i \leq 3$).
\end{rem}
\begin{rem} (2.11)\\
Given $G$, $V$ over $F$ of dimension $n$ and $\rho:G \to GL(V)$. Fix basis $B$ for $V$: we get a linear isomorphism
\begin{equation*}
\begin{aligned}
\phi:V &\to F^n\\
v &\to [v]_B
\end{aligned}
\end{equation*}
and we get a representation $\rho': G \to GL(F^n)$ isomorphic to $\rho$:
\includegraphics[scale=0.5]{image/Rep_02.png}
\end{rem}
(2.12) In terms of matrix representations, we have
\begin{equation*}
\begin{aligned}
R: G &\to GL_n(F),\\
R':G &\to GL_n(F)
\end{aligned}
\end{equation*}
are ($G$)-isomorphic or equivalent if there exists a nonsingular matrix $X \in GL_n(F)$ with $R'(g) = XR(g)X^{-1}$ $\forall g \in G$.
In terms of linear $G$-actions, the actions of $G$ on $V$,$V'$ are $G$-isomorphic if there exists isomorphisms $\phi:V \to V'$ such that $g:\phi(v) = \phi(gv)$ $\forall v \in V,g \in G$.
\subsection{Subrepresentations}
\begin{defi} (2.13)\\
Let $\rho:G \to GL(V)$ be a representation of $G$. We say $W \leq V$ is a $G$-subspace if it's a subspace and it is $\rho(G)$-invariant, i.e. $\rho_g(W) \leq W \forall g \in G$. Obviously $\{0\}$ and $V$ are $G$-subspaces, however.\\
$\rho$ is \emph{irreducible/simple} representation if there are no proper $G$-subspaces.
\end{defi}
\begin{eg} (2.14)\\
Any $1$-dimensional representation of $G$ is irreducible, but not conversely, e.g. $D_8$ has $2$-dimensional $\C$-irreducible representation.
\end{eg}
(2.15) In definition (2.13), if $W$ is a $G$-subspace, then the corresponding map
\begin{equation*}
\begin{aligned}
G &\to GL(W)\\
g &\to \rho(g)|_W
\end{aligned}
\end{equation*}
is a representation of $G$, a \emph{subrepresentation} of $\rho$.
\begin{lemma} (2.16)\\
In definition (2.13), given $\rho:G \to GL(V)$, if $W$ is a $G$-subspace of $V$ and if $B=\{v_1,...,v_n\}$ is a basis containing basis $B_1 = \{v_1,...,v_m\}$ of $W$ ($0<m<n$) then the matrix of $\rho(g)$ w.r.t. $B$ has block upper triangular form as the graph below, for each $g \in G$.
\includegraphics[scale=0.5]{image/Rep_03.png}
\end{lemma}
\begin{eg} (2.17)\\
(i) The irreducible representations of $C_4=\bra x:x^4=1\ket$ are all $1$-dimensional and four of these are $x\to i,x \to -1, x \to -i, x \to 1$. In general, $C_m=\bra x:x^m=1\ket$ has precisely $m$ irreducible complex representations, all of dimension 1. In fact, all complex irreducible representations of a finite abelian group are $1$-dimensional (use (1.4)* or see (4.4) below).\\
(ii) $G=D_6$: any irreducible $C$-representation has dimension $\leq 2$.\\
Let $\rho:G \to GL(V)$ be an irreducible $G$-representation. Let $r,s$ be rotation and reflection in $D_6$ respectively. Let $v$ be an eigenvector of $\rho(r)$, so $\rho(r) v = \lambda v$ for some $\lambda \neq 0$. Let $W=span\{v,\rho(s)v\} \leq V$. Since $\rho(s)\rho(s)v = v$ and $\rho(r)\rho(s) v = \rho(s)\rho(r)^{-1} v = \lambda^{-1} \rho(s) v$, both of which are in $W$; so $W$ is $G$-invariant, i.e. a $G$-subspace. Since $V$ is irreducible, $W=V$.
\end{eg}
\begin{defi} (2.18)\\
We say at $\rho:G \to GL(V)$ is \emph{decomposable} if there are proper $G$-invariant subspaces $U,W$ with $V = U \oplus W$. Say $\rho$ is direct sum $\rho_U \oplus \rho_W$. If no such decomposition exists, we say that $\rho$ is \emph{indecomposable}.
\end{defi}
\begin{lemma} (2.19)\\
Suppose $\rho:G \to GL(V)$ is decomposable with $G$-invariant decomposition $V=U \oplus W$. If $B$ is a basis $\{\underbrace{u_1,...,u_k}_{B_1}, \underbrace{w_1,...,w_l}_{B_2}\}$ of $V$ consisting of basis of $U$ and basis of $W$, then w.r.t. $B$, $\rho(g)_B$ is a block diagonal matrix $\forall g\in G$ as
\begin{equation*}
\begin{aligned}
\rho(g)_B = \begin{pmatrix}
[\rho_W(g)]_{B_1} & 0\\
0 & [\rho_W(g)]_{B_2}
\end{pmatrix}
\end{aligned}
\end{equation*}
\end{lemma}
\begin{defi} (2.20)\\
If $\rho:G \to GL(V)$, $\rho':G \to GL(V')$, the \emph{direct sum} of $\rho,\rho'$ is $$\rho \oplus \rho':G \to GL(V \oplus V')$$ where $\rho \oplus \rho'(g) (v_1+v_2) = \rho(g)v_1 + \rho'(g) v_2$, a \emph{block diagonal action}. For matrix representations $R:G \to GL_n(F)$, $R':G \to GL_{n'} (F)$, define $R \oplus R': G \to GL_{n+n'}(F)$:
\begin{equation*}
\begin{aligned}
g \to \begin{pmatrix}
R(g) & 0\\
0 & R'(g)
\end{pmatrix}
\end{aligned}
\end{equation*}
\end{defi}
\newpage
\section{Complete reducibility and Maschke's theorem}
\begin{defi} (3.1)\\
A representation $\rho:G \to GL(V)$ is \emph{completely reducible}, or \emph{semisimple}, if it is a direct sum of irreducible representations. Evidently, irreducible implies completely reducible (lol).
\end{defi}
\begin{rem} (3.2)\\
(1) The converse is false;\\
(2) See sheet 1 Q3: $\C$-representation of $\Z$ is not completely reducible and also representation of $C_p$ over $\F_p$ is not c.r..
From now on, take $G$ finite and $char\ F =0$.
\end{rem}
\begin{thm} (3.3)\\
Every f.d. representation $V$ of a finite group over a field of char $0$ is completely reducible, i.e. $$V \cong V_1 \oplus ... \oplus V_r$$is a direct sum of representations, each $V_i$ irreducible.
\end{thm}
It is enough to prove:
\begin{thm} (3.4 Maschke's theorem, 1899)\\
Let $G$ be finite, $\rho:G \to GL(V)$ a f.d. representation, $char\ F = 0$. If $W$ is a $G$-subspace of $V$, then there exists a $G$-subspace $U$ of $V$ s.t. $V = W \oplus U$, a direct sum of $G$-subspaces.
\begin{proof} (1)\\
Let $W'$ be any \emph{vector subspace} complement of $W$ in $V$, i.e. $V=W \oplus W'$ as vector spaces, and $W \cap W'=0$. Let $q:V \to W$ be the projection of $V$ onto $W$ along $W'$ ($\ker q = W'$), i.e. if $v=w+w'$ then $q(v) = w$. Define $$\bar{q} : v \to \frac{1}{|G|} \sum_{g \in G} g q(g^{-1}v)$$the 'average' of $q$ over $G$. Note that in order for $\frac{1}{|G|}$ to exist, we need $char\ F = 0$. It still works if $char\ F \nmid |G|$.\\
Claim (1): $\bar{q}:V \to W$: For $v \in V$, $q(g^{-1}v) \in W$ and $gW \leq W$, so each term $gq(g^{-1}v) \in W$;\\
Claim (2): $\bar{q}(w) = w$ for $w \in W$: $$\bar{q}(w) = \frac{1}{|G|} \sum_{g \in G} gq(g^{-1}w) = \frac{1}{|G|} \sum g(g^{-1}w) = \frac{1}{|G|} \sum w = w$$
So these two claims imply that $\bar{q}$ projects $V$ onto $W$.\\
Claim (3) If $h \in G$ then $h\bar{q}(v) = \bar{q}(hv)$ ($v \in V$):
\begin{equation*}
\begin{aligned}
h\bar{q}(v) &= h\frac{1}{|G|} \sum_g g \cdot q(g^{-1} v)\\
&= \frac{1}{|G|} \sum_g hgq(g^{-1} v)\\
&= \frac{1}{|G|} \sum (hg) q((hg)^{-1} hv)\\
&= \frac{1}{|G|} \sum_g gq(g^{-1}(hv))\\
&= \bar{q}(hv)
\end{aligned}
\end{equation*}
We'll then show that the kernel of this map is $G$-invariant, so this gives a $G$-summand on Thursday.
Let's now show $\ker \bar{q}$ is $G$-invariant. If $v \in \ker \bar{q}$, then $h\bar{q}(v) = 0 = \bar{q}(hv)$, so $hv \in \ker \bar{q}$. Thus $V = im \bar{q} \oplus \ker \bar{q} = W \oplus \ker \bar{q}$ is a $G$-subspace decomposition.
We can deduce (3.3) from (3.4) by induction on $\dim V$. If $\dim V = 0$ or $V$ is irreducible, then result is clear. Otherwise, $V$ has non-trivial $G$-invariant subspace, $W$. Then by (3.4), there exists $G$-invariant complement $U$ s.t. $V = U \oplus W$ as representations of $G$. But $\dim U, \dim W < \dim V$. So by induction they can be broken up into direct sum of irreducible subrepresentations.
\end{proof}
The second proof uses inner products, hence we need to take $F = \C$ and can be generalised to compact groups in section 15.\\
Recall, for $V$ a $\C$-space, $\bra,\ket$ is a \emph{Hermitian inner product} if\\
(a) $\bra w,v\ket =\overline{\bra v,w\ket}$ $\forall v,w$ (Hermitian);\\
(b) linear in RHS (sesquilinear);\\
(c) $\bra v,v\ket > 0$ iff $v \neq 0$ (positive definite).
Additionally, $\bra,\ket$ is \emph{G-invariant} if \\
(d) $\bra gv,gw\ket = \bra v,w\ket$ $\forall v,w \in V, g \in G$.
Note if $W$ is $G$-invariant subspace of $V$, with $G$-invariant inner product, then $W^\perp$ is also $G$-invariant, and $V = W \oplus W^\perp$. For all $v \in W^\perp$, $g \in G$, we have to show that $gv \in W^\perp$. But $v \in W^\perp \iff \bra v,w\ket = 0 \forall w \in W$. Thus by (d), $\bra gv,gw\ket = 0$ $\forall g \in G \forall w \in W$. Hence $\bra gv,w'\ket = 0$ $\forall w' \in W$. Since we can choose $w=g^{-1}w' \in W$ by $G$-invariance of $W$. Thus $gv \in W^\perp$ since $g$ was arbitrary.
Hence if there is a $G$-invariant inner product on any $G$-space, we get another proof of Maschke's theorem:
(3.4*) (Weyl's unitary trick)\\
Let $\rho$ be a complex representation of the finite group $G$ on the $\C$-space $V$. Then there is a $G$-invariant Hermitian inner product on $V$.
\begin{rem}
Recall the \emph{unitary group} $U(V)$ on $V$: $\{f \in GL(V): (fu,fv) = (u,v) \forall u,v \in V\} = \{A \in GL_n(\C) : A \bar{A}^T = I\} (= U(n))$ by choosing orthonormal basis.\\
Sheet 1 Q.12: any finite subgroup of $GL_n(\C)$ is conjugate to a subgroup of $U(n)$.
\end{rem}
\begin{proof} (2)\\
There exist an inner product on $V$: take basis $e_1,...,e_n$ and define $(e_i,e_j) = \delta_{ij}$, extended sesquilinearly. Now
\begin{equation*}
\begin{aligned}
\bra v,w\ket := \frac{1}{|G|} \sum_{g \in G} (gv,gw)
\end{aligned}
\end{equation*}
we claim that $\bra,\ket$ is sesquilinear, positive definite and $G$-invariant: if $h \in G$, then
\begin{equation*}
\begin{aligned}
\bra hv,hw\ket &= \frac{1}{|G|} \sum_{g \in G} ((gh)v,(gh)w)\\
&= \frac{1}{|G|} \sum_{g' \in G} (g'v, g'w)\\
&= \bra v,w\ket
\end{aligned}
\end{equation*}
for all $v,w \in V$.
\end{proof}
\end{thm}
\begin{defi} (3.5, the regular representation)\\
Recall \emph{group algebra} of $G$ is $F$-space $FG = span\{e_g:g \in G\}$. There is a linear $G$-action
\begin{equation*}
\begin{aligned}
h \in G, h \sum_{g \in G} a_g e_g = \sum_{g \in G} a_g e_{hg} (=\sum_{g' \in G} a_{h^{-1} g'} e_{g'})
\end{aligned}
\end{equation*}
$\rho_{reg}$ is the corresponding representation, the \emph{regular representation} of $G$. This is faithful of $\dim |G|$. $FG$ is the \emph{regular module}.
\end{defi}
\begin{prop}
Let $\rho$ be an irreducible representation of $G$ over a field of characteristic 0. Then $\rho$ is isomorphic to a subrepresentation of $\rho_{reg}$.
\begin{proof}
Take $\rho: G \to GL(V)$ irreducible and let $0 \neq v \in V$. Let $\theta : FG \to V$ by $\sum a_g e_g \to \sum a_g gv$. Check this is a $G$-homomorphism. Now $V$ is irreducible so $im\theta = V$ (since $im\theta$ is a $G$-subspace).
Also $\ker\theta$ is $G$-subspace of $FG$. Let $W$ be $G$-complement of $\ker \theta$ in $FG$ (Maschke), so that $W < FG$ is $G$-subspace and $FG = \ker\theta \oplus W$. Thus $W \cong FG/\ker \theta \cong(G-isomorphism) im\theta \cong V$.
\end{proof}
\end{prop}
More generally,
\begin{defi} (3.7)\\
Let $F$ be a field. Let $G$ act on set $X$. Let $FX = span\{e_x:x \in X\}$ with $G$-action
\begin{equation*}
\begin{aligned}
g(\sum a_x e_x) = \sum a_x e_{gx}
\end{aligned}
\end{equation*}
\end{defi}
The representation $G \to GL(V)$ where $V=FX$ is the corresponding \emph{permutation representation}. See section 7.
\newpage
\section{Schur's lemma}
It's really unfair that such an important result is only remembered by a lemma, so we shall call it a theorem.
\begin{thm} (4.1, Schur)\\
(a) Assume $V,W$ are irreducible $G$-spaces over field $F$. Then any $G$-homomorphism $\theta:V \to W$ is either $0$ or an isomorphism.\\
(b) Assume $F$ is \emph{algebraically closed}, and let $V$ be an irreducible $G$-space. Then any $G$-endomorphism $V \to V$ is a scalar multiple of the identity map $\iota_V$.
\begin{proof}
(a) Let $\theta:V \to W$ be a $G$-homomorphism. Then $\ker$ $\theta$ is $G$ subspace of $V$ and, since $V$ is irreducible, we get $\ker\theta = 0$ or $\ker\theta = V$.\\
And $im\theta$ is $G$-subspace of $W$, so as $W$ is irreducible, $im\theta$ is either $0$ or $W$. Hence, either $\theta=0$ or $\theta$ is injective and surjective, hence isomorphism.\\
(b) Since $F$ is algebraically closed, $\theta$ has an eigenvalue, $\lambda$. Then $\theta-\lambda \iota$ is singular $G$-endomorphism of $V$, but it cannot be an isomorphism, so it is $0$ (by (a)). So $\theta = \lambda \iota_V$.
\end{proof}
\end{thm}
Recall from (2.8), the $F$-space $Hom_G(V,W)$ of all $G$-homomorphisms $V \to W$. Write $End_G(V)$ for the $G$-endomorphisms of $V$.
\begin{coro} (4.2)\\
If $V,W$ are irreducible complex $G$-spaces, then
\begin{equation*}
\begin{aligned}
\dim_\C Hom_G (V,W) = \left\{\begin{array}{ll}
1 & \text{ if } V,W \text{ are } G- \text{ isomorphic}\\
0 & \text{ otherwise}
\end{array}
\right.
\end{aligned}
\end{equation*}
\begin{proof}
If $V,W$ are not $G$-isomorphic then the only $G$-homomorphism $V\to W$ is $0$ by (4.1). Assume $V \cong_G W$ and $\theta_1,\theta_2 \in Hom_G (V,W)$, both non-zero. Then $\theta_2$ is invertible by (4.1), and $\theta_2^{-1} \theta_1 \in End_G(V)$, and non-zero, so $\theta_2^{-1} \theta_1 = \lambda\iota_V$ for some $\lambda \in \C$. Hence $\theta_1 = \lambda\theta_2$.
\end{proof}
\end{coro}
\begin{coro} (4.3)\\
If finite group $G$ has a faithful complex irreducible representation, then $Z(G)$, the centre of the group, is cyclic.\\
Note that the converse is false (Sheet 1, Q10).
\begin{proof}
Let $\rho:G \to GL(V)$ be faithful irreducible complex representation. Let $z \in Z(G)$, so $zg = gz$ $\forall g \in G$, hence the map $\phi_z: v \to z(v)$ ($v \in V$) is $G$-endomorphism of $V$, hence is multiplication by scalar $\mu_z$, say.\\
By Schur's lemma, $z(v) = \mu_z v$ $\forall v$. Then the map
\begin{equation*}
\begin{aligned}
Z(G) &\to \C^* \ (\text{multiplicative group})\\
z &\to \mu_z
\end{aligned}
\end{equation*}
is a representation of $Z$ and is faithful, since $\rho$ is. Thus $Z(G)$ is isomorphic to some finite subgroup of $\C^*$, so is cyclic.
\end{proof}
\end{coro}
Let's now consider representation of finite abelian groups.
\begin{coro} (4.4)\\
The irreducible $\C$-representations of a finite abelian group are all $1$-dimensional.
\begin{proof}
\emph{Either}: use (1.4)* to invoke simultaneous diagonalisation: if $v$ is an eigenvector for each $g \in G$, and if $V$ is irreducible, then $V=\bra v\ket$.\\
\emph{Or}: Let $V$ be an irreducible $\C$-representation. For $g \in G$, the map
\begin{equation*}
\begin{aligned}
\theta_g: &V &\to V\\
&v &\to gv
\end{aligned}
\end{equation*}
is a $G$-endomorphism of $V$, and as $V$ irreducible, $\theta_g = \lambda_g \iota_V$ for some $\lambda_g \in \C$. Thus $gv = \lambda_g v$ for any $g \in G$ (so $\bra v\ket$ is a $G$-subspace of $V$). Thus as $0 \neq V$ is irreducible, $V = \bra v\ket$, which is $1$-dimensional.
\end{proof}
\end{coro}
\begin{rem}
Schur's lemma fails over non-algebraically closed fields, in particular, over $\R$. For example, let's consider the cyclic group $C_3$. It has 2 irreducible $\R$-representations, one of dimension 1 (maps everything to 1) and one of dimension 2 (consider $\C$ as a dimension-2 space over $\R$, and map the generator to multiplication by a primitive 3rd root of unity) (so 'contradicting' Schur's lemma via the corollary above).\\
Recall that every finite abelian group $G$ is isomorphic to a product of cyclic groups (see GRM). For example, $C_6 = C_2 \times C_3$. In fact, it can be written as a product of $C_{p^\alpha}$ for various primes $p$ and $\alpha \geq 1$, and the factors are uniquely determined up to reordering.
\end{rem}
\begin{prop} (4.5)\\
The finite abelian group $G=C_{n_1} \times ... \times C_{n_r}$ has precisely $|G|$ irreducible $\C$-representations, as described below:
\begin{proof}
Write $G = \bra x_1\ket \times ... \bra x_r\ket$ where $|x_j| = n_j$. Suppose $\rho$ is irreducible, so by (4.4), it's $1$-dimensional: $\rho:G \to \C^*$.\\
Let $\rho(1,...,x_j,...,1)$ (all $1$ apart from the $j^{th}$ entry) be $\lambda_j$. Then $\lambda_j^{n_j} = 1$, so $\lambda_j$ is a $n_j$-th root of unity. Now, the values $(\lambda_1,...,\lambda_r)$ determine $\rho$: $$\rho(x_1^{j_1}, ..., x_r^{j_r}) = \lambda_1^{j_1}...\lambda_r^{j_r}$$
thus $\rho \leftrightarrow (\lambda_1,...,\lambda_r)$ with $\lambda_j^{n_j} =1 $ $\forall j$; we have $n_1...n_r$ such $r$-tuples, each giving $1$-dimensional representation.
\end{proof}
\end{prop}
\begin{eg} (4.6)\\
Consider $G=C_4 = \bra x\ket$. We could have $\rho_1(x) = 1,\rho_2(x) = i,\rho_3(x)=-1,\rho_4(x)=-i$.
\end{eg}
Warning: There is no "natural" 1-1 correspondence between the elements of $G$ and the representations of $G$ ($G$-finite abelian). If you choose an isomorphism $G \cong C_{a_1} \times ... \times C_{a_r}$, then we can identify the two sets (elements of groups and representations of $G$), but it depends on the choice of isomorphism.
Isotypical decomposition:
Recall any diagonalisable endomorphism $\alpha:V \to V$ gives eigenspace decomposition of $V \cong \oplus_\lambda V(\lambda)$, where $V(\lambda) = \{v:\alpha v = \lambda v\}$. This is \emph{canonical} (one of the three useless words: \emph{arbitrary}(anything), \emph{canonical}(only one choice), \emph{uniform}(you can choose, but it doesn't really matter)), in the sense that it depends on $\alpha$ alone (and nothing else).\\
There is no canonical eigenbasis of $V$: must choose basis in each $V(\lambda)$.
We know that in $char\ 0$ every representation $V$ decomposes as $\oplus n_i V_i$, $V_i$ irreducible, $n_i \geq 0$. How unique is this?
We have this wishlist (4.7):\\
(a) Uniqueness: for each $V$ there is only one way to decompose $V$ as above. However, this doesn't work obviously.\\
(b) Isotypes: for each $V$, there exists a unique collection of subrepresentations $U_1,...,U_k$ s.t. $V=\oplus U_i$ and, if $V_i \subseteq U_i$ and $V'_j \subseteq U_j$ are irreducible subrepresentations, then $V_i \cong V'_j$ iff $i = j$.\\
(c) Uniqueness of factors: If $\oplus_{i=1}^k V_i \cong \oplus_{i=1}^{k'} V'_i$ with $V_i,V'_i$ irreducible, then $k=k'$, and $\exists \pi \in S_k$ such that $V'_{\pi(i)} \cong V_i$ (Krull--Schmidt theorem).\
For (b),(c) see Teleman section 5.
\begin{lemma} (4.8)\\
Let $V,V_1,V_2$ be $G-$spaces over $F$.\\
(i) $Hom_G(V,V_1 \oplus V_2) \cong Hom_G (V,V_1) \oplus Hom_G(V,V_2)$;\\
(ii) $Hom_G(V_1\oplus V_2, V) \cong Hom_G(V_1,V) \oplus Hom_G(V_2,V)$;\\
\begin{proof}
(i) Let $\pi_i: V_1 \oplus V_2 \to V_i$ be $G$-linear projections onto $V_i$, with kernel $V_{3-i}$ ($i=1,2$).\\
Consider
\begin{equation*}
\begin{aligned}
Hom_G (V,V_1 \oplus V_2) &\to Hom_G (V,V_1) \oplus Hom_G (V,V_2)\\
\phi &\to (\pi_1 \phi, \pi_2 \phi)
\end{aligned}
\end{equation*}
This map has inverse $(\psi_1,\psi_2) \to \psi_1+\psi_2$. Check details.\\
(ii) The map $\phi \to (\phi|_{V_1},\phi|_{V_2})$ has inverse $(\psi_1,\psi_2) \to \psi_1\pi_1+\psi_2\pi_2$.
\end{proof}
\end{lemma}
\begin{lemma}
Let $F$ be algebraically closed, $V=\oplus_1^n V_i$ a decomposition of $G$-space into irreducible summands. Then, for each irreducible representation $S$ of $G$,
\begin{equation*}
\begin{aligned}
\#\{j:V_j \cong S\} = \dim Hom_G(S,V)
\end{aligned}
\end{equation*}
where $\#$ means 'number of times'. This is called the \emph{multiplicity} of $S$ in $V$.
\begin{proof}
Induction on $n$. $n=0,1$ are trivial.\\
If $n>1$, $V=\oplus_1^{n-1} V_i \oplus V_n$. By (4.8) we have
\begin{equation*}
\begin{aligned}
\dim Hom_G (S,\oplus_1^{n-1} V_i \oplus V_n) = \dim Hom(S,\oplus_1^{n-1} V_i) + \underbrace{\dim Hom_G (S,V_n)}_{\text{Schur's lemma}}
\end{aligned}
\end{equation*}
\end{proof}
\end{lemma}
\begin{defi} (4.10)\\
A decomposition of $V$ as $\oplus W_j$ where each $W_j \cong n_j$ copies of irreducible representations $S_j$ (each non-isomorphic for each $j$) is the \emph{canonical decomposition} or the decomposition into \emph{isotypical components} $W_j$. For $F$ algebraically closed, $n_j=\dim Hom_G(S_j,V)$.
\end{defi}
\newpage
\section{Character theory}
We want to attach invariants to representation $\rho$ of a finite group $G$ on $V$. Matrix coefficients of $\rho(g)$ are basis dependent, so not true invariants.\\
Let's take $F=\C$, $G$ finite, $\rho=\rho_V: G \to GL(V)$ be a representation of $G$.
\begin{defi} (5.1)\\
The \emph{character} $\chi_\rho = \chi_V = \chi$ is defined as $\chi(g) = \tr \rho(g) = \tr R(g)$ where $R(g)$ is any matrix representation of $\rho(g)$ w.r.t. any basis.\\
The degree of $\chi_V$ is $\dim_\C V$.\\
Thus $\chi$ is a function $G \to \C$. $\chi$ is \emph{linear} (not a universal name) if $\dim V=1$, in which case $\chi$ is a homomorphism $G \to \C^*$ ($=GL_1(\C)$).\\
$\chi$ is irreducible if $\rho$ is; $\chi$ is faithful if $\rho$ is; and, $\chi$ is trivial, or principal, if $\rho$ is the trivial representation (2.6). We write $\chi = 1_G$ in that case.\\
$\chi$ is a complete invariant in the sense that it determines $\rho$ up to isomorphism -- see (5.7).
\end{defi}
\begin{thm} (5.2, first properties)\\
(i) $\chi_V(1) = \dim_\C V$; (clear: $\tr I_n = n$)\\
(ii) $\chi_V$ is a \emph{class function}, via it is conjugation-invariant: $$\chi_V (hgh^{-1}) = \chi_V(g) \forall g,h \in G$$
Thus $\chi_V$ is constant on conjugacy classes.\\
(iii) $\chi_V(g^{-1}) = \overline{\chi_V(g)}$, the complex conjugate;\\
(iv) For two representations $V,W$, $\chi_{V \oplus W} = \chi_V + \chi_W$.
\begin{proof}
(ii) $\chi(hgh^{-1}) = \tr (R_h R_g R^{-1}_h) = \tr (R_g) = \chi(g)$.\\
(iii) Recall $g \in G$ has finite order, so we can assume $\rho(g)$ is represented by a diagonal matrix $Diag(\lambda_1,...,\lambda_n)$. Then $\chi(g) = \sum \lambda_i$. Now $g^{-1}$ is represented by the matrix $Diag(\lambda_1^{-1},...\lambda_n^{-1})$, and hence $\chi(g^{-1}) = \sum \lambda_i^{-1} = \sum \bar{\lambda_i} = \overline{\chi(g)}$ (since $\lambda_i$'s are roots of unity -- since $g^k = 1$ for some $k$!(I mean an exclamation mark here to express surprise) and by homomorphism we know that).\\
(iv) Suppose $V = V_1 \oplus V_2$, $\rho_i : G \to GL(V_i)$, $\rho:G \to GL(V)$. Take basis $B = B_1 \cup B_2$ of $V$. W.r.t. $B$, $\rho(g)$ has matrix of block form $Diag([\rho_1(g)]_{B_1},[\rho_2(g)]_{B_2})$ and as $\chi(g)$ is the trace of the above matrix, it is equal to $\tr \rho_1(g)+ \tr\rho_2(g) = \chi_{\rho_1} (g) + \chi_{\rho_2}(g)$.
\end{proof}
\end{thm}
\begin{rem}
We see later that $\chi_1,\chi_2$ character of $G$ implies that $\chi_1\chi_2$ is also a character of $G$: uses tensor products, see (9.6).
\end{rem}
\begin{lemma} (5.3)\\
Let $\rho:G \to GL(V)$ be a complex representation \emph{affording} the character $\chi$ (i.e. $\chi$ is a character of $\rho$). Then $|\chi(g)| \leq \chi(1)$, with equality iff $\rho(g) = \lambda \iota$ for some $\lambda \in \C$, a root of unity. Moreover, $\chi(g) = \chi(1)$ iff $g \in \ker \rho$.
\begin{proof}
Fix $g$. W.r.t. basis of $V$ of eigenvalues $\rho(g)$, the matrix of $\rho(g)$ is $Diag(\lambda_1,...,\lambda_n)$. Hence $|\chi(g)| = |\sum \lambda_j| \leq \sum |\lambda_j|= \sum 1 = \dim V = \chi(1)$. Equality holds iff all $\lambda_j$ are equal (to $\lambda$, say).\\
If $\chi(g) = \chi(1)$, then $\rho(g) = \lambda \iota$ has $\chi(g) = \lambda \chi(1)$.
\end{proof}
\end{lemma}
\begin{lemma} (5.4)\\
(a) If $\chi$ is a complex irreducible character of $G$, so is $\bar{\chi}$;\\
(b) Under the same assumption, so is $\varepsilon\chi$ for any linear character $\varepsilon$ of $G$.\
\begin{proof}
If $R:G \to GL_n (\C)$ is a complex irreducible representation then so is $\bar{R}: G \to GL_n (\C)$ by $g \to \bar{R}(g)$. Similarly for $R': g \to \varepsilon(g) R(g)$ for $g \in G$. Check the details.
\end{proof}
\end{lemma}
\begin{defi} (5.5)\\
$\mathcal{C}(G) = \{f: G \to \C: f(hgh^{-1}) = f(g) \forall h,g \in G\}$, the $\C$-space of class functions (we call it a space since $f_1+f_2: g \to f_1(g)+f_2(g)$, $\lambda f: g \to \lambda f(g)$ are still in $\mathcal{C}(G)$), so this is a vector space.\\
Let $k = k(G)$ be the number of ccls of $G$. List the ccls $\mathcal{C}_1,...,\mathcal{C}_k$. Conventionally we choose $g_1 = 1, g_2,...,g_k$, representatives of the ccls (hence $\mathcal{C}_1 = \{1\}$). Note that $\dim_\C \mathcal{C}(G) = k$ (the characteristic functions $\delta_j$ of each ccl which maps any element in the ccl to 1 and others to 0 form a basis).\\
We define Hermitian inner product on $\mathcal{C}(G)$:
\begin{equation*}
\begin{aligned}
\bra f,f' \ket &= \frac{1}{|G|} \sum_{g \in G} \overline{f(g)} f'(g)\\
&= \frac{1}{|G|} \sum_{j=1}^k |\mathcal{C}_j| \overline{f(g_j)} f'(g_j)\\
&= \sum_{j=1}^k \frac{1}{|C_G(g_j)|} \overline{f(g_j)} f'(g_j)
\end{aligned}
\end{equation*}
using $|\mathcal{C}_x| = |G:C_G(x)|$, where $\mathcal{C}_x$ is the ccl of $x$, $C_G(x)$ is the centraliser of $x$.\\
For characters
\begin{equation*}
\begin{aligned}
\bra \chi,\chi' \ket &= \sum \frac{1}{|C_G(g_j)|} \chi(g_j^{-1}) \chi'(g_j)
\end{aligned}
\end{equation*}
is a real symmetric form (in fact, $\bra \chi,\chi'\ket \in \Z$ -- see later).
\end{defi}
\begin{thm} (5.6)\\
The $\C$-irreducible characters of $G$ form an orthonormal basis of $\mathcal{C}(G)$. Moreover,\\
(a) If $\rho:G \to GL(V), \rho': G \to GL(V')$ are irreducible representations of $G$ affording characters $\chi,\chi'$ respectively, then
\begin{equation*}
\begin{aligned}
\bra \chi,\chi' \ket = \left\{\begin{array}{ll}
1 & \rho,\rho' \text{ are isomorphic representations}\\
0 & \text{ otherwise}
\end{array}
\right.
\end{aligned}
\end{equation*}
we call this 'row orthogonality'.\\
(b) Each class function of $G$ can be expressed as a linear combination of irreducible characters of $G$.\\
This will be proved later in section 6.
\end{thm}
\begin{coro} (5.7)\\
Complex representations of \emph{finite} groups are characterised by their characters.\\
We emphasise on finiteness here: for example, $G=\Z$, consider $1 \to I_2$, $1 \to {{1\ 1} \choose {0\ 1}}$ are non-isomorphic but have same character.
\begin{proof}
Let $\rho:G \to GL(V)$ be representation affording $\chi$ ($G$ finite over $\C$). (3.3) says
\begin{equation*}
\begin{aligned}
\rho = m_1 \rho_1 \oplus ... \oplus m_k \rho_k
\end{aligned}
\end{equation*}
where $\rho_1,...,\rho_k$ are irreducible, and $m_j \geq 0$. Then $m_j = \bra \chi,\chi_j\ket$ where $\chi_j$ is afforded by $\rho_j$: we have $\chi = m_1\chi_1 + ... + m_k \chi_k$, but the $\rho_i$'s are orthonormal.
\end{proof}
\end{coro}
\begin{coro} (5.8, irreduciblility criterion)\\
If $\rho$ is $\C$-representation of $G$ affording $\chi$, then $\rho$ irreducible $\iff$ $\bra \chi,\chi \ket = 1$.
\begin{proof}
Forward is just the statement of orthonormality. Conversely, assume $\bra \chi,\chi\ket = 1$. Now take a (complete) decomposition of $\rho$ and take characters of it we get $\chi = \sum m_j \chi_j$ with $\chi_j$ irreducible and $m_j \geq 0$. Then $\sum m^2_j = 1$. Hence $\chi = \chi_j$ for some $j$ (since the $m_j$'s are obviously integers), so is irreducible.
\end{proof}
\end{coro}
\begin{coro} (5.9)\\
If the irreducible $\C$-representations of $G$ are $\rho_1,...,\rho_k$, with dimensions $n_1,...,n_k$, then
\begin{equation*}
\begin{aligned}
|G| = \sum_{i=1}^k n_i^2
\end{aligned}
\end{equation*}
\begin{proof}
Recall from (3.5), $\rho_{reg}: G \to GL(\C G)$, the regular representation of $G$ of dimension $|G|$ (where $\C G$ is just a $G$-space with basis $\{e_g: g \in G\}$ and any $h \in G$ permutes the $e_g$: $e_g \to e_{hg}$).\\
Let $\pi_{reg}$ be its character, the \emph{regular character} of $G$.\\
Claim 1: $\pi_{reg}(1) = |G|$, $\pi_{reg}(h) = 0$ if $h \neq 1$.\\
This is clear: take $h \in G, h \neq 1$, then we always have $0$ down the diagonal since $h$ permutes things around, so the trace is 0; if $h=1$ then we have an identity matrix so trace is $\dim \rho = |G|$.\\
Claim 2: $\pi_{reg} = \sum n_j \chi_j$ with $n_j = \chi_j(1)$.\\
This is because
\begin{equation*}
\begin{aligned}
n_j &= \bra \pi_{reg}, \chi_j\ket\\
&= \frac{1}{|G|} \sum_{g \in G} \overline{\pi_{reg}(g)} \chi_j(g)\\
&= \frac{1}{|G|} \cdot |G| \chi_j(1) = \chi_j(1)
\end{aligned}
\end{equation*}
(all the other $\pi_{reg}(g)$ are zero by claim 1).\\
Our corollary is then obvious by just calculating $|G| = \pi_{reg}(1)$.
\end{proof}
\end{coro}
\begin{coro} (5.10)\\
Number of irreducible characters of $G$ (up to equivalence) = $k$ (=number of ccls).
\end{coro}
\begin{coro} (5.11)\\
Elements $g_1,g_2 \in G$ are conjugate iff $\chi(g_1) = \chi(g_2)$ for all irreducible characters of $G$.
\begin{proof}
Forward: characters are class functions;\\
Backward: Let $\delta$ be the characteristic function of the class of $g_1$. In particular, $\delta$ is a class function, so can be written as a linear combination of the irreducible characters of $G$. Hence $\delta(g_2) = \delta(g_1) = 1$, so $g_2 \in \mathcal{C}_G (g_1)$.
\end{proof}
\end{coro}
In the end let's introduce a good friend which will be around for the next few lectures:\\
Recall from (5.5), the inner product on $\mathcal{C}(G)$ and the real symmetric form $\bra,\ket$ on characters:
\begin{defi}
The \emph{character table} of $G$ is the $k \times k$ matrix (where $k$ is the number of ccls) $X = [\chi_i (g_j)]$, the $i^{th}$ character on the $j^{th}$ class, where we let $\chi_1 =1_G, \chi_2,...,\chi_k$ are the irreducible characters of $G$, and $\mathcal{C}_1 =\{1\},...,\mathcal{C}_k$ are the ccls with $g_j \in \mathcal{C}_j$ (as we defined in 5.5).\\
So the $(i,j)^{th}$ entry of $X$ is just $\chi_i (g_j)$.
\end{defi}
\begin{eg} (5.13)\\
(a) $C_3 = \bra x:x^3=1\ket$. The character table is
\begin{equation*}
\begin{aligned}
\begin{matrix}
& 1 & x & x^2\\
\chi_1 & 1 & 1 & 1\\
\chi_2 & 1 & \omega & \omega^2\\
\chi_3 & 1 & \omega^2 & \omega
\end{matrix}
\end{aligned}
\end{equation*}
where $\omega = e^{2\pi i/3}$.\\
(b) $G=D_6 \cong S_3 = \bra r,s:r^3=s^2=1,srs^{-1} = r^{-1}\ket$.\\
ccls of $G$: $\mathcal{C}_1 = \{1\}$, $\mathcal{C}_2 = \{r,r^{-1}\}$, $\mathcal{C}_3 =\{s,sr,sr^2\}$. We have 3 irreducible representations over $\C$: $1_G$ (trivial); $\mathcal{S}$ (sign): $x \to 1$ for $x$ even, $x \to -1$ for $x$ odd; and $W$ (2-dimensional): $sr^i$ acts by matrix with eigenvalues $\pm 1$; $r^k$ acts by the matrix
\begin{equation*}
\begin{aligned}
\begin{matrix}
\cos 2k\pi/3 & -\sin 2k\pi/3\\
\sin 2k\pi/3 & \cos 2k\pi/3
\end{matrix}
\end{aligned}
\end{equation*}
so $\chi_w(sr^i) = 0$ $\forall i$, $\chi_w (r^k) = 2\cos 2k\pi/3 = -1$ for $k=1,2$. So the character table is:
\begin{equation*}
\begin{aligned}
\begin{matrix}
& \mathcal{C}_1 & \mathcal{C}_2 & \mathcal{C}_3\\
1_G & 1 & 1 & 1\\
\chi_s & 1 & 1 & -1\\
\chi_w & 2 & 0 & -1
\end{matrix}
\end{aligned}
\end{equation*}
\end{eg}
\newpage
\section{Proofs and orthogonality}
We want to prove(5.6): irreducible characters form orthonormal basis for the space of $\C$-class functions.
\begin{proof} (of 5.6 (a))\\
Fix bases of $V$ and $V'$. Write $R(g)$, $R'(g)$ for matrices of $\rho(g),\rho'(g)$ w.r.t. these bases, respectively. Then
\begin{equation*}
\begin{aligned}
\bra \chi',\chi \ket &= \frac{1}{|G|} \sum_{g \in G} \chi'(g^{-1}) \chi(g) \\
&= \frac{1}{|G|} \sum_{g \in G, i,j\ s.t. 1 \leq i \leq n', 1 \leq j \leq n} R'(g^{-1})_{ii} R(g)_{jj}
\end{aligned}
\end{equation*}
the trick is to define something that annihilates almost the whole thing. Let $\phi:V \to V'$ be linear and define
\begin{equation*}
\begin{aligned}
\tilde{\phi}: V &\to &V'\\
v &\to &\frac{1}{|G|} \sum_{g \in G} \rho'(g^{-1}) \phi \rho(g) v
\end{aligned}
\end{equation*}
We claim that this is a $G$-homomorphism: if $h \in G$, let's calculate
\begin{equation*}
\begin{aligned}
\rho'(h^{-1}) \tilde{\phi} \rho(h) (v) &= \frac{1}{|G|} \sum_{g \in G} \rho' (gh)^{-1} \phi \rho(gh) (v)\\
&= \frac{1}{|G|} \sum_{g' \in G} \rho'(g'^{-1}) \phi \rho(g') (v)\\
&= \tilde{\phi} (v)
\end{aligned}
\end{equation*}
(when $g$ runs through $G$, $gh$ runs through $G$ as well). So (2.8) is satisfied, i.e. $\tilde{\phi}$ is a $G$-homomorphism.
Case 1: $\rho,\rho'$ are not isomorphic. Schur's lemma says $\tilde{\phi} = 0$ for any given linear $\phi:V \to V'$. Take $\phi = \varepsilon_{\alpha\beta}$, having matrix $E_{\alpha\beta}$ (w.r.t our basis). This is $0$ everywhere except $1$ in the $(\alpha,\beta)$-position. Then $\tilde{\varepsilon_{\alpha\beta}} = 0$. So $\frac{1}{|G|} \sum_{g \in G} (R'(g^{-1}) E_{\alpha\beta} R(g))_{ij} = 0$. So $\frac{1}{|G|} \sum R'(g^{-1})_{i\alpha} R(g)_{\beta j} =0 $ $\forall i,j$, with $\alpha = i, \beta = j$. Now $\frac{1}{|G|} \sum_{g \in G} R'(g^{-1})_{ii} R(g)_{jj} = 0$ sum over $i,j$. Then $\bra \chi',\chi\ket = 0$.\\
Case 2: $\rho,\rho'$ isomorphic. So $\chi = \chi'$; take $V=V'$, $\rho = \rho'$. If $\phi:V \to V$ is linear endomorphism, we claim $\tr \phi = \tr \tilde{\phi}$:
\begin{equation*}
\begin{aligned}
\tr \tilde{\phi} = \frac{1}{|G|} \sum_{g \in G} \tr(\rho(g)^{-1} \phi \rho(g)) = \frac{1}{|G|} \sum_{g \in G} \tr \phi = \tr \phi
\end{aligned}
\end{equation*}
By Schur's lemma, $\tilde{\phi} = \lambda\iota_V$ for some $\lambda \in \C$ (depending on $\phi$). Then $\lambda = \frac{1}{n} \tr\phi$. Let $\phi = \varepsilon_{\alpha\beta}$. So $\tr \phi = \delta_{\alpha\beta}$. Hence $\tilde{\varepsilon_{\alpha\beta}} = \frac{1}{n} \delta_{\alpha\beta}\iota_V = \frac{1}{|G|} \sum_{g \in G} \rho(g^{-1}) \varepsilon_{\alpha\beta} \rho(g)$. In terms of matrices, take $(i,j)$-entry: $\frac{1}{|G|} \sum_g R(g^{-1})_{i \alpha} R(g)_{\beta j} = \frac{1}{n} \delta_{\alpha\beta}\delta_{ij}$ $\forall i,j$. Put $\alpha = i,\beta =j $ to get $\frac{1}{|G|} \sum_g R(g^{-1})_{ii} R(g)_{jj} = \frac{1}{n} \delta_{ij}$. Finally sum over $i,j$ to get $\bra \chi,\chi \ket = 1$.
\end{proof}
Before proving (b), let's prove column orthogonality:
\begin{thm} (6.1, column orthogonality relations)\\
\begin{equation*}
\begin{aligned}
\sum_{i=1}^k \overline{\chi_i(g_j)} \chi_i (g_l) = \delta_{jl} |C_G(g_j)|
\end{aligned}
\end{equation*}
\end{thm}
having an easy corollary
\begin{coro} (6.2)\\
$|G| =\sum_{i=1}^k \chi_i^2(1)$.
\end{coro}
\begin{proof} (of (6.1))\\
$\delta_{ij} = \bra \chi_i,\chi_j\ket = \sum \overline{\chi_i (g_l)} \chi_j (g_l) / |C_G(g_l)|$. Consider the character table $X = (\chi_i(g_j))$. Then $\bar{X} D^{-1} X^T = I_{k \times k}$ where $D = Diag(|C_G(g_1)|,...,|C_G(g_k)|)$.\\
Since $X$ is square, it follows that $D^{-1} \bar{X}^T$ is the inverse of $X$, so $\bar{X}^T X = D$.
\end{proof}
\begin{proof} (of (5.6(b)))\\
The $\chi_i$ generate $\mathcal{C}_G$. Let all the irreducible characters $\chi_1,...,\chi_l$ of $G$: claim these generate $\mathcal{C}_G$, the $\C$-space of class functions on $G$. It's enough to show that the orthogonal complement to $span\{\chi_1,...,\chi_l\}$ in $\mathcal{C}_G$ is $\{0\}$. To see this, assume $f \in \mathcal{C}_G$ with $\bra f,\chi_j\ket = 0 \forall j$. Let $\rho:G \to GL(V)$ be irreducible representation affording $\chi \in \{\chi_1,...,\chi_l\}$. Then $\bra f,\chi\ket = 0$.\\
Consider
\begin{equation*}
\begin{aligned}
\frac{1}{|G|} \sum_G \overline{f(g)} \rho(g): V \to V
\end{aligned}
\end{equation*}
This is a $G$-homomorphism, so as $\rho$ is irreducible, it must be $\lambda_\iota$ for some $\lambda \in \C$. Now
\begin{equation*}
\begin{aligned}
n\lambda &= \tr \frac{1}{|G|} \sum_g \overline{f(g)} \rho(g)\\
&= \frac{1}{|G|} \sum_g \overline{f(g)} \chi(g) = 0 = \bra f,\chi\ket
\end{aligned}
\end{equation*}
So $\lambda = 0$. Hence $\sum \overline{f(g)} \rho(g) = 0$, the zero endomorphism on $V$ for all representations $\rho$ (complete reducibility).
Take $\rho = \rho_{reg}$ where $\rho_{reg}(g): e_1 \to e_g$ ($g \in G$). So
\begin{equation*}
\begin{aligned}
\sum_g \overline{f(g)} \rho_{reg}(g): e_1 \to \sum_g \overline{f(g)} e_g
\end{aligned}
\end{equation*}
So it follows $\sum_g \overline{f(g)} e_g = 0$. So $\overline{f(g)} = 0 \forall g \in G$, so $f \equiv 0$.
\end{proof}
Various corollaries now follow:\\
$\bullet$ The number of irreducible representations of $G$ = number of ccls; (5.10)\\
$\bullet$ Column orthogonality (6.1);\\
$\bullet$ $|G| = \sum n_i^2$ (6.2);\\
$\bullet$ $g_1 \stackrel{\sim}{G} g_2 \iff \chi(g_1) = \chi(g_2)$ for all irreducible $\chi$ (5.11);\\
$\bullet$ If $g \in G$, $g \stackrel{\sim}{G} g^{-1} \iff \chi(g) \in \R$ for all irreducible $\chi$.
\newpage
\section{Permutation representations}
Preview was given in (3.7). Recall:
$\bullet$ $G$ finite group acting on finite set $X = \{x_1,...,x_n\}$;\\
$\bullet$ $\C X$ = $\C$-space, with basis $\{e_{x_1},...,e_{x_n}\}$ of dimension $|X|$, so is $\{\sum_j a_j e_{x_j}: a_j \in \C\}$;\\
$\bullet$ corresponding permutation representation $\rho_X:G \to GL(\C X)$ by $g \to \rho(g)$, where $\rho(g)$ sends $e_{x_j} \to e_{gx_j}$, extending linearly.\\
$\bullet$ $\rho_X$ is the \emph{permutation representation} corresponding to the action of $G$ on $X$.\\
$\bullet$ matrices representing $\rho_X(g)$ w.r.t. basis $\{e_x\}_{x \in X}$ are permutation matrices: 0 except for one 1 in each row and column, and $(\rho(g))_{ij} = 1$ iff $gx_j = x_i$. Consider its character:
(7.1) Permutation character, $\pi_X$, is
\begin{equation*}
\begin{aligned}
\pi_X(g) = |Fix_X(g)| =|\{x \in X:gx = x\}|.
\end{aligned}
\end{equation*}
(7.2) $\rho_X$ always contains $1_G$: $span \{e_{x_1}+...+e_{x_n}\}$ is a trivial $G$-subspace of $\C X$ with $G$-invariant complement $span\{\sum a_x e_x: \sum a_x = 0\}$.
\begin{lemma} (7.3, Burnside's lemma, after Cauchy, Frobenius)
$\bra \pi_X, 1\ket = $ number of orbits of $G$ on $X$.
\begin{proof}
If $X = X_1 \cup ... \cup X_l$ disjoint union of orbits, then $\pi_X = \pi_{X_1}+...+\pi_{X_l}$, with $\pi_{X_j}$ permutation character of $G$ on $X_j$, so to prove the claim it's enough to show that if $G$ is transitive on $X$ then $\bra \pi_X,1\ket = 1$. Assume $G$ is transitive on $X$. Now
\begin{equation*}
\begin{aligned}
\bra \pi_X,1\ket &= \frac{1}{|G|} \sum_g \pi_X(g) = \frac{1}{|G|} |\{(g,x) \in G \times X: gx = x\}|\\
&= \frac{1}{|G|} \sum_{x \in X} |G_x|=\frac{1}{|G|}|X||G_x| = \frac{1}{|G|}|G| = 1
\end{aligned}
\end{equation*}
(Note the use of orbit-stabilizer theorem).
\end{proof}
\end{lemma}
\begin{lemma} (7.4)\\
Let $G$ act on the sets $X_1,X_2$. Then $G$ acts on $X_1 \times X_2$ via $g(x_1,x_2) = (gx_1,gx_2)$. The character $\pi_{X_1 \times X_2} = \pi_{X_1} \pi_{X_2}$ and so $\bra \pi_{X_1} ,\pi_{X_2}\ket =$ number of orbits of $G$ on $X_1 \times X_2$.
\begin{proof}
If $g \in G$ then $\pi_{X_1 \times X_2} (g) = \pi_{X_1} (g) \pi_{X_2}(g)$. And we have
\begin{equation*}
\begin{aligned}
\bra \pi_{X_1},\pi_{X_2} \ket = \bra \pi_{X_1}\pi_{X_2},1\ket = \bra \pi_{X_1 \times X_2} ,1\ket = (7.3) \text{ number of orbits of G on } X_1 \times X_2.
\end{aligned}
\end{equation*}
\end{proof}
\end{lemma}
\begin{defi} (7.5)\\
Let $G$ act on $X$, $|X| > 2$. Then $G$ is \emph{2-transitive} on $X$ if $G$ has precisely two orbits on $X \times X: \{(x,x):x \in X\}$ and $\{(x_1,x_2) : x_i \in X,x_1 \neq x_2\}$.
\end{defi}
\begin{lemma} (7.6)\\
Let $G$ act on $X$, $|X|>2$. Then $\pi_X = 1+\chi$ with $\chi$ irreducible $\iff$ $G$ is 2-transitive on $X$.
\begin{proof}
$\pi_X = m_1 1 + m_2 \chi_2 + ... + m_l \chi_l$ with $1,\chi_2,...,\chi_l$ distinct irreducible characters and $m_i \in \N$. Then
\begin{equation*}
\begin{aligned}
\bra \pi_X,\pi_X\ket = \sum_{i=1}^l m_i^2
\end{aligned}
\end{equation*}
hence $G$ is 2-transitive on $X$ $\iff$ $l=2,m_1=m_2=1$.
\end{proof}
\end{lemma}
\begin{eg} (7.7)\\
Consider $S_n$ acting on $X=\{1,...,n\}$ which is $2$-transitive. Hence $\pi_X =1+\chi$ with $\chi$ irreducible of degree $n-1$. Similarly for $A_n$ ($n>3$).
\end{eg}
\begin{eg} (7.8)\\
Consider $G=S_4$.\\
\includegraphics[scale=0.6]{image/Rep_04.png}
\end{eg}
Last lecture we were talking about using column orthogonality to find $\chi_5$. Indeed we have
\begin{equation*}
\begin{aligned}
\chi_{reg} = \chi_1+\chi_2+3\chi_3+3\chi_4+2\chi_5
\end{aligned}
\end{equation*}
So we can use this to find $\chi_5$. Also, $S_4 / V_4 \cong S_3$ by 'lifting' -- see next chapter.
\subsection{Alternating groups}
Suppose $g \in A_n$. In 1A we've known that $|\mathcal{C}_{S_n} (g)| = |S_n:C_{S_n}(g)|$ and $|\mathcal{C}_{A_n}(g)| = |A_n : C_{A_n}(g)|$.
These are not necessarily equal. For example, $\sigma=(123) \in A_3$, $\mathcal{C}_{A_3} (\sigma) =\{\sigma\}$, but $\mathcal{C}_{S_3}(\sigma) = \{\sigma,\sigma^{-1}\}$.
\begin{lemma} (7.9)\\
Let $g \in A_n$. Then if $g$ commutes with some odd permutation in $S_n$ then $\mathcal{C}_{S_n} (g) = \mathcal{C}_{A_n}(g)$; otherwise $\mathcal{C}_{S_n}(g)$ splits into two ccls in $A_n$ of equal size.
\end{lemma}
For example, consider $G=A_4$, so $|G| = 12$.
\includegraphics[scale=0.6]{image/Rep_05.png}
Note that if we ignore the second row and first column, the table becomes identical to that of $C_3 \cong G/V_4$. This is not a coincidence, and is actually called \emph{lifting}.
\newpage
\section{Normal subgroups and lifting characters}
\begin{lemma} (8.1)\\
Let $N \triangleleft G$. Let $\tilde{\rho} : G/N \to GL(V)$ be a representation of $G/N$. Then
\begin{equation*}
\begin{aligned}
\rho: G &\xrightarrow{\text{canonical}} G/N \xrightarrow{\tilde{\rho}} GL(V)\\
g &\mapsto \tilde{\rho}(gN)
\end{aligned}
\end{equation*}
is a representation of $G$, where $\rho(g) := \tilde{\rho}(gN)$. Moreover, $\rho$ is irreducible iff $\tilde{\rho}$ is irreducible.
The corresponding characters satisfy $\chi(g) =\tilde{\chi} (gN)$. We say that $\tilde{\chi}$ \emph{lifts} to $\chi$. The lifting $\tilde{\chi} \to \chi$ is a bijection between irreducible representations of $G/N$ and irreducible representations of $G$ with $N$ in $\ker$.
Well this looks like Q4/Q12 in the first example sheet.
\begin{proof}
Note $\chi(g) = \tr(\rho(g)) = \tr(\tilde{\rho}(gN)) = \tilde{\chi}(gN) \forall g$, and $\chi(1) = \tilde{\chi}(N)$. So $\chi$ and $\tilde{\chi}$ have the same degree.
Bijection: if $\tilde{\chi}$ is a character of a $G/N$-representation and $\chi$ is its lift to $G$, then $\tilde{\chi}(N) = \chi(1)$. Also, if $k \in N$ then
\begin{equation*}
\begin{aligned}
\chi(k) = \tilde{\chi} (kN) = \tilde{\chi}(N) = \chi(1)
\end{aligned}
\end{equation*}
So $N \leq \ker\chi$.
Now let $\chi$ be character of $G$ with $N \leq \ker\chi$. Suppose $\rho:G \to GL(V)$ affords $\chi$. Define
\begin{equation*}
\begin{aligned}
\tilde{\rho}: & G/N &\to GL(V)\\
&gN &\to \rho(g)
\end{aligned}
\end{equation*}
Check this is well-defined (uses $N \leq \ker\chi$) and $\tilde{\rho}$ is homomorphism, hence gives representation of $G/N$. If $\tilde{\chi}$ is the character of $\tilde{\rho}$ then $\tilde{\chi}(gN) = \chi(g)$ $\forall g\in G$. So $\tilde{\chi}$ lifts to $\chi$.\\
Check irreducibility.
\end{proof}
\end{lemma}
\begin{lemma} (8.2)\\
The derived subgroup, $G' = \bra[a,b],a,b \in G \ket$ of $G$ is the unique minimal normal subgroup of $G$ s.t. $G/G'$ is abelian, i.e. $G/N$ is abelian $\implies G' \leq N$ and $G^{ab}=G/G'$ is abelian, where $G^{ab}$ is the \emph{abelianisation} of $G$.\\
$G$ has precisely $l=|G/G'|$ representations of $\dim 1$, all with kernel containing $G'$ and obtained by lifting from $G/G'$. In particular, $l | |G|$.
\begin{proof}
$G'\triangleleft G$ is an easy exercise.
Let $N \triangleleft G$. Let $h,g \in G$, so
\begin{equation*}
\begin{aligned}
&g^{-1}h^{-1}gh \in N \iff &(gh)N = (hg)N\\
&[g,h] \in N \iff &(gN)(hN) = (hN)(gN)
\end{aligned}
\end{equation*}
So $G' \leq N \iff G/N$ is abelian. Since $G' \triangleleft G$ we deduce $G/G'$ is abelian.
By (4.5), $G/G'$ has exactly $l$ irreducible characters $\tilde{\chi}_1,...,\tilde{\chi}_l$ all of degree 1. The lifts of these to $G$ also have degree 1 and by (8.1) these are precisely the irreducible characters $\chi_i$ of $G$ s.t. $G' \leq \ker \chi_i$. But any linear character of $G$ is a homomorphism $\chi:G \to \C^*$, hence $G' \leq \ker \chi$ ($\chi(ghg^{-1}h^{-1}) = \chi(g)\chi(h)\chi(g)^{-1}\chi(h)^{-1} = 1$), so the $\chi_1,...,\chi_l$ are all the linear characters of $G$.
\end{proof}
\end{lemma}
Examples:\\
(a) If $G=S_n$, show $S_n' = A_n$. Thus since $G/G' \cong C_2$, $S_n$ must have exactly two linear characters.\\
(b) Consider $G=A_4$. We've seen previously that this can be lifted from $C_3$ using (8.1),(8.2).
\begin{lemma} (8.4)\\
$G$ is not simple iff $\chi(g) = \chi(1)$ for some irreducible character $\chi \neq 1_G$ and some $1 \neq g \in G$.\\
Any normal subgroup of $G$ is the intersection of the kernels of some of the irreducible characters of $G$:
\begin{equation*}
\begin{aligned}
N =\bigcap_i \ker \chi_i
\end{aligned}
\end{equation*}
\begin{proof}
If $\chi(g) = \chi(1)$ for some non-trivial irreducible character $\chi$ (afforded by $\rho$, say). Then $g \in \ker\rho$ (5.3), so if $g \neq 1$, then $1 \neq \ker \rho \stackrel{\triangleleft}{\neq} G$.\\
If $1 \neq N \stackrel{\triangleleft}{\neq} G$, take irreducible $\tilde{\chi}$ of $G/N$, $\tilde{\chi}$ non-trivial. Lift to get an irreducible $\chi$, afforded by $\rho$ of $G$, then $N \leq \ker \rho \triangleleft G$. So $\chi(g) = \chi(1)$ for $g \in N$.\\
We claim that, if $1 \neq N \triangleleft G$, then $N$ is the intersection of the kernels of the lifts of all the irreducibles of $G/N$.\\
$\leq$ is clear from (8.1). If $g \in G \setminus N$, then $gN \neq N$. so $\tilde{\chi} (gN) \neq \tilde{\chi}(N)$ for some irreducible $\tilde{\chi}$ of $G/N$. Lifting $\tilde{\chi}$ to $\chi$, we have $\chi(g) \neq \chi(1)$.
\end{proof}
\end{lemma}
Recall $\ker \chi = \{g \in G: \chi(g) = \chi(1)\}$. (5.3) : $g \in \ker\chi \iff g \in \ker\rho$.
\newpage
\section{Dual spaces and tensor products of representations}
Recall (5.5):\\
$\bullet$ $\mathcal{C}(G)$ is $\C$-space of class functions on $G$;\\
$\bullet$ endowed with an inner product, $\dim \mathcal{C}(G) = k$, orthonormal basis of irreducible characters of $G$ (5.6);\\
$\bullet$ there exists an involution (ring homomorphism of order 2): $f \to f^*$ where $f^*(g) = f(g^{-1})$.
\begin{lemma} (9.1)\\
Let $\rho:G \to GL(V)$, representation over $F$, and let $V^* = Hom_F (V,F)$, dual space of $V$. Then $V^*$ is a $G$-space under
\begin{equation*}
\begin{aligned}
(\rho^*(g)\phi)(v) = \phi(\rho(g^{-1})v)
\end{aligned}
\end{equation*}
called the \emph{dual representation} to $\rho$. Its character is $\chi_{\rho^*}(g) = \chi_\rho (g^{-1})$.
\begin{proof}
\begin{equation*}
\begin{aligned}
\rho^*(g_1) (\rho^*(g_2)\phi)(v) &= (\rho^*(g_2)\phi)(\rho(g_1^{-1})(v))\\
&= \phi(\rho(g_2^{-1}) \rho(g_1^{-1}) v)\\
&= \phi(\rho(g_1g_2)^{-1}(v))\\
&= (\rho^*(g_1g_2)\phi)(v)
\end{aligned}
\end{equation*}
So this is a representation. For its character, fix $g \in G$ and let $e_1,...,e_n$ be basis of $V$ of eigenvectors of $\rho(g)$, say $\rho(g) e_j = \lambda_j e_j$. Let $\varepsilon_1,...,\varepsilon_n$ be dual basis. We claim that $\rho^*(g) \varepsilon_j =\lambda^{-1}_j \varepsilon_j$:
\begin{equation*}
\begin{aligned}
(\rho^*(g)\varepsilon_j)(e_i) = \varepsilon_j(\rho(g^{-1})e_i) = \varepsilon_j\lambda_i^{-1}e_i = \lambda_j^{-1} \varepsilon_j e_i \forall i
\end{aligned}
\end{equation*}
So $\chi_{\rho^*}(g) = \sum \lambda_j^{-1} = \chi_\rho(g^{-1})$.
\end{proof}
\end{lemma}
\begin{defi} (9.2)\\
$\rho:G \to GL(V)$ is \emph{self-dual} if $V \cong V^*$ (as $G$-spaces). Over $\C$, this holds iff $\chi_\rho(g) = \chi_\rho(g^{-1})$ ($=\overline{\chi_\rho(g)}$) $\forall g$, iff $\chi_\rho(g) \in \R$ for all $g$.
\end{defi}
Exercise: all irreducible representations of $S_n$ are self-dual (the ccls are determined by cycle type, so $g,g^{-1}$ are always $S_n$-conjugate). Not always true for $A_n$.
\subsection{Tensor products}
Let $V,W$ be $F-$spaces, $\dim V = m$, $\dim W = n$. Fix bases $v_1,...,v_m$ and $w_1,...,w_n$ of $V,W$ respectively. The \emph{tensor product space} $V \otimes_F W$ is an $nm$-dimensional $F$-space with basis $\{v_i \otimes w_j: 1 \leq i \leq m, 1 \leq j \leq n\}$. Thus\\
(a) $V \otimes W = \{\sum_{i,j} \lambda_{ij} v_i \otimes w_j: \lambda_{ij} \in F\}$ with 'obvious' addition and scalar multiplication;\\
(b) If $v = \sum_{i} \alpha_iv_i \in V$, $w = \sum_j \beta_j w_j \in W$, define $v\otimes w:=\sum_{i,j} \alpha_i\beta_j (v_i\otimes w_j)$.
\begin{rem}
Not all elements of $V \otimes W$ are of this form: some are combinations, e.g. $v_1 \otimes w_1 + v_2 \otimes w_2$, which can't be further simplified. (like entangled states)
\end{rem}
\begin{lemma} (9.3)\\
(i) For $v \in V$, $w \in W$, $\lambda \in F$, $(\lambda v) \otimes w = \lambda (v\otimes w) = v \otimes (\lambda w)$;\\
(ii) If $x_1,x_2,x \in V$, $y_1,y_2,y \in W$, then
\begin{equation*}
\begin{aligned}
(x_1+x_2) \otimes y = (x_1 \otimes y) + (x_2 \otimes y),\\
x \otimes (y_1+y_2) = (x \otimes y_1) + (x \otimes y_2)
\end{aligned}
\end{equation*}
\begin{proof}
(i) $v = \sum \alpha_i v_i$, $w = \sum \beta_j w_j$. Then just multiply out everything we get the desired equality. (ii) is similar.
\end{proof}
\end{lemma}
\begin{lemma} (9.4)\\
If $\{e_1,...,e_m\}$ is a basis of $V$, $\{f_1,...,f_n\}$ is a basis of $W$, then $\{e_i \otimes f_j: 1 \leq i \leq m,1 \leq j \leq n\}$ is a basis of $V \otimes W$.
\begin{proof}
Writing $v_k = \sum_i \alpha_{ik} e_i$, $w_l = \sum_j \beta_{jl} f_j$, we have
\begin{equation*}
\begin{aligned}
v_k \otimes w_l = \sum \alpha_{ik} \beta_{jl} e_i \otimes f_j
\end{aligned}
\end{equation*}
Hence $\{e_i \otimes f_j\}$ spans $V \otimes W$ and, since we have $nm$ of them, they form a basis.
\end{proof}
\end{lemma}
\begin{rem}
One can define $V \otimes W$ in a basis-independent way in the first place, see Teleman chapter 6.
\end{rem}
\begin{prop} (9.5)\\
Let $\rho: G \to GL(V)$, $\rho': G \to GL(V')$ be representations of $G$. Define $\rho \otimes \rho': G \to GL(V \otimes V')$ by
\begin{equation*}
\begin{aligned}
(\rho \otimes \rho') (g) : \sum \lambda_{ij} v_i \otimes w_j \to \sum \lambda_{ij} \rho(g) v_i \otimes \rho'(g) w_j
\end{aligned}
\end{equation*}
Then $\rho \otimes \rho'$ is a representation of $G$ with character
\begin{equation*}
\begin{aligned}
\chi_{\rho \otimes \rho'} (g) = \chi_\rho(g) \chi_{\rho'} (g) \forall g \in G
\end{aligned}
\end{equation*}
Hence product of two characters of $G$ is still a character of $G$.
\begin{proof}
On Tuesday.
\end{proof}
\end{prop}
(After lecture 11: this is the first notes to get beyond 1000 lines!)
\begin{rem} (9.6)\\
Sheet 1, Q2 says $\rho$ irreducible, $\rho'$ of degree 1, then $\rho \otimes \rho'$ irreducible; if $\rho'$ is not of $\deg$ 1 this is usually false.
\end{rem}
\begin{proof} (of 9.5)\\
It's clear that $(\rho \otimes \rho') (g) \in GL(V \otimes V')$ $\forall g \in G$ and so $\rho \otimes \rho'$ is a homomorphism $G \to GL(V \otimes V')$. Let $g \in G$. Let $v_1,...,v_m$ be basis of $V$ of eigenvectors of $\rho(g)$; let $w_1,...,w_n$ be a basis of $V'$. Say:
\begin{equation*}
\begin{aligned}
\rho(g) v_j = \lambda_j v_j, \rho'(g) w_j = \mu_j w_j
\end{aligned}
\end{equation*}
Then
\begin{equation*}
\begin{aligned}
(\rho \otimes \rho') (g) (v_i \otimes w_j) &= \rho(g) v_i \otimes \rho'(g) w_j\\
&= \lambda_i v_i \otimes \mu_j w_j\\
&= (\lambda_i \mu_j) (v_i \otimes w_j)
\end{aligned}
\end{equation*}
So $\chi_{\rho \otimes \rho'}(g) = \sum_{i,j} \lambda_i \mu_j = (\sum \lambda_i)(\sum \mu_j) = \chi_\rho(g)\chi_{\rho'}(g)$
\end{proof}
Now work over $\C$. Take $V=V'$ and define $V^{\otimes 2} = V \otimes V$.\\
Let
\begin{equation*}
\begin{aligned}
\tau: \sum \lambda_{ij} v_i \otimes v_j \to \sum \lambda_{ij} v_j \otimes v_i
\end{aligned}
\end{equation*}
which is a linear $G$-endomorphism of $V^{\otimes 2}$, s.t. $\tau^2 =1$ (so eigenvalues $\pm 1$).
\begin{defi} (9.7)\\
\begin{equation*}
\begin{aligned}
S^2 V = \{x \in V^{\otimes 2}: \tau(x) = x\},\\
\wedge^2 V = \{x \in V^{\otimes 2}: \tau(x) = -x\}
\end{aligned}
\end{equation*}
known as the \emph{symmetric square} of $V$ and \emph{exterior square} of $V$ respectively.
\end{defi}
\begin{lemma} (9.8)\\
$S^2 V$ and $\wedge^2 V$ are $G$-subspaces of $V^{\otimes 2}$ and $V^{\otimes 2} \cong S^2 V \oplus \wedge^2 V$. $S^2 V$ has basis $\{v_iv_j := v_i \otimes v_j + v_j \otimes v_i: 1 \leq i \leq j \leq n\}$, and $\wedge^2 V$ has basis $\{v_i \wedge v_j:=v_i \otimes v_j - v_j \otimes v_i: 1 \leq i < j \leq n\}$. Hence we have $\dim S^2 V = \frac{1}{2}n(n+1)$ and $\dim \wedge^2 V = \frac{1}{2} n(n-1)$.
\begin{proof}
Exercise in linear algebra.\\
To show $V^{\otimes 2}$ is reducible, write $x \in V^{\otimes 2}$ as $x=\frac{1}{2} (x+\tau(x)) + \frac{1}{2} (x-\tau(x))$, which is in $S^2 V$ and $\wedge^2 V$ respectively.
\end{proof}
\end{lemma}
In fact, $V^{\otimes 2}$, $V^{\otimes 3} = V \otimes V \otimes V$, ...,etc. are never irreducible if $\dim V > 1$.
\begin{lemma} (9.9)\\
If $\rho:G \to GL(V)$ is a representation affording character $\chi$, then $\chi^2 = \chi_S + \chi_\wedge$ where $\chi_s$ ($=S^2 \chi$) is the character of $G$ in the subrepresentation $S^2 V$, and $\chi_\wedge$ ($=\wedge^2 \chi)$ is the character of $G$ in the subrepresentation $\wedge^2 V$. Moreover, for $g \in G$,
\begin{equation*}
\begin{aligned}
\chi_s(g) = \frac{1}{2} (\chi^2(g) + \chi(g^2)),
\chi_\wedge(g) = \frac{1}{2} (\chi^2(g) - \chi(g^2)).
\end{aligned}
\end{equation*}
\begin{proof}
Let's compute the characters $\chi_s,\chi_\wedge$. Fix $g \in G$. Let $v_1,...,v_n$ be a basis of eigenvectors of $\rho(g)$, say $\rho(g)v_i = \lambda_i v_i$ (we drop the $\rho$ to write $g v_i = \lambda_i v_i$ for simplicity below). Then
\begin{equation*}
\begin{aligned}
g v_i v_j = \lambda_i \lambda_j v_i v_j\\
g v_i \wedge v_j = \lambda_i \lambda_j v_i \wedge v_j
\end{aligned}
\end{equation*}
Hence $\chi_s(g) = \sum_{1 \leq i \leq j \leq n} \lambda_i\lambda_j$ and $\chi_\wedge(g) = \sum_{1 \leq i < j \leq n} \lambda_i \lambda_j$. Now,
\begin{equation*}
\begin{aligned}
(\chi(g))^2 &= (\sum \lambda_i)^2\\
&= \sum \lambda_i^2 + 2\sum_{i < j} \lambda_i \lambda_j\\
&= \chi(g^2) + 2\sum_{i < j} \lambda_i \lambda_j\\
&= \chi(g^2) + 2 \chi_\wedge (g)
\end{aligned}
\end{equation*}
So $\chi_\wedge (g) = \frac{1}{2} (\chi^2 (g) - \chi(g^2))$. But $\chi^2 = \chi_s + \chi_\wedge$ so we get the expression for $\chi_s(g)$.
\end{proof}
\end{lemma}
\begin{eg} (9.10)\\
Consider our usual example $G=S_4$ (see 7.8).
\includegraphics[scale=0.5]{image/Rep_06.png}
\includegraphics[scale=0.5]{image/Rep_07.png}
Notice that $\wedge^2 \chi_3 = \bar{\chi}_3$ (irreducible since $\bra \chi_\wedge,\chi_\wedge\ket = 1$),\\
$S^2 \chi_3 = 1+\chi_3+\chi_5$: The inner product is 3 and it contains $1$, $\chi_3$, so the one left is $\chi_5$.
\end{eg}
Characters of $G \times H$ (seen in (4.5) for abelian groups):\\
\begin{prop} (9.11)\\
If $G,H$ are finite groups with irreducible characters $\chi_1,...,\chi_k$ and $\psi_1,...,\psi_r$ respectively, then the irreducible characters of the direct product $G \times H$ are precisely $\{\chi_i \psi_j:1 \leq i \leq k, 1 \leq j \leq r\}$, where $\chi_i \psi_j (g,h) = \chi_i(g) \psi_j(h)$.
\begin{proof}
If $\rho:G \to GL(V)$, $\rho':H \to GL(W)$ affording $\chi$ and $\psi$ respectively, then
\begin{equation*}
\begin{aligned}
\rho \otimes \rho': &G \times H \to &GL(V \otimes W)\\
&(g,h) \to &\rho(g) \otimes \rho'(h)
& &v_i \otimes w_j \to \rho(g) v_i \otimes \rho'(h) w_j
\end{aligned}
\end{equation*}
is a representation of $G \times H$ on $V \otimes W$ by (9.5), and $\chi_{\rho \otimes \rho'} = \chi\psi$, again by (9.5).\\
We claim that $\chi_i \psi_j$ are distinct and irreducible:
\begin{equation*}
\begin{aligned}
\bra \chi_i \psi_j,\chi_r\psi_s \ket_{G \times H} &= \frac{1}{|G \times H|} \sum_{(g,h)} \overline{\chi_i\psi_j (g,h)} \chi_r \psi_s (g,h)\\
&= (\frac{1}{|G|} \sum_g \overline{\chi_i(g)} \chi_r(g)) (\frac{1}{|H|} \sum_h \overline{\psi_j(h)} \psi_s(h))\\
&= \delta_{ir} \delta_{js}
\end{aligned}
\end{equation*}
...tbc.\\
Let's complete on $\chi_i\psi_j$ being distinct and irreducible:\\
Complete set: $\sum_{i,j} (\chi_i\psi_j)(1)^2 = \sum_i \chi_i(1)^2 \sum_j \psi_j(1)^2 = |G| |H| = |G \times H|$
\end{proof}
\end{prop}
\subsection{Symmetric and exterior powers}
Let $V$ be a vector space, $\dim_F V = d$, with basis $\{v_1,...,v_d\}$. Let $V^{\otimes n} = V \otimes ... \otimes V$, with basis $\{v_{i_1} \otimes ... \otimes v_{i_n} : (i_1,...,i_n) \in \{1,...,d\}^n\}$, so $\dim V^{\otimes n} = d^n$.
$S_n$-action: for any $\sigma \in S_n$, we can define linear map
\begin{equation*}
\begin{aligned}
\sigma: &V^{\otimes n} \to &V^{\otimes n}\\
v_1 \otimes ... \otimes v_n \to &v_{\sigma^{-1}(1)} \otimes ... \otimes v_{\sigma^{-1}(n)}
\end{aligned}
\end{equation*}
for $v_1,...,v_n \in V$, permuting positions of vectors in a tensor.
For example, $(12)(v_1\otimes v_2 \otimes v_3) = v_2 \otimes v_1 \otimes v_3$, $(13)(v_2 \otimes v_1 \otimes v_3) = v_3 \otimes v_1 \otimes v_2$.
Check that this defines a representation of $S_n$ on $V^{\otimes n}$ (extended linearly).
$G$-action: given representation $\rho:G \to GL(V)$, then the action of $G$ on $V^{\otimes n}$ is
\begin{equation*}
\begin{aligned}
\rho^{\otimes n} (g) : v_1 \otimes ... \otimes v_n = \rho(g) v_1 \otimes ... \otimes \rho(g) v_n
\end{aligned}
\end{equation*}
extended linearly, and this commutes with the $S_n$-action. We can decompose $V^{\otimes n}$ as $S_n$-module, and each isotypical component (4.?) is $G$-invariant subspace of $V^{\otimes n}$. In particular:
\begin{defi} (9.12)\\
For $G$-space $V$, define\\
(i) the $n$th symmetric power of $V$, $S^n V = \{x \in V^{\otimes n}: \sigma(x) = x \forall \sigma \in S_n\}$;\\
(ii) the $n$th exterior power of $V$, $\wedge^n V = \{x \in V^{\otimes n}: \sigma(x) = sign(\sigma)x \forall \sigma \in S_n\}$.\\
Both are $G$-subspaces of $V^{\otimes n}$, but for $n>2$, $S^n V \oplus \wedge^n V \lneq V^{\otimes n}$, so in general there are lots of others for the $S_n$-action.
\end{defi}
(9.13) See Sheet 3 Q7 for bases of $S^n V$, $\wedge^n V$ and their characters.
\subsection{Tensor algebra}
Take $char F = 0$.
\begin{defi} (9.14)\\
Let $T^n V = V^{\otimes n}$. The tensor algebra of $V$ is $TV := \oplus_{n \geq 0} T^n V$, $T^0 V = F$.\\
This is $F$-space and is a (non-commutative) graded ring with product $x \in T^n V$, $y \in T^m V$ , $x \cdot y = x \otimes y \in T^{n+m} V$.\\
There are two graded quotient rings
\begin{equation*}
\begin{aligned}
SV = TV /(\text{ideal generated by all } u \otimes v - v \otimes u)\\
\wedge V = TV / (\text{ideal generated by all } v \otimes v)
\end{aligned}
\end{equation*}
called the symmetric algebra and exterior algebra respectively.
\end{defi}
\begin{defi} (9.15)\\
The $\Z$-submodule of $\mathcal{C}(G)$ spanned by irreducible characters of $G$ is the character ring of $G$, $R(G)$. Elements of $R(G)$ are called generalised/virtual characters if $\psi = \sum n_\chi \chi$, $n_\chi \in \Z$ correspondingly.\\
$\bullet$ $R(G)$ is a commutative ring and any generalised character is a difference of two characters, $\psi = \alpha - \beta$:\\
$\alpha = \sum_{n_\chi \geq 0} n_\chi \chi, \beta = -\sum_{n_\chi < 0} n_\chi \chi$.\\
The $\{\chi_i\}$ form a $\Z$-basis for $R(G)$ as a free $\Z$-module.\\
$\bullet$ Suppose $\psi$ is virtual character and $\bra\psi,\psi\ket = 1$ and $\psi(1) > 0$. Then $\psi$ is actually the character of an irreducible representation of $G$.\\
List irreducible characters of $G$: $\chi_1,...,\chi_k$, $\psi = \sum n_i \chi_i$; orthonormality says $\bra\psi,\psi\ket = \sum n_i^2$, so $\sum n_i^2 = 1$, meaning $n_i = \pm 1$ for exactly one $i$ and $n_j = 0$ for $j \neq i$. Since $\psi(1)>0$, we must have $n_i = +1$.\\
$\bullet$ Henceforth we don't distinguish between a character and its negative and we often study generalised characters of norm 1 rather than irreducible characters.
\end{defi}
\newpage
\section{Restriction and induction}
Throughout we set $H \leq G$, $F = \C$.
\begin{defi} (10.1, restriction)\\
Let $\rho:G \to GL(V)$ be representation affording $\chi$. We can think of $V$ as a $H$-space by restricting attention to $h \in H$. We then get
\begin{equation*}
\begin{aligned}
Res_H^G \rho : &H \to &GL(V)
\end{aligned}
\end{equation*}
This is sometimes written as $\rho_H$ or $\rho\downarrow_H$, the restriction of $\rho$ to $H$. It affords the character $Res_H^G \chi = \chi_H = \chi \downarrow_H$.
\end{defi}
\begin{lemma} (10.2)\\
If $\psi$ is any non-zero character of $H \leq G$, then there exists irreducible character $\chi$ of $G$ s.t. $\bra Res_H^G \chi, \psi \ket_H \neq 0$. We say $\psi$ is a constituent of $Res_H^G \chi$.
\begin{proof}
\begin{equation*}
\begin{aligned}
0 \neq \frac{|G|}{|H|} \psi(1) = \bra \pi_{reg} \downarrow_H ,\psi\ket = \sum_1^k \deg \chi_i \bra \chi_i \downarrow_H ,\psi\ket
\end{aligned}
\end{equation*}
where $\chi_i$ are irreducible characters of $G$.
\end{proof}
\end{lemma}
\begin{lemma} (10.3)\\
Let $\chi$ be irreducible character of $G$, and let $Res_H^G \chi = \sum c_i \chi_i$ with $\chi_i$ irreducible characters of $H$, $c_i \in \Z_{\geq 0}$. Then
\begin{equation*}
\begin{aligned}
\sum c_i^2 \leq |G:H|
\end{aligned}
\end{equation*}
with equality iff $\chi(g) = 0$ $\forall g \in G \setminus H$.
\begin{proof}
\begin{equation*}
\begin{aligned}
\sum c_i^2 = \bra Res_H^G \chi,Res_H^G \chi \ket_H = \frac{1}{|H|} \sum_{h \in H} |\chi(h)|^2
\end{aligned}
\end{equation*}
But
\begin{equation*}
\begin{aligned}
1 = \bra \chi,\chi\ket_G &= \frac{1}{|G|} \sum_{g \in G} |\chi(g)|^2\\
&= \frac{1}{|G|} (\sum_{h \in H} |\chi(h)|^2 + \sum_{g \in G \setminus H} |\chi(g)|^2)\\
&= \frac{|H|}{|G|} \sum c_i^2 + \underbrace{\frac{1}{|G|} \sum_{g \in G \setminus H} |\chi(g)|^2}_{\geq 0}
\end{aligned}
\end{equation*}
So $\sum c_i^2 \leq |G:H|$, with equality iff $\chi(g) = 0$ $\forall g \in G\setminus H$.
\end{proof}
\end{lemma}
\begin{eg}
Let $G=S_5$, $H=A_5$. This has 7 representations of degree $1,1,4,4,5,5,6$ respectively, where if we restrict to $H$, the two representations of degree $1,4,5$ combine into one of the same degree respectively; however, the degree $6$ representation splits into two irreducible representations of degree 3. In the first case we have $\chi(g) \neq 0$ somewhere outside $H$; for the degree $6$ representation, $\chi(g) = 0$ $\forall g \in S_5 \setminus A_5$. When $|G:H|=2$, as here, each restriction is either irreducible or splits into two irreducible constituents. Fact: $\chi \downarrow_H$ all constituents have same degree if $H \triangleleft G$ (James--Liebeck, chapter 20).
\end{eg}
Let's talk about induced characters.
\begin{defi} (10.4)\\
If $\psi \in \mathcal{C}(H)$, define $Ind_H^G \psi(g) = \frac{1}{|H|} \sum_{x \in G} \mathring{\psi} (x^{-1} gx)$, where
\begin{equation*}
\begin{aligned}
\mathring{\psi}(g) = \left\{\begin{array}{ll}
\psi(g) & g \in H\\
0 & g \not\in H
\end{array}
\right.
\end{aligned}
\end{equation*}
We also write $Ind_H^G \psi (g)$ as $\psi \uparrow^G = \psi^G$.
\end{defi}
\begin{lemma} (10.5)\\
If $\psi \in \mathcal{C}(H)$ then $Ind_H^G \psi \in \mathcal{C}(G)$ and $Ind_H^G \psi(1) = |G:H|\psi(1)$.
\begin{proof}
This is clear, noting that $Ind_H^G \psi(1) = \frac{1}{|H|} \sum_{x \in G} \mathring{\psi}(1) = |G:H| \psi(1)$.
\end{proof}
\end{lemma}
Let $n = |G:H|$. Let $1=t_1,t_2,...,t_n$ be a \emph{left transversal} of $H$ in $G$ (complete set of coset representatives), so that $t_1H = H$, $t_2H,...,t_nH$ are precisely the $n$ left cosets of $H$ in $G$.
\begin{lemma} (10.6)\\
Given left transversal as above,
\begin{equation*}
\begin{aligned}
Ind_H^G \psi(g) = \sum_{i=1}^n \mathring{\psi} (t_i^{-1} gt_i)
\end{aligned}
\end{equation*}
\begin{proof}
For $h \in H$, $\mathring{\psi}((t_i h)^{-1} g(t_ih)) = \mathring{\psi} (t_i^{-1} g t_i)$ as $\psi$ is a class function on $H$.
\end{proof}
\end{lemma}
\begin{thm} (10.7, Frobenius reciprocity)\\
$H \leq G$. $\psi$ is a class function for $H$, $\phi$ is a class function for $G$. Then
\begin{equation*}
\begin{aligned}
\bra \underbrace{Res_H^G \phi}_{in\ \mathcal{C}(H)} , \psi\ket_H = \bra\phi,\underbrace{Ind_H^G \psi}_{in\ \mathcal{C}(G)}\ket_G
\end{aligned}
\end{equation*}
\begin{proof}
We want to show $\bra \phi_H,\psi\ket_H =\bra \phi,\psi^G \ket_G$:
\begin{equation*}
\begin{aligned}
\bra \phi,\psi^G \ket = \frac{1}{|G|} \sum_{g \in G} \overline{\phi(g)} \psi^G (g) =\frac{1}{|G||H|} \sum_{g,x \in G} \overline{\phi(g)} \mathring{\psi} (x^{-1}gx)
\end{aligned}
\end{equation*}
Put $y=x^{-1}gx$. The above then equals
\begin{equation*}
\begin{aligned}
\frac{1}{|G||H|} \sum_{x,y \in G} \overline{\phi(y)} \mathring{\psi}(y) = \frac{1}{|H|} \sum_{y \in G} \overline{\phi(y)} \mathring{\psi}(y)
\end{aligned}
\end{equation*}
which is independent of $x$, and then equals
\begin{equation*}
\begin{aligned}
\frac{1}{|H|} \sum_{y \in H} \overline{\phi(y)} \psi(y) = \bra \phi_H,\psi\ket_H
\end{aligned}
\end{equation*}
\end{proof}
\end{thm}
\begin{coro} (10.8)\\
If $\psi$ is a character of $H$, then $Ind_H^G \psi$ is a character of $G$.
\begin{proof}
Let $\chi$ be an irreducible character of $G$. Then
\begin{equation*}
\begin{aligned}
\bra Ind_H^G \psi,\chi\ket = \bra \psi,Res_H^G \chi\ket \in \Z_{\geq 0}
\end{aligned}
\end{equation*}
since $\psi$ and $Res_H^G \chi$ are characters. Hence $Ind_H^G \psi$ is a linear combination of irreducible charcaters with non-negative coefficients, hence a character.
\end{proof}
\end{coro}
\begin{lemma} (10.9)\\
Let $\psi$ be a character of $H \leq G$, and let $g \in G$. Let
\begin{equation*}
\begin{aligned}
\mathcal{C}_G(g) \cap H = \bigcup_{i=1}^m \mathcal{C}_H (x_i)
\end{aligned}
\end{equation*}
(disjoint union), where the $x_i$ are representatives of the $H$-ccls of elements of $H$ conjugate to $g$.\\
If $m =0$, then $Ind_H^G \psi(g) = 0$. Otherwise
\begin{equation*}
\begin{aligned}
Ind_H^G \psi(g) = |C_G(g)| \cdot \sum_{i=1}^m \frac{\psi(x_i)}{|C_H(x_i)|}
\end{aligned}
\end{equation*}
\begin{proof}
Assume $m>0$. Let $X_i = \{x \in G: x^{-1} g x \in H$ and is conjugate in $H$ to $x_i\}$ $\forall 1 \leq i \leq m$.
The $X_i$ are pairwise disjoint, and their union is $\{x \in G: x^{-1} gx \in H\}$. By definition,
\begin{equation*}
\begin{aligned}
Ind_H^G \psi(g) &= \frac{1}{|H|} \sum_{x \in G} \mathring{\psi}(x^{-1}gx)\\
&= \frac{1}{|H|} \sum_{i=1}^m \sum_{x \in X_i} \psi (x^{-1}gx)\\
&= \frac{1}{|H|} \sum_{i=1}^m \sum_{x \in X_i} \psi(x_i)\\
&= \sum_{i=1}^m \frac{|X_i|}{|H|} \psi(x_i)
\end{aligned}
\end{equation*}
and evaluate $\frac{|X_i|}{|H|}$ to get what we want... although a bit tedious: Fix $1 \leq i \leq m$ and choose some $g_i \in G$ s.t. $g_i^{-1} gg_i = x_i$ so $\forall c \in C_G (g)$ and $h \in H$,
\begin{equation*}
\begin{aligned}
(cg_i h)^{-1} g(cg_ih) &= h^{-1}g_i^{-1}c^{-1}gcg_i h\\
&= h^{-1}g_i^{-1} c^{-1}cgg_i h\\
&=h^{-1} g_i^{-1} g g_i h\\
&= h^{-1} x_i h \in H
\end{aligned}
\end{equation*}
i.e. $cg_i h \in X_i$, hence $C_G(g) g_i H \subseteq X_i$;
Conversely, if $x\in X_i$ then $x^{-1} gx = h^{-1} x_i h = h^{-1} (g_i^{-1} gg_i)h$ for some $h \in H$; thus $xh^{-1} g_i^{-1} \in C_G(g)$. So $x \in C_G(g) g_i h \subseteq C_G(g) g_i H$. Conclude $X_i = C_G (g) g_i H$, thus
\begin{equation*}
\begin{aligned}
|X_i| = |C_G(g) g_i H| = \frac{|C_G(g)||H|}{|H \cap g_i^{-1} C_G (g) g_i|}
\end{aligned}
\end{equation*}
(see notes at end). Finally $g_i^{-1} C_G(g) g_i = C_G(g_i^{-1} gg_i) = C_G(x_i)$. Thus
\begin{equation*}
\begin{aligned}
|X_i| &= |H:H \cap C_G(x_i)||C_G(g)|\\
&=|H:C_H(x_i)||C_G(g)|
\end{aligned}
\end{equation*}
Thus,
\begin{equation*}
\begin{aligned}
\frac{|X_i|}{|H|} &= \frac{|H:C_H(x_i)||C_G(g)|}{|H|}\\
&= \frac{|C_G(g)|}{|C_H(x_i)|}
\end{aligned}
\end{equation*}
for each $1 \leq i \leq m$.
\end{proof}
\end{lemma}
Note: if $H,K \leq G$, a double coset of $H$ and $K$ in $G$ is a set $HgK = \{hgk:h \in H,k \in K\}$ for some $g \in G$.
Facts:\\
$\bullet$ two double cosets are either disjoint or equal;\\
$\bullet$ $|HgK| = \frac{|H||K|}{|H \cap gKg^{-1}|} = \frac{|H||K|}{|g^{-1} Hg\cap K|}$ (prove this: it's a bit like $|HK|$).
\begin{eg}
Consider $H = C_4 = \bra (1234) \ket \leq G = S_4$, of index 6. Char of induced representation $Ind_H^G(\alpha)$ where $\alpha$ is faithful 1-dim representation of $C_4$. If $\alpha((1234)) = i$, then char of $\alpha$ is $(1\ i\ -1\ -i)$ for $(1),(1234),(13)(24),(1432)$. The induced representation of $S_4$, we know $Ind_{C_4}^{S_4} \chi_\alpha$ evaluates to 6 at $(1)$ (by (10.5)) and to $0$ at $(12)$ and $(123)$.\\
For $(12)(34)$ only one of the three elements of $S_4$ it's conjugate to, lies in $H$, namely $(13)(24)$. So $Ind_H^G \chi_\alpha((12)(34)) = 8(-1/4) = -2$.\\
For $(1234)$, it is conjugate to $6$ elements of $S_4$ of which two are in $C_4$, namely $(1234)$ and $(1432)$. So $Ind_H^G \chi_\alpha (1234) = 4(\frac{i}{4} - \frac{i}{4}) = 0$.
\end{eg}
\subsection{Induced representations}
Let $H \leq G$, of index $n$. Let $1=t_1,t_2,...,t_n$ transversal, i.e. $H,t_2H,...,t_n H$ are left cosets of $H$. Let $W$ be a $H$-space.
\begin{lemma} (10.10)\\
$Ind_{\{1\}}^G 1= \rho_{reg}$.
\end{lemma}
\begin{defi} (10.11)
Let $V:=W \oplus t_2\otimes W \oplus ... \oplus t_n \otimes W = \bigoplus_{t_i} t_i \otimes W$, where $t_i \otimes W = \{t_i \otimes w: w \in W\}$. So $\dim V = n\dim W$. We write $V = Ind_H^G W$.
\end{defi}
\emph{$G$-action}: Let $g \in G$. $\forall i \exists $ unique $j$ with $t_j^{-1} g t_i \in H$ (namely $t_j H$ is the coset containing $gt_i$). You got to understand where did this $g$ come from, otherwise you can't make progress. Define
\begin{equation*}
\begin{aligned}
g(t_i \otimes w) = t_j \otimes ((t_j^{-1} gt_i) w)
\end{aligned}
\end{equation*}
We drop $\otimes$ from now. Check this is a $G$-action. Then
\begin{equation*}
\begin{aligned}
g_1(g_2 t_i w) &= g_1(t_j(t_j^{-1} g_2t_i)w)\\
&= t_l((t_l^{-1}g_1t_j)(t_j^{-1}g_2 t_i)w)\\
&= t_l (t_l^{-1} (g_1g_2) t_i)w = (g_1g_2) (t_i w)
\end{aligned}
\end{equation*}
where $j$ and $l$ are the unique ones such that $g_2 t_iH = t_jH$ and $g_1 t_j H = t_l H$.
It has the 'right' character: $g:t_i w \to t_j \underbrace{(t_j^{-1} gt_i)}_{\in H} w$, so the contribution to the character is $0$ unless $j=i$, i.e. if $t_i^{-1} gt_i \in H$, in which case it contributes $\psi(t_i^{-1} gt_i)$. So
\begin{equation*}
\begin{aligned}
Ind_H^G \psi(g) = \sum_1^n \mathring{\psi} (t_i^{-1} gt_i)\ (10.6)
\end{aligned}
\end{equation*}
\begin{rem} (10.12)\\
There is Frobenius Reciprocity,
\begin{equation*}
\begin{aligned}
Hom_H(W,Res_H^G V) \cong Hom_G (Ind_H^G W,V)
\end{aligned}
\end{equation*}
\end{rem}
naturally as vector spaces ($W$ is a $H$-space, $V$ is a $G$-space).
\begin{lemma} (10.13)\\
(i) $Ind_H^G(W_1 \oplus W_2) \cong Ind_H^G W_1 \oplus Ind_H^G W_2$;\\
(ii) $\dim Ind_H^G W = |G:H|\dim W$.\\
(iii) If $H \leq K \leq G$, then $Ind_K^G Ind_H^K W \cong Ind_H^G W$.\\
(lecture had (10.10) here because he missed it previously, and labelled (iii) as (iv) while (10.10) as (iii)).\\
\begin{proof}
(10.10):
\begin{equation*}
\begin{aligned}
Ind_H^G \psi(g) &= \sum_{i=1}^n \mathring{\psi}(t_i^{-1} gt_i)\\
&= \sum_1^n \mathring{1}_H (t_i^{-1} gt_i)\\
&= |\{ i: t_i^{-1} gt_i \in H\}|\\
&= |\{i : g \in t_i H t_i^{-1}\}| = |fix_X (g)| = \pi_X(g)
\end{aligned}
\end{equation*}
\end{proof}
\end{lemma}
\begin{rem}
$\bra \pi_X,1_G\ket_G = \bra Ind_H^G 1_H, 1_G\ket_G = \bra 1_H,1_H\ket = 1$ as predicted in chapter 7.
\end{rem}
\newpage
\section{Frobenius groups}
\begin{thm} (11.1, Frobenius theorem, 1891)\\
Let $G$ be a transitive permutation group on a finite $X$, say $|X|=n$. Assume that each non-identity element of $G$ fixes at most one element of $X$. Then
\begin{equation*}
\begin{aligned}
K = \{1\} \cup \{g \in G: g \alpha \neq \alpha \forall \alpha \in X\}
\end{aligned}
\end{equation*}
is a normal subgroup of $G$ of order $n$.\\
Note that $G$ is necessarily finite, being isomorphic to a subgroup of $S_X$.
\begin{proof}
(method of exceptional characters, due to M. Isaacs - chapter 7 books)\\
We have to show $K \triangleleft G$. Let $H = G_\alpha$ the stabiliser of $\alpha \in X$ for some $\alpha \in X$, i.e. $g G_\alpha g^{-1} = G_{g\alpha}$. Conjugates of $H$ are stabilisers of single elements of $X$. No two conjugates can share a non-identity element (by hypothesis), so $H$ has $n$ distinct conjugates, and $G$ itself has $n(|H|-1)$ elements that fix exactly one element of $X$. But $|G| = |X||H| = n|H|$ ($X$ and $G/H$ are isomorphic (because transitive action) as $G$-sets). Hence $|K| = |G| -n(|H|-1) = n$. Let $1 \neq h \in H$. Suppose $h = gh'g^{-1}$ for some $g \in G, h' \in H$. Then $h$ lies in both $H = G_\alpha$ and $gHg^{-1} = G_{g\alpha}$; by hypothesis $g\alpha = \alpha$, hence $g \in H$. Therefore, the ccls in $G$ of $h$ is precisely the ccls in $H$. Similarly if $g \in C_G(h)$, then $h=ghg^{-1} \in G_{g\alpha}$ and hence $g \in H$. We conclude $C_G(h) = C_H(h)$ ($1 \neq h \in H$). Every element of $G$ either belongs to $K$ or lies in one of the $n$ stabilisers, each of which is conjugate to $H$. So every element of $G \setminus K$ is conjugate with a non-identity element of $H$.\\
So $\{1,h_2,...,h_t,y_1,...,y_u\}$ (the representations of $H$-ccls and representations of ccls of $G$ which comprise $K \setminus \{1\}$ respectively) is a set of ccls reps for $G$.
Take $\theta_1 = 1_G$. $\{1_H = \psi_1,...,\psi_t\}$ be irreducible characters of $H$. Fix $1 \leq i \leq t$. Then, if $g \in G$, we know
\begin{equation*}
\begin{aligned}
Ind_H^G \psi_i (g) = \left\{\begin{array}{ll}
|G:H| \psi_i(1) = n\psi_i(1) & g=1\\
\psi_i (h_j) & g=h_j (2 \leq j \leq t)\\
0 & g=y_k (1 \leq k \leq u)
\end{array}
\right.
\end{aligned}
\end{equation*}
where in the second case we appeal to $C_G(h_j) = C_H(h_j)$ and (10.9). Now fix some $2 \leq i \leq t$ and put $\theta_i = \psi_i^G - \psi_i(1) \psi_1^G + \psi_i(1) \theta_1 \in R(G)$ by (9.15). Values for $2 \leq j \leq t$, $1 \leq k \leq u$:
\includegraphics[scale=0.5]{image/Rep_08.png}
Now calculate
\begin{equation*}
\begin{aligned}
\bra \theta_i ,\theta_i \ket &= \frac{1}{|G|} \sum_{g \in G} |\theta_i(g)|^2\\
&= \frac{1}{|G|} \left(\sum_{g \in K} |\theta_i(g)|^2 + \sum_{\alpha \in X} \sum_{1 \neq g \in G_\alpha} |\theta_i(g)|^2\right)\\
&= \frac{1}{|G|} (n\psi_i^2 (1) + n \sum_{1 \neq h \in H} |\theta_i (h)|^2)\\
&= \frac{1}{|H|} \sum_{h \in H} |\psi_i(h)|^2\\
&= \bra \psi_i,\psi_i\ket\\
&= 1
\end{aligned}
\end{equation*}
As $\psi_i$ is irreducible. So (by (9.15)), either $\theta_i$ or $-\theta_i$ is a character. Since $\theta_i(1) > 0$, it's $+\theta_i$, an actual character. Let $\theta = \sum_{i=1}^t \theta_i(1) \theta_i$. Column orthogonality gives $\theta(h) = \sum_{i=1}^t \psi_i(1) \psi_i(h) = 0$ ($1 \neq h \in H$), and for any $y \in K$, $\theta(y) = \sum_{i=1}^t \psi_i^2 (1) = |H|$. Hence
\begin{equation*}
\begin{aligned}
\theta(g) = \left\{\begin{array}{ll}
|H| & g \in K\\
0 & g \not\in K
\end{array}
\right.
\end{aligned}
\end{equation*}
So $K = \{g \in G : \theta(g) = \theta(1) \} \triangleleft G$.
\end{proof}
\end{thm}
\begin{defi} (11.2)\\
A Frobenius group is a group $G$ having subgroup $H$ s.t. $H \cap gHg^{-1} = 1$ $\forall g \not\in H$.\\
$H$ is the Frobenius complement of $G$.
\end{defi}
\begin{prop} (11.3)\\
Any finite Frobenius group satisfies the hypothesis of (11.1). The normal subgroup $K$ is a Frobenius Kernel of $G$.
\begin{proof}
Let $G$ be Frobenius, with complement $H$. Then action of $G$ on $G/H$ is transitive and faithful. Furthermore, if $1 \neq g \in G$ fixes both $xH$ and $yH$, then $g \in xHx^{-1} \cap yhy^{-1}$ $\implies$ $H \cap (y^{-1} x) H (y^{-1}x)^{-1} \neq 1$ $\implies xH = yH$.
\end{proof}
\end{prop}
Example: If $p,q$ distinct primes, $p \equiv 1 \pmod q$, the unique non-abelian group of order $pq$ is a Frobenius group (see James-Liebeck chapter 25 or Teleman chapter 11).
Remarks:\\
$\bullet$ Thompson (thesis, 1959) proved any finite group having fixed point free automorphism of prime power order is nilpotent. This implied that in finite Frobenius group, $K$ is nilpotent (iff $K$ is a direct product of its sylow subgroups).\\
$\bullet$ There is no proof of (11.1) known in which character theory is not required.
\newpage
\section{The missing lecture: Mackey Theory}
Let's work over $\C$. Mackey Theory describes restriction to a subgroup $K \leq G$ of an irreducible representation $Ind_H^G W$. Here $K,H$ are unrelated, but usually we take $K=H$, in which case we can characterise when $Ind_H^G W$ is irreducible. (?)
Special case: $W=1_H$ (trivial $H$-space of dimension 1). Then $Ind_H^G W$ is the permutation representation of $G$ on $G/H$ (by 10.10, action on left cosets of $H$ in $G$).
Recall: if $G$ is transitive on a set $X$ and $H=G_\alpha$ for some $\alpha \in X$, then the action of $G$ on $X$ is isomorphic to the action of $G$ on $G/H$, namely
\begin{equation*}
\begin{aligned}
g \cdot \alpha & \leftrightarrow &gH \ (12.1)\\
\in X & & \in G/H
\end{aligned}
\end{equation*}
is a well-defined bijection and commutes with $G$-actions ($x(g\alpha) = (xg)\alpha \leftrightarrow x(gH) = (xg)H$).
Consider the action of $G$ on $G/H$ and let $K \leq G$. $G/H$ splits into $K$-orbits: these correspond to \emph{double cosets} $KgH = \{kgh : k \in K, h \in H\}$, namely the $K$-orbit containing $gH$ contains precisely all $kgH$ with $k \in K$ (bunches of some $gH$ cosets together).
\begin{notation} (12.2)\\
$K\backslash G/H$ is the set of $(K,H)$-double cosets; they partition $G$. Note that $|K\backslash G/H| = \bra \pi G/K,\pi G/H\ket$ as in (7.4). Let $S$ be the set of representations.
\end{notation}
Clearly $G_{gH} = gHg^{-1}$, so $K_{gH} = gHg^{-1} \cap K = H_g$.
So by (12.1), the action of $K$ on the orbit containing $gH$ is isomorphic to the action of $K$ on $K/Hg$. From this, using $Ind_H^G 1_H = \C(G/H)$ and, if $X = \cup X_i$ a decomposition into orbits, then $\C X = \oplus_i \C X_i$, we get
\begin{prop} (12.3)\\
$G$ is a finite group, $H,K \leq G$. Then
\begin{equation*}
\begin{aligned}
Res_K^G Ind_H^G 1 \cong \oplus_{g \in S} Ind_{gHg^{-1}}^K 1
\end{aligned}
\end{equation*}
I think this is some application:\\
Let $S = \{g_1=1,g_2,...,g_r\}$ be s.t. $G = \cup_i Kg_i H$. Write $H_g = gHg^{-1} \cap K$ ($\leq K$). $(\rho,W)$ is representation of $H$. For $g \in G$, define $(\rho_g,W_g)$ to be the representation of $Hg$ with the same underlying vector space $W$, but now the $Hg$-action is $\rho_g(x) = \rho(h)$, where $x \in gHg^{-1}$. Since $H_g \leq K$, we obtain an induced represntation $Ind_{H_g}^G W_g$ from this.
\end{prop}
\begin{thm} (12.4) (Mackey's restriction formula)\\
$G$ finite, $H,K \leq G$ and $W$ $H$-space. Then
\begin{equation*}
\begin{aligned}
Res_K^G Ind_H^G W = \oplus_{g\in S} Ind_{H_g}^K W_g
\end{aligned}
\end{equation*}
as $K$-modules.\\
We'll prove this later.
\end{thm}
\begin{coro} (12.5, character version of (12.4))\\
If $\psi$ is a character of a representation of $H$, then
\begin{equation*}
\begin{aligned}
Res_K^G Ind_H^G \psi = \sum_{g \in S} Ind_{H_g}^K \psi_g
\end{aligned}
\end{equation*}
where $\psi_g$ is the character of $H_g$ given as $\psi_g(x) = \psi(g^{-1} xg)$.
\end{coro}
\begin{coro} (12.6, Mackey's irreducibility criterion)\\
Let $H \leq G$, $W$ be a $H$-vector space. Then $V = Ind_H^G W$ is irreducible iff\\
(i) $W$ is irreducible;\\
(ii) for each $g \in S\setminus H$, the two $Hg$- spaces $Wg$ and $Res_{H_g}^H W$ have no irreducible consitutnets in common (they're 'disjoint' representations).
\begin{proof}
Let $W$ afford character $\psi$. Recall $W$ irreducible $\iff \bra \psi,\psi \ket = 1$. Take $K=H$ in (12.4), so $Hg = gHg^{-1} \cap H$. Then
\begin{equation*}
\begin{aligned}
\bra Ind_H^G \psi,Ind_H^G \psi\ket_G = \bra \psi,Res_H^G Ind_H^G \psi\ket_H
\end{aligned}
\end{equation*}
by (10.7), then by (12.5) is equal to
\begin{equation*}
\begin{aligned}
\sum_{g \in S} \bra \psi Ind_{H_g}^H \psi_g\ket_H &= \sum_{g \in S} \bra Res_{H_g}^H \psi, \psi_g\ket_{H_g}\\
&= \bra \psi,\psi\ket_H + \sum_{g \in S, g \not\in H} d_g
\end{aligned}
\end{equation*}
where $d_g = \bra Res_{H_g}^H \psi,\psi_g \ket$ ($g \neq 1$).
\end{proof}
\end{coro}
For $g=1$ we have $H_g =H$, hence we get a sum of non-negative integers which is $\geq 1$. So $Ind_H^G \psi$ is irreducible iff $\bra\psi,\psi\ket = 1$ and all the other terms in the sum are 0. In other words, $W$ is irreducible representation of $H$ and $\forall g \not\in H$, $W$ and $W_g$ are disjoint representations of $H \cap gHg^{-1}$.
\begin{rem}
Set $S$ of representations was arbitrary, so could demand $g \in G \setminus H$ in (ii) but in fact suffices to check for $g \in S \setminus H$.
\end{rem}
\begin{coro} (12.7)\\
If $H \triangleleft G$, assume $\psi$ is an irreducible character of $H$. Then $Ind_H^G \psi$ is irreducible $\iff \psi$ is distinct from all its conjugates $\psi_g$ for all $g \in G\setminus H$ ($\psi_g(h) = \psi(ghg^{-1})$).
\begin{proof}
Again take $K=H$, noting double cosets $\equiv$ left cosets. Also, $Hg = H$ $\forall g$ (as $H \triangleleft G$). Moreover, $Wg$ is irreducible since $W$ is irreducible. So by (12.6), $Ind_H^G W$ is irreducible precisely when $W \not\cong Wg$ $\forall g \in G \setminus H$. This is equivalent to $\psi \neq \psi g$.
\end{proof}
\end{coro}
\begin{rem}
Again could check conditions on a set of representatives.
\end{rem}
\begin{proof} (of 12.4)\\
Write $V = Ind_H^G W$. Fix $g \in G$. Now $V$ is a direct sum of $x \oplus W$ with $x$ running through representations of left cosets of $H$ in $G$ (10.11). $V = \oplus_{x \in J} x \otimes W$.\\
Consider a particular coset $KgH = K\backslash G/H$. The terms
\begin{equation*}
\begin{aligned}
V(g) = \oplus_{x\text{ rep of H in } G, x \in KgH} x \otimes W
\end{aligned}
\end{equation*}
forms a subspace invariant under the action of $K$ (it's a direct sum of an orbit of subspaces permuted by $K$). Now viewing $V$ as a $K$-space (forget $G$-structure), $Res_K^G V = \oplus_{g \in S} V(g)$, so we need to show $V(g) = Ind_{H_g}^K W_g$ as $K$-spaces for each $g \in S$.
Now, $Stab_K (g \otimes W) = \{k \in K: kg \otimes W = g \otimes W\} = \{k \in K: g^{-1} kg \in Stab_G (1 \otimes W) = H\} = K \cap gHg^{-1}$ ($=Hg$). This implies if $x = kgh, x' =k'gh'$, then $x \otimes W = x' \otimes W$ iff $k,k'$ lie in same coset in $K/Hg$, hence $V(g)$ is direct sum $\oplus_{k \in K/Hg} k \otimes (g \otimes W)$. Therefore, as a representation of $K$, this subspace is
\begin{equation*}
\begin{aligned}
V(g) \cong Ind_{H_g}^K (g \otimes W)
\end{aligned}
\end{equation*}
\end{proof}
But $g \otimes W \cong Wg$ as a representaiton of $Hg$ ($w \to g \otimes W$ is an isomorphism). Putting everything together we are done.
\newpage
\section{Integrality in the group algebra}
\begin{defi} (13.1)\\
$a \in \C$ is an algebraic integer if: $a$ is a root of a monic polynomial in $\Z[x]$. Equivalently, the subring of $\C$ generated by $\Z[a] = \{f(a): f(x) \in \Z[x]\}$ is a finitely generated $\Z$-module.
\end{defi}
Fact 1: The algebraic integers form a subring of $\C$ (see number fields);\\
Fact 2: If $a \in \C$ is both an algebraic integer and a rational number, then it's an integer (see number fields);\\
Fact 3: Any subring of $\C$ which is a finitely-generated $\Z$-module consists of algebraic integers.
\begin{prop} (13.2)\\
If $\chi$ is character of $G$ and $g \in G$, then $\chi(g)$ is an algebraic integer.
\begin{proof}
$\chi(g)$ is a sum of $n$th roots of unity ($n=|g|$). Each root of unity is an algebraic integer, and any sum of algebraic integers is an algebraic integer by fact 1.
\end{proof}
\end{prop}
\begin{coro}
There are no entries in the character tables of any finite group which are rational but not integers, by Fact 2.
\end{coro}
\subsection{The centre of $\C G$}
Recall from (2.4), the group algebra $\C G =\{ \sum \alpha_g g: \alpha_g \in \C\}$ of finite group, the $\C$-space with basis $G$. Also a ring, hence a finite-dimensional $\C$-algebra.
List $\{1\} = \mathcal{C}_1,...,\mathcal{C}_k$ the $G$-ccls. Define the class sums:
\begin{equation*}
\begin{aligned}
C_j = \sum_{g \in \mathcal{C}_j} g \in \C G
\end{aligned}
\end{equation*}
Claim, each $C_j \in Z(\C G)$, the centre of $\C G$ (Note: this is not the same as $\C(Z(G))$!).
\begin{prop} (13.3)\\
$C_1,...,C_k$ is a basis of $Z(\C G)$. There exist non-negative integers $a_{ijl}$ ($1 \leq i,j,l \leq k$) with
\begin{equation*}
\begin{aligned}
C_i C_j = \sum_l a_{ijl} C_l
\end{aligned}
\end{equation*}
These are called the class algebra constants for $Z(\C G)$.
\end{prop}
Remember last time we had $C_j = \sum_{g \in \mathcal{C}_j} g$ ($=\sum_{k=1}^r x_k^{-1} g_j x_k$). We claimed that $c_1,...,c_k$ are basis for $Z(\C G)$. Let's now prove it.
\begin{proof}
Check that $g C_j g^{-1} = C_j$ $\forall g \in G$. So $C_j \in Z(\C G)$. Clear that the $C_j$ are linearly independent (because the $\mathcal{C}_J$ are pairwise disjoint). Now suppose $z \in Z(\C G)$, $z = \sum_{g \in G} \alpha_g g$. Then $\forall h \in G$ we have $\alpha_{h^{-1}gh} = \alpha_g$. So the function $g \to \alpha_g$ is constant on $G$-conjugacy classes. Wrigting $\alpha_g = \alpha_j$ for $g \in \mathcal{C}_j$, then $z = \sum_1^k \alpha_j C_j$. Finally, $Z(\C G)$ is a $\C$-algebra, so $C_i C_j = \sum_{l=1}^k \underbrace{a_{ijl}}_{\in \C} C_l$, as the $C_l$ span. We claim $a_{ijl} \in \Z_{\geq 0}$ $\forall i,j,l$: Fix $g_l \in \mathcal{C}_l$. Then
\begin{equation*}
\begin{aligned}
a_{ijl} = |\{(x,y) \in \mathcal{C}_i \times \mathcal{C}_j : xy = g_l\}| \in \Z_{\geq 0}
\end{aligned}
\end{equation*}
\end{proof}
\begin{defi} (13.4)\\
Let $\rho:G \to GL(V)$ be an irreducible representation over $\C$, affording character $\chi$. Extend by linearity to $\rho:A = \C G \to End_\C V$, an algebra homomorphism. Any homomorphism of algebras $A \to End V$ is called a representation of $A$. A \emph{central character} of $A$ is a ring homomorphism $Z(A) \to \C$. Let $z \in Z(\C G)$. Then $\rho(z)$ commutes with all $\rho(g)$ ($g \in G$), so by Schur's lemma, $\rho(z) = \lambda_z I$ for some $\lambda_z \in \C$. Now consider the algebra homomorphism $\omega_\chi =\omega: Z(\C G) \to \C$ by $z \to \lambda_z$. Now $\rho(C_i) = \omega(C_i) I$, so, taking traces,
\begin{equation*}
\begin{aligned}
\chi(1) \omega(C_i) = \sum_{g \in \mathcal{C}_i} \chi(g) = |\mathcal{C}_i| \chi(g_i)
\end{aligned}
\end{equation*}
where $g_i$ is a representation of $\mathcal{C}_i$. So $\omega(C_i) = \frac{\chi(g_i)}{\chi(1)} |\mathcal{C}_i|$.
\end{defi}
\begin{lemma} (13.5)\\
The values $\omega(C_i) = \frac{\chi(g)}{\chi(1)} |\mathcal{C}_i|$ are algebraic integers.
\begin{proof}
Since $\omega$ is an algebra homomorphism and using (13.3),
\begin{equation*}
\begin{aligned}
\omega(C_i) \omega(C_j) = \sum_{l=1}^k a_{ijl} \omega(C_l)
\end{aligned}
\end{equation*}
where $a_{ijl} \in \Z_{\geq 0}$. Thus the span $\{\omega(C_l): 1 \leq l \leq k\}$ is a subring of $\C$ and is a finitely-generated abelian group, so by Fact 3, consists of algebraic integers.\\
$[$A bit of explanations:
\begin{equation*}
\begin{aligned}
\omega(C_i) \omega(C_j) = \sum a_{ijl} \omega(C_l)\\
\omega(C_i) \begin{pmatrix}
\omega(C_1)\\
...\\
\omega(C_k)
\end{pmatrix} = (a_{ijk}) \begin{pmatrix}
\omega(C_1)\\
...\\
\omega(C_k)
\end{pmatrix}
\end{aligned}
\end{equation*}
$\omega(C_i)$ is eigenvalue of the integer matrix $(a_{ijl})$ so an algebraic integer by definition.$]$
\end{proof}
\end{lemma}
Exercise (Burnside, 1911):\\
Show that $a_{ijl}$ can be obtained from the charcater table. In fact, $\forall i,j,l$,
\begin{equation*}
\begin{aligned}
a_{ijl} = \frac{|G|}{|C_G(g_i)| |C_G(g_j)|} \sum_{s=1}^k \frac{\chi_s (g_i) \chi_s(g_j) \chi_s(g_l^{-1})}{\chi_s(1)}
\end{aligned}
\end{equation*}
for $g_i \in \mathcal{C}_i$, $1 \leq i \leq l$.\\
(proof uses column orthogonality, JL 30.4).
\begin{thm} (13.6)\\
The degree of any irreducible character of $G$ divides $|G|$.
\begin{proof}
Given irreducible character $\chi$, apply orthogonality,
\begin{equation*}
\begin{aligned}
\frac{|G|}{\chi(1)} &= \frac{1}{\chi(1)} \sum_{g \in G} \chi(g) \chi(g^{-1})\\
&= \frac{1}{\chi(1)} \sum_{i=1}^k |\mathcal{C}_i| \chi(g_i) \chi(g_i^{-1})\\
&= \sum_{i=1}^k \frac{|\mathcal{C}_i| \chi(g_i)}{\chi(1)} \chi(g_i^{-1})
\end{aligned}
\end{equation*}
where in the last summand, the first fraction is an algebraic integer by (13.5), and $\chi(g_i^{-1})$ is sum of roots of unity so an algebraic integer. LHS is clearly also rational, so it's an integer.
\end{proof}
\end{thm}
\begin{eg} (13.7)\\
(a) If $G$ is a $p$-group, then $\chi(1)$ is a $p$-power ($\chi$ irreducible). In particualr, if $|G| = p^2$, then $\chi(1) = 1$ (since we already have a trivial character -- the idea is actually similar to the proof in Groups 1A), hence $G$ abelian.\\
(b) If $G = S_n$ then every prime dividing the degree of an irreducible charcater of $G$ also divides $n!$.
\end{eg}
\begin{thm} (13.8, Burnside, 1904)\\
If $\chi$ is irreducible, then $\chi(1) | \frac{|G|}{|Z|}$.\\
The proof is left as an exercise. As a hint, it uses tensor products.
\end{thm}
\newpage
\section{Burnside's theorem}
\begin{thm} (14.1)\\
Let $p,q$ be primes, let $|G| = p^a q^b$, where $a,b \in \Z_{\geq 0}$, with $a+b \geq 2$. Then $G$ is not simple.
\begin{proof}
The theorem follows from 2 lemmas. We will prove this on Saturday.
\end{proof}
\end{thm}
\begin{rem}
(1) In fact, even more is true: $G$ is soluble.\\
(2) Result is best possible, in the sense that $|A_5| = 60 = 2^2 \cdot 3 \cdot 5$ has 3 prime factors, and is simple (actually there are 8 non-soluble groups of order $p^aq^br^c$ for $p,q,r$ primes).\\
(3) If either $a$ or $b$ is 0 then $G$ is a $p$ group, so is nilpotent, so soluble.\\
(4) In 1963, Feit and Thompson proved that every group of odd order was soluble.
\end{rem}
\iffalse
\begin{equation*}
\begin{aligned}
\end{aligned}
\end{equation*}
\fi
\end{document}
|
{"hexsha": "346d6ab0ff736aed47610d55880bbb15c9f2f2b6", "size": 98190, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Notes/Representation.tex", "max_stars_repo_name": "raoxiaojia/raoxiaojia.github.io", "max_stars_repo_head_hexsha": "d20c23a64794b500f2e0356fd01017ee31830fa2", "max_stars_repo_licenses": ["CC-BY-3.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-25T17:34:25.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-25T17:34:25.000Z", "max_issues_repo_path": "Notes/Representation.tex", "max_issues_repo_name": "raoxiaojia/raoxiaojia.github.io", "max_issues_repo_head_hexsha": "d20c23a64794b500f2e0356fd01017ee31830fa2", "max_issues_repo_licenses": ["CC-BY-3.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Notes/Representation.tex", "max_forks_repo_name": "raoxiaojia/raoxiaojia.github.io", "max_forks_repo_head_hexsha": "d20c23a64794b500f2e0356fd01017ee31830fa2", "max_forks_repo_licenses": ["CC-BY-3.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 52.9897463573, "max_line_length": 1118, "alphanum_fraction": 0.6375394643, "num_tokens": 38555}
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import random
import numpy as np
import time
import datetime
import seaborn as sns
import pandas as pd
import os
import gc
import pathlib
import json
import queue
import math
import threading
import re
from random import randrange
import multiprocessing
from scipy import interpolate
import datasets
from datasets import load_dataset
from torch.utils.data import Dataset, DataLoader
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers import AdamW, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup
from transformers import Trainer, TrainingArguments, TrainerCallback
from config import Config
import onnx
from onnx_model_manager import OnnxModelManager
from onnxruntime.quantization import quantize_dynamic, QuantType
def get_model(name):
    """Load a causal-LM checkpoint and its matching tokenizer.

    Returns a ``(model, tokenizer)`` tuple for the given hub/model name.
    """
    loaded_tokenizer = AutoTokenizer.from_pretrained(name)
    loaded_model = AutoModelForCausalLM.from_pretrained(name)
    return loaded_model, loaded_tokenizer
def random_model_folder():
    """Create (if needed) and return a fresh models directory.

    The directory is ``<Config.work_dir>/models/<unix-timestamp>``, so two
    calls in the same second return the same path.
    """
    now = int(time.time())
    models_dir = os.path.join(Config.work_dir, "models", str(now))
    # mkdir(parents=True, exist_ok=True) already tolerates an existing
    # directory, so the previous os.path.isdir() pre-check was redundant.
    pathlib.Path(models_dir).mkdir(parents=True, exist_ok=True)
    return models_dir
class ModelSeeder:
    """Generates text from a quantized ONNX copy of the reference model on a
    background thread, so training batches can be "seeded" with samples from
    the original model via `seed_model`.

    The ONNX export is created and quantized on first use; generated strings
    accumulate in a bounded queue consumed by `seed_model`.
    """

    def __init__(self, tokenizer):
        self.main_model_path = os.path.join("models", "main_gptneo_onnx")
        # Export + quantize the reference model once; later runs reuse the file.
        if not os.path.exists(os.path.join(self.main_model_path, "model_quant.onnx")):
            os.system(f"python3 -m transformers.onnx --model='EleutherAI/gpt-neo-125M' --feature=causal-lm-with-past {self.main_model_path} --atol=0.0002")
            self.optimize_onnx()
        self.onnx_model_manager = OnnxModelManager(os.path.join(self.main_model_path, "model_quant.onnx"))
        self.tokenizer = tokenizer
        self.buffer = []
        # Bounded queue: the producer thread blocks instead of growing memory.
        self.done_queue = queue.Queue(maxsize = 10000)
        self.running = False
        self.buffer_thread = threading.Thread(target=self.buffer_worker)

    def start_worker(self):
        """Start the background generation thread."""
        self.running = True
        self.buffer_thread.start()

    def stop_worker(self):
        """Signal the worker to stop and wait briefly for it to exit.

        Bugfix: previously the thread was never joined, so callers could not
        tell when generation had actually stopped. A timeout is used because
        the worker may be blocked on a full queue.
        """
        self.running = False
        if self.buffer_thread.is_alive():
            self.buffer_thread.join(timeout=5)

    def buffer_worker(self):
        """Loop: sample a random seed token, generate text, enqueue it."""
        num_tokens = len(self.tokenizer)
        while self.running:
            # Bugfix: random.randint is inclusive on both ends, so the upper
            # bound must be num_tokens - 1 to stay inside the vocabulary.
            random_input_id = random.randint(0, num_tokens - 1)
            output = self.onnx_model_manager.say_raw(self.tokenizer.decode([random_input_id])[0], do_sample=True)
            self.done_queue.put(output)

    def optimize_onnx(self):
        """Quantize the exported ONNX model to uint8 weights in place."""
        model_fp32 = os.path.join(self.main_model_path, "model.onnx")
        model_quant = os.path.join(self.main_model_path, "model_quant.onnx")
        model_opt = os.path.join(self.main_model_path, "model-opt.onnx")
        # quantize_dynamic writes model_quant to disk; its return value was
        # previously bound to an unused local.
        quantize_dynamic(model_fp32, model_quant, weight_type=QuantType.QUInt8)
        os.system(f"rm {model_opt}")

    def seed_model(self, batch):
        """Return a copy of `batch` with one generated sample appended.

        Blocks until the background worker has produced at least one sample.
        """
        input_ids = []
        attention_mask = []
        for i in range(len(batch['input_ids'])):
            input_ids.append(batch['input_ids'][i])
            attention_mask.append(batch['attention_mask'][i])
        # Add a text from the main model
        output = self.done_queue.get()
        output = self.tokenizer.encode(output)
        input_ids.append(output)
        attention_mask.append([1] * len(output))
        return {
            'attention_mask': attention_mask,
            'input_ids': input_ids
        }
def content_aware_encode(tokenizer, text) -> list:
    """Tokenize `text`, splitting the fused '><' token.

    Token id 6927 (the '><' merge, per the inline comment in the original
    code) is replaced with the two ids 29 ('>') and 27 ('<') so tag
    boundaries remain separate tokens. Returns the list of token ids.

    Note: the return annotation was ``[int]``, which is a runtime list
    literal, not a valid type annotation; it is now ``list``.
    """
    tokens = tokenizer.encode(text)
    new_tokens = []
    for token in tokens:
        if token == 6927:  # ><
            new_tokens += [29, 27]
        else:
            new_tokens.append(token)
    return new_tokens
def get_dataset(tokenizer, path_train, block_size = 128):
    """Build the training dataset pipeline.

    Pipeline, re-run on every shuffle: raw text -> random role-play line
    injection -> %lastscene/%lastcharacter placeholder substitution ->
    tokenization with EOS terminators -> concatenation into fixed
    `block_size` chunks. Returns ``{'train': AWSWDataset}``.
    """
    # The 'test' split points at data_test.txt, which split_data() may have
    # written as an empty file.
    dataset = load_dataset('text', data_files={'train': path_train, 'test': os.path.join(Config.work_dir, "data_test.txt")})
    # model_seeder = ModelSeeder(tokenizer)
    # model_seeder.start_worker()
    def encode(batch):
        # Tokenize each line, append EOS, and build an all-ones attention mask.
        result = []
        attention_mask = []
        for item in batch['text']:
            tokens = content_aware_encode(tokenizer, item) + [tokenizer.eos_token_id]
            result.append(tokens)
            attention_mask.append([1] * len(tokens))
        return {
            'attention_mask': attention_mask,
            'input_ids': result
        }
    # Probability that a given line gets a role-play snippet spliced in.
    inject_rp_chance_pct = 0.5
    rp_list = None
    # NOTE(review): rp_data.txt is opened relative to the CWD, unlike the
    # Config.work_dir-based paths used elsewhere -- confirm this is intended.
    with open('rp_data.txt', 'r') as f:
        rp_list = f.readlines()
    def inject_random_rp(batch):
        # For selected lines, cut at a random <d>/<p> tag and append a random
        # role-play line in its place.
        for i, item in enumerate(batch['text']):
            if random.random() <= inject_rp_chance_pct:
                rp = random.choice(rp_list)
                # Candidate splice points: every <d> or <p> occurrence. (The
                # comprehension's `i` is scoped to the comprehension in
                # Python 3, so the outer loop index is unaffected.)
                # NOTE(review): random.choice raises IndexError if a line has
                # no <d>/<p> tag -- assumes every line contains one.
                injection_places = [i for i in range(len(item)) if item.startswith("<d>", i) or item.startswith("<p>", i)]
                random_injection_place = random.choice(injection_places)
                # We cut off the text entirely to make sure the next mapping will add the EOS token
                batch['text'][i] = item[:random_injection_place] + rp.strip()
        return batch
    def parse_variables(batch):
        # Resolve %lastscene / %lastcharacter(code) placeholders by scanning
        # each line's <cmd> tokens, tracking the most recent scene and the
        # most recent interactable speaker across lines of the batch.
        last_scene = None
        last_character = None
        result = []
        re_token = re.compile(r'(<.*?>|[^<]*)')
        re_command = re.compile(r'^<(.*?)>$')
        re_msg = re.compile(r'([a-zA-Z]{1,2})\s"(.*?)"')
        for item in batch['text']:
            current_cmd = None
            for token in re_token.findall(item):
                cmd_match = re_command.match(token)
                if cmd_match is None:
                    # Payload token: interpret according to the last <cmd> seen.
                    if current_cmd == 'scn':
                        if not token.startswith("%"):
                            last_scene = token
                    elif current_cmd == 'msg':
                        msg_match = re_msg.match(token)
                        if msg_match is not None:
                            msg_from = msg_match.group(1)
                            if msg_from in Config.interactable_characters:
                                last_character = msg_from
                else:
                    current_cmd = cmd_match.group(1)
            if last_scene is not None:
                item = item.replace("%lastscene", last_scene)
            if last_character is not None:
                item = item.replace("%lastcharactercode", last_character)
                item = item.replace("%lastcharacter", Config.interactable_characters[last_character])
            # Drop lines whose placeholders could not be resolved.
            if not '%lastcharacter' in item and not '%lastscene' in item:
                result.append(item)
        return { 'text': result }
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # Pad the end
        to_add = (math.ceil(total_length / block_size) * block_size) - total_length
        if to_add > 0:
            # Padding uses EOS ids with attention 0 so it is masked out.
            concatenated_examples['input_ids'] += [tokenizer.eos_token_id] * to_add
            concatenated_examples['attention_mask'] += [0] * to_add
            total_length += to_add
        # Split by chunks of block_size.
        result = {
            k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
            for k, t in concatenated_examples.items()
        }
        # Causal LM: labels are a copy of the inputs.
        result["labels"] = result["input_ids"].copy()
        return result
    # min(..., 1) deliberately forces single-process mapping.
    dataset_map_cores = min(multiprocessing.cpu_count(), 1)
    dataset_batch_size = 1000
    class AWSWDataset(torch.utils.data.IterableDataset):
        # Iterable dataset that reshuffles and re-runs the whole augmentation
        # pipeline at the start of every iteration (i.e. every epoch).
        def __init__(self, dataset, dataset_type):
            self.current_dataset = dataset
            self.dataset_type = dataset_type
            self.current_idx = 0
            self.shuffle()
        def shuffle(self):
            self.current_dataset = self.current_dataset.shuffle()
            # Hack to avoid log spam. Map() doesn't have a way to turn off the logging
            # See: https://github.com/huggingface/datasets/issues/2651
            datasets.utils.set_progress_bar_enabled(False)
            dataset = self.current_dataset.map(
                inject_random_rp,
                batched=True,
                batch_size=dataset_batch_size,
                num_proc=dataset_map_cores
            )
            dataset = dataset.map(
                parse_variables,
                batched=True,
                batch_size=dataset_batch_size,
                num_proc=dataset_map_cores
            )
            dataset = dataset.map(
                encode,
                batched=True,
                batch_size=dataset_batch_size,
                num_proc=dataset_map_cores,
                remove_columns=["text"],
            )
            self.mapped_dataset = dataset.map(
                group_texts,
                batched=True,
                batch_size=dataset_batch_size,
                num_proc=dataset_map_cores
            )
        def __len__(self):
            return len(self.mapped_dataset[self.dataset_type])
        def __iter__(self):
            self.shuffle()
            return iter(self.mapped_dataset[self.dataset_type])
    return {
        # 'model_seeder': model_seeder,
        'train': AWSWDataset(dataset, 'train')
    }
def split_branches(data):
    """Re-split `data` so each output line ends at a closed quote pair.

    Input newlines are discarded; a (stripped) line is emitted every time a
    second double-quote character is seen. Trailing text without a closing
    quote pair is dropped. Returns the lines joined with newlines.
    """
    emitted = []
    pending = ""
    quotes_seen = 0
    for ch in data:
        if ch == "\n":
            continue
        pending += ch
        if ch == '"':
            quotes_seen += 1
            if quotes_seen == 2:
                quotes_seen = 0
                emitted.append(pending.strip())
                pending = ""
    return "\n".join(emitted)
def split_data(txt_file: str, shuffle_output = False):
    """Split `txt_file` into train/sample/test files under Config.work_dir.

    All lines go to data_train.txt (eval set is currently empty); the first
    10 lines form data_train_sample.txt. Existing output files are never
    overwritten. With `shuffle_output`, lines are shuffled first.
    """
    with open(txt_file) as f:
        raw = f.read()
    train_lines = raw.split("\n")
    eval_lines = []
    if shuffle_output:
        random.shuffle(train_lines)

    def write_if_absent(name, lines):
        # Write one line per entry, but only when the file does not exist yet.
        target = os.path.join(Config.work_dir, name)
        if os.path.isfile(target):
            return
        with open(target, "w") as out:
            for line in lines:
                out.write(line + "\n")

    write_if_absent("data_train_sample.txt", train_lines[:10])
    write_if_absent("data_train.txt", train_lines)
    write_if_absent("data_test.txt", eval_lines)
def set_pretrained_model_dropout(h, dropout):
    """Set both attention dropout probabilities on every block in `h`.

    `h` is an iterable of transformer blocks exposing
    ``block.attn.attention.attn_dropout`` and ``.resid_dropout`` modules;
    each module's ``p`` is set to `dropout`.
    """
    for block in h:
        attention = block.attn.attention
        attention.attn_dropout.p = dropout
        attention.resid_dropout.p = dropout
def train_model(model, tokenizer, dataset, params: dict, results: dict):
defaults = {
"lr": 1e-4,
"warmup_factor": 1,
"scheduler": "polynomial_decay_schedule_with_warmup",
"lr_end": 0.000002,
"power": 0.6,
"freeze_layer_rate": 0.0009,
"num_epoch": 10,
"save_model": True,
"batch_size": 32,
"model_folder": os.path.join(Config.work_dir, "models", "awsw_main")
}
defaults.update(params)
params = defaults
lr = params['lr']
batch_size = params['batch_size']
train_len = len(dataset['train'])
num_steps_per_epoch = math.ceil(train_len / batch_size)
num_epoch = params['num_epoch']
num_total_steps = num_steps_per_epoch * num_epoch
num_warmup_steps = num_steps_per_epoch * params['warmup_factor']
optimizer = AdamW(model.parameters(), lr=lr)
scheduler_str = params['scheduler']
scheduler = None
if scheduler_str == "cosine_schedule_with_warmup":
scheduler = get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_total_steps)
elif scheduler_str == "cosine_with_hard_restarts_schedule_with_warmup":
cycles = params['cycles']
scheduler = get_cosine_with_hard_restarts_schedule_with_warmup(optimizer, num_warmup_steps, num_total_steps, cycles)
elif scheduler_str == "polynomial_decay_schedule_with_warmup":
lr_end = params['lr_end']
power = params['power']
scheduler = get_polynomial_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_total_steps, power=power, lr_end=lr_end)
class AWSWTrainerCallback(TrainerCallback):
def __init__(self, optimizer, results):
self.random = np.random.RandomState(params['seed'])
self.old_freeze_part_layers = None
self.optimizer = optimizer
self.results = results
self.no_grad_masks = self.make_no_grad_masks(0.01)
self.named_parameters = list(model.named_parameters())
def on_train_end(self, args, state, control, **kwargs):
learning_rate_history = [h['learning_rate'] for h in state.log_history if 'learning_rate' in h]
loss_history = [h['loss'] for h in state.log_history if 'loss' in h]
self.results['loss_history'] = loss_history
self.results['learning_rate_history'] = learning_rate_history
def on_step_begin(self, args, state, control, **kwargs):
current_step = state.global_step
# Freeze a part
learning_rate = self.optimizer.param_groups[0]['lr']
freeze_layer_rate = params['freeze_layer_rate']
freeze_part_layers = learning_rate > freeze_layer_rate
if 'freeze_from_steps' in params:
freeze_part_layers = current_step > params['freeze_from_steps']
if self.old_freeze_part_layers is not freeze_part_layers:
if 'to_freeze_gpt_blocks' in params:
param_slice = self.named_parameters
for name, param in param_slice:
param.requires_grad = False
for name, param in model.transformer.h.named_parameters():
param.requires_grad = True
to_freeze_gpt_blocks = params['to_freeze_gpt_blocks']
param_slice = model.transformer.h[:to_freeze_gpt_blocks]
print(f"[{current_step}] set freeze_part_layers: {freeze_part_layers} (freezing {len(param_slice)} out of {len(model.transformer.h)} gpt blocks.)")
for name, param in param_slice.named_parameters():
param.requires_grad = not freeze_part_layers
if 'to_freeze_count' in params:
to_freeze_count = params['to_freeze_count']
param_slice = self.named_parameters[:to_freeze_count]
print(f"[{current_step}] set freeze_part_layers: {freeze_part_layers} (freezing {len(param_slice)} out of {len(self.named_parameters)} layers.)")
for name, param in param_slice:
param.requires_grad = not freeze_part_layers
self.old_freeze_part_layers = freeze_part_layers
def make_no_grad_masks(self, model_train_pct):
masks = []
for p in model.parameters():
mask = torch.zeros(*p.shape)
flattened_view = torch.flatten(mask)
to_pick_len = math.floor(len(flattened_view) * model_train_pct)
flattened_view[0:to_pick_len] = 1
mask = mask.int().to(model.device)
masks.append(mask)
return masks
# def on_before_optimizer_step(self, args, state, control, **kwargs):
# for i, w in enumerate(model.parameters()):
# if w.grad is not None:
# w.grad *= self.no_grad_masks[i]
class AWSWTrainer(Trainer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
main_model, _ = get_model("EleutherAI/gpt-neo-125M")
main_model.to(model.device)
self.main_model = main_model
self.params_len = len(list(model.parameters()))
self.avg_loss_tries = 50
self.last_avg_loss = None
self.tick = 0
self.mix_rate = 0.1
self.loss_log = []
def compute_loss(self, model, inputs, return_outputs=False):
with torch.no_grad():
for p1, p2 in zip(model.parameters(), self.main_model.parameters()):
diff = abs(p1.data - p2.data)
diff_mean = diff.mean()
learning_rate = optimizer.param_groups[0]['lr']
p1.data = torch.lerp(p1.data, p2.data, self.mix_rate)
outputs = model(**inputs)
loss = outputs.get("loss")
self.loss_log.append(loss.detach().cpu().numpy())
avg_loss = 0
if len(self.loss_log) == self.avg_loss_tries:
avg_loss = sum(self.loss_log) / len(self.loss_log)
if self.last_avg_loss is None:
self.last_avg_loss = avg_loss
else:
#avg_loss_diff = abs(avg_loss - self.last_avg_loss)
#if avg_loss_diff > 0.0001:
if self.last_avg_loss < avg_loss:
# Loss gone up, time to stop mixing so much
self.mix_rate = max(0.0001, self.mix_rate * 0.5)
else:
# Loss gone down, we can keep mixing
self.mix_rate = min(0.5, self.mix_rate * 1.5)
self.last_avg_loss = avg_loss
self.loss_log.pop(0)
if not 'model_closeness_loss' in results:
results['model_closeness_loss'] = []
if not 'mix_rate' in results:
results['mix_rate'] = []
if not 'avg_loss' in results:
results['avg_loss'] = []
results['avg_loss'].append(avg_loss)
results['mix_rate'].append(self.mix_rate)
results['model_closeness_loss'].append(diff_mean.cpu().numpy())
return (loss, outputs) if return_outputs else loss
def train(model, dataset, trainer_callback):
    """Run one fine-tuning session over `dataset` with the given callback,
    resuming from the most recent checkpoint directory if one exists, then
    tear the trainer down and release GPU/distributed resources.

    NOTE(review): `params`, `batch_size`, `num_epoch`, `num_total_steps`,
    `optimizer` and `scheduler` come from an enclosing scope that is not
    visible in this chunk.
    """
    training_args = TrainingArguments(
        params['model_folder'],
        seed=params['seed'],
        per_device_train_batch_size=batch_size,
        per_device_eval_batch_size=batch_size,
        num_train_epochs=num_epoch,
        # Aims for on the order of 100 log points regardless of step count.
        logging_steps=math.floor(max(num_total_steps, 100) / min(num_total_steps, 100)),
        save_total_limit=2,
        log_level="error",
        save_strategy = "steps" if params['save_model'] else "no"
    )
    # NOTE(review): this builds a plain Trainer, not the AWSWTrainer defined
    # above -- confirm which one is intended.
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset['train'],
        optimizers=(optimizer, scheduler),
        callbacks=[trainer_callback]
    )
    # Resume from the most recently modified checkpoint subdirectory, if any.
    checkpoint_dirs = [os.path.join(params['model_folder'], d) for d in os.listdir(params['model_folder']) if os.path.isdir(os.path.join(params['model_folder'], d))]
    if len(checkpoint_dirs) > 0:
        latest_checkpoint = max(checkpoint_dirs, key=os.path.getmtime)
        trainer.train(latest_checkpoint)
    else:
        trainer.train()
    # Free trainer state and GPU memory between runs.
    del training_args
    del trainer
    gc.collect()
    try:
        # Best-effort teardown; raises when no process group was initialized.
        torch.distributed.destroy_process_group()
    except:
        pass
    torch.cuda.empty_cache()
trainer_callback = AWSWTrainerCallback(optimizer, results)
train(model, dataset, trainer_callback)
del model
del dataset
del tokenizer
del optimizer
return None
|
{"hexsha": "b983d549a857a79e38c8583e963448aec29ea6ed", "size": 20005, "ext": "py", "lang": "Python", "max_stars_repo_path": "Research/model_utils.py", "max_stars_repo_name": "peterwilli/Endless-AWSW", "max_stars_repo_head_hexsha": "32dd17a01e547b946b9e32b858adc1949a295fd4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2022-01-25T13:44:46.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T10:15:16.000Z", "max_issues_repo_path": "Research/model_utils.py", "max_issues_repo_name": "peterwilli/Endless-AWSW", "max_issues_repo_head_hexsha": "32dd17a01e547b946b9e32b858adc1949a295fd4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2022-01-24T16:33:05.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-24T21:37:13.000Z", "max_forks_repo_path": "Research/model_utils.py", "max_forks_repo_name": "peterwilli/Endless-AWSW", "max_forks_repo_head_hexsha": "32dd17a01e547b946b9e32b858adc1949a295fd4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.0273109244, "max_line_length": 169, "alphanum_fraction": 0.5947013247, "include": true, "reason": "import numpy,from scipy", "num_tokens": 4292}
|
[STATEMENT]
lemma transrec3_succ [simp]:
"transrec3 a b c (succ i) = b i (transrec3 a b c i)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. transrec3 a b c (ZFC_in_HOL.succ i) = b i (transrec3 a b c i)
[PROOF STEP]
by (simp add: transrec transrec3_def)
|
{"llama_tokens": 118, "file": "ZFC_in_HOL_ZFC_Cardinals", "length": 1}
|
import numpy as np
# from config import INPUT_SIZE
# Default network input resolution (h, w) used by generate_default_anchor_maps.
INPUT_SIZE = (448, 448)
# One entry per pyramid level: a base anchor size per stride, with the scale
# and aspect-ratio multipliers applied at every feature-map cell.
# NOTE(review): the 'p5' entry appears twice, which doubles its anchors --
# confirm the duplication is intentional.
_default_anchors_setting = (
    dict(layer='p2', stride=32, size=24, scale=[2 ** (1. / 3.), 2 ** (2. / 3.)], aspect_ratio=[0.667, 1, 1.5]),
    dict(layer='p3', stride=64, size=48, scale=[2 ** (1. / 3.), 2 ** (2. / 3.)], aspect_ratio=[0.667, 1, 1.5]),
    dict(layer='p4', stride=128, size=96, scale=[2 ** (1. / 3.), 2 ** (2. / 3.)], aspect_ratio=[0.667, 1, 1.5]),
    dict(layer='p5', stride=256, size=192, scale=[2 ** (1. / 3.), 2 ** (2. / 3.)], aspect_ratio=[0.667, 1, 1.5]),
    dict(layer='p5', stride=256, size=192, scale=[2 ** (1. / 3.), 2 ** (2. / 3.)], aspect_ratio=[0.667, 1, 1.5]),
)
def generate_default_anchor_maps(anchors_setting=None, input_shape=None):
    """
    Generate the default anchor boxes for every feature-map location.

    :param anchors_setting: iterable of dicts describing each pyramid level
        (keys: 'layer', 'stride', 'size', 'scale', 'aspect_ratio');
        defaults to ``_default_anchors_setting``
    :param input_shape: (h, w) of the network input; defaults to INPUT_SIZE
        (resolved at call time so the module constant can be overridden)
    :return: center_anchors: # anchors * 4 (oy, ox, h, w)
             edge_anchors:   # anchors * 4 (y0, x0, y1, x1)
             anchor_areas:   # anchors * 1 (area)
    """
    if anchors_setting is None:
        anchors_setting = _default_anchors_setting
    if input_shape is None:
        input_shape = INPUT_SIZE

    center_anchors = np.zeros((0, 4), dtype=np.float32)
    edge_anchors = np.zeros((0, 4), dtype=np.float32)
    anchor_areas = np.zeros((0,), dtype=np.float32)
    input_shape = np.array(input_shape, dtype=int)

    for anchor_info in anchors_setting:
        stride = anchor_info['stride']
        size = anchor_info['size']
        scales = anchor_info['scale']
        aspect_ratios = anchor_info['aspect_ratio']

        # Spatial size of this pyramid level's output map.
        # BUG FIX: np.int was removed in NumPy 1.24; use the builtin int
        # (the alias's former target) instead.
        output_map_shape = np.ceil(input_shape.astype(np.float32) / stride)
        output_map_shape = output_map_shape.astype(int)
        output_shape = tuple(output_map_shape) + (4,)

        # Anchor centers sit at the middle of each stride-sized cell.
        ostart = stride / 2.
        oy = np.arange(ostart, ostart + stride * output_shape[0], stride)
        oy = oy.reshape(output_shape[0], 1)
        ox = np.arange(ostart, ostart + stride * output_shape[1], stride)
        ox = ox.reshape(1, output_shape[1])
        center_anchor_map_template = np.zeros(output_shape, dtype=np.float32)
        center_anchor_map_template[:, :, 0] = oy
        center_anchor_map_template[:, :, 1] = ox

        for scale in scales:
            for aspect_ratio in aspect_ratios:
                center_anchor_map = center_anchor_map_template.copy()
                # h shrinks and w grows with the aspect ratio (w/h = aspect_ratio).
                center_anchor_map[:, :, 2] = size * scale / float(aspect_ratio) ** 0.5
                center_anchor_map[:, :, 3] = size * scale * float(aspect_ratio) ** 0.5
                # (y0, x0, y1, x1) corners from center +/- half extent.
                edge_anchor_map = np.concatenate((center_anchor_map[..., :2] - center_anchor_map[..., 2:4] / 2.,
                                                  center_anchor_map[..., :2] + center_anchor_map[..., 2:4] / 2.),
                                                 axis=-1)
                anchor_area_map = center_anchor_map[..., 2] * center_anchor_map[..., 3]
                center_anchors = np.concatenate((center_anchors, center_anchor_map.reshape(-1, 4)))
                edge_anchors = np.concatenate((edge_anchors, edge_anchor_map.reshape(-1, 4)))
                anchor_areas = np.concatenate((anchor_areas, anchor_area_map.reshape(-1)))

    return center_anchors, edge_anchors, anchor_areas
def hard_nms(cdds, topn=10, iou_thresh=0.25):
    """
    Greedy hard non-maximum suppression.

    :param cdds: N x 5+ ndarray, each row [score, y0, x0, y1, x1, ...]
    :param topn: maximum number of boxes to keep
    :param iou_thresh: remaining boxes with IoU above this w.r.t. a kept box
        are discarded
    :return: ndarray of kept rows, highest score first
    :raises TypeError: if cdds is not a 2-D numpy array with >= 5 columns
    """
    if not (type(cdds).__module__ == 'numpy' and len(cdds.shape) == 2 and cdds.shape[1] >= 5):
        raise TypeError('edge_box_map should be N * 5+ ndarray')

    # Sort ascending by score so the best candidate is always the last row.
    cdds = cdds.copy()
    indices = np.argsort(cdds[:, 0])
    cdds = cdds[indices]
    cdd_results = []

    res = cdds
    # BUG FIX: the original looped on `res.any()`, which is False for a
    # non-empty array whose remaining rows are all zeros and would drop
    # valid zero-valued candidates; test emptiness explicitly instead.
    while res.size > 0:
        cdd = res[-1]  # highest remaining score
        cdd_results.append(cdd)
        if len(cdd_results) == topn:
            return np.array(cdd_results)
        res = res[:-1]

        # Intersection of the kept box with every remaining box.
        start_max = np.maximum(res[:, 1:3], cdd[1:3])
        end_min = np.minimum(res[:, 3:5], cdd[3:5])
        lengths = end_min - start_max
        intersec_map = lengths[:, 0] * lengths[:, 1]
        # Disjoint boxes (negative extent on either axis) intersect nowhere.
        intersec_map[np.logical_or(lengths[:, 0] < 0, lengths[:, 1] < 0)] = 0
        iou_map_cur = intersec_map / ((res[:, 3] - res[:, 1]) * (res[:, 4] - res[:, 2]) + (cdd[3] - cdd[1]) * (
                cdd[4] - cdd[2]) - intersec_map)
        res = res[iou_map_cur < iou_thresh]

    return np.array(cdd_results)
if __name__ == '__main__':
    # Smoke test: run NMS over a tiny hand-made candidate set, then report
    # how many default anchors the module-level configuration produces.
    candidates = np.array([
        [0.4, 1, 10, 12, 20],
        [0.5, 1, 11, 11, 20],
        [0.55, 20, 30, 40, 50]
    ])
    a = hard_nms(candidates, topn=100, iou_thresh=0.4)
    # print(a)
    _, edge_anchors, _ = generate_default_anchor_maps()
    print(edge_anchors.shape)
|
{"hexsha": "49f967466d6638a759b9773467c9edd6f3eb4340", "size": 4652, "ext": "py", "lang": "Python", "max_stars_repo_path": "anchors.py", "max_stars_repo_name": "fengjiqiang/related_code", "max_stars_repo_head_hexsha": "72169d94350f4c2e29b0a2b4511c518c8372160d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "anchors.py", "max_issues_repo_name": "fengjiqiang/related_code", "max_issues_repo_head_hexsha": "72169d94350f4c2e29b0a2b4511c518c8372160d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "anchors.py", "max_forks_repo_name": "fengjiqiang/related_code", "max_forks_repo_head_hexsha": "72169d94350f4c2e29b0a2b4511c518c8372160d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.8867924528, "max_line_length": 114, "alphanum_fraction": 0.5687876182, "include": true, "reason": "import numpy", "num_tokens": 1370}
|
import sys
sys.path.append("..")
from tqdm import tqdm, trange
import json
import numpy as np
import torch
""" pretrain 데이터셋"""
class PretrainDataSet(torch.utils.data.Dataset):
    """
    Dataset for BERT-style pretraining, loaded from a JSON-lines file.

    Each input line is a JSON object with:
      is_next: whether tokens_a and tokens_b are consecutive sentences
      tokens: the sentence tokens (pieces)
      segment: 0 for tokens_a positions, 1 for tokens_b positions
      mask_idx: indices of masked positions within tokens
      mask_label: ground-truth pieces for the masked positions
    """
    def __init__(self, vocab, infile):
        self.vocab = vocab
        self.labels_cls = []
        self.labels_lm = []
        self.sentences = []
        self.segments = []

        # First pass: count lines so tqdm can display total progress.
        line_cnt = 0
        with open(infile, "r") as f:
            for line in f:
                line_cnt += 1

        with open(infile, "r") as f:
            for i, line in enumerate(tqdm(f, total=line_cnt, desc=f"Loading {infile}", unit=" lines")):
                instance = json.loads(line)
                self.labels_cls.append(instance["is_next"])
                sentences = [vocab.piece_to_id(p) for p in instance["tokens"]]
                self.sentences.append(sentences)
                self.segments.append(instance["segment"])
                # BUG FIX: np.int was removed in NumPy 1.24; the builtin int
                # (the alias's former target) is used instead.
                mask_idx = np.array(instance["mask_idx"], dtype=int)
                mask_label = np.array([vocab.piece_to_id(p) for p in instance["mask_label"]], dtype=int)
                # -1 marks unmasked positions so the LM loss can ignore them.
                label_lm = np.full(len(sentences), dtype=int, fill_value=-1)
                label_lm[mask_idx] = mask_label
                self.labels_lm.append(label_lm)

    def __len__(self):
        # All parallel lists must stay the same length.
        assert len(self.labels_cls) == len(self.labels_lm)
        assert len(self.labels_cls) == len(self.sentences)
        assert len(self.labels_cls) == len(self.segments)
        return len(self.labels_cls)

    def __getitem__(self, item):
        # Returns (label_cls, label_lm, token_ids, segment_ids) as tensors.
        return (torch.tensor(self.labels_cls[item]),
                torch.tensor(self.labels_lm[item]),
                torch.tensor(self.sentences[item]),
                torch.tensor(self.segments[item]))
""" pretrain data collate_fn """
def pretrin_collate_fn(inputs):
    """
    Collate a list of PretrainDataSet samples into padded batch tensors.

    :param inputs: list of (label_cls, label_lm, tokens, segments) tuples
    :return: [labels_cls, labels_lm, token_ids, segment_ids] batch tensors
    """
    cls_targets, lm_targets, token_seqs, segment_seqs = zip(*inputs)

    # LM targets are padded with -1 so padded positions are ignored by the loss.
    padded_lm = torch.nn.utils.rnn.pad_sequence(lm_targets, batch_first=True, padding_value=-1)
    # Token ids are padded with 0, matching pad_id=0 chosen when the vocab was built.
    padded_tokens = torch.nn.utils.rnn.pad_sequence(token_seqs, batch_first=True, padding_value=0)
    # Segment ids are likewise zero-padded.
    padded_segments = torch.nn.utils.rnn.pad_sequence(segment_seqs, batch_first=True, padding_value=0)

    # cls labels are scalar tensors, so stacking yields a 1-D batch tensor.
    return [
        torch.stack(cls_targets, dim=0),
        padded_lm,
        padded_tokens,
        padded_segments
    ]
""" pretrain 데이터 로더 """
def pretrain_data_loader(vocab, data_dir, batch_size = 128):
    """
    Build a shuffling DataLoader over the first kowiki pretraining shard.

    :param vocab: sentencepiece-style vocab exposing piece_to_id()
    :param data_dir: directory containing kowiki_bert_0.json
    :param batch_size: samples per batch (default 128)
    :return: torch DataLoader yielding pretrin_collate_fn batches
    """
    shard_path = f"{data_dir}/kowiki_bert_0.json"
    dataset = PretrainDataSet(vocab, shard_path)
    return torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=True,
        collate_fn=pretrin_collate_fn,
    )
|
{"hexsha": "c05a7bc30611f6bc164efb13c46de494a48901bf", "size": 2945, "ext": "py", "lang": "Python", "max_stars_repo_path": "dataloader/kowiki.py", "max_stars_repo_name": "nawnoes/ReforBERT", "max_stars_repo_head_hexsha": "ae7c8ddf55de2e06080a0e46735ea9609262d8ce", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-05-24T10:50:25.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-16T08:34:28.000Z", "max_issues_repo_path": "dataloader/kowiki.py", "max_issues_repo_name": "nawnoes/ReforBERT", "max_issues_repo_head_hexsha": "ae7c8ddf55de2e06080a0e46735ea9609262d8ce", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dataloader/kowiki.py", "max_forks_repo_name": "nawnoes/ReforBERT", "max_forks_repo_head_hexsha": "ae7c8ddf55de2e06080a0e46735ea9609262d8ce", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-09-21T03:15:48.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-21T03:15:48.000Z", "avg_line_length": 32.7222222222, "max_line_length": 120, "alphanum_fraction": 0.6730050934, "include": true, "reason": "import numpy", "num_tokens": 842}
|
import numpy as np
from PIL import Image
from .._results import PredictionResult
try:
import tflite_runtime.interpreter as tflite
except ImportError:
# Needs better error text
raise ImportError(
"ERROR: This is a TensorFlow Lite model and requires TensorFlow Lite interpreter to be installed on this device. Please go to https://www.tensorflow.org/lite/guide/python and download the appropriate version for you device."
)
class ImageClassificationModel:
    """Wraps a Lobe-exported TensorFlow Lite image classifier.

    The TFLite interpreter is created lazily on the first predict() call and
    reused afterwards.
    """
    # Divisor for scaling 8-bit pixel values into the [0, 1] range.
    __MAX_UINT8 = 255
    def __init__(self, signature):
        # Full path of the exported .tflite file described by the signature.
        self.__model_path = "{}/{}.tflite".format(
            signature.model_path, signature.filename
        )
        self.__tflite_predict_fn = None  # interpreter, created lazily in __load
        self.__labels = signature.classes
    def __load(self):
        # Instantiate the TFLite interpreter for the exported model file.
        self.__tflite_predict_fn = tflite.Interpreter(
            model_path=self.__model_path
        )
    def predict(self, image: Image.Image) -> PredictionResult:
        """Classify `image` and return a PredictionResult.

        NOTE(review): assumes `image` already matches the model's expected
        input dimensions -- confirm with callers.
        """
        if self.__tflite_predict_fn is None:
            self.__load()
        self.__tflite_predict_fn.allocate_tensors()
        # Add an extra axis onto the numpy array
        np_image = np.expand_dims(image, axis=0)
        # Converts to floating point and standardize range from 0 to 1.
        np_image = np.float32(np_image) / self.__MAX_UINT8
        input_details = self.__tflite_predict_fn.get_input_details()
        output_details = self.__tflite_predict_fn.get_output_details()
        self.__tflite_predict_fn.set_tensor(
            input_details[0]["index"],
            np_image
        )
        self.__tflite_predict_fn.invoke()
        # Output tensor 0 holds the top label, tensor 2 the per-class
        # confidences. NOTE(review): these indices follow the Lobe export
        # layout -- confirm against the model signature if predictions look
        # wrong.
        top_prediction_output = self.__tflite_predict_fn.get_tensor(
            output_details[0]["index"]
        )
        confidences_output = self.__tflite_predict_fn.get_tensor(
            output_details[2]["index"]
        )
        confidences = np.squeeze(confidences_output)
        # The label arrives as a 0-d bytes array; unwrap and decode to str.
        top_prediction = top_prediction_output.item().decode("utf-8")
        return PredictionResult(
            labels=self.__labels,
            confidences=confidences,
            prediction=top_prediction,
        )
|
{"hexsha": "6b39a4515a2b9ca84fab5d2dc5157ffd81319388", "size": 2097, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/lobe/backends/_backend_tflite.py", "max_stars_repo_name": "marlinspike/lobe-python", "max_stars_repo_head_hexsha": "91d1128c34f71526aa0e132f9e2f52e4eb971f56", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-10-20T21:42:51.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-20T21:42:51.000Z", "max_issues_repo_path": "src/lobe/backends/_backend_tflite.py", "max_issues_repo_name": "marlinspike/lobe-python", "max_issues_repo_head_hexsha": "91d1128c34f71526aa0e132f9e2f52e4eb971f56", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/lobe/backends/_backend_tflite.py", "max_forks_repo_name": "marlinspike/lobe-python", "max_forks_repo_head_hexsha": "91d1128c34f71526aa0e132f9e2f52e4eb971f56", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.8382352941, "max_line_length": 232, "alphanum_fraction": 0.6680972818, "include": true, "reason": "import numpy", "num_tokens": 458}
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#ifndef SERIAL_COM_TCPSERVER_HPP
#define SERIAL_COM_TCPSERVER_HPP
#include <iostream>
#include <boost/array.hpp>
#include <boost/asio.hpp>
#include <boost/bind.hpp>
#include <boost/thread.hpp>
#include <stdio.h>
#include <thread>
#include <functional>
#include <string>
#include "Port.h"
#include "TcpClientPort.hpp"
using boost::asio::ip::tcp;
typedef std::function<void(std::shared_ptr<TcpClientPort> port)> ConnectionHandler;
// Accepts TCP connections on a local address/port and hands each accepted
// socket to a caller-supplied ConnectionHandler wrapped as a TcpClientPort.
class TcpServer
{
public:
    // Bind an acceptor to the given local address and port.
    TcpServer(const std::string& localAddr, int localPort);
    // This method accepts one new connection
    void accept(ConnectionHandler handler);
    ~TcpServer();
    // close the server
    void close();
    // Port the acceptor is actually bound to (useful when constructed with port 0).
    int port() { return acceptor_.local_endpoint().port(); }
    // Local address the acceptor is bound to, as a string.
    std::string localAddress() { return acceptor_.local_endpoint().address().to_string(); }
private:
    // Completion callback for an accept; forwards the new port to handler_.
    void handleAccept(std::shared_ptr<TcpClientPort> newSocket, const boost::system::error_code& error);
    // Body of worker_thread_: runs the io_service event loop.
    void run();
    boost::asio::io_service io_service_;
    tcp::acceptor acceptor_;
    bool closed_;
    std::thread worker_thread_;
    bool worker_running_;
    ConnectionHandler handler_;
};
#endif
|
{"hexsha": "21fabb60ebfc71e848472d32050ef722fdbc8b4e", "size": 1246, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "MavLinkCom/src/serial_com/TcpServer.hpp", "max_stars_repo_name": "JaganathSahu/Microsoft-Open-Source-Code", "max_stars_repo_head_hexsha": "bf5cc70395da78f03d7af8592fad466088f9d84e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2019-11-25T18:53:58.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-17T01:43:49.000Z", "max_issues_repo_path": "MavLinkCom/src/serial_com/TcpServer.hpp", "max_issues_repo_name": "spkdroid/AirSim", "max_issues_repo_head_hexsha": "bf5cc70395da78f03d7af8592fad466088f9d84e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MavLinkCom/src/serial_com/TcpServer.hpp", "max_forks_repo_name": "spkdroid/AirSim", "max_forks_repo_head_hexsha": "bf5cc70395da78f03d7af8592fad466088f9d84e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2018-01-26T17:48:22.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-03T23:52:56.000Z", "avg_line_length": 24.431372549, "max_line_length": 102, "alphanum_fraction": 0.7255216693, "num_tokens": 273}
|
import nltk
from nltk import word_tokenize
#import speech_recognition as sr_audio
import numpy as np
from textblob import TextBlob
#import transcribe as ts
try:
nltk.data.find('averaged_perceptron_tagger')
except LookupError:
nltk.download('averaged_perceptron_tagger')
def nltk_featurize(file):
    """
    Extract 63 text features from a transcript (or a .wav file).

    :param file: transcript string, or a path ending in '.wav' which is
        transcribed via ts.transcribe_sphinx. NOTE(review): the `ts` import
        is commented out at the top of this module, so the .wav branch will
        raise NameError until it is restored.
    :return: (features, labels) -- a numpy array of 63 values and the
        matching list of 63 label strings: 26 lowercase-letter counts,
        space/number/capital counts, 31 part-of-speech tag counts,
        sentiment polarity and subjectivity, and a repeated-word count.
    """
    # Get the transcript.
    if file[-4:] == '.wav':
        transcript = ts.transcribe_sphinx('test.wav')
    else:
        transcript = file

    # Per-letter character counts (lowercase only, as in the original).
    # BUG FIX: the original stored these in single-letter variables and then
    # reused `i` as a loop index, clobbering the letter-i count before the
    # feature vector was built; a list avoids the shadowing entirely.
    letter_counts = [transcript.count(ch) for ch in 'abcdefghijklmnopqrstuvwxyz']
    space = transcript.count(' ')

    # Numbers written as digits or as words. NOTE(review): str.count matches
    # substrings, so e.g. 'money' contributes to 'one'; preserved as-is.
    digit_count = sum(transcript.count(d) for d in '0123456789')
    number_words = ('zero', 'one', 'two', 'three', 'four', 'five',
                    'six', 'seven', 'eight', 'nine', 'ten')
    number = digit_count + sum(transcript.count(w) for w in number_words)
    capletter = sum(1 for c in transcript if c.isupper())

    # Part-of-speech tag counts.
    # BUG FIX: the original called nltk.pos_tag(transcript), tagging the
    # transcript character by character; the tagger expects the tokenized
    # word list produced by word_tokenize.
    tokens = word_tokenize(transcript)
    tagged = nltk.pos_tag(tokens)
    tag_totals = {}
    for _word, tag in tagged:
        tag_totals[tag] = tag_totals.get(tag, 0) + 1
    # Tag order mirrors the original feature vector. NOTE(review): 'RB' and
    # 'NNPS' are absent from both the original features and labels; the
    # omission is preserved so the output length stays 63.
    pos_tags = ('CC', 'CD', 'DT', 'EX', 'IN', 'JJ', 'JJR', 'JJS', 'LS', 'MD',
                'NN', 'NNP', 'NNS', 'PDT', 'POS', 'PRP', 'PRP$', 'RBR', 'RBS',
                'RP', 'TO', 'UH', 'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ',
                'WDT', 'WP', 'WRB')
    pos_counts = [tag_totals.get(tag, 0) for tag in pos_tags]

    # Sentiment via TextBlob.
    tblob = TextBlob(transcript)
    polarity = float(tblob.sentiment[0])
    subjectivity = float(tblob.sentiment[1])

    # Repeated words: total words minus distinct words (equivalent to the
    # original remove-one-then-check loop).
    words = transcript.split()
    repeat = len(words) - len(set(words))

    features = np.array(letter_counts
                        + [space, number, capletter]
                        + pos_counts
                        + [polarity, subjectivity, repeat])
    labels = ['a', 'b', 'c', 'd',
              'e', 'f', 'g', 'h',
              'i', 'j', 'k', 'l',
              'm', 'n', 'o', 'p',
              'q', 'r', 's', 't',
              'u', 'v', 'w', 'x',
              'y', 'z', 'space', 'numbers',
              'capletters', 'cc', 'cd', 'dt',
              'ex', 'in', 'jj', 'jjr',
              'jjs', 'ls', 'md', 'nn',
              'nnp', 'nns', 'pdt', 'pos',
              'prp', 'prp2', 'rbr', 'rbs',
              'rp', 'to', 'uh', 'vb',
              'vbd', 'vbg', 'vbn', 'vbp',
              'vbz', 'wdt', 'wp', 'wrb',
              'polarity', 'subjectivity', 'repeat']
    return features, labels
|
{"hexsha": "e09b1ed35cfaa85fafb526d021c7dbe950c89205", "size": 4860, "ext": "py", "lang": "Python", "max_stars_repo_path": "DigiPsych_API/Feature_Extract_API/nltk_featurize.py", "max_stars_repo_name": "larryzhang95/Voice-Analysis-Pipeline", "max_stars_repo_head_hexsha": "264ac5c70d0baab47b81718ea5b895be30a683e9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2019-06-22T21:03:50.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-21T19:46:55.000Z", "max_issues_repo_path": "DigiPsych_API/Feature_Extract_API/nltk_featurize.py", "max_issues_repo_name": "larryzhang95/Voice-Analysis-Pipeline", "max_issues_repo_head_hexsha": "264ac5c70d0baab47b81718ea5b895be30a683e9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "DigiPsych_API/Feature_Extract_API/nltk_featurize.py", "max_forks_repo_name": "larryzhang95/Voice-Analysis-Pipeline", "max_forks_repo_head_hexsha": "264ac5c70d0baab47b81718ea5b895be30a683e9", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-09-15T01:50:39.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-22T02:36:36.000Z", "avg_line_length": 23.2535885167, "max_line_length": 282, "alphanum_fraction": 0.5294238683, "include": true, "reason": "import numpy", "num_tokens": 1797}
|
"""Simple layer profile plots for group results."""
import os
import numpy as np
import nibabel as nb
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm

# Per-subject figure data saved with np.save: dict of ROI -> tissue ->
# {"Metric_x": depth values, "Metric_y": T2* values}.
FIG_DATA = [
    "/home/faruk/data2/DATA_MRI_NIFTI/derived/plots/20_depth_vs_T2star/sub-01_depth_vs_T2star.npy",
    "/home/faruk/data2/DATA_MRI_NIFTI/derived/plots/20_depth_vs_T2star/sub-02_depth_vs_T2star.npy",
    "/home/faruk/data2/DATA_MRI_NIFTI/derived/plots/20_depth_vs_T2star/sub-03_depth_vs_T2star.npy",
    "/home/faruk/data2/DATA_MRI_NIFTI/derived/plots/20_depth_vs_T2star/sub-04_depth_vs_T2star.npy",
    "/home/faruk/data2/DATA_MRI_NIFTI/derived/plots/20_depth_vs_T2star/sub-05_depth_vs_T2star.npy",
]
# One panel per ROI in a 2x2 grid.
TAGS = ["Heschl's Gyrus Right", "Heschl's Gyrus Left",
        "Calcarine Sulcus Right", "Calcarine Sulcus Left"]
OUTDIR = "/home/faruk/data2/DATA_MRI_NIFTI/derived/plots/20_depth_vs_T2star"
SUBJ_ID = "group"
FIGURE_TAG = "depth_vs_T2star"
RANGE_X = (-0.7, 1.7)  # <0: WM distance [mm]; 0-1: equi-volume depth; >1: CSF distance [mm]
RANGE_Y = (0, 100)     # T2* range in ms
DPI = 300
NR_BINS = 48           # bins along the depth axis

# =============================================================================
# Output directory
if not os.path.exists(OUTDIR):
    os.makedirs(OUTDIR)
print(" Output directory: {}\n".format(OUTDIR))

# -----------------------------------------------------------------------------
# Prepare figure
plt.style.use('dark_background')
fig, ax = plt.subplots(2, 2, figsize=(1920*2/DPI, 1080*2/DPI), dpi=DPI)
ax = ax.ravel()

for j in range(len(FIG_DATA)):  # Loop across individual subjects
    fig_data = np.load(FIG_DATA[j], allow_pickle=True).item()
    for i in range(len(TAGS)):  # Loop across ROIs
        # Collate measurements from all three tissue compartments.
        indvar = fig_data[TAGS[i]]["WM"]["Metric_x"]
        indvar = np.hstack([indvar, fig_data[TAGS[i]]["GM"]["Metric_x"]])
        indvar = np.hstack([indvar, fig_data[TAGS[i]]["CSF"]["Metric_x"]])
        depvar = fig_data[TAGS[i]]["WM"]["Metric_y"]
        depvar = np.hstack([depvar, fig_data[TAGS[i]]["GM"]["Metric_y"]])
        depvar = np.hstack([depvar, fig_data[TAGS[i]]["CSF"]["Metric_y"]])

        # Digitize independent var. and summarize the dependent var. per bin.
        # (The original also computed per-bin std and a standard error that
        # were never plotted -- and the ste divided by sqrt(size(std))==1 --
        # so those have been removed.)
        bins = np.linspace(RANGE_X[0], RANGE_X[1], NR_BINS + 1)
        depvar_median = np.zeros(NR_BINS)
        depvar_lobo = np.zeros(NR_BINS)
        depvar_hibo = np.zeros(NR_BINS)
        for k in range(NR_BINS):
            idx1 = indvar > bins[k]
            idx2 = indvar < bins[k+1]
            idx3 = idx1 * idx2
            # Only summarize well-populated bins; sparse bins plot as gaps
            # (assigning None into a float array yields NaN).
            if np.sum(idx3) > 2000:
                depvar_median[k] = np.median(depvar[idx3])
                depvar_lobo[k], depvar_hibo[k] = np.percentile(depvar[idx3],
                                                               [5, 95])
            else:
                depvar_median[k] = None
                depvar_lobo[k] = None
                depvar_hibo[k] = None

        # ---------------------------------------------------------------------
        # Line plots: 5th/95th percentile envelope in red, median in white.
        ax[i].plot(bins[:-1] + (bins[1] - bins[0]) / 2, depvar_lobo,
                   linewidth=0.5, color="red")
        ax[i].plot(bins[:-1] + (bins[1] - bins[0]) / 2, depvar_hibo,
                   linewidth=0.5, color="red")
        ax[i].plot(bins[:-1] + (bins[1] - bins[0]) / 2, depvar_median,
                   linewidth=0.5, color="white")

for i in range(4):
    # The original set the title twice in a row; one call suffices.
    ax[i].set_title(TAGS[i], color="white")
    ax[i].set_ylabel(r"T$_2^*$ [ms]")
    ax[i].set_ylim(RANGE_Y)

    # X axis break points (WM/GM and GM/CSF borders).
    ax[i].plot((0, 0), (0, 100), '-', linewidth=1.5,
               color=[100/255, 149/255, 237/255])
    ax[i].plot((1, 1), (0, 100), '-', linewidth=1.5,
               color=[255/255, 102/255, 0/255])

    # Custom tick labels; distances mirror outward from the GM borders.
    # BUG FIX: np.int was removed in NumPy 1.24; use the builtin int.
    ax[i].set_xticks([-0.7, -0.35, -0.01, 0.01, 0.5, 0.99, 1.01, 1.35, 1.7])
    ax[i].set_xticklabels([0.7, 0.35, None, 0, 0.5, 1, None, 0.35, 0.7])
    ax[i].set_yticks(np.linspace(RANGE_Y[0], RANGE_Y[1], 6, dtype=int))
    ax[i].set_yticklabels(np.linspace(RANGE_Y[0], RANGE_Y[1], 6,
                                      dtype=int))

    # Add text (positions are in data coordinates)
    ax[i].text(-0.7 + 0.025, 80, 'Below\ngray matter\n(White matter)',
               fontsize=10, color="white")
    ax[i].text(0 + 0.025, 80, 'Gray matter\n\n',
               fontsize=10, color="white")
    ax[i].text(1 + 0.025, 80, 'Above\ngray matter\n(CSF & vessels)',
               fontsize=10, color="white")
    # Add text (units)
    ax[i].text(-0.7 + 0.025, 2, 'Distance [mm]',
               fontsize=10, color="white")
    ax[i].text(0 + 0.025, 2, 'Equi-volume depths',
               fontsize=10, color="white")
    ax[i].text(1 + 0.025, 2, 'Distance [mm]',
               fontsize=10, color="white")

plt.tight_layout()
plt.savefig(os.path.join(OUTDIR, "{}_{}".format(SUBJ_ID, FIGURE_TAG)))
print("Finished.\n")
|
{"hexsha": "52ed6d78d6fb0dd6f257b74d2f367e0f9b733f63", "size": 5155, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/05_depth_profiles/03_group_depth_vs_T2star.py", "max_stars_repo_name": "ofgulban/meso-MRI", "max_stars_repo_head_hexsha": "15ef8e19aae6218833a06bf01418d3d83eafd8c7", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-21T13:48:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-21T13:48:01.000Z", "max_issues_repo_path": "scripts/05_depth_profiles/03_group_depth_vs_T2star.py", "max_issues_repo_name": "ofgulban/meso-MRI", "max_issues_repo_head_hexsha": "15ef8e19aae6218833a06bf01418d3d83eafd8c7", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/05_depth_profiles/03_group_depth_vs_T2star.py", "max_forks_repo_name": "ofgulban/meso-MRI", "max_forks_repo_head_hexsha": "15ef8e19aae6218833a06bf01418d3d83eafd8c7", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-21T13:48:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-21T13:48:08.000Z", "avg_line_length": 40.2734375, "max_line_length": 99, "alphanum_fraction": 0.5563530553, "include": true, "reason": "import numpy", "num_tokens": 1592}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.