text stringlengths 0 1.25M | meta stringlengths 47 1.89k |
|---|---|
"""
In order to connect to MongoDB on dicarlo5 server create an ssh tunnel using the
command below:
ssh -f -N -L 22334:localhost:22334 bashivan@dicarlo5.mit.edu
"""
from __future__ import print_function
import zmq
import sys
# sys.path.insert(0, '/Users/pouyabashivan/Dropbox (MIT)/Codes/Downloads/ThreeDWorld/ClientTools')
import ast
import argparse
from ClientTools.tdw_client import TDW_Client
from StringIO import StringIO
from PIL import Image
import time
import logging
import numpy as np
import time
import h5py
import re
import pymongo as pm
# Default constants for image generation (overridable from the command line).
DEF_NUM_SCENE_SW = 1  # Default number of scene switches during the run
DEF_NUM_SCENE_IMG = 100  # Default number of images captured per scene
DEF_IMG_SIZE = [224, 224, 3]  # Default image size as [width, height, channels]
DEF_h5_filename = 'test_file.hdf5'  # Default output HDF5 file name
DEF_obj_filename = 'ids.txt'  # Default world-object list file (not referenced below -- TODO confirm)
DEF_OBJ_DISCARD_TH = 0.001 # Threshold value to discard objects in the image (as % of image occupied by each object)
class TDWGenerator(TDW_Client):
    """TDW client specialized for generating labeled image datasets.

    On top of the plain ``TDW_Client`` it adds:
      - an object-id -> synset-label table read from a MongoDB instance,
      - scene setup / scene switching helpers,
      - per-image object filtering (by occupied pixel area) and label lookup.
    """

    def __init__(self, host_address,
                 world_objs_filename=None,  # Name of the file containing names of all objects existing in the world
                 queue_port_num="23402",
                 requested_port_num=None,
                 auto_select_port=True,
                 environment_config=None,
                 debug=True,
                 selected_build=None,
                 selected_forward=None,
                 initial_command="",
                 username=None,
                 description=None,
                 num_frames_per_msg=4,
                 get_obj_data=False,
                 send_scene_info=False):
        super(TDWGenerator, self).__init__(host_address,
                                           queue_port_num=queue_port_num,
                                           requested_port_num=requested_port_num,
                                           auto_select_port=auto_select_port,
                                           environment_config=environment_config,
                                           debug=debug,
                                           selected_build=selected_build,
                                           selected_forward=selected_forward,
                                           initial_command=initial_command,
                                           username=username,
                                           description=description,
                                           num_frames_per_msg=num_frames_per_msg,
                                           get_obj_data=get_obj_data,
                                           send_scene_info=send_scene_info)
        # Always define the attribute so that callers can test it for None
        # instead of hitting an AttributeError when no object list was given.
        self.obj_labels_dic = None
        # Query object labels from the DiCarlo5 MongoDB (requires the SSH
        # tunnel described in the module docstring).
        if world_objs_filename is not None:
            self.obj_labels_dic = self.read_world_objects_from_list(world_objs_filename)
        # BUG FIX: 'w-' is an h5py file mode, not a valid mode for the file
        # handler created by basicConfig (which uses open()); use plain 'w'.
        logging.basicConfig(filename='image_generator_log.txt', filemode='w', level=logging.INFO)

    def world_setup(self, config):
        """
        Set up the 3D world.

        :param config: dict of scene-generation parameters sent to the server.
        :return: server response (info dict, list of images)
        """
        self.load_config(config)
        self.load_profile({'screen_width': DEF_IMG_SIZE[0], 'screen_height': DEF_IMG_SIZE[1]})
        self.run()
        # Receive server response
        return self._rcv_imageset()

    def terminate_connection(self):
        """Ask the server to terminate the current session."""
        self.sock.send_json({'n': 4, 'msg': {"msg_type": "TERMINATE"}})

    @staticmethod
    def read_world_objects_from_list(filename):
        """
        Creates an association table between object IDs and labels (synset_tree)
        given a file containing the list of all objects in the world simulation.

        :param filename: file which contains the list of all objects existing in the world simulation.
        :return: association table (dict) of IDs <-> labels
        """
        print('Reading world objects from the list...')
        obj_names = []
        # Object names appear as "/name" tokens in the listing file.
        p = re.compile(r'/\S+')
        with open(filename, 'r') as f:
            for line in f:
                m = p.search(line)
                if m is not None:
                    obj_names.append(m.group()[1:])
        # Access the Mongo database and read the synset tree labels
        try:
            db_client = pm.MongoClient(port=22334)
            table = db_client['synthetic_generative']['3d_models']
            cursor = table.find({'type': 'shapenet',
                                 'version': 2,
                                 'id': {'$in': obj_names}})
            obj_labels_dic = dict()  # Stores the table for id-synset_tree correspondence
            for doc in cursor:
                obj_labels_dic[doc['id']] = doc['synset_tree']
        except Exception:
            # Most likely cause is a missing SSH tunnel to the DB host.
            print('Could not connect to DB. Create a SSH tunnel.')
            raise
        print('Finished reading object list! Table created.')
        return obj_labels_dic

    @staticmethod
    def list_2_string(in_list):
        """
        Makes a string representation of a python list.
        Use this function when the variable is a flat list of strings.

        :param in_list: input list
        :return: list variable in string format
        """
        return ','.join(in_list)  # This is much faster but can't be used for list of lists

    @staticmethod
    def var_2_string(in_var):
        """
        Makes a string representation of a python variable.
        Use this function when the variable is anything other than a list.

        :param in_var: input variable
        :return: variable in string format
        """
        return str(in_var)

    @staticmethod
    def string_2_list(in_string):
        """
        Restore the original list from its string representation.
        Use this function when the original variable was encoded with list_2_string.

        :param in_string: comma-joined string
        :return: original list variable.
        """
        return in_string.split(',')

    @staticmethod
    def string_2_var(in_string):
        """
        Restore the original variable from its string representation.
        Use this function when the original variable was encoded with var_2_string.

        :param in_string: string representation of a python literal.
        :return: original variable.
        """
        return ast.literal_eval(in_string)

    def generate_random_image(self):
        """
        Generates a random image by teleporting the avatar to a random pose.

        :return: server response (info dict, list of images)
        """
        self.sock.send_json({'n': 4, 'msg': {"msg_type": "CLIENT_INPUT", "teleport_random": True,
                                             'sendSceneInfo': False,
                                             "ang_vel": np.random.uniform(low=-1, high=1, size=(3,)).tolist()
                                             }})
        return self._rcv_imageset()

    def _rcv_imageset(self):
        """
        Receive the imageset returned from the server.

        :return:
            server_response: A dictionary containing the server response message
                             (observed objects, avatar, ...)
            images: list of images ([0]=normals, [1]=segmentations, [2]=image)
        """
        images = []
        # Receive server response
        server_response = self.sock.recv()
        if self.debug:
            print(server_response)
            for i in range(3):
                msg = self.sock.recv()
                images.append(np.array(Image.open(StringIO(msg)).convert('RGB')))
                # BUG FIX: report 1-based progress (was printing 0/3 .. 2/3).
                print('Received message {0}/3'.format(i + 1))
                if i == 2:  # Show the scene image
                    Image.open(StringIO(msg)).convert('RGB').show()
        else:
            logging.info('%' * 20 + ' Server Response ' + '%' * 20)
            logging.info(server_response)
            for i in range(3):
                msg = self.sock.recv()
                images.append(np.array(Image.open(StringIO(msg)).convert('RGB')))
        return ast.literal_eval(server_response), images

    def create_color_obj_table(self, image_info):
        """
        Creates a lookup table dictionary for color<>id correspondence.

        :param image_info: Scene info dictionary received from server.
        :return: dictionary containing the lookup table (int color code -> object id).
        """
        color_obj_table = dict()
        for row in image_info['sceneInfo']:
            # Rows describing prefab instances carry the object id as the
            # second comma-separated token; other rows use the name directly.
            m = re.match('Prefabs', row[0])
            if m is not None:
                m = re.search(r', \S+,', row[0])
                color_obj_table[int(row[1], 16)] = m.group()[2:-1]
            else:
                color_obj_table[int(row[1], 16)] = row[0]
        self.image_info = image_info
        return color_obj_table

    def process_image_objs(self, images, color_obj_table, discard_obj_th=DEF_OBJ_DISCARD_TH):
        """
        Check the pixel area for each object in the image and accept the image
        only if at least one known object occupies more than a threshold area.

        :param images: list of images, including [0]=normals [1]=labels [2]=image.
        :param color_obj_table: lookup table from create_color_obj_table (color code -> object id).
        :param discard_obj_th: Threshold for discarding an object (fraction of image area).
        :return: accept_img: Flag for whether to accept image or not.
                 obj_ids: list of object IDs (names)
                 obj_color_codes: list of color codes associated with each object.
                 obj_pixel_counts: list of pixel counts associated with each object.
        """
        accept_img = False  # Flag for accepting the image
        # Collapse the RGB label image into a single 24-bit integer per pixel.
        labels_img_collapsed = images[1][:, :, 0] * 256 ** 2 + \
                               images[1][:, :, 1] * 256 + \
                               images[1][:, :, 2]
        unique_colors, unique_counts = np.unique(labels_img_collapsed, return_counts=True)
        # Retrieve object IDs corresponding to all unique color codes
        unique_ids, nonexist_color_ind = [], []
        for i, color_id in enumerate(unique_colors):
            if color_id not in color_obj_table:
                logging.warning('Object color code {0} does not exist in scene info!'.format(color_id))
                nonexist_color_ind.append(i)
                continue
            unique_ids.append(color_obj_table[color_id])
        unique_ids = np.asarray(unique_ids)
        if nonexist_color_ind:
            # BUG FIX: the original deleted from unique_counts twice, so the
            # returned "color codes" were actually pixel counts.
            unique_colors = np.delete(unique_colors, nonexist_color_ind)
            unique_counts = np.delete(unique_counts, nonexist_color_ind)
        # Check image for existence of known objects larger than the threshold
        large_obj_ids = unique_ids[(unique_counts / float(images[1].shape[0] * images[1].shape[1])) > discard_obj_th]
        for obj in large_obj_ids:
            if obj in self.obj_labels_dic:
                accept_img = True
        return accept_img, unique_ids.tolist(), unique_colors.tolist(), unique_counts.tolist()

    def extract_labels(self, obj_ids):
        """
        Looks up objects in the world objects table and extracts the labels for each object in the image.

        :param obj_ids: list of objects observed in the image.
        :return: obj_labels: list of lists of object labels from synset_tree
                 valid_obj_flag: List of boolean values. True if the corresponding object is an object of interest.
        """
        obj_labels, valid_obj_flag = [], []
        for obj_id in obj_ids:  # renamed from 'id' to avoid shadowing the builtin
            if obj_id in self.obj_labels_dic:
                obj_labels.append(self.obj_labels_dic[obj_id])
                valid_obj_flag.append(True)
            else:
                obj_labels.append(None)
                valid_obj_flag.append(False)
        return obj_labels, valid_obj_flag

    def switch_scence(self, config):
        """
        Send server request to switch scene.

        NOTE: the method name keeps its historical misspelling ("scence")
        for backward compatibility with existing callers.

        :param config: scene configuration dictionary.
        :return: server response (info dict, list of images)
        """
        self.sock.send_json({'n': 4, 'msg': {"msg_type": "SCENE_SWITCH",
                                             "config": config,
                                             'sendSceneInfo': True,
                                             }})
        return self._rcv_imageset()
def main(args):
    """Generate a dataset of random TDW images and store it in an HDF5 file.

    The outer loop restarts the scene with new objects (SCENE_SWITCH); the
    inner loop teleports the agent randomly and captures images, discarding
    images that contain no sufficiently large known object.

    :param args: parsed argparse namespace (see the __main__ block below).
    """
    time_start = time.time()
    # Receive command line args
    NUM_SCENE_SW = args.NUM_SCENE_SW
    NUM_SCENE_IMG = args.NUM_SCENE_IMG
    IMG_SIZE = args.IMG_SIZE
    h5_filename = args.HDF5_file
    obj_filename = args.obj_file
    num_images = NUM_SCENE_SW * NUM_SCENE_IMG

    # Open the HDF5 file
    h5_file = h5py.File(h5_filename, 'w')
    # Variable-length unicode string type for stringified python objects.
    dt = h5py.special_dtype(vlen=unicode)

    def _image_ds(name):
        # One uint8 image per generated sample.
        return h5_file.create_dataset(name, tuple([num_images] + IMG_SIZE), dtype='uint8')

    def _string_ds(name):
        # One variable-length string per generated sample.
        return h5_file.create_dataset(name, (num_images,), dtype=dt)

    # Define HDF5 datasets
    ds_images = _image_ds('images')
    ds_images_segmentations = _image_ds('images_segmentations')
    ds_images_normals = _image_ds('images_normals')
    ds_ids = _string_ds('ids')
    ds_labels = _string_ds('labels')
    ds_obj_pixel_counts = _string_ds('obj_pixel_counts')
    ds_color_codes = _string_ds('color_codes')
    ds_image_info = _string_ds('image_info')
    ds_valid_obj_flag = _string_ds('valid_obj_flag')

    # Setup 3D world
    gen = TDWGenerator("18.93.5.202",
                       username="Pouya",
                       world_objs_filename=obj_filename,
                       description="Test_movements",
                       initial_command="request_create_environment",
                       selected_build="TDW-v1.0.0b05.x86_64",
                       get_obj_data=True,
                       send_scene_info=True,
                       debug=False
                       )
    # Check if the objects table exists
    assert gen.obj_labels_dic is not None, 'No world object table exists. Provide list of objects in the world.'

    # Setup the world with predefined configuration
    config = {
        "environment_scene": "ProceduralGeneration",
        "random_seed": 1,
        # Omit and it will just choose one at random. Chosen seeds are output into the log(under warning or log level).
        "should_use_standardized_size": False,
        "standardized_size": [1.0, 1.0, 1.0],
        "disabled_items": [],
        # ["SQUIRL", "SNAIL", "STEGOSRS"], // A list of item names to not use, e.g. ["lamp", "bed"] would exclude files with the word "lamp" or "bed" in their file path
        "permitted_items": [],  # ["bed1", "sofa_blue", "lamp"],
        "complexity": 20000,
        "num_ceiling_lights": 4,
        "minimum_stacking_base_objects": 5,
        "minimum_objects_to_stack": 100,
        "room_width": 45.0,
        "room_height": 20.0,
        "room_length": 45.0,
        "wall_width": 1.0,
        "door_width": 1.5,
        "door_height": 3.0,
        "window_size_width": 5.0,
        "window_size_height": 5.0,
        "window_placement_height": 5.0,
        "window_spacing": 10.0,  # Average spacing between windows on walls
        "wall_trim_height": 0.5,
        "wall_trim_thickness": 0.01,
        "min_hallway_width": 5.0,
        "number_rooms": 1,
        "max_wall_twists": 3,
        "max_placement_attempts": 300,
        # Maximum number of failed placements before we consider a room fully fil "grid_size": 0.4 #Determines how fine tuned a grid the objects are placed on during Proc. Gen. Smaller the number, the
    }
    image_info, _ = gen.world_setup(config)

    image_num = 0
    discarded_images_count = 0
    print('start up time: {0}'.format(time.time() - time_start))
    time_start = time.time()
    image_times = []
    while image_num < num_images:
        loop_start_time = time.time()
        if image_num % NUM_SCENE_IMG == 0:
            # Start a fresh scene and rebuild the color->object lookup table.
            image_info, _ = gen.switch_scence(config)
            scene_obj_colors_table = gen.create_color_obj_table(image_info)
        _, imageset = gen.generate_random_image()
        image_times.append(time.time() - loop_start_time)
        # Check whether the image contains objects and discard the labels for objects which are too small.
        # BUG FIX: the parsed --DISCARD_TH argument was previously ignored.
        image_good, obj_ids, obj_color_codes, obj_pixel_counts = gen.process_image_objs(
            imageset, scene_obj_colors_table, discard_obj_th=args.DISCARD_TH)
        if image_good:
            # Extract the labels vector from segmentation image.
            # BUG FIX: this call was previously (redundantly) made twice.
            obj_labels, obj_valid_flags = gen.extract_labels(obj_ids)
            ds_images_normals[image_num], \
                ds_images_segmentations[image_num], \
                ds_images[image_num] = imageset
            # Python dictionaries and lists should be stored as strings in HDF5 file.
            ds_labels[image_num] = gen.var_2_string(obj_labels)
            ds_ids[image_num] = gen.list_2_string(obj_ids)
            ds_obj_pixel_counts[image_num] = gen.var_2_string(obj_pixel_counts)
            ds_color_codes[image_num] = gen.var_2_string(obj_color_codes)
            ds_valid_obj_flag[image_num] = gen.var_2_string(obj_valid_flags)
            ds_image_info[image_num] = gen.var_2_string(image_info)
            image_num += 1
        else:
            logging.info('Discarded image ({0}).'.format(discarded_images_count + 1))
            discarded_images_count += 1

    print('Image times taken: {0}'.format(image_times))
    print('Total generation time: {0}'.format(time.time() - time_start))
    print('Average generation time: {0}'.format(np.mean(image_times)))
    print('Number of generated images: {0}'.format(image_num))
    print('Number of discarded images: {0}'.format(discarded_images_count))
    h5_file.close()
    gen.terminate_connection()
    print('Connection closed!')
if __name__ == '__main__':
    # BUG FIX: the description was a copy-paste error ("Runs R-CNN on fMRI data").
    parser = argparse.ArgumentParser(description='Generates randomly sampled images from the ThreeDWorld simulation.')
    parser.add_argument('obj_file', metavar='F', type=str,
                        help='Text file containing the list of all objects in the world simulation.')
    parser.add_argument('--NUM_SCENE_SW', dest='NUM_SCENE_SW', type=int,
                        help='Number of scene switches throughout the simulation.',
                        default=DEF_NUM_SCENE_SW)
    parser.add_argument('--NUM_SCENE_IMG', dest='NUM_SCENE_IMG', type=int,
                        help='Number of images to capture per scene.',
                        default=DEF_NUM_SCENE_IMG)
    parser.add_argument('--IMG_SIZE', dest='IMG_SIZE', type=int, nargs=3,
                        help='Image size (W, H, D).',
                        default=DEF_IMG_SIZE)
    # BUG FIX: the threshold is a fraction (default 0.001) so it must be parsed
    # as float, not int.  Also escape '%' -- argparse %-formats help strings,
    # so a lone '%' made '--help' raise an error.
    parser.add_argument('--DISCARD_TH', dest='DISCARD_TH', type=float,
                        help='Object discard threshold as %% of whole image.',
                        default=DEF_OBJ_DISCARD_TH)
    parser.add_argument('--HDF5_file', dest='HDF5_file', type=str,
                        help='HDF5 file to save the simulation results.',
                        default=DEF_h5_filename)
    main(parser.parse_args())
# Test for storing list of strings in HDF5
# import time
#
# our_list = ['n%08d' % i for i in range(10)]
# our_string = ','.join(our_list)
# start_time = time.time()
# for i in range(10000):
# converted_list = our_string.split(',')
# print('Elapsed time: {0}'.format(time.time() - start_time))
#
# our_string2 = str(our_list)
# start_time = time.time()
# for i in range(10000):
# converted_list = ast.literal_eval(our_string2)
# print('Elapsed time: {0}'.format(time.time() - start_time))
| {"hexsha": "3393dae959793d0736e4246c51011ddde43ff5f8", "size": 21145, "ext": "py", "lang": "Python", "max_stars_repo_path": "ClientTools/TDW_image_generator.py", "max_stars_repo_name": "neuroailab/ThreeDWorld", "max_stars_repo_head_hexsha": "62ca47c4030489b6986216000fe123c5e2367c3c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-08-21T01:35:35.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-21T01:35:35.000Z", "max_issues_repo_path": "ClientTools/TDW_image_generator.py", "max_issues_repo_name": "neuroailab/ThreeDWorld", "max_issues_repo_head_hexsha": "62ca47c4030489b6986216000fe123c5e2367c3c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ClientTools/TDW_image_generator.py", "max_forks_repo_name": "neuroailab/ThreeDWorld", "max_forks_repo_head_hexsha": "62ca47c4030489b6986216000fe123c5e2367c3c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.0651731161, "max_line_length": 199, "alphanum_fraction": 0.6139985812, "include": true, "reason": "import numpy", "num_tokens": 4733} |
\section{Conclusion and Future Work}
Within the described simple experiment we showed that expressive,
verbal surrogate models with high fidelity can be found for DNNs using
the developed methodology. We suggest that the approach is promising
and worth future research and optimization.
% In future work we would like to validate some of the choices
% for the concept analysis optimization on different setups, and
% automate manual choosing routines.
% % Auto-determine intersection thresholds
% This includes the choice of the intersection encoding
% thresholds, % which are currently manually balanced between information loss and noise in the ground truth.
% % Validate loss
% and weighting and balancing of the losses. % which may depend on the concept shape.
%
%%% Variable concept size
The proposed concept detection approach requires a concept to have
little variance in its size. It should easily extend to
a concept with several size categories (\forexample close by and far away
faces) by merging the result for each category.
% It would be interesting to test this and maybe find a way to
% auto-determine the categorization.
% The size information could also be of benefit if included into the
% background knowledge.
%
%%% Variable number of concept instances per image
A next step for the background knowledge extraction would be to extend
it to an arbitrary number of concept occurrences per image, where
currently the algorithm assumes a fixed number (exactly one \ilprule{mouth}, one
\ilprule{nose}, two \ilprule{eyes}). This could \forexample be achieved by allowing
a maximum number per sliding window rather than an exact amount per image.
% This would correspond to the intuition that an object (of given size)
% cannot occur arbitrarily often in an image section.
%
%%% Learned relations
In cases where the predicates cannot be pre-defined, one can learn
the relations as functions on the DNN output from examples, as
demonstrated in \cite{donadello_logic_2017}.
%%% How to select concepts?
We further did not consider completeness
(cf. Sec.~\ref{sec:conceptanalysis}) of the chosen concepts:
They may not be well aligned with the decision relevant features used
by the DNN, infringing fidelity of the surrogate model.
We suggest two ways to remedy this:
One could rely on (possibly less interpretable) concepts found via
concept mining \cite{ghorbani_towards_2019}.
Or, since ILP is good at rejecting irrelevant information, one can
start with a much larger set of pre-defined, domain related concepts.
% as done in \cite{losch_interpretability_2019}.
We further
% In both cases, the set of concepts may be over-complete: There are
% several different solutions to the task relying on different sub-sets
% of the concepts. Thus, we
assume that best fidelity can only be achieved with the \emph{minimal}
complete sub-set of most decision-relevant concepts, which fosters
uniqueness of the solution.
For a decision relevance measure see \forexample
\cite{ghorbani_towards_2019}.
%%% Other than visual tasks?
It may be noted that the presented concept analysis approach is not
tied to image classification:
As long as the ground truth for concepts in the form of masks or
classification values is available, the method can be applied to any
DNN latent space
(imagine \forexample audio, text, or video classification).
However, spatial or temporal positions and relations are currently
inferred using the receptive field information of convolutional DNNs.
This restriction may again be resolved by learning of relations.
%%% User study
Lastly, in order to examine the understandability of the induced
explanation in a real world scenario, we need to let explanations be
evaluated in a human user study. For this matter, subjective
evaluation measures have to be specifically designed for verbal
explanations.
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "concept_embeddings_and_ilp"
%%% End:
| {"hexsha": "41888c79ce56351692cf33520d680deb48e12e38", "size": 3930, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "paper-tex/conclusion.tex", "max_stars_repo_name": "lthamm/concept-embeddings-and-ilp", "max_stars_repo_head_hexsha": "27592c6424147a2fbb54d7daebc92cd72b3f4a0c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-11-02T12:21:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-02T14:01:37.000Z", "max_issues_repo_path": "paper-tex/conclusion.tex", "max_issues_repo_name": "lthamm/concept-embeddings-and-ilp", "max_issues_repo_head_hexsha": "27592c6424147a2fbb54d7daebc92cd72b3f4a0c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-11-06T07:58:13.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-13T16:11:30.000Z", "max_forks_repo_path": "paper-tex/conclusion.tex", "max_forks_repo_name": "lthamm/concept-embeddings-and-ilp", "max_forks_repo_head_hexsha": "27592c6424147a2fbb54d7daebc92cd72b3f4a0c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-11-03T14:54:16.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-03T14:54:16.000Z", "avg_line_length": 46.2352941176, "max_line_length": 109, "alphanum_fraction": 0.7992366412, "num_tokens": 852} |
# Check the "minij" matrix from MatrixDepot: for a randomly chosen order
# it must be symmetric and positive definite.
order = rand(1:10)
M = matrixdepot("minij", order)
@test issym(M)
@test isposdef(M)
println("'minij' passed test...")
| {"hexsha": "e8a903109f406684357eabe5ee3aa905312ab982", "size": 110, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/test_minij.jl", "max_stars_repo_name": "JuliaPackageMirrors/MatrixDepot.jl", "max_stars_repo_head_hexsha": "86b9c9ce3ad7bf0ea8f282624696c9174c157bcc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/test_minij.jl", "max_issues_repo_name": "JuliaPackageMirrors/MatrixDepot.jl", "max_issues_repo_head_hexsha": "86b9c9ce3ad7bf0ea8f282624696c9174c157bcc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/test_minij.jl", "max_forks_repo_name": "JuliaPackageMirrors/MatrixDepot.jl", "max_forks_repo_head_hexsha": "86b9c9ce3ad7bf0ea8f282624696c9174c157bcc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.3333333333, "max_line_length": 33, "alphanum_fraction": 0.6454545455, "num_tokens": 38} |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Azimuthal integration
# https://github.com/silx-kit/pyFAI
#
# Copyright (C) 2015-2016 European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""This module contains the Worker class:
A tool able to perform azimuthal integration with:
additional saving capabilities like
- save as 2/3D structure in a HDF5 File
- read from HDF5 files
Aims at being integrated into a plugin like LImA or as model for the GUI
The configuration of this class is mainly done via a dictionary transmitted as a JSON string:
Here are the valid keys:
- "dist"
- "poni1"
- "poni2"
- "rot1"
- "rot3"
- "rot2"
- "pixel1"
- "pixel2"
- "splineFile"
- "wavelength"
- "poni" #path of the file
- "chi_discontinuity_at_0"
- "do_mask"
- "do_dark"
- "do_azimuthal_range"
- "do_flat"
- "do_2D"
- "azimuth_range_min"
- "azimuth_range_max"
- "polarization_factor"
- "nbpt_rad"
- "do_solid_angle"
- "do_radial_range"
- "do_poisson"
- "delta_dummy"
- "nbpt_azim"
- "flat_field"
- "radial_range_min"
- "dark_current"
- "do_polarization"
- "mask_file"
- "detector"
- "unit"
- "radial_range_max"
- "val_dummy"
- "do_dummy"
- "method"
"""
from __future__ import with_statement, print_function, division
__author__ = "Jerome Kieffer"
__contact__ = "Jerome.Kieffer@ESRF.eu"
__license__ = "MIT"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "10/01/2018"
__status__ = "development"
import threading
import os.path
import logging
import json
logger = logging.getLogger(__name__)
import numpy
import fabio
from .detectors import detector_factory
from .azimuthalIntegrator import AzimuthalIntegrator
from .distortion import Distortion
from . import units
try:
from .ext.preproc import preproc
except ImportError as err:
logger.warning("Unable to import preproc: %s", err)
preproc = None
# from .io import h5py, HDF5Writer
def make_ai(config):
    """Build an AzimuthalIntegrator from a configuration dictionary.

    Stand-alone helper: the integrator is configured but not initialized.

    :param config: dict with all parameters (see module docstring for keys)
    :return: configured (but uninitialized) AzimuthalIntegrator
    """
    # Start either from a PONI file or from a blank integrator.
    poni_file = config.get("poni")
    if poni_file and os.path.isfile(poni_file):
        ai = AzimuthalIntegrator.sload(poni_file)
    else:
        ai = AzimuthalIntegrator()

    detector_name = config.get("detector", None)
    if detector_name:
        ai.detector = detector_factory(detector_name)

    wl = config.get("wavelength", 0)
    if wl:
        # Sanity check: wavelengths are expected in meters (~1e-10).
        if not (0 < wl <= 1e-6):
            logger.warning("Wavelength is in meter ... unlikely value %s", wl)
        ai.wavelength = wl

    spline = config.get("splineFile")
    if spline and os.path.isfile(spline):
        ai.detector.splineFile = spline

    # Explicit geometry values in the config override the PONI/detector ones.
    for attr in ("pixel1", "pixel2", "dist", "poni1", "poni2", "rot1", "rot2", "rot3"):
        val = config.get(attr)
        if val is not None:
            setattr(ai, attr, val)

    if config.get("chi_discontinuity_at_0"):
        ai.setChiDiscAtZero()

    mask_path = config.get("mask_file")
    if mask_path and config.get("do_mask"):
        if os.path.exists(mask_path):
            try:
                mask_data = fabio.open(mask_path).data
            except Exception as error:
                logger.error("Unable to load mask file %s, error %s", mask_path, error)
            else:
                ai.mask = mask_data

    # Dark / flat corrections: keep only entries that point at real files.
    darks = [name.strip() for name in config.get("dark_current", "").split(",")
             if os.path.isfile(name.strip())]
    if darks and config.get("do_dark"):
        ai.set_darkfiles(darks)

    flats = [name.strip() for name in config.get("flat_field", "").split(",")
             if os.path.isfile(name.strip())]
    if flats and config.get("do_flat"):
        ai.set_flatfiles(flats)
    return ai
class Worker(object):
def __init__(self, azimuthalIntegrator=None,
shapeIn=(2048, 2048), shapeOut=(360, 500),
unit="r_mm", dummy=None, delta_dummy=None,
azimuthalIntgrator=None):
"""
:param azimuthalIntegrator AzimuthalIntegrator: pyFAI.AzimuthalIntegrator instance
:param azimuthalIntgrator AzimuthalIntegrator: pyFAI.AzimuthalIntegrator instance (deprecated)
:param shapeIn: image size in input
:param shapeOut: Integrated size: can be (1,2000) for 1D integration
:param unit: can be "2th_deg, r_mm or q_nm^-1 ...
"""
# TODO remove it in few month (added on 2016-08-04)
if azimuthalIntgrator is not None:
logger.warning("'Worker(azimuthalIntgrator=...)' parameter is deprecated cause it contains a typo. Please use 'azimuthalIntegrator='")
azimuthalIntegrator = azimuthalIntgrator
self._sem = threading.Semaphore()
if azimuthalIntegrator is None:
self.ai = AzimuthalIntegrator()
else:
self.ai = azimuthalIntegrator
# self.config = {}
# self.config_file = "azimInt.json"
# self.nbpt_azim = 0
# if type(config) == dict:
# self.config = config
# elif type(config) in types.StringTypes:
# if os.path.isfile(config):
# self.config = json.load(open(config, "r"))
# self.config_file(config)
# else:
# self.config = json.loads(config)
# if self.config:
# self.configure()
self._normalization_factor = None # Value of the monitor: divides the intensity by this value for normalization
self.nbpt_azim, self.nbpt_rad = shapeOut
self._unit = units.to_unit(unit)
self.polarization_factor = None
self.dummy = dummy
self.delta_dummy = delta_dummy
self.correct_solid_angle = True
self.dark_current_image = None
self.flat_field_image = None
self.mask_image = None
self.subdir = ""
self.extension = None
self.do_poisson = None
self.needs_reset = True
self.output = "numpy" # exports as numpy array by default
self.shape = shapeIn
self.method = "csr"
self.radial = None
self.azimuthal = None
self.radial_range = None
self.azimuth_range = None
self.safe = True
def __repr__(self):
"""
pretty print of myself
"""
lstout = ["Azimuthal Integrator:", self.ai.__repr__(),
"Input image shape: %s" % list(self.shape),
"Number of points in radial direction: %s" % self.nbpt_rad,
"Number of points in azimuthal direction: %s" % self.nbpt_azim,
"Unit in radial dimension: %s" % self.unit,
"Correct for solid angle: %s" % self.correct_solid_angle,
"Polarization factor: %s" % self.polarization_factor,
"Dark current image: %s" % self.dark_current_image,
"Flat field image: %s" % self.flat_field_image,
"Mask image: %s" % self.mask_image,
"Dummy: %s,\tDelta_Dummy: %s" % (self.dummy, self.delta_dummy),
"Directory: %s, \tExtension: %s" % (self.subdir, self.extension),
"Radial range: %s" % self.radial_range,
"Azimuth range: %s" % self.azimuth_range]
return os.linesep.join(lstout)
def do_2D(self):
    """Return True when more than one azimuthal bin is requested, i.e. 2D integration."""
    azimuthal_bins = self.nbpt_azim
    return azimuthal_bins > 1
def reset(self):
    """
    this is just to force the integrator to initialize

    Double-checked pattern: the flag is tested once without the lock for
    speed, then re-tested under ``self._sem`` so that concurrent callers
    trigger at most one actual ``ai.reset()``.
    """
    if self.needs_reset:
        with self._sem:
            if self.needs_reset:
                self.ai.reset()
                self.needs_reset = False
    # print self.__repr__()
def reconfig(self, shape=(2048, 2048), sync=False):
    """
    This is just to force the integrator to initialize with a given input image shape

    :param shape: shape of the input image
    :param sync: return only when synchronized
    """
    self.shape = shape
    self.ai.reset()
    # warmup() pushes a zero image through process() in a thread;
    # sync=True waits for that thread to finish
    self.warmup(sync)
def process(self, data, normalization_factor=1.0, writer=None, metadata=None):
    """
    Integrate one frame and return the result as a numpy array.

    #TODO: dark, flat, sa are missing

    :param data: numpy array containing the input image
    :param normalization_factor: per-frame monitor value; combined with the
        worker-level normalization factor when one is set
    :param writer: An open writer in which 'write' will be called with the result of the integration
    :param metadata: optional metadata forwarded to the integrator
    :return: the integrated data when ``self.output == "numpy"``, else None
    """
    with self._sem:
        # Combine the global monitor (when set and non-zero) with the per-frame factor
        monitor = self._normalization_factor * normalization_factor if self._normalization_factor else normalization_factor
        kwarg = {"unit": self.unit,
                 "dummy": self.dummy,
                 "delta_dummy": self.delta_dummy,
                 "method": self.method,
                 "polarization_factor": self.polarization_factor,
                 "safe": self.safe,  # the original dict listed this key twice
                 "data": data,
                 "correctSolidAngle": self.correct_solid_angle,
                 }
        if metadata is not None:
            kwarg["metadata"] = metadata
        if monitor is not None:
            kwarg["normalization_factor"] = monitor
        if self.do_2D():
            kwarg["npt_rad"] = self.nbpt_rad
            kwarg["npt_azim"] = self.nbpt_azim
        else:
            kwarg["npt"] = self.nbpt_rad
        kwarg["error_model"] = "poisson" if self.do_poisson else None
        if self.radial_range is not None:
            kwarg["radial_range"] = self.radial_range
        if self.azimuth_range is not None:
            kwarg["azimuth_range"] = self.azimuth_range
        try:
            if self.do_2D():
                integrated_result = self.ai.integrate2d(**kwarg)
                self.radial = integrated_result.radial
                self.azimuthal = integrated_result.azimuthal
                result = integrated_result.intensity
            else:
                integrated_result = self.ai.integrate1d(**kwarg)
                self.radial = integrated_result.radial
                self.azimuthal = None
                # stack (radial, intensity[, sigma]) as columns
                result = numpy.vstack(integrated_result).T
        except Exception as err:
            err2 = ["error in integration",
                    str(err),
                    "data.shape: %s" % (data.shape,),
                    "data.size: %s" % data.size,
                    "ai:",
                    str(self.ai)]
            logger.error("; ".join(err2))
            # bare raise keeps the original traceback intact
            raise
        if writer is not None:
            writer.write(integrated_result)
        if self.output == "numpy":
            return result
def setSubdir(self, path):
    """
    Set the relative or absolute path for processed data
    """
    self.subdir = path
def setExtension(self, ext):
    """
    enforce the extension of the processed data file written;
    a falsy value clears any previously enforced extension
    """
    self.extension = ext if ext else None
def setDarkcurrentFile(self, imagefile):
    """Record *imagefile* as the dark-current image and register it on the integrator."""
    self.dark_current_image = imagefile
    self.ai.set_darkfiles(imagefile)
def setFlatfieldFile(self, imagefile):
    """Record *imagefile* as the flat-field image and register it on the integrator."""
    self.flat_field_image = imagefile
    self.ai.set_flatfiles(imagefile)
def setJsonConfig(self, jsonconfig):
    """Configure the worker from a JSON description.

    :param jsonconfig: either the path of a JSON file or a JSON string
    """
    print("start config ...")
    if os.path.isfile(jsonconfig):
        config = json.load(open(jsonconfig, "r"))
    else:
        config = json.loads(jsonconfig)
    if "poni" in config:
        poni = config["poni"]
        if poni and os.path.isfile(poni):
            # A PONI file fully defines the geometry: rebuild the integrator from it
            self.ai = AzimuthalIntegrator.sload(poni)
    detector = config.get("detector", "detector")
    self.ai.detector = detector_factory(detector)
    if "wavelength" in config:
        wavelength = config["wavelength"]
        try:
            fwavelength = float(wavelength)
        except ValueError:
            logger.error("Unable to convert wavelength to float: %s", wavelength)
        else:
            # wavelength is expected in meter: warn on implausible values
            if fwavelength <= 0 or fwavelength > 1e-6:
                logger.warning("Wavelength is in meter ... unlikely value %s", fwavelength)
            self.ai.wavelength = fwavelength
    splineFile = config.get("splineFile")
    if splineFile and os.path.isfile(splineFile):
        self.ai.detector.splineFile = splineFile
    # geometry parameters fall back to benign defaults when a key is absent
    self.ai.pixel1 = float(config.get("pixel1", 1))
    self.ai.pixel2 = float(config.get("pixel2", 1))
    self.ai.dist = config.get("dist", 1)
    self.ai.poni1 = config.get("poni1", 0)
    self.ai.poni2 = config.get("poni2", 0)
    self.ai.rot1 = config.get("rot1", 0)
    self.ai.rot2 = config.get("rot2", 0)
    self.ai.rot3 = config.get("rot3", 0)
    if config.get("chi_discontinuity_at_0"):
        self.ai.setChiDiscAtZero()
    else:
        self.ai.setChiDiscAtPi()
    mask_file = config.get("mask_file")
    do_mask = config.get("do_mask")
    if mask_file and os.path.exists(mask_file) and do_mask:
        try:
            mask = fabio.open(mask_file).data
        except Exception as error:
            logger.error("Unable to load mask file %s, error %s", mask_file, error)
        else:
            self.ai.mask = mask
            self.mask_image = os.path.abspath(mask_file)
    # dark/flat entries are comma-separated file lists; missing files are silently skipped
    self.ai.set_darkfiles([i.strip() for i in config.get("dark_current", "").split(",")
                           if os.path.isfile(i.strip())])
    self.ai.set_flatfiles([i.strip() for i in config.get("flat_field", "").split(",")
                           if os.path.isfile(i.strip())])
    self.dark_current_image = self.ai.darkfiles
    self.flat_field_image = self.ai.flatfiles
    if config.get("do_2D"):
        self.nbpt_azim = int(config.get("nbpt_azim"))
    else:
        self.nbpt_azim = 1
    if config.get("nbpt_rad"):
        self.nbpt_rad = int(config["nbpt_rad"])
    self.unit = units.to_unit(config.get("unit", units.TTH_DEG))
    self.do_poisson = config.get("do_poisson")
    if config.get("do_polarization"):
        self.polarization_factor = config.get("polarization_factor")
    else:
        self.polarization_factor = None
    if config.get("do_OpenCL"):
        self.method = "csr_ocl"
    else:
        self.method = "csr"
    logger.info(self.ai.__repr__())
    self.reset()
    # For now we do not calculate the LUT as the size of the input image is unknown
def set_unit(self, value):
    """Setter for the radial unit: *value* is coerced through ``units.to_unit``."""
    self._unit = units.to_unit(value)

def get_unit(self):
    """Getter for the radial unit (the object produced by ``units.to_unit``)."""
    return self._unit
unit = property(get_unit, set_unit)
def set_error_model(self, value):
    """Select the error model: "poisson" enables it, None/"" disables it,
    anything else raises RuntimeError."""
    if value == "poisson":
        self.do_poisson = True
        return
    if value is None or value == "":
        self.do_poisson = False
        return
    raise RuntimeError("Unsupported error model '%s'" % value)

def get_error_model(self):
    """Return "poisson" when the Poisson error model is active, else None."""
    return "poisson" if self.do_poisson else None
error_model = property(get_error_model, set_error_model)
def get_config(self):
    """return configuration as a dictionary

    Attributes that neither the integrator nor the worker defines are
    simply skipped (the integrator may not be fully initialized yet).
    """
    config = {"unit": str(self.unit)}
    for key in ["dist", "poni1", "poni2", "rot1", "rot3", "rot2", "pixel1", "pixel2", "splineFile", "wavelength"]:
        try:
            config[key] = getattr(self.ai, key)
        except AttributeError:
            # was a bare `except:` which also swallowed SystemExit/KeyboardInterrupt
            pass
    for key in ["nbpt_azim", "nbpt_rad", "polarization_factor", "dummy", "delta_dummy",
                "correct_solid_angle", "dark_current_image", "flat_field_image",
                "mask_image", "do_poisson", "shape", "method"]:
        try:
            config[key] = getattr(self, key)
        except AttributeError:
            pass
    return config
#
# "poni" #path of the file
#
# "chi_discontinuity_at_0"
# "do_mask"
# "do_dark"
# "do_azimuthal_range"
# "do_flat"
# "do_2D"
# "azimuth_range_min"
# "azimuth_range_max"
#
# "polarization_factor"
# "nbpt_rad"
# "do_solid_angle"
# "do_radial_range"
# "do_poisson"
# "delta_dummy"
# "nbpt_azim"
# "flat_field"
# "radial_range_min"
# "dark_current"
# "do_polarization"
# "mask_file"
# "detector"
# "unit"
# "radial_range_max"
# "val_dummy"
# "do_dummy"
# "method"
# }
def get_json_config(self):
    """return configuration as a JSON string

    The content is the dictionary produced by :meth:`get_config`.
    Previously this was an unimplemented TODO stub returning None.
    """
    return json.dumps(self.get_config(), indent=2)
def save_config(self, filename=None):
    """Save the current configuration (see :meth:`get_config`) as JSON.

    Previously this method resolved the filename and then did nothing.

    :param filename: destination file; defaults to self.config_file
        NOTE(review): self.config_file is commented out in __init__, so
        callers should pass an explicit filename — confirm.
    """
    if not filename:
        filename = self.config_file
    with open(filename, "w") as jsonfile:
        json.dump(self.get_config(), jsonfile, indent=2)
def warmup(self, sync=False):
    """
    Process a dummy (all-zero) image so that everything is initialized

    :param sync: when True, block until the dummy processing is finished
    """
    dummy_frame = numpy.zeros(self.shape, dtype=numpy.float32)
    worker = threading.Thread(target=self.process, name="init2d", args=(dummy_frame,))
    worker.start()
    if sync:
        worker.join()
def get_normalization_factor(self):
    """Thread-safe read of the monitor value used to normalize intensities."""
    with self._sem:
        return self._normalization_factor

def set_normalization_factor(self, value):
    """Thread-safe update of the monitor value used to normalize intensities."""
    with self._sem:
        self._normalization_factor = value
normalization_factor = property(get_normalization_factor, set_normalization_factor)
class PixelwiseWorker(object):
    """
    Simple worker doing dark, flat, solid angle and polarization correction
    """

    def __init__(self, dark=None, flat=None, solidangle=None, polarization=None,
                 mask=None, dummy=None, delta_dummy=None, device=None):
        """Constructor of the worker

        :param dark: dark-current image (array), subtracted from the data
        :param flat: flat-field image (array), the data are divided by it
        :param solidangle: solid-angle array, the data are divided by it
        :param polarization: numpy array with 2D polarization corrections
        :param mask: invalid-pixel array; an all-non-positive array marks
            negatives as invalid, otherwise any non-zero value is invalid
        :param dummy: value assigned to masked/invalid pixels
        :param delta_dummy: tolerance around `dummy` for flagging invalid pixels
        :param device: Used to influance OpenCL behavour: can be "cpu", "GPU", "Acc" or even an OpenCL context
        """
        self.ctx = None
        self.dark = numpy.ascontiguousarray(dark, dtype=numpy.float32) if dark is not None else None
        self.flat = numpy.ascontiguousarray(flat, dtype=numpy.float32) if flat is not None else None
        self.solidangle = numpy.ascontiguousarray(solidangle, dtype=numpy.float32) if solidangle is not None else None
        self.polarization = numpy.ascontiguousarray(polarization, dtype=numpy.float32) if polarization is not None else None
        if mask is None:
            self.mask = False
        elif mask.min() < 0 and mask.max() == 0:  # 0 is valid, <0 is invalid
            self.mask = (mask < 0).astype(numpy.int8)
        else:
            self.mask = mask.astype(numpy.int8)
        self.dummy = dummy
        self.delta_dummy = delta_dummy
        if device is not None:
            logger.warning("GPU is not yet implemented")

    def process(self, data, normalization_factor=None):
        """
        Process the data and apply a normalization factor

        :param data: input data
        :param normalization_factor: normalization factor (the data are divided by it)
        :return: processed data
        """
        if preproc is not None:
            proc_data = preproc(data,
                                dark=self.dark,
                                flat=self.flat,
                                solidangle=self.solidangle,
                                polarization=self.polarization,
                                absorption=None,
                                mask=self.mask,
                                dummy=self.dummy,
                                delta_dummy=self.delta_dummy,
                                normalization_factor=normalization_factor,
                                empty=None)
        else:
            # Fallback numpy implementation.
            # BUGFIX: work on a local mask -- the previous code OR-ed the
            # dummy pixels into self.mask, so the worker's mask grew from
            # frame to frame, polluting state across calls.
            mask = self.mask
            if self.dummy is not None:
                if self.delta_dummy is None:
                    invalid = (data == self.dummy)
                else:
                    invalid = abs(data - self.dummy) <= self.delta_dummy
                mask = numpy.logical_or(invalid, mask)
                do_mask = True
            else:
                do_mask = (mask is not False)
            # Explicitly make a copy so the caller's array is left untouched!
            proc_data = numpy.array(data, dtype=numpy.float32)
            if self.dark is not None:
                proc_data -= self.dark
            if self.flat is not None:
                proc_data /= self.flat
            if self.solidangle is not None:
                proc_data /= self.solidangle
            if self.polarization is not None:
                proc_data /= self.polarization
            if normalization_factor is not None:
                proc_data /= normalization_factor
            if do_mask:
                # BUGFIX: force boolean indexing -- the int8 mask stored by
                # __init__ would otherwise be used as integer fancy indices.
                proc_data[numpy.asarray(mask, dtype=bool)] = self.dummy or 0
        return proc_data
class DistortionWorker(object):
    """
    Worker applying dark, flat, solid angle and polarization corrections,
    then (optionally) a detector distortion correction.
    """

    def __init__(self, detector=None, dark=None, flat=None, solidangle=None, polarization=None,
                 mask=None, dummy=None, delta_dummy=None, device=None):
        """Constructor of the worker

        :param detector: detector used to build the Distortion corrector; None disables it
        :param dark: dark-current image (array), subtracted from the data
        :param flat: flat-field image (array), the data are divided by it
        :param solidangle: solid-angle array, the data are divided by it
        :param polarization: numpy array with 2D polarization corrections
        :param mask: invalid-pixel array; an all-non-positive array marks
            negatives as invalid, otherwise any non-zero value is invalid
        :param dummy: value assigned to masked/invalid pixels
        :param delta_dummy: tolerance around `dummy` for flagging invalid pixels
        :param device: Used to influance OpenCL behavour: can be "cpu", "GPU", "Acc" or even an OpenCL context
        """
        self.ctx = None
        self.dark = numpy.ascontiguousarray(dark, dtype=numpy.float32) if dark is not None else None
        self.flat = numpy.ascontiguousarray(flat, dtype=numpy.float32) if flat is not None else None
        self.solidangle = numpy.ascontiguousarray(solidangle, dtype=numpy.float32) if solidangle is not None else None
        self.polarization = numpy.ascontiguousarray(polarization, dtype=numpy.float32) if polarization is not None else None
        if mask is None:
            self.mask = False
        elif mask.min() < 0 and mask.max() == 0:  # 0 is valid, <0 is invalid
            self.mask = (mask < 0)
        else:
            self.mask = mask.astype(bool)
        self.dummy = dummy
        self.delta_dummy = delta_dummy
        if device is not None:
            logger.warning("GPU is not yet implemented")
        if detector is None:
            self.distortion = None
        else:
            self.distortion = Distortion(detector, method="LUT", device=device,
                                         mask=self.mask, empty=self.dummy or 0)

    def process(self, data, normalization_factor=1.0):
        """
        Process the data and apply a normalization factor

        :param data: input data
        :param normalization_factor: normalization factor (the data are divided by it)
        :return: processed (and possibly distortion-corrected) data
        """
        if preproc is not None:
            proc_data = preproc(data,
                                dark=self.dark,
                                flat=self.flat,
                                solidangle=self.solidangle,
                                polarization=self.polarization,
                                absorption=None,
                                mask=self.mask,
                                dummy=self.dummy,
                                delta_dummy=self.delta_dummy,
                                normalization_factor=normalization_factor,
                                empty=None)
        else:
            # Fallback numpy implementation.
            # BUGFIX: work on a local mask -- the previous code OR-ed the
            # dummy pixels into self.mask, growing the worker's mask from
            # frame to frame.
            mask = self.mask
            if self.dummy is not None:
                if self.delta_dummy is None:
                    invalid = (data == self.dummy)
                else:
                    invalid = abs(data - self.dummy) <= self.delta_dummy
                mask = numpy.logical_or(invalid, mask)
                do_mask = True
            else:
                do_mask = (mask is not False)
            # Explicitly make a copy so the caller's array is left untouched!
            proc_data = numpy.array(data, dtype=numpy.float32)
            if self.dark is not None:
                proc_data -= self.dark
            if self.flat is not None:
                proc_data /= self.flat
            if self.solidangle is not None:
                proc_data /= self.solidangle
            if self.polarization is not None:
                proc_data /= self.polarization
            if normalization_factor is not None:
                proc_data /= normalization_factor
            if do_mask:
                proc_data[mask] = self.dummy or 0
        if self.distortion is not None:
            return self.distortion.correct(proc_data, self.dummy, self.delta_dummy)
        else:
            # BUGFIX: previously returned the raw input `data`, silently
            # discarding every correction computed into proc_data.
            return proc_data
| {"hexsha": "a701066bfb118bb8841a3589a6acbc5b358fc2f7", "size": 26860, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyFAI/worker.py", "max_stars_repo_name": "vallsv/pyFAI", "max_stars_repo_head_hexsha": "64143652c2b219978ec370bf2fa215af01f937c2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pyFAI/worker.py", "max_issues_repo_name": "vallsv/pyFAI", "max_issues_repo_head_hexsha": "64143652c2b219978ec370bf2fa215af01f937c2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2016-09-12T11:58:05.000Z", "max_issues_repo_issues_event_max_datetime": "2016-09-12T11:58:05.000Z", "max_forks_repo_path": "pyFAI/worker.py", "max_forks_repo_name": "vallsv/pyFAI", "max_forks_repo_head_hexsha": "64143652c2b219978ec370bf2fa215af01f937c2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.5291005291, "max_line_length": 146, "alphanum_fraction": 0.5753164557, "include": true, "reason": "import numpy", "num_tokens": 6125} |
#!/usr/bin/env python3
from pprint import pprint
import networkx as nx
from networkx.drawing.nx_pydot import read_dot, write_dot
import matplotlib.pyplot as plt
import numpy as np
def draw_graph(g, weights=False):
    """Display *g* with matplotlib: circular layout, edge weights as edge
    labels and, when *weights* is True, node weights as node labels."""
    digraph = nx.DiGraph(g)
    layout = nx.circular_layout(digraph)
    edge_weights = nx.get_edge_attributes(digraph, 'weight')
    node_weights = nx.get_node_attributes(digraph, 'weight')
    node_labels = node_weights if weights else None
    nx.draw_networkx(digraph, layout, font_color='white', font_size=10, labels=node_labels)
    nx.draw_networkx_edge_labels(digraph, layout, edge_labels=edge_weights)
    plt.show()
def add_weighted_node(g, n, weight):
    """Insert node *n* into graph *g*, attaching *weight* as its 'weight' attribute."""
    g.add_node(n, weight=weight)
def load_graph(path):
    """Load a graph from a DOT file, converting every node and edge 'weight'
    attribute from string to int."""
    graph = read_dot(path)
    for node in graph.nodes:
        graph.nodes[node]['weight'] = int(graph.nodes[node]['weight'])
    for edge in graph.edges:
        graph.edges[edge]['weight'] = int(graph.edges[edge]['weight'])
    return graph
def save_graph(g, path):
    """Write a copy of *g* to a DOT file, labelling nodes as "name;weight"
    and edges with their weight (the input graph is left untouched)."""
    out = g.copy()
    for node in out.nodes:
        out.nodes[node]['label'] = '{};{}'.format(node, out.nodes[node]["weight"])
    for edge in out.edges:
        out.edges[edge]['label'] = out.edges[edge]['weight']
    write_dot(out, path)
def __reconstruct_edges(g, u, v):
    """
    Compute all paths between two nodes and fix the labelling for MultiDiGraphs.

    :param g: A NetworkX (Multi)DiGraph representing a synchronous circuit.
    :param u: The first of the two nodes.
    :param v: The second of the two nodes.
    :return: The list of paths with the labels fixed.
    """
    # Keep only length-2 "paths", i.e. the direct u->v connections.
    edges = list(filter(lambda p: len(p) == 2, nx.all_simple_paths(g, u, v)))
    if isinstance(g, nx.MultiDiGraph):
        # For multigraphs, append the edge key so entries index parallel edges.
        # NOTE(review): the key type is sampled from the first edge of the
        # whole graph -- this assumes all edge keys share one type; confirm.
        key = list(map(lambda e: e[2], g.edges))[0]
        if isinstance(key, str):
            edges = [edges[i] + [f'{i}'] for i in range(len(edges))]
        elif isinstance(key, int):
            edges = [edges[i] + [i] for i in range(len(edges))]
        else:
            raise NotImplementedError('Keys should be either strings or integers')
    elif isinstance(g, nx.DiGraph):
        pass
    else:
        raise NotImplementedError('This function only works on (Multi)DiGraph')
    return edges
def w(g, e):
    """Register count w(e): the 'weight' attribute of edge *e* in *g*."""
    attrs = g.edges[e]
    return attrs['weight']
def d(g, v):
    """Propagation delay d(v): the 'weight' attribute of node *v* in *g*."""
    attrs = g.nodes[v]
    return attrs['weight']
def w_path(g, p):
    """Path register count w(p): for each consecutive node pair along *p*,
    take the cheapest of the (possibly parallel) connecting edges and sum
    those minima."""
    total = 0
    for src, dst in zip(p, p[1:]):
        parallel = __reconstruct_edges(g, src, dst)
        total += min(g.edges[e]['weight'] for e in parallel)
    return total
def d_path(g, path):
    """Path propagation delay d(p): the sum of node weights along *path*."""
    return sum(g.nodes[v]['weight'] for v in path)
def check_if_synchronous_circuit(g):
    """Verify that *g* satisfies the constraints of a synchronous circuit
    (conditions D1, W1, W2 — presumably from the classical retiming
    formulation; confirm against the project's reference).

    :param g: A NetworkX (Multi)DiGraph with 'weight' attributes on nodes
        (propagation delay) and edges (register count).
    :return: True when all three conditions hold, False otherwise.
    """
    # D1: the propagation delay d(v) is non-negative for each vertex v
    for v in g.nodes:
        if g.nodes[v]['weight'] < 0:
            return False
    # W1: the register count w(e) is a non-negative integer for each edge e
    for e in g.edges:
        if g.edges[e]['weight'] < 0:
            return False
    # W2: in any directed cycle of G, there is some edge with strictly positive register count
    for nodes in nx.simple_cycles(g):
        cost = 0
        for i in range(len(nodes)):
            u = nodes[i]
            v = nodes[(i + 1) % len(nodes)]
            # For parallel edges, only the cheapest one matters for W2.
            edges = __reconstruct_edges(g, u, v)
            min_cost = min(map(lambda e: g.edges[e]['weight'], edges))
            cost += min_cost
            # One strictly positive edge is enough: stop scanning this cycle.
            if min_cost > 0:
                break
        if cost == 0:
            return False
    return True
def print_wd(m):
rows, cols = zip(*m.keys())
nodes = list(set(rows).union(set(cols)))
nodes.sort()
print(' | ', end='')
for u in nodes:
print(f'{str(u):>2s}', end=' ')
print('\n---+', end='')
for _ in nodes:
print('---', end='')
print()
for u in nodes:
print(f'{str(u):>2s} |', end=' ')
for v in nodes:
if (u, v) in m:
print(f'{m[(u, v)]:>2d}', end=' ')
else:
print('XX', end=' ')
print()
| {"hexsha": "5361c092f8cbfccdf2bd12045f83289929031273", "size": 3844, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/utils.py", "max_stars_repo_name": "fabiocody/retiming", "max_stars_repo_head_hexsha": "63d0823fa895f0614cd9f859e3529a0956446da3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-23T16:08:02.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-23T16:08:02.000Z", "max_issues_repo_path": "src/utils.py", "max_issues_repo_name": "fabiocody/retiming", "max_issues_repo_head_hexsha": "63d0823fa895f0614cd9f859e3529a0956446da3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/utils.py", "max_forks_repo_name": "fabiocody/retiming", "max_forks_repo_head_hexsha": "63d0823fa895f0614cd9f859e3529a0956446da3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.9022556391, "max_line_length": 104, "alphanum_fraction": 0.5749219563, "include": true, "reason": "import numpy,import networkx,from networkx", "num_tokens": 1030} |
[STATEMENT]
lemma FinalAllow_approximating_in_doubt_deny: "matcher_agree_on_exact_matches \<gamma> \<beta> \<Longrightarrow>
good_ruleset rs \<Longrightarrow>
(\<beta>, in_doubt_deny),p\<turnstile> \<langle>rs, Undecided\<rangle> \<Rightarrow>\<^sub>\<alpha> Decision FinalAllow \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs, Undecided\<rangle> \<Rightarrow> Decision FinalAllow"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>matcher_agree_on_exact_matches \<gamma> \<beta>; good_ruleset rs; (\<beta>, in_doubt_deny),p\<turnstile> \<langle>rs, Undecided\<rangle> \<Rightarrow>\<^sub>\<alpha> Decision FinalAllow\<rbrakk> \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs, Undecided\<rangle> \<Rightarrow> Decision FinalAllow
[PROOF STEP]
apply(rotate_tac 2)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>(\<beta>, in_doubt_deny),p\<turnstile> \<langle>rs, Undecided\<rangle> \<Rightarrow>\<^sub>\<alpha> Decision FinalAllow; matcher_agree_on_exact_matches \<gamma> \<beta>; good_ruleset rs\<rbrakk> \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs, Undecided\<rangle> \<Rightarrow> Decision FinalAllow
[PROOF STEP]
apply(induction rs Undecided "Decision FinalAllow" rule: approximating_bigstep_induct)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>m a. \<lbrakk>Matching_Ternary.matches (\<beta>, in_doubt_deny) m a p; a = action.Accept; matcher_agree_on_exact_matches \<gamma> \<beta>; good_ruleset [Rule m a]\<rbrakk> \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>[Rule m a], Undecided\<rangle> \<Rightarrow> Decision FinalAllow
2. \<And>rs rs\<^sub>1 rs\<^sub>2 t. \<lbrakk>rs = rs\<^sub>1 @ rs\<^sub>2; (\<beta>, in_doubt_deny),p\<turnstile> \<langle>rs\<^sub>1, Undecided\<rangle> \<Rightarrow>\<^sub>\<alpha> t; \<lbrakk>t = Decision FinalAllow; matcher_agree_on_exact_matches \<gamma> \<beta>; good_ruleset rs\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>1, Undecided\<rangle> \<Rightarrow> Decision FinalAllow; (\<beta>, in_doubt_deny),p\<turnstile> \<langle>rs\<^sub>2, t\<rangle> \<Rightarrow>\<^sub>\<alpha> Decision FinalAllow; \<lbrakk>t = Undecided; matcher_agree_on_exact_matches \<gamma> \<beta>; good_ruleset rs\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>2, Undecided\<rangle> \<Rightarrow> Decision FinalAllow; matcher_agree_on_exact_matches \<gamma> \<beta>; good_ruleset rs\<rbrakk> \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs, Undecided\<rangle> \<Rightarrow> Decision FinalAllow
[PROOF STEP]
apply(simp_all)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>m a. \<lbrakk>Matching_Ternary.matches (\<beta>, in_doubt_deny) m action.Accept p; a = action.Accept; matcher_agree_on_exact_matches \<gamma> \<beta>; good_ruleset [Rule m action.Accept]\<rbrakk> \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>[Rule m action.Accept], Undecided\<rangle> \<Rightarrow> Decision FinalAllow
2. \<And>rs rs\<^sub>1 rs\<^sub>2 t. \<lbrakk>rs = rs\<^sub>1 @ rs\<^sub>2; (\<beta>, in_doubt_deny),p\<turnstile> \<langle>rs\<^sub>1, Undecided\<rangle> \<Rightarrow>\<^sub>\<alpha> t; \<lbrakk>t = Decision FinalAllow; good_ruleset rs\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>1, Undecided\<rangle> \<Rightarrow> Decision FinalAllow; (\<beta>, in_doubt_deny),p\<turnstile> \<langle>rs\<^sub>2, t\<rangle> \<Rightarrow>\<^sub>\<alpha> Decision FinalAllow; \<lbrakk>t = Undecided; good_ruleset rs\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>2, Undecided\<rangle> \<Rightarrow> Decision FinalAllow; matcher_agree_on_exact_matches \<gamma> \<beta>; good_ruleset (rs\<^sub>1 @ rs\<^sub>2)\<rbrakk> \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>1 @ rs\<^sub>2, Undecided\<rangle> \<Rightarrow> Decision FinalAllow
[PROOF STEP]
apply (metis action.distinct(1) action.distinct(5) iptables_bigstep.accept not_exact_match_in_doubt_deny_approx_match)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>rs rs\<^sub>1 rs\<^sub>2 t. \<lbrakk>rs = rs\<^sub>1 @ rs\<^sub>2; (\<beta>, in_doubt_deny),p\<turnstile> \<langle>rs\<^sub>1, Undecided\<rangle> \<Rightarrow>\<^sub>\<alpha> t; \<lbrakk>t = Decision FinalAllow; good_ruleset rs\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>1, Undecided\<rangle> \<Rightarrow> Decision FinalAllow; (\<beta>, in_doubt_deny),p\<turnstile> \<langle>rs\<^sub>2, t\<rangle> \<Rightarrow>\<^sub>\<alpha> Decision FinalAllow; \<lbrakk>t = Undecided; good_ruleset rs\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>2, Undecided\<rangle> \<Rightarrow> Decision FinalAllow; matcher_agree_on_exact_matches \<gamma> \<beta>; good_ruleset (rs\<^sub>1 @ rs\<^sub>2)\<rbrakk> \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>1 @ rs\<^sub>2, Undecided\<rangle> \<Rightarrow> Decision FinalAllow
[PROOF STEP]
apply(simp add: good_ruleset_append, clarify)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>rs rs\<^sub>1 rs\<^sub>2 t. \<lbrakk>(\<beta>, in_doubt_deny),p\<turnstile> \<langle>rs\<^sub>1, Undecided\<rangle> \<Rightarrow>\<^sub>\<alpha> t; t = Decision FinalAllow \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>1, Undecided\<rangle> \<Rightarrow> Decision FinalAllow; (\<beta>, in_doubt_deny),p\<turnstile> \<langle>rs\<^sub>2, t\<rangle> \<Rightarrow>\<^sub>\<alpha> Decision FinalAllow; t = Undecided \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>2, Undecided\<rangle> \<Rightarrow> Decision FinalAllow; matcher_agree_on_exact_matches \<gamma> \<beta>; good_ruleset rs\<^sub>1; good_ruleset rs\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>1 @ rs\<^sub>2, Undecided\<rangle> \<Rightarrow> Decision FinalAllow
[PROOF STEP]
apply(case_tac t)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>rs rs\<^sub>1 rs\<^sub>2 t. \<lbrakk>(\<beta>, in_doubt_deny),p\<turnstile> \<langle>rs\<^sub>1, Undecided\<rangle> \<Rightarrow>\<^sub>\<alpha> t; t = Decision FinalAllow \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>1, Undecided\<rangle> \<Rightarrow> Decision FinalAllow; (\<beta>, in_doubt_deny),p\<turnstile> \<langle>rs\<^sub>2, t\<rangle> \<Rightarrow>\<^sub>\<alpha> Decision FinalAllow; t = Undecided \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>2, Undecided\<rangle> \<Rightarrow> Decision FinalAllow; matcher_agree_on_exact_matches \<gamma> \<beta>; good_ruleset rs\<^sub>1; good_ruleset rs\<^sub>2; t = Undecided\<rbrakk> \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>1 @ rs\<^sub>2, Undecided\<rangle> \<Rightarrow> Decision FinalAllow
2. \<And>rs rs\<^sub>1 rs\<^sub>2 t x2. \<lbrakk>(\<beta>, in_doubt_deny),p\<turnstile> \<langle>rs\<^sub>1, Undecided\<rangle> \<Rightarrow>\<^sub>\<alpha> t; t = Decision FinalAllow \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>1, Undecided\<rangle> \<Rightarrow> Decision FinalAllow; (\<beta>, in_doubt_deny),p\<turnstile> \<langle>rs\<^sub>2, t\<rangle> \<Rightarrow>\<^sub>\<alpha> Decision FinalAllow; t = Undecided \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>2, Undecided\<rangle> \<Rightarrow> Decision FinalAllow; matcher_agree_on_exact_matches \<gamma> \<beta>; good_ruleset rs\<^sub>1; good_ruleset rs\<^sub>2; t = Decision x2\<rbrakk> \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>1 @ rs\<^sub>2, Undecided\<rangle> \<Rightarrow> Decision FinalAllow
[PROOF STEP]
apply(simp)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>rs\<^sub>1 rs\<^sub>2 t. \<lbrakk>(\<beta>, in_doubt_deny),p\<turnstile> \<langle>rs\<^sub>1, Undecided\<rangle> \<Rightarrow>\<^sub>\<alpha> Undecided; (\<beta>, in_doubt_deny),p\<turnstile> \<langle>rs\<^sub>2, Undecided\<rangle> \<Rightarrow>\<^sub>\<alpha> Decision FinalAllow; \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>2, Undecided\<rangle> \<Rightarrow> Decision FinalAllow; matcher_agree_on_exact_matches \<gamma> \<beta>; good_ruleset rs\<^sub>1; good_ruleset rs\<^sub>2; t = Undecided\<rbrakk> \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>1 @ rs\<^sub>2, Undecided\<rangle> \<Rightarrow> Decision FinalAllow
2. \<And>rs rs\<^sub>1 rs\<^sub>2 t x2. \<lbrakk>(\<beta>, in_doubt_deny),p\<turnstile> \<langle>rs\<^sub>1, Undecided\<rangle> \<Rightarrow>\<^sub>\<alpha> t; t = Decision FinalAllow \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>1, Undecided\<rangle> \<Rightarrow> Decision FinalAllow; (\<beta>, in_doubt_deny),p\<turnstile> \<langle>rs\<^sub>2, t\<rangle> \<Rightarrow>\<^sub>\<alpha> Decision FinalAllow; t = Undecided \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>2, Undecided\<rangle> \<Rightarrow> Decision FinalAllow; matcher_agree_on_exact_matches \<gamma> \<beta>; good_ruleset rs\<^sub>1; good_ruleset rs\<^sub>2; t = Decision x2\<rbrakk> \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>1 @ rs\<^sub>2, Undecided\<rangle> \<Rightarrow> Decision FinalAllow
[PROOF STEP]
apply(drule(2) approximating_bigstep_undecided_to_undecided_in_doubt_deny_approx[where \<Gamma>=\<Gamma>])
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>rs\<^sub>1 rs\<^sub>2 t. \<lbrakk>(\<beta>, in_doubt_deny),p\<turnstile> \<langle>rs\<^sub>1, Undecided\<rangle> \<Rightarrow>\<^sub>\<alpha> Undecided; (\<beta>, in_doubt_deny),p\<turnstile> \<langle>rs\<^sub>2, Undecided\<rangle> \<Rightarrow>\<^sub>\<alpha> Decision FinalAllow; \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>2, Undecided\<rangle> \<Rightarrow> Decision FinalAllow; good_ruleset rs\<^sub>1; good_ruleset rs\<^sub>2; t = Undecided; \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>1, Undecided\<rangle> \<Rightarrow> Undecided \<or> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>1, Undecided\<rangle> \<Rightarrow> Decision FinalAllow\<rbrakk> \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>1 @ rs\<^sub>2, Undecided\<rangle> \<Rightarrow> Decision FinalAllow
2. \<And>rs rs\<^sub>1 rs\<^sub>2 t x2. \<lbrakk>(\<beta>, in_doubt_deny),p\<turnstile> \<langle>rs\<^sub>1, Undecided\<rangle> \<Rightarrow>\<^sub>\<alpha> t; t = Decision FinalAllow \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>1, Undecided\<rangle> \<Rightarrow> Decision FinalAllow; (\<beta>, in_doubt_deny),p\<turnstile> \<langle>rs\<^sub>2, t\<rangle> \<Rightarrow>\<^sub>\<alpha> Decision FinalAllow; t = Undecided \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>2, Undecided\<rangle> \<Rightarrow> Decision FinalAllow; matcher_agree_on_exact_matches \<gamma> \<beta>; good_ruleset rs\<^sub>1; good_ruleset rs\<^sub>2; t = Decision x2\<rbrakk> \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>1 @ rs\<^sub>2, Undecided\<rangle> \<Rightarrow> Decision FinalAllow
[PROOF STEP]
apply(erule disjE)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>rs\<^sub>1 rs\<^sub>2 t. \<lbrakk>(\<beta>, in_doubt_deny),p\<turnstile> \<langle>rs\<^sub>1, Undecided\<rangle> \<Rightarrow>\<^sub>\<alpha> Undecided; (\<beta>, in_doubt_deny),p\<turnstile> \<langle>rs\<^sub>2, Undecided\<rangle> \<Rightarrow>\<^sub>\<alpha> Decision FinalAllow; \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>2, Undecided\<rangle> \<Rightarrow> Decision FinalAllow; good_ruleset rs\<^sub>1; good_ruleset rs\<^sub>2; t = Undecided; \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>1, Undecided\<rangle> \<Rightarrow> Undecided\<rbrakk> \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>1 @ rs\<^sub>2, Undecided\<rangle> \<Rightarrow> Decision FinalAllow
2. \<And>rs\<^sub>1 rs\<^sub>2 t. \<lbrakk>(\<beta>, in_doubt_deny),p\<turnstile> \<langle>rs\<^sub>1, Undecided\<rangle> \<Rightarrow>\<^sub>\<alpha> Undecided; (\<beta>, in_doubt_deny),p\<turnstile> \<langle>rs\<^sub>2, Undecided\<rangle> \<Rightarrow>\<^sub>\<alpha> Decision FinalAllow; \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>2, Undecided\<rangle> \<Rightarrow> Decision FinalAllow; good_ruleset rs\<^sub>1; good_ruleset rs\<^sub>2; t = Undecided; \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>1, Undecided\<rangle> \<Rightarrow> Decision FinalAllow\<rbrakk> \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>1 @ rs\<^sub>2, Undecided\<rangle> \<Rightarrow> Decision FinalAllow
3. \<And>rs rs\<^sub>1 rs\<^sub>2 t x2. \<lbrakk>(\<beta>, in_doubt_deny),p\<turnstile> \<langle>rs\<^sub>1, Undecided\<rangle> \<Rightarrow>\<^sub>\<alpha> t; t = Decision FinalAllow \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>1, Undecided\<rangle> \<Rightarrow> Decision FinalAllow; (\<beta>, in_doubt_deny),p\<turnstile> \<langle>rs\<^sub>2, t\<rangle> \<Rightarrow>\<^sub>\<alpha> Decision FinalAllow; t = Undecided \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>2, Undecided\<rangle> \<Rightarrow> Decision FinalAllow; matcher_agree_on_exact_matches \<gamma> \<beta>; good_ruleset rs\<^sub>1; good_ruleset rs\<^sub>2; t = Decision x2\<rbrakk> \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>1 @ rs\<^sub>2, Undecided\<rangle> \<Rightarrow> Decision FinalAllow
[PROOF STEP]
apply (metis iptables_bigstep.seq)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>rs\<^sub>1 rs\<^sub>2 t. \<lbrakk>(\<beta>, in_doubt_deny),p\<turnstile> \<langle>rs\<^sub>1, Undecided\<rangle> \<Rightarrow>\<^sub>\<alpha> Undecided; (\<beta>, in_doubt_deny),p\<turnstile> \<langle>rs\<^sub>2, Undecided\<rangle> \<Rightarrow>\<^sub>\<alpha> Decision FinalAllow; \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>2, Undecided\<rangle> \<Rightarrow> Decision FinalAllow; good_ruleset rs\<^sub>1; good_ruleset rs\<^sub>2; t = Undecided; \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>1, Undecided\<rangle> \<Rightarrow> Decision FinalAllow\<rbrakk> \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>1 @ rs\<^sub>2, Undecided\<rangle> \<Rightarrow> Decision FinalAllow
2. \<And>rs rs\<^sub>1 rs\<^sub>2 t x2. \<lbrakk>(\<beta>, in_doubt_deny),p\<turnstile> \<langle>rs\<^sub>1, Undecided\<rangle> \<Rightarrow>\<^sub>\<alpha> t; t = Decision FinalAllow \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>1, Undecided\<rangle> \<Rightarrow> Decision FinalAllow; (\<beta>, in_doubt_deny),p\<turnstile> \<langle>rs\<^sub>2, t\<rangle> \<Rightarrow>\<^sub>\<alpha> Decision FinalAllow; t = Undecided \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>2, Undecided\<rangle> \<Rightarrow> Decision FinalAllow; matcher_agree_on_exact_matches \<gamma> \<beta>; good_ruleset rs\<^sub>1; good_ruleset rs\<^sub>2; t = Decision x2\<rbrakk> \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>1 @ rs\<^sub>2, Undecided\<rangle> \<Rightarrow> Decision FinalAllow
[PROOF STEP]
apply (metis iptables_bigstep.decision iptables_bigstep.seq)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>rs rs\<^sub>1 rs\<^sub>2 t x2. \<lbrakk>(\<beta>, in_doubt_deny),p\<turnstile> \<langle>rs\<^sub>1, Undecided\<rangle> \<Rightarrow>\<^sub>\<alpha> t; t = Decision FinalAllow \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>1, Undecided\<rangle> \<Rightarrow> Decision FinalAllow; (\<beta>, in_doubt_deny),p\<turnstile> \<langle>rs\<^sub>2, t\<rangle> \<Rightarrow>\<^sub>\<alpha> Decision FinalAllow; t = Undecided \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>2, Undecided\<rangle> \<Rightarrow> Decision FinalAllow; matcher_agree_on_exact_matches \<gamma> \<beta>; good_ruleset rs\<^sub>1; good_ruleset rs\<^sub>2; t = Decision x2\<rbrakk> \<Longrightarrow> \<Gamma>,\<gamma>,p\<turnstile> \<langle>rs\<^sub>1 @ rs\<^sub>2, Undecided\<rangle> \<Rightarrow> Decision FinalAllow
[PROOF STEP]
by (metis Decision_approximating_bigstep_fun approximating_semantics_imp_fun iptables_bigstep.decision iptables_bigstep.seq) | {"llama_tokens": 5872, "file": "Iptables_Semantics_Semantics_Embeddings", "length": 12} |
/-
Copyright (c) 2016 Jeremy Avigad. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Andrew Zipperer, Jeremy Avigad
We provide two versions of the quoptient construction. They use the same names and notation:
one lives in the namespace 'quotient_group' and the other lives in the namespace
'quotient_group_general'.
The first takes a group, A, and a normal subgroup, H. We have
quotient H := the quotient of A by H
qproj H a := the projection, with notation a' * G
qproj H ' s := the image of s, with notation s / G
extend H respf := given f : A → B respecting the equivalence relation, we get a function
f : quotient G → B
bar f := the above, G = ker f)
The definition is constructive, using quotient types. We prove all the characteristic properties.
As in the SSReflect library, we also provide a construction to quotient by an *arbitrary subgroup*.
Now we have
quotient H := the quotient of normalizer H by H
qproj H a := still denoted a '* H, the projection when a is in normalizer H,
arbitrary otherwise
qproj H G := still denoted G / H, the image of the above
extend H G respf := given a homomorphism on G with ker_in G f ⊆ H, extends to a
homomorphism G / H
bar G f := the above, with H = ker_in f G
This quotient H is defined by composing the first one with the construction which turns
normalizer H into a group.
-/
import .subgroup_to_group theories.move
open set function subtype classical quot
namespace group_theory
open coset_notation
variables {A B C : Type}
/- the quotient group -/
namespace quotient_group
variables [group A] (H : set A) [is_normal H]
definition lcoset_setoid [instance] : setoid A :=
setoid.mk (lcoset_equiv H) (equivalence_lcoset_equiv H)
definition quotient := quot (lcoset_setoid H)
private definition qone : quotient H := ⟦ 1 ⟧
private definition qmul : quotient H → quotient H → quotient H :=
quot.lift₂
(λ a b, ⟦a * b⟧)
(λ a₁ a₂ b₁ b₂ e₁ e₂, quot.sound (lcoset_equiv_mul H e₁ e₂))
private definition qinv : quotient H → quotient H :=
quot.lift
(λ a, ⟦a⁻¹⟧)
(λ a₁ a₂ e, quot.sound (lcoset_equiv_inv H e))
private proposition qmul_assoc (a b c : quotient H) :
qmul H (qmul H a b) c = qmul H a (qmul H b c) :=
quot.induction_on₂ a b (λ a b, quot.induction_on c (λ c,
have H : ⟦a * b * c⟧ = ⟦a * (b * c)⟧, by rewrite mul.assoc,
H))
private proposition qmul_qone (a : quotient H) : qmul H a (qone H) = a :=
quot.induction_on a (λ a', show ⟦a' * 1⟧ = ⟦a'⟧, by rewrite mul_one)
private proposition qone_qmul (a : quotient H) : qmul H (qone H) a = a :=
quot.induction_on a (λ a', show ⟦1 * a'⟧ = ⟦a'⟧, by rewrite one_mul)
private proposition qmul_left_inv (a : quotient H) : qmul H (qinv H a) a = qone H :=
quot.induction_on a (λ a', show ⟦a'⁻¹ * a'⟧ = ⟦1⟧, by rewrite mul.left_inv)
protected definition group [instance] : group (quotient H) :=
⦃ group,
mul := qmul H,
inv := qinv H,
one := qone H,
mul_assoc := qmul_assoc H,
mul_one := qmul_qone H,
one_mul := qone_qmul H,
mul_left_inv := qmul_left_inv H
⦄
-- these theorems characterize the quotient group
definition qproj (a : A) : quotient H := ⟦a⟧
infix ` '* `:65 := λ {A' : Type} [group A'] a H' [is_normal H'], qproj H' a
infix ` / ` := λ {A' : Type} [group A'] G H' [is_normal H'], qproj H' ' G
proposition is_hom_qproj [instance] : is_hom (qproj H) :=
is_mul_hom.mk (λ a b, rfl)
variable {H}
proposition qproj_eq_qproj {a b : A} (h : a * H = b * H) : a '* H = b '* H :=
quot.sound h
proposition lcoset_eq_lcoset_of_qproj_eq_qproj {a b : A} (h : a '* H = b '* H) : a * H = b * H :=
quot.exact h
variable (H)
proposition qproj_eq_qproj_iff (a b : A) : a '* H = b '* H ↔ a * H = b * H :=
iff.intro lcoset_eq_lcoset_of_qproj_eq_qproj qproj_eq_qproj
proposition ker_qproj [is_subgroup H] : ker (qproj H) = H :=
ext (take a,
begin
rewrite [↑ker, mem_set_of_iff, -hom_one (qproj H), qproj_eq_qproj_iff,
one_lcoset],
show a * H = H ↔ a ∈ H, from iff.intro mem_of_lcoset_eq_self lcoset_eq_self_of_mem
end)
proposition qproj_eq_one_iff [is_subgroup H] (a : A) : a '* H = 1 ↔ a ∈ H :=
have H : qproj H a = 1 ↔ a ∈ ker (qproj H), from iff.rfl,
by rewrite [H, ker_qproj]
variable {H}
proposition qproj_eq_one_of_mem [is_subgroup H] {a : A} (aH : a ∈ H) : a '* H = 1 :=
iff.mpr (qproj_eq_one_iff H a) aH
proposition mem_of_qproj_eq_one [is_subgroup H] {a : A} (h : a '* H = 1) : a ∈ H :=
iff.mp (qproj_eq_one_iff H a) h
variable (H)
proposition surjective_qproj : surjective (qproj H) :=
take y, quot.induction_on y (λ a, exists.intro a rfl)
variable {H}
proposition quotient_induction {P : quotient H → Prop} (h : ∀ a, P (a '* H)) : ∀ a, P a :=
quot.ind h
proposition quotient_induction₂ {P : quotient H → quotient H → Prop}
(h : ∀ a₁ a₂, P (a₁ '* H) (a₂ '* H)) :
∀ a₁ a₂, P a₁ a₂ :=
quot.ind₂ h
variable (H)
proposition image_qproj_self [is_subgroup H] : H / H = '{1} :=
eq_of_subset_of_subset
(image_subset_of_maps_to
(take x, suppose x ∈ H,
show x '* H ∈ '{1},
from mem_singleton_of_eq (qproj_eq_one_of_mem `x ∈ H`)))
(take x, suppose x ∈ '{1},
have x = 1, from eq_of_mem_singleton this,
show x ∈ H / H, by rewrite this; apply mem_image_of_mem _ one_mem)
-- extending a function A → B to a function A / H → B
section respf
variable {H}
variables {f : A → B} (respf : ∀ a₁ a₂, a₁ * H = a₂ * H → f a₁ = f a₂)
definition extend : quotient H → B := quot.lift f respf
proposition extend_qproj (a : A) : extend respf (a '* H) = f a := rfl
proposition extend_comp_qproj : extend respf ∘ (qproj H) = f := rfl
proposition image_extend (G : set A) : (extend respf) ' (G / H) = f ' G :=
by rewrite [-image_comp]
variable [group B]
proposition is_hom_extend [instance] [is_hom f] : is_hom (extend respf) :=
is_mul_hom.mk (take a b,
show (extend respf (a * b)) = (extend respf a) * (extend respf b), from
quot.induction_on₂ a b (take a b, hom_mul f a b))
proposition ker_extend : ker (extend respf) = ker f / H :=
eq_of_subset_of_subset
(quotient_induction
(take a, assume Ha : qproj H a ∈ ker (extend respf),
have f a = 1, from Ha,
show a '* H ∈ ker f / H,
from mem_image_of_mem _ this))
(image_subset_of_maps_to
(take a, assume h : a ∈ ker f,
show extend respf (a '* H) = 1, from h))
end respf
end quotient_group
/- the first homomorphism theorem for the quotient group -/
namespace quotient_group
variables [group A] [group B] (f : A → B) [is_hom f]
lemma eq_of_lcoset_equiv_ker ⦃a b : A⦄ (h : lcoset_equiv (ker f) a b) : f a = f b :=
have b⁻¹ * a ∈ ker f, from inv_mul_mem_of_lcoset_eq_lcoset h,
eq.symm (eq_of_inv_mul_mem_ker this)
definition bar : quotient (ker f) → B := extend (eq_of_lcoset_equiv_ker f)
proposition bar_qproj (a : A) : bar f (a '* ker f) = f a := rfl
proposition is_hom_bar [instance] : is_hom (bar f) := is_hom_extend _
proposition image_bar (G : set A) : bar f ' (G / ker f) = f ' G :=
by rewrite [↑bar, image_extend]
proposition image_bar_univ : bar f ' univ = f ' univ :=
by rewrite [↑bar, -image_eq_univ_of_surjective (surjective_qproj (ker f)),
image_extend]
proposition surj_on_bar : surj_on (bar f) univ (f ' univ) :=
by rewrite [↑surj_on, image_bar_univ]; apply subset.refl
proposition ker_bar_eq : ker (bar f) = '{1} :=
by rewrite [↑bar, ker_extend, image_qproj_self]
proposition injective_bar : injective (bar f) :=
injective_of_ker_eq_singleton_one (ker_bar_eq f)
end quotient_group
/- a generic morphism extension property -/
section
variables [group A] [group B] [group C]
variables (G : set A) [is_subgroup G]
variables (g : A → C) (f : A → B)
noncomputable definition gen_extend : C → B := λ c, f (inv_fun g G 1 c)
variables {G g f}
proposition eq_of_ker_in_subset {a₁ a₂ : A} (a₁G : a₁ ∈ G) (a₂G : a₂ ∈ G)
[is_hom_on g G] [is_hom_on f G] (Hker : ker_in g G ⊆ ker f) (H' : g a₁ = g a₂) :
f a₁ = f a₂ :=
have memG : a₁⁻¹ * a₂ ∈ G, from mul_mem (inv_mem a₁G) a₂G,
have a₁⁻¹ * a₂ ∈ ker_in g G, from inv_mul_mem_ker_in_of_eq a₁G a₂G H',
have a₁⁻¹ * a₂ ∈ ker_in f G, from and.intro (Hker this) memG,
show f a₁ = f a₂, from eq_of_inv_mul_mem_ker_in a₁G a₂G this
proposition gen_extend_spec [is_hom_on g G] [is_hom_on f G] (Hker : ker_in g G ⊆ ker f)
{a : A} (aG : a ∈ G) : gen_extend G g f (g a) = f a :=
eq_of_ker_in_subset (inv_fun_spec' aG) aG Hker (inv_fun_spec aG)
proposition is_hom_on_gen_extend [is_hom_on g G] [is_hom_on f G] (Hker : ker_in g G ⊆ ker f) :
is_hom_on (gen_extend G g f) (g ' G) :=
have is_subgroup (g ' G), from is_subgroup_image g G,
take c₁, assume c₁gG : c₁ ∈ g ' G,
take c₂, assume c₂gG : c₂ ∈ g ' G,
let ginv := inv_fun g G 1 in
have Hginv : maps_to ginv (g ' G) G, from maps_to_inv_fun one_mem,
have ginvc₁ : ginv c₁ ∈ G, from Hginv c₁gG,
have ginvc₂ : ginv c₂ ∈ G, from Hginv c₂gG,
have ginvc₁c₂ : ginv (c₁ * c₂) ∈ G, from Hginv (mul_mem c₁gG c₂gG),
have HH : ∀₀ c ∈ g ' G, g (ginv c) = c,
from λ a aG, right_inv_on_inv_fun_of_surj_on _ (surj_on_image g G) aG,
have eq₁ : g (ginv c₁) = c₁, from HH c₁gG,
have eq₂ : g (ginv c₂) = c₂, from HH c₂gG,
have eq₃ : g (ginv (c₁ * c₂)) = c₁ * c₂, from HH (mul_mem c₁gG c₂gG),
have g (ginv (c₁ * c₂)) = g ((ginv c₁) * (ginv c₂)),
by rewrite [eq₃, hom_on_mul g ginvc₁ ginvc₂, eq₁, eq₂],
have f (ginv (c₁ * c₂)) = f (ginv c₁ * ginv c₂),
from eq_of_ker_in_subset (ginvc₁c₂) (mul_mem ginvc₁ ginvc₂) Hker this,
show f (ginv (c₁ * c₂)) = f (ginv c₁) * f (ginv c₂),
by rewrite [this, hom_on_mul f ginvc₁ ginvc₂]
end
/- quotient by an arbitrary group, not necessarily normal -/
namespace quotient_group_general
variables [group A] (H : set A) [is_subgroup H]
lemma is_normal_to_group_of_normalizer [instance] :
is_normal (to_group_of (normalizer H) ' H) :=
have H1 : is_normal_in (to_group_of (normalizer H) ' H)
(to_group_of (normalizer H) ' (normalizer H)),
from is_normal_in_image_image (subset_normalizer_self H) (to_group_of (normalizer H)),
have H2 : to_group_of (normalizer H) ' (normalizer H) = univ,
from image_to_group_of_eq_univ (normalizer H),
is_normal_of_is_normal_in_univ (by rewrite -H2; exact H1)
section quotient_group
open quotient_group
noncomputable definition quotient : Type := quotient (to_group_of (normalizer H) ' H)
noncomputable definition group_quotient [instance] : group (quotient H) :=
quotient_group.group (to_group_of (normalizer H) ' H)
noncomputable definition qproj : A → quotient H :=
qproj (to_group_of (normalizer H) ' H) ∘ (to_group_of (normalizer H))
infix ` '* `:65 := λ {A' : Type} [group A'] a H' [is_subgroup H'], qproj H' a
infix ` / ` := λ {A' : Type} [group A'] G H' [is_subgroup H'], qproj H' ' G
proposition is_hom_on_qproj [instance] : is_hom_on (qproj H) (normalizer H) :=
have H₀ : is_hom_on (to_group_of (normalizer H)) (normalizer H),
from is_hom_on_to_group_of (normalizer H),
have H₁ : is_hom_on (quotient_group.qproj (to_group_of (normalizer H) ' H)) univ,
from iff.mpr (is_hom_on_univ_iff (quotient_group.qproj (to_group_of (normalizer H) ' H)))
(is_hom_qproj (to_group_of (normalizer H) ' H)),
is_hom_on_comp H₀ H₁ (maps_to_univ (to_group_of (normalizer H)) (normalizer H))
proposition is_hom_on_qproj' [instance] (G : set A) [is_normal_in H G] :
is_hom_on (qproj H) G :=
is_hom_on_of_subset (qproj H) (subset_normalizer G H)
proposition ker_in_qproj : ker_in (qproj H) (normalizer H) = H :=
let tg := to_group_of (normalizer H) in
begin
rewrite [↑ker_in, ker_eq_preimage_one, ↑qproj, preimage_comp, -ker_eq_preimage_one],
have is_hom_on tg H, from is_hom_on_of_subset _ (subset_normalizer_self H),
have is_subgroup (tg ' H), from is_subgroup_image tg H,
krewrite [ker_qproj, to_group_of_preimage_to_group_of_image (subset_normalizer_self H)]
end
end quotient_group
variable {H}
proposition qproj_eq_qproj_iff {a b : A} (Ha : a ∈ normalizer H) (Hb : b ∈ normalizer H) :
a '* H = b '* H ↔ a * H = b * H :=
by rewrite [lcoset_eq_lcoset_iff, eq_iff_inv_mul_mem_ker_in Ha Hb, ker_in_qproj,
-inv_mem_iff, mul_inv, inv_inv]
proposition qproj_eq_qproj {a b : A} (Ha : a ∈ normalizer H) (Hb : b ∈ normalizer H)
(h : a * H = b * H) :
a '* H = b '* H :=
iff.mpr (qproj_eq_qproj_iff Ha Hb) h
proposition lcoset_eq_lcoset_of_qproj_eq_qproj {a b : A}
(Ha : a ∈ normalizer H) (Hb : b ∈ normalizer H) (h : a '* H = b '* H) :
a * H = b * H :=
iff.mp (qproj_eq_qproj_iff Ha Hb) h
variable (H)
proposition qproj_mem {a : A} {G : set A} (aG : a ∈ G) : a '* H ∈ G / H :=
mem_image_of_mem _ aG
proposition qproj_one : 1 '* H = 1 := hom_on_one (qproj H) (normalizer H)
variable {H}
proposition mem_of_qproj_mem {a : A} (anH : a ∈ normalizer H)
{G : set A} (HsubG : H ⊆ G) [is_subgroup G] [is_normal_in H G]
(aHGH : a '* H ∈ G / H): a ∈ G :=
have GH : G ⊆ normalizer H, from subset_normalizer G H,
obtain b [bG (bHeq : b '* H = a '* H)], from aHGH,
have b * H = a * H, from lcoset_eq_lcoset_of_qproj_eq_qproj (GH bG) anH bHeq,
have a ∈ b * H, by rewrite this; apply mem_lcoset_self,
have a ∈ b * G, from lcoset_subset_lcoset b HsubG this,
show a ∈ G, by rewrite [lcoset_eq_self_of_mem bG at this]; apply this
proposition qproj_eq_one_iff {a : A} (Ha : a ∈ normalizer H) : a '* H = 1 ↔ a ∈ H :=
by rewrite [-hom_on_one (qproj H) (normalizer H), qproj_eq_qproj_iff Ha one_mem, one_lcoset,
lcoset_eq_self_iff]
proposition qproj_eq_one_of_mem {a : A} (aH : a ∈ H) : a '* H = 1 :=
iff.mpr (qproj_eq_one_iff (subset_normalizer_self H aH)) aH
proposition mem_of_qproj_eq_one {a : A} (Ha : a ∈ normalizer H) (h : a '* H = 1) : a ∈ H :=
iff.mp (qproj_eq_one_iff Ha) h
variable (H)
section
open quotient_group
proposition surj_on_qproj_normalizer : surj_on (qproj H) (normalizer H) univ :=
have H₀ : surj_on (to_group_of (normalizer H)) (normalizer H) univ,
from surj_on_to_group_of_univ (normalizer H),
have H₁ : surj_on (quotient_group.qproj (to_group_of (normalizer H) ' H)) univ univ,
from surj_on_univ_of_surjective univ (surjective_qproj _),
surj_on_comp H₁ H₀
end
variable {H}
proposition quotient_induction {P : quotient H → Prop} (hyp : ∀₀ a ∈ normalizer H, P (a '* H)) :
∀ a, P a :=
surj_on_univ_induction (surj_on_qproj_normalizer H) hyp
proposition quotient_induction₂ {P : quotient H → quotient H → Prop}
(hyp : ∀₀ a₁ ∈ normalizer H, ∀₀ a₂ ∈ normalizer H, P (a₁ '* H) (a₂ '* H)) :
∀ a₁ a₂, P a₁ a₂ :=
surj_on_univ_induction₂ (surj_on_qproj_normalizer H) hyp
variable (H)
proposition image_qproj_self : H / H = '{1} :=
eq_of_subset_of_subset
(image_subset_of_maps_to
(take x, suppose x ∈ H,
show x '* H ∈ '{1},
from mem_singleton_of_eq (qproj_eq_one_of_mem `x ∈ H`)))
(take x, suppose x ∈ '{1},
have x = 1, from eq_of_mem_singleton this,
show x ∈ H / H,
by rewrite [this, -qproj_one H]; apply mem_image_of_mem _ one_mem)
section respf
variable (H)
variables [group B] (G : set A) [is_subgroup G] (f : A → B)
noncomputable definition extend : quotient H → B := gen_extend G (qproj H) f
variables [is_hom_on f G] [is_normal_in H G]
private proposition aux : is_hom_on (qproj H) G :=
is_hom_on_of_subset (qproj H) (subset_normalizer G H)
local attribute [instance] aux
variables {H f}
private proposition aux' (respf : H ⊆ ker f) : ker_in (qproj H) G ⊆ ker f :=
subset.trans
(show ker_in (qproj H) G ⊆ ker_in (qproj H) (normalizer H),
from inter_subset_inter_left _ (subset_normalizer G H))
(by rewrite [ker_in_qproj]; apply respf)
variable {G}
proposition extend_qproj (respf : H ⊆ ker f) {a : A} (aG : a ∈ G) :
extend H G f (a '* H) = f a :=
gen_extend_spec (aux' G respf) aG
proposition image_extend (respf : H ⊆ ker f) {s : set A} (ssubG : s ⊆ G) :
extend H G f ' (s / H) = f ' s :=
begin
rewrite [-image_comp],
apply image_eq_image_of_eq_on,
intro a amems,
apply extend_qproj respf (ssubG amems)
end
variable (G)
proposition is_hom_on_extend [instance] (respf : H ⊆ ker f) : is_hom_on (extend H G f) (G / H) :=
by unfold extend; apply is_hom_on_gen_extend (aux' G respf)
variable {G}
proposition ker_in_extend [is_subgroup G] (respf : H ⊆ ker f) (HsubG : H ⊆ G) :
ker_in (extend H G f) (G / H) = (ker_in f G) / H :=
begin
apply ext,
intro aH,
cases surj_on_qproj_normalizer H (show aH ∈ univ, from trivial) with a atemp,
cases atemp with anH aHeq,
rewrite -aHeq,
apply iff.intro,
{ intro akerin,
cases akerin with aker ain,
have a '* H ∈ G / H, from ain,
have a ∈ G, from mem_of_qproj_mem anH HsubG this,
have a '* H ∈ ker (extend H G f), from aker,
have extend H G f (a '* H) = 1, from this,
have f a = extend H G f (a '* H), from eq.symm (extend_qproj respf `a ∈ G`),
have f a = 1, by rewrite this; assumption,
have a ∈ ker_in f G, from and.intro this `a ∈ G`,
show a '* H ∈ (ker_in f G) / H, from qproj_mem H this},
intro aHker,
have aker : a ∈ ker_in f G,
begin
have Hsub : H ⊆ ker_in f G, from subset_inter respf HsubG,
have is_normal_in H (ker_in f G),
from subset.trans (inter_subset_right (ker f) G) (subset_normalizer G H),
apply (mem_of_qproj_mem anH Hsub aHker)
end,
have a ∈ G, from and.right aker,
have f a = 1, from and.left aker,
have extend H G f (a '* H) = 1,
from eq.trans (extend_qproj respf `a ∈ G`) this,
show a '* H ∈ ker_in (extend H G f) (G / H),
from and.intro this (qproj_mem H `a ∈ G`)
end
/- (comment from Jeremy)
This version kills the elaborator. I don't know why.
Tracing class instances doesn't show a problem. My best guess is that it is
the backgracking from the "obtain".
proposition ker_in_extend [is_subgroup G] (respf : H ⊆ ker f) (HsubG : H ⊆ G) :
ker_in (extend H G f) (qproj H ' G) = qproj H ' (ker_in f G) :=
ext (take aH,
obtain a [(anH : a ∈ normalizer H) (aHeq : a '* H = aH)],
from surj_on_qproj_normalizer H (show aH ∈ univ, from trivial),
begin
rewrite -aHeq, apply iff.intro, unfold ker_in,
exact
(assume aker : a '* H ∈ ker (extend H G f) ∩ (qproj H ' G),
have a '* H ∈ qproj H ' G, from and.right aker,
have a ∈ G, from mem_of_qproj_mem anH HsubG this,
-- Uncommenting the next line of code slows things down dramatically.
-- Uncommenting the one after kills the system.
-- have a '* H ∈ ker (extend H G f), from and.left aker,
-- have extend H G f (a '* H) = 1, from this,
-- have f a = extend H G f (a '* H), from eq.symm (extend_qproj respf `a ∈ G`),
-- have f a = 1, by rewrite [-this, extend_qproj respf aG],
-- have a ∈ ker_in f G, from and.intro this `a ∈ G`,
show a '* H ∈ qproj H ' (ker_in f G), from sorry),
exact
(assume hyp : a '* H ∈ qproj H ' (ker_in f G),
show a '* H ∈ ker_in (extend H G f) (qproj H ' G), from sorry)
end)
-/
end respf
attribute quotient [irreducible]
end quotient_group_general
/- the first homomorphism theorem for general quotient groups -/
namespace quotient_group_general
variables [group A] [group B] (G : set A) [is_subgroup G]
variables (f : A → B) [is_hom_on f G]
noncomputable definition bar : quotient (ker_in f G) → B :=
extend (ker_in f G) G f
proposition bar_qproj {a : A} (aG : a ∈ G) : bar G f (a '* ker_in f G) = f a :=
extend_qproj (inter_subset_left _ _) aG
proposition is_hom_on_bar [instance] : is_hom_on (bar G f) (G / ker_in f G) :=
have is_subgroup (ker f ∩ G), from is_subgroup_ker_in f G,
have is_normal_in (ker f ∩ G) G, from is_normal_in_ker_in f G,
is_hom_on_extend G (inter_subset_left _ _)
proposition image_bar {s : set A} (ssubG : s ⊆ G) : bar G f ' (s / ker_in f G) = f ' s :=
have is_subgroup (ker f ∩ G), from is_subgroup_ker_in f G,
have is_normal_in (ker f ∩ G) G, from is_normal_in_ker_in f G,
image_extend (inter_subset_left _ _) ssubG
proposition surj_on_bar : surj_on (bar G f) (G / ker_in f G) (f ' G) :=
by rewrite [↑surj_on, image_bar G f (@subset.refl _ G)]; apply subset.refl
proposition ker_in_bar : ker_in (bar G f) (G / ker_in f G) = '{1} :=
have H₀ : ker_in f G ⊆ ker f, from inter_subset_left _ _,
have H₁ : ker_in f G ⊆ G, from inter_subset_right _ _,
by rewrite [↑bar, ker_in_extend H₀ H₁, image_qproj_self]
proposition inj_on_bar : inj_on (bar G f) (G / ker_in f G) :=
inj_on_of_ker_in_eq_singleton_one (ker_in_bar G f)
end quotient_group_general
end group_theory
| {"author": "Bolt64", "repo": "lean2-aur", "sha": "1d7148e58a17b2d326b032ed1ebf8c5217320242", "save_path": "github-repos/lean/Bolt64-lean2-aur", "path": "github-repos/lean/Bolt64-lean2-aur/lean2-aur-1d7148e58a17b2d326b032ed1ebf8c5217320242/library/theories/group_theory/quotient.lean"} |
[STATEMENT]
lemma mult_le_mono2_hmset: "i \<le> j \<Longrightarrow> k * i \<le> k * j" for i j k :: hmultiset
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. i \<le> j \<Longrightarrow> k * i \<le> k * j
[PROOF STEP]
by (simp add: mult_left_mono) | {"llama_tokens": 107, "file": "Nested_Multisets_Ordinals_Syntactic_Ordinal", "length": 1} |
import numpy as np
import os
from statsmodels.tsa.arima_model import ARIMA
def housing_data_predict(destination_directory, paavo_housing_quarterly_df):
"""
Open Paavo quarterly housing price dataframe and predict the
quarterly prices between 2018 - 2020 with ARIMA(0, 1, 1) model
and save the predicted values
:param destination_directory: the path of the directory to save the prediction dataframe
:param paavo_housing_quarterly_df: the quarterly housing price dataframe
:return: None
"""
years = range(2005, 2021)
housing_obs = paavo_housing_quarterly_df
# Format the postal code values
housing_obs = housing_obs.astype({'Postal code': str})
for i in list(housing_obs.index):
housing_obs.at[i, 'Postal code'] = '0' * (5 - len(housing_obs.at[i, 'Postal code'])) + housing_obs.at[i, 'Postal code']
# Create initial data frame for storing the prediction
housing_pred = housing_obs[['Postal code']]
housing_obs.set_index('Postal code', inplace=True)
housing_pred.set_index('Postal code', inplace=True)
housing_obs = housing_obs.transpose()
# Add columns to the prediction data frame
for year in years:
for quarter in range(1, 5):
ncolumns = len(housing_pred.columns)
housing_pred.insert(ncolumns, str(year) + 'Q' + str(quarter), np.nan)
if year > 2017:
housing_pred.insert(ncolumns + 1, 'Lower_10 ' + str(year) + 'Q' + str(quarter), np.nan)
housing_pred.insert(ncolumns + 2, 'Upper_10 ' + str(year) + 'Q' + str(quarter), np.nan)
housing_pred.insert(ncolumns + 3, 'Lower_25 ' + str(year) + 'Q' + str(quarter), np.nan)
housing_pred.insert(ncolumns + 4, 'Upper_25 ' + str(year) + 'Q' + str(quarter), np.nan)
# Use ARIMA(0, 1, 1) for prediction, fill the data frame with
# predictions and prediction intervals of 90% and 75%
for code in housing_obs.columns:
X = housing_obs[code].values
X = X.astype('float32')
model = ARIMA(X, order=(0, 1, 1))
model_fit = model.fit(disp=-1)
# Calculate in-sample predictions
in_sample = model_fit.predict(end=len(X))
for i in range(len(in_sample)):
if i < len(X):
in_sample[i] = in_sample[i] + X[i]
else:
in_sample[i] = in_sample[i] + in_sample[i - 1]
# Prediction intervals for forecast values
forecast_result_10 = model_fit.forecast(12, alpha=0.1)
forecast_result_25 = model_fit.forecast(12, alpha=0.25)
# Extract forecast values and merge with in-sample predictions
forecast = np.array(forecast_result_10[0])
predictions = np.concatenate([in_sample, forecast])
# Filling the data frame
for year in years:
for quarter in range(1, 5):
pos = (year - 2005) * 4 + quarter - 1
housing_pred.at[code, str(year) + 'Q' + str(quarter)] = int(predictions[pos])
if year > 2017:
forecast_pos = (year - 2018) * 4 + quarter - 1
housing_pred.at[code, 'Lower_10 ' + str(year) + 'Q' + str(quarter)] = int(forecast_result_10[2][forecast_pos][0])
housing_pred.at[code, 'Upper_10 ' + str(year) + 'Q' + str(quarter)] = int(forecast_result_10[2][forecast_pos][1])
housing_pred.at[code, 'Lower_25 ' + str(year) + 'Q' + str(quarter)] = int(forecast_result_25[2][forecast_pos][0])
housing_pred.at[code, 'Upper_25 ' + str(year) + 'Q' + str(quarter)] = int(forecast_result_25[2][forecast_pos][1])
# Export to tsv file
housing_pred.to_csv(os.path.join(destination_directory, 'paavo_housing_quarterly_prediction.tsv'), sep='\t')
| {"hexsha": "f17d674f65890870b7942783a9c9974fe0303094", "size": 3798, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/modeling/housing_prediction.py", "max_stars_repo_name": "xiaoxiaobt/Reaktor-Data-Science-project", "max_stars_repo_head_hexsha": "c779eaa9e586ebe62929361bd4d1bc1c537e4e11", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-12-08T23:10:11.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-29T21:35:01.000Z", "max_issues_repo_path": "scripts/modeling/housing_prediction.py", "max_issues_repo_name": "xiaoxiaobt/Reaktor-Data-Science-project", "max_issues_repo_head_hexsha": "c779eaa9e586ebe62929361bd4d1bc1c537e4e11", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/modeling/housing_prediction.py", "max_forks_repo_name": "xiaoxiaobt/Reaktor-Data-Science-project", "max_forks_repo_head_hexsha": "c779eaa9e586ebe62929361bd4d1bc1c537e4e11", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.8888888889, "max_line_length": 133, "alphanum_fraction": 0.6279620853, "include": true, "reason": "import numpy,from statsmodels", "num_tokens": 1002} |
clean_price <- function(data_price, file_price) {
data_price <- data_price %>%
mutate(date = str_replace_all(date, "\u00C3\u00a4", "\u00e4")) %>%
mutate(date = str_replace(date, "Jan", "J\u00e4n")) %>%
mutate(date = as.Date(date, format = "%d.%b.%Y")) %>%
mutate(price = as.numeric(price))
if (file.exists(file_price)) {
data_price <- bind_rows(data_price, read_tsv(file_price)) %>%
unique() %>%
group_by(date) %>%
filter(row_number() == 1) %>%
ungroup() %>%
arrange(desc(date))
}
write_tsv(data_price, file_price)
}
| {"hexsha": "0a6e8874a9732af511a5c03d20dab6846bfe0196", "size": 581, "ext": "r", "lang": "R", "max_stars_repo_path": "clean_price.r", "max_stars_repo_name": "ha-pu/webscrap_ishares", "max_stars_repo_head_hexsha": "14c6e0cd7fd4358a503752225f3296f914165411", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-12-10T16:22:50.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-10T16:22:50.000Z", "max_issues_repo_path": "clean_price.r", "max_issues_repo_name": "ha-pu/webscrap_ishares", "max_issues_repo_head_hexsha": "14c6e0cd7fd4358a503752225f3296f914165411", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-06-27T09:48:00.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-28T13:58:56.000Z", "max_forks_repo_path": "clean_price.r", "max_forks_repo_name": "ha-pu/webscrap_ishares", "max_forks_repo_head_hexsha": "14c6e0cd7fd4358a503752225f3296f914165411", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-12-10T16:22:52.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-09T15:49:40.000Z", "avg_line_length": 30.5789473684, "max_line_length": 70, "alphanum_fraction": 0.5972461274, "num_tokens": 170} |
// =-=-=-=-=-=-=-
// legacy irods includes
#include "msParam.hpp"
#include "reGlobalsExtern.hpp"
#include "miscServerFunct.hpp"
// =-=-=-=-=-=-=-
//
#include "irods_resource_plugin.hpp"
#include "irods_file_object.hpp"
#include "irods_physical_object.hpp"
#include "irods_collection_object.hpp"
#include "irods_string_tokenize.hpp"
#include "irods_hierarchy_parser.hpp"
#include "irods_resource_redirect.hpp"
#include "irods_stacktrace.hpp"
#include "irods_server_api_call.hpp"
#include "rs_set_round_robin_context.hpp"
// =-=-=-=-=-=-=-
// stl includes
#include <iostream>
#include <sstream>
#include <vector>
#include <string>
// =-=-=-=-=-=-=-
// boost includes
#include <boost/lexical_cast.hpp>
#include <boost/function.hpp>
#include <boost/any.hpp>
/// =-=-=-=-=-=-=-
/// @brief Check the general parameters passed in to most plugin functions
///        by asking the resource context to validate itself against the
///        expected first-class-object type.
template< typename DEST_TYPE >
inline irods::error round_robin_check_params(
    irods::resource_plugin_context& _ctx ) {
    // =-=-=-=-=-=-=-
    // a valid context is all we require; pass any failure up the chain
    irods::error validity = _ctx.valid< DEST_TYPE >();
    if ( validity.ok() ) {
        return SUCCESS();
    }
    return PASSMSG( "resource context is invalid", validity );
} // round_robin_check_params
/// =-=-=-=-=-=-=-
/// @brief get the next resource shared pointer given this resource's name
///        as well as the object's hierarchy string
/// @param _name  name of this (round robin) resource within the hierarchy
/// @param _hier  full hierarchy string to walk
/// @param _cmap  map of this resource's children, keyed by name
/// @param _resc  out-param: resolved child resource pointer
irods::error get_next_child_in_hier(
    const std::string&          _name,
    const std::string&          _hier,
    irods::resource_child_map&  _cmap,
    irods::resource_ptr&        _resc ) {
    // =-=-=-=-=-=-=-
    // parse the hierarchy string; bail with context on failure
    irods::hierarchy_parser hier_parser;
    irods::error ret = hier_parser.set_string( _hier );
    if ( !ret.ok() ) {
        std::stringstream msg;
        msg << "get_next_child_in_hier - failed in set_string for ["
            << _hier << "]";
        return PASSMSG( msg.str(), ret );
    }

    // =-=-=-=-=-=-=-
    // find the resource immediately following ours in the hierarchy
    std::string next_name;
    ret = hier_parser.next( _name, next_name );
    if ( !ret.ok() ) {
        std::stringstream msg;
        msg << "get_next_child_in_hier - failed in next for ["
            << _name << "] for hier ["
            << _hier << "]";
        return PASSMSG( msg.str(), ret );
    }

    // =-=-=-=-=-=-=-
    // the next resource must be one of our registered children
    if ( !_cmap.has_entry( next_name ) ) {
        std::stringstream msg;
        msg << "get_next_child_in_hier - child map missing entry ["
            << next_name << "]";
        return ERROR( CHILD_NOT_FOUND, msg.str() );
    }

    // =-=-=-=-=-=-=-
    // hand back the child's resource pointer (map value is a pair;
    // second holds the resource_ptr)
    _resc = _cmap[ next_name ].second;

    return SUCCESS();

} // get_next_child_in_hier
/// =-=-=-=-=-=-=-
/// @brief get the next resource shared pointer given this resource's name
///        as well as the file object
/// @param _name     name of this (round robin) resource
/// @param _file_obj file object whose replicas are searched for a
///                  hierarchy containing this resource
/// @param _cmap     map of this resource's children
/// @param _resc     out-param: resolved child resource pointer
irods::error get_next_child_for_open_or_write(
    const std::string&          _name,
    irods::file_object_ptr&     _file_obj,
    irods::resource_child_map&  _cmap,
    irods::resource_ptr&        _resc ) {
    // =-=-=-=-=-=-=-
    // scan each physical replica looking for one whose hierarchy
    // passes through this resource
    std::vector< irods::physical_object > replicas = _file_obj->replicas();
    std::vector< irods::physical_object >::iterator repl;
    for ( repl = replicas.begin(); repl != replicas.end(); ++repl ) {
        // =-=-=-=-=-=-=-
        // parse this replica's hierarchy and test for our name
        irods::hierarchy_parser hier_parser;
        hier_parser.set_string( repl->resc_hier() );
        if ( hier_parser.resc_in_hier( _name ) ) {
            // =-=-=-=-=-=-=-
            // found it - resolve the child pointer from the hierarchy
            return get_next_child_in_hier(
                       _name,
                       repl->resc_hier(),
                       _cmap,
                       _resc );
        }

    } // for repl

    // =-=-=-=-=-=-=-
    // no replica's hierarchy references this resource
    std::string msg( "no hier found for resc [" );
    msg += _name + "]";
    return ERROR(
               CHILD_NOT_FOUND,
               msg );

} // get_next_child_for_open_or_write
// =-=-=-=-=-=-=-
/// @brief get the resource for the child in the hierarchy
///        to pass on the call
template< typename DEST_TYPE >
irods::error round_robin_get_resc_for_call(
    irods::resource_plugin_context& _ctx,
    irods::resource_ptr&            _resc ) {
    // =-=-=-=-=-=-=-
    // validate the incoming context
    irods::error ret = round_robin_check_params< DEST_TYPE >( _ctx );
    if ( !ret.ok() ) {
        return PASSMSG( "round_robin_get_resc_for_call - bad resource context", ret );
    }
    // =-=-=-=-=-=-=-
    // look up this resource's own name
    std::string resc_name;
    ret = _ctx.prop_map().get< std::string >( irods::RESOURCE_NAME, resc_name );
    if ( !ret.ok() ) {
        return PASSMSG( "round_robin_get_resc_for_call - failed to get property 'name'.", ret );
    }
    // =-=-=-=-=-=-=-
    // pull the hierarchy string off the first-class object
    boost::shared_ptr< DEST_TYPE > dst_obj = boost::dynamic_pointer_cast< DEST_TYPE >( _ctx.fco() );
    std::string obj_hier = dst_obj->resc_hier( );
    // =-=-=-=-=-=-=-
    // resolve the child directly below us in that hierarchy
    ret = get_next_child_in_hier( resc_name, obj_hier, _ctx.child_map(), _resc );
    if ( !ret.ok() ) {
        return PASSMSG( "round_robin_get_resc_for_call - get_next_child_in_hier failed.", ret );
    }
    return SUCCESS();
} // round_robin_get_resc_for_call
extern "C" {
/// =-=-=-=-=-=-=-
/// @brief property map key for the child selected to serve the next create
const std::string NEXT_CHILD_PROP( "round_robin_next_child" );
/// =-=-=-=-=-=-=-
/// @brief property map key for the sorted vector of child resource names
const std::string CHILD_VECTOR_PROP( "round_robin_child_vector" );
/// =-=-=-=-=-=-=-
/// @brief build a sorted list of children based on hints in the context
///        string for them and their position in the child map
// NOTE :: this assumes the order in the icat dictates the order of the RR.
//         the user can override that behavior with applying an index to the
//         child. should the resc id wrap, this should still work as it
//         should behave like a circular queue.
// @param _cmap         [in]  child name -> ( context string, resource ptr ) map
// @param _child_vector [out] child resource names in round robin order
irods::error build_sorted_child_vector(
    irods::resource_child_map& _cmap,
    std::vector< std::string >& _child_vector ) {
    // =-=-=-=-=-=-=-
    // vector holding all of the children
    // pre-size so pass 1 can place indexed children directly into their slot
    size_t list_size = _cmap.size();
    _child_vector.resize( list_size );
    // =-=-=-=-=-=-=-
    // PASS 1: iterate over the children and look for indicies on the
    // childrens context strings. use those to build the initial
    // list. children with a bad or out-of-range index are logged and skipped.
    irods::resource_child_map::iterator itr;
    for ( itr = _cmap.begin();
            itr != _cmap.end();
            ++itr ) {
        std::string ctx = itr->second.first;
        irods::resource_ptr& resc = itr->second.second;
        if ( !ctx.empty() ) {
            try {
                // =-=-=-=-=-=-=-
                // cast std::string to int index
                size_t idx = boost::lexical_cast<size_t>( ctx );
                if ( idx >= list_size ) {
                    irods::log( ERROR( -1, "build_sorted_child_vector - index out of bounds" ) );
                    continue;
                }
                // =-=-=-=-=-=-=-
                // make sure the map at this spot is already empty, could have
                // duplicate indicies on children; first claimant keeps the slot
                if ( !_child_vector[ idx ].empty() ) {
                    std::stringstream msg;
                    msg << "build_sorted_child_vector - child map list is not empty ";
                    msg << "for index " << idx << " colliding with [";
                    msg << _child_vector[ idx ] << "]";
                    irods::log( ERROR( -1, msg.str() ) );
                    continue;
                }
                // =-=-=-=-=-=-=-
                // snag child resource name
                std::string name;
                irods::error ret = resc->get_property< std::string >( irods::RESOURCE_NAME, name );
                if ( !ret.ok() ) {
                    irods::log( ERROR( -1, "build_sorted_child_vector - get property for resource name failed." ) );
                    continue;
                }
                // =-=-=-=-=-=-=-
                // finally add child to the list
                _child_vector[ idx ] = name;
            }
            catch ( const boost::bad_lexical_cast& ) {
                // context string was not a number; child is handled in pass 2
                irods::log( ERROR( -1, "build_sorted_child_vector - lexical cast to size_t failed" ) );
            }
        } // if ctx != empty
    } // for itr
    // =-=-=-=-=-=-=-
    // PASS 2: iterate over the children again and add in any in the holes
    // left from the first pass (children without a numeric context string)
    for ( itr = _cmap.begin();
            itr != _cmap.end();
            ++itr ) {
        std::string ctx = itr->second.first;
        irods::resource_ptr& resc = itr->second.second;
        // =-=-=-=-=-=-=-
        // skip any resource whose context is not empty
        // as they should have places already
        if ( !ctx.empty() ) {
            continue;
        }
        // =-=-=-=-=-=-=-
        // iterate over the _child_vector and find a hole to
        // fill in with this resource name
        bool filled_flg = false;
        size_t idx = 0;  // NOTE(review): tracked but never read after the loop
        std::vector< std::string >::iterator vitr;
        for ( vitr = _child_vector.begin();
                vitr != _child_vector.end();
                ++vitr ) {
            if ( vitr->empty() ) {
                // =-=-=-=-=-=-=-
                // snag child resource name
                std::string name;
                irods::error ret = resc->get_property< std::string >( irods::RESOURCE_NAME, name );
                if ( !ret.ok() ) {
                    irods::log( ERROR( -1, "build_sorted_child_vector - get property for resource name failed." ) );
                    idx++;
                    continue;
                }
                ( *vitr ) = name;
                filled_flg = true;
                break;
            }
            else {
                idx++;
            }
        } // for vitr
        // =-=-=-=-=-=-=-
        // check to ensure that the resc found its way into the list
        if ( false == filled_flg ) {
            irods::log( ERROR( -1, "build_sorted_child_vector - failed to find an entry in the resc list" ) );
        }
    } // for itr
    return SUCCESS();
} // build_sorted_child_vector
/// =-=-=-=-=-=-=-
/// @brief given the property map the properties next_child and child_vector,
///        select the next property in the vector to be tapped as the RR resc
/// @param _prop_map [in/out] plugin property map holding NEXT_CHILD_PROP
///                  and CHILD_VECTOR_PROP; NEXT_CHILD_PROP is advanced in place
irods::error update_next_child_resource(
    irods::plugin_property_map& _prop_map ) {
    // =-=-=-=-=-=-=-
    // extract next_child, may be empty for new RR node so the
    // return code is deliberately ignored here
    std::string next_child;
    _prop_map.get< std::string >( NEXT_CHILD_PROP, next_child );
    // =-=-=-=-=-=-=-
    // extract child_vector
    std::vector< std::string > child_vector;
    irods::error get_err = _prop_map.get( CHILD_VECTOR_PROP, child_vector );
    if ( !get_err.ok() ) {
        std::stringstream msg;
        msg << "update_next_child_resource - failed to get child vector";
        return ERROR( -1, msg.str() );
    }
    // =-=-=-=-=-=-=-
    // if the next_child string is empty then the next in the round robin
    // selection is the first non empty resource in the vector
    if ( next_child.empty() ) {
        // =-=-=-=-=-=-=-
        // scan the child vector for the first non empty position
        for ( size_t i = 0; i < child_vector.size(); ++i ) {
            if ( child_vector[ i ].empty() ) {
                // BUGFIX: original message was misspelled and truncated
                // ("chlid vector at  posittion N"); state what happened
                std::stringstream msg;
                msg << "update_next_child_resource - child vector is empty at position " << i;
                irods::log( ERROR( -1, msg.str() ) );
            }
            else {
                next_child = child_vector[ i ];
                break;
            }
        } // for i
    }
    else {
        // =-=-=-=-=-=-=-
        // scan the child vector for the current child and select the
        // next position in the series, wrapping at the end like a
        // circular queue
        for ( size_t i = 0; i < child_vector.size(); ++i ) {
            if ( next_child == child_vector[ i ] ) {
                size_t idx = ( ( i + 1 ) >= child_vector.size() ) ? 0 : i + 1;
                next_child = child_vector[ idx ];
                break;
            }
        } // for i
    } // else
    // =-=-=-=-=-=-=-
    // if next_child is empty, something went terribly awry
    if ( next_child.empty() ) {
        std::stringstream msg;
        msg << "update_next_child_resource - next_child is empty.";
        return ERROR( -1, msg.str() );
    }
    // =-=-=-=-=-=-=-
    // persist the selection back into the property map
    _prop_map.set< std::string >( NEXT_CHILD_PROP, next_child );
    return SUCCESS();
} // update_next_child_resource
// =-=-=-=-=-=-=-
/// @brief Start Up Operation - iterate over children and map into the
///        list from which to pick the next resource for the creation operation
/// @param _prop_map [in/out] property map; receives CHILD_VECTOR_PROP and,
///                  when previously unset, NEXT_CHILD_PROP
/// @param _cmap     [in] map of this resource's children
irods::error round_robin_start_operation(
    irods::plugin_property_map& _prop_map,
    irods::resource_child_map&  _cmap ) {
    // =-=-=-=-=-=-=-
    // trap case where no children are available
    if ( _cmap.empty() ) {
        return ERROR( -1, "round_robin_start_operation - no children specified" );
    }
    // =-=-=-=-=-=-=-
    // build the initial list of children
    std::vector< std::string > child_vector;
    irods::error err = build_sorted_child_vector( _cmap, child_vector );
    if ( !err.ok() ) {
        return PASSMSG( "round_robin_start_operation - failed.", err );
    }
    // =-=-=-=-=-=-=-
    // report children to log
    for ( size_t i = 0; i < child_vector.size(); ++i ) {
        // BUGFIX: "%d" expects int; passing size_t through varargs is
        // undefined behavior on LP64 platforms - cast explicitly
        rodsLog( LOG_DEBUG, "round_robin_start_operation :: RR Child [%s] at [%d]",
                 child_vector[i].c_str(), static_cast<int>( i ) );
    }
    // =-=-=-=-=-=-=-
    // add the child list to the property map
    err = _prop_map.set< std::vector< std::string > >( CHILD_VECTOR_PROP, child_vector );
    if ( !err.ok() ) {
        return PASSMSG( "round_robin_start_operation - failed.", err );
    }
    // =-=-=-=-=-=-=-
    // if the next_child property is empty then we need to populate it
    // to the first resource in the child vector
    std::string next_child;
    err = _prop_map.get< std::string >( NEXT_CHILD_PROP, next_child );
    if ( err.ok() && next_child.empty() && child_vector.size() > 0 ) {
        _prop_map.set< std::string >( NEXT_CHILD_PROP, child_vector[ 0 ] );
    }
    return SUCCESS();
} // round_robin_start_operation
/// =-=-=-=-=-=-=-
/// @brief interface for POSIX create
irods::error round_robin_file_create(
    irods::resource_plugin_context& _ctx ) {
    // =-=-=-=-=-=-=-
    // resolve the child selected by the object's hierarchy
    irods::resource_ptr child_resc;
    irods::error resolve_err =
        round_robin_get_resc_for_call< irods::file_object >( _ctx, child_resc );
    if ( !resolve_err.ok() ) {
        std::string msg( __FUNCTION__ );
        msg += " - failed.";
        return PASSMSG( msg, resolve_err );
    }
    // =-=-=-=-=-=-=-
    // delegate the create operation to the child
    return child_resc->call( _ctx.comm(), irods::RESOURCE_OP_CREATE, _ctx.fco() );
} // round_robin_file_create
// =-=-=-=-=-=-=-
// interface for POSIX Open
irods::error round_robin_file_open(
    irods::resource_plugin_context& _ctx ) {
    // =-=-=-=-=-=-=-
    // resolve the child selected by the object's hierarchy
    irods::resource_ptr child_resc;
    irods::error resolve_err =
        round_robin_get_resc_for_call< irods::file_object >( _ctx, child_resc );
    if ( !resolve_err.ok() ) {
        std::string msg( __FUNCTION__ );
        msg += " - failed.";
        return PASSMSG( msg, resolve_err );
    }
    // =-=-=-=-=-=-=-
    // delegate the open operation to the child
    return child_resc->call( _ctx.comm(), irods::RESOURCE_OP_OPEN, _ctx.fco() );
} // round_robin_file_open
/// =-=-=-=-=-=-=-
/// @brief interface for POSIX Read
irods::error round_robin_file_read(
    irods::resource_plugin_context& _ctx,
    void*                           _buf,
    int                             _len ) {
    // =-=-=-=-=-=-=-
    // resolve the child selected by the object's hierarchy
    irods::resource_ptr child_resc;
    irods::error resolve_err =
        round_robin_get_resc_for_call< irods::file_object >( _ctx, child_resc );
    if ( !resolve_err.ok() ) {
        std::string msg( __FUNCTION__ );
        msg += " - failed.";
        return PASSMSG( msg, resolve_err );
    }
    // =-=-=-=-=-=-=-
    // delegate the read, forwarding the caller's buffer and length
    return child_resc->call< void*, int >( _ctx.comm(), irods::RESOURCE_OP_READ, _ctx.fco(), _buf, _len );
} // round_robin_file_read
/// =-=-=-=-=-=-=-
/// @brief interface for POSIX Write
irods::error round_robin_file_write(
    irods::resource_plugin_context& _ctx,
    void*                           _buf,
    int                             _len ) {
    // =-=-=-=-=-=-=-
    // resolve the child selected by the object's hierarchy
    irods::resource_ptr child_resc;
    irods::error resolve_err =
        round_robin_get_resc_for_call< irods::file_object >( _ctx, child_resc );
    if ( !resolve_err.ok() ) {
        std::string msg( __FUNCTION__ );
        msg += " - failed.";
        return PASSMSG( msg, resolve_err );
    }
    // =-=-=-=-=-=-=-
    // delegate the write, forwarding the caller's buffer and length
    return child_resc->call< void*, int >( _ctx.comm(), irods::RESOURCE_OP_WRITE, _ctx.fco(), _buf, _len );
} // round_robin_file_write
/// =-=-=-=-=-=-=-
/// @brief interface for POSIX Close
irods::error round_robin_file_close(
    irods::resource_plugin_context& _ctx ) {
    // =-=-=-=-=-=-=-
    // resolve the child selected by the object's hierarchy
    irods::resource_ptr child_resc;
    irods::error resolve_err =
        round_robin_get_resc_for_call< irods::file_object >( _ctx, child_resc );
    if ( !resolve_err.ok() ) {
        std::string msg( __FUNCTION__ );
        msg += " - failed.";
        return PASSMSG( msg, resolve_err );
    }
    // =-=-=-=-=-=-=-
    // delegate the close operation to the child
    return child_resc->call( _ctx.comm(), irods::RESOURCE_OP_CLOSE, _ctx.fco() );
} // round_robin_file_close
/// =-=-=-=-=-=-=-
/// @brief interface for POSIX Unlink
irods::error round_robin_file_unlink(
    irods::resource_plugin_context& _ctx ) {
    // =-=-=-=-=-=-=-
    // resolve the child selected by the object's hierarchy
    // NOTE: unlink operates on a data_object rather than a file_object
    irods::resource_ptr child_resc;
    irods::error resolve_err =
        round_robin_get_resc_for_call< irods::data_object >( _ctx, child_resc );
    if ( !resolve_err.ok() ) {
        std::string msg( __FUNCTION__ );
        msg += " - failed.";
        return PASSMSG( msg, resolve_err );
    }
    // =-=-=-=-=-=-=-
    // delegate the unlink operation to the child
    return child_resc->call( _ctx.comm(), irods::RESOURCE_OP_UNLINK, _ctx.fco() );
} // round_robin_file_unlink
/// =-=-=-=-=-=-=-
/// @brief interface for POSIX Stat
irods::error round_robin_file_stat(
    irods::resource_plugin_context& _ctx,
    struct stat*                    _statbuf ) {
    // =-=-=-=-=-=-=-
    // resolve the child selected by the object's hierarchy
    // NOTE: stat operates on a data_object rather than a file_object
    irods::resource_ptr child_resc;
    irods::error resolve_err =
        round_robin_get_resc_for_call< irods::data_object >( _ctx, child_resc );
    if ( !resolve_err.ok() ) {
        std::string msg( __FUNCTION__ );
        msg += " - failed.";
        return PASSMSG( msg, resolve_err );
    }
    // =-=-=-=-=-=-=-
    // delegate the stat, forwarding the caller's stat buffer
    return child_resc->call< struct stat* >( _ctx.comm(), irods::RESOURCE_OP_STAT, _ctx.fco(), _statbuf );
} // round_robin_file_stat
/// =-=-=-=-=-=-=-
/// @brief interface for POSIX lseek
irods::error round_robin_file_lseek(
    irods::resource_plugin_context& _ctx,
    long long                       _offset,
    int                             _whence ) {
    // =-=-=-=-=-=-=-
    // resolve the child selected by the object's hierarchy
    irods::resource_ptr child_resc;
    irods::error resolve_err =
        round_robin_get_resc_for_call< irods::file_object >( _ctx, child_resc );
    if ( !resolve_err.ok() ) {
        std::string msg( __FUNCTION__ );
        msg += " - failed.";
        return PASSMSG( msg, resolve_err );
    }
    // =-=-=-=-=-=-=-
    // delegate the seek, forwarding offset and whence
    return child_resc->call< long long, int >( _ctx.comm(), irods::RESOURCE_OP_LSEEK, _ctx.fco(), _offset, _whence );
} // round_robin_file_lseek
/// =-=-=-=-=-=-=-
/// @brief interface for POSIX mkdir
irods::error round_robin_file_mkdir(
    irods::resource_plugin_context& _ctx ) {
    // =-=-=-=-=-=-=-
    // resolve the child selected by the object's hierarchy
    // NOTE: directory operations use a collection_object
    irods::resource_ptr child_resc;
    irods::error resolve_err =
        round_robin_get_resc_for_call< irods::collection_object >( _ctx, child_resc );
    if ( !resolve_err.ok() ) {
        std::string msg( __FUNCTION__ );
        msg += " - failed.";
        return PASSMSG( msg, resolve_err );
    }
    // =-=-=-=-=-=-=-
    // delegate the mkdir operation to the child
    return child_resc->call( _ctx.comm(), irods::RESOURCE_OP_MKDIR, _ctx.fco() );
} // round_robin_file_mkdir
/// =-=-=-=-=-=-=-
/// @brief interface for POSIX rmdir
irods::error round_robin_file_rmdir(
    irods::resource_plugin_context& _ctx ) {
    // =-=-=-=-=-=-=-
    // resolve the child selected by the object's hierarchy
    // NOTE: directory operations use a collection_object
    irods::resource_ptr child_resc;
    irods::error resolve_err =
        round_robin_get_resc_for_call< irods::collection_object >( _ctx, child_resc );
    if ( !resolve_err.ok() ) {
        std::string msg( __FUNCTION__ );
        msg += " - failed.";
        return PASSMSG( msg, resolve_err );
    }
    // =-=-=-=-=-=-=-
    // delegate the rmdir operation to the child
    return child_resc->call( _ctx.comm(), irods::RESOURCE_OP_RMDIR, _ctx.fco() );
} // round_robin_file_rmdir
/// =-=-=-=-=-=-=-
/// @brief interface for POSIX opendir
irods::error round_robin_file_opendir(
    irods::resource_plugin_context& _ctx ) {
    // =-=-=-=-=-=-=-
    // resolve the child selected by the object's hierarchy
    // NOTE: directory operations use a collection_object
    irods::resource_ptr child_resc;
    irods::error resolve_err =
        round_robin_get_resc_for_call< irods::collection_object >( _ctx, child_resc );
    if ( !resolve_err.ok() ) {
        std::string msg( __FUNCTION__ );
        msg += " - failed.";
        return PASSMSG( msg, resolve_err );
    }
    // =-=-=-=-=-=-=-
    // delegate the opendir operation to the child
    return child_resc->call( _ctx.comm(), irods::RESOURCE_OP_OPENDIR, _ctx.fco() );
} // round_robin_file_opendir
// =-=-=-=-=-=-=-
/// @brief interface for POSIX closedir
irods::error round_robin_file_closedir(
    irods::resource_plugin_context& _ctx ) {
    // =-=-=-=-=-=-=-
    // resolve the child selected by the object's hierarchy
    // NOTE: directory operations use a collection_object
    irods::resource_ptr child_resc;
    irods::error resolve_err =
        round_robin_get_resc_for_call< irods::collection_object >( _ctx, child_resc );
    if ( !resolve_err.ok() ) {
        std::string msg( __FUNCTION__ );
        msg += " - failed.";
        return PASSMSG( msg, resolve_err );
    }
    // =-=-=-=-=-=-=-
    // delegate the closedir operation to the child
    return child_resc->call( _ctx.comm(), irods::RESOURCE_OP_CLOSEDIR, _ctx.fco() );
} // round_robin_file_closedir
/// =-=-=-=-=-=-=-
/// @brief interface for POSIX readdir
irods::error round_robin_file_readdir(
    irods::resource_plugin_context& _ctx,
    struct rodsDirent**             _dirent_ptr ) {
    // =-=-=-=-=-=-=-
    // resolve the child selected by the object's hierarchy
    // NOTE: directory operations use a collection_object
    irods::resource_ptr child_resc;
    irods::error resolve_err =
        round_robin_get_resc_for_call< irods::collection_object >( _ctx, child_resc );
    if ( !resolve_err.ok() ) {
        std::string msg( __FUNCTION__ );
        msg += " - failed.";
        return PASSMSG( msg, resolve_err );
    }
    // =-=-=-=-=-=-=-
    // delegate the readdir, forwarding the caller's dirent out-pointer
    return child_resc->call< struct rodsDirent** >( _ctx.comm(), irods::RESOURCE_OP_READDIR, _ctx.fco(), _dirent_ptr );
} // round_robin_file_readdir
/// =-=-=-=-=-=-=-
/// @brief interface for POSIX rename
irods::error round_robin_file_rename(
    irods::resource_plugin_context& _ctx,
    const char*                     _new_file_name ) {
    // =-=-=-=-=-=-=-
    // resolve the child selected by the object's hierarchy
    irods::resource_ptr child_resc;
    irods::error resolve_err =
        round_robin_get_resc_for_call< irods::file_object >( _ctx, child_resc );
    if ( !resolve_err.ok() ) {
        std::string msg( __FUNCTION__ );
        msg += " - failed.";
        return PASSMSG( msg, resolve_err );
    }
    // =-=-=-=-=-=-=-
    // delegate the rename, forwarding the new file name
    return child_resc->call< const char* >( _ctx.comm(), irods::RESOURCE_OP_RENAME, _ctx.fco(), _new_file_name );
} // round_robin_file_rename
/// =-=-=-=-=-=-=-
/// @brief interface for POSIX truncate
irods::error round_robin_file_truncate(
    irods::resource_plugin_context& _ctx ) {
    // =-=-=-=-=-=-=-
    // resolve the child selected by the object's hierarchy;
    // pass any failure straight through
    irods::resource_ptr child_resc;
    irods::error resolve_err =
        round_robin_get_resc_for_call< irods::file_object >( _ctx, child_resc );
    if ( !resolve_err.ok() ) {
        return PASS( resolve_err );
    }
    // =-=-=-=-=-=-=-
    // delegate the truncate operation to the child
    return child_resc->call( _ctx.comm(), irods::RESOURCE_OP_TRUNCATE, _ctx.fco() );
} // round_robin_file_truncate
/// =-=-=-=-=-=-=-
/// @brief interface to determine free space on a device given a path
irods::error round_robin_file_getfs_freespace(
    irods::resource_plugin_context& _ctx ) {
    // =-=-=-=-=-=-=-
    // resolve the child selected by the object's hierarchy
    irods::resource_ptr child_resc;
    irods::error resolve_err =
        round_robin_get_resc_for_call< irods::file_object >( _ctx, child_resc );
    if ( !resolve_err.ok() ) {
        std::string msg( __FUNCTION__ );
        msg += " - failed.";
        return PASSMSG( msg, resolve_err );
    }
    // =-=-=-=-=-=-=-
    // delegate the freespace query to the child
    return child_resc->call( _ctx.comm(), irods::RESOURCE_OP_FREESPACE, _ctx.fco() );
} // round_robin_file_getfs_freespace
/// =-=-=-=-=-=-=-
/// @brief This routine is for testing the TEST_STAGE_FILE_TYPE.
///        Just copy the file from filename to cacheFilename. optionalInfo info
///        is not used.
irods::error round_robin_file_stage_to_cache(
    irods::resource_plugin_context& _ctx,
    const char*                     _cache_file_name ) {
    // =-=-=-=-=-=-=-
    // resolve the child selected by the object's hierarchy
    irods::resource_ptr child_resc;
    irods::error resolve_err =
        round_robin_get_resc_for_call< irods::file_object >( _ctx, child_resc );
    if ( !resolve_err.ok() ) {
        std::string msg( __FUNCTION__ );
        msg += " - failed.";
        return PASSMSG( msg, resolve_err );
    }
    // =-=-=-=-=-=-=-
    // delegate the stage-to-cache, forwarding the cache file name
    return child_resc->call< const char* >( _ctx.comm(), irods::RESOURCE_OP_STAGETOCACHE, _ctx.fco(), _cache_file_name );
} // round_robin_file_stage_to_cache
/// =-=-=-=-=-=-=-
/// @brief This routine is for testing the TEST_STAGE_FILE_TYPE.
///        Just copy the file from cacheFilename to filename. optionalInfo info
///        is not used.
irods::error round_robin_file_sync_to_arch(
    irods::resource_plugin_context& _ctx,
    const char*                     _cache_file_name ) {
    // =-=-=-=-=-=-=-
    // resolve the child selected by the object's hierarchy
    irods::resource_ptr child_resc;
    irods::error resolve_err =
        round_robin_get_resc_for_call< irods::file_object >( _ctx, child_resc );
    if ( !resolve_err.ok() ) {
        std::string msg( __FUNCTION__ );
        msg += " - failed.";
        return PASSMSG( msg, resolve_err );
    }
    // =-=-=-=-=-=-=-
    // delegate the sync-to-archive, forwarding the cache file name
    return child_resc->call< const char* >( _ctx.comm(), irods::RESOURCE_OP_SYNCTOARCH, _ctx.fco(), _cache_file_name );
} // round_robin_file_sync_to_arch
/// =-=-=-=-=-=-=-
/// @brief interface to notify of a file registration
irods::error round_robin_file_registered(
    irods::resource_plugin_context& _ctx ) {
    // =-=-=-=-=-=-=-
    // resolve the child selected by the object's hierarchy
    irods::resource_ptr child_resc;
    irods::error resolve_err =
        round_robin_get_resc_for_call< irods::file_object >( _ctx, child_resc );
    if ( !resolve_err.ok() ) {
        std::string msg( __FUNCTION__ );
        msg += " - failed.";
        return PASSMSG( msg, resolve_err );
    }
    // =-=-=-=-=-=-=-
    // forward the registration notification to the child
    return child_resc->call( _ctx.comm(), irods::RESOURCE_OP_REGISTERED, _ctx.fco() );
} // round_robin_file_registered
/// =-=-=-=-=-=-=-
/// @brief interface to notify of a file unregistration
irods::error round_robin_file_unregistered(
    irods::resource_plugin_context& _ctx ) {
    // =-=-=-=-=-=-=-
    // resolve the child selected by the object's hierarchy
    irods::resource_ptr child_resc;
    irods::error resolve_err =
        round_robin_get_resc_for_call< irods::file_object >( _ctx, child_resc );
    if ( !resolve_err.ok() ) {
        std::string msg( __FUNCTION__ );
        msg += " - failed.";
        return PASSMSG( msg, resolve_err );
    }
    // =-=-=-=-=-=-=-
    // forward the unregistration notification to the child
    return child_resc->call( _ctx.comm(), irods::RESOURCE_OP_UNREGISTERED, _ctx.fco() );
} // round_robin_file_unregistered
/// =-=-=-=-=-=-=-
/// @brief interface to notify of a file modification; on success, persists
///        the advanced next-child selection back to the catalog via the
///        set-round-robin-context API so the rotation survives agent restarts
irods::error round_robin_file_modified(
    irods::resource_plugin_context& _ctx ) {
    // =-=-=-=-=-=-=-
    // get the child resc to call
    irods::resource_ptr resc;
    irods::error err = round_robin_get_resc_for_call< irods::file_object >( _ctx, resc );
    if ( !err.ok() ) {
        return PASS( err );
    }
    // =-=-=-=-=-=-=-
    // call modified on the child
    err = resc->call( _ctx.comm(), irods::RESOURCE_OP_MODIFIED, _ctx.fco() );
    if ( !err.ok() ) {
        return PASS( err );
    }
    // =-=-=-=-=-=-=-
    // if file modified is successful then we will update the next
    // child in the round robin within the database
    // (get() failures are tolerated; empty strings are sent as-is)
    std::string name;
    _ctx.prop_map().get< std::string >( irods::RESOURCE_NAME, name );
    std::string next_child;
    _ctx.prop_map().get< std::string >( NEXT_CHILD_PROP, next_child );
    // BUGFIX: value-initialize the input struct - the original left it
    // uninitialized, sending stack garbage in any members beyond the two
    // filled by snprintf below
    setRoundRobinContextInp_t inp = {};
    snprintf( inp.resc_name_, sizeof( inp.resc_name_ ), "%s", name.c_str() );
    snprintf( inp.context_, sizeof( inp.context_ ), "%s", next_child.c_str() );
    int status = irods::server_api_call(
                     SET_RR_CTX_AN,
                     _ctx.comm(),
                     &inp,
                     NULL,
                     ( void** ) NULL,
                     NULL );
    if ( status < 0 ) {
        std::stringstream msg;
        msg << "failed to update round robin context for [";
        msg << name << "] with context [" << next_child << "]";
        return ERROR(
                   status,
                   msg.str() );
    }
    else {
        return SUCCESS();
    }
} // round_robin_file_modified
/// =-=-=-=-=-=-=-
/// @brief find the next valid (not marked down) child resource for a create
///        operation, advancing NEXT_CHILD_PROP past any down children; tries
///        at most one full rotation through the child map
/// @param _prop_map [in/out] property map holding NEXT_CHILD_PROP
/// @param _cmap     [in]     map of child resources
/// @param _resc     [out]    the selected child's resource pointer
irods::error get_next_valid_child_resource(
    irods::plugin_property_map& _prop_map,
    irods::resource_child_map&  _cmap,
    irods::resource_ptr&        _resc ) {
    // =-=-=-=-=-=-=-
    // counter and flag (size_t fixes the signed/unsigned comparison
    // against _cmap.size() in the loop condition below)
    size_t child_ctr = 0;
    bool child_found = false;
    // =-=-=-=-=-=-=-
    // while we have not found a child and have not
    // exhausted all the children in the map
    while ( !child_found &&
            child_ctr < _cmap.size() ) {
        // =-=-=-=-=-=-=-
        // increment child counter
        child_ctr++;
        // =-=-=-=-=-=-=-
        // get the next_child property
        std::string next_child;
        irods::error err = _prop_map.get< std::string >( NEXT_CHILD_PROP, next_child );
        if ( !err.ok() ) {
            return PASSMSG( "round_robin_redirect - get property for 'next_child' failed.", err );
        }
        // =-=-=-=-=-=-=-
        // get the next_child resource
        if ( !_cmap.has_entry( next_child ) ) {
            std::stringstream msg;
            msg << "child map has no child by name [";
            msg << next_child << "]";
            // BUGFIX: original returned PASSMSG() wrapping 'err', which at
            // this point holds SUCCESS - the failure was silently masked.
            // Report a genuine error code instead.
            return ERROR( CHILD_NOT_FOUND, msg.str() );
        }
        // =-=-=-=-=-=-=-
        // request our child resource to test it
        irods::resource_ptr resc = _cmap[ next_child ].second;
        // =-=-=-=-=-=-=-
        // get the resource's status
        int resc_status = 0;
        err = resc->get_property<int>( irods::RESOURCE_STATUS, resc_status );
        if ( !err.ok() ) {
            return PASSMSG( "failed to get property", err );
        }
        // =-=-=-=-=-=-=-
        // determine if the resource is up and available
        if ( INT_RESC_STATUS_DOWN != resc_status ) {
            // =-=-=-=-=-=-=-
            // we found a valid child, set out variable
            _resc = resc;
            child_found = true;
        }
        else {
            // =-=-=-=-=-=-=-
            // update the next_child as we do not have a valid child yet
            err = update_next_child_resource( _prop_map );
            if ( !err.ok() ) {
                return PASSMSG( "update_next_child_resource failed", err );
            }
        }
    } // while
    // =-=-=-=-=-=-=-
    // return appropriately
    if ( child_found ) {
        return SUCCESS();
    }
    else {
        return ERROR( NO_NEXT_RESC_FOUND, "no valid child found" );
    }
} // get_next_valid_child_resource
/// =-=-=-=-=-=-=-
/// @brief used to allow the resource to determine which host
///        should provide the requested operation
/// @param _opr        [in]  operation name (open / write / create / ...)
/// @param _curr_host  [in]  host currently servicing the request
/// @param _out_parser [out] hierarchy parser; this resource appends itself
///                    before forwarding to the chosen child
/// @param _out_vote   [out] vote for this resolution (0.0 on failure)
irods::error round_robin_redirect(
    irods::resource_plugin_context& _ctx,
    const std::string*              _opr,
    const std::string*              _curr_host,
    irods::hierarchy_parser*        _out_parser,
    float*                          _out_vote ) {
    // =-=-=-=-=-=-=-
    // check incoming parameters
    irods::error err = round_robin_check_params< irods::file_object >( _ctx );
    if ( !err.ok() ) {
        return PASSMSG( "round_robin_redirect - bad resource context", err );
    }
    if ( !_opr ) {
        return ERROR( SYS_INVALID_INPUT_PARAM, "round_robin_redirect - null operation" );
    }
    if ( !_curr_host ) {
        return ERROR( SYS_INVALID_INPUT_PARAM, "round_robin_redirect - null host" );
    }
    if ( !_out_parser ) {
        return ERROR( SYS_INVALID_INPUT_PARAM, "round_robin_redirect - null outgoing hier parser" );
    }
    if ( !_out_vote ) {
        return ERROR( SYS_INVALID_INPUT_PARAM, "round_robin_redirect - null outgoing vote" );
    }
    // =-=-=-=-=-=-=-
    // cast the first-class object for use in the open/write branch
    // (the unused copy of its hier string in the original was removed)
    irods::file_object_ptr file_obj = boost::dynamic_pointer_cast< irods::file_object >( _ctx.fco() );
    // =-=-=-=-=-=-=-
    // get this resource's name
    std::string name;
    err = _ctx.prop_map().get< std::string >( irods::RESOURCE_NAME, name );
    if ( !err.ok() ) {
        return PASSMSG( "failed to get property 'name'.", err );
    }
    // =-=-=-=-=-=-=-
    // add ourselves into the hierarch before calling child resources
    _out_parser->add_child( name );
    // =-=-=-=-=-=-=-
    // test the operation to determine which choices to make
    if ( irods::OPEN_OPERATION == ( *_opr ) ||
            irods::WRITE_OPERATION == ( *_opr ) ) {
        // =-=-=-=-=-=-=-
        // get the next child pointer in the hierarchy, given our name and the hier string
        irods::resource_ptr resc;
        err = get_next_child_for_open_or_write(
                  name,
                  file_obj,
                  _ctx.child_map(),
                  resc );
        if ( !err.ok() ) {
            ( *_out_vote ) = 0.0;
            return PASS( err );
        }
        // =-=-=-=-=-=-=-
        // forward the redirect call to the child for assertion of the whole operation,
        // there may be more than a leaf beneath us
        return resc->call < const std::string*,
               const std::string*,
               irods::hierarchy_parser*,
               float* > (
                   _ctx.comm(),
                   irods::RESOURCE_OP_RESOLVE_RESC_HIER,
                   _ctx.fco(),
                   _opr,
                   _curr_host,
                   _out_parser,
                   _out_vote );
    }
    else if ( irods::CREATE_OPERATION == ( *_opr ) ) {
        // =-=-=-=-=-=-=-
        // get the next available child resource
        irods::resource_ptr resc;
        irods::error err = get_next_valid_child_resource(
                               _ctx.prop_map(),
                               _ctx.child_map(),
                               resc );
        if ( !err.ok() ) {
            return PASS( err );
        }
        // =-=-=-=-=-=-=-
        // forward the 'put' redirect to the appropriate child
        err = resc->call < const std::string*,
              const std::string*,
              irods::hierarchy_parser*,
              float* > (
                  _ctx.comm(),
                  irods::RESOURCE_OP_RESOLVE_RESC_HIER,
                  _ctx.fco(),
                  _opr,
                  _curr_host,
                  _out_parser,
                  _out_vote );
        if ( !err.ok() ) {
            return PASSMSG( "forward of put redirect failed", err );
        }
        std::string hier;
        _out_parser->str( hier );
        // BUGFIX: log the vote's value, not the float* pointer - passing
        // a pointer through a "%f" varargs slot is undefined behavior.
        // (An unused duplicate 'new_hier' extraction was also removed.)
        rodsLog(
            LOG_DEBUG,
            "round robin - create :: resc hier [%s] vote [%f]",
            hier.c_str(),
            *_out_vote );
        // =-=-=-=-=-=-=-
        // update the next_child appropriately as the above succeeded
        err = update_next_child_resource( _ctx.prop_map() );
        if ( !err.ok() ) {
            return PASSMSG( "update_next_child_resource failed", err );
        }
        return SUCCESS();
    }
    // =-=-=-=-=-=-=-
    // must have been passed a bad operation
    std::stringstream msg;
    msg << "round_robin_redirect - operation not supported [";
    msg << ( *_opr ) << "]";
    return ERROR( -1, msg.str() );
} // round_robin_redirect
// =-=-=-=-=-=-=-
// round_robin_file_rebalance - code which would rebalance the subtree
irods::error round_robin_file_rebalance(
    irods::resource_plugin_context& _ctx ) {
    // =-=-=-=-=-=-=-
    // fan the rebalance request out to every child; remember the most
    // recent failure but still give each child a chance to run
    irods::error last_err = SUCCESS();
    irods::resource_child_map::iterator child_itr;
    for ( child_itr = _ctx.child_map().begin();
            child_itr != _ctx.child_map().end();
            ++child_itr ) {
        irods::error call_err = child_itr->second.second->call(
                                    _ctx.comm(),
                                    irods::RESOURCE_OP_REBALANCE,
                                    _ctx.fco() );
        if ( !call_err.ok() ) {
            irods::log( PASS( call_err ) );
            last_err = call_err;
        }
    }
    if ( !last_err.ok() ) {
        return PASS( last_err );
    }
    // =-=-=-=-=-=-=-
    // refresh the object counts now that the children have rebalanced
    return update_resource_object_count(
               _ctx.comm(),
               _ctx.prop_map() );
} // round_robin_file_rebalance
// =-=-=-=-=-=-=-
// interface for notification of operations on the object
irods::error round_robin_file_notify(
    irods::resource_plugin_context& _ctx,
    const std::string*              _opr ) {
    // =-=-=-=-=-=-=-
    // resolve the child selected by the object's hierarchy
    irods::resource_ptr child_resc;
    irods::error resolve_err =
        round_robin_get_resc_for_call< irods::file_object >( _ctx, child_resc );
    if ( !resolve_err.ok() ) {
        std::string msg( "failed." );
        return PASSMSG( msg, resolve_err );
    }
    // =-=-=-=-=-=-=-
    // forward the notification, passing along the operation name
    return child_resc->call< const std::string* >(
               _ctx.comm(),
               irods::RESOURCE_OP_NOTIFY,
               _ctx.fco(),
               _opr );
} // round_robin_file_notify
// =-=-=-=-=-=-=-
// 3. create derived class to handle round_robin file system resources
//    necessary to do custom parsing of the context string to place
//    any useful values into the property map for reference in later
//    operations. semicolon is the preferred delimiter
class roundrobin_resource : public irods::resource {
    public:
        // @param _inst_name [in] resource instance name
        // @param _context   [in] context string from the catalog; used verbatim
        //                   as the initial next-child selection
        roundrobin_resource( const std::string& _inst_name,
                             const std::string& _context ) :
            irods::resource( _inst_name, _context ) {
            // =-=-=-=-=-=-=-
            // assign context string as the next_child string
            // in the property map. this is used to keep track
            // of the last used child in the vector
            properties_.set< std::string >( NEXT_CHILD_PROP, context_ );
            rodsLog( LOG_DEBUG, "roundrobin_resource :: next_child [%s]", context_.c_str() );
            // register the agent-startup hook that builds the sorted
            // child vector (see round_robin_start_operation above)
            set_start_operation( "round_robin_start_operation" );
        }
}; // class
// =-=-=-=-=-=-=-
// 4. create the plugin factory function which will return a dynamically
// instantiated object of the previously defined derived resource. use
// the add_operation member to associate a 'call name' to the interfaces
// defined above. for resource plugins these call names are standardized
// as used by the irods facing interface defined in
// server/drivers/src/fileDriver.c
irods::resource* plugin_factory( const std::string& _inst_name,
const std::string& _context ) {
// =-=-=-=-=-=-=-
// 4a. create round_robinfilesystem_resource
roundrobin_resource* resc = new roundrobin_resource( _inst_name, _context );
// =-=-=-=-=-=-=-
// 4b. map function names to operations. this map will be used to load
// the symbols from the shared object in the delay_load stage of
// plugin loading.
resc->add_operation( irods::RESOURCE_OP_CREATE, "round_robin_file_create" );
resc->add_operation( irods::RESOURCE_OP_OPEN, "round_robin_file_open" );
resc->add_operation( irods::RESOURCE_OP_READ, "round_robin_file_read" );
resc->add_operation( irods::RESOURCE_OP_WRITE, "round_robin_file_write" );
resc->add_operation( irods::RESOURCE_OP_CLOSE, "round_robin_file_close" );
resc->add_operation( irods::RESOURCE_OP_UNLINK, "round_robin_file_unlink" );
resc->add_operation( irods::RESOURCE_OP_STAT, "round_robin_file_stat" );
resc->add_operation( irods::RESOURCE_OP_MKDIR, "round_robin_file_mkdir" );
resc->add_operation( irods::RESOURCE_OP_OPENDIR, "round_robin_file_opendir" );
resc->add_operation( irods::RESOURCE_OP_READDIR, "round_robin_file_readdir" );
resc->add_operation( irods::RESOURCE_OP_RENAME, "round_robin_file_rename" );
resc->add_operation( irods::RESOURCE_OP_TRUNCATE, "round_robin_file_truncate" );
resc->add_operation( irods::RESOURCE_OP_FREESPACE, "round_robin_file_getfs_freespace" );
resc->add_operation( irods::RESOURCE_OP_LSEEK, "round_robin_file_lseek" );
resc->add_operation( irods::RESOURCE_OP_RMDIR, "round_robin_file_rmdir" );
resc->add_operation( irods::RESOURCE_OP_CLOSEDIR, "round_robin_file_closedir" );
resc->add_operation( irods::RESOURCE_OP_STAGETOCACHE, "round_robin_file_stage_to_cache" );
resc->add_operation( irods::RESOURCE_OP_SYNCTOARCH, "round_robin_file_sync_to_arch" );
resc->add_operation( irods::RESOURCE_OP_REGISTERED, "round_robin_file_registered" );
resc->add_operation( irods::RESOURCE_OP_UNREGISTERED, "round_robin_file_unregistered" );
resc->add_operation( irods::RESOURCE_OP_MODIFIED, "round_robin_file_modified" );
resc->add_operation( irods::RESOURCE_OP_RESOLVE_RESC_HIER, "round_robin_redirect" );
resc->add_operation( irods::RESOURCE_OP_REBALANCE, "round_robin_file_rebalance" );
resc->add_operation( irods::RESOURCE_OP_NOTIFY, "round_robin_file_notify" );
// =-=-=-=-=-=-=-
// set some properties necessary for backporting to iRODS legacy code
resc->set_property< int >( irods::RESOURCE_CHECK_PATH_PERM, 2 );//DO_CHK_PATH_PERM );
resc->set_property< int >( irods::RESOURCE_CREATE_PATH, 1 );//CREATE_PATH );
// =-=-=-=-=-=-=-
// 4c. return the pointer through the generic interface of an
// irods::resource pointer
return dynamic_cast<irods::resource*>( resc );
} // plugin_factory
}; // extern "C"
| {"hexsha": "0eee1af0dcd4f1e93454d196c9fa1d8514fd9255", "size": 47402, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "plugins/resources/roundrobin/libroundrobin.cpp", "max_stars_repo_name": "nesi/irods", "max_stars_repo_head_hexsha": "49eeaf76305fc483f21b1bbfbdd77d540b59cfd2", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "plugins/resources/roundrobin/libroundrobin.cpp", "max_issues_repo_name": "nesi/irods", "max_issues_repo_head_hexsha": "49eeaf76305fc483f21b1bbfbdd77d540b59cfd2", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "plugins/resources/roundrobin/libroundrobin.cpp", "max_forks_repo_name": "nesi/irods", "max_forks_repo_head_hexsha": "49eeaf76305fc483f21b1bbfbdd77d540b59cfd2", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.6321483771, "max_line_length": 120, "alphanum_fraction": 0.5153790979, "num_tokens": 11499} |
import os
import datetime
import time
from collections.abc import Iterable
from glob import glob
import numpy as np
import netCDF4 as nc
import itertools
from .logger import get_log
from . import sario
log = get_log()
DATE_FMT = "%Y%m%d"
def find_igrams(directory=".", ext=".int", parse=True, filename=None):
    """Reads the list of igrams to return dates of images as a tuple

    Args:
        directory (str): path to the igram directory
        ext (str): file extension when searching a directory
        parse (bool): output as parsed datetime tuples. False returns the filenames
        filename (str): name of a file with SAR filenames separated by newlines;
            lines whose first non-blank character is '#' are skipped as comments

    Returns:
        tuple(date, date) of (early, late) dates for all igrams (if parse=True)
        if parse=False: returns list[str], filenames of the igrams
    """
    if filename is not None:
        with open(filename) as f:
            igram_file_list = [
                line
                for line in f.read().splitlines()
                if not line.strip().startswith("#")
            ]
    else:
        igram_file_list = sorted(glob(os.path.join(directory, "*" + ext)))
    if parse:
        igram_fnames = [os.path.split(f)[1] for f in igram_file_list]
        # Remove the extension as a *suffix*. The previous `intname.strip(ext)`
        # treated `ext` as a character set, which could also strip leading or
        # trailing name characters that happen to appear in `ext`.
        stems = [
            name[: -len(ext)] if ext and name.endswith(ext) else name
            for name in igram_fnames
        ]
        date_pairs = [stem.split("_")[:2] for stem in stems]
        return parse_intlist_strings(date_pairs, ext=ext)
    else:
        return igram_file_list
def parse_intlist_strings(date_pairs, ext=".int"):
    """Parse igram date-pair input into (early, late) date tuples.

    Accepts a single filename string ("YYYYmmdd_YYYYmmdd.int"), a list of
    such strings, or a list of already-split [early, late] string pairs.
    Empty input yields an empty list.  `ext` is accepted for interface
    compatibility but not used here.
    """
    if not date_pairs:
        return []
    # Normalize a lone filename string to a one-element list, then let the
    # string-list branch below split it like any other filename.
    if isinstance(date_pairs, str):
        date_pairs = [date_pairs]
    if isinstance(date_pairs, Iterable) and isinstance(date_pairs[0], str):
        date_pairs = [name.split(".")[0].split("_")[:2] for name in date_pairs]
    return [(_parse(early), _parse(late)) for early, late in date_pairs]
def dates_from_igrams(igram_list):
    """Takes a list of [(reference, secondary),...] igram date pairs
    and returns the sorted list of unique SAR image dates used to form them.
    """
    unique_dates = set()
    for pair in igram_list:
        unique_dates.update(pair)
    return sorted(unique_dates)
def _parse(datestr):
    """Convert a YYYYMMDD string (module-level DATE_FMT) to a datetime.date."""
    parsed = datetime.datetime.strptime(datestr, DATE_FMT)
    return parsed.date()
def get_latlon_arrs(h5_filename=None, rsc_file=None, gdal_file=None):
    """Build 1-D longitude/latitude coordinate arrays for a raster grid.

    Args:
        h5_filename (str): unused; kept for backward-compatible interface
        rsc_file (str): .rsc (resource) file describing the lat/lon grid
        gdal_file (str): GDAL-readable file in the desired coordinates

    Returns:
        tuple[ndarray, ndarray]: (lon_arr, lat_arr), each flattened to 1-D

    Raises:
        ValueError: if neither `rsc_file` nor `gdal_file` is provided
            (previously this path crashed with an UnboundLocalError).
    """
    if rsc_file is not None:
        lon_arr, lat_arr = grid(**sario.load_rsc(rsc_file), sparse=True)
    elif gdal_file is not None:
        lon_arr, lat_arr = grid(fname=gdal_file)
    else:
        raise ValueError("Must provide either `rsc_file` or `gdal_file`")
    # Flatten in *both* branches: the sparse meshgrid from the .rsc branch has
    # shapes (1, cols)/(rows, 1), which breaks callers that use len() on the
    # results (e.g. create_empty_nc_stack).
    lon_arr, lat_arr = lon_arr.reshape(-1), lat_arr.reshape(-1)
    return lon_arr, lat_arr
def grid(
    rows=None,
    cols=None,
    y_step=None,
    x_step=None,
    y_first=None,
    x_first=None,
    width=None,
    file_length=None,
    sparse=True,
    fname=None,
    **kwargs,
):
    """Takes sizes and spacing info, creates a grid of values

    Args:
        rows (int): number of rows
        cols (int): number of cols
        y_step (float): spacing between rows
        x_step (float): spacing between cols
        y_first (float): starting location of first row at top
        x_first (float): starting location of first col on left
        width (int): alias for `cols` (name used in .rsc metadata)
        file_length (int): alias for `rows` (name used in .rsc metadata)
        sparse (bool): Optional (default True). Passed through to
            np.meshgrid to optionally conserve memory; sparse output has
            shapes (1, cols) and (rows, 1) instead of full (rows, cols)
        fname (str): GDAL-readable file to take the grid geometry from
            instead of the explicit size/spacing arguments (needs rasterio)
        **kwargs: ignored; lets callers splat a full .rsc dict

    Returns:
        tuple[ndarray, ndarray]: the XX, YY grids of longitudes and lats

    Examples:
        >>> test_grid_data = {'cols': 2, 'rows': 3, 'x_first': -155.0, 'x_step': 0.01,\
 'y_first': 19.5, 'y_step': -0.2}
        >>> lons, lats = grid(sparse=False, **test_grid_data)
        >>> np.set_printoptions(legacy="1.13")
        >>> print(lons)
        [[-155. -154.99]
        [-155. -154.99]
        [-155. -154.99]]
        >>> print(lats)
        [[ 19.5 19.5]
        [ 19.3 19.3]
        [ 19.1 19.1]]
    """
    if fname is None:
        # accept either naming convention for the grid size
        rows = rows or file_length
        cols = cols or width
        x = np.linspace(x_first, x_first + (cols - 1) * x_step, cols).reshape((1, cols))
        y = np.linspace(y_first, y_first + (rows - 1) * y_step, rows).reshape((rows, 1))
    else:
        try:
            import rasterio as rio
        except ImportError:
            raise ValueError(
                "Need to `conda install rasterio` to pass gdal-readable files to `grid`"
            )
        with rio.open(fname) as src:
            rows, cols = src.shape
            # Query (row, col) -> coordinate along the diagonal once, then
            # split the answers into the x (lon) and y (lat) axes.
            max_len = max(rows, cols)
            lon_list, lat_list = src.xy(np.arange(max_len), np.arange(max_len))
            x = np.array(lon_list[:cols])
            y = np.array(lat_list[:rows])
    return np.meshgrid(x, y, sparse=sparse)
def create_empty_nc_stack(
    outname,
    date_list=None,
    rsc_file=None,
    gdal_file=None,
    dtype="float32",
    stack_dim_name="date",
    stack_data_name="igrams",
    lat_units="degrees north",
    lon_units="degrees east",
    overwrite=False,
):
    """Creates skeleton of .nc stack without writing stack data

    Args:
        outname (str): name of .nc output file to save
        date_list (list[datetime.date]): if layers of stack correspond to dates of SAR images
        rsc_file (str): .rsc (resource) file containing the desired output lat/lon grid data
        gdal_file (str): instead of .rsc, an example GDAL-readable file in desired coordinates
        dtype: default="float32", the numpy datatype of the stack data
        stack_dim_name (str): default = "date". Name of the 3rd dimension of the stack
            (Dimensions are (stack_dim_name, lat, lon) )
        stack_data_name (str): default="igrams", name of the data variable in the file
        lat_units (str): default = "degrees north", units attribute of the lat variable
        lon_units (str): default = "degrees east", units attribute of the lon variable
        overwrite (bool): default = False, will overwrite file if true

    Raises:
        ValueError: if `outname` is not an .nc filename, or if `date_list` is None
    """
    if not outname.endswith(".nc"):
        raise ValueError("{} must be an .nc filename".format(outname))
    # Validate date_list *before* using it: previously `len(date_list)` ran
    # first and raised a TypeError instead of this explicit error.
    if date_list is None:
        raise ValueError("Need 'date_list' if 3rd dimension is 'date'")
    # TODO: allow for radar coordinates and just "x, y" generic?
    lon_arr, lat_arr = get_latlon_arrs(
        rsc_file=rsc_file,
        gdal_file=gdal_file,
    )
    rows, cols = len(lat_arr), len(lon_arr)
    depth = len(date_list)
    stack_dim_arr = to_datetimes(date_list)
    log.info("Making dimensions and variables")
    with nc.Dataset(outname, "w", clobber=overwrite) as f:
        f.history = "Created " + time.ctime(time.time())
        f.createDimension("lat", rows)
        f.createDimension("lon", cols)
        # Could make this unlimited to add to it later?
        latitudes = f.createVariable("lat", "f4", ("lat",), zlib=True)
        longitudes = f.createVariable("lon", "f4", ("lon",), zlib=True)
        # Use the caller-provided unit strings (these parameters were
        # previously accepted but ignored in favor of hard-coded values).
        latitudes.units = lat_units
        longitudes.units = lon_units
        f.createDimension(stack_dim_name, depth)
        stack_dim_variable = f.createVariable(
            stack_dim_name, "f4", (stack_dim_name,), zlib=True
        )
        stack_dim_variable.units = "days since {}".format(date_list[0])
        # Write coordinate data
        latitudes[:] = lat_arr
        longitudes[:] = lon_arr
        d2n = nc.date2num(stack_dim_arr, units=stack_dim_variable.units)
        stack_dim_variable[:] = d2n
        # Finally, the actual stack variable: created but left filled with
        # the fill value so data can be written in later, layer by layer.
        log.info("Writing dummy data for %s", stack_data_name)
        dt = np.dtype(dtype)
        fill_value = 0
        f.createVariable(
            stack_data_name,
            dt,
            (stack_dim_name, "lat", "lon"),
            fill_value=fill_value,
            zlib=True,
        )
def to_datetimes(date_list):
    """Convert date-like objects to datetimes (midnight for plain dates)."""
    converted = []
    for d in date_list:
        year, month, day, hour, minute, second = d.timetuple()[:6]
        converted.append(datetime.datetime(year, month, day, hour, minute, second))
    return converted
# def ignore_sar_dates(
# sar_date_list, int_date_list, ignore_file="sarlist_ignore.txt", parse=True
# ):
# """Read extra file to ignore certain dates of interferograms"""
# ignore_sars = set(find_sars(filename=ignore_file, parse=parse))
# log.info("Ignoring the following .sar dates:")
# log.info(sorted(ignore_sars))
# valid_sars = [g for g in sar_date_list if g not in ignore_sars]
# valid_igrams = [
# i for i in int_date_list if i[0] not in ignore_sars and i[1] not in ignore_sars
# ]
# return valid_sars, valid_igrams
| {"hexsha": "a61260635b509061368fbf9838cc67c559a8a450", "size": 8244, "ext": "py", "lang": "Python", "max_stars_repo_path": "trodi/utils.py", "max_stars_repo_name": "scottstanie/trodi", "max_stars_repo_head_hexsha": "e359fbe65b4de27afdec093e2b41f0c63b665fe0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-16T04:23:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-16T04:23:42.000Z", "max_issues_repo_path": "trodi/utils.py", "max_issues_repo_name": "scottstanie/trodi", "max_issues_repo_head_hexsha": "e359fbe65b4de27afdec093e2b41f0c63b665fe0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-05-06T17:09:19.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-06T17:09:19.000Z", "max_forks_repo_path": "trodi/utils.py", "max_forks_repo_name": "scottstanie/trodi", "max_forks_repo_head_hexsha": "e359fbe65b4de27afdec093e2b41f0c63b665fe0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6489795918, "max_line_length": 95, "alphanum_fraction": 0.628942261, "include": true, "reason": "import numpy", "num_tokens": 2119} |
# Include this startup file prior to running Julia code
# Add project module locations to path
# NOTE: abspath resolves relative to the current working directory, so Julia
# must be started from the repository root for these modules to be found.
push!(LOAD_PATH, abspath(joinpath("src","distributions")))
push!(LOAD_PATH, abspath(joinpath("src","samplers")))
| {"hexsha": "d686f9f83787a5a3a0a9d64842d654189b08a849", "size": 209, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/startup.jl", "max_stars_repo_name": "jgorham/stein_discrepancy", "max_stars_repo_head_hexsha": "addfe17ce04e6fec4be0c441c996e732b1f7abb0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2016-12-21T16:25:53.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-15T18:10:41.000Z", "max_issues_repo_path": "src/startup.jl", "max_issues_repo_name": "jgorham/stochastic_stein_discrepancy", "max_issues_repo_head_hexsha": "183ce591e14053cc6cccebec6b362ea7b413b77b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-03-08T14:43:34.000Z", "max_issues_repo_issues_event_max_datetime": "2017-03-09T04:39:28.000Z", "max_forks_repo_path": "src/startup.jl", "max_forks_repo_name": "jgorham/stochastic_stein_discrepancy", "max_forks_repo_head_hexsha": "183ce591e14053cc6cccebec6b362ea7b413b77b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2016-06-29T13:55:43.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-15T18:10:45.000Z", "avg_line_length": 34.8333333333, "max_line_length": 58, "alphanum_fraction": 0.7607655502, "num_tokens": 49} |
import mpmath
# 2*pi*i: the factor used to form the nome q = exp(2*pi*i*t) below
tpij = 2*mpmath.pi*1j
def eta(t):
    """Dedekind eta function: eta(t) = q^(1/24) * (q; q)_inf with q = exp(2*pi*i*t)."""
    nome = mpmath.exp(tpij * t)
    prefactor = mpmath.exp(tpij * t / 24)
    # mpmath.qp(q, q) is the q-Pochhammer symbol (q; q)_inf
    return prefactor * mpmath.qp(nome, nome)
# Domain-coloring plot of eta just above the real axis (im > 0, where the
# function is defined); 1e6 points makes this slow but detailed.
mpmath.cplot( eta, re=[-1.1,1.1],im=[0.00001,0.5], points=1000000, verbose=True)
| {"hexsha": "8c5d62c614236d55a22ee323714a4a4369738a5b", "size": 231, "ext": "py", "lang": "Python", "max_stars_repo_path": "images/etaplot.py", "max_stars_repo_name": "rantonels/rantonels.github.io", "max_stars_repo_head_hexsha": "599804c02efa365c504c696cf0b5d22745bbb85b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "images/etaplot.py", "max_issues_repo_name": "rantonels/rantonels.github.io", "max_issues_repo_head_hexsha": "599804c02efa365c504c696cf0b5d22745bbb85b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "images/etaplot.py", "max_forks_repo_name": "rantonels/rantonels.github.io", "max_forks_repo_head_hexsha": "599804c02efa365c504c696cf0b5d22745bbb85b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.7692307692, "max_line_length": 80, "alphanum_fraction": 0.619047619, "include": true, "reason": "import mpmath", "num_tokens": 100} |
# --------------
import time
import pandas as pd
import numpy as np
from nltk import pos_tag
import matplotlib.pyplot as plt
# code starts here
# Loading of data
# NOTE(review): `path` is not defined in this file — presumably injected by
# the hosting platform before this cell runs; confirm before running standalone.
df=pd.read_csv(path)
# Mapping of pos tags with nominees
# Split each nominee title into words and POS-tag the word list with NLTK.
tagged_titles = df['nominee'].str.split().map(pos_tag)
# Creating a dataframe
tagged_titles_df = pd.DataFrame(tagged_titles)
# --------------
#tagged_titles_df already defined in the last task
def count_tags(title_with_tags):
    """Count occurrences of each POS tag in a list of (word, tag) pairs.

    Returns a plain dict mapping tag -> count.
    """
    tag_count = {}
    for _, tag in title_with_tags:
        tag_count[tag] = tag_count.get(tag, 0) + 1
    return(tag_count)
# code starts here
# Per-title dict of tag frequencies
tagged_titles_df ['tag_counts'] = tagged_titles_df.nominee.map(count_tags)
# Tagset containing all the possible tags
tag_set = list(set([tag for tags in tagged_titles_df['tag_counts'] for tag in tags]))
# Creating tag column frequency for each tags
for tag in tag_set:
    tagged_titles_df[tag] = tagged_titles_df['tag_counts'].map(lambda x: x.get(tag, 0))
# Sum each tag column over all titles and plot the 10 most common tags
top_pos = tagged_titles_df.drop(columns=["nominee","tag_counts"])
top_pos = top_pos.apply(sum)
top_pos = top_pos.sort_values(ascending=False)[:10]
top_pos.plot.bar()
# code ends here
# --------------
# Function to create vocabulary of the tags
def vocab_creator(tagged_titles):
    """Build a word -> {tag: count} vocabulary from tagged titles.

    `tagged_titles` must have a 'nominee' column whose entries are lists
    of (word, tag) pairs.
    """
    vocab = {}
    for row in tagged_titles['nominee']:
        for word, tag in row:
            tag_counts = vocab.setdefault(word, {})
            tag_counts[tag] = tag_counts.get(tag, 0) + 1
    return vocab
# Creating vocab of our tagged titles dataframe
vocab= vocab_creator(tagged_titles_df)
# code starts here
# word -> {tag: count} table as a DataFrame (one column per POS tag)
vocab_df = pd.DataFrame.from_dict(vocab, orient="Index")
vocab_df.fillna(0, inplace=True)
# NOTE(review): [-10:-1] drops the single most frequent word; presumably
# [-10:] (or nlargest(10)) was intended — confirm before trusting the plot.
top_verb_nominee = vocab_df.VBG.sort_values()[-10:-1]
top_verb_nominee.plot.bar()
top_noun_nominee = vocab_df.NN.sort_values()[-10:-1]
top_noun_nominee.plot.bar()
# code ends here
# --------------
# code starts here
import nltk
# Keep only winners whose category mentions "Comedy", then re-tag titles.
new_df = df[df.winner==1]
new_df = new_df[new_df.category.str.contains("Comedy")]
tagged_titles_winner = new_df.nominee.str.split().map(nltk.pos_tag)
tagged_titles_winner_df = pd.DataFrame(tagged_titles_winner)
# Creating a vocabulary of the tags
vocab = vocab_creator(tagged_titles_winner_df)
vocab_df = pd.DataFrame.from_dict(vocab, orient="Index")
vocab_df.fillna(0, inplace=True)
# Top 5 proper nouns (NNP), then top 5 base-form verbs (VB), among winners
top_proper_noun_nominee = vocab_df.NNP.sort_values(ascending=False)[:5]
top_proper_noun_nominee.plot.bar()
plt.show()
top_verb_nominee = vocab_df.VB.sort_values(ascending=False)[:5]
top_verb_nominee.plot.bar()
plt.show()
# code ends here
# --------------
""" After filling and submitting the feedback form, click the Submit button of the codeblock"""
| {"hexsha": "dcd083a30a65d93d9f572244bcc2be82f30d7efb", "size": 2837, "ext": "py", "lang": "Python", "max_stars_repo_path": "EMMY-winner-analysis/code.py", "max_stars_repo_name": "hchaudhari73/ga-learner-dsmp-repo", "max_stars_repo_head_hexsha": "42c0bf7b4bbeef10d187c74c8803b1fdca5d2cdd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "EMMY-winner-analysis/code.py", "max_issues_repo_name": "hchaudhari73/ga-learner-dsmp-repo", "max_issues_repo_head_hexsha": "42c0bf7b4bbeef10d187c74c8803b1fdca5d2cdd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "EMMY-winner-analysis/code.py", "max_forks_repo_name": "hchaudhari73/ga-learner-dsmp-repo", "max_forks_repo_head_hexsha": "42c0bf7b4bbeef10d187c74c8803b1fdca5d2cdd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.0275229358, "max_line_length": 96, "alphanum_fraction": 0.6862883327, "include": true, "reason": "import numpy", "num_tokens": 693} |
import numpy as np
class DatasetsIndexingHelper:
    """Maps a flat global index onto (dataset_index, local_index) pairs.

    Treats a sequence of dataset lengths as one concatenated virtual
    dataset of total length sum(lengths).
    """
    def __init__(self, dataset_length_list):
        self.dataset_length_list = dataset_length_list
        self.length = sum(dataset_length_list)
    def __getitem__(self, index: int):
        remaining = index
        for dataset_index, dataset_length in enumerate(self.dataset_length_list):
            if remaining < dataset_length:
                return dataset_index, remaining
            remaining -= dataset_length
        # index was >= total length
        raise IndexError
    def __len__(self):
        return self.length
class ApiGatewayRunThroughSamplerServerHandler:
    """Server-side handler that deals out every sample of a dataset
    collection exactly once per epoch to distributed client workers.

    Commands are tuples dispatched by ``__call__``:
      ('get_next', rank)                     -> next (dataset_idx, seq_idx, stop_flag)
      ('mark_done_and_get_status', rank, iteration, num_done) -> progress tuple
      ('reset',)                             -> reshuffle for a new epoch
    """
    def __init__(self, datasets, seed):
        # Flatten per-dataset lengths into one global index space.
        self.datasets_indexing_helper = DatasetsIndexingHelper(tuple(len(dataset) for dataset in datasets))
        self.seed = seed
        self.reset()
    def reset(self):
        """Reshuffle the global index order and clear all epoch state."""
        # Fixed seed -> identical shuffle order on every reset.
        rng_engine = np.random.Generator(np.random.PCG64(self.seed))
        indices = np.arange(len(self.datasets_indexing_helper))
        rng_engine.shuffle(indices)
        self.indices = indices
        self.position = 0  # next shuffled index to hand out
        self.done = 0  # number of samples clients have confirmed finished
        self.stop_iteration = None  # iteration at which clients must stop (set once all done)
        self.client_worker_stop_flags = {}  # per-client-rank "stop now" flag
        self.global_max_iteration = 0  # highest iteration index reported by any client
    def __call__(self, command, response):
        if command[0] == 'get_next':
            worker_local_rank = command[1]
            # Piggyback the client's current stop flag on every sample reply.
            stop_flag = False
            if worker_local_rank in self.client_worker_stop_flags:
                stop_flag = self.client_worker_stop_flags[worker_local_rank]
            if self.position < len(self.datasets_indexing_helper):
                index = self.indices[self.position]
                index_of_dataset, index_of_sequence = self.datasets_indexing_helper[index]
                response.set_body((index_of_dataset, index_of_sequence, stop_flag))
                self.position += 1
            else:
                # Epoch exhausted: (None, None) tells the client there is no sample.
                response.set_body((None, None, stop_flag))
        elif command[0] == 'mark_done_and_get_status':
            client_local_rank = command[1]
            client_iteration_index = command[2]
            num_done = command[3]
            if self.global_max_iteration < client_iteration_index:
                self.global_max_iteration = client_iteration_index
            if client_local_rank not in self.client_worker_stop_flags:
                self.client_worker_stop_flags[client_local_rank] = False
            self.done += num_done
            assert self.done <= len(self.datasets_indexing_helper)
            is_done = self.done == len(self.datasets_indexing_helper)
            # Once everything is confirmed done, fix the stop iteration.
            # NOTE(review): the +2 margin presumably gives in-flight clients
            # time to drain before stopping — confirm against the client side.
            if is_done and self.stop_iteration is None:
                self.stop_iteration = self.global_max_iteration + 2
            # Clear first, then re-raise the flag only on the final iteration.
            self.client_worker_stop_flags[client_local_rank] = False
            if self.stop_iteration is not None:
                assert client_iteration_index < self.stop_iteration
            if client_iteration_index + 1 == self.stop_iteration:
                self.client_worker_stop_flags[client_local_rank] = True
            response.set_body((self.stop_iteration, self.position, self.done, len(self.datasets_indexing_helper)))
        elif command[0] == 'reset':
            self.reset()
            response.set_body('ok')
        else:
            raise Exception(f'Unknown command received {command}')
| {"hexsha": "9eb09bde8b02f685a72a8501cd40b82fab431fbe", "size": 3217, "ext": "py", "lang": "Python", "max_stars_repo_path": "data/tracking/sampler/_sampling_algos/sequence_picking/run_through/_server.py", "max_stars_repo_name": "zhangzhengde0225/SwinTrack", "max_stars_repo_head_hexsha": "526be17f8ef266cb924c6939bd8dda23e9b73249", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 143, "max_stars_repo_stars_event_min_datetime": "2021-12-03T02:33:36.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T00:01:48.000Z", "max_issues_repo_path": "data/tracking/sampler/_sampling_algos/sequence_picking/run_through/_server.py", "max_issues_repo_name": "zhangzhengde0225/SwinTrack", "max_issues_repo_head_hexsha": "526be17f8ef266cb924c6939bd8dda23e9b73249", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 33, "max_issues_repo_issues_event_min_datetime": "2021-12-03T10:32:05.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T02:13:55.000Z", "max_forks_repo_path": "data/tracking/sampler/_sampling_algos/sequence_picking/run_through/_server.py", "max_forks_repo_name": "zhangzhengde0225/SwinTrack", "max_forks_repo_head_hexsha": "526be17f8ef266cb924c6939bd8dda23e9b73249", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 24, "max_forks_repo_forks_event_min_datetime": "2021-12-04T06:46:42.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T07:57:47.000Z", "avg_line_length": 39.7160493827, "max_line_length": 114, "alphanum_fraction": 0.6453217283, "include": true, "reason": "import numpy", "num_tokens": 635} |
import numpy as np
import pandas as pd
import time
import xml.etree.ElementTree as ET
import os
from xml.dom import minidom
class Record:
    """Wraps a simulator record held either as an in-memory DataFrame
    or as a path to a ';'-delimited .record/.csv file (loaded lazily).
    """
    def __init__(self, df=None, path=None):
        self._df = df
        self._path = path

    @property
    def df(self):
        """Gets the record as a dataframe.

        If the _df attribute is None, the method reads the dataframe
        from the csv file and returns that; otherwise it returns
        self._df.

        Returns None (after printing a message) when neither a cached
        dataframe nor a readable file is available.  Previously these
        error paths crashed with an UnboundLocalError because `df` was
        never assigned before the final `return df`.
        """
        if self._df is None:
            df = None
            if self._path is not None:
                try:
                    df = pd.read_csv(self._path, delimiter=';')
                except FileNotFoundError:
                    print("{} does not exist.".format(self._path))
            else:
                print("Please provide the record as a dataframe\
                    or a path to the respective .record file.")
        else:
            df = self._df
        return df

    @df.setter
    def df(self, df):
        self._df = df

    @df.deleter
    def df(self):
        self._df = None

    @property
    def path(self):
        return self._path

    @path.setter
    def path(self, path):
        self._path = path
def get_changepoints_as_list(record, column_name,
                             time_column_name='TimeElapsed',
                             time_factor=1000,
                             time_out_as_integer=True):
    """Create a list of changepoints from a column of a record file.

    Args:
        record (pd.DataFrame): record file as pd.DataFrame; must have at
            least one row
        column_name (str): Name of the column to be processed
        time_column_name (str): Name of the column holding elapsed time
        time_factor: multiplier applied to time values (e.g. 1000 for s -> ms)
        time_out_as_integer (bool): cast the scaled times to int

    Returns:
        changepoint_list (list): Elements are dictionaries with keys
        'column', 'value', 'time', corresponding to timepoints at which
        the value in the record's column changes.  The first entry holds
        the initial value at time 0, and a final sentinel entry with an
        empty-string value marks the end time of the record.
    """
    # The previous `assert column_name, time_column_name in record.columns`
    # only asserted that column_name was truthy (the membership test was the
    # assert *message*); check both columns explicitly instead.
    assert column_name in record.columns
    assert time_column_name in record.columns
    current_value = record.iloc[0][column_name]
    changepoint_list = [{'column': column_name, 'value':
                        current_value, 'time': 0}]
    for i in range(1, len(record)):
        if record.iloc[i][column_name] != current_value:
            current_value = record.iloc[i][column_name]
            time = time_factor * record.iloc[i][time_column_name]
            if time_out_as_integer:
                time = int(time)
            changepoint_list.append({'column': column_name, 'value':
                                     current_value, 'time': time})
    # Sentinel marking the end of the record
    end_time = time_factor * record.iloc[-1][time_column_name]
    if time_out_as_integer:
        end_time = int(end_time)
    changepoint_list.append({'column': column_name, 'value': '',
                             'time': end_time})
    return changepoint_list
def get_changepoints(record, column_names,
                     time_column_name='TimeElapsed',
                     time_factor=1000,
                     time_out_as_integer=True):
    """Create auxiliary dataframe for transferring simulator info to eaf.

    Create a dataframe in which all the information from record
    that should be transferred to an eaf file is collected, sorted by
    the 'time' column.

    Args:
        record (pd.DataFrame): record file as pd.DataFrame
        column_names (list(str)): List containing the names of the
            record columns for which changepoint information should be
            computed
        time_column_name (str): Name of the column holding elapsed time
        time_factor: multiplier applied to time values (e.g. 1000 for s -> ms)
        time_out_as_integer (bool): cast the scaled times to int

    Returns:
        pd.DataFrame with columns 'column', 'value', 'time'
    """
    for column_name in column_names:
        assert column_name in record.columns
    # Collect one frame per column and concatenate once at the end;
    # the previous loop re-concatenated the accumulator each iteration
    # (quadratic in the number of columns).
    frames = [pd.DataFrame(columns=['column', 'value', 'time'])]
    for column_name in column_names:
        frames.append(pd.DataFrame(get_changepoints_as_list(record,
                                                            column_name,
                                                            time_column_name,
                                                            time_factor,
                                                            time_out_as_integer)))
    df = pd.concat(frames, sort=True)
    df.sort_values(by=['time'], inplace=True)
    df.reset_index(drop=True, inplace=True)
    return df
def write_to_eaf(changepoints, eaf_fname_in, eaf_fname_out):
    """Write information about changepoints to an eaf file.

    Args:
        changepoints (pd.DataFrame): Should have exactly the columns
            {'column', 'value', 'time'}; every row corresponds to a
            changepoint in the column 'column' of a record file at time
            'time', and with new value 'value'.
        eaf_fname_in (str): Name of the eaf file that will be parsed,
            resulting in an ET.ElementTree to which changepoint
            information will be added.
        eaf_fname_out (str): Name of the eaf file to which this
            ET.ElementTree will be written.

    Returns:
        (tree, root): the modified ET.ElementTree and its root element.

    CHECK for correctness
    TODO Refactor, make modular
    """
    assert set(changepoints.columns) == {'column', 'value', 'time'}
    column_names = changepoints['column'].unique()
    # Get a timestamp that will be used in order to create annotation
    # and time_slot IDs that are different from those that potentially
    # exist already in the eaf_fname_in.
    timestamp = str(time.time()).split('.')[0]
    # Create a 'timeslot_id' column of the auxiliary dataframe
    # (copy so the caller's dataframe is not mutated)
    changepoints = changepoints.copy()
    changepoints['timeslot_id'] = changepoints.index
    changepoints['timeslot_id'] = changepoints['timeslot_id'].apply(
        lambda n: str(n) + '-' +
                  timestamp)
    # Parse the input eaf file
    tree = ET.parse(eaf_fname_in)
    root = tree.getroot()
    # Create an ET.Element for every row in the changepoints DataFrame,
    # appending a TIME_SLOT entry to the file's TIME_ORDER element.
    time_order = root.findall('TIME_ORDER')[0]
    for index, row in changepoints.iterrows():
        time_slot = ET.Element('TIME_SLOT',
                               {'TIME_SLOT_ID': str(row['timeslot_id']),
                                'TIME_VALUE': str(row['time'])})
        time_order.insert(len(time_order), time_slot)
    # For each column_name appearing in changepoints['column'], find
    # the 'TIER' node with that id (create it if it doesn't
    # exist). Then create an annotation for every corresponding row in
    # the changepoints DataFrame.
    for column_name in column_names:
        tier_id = column_name
        if len(root.findall("TIER[@TIER_ID=\'" + tier_id + "\']")) > 0:
            tier = root.findall("TIER[@TIER_ID=\'" + tier_id + "\']")[0]
        else:
            tier = ET.Element('TIER', {'DEFAULT_LOCALE': 'en',
                                       'LINGUISTIC_TYPE_REF': 'Simulator',
                                       'TIER_ID': tier_id})
        # Consecutive changepoints (row0, row1) bound one annotation interval;
        # the annotation carries row0's value from row0's time to row1's time.
        df = changepoints[changepoints['column'] == column_name].sort_values(
            by='time')
        row0 = df.iloc[0]
        for index, row in df.iloc[1:].iterrows():
            row1 = row
            annotation = ET.Element('ANNOTATION')
            alignable_annotation = ET.Element('ALIGNABLE_ANNOTATION',
                                              {
                                                  'ANNOTATION_ID': 'a-'
                                                  + tier_id + '-' +
                                                  str(row0[
                                                      'timeslot_id']),
                                                  'TIME_SLOT_REF1': str(
                                                      row0['timeslot_id']),
                                                  'TIME_SLOT_REF2': str(
                                                      row1['timeslot_id'])})
            annotation_value = ET.Element('ANNOTATION_VALUE')
            annotation_value.text = str(row0['value'])
            alignable_annotation.insert(len(alignable_annotation),
                                        annotation_value)
            annotation.insert(len(annotation), alignable_annotation)
            tier.insert(len(tier), annotation)
            row0 = row1
        # NOTE(review): this inserts `tier` into root even when the tier was
        # found in (i.e. already belongs to) root — verify this does not
        # produce a duplicated TIER element in the output.
        root.insert(len(root), tier)
    # Create nicely indented string and write to output file
    # TODO: So far the new elements are strangely formatted (newline missing)
    tree_str = minidom.parseString(
        ET.tostring(root, method='xml')).toprettyxml(indent=" ",
                                                     newl="")
    with open(eaf_fname_out, 'w') as file:
        file.write(tree_str)
    # tree.write(eaf_fname_out, encoding='UTF-8', xml_declaration=True)
    return tree, root
def get_eaf_tier_as_df(eaf_fname, tier_id):
    """Create a pd.DataFrame holding one tier's annotations from an eaf file.

    Prints a message and returns an empty frame when the tier is missing.

    TODO Extend so it can do multiple tiers at a time
    TODO handle case when there is only 1 time boundary point

    Returns:
        tier_as_df (pd.DataFrame): A pd.DataFrame whose columns are
        tier_id (annotation value), 'ts_ref1' and 'ts_ref2' (start/end
        time values in the eaf file's units, typically milliseconds).
    """
    tier_as_df = pd.DataFrame(columns=[tier_id, 'ts_ref1', 'ts_ref2'])
    tree = ET.parse(eaf_fname)
    root = tree.getroot()
    time_order = root.findall('TIME_ORDER')[0]
    # Create a dictionary for time slots (keys are time slots ids,
    # values are time values).
    time_slots = {}
    for time_slot in time_order:
        try:
            time_slots[time_slot.get('TIME_SLOT_ID')] = int(
                time_slot.get('TIME_VALUE'))
        except TypeError:
            # TIME_VALUE attribute missing -> int(None) raises TypeError
            continue
    # Create a pd.DataFrame containing all annotations in the tier.
    try:
        for annotation in root.findall("TIER[@TIER_ID=\'" + tier_id + "\']") \
                [0]:
            try:
                ts_ref1 = annotation.findall("ALIGNABLE_ANNOTATION")[0].get(
                    'TIME_SLOT_REF1')
                ts_ref2 = annotation.findall("ALIGNABLE_ANNOTATION")[0].get(
                    'TIME_SLOT_REF2')
            except TypeError:
                continue
            annotation_value = annotation.findall("./*ANNOTATION_VALUE")[
                0].text
            new_row = pd.DataFrame([{tier_id: annotation_value,
                                     'ts_ref1': time_slots[ts_ref1],
                                     'ts_ref2': time_slots[ts_ref2]}])
            tier_as_df = pd.concat([tier_as_df, new_row])
    except IndexError:
        # findall(...)[0] failed: no TIER element with this ID in the file
        print(
            'The file {} does not seem to have a tier whose ID is \'{}\'.'
            .format(eaf_fname, tier_id))
    tier_as_df = tier_as_df.reset_index(drop=True)
    return tier_as_df
def transfer_eaf_tier_to_record(eaf_fname, tier_id, record,
                                time_column_name='TimeElapsed'):
    """Copy one eaf tier's annotations into a new `tier_id` column of record.

    Each annotation's value is written to every record row whose time lies
    inside the annotation's [ts_ref1, ts_ref2] interval.  Note that `record`
    is also sorted in place (side effect on the caller's frame).

    CHECK for correctness
    TODOs:
        - time column in eafs and record typically don't have same
          units (ms vs. s). Might include checking that
    """
    tier_as_df = get_eaf_tier_as_df(eaf_fname, tier_id)
    # Convert time info from milliseconds to seconds
    tier_as_df["ts_ref1"] /= 1000
    tier_as_df["ts_ref2"] /= 1000
    # Initialize new column
    # record[tier_id] = np.nan
    # record[tier_id + '_changepoint'] = False
    record.sort_values(by=time_column_name, inplace=True)
    # Reset index
    record = record.reset_index(drop=True)
    for row in tier_as_df.index:
        # print("len: ", len(record))
        annotation_value = tier_as_df.loc[row, tier_id]
        ts_ref1 = tier_as_df.loc[row, 'ts_ref1']
        ts_ref2 = tier_as_df.loc[row, 'ts_ref2']
        # Create a mask for selecting all rows of the record whose time
        # value is between ts_ref1 and ts_ref2
        mask = (ts_ref1 <= record[time_column_name]) & (
            record[time_column_name] <= ts_ref2)
        # NOTE(review): record[mask].index[0] raises IndexError when no record
        # row falls inside the annotation interval — confirm that cannot happen.
        start_index = record[mask].index[0]
        end_index = record[mask].index[-1]
        # print(ts_ref1, ts_ref2, record.loc[start_index, time_column_name],
        #       record.loc[end_index, time_column_name],
        #       start_index, end_index)
        record.loc[start_index:end_index, tier_id] = annotation_value
    # NOTE(review): final sort uses the hard-coded 'TimeElapsed' column rather
    # than `time_column_name` — inconsistent if a different column was passed.
    record.sort_values(by='TimeElapsed', inplace=True)
    record = record.reset_index(drop=True)
    return record, tier_as_df
def transfer_eaf_tier_to_record_dtype_num(eaf_fname, tier_id, record,
                                          time_column_name='TimeElapsed'):
    """Numeric variant of ``transfer_eaf_tier_to_record``.

    Identical to ``transfer_eaf_tier_to_record`` except that the annotation
    values are cast to ``float`` before being written into the record.

    Args:
        eaf_fname (str): Path to the .eaf file.
        tier_id (str): ID of the tier whose annotations are transferred.
        record (pandas.DataFrame): Time-indexed record containing a column
            *time_column_name* (assumed to be in seconds -- TODO confirm).
        time_column_name (str): Name of the time column in *record*.

    Returns:
        tuple: ``(record, tier_as_df)``.
    """
    tier_as_df = get_eaf_tier_as_df(eaf_fname, tier_id)
    # EAF time slots are stored in milliseconds; the record uses seconds.
    tier_as_df["ts_ref1"] /= 1000
    tier_as_df["ts_ref2"] /= 1000
    record.sort_values(by=time_column_name, inplace=True)
    record = record.reset_index(drop=True)
    for row in tier_as_df.index:
        # Cast to float: this tier is expected to hold numeric annotations.
        annotation_value = float(tier_as_df.loc[row, tier_id])
        ts_ref1 = tier_as_df.loc[row, 'ts_ref1']
        ts_ref2 = tier_as_df.loc[row, 'ts_ref2']
        # Rows whose time stamp lies inside [ts_ref1, ts_ref2].
        mask = (ts_ref1 <= record[time_column_name]) & (
            record[time_column_name] <= ts_ref2)
        matched = record[mask].index
        if len(matched) == 0:
            # Robustness fix: empty interval; original raised IndexError.
            continue
        record.loc[matched[0]:matched[-1], tier_id] = annotation_value
    # Bug fix: sort by the configurable time column; the original sorted by
    # the hard-coded 'TimeElapsed' regardless of time_column_name.
    record.sort_values(by=time_column_name, inplace=True)
    record = record.reset_index(drop=True)
    return record, tier_as_df
def write_to_file(data, column_name, base_path=None, FPS=40):
    """Write a dict of per-video feature sequences to new EAF files.

    For each key (video file name) the value sequence is run-length
    compressed into changepoints and written next to the video's existing
    .eaf file as ``<video>_new.eaf``. The function returns a dict with None
    values to adhere to the data pipeline convention.

    Args:
        data (dict): Keys are the video file names, values are the feature
            sequences to be added to the eaf file (only ``value[0]`` is
            used -- the first feature array per file).
        column_name (str): Name under which the feature is annotated.
        base_path (str): Path to the base folder of the video files.
        FPS (int): Frames per second used to convert indices to milliseconds.

    Returns:
        dictionary: Keys are the video file names, values are None.

    Raises:
        FileNotFoundError: If a video directory or its .eaf file is missing.
    """
    return_data = {}
    for file in data:
        value = data[file][0]
        video = file.split('/')[0]
        return_data[file] = None
        video_dir = base_path + video
        if not os.path.isdir(video_dir):
            raise FileNotFoundError(
                "Directory {0} does not exist!".format(video_dir))
        eaf_file_found = False
        for f in os.listdir(video_dir):
            if not f.endswith('.eaf'):
                continue
            eaf_file_found = True
            v = np.array(value)
            # First index of every new run of equal values.
            change_idx = np.where(v[:-1] != v[1:])[0] + 1
            # Keep the first sample, each changepoint and the last sample.
            v_compressed = np.concatenate(
                [v[0].reshape(-1), v[change_idx], v[-1].reshape(-1)], axis=0)
            ind_compressed = np.concatenate(
                [np.zeros(1), change_idx,
                 np.array(v.shape[0]).reshape(-1)], axis=0)
            # Frame index -> milliseconds.
            t_compressed = ind_compressed / FPS * 1000
            name = np.repeat(np.array([column_name]),
                             ind_compressed.shape[0])
            changepoints = pd.DataFrame(data={"column": name,
                                              "value": v_compressed,
                                              "time": t_compressed
                                              .astype(int)})
            write_to_eaf(changepoints,
                         video_dir + "/" + video + ".eaf",
                         video_dir + "/" + video + "_new.eaf")
        if not eaf_file_found:
            raise FileNotFoundError("No .eaf file found in {0}!".format(
                video_dir))
    # Bug fix: the original had bare `return` statements that exited after
    # the first entry, so the documented return_data dict was unreachable
    # whenever `data` was non-empty.
    return return_data
def to_eaf(state_seq, decode_df, states, eaf_file, output_dir="."):
    """Convert a decoded state sequence into an annotated EAF file.

    Args:
        state_seq (sequence): Decoded state indices, one per record row.
        decode_df (pandas.DataFrame): Record the sequence was decoded from;
            gains/overwrites a "Decoded" column (mutated in place).
        states (sequence): State names, indexed by the values in state_seq.
        eaf_file (str): Path to the input .eaf file.
        output_dir (str): Directory where the "-decoded.eaf" file is written.

    Returns:
        None. Writes ``<output_dir>/<basename>-decoded.eaf`` as a side effect.
    """
    # Map state index -> state name.
    states_dict = dict(enumerate(states))
    # Replace state indices by state names.
    decode_df["Decoded"] = state_seq
    decode_df.replace({"Decoded": states_dict}, inplace=True)
    # Note: the original also built an unused `state_seq_decoded` list here;
    # it has been removed.
    changepoints = get_changepoints(decode_df,
                                    column_names=['Decoded'])
    eaf_path_out = (output_dir + "/" +
                    eaf_file.split("/")[-1][:-4] +
                    "-decoded.eaf")
    write_to_eaf(changepoints, eaf_fname_in=eaf_file,
                 eaf_fname_out=eaf_path_out)
| {"hexsha": "b68fda8246bdf8d100b956f0505793cfcb79fbb3", "size": 17541, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyhsmm/util/eaf_processing.py", "max_stars_repo_name": "dpaysan/pyhsmm", "max_stars_repo_head_hexsha": "2c9d57651f65d4a7b995ee7a1215da456bc410c8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pyhsmm/util/eaf_processing.py", "max_issues_repo_name": "dpaysan/pyhsmm", "max_issues_repo_head_hexsha": "2c9d57651f65d4a7b995ee7a1215da456bc410c8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyhsmm/util/eaf_processing.py", "max_forks_repo_name": "dpaysan/pyhsmm", "max_forks_repo_head_hexsha": "2c9d57651f65d4a7b995ee7a1215da456bc410c8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.2420382166, "max_line_length": 91, "alphanum_fraction": 0.5623966707, "include": true, "reason": "import numpy", "num_tokens": 3827} |
!-----------------------------------------------------------------------------
! Computation of the Q-factor (Q-criterion) for vortex identification.
! The Gauss-point value 0.5*(div(u)**2 - sum_{i/=j} du_j/dx_i * du_i/dx_j)
! is weighted by the shape functions and assembled into the nodal array
! a%qfac through the element-loop hook mechanism of the base elmope.
!-----------------------------------------------------------------------------
module Mod_nsm_ComputeQfactor
use typre
use Mod_PointerSetter
use Mod_nsm_BaseElmope
use Mod_nsm_InterpolateGradients
implicit none
private
public SetPointersComputeQFactor
! Pointer setter: registers the Q-factor hooks into the element loop
! when the field is scheduled for postprocessing at the current step.
type, extends(PointerSetter) :: SPComputeQFactor
contains
procedure :: SpecificSet => SpecificSetComputeQFactor
end type
type(SPComputeQFactor) :: SetPointersComputeQFactor
! elqfac: elemental contributions (1 x mnode); gpqfac: Gauss-point value.
real(rp), allocatable :: elqfac(:,:)
real(rp):: gpqfac(1)
contains
!----------------------------------------------------------------------------
!Setting Pointers
subroutine SpecificSetComputeQFactor(d)
implicit none
class(SPComputeQFactor) :: d
logical :: aux_logic
!Logics for deciding if we need to compute Q-factor
! NOTE(review): npp_stepi(21) is assumed to be the postprocess step
! interval of output #21 (the Q-factor) -- confirm against the
! postprocess numbering used elsewhere in the code.
aux_logic = .false.
if (a%npp_stepi(21) /= 0) then
if (mod(a%istep,a%npp_stepi(21))==0) then
aux_logic = .true.
endif
endif
!If Q-factor needs to be computed
if (aux_logic .eqv. .true.) then
! Velocity gradients (grvel, divvel) are needed at the Gauss points.
call SetPointersInterpolateGradients%Set
! Register allocation, per-Gauss-point evaluation, assembly and cleanup.
call ConcatenateProcedures(ProcHook_Initializations,AllocQfactor)
call ConcatenateProcedures(ProcHook_InGaussElmatsAssembly,ComputeQfactor)
call ConcatenateProcedures(ProcHook_AssemblyEndite,AssemblyQfactor)
call ConcatenateProcedures(ProcHook_Finalizations,DeallocQfactor)
endif
end subroutine
! Allocate the elemental buffer and zero the global nodal array.
subroutine AllocQfactor
implicit none
call a%Memor%alloc(1,e%mnode,elqfac,'elqfac','nsm_EndElmope')
a%qfac(:) = 0_rp
end subroutine
! Evaluate the Q-factor at the current Gauss point and accumulate the
! shape-function-weighted contribution into the elemental buffer.
subroutine ComputeQfactor
implicit none
integer(ip) :: inode,idime,jdime
real(rp) :: gpdef
! gpdef accumulates the off-diagonal product du_j/dx_i * du_i/dx_j.
gpdef = 0.0_rp
do idime=1,e%ndime
do jdime=1,e%ndime
if (idime/=jdime) then
gpdef = gpdef + grvel(jdime,idime)*grvel(idime,jdime)
endif
enddo
enddo
! Q = 0.5*(div(u)^2 - gpdef), integrated with the volume weight dvol.
gpqfac(1) = 0.5_rp*(divvel**2_ip - gpdef)
do inode=1,e%pnode
elqfac(1,inode)=elqfac(1,inode) + e%shape(inode,e%igaus)*gpqfac(1)*dvol
end do
end subroutine
! Scatter the elemental contributions into a%qfac and reset the buffer
! for the next element.
subroutine AssemblyQfactor
implicit none
integer(ip) :: inode
call a%Mesh%AssemblyToArray(e,1_ip,elqfac,a%qfac)
elqfac(:,:)=0_rp
end subroutine
! Release the elemental buffer.
subroutine DeallocQfactor
implicit none
call a%Memor%dealloc(1,e%mnode,elqfac,'elqfac','nsm_EndElmope')
end subroutine
end module
| {"hexsha": "7632a7ce53bbe52ddcbb21784705386758a0aa90", "size": 2457, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "Sources/modules/nstinc/Elmopes/Mod_nsm_ComputeQfactor.f90", "max_stars_repo_name": "ciaid-colombia/InsFEM", "max_stars_repo_head_hexsha": "be7eb35baa75c31e3b175e95286549ccd84f8d40", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-24T08:19:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-24T08:19:54.000Z", "max_issues_repo_path": "Sources/modules/nstinc/Elmopes/Mod_nsm_ComputeQfactor.f90", "max_issues_repo_name": "ciaid-colombia/InsFEM", "max_issues_repo_head_hexsha": "be7eb35baa75c31e3b175e95286549ccd84f8d40", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Sources/modules/nstinc/Elmopes/Mod_nsm_ComputeQfactor.f90", "max_forks_repo_name": "ciaid-colombia/InsFEM", "max_forks_repo_head_hexsha": "be7eb35baa75c31e3b175e95286549ccd84f8d40", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.9204545455, "max_line_length": 82, "alphanum_fraction": 0.6292226292, "num_tokens": 698} |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np, scipy as sp
import torch
from torch.nn.parameter import Parameter
import scipy.io
import matplotlib.pyplot as plt
# from visualdl import LogWriter
import argparse, sys
sys.path.append("../python/")
from demo import toy_data
from dAE import dAE
__author__ = "TengQi Ye"
__copyright__ = "Copyright 2017"
__credits__ = ["TengQi Ye"]
__license__ = ""
__version__ = "0.0.2"
__maintainer__ = "TengQi Ye"
__email__ = "yetengqi@gmail.com"
__status__ = "Research"
class orthdAE(dAE):
    """Orthogonal denoising auto-encoder.

    A denoising auto-encoder whose encoder/decoder weight matrices are
    masked into blocks so that the first latent subspace is shared among
    all views while each remaining subspace is private to a single view.
    """

    def __init__(self, array_n_input, array_n_hidden,
                 transfer_function=torch.nn.functional.softplus):
        """Initialization.

        Args:
            array_n_input: array or list with the input dimensionality of
                each view.
            array_n_hidden: array or list with the dimensionality of each
                latent subspace (shared space first, the remaining entries
                private -- TODO confirm ordering against the paper).
            transfer_function: activation function.
        """
        # Bug fix: super() must name the *current* class (the original named
        # dAE, which skipped dAE.__init__), and the user-supplied
        # transfer_function must be forwarded instead of hard-coded softplus.
        super(orthdAE, self).__init__(np.sum(array_n_input),
                                      np.sum(array_n_hidden),
                                      transfer_function=transfer_function)
        self.array_n_input = array_n_input
        self.array_n_hidden = array_n_hidden
        self.transfer = transfer_function

        # Weight matrices; the extra column holds the bias. Auto-initialized.
        n_hidden = int(np.sum(self.array_n_hidden))
        n_input = int(np.sum(self.array_n_input))
        self.encoder = Parameter(torch.Tensor(n_hidden, n_input + 1))
        self.decoder = Parameter(torch.Tensor(n_input, n_hidden + 1))

        # Cumulative block boundaries, prefixed with 0.
        # Bug fix: np.insert returns a *new* array; the original discarded
        # its result, so the 0 prefix was never applied.
        array_idx_hidden = np.insert(np.cumsum(self.array_n_hidden), 0, 0)
        array_idx_input = np.insert(np.cumsum(self.array_n_input), 0, 0)

        # --- Encoder mask ---
        encoder_mask = np.zeros((n_hidden, n_input + 1))
        # Shared space connects to all inputs (and the bias).
        encoder_mask[0:array_idx_hidden[1], :] = 1
        # Bug fix: iterate over block indices; the original passed the
        # ndarray itself to range(), a TypeError.
        # NOTE(review): the hidden/input block alignment is kept as written
        # in the original -- verify it matches the intended topology.
        for idx in range(1, len(array_idx_input) - 1):
            encoder_mask[array_idx_hidden[idx - 1]:array_idx_hidden[idx],
                         array_idx_input[idx]:array_idx_input[idx + 1]] = 1
        # Bug fix: no transpose (the mask already has the weight's shape;
        # transposing broke elementwise multiplication), and the masked
        # weight is re-wrapped as a Parameter so it stays trainable.
        self.encoder = Parameter(
            self.encoder.data *
            torch.from_numpy(encoder_mask).to(self.encoder.dtype))

        # --- Decoder mask ---
        decoder_mask = np.zeros((n_input, n_hidden + 1))
        # NOTE(review): only the last (bias) column is globally enabled here,
        # although the original comment claimed the shared space; the shared
        # hidden columns [0:array_idx_hidden[1]] stay masked -- verify.
        decoder_mask[:, -1] = 1
        for idx in range(1, len(array_idx_input) - 1):
            decoder_mask[array_idx_input[idx]:array_idx_input[idx + 1],
                         array_idx_hidden[idx - 1]:array_idx_hidden[idx]] = 1
        # Bug fix: the original overwrote self.encoder here instead of
        # masking the decoder.
        self.decoder = Parameter(
            self.decoder.data *
            torch.from_numpy(decoder_mask).to(self.decoder.dtype))

    def forward(self, x):
        # NOTE(review): the original called self.encoder(), but a Parameter
        # is not callable; reconstructed as the usual affine encoding with
        # the bias in the last weight column -- confirm against dAE.forward.
        bias = torch.ones(x.size(0), 1, dtype=x.dtype)
        self.hidden = self.transfer(
            torch.cat([x, bias], dim=1).mm(self.encoder.t()))
        return x
import numpy as np
import pickle
import time
class policy_iteration:
    """Tabular policy iteration (iterative evaluation + greedy improvement).

    The environment model is the nested mapping
    ``prs[state][action] -> list of (prob, reward, next_state, done)``
    transition tuples. ``policy`` maps each state name to a 1-D array of
    action probabilities.
    """

    def __init__(self, policy, state_name, action_name, prs,
                 discount=None, theta=None, end_flag=None):
        # Model and policy.
        self.policy = policy
        self.state_name = state_name
        self.action_name = action_name
        self.prs = prs
        self.state_len = len(self.state_name)
        self.action_len = len(self.action_name)
        # Hyper-parameters.
        self.discount = discount
        self.theta = theta              # evaluation convergence threshold
        self.delta = 0
        self.end_flag = end_flag        # only allowed terminal state (or None)
        # Book-keeping counters and timers.
        self.ite_num = 0
        self.iteration_num = 0
        self.total_iteration = 0
        self.time = 0
        self.total_time = 0

    def init(self):
        """Allocate the value-function and action-value scratch buffers."""
        self._V = np.zeros(len(self.state_name), dtype=np.float16)
        self.action_value = np.zeros(len(self.action_name), dtype=np.float16)
        return

    def set_up(self, discount=None, theta=None, init=True):
        """Update hyper-parameters and optionally reset all counters."""
        # Idiom fix: compare against None with ``is not None``.
        if discount is not None:
            self.discount = discount
        if theta is not None:
            self.theta = theta
        if init is True:
            self.delta = 0
            self.ite_num = 0
            self.iteration_num = 0
            self.total_iteration = 0
            self.time = 0
            self.total_time = 0
        return

    def policy_evaluation(self, policy, V, state_name, action_name, prs,
                          discount, theta, iteration):
        """Iteratively evaluate *policy*, updating V in place.

        Sweeps all states up to *iteration* times (default: 3 * #states)
        and stops early once the largest value change is <= *theta*.
        Returns the (mutated) value array V.
        """
        if iteration is None:
            iteration = int(len(state_name) * 3)
        for i in range(iteration):
            delta = 0
            for s in range(len(state_name)):
                v = 0
                for a, action_prob in enumerate(policy[state_name[s]]):
                    for prob, r, next_s, done in prs[state_name[s]][action_name[a]]:
                        # Bug fix: ``action_prob`` is already the scalar
                        # probability of action ``a`` (policy rows are 1-D,
                        # see np.eye in policy_improvement); the original
                        # indexed it with [a], which fails on scalars.
                        v += action_prob * prob * (r + discount * V[next_s])
                delta = max(delta, np.abs(v - V[s]))
                V[s] = v
            if delta <= theta:
                break
        return V

    def policy_improvement(self, policy, action_value, V, state_name,
                           action_name, prs, discount, flag, end_flag):
        """Make *policy* greedy w.r.t. V.

        Returns the updated policy and a stability flag: *flag* becomes
        False as soon as any state's best action changed with a strictly
        different value.
        """
        for s in range(len(state_name)):
            # Bug fix: reset the scratch buffer for every state; the
            # original accumulated action values across states.
            action_value[:] = 0
            old_a = np.argmax(policy[state_name[s]])
            old_action_value = 0
            for a in range(len(action_name)):
                for prob, r, next_s, done in prs[state_name[s]][action_name[a]]:
                    action_value[a] += prob * (r + discount * V[next_s])
                    # Forbid transitions that terminate anywhere but the
                    # designated end state.
                    if done and next_s != end_flag and end_flag is not None:
                        action_value[a] = float('-inf')
            best_a = np.argmax(action_value)
            best_action_value = np.max(action_value)
            for prob, r, next_s, done in prs[state_name[s]][action_name[old_a]]:
                old_action_value += prob * (r + discount * V[next_s])
            if old_a != best_a and old_action_value != best_action_value:
                flag = False
            policy[state_name[s]] = np.eye(len(action_name), dtype=np.int8)[best_a]
        return policy, flag

    def learn(self, iteration=None, path=None, one=True):
        """Alternate evaluation and improvement until the policy is stable."""
        self.delta = 0
        # Bug fix: ``iteration`` may be None (the evaluation default);
        # resolve it once so the modulo arithmetic below cannot fail.
        if iteration is None:
            iteration = int(self.state_len * 3)
        while True:
            t1 = time.time()
            flag = True
            # Bug fix: the attributes are named state_name/action_name; the
            # original referenced the non-existent self.state/self.action.
            V = self.policy_evaluation(self.policy, self._V, self.state_name,
                                       self.action_name, self.prs,
                                       self.discount, self.theta, iteration)
            self.policy, flag = self.policy_improvement(
                self.policy, self.action_value, V, self.state_name,
                self.action_name, self.prs, self.discount, flag,
                self.end_flag)
            # Checkpoint roughly every tenth of ``iteration`` rounds
            # (behaviorally equivalent to the original branchy computation).
            d = max(1, iteration // 10)
            # NOTE(review): iteration_num is only synchronized to ite_num
            # inside save(), so this condition is True until the first save
            # -- looks unintended but preserved.
            if self.iteration_num % d == 0:
                # Bug fix: parenthesized the modulus; the original
                # ``% iteration*2 == 0`` multiplied the remainder instead.
                if path is not None and self.iteration_num % (iteration * 2) == 0:
                    self.save(path, self.iteration_num, one)
            self.ite_num += 1
            self.total_iteration += 1
            t2 = time.time()
            self.time += (t2 - t1)
            if flag:
                # NOTE(review): this keeps only the *fractional* part of the
                # elapsed time and rounds it to 0 or 1 second -- looks
                # unintended but preserved.
                self.time = self.time - int(self.time)
                if self.time < 0.5:
                    self.time = int(self.time)
                else:
                    self.time = int(self.time) + 1
                self.total_time += self.time
                print()
                print('time:{0}s'.format(self.time))
                return

    def save_policy(self, path):
        """Pickle only the policy to ``<path>.dat``."""
        # Fix: close the file handle (the original leaked it).
        with open(path + '.dat', 'wb') as output_file:
            pickle.dump(self.policy, output_file)
        return

    def save(self, path, i=None, one=True):
        """Checkpoint the learner state to ``<path>.dat`` (or a numbered file)."""
        if one is True:
            output_file = open(path + '.dat', 'wb')
        else:
            output_file = open(path + '-{0}.dat'.format(i + 1), 'wb')
        self.iteration_num = self.ite_num
        pickle.dump(self.state_len, output_file)
        pickle.dump(self.action_len, output_file)
        pickle.dump(self._V, output_file)
        pickle.dump(self.action_value, output_file)
        pickle.dump(self.discount, output_file)
        pickle.dump(self.theta, output_file)
        pickle.dump(self.end_flag, output_file)
        pickle.dump(self.iteration_num, output_file)
        pickle.dump(self.total_iteration, output_file)
        pickle.dump(self.total_time, output_file)
        output_file.close()
        return

    def restore(self, path):
        """Restore learner state saved by :meth:`save` (same field order)."""
        input_file = open(path, 'rb')
        self.state_len = pickle.load(input_file)
        self.action_len = pickle.load(input_file)
        self._V = pickle.load(input_file)
        self.action_value = pickle.load(input_file)
        self.discount = pickle.load(input_file)
        self.theta = pickle.load(input_file)
        self.end_flag = pickle.load(input_file)
        self.iteration_num = pickle.load(input_file)
        self.total_iteration = pickle.load(input_file)
        self.total_time = pickle.load(input_file)
        input_file.close()
        return
| {"hexsha": "7dec8e22ef26376759bdbbe15af824d69989c354", "size": 5757, "ext": "py", "lang": "Python", "max_stars_repo_path": "Note/RL/DP/policy_iteration.py", "max_stars_repo_name": "7NoteDancing/Note", "max_stars_repo_head_hexsha": "d1150c313aa695efb32181638b9b35fbad5f29ed", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-04-30T02:48:40.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-30T02:48:40.000Z", "max_issues_repo_path": "Note/RL/DP/policy_iteration.py", "max_issues_repo_name": "7NoteDancing/Note", "max_issues_repo_head_hexsha": "d1150c313aa695efb32181638b9b35fbad5f29ed", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Note/RL/DP/policy_iteration.py", "max_forks_repo_name": "7NoteDancing/Note", "max_forks_repo_head_hexsha": "d1150c313aa695efb32181638b9b35fbad5f29ed", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.1419354839, "max_line_length": 151, "alphanum_fraction": 0.5685252736, "include": true, "reason": "import numpy", "num_tokens": 1223} |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""LogNormal Distribution"""
import numpy as np
from mindspore.ops import operations as P
from mindspore.common import dtype as mstype
import mindspore.nn.probability.bijector as msb
import mindspore.nn.probability.distribution as msd
from ._utils.utils import check_distribution_name
from ._utils.custom_ops import exp_generic, expm1_generic, log_generic
class LogNormal(msd.TransformedDistribution):
    r"""
    LogNormal distribution.

    A random variable is log-normally distributed when its logarithm follows
    a Normal distribution; accordingly, this class is realized as the
    exponential (Exp-bijector) transformation of a Normal distribution.

    Args:
        loc (int, float, list, numpy.ndarray, Tensor, Parameter): mean of
            the underlying Normal distribution.
        scale (int, float, list, numpy.ndarray, Tensor, Parameter): standard
            deviation of the underlying Normal distribution.
        seed (int): the seed used in sampling. The global seed is used if it
            is None. Default: None.
        dtype (mindspore.dtype): type of the distribution.
            Default: mstype.float32.
        name (str): the name of the distribution. Default: 'LogNormal'.

    Note:
        `scale` must be greater than zero.
        `dist_spec_args` are `loc` and `scale`.
        `dtype` must be a float type because LogNormal distributions are
        continuous.

        When `loc` and/or `scale` are omitted at construction time, they
        must be supplied on every function call. All probability functions
        (`prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`,
        `log_survival`), the statistics (`mean`, `sd`, `var`, `mode`,
        `entropy`), the divergences (`kl_loss`, `cross_entropy`, which only
        support the 'Normal'/'LogNormal' counterpart) and `sample` accept
        optional trailing `loc`/`scale` arguments that override the stored
        parameters, mirroring the Normal distribution API.

    Examples:
        >>> # Two independent LogNormal distributions.
        >>> n = msd.LogNormal([3.0, 3.0], [4.0, 4.0], dtype=mstype.float32)
        >>> # Parameter-free construction; pass loc/scale per call.
        >>> n = msd.LogNormal(dtype=mstype.float32)
    """

    def __init__(self,
                 loc=None,
                 scale=None,
                 seed=0,
                 dtype=mstype.float32,
                 name="LogNormal"):
        """
        Constructor of LogNormal distribution.
        """
        # Realized as Exp(Normal(loc, scale)).
        super(LogNormal, self).__init__(distribution=msd.Normal(loc, scale, dtype=dtype),
                                        bijector=msb.Exp(),
                                        seed=seed, name=name)
        # Constant log(2*pi), reused by the entropy formula.
        self.log_2pi = np.log(2 * np.pi)

        # Generic math primitives and MindSpore ops used by the methods below.
        self.exp = exp_generic
        self.expm1 = expm1_generic
        self.log = log_generic
        self.const = P.ScalarToArray()
        self.erf = P.Erf()
        self.fill = P.Fill()
        self.shape = P.Shape()
        self.sq = P.Square()
        self.sqrt = P.Sqrt()
        self.zeroslike = P.ZerosLike()

    @property
    def loc(self):
        """Distribution parameter for the pre-transformed mean."""
        return self.distribution("mean")

    @property
    def scale(self):
        """Distribution parameter for the pre-transformed standard deviation."""
        return self.distribution("sd")

    def extend_repr(self):
        # Scalar-batch distributions report their parameters; otherwise
        # only the broadcast batch shape is shown.
        if not self.is_scalar_batch:
            return f'batch_shape = {self._broadcast_shape}'
        return f'loc = {self._mean_value}, scale = {self._sd_value}'

    def _mean(self, loc=None, scale=None):
        r"""E[X] = exp(\mu + \sigma^2 / 2) for underlying Normal(\mu, \sigma)."""
        mu, sigma = self._check_param_type(loc, scale)
        variance = self.distribution("var", mean=mu, sd=sigma)
        return self.exp(mu + 0.5 * variance)

    def _mode(self, loc=None, scale=None):
        r"""Mode(X) = exp(\mu - \sigma^2)."""
        mu, sigma = self._check_param_type(loc, scale)
        variance = self.distribution("var", mean=mu, sd=sigma)
        return self.exp(mu - variance)

    def _var(self, loc=None, scale=None):
        r"""Var(X) = (exp(\sigma^2) - 1) * exp(2\mu + \sigma^2)."""
        mu, sigma = self._check_param_type(loc, scale)
        variance = self.distribution("var", mean=mu, sd=sigma)
        return self.expm1(variance) * self.exp(2. * mu + variance)

    def _entropy(self, loc=None, scale=None):
        r"""
        Evaluate entropy.

        .. math::
            H(X) = \mu + 0.5 + \log(\sigma) + 0.5 \log(2\pi)
        """
        mu, sigma = self._check_param_type(loc, scale)
        return mu + 0.5 + self.log(sigma) + 0.5 * self.log_2pi

    def _cross_entropy(self, dist, loc_b, scale_b, loc_a=None, scale_a=None):
        r"""
        Evaluate cross entropy between two LogNormal distributions:
        H(a, b) = H(a) + KL(a || b).

        Args:
            dist (str): The type of the distributions. Should be "LogNormal"
                in this case.
            loc_b (Tensor): The loc of distribution b.
            scale_b (Tensor): The scale of distribution b.
            loc_a (Tensor): The loc of distribution a. Default: None.
            scale_a (Tensor): The scale of distribution a. Default: None.
        """
        check_distribution_name(dist, 'LogNormal')
        entropy_a = self._entropy(loc_a, scale_a)
        return entropy_a + self._kl_loss(dist, loc_b, scale_b, loc_a, scale_a)

    def _kl_loss(self, dist, loc_b, scale_b, loc_a=None, scale_a=None):
        r"""
        Evaluate LogNormal-LogNormal KL divergence KL(a || b), which equals
        the KL divergence of the underlying Normal distributions.

        Args:
            dist (str): The type of the distributions. Should be "LogNormal"
                in this case.
            loc_b (Tensor): The loc of distribution b.
            scale_b (Tensor): The scale of distribution b.
            loc_a (Tensor): The loc of distribution a. Default: None.
            scale_a (Tensor): The scale of distribution a. Default: None.
        """
        check_distribution_name(dist, 'LogNormal')
        return self.distribution("kl_loss", 'Normal', loc_b, scale_b, loc_a, scale_a)
| {"hexsha": "69d4059a3417fb51ebe3c8e6b71c4a2ea65513fe", "size": 11479, "ext": "py", "lang": "Python", "max_stars_repo_path": "mindspore/nn/probability/distribution/log_normal.py", "max_stars_repo_name": "HappyKL/mindspore", "max_stars_repo_head_hexsha": "479cb89e8b5c9d859130891567038bb849a30bce", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-10-18T12:27:45.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-18T12:27:45.000Z", "max_issues_repo_path": "mindspore/nn/probability/distribution/log_normal.py", "max_issues_repo_name": "ReIadnSan/mindspore", "max_issues_repo_head_hexsha": "c3d1f54c7f6d6f514e5748430d24b16a4f9ee9e5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mindspore/nn/probability/distribution/log_normal.py", "max_forks_repo_name": "ReIadnSan/mindspore", "max_forks_repo_head_hexsha": "c3d1f54c7f6d6f514e5748430d24b16a4f9ee9e5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.6398305085, "max_line_length": 120, "alphanum_fraction": 0.5697360397, "include": true, "reason": "import numpy", "num_tokens": 2715} |
[STATEMENT]
lemma sat_precond_as_proj_4:
fixes fm1 fm2 vs
assumes "fm2 \<subseteq>\<^sub>f fm1"
shows "(fmrestrict_set vs fm2 \<subseteq>\<^sub>f fm1)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. fmrestrict_set vs fm2 \<subseteq>\<^sub>f fm1
[PROOF STEP]
using assms fmpred_restrict_set fmsubset_alt_def
[PROOF STATE]
proof (prove)
using this:
fm2 \<subseteq>\<^sub>f fm1
fmpred ?P ?m \<Longrightarrow> fmpred ?P (fmrestrict_set ?A ?m)
(?m \<subseteq>\<^sub>f ?n) = fmpred (\<lambda>k v. fmlookup ?n k = Some v) ?m
goal (1 subgoal):
1. fmrestrict_set vs fm2 \<subseteq>\<^sub>f fm1
[PROOF STEP]
by metis | {"llama_tokens": 269, "file": "Factored_Transition_System_Bounding_FmapUtils", "length": 2} |
% Project Specifications
\chapter{GEMM and $col2im$}
The previous chapter discussed different ways to view the convolution operation and its cousin,
the transposed convolution, both at the conceptual and at the implementation level. As far as implementation is concerned,
we have seen that both operations can be implemented with a single matrix multiplication. In other words,
matrix multiplication is the main computational burden of these operations. Meanwhile, real world applications
often involve very large matrices. Therefore, as a low-level operation, the implementation of matrix
multiplication is often heavily optimized.
\gls{gemm} is considered as the \textit{de facto} standard operation
contained in the \gls{blas} specification. It has many implementations for
different platforms which exhaustively utilize various optimizations like \gls{simd} instructions,
parallelism, cache-awareness, etc.
\gls{gemm} is defined as
$$C \leftarrow \alpha op(A) op(B) + \beta C,$$
where $\alpha, \beta \in \mathbb{R}$, $op(X)$ is either $X$ or $X^\intercal$. In our particular case for
transposed convolution, $\alpha = 1$, $\beta = 0$, $op(X) = X$, so \gls{gemm} reduces to the basic
matrix multiplication $C \leftarrow A B$.
A subtle detail in the implementation of \gls{gemm} is the order of storage of matrix entries. There are two
different orders to store the same matrix: row-major order or column-major order. In row-major order,
entries of rows are stored contiguously in memory, while in column-major order, entries of columns are
consecutive to each other in memory.
A naïve C implementation of \gls{gemm} is shown in Listing \ref{code:naivegemm}. Here,
\mintinline{c}{lda}, \mintinline{c}{ldb} and \mintinline{c}{ldc} are
the leading dimensions of matrix $A$, $B$ and $C$, respectively.
\begin{code}
\begin{minted}{c}
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
float sum = 0;
for (int l = 0; l < k; l++)
sum += a[l*lda+i] * b[l*ldb+j];
c[j*ldc+i] = beta * c[j*ldc+i] + alpha * sum;
}
}
\end{minted}
\captionof{listing}{Naïve C Implementation of \gls{gemm}}
\label{code:naivegemm}
\end{code}
This implementation is straightforward and not very efficient, but it is a good starting point to guide
us through the rest of the design.
Another operation used by transposed convolution is $col2im$. It cherry-picks the elements computed by
\gls{gemm} and places them in the destination image. To describe $col2im$, it is necessary to introduce
the variables used for transposed convolution.
\begin{itemize}
\item \mintinline{c}{inputHeight} and \mintinline{c}{inputWidth} are the height and width of each input
channel (sometimes also called input plane)
\item \mintinline{c}{nInputChannel} is the number of input channels in total, thus the input is a
$3D$ tensor with dimensions \mintinline{c}{(nInputChannel, inputHeight, inputWidth)}
\item With \mintinline{c}{m = inputHeight * inputWidth} and \mintinline{c}{k = nInputChannel}, the input
tensor is stored as a matrix $A \in \mathbb{R}^{m \times k}$
\item \mintinline{c}{kernelH} and \mintinline{c}{kernelW} are the height and width of each kernel channel
\item Each kernel is also a $3D$ tensor with dimensions \mintinline{c}{(nInputChannel, kernelH, kernelW)}
\item \mintinline{c}{nOutputChannel} is the number of output channels
\item There are \mintinline{c}{nOutputChannel}'s of kernels, each responsible for producing an output channel
\item With \mintinline{c}{k = nInputChannel} and \mintinline{c}{n = nOutputChannel * kernelH * kernelW},
the kernel tensor is stored as a matrix $B \in \mathbb{R}^{k \times n}$
\end{itemize}
The code of $col2im$ is given in listing \ref{code:col2im}:
\begin{code}
\begin{minted}{c}
// content of data_im set to 0 already
const int n = nOutputChannel * kernelH * kernelW;
for (int j = 0; j < n; ++j) {
int w_offset = j % kernelW;
int h_offset = (j / kernelW) % kernelH;
int c_im = j / kernelH / kernelW;
for (int h_col = 0; h_col < inputHeight; ++h_col) {
for (int w_col = 0; w_col < inputWidth; ++w_col) {
int h_im = h_col * strideH - padH + h_offset * dilationH;
int w_im = w_col * strideW - padW + w_offset * dilationW;
if (h_im >= 0 && h_im < outputHeight &&
w_im >= 0 && w_im < outputWidth) {
data_im[(c_im * outputHeight + h_im) * outputWidth + w_im] +=
data_col[(j * inputHeight + h_col) * inputWidth + w_col];
}
}
}
}
\end{minted}
\captionof{listing}{C Implementation of $col2im$}
\label{code:col2im}
\end{code}
Here \mintinline{c}{data_im} is the target image while \mintinline{c}{data_col} is the result of \gls{gemm}, namely, \mintinline{c}{c} in listing \ref{code:naivegemm}.
The outermost loop iterates through the columns of $C \in \mathbb{R}^{m \times n}$. The two nested loops
iterate over the rows of $C$, since \mintinline{c}{m = inputHeight * inputWidth}. \mintinline{c}{c_im} is the
index of the channel in the output image. %\mintinline{c}{h_offset} and \mintinline{c}{w_offset} are the offset in each
For transposed convolution, \mintinline{c}{data_im} is typically smaller than
\mintinline{c}{data_col}. This leads to a key optimization idea: it is possible to merge \gls{gemm} and
$col2im$ together and compute transposed convolution by doing \gls{gemm} sparsely. In doing so,
\mintinline{c}{data_col} can be abandoned and the results will be directly placed in \mintinline{c}{data_im},
that is, there is no need for an extra large buffer for intermediate results.
\begin{code}
\begin{minted}{c}
int j = 0;
for (int c_im = 0; c_im < nOutputChannel; c_im++) {
for (int h_offset = 0; h_offset < kernelH; h_offset++) {
for (int w_offset = 0; w_offset < kernelW; w_offset++) {
int i = 0;
for (long h_col = 0; h_col < inputHeight; h_col++) {
for (long w_col = 0; w_col < inputWidth; w_col++) {
int h_im = h_col * strideH - padH + h_offset * dilationH;
int w_im = w_col * strideW - padW + w_offset * dilationW;
if (h_im >= 0 && h_im < outputHeight &&
w_im >= 0 && w_im < outputWidth) {
float sum = 0;
for (long l = 0; l < k; l++)
sum += a[l*lda + i] * b[l*ldb + j];
int idx = (c_im * outputHeight + h_im) *
outputWidth + w_im;
data_im[idx] += sum;
}
i++;
}
}
j++;
}
}
}
\end{minted}
\captionof{listing}{C Implementation of $transposed\_convolution$}
\label{code:transposed_convolution_in_c}
\end{code}
The key to understanding this merged code is to notice that the for-loop indexed by \mintinline{c}{j} in
\mintinline{c}{gemm} can be split into three nested for-loops indexed by \mintinline{c}{c_im},
\mintinline{c}{h_offset} and \mintinline{c}{w_offset}.
Similarly, the for-loop indexed by \mintinline{c}{i} in \mintinline{c}{gemm} is equivalent to the two
nested for-loops indexed by \mintinline{c}{h_col} and \mintinline{c}{w_col}, same as in \mintinline{c}{col2im}.
This last C code in listing \ref{code:transposed_convolution_in_c} serves as the blueprint for implementing
transposed convolution on \gls{fpga} with Verilog. It can be seen from the code listing
that the output channels can be computed in groups or even by each channel individually.
This is a convenient fact when the weights of a layer cannot be fit into the
\gls{bram} on the \gls{fpga} chip all at once.
\clearpage %force the next chapter to start on a new page. Keep that as the last line of your chapter!
| {"hexsha": "e67844182539ac1740ea5b2825ef6be47b090d07", "size": 7818, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "chapters/gemm.tex", "max_stars_repo_name": "lambdalainen/metropolia-thesis-latex", "max_stars_repo_head_hexsha": "d7e705ad24f1f8065b2e7f026db5fdc90a7c8b3a", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "chapters/gemm.tex", "max_issues_repo_name": "lambdalainen/metropolia-thesis-latex", "max_issues_repo_head_hexsha": "d7e705ad24f1f8065b2e7f026db5fdc90a7c8b3a", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chapters/gemm.tex", "max_forks_repo_name": "lambdalainen/metropolia-thesis-latex", "max_forks_repo_head_hexsha": "d7e705ad24f1f8065b2e7f026db5fdc90a7c8b3a", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.7662337662, "max_line_length": 167, "alphanum_fraction": 0.6774111026, "num_tokens": 2194} |
import queue
import time
try:
import cupy as xp
except ImportError:
import numpy as xp
import numpy as np
from common import plot
import common.const
import comms.const
# Header length in symbols: the PN sequence plus one extra symbol
# (presumably a framing/sync symbol -- TODO confirm against the transmitter).
L_head = len(comms.const.PN_SEQ) + 1
# Message body length in symbols.
L_msg = comms.const.MSG_BYTES * 8 // comms.const.SYMBOL_SIZE
# Total number of decimated samples shown on the plot (symbols * samples/symbol).
L_plot = (L_head + L_msg) * comms.const.L_SYM
class CorrelationPlot(plot.PlotBase):
    """Live plot of the correlator input signal and correlation results.

    Frames are handed off through a queue to a daemon (managed by
    plot.PlotBase) that owns the matplotlib figure, so the signal path is
    never blocked by the GUI.
    """

    def plot(self, corr_in, corr_pn, corr_orth, thresh):
        """Queue one window of correlation data for display.

        If the plot queue is full the frame is silently dropped rather than
        blocking the caller.
        """
        # When running on GPU (cupy), copy the arrays back to host memory.
        if hasattr(xp, 'asnumpy'):
            corr_in = xp.asnumpy(corr_in)
            corr_pn = xp.asnumpy(corr_pn)
            corr_orth = xp.asnumpy(corr_orth)
            thresh = xp.asnumpy(thresh)

        try:
            self._q.put_nowait((corr_in, corr_pn, corr_orth, thresh))
        except queue.Full:
            pass  # deliberately drop the frame; display is best-effort

    @staticmethod
    def _daemon(q):
        """GUI loop: poll the queue and redraw the two subplots."""
        (pyplot, fig) = plot.PlotBase._daemon_init()

        indices = np.arange(0, L_plot)
        pyplot.suptitle('Correlation Plot')
        (corr_in_ax, corr_in_lines) = (
            CorrelationPlot._define_corr_in_plot(fig, indices))
        (corr_results_ax, corr_results_lines) = (
            CorrelationPlot._define_corr_results_plot(fig, indices))

        while True:
            try:
                (corr_in, corr_pn, corr_orth, thresh) = q.get_nowait()
                plot.PlotBase._auto_ylim(corr_in_ax, corr_in)
                corr_in_lines[0].set_ydata(corr_in[0])
                plot.PlotBase._auto_ylim(corr_results_ax,
                    np.concatenate((corr_pn, corr_orth, thresh)))
                corr_results_lines[0].set_ydata(corr_pn[0])
                corr_results_lines[1].set_ydata(corr_orth[0])
                corr_results_lines[2].set_ydata(thresh[0])

                pyplot.draw()
                pyplot.show(block=False)
            except queue.Empty:
                pass  # nothing new to draw; still service GUI events below

            fig.canvas.flush_events()
            time.sleep(common.const.GUI_UPDATE_TIME)

    @staticmethod
    def _define_corr_in_plot(fig, indices):
        """Create the top subplot: raw correlation input, initialized to zeros."""
        ax = fig.add_subplot(211)
        ax.set_ylabel('Correlation Input')
        ax.set_xticks(np.arange(0, L_plot, L_plot // 10))
        ax.set_xlim(0, L_plot - 1)
        lines = ax.plot(indices, np.zeros(indices.shape), 'k-',
                        linewidth=0.5)
        return (ax, lines)

    @staticmethod
    def _define_corr_results_plot(fig, indices):
        """Create the bottom subplot: PN/orthogonal correlations and threshold."""
        ax = fig.add_subplot(212)
        ax.set_xlabel('Decimated Sample Number')
        ax.set_ylabel('Correlation Results')
        ax.set_xticks(np.arange(0, L_plot, L_plot // 10))
        ax.set_xlim(0, L_plot - 1)
        lines = ax.plot(indices, np.zeros(indices.shape), 'g-',
                        indices, np.zeros(indices.shape), 'r-',
                        indices, np.zeros(indices.shape), 'b-',
                        linewidth=0.5)
        ax.legend(('PN Code Correlation', 'Orthogonal Code Correlation',
                   'Threshold'), fontsize='x-small')
        return (ax, lines)
# -*- coding: utf-8 -*-
"""
Created on 2021/12/09 21:01:03
@File -> mi_partition.py
@Author: luolei
@Email: dreisteine262@163.com
@Describe: 基于离散化的互信息计算
"""
__doc__ = """
参考文献:
Georges A. Darbellay: Predictability: An Information-Theoretic Perspective, Signal Analysis \
and Prediction, 1998.
"""
from sklearn.preprocessing import MinMaxScaler
from matplotlib import pyplot as plt
from typing import List
import numpy as np
# ---- 数据离散化 -----------------------------------------------------------------------------------
class Cell(object):
    """A rectangular cell used in marginal-equiprobable discretization.

    A cell stores the 2-D sample points lying inside its bounds and can split
    itself into four children at the per-dimension marginal split thresholds.
    """

    def __init__(self, arr: np.ndarray) -> None:
        """Initialize.

        :param arr: 2-D array of (x, y) coordinates, shape = (N, D=2);
            may be empty (e.g. ``Cell([])``), which marks a degenerate cell
        """
        if len(arr) == 0:
            self.arr = arr.copy()
            self.N, self.D = 0, 0
        else:
            self.arr = arr.copy()
            self.N, self.D = arr.shape
            if self.D != 2:
                raise ValueError('the input dimension is not equal to 2')

    def _cal_area(self):
        # Area of the bounding rectangle: product of the side lengths.
        area = 1.0
        for i in range(self.D):
            area *= self.bounds[i][1] - self.bounds[i][0]
        self.area = area

    def def_cell_bounds(self, bounds: List[tuple]):
        """Set the cell's bounds (user supplied) and recompute its area.

        :param bounds: list of per-dimension bounds,
            e.g. [(x_min, x_max), (y_min, y_max)]
        """
        self.bounds = bounds
        self._cal_area()

    def cal_proba_dens(self) -> float:
        """Return the probability density measured in sample counts (N / area)."""
        if self.area == 0.0:
            return 0.0
        else:
            return self.N / self.area

    def _get_marginal_partition_thres(self) -> List[float]:
        """Return, per dimension, the threshold splitting the samples into
        (approximately) equal marginal counts.
        """
        if self.N == 1:
            part_thres = list(self.arr.flatten())
        else:
            part_idx = self.N // 2  # index of the split position
            part_thres = []
            for i in range(self.D):
                arr_srt = self.arr[np.argsort(self.arr[:, i]), :]  # sort ascending by dim i
                if self.N % 2 == 0:  # even N: midpoint of the two central values
                    marginal_part_value = (
                        arr_srt[part_idx - 1, i] + arr_srt[part_idx, i]) / 2
                else:
                    # NOTE(review): for odd N this averages the two NEIGHBORS of
                    # the median and skips the median value itself -- confirm
                    # this is intended rather than using arr_srt[part_idx, i].
                    marginal_part_value = (
                        arr_srt[part_idx - 1, i] + arr_srt[part_idx + 1, i]) / 2
                part_thres.append(marginal_part_value)
        return part_thres

    def get_marginal_partition_thres(self):
        # Cache the thresholds on the instance for exec_partition to use.
        self.part_thres = self._get_marginal_partition_thres()

    def exec_partition(self):
        """Execute the marginal-equiprobable split; requires self.N > 0.

        Returns the four child cells (upper-left, upper-right, lower-left,
        lower-right), each with its bounds already set.
        """
        # First split into left and right halves along x.
        part_arr_l = self.arr[
            np.where((self.arr[:, 0] < self.part_thres[0]) &
                     (self.arr[:, 0] >= self.bounds[0][0]))
        ]
        part_arr_r = self.arr[
            np.where((self.arr[:, 0] >= self.part_thres[0])
                     & (self.arr[:, 0] <= self.bounds[0][1]))
        ]

        # Then split each half along y.
        # TODO: this code is repetitive.
        part_arr_ul = part_arr_l[np.where(
            (part_arr_l[:, 1] >= self.part_thres[1]) & (part_arr_l[:, 1] <= self.bounds[1][1]))]
        part_arr_ll = part_arr_l[np.where(
            (part_arr_l[:, 1] < self.part_thres[1]) & (part_arr_l[:, 1] >= self.bounds[1][0]))]
        part_arr_ur = part_arr_r[np.where(
            (part_arr_r[:, 1] >= self.part_thres[1]) & (part_arr_r[:, 1] <= self.bounds[1][1]))]
        part_arr_lr = part_arr_r[np.where(
            (part_arr_r[:, 1] < self.part_thres[1]) & (part_arr_r[:, 1] >= self.bounds[1][0]))]

        cell_ul, cell_ur, cell_ll, cell_lr = Cell(part_arr_ul), Cell(part_arr_ur), \
            Cell(part_arr_ll), Cell(part_arr_lr)

        # Assign bounds to each child.
        (xl, xu), (yl, yu) = self.bounds
        x_thres, y_thres = self.part_thres
        cell_ul.def_cell_bounds([(xl, x_thres), (y_thres, yu)])
        cell_ur.def_cell_bounds([(x_thres, xu), (y_thres, yu)])
        cell_ll.def_cell_bounds([(xl, x_thres), (yl, y_thres)])
        cell_lr.def_cell_bounds([(x_thres, xu), (yl, y_thres)])
        return cell_ul, cell_ur, cell_ll, cell_lr

    def show(self, linewidth: float = 0.5):
        """Draw the cell's bounding rectangle on the current pyplot axes."""
        (xl, xu), (yl, yu) = self.bounds
        plt.plot([xl, xu], [yl, yl], '-', c='k', linewidth=linewidth)
        plt.plot([xu, xu], [yl, yu], '-', c='k', linewidth=linewidth)
        plt.plot([xu, xl], [yu, yu], '-', c='k', linewidth=linewidth)
        plt.plot([xl, xl], [yu, yl], '-', c='k', linewidth=linewidth)
# 递归离散化.
def _try_partition(cell: Cell, min_samples_split: int, p_eps: float):
    """Attempt to split ``cell``; return four empty cells if it must stay a leaf.

    A split is rejected when the cell holds fewer than ``min_samples_split``
    samples, or when every child's probability density already matches the
    parent's within relative tolerance ``p_eps``.
    """
    if cell.N < min_samples_split:
        return Cell([]), Cell([]), Cell([]), Cell([])

    parent_dens = cell.cal_proba_dens()

    # Tentatively split and inspect the four children.
    cell.get_marginal_partition_thres()
    children = cell.exec_partition()

    for child in children:
        rel_dev = np.abs(child.cal_proba_dens() - parent_dens) / parent_dens
        if rel_dev > p_eps:
            # Density has not converged yet: accept the split.
            return children

    # All children match the parent density: reject the split.
    return Cell([]), Cell([]), Cell([]), Cell([])
def recursively_partition(cell: Cell, min_samples_split: int = 30, p_eps: float = 1e-3) -> tuple:
    """Recursively partition ``cell`` into marginally equiprobable leaf cells.

    :param cell: root cell covering all samples
    :param min_samples_split: minimum sample count for a cell to be splittable
    :param p_eps: relative density tolerance; recursion stops in a cell once
        all four children match the parent's density within this value,
        defaults to 1e-3
    """
    leaves = []

    def _descend(current):
        children = _try_partition(current, min_samples_split, p_eps)
        if len(children[0].arr) == 0:
            # Split was rejected: the current cell is a leaf.
            leaves.append(current)
            return
        for child in children:
            _descend(child)

    _descend(cell)
    return leaves
# ---- 互信息计算 -----------------------------------------------------------------------------------
def _minmax_norm(arr: np.ndarray):
D = arr.shape[1]
scaler = MinMaxScaler()
arr_norm = None
for i in range(D):
a = scaler.fit_transform(arr[:, i: i + 1])
if arr_norm is None:
arr_norm = a
else:
arr_norm = np.hstack((arr_norm, a))
return arr_norm
class MutualInfoEntropy(object):
    """Mutual information estimator based on adaptive marginal-equiprobable
    partitioning of the unit square (Darbellay, 1998)."""

    def __init__(self, x: np.ndarray, y: np.ndarray) -> None:
        # Reshape both series into columns and min-max normalize to [0, 1]^2.
        self.x = x.reshape(-1, 1).copy()
        self.y = y.reshape(-1, 1).copy()
        self.arr = _minmax_norm(np.hstack((self.x, self.y)))
        self.N = self.arr.shape[0]

    def _equiquantize(self, **kwargs):
        # Partition the unit square into leaf cells and drop the empty leaves.
        cell = Cell(self.arr)
        cell.def_cell_bounds([(0.0, 1.0), (0.0, 1.0)])
        leaf_cells = recursively_partition(cell, **kwargs)
        leaf_cells = [c for c in leaf_cells if c.N > 0]
        return leaf_cells

    def equiquantize(self, **kwargs):
        """Run the adaptive partition; kwargs forwarded to recursively_partition."""
        self.leaf_cells = self._equiquantize(**kwargs)

    def cal_mi(self):
        """Estimate I(x; y) in bits from the leaf-cell histogram.

        Computes sum_cells Nxy*log2(Nxy/(Nx*Ny)) / N + log2(N), which equals
        (1/N) * sum_cells Nxy * log2(N*Nxy / (Nx*Ny)).
        """
        n_leafs = len(self.leaf_cells)
        mi = 0.0
        for i in range(n_leafs):
            cell = self.leaf_cells[i]  # type: Cell
            (xl, xu), (yl, yu) = cell.bounds
            Nxy = len(cell.arr)
            # NOTE(review): marginal counts use half-open [lo, hi) intervals,
            # but a cell includes its upper bound at the domain edge (1.0), so
            # a point at exactly x == 1.0 can be inside the cell yet excluded
            # from Nx -- confirm this edge effect is acceptable.
            Nx = len(np.where((self.arr[:, 0] >= xl) & (self.arr[:, 0] < xu))[0])
            Ny = len(np.where((self.arr[:, 1] >= yl) & (self.arr[:, 1] < yu))[0])
            gain = Nxy * np.log2(Nxy / Nx / Ny)
            mi += gain
        mi = mi / self.N + np.log2(self.N)
        return mi
def cal_mi(x: np.ndarray, y: np.ndarray, **kwargs):
    """Compute mutual information between x and y.

    Keyword arguments are forwarded to ``recursively_partition``; see its
    signature for the available settings.
    """
    estimator = MutualInfoEntropy(x, y)
    estimator.equiquantize(**kwargs)
    return estimator.cal_mi(), estimator
def cal_rho(x: np.ndarray, y: np.ndarray, **kwargs):
    """Compute the predictability measure rho from the mutual information.

    See Darbellay (1998) for the definition rho = sqrt(1 - 2^(-2*MI)).
    """
    mi, estimator = cal_mi(x, y, **kwargs)
    return np.sqrt(1 - np.power(2, -2 * mi)), estimator
if __name__ == '__main__':
    from typing import Tuple
    import sys
    import os

    BASE_DIR = os.path.abspath(os.path.join(os.getcwd(), '../' * 2))
    sys.path.append(BASE_DIR)

    from core.dataset.data_generator import FUNC_NAMES, DataGenerator
    from mod.data_process.numpy import add_circlular_noise
    from src.setting import plt

    # ---- Load test data and compute mutual information -------------------------------------------

    def load_data(func: str, radius: float, N_ticks: int = int(1e4), N=5000) -> Tuple[np.ndarray]:
        """Load or generate (x, y) samples for the named function, with
        circular noise of the given radius added."""
        data_gener = DataGenerator(N_ticks=N_ticks)
        x, y, _, _ = data_gener.gen_data(N, func, normalize=True)

        # Add noise.
        x, y = add_circlular_noise(x, y, radius=radius)
        return x, y

    # Fix one data distribution, sweep the noise level, and watch how the
    # computed value (here rho, not raw MI) changes.
    plt.figure(figsize=[3, 3])
    func = FUNC_NAMES[1]
    radius_lst = np.arange(0.1, 10.0, 0.1)
    mi_lst = []
    params = {'p_eps': 1e-3, 'min_samples_split': 1000}
    for radius in radius_lst:
        x, y = load_data(func, radius)
        # mi = cal_mi(x, y, **params)
        mi, mutual_info_entropy = cal_rho(x, y, **params)
        mi_lst.append(mi)

    plt.scatter(radius_lst, mi_lst, s=6)
    plt.xlabel('radius')
    plt.ylabel('MI')

    # Inspect the resulting partition (binning) of the samples at low noise.
    radius = 0.1
    x, y = load_data(func, radius)
    mi, mutual_info_entropy = cal_rho(x, y, **params)

    plt.figure(figsize=[3, 3])
    plt.scatter(mutual_info_entropy.arr[:, 0], mutual_info_entropy.arr[:, 1], s=1)
    for cell in mutual_info_entropy.leaf_cells:
        cell.show()
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.0])
    plt.xlabel('$x$')
    plt.ylabel('$y$')
"""
Classes to solve canonical consumption-saving models with idiosyncratic shocks
to income. All models here assume CRRA utility with geometric discounting, no
bequest motive, and income shocks that are fully transitory or fully permanent.
It currently solves three types of models:
1) A very basic "perfect foresight" consumption-savings model with no uncertainty.
2) A consumption-savings model with risk over transitory and permanent income shocks.
3) The model described in (2), with an interest rate for debt that differs
from the interest rate for savings.
See NARK https://HARK.githhub.io/Documentation/NARK for information on variable naming conventions.
See HARK documentation for mathematical descriptions of the models being solved.
"""
from copy import copy, deepcopy
import numpy as np
from scipy.optimize import newton
from HARK import AgentType, NullFunc, MetricObject, make_one_period_oo_solver
from HARK.utilities import warnings # Because of "patch" to warnings modules
from HARK.interpolation import (
CubicInterp,
LowerEnvelope,
LinearInterp,
ValueFuncCRRA,
MargValueFuncCRRA,
MargMargValueFuncCRRA
)
from HARK.distribution import Lognormal, MeanOneLogNormal, Uniform
from HARK.distribution import (
DiscreteDistribution,
add_discrete_outcome_constant_mean,
calc_expectation,
combine_indep_dstns,
)
from HARK.utilities import (
make_grid_exp_mult,
CRRAutility,
CRRAutilityP,
CRRAutilityPP,
CRRAutilityP_inv,
CRRAutility_invP,
CRRAutility_inv,
CRRAutilityP_invP,
)
from HARK import _log
from HARK import set_verbosity_level
from HARK.Calibration.Income.IncomeTools import parse_income_spec, parse_time_params, Cagetti_income
from HARK.datasets.SCF.WealthIncomeDist.SCFDistTools import income_wealth_dists_from_scf
from HARK.datasets.life_tables.us_ssa.SSATools import parse_ssa_life_table
# Public names exported by `from ConsIndShockModel import *`.
__all__ = [
    "ConsumerSolution",
    "ConsPerfForesightSolver",
    "ConsIndShockSetup",
    "ConsIndShockSolverBasic",
    "ConsIndShockSolver",
    "ConsKinkedRsolver",
    "PerfForesightConsumerType",
    "IndShockConsumerType",
    "KinkedRconsumerType",
    "init_perfect_foresight",
    "init_idiosyncratic_shocks",
    "init_kinked_R",
    "init_lifecycle",
    "init_cyclical",
]
# Short aliases for the CRRA utility function, its derivatives, and their
# inverses, used throughout the solver classes below.
utility = CRRAutility
utilityP = CRRAutilityP
utilityPP = CRRAutilityPP
utilityP_inv = CRRAutilityP_inv
utility_invP = CRRAutility_invP
utility_inv = CRRAutility_inv
utilityP_invP = CRRAutilityP_invP
# =====================================================================
# === Classes that help solve consumption-saving models ===
# =====================================================================
class ConsumerSolution(MetricObject):
    """
    A class representing the solution of a single period of a consumption-saving
    problem.  The solution must include a consumption function and marginal
    value function.

    Here and elsewhere in the code, Nrm indicates that variables are normalized
    by permanent income.

    Parameters
    ----------
    cFunc : function
        The consumption function for this period, defined over market
        resources: c = cFunc(m).
    vFunc : function
        The beginning-of-period value function for this period, defined over
        market resources: v = vFunc(m).
    vPfunc : function
        The beginning-of-period marginal value function for this period,
        defined over market resources: vP = vPfunc(m).
    vPPfunc : function
        The beginning-of-period marginal marginal value function for this
        period, defined over market resources: vPP = vPPfunc(m).
    mNrmMin : float
        The minimum allowable market resources for this period; the consump-
        tion function (etc) are undefined for m < mNrmMin.
    hNrm : float
        Human wealth after receiving income this period: PDV of all future
        income, ignoring mortality.
    MPCmin : float
        Infimum of the marginal propensity to consume this period.
        MPC --> MPCmin as m --> infinity.
    MPCmax : float
        Supremum of the marginal propensity to consume this period.
        MPC --> MPCmax as m --> mNrmMin.
    """

    distance_criteria = ["vPfunc"]

    # Attributes that append_solution stores state-conditionally as lists.
    _appendable_attrs = ("cFunc", "vFunc", "vPfunc", "vPPfunc", "mNrmMin")

    def __init__(
        self,
        cFunc=None,
        vFunc=None,
        vPfunc=None,
        vPPfunc=None,
        mNrmMin=None,
        hNrm=None,
        MPCmin=None,
        MPCmax=None,
    ):
        # Change any missing function inputs to NullFunc
        self.cFunc = cFunc if cFunc is not None else NullFunc()
        self.vFunc = vFunc if vFunc is not None else NullFunc()
        self.vPfunc = vPfunc if vPfunc is not None else NullFunc()
        self.vPPfunc = vPPfunc if vPPfunc is not None else NullFunc()
        self.mNrmMin = mNrmMin
        self.hNrm = hNrm
        self.MPCmin = MPCmin
        self.MPCmax = MPCmax

    def append_solution(self, new_solution):
        """
        Appends one solution to another to create a ConsumerSolution whose
        attributes are lists.  Used in ConsMarkovModel, where we append solutions
        *conditional* on a particular value of a Markov state to each other in
        order to get the entire solution.

        Parameters
        ----------
        new_solution : ConsumerSolution
            The solution to a consumption-saving problem; each attribute is a
            list representing state-conditional values or functions.

        Returns
        -------
        None
        """
        # isinstance (not `type(x) != list`) is the idiomatic/robust check.
        if not isinstance(self.cFunc, list):
            # Then we assume that self is an empty initialized solution instance.
            # Begin by checking this is so.
            assert (
                NullFunc().distance(self.cFunc) == 0
            ), "append_solution called incorrectly!"

            # Convert each appendable attribute to a one-element list.
            for name in self._appendable_attrs:
                setattr(self, name, [getattr(new_solution, name)])
        else:
            for name in self._appendable_attrs:
                getattr(self, name).append(getattr(new_solution, name))
# =====================================================================
# === Classes and functions that solve consumption-saving models ===
# =====================================================================
class ConsPerfForesightSolver(MetricObject):
"""
A class for solving a one period perfect foresight
consumption-saving problem.
An instance of this class is created by the function solvePerfForesight
in each period.
Parameters
----------
solution_next : ConsumerSolution
The solution to next period's one-period problem.
DiscFac : float
Intertemporal discount factor for future utility.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the next period.
CRRA : float
Coefficient of relative risk aversion.
Rfree : float
Risk free interest factor on end-of-period assets.
PermGroFac : float
Expected permanent income growth factor at the end of this period.
BoroCnstArt : float or None
Artificial borrowing constraint, as a multiple of permanent income.
Can be None, indicating no artificial constraint.
MaxKinks : int
Maximum number of kink points to allow in the consumption function;
additional points will be thrown out. Only relevant in infinite
horizon model with artificial borrowing constraint.
"""
def __init__(
self,
solution_next,
DiscFac,
LivPrb,
CRRA,
Rfree,
PermGroFac,
BoroCnstArt,
MaxKinks,
):
self.solution_next = solution_next
self.DiscFac = DiscFac
self.LivPrb = LivPrb
self.CRRA = CRRA
self.Rfree = Rfree
self.PermGroFac = PermGroFac
self.BoroCnstArt = BoroCnstArt
self.MaxKinks = MaxKinks
def def_utility_funcs(self):
"""
Defines CRRA utility function for this period (and its derivatives),
saving them as attributes of self for other methods to use.
Parameters
----------
None
Returns
-------
None
"""
self.u = lambda c: utility(c, gam=self.CRRA) # utility function
self.uP = lambda c: utilityP(c, gam=self.CRRA) # marginal utility function
self.uPP = lambda c: utilityPP(
c, gam=self.CRRA
) # marginal marginal utility function
def def_value_funcs(self):
"""
Defines the value and marginal value functions for this period.
Uses the fact that for a perfect foresight CRRA utility problem,
if the MPC in period t is :math:`\kappa_{t}`, and relative risk
aversion :math:`\rho`, then the inverse value vFuncNvrs has a
constant slope of :math:`\kappa_{t}^{-\rho/(1-\rho)}` and
vFuncNvrs has value of zero at the lower bound of market resources
mNrmMin. See PerfForesightConsumerType.ipynb documentation notebook
for a brief explanation and the links below for a fuller treatment.
https://www.econ2.jhu.edu/people/ccarroll/public/lecturenotes/consumption/PerfForesightCRRA/#vFuncAnalytical
https://www.econ2.jhu.edu/people/ccarroll/SolvingMicroDSOPs/#vFuncPF
Parameters
----------
None
Returns
-------
None
"""
# See the PerfForesightConsumerType.ipynb documentation notebook for the derivations
vFuncNvrsSlope = self.MPCmin ** (-self.CRRA / (1.0 - self.CRRA))
vFuncNvrs = LinearInterp(
np.array([self.mNrmMinNow, self.mNrmMinNow + 1.0]),
np.array([0.0, vFuncNvrsSlope]),
)
self.vFunc = ValueFuncCRRA(vFuncNvrs, self.CRRA)
self.vPfunc = MargValueFuncCRRA(self.cFunc, self.CRRA)
    def make_cFunc_PF(self):
        """
        Makes the (linear) consumption function for this period.

        Works backward from next period's kink points, imposes the artificial
        borrowing constraint where it binds, and trims the kink list to at most
        MaxKinks points.  Sets cFunc, hNrmNow, MPCmin, MPCmax, mNrmMinNow and
        Ex_IncNext as attributes of self.

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        # Use a local value of BoroCnstArt to prevent comparing None and float below.
        if self.BoroCnstArt is None:
            BoroCnstArt = -np.inf
        else:
            BoroCnstArt = self.BoroCnstArt

        # Calculate human wealth this period
        self.hNrmNow = (self.PermGroFac / self.Rfree) * (self.solution_next.hNrm + 1.0)

        # Calculate the lower bound of the marginal propensity to consume
        PatFac = ((self.Rfree * self.DiscFacEff) ** (1.0 / self.CRRA)) / self.Rfree
        self.MPCmin = 1.0 / (1.0 + PatFac / self.solution_next.MPCmin)

        # Extract the discrete kink points in next period's consumption function;
        # don't take the last one, as it only defines the extrapolation and is not a kink.
        mNrmNext = self.solution_next.cFunc.x_list[:-1]
        cNrmNext = self.solution_next.cFunc.y_list[:-1]

        # Calculate the end-of-period asset values that would reach those kink points
        # next period, then invert the first order condition to get consumption. Then
        # find the endogenous gridpoint (kink point) today that corresponds to each kink
        aNrmNow = (self.PermGroFac / self.Rfree) * (mNrmNext - 1.0)
        cNrmNow = (self.DiscFacEff * self.Rfree) ** (-1.0 / self.CRRA) * (
            self.PermGroFac * cNrmNext
        )
        mNrmNow = aNrmNow + cNrmNow

        # Add an additional point to the list of gridpoints for the extrapolation,
        # using the new value of the lower bound of the MPC.
        mNrmNow = np.append(mNrmNow, mNrmNow[-1] + 1.0)
        cNrmNow = np.append(cNrmNow, cNrmNow[-1] + self.MPCmin)

        # If the artificial borrowing constraint binds, combine the constrained and
        # unconstrained consumption functions.
        if BoroCnstArt > mNrmNow[0]:
            # Find the highest index where constraint binds
            cNrmCnst = mNrmNow - BoroCnstArt
            CnstBinds = cNrmCnst < cNrmNow
            idx = np.where(CnstBinds)[0][-1]
            if idx < (mNrmNow.size - 1):
                # If it is not the *very last* index, find the the critical level
                # of mNrm where the artificial borrowing contraint begins to bind.
                d0 = cNrmNow[idx] - cNrmCnst[idx]
                d1 = cNrmCnst[idx + 1] - cNrmNow[idx + 1]
                m0 = mNrmNow[idx]
                m1 = mNrmNow[idx + 1]
                alpha = d0 / (d0 + d1)
                mCrit = m0 + alpha * (m1 - m0)

                # Adjust the grids of mNrm and cNrm to account for the borrowing constraint.
                cCrit = mCrit - BoroCnstArt
                mNrmNow = np.concatenate(([BoroCnstArt, mCrit], mNrmNow[(idx + 1):]))
                cNrmNow = np.concatenate(([0.0, cCrit], cNrmNow[(idx + 1):]))
            else:
                # If it *is* the very last index, then there are only three points
                # that characterize the consumption function: the artificial borrowing
                # constraint, the constraint kink, and the extrapolation point.
                mXtra = (cNrmNow[-1] - cNrmCnst[-1]) / (1.0 - self.MPCmin)
                mCrit = mNrmNow[-1] + mXtra
                cCrit = mCrit - BoroCnstArt
                mNrmNow = np.array([BoroCnstArt, mCrit, mCrit + 1.0])
                cNrmNow = np.array([0.0, cCrit, cCrit + self.MPCmin])

        # If the mNrm and cNrm grids have become too large, throw out the last
        # kink point, being sure to adjust the extrapolation.
        if mNrmNow.size > self.MaxKinks:
            mNrmNow = np.concatenate((mNrmNow[:-2], [mNrmNow[-3] + 1.0]))
            cNrmNow = np.concatenate((cNrmNow[:-2], [cNrmNow[-3] + self.MPCmin]))

        # Construct the consumption function as a linear interpolation.
        self.cFunc = LinearInterp(mNrmNow, cNrmNow)

        # Calculate the upper bound of the MPC as the slope of the bottom segment.
        self.MPCmax = (cNrmNow[1] - cNrmNow[0]) / (mNrmNow[1] - mNrmNow[0])

        # Add two attributes to enable calculation of steady state market resources.
        self.Ex_IncNext = 1.0  # Perfect foresight income of 1
        self.mNrmMinNow = mNrmNow[0]  # Relabeling for compatibility with add_mNrmStE
def add_mNrmTrg(self, solution):
"""
Finds value of (normalized) market resources m at which individual consumer
expects m not to change.
This will exist if the GICNrm holds.
https://econ-ark.github.io/BufferStockTheory#UniqueStablePoints
Parameters
----------
solution : ConsumerSolution
Solution to this period's problem, which must have attribute cFunc.
Returns
-------
solution : ConsumerSolution
Same solution that was passed, but now with the attribute mNrmStE.
"""
# If no uncertainty, return the degenerate targets for the PF model
if hasattr(self, "TranShkMinNext"): # Then it has transitory shocks
# Handle the degenerate case where shocks are of size zero
if ((self.TranShkMinNext == 1.0) and (self.PermShkMinNext == 1.0)):
# but they are of zero size (and also permanent are zero)
if self.GICRaw: # max of nat and art boro cnst
if type(self.BoroCnstArt) == type(None):
solution.mNrmStE = -self.hNrmNow
solution.mNrmTrg = -self.hNrmNow
else:
bNrmNxt = -self.BoroCnstArt * self.Rfree/self.PermGroFac
solution.mNrmStE = bNrmNxt + 1.0
solution.mNrmTrg = bNrmNxt + 1.0
else: # infinity
solution.mNrmStE = float('inf')
solution.mNrmTrg = float('inf')
return solution
# First find
# \bar{\mathcal{R}} = E_t[R/Gamma_{t+1}] = R/Gamma E_t[1/psi_{t+1}]
if type(self) == ConsPerfForesightSolver:
Ex_PermShkInv = 1.0
else:
Ex_PermShkInv = np.dot(1/self.PermShkValsNext, self.ShkPrbsNext)
Ex_RNrmFac = (self.Rfree/self.PermGroFac)*Ex_PermShkInv
# mNrmTrg solves Rcalbar*(m - c(m)) + E[inc_next] = m. Define a
# rearranged version.
Ex_m_tp1_minus_m_t = (
lambda m: Ex_RNrmFac * (m - solution.cFunc(m)) + self.Ex_IncNext - m
)
# Minimum market resources plus next income is okay starting guess
m_init_guess = self.mNrmMinNow + self.Ex_IncNext
try:
mNrmTrg = newton(Ex_m_tp1_minus_m_t, m_init_guess)
except:
mNrmTrg = None
# Add mNrmTrg to the solution and return it
solution.mNrmTrg = mNrmTrg
return solution
def add_mNrmStE(self, solution):
"""
Finds market resources ratio at which 'balanced growth' is expected.
This is the m ratio such that the expected growth rate of the M level
matches the expected growth rate of permanent income. This value does
not exist if the Growth Impatience Condition does not hold.
https://econ-ark.github.io/BufferStockTheory#Unique-Stable-Points
Parameters
----------
solution : ConsumerSolution
Solution to this period's problem, which must have attribute cFunc.
Returns
-------
solution : ConsumerSolution
Same solution that was passed, but now with the attribute mNrmStE
"""
# Probably should test whether GICRaw holds and log error if it does not
# using check_conditions
# All combinations of c and m that yield E[PermGroFac PermShkVal mNext] = mNow
# https://econ-ark.github.io/BufferStockTheory/#The-Individual-Steady-State
PF_RNrm = self.Rfree/self.PermGroFac
# If we are working with a model that permits uncertainty but that
# uncertainty has been set to zero, return the correct answer
# by hand because in this degenerate case numerical search may
# have trouble
if hasattr(self, "TranShkMinNext"): # Then it has transitory shocks
if ((self.TranShkMinNext == 1.0) and (self.PermShkMinNext == 1.0)):
# but they are of zero size (and permanent shocks also not there)
if self.GICRaw: # max of nat and art boro cnst
# breakpoint()
if type(self.BoroCnstArt) == type(None):
solution.mNrmStE = -self.hNrmNow
solution.mNrmTrg = -self.hNrmNow
else:
bNrmNxt = -self.BoroCnstArt * self.Rfree/self.PermGroFac
solution.mNrmStE = bNrmNxt + 1.0
solution.mNrmTrg = bNrmNxt + 1.0
else: # infinity
solution.mNrmStE = float('inf')
solution.mNrmTrg = float('inf')
return solution
Ex_PermShk_tp1_times_m_tp1_minus_m_t = (
lambda mStE: PF_RNrm * (mStE - solution.cFunc(mStE)) + 1.0 - mStE
)
# Minimum market resources plus next income is okay starting guess
m_init_guess = self.mNrmMinNow + self.Ex_IncNext
try:
mNrmStE = newton(Ex_PermShk_tp1_times_m_tp1_minus_m_t, m_init_guess)
except:
mNrmStE = None
solution.mNrmStE = mNrmStE
return solution
def add_stable_points(self, solution):
"""
Checks necessary conditions for the existence of the individual steady
state and target levels of market resources (see above).
If the conditions are satisfied, computes and adds the stable points
to the solution.
Parameters
----------
solution : ConsumerSolution
Solution to this period's problem, which must have attribute cFunc.
Returns
-------
solution : ConsumerSolution
Same solution that was provided, augmented with attributes mNrmStE and
mNrmTrg, if they exist.
"""
# 0. There is no non-degenerate steady state for any unconstrained PF model.
# 1. There is a non-degenerate SS for constrained PF model if GICRaw holds.
# Therefore
# Check if (GICRaw and BoroCnstArt) and if so compute them both
thorn = (self.Rfree*self.DiscFacEff)**(1/self.CRRA)
GICRaw = 1 > thorn/self.PermGroFac
if self.BoroCnstArt is not None and GICRaw:
solution = self.add_mNrmStE(solution)
solution = self.add_mNrmTrg(solution)
return solution
def solve(self):
"""
Solves the one period perfect foresight consumption-saving problem.
Parameters
----------
None
Returns
-------
solution : ConsumerSolution
The solution to this period's problem.
"""
self.def_utility_funcs()
self.DiscFacEff = self.DiscFac * self.LivPrb # Effective=pure x LivPrb
self.make_cFunc_PF()
self.def_value_funcs()
solution = ConsumerSolution(
cFunc=self.cFunc,
vFunc=self.vFunc,
vPfunc=self.vPfunc,
mNrmMin=self.mNrmMinNow,
hNrm=self.hNrmNow,
MPCmin=self.MPCmin,
MPCmax=self.MPCmax,
)
solution = self.add_stable_points(solution)
return solution
###############################################################################
###############################################################################
class ConsIndShockSetup(ConsPerfForesightSolver):
    """
    A superclass for solvers of one period consumption-saving problems with
    constant relative risk aversion utility and permanent and transitory shocks
    to income. Has methods to set up but not solve the one period problem.

    Parameters
    ----------
    solution_next : ConsumerSolution
        The solution to next period's one period problem.
    IncShkDstn : distribution.Distribution
        A discrete
        approximation to the income process between the period being solved
        and the one immediately following (in solution_next).
    LivPrb : float
        Survival probability; likelihood of being alive at the beginning of
        the succeeding period.
    DiscFac : float
        Intertemporal discount factor for future utility.
    CRRA : float
        Coefficient of relative risk aversion.
    Rfree : float
        Risk free interest factor on end-of-period assets.
    PermGroFac : float
        Expected permanent income growth factor at the end of this period.
    BoroCnstArt: float or None
        Borrowing constraint for the minimum allowable assets to end the
        period with. If it is less than the natural borrowing constraint,
        then it is irrelevant; BoroCnstArt=None indicates no artificial bor-
        rowing constraint.
    aXtraGrid: np.array
        Array of "extra" end-of-period asset values-- assets above the
        absolute minimum acceptable level.
    vFuncBool: boolean
        An indicator for whether the value function should be computed and
        included in the reported solution.
    CubicBool: boolean
        An indicator for whether the solver should use cubic or linear inter-
        polation.
    """
    def __init__(
        self,
        solution_next,
        IncShkDstn,
        LivPrb,
        DiscFac,
        CRRA,
        Rfree,
        PermGroFac,
        BoroCnstArt,
        aXtraGrid,
        vFuncBool,
        CubicBool,
    ):
        """
        Constructor for a new solver-setup for problems with income subject to
        permanent and transitory shocks.
        """
        # Store inputs verbatim; derived objects are built in prepare_to_solve.
        self.solution_next = solution_next
        self.IncShkDstn = IncShkDstn
        self.LivPrb = LivPrb
        self.DiscFac = DiscFac
        self.CRRA = CRRA
        self.Rfree = Rfree
        self.PermGroFac = PermGroFac
        self.BoroCnstArt = BoroCnstArt
        self.aXtraGrid = aXtraGrid
        self.vFuncBool = vFuncBool
        self.CubicBool = CubicBool
        self.def_utility_funcs()
    def def_utility_funcs(self):
        """
        Defines CRRA utility function for this period (and its derivatives,
        and their inverses), saving them as attributes of self for other methods
        to use.

        Parameters
        ----------
        none

        Returns
        -------
        none
        """
        # Base utility functions come from the perfect foresight solver;
        # the inverse (and derivative-of-inverse) functions added here are
        # needed by the endogenous gridpoints method.
        ConsPerfForesightSolver.def_utility_funcs(self)
        self.uPinv = lambda u: utilityP_inv(u, gam=self.CRRA)
        self.uPinvP = lambda u: utilityP_invP(u, gam=self.CRRA)
        self.uinvP = lambda u: utility_invP(u, gam=self.CRRA)
        if self.vFuncBool:
            # Inverse utility is only needed when the value function is built
            self.uinv = lambda u: utility_inv(u, gam=self.CRRA)
    def set_and_update_values(self, solution_next, IncShkDstn, LivPrb, DiscFac):
        """
        Unpacks some of the inputs (and calculates simple objects based on them),
        storing the results in self for use by other methods. These include:
        income shocks and probabilities, next period's marginal value function
        (etc), the probability of getting the worst income shock next period,
        the patience factor, human wealth, and the bounding MPCs.

        Parameters
        ----------
        solution_next : ConsumerSolution
            The solution to next period's one period problem.
        IncShkDstn : distribution.DiscreteDistribution
            A DiscreteDistribution with a pmf
            and two point value arrays in X, order:
            permanent shocks, transitory shocks.
        LivPrb : float
            Survival probability; likelihood of being alive at the beginning of
            the succeeding period.
        DiscFac : float
            Intertemporal discount factor for future utility.

        Returns
        -------
        None
        """
        self.DiscFacEff = DiscFac * LivPrb  # "effective" discount factor
        self.IncShkDstn = IncShkDstn
        self.ShkPrbsNext = IncShkDstn.pmf
        self.PermShkValsNext = IncShkDstn.X[0]
        self.TranShkValsNext = IncShkDstn.X[1]
        self.PermShkMinNext = np.min(self.PermShkValsNext)
        self.TranShkMinNext = np.min(self.TranShkValsNext)
        self.vPfuncNext = solution_next.vPfunc
        # Total probability of drawing the worst combined (perm x tran) income
        # shock next period; used below to bound the maximum MPC.
        self.WorstIncPrb = np.sum(
            self.ShkPrbsNext[
                (self.PermShkValsNext * self.TranShkValsNext)
                == (self.PermShkMinNext * self.TranShkMinNext)
            ]
        )
        if self.CubicBool:
            self.vPPfuncNext = solution_next.vPPfunc
        if self.vFuncBool:
            self.vFuncNext = solution_next.vFunc
        # Update the bounding MPCs and PDV of human wealth:
        self.PatFac = ((self.Rfree * self.DiscFacEff) ** (1.0 / self.CRRA)) / self.Rfree
        self.MPCminNow = 1.0 / (1.0 + self.PatFac / solution_next.MPCmin)
        self.Ex_IncNext = np.dot(
            self.ShkPrbsNext, self.TranShkValsNext * self.PermShkValsNext
        )
        self.hNrmNow = (
            self.PermGroFac / self.Rfree * (self.Ex_IncNext + solution_next.hNrm)
        )
        self.MPCmaxNow = 1.0 / (
            1.0
            + (self.WorstIncPrb ** (1.0 / self.CRRA))
            * self.PatFac
            / solution_next.MPCmax
        )
        # Intercept and slope of the limiting consumption function as m -> inf
        self.cFuncLimitIntercept = self.MPCminNow * self.hNrmNow
        self.cFuncLimitSlope = self.MPCminNow
    def def_BoroCnst(self, BoroCnstArt):
        """
        Defines the constrained portion of the consumption function as cFuncNowCnst,
        an attribute of self. Uses the artificial and natural borrowing constraints.

        Parameters
        ----------
        BoroCnstArt : float or None
            Borrowing constraint for the minimum allowable assets to end the
            period with. If it is less than the natural borrowing constraint,
            then it is irrelevant; BoroCnstArt=None indicates no artificial bor-
            rowing constraint.

        Returns
        -------
        none
        """
        # Calculate the minimum allowable value of money resources in this period
        self.BoroCnstNat = (
            (self.solution_next.mNrmMin - self.TranShkMinNext)
            * (self.PermGroFac * self.PermShkMinNext)
            / self.Rfree
        )
        # Note: need to be sure to handle BoroCnstArt==None appropriately.
        # In Py2, this would evaluate to 5.0:  np.max([None, 5.0]).
        # However in Py3, this raises a TypeError. Thus here we need to directly
        # address the situation in which BoroCnstArt == None:
        if BoroCnstArt is None:
            self.mNrmMinNow = self.BoroCnstNat
        else:
            self.mNrmMinNow = np.max([self.BoroCnstNat, BoroCnstArt])
        if self.BoroCnstNat < self.mNrmMinNow:
            self.MPCmaxEff = 1.0  # If actually constrained, MPC near limit is 1
        else:
            self.MPCmaxEff = self.MPCmaxNow
        # Define the borrowing constraint (limiting consumption function):
        # consume everything above the binding minimum, c = m - mNrmMin.
        self.cFuncNowCnst = LinearInterp(
            np.array([self.mNrmMinNow, self.mNrmMinNow + 1]), np.array([0.0, 1.0])
        )
    def prepare_to_solve(self):
        """
        Perform preparatory work before calculating the unconstrained consumption
        function.

        Parameters
        ----------
        none

        Returns
        -------
        none
        """
        # Unpack shocks/next-period objects, then derive the borrowing limits.
        self.set_and_update_values(
            self.solution_next, self.IncShkDstn, self.LivPrb, self.DiscFac
        )
        self.def_BoroCnst(self.BoroCnstArt)
####################################################################################################
####################################################################################################
class ConsIndShockSolverBasic(ConsIndShockSetup):
    """
    This class solves a single period of a standard consumption-saving problem,
    using linear interpolation and without the ability to calculate the value
    function. ConsIndShockSolver inherits from this class and adds the ability
    to perform cubic interpolation and to calculate the value function.

    Note that this class does not have its own initializing method. It initial-
    izes the same problem in the same way as ConsIndShockSetup, from which it
    inherits.
    """
    def prepare_to_calc_EndOfPrdvP(self):
        """
        Prepare to calculate end-of-period marginal value by creating an array
        of market resources that the agent could have next period, considering
        the grid of end-of-period assets and the distribution of shocks he might
        experience next period.

        Parameters
        ----------
        none

        Returns
        -------
        aNrmNow : np.array
            A 1D array of end-of-period assets; also stored as attribute of self.
        """
        # We define aNrmNow all the way from BoroCnstNat up to max(self.aXtraGrid)
        # even if BoroCnstNat < BoroCnstArt, so we can construct the consumption
        # function as the lower envelope of the (by the artificial borrowing con-
        # straint) unconstrained consumption function, and the artificially con-
        # strained consumption function.
        self.aNrmNow = np.asarray(self.aXtraGrid) + self.BoroCnstNat
        return self.aNrmNow
    def m_nrm_next(self, shocks, a_nrm):
        """
        Computes normalized market resources of the next period
        from income shocks and current normalized market resources.

        Parameters
        ----------
        shocks : [float]
            Permanent and transitory income shock levels, in that order.
        a_nrm : float
            Normalized market assets this period.

        Returns
        -------
        float
            normalized market resources in the next period
        """
        # m' = (R / (Gamma * psi)) * a + theta
        return self.Rfree / (self.PermGroFac * shocks[0]) \
            * a_nrm + shocks[1]
    def calc_EndOfPrdvP(self):
        """
        Calculate end-of-period marginal value of assets at each point in aNrmNow.
        Does so by taking a weighted sum of next period marginal values across
        income shocks (in a preconstructed grid self.mNrmNext).

        Parameters
        ----------
        none

        Returns
        -------
        EndOfPrdvP : np.array
            A 1D array of end-of-period marginal value of assets
        """
        # Marginal value next period, scaled by the permanent shock raised to
        # -CRRA (from normalizing the problem by permanent income).
        def vp_next(shocks, a_nrm):
            return shocks[0] ** (-self.CRRA) \
                * self.vPfuncNext(self.m_nrm_next(shocks, a_nrm))
        EndOfPrdvP = (
            self.DiscFacEff
            * self.Rfree
            * self.PermGroFac ** (-self.CRRA)
            * calc_expectation(
                self.IncShkDstn,
                vp_next,
                self.aNrmNow
            )
        )
        return EndOfPrdvP
    def get_points_for_interpolation(self, EndOfPrdvP, aNrmNow):
        """
        Finds interpolation points (c,m) for the consumption function.

        Parameters
        ----------
        EndOfPrdvP : np.array
            Array of end-of-period marginal values.
        aNrmNow : np.array
            Array of end-of-period asset values that yield the marginal values
            in EndOfPrdvP.

        Returns
        -------
        c_for_interpolation : np.array
            Consumption points for interpolation.
        m_for_interpolation : np.array
            Corresponding market resource points for interpolation.
        """
        # Endogenous gridpoints step: invert the first order condition
        # u'(c) = EndOfPrdvP, then back out the m that goes with each (a, c).
        cNrmNow = self.uPinv(EndOfPrdvP)
        mNrmNow = cNrmNow + aNrmNow
        # Limiting consumption is zero as m approaches mNrmMin
        c_for_interpolation = np.insert(cNrmNow, 0, 0.0, axis=-1)
        m_for_interpolation = np.insert(mNrmNow, 0, self.BoroCnstNat, axis=-1)
        # Store these for calcvFunc
        self.cNrmNow = cNrmNow
        self.mNrmNow = mNrmNow
        return c_for_interpolation, m_for_interpolation
    def use_points_for_interpolation(self, cNrm, mNrm, interpolator):
        """
        Constructs a basic solution for this period, including the consumption
        function and marginal value function.

        Parameters
        ----------
        cNrm : np.array
            (Normalized) consumption points for interpolation.
        mNrm : np.array
            (Normalized) corresponding market resource points for interpolation.
        interpolator : function
            A function that constructs and returns a consumption function.

        Returns
        -------
        solution_now : ConsumerSolution
            The solution to this period's consumption-saving problem, with a
            consumption function, marginal value function, and minimum m.
        """
        # Construct the unconstrained consumption function
        cFuncNowUnc = interpolator(mNrm, cNrm)
        # Combine the constrained and unconstrained functions into the true consumption function
        # breakpoint()  # LowerEnvelope should only be used when BoroCnstArt is true
        cFuncNow = LowerEnvelope(cFuncNowUnc, self.cFuncNowCnst, nan_bool=False)
        # Make the marginal value function and the marginal marginal value function
        vPfuncNow = MargValueFuncCRRA(cFuncNow, self.CRRA)
        # Pack up the solution and return it
        solution_now = ConsumerSolution(
            cFunc=cFuncNow, vPfunc=vPfuncNow, mNrmMin=self.mNrmMinNow
        )
        return solution_now
    def make_basic_solution(self, EndOfPrdvP, aNrm, interpolator):
        """
        Given end of period assets and end of period marginal value, construct
        the basic solution for this period.

        Parameters
        ----------
        EndOfPrdvP : np.array
            Array of end-of-period marginal values.
        aNrm : np.array
            Array of end-of-period asset values that yield the marginal values
            in EndOfPrdvP.
        interpolator : function
            A function that constructs and returns a consumption function.

        Returns
        -------
        solution_now : ConsumerSolution
            The solution to this period's consumption-saving problem, with a
            consumption function, marginal value function, and minimum m.
        """
        cNrm, mNrm = self.get_points_for_interpolation(EndOfPrdvP, aNrm)
        solution_now = self.use_points_for_interpolation(cNrm, mNrm, interpolator)
        return solution_now
    def add_MPC_and_human_wealth(self, solution):
        """
        Take a solution and add human wealth and the bounding MPCs to it.

        Parameters
        ----------
        solution : ConsumerSolution
            The solution to this period's consumption-saving problem.

        Returns
        -------
        solution : ConsumerSolution
            The solution to this period's consumption-saving problem, but now
            with human wealth and the bounding MPCs.
        """
        solution.hNrm = self.hNrmNow
        solution.MPCmin = self.MPCminNow
        solution.MPCmax = self.MPCmaxEff
        return solution
    def add_stable_points(self, solution):
        """
        Checks necessary conditions for the existence of the individual steady
        state and target levels of market resources (see above).

        If the conditions are satisfied, computes and adds the stable points
        to the solution.

        Parameters
        ----------
        solution : ConsumerSolution
            Solution to this period's problem, which must have attribute cFunc.

        Returns
        -------
        solution : ConsumerSolution
            Same solution that was passed, but now with attributes mNrmStE and
            mNrmTrg, if they exist.
        """
        # 0. Check if GICRaw holds. If so, then mNrmStE will exist. So, compute it.
        # 1. Check if GICNrm holds. If so, then mNrmTrg will exist. So, compute it.
        # thorn is the absolute patience factor (R * beta * LivPrb)^(1/rho).
        thorn = (self.Rfree * self.DiscFacEff) ** (1 / self.CRRA)
        GPFRaw = thorn / self.PermGroFac
        self.GPFRaw = GPFRaw
        # GPFNrm adjusts the growth patience factor by E[1/psi].
        GPFNrm = thorn / self.PermGroFac / np.dot(1 / self.PermShkValsNext, self.ShkPrbsNext)
        self.GPFNrm = GPFNrm
        GICRaw = 1 > thorn / self.PermGroFac
        self.GICRaw = GICRaw
        GICNrm = 1 > GPFNrm
        self.GICNrm = GICNrm
        if GICRaw:
            solution = self.add_mNrmStE(solution)  # find steady state m, if it exists
        if GICNrm:
            solution = self.add_mNrmTrg(solution)  # find target m, if it exists
        return solution
    def make_linear_cFunc(self, mNrm, cNrm):
        """
        Makes a linear interpolation to represent the (unconstrained) consumption function.

        Parameters
        ----------
        mNrm : np.array
            Corresponding market resource points for interpolation.
        cNrm : np.array
            Consumption points for interpolation.

        Returns
        -------
        cFuncUnc : LinearInterp
            The unconstrained consumption function for this period.
        """
        # The last two arguments pin down the limiting linear behavior of the
        # consumption function above the top gridpoint.
        cFuncUnc = LinearInterp(
            mNrm, cNrm, self.cFuncLimitIntercept, self.cFuncLimitSlope
        )
        return cFuncUnc
    def solve(self):
        """
        Solves a one period consumption saving problem with risky income.

        Parameters
        ----------
        None

        Returns
        -------
        solution : ConsumerSolution
            The solution to the one period problem.
        """
        self.aNrmNow = np.asarray(self.aXtraGrid) + self.BoroCnstNat
        aNrm = self.aNrmNow
        EndOfPrdvP = self.calc_EndOfPrdvP()
        solution = self.make_basic_solution(EndOfPrdvP, aNrm, self.make_linear_cFunc)
        solution = self.add_MPC_and_human_wealth(solution)
        solution = self.add_stable_points(solution)
        return solution
###############################################################################
###############################################################################
class ConsIndShockSolver(ConsIndShockSolverBasic):
    """
    This class solves a single period of a standard consumption-saving problem.
    It inherits from ConsIndShockSolverBasic, adding the ability to perform cubic
    interpolation and to calculate the value function.
    """
    def make_cubic_cFunc(self, mNrm, cNrm):
        """
        Makes a cubic spline interpolation of the unconstrained consumption
        function for this period.

        Parameters
        ----------
        mNrm : np.array
            Corresponding market resource points for interpolation.
        cNrm : np.array
            Consumption points for interpolation.

        Returns
        -------
        cFuncUnc : CubicInterp
            The unconstrained consumption function for this period.
        """
        # Second derivative of next period's value, scaled like vp_next but
        # with exponent -CRRA - 1 from the permanent-income normalization.
        def vpp_next(shocks, a_nrm):
            return shocks[0] ** (- self.CRRA - 1.0) \
                * self.vPPfuncNext(self.m_nrm_next(shocks, a_nrm))
        EndOfPrdvPP = (
            self.DiscFacEff
            * self.Rfree
            * self.Rfree
            * self.PermGroFac ** (-self.CRRA - 1.0)
            * calc_expectation(
                self.IncShkDstn,
                vpp_next,
                self.aNrmNow
            )
        )
        # Slope of the consumption function at each gridpoint, from the implicit
        # function theorem applied to the first order condition.
        dcda = EndOfPrdvPP / self.uPP(np.array(cNrm[1:]))
        MPC = dcda / (dcda + 1.0)
        # At the bottom gridpoint the MPC equals its theoretical maximum.
        MPC = np.insert(MPC, 0, self.MPCmaxNow)
        cFuncNowUnc = CubicInterp(
            mNrm, cNrm, MPC, self.MPCminNow * self.hNrmNow, self.MPCminNow
        )
        return cFuncNowUnc
    def make_EndOfPrdvFunc(self, EndOfPrdvP):
        """
        Construct the end-of-period value function for this period, storing it
        as an attribute of self for use by other methods.

        Parameters
        ----------
        EndOfPrdvP : np.array
            Array of end-of-period marginal value of assets corresponding to the
            asset values in self.aNrmNow.

        Returns
        -------
        none
        """
        # Next period's value level, scaled by the permanent shock and income
        # growth raised to (1 - CRRA) from the normalization.
        def v_lvl_next(shocks, a_nrm):
            return (
                shocks[0] ** (1.0 - self.CRRA)
                * self.PermGroFac ** (1.0 - self.CRRA)
            ) * self.vFuncNext(self.m_nrm_next(shocks, a_nrm))
        EndOfPrdv = self.DiscFacEff * calc_expectation(
            self.IncShkDstn, v_lvl_next, self.aNrmNow
        )
        EndOfPrdvNvrs = self.uinv(
            EndOfPrdv
        )  # value transformed through inverse utility
        EndOfPrdvNvrsP = EndOfPrdvP * self.uinvP(EndOfPrdv)
        EndOfPrdvNvrs = np.insert(EndOfPrdvNvrs, 0, 0.0)
        EndOfPrdvNvrsP = np.insert(
            EndOfPrdvNvrsP, 0, EndOfPrdvNvrsP[0]
        )  # This is a very good approximation, vNvrsPP = 0 at the asset minimum
        aNrm_temp = np.insert(self.aNrmNow, 0, self.BoroCnstNat)
        # Interpolate in the "inverted" (pseudo-linear) space and re-curve.
        EndOfPrdvNvrsFunc = CubicInterp(aNrm_temp, EndOfPrdvNvrs, EndOfPrdvNvrsP)
        self.EndOfPrdvFunc = ValueFuncCRRA(EndOfPrdvNvrsFunc, self.CRRA)
    def add_vFunc(self, solution, EndOfPrdvP):
        """
        Creates the value function for this period and adds it to the solution.

        Parameters
        ----------
        solution : ConsumerSolution
            The solution to this single period problem, likely including the
            consumption function, marginal value function, etc.
        EndOfPrdvP : np.array
            Array of end-of-period marginal value of assets corresponding to the
            asset values in self.aNrmNow.

        Returns
        -------
        solution : ConsumerSolution
            The single period solution passed as an input, but now with the
            value function (defined over market resources m) as an attribute.
        """
        self.make_EndOfPrdvFunc(EndOfPrdvP)
        solution.vFunc = self.make_vFunc(solution)
        return solution
    def make_vFunc(self, solution):
        """
        Creates the value function for this period, defined over market resources m.
        self must have the attribute EndOfPrdvFunc in order to execute.

        Parameters
        ----------
        solution : ConsumerSolution
            The solution to this single period problem, which must include the
            consumption function.

        Returns
        -------
        vFuncNow : ValueFuncCRRA
            A representation of the value function for this period, defined over
            normalized market resources m: v = vFuncNow(m).
        """
        # Compute expected value and marginal value on a grid of market resources
        mNrm_temp = self.mNrmMinNow + self.aXtraGrid
        cNrmNow = solution.cFunc(mNrm_temp)
        aNrmNow = mNrm_temp - cNrmNow
        vNrmNow = self.u(cNrmNow) + self.EndOfPrdvFunc(aNrmNow)
        vPnow = self.uP(cNrmNow)
        # Construct the beginning-of-period value function
        vNvrs = self.uinv(vNrmNow)  # value transformed through inverse utility
        vNvrsP = vPnow * self.uinvP(vNrmNow)
        mNrm_temp = np.insert(mNrm_temp, 0, self.mNrmMinNow)
        vNvrs = np.insert(vNvrs, 0, 0.0)
        # Slope at the lower bound comes from the effective maximum MPC.
        vNvrsP = np.insert(
            vNvrsP, 0, self.MPCmaxEff ** (-self.CRRA / (1.0 - self.CRRA))
        )
        MPCminNvrs = self.MPCminNow ** (-self.CRRA / (1.0 - self.CRRA))
        vNvrsFuncNow = CubicInterp(
            mNrm_temp, vNvrs, vNvrsP, MPCminNvrs * self.hNrmNow, MPCminNvrs
        )
        vFuncNow = ValueFuncCRRA(vNvrsFuncNow, self.CRRA)
        return vFuncNow
    def add_vPPfunc(self, solution):
        """
        Adds the marginal marginal value function to an existing solution, so
        that the next solver can evaluate vPP and thus use cubic interpolation.

        Parameters
        ----------
        solution : ConsumerSolution
            The solution to this single period problem, which must include the
            consumption function.

        Returns
        -------
        solution : ConsumerSolution
            The same solution passed as input, but with the marginal marginal
            value function for this period added as the attribute vPPfunc.
        """
        vPPfuncNow = MargMargValueFuncCRRA(solution.cFunc, self.CRRA)
        solution.vPPfunc = vPPfuncNow
        return solution
    def solve(self):
        """
        Solves the single period consumption-saving problem using the method of
        endogenous gridpoints. Solution includes a consumption function cFunc
        (using cubic or linear splines), a marginal value function vPfunc, a min-
        imum acceptable level of normalized market resources mNrmMin, normalized
        human wealth hNrm, and bounding MPCs MPCmin and MPCmax. It might also
        have a value function vFunc and marginal marginal value function vPPfunc.

        Parameters
        ----------
        none

        Returns
        -------
        solution : ConsumerSolution
            The solution to the single period consumption-saving problem.
        """
        # Make arrays of end-of-period assets and end-of-period marginal value
        aNrm = self.prepare_to_calc_EndOfPrdvP()
        EndOfPrdvP = self.calc_EndOfPrdvP()
        # Construct a basic solution for this period
        if self.CubicBool:
            solution = self.make_basic_solution(
                EndOfPrdvP, aNrm, interpolator=self.make_cubic_cFunc
            )
        else:
            solution = self.make_basic_solution(
                EndOfPrdvP, aNrm, interpolator=self.make_linear_cFunc
            )
        solution = self.add_MPC_and_human_wealth(solution)  # add a few things
        solution = self.add_stable_points(solution)
        # Add the value function if requested, as well as the marginal marginal
        # value function if cubic splines were used (to prepare for next period)
        if self.vFuncBool:
            solution = self.add_vFunc(solution, EndOfPrdvP)
        if self.CubicBool:
            solution = self.add_vPPfunc(solution)
        return solution
####################################################################################################
####################################################################################################
class ConsKinkedRsolver(ConsIndShockSolver):
    """
    A class to solve a single period consumption-saving problem where the interest
    rate on debt differs from the interest rate on savings. Inherits from
    ConsIndShockSolver, with nearly identical inputs and outputs. The key diff-
    erence is that Rfree is replaced by Rsave (a>0) and Rboro (a<0). The solver
    can handle Rboro == Rsave, which makes it identical to ConsIndShocksolver, but
    it terminates immediately if Rboro < Rsave, as this has a different solution.

    Parameters
    ----------
    solution_next : ConsumerSolution
        The solution to next period's one period problem.
    IncShkDstn : distribution.Distribution
        A discrete
        approximation to the income process between the period being solved
        and the one immediately following (in solution_next).
    LivPrb : float
        Survival probability; likelihood of being alive at the beginning of
        the succeeding period.
    DiscFac : float
        Intertemporal discount factor for future utility.
    CRRA : float
        Coefficient of relative risk aversion.
    Rboro: float
        Interest factor on assets between this period and the succeeding
        period when assets are negative.
    Rsave: float
        Interest factor on assets between this period and the succeeding
        period when assets are positive.
    PermGroFac : float
        Expected permanent income growth factor at the end of this period.
    BoroCnstArt: float or None
        Borrowing constraint for the minimum allowable assets to end the
        period with. If it is less than the natural borrowing constraint,
        then it is irrelevant; BoroCnstArt=None indicates no artificial bor-
        rowing constraint.
    aXtraGrid: np.array
        Array of "extra" end-of-period asset values-- assets above the
        absolute minimum acceptable level.
    vFuncBool: boolean
        An indicator for whether the value function should be computed and
        included in the reported solution.
    CubicBool: boolean
        An indicator for whether the solver should use cubic or linear inter-
        polation.
    """
    def __init__(
        self,
        solution_next,
        IncShkDstn,
        LivPrb,
        DiscFac,
        CRRA,
        Rboro,
        Rsave,
        PermGroFac,
        BoroCnstArt,
        aXtraGrid,
        vFuncBool,
        CubicBool,
    ):
        # A borrowing rate below the saving rate would create an arbitrage
        # opportunity and a qualitatively different solution; refuse to solve.
        assert (
            Rboro >= Rsave
        ), "Interest factor on debt less than interest factor on savings!"
        # Initialize the solver. Most of the steps are exactly the same as in
        # the non-kinked-R basic case, so start with that. Rboro plays the role
        # of Rfree until prepare_to_calc_EndOfPrdvP builds the kinked R vector.
        ConsIndShockSolver.__init__(
            self,
            solution_next,
            IncShkDstn,
            LivPrb,
            DiscFac,
            CRRA,
            Rboro,
            PermGroFac,
            BoroCnstArt,
            aXtraGrid,
            vFuncBool,
            CubicBool,
        )
        # Assign the interest rates as class attributes, to use them later.
        self.Rboro = Rboro
        self.Rsave = Rsave
    def make_cubic_cFunc(self, mNrm, cNrm):
        """
        Makes a cubic spline interpolation that contains the kink of the unconstrained
        consumption function for this period.

        Parameters
        ----------
        mNrm : np.array
            Corresponding market resource points for interpolation.
        cNrm : np.array
            Consumption points for interpolation.

        Returns
        -------
        cFuncUnc : CubicInterp
            The unconstrained consumption function for this period.
        """
        # Call the make_cubic_cFunc from ConsIndShockSolver.
        cFuncNowUncKink = super().make_cubic_cFunc(mNrm, cNrm)
        # Change the coefficients at the kinked points: force the spline to be
        # linear (zero second and third derivative) across the kink segment.
        cFuncNowUncKink.coeffs[self.i_kink + 1] = [
            cNrm[self.i_kink],
            mNrm[self.i_kink + 1] - mNrm[self.i_kink],
            0,
            0,
        ]
        return cFuncNowUncKink
    def add_stable_points(self, solution):
        """
        TODO:
        Placeholder method for a possible future implementation of stable
        points in the kinked R model. For now it simply serves to override
        ConsIndShock's method, which does not apply here given the multiple
        interest rates.

        Discussion:
        - The target and steady state should exist under the same conditions
          as in ConsIndShock.
        - The ConsIndShock code as it stands can not be directly applied
          because it assumes that R is a constant, and in this model R depends
          on the level of wealth.
        - After allowing for wealth-depending interest rates, the existing
          code might work without modification to add the stable points. If not,
          it should be possible to find these values by checking within three
          distinct intervals:
            - From h_min to the lower kink.
            - From the lower kink to the upper kink
            - From the upper kink to infinity.
          the stable points must be in one of these regions.
        """
        return solution
    def prepare_to_calc_EndOfPrdvP(self):
        """
        Prepare to calculate end-of-period marginal value by creating an array
        of market resources that the agent could have next period, considering
        the grid of end-of-period assets and the distribution of shocks he might
        experience next period. This differs from the baseline case because
        different savings choices yield different interest rates.

        Parameters
        ----------
        none

        Returns
        -------
        aNrmNow : np.array
            A 1D array of end-of-period assets; also stored as attribute of self.
        """
        KinkBool = (
            self.Rboro > self.Rsave
        )  # Boolean indicating that there is actually a kink.
        # When Rboro == Rsave, this method acts just like it did in IndShock.
        # When Rboro < Rsave, the solver would have terminated when it was called.
        # Make a grid of end-of-period assets, including *two* copies of a=0,
        # so the kink at zero assets gets a gridpoint on each side.
        if KinkBool:
            aNrmNow = np.sort(
                np.hstack(
                    (np.asarray(self.aXtraGrid) + self.mNrmMinNow, np.array([0.0, 0.0]))
                )
            )
        else:
            aNrmNow = np.asarray(self.aXtraGrid) + self.mNrmMinNow
        aXtraCount = aNrmNow.size
        # Make tiled versions of the assets grid and income shocks
        ShkCount = self.TranShkValsNext.size
        aNrm_temp = np.tile(aNrmNow, (ShkCount, 1))
        PermShkVals_temp = (np.tile(self.PermShkValsNext, (aXtraCount, 1))).transpose()
        TranShkVals_temp = (np.tile(self.TranShkValsNext, (aXtraCount, 1))).transpose()
        ShkPrbs_temp = (np.tile(self.ShkPrbsNext, (aXtraCount, 1))).transpose()
        # Make a 1D array of the interest factor at each asset gridpoint:
        # Rboro applies below the kink (a <= 0), Rsave above it.
        Rfree_vec = self.Rsave * np.ones(aXtraCount)
        if KinkBool:
            self.i_kink = (
                np.sum(aNrmNow <= 0) - 1
            )  # Save the index of the kink point as an attribute
            Rfree_vec[0: self.i_kink] = self.Rboro
        self.Rfree = Rfree_vec
        Rfree_temp = np.tile(Rfree_vec, (ShkCount, 1))
        # Make an array of market resources that we could have next period,
        # considering the grid of assets and the income shocks that could occur
        mNrmNext = (
            Rfree_temp / (self.PermGroFac * PermShkVals_temp) * aNrm_temp
            + TranShkVals_temp
        )
        # Recalculate the minimum MPC and human wealth using the interest factor on saving.
        # This overwrites values from set_and_update_values, which were based on Rboro instead.
        if KinkBool:
            PatFacTop = (
                (self.Rsave * self.DiscFacEff) ** (1.0 / self.CRRA)
            ) / self.Rsave
            self.MPCminNow = 1.0 / (1.0 + PatFacTop / self.solution_next.MPCmin)
            self.hNrmNow = (
                self.PermGroFac
                / self.Rsave
                * (
                    np.dot(
                        self.ShkPrbsNext, self.TranShkValsNext * self.PermShkValsNext
                    )
                    + self.solution_next.hNrm
                )
            )
        # Store some of the constructed arrays for later use and return the assets grid
        self.PermShkVals_temp = PermShkVals_temp
        self.ShkPrbs_temp = ShkPrbs_temp
        self.mNrmNext = mNrmNext
        self.aNrmNow = aNrmNow
        return aNrmNow
# ============================================================================
# == Classes for representing types of consumer agents (and things they do) ==
# ============================================================================
# Make a dictionary to specify a perfect foresight consumer type
# Default parameter values for a perfect foresight consumer type. Entries
# marked "simulation only" have no effect on the solution of the model.
init_perfect_foresight = {
    'cycles': 1,            # Finite, non-cyclic model
    'CRRA': 2.0,            # Coefficient of relative risk aversion
    'Rfree': 1.03,          # Interest factor on assets
    'DiscFac': 0.96,        # Intertemporal discount factor
    'LivPrb': [0.98],       # Survival probability
    'PermGroFac': [1.01],   # Permanent income growth factor
    'BoroCnstArt': None,    # Artificial borrowing constraint
    'MaxKinks': 400,        # Maximum number of kink points allowed in cFunc (should be large)
    'AgentCount': 10000,    # Number of agents of this type (simulation only)
    'aNrmInitMean': 0.0,    # Mean of log initial assets (simulation only)
    'aNrmInitStd': 1.0,     # Standard deviation of log initial assets (simulation only)
    'pLvlInitMean': 0.0,    # Mean of log initial permanent income (simulation only)
    'pLvlInitStd': 0.0,     # Standard deviation of log initial permanent income (simulation only)
    'PermGroFacAgg': 1.0,   # Aggregate permanent income growth factor: portion of
                            # PermGroFac due to aggregate productivity growth (simulation only)
    'T_age': None,          # Age after which simulated agents are automatically killed
    'T_cycle': 1,           # Number of periods in the cycle for this agent type
    'PerfMITShk': False,    # Do Perfect Foresight MIT Shock: Forces Newborns to follow
                            # solution path of the agent he/she replaced when True
}
class PerfForesightConsumerType(AgentType):
"""
A perfect foresight consumer type who has no uncertainty other than mortality.
His problem is defined by a coefficient of relative risk aversion, intertemporal
discount factor, interest factor, an artificial borrowing constraint (maybe)
and time sequences of the permanent income growth rate and survival probability.
Parameters
----------
"""
# Define some universal values for all consumer types
cFunc_terminal_ = LinearInterp([0.0, 1.0], [0.0, 1.0]) # c=m in terminal period
vFunc_terminal_ = LinearInterp([0.0, 1.0], [0.0, 0.0]) # This is overwritten
solution_terminal_ = ConsumerSolution(
cFunc=cFunc_terminal_,
vFunc=vFunc_terminal_,
mNrmMin=0.0,
hNrm=0.0,
MPCmin=1.0,
MPCmax=1.0,
)
time_vary_ = ["LivPrb", "PermGroFac"]
time_inv_ = ["CRRA", "Rfree", "DiscFac", "MaxKinks", "BoroCnstArt" ]
state_vars = ['pLvl', 'PlvlAgg', 'bNrm', 'mNrm', "aNrm"]
shock_vars_ = []
def __init__(self, verbose=1, quiet=False, **kwds):
params = init_perfect_foresight.copy()
params.update(kwds)
kwds = params
# Initialize a basic AgentType
AgentType.__init__(
self,
solution_terminal=deepcopy(self.solution_terminal_),
pseudo_terminal=False,
**kwds
)
# Add consumer-type specific objects, copying to create independent versions
self.time_vary = deepcopy(self.time_vary_)
self.time_inv = deepcopy(self.time_inv_)
self.shock_vars = deepcopy(self.shock_vars_)
self.verbose = verbose
self.quiet = quiet
self.solve_one_period = make_one_period_oo_solver(ConsPerfForesightSolver)
set_verbosity_level((4 - verbose) * 10)
def pre_solve(self):
self.update_solution_terminal() # Solve the terminal period problem
# Fill in BoroCnstArt and MaxKinks if they're not specified or are irrelevant.
if not hasattr(self, "BoroCnstArt"): # If no borrowing constraint specified...
self.BoroCnstArt = None # ...assume the user wanted none
if not hasattr(self, "MaxKinks"):
if self.cycles > 0: # If it's not an infinite horizon model...
self.MaxKinks = np.inf # ...there's no need to set MaxKinks
elif self.BoroCnstArt is None: # If there's no borrowing constraint...
self.MaxKinks = np.inf # ...there's no need to set MaxKinks
else:
raise (
AttributeError(
"PerfForesightConsumerType requires the attribute MaxKinks to be specified when BoroCnstArt is not None and cycles == 0."
)
)
def check_restrictions(self):
"""
A method to check that various restrictions are met for the model class.
"""
if self.DiscFac < 0:
raise Exception("DiscFac is below zero with value: " + str(self.DiscFac))
return
def update_solution_terminal(self):
"""
Update the terminal period solution. This method should be run when a
new AgentType is created or when CRRA changes.
Parameters
----------
none
Returns
-------
none
"""
self.solution_terminal.vFunc = ValueFuncCRRA(self.cFunc_terminal_, self.CRRA)
self.solution_terminal.vPfunc = MargValueFuncCRRA(self.cFunc_terminal_, self.CRRA)
self.solution_terminal.vPPfunc = MargMargValueFuncCRRA(
self.cFunc_terminal_, self.CRRA
)
def unpack_cFunc(self):
""" DEPRECATED: Use solution.unpack('cFunc') instead.
"Unpacks" the consumption functions into their own field for easier access.
After the model has been solved, the consumption functions reside in the
attribute cFunc of each element of ConsumerType.solution. This method
creates a (time varying) attribute cFunc that contains a list of consumption
functions.
Parameters
----------
none
Returns
-------
none
"""
_log.critical(
"unpack_cFunc is deprecated and it will soon be removed, "
"please use unpack('cFunc') instead."
)
self.unpack("cFunc")
    def initialize_sim(self):
        # Set the aggregate state before the generic AgentType machinery
        # initializes the rest of the simulation variables.
        self.PermShkAggNow = self.PermGroFacAgg  # This never changes during simulation
        # Aggregate permanent income level starts normalized at 1.0
        self.state_now['PlvlAgg'] = 1.0
        AgentType.initialize_sim(self)
def sim_birth(self, which_agents):
"""
Makes new consumers for the given indices. Initialized variables include aNrm and pLvl, as
well as time variables t_age and t_cycle. Normalized assets and permanent income levels
are drawn from lognormal distributions given by aNrmInitMean and aNrmInitStd (etc).
Parameters
----------
which_agents : np.array(Bool)
Boolean array of size self.AgentCount indicating which agents should be "born".
Returns
-------
None
"""
# Get and store states for newly born agents
N = np.sum(which_agents) # Number of new consumers to make
self.state_now['aNrm'][which_agents] = Lognormal(
mu=self.aNrmInitMean,
sigma=self.aNrmInitStd,
seed=self.RNG.randint(0, 2 ** 31 - 1),
).draw(N)
# why is a now variable set here? Because it's an aggregate.
pLvlInitMeanNow = self.pLvlInitMean + np.log(
self.state_now['PlvlAgg']
) # Account for newer cohorts having higher permanent income
self.state_now['pLvl'][which_agents] = Lognormal(
pLvlInitMeanNow,
self.pLvlInitStd,
seed=self.RNG.randint(0, 2 ** 31 - 1)
).draw(N)
self.t_age[which_agents] = 0 # How many periods since each agent was born
if self.PerfMITShk == False: # If True, Newborns inherit t_cycle of agent they replaced (i.e. t_cycles are not reset).
self.t_cycle[
which_agents
] = 0 # Which period of the cycle each agent is currently in
return None
def sim_death(self):
"""
Determines which agents die this period and must be replaced. Uses the sequence in LivPrb
to determine survival probabilities for each agent.
Parameters
----------
None
Returns
-------
which_agents : np.array(bool)
Boolean array of size AgentCount indicating which agents die.
"""
# Determine who dies
DiePrb_by_t_cycle = 1.0 - np.asarray(self.LivPrb)
DiePrb = DiePrb_by_t_cycle[
self.t_cycle - 1
] # Time has already advanced, so look back one
# In finite-horizon problems the previous line gives newborns the
# survival probability of the last non-terminal period. This is okay,
# however, since they will be instantly replaced by new newborns if
# they die.
# See: https://github.com/econ-ark/HARK/pull/981
DeathShks = Uniform(seed=self.RNG.randint(0, 2 ** 31 - 1)).draw(
N=self.AgentCount
)
which_agents = DeathShks < DiePrb
if self.T_age is not None: # Kill agents that have lived for too many periods
too_old = self.t_age >= self.T_age
which_agents = np.logical_or(which_agents, too_old)
return which_agents
def get_shocks(self):
"""
Finds permanent and transitory income "shocks" for each agent this period. As this is a
perfect foresight model, there are no stochastic shocks: PermShkNow = PermGroFac for each
agent (according to their t_cycle) and TranShkNow = 1.0 for all agents.
Parameters
----------
None
Returns
-------
None
"""
PermGroFac = np.array(self.PermGroFac)
self.shocks['PermShk'] = PermGroFac[
self.t_cycle - 1
] # cycle time has already been advanced
self.shocks['TranShk'] = np.ones(self.AgentCount)
def get_Rfree(self):
"""
Returns an array of size self.AgentCount with self.Rfree in every entry.
Parameters
----------
None
Returns
-------
RfreeNow : np.array
Array of size self.AgentCount with risk free interest rate for each agent.
"""
RfreeNow = self.Rfree * np.ones(self.AgentCount)
return RfreeNow
def transition(self):
pLvlPrev = self.state_prev['pLvl']
aNrmPrev = self.state_prev['aNrm']
RfreeNow = self.get_Rfree()
# Calculate new states: normalized market resources and permanent income level
pLvlNow = pLvlPrev*self.shocks['PermShk'] # Updated permanent income level
# Updated aggregate permanent productivity level
PlvlAggNow = self.state_prev['PlvlAgg']*self.PermShkAggNow
# "Effective" interest factor on normalized assets
ReffNow = RfreeNow/self.shocks['PermShk']
bNrmNow = ReffNow*aNrmPrev # Bank balances before labor income
mNrmNow = bNrmNow + self.shocks['TranShk'] # Market resources after income
return pLvlNow, PlvlAggNow, bNrmNow, mNrmNow, None
def get_controls(self):
"""
Calculates consumption for each consumer of this type using the consumption functions.
Parameters
----------
None
Returns
-------
None
"""
cNrmNow = np.zeros(self.AgentCount) + np.nan
MPCnow = np.zeros(self.AgentCount) + np.nan
for t in range(self.T_cycle):
these = t == self.t_cycle
cNrmNow[these], MPCnow[these] = self.solution[t].cFunc.eval_with_derivative(
self.state_now['mNrm'][these]
)
self.controls['cNrm'] = cNrmNow
# MPCnow is not really a control
self.MPCnow = MPCnow
return None
    def get_poststates(self):
        """
        Calculates end-of-period assets for each consumer of this type.
        Parameters
        ----------
        None
        Returns
        -------
        None
        """
        # End-of-period assets: market resources left over after consumption
        # should this be "Now", or "Prev"?!?
        self.state_now['aNrm'] = self.state_now['mNrm'] - self.controls['cNrm']
        # Useful in some cases to precalculate asset level
        self.state_now['aLvl'] = self.state_now['aNrm'] * self.state_now['pLvl']
        # moves now to prev
        super().get_poststates()
        return None
def check_condition(self, name, test, messages, verbose, verbose_messages=None):
"""
Checks one condition.
Parameters
----------
name : string
Name for the condition.
test : function(self -> boolean)
A function (of self) which tests the condition
messages : dict{boolean : string}
A dictiomary with boolean keys containing values
for messages to print if the condition is
true or false.
verbose_messages : dict{boolean : string}
(Optional) A dictiomary with boolean keys containing values
for messages to print if the condition is
true or false under verbose printing.
"""
self.conditions[name] = test(self)
set_verbosity_level((4 - verbose) * 10)
_log.info(messages[self.conditions[name]].format(self))
if verbose_messages:
_log.debug(verbose_messages[self.conditions[name]].format(self))
def check_AIC(self, verbose=None):
"""
Evaluate and report on the Absolute Impatience Condition
"""
name = "AIC"
def test(agent): return agent.thorn < 1
messages = {
True: "The value of the Absolute Patience Factor (APF) for the supplied parameter values satisfies the Absolute Impatience Condition.",
False: "The given type violates the Absolute Impatience Condition with the supplied parameter values; the APF is {0.thorn}",
}
verbose_messages = {
True: " Because the APF < 1, the absolute amount of consumption is expected to fall over time.",
False: " Because the APF > 1, the absolute amount of consumption is expected to grow over time.",
}
verbose = self.verbose if verbose is None else verbose
self.check_condition(name, test, messages, verbose, verbose_messages)
def check_GICRaw(self, verbose=None):
"""
Evaluate and report on the Growth Impatience Condition for the Perfect Foresight model
"""
name = "GICRaw"
self.GPFRaw = self.thorn / self.PermGroFac[0]
def test(agent): return agent.GPFRaw < 1
messages = {
True: "The value of the Growth Patience Factor for the supplied parameter values satisfies the Perfect Foresight Growth Impatience Condition.",
False: "The value of the Growth Patience Factor for the supplied parameter values fails the Perfect Foresight Growth Impatience Condition; the GPFRaw is: {0.GPFRaw}",
}
verbose_messages = {
True: " Therefore, for a perfect foresight consumer, the ratio of individual wealth to permanent income will fall indefinitely.",
False: " Therefore, for a perfect foresight consumer, the ratio of individual wealth to permanent income is expected to grow toward infinity.",
}
verbose = self.verbose if verbose is None else verbose
self.check_condition(name, test, messages, verbose, verbose_messages)
def check_RIC(self, verbose=None):
"""
Evaluate and report on the Return Impatience Condition
"""
self.RPF = self.thorn / self.Rfree
name = "RIC"
def test(agent): return self.RPF < 1
messages = {
True: "The value of the Return Patience Factor for the supplied parameter values satisfies the Return Impatience Condition.",
False: "The value of the Return Patience Factor for the supplied parameter values fails the Return Impatience Condition; the factor is {0.RPF}",
}
verbose_messages = {
True: " Therefore, the limiting consumption function is not c(m)=0 for all m",
False: " Therefore, if the FHWC is satisfied, the limiting consumption function is c(m)=0 for all m.",
}
verbose = self.verbose if verbose is None else verbose
self.check_condition(name, test, messages, verbose, verbose_messages)
def check_FHWC(self, verbose=None):
"""
Evaluate and report on the Finite Human Wealth Condition
"""
self.FHWF = self.PermGroFac[0] / self.Rfree
self.cNrmPDV = 1.0 / (1.0 - self.thorn / self.Rfree)
name = "FHWC"
def test(agent): return self.FHWF < 1
messages = {
True: "The Finite Human wealth factor value for the supplied parameter values satisfies the Finite Human Wealth Condition.",
False: "The given type violates the Finite Human Wealth Condition; the Finite Human wealth factor value is {0.FHWF}",
}
verbose_messages = {
True: " Therefore, the limiting consumption function is not c(m)=Infinity\nand human wealth normalized by permanent income is {0.hNrm}\nand the PDV of future consumption growth is {0.cNrmPDV}",
False: " Therefore, the limiting consumption function is c(m)=Infinity for all m unless the RIC is also violated. If both FHWC and RIC fail and the consumer faces a liquidity constraint, the limiting consumption function is nondegenerate but has a limiting slope of 0. (https://econ-ark.github.io/BufferStockTheory#PFGICRawHoldsFHWCFailsRICFailsDiscuss)",
}
verbose = self.verbose if verbose is None else verbose
self.check_condition(name, test, messages, verbose)
def check_conditions(self, verbose=None):
"""
This method checks whether the instance's type satisfies the
Absolute Impatience Condition (AIC),
the Return Impatience Condition (RIC),
the Finite Human Wealth Condition (FHWC), the perfect foresight
model's Growth Impatience Condition (GICRaw) and
Perfect Foresight Finite Value of Autarky Condition (FVACPF). Depending on the configuration of parameter values, some
combination of these conditions must be satisfied in order for the problem to have
a nondegenerate solution. To check which conditions are required, in the verbose mode
a reference to the relevant theoretical literature is made.
Parameters
----------
verbose : boolean
Specifies different levels of verbosity of feedback. When False, it only reports whether the
instance's type fails to satisfy a particular condition. When True, it reports all results, i.e.
the factor values for all conditions.
Returns
-------
None
"""
self.conditions = {}
self.violated = False
# This method only checks for the conditions for infinite horizon models
# with a 1 period cycle. If these conditions are not met, we exit early.
if self.cycles != 0 or self.T_cycle > 1:
return
self.thorn = (self.Rfree * self.DiscFac * self.LivPrb[0]) ** (1 / self.CRRA)
verbose = self.verbose if verbose is None else verbose
self.check_AIC(verbose)
self.check_GICRaw(verbose)
self.check_RIC(verbose)
self.check_FHWC(verbose)
if hasattr(self, "BoroCnstArt") and self.BoroCnstArt is not None:
self.violated = not self.conditions["RIC"]
else:
self.violated = not self.conditions["RIC"] or not self.conditions["FHWC"]
# Make a dictionary to specify an idiosyncratic income shocks consumer
init_idiosyncratic_shocks = dict(
init_perfect_foresight,
**{
# assets above grid parameters
"aXtraMin": 0.001, # Minimum end-of-period "assets above minimum" value
"aXtraMax": 20, # Maximum end-of-period "assets above minimum" value
"aXtraNestFac": 3, # Exponential nesting factor when constructing "assets above minimum" grid
"aXtraCount": 48, # Number of points in the grid of "assets above minimum"
"aXtraExtra": [
None
], # Some other value of "assets above minimum" to add to the grid, not used
# Income process variables
"PermShkStd": [0.1], # Standard deviation of log permanent income shocks
"PermShkCount": 7, # Number of points in discrete approximation to permanent income shocks
"TranShkStd": [0.1], # Standard deviation of log transitory income shocks
"TranShkCount": 7, # Number of points in discrete approximation to transitory income shocks
"UnempPrb": 0.05, # Probability of unemployment while working
"UnempPrbRet": 0.005, # Probability of "unemployment" while retired
"IncUnemp": 0.3, # Unemployment benefits replacement rate
"IncUnempRet": 0.0, # "Unemployment" benefits when retired
"BoroCnstArt": 0.0, # Artificial borrowing constraint; imposed minimum level of end-of period assets
"tax_rate": 0.0, # Flat income tax rate
"T_retire": 0, # Period of retirement (0 --> no retirement)
"vFuncBool": False, # Whether to calculate the value function during solution
"CubicBool": False, # Use cubic spline interpolation when True, linear interpolation when False
}
)
class IndShockConsumerType(PerfForesightConsumerType):
"""
A consumer type with idiosyncratic shocks to permanent and transitory income.
His problem is defined by a sequence of income distributions, survival prob-
abilities, and permanent income growth rates, as well as time invariant values
for risk aversion, discount factor, the interest rate, the grid of end-of-
period assets, and an artificial borrowing constraint.
Parameters
----------
cycles : int
Number of times the sequence of periods should be solved.
"""
time_inv_ = PerfForesightConsumerType.time_inv_ + [
"BoroCnstArt",
"vFuncBool",
"CubicBool",
]
time_inv_.remove(
"MaxKinks"
) # This is in the PerfForesight model but not ConsIndShock
shock_vars_ = ['PermShk', 'TranShk']
def __init__(self, verbose=1, quiet=False, **kwds):
params = init_idiosyncratic_shocks.copy()
params.update(kwds)
# Initialize a basic AgentType
PerfForesightConsumerType.__init__(
self, verbose=verbose, quiet=quiet, **params
)
# Add consumer-type specific objects, copying to create independent versions
if (not self.CubicBool) and (not self.vFuncBool):
solver = ConsIndShockSolverBasic
else: # Use the "advanced" solver if either is requested
solver = ConsIndShockSolver
self.solve_one_period = make_one_period_oo_solver(solver)
self.update() # Make assets grid, income process, terminal solution
def update_income_process(self):
"""
Updates this agent's income process based on his own attributes.
Parameters
----------
none
Returns:
-----------
none
"""
(
IncShkDstn,
PermShkDstn,
TranShkDstn,
) = self.construct_lognormal_income_process_unemployment()
self.IncShkDstn = IncShkDstn
self.PermShkDstn = PermShkDstn
self.TranShkDstn = TranShkDstn
self.add_to_time_vary("IncShkDstn", "PermShkDstn", "TranShkDstn")
def update_assets_grid(self):
"""
Updates this agent's end-of-period assets grid by constructing a multi-
exponentially spaced grid of aXtra values.
Parameters
----------
none
Returns
-------
none
"""
aXtraGrid = construct_assets_grid(self)
self.aXtraGrid = aXtraGrid
self.add_to_time_inv("aXtraGrid")
def update(self):
"""
Update the income process, the assets grid, and the terminal solution.
Parameters
----------
None
Returns
-------
None
"""
self.update_income_process()
self.update_assets_grid()
self.update_solution_terminal()
def reset_rng(self):
"""
Reset the RNG behavior of this type. This method is called automatically
by initialize_sim(), ensuring that each simulation run uses the same sequence
of random shocks; this is necessary for structural estimation to work.
This method extends AgentType.reset_rng() to also reset elements of IncShkDstn.
Parameters
----------
None
Returns
-------
None
"""
PerfForesightConsumerType.reset_rng(self)
# Reset IncShkDstn if it exists (it might not because reset_rng is called at init)
if hasattr(self, "IncShkDstn"):
for dstn in self.IncShkDstn:
dstn.reset()
    def get_shocks(self):
        """
        Gets permanent and transitory income shocks for this period. Samples from IncShkDstn for
        each period in the cycle.
        Parameters
        ----------
        None
        Returns
        -------
        None
        """
        PermShkNow = np.zeros(self.AgentCount)  # Initialize shock arrays
        TranShkNow = np.zeros(self.AgentCount)
        newborn = self.t_age == 0
        for t in range(self.T_cycle):
            these = t == self.t_cycle
            N = np.sum(these)
            if N > 0:
                # NOTE(review): t - 1 is used because cycle time has already
                # been advanced when shocks are drawn; for t == 0 this wraps to
                # the *last* element, which is corrected for newborns below.
                IncShkDstnNow = self.IncShkDstn[
                    t - 1
                ]  # set current income distribution
                PermGroFacNow = self.PermGroFac[t - 1]  # and permanent growth factor
                # Get random draws of income shocks from the discrete distribution
                IncShks = IncShkDstnNow.draw(N)
                PermShkNow[these] = (
                    IncShks[0, :] * PermGroFacNow
                )  # permanent "shock" includes expected growth
                TranShkNow[these] = IncShks[1, :]
        # That procedure used the *last* period in the sequence for newborns, but that's not right
        # Redraw shocks for newborns, using the *first* period in the sequence. Approximation.
        N = np.sum(newborn)
        if N > 0:
            these = newborn
            IncShkDstnNow = self.IncShkDstn[0]  # set current income distribution
            PermGroFacNow = self.PermGroFac[0]  # and permanent growth factor
            # Get random draws of income shocks from the discrete distribution
            EventDraws = IncShkDstnNow.draw_events(N)
            PermShkNow[these] = (
                IncShkDstnNow.X[0][EventDraws] * PermGroFacNow
            )  # permanent "shock" includes expected growth
            TranShkNow[these] = IncShkDstnNow.X[1][EventDraws]
        # PermShkNow[newborn] = 1.0
        TranShkNow[newborn] = 1.0
        # Store the shocks in self.  Agents whose transitory shock equals the
        # unemployment benefit level are flagged as unemployed.
        self.EmpNow = np.ones(self.AgentCount, dtype=bool)
        self.EmpNow[TranShkNow == self.IncUnemp] = False
        self.shocks['PermShk'] = PermShkNow
        self.shocks['TranShk'] = TranShkNow
def calc_bounding_values(self):
"""
Calculate human wealth plus minimum and maximum MPC in an infinite
horizon model with only one period repeated indefinitely. Store results
as attributes of self. Human wealth is the present discounted value of
expected future income after receiving income this period, ignoring mort-
ality (because your income matters to you only if you are still alive).
The maximum MPC is the limit of the MPC as m --> mNrmMin. The
minimum MPC is the limit of the MPC as m --> infty.
Parameters
----------
None
Returns
-------
None
"""
# Unpack the income distribution and get average and worst outcomes
PermShkValsNext = self.IncShkDstn[0][1]
TranShkValsNext = self.IncShkDstn[0][2]
ShkPrbsNext = self.IncShkDstn[0][0]
Ex_IncNext = np.dot(ShkPrbsNext, PermShkValsNext * TranShkValsNext)
PermShkMinNext = np.min(PermShkValsNext)
TranShkMinNext = np.min(TranShkValsNext)
WorstIncNext = PermShkMinNext * TranShkMinNext
WorstIncPrb = np.sum(
ShkPrbsNext[(PermShkValsNext * TranShkValsNext) == WorstIncNext]
)
# Calculate human wealth and the infinite horizon natural borrowing constraint
hNrm = (Ex_IncNext * self.PermGroFac[0] / self.Rfree) / (
1.0 - self.PermGroFac[0] / self.Rfree
)
temp = self.PermGroFac[0] * PermShkMinNext / self.Rfree
BoroCnstNat = -TranShkMinNext * temp / (1.0 - temp)
PatFac = (self.DiscFac * self.LivPrb[0] * self.Rfree) ** (
1.0 / self.CRRA
) / self.Rfree
if BoroCnstNat < self.BoroCnstArt:
MPCmax = 1.0 # if natural borrowing constraint is overridden by artificial one, MPCmax is 1
else:
MPCmax = 1.0 - WorstIncPrb ** (1.0 / self.CRRA) * PatFac
MPCmin = 1.0 - PatFac
# Store the results as attributes of self
self.hNrm = hNrm
self.MPCmin = MPCmin
self.MPCmax = MPCmax
    def make_euler_error_func(self, mMax=100, approx_inc_dstn=True):
        """
        Creates a "normalized Euler error" function for this instance, mapping
        from market resources to "consumption error per dollar of consumption."
        Stores result in attribute eulerErrorFunc as an interpolated function.
        Has option to use approximate income distribution stored in self.IncShkDstn
        or to use a (temporary) very dense approximation.
        Only works on (one period) infinite horizon models at this time, will
        be generalized later.
        Parameters
        ----------
        mMax : float
            Maximum normalized market resources for the Euler error function.
        approx_inc_dstn : Boolean
            Indicator for whether to use the approximate discrete income distri-
            bution stored in self.IncShkDstn[0], or to use a very accurate
            discrete approximation instead. When True, uses approximation in
            IncShkDstn; when False, makes and uses a very dense approximation.
        Returns
        -------
        None
        Notes
        -----
        This method is not used by any other code in the library. Rather, it is here
        for expository and benchmarking purposes.
        """
        # Get the income distribution (or make a very dense one)
        if approx_inc_dstn:
            IncShkDstn = self.IncShkDstn[0]
        else:
            # Build dense 200-point approximations with fat tails for both
            # shock distributions, then combine them as independent
            TranShkDstn = MeanOneLogNormal(sigma=self.TranShkStd[0]).approx(
                N=200, tail_N=50, tail_order=1.3, tail_bound=[0.05, 0.95]
            )
            TranShkDstn = add_discrete_outcome_constant_mean(
                TranShkDstn, self.UnempPrb, self.IncUnemp
            )
            PermShkDstn = MeanOneLogNormal(sigma=self.PermShkStd[0]).approx(
                N=200, tail_N=50, tail_order=1.3, tail_bound=[0.05, 0.95]
            )
            IncShkDstn = combine_indep_dstns(PermShkDstn, TranShkDstn)
        # Make a grid of market resources
        mNowMin = self.solution[0].mNrmMin + 10 ** (
            -15
        )  # add tiny bit to get around 0/0 problem
        mNowMax = mMax
        mNowGrid = np.linspace(mNowMin, mNowMax, 1000)
        # Get the consumption function this period and the marginal value function
        # for next period. Note that this part assumes a one period cycle.
        cFuncNow = self.solution[0].cFunc
        vPfuncNext = self.solution[0].vPfunc
        # Calculate consumption this period at each gridpoint (and assets)
        cNowGrid = cFuncNow(mNowGrid)
        aNowGrid = mNowGrid - cNowGrid
        # Tile the grids for fast computation.  IncShkDstn indexing:
        # [0]=probabilities, [1]=permanent shocks, [2]=transitory shocks.
        ShkCount = IncShkDstn[0].size
        aCount = aNowGrid.size
        aNowGrid_tiled = np.tile(aNowGrid, (ShkCount, 1))
        PermShkVals_tiled = (np.tile(IncShkDstn[1], (aCount, 1))).transpose()
        TranShkVals_tiled = (np.tile(IncShkDstn[2], (aCount, 1))).transpose()
        ShkPrbs_tiled = (np.tile(IncShkDstn[0], (aCount, 1))).transpose()
        # Calculate marginal value next period for each gridpoint and each shock
        mNextArray = (
            self.Rfree / (self.PermGroFac[0] * PermShkVals_tiled) * aNowGrid_tiled
            + TranShkVals_tiled
        )
        vPnextArray = vPfuncNext(mNextArray)
        # Calculate expected marginal value and implied optimal consumption
        # (probability-weighted sum over the shock dimension, axis=0)
        ExvPnextGrid = (
            self.DiscFac
            * self.Rfree
            * self.LivPrb[0]
            * self.PermGroFac[0] ** (-self.CRRA)
            * np.sum(
                PermShkVals_tiled ** (-self.CRRA) * vPnextArray * ShkPrbs_tiled, axis=0
            )
        )
        cOptGrid = ExvPnextGrid ** (
            -1.0 / self.CRRA
        )  # This is the 'Endogenous Gridpoints' step
        # Calculate Euler error and store an interpolated function
        EulerErrorNrmGrid = (cNowGrid - cOptGrid) / cOptGrid
        eulerErrorFunc = LinearInterp(mNowGrid, EulerErrorNrmGrid)
        self.eulerErrorFunc = eulerErrorFunc
def pre_solve(self):
# AgentType.pre_solve(self)
# Update all income process variables to match any attributes that might
# have been changed since `__init__` or `solve()` was last called.
# self.update_income_process()
self.update_solution_terminal()
if not self.quiet:
self.check_conditions(verbose=self.verbose)
def check_GICNrm(self, verbose=None):
"""
Check Individual Growth Patience Factor.
"""
self.GPFNrm = self.thorn / (
self.PermGroFac[0] * self.InvEx_PermShkInv
) # [url]/#GICRawI
name = "GICRaw"
def test(agent): return agent.GPFNrm <= 1
messages = {
True: "\nThe value of the Individual Growth Patience Factor for the supplied parameter values satisfies the Growth Impatience Condition; the value of the GPFNrm is: {0.GPFNrm}",
False: "\nThe given parameter values violate the Normalized Growth Impatience Condition; the GPFNrm is: {0.GPFNrm}",
}
verbose_messages = {
True: " Therefore, a target level of the individual market resources ratio m exists (see {0.url}/#onetarget for more).\n",
False: " Therefore, a target ratio of individual market resources to individual permanent income does not exist. (see {0.url}/#onetarget for more).\n",
}
verbose = self.verbose if verbose is None else verbose
self.check_condition(name, test, messages, verbose, verbose_messages)
def check_GICAggLivPrb(self, verbose=None):
name = "GICAggLivPrb"
def test(agent): return agent.GPFAggLivPrb <= 1
messages = {
True: "\nThe value of the Mortality Adjusted Aggregate Growth Patience Factor for the supplied parameter values satisfies the Mortality Adjusted Aggregate Growth Imatience Condition; the value of the GPFAggLivPrb is: {0.GPFAggLivPrb}",
False: "\nThe given parameter values violate the Mortality Adjusted Aggregate Growth Imatience Condition; the GPFAggLivPrb is: {0.GPFAggLivPrb}",
}
verbose_messages = {
# (see {0.url}/#WRIC for more).',
True: " Therefore, a target level of the ratio of aggregate market resources to aggregate permanent income exists.\n",
# (see {0.url}/#WRIC for more).'
False: " Therefore, a target ratio of aggregate resources to aggregate permanent income may not exist.\n",
}
verbose = self.verbose if verbose is None else verbose
self.check_condition(name, test, messages, verbose, verbose_messages)
def check_WRIC(self, verbose=None):
"""
Evaluate and report on the Weak Return Impatience Condition
[url]/#WRPF modified to incorporate LivPrb
"""
self.WRPF = (
(self.UnempPrb ** (1 / self.CRRA))
* (self.Rfree * self.DiscFac * self.LivPrb[0]) ** (1 / self.CRRA)
/ self.Rfree
)
name = "WRIC"
def test(agent): return agent.WRPF <= 1
messages = {
True: "\nThe Weak Return Patience Factor value for the supplied parameter values satisfies the Weak Return Impatience Condition; the WRPF is {0.WRPF}.",
False: "\nThe Weak Return Patience Factor value for the supplied parameter values fails the Weak Return Impatience Condition; the WRPF is {0.WRPF} (see {0.url}/#WRIC for more).",
}
verbose_messages = {
True: " Therefore, a nondegenerate solution exists if the FVAC is also satisfied. (see {0.url}/#WRIC for more) \n",
False: " Therefore, a nondegenerate solution is not available (see {0.url}/#WRIC for more). \n",
}
verbose = self.verbose if verbose is None else verbose
self.check_condition(name, test, messages, verbose, verbose_messages)
def check_FVAC(self, verbose=None):
"""
Evaluate and report on the Finite Value of Autarky Condition
Hyperlink to paper: [url]/#Autarky-Value
"""
EpShkuInv = calc_expectation(
self.PermShkDstn[0],
lambda x: x ** (1 - self.CRRA)
)
if self.CRRA != 1.0:
uInvEpShkuInv = EpShkuInv ** (
1 / (1 - self.CRRA)
) # The term that gives a utility-consequence-adjusted utility growth
else:
uInvEpShkuInv = 1.0
self.uInvEpShkuInv = uInvEpShkuInv
self.VAF = self.LivPrb[0] * self.DiscFac * self.uInvEpShkuInv
name = "FVAC"
def test(agent): return agent.VAF <= 1
messages = {
True: "\nThe Value of Autarky Factor (VAF) for the supplied parameter values satisfies the Finite Value of Autarky Condition; the VAF is {0.VAF}",
False: "\nThe Value of Autarky Factor (VAF) for the supplied parameter values fails the Finite Value of Autarky Condition; the VAF is {0.VAF}",
}
verbose_messages = {
True: " Therefore, a nondegenerate solution exists if the WRIC also holds; see {0.url}/#Conditions-Under-Which-the-Problem-Defines-a-Contraction-Mapping\n",
False: " Therefore, a nondegenerate solution is not available (see {0.url}/#Conditions-Under-Which-the-Problem-Defines-a-Contraction-Mapping\n",
}
verbose = self.verbose if verbose is None else verbose
self.check_condition(name, test, messages, verbose, verbose_messages)
    def check_conditions(self, verbose=None):
        """
        This method checks whether the instance's type satisfies the Absolute Impatience Condition (AIC), Weak Return
        Impatience Condition (WRIC), Finite Human Wealth Condition (FHWC) and Finite Value of
        Autarky Condition (FVAC). When combinations of these conditions are satisfied, the
        solution to the problem exhibits different characteristics. (For an exposition of the
        conditions, see https://econ-ark.github.io/BufferStockTheory/)
        Parameters
        ----------
        verbose : boolean
            Specifies different levels of verbosity of feedback. When False, it only reports whether the
            instance's type fails to satisfy a particular condition. When True, it reports all results, i.e.
            the factor values for all conditions.
        Returns
        -------
        None
        """
        self.conditions = {}
        # PerfForesightConsumerType.check_conditions(self, verbose=False, verbose_reference=False)
        self.violated = False
        # Only infinite horizon models with a one-period cycle are checked
        if self.cycles != 0 or self.T_cycle > 1:
            return
        # For theory, see hyperlink targets to expressions in
        # url=https://econ-ark.github.io/BufferStockTheory
        # For example, the hyperlink to the relevant section of the paper
        self.url = "https://econ-ark.github.io/BufferStockTheory"
        # would be referenced below as:
        # [url]/#Uncertainty-Modified-Conditions
        self.Ex_PermShkInv = calc_expectation(
            self.PermShkDstn[0], lambda x: 1 / x
        )
        # $\Ex_{t}[\psi^{-1}_{t+1}]$ (in first eqn in sec)
        # [url]/#Pat, adjusted to include mortality
        self.InvEx_PermShkInv = (
            1 / self.Ex_PermShkInv
        )  # $\underline{\psi}$ in the paper (\bar{\isp} in private version)
        self.PermGroFacAdj = self.PermGroFac[0] * self.InvEx_PermShkInv  # [url]/#PGroAdj
        # Absolute Patience Factor (thorn); NOTE(review): unlike the perfect
        # foresight version, LivPrb is not included here — confirm intended.
        self.thorn = ((self.Rfree * self.DiscFac)) ** (1 / self.CRRA)
        # self.Ex_RNrm = self.Rfree*Ex_PermShkInv/(self.PermGroFac[0]*self.LivPrb[0])
        self.GPFRaw = self.thorn / (self.PermGroFac[0])  # [url]/#GPF
        # Lower bound of aggregate wealth growth if all inheritances squandered
        self.GPFAggLivPrb = self.thorn * self.LivPrb[0] / self.PermGroFac[0]
        self.DiscFacGPFRawMax = ((self.PermGroFac[0]) ** (self.CRRA)) / (
            self.Rfree
        )  # DiscFac at growth impatience knife edge
        self.DiscFacGPFNrmMax = (
            (self.PermGroFac[0] * self.InvEx_PermShkInv) ** (self.CRRA)
        ) / (
            self.Rfree
        )  # DiscFac at growth impatience knife edge
        self.DiscFacGPFAggLivPrbMax = ((self.PermGroFac[0]) ** (self.CRRA)) / (
            self.Rfree * self.LivPrb[0]
        )  # DiscFac at growth impatience knife edge
        verbose = self.verbose if verbose is None else verbose
        # self.check_GICRaw(verbose)
        self.check_GICNrm(verbose)
        self.check_GICAggLivPrb(verbose)
        self.check_WRIC(verbose)
        self.check_FVAC(verbose)
        # A nondegenerate solution requires both WRIC and FVAC to hold
        self.violated = not self.conditions["WRIC"] or not self.conditions["FVAC"]
        if self.violated:
            _log.warning(
                '\n[!] For more information on the conditions, see Tables 3 and 4 in "Theoretical Foundations of Buffer Stock Saving" at '
                + self.url
                + "/#Factors-Defined-And-Compared"
            )
        # Report all of the factor values for diagnostic purposes
        _log.warning("GPFRaw                 = %2.6f " % (self.GPFRaw))
        _log.warning("GPFNrm                 = %2.6f " % (self.GPFNrm))
        _log.warning("GPFAggLivPrb           = %2.6f " % (self.GPFAggLivPrb))
        _log.warning("Thorn = APF            = %2.6f " % (self.thorn))
        _log.warning("PermGroFacAdj          = %2.6f " % (self.PermGroFacAdj))
        _log.warning("uInvEpShkuInv          = %2.6f " % (self.uInvEpShkuInv))
        _log.warning("VAF                    = %2.6f " % (self.VAF))
        _log.warning("WRPF                   = %2.6f " % (self.WRPF))
        _log.warning("DiscFacGPFNrmMax       = %2.6f " % (self.DiscFacGPFNrmMax))
        _log.warning("DiscFacGPFAggLivPrbMax = %2.6f " % (self.DiscFacGPFAggLivPrbMax))
def Ex_Mtp1_over_Ex_Ptp1(self, mNrm):
cNrm = self.solution[-1].cFunc(mNrm)
aNrm = mNrm - cNrm
Ex_Ptp1 = PermGroFac[0]
Ex_bLev_tp1 = aNrm * self.Rfree
Ex_Mtp1 = Ex_bLev_tp1
return Ex_Mtp1 / Ex_Ptp1
def Ex_mtp1(self, mNrm):
cNrm = self.solution[-1].cFunc(mNrm)
aNrm = mNrm - cNrm
Ex_bNrm_tp1 = aNrm * self.Rfree * self.Ex_PermShkInv / self.PermGroFac[0]
Ex_Mtp1 = (Ex_bNrm_tp1 + 1) * Ex_Ptp1 # mean TranShk and PermShk are 1
return Ex_Mtp1 / Ex_Ptp1
def calc_stable_points(self):
"""
If the problem is one that satisfies the conditions required for target ratios of different
variables to permanent income to exist, and has been solved to within the self-defined
tolerance, this method calculates the target values of market resources, consumption,
and assets.
Parameters
----------
None
Returns
-------
None
"""
infinite_horizon = cycles_left == 0
if not infinite_horizon:
_log.warning(
"The calc_stable_points method works only for infinite horizon models."
)
return
# = Functions for generating discrete income processes and
# simulated income shocks =
# ========================================================
    def construct_lognormal_income_process_unemployment(self):
        """
        Generates a list of discrete approximations to the income process for each
        life period, from end of life to beginning of life.  Permanent shocks are mean
        one lognormally distributed with standard deviation PermShkStd[t] during the
        working life, and degenerate at 1 in the retirement period.  Transitory shocks
        are mean one lognormally distributed with a point mass at IncUnemp with
        probability UnempPrb while working; they are mean one with a point mass at
        IncUnempRet with probability UnempPrbRet.  Retirement occurs
        after t=T_retire periods of working.

        Note 1: All time in this function runs forward, from t=0 to t=T

        Note 2: All parameters are passed as attributes of the input parameters.

        Parameters (passed as attributes of the input parameters)
        ----------
        PermShkStd : [float]
            List of standard deviations in log permanent income uncertainty during
            the agent's life.
        PermShkCount : int
            The number of approximation points to be used in the discrete approxima-
            tion to the permanent income shock distribution.
        TranShkStd : [float]
            List of standard deviations in log transitory income uncertainty during
            the agent's life.
        TranShkCount : int
            The number of approximation points to be used in the discrete approxima-
            tion to the transitory income shock distribution.
        UnempPrb : float
            The probability of becoming unemployed during the working period.
        UnempPrbRet : float
            The probability of not receiving typical retirement income when retired.
        T_retire : int
            The index value for the final working period in the agent's life.
            If T_retire <= 0 then there is no retirement.
        IncUnemp : float
            Transitory income received when unemployed.
        IncUnempRet : float
            Transitory income received while "unemployed" when retired.
        T_cycle : int
            Total number of non-terminal periods in the consumer's sequence of periods.

        Returns
        -------
        IncShkDstn : [distribution.Distribution]
            A list with T_cycle elements, each of which is a
            discrete approximation to the income process in a period.
        PermShkDstn : [[distribution.Distribution]]
            A list with T_cycle elements, each of which
            a discrete approximation to the permanent income shocks.
        TranShkDstn : [[distribution.Distribution]]
            A list with T_cycle elements, each of which
            a discrete approximation to the transitory income shocks.
        """
        # Unpack the parameters from the input
        PermShkStd = self.PermShkStd
        PermShkCount = self.PermShkCount
        TranShkStd = self.TranShkStd
        TranShkCount = self.TranShkCount
        T_cycle = self.T_cycle
        T_retire = self.T_retire
        UnempPrb = self.UnempPrb
        IncUnemp = self.IncUnemp
        UnempPrbRet = self.UnempPrbRet
        IncUnempRet = self.IncUnempRet

        IncShkDstn = []  # Discrete approximations to income process in each period
        PermShkDstn = []  # Discrete approximations to permanent income shocks
        TranShkDstn = []  # Discrete approximations to transitory income shocks

        # Fill out a simple discrete RV for retirement, with value 1.0 (mean of shocks)
        # in normal times; value 0.0 in "unemployment" times with small prob.
        if T_retire > 0:
            if UnempPrbRet > 0:
                PermShkValsRet = np.array(
                    [1.0, 1.0]
                )  # Permanent income is deterministic in retirement (2 states for temp income shocks)
                # Employed retirement income is scaled up so the overall mean is exactly 1.
                TranShkValsRet = np.array(
                    [
                        IncUnempRet,
                        (1.0 - UnempPrbRet * IncUnempRet) / (1.0 - UnempPrbRet),
                    ]
                )
                ShkPrbsRet = np.array([UnempPrbRet, 1.0 - UnempPrbRet])
            else:
                # No retirement unemployment risk: degenerate one-point distributions.
                PermShkValsRet = np.array([1.0])
                TranShkValsRet = np.array([1.0])
                ShkPrbsRet = np.array([1.0])
            IncShkDstnRet = DiscreteDistribution(
                ShkPrbsRet,
                [PermShkValsRet, TranShkValsRet],
                seed=self.RNG.randint(0, 2 ** 31 - 1),
            )

        # Loop to fill in the list of IncShkDstn random variables.
        for t in range(T_cycle):  # Iterate over all periods, counting forward
            if T_retire > 0 and t >= T_retire:
                # Then we are in the "retirement period" and add a retirement income object.
                IncShkDstn.append(deepcopy(IncShkDstnRet))
                PermShkDstn.append([np.array([1.0]), np.array([1.0])])
                TranShkDstn.append([ShkPrbsRet, TranShkValsRet])
            else:
                # We are in the "working life" periods.
                TranShkDstn_t = MeanOneLogNormal(sigma=TranShkStd[t]).approx(
                    TranShkCount, tail_N=0
                )
                if UnempPrb > 0:
                    # Add the unemployment point mass while preserving a mean of one.
                    TranShkDstn_t = add_discrete_outcome_constant_mean(
                        TranShkDstn_t, p=UnempPrb, x=IncUnemp
                    )
                PermShkDstn_t = MeanOneLogNormal(sigma=PermShkStd[t]).approx(
                    PermShkCount, tail_N=0
                )
                IncShkDstn.append(
                    combine_indep_dstns(
                        PermShkDstn_t,
                        TranShkDstn_t,
                        seed=self.RNG.randint(0, 2 ** 31 - 1),
                    )
                )  # mix the independent distributions
                PermShkDstn.append(PermShkDstn_t)
                TranShkDstn.append(TranShkDstn_t)
        return IncShkDstn, PermShkDstn, TranShkDstn
# Make a dictionary to specify a "kinked R" idiosyncratic shock consumer.
# Starts from the baseline idiosyncratic-shocks calibration and replaces the
# single interest factor Rfree with a borrowing/saving pair (Rboro > Rsave).
init_kinked_R = dict(
    init_idiosyncratic_shocks,
    **{
        "Rboro": 1.20,  # Interest factor on assets when borrowing, a < 0
        "Rsave": 1.02,  # Interest factor on assets when saving, a > 0
        "BoroCnstArt": None,  # kinked R is a bit silly if borrowing not allowed
        "CubicBool": True,  # kinked R is now compatible with linear cFunc and cubic cFunc
        "aXtraCount": 48,  # ...so need lots of extra gridpoints to make up for it
    }
)
del init_kinked_R["Rfree"]  # get rid of constant interest factor
class KinkedRconsumerType(IndShockConsumerType):
    """
    A consumer type that faces idiosyncratic shocks to income and has a different
    interest factor on saving vs borrowing.  Extends IndShockConsumerType, with
    very small changes.  Solver for this class is currently only compatible with
    linear spline interpolation.

    Same parameters as AgentType.

    Parameters
    ----------
    """

    # Rfree is replaced by the time-invariant pair (Rboro, Rsave).
    time_inv_ = copy(IndShockConsumerType.time_inv_)
    time_inv_.remove("Rfree")
    time_inv_ += ["Rboro", "Rsave"]

    def __init__(self, **kwds):
        params = init_kinked_R.copy()
        params.update(kwds)

        # Initialize a basic AgentType.  Deliberately calls the perfect-foresight
        # initializer (not IndShockConsumerType's) because the kinked-R solver
        # is attached below instead.
        PerfForesightConsumerType.__init__(self, **params)

        # Add consumer-type specific objects, copying to create independent versions
        self.solve_one_period = make_one_period_oo_solver(ConsKinkedRsolver)
        self.update()  # Make assets grid, income process, terminal solution

    def pre_solve(self):
        # AgentType.pre_solve(self)
        self.update_solution_terminal()

    def calc_bounding_values(self):
        """
        Calculate human wealth plus minimum and maximum MPC in an infinite
        horizon model with only one period repeated indefinitely.  Store results
        as attributes of self.  Human wealth is the present discounted value of
        expected future income after receiving income this period, ignoring mort-
        ality.  The maximum MPC is the limit of the MPC as m --> mNrmMin.  The
        minimum MPC is the limit of the MPC as m --> infty.  This version deals
        with the different interest rates on borrowing vs saving.

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        # Unpack the income distribution and get average and worst outcomes
        PermShkValsNext = self.IncShkDstn[0][1]
        TranShkValsNext = self.IncShkDstn[0][2]
        ShkPrbsNext = self.IncShkDstn[0][0]
        # BUGFIX: the distribution was referenced as the bare (undefined) name
        # `IncShkDstn`; it lives on self, and period 0 is the relevant one.
        Ex_IncNext = calc_expectation(
            self.IncShkDstn[0],
            lambda trans, perm: trans * perm
        )
        PermShkMinNext = np.min(PermShkValsNext)
        TranShkMinNext = np.min(TranShkValsNext)
        WorstIncNext = PermShkMinNext * TranShkMinNext
        WorstIncPrb = np.sum(
            ShkPrbsNext[(PermShkValsNext * TranShkValsNext) == WorstIncNext]
        )

        # Calculate human wealth and the infinite horizon natural borrowing constraint.
        # Saving rate Rsave discounts future income; Rboro governs the natural constraint.
        hNrm = (Ex_IncNext * self.PermGroFac[0] / self.Rsave) / (
            1.0 - self.PermGroFac[0] / self.Rsave
        )
        temp = self.PermGroFac[0] * PermShkMinNext / self.Rboro
        BoroCnstNat = -TranShkMinNext * temp / (1.0 - temp)

        # Patience factors at the saving and borrowing interest rates.
        PatFacTop = (self.DiscFac * self.LivPrb[0] * self.Rsave) ** (
            1.0 / self.CRRA
        ) / self.Rsave
        PatFacBot = (self.DiscFac * self.LivPrb[0] * self.Rboro) ** (
            1.0 / self.CRRA
        ) / self.Rboro
        # BUGFIX: BoroCnstArt is None by default for this type; comparing a
        # float to None raises TypeError in Python 3.  With no artificial
        # constraint, the natural constraint binds and MPCmax is computed
        # from the worst-income probability.
        if self.BoroCnstArt is not None and BoroCnstNat < self.BoroCnstArt:
            MPCmax = 1.0  # if natural borrowing constraint is overridden by artificial one, MPCmax is 1
        else:
            MPCmax = 1.0 - WorstIncPrb ** (1.0 / self.CRRA) * PatFacBot
        MPCmin = 1.0 - PatFacTop

        # Store the results as attributes of self
        self.hNrm = hNrm
        self.MPCmin = MPCmin
        self.MPCmax = MPCmax

    def make_euler_error_func(self, mMax=100, approx_inc_dstn=True):
        """
        Creates a "normalized Euler error" function for this instance, mapping
        from market resources to "consumption error per dollar of consumption."
        Stores result in attribute eulerErrorFunc as an interpolated function.
        Has option to use approximate income distribution stored in self.IncShkDstn
        or to use a (temporary) very dense approximation.

        SHOULD BE INHERITED FROM ConsIndShockModel

        Parameters
        ----------
        mMax : float
            Maximum normalized market resources for the Euler error function.
        approx_inc_dstn : Boolean
            Indicator for whether to use the approximate discrete income distri-
            bution stored in self.IncShkDstn[0], or to use a very accurate
            discrete approximation instead.  When True, uses approximation in
            IncShkDstn; when False, makes and uses a very dense approximation.

        Returns
        -------
        None

        Notes
        -----
        This method is not used by any other code in the library.  Rather, it is here
        for expository and benchmarking purposes.
        """
        raise NotImplementedError()

    def get_Rfree(self):
        """
        Returns an array of size self.AgentCount with self.Rboro or self.Rsave in each entry, based
        on whether self.aNrmNow >< 0.

        Parameters
        ----------
        None

        Returns
        -------
        RfreeNow : np.array
            Array of size self.AgentCount with risk free interest rate for each agent.
        """
        # Borrowers (aNrm <= 0) face Rboro; savers get Rsave.
        RfreeNow = self.Rboro * np.ones(self.AgentCount)
        RfreeNow[self.state_prev['aNrm'] > 0] = self.Rsave
        return RfreeNow

    def check_conditions(self):
        """
        This method checks whether the instance's type satisfies the Absolute Impatience Condition (AIC),
        the Return Impatience Condition (RIC), the Growth Impatience Condition (GICRaw), the Normalized Growth Impatience Condition (GIC-Nrm), the Weak Return
        Impatience Condition (WRIC), the Finite Human Wealth Condition (FHWC) and the Finite Value of
        Autarky Condition (FVAC).  To check which conditions are relevant to the model at hand, a
        reference to the relevant theoretical literature is made.

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        raise NotImplementedError()
def apply_flat_income_tax(
    IncShkDstn, tax_rate, T_retire, unemployed_indices=None, transitory_index=2
):
    """
    Applies a flat income tax rate to all employed income states during the working
    period of life (those before T_retire).  Time runs forward in this function.

    Parameters
    ----------
    IncShkDstn : [distribution.Distribution]
        The discrete approximation to the income distribution in each time period.
    tax_rate : float
        A flat income tax rate to be applied to all employed income.
    T_retire : int
        The time index after which the agent retires.
    unemployed_indices : [int]
        Indices of transitory shocks that represent unemployment states (no tax).
    transitory_index : int
        The index of each element of IncShkDstn representing transitory shocks.

    Returns
    -------
    IncShkDstn_new : [distribution.Distribution]
        The updated income distributions, after applying the tax.
    """
    # A missing exemption list means every transitory state is taxed.
    exempt = [] if unemployed_indices is None else list(unemployed_indices)
    # Work on a deep copy so the caller's distributions are never mutated.
    taxed = deepcopy(IncShkDstn)
    keep_share = 1 - tax_rate
    for t, period_dstn in enumerate(taxed):
        if t >= T_retire:
            continue  # retirement-period income is left untouched
        tran_shks = period_dstn[transitory_index]
        for j in range(tran_shks.size):
            if j not in exempt:
                tran_shks[j] = tran_shks[j] * keep_share
    return taxed
# =======================================================
# ================ Other useful functions ===============
# =======================================================


def construct_assets_grid(parameters):
    """
    Constructs the base grid of post-decision states, representing end-of-period
    assets above the absolute minimum.

    All parameters are passed as attributes of the single input parameters.  The
    input can be an instance of a ConsumerType, or a custom Parameters class.

    Parameters
    ----------
    aXtraMin: float
        Minimum value for the a-grid
    aXtraMax: float
        Maximum value for the a-grid
    aXtraCount: int
        Size of the a-grid
    aXtraExtra: [float]
        Extra values for the a-grid.
    aXtraNestFac: int
        Level of nesting for the exponentially spaced grid.
    grid_type: string, optional
        Kind of spacing for the grid: "linear" or "exp_mult" (the default).

    Returns
    -------
    aXtraGrid: np.ndarray
        Base array of values for the post-decision-state grid.
    """
    # Unpack the parameters
    aXtraMin = parameters.aXtraMin
    aXtraMax = parameters.aXtraMax
    aXtraCount = parameters.aXtraCount
    aXtraExtra = parameters.aXtraExtra
    # GENERALIZED: grid_type was hard-coded to "exp_mult", making the "linear"
    # branch below unreachable.  Read it from parameters when present; callers
    # without the attribute get the old behavior unchanged.
    grid_type = getattr(parameters, "grid_type", "exp_mult")
    exp_nest = parameters.aXtraNestFac

    # Set up post decision state grid:
    if grid_type == "linear":
        aXtraGrid = np.linspace(aXtraMin, aXtraMax, aXtraCount)
    elif grid_type == "exp_mult":
        aXtraGrid = make_grid_exp_mult(
            ming=aXtraMin, maxg=aXtraMax, ng=aXtraCount, timestonest=exp_nest
        )
    else:
        raise Exception(
            "grid_type not recognized in __init__."
            + "Please ensure grid_type is 'linear' or 'exp_mult'"
        )
    # Add in additional points for the grid, preserving sortedness and
    # skipping None placeholders and values already on the grid:
    for a in aXtraExtra:
        if a is not None:
            if a not in aXtraGrid:
                j = aXtraGrid.searchsorted(a)
                aXtraGrid = np.insert(aXtraGrid, j, a)
    return aXtraGrid
# Make a dictionary to specify a lifecycle consumer with a finite horizon

# Main calibration characteristics
birth_age = 25
death_age = 90
adjust_infl_to = 1992  # reference year for inflation adjustment of incomes
# Use income estimates from Cagetti (2003) for High-school graduates
education = "HS"
income_calib = Cagetti_income[education]

# Income specification
income_params = parse_income_spec(
    age_min=birth_age,
    age_max=death_age,
    adjust_infl_to=adjust_infl_to,
    **income_calib,
    SabelhausSong=True
)

# Initial distribution of wealth and permanent income
dist_params = income_wealth_dists_from_scf(
    base_year=adjust_infl_to, age=birth_age, education=education, wave=1995
)

# We need survival probabilities only up to death_age-1, because survival
# probability at death_age is 1.
liv_prb = parse_ssa_life_table(
    female=False, cross_sec=True, year=2004, min_age=birth_age, max_age=death_age - 1
)

# Parameters related to the number of periods implied by the calibration
time_params = parse_time_params(age_birth=birth_age, age_death=death_age)

# Update all the new parameters (later updates override earlier ones)
init_lifecycle = copy(init_idiosyncratic_shocks)
init_lifecycle.update(time_params)
init_lifecycle.update(dist_params)
# Note the income specification overrides the pLvlInitMean from the SCF.
init_lifecycle.update(income_params)
init_lifecycle.update({"LivPrb": liv_prb})

# Make a dictionary to specify an infinite consumer with a four period cycle
init_cyclical = copy(init_idiosyncratic_shocks)
init_cyclical['PermGroFac'] = [1.082251, 2.8, 0.3, 1.1]  # growth over the 4-period cycle
init_cyclical['PermShkStd'] = [0.1, 0.1, 0.1, 0.1]
init_cyclical['TranShkStd'] = [0.1, 0.1, 0.1, 0.1]
init_cyclical['LivPrb'] = 4*[0.98]
init_cyclical['T_cycle'] = 4
import numpy as np
from sksfa.utils import ReceptiveRebuilder, ReceptiveSlicer
from sklearn.preprocessing import PolynomialFeatures
from sksfa import SFA
from time import time
class Flatten:
    """Stateless transformer that collapses each sample to a flat vector.

    Exposes the minimal fit/partial/transform interface used throughout
    HSFA; fitting is a no-op because flattening needs no learned state.
    """

    def fit(self, X, y=None):
        """No-op, present for interface compatibility."""

    def partial(self, X, y=None):
        """No-op, present for interface compatibility."""

    def transform(self, X):
        """Reshape (n_samples, ...) input to (n_samples, n_features)."""
        n_samples = X.shape[0]
        return X.reshape((n_samples, -1))
class AdditiveNoise:
    """Adds i.i.d. zero-mean Gaussian noise to every entry of the input.

    Used between HSFA layers as a mild regularizer; fitting is a no-op
    since the noise has no learned parameters.
    """

    def __init__(self, std=0.01):
        # Standard deviation of the injected Gaussian noise.
        self.std = std

    def fit(self, X, y=None):
        """No-op, present for interface compatibility."""

    def partial(self, X, y=None):
        """No-op, present for interface compatibility."""

    def transform(self, X):
        """Return X plus zero-mean Gaussian noise with deviation self.std."""
        perturbation = np.random.normal(0.0, self.std, X.shape)
        return X + perturbation
class Clipper:
    """Clamps every entry of the input into the interval [val_min, val_max].

    Used after SFA layers in HSFA to bound feature magnitudes; fitting is
    a no-op since the bounds are fixed at construction.
    """

    def __init__(self, val_min=-4, val_max=+4):
        # Lower and upper clipping bounds.
        self.val_min = val_min
        self.val_max = val_max

    def fit(self, X, y=None):
        """No-op, present for interface compatibility."""

    def partial(self, X, y=None):
        """No-op, present for interface compatibility."""

    def transform(self, X):
        """Return X with all values clipped into [val_min, val_max]."""
        lo, hi = self.val_min, self.val_max
        return np.clip(X, lo, hi)
class HSFA:
    """Hierarchical Slow Feature Analysis (HSFA).

    A network of SFA estimators interlaced with receptive field transformers
    and linear SFA estimators for intermediate pre-expansion dimensionality reduction.
    This can deal with high-dimensional image time-series significantly better than
    standard (non-linear) SFA by using receptive fields to slice the images in a way
    comparable to convolutional layers in neural networks.

    In each layer, the image representation is first sliced into receptive fields that
    are defined by field-dimensions and corresponding strides. The field inputs are then
    fed as flattened input-batches to a combination of linear SFA (for dimensionality
    reduction), quadratic polynomial expansion, and linear SFA (for feature extraction).
    The dimension after the reduction is the same as the number of subsequently extracted
    features and can be specified for each layer individually.

    The final layer does not need to be specified and always consists of the same combination,
    but without prior slicing into receptive fields.

    Important: SFA estimators cannot be further fit after using them to transform input. This
    is why for training an HSFA network, the data has to be repeatedly fed through the network
    until the last layer is trained. HSFA's 'fit' function will take care of the logistics of
    that.

    Parameters
    ----------
    n_components : int
        Number of features extracted by the complete network.
    final_degree : int, default=2
        The degree of the final layer's polynomial expansion.
    input_shape : tuple (int)
        The shape of a single input (i.e., without sample-dimension) to the
        input layer.
    layer_configurations : list of 6-tuples
        A list of tuples to configure the intermediate layers. Each tuple needs to contain:
        (field_width, field_height, stride_width, stride_height, n_intermediate_components, polynomial_degree)
    internal_batch_size : int, default=50
        The size of mini-batches used internally. This should not be chosen too small as
        the SFA nodes at this point do not respect connections between batches.
    noise_std : float, default=0.05
        Additive noise added at intermediate layers. Crank this up if you run into problems
        with singular covariance matrices during training. This noise will not be applied
        at transformation time.
        In general, this has a slight regularizing effect, but should not be chosen too high.
        If you run into repeated problems, consider changing your network architecture and/or
        increase the size of your dataset.
    verbose : bool, default=False
        This switch decides if there will be some additional info printed during training.

    Attributes
    ----------
    layer_configurations : list of tuples
        This contains all layer configurations except the final, fully-connected one.
    input_shape : tuple
        See 'input_shape' parameter.
    internal_batch_size : int
        See 'internal_batch_size' parameter.
    n_components : int
        The number of output features.
    sequence : list of transformers/estimators
        This list will contain the used transformers and estimators in correct order.
    layer_outputs : list
        This list will contain all the output shapes of all intermediate layers.

    Examples
    --------
    >>> from sksfa import HSFA
    >>> import numpy as np

    >>> n_samples = 5000
    >>> image_width, image_height = 10, 10
    >>> dimension = image_width * image_height

    >>> t = np.linspace(0, 8*np.pi, n_samples).reshape(n_samples, 1)
    >>> t = t * np.arange(1, dimension + 1)
    >>>
    >>> ordered_cosines = np.cos(t)
    >>> mixed_cosines = np.dot(ordered_cosines, np.random.normal(0, 1, (dimension, dimension)))
    >>> mixed_cosines = mixed_cosines.reshape(n_samples, image_width, image_height, 1)

    >>> layer_configurations = [(5, 5, 5, 5, 4, 1)]
    >>>
    >>> hsfa = HSFA(2, mixed_cosines.shape[1:], layer_configurations, noise_std=0.1)
    >>> hsfa = hsfa.fit(mixed_cosines)
    >>> unmixed_cosines = hsfa.transform(mixed_cosines)
    """
    def __init__(self, n_components, input_shape, layer_configurations, final_degree=2, internal_batch_size=50, noise_std=0.05, verbose=False):
        self.layer_configurations = layer_configurations
        self.verbose = verbose
        self.input_shape = input_shape
        self.internal_batch_size = internal_batch_size
        self.n_components = n_components
        self.noise_std = noise_std
        self.sequence = []      # ordered transformers/estimators forming the network
        self.layer_outputs = []  # output shape after each receptive-field layer
        self.final_degree = final_degree
        self.initialize_layers()

    def initialize_layers(self):
        """Build self.sequence from the layer configurations (called once by __init__)."""
        # First layer does not need reconstructor
        field_w, field_h, stride_w, stride_h, n_components, poly_degree = self.layer_configurations[0]
        try:
            slicer = ReceptiveSlicer(input_shape=self.input_shape, field_size=(field_w, field_h), strides=(stride_w, stride_h))
        except AssertionError:
            raise ValueError(f"Layer 1: Field ({field_w}, {field_h}) with stride ({stride_w}, {stride_h}) does not fit data dimension ({self.input_shape[0]}, {self.input_shape[1]})")
        self.sequence.append(slicer)
        if poly_degree > 1:
            # Non-linear layer: linear SFA reduces dimension before the expansion.
            sfa = SFA(n_components, batch_size=self.internal_batch_size, fill_mode=None)
            self.sequence.append(sfa)
            expansion = PolynomialFeatures(poly_degree)
            # PolynomialFeatures has no 'partial' method; alias it to 'fit' so
            # the uniform training loop in fit() can call member.partial(...).
            expansion.partial = expansion.fit
            self.sequence.append(expansion)
        post_expansion_sfa = SFA(n_components, batch_size=self.internal_batch_size, fill_mode=None)
        self.sequence.append(post_expansion_sfa)
        # NOTE(review): unlike the later layers, the first layer adds no
        # AdditiveNoise/Clipper members — confirm this asymmetry is intentional.
        reconstructor = ReceptiveRebuilder((slicer.reconstruction_shape))
        if self.verbose:
            print(slicer.reconstruction_shape)
        self.layer_outputs.append(slicer.reconstruction_shape)
        self.sequence.append(reconstructor)

        # Intermediate layers: slice -> (reduce -> expand -> noise) -> SFA -> clip -> rebuild
        for build_idx, (field_w, field_h, stride_w, stride_h, n_components, poly_degree) in enumerate(self.layer_configurations[1:]):
            if (field_w == field_h == -1):
                # A (-1, -1) field means: cover the full previous layer output.
                field_w = slicer.reconstruction_shape[0]
                field_h = slicer.reconstruction_shape[1]
            try:
                slicer = ReceptiveSlicer(input_shape=slicer.reconstruction_shape, field_size=(field_w, field_h), strides=(stride_w, stride_h))
            except AssertionError:
                raise ValueError(f"Layer {2 + build_idx}: Field ({field_w}, {field_h}) with stride ({stride_w}, {stride_h}) does not fit data dimension ({slicer.reconstruction_shape[0]}, {slicer.reconstruction_shape[1]})")
            if self.verbose:
                print(slicer.reconstruction_shape)
            self.layer_outputs.append(slicer.reconstruction_shape)
            self.sequence.append(slicer)
            if poly_degree > 1:
                pre_expansion_sfa = SFA(n_components, batch_size=self.internal_batch_size, fill_mode=None)
                self.sequence.append(pre_expansion_sfa)
                expansion = PolynomialFeatures(poly_degree)
                expansion.partial = expansion.fit  # see note in first layer
                self.sequence.append(expansion)
                # Noise counters singular covariance matrices after expansion;
                # it is skipped at transform time (see transform()).
                self.sequence.append(AdditiveNoise(self.noise_std))
            post_expansion_sfa = SFA(n_components, batch_size=self.internal_batch_size, fill_mode=None)
            self.sequence.append(post_expansion_sfa)
            self.sequence.append(Clipper(-4, 4))
            reconstructor = ReceptiveRebuilder((slicer.reconstruction_shape))
            self.sequence.append(reconstructor)

        # Final, fully connected layer: flatten -> (reduce -> expand -> noise) -> SFA -> clip.
        self.sequence.append(Flatten())
        if self.final_degree > 1:
            # NOTE(review): this reuses `n_components` from the last loop
            # iteration (the last intermediate layer's width) for the
            # pre-expansion reduction — confirm this is the intended width.
            pre_expansion_sfa = SFA(n_components, batch_size=self.internal_batch_size, fill_mode=None)
            self.sequence.append(pre_expansion_sfa)
            expansion = PolynomialFeatures(self.final_degree)
            expansion.partial = expansion.fit  # see note in first layer
            self.sequence.append(expansion)
            self.sequence.append(AdditiveNoise(self.noise_std))
        post_expansion_sfa = SFA(self.n_components, batch_size=self.internal_batch_size, fill_mode=None)
        if self.verbose:
            print((self.n_components,))
        self.sequence.append(post_expansion_sfa)
        self.sequence.append(Clipper(-4, 4))

    def fit(self, X):
        """Train all SFA members by streaming X through the network in passes.

        One pass over the data is made per SFA member: batches are pushed
        through the already-trained prefix of the sequence, the (stateless)
        members in between are partial-fit and applied, and the target SFA
        accumulates statistics over all batches.

        Parameters
        ----------
        X : np.ndarray
            Training samples of shape (n_samples,) + input_shape.

        Returns
        -------
        self : HSFA
            The fitted network.
        """
        X = np.copy(X)
        n_samples = X.shape[0]
        batch_size = self.internal_batch_size
        n_batches = int(np.ceil(n_samples / batch_size))
        # Positions of the trainable SFA members; the sentinel len(sequence)
        # ensures one final pass covering any trailing members.
        accumulating_indices = [idx for idx, member in enumerate(self.sequence) if type(member) == SFA]
        accumulating_indices += [len(self.sequence)]
        last_idx = -1
        if self.verbose:
            try:
                from tqdm import tqdm
            except ImportError:
                raise ImportError("If 'verbose' is used, tqdm package needs to be installed")
            iterator = tqdm(accumulating_indices)
        else:
            iterator = accumulating_indices
        for idx in iterator:
            # Members before the previously trained SFA are frozen: transform only.
            transform_only = self.sequence[:last_idx+1]
            # Members between the previous and current SFA are partial-fit then applied.
            partial_sequence = self.sequence[last_idx+1:idx]
            for batch_idx in range(n_batches):
                current_batch = X[batch_idx * batch_size: (batch_idx + 1) * batch_size]
                for member in transform_only:
                    current_batch = member.transform(current_batch)
                for member in partial_sequence:
                    member.partial(current_batch)
                    current_batch = member.transform(current_batch)
                if idx < len(self.sequence):
                    # Accumulate statistics for the SFA currently being trained.
                    self.sequence[idx].partial(current_batch)
            last_idx = idx
        return self

    def transform(self, X, seq_end=None):
        """Push X through the (trained) network in batches.

        Parameters
        ----------
        X : np.ndarray
            Samples of shape (n_samples,) + input_shape.
        seq_end : int, optional
            If given, only apply self.sequence[:seq_end] (useful for
            inspecting intermediate representations).

        Returns
        -------
        np.ndarray
            Transformed output, one row per input sample.
        """
        n_samples = X.shape[0]
        batch_size = self.internal_batch_size
        n_batches = int(np.ceil(n_samples / batch_size))
        result = None
        sequence = self.sequence if seq_end is None else self.sequence[:seq_end]
        if self.verbose:
            try:
                from tqdm import tqdm
            except ImportError:
                raise ImportError("If 'verbose' is used, tqdm package needs to be installed")
            iterator = tqdm(range(n_batches))
        else:
            iterator = range(n_batches)
        for batch_idx in iterator:
            current_batch = X[batch_idx * batch_size: (batch_idx + 1) * batch_size]
            for transformer in sequence:
                # Training-time regularization noise is never applied at transform time.
                if type(transformer) == AdditiveNoise:
                    continue
                current_batch = transformer.transform(current_batch)
            if result is None:
                # Allocate the output lazily, once the output width is known.
                result = np.empty((n_samples,) + current_batch.shape[1:])
            result[batch_idx * batch_size: (batch_idx + 1) * batch_size] = current_batch
        return result

    def summary(self):
        """ Prints a summary of the network architecture.
        """
        print()
        print(" = = = = NETWORK ARCHITECTURE = = = = ")
        print()
        print("Input Layer:")
        print(f"\tinput shape: \t\t{self.input_shape}")
        for layer_idx, (field_w, field_h, stride_w, stride_h, n_components, poly_degree) in enumerate(self.layer_configurations):
            print(f"Layer {layer_idx + 1}:")
            print(f"\treceptive field: \t({field_w}, {field_h})\n\tstrides: \t\t({stride_w}, {stride_h})\n\texpansion degree: \t{poly_degree}")
            output_shape = self.layer_outputs[layer_idx]
            print(f"\toutput shape: \t\t{output_shape + (n_components,)}")
        print(f"Final Layer:")
        print("\tfully connected")
        print(f"\texpansion degree \t{self.final_degree}")
        print(f"\toutput shape \t\t({self.n_components},)")
        print()
        print()
| {"hexsha": "056677079d2d8159fb50f0ba0ec501d6d0fc7696", "size": 12707, "ext": "py", "lang": "Python", "max_stars_repo_path": "sksfa/_hsfa.py", "max_stars_repo_name": "wiskott-lab/sklearn-sfa", "max_stars_repo_head_hexsha": "0db443a5df013627a0ca573ea8be9e7ef591ecd2", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2020-05-22T11:14:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-11T11:34:48.000Z", "max_issues_repo_path": "sksfa/_hsfa.py", "max_issues_repo_name": "wiskott-lab/sklearn-sfa", "max_issues_repo_head_hexsha": "0db443a5df013627a0ca573ea8be9e7ef591ecd2", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-12-30T22:27:26.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-19T12:05:28.000Z", "max_forks_repo_path": "sksfa/_hsfa.py", "max_forks_repo_name": "wiskott-lab/sklearn-sfa", "max_forks_repo_head_hexsha": "0db443a5df013627a0ca573ea8be9e7ef591ecd2", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-06-11T07:49:03.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-15T12:16:00.000Z", "avg_line_length": 46.2072727273, "max_line_length": 222, "alphanum_fraction": 0.6597151177, "include": true, "reason": "import numpy", "num_tokens": 2774} |
from joblib import Parallel, delayed
from astropy.io import fits
import warnings
import glob
import os
warnings.filterwarnings('ignore')

########## USER PARAMETERS ######################
path = '/Users/felipegran/Desktop/Doctorado/ESO/m0.7m1.4pt1/' #with final /
datacube_name = 'm0.7m1.4pt1.fits' #image names will be datacube_name without .fits
img_name = datacube_name.split('.fits')[0]
# Reconstructed field-of-view images, presumably one per band (V/R/I) — TODO confirm
v_image = 'IMAGE_FOV_0002.fits'
r_image = 'IMAGE_FOV_0003.fits'
i_image = 'IMAGE_FOV_0004.fits'
reference_image = i_image  # detection image for all dual-image SExtractor runs
detect_tresh = 2.0   # SExtractor DETECT_THRESH
analysis_tresh = 2.0  # SExtractor ANALYSIS_THRESH
#################################################

#Create Folders
os.chdir('%s' %path) #move to path
# Output folders for catalogs, PSFex diagnostics, and the sliced cube.
os.system('mkdir fits cmds_psfex_output cmds_output slice_catalogs slice_psfex_output')
########### Phase 1: Commands to execute the CMDs ##########
# Median guide-star FWHM from the I-band image header, used to size apertures.
# NOTE(review): Phase 3 divides this keyword by the 0.2"/pix MUSE scale before
# use, while here the raw header value is used — confirm which unit
# PHOT_APERTURES expects, since the two phases are inconsistent.
i_fwhm = fits.open('%s' %i_image)[0].header['HIERARCH ESO OCS SGS FWHM MED']

# Dual-image aperture photometry: detect on the reference (I-band) image,
# measure on each band, apertures of 1.5 x FWHM; outputs LDAC catalogs for PSFex.
os.system('sex -PARAMETERS_NAME default.param.ape -DETECT_THRESH %s -ANALYSIS_THRESH %s -PHOT_APERTURES %1.2f -CATALOG_TYPE FITS_LDAC -CATALOG_NAME v.ldac %s,%s' %(detect_tresh,analysis_tresh,1.5*i_fwhm,reference_image,v_image))
os.system('sex -PARAMETERS_NAME default.param.ape -DETECT_THRESH %s -ANALYSIS_THRESH %s -PHOT_APERTURES %1.2f -CATALOG_TYPE FITS_LDAC -CATALOG_NAME r.ldac %s,%s' %(detect_tresh,analysis_tresh,1.5*i_fwhm,reference_image,r_image))
os.system('sex -PARAMETERS_NAME default.param.ape -DETECT_THRESH %s -ANALYSIS_THRESH %s -PHOT_APERTURES %1.2f -CATALOG_TYPE FITS_LDAC -CATALOG_NAME i.ldac %s,%s' %(detect_tresh,analysis_tresh,1.5*i_fwhm,reference_image,i_image))

# Build a PSF model per band from the LDAC catalogs.
os.system('psfex v.ldac')
os.system('psfex r.ldac')
os.system('psfex i.ldac')

# PSF-fitting photometry per band using the PSFex models; ASCII catalogs out.
os.system('sex -PARAMETERS_NAME default.param.psf.vri.psf -DETECT_THRESH %s -ANALYSIS_THRESH %s -PHOT_APERTURES %1.2f -CATALOG_TYPE ASCII_HEAD -CATALOG_NAME v.psf.cat -PSF_NAME v.psf %s,%s' %(detect_tresh,analysis_tresh,1.5*i_fwhm,reference_image,v_image))
os.system('sex -PARAMETERS_NAME default.param.psf.vri.psf -DETECT_THRESH %s -ANALYSIS_THRESH %s -PHOT_APERTURES %1.2f -CATALOG_TYPE ASCII_HEAD -CATALOG_NAME r.psf.cat -PSF_NAME r.psf %s,%s' %(detect_tresh,analysis_tresh,1.5*i_fwhm,reference_image,r_image))
os.system('sex -PARAMETERS_NAME default.param.psf.vri.psf -DETECT_THRESH %s -ANALYSIS_THRESH %s -PHOT_APERTURES %1.2f -CATALOG_TYPE ASCII_HEAD -CATALOG_NAME i.psf.cat -PSF_NAME i.psf %s,%s' %(detect_tresh,analysis_tresh,1.5*i_fwhm,reference_image,i_image))

#Move output to cmds_psfex_output (PSF diagnostics) ...
os.system('mv chi_* proto_* resi_* samp_* snap_* cmds_psfex_output/')
os.system('mv *.ldac cmds_psfex_output/')
os.system('mv v.psf r.psf i.psf cmds_psfex_output/')
#... and cmds_output (catalogs)
os.system('mv v.psf.cat r.psf.cat i.psf.cat cmds_output/')
#############################################################
####### Phase 2: Slicing the DATACUBE ########################
os.chdir('%sfits/' %path) #move to path/fits/
# After the chdir above, '../%s' resolves to the cube sitting in `path`, and
# '%sfits/' is the absolute path of the current directory.
os.system('mv ../%s %sfits/' %(datacube_name, path)) #move the datacube to the /fits/ folder
# missfits splits the cube into one FITS file per wavelength slice.
os.system('missfits -c ../default.missfits %s' %datacube_name) #extract the datacubes
##############################################################

######### Phase 3: Sextractor/PSFex photometry on slices #########
os.chdir('%s' %path) #move to path
data = glob.glob('fits/*.s*.fits') #select sliced fits
def sextractor(img,i_fwhm):
    """Run SExtractor + PSFex photometry on one datacube slice.

    Runs SExtractor in dual-image form (``reference_image,img``), builds a
    PSF model for the slice with PSFex, then re-runs SExtractor with PSF
    fitting.  Diagnostics are moved into ``slice_psfex_output/`` and the
    final catalog into ``slice_catalogs/``.

    Parameters:
        img    -- path of the sliced FITS file, e.g. 'fits/<name>.sNNN.fits'
        i_fwhm -- i-band seeing FWHM in pixels; apertures are 1.5 * i_fwhm

    Relies on the module-level ``detect_tresh``, ``analysis_tresh`` and
    ``reference_image`` variables, and on the ``sex``/``psfex`` binaries.
    """
    # BUG FIX: the original line read "img_name = img_name = ..." (a
    # confusing duplicated assignment).  'fits/<name>.fits' -> '<name>'.
    img_name = img.split('.fits')[0].split('fits/')[1]
    cmd1 = 'sex -PARAMETERS_NAME default.param.ape -DETECT_THRESH %s -ANALYSIS_THRESH %s -PHOT_APERTURES %1.2f -CATALOG_TYPE FITS_LDAC -CATALOG_NAME %s.ldac %s,%s' %(detect_tresh,analysis_tresh,1.5*i_fwhm,img_name,reference_image,img)
    os.system(cmd1)
    cmd2 = 'psfex %s.ldac' %(img_name)
    os.system(cmd2)
    cmd3 = 'sex -PARAMETERS_NAME default.param.psf -DETECT_THRESH %s -ANALYSIS_THRESH %s -PHOT_APERTURES %1.2f -CATALOG_TYPE ASCII_HEAD -CATALOG_NAME %s.psf.cat -PSF_NAME %s.psf %s,%s' %(detect_tresh,analysis_tresh,1.5*i_fwhm,img_name,img_name,reference_image,img)
    os.system(cmd3)
    # Tidy up the PSFex diagnostics and intermediate files ...
    for prefix in ('samp_', 'snap_', 'resi_', 'chi_', 'proto_'):
        os.system('mv %s%s.fits slice_psfex_output/' % (prefix, img_name))
    os.system('mv %s.psf slice_psfex_output/' %img_name)
    os.system('mv %s.ldac slice_psfex_output/' %img_name)
    # ... and keep the final photometry catalog.
    os.system('mv %s.psf.cat slice_catalogs/' %img_name)
#Delete old failures :(
os.system('rm slice_catalogs/*.psf.cat')
os.system('rm slice_psfex_output/*.fits')
#Explanation: Parallel for doing multiple slices at a time
#n_jobs=-1 to select all the available cores
#verbose=1 very little output, normally only to check if the system is doing well (Total slices: 3720)
#Then iterate over images in data (which contains all the slices)
pix_to_asec = 0.2 #arcsec/pix MUSE pixel scale
# Seeing FWHM read from the i-band header (arcsec), converted to pixels.
i_fwhm = fits.open('%s' %i_image)[0].header['HIERARCH ESO OCS SGS FWHM MED']/pix_to_asec
# joblib fan-out: one sextractor() call per slice.
Parallel(n_jobs=-1, verbose=1)(delayed(sextractor)(img,i_fwhm) for img in data) #if you want to test the code first change to data[0:20]
#At the end of the iteration the folder slice_catalogs/ will contain all the photometry catalogs
##############################################################
#pragma once
#include "BeastContext.hpp"
#include "BeastSocket.hpp"
#include <arepa/communication/Signal.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/beast.hpp>
#include <memory>
namespace arepa::networking::websocket {
/**
 * A class that binds a TCP endpoint and accepts websockets.
 * This uses boost::asio and boost::beast to handle the underlying I/O operations.
 */
class BeastSocketListener : public std::enable_shared_from_this<BeastSocketListener> {
#pragma mark - Fields -
private:
    // Shared networking context; held by reference, so it must outlive
    // this listener.
    BeastContext& _context;
    // Acceptor for incoming TCP connections on the bound endpoint.
    boost::asio::ip::tcp::acceptor _acceptor;
#pragma mark - Signals -
public:
    /**
     * A signal for when a new socket connection failed to be established.
     */
    arepa::communication::Signal<arepa::networking::NetworkException> on_error;
    /**
     * A signal for when a new socket connection is established.
     */
    arepa::communication::Signal<std::shared_ptr<arepa::networking::Socket>> on_accept;
#pragma mark - Methods (Private) -
private:
    // Completion handler for the raw TCP accept.
    void _on_async_accept(boost::beast::error_code ec, boost::asio::ip::tcp::socket socket);
    // Completion handler for the websocket-level accept on an accepted connection.
    void _on_async_accept_websocket(std::shared_ptr<BeastSocket::BeastSocketConnection> socket, boost::beast::error_code ec);
    // Queues the next asynchronous accept operation.
    void _do_async_accept();
#pragma mark - Constructors -
public:
    // noexcept(false): construction may throw, presumably when the endpoint
    // cannot be bound -- NOTE(review): confirm against the implementation.
    explicit BeastSocketListener(BeastContext& context) noexcept(false);
#pragma mark - Methods -
public:
    /**
     * Starts listening for socket connections.
     */
    void start() noexcept(false);
    /**
     * Stops listening for socket connections.
     */
    void stop() noexcept(false);
};
}
| {"hexsha": "9d0d3e43175b571e5b4cf2b651fa7e705221afe8", "size": 1599, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "module/networking_websocket/include/BeastSocketListener.hpp", "max_stars_repo_name": "selfeki/social-gaming-platform", "max_stars_repo_head_hexsha": "8d59512620470c57fa760998f3bcf1e4469130ef", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "module/networking_websocket/include/BeastSocketListener.hpp", "max_issues_repo_name": "selfeki/social-gaming-platform", "max_issues_repo_head_hexsha": "8d59512620470c57fa760998f3bcf1e4469130ef", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "module/networking_websocket/include/BeastSocketListener.hpp", "max_forks_repo_name": "selfeki/social-gaming-platform", "max_forks_repo_head_hexsha": "8d59512620470c57fa760998f3bcf1e4469130ef", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.984375, "max_line_length": 125, "alphanum_fraction": 0.7098186366, "num_tokens": 370} |
[STATEMENT]
lemma "test (do {
tmp0 \<leftarrow> slots_fallback_document . getElementById(''test5'');
n \<leftarrow> createTestTree(tmp0);
tmp1 \<leftarrow> n . ''test5'';
removeWhiteSpaceOnlyTextNodes(tmp1);
tmp2 \<leftarrow> n . ''f2'';
tmp2 . remove();
tmp3 \<leftarrow> n . ''s1'';
tmp4 \<leftarrow> tmp3 . assignedNodes(True);
tmp5 \<leftarrow> n . ''c1'';
assert_array_equals(tmp4, [tmp5]);
tmp6 \<leftarrow> n . ''s2'';
tmp7 \<leftarrow> tmp6 . assignedNodes(True);
tmp8 \<leftarrow> n . ''c1'';
assert_array_equals(tmp7, [tmp8]);
tmp9 \<leftarrow> n . ''s3'';
tmp10 \<leftarrow> tmp9 . assignedNodes(True);
tmp11 \<leftarrow> n . ''c1'';
assert_array_equals(tmp10, [tmp11]);
tmp12 \<leftarrow> n . ''s4'';
tmp13 \<leftarrow> tmp12 . assignedNodes(True);
tmp14 \<leftarrow> n . ''c1'';
tmp15 \<leftarrow> n . ''f4'';
assert_array_equals(tmp13, [tmp14, tmp15])
}) slots_fallback_heap"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. test (Heap_Error_Monad.bind slots_fallback_document . getElementById(''test5'') (\<lambda>tmp0. Heap_Error_Monad.bind (createTestTree tmp0) (\<lambda>n. Heap_Error_Monad.bind (n . ''test5'') (\<lambda>tmp1. Heap_Error_Monad.bind (removeWhiteSpaceOnlyTextNodes tmp1) (\<lambda>_. Heap_Error_Monad.bind (n . ''f2'') (\<lambda>tmp2. Heap_Error_Monad.bind tmp2 . remove() (\<lambda>_. Heap_Error_Monad.bind (n . ''s1'') (\<lambda>tmp3. Heap_Error_Monad.bind tmp3 . assignedNodes(True) (\<lambda>tmp4. Heap_Error_Monad.bind (n . ''c1'') (\<lambda>tmp5. Heap_Error_Monad.bind assert_array_equals(tmp4, [tmp5]) (\<lambda>_. Heap_Error_Monad.bind (n . ''s2'') (\<lambda>tmp6. Heap_Error_Monad.bind tmp6 . assignedNodes(True) (\<lambda>tmp7. Heap_Error_Monad.bind (n . ''c1'') (\<lambda>tmp8. Heap_Error_Monad.bind assert_array_equals(tmp7, [tmp8]) (\<lambda>_. Heap_Error_Monad.bind (n . ''s3'') (\<lambda>tmp9. Heap_Error_Monad.bind tmp9 . assignedNodes(True) (\<lambda>tmp10. Heap_Error_Monad.bind (n . ''c1'') (\<lambda>tmp11. Heap_Error_Monad.bind assert_array_equals(tmp10, [tmp11]) (\<lambda>_. Heap_Error_Monad.bind (n . ''s4'') (\<lambda>tmp12. Heap_Error_Monad.bind tmp12 . assignedNodes(True) (\<lambda>tmp13. Heap_Error_Monad.bind (n . ''c1'') (\<lambda>tmp14. Heap_Error_Monad.bind (n . ''f4'') (\<lambda>tmp15. assert_array_equals(tmp13, [tmp14, tmp15])))))))))))))))))))))))) slots_fallback_heap
[PROOF STEP]
by eval | {"llama_tokens": 980, "file": "Shadow_SC_DOM_tests_slots_fallback", "length": 1} |
\documentclass[letterpaper]{article}
%% Language and font encodings
\usepackage[english]{babel}
\usepackage[utf8x]{inputenc}
\usepackage[T1]{fontenc}
%% Sets page size and margins
\usepackage[letterpaper,top=2.5cm,bottom=2cm,left=2cm,right=2cm,marginparwidth=1.75cm]{geometry}
%% Useful packages
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{amsfonts}
\usepackage{graphicx}
\usepackage[colorinlistoftodos]{todonotes}
\usepackage[colorlinks=true, allcolors=blue]{hyperref}
\usepackage{listings}
\usepackage{multicol}
\usepackage{float}
\usepackage{bm}
\date{\today}
\title{CSC411 Assignment 1}
\author{Yue Guo}
\begin{document}
\maketitle
%\centering
% \includegraphics[width=0.5\textwidth]{1.3.2/1_3_2_k50.png}
% \caption{k-NN Regression of $x \in [0, 11]$ with $k$ = 50.}
%\end{figure}
\section{Learning basics of regression in Python }
\subsection{Describe and summarize the data}
Dimension: 13\\
Target: price\\
Data points: for each feature, we have 506 data points
\subsection{visualization}
\subsection{}
\begin{figure}[H]
\centering
\includegraphics[width=1\textwidth]{Figure_1.png}
\caption{\label{fig:q1}}
\end{figure}
\subsubsection{Feature weights}
Weights of each feature: \\
\begin{center}
\begin{tabular}{ |c|c| }
\hline
CRIM & 39.3306546864 \\
ZN & -0.105570660997\\
INDUS & 0.033569463917\\
CHAS & 0.0501338462503\\
NOX & 2.44159672082\\
RM &-18.9563291605\\
AGE & 3.65717113479\\
DIS & 0.00193877592741\\
RAD & -1.46699325228\\
TAX & 0.349594800713\\
PTRATIO &-0.0145786907583\\
B & -0.959592750851\\
LSTAT & 0.008452561222\\
\hline
\end{tabular}
\end{center}
INDUS matches my expectation. The more business we have, the more prosperous an area is, therefore more expensive housing.
\subsubsection{MSE of my model}
19.0490487755
\subsubsection{Two more error measurement}
normal error = 313.546384855\\
root mean square error = 0.281596677525\\
I suggest these two error measurements because they do not square the differences.
\subsubsection{Most significant feature}
Based on my results, the most significant feature is RM and CRIM. It has larger weight value among all features.
%%%%%%%%Q 2%%%%%%%%%%%
\section{Locally weighted regression}
\subsection{weighted least square problem and analytic solution proof}
Since the matrix $A$ is diagonal with entries $A_{ii} = a^{(i)}$, the weighted loss $L(w) = \frac{1}{2} \Sigma_{i} a^{(i)}(y^{(i)} - w^T x^{(i)} )^{2} + \frac{\lambda}{2} \lVert w \rVert ^{2}$ can be written in matrix form as \\
$ L(w) = \frac{1}{2} (y - Xw)^T A (y - Xw) + \frac{\lambda}{2} \lVert w \rVert ^{2}$\\
$ L(w) = \frac{1}{2} (y^T A y + w^T X^T A X w - 2 w^T X^T A y) +\frac{\lambda}{2} \lVert w \rVert ^{2}$\\
Setting the gradient with respect to $w$ to zero:\\
$ \frac{\partial L}{\partial w} = X^T A X w^{*} - X^T A y + \lambda w^{*} = 0$\\
$(X^T A X + \lambda I)w^{*} = X^T A y $\\
$w^{*} = (X^T A X + \lambda I)^{-1} X^T A y $
\subsection{}
x is Tau, y is losses
\begin{figure}[H]
\centering
\includegraphics[width=0.5\textwidth]{q2_final.png}
\caption{\label{fig:q2}}
\end{figure}
\subsection{}
The weights produced by the algorithm have a large variance as $\tau \to 0$ (each prediction is dominated by only the nearest training points), and the
variance approaches a constant as $\tau \to \infty$ (all training points receive equal weight, recovering ordinary least squares).
%%%%%%%%%%%% Q3%%%%%%%%%%%%%
\section{Mini-batch}
\subsection{Proof of expected value of mini batches}
$RHS = \frac{1}{n} \Sigma_{i=1}^{n}a_{i}$ is the average of all samples in the data set\\
LHS: $ \frac{1}{m} \Sigma_{i=1}^{m} a_{i} $ is the average of one random batch. Since each of the $m$ elements is drawn uniformly at random from the dataset, each $a_{i}$ has expectation $\frac{1}{n} \Sigma_{j=1}^{n} a_{j}$, so by linearity of expectation $E(\frac{1}{m} \Sigma_{i=1}^{m} a_{i}) = \frac{1}{m} \times m \times \frac{1}{n} \Sigma_{j=1}^{n} a_{j} = \frac{1}{n} \Sigma_{j=1}^{n} a_{j} $ \\
QED
\subsection{Proof of gradients}
From the result of part 1, substitute $l$ into $a_{i}$\\
$E[ \frac{1}{m} \Sigma l(x ,y, \theta) ] = \frac{1}{n} \Sigma l(x ,y, \theta) $\\
$E[L(x, y, \theta)] = L(x ,y, \theta)$\\
apply gradient, we have \\
$\nabla E[L(x, y, \theta)] =\nabla L(x ,y, \theta) = \frac{1}{n} \Sigma_{i=1}^{n} \nabla l $\\
$\nabla \frac{1}{n} \Sigma_{i=1}^{n} l = \nabla E(\frac{1}{m} \Sigma a_{i}) $ \\
$E[ \nabla L(x, y, \theta)] = \nabla L(x, y, \theta) $
\subsection{Importance of this result}
This implies that k random batches of data can approximate the gradient of the complete dataset.
\subsection{Gradient}
\subsubsection{Analytic solution}
$\nabla L = 2X^TXw - 2X^Ty$
\subsubsection{}
see q3.py
\subsection{Error measurements}
square metric = 79165708.6263 \\
cosine similarity = 0.999998432222\\
I suggest cosine similarity because square distance takes the difference to the power of 2, which punishes certain cases more.
\subsection{plot}
X axis is weights, y axis is log of M
this is the graph if we average all the weights
\begin{figure}[H]
\centering
\includegraphics[width=0.5\textwidth]{q3_final.png}
\caption{\label{}}
\end{figure}
This is the graph if we average each $w_{j}$
X axis is weights, y axis is log of M
\begin{figure}[H]
\centering
\includegraphics[width=0.5\textwidth]{q3_f2.png}
\caption{\label{fig:q3}}
\end{figure}
% \section{Some examples to get started}
% \subsection{How to add Comments}
% Comments can be added to your project by clicking on the comment icon in the toolbar above. % * <john.hammersley@gmail.com> 2014-09-03T09:54:16.211Z:
% %
% % Here's an example comment!
% %
% To reply to a comment, simply click the reply button in the lower right corner of the comment, and you can close them when you're done.
% \subsection{How to include Figures}
% First you have to upload the image file from your computer using the upload link the project menu. Then use the includegraphics command to include it in your document. Use the figure environment and the caption command to add a number and a caption to your figure. See the code for Figure \ref{fig:frog} in this section for an example.
% \begin{figure}
% \centering
% \includegraphics[width=0.3\textwidth]{frog.jpg}
% \caption{\label{fig:frog}This frog was uploaded via the project menu.}
% \end{figure}
% \subsection{How to add Tables}
% Use the table and tabular commands for basic tables --- see Table~\ref{tab:widgets}, for example.
% \subsection{How to write Mathematics}
% \LaTeX{} is great at typesetting mathematics. Let $X_1, X_2, \ldots, X_n$ be a sequence of independent and identically distributed random variables with $\text{E}[X_i] = \mu$ and $\text{Var}[X_i] = \sigma^2 < \infty$, and let
% \[S_n = \frac{X_1 + X_2 + \cdots + X_n}{n}
% = \frac{1}{n}\sum_{i}^{n} X_i\]
% denote their mean. Then as $n$ approaches infinity, the random variables $\sqrt{n}(S_n - \mu)$ converge in distribution to a normal $\mathcal{N}(0, \sigma^2)$.
% \subsection{How to create Sections and Subsections}
% Use section and subsections to organize your document. Simply use the section and subsection buttons in the toolbar to create them, and we'll handle all the formatting and numbering automatically.
% \subsection{How to add Lists}
% You can make lists with automatic numbering \dots
% \begin{enumerate}
% \item Like this,
% \item and like this.
% \end{enumerate}
% \dots or bullet points \dots
% \begin{itemize}
% \item Like this,
% \item and like this.
% \end{itemize}
% \subsection{How to add Citations and a References List}
% You can upload a \verb|.bib| file containing your BibTeX entries, created with JabRef; or import your \href{https://www.overleaf.com/blog/184}{Mendeley}, CiteULike or Zotero library as a \verb|.bib| file. You can then cite entries from it, like this: \cite{greenwade93}. Just remember to specify a bibliography style, as well as the filename of the \verb|.bib|.
% You can find a \href{https://www.overleaf.com/help/97-how-to-include-a-bibliography-using-bibtex}{video tutorial here} to learn more about BibTeX.
% We hope you find Overleaf useful, and please let us know if you have any feedback using the help menu above --- or use the contact form at \url{https://www.overleaf.com/contact}!
% \bibliographystyle{alpha}
% \bibliography{sample}
\end{document} | {"hexsha": "e13d3b68661b5a1b04075207c791838eacbd0d49", "size": 8091, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "a1/a1.tex", "max_stars_repo_name": "violetguos/intro_machine_learning", "max_stars_repo_head_hexsha": "744b7bfc586a8d629086c92248b9b9c2aa1eb071", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "a1/a1.tex", "max_issues_repo_name": "violetguos/intro_machine_learning", "max_issues_repo_head_hexsha": "744b7bfc586a8d629086c92248b9b9c2aa1eb071", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "a1/a1.tex", "max_forks_repo_name": "violetguos/intro_machine_learning", "max_forks_repo_head_hexsha": "744b7bfc586a8d629086c92248b9b9c2aa1eb071", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.7253218884, "max_line_length": 363, "alphanum_fraction": 0.7002842665, "num_tokens": 2638} |
! this is the first example in C.3.1
! Demonstrates nested derived types and component selection with '%'.
! Note: no component is ever initialised, so the PRINT statements output
! undefined values; the example only illustrates the syntax.

! Employee identification numbers.
type id_numbers
integer ssn
integer employee_number
end type id_numbers

! Personal identification: name fields plus the nested id_numbers record.
type person_id
character(len=30) last_name
character(len=1) middle_initial
character(len=30) first_name
type(id_numbers) number
end type person_id

! Top-level record: age plus the nested person_id record.
type person
integer age
type(person_id) id
end type person

type (person) george, mary

! Component selection drills down one level per '%'.
print *, george%age
print *, mary%id%last_name
print *, mary%id%number%ssn
print *, george%id%number    ! prints the whole id_numbers structure
end
| {"hexsha": "b272695a438aab8f21391206f7dde10d01af67e1", "size": 477, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "tests/annex_c/c_3_1_0.f90", "max_stars_repo_name": "OpenFortranProject/ofp-sdf", "max_stars_repo_head_hexsha": "202591cf4ac4981b21ddc38c7077f9c4d1c16f54", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 27, "max_stars_repo_stars_event_min_datetime": "2015-03-05T14:41:39.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-22T23:51:25.000Z", "max_issues_repo_path": "tests/annex_c/c_3_1_0.f90", "max_issues_repo_name": "OpenFortranProject/ofp-sdf", "max_issues_repo_head_hexsha": "202591cf4ac4981b21ddc38c7077f9c4d1c16f54", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 33, "max_issues_repo_issues_event_min_datetime": "2015-11-05T09:50:04.000Z", "max_issues_repo_issues_event_max_datetime": "2018-05-10T21:32:48.000Z", "max_forks_repo_path": "tests/annex_c/c_3_1_0.f90", "max_forks_repo_name": "OpenFortranProject/ofp-sdf", "max_forks_repo_head_hexsha": "202591cf4ac4981b21ddc38c7077f9c4d1c16f54", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2015-06-24T01:22:58.000Z", "max_forks_repo_forks_event_max_datetime": "2019-06-16T06:47:15.000Z", "avg_line_length": 17.0357142857, "max_line_length": 36, "alphanum_fraction": 0.7463312369, "num_tokens": 140} |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#HW2 for EECS 598 Motion Planning
#code based on the simplemanipulation.py example
import time
import openravepy
#### YOUR IMPORTS GO HERE ####
import astarTool as a
from copy import deepcopy
#### END OF YOUR IMPORTS ####
if not __openravepy_build_doc__:
from openravepy import *
from numpy import *
def waitrobot(robot):
    """Busy-wait (10 ms polling) until the robot's controller reports done."""
    finished = robot.GetController().IsDone()
    while not finished:
        time.sleep(0.01)
        finished = robot.GetController().IsDone()
def tuckarms(env,robot):
    """Fold both PR2 arms into the hard-coded driving ('tucked') pose.

    Sets the shoulder-lift / elbow-flex / wrist-flex joints of both arms
    as the active DOFs, sends the tucked joint values to the controller,
    then blocks until the motion completes.
    """
    with env:  # hold the environment lock while changing DOFs (OpenRAVE convention)
        jointnames = ['l_shoulder_lift_joint','l_elbow_flex_joint','l_wrist_flex_joint','r_shoulder_lift_joint','r_elbow_flex_joint','r_wrist_flex_joint']
        robot.SetActiveDOFs([robot.GetJoint(name).GetDOFIndex() for name in jointnames])
        robot.SetActiveDOFValues([1.29023451,-2.32099996,-0.69800004,1.27843491,-2.32100002,-0.69799996]);
        robot.GetController().SetDesired(robot.GetDOFValues());
    waitrobot(robot)
if __name__ == "__main__":
    env = Environment()
    env.SetViewer('qtcoin')
    collisionChecker = RaveCreateCollisionChecker(env,'ode')
    env.SetCollisionChecker(collisionChecker)
    env.Reset()
    # load a scene from ProjectRoom environment XML file
    env.Load('data/pr2test2.env.xml')
    time.sleep(0.1)
    # 1) get the 1st robot that is inside the loaded scene
    # 2) assign it to the variable named 'robot'
    robot = env.GetRobots()[0]
    # tuck in the PR2's arms for driving
    tuckarms(env,robot);
    with env:
        # the active DOF are translation in X and Y and rotation about the Z axis of the base of the robot.
        robot.SetActiveDOFs([],DOFAffine.X|DOFAffine.Y|DOFAffine.RotationAxis,[0,0,1])
        goalconfig = [2.6,-1.3,-pi/2]
    #### YOUR CODE HERE ####
    # A* search over the (x, y, theta) base configuration space using the
    # helpers in astarTool (imported as `a`).
    GoalNode = a.Node(goalconfig[0],goalconfig[1],goalconfig[2]);
    CloseSet = set();   # expanded nodes
    OpenSet = set();    # frontier
    clideSet=set();     # NOTE(review): never used below -- candidate for removal
    # Discretisation of the search grid (module-level knobs of astarTool).
    a.stepx = 0.2;
    a.stepy = 0.1;
    a.theta_step = pi/2;
    handles = [];       # keeps OpenRAVE plot handles alive so markers stay visible
    # NOTE(review): `str` shadows the Python builtin; rename if extending.
    # Start node of the search:
    str = a.Node(-3.4,-1.4,0);
    str.G=0;
    #str.H = a.manhattan(str,GoalNode);
    str.H = a.euclidean(str,GoalNode);
    OpenSet.add(deepcopy(str));
    print(str.H);
    # NOTE(review): time.clock() was removed in Python 3.8; use
    # time.perf_counter() when porting.
    Timer_str = time.clock();
    while len(OpenSet) > 0:
        # Expand the open node with the lowest f = G + H.
        current = min(OpenSet,key=lambda o:o.H + o.G);
        OpenSet.remove(current)
        CloseSet.add(current);
        if a.GoalCheck(GoalNode,current):
            Timer_end = time.clock();
            Total_Time = (Timer_end -Timer_str);
            print("Found goal!!! cost = ",current.G," use ",Total_Time);
            break;
        #neighbor = a.FindNeighbor8(current);
        neighbor = a.FindNeighbor4(current);
        for neighborNode in neighbor:
            if a.nodeIsInSet(neighborNode,CloseSet):
                continue;
            if a.check_collision(neighborNode,env,robot):
                # Colliding configurations are plotted in red and skipped.
                handles.append(env.plot3(points=[neighborNode.x,neighborNode.y,0],pointsize=0.05,colors=[1,0,0],drawstyle=1))
            else:
                tentative_g = current.G + a.MoveCost(current,neighborNode);
                temp = a.nodeIsInOpenSet(neighborNode,OpenSet);
                if temp == None:
                    # New node: score it and add it to the frontier (blue).
                    neighborNode.G = tentative_g;
                    neighborNode.H = a.manhattan(neighborNode,GoalNode);
                    #neighborNode.H = a.euclidean(neighborNode,GoalNode) ;
                    neighborNode.parent = current;
                    OpenSet.add(deepcopy(neighborNode));
                    handles.append(env.plot3(points=[neighborNode.x,neighborNode.y,0],pointsize=0.05,colors=[0,0,1],drawstyle=1))
                else:
                    if(tentative_g >= temp.G):
                        continue;
                    else:
                        # Found a cheaper path to an open node: re-score and re-parent it.
                        OpenSet.remove(temp);
                        temp.G = tentative_g;
                        temp.parent = current;
                        OpenSet.add(deepcopy(temp));
    # Reconstruct goal-to-start path, reverse it, and plot it in black.
    PathAry = a.computePathAry(current);
    PathAry.reverse();
    for i in PathAry:
        handles.append(env.plot3(points=[i.x,i.y,0],pointsize=0.05,colors=[0,0,0],drawstyle=1));
    #### Implement your algorithm to compute a path for the robot's base starting from the current configuration of the robot and ending at goalconfig. The robot's base DOF have already been set as active. It may be easier to implement this as a function in a separate file and call it here.
    #### Draw the X and Y components of the configurations explored by your algorithm
    #### Now that you have computed a path, execute it on the robot using the controller. You will need to convert it into an openrave trajectory. You can set any reasonable timing for the configurations in the path. Then, execute the trajectory using robot.GetController().SetPath(mypath);
    # Convert the waypoint list into an OpenRAVE trajectory; waypoint i gets
    # timestamp i * 0.01 s.
    traj = RaveCreateTrajectory(env,'');
    config = robot.GetActiveConfigurationSpecification('linear');
    config.AddDeltaTimeGroup();
    traj.Init(config);
    myPath = [ [point.x, point.y,point.theta,i*0.01] for i,point in enumerate(PathAry) ];
    for i ,wayPoint in enumerate(myPath):
        traj.Insert(i,wayPoint,config,True);
    robot.GetController().SetPath(traj);
    #### END OF YOUR CODE ###
    waitrobot(robot)
    raw_input("Press enter to exit...")
| {"hexsha": "79fafee12702ee18b7c490fc226601839bd64fcb", "size": 4802, "ext": "py", "lang": "Python", "max_stars_repo_path": "data/HW2_astar.py", "max_stars_repo_name": "willsirius/DualTreeRRTStartMotionPlanning", "max_stars_repo_head_hexsha": "d3e6d2ec0cd7c38379d5b0ff42924b7216bd29cd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-12-14T14:36:16.000Z", "max_stars_repo_stars_event_max_datetime": "2017-12-14T14:36:16.000Z", "max_issues_repo_path": "data/HW2_astar.py", "max_issues_repo_name": "williamissirius/DualTreeRRTStartMotionPlanning", "max_issues_repo_head_hexsha": "d3e6d2ec0cd7c38379d5b0ff42924b7216bd29cd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data/HW2_astar.py", "max_forks_repo_name": "williamissirius/DualTreeRRTStartMotionPlanning", "max_forks_repo_head_hexsha": "d3e6d2ec0cd7c38379d5b0ff42924b7216bd29cd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2017-12-20T19:00:09.000Z", "max_forks_repo_forks_event_max_datetime": "2017-12-20T19:00:09.000Z", "avg_line_length": 34.7971014493, "max_line_length": 295, "alphanum_fraction": 0.6899208663, "include": true, "reason": "from numpy", "num_tokens": 1333} |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib
from six import BytesIO
def add_figure_to_archive(fig, zipfile, filename):
    """Render *fig* as a PNG and store it under *filename* inside *zipfile*.

    Parameters:
        fig      -- the matplotlib Figure to serialise
        zipfile  -- an open zipfile.ZipFile (any object with ``writestr``)
        filename -- entry name inside the archive, e.g. 'cycle-quality.png'
    """
    bytes_buf = BytesIO()
    # BUG FIX: use fig.savefig instead of plt.savefig -- the latter saved
    # whichever figure happened to be "current", not the one passed in.
    fig.savefig(bytes_buf, format='png')
    bytes_buf.seek(0)
    zipfile.writestr(filename, bytes_buf.read())
    bytes_buf.close()
class BasicPlots():
    """QC plot generators for sequencing data.

    Every ``uni*`` method draws one matplotlib figure and stores it as a
    PNG entry inside ``filename`` -- an open ``zipfile.ZipFile`` -- via the
    module-level ``add_figure_to_archive`` helper.

    Shared parameter convention (kept from the original code):
        data     -- per-plot statistics (list / dict / array, see each method)
        filename -- open zip archive receiving the PNG
        grid     -- inverted flag: a grid is drawn when ``grid`` is falsy
        color    -- matplotlib colour for bars / points
        sample   -- sample label appended to each plot title
    """

    def __init__(self, name):
        # Identifier for this plot set; not used by the plotting methods.
        self.name = name

    def uniqual_boxplot(self, data, filename, grid, color, sample):
        """Per-cycle quality boxplot -> cycle-quality.png."""
        fig, axes = plt.subplots(figsize=(10, 8))
        # Boxes are filled (patch_artist=True) only for the default colour,
        # mirroring the original white/blue switch.
        filled = color == "blue"
        axes.boxplot(data, sym="", patch_artist=filled, meanline=True, showmeans=True)
        if not grid:
            axes.grid()
        axes.set_title("Quality per Cycle " + sample)
        axes.set_xlabel("Cycle")
        axes.set_ylabel("Quality level")
        add_figure_to_archive(fig, filename, "cycle-quality.png")

    def uniqualitysec_bar(self, data, filename, grid, color, sample):
        """Histogram of mean per-sequence quality -> sequence-quality.png."""
        fig, axes = plt.subplots()
        max_len = max(data.keys())
        x_axis = tuple(range(max_len + 1))
        # .get(n, 0) keeps sparse dicts from raising KeyError on gaps.
        axes.bar(x_axis, [data.get(n, 0) for n in x_axis], color=color)
        if not grid:
            axes.grid()
        axes.set_title('Mean Sequence Quality ' + sample)
        axes.set_xlabel('Quality level')
        axes.set_ylabel('Number of sequences')
        add_figure_to_archive(fig, filename, "sequence-quality.png")

    def uninuc_plot(self, data, filename, grid, sample):
        """Per-cycle nucleotide percentages (A/T/C/G/N) -> nucleotide-percentage.png."""
        fig, axes = plt.subplots()
        axes.plot(data)
        if not grid:
            axes.grid()
        axes.set_title("Nucleotide per Cycle " + sample)
        axes.set_ylabel("Nucleotide percentage (%)")
        axes.set_xlabel("Cycle")
        # data.shape implies a 2-D array: cycles x 5 nucleotide columns.
        axes.axis([0, data.shape[0], 0, 100])
        axes.legend(["A", "T", "C", "G", "N"])
        add_figure_to_archive(fig, filename, "nucleotide-percentage.png")

    def unigc_plot(self, data, filename, grid, sample):
        """Per-cycle GC percentage -> gc-percentage.png."""
        fig, axes = plt.subplots()
        axes.plot(data)
        if not grid:
            axes.grid()
        axes.set_title("GC per Cycle " + sample)
        axes.axis([0, len(data), 0, 100])
        axes.set_ylabel("GC percentage (%)")
        axes.set_xlabel("Cycle")
        add_figure_to_archive(fig, filename, "gc-percentage.png")

    def unigcproportion_scatter(self, data, filename, grid, color, sample):
        """GC-content distribution with a fitted normal curve -> gc-sequence-distribution.png."""
        fig, axes = plt.subplots()
        # Expand {gc%: count} into one entry per sequence to get mean/std.
        gc_list = []
        for key, count in data.items():
            gc_list.extend([key] * count)
        mean = np.mean(gc_list)
        sigma = np.std(gc_list)
        second_x = np.linspace(0, 100, 100)
        x_axis = tuple(range(101))
        y_axis = [data.get(height, 0) for height in x_axis]
        axes.scatter(x_axis, y_axis, color=color)
        if not grid:
            axes.grid()
        x1, x2, y1, y2 = axes.axis()
        axes.axis((x1, x2, 0, y2))
        axes2 = axes.twinx()
        # BUG FIX: matplotlib.mlab.normpdf was removed in matplotlib 3.1;
        # compute the normal pdf directly.  Guard against sigma == 0.
        if sigma > 0:
            pdf = np.exp(-((second_x - mean) ** 2) / (2.0 * sigma ** 2)) / (sigma * np.sqrt(2.0 * np.pi))
            axes2.plot(second_x, pdf, color='red')
        axes2.get_yaxis().set_visible(False)
        handles, labels = axes.get_legend_handles_labels()
        display = (0, 1, 2)
        a = plt.Line2D((0, 1), (0, 0), color=(0.1, 0.6, 0.8))
        b = plt.Line2D((0, 1), (0, 0), color='red')
        # Positional flag works on both old (b=) and new (visible=) matplotlib.
        axes.yaxis.grid(True, which='major', **{'color': 'gray', 'linestyle': ':'})
        axes.set_axisbelow(True)
        axes.set_title('Sequence GC Content Distribution ' + sample)
        axes.set_xlabel('Mean sequence GC content (%)')
        axes.set_ylabel('Number of sequences')
        axes.legend([handle for i, handle in enumerate(handles) if i in display] + [a, b],
                    [label for i, label in enumerate(labels) if i in display] + ['Observed', 'Theoretical'])
        add_figure_to_archive(fig, filename, "gc-sequence-distribution.png")

    def unilenght_bar(self, data, filename, grid, color, sample):
        """Sequence length distribution -> sequence-lengths.png."""
        fig, axes = plt.subplots()
        max_len = max(data.keys())
        x_axis = tuple(range(max_len + 1))
        axes.bar(x_axis, [data.get(n, 0) for n in x_axis], color=color)
        if not grid:
            axes.grid()
        axes.set_title('Sequence Lengths ' + sample)
        # Fixed the 'Lenght' typo in the user-facing axis label.
        axes.set_xlabel('Length')
        axes.set_ylabel('Number of sequences')
        add_figure_to_archive(fig, filename, "sequence-lengths.png")

    def unioverkmer_bar(self, data, filename, grid, color, sample):
        """Over-represented k-mer counts -> overrepresented-kmer.png."""
        fig, axes = plt.subplots(figsize=(10, 8))
        # data is a sequence of (kmer, count) pairs.
        kmers = [pair[0] for pair in data]
        counts = [pair[1] for pair in data]
        x_axis = range(len(data))
        axes.bar(x_axis, counts, color=color)
        if not grid:
            axes.grid()
        axes.set_title("Over-Represented Kmers " + sample)
        axes.set_ylabel("Number of repeats")
        axes.set_xlabel("Kmer")
        # Single tick setup (the original set the ticks twice, losing the
        # vertical rotation on the second call).
        axes.set_xticks(x_axis)
        axes.set_xticklabels(kmers, rotation="vertical")
        add_figure_to_archive(fig, filename, "overrepresented-kmer.png")

    def unikmer_plot(self, data, over, filename, grid, sample):
        """Per-cycle counts of the over-represented k-mers -> kmer-cycle.png."""
        fig, axes = plt.subplots()
        kmer_labels = [pair[0] for pair in over]
        axes.plot(data)
        # BUG FIX: the original called axes.grid() a second time
        # unconditionally, which toggled the grid back OFF whenever `grid`
        # was falsy -- the inverse of every other method here.
        if not grid:
            axes.grid()
        axes.legend(kmer_labels)
        axes.set_ylabel("Number of Repeats")
        axes.set_xlabel("Cycle")
        axes.set_title("Over-represented Kmers Per Cycle " + sample)
        add_figure_to_archive(fig, filename, "kmer-cycle.png")

    def uniduplicants_hist(self, data, filename, grid, color, sample):
        """Distribution of duplicated sequences (3+ repeats) -> sequences-duplicated.png."""
        fig, axes = plt.subplots()
        # Per the x-label below, data[2] holds the count of sequences with
        # exactly 2 repeats; the bars start at 3 repeats.
        label = data[2]
        data = data[3:]
        x_axis = range(3, len(data) + 3)
        axes.bar(x_axis, data, color=color)
        if not grid:
            axes.grid()
        axes.set_title("Duplicated Sequences " + sample)
        axes.set_xlabel("Repeats" + " " + "Sequences with 2 repeats: " + str(label))
        axes.set_ylabel("Number of sequences")
        add_figure_to_archive(fig, filename, "sequences-duplicated.png")

    def unisecn_bar(self, data, filename, grid, color, sample):
        """Number of N bases per sequence -> sequence-n.png."""
        fig, axes = plt.subplots()
        x_axis = range(1, len(data) + 1)
        axes.bar(x_axis, data, color=color)
        if not grid:
            axes.grid()
        axes.set_title("Ns per sequence " + sample)
        axes.set_ylabel("Number of sequences")
        axes.set_xlabel("Number of Ns")
        add_figure_to_archive(fig, filename, "sequence-n.png")

    def unicyclen_bar(self, data, filename, grid, color, sample):
        """Number of N bases per cycle -> cycle-n.png."""
        fig, axes = plt.subplots()
        x_axis = range(1, len(data) + 1)
        axes.bar(x_axis, data, color=color)
        if not grid:
            axes.grid()
        axes.set_title("Ns per cycle " + sample)
        axes.set_ylabel("Number of Ns")
        axes.set_xlabel("Cycle")
        add_figure_to_archive(fig, filename, "cycle-n.png")
class SamPlots(BasicPlots):
    """SAM/BAM-specific QC plots; shares the zip-archive conventions of BasicPlots."""

    def __init__(self, name):
        # Identifier for this plot set; mirrors BasicPlots.__init__.
        self.name = name

    def uniflag_bar(self, data, filename, grid, color, sample):
        """Counts per SAM FLAG bit -> sequence-flag.png."""
        fig, axes = plt.subplots(figsize=(10, 8))
        x_axis = range(1, 13)
        # Tick labels are the 12 flag bits rendered as decimal "binary"
        # strings (1, 10, 100, ...), same values as the original literal list.
        x_proaxis = [10 ** i for i in range(12)]
        axes.bar(x_axis, data, color=color)
        # BUG FIX: the original evaluated `axes.grid` without calling it
        # (a silent no-op); call it like every other plotting method.
        if not grid:
            axes.grid()
        plt.xticks(x_axis, x_proaxis, rotation="vertical")
        axes.set_title("Flags bits " + sample)
        axes.set_ylabel("Number of sequences")
        axes.set_xlabel("Flags bits")
        add_figure_to_archive(fig, filename, "sequence-flag.png")

    def unicigar_bar(self, data, filename, grid, color, sample):
        """Counts per CIGAR operation (M reported in the x-label) -> cigars.png."""
        fig, axes = plt.subplots()
        x_axis = range(8)
        x_proaxis = ["I", "D", "N", "S", "H", "P", "=", "X"]
        # data[0] is the count of M operations; the bars show the rest.
        axes.bar(x_axis, data[1:], color=color)
        # BUG FIX: same missing call parentheses as uniflag_bar.
        if not grid:
            axes.grid()
        plt.xticks(x_axis, x_proaxis)
        axes.set_title("Cigars " + sample)
        axes.set_ylabel("Number of cigars")
        axes.set_xlabel("Cigars" + " " + "Number of cigars M: " + str(data[0]))
        add_figure_to_archive(fig, filename, "cigars.png")

    def uniposition_bar(self, data, filename, grid, color, sample):
        """Mapping position histogram -> position.png."""
        fig, axes = plt.subplots()
        x_axis = range(1, len(data) + 1)
        axes.bar(x_axis, data, color=color)
        if not grid:
            axes.grid()
        axes.set_title("Mapping positions " + sample)
        axes.set_ylabel("Number of sequences")
        axes.set_xlabel("Position")
        add_figure_to_archive(fig, filename, "position.png")

    def unimapq_bar(self, data, filename, grid, color, sample):
        """Mapping-quality histogram -> mapping-quality.png."""
        fig, axes = plt.subplots()
        x_axis = range(len(data))
        axes.bar(x_axis, data, color=color)
        if not grid:
            axes.grid()
        axes.set_title("Mapping Quality " + sample)
        axes.set_ylabel("Number of sequences")
        axes.set_xlabel("Mapping quality")
        add_figure_to_archive(fig, filename, "mapping-quality.png")
| {"hexsha": "c1a4932bc1b4b6ccef801bc957fca395d6ab578e", "size": 9072, "ext": "py", "lang": "Python", "max_stars_repo_path": "pystq/plots.py", "max_stars_repo_name": "JuantonioMS/pyngs", "max_stars_repo_head_hexsha": "5c929e68c975aae94669d0a0ff29ceb462de9b5e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pystq/plots.py", "max_issues_repo_name": "JuantonioMS/pyngs", "max_issues_repo_head_hexsha": "5c929e68c975aae94669d0a0ff29ceb462de9b5e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pystq/plots.py", "max_forks_repo_name": "JuantonioMS/pyngs", "max_forks_repo_head_hexsha": "5c929e68c975aae94669d0a0ff29ceb462de9b5e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.2784810127, "max_line_length": 122, "alphanum_fraction": 0.5973324515, "include": true, "reason": "import numpy", "num_tokens": 2266} |
from sklearn.metrics import accuracy_score,classification_report,roc_curve,auc,confusion_matrix
import numpy as np
from sklearn.model_selection import cross_val_predict
class Ensemble(object):
    """Majority-voting ensemble over a list of named, sklearn-style estimators.

    Each estimator must expose ``fit``/``predict`` (and ``predict_proba`` for
    :meth:`predict_prob`).  Per-estimator predictions are kept in
    ``self.result`` and combined by :meth:`vote`.
    """

    def __init__(self, estimators):
        # estimators: iterable of (name, estimator) pairs.
        self.estimators_names = []
        self.estimators = []
        self.result = {}        # name -> predicted labels
        self.accuracy = {}      # name -> accuracy of the last labelled predict()
        self.prob = {}          # name -> class probabilities from predict_proba
        self.votedResult = []
        self.votedAccuracy = 0
        self.datasize = 0
        for name, estimator in estimators:
            self.estimators.append(estimator)
            self.estimators_names.append(name)

    def fit(self, x, y):
        """Fit every member estimator on the same training set."""
        for estimator in self.estimators:
            estimator.fit(x, y)

    def predict(self, x, y=None):
        """Predict with each estimator; report accuracy when labels are given.

        Bug fix: the original called ``y.any()`` unconditionally, which raised
        AttributeError for the default ``y=None``.
        """
        self.datasize = x.shape[0]
        for name, estimator in zip(self.estimators_names, self.estimators):
            self.result[name] = estimator.predict(x)
            if y is not None and np.any(y):
                self.accuracy[name] = accuracy_score(y, self.result[name])
                print("{} accuracy is {}".format(name, self.accuracy[name]))
                if self.accuracy[name] > 0.5:
                    target_names = ['0', '1']
                    print(classification_report(y, self.result[name],
                                                target_names=target_names, digits=3))

    def predict_prob(self, x, y, threshold=0.9):
        """Probability-based prediction: label 1 only when P(class 1) >= ``threshold``.

        Prints the ROC operating points, a classification report and the
        confusion matrix for every estimator.  ``threshold`` generalises the
        previously hard-coded 0.9 cut-off (the default keeps old behaviour).
        """
        for name, estimator in zip(self.estimators_names, self.estimators):
            self.prob[name] = estimator.predict_proba(x)
            fpr, tpr, thresholds = roc_curve(y, self.prob[name][:, 1])
            roc_auc = auc(fpr, tpr)  # computed for inspection; not used below
            for point in zip(fpr, tpr, thresholds):
                print('fpr:%0.2f tpr:%0.2f t:%0.3f' % point)
            # Conservative labelling: predict 1 only with high confidence,
            # otherwise fall back to 0.
            self.result[name] = [0] * len(self.prob[name])
            for i, probs in enumerate(self.prob[name]):
                maxi = np.argmax(probs)
                if maxi == 1 and probs[maxi] >= threshold:
                    self.result[name][i] = 1
            target_names = ['0', '1']
            print(classification_report(y, self.result[name],
                                        target_names=target_names, digits=3))
            print(confusion_matrix(y, self.result[name]))

    def select(self, y):
        """Relabel ``self.result`` from stored probabilities (3-class case).

        Class index 1 maps to label 2 when confident (or moderately confident
        while the competing classes are non-negligible), otherwise to the
        reject label 6; other classes map to ``2 * argmax``.  ``y`` is kept
        for interface compatibility but is not used by the active code.
        """
        for name in self.estimators_names:
            prob = self.prob[name]
            for i, probs in enumerate(prob):
                maxi = np.argmax(probs)
                if maxi == 1:
                    if 0.9 > probs[maxi] >= 0.75 and min(probs[0], probs[2]) > 0.02:
                        self.result[name][i] = maxi * 2
                    elif probs[maxi] >= 0.9:
                        self.result[name][i] = maxi * 2
                    else:
                        # Not confident enough in class 1: reject label.
                        self.result[name][i] = 6
                else:
                    self.result[name][i] = maxi * 2

    def vote(self, y=None, weight=None):
        """Combine per-estimator predictions by (optionally weighted) majority vote.

        Bug fixes: ``y.any()`` crashed for the default ``y=None``; predictions
        stored as plain lists (e.g. by predict_prob/select) have no ``.shape``,
        so values are coerced with ``np.asarray`` first.
        """
        temp = np.zeros((self.datasize, len(self.estimators_names)))
        for i, value in enumerate(self.result.values()):
            column = np.asarray(value).reshape(-1, 1)  # tolerate list-valued results
            temp[:, i] = column[:, 0]
        for row in range(temp.shape[0]):
            count = np.bincount(temp[row].astype('int'), weights=weight)
            self.votedResult.append(np.argmax(count))
        if y is not None and np.any(y):
            self.votedAccuracy = accuracy_score(y, self.votedResult)
            print("voted accuracy is {}".format(self.votedAccuracy))
            target_names = ['0', '1']
            print(classification_report(y, self.votedResult, target_names=target_names))
| {"hexsha": "812242f75ab06926d83d1da97c9dd144636ec6b0", "size": 6498, "ext": "py", "lang": "Python", "max_stars_repo_path": "birthplace/FeatureExtraction/ensemble.py", "max_stars_repo_name": "sakuranew/KGAttributesExtraction", "max_stars_repo_head_hexsha": "f4d796046ced6ff508442a802962549f4c4a51de", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 247, "max_stars_repo_stars_event_min_datetime": "2018-12-28T10:56:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T09:02:23.000Z", "max_issues_repo_path": "birthplace/FeatureExtraction/ensemble.py", "max_issues_repo_name": "SunYanCN/BERT-AttributeExtraction", "max_issues_repo_head_hexsha": "f4d796046ced6ff508442a802962549f4c4a51de", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 15, "max_issues_repo_issues_event_min_datetime": "2018-12-28T06:05:05.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-25T07:53:19.000Z", "max_forks_repo_path": "birthplace/FeatureExtraction/ensemble.py", "max_forks_repo_name": "SunYanCN/BERT-AttributeExtraction", "max_forks_repo_head_hexsha": "f4d796046ced6ff508442a802962549f4c4a51de", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 61, "max_forks_repo_forks_event_min_datetime": "2019-01-03T08:54:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-01T16:18:48.000Z", "avg_line_length": 40.8679245283, "max_line_length": 156, "alphanum_fraction": 0.4766081871, "include": true, "reason": "import numpy", "num_tokens": 1584} |
module TestDefaultlogger

using Historic.Internal: DefaultLogger
using Logging
using Test

# Run `body` under a DefaultLogger writing to an in-memory main stream with a
# ConsoleLogger fallback, and return the captured output plus the plumbing.
function with_defaultlogger(body)
    mainbuf = IOBuffer()
    fallbackbuf = IOBuffer()
    ctx = (:displaysize => (5, 20),)
    mainio = IOContext(mainbuf, ctx...)
    fallbackio = IOContext(fallbackbuf, ctx...)
    logger = DefaultLogger(mainio, ConsoleLogger(fallbackio))
    ans = with_logger(body, logger)
    output = (main = String(take!(mainbuf)), fallback = String(take!(fallbackbuf)))
    logger_io = (main = mainio, fallback = fallbackio)
    buffer = (main = mainbuf, fallback = fallbackbuf)
    return (; output, logger, io = logger_io, buffer, ans)
end

function test()
    (; output) = with_defaultlogger() do
        @info(:msg1, a = 1, b = 2, c = 3)
        @info(:msg2, a = 1, very_very_very_very_very_very_long_key = 2)
        try
            error("an error to be caught")
        catch caught
            @error(:msg3, a = 1, exception = (caught, catch_backtrace()))
        end
    end
    # Compact main stream: long records truncated to the display width.
    @test output.main == """
    msg1 a=1 b=2 c=3
    msg2 a=1...
    Error: msg3 a=1
    """
    # The full exception detail goes to the fallback ConsoleLogger.
    @test occursin("an error to be caught", output.fallback)
end

end # module
| {"hexsha": "747a38e88a3d820a272de1ae2214f276f78f648b", "size": 1118, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/HistoricTests/src/test_defaultlogger.jl", "max_stars_repo_name": "JuliaConcurrent/Historic.jl", "max_stars_repo_head_hexsha": "e212319016f145bbd9fc136fac21a5a18ce4ffa4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-31T17:29:35.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-31T17:29:35.000Z", "max_issues_repo_path": "test/HistoricTests/src/test_defaultlogger.jl", "max_issues_repo_name": "JuliaConcurrent/Historic.jl", "max_issues_repo_head_hexsha": "e212319016f145bbd9fc136fac21a5a18ce4ffa4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2021-12-29T21:53:22.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-04T18:59:38.000Z", "max_forks_repo_path": "test/HistoricTests/src/test_defaultlogger.jl", "max_forks_repo_name": "JuliaConcurrent/Historic.jl", "max_forks_repo_head_hexsha": "e212319016f145bbd9fc136fac21a5a18ce4ffa4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.6666666667, "max_line_length": 91, "alphanum_fraction": 0.6019677996, "num_tokens": 312} |
#=
Character and string classification functions
Copyright 2017-2018 Gandalf Software, Inc., Scott P. Jones,
Licensed under MIT License, see LICENSE.md
=#
# Recommended by deprecate
@static if V6_COMPAT
    # Julia v0.6 compatibility: forward the old Base names (strwidth,
    # charwidth, normalize_string, ...) to/from this package's API.
    text_width(str::AbstractString) = strwidth(str)
    text_width(ch::Char) = charwidth(ch)
    import Base: is_assigned_char, normalize_string, isnumber
    Base.is_assigned_char(ch::Chr) = is_assigned(ch)
    Base.normalize_string(str::Str, opt::Symbol) = normalize(str, opt)
    Base.strwidth(str::Str) = text_width(str)
    Base.charwidth(ch::Chr) = text_width(ch)
    isnumber(val::Chr) = is_numeric(val)
else
    # Julia v0.7+: hook into Base.Unicode and extend a couple of the
    # package predicates to plain Char.
    Base.Unicode.normalize(str::Str, opt::Symbol) = normalize(str, opt)
    Base.Unicode.isassigned(ch::Chr) = is_assigned(ch)
    is_graphic(ch::Char) = is_graphic(codepoint(ch))
    is_alphanumeric(ch::Char) = is_alphanumeric(codepoint(ch))
end
############################################################################
## character column width function ##

# Column width of a Latin-1 code unit: C0/C1 controls (0x00-0x1f, 0x7f-0x9f)
# and the soft hyphen 0xad are zero width, everything else one column.
# Bug fix: `ch > 0x9f & ch != 0xad` parsed as the chained comparison
# `ch > (0x9f & ch) != 0xad` because Julia's `&` binds tighter than the
# comparison operators -- that version gave the soft hyphen a width of 1.
text_width(ch::UInt8) = Int(ifelse(ch < 0x7f, ch > 0x1f, (ch > 0x9f) & (ch != 0xad)))
# Beyond Latin-1 the width comes from utf8proc's table.
text_width(ch::UInt16) = utf8proc_charwidth(ch)
text_width(ch::UInt32) = utf8proc_charwidth(ch)
text_width(ch::Chr) = text_width(codepoint(ch))
# Printable ASCII (0x20-0x7e) is one column; the rest is zero.
text_width(ch::ASCIIChr) = Int(32 <= codepoint(ch) <= 126)
# Width of a string is the sum of its character widths.
# NOTE(review): `mapreduce(f, op, v0, itr)` is the pre-1.0 positional-init
# form, matching the V6_COMPAT support above -- confirm before targeting 1.x.
text_width(str::Str) = mapreduce(text_width, +, 0, str)
# ASCII/Latin strings: every character occupies exactly one column.
text_width(str::Str{Union{ASCIICSE,Latin_CSEs}}) = length(str)
############################################################################
# Unicode general category of a code unit or Chr as a Cint in 0:30;
# values above U+10FFFF map to 30 (the "invalid" bucket).
@inline _cat(ch::CodeUnitTypes) = ch <= 0x10ffff ? utf8proc_cat(ch) : Cint(30)
@inline _cat(ch::Chr) = _cat(codepoint(ch))
# returns code in 0:30 giving Unicode category
@inline category_code(ch::Union{Chr,CodeUnitTypes}) = _cat(ch)
# more human-readable representations of the category code
@inline category_abbrev(ch::CodeUnitTypes) = ch <= 0x10ffff ? utf8proc_cat_abbr(ch) : "In"
@inline category_abbrev(ch::Chr) = category_abbrev(codepoint(ch))
# Full category name, looked up in the (1-based) category_strings table.
category_string(ch::CodeUnitTypes) = category_strings[_cat(ch) + 1]
category_string(ch::Chr) = category_string(codepoint(ch))
# A codepoint is assigned iff its category is not CN ("unassigned").
is_assigned(ch::CodeUnitTypes) = category_code(ch) != Uni.CN
is_assigned(ch::Chr) = is_assigned(codepoint(ch))
# Build a UInt bitmask with one bit per category code: a single value,
# two values, or an inclusive range of category codes.
_cat_mask(a) = a
@inline _cat_mask(a, b) = (1%UInt << a%UInt) | (1%UInt << b%UInt)
@inline _cat_mask(rng::UnitRange) =
    ((2%UInt << rng.stop%UInt) - 1) & ~((1%UInt << rng.start%UInt) - 1)
# Test whether ch's category bit is set in the given mask.
@inline _check_mask(ch, mask) = ((1%UInt << _cat(ch)%UInt) & mask) != 0
## libc character class predicates ##
# 0xb5, 0xdf, and 0xff cannot be uppercased in LatinStr, although they are lowercase
@inline _can_upper_l(c) = (0xe0 <= c <= 0xfe) & (c != 0xf7)
@inline _can_upper(c) = _islower_a(c) | _can_upper_l(c)
# Control characters: C0 block (0x00-0x1f) plus DEL and the C1 block (0x7f-0x9f).
@inline _iscntrl(ch) = (ch <= 0x1f) | (0x7f <= ch <= 0x9f)
# Unsigned-subtraction trick: one comparison covers the whole '0'-'9' range.
@inline _isdigit(ch) = (ch - '0'%UInt8) <= 9
@inline _isxdigit(ch) = _isdigit(ch) | (ch - 'A'%UInt8 < 6) | (ch - 'a'%UInt8 < 6)
# Category bitmasks (see _cat_mask above) used by the full-Unicode kernels.
const _isupper_mask = _cat_mask(Uni.LU, Uni.LT)
const _isalpha_mask = _cat_mask(Uni.LU : Uni.LO)
const _isnumeric_mask = _cat_mask(Uni.ND : Uni.NO)
const _ispunct_mask = _cat_mask(Uni.PC : Uni.PO)
const _isprint_mask = _cat_mask(Uni.LU : Uni.ZS)
const _isgraph_mask = _cat_mask(Uni.LU : Uni.SO)
const _isalnum_mask = _isnumeric_mask | _isalpha_mask
############################################################################
# Definitions for characters in the ASCII subset of Unicode
# ASCII "numeric" is exactly the decimal digits.
const _isnumeric_a = _isdigit
# 128-bit bitmap literal with one bit per ASCII punctuation character.
@inline _ispunct_a(ch) = ((1%UInt128 << ch) & 0x2800_0000_b800_0001_8c00_f7ee_0000_0000) != 0
# Space plus TAB/LF/VT/FF/CR (9-13).
@inline _isspace_a(ch) = (ch == 32) | (9 <= ch <= 13)
@inline _islower_a(ch) = (ch - 'a'%UInt8) < 26
@inline _isupper_a(ch) = (ch - 'A'%UInt8) < 26
@inline _isalpha_a(ch) = _islower_a(ch) | _isupper_a(ch)
@inline _isalnum_a(ch) = _isdigit(ch) | _isalpha_a(ch)
# Printable: space through tilde; graphic additionally excludes the space.
@inline _isprint_a(ch) = 0x20 <= ch < 0x7f
@inline _isgraph_a(ch) = 0x20 < ch < 0x7f
############################################################################
# Definitions for characters in the Latin1 subset of Unicode, but not in the ASCII subset
# Numeric: bitmap 0x1c83 over 0xb2-0xbe selects the superscripts
# 0xb2, 0xb3, 0xb9 and the fractions 0xbc-0xbe.
@inline _isnumeric_l(ch) = (ch <= 0xbe && ((1<<(ch-0xb2)) & 0x1c83) != 0)
# Bitmap (offset by 0x80) of the Latin-1 punctuation characters.
@inline _ispunct_l(ch) = ((1%UInt64 << (ch-0x80)) & 0x88c0_0882_0000_0000) != 0
# NEL (0x85) and no-break space (0xa0).
@inline _isspace_l(ch) = (ch == 0x85) | (ch == 0xa0)
# Lowercase: 0xdf-0xff except division sign 0xf7, plus micro sign 0xb5.
@inline _islower_l(c) = ((0xdf <= c <= 0xff) & (c != 0xf7)) | (c == 0xb5)
# Uppercase: 0xc0-0xde except multiplication sign 0xd7.
@inline _isupper_l(c) = (0xc0 <= c%UInt8 <= 0xde) & (c != 0xd7)
@inline _isalpha_l(c) = ((0xc0 <= c <= 0xff) & (c != 0xf7) & (c != 0xd7)) | (c == 0xb5)
@inline _isalnum_l(c) = _isalpha_l(c) || _isnumeric_l(c)
# Printable/graphic: 0xa0-0xff except the soft hyphen 0xad;
# graphic additionally excludes the no-break space 0xa0.
@inline _isprint_l(ch) = ((0xa0 <= ch <= 0xff) & (ch != 0xad))
@inline _isgraph_l(ch) = ((0xa0 < ch <= 0xff) & (ch != 0xad))
############################################################################
# Definitions for any Unicode codepoint (requires call to utf8proc) (only used for non-Latin1)
# Each kernel tests the utf8proc general category against a mask (or a
# single category value for space/lowercase).
@inline _isnumeric_u(ch) = _check_mask(ch, _isnumeric_mask)
@inline _ispunct_u(ch) = _check_mask(ch, _ispunct_mask)
@inline _isspace_u(ch) = _cat(ch) == Uni.ZS
@inline _islower_u(ch) = _cat(ch) == Uni.LL
@inline _isupper_u(ch) = _check_mask(ch, _isupper_mask)
@inline _isalpha_u(ch) = _check_mask(ch, _isalpha_mask)
@inline _isalnum_u(ch) = _check_mask(ch, _isalnum_mask)
@inline _isprint_u(ch) = _check_mask(ch, _isprint_mask)
@inline _isgraph_u(ch) = _check_mask(ch, _isgraph_mask)
############################################################################
# Fallback definitions for all Chr types
@inline is_control(ch::CodeUnitTypes) = _iscntrl(ch)
@inline is_digit(ch::CodeUnitTypes) = _isdigit(ch)
@inline is_hex_digit(ch::CodeUnitTypes) = _isxdigit(ch)
@inline is_control(ch::Chr) = is_control(codepoint(ch))
@inline is_digit(ch::Chr) = is_digit(codepoint(ch))
@inline is_hex_digit(ch::Chr) = is_hex_digit(codepoint(ch))
# Subset tests by codepoint value: ASCII <= 0x7f, Latin-1 <= 0xff,
# BMP <= 0xffff excluding surrogates, valid Unicode <= 0x10ffff
# excluding surrogates.  The concrete-type methods short-circuit where
# the answer is known statically (e.g. every UInt8 value is in the BMP).
@inline is_ascii(ch::Chr) = is_ascii(codepoint(ch))
@inline is_ascii(ch::Unsigned) = ch <= 0x7f
@inline is_ascii(ch::ASCIIChr) = true
@inline is_latin(ch::AbstractChar) = is_latin(codepoint(ch))
@inline is_latin(ch::Unsigned) = ch <= 0xff
@inline is_bmp(ch::AbstractChar) = is_bmp(codepoint(ch))
@inline is_bmp(ch::Unsigned) = ch <= 0xffff && !is_surrogate_codeunit(ch)
@inline is_bmp(ch::UInt8) = true
@inline is_unicode(ch::AbstractChar) = is_unicode(codepoint(ch))
@inline is_unicode(ch::Unsigned) = ch <= 0x10ffff && !is_surrogate_codeunit(ch)
@inline is_unicode(ch::UInt8) = true
# (public-name, libc-style-suffix) pairs driving the generated predicates:
# e.g. (:numeric, :numeric) generates is_numeric from the _isnumeric_* kernels.
const _catfuns =
    ((:numeric, :numeric),
     (:punctuation, :punct),
     (:space, :space),
     (:lowercase, :lower),
     (:uppercase, :upper),
     (:alpha, :alpha),
     (:alphanumeric, :alnum),
     (:printable, :print),
     (:graph, :graph))
for (nnam, fnam) in _catfuns
    # Generated names: is_<nnam> (public), _is<fnam>_al (ASCII/Latin-1
    # dispatcher) and _is<fnam>_ch (full dispatcher).
    isnam = Symbol(string("is_", nnam))
    namroot = string("_is", fnam)
    fnam_a = Symbol(string(namroot, "_a"))
    fnam_al = Symbol(string(namroot, "_al"))
    fnam_ch = Symbol(string(namroot, "_ch"))
    # Pick the cheapest kernel (_a for ASCII, _l for Latin-1, _u via utf8proc)
    # based on the numeric value of the character.
    @eval $(fnam_al)(ch) = is_ascii(ch) ? $(fnam_a)(ch) : $(Symbol(string(namroot, "_l")))(ch)
    @eval $(fnam_ch)(ch) = is_latin(ch) ? $(fnam_al)(ch) : $(Symbol(string(namroot, "_u")))(ch)
    # Public methods: narrower character types skip the unnecessary dispatch.
    @eval $(isnam)(ch::CodeUnitTypes) = $(fnam_ch)(ch)
    @eval $(isnam)(ch::Chr) = $(fnam_ch)(codepoint(ch))
    @eval $(isnam)(ch::ASCIIChr) = $(fnam_a)(codepoint(ch))
    @eval $(isnam)(ch::LatinChars) = $(fnam_al)(codepoint(ch))
end
############################################################################
@static if isdefined(Base, :ismalformed)
    # Newer Base defines ismalformed/isoverlong for AbstractChar; Chr values
    # are always well-formed, so both answer false, and the package-local
    # names simply forward to Base.
    Base.ismalformed(ch::Chr) = false
    Base.isoverlong(ch::Chr) = false
    is_malformed(ch) = ismalformed(ch)
    is_overlong(ch) = isoverlong(ch)
else
    # Older Base lacks those functions: provide the package names directly.
    is_malformed(ch) = false
    is_overlong(ch) = false
end
# Check whether a UTF-8 encoded String contains only Latin-1 characters
# (U+0000-U+00FF): every code unit must be ASCII, or a 0xc2/0xc3 lead byte
# followed by a valid continuation byte.
function is_latin(str::MaybeSub{String})
    (siz = sizeof(str)) == 0 && return true
    @preserve str begin
        pnt = pointer(str)
        fin = pnt + siz
        while pnt < fin
            cu = get_codeunit(pnt)
            # cu must be 1) 0-0x7f, or 2) 0xc2 or 0xc3 followed by 0x80-0xbf
            # (bug fix: this previously tested `cu < 0x7f`, wrongly
            # rejecting the valid ASCII byte 0x7f/DEL)
            (cu < 0x80 ||
             ((cu - 0xc2) < 0x02 &&
              (pnt += 1) < fin && is_valid_continuation(get_codeunit(pnt)))) ||
                return false
            pnt += 1
        end
        true
    end
end
# Validate the trailing two bytes of a 3-byte UTF-8 sequence whose lead byte
# is `cu` (`pnt` points at the third byte): both must be continuation bytes,
# and the decoded codepoint must not fall in the surrogate range.
@inline function check_3byte(cu, pnt)
    b2 = get_codeunit(pnt-1)
    b3 = get_codeunit(pnt)
    is_valid_continuation(b2) && is_valid_continuation(b3) &&
        !is_surrogate_codeunit(((cu & 0x0f)%UInt32 << 12) | ((b2 & 0x3f)%UInt32 << 6) | (b3 & 0x3f))
end
# Check whether a UTF-8 encoded String contains only BMP characters
# (U+0000-U+FFFF, excluding surrogates): accept 1-, 2- and 3-byte
# sequences and reject anything longer.
function is_bmp(str::MaybeSub{String})
    (siz = sizeof(str)) == 0 && return true
    @preserve str begin
        pnt = pointer(str)
        fin = pnt + siz
        while pnt < fin
            cu = get_codeunit(pnt)
            # Accept: 1) ASCII 0x00-0x7f (bug fix: was `cu < 0x7f`, which
            #            wrongly rejected 0x7f/DEL),
            #         2) 2-byte lead 0xc2-0xdf followed by a continuation,
            #         3) 3-byte lead 0xe0-0xef (bug fix: the range test was
            #            `< 0x0f`, which wrongly excluded the valid lead
            #            byte 0xef, i.e. U+F000-U+FFFF) with valid
            #            continuations and a non-surrogate codepoint.
            (cu < 0x80 ||
             ((cu - 0xc2) < 0x1e && (pnt += 1) < fin && checkcont(pnt)) ||
             ((cu - 0xe0) < 0x10 && (pnt += 2) < fin && check_3byte(cu, pnt))) ||
                return false
            pnt += 1
        end
        true
    end
end
# Generic fallbacks for arbitrary AbstractStrings: a string has a given
# property iff every one of its characters does (trivially true when empty).
is_latin(str::AbstractString) = all(ch -> is_latin(ch), str)
is_bmp(str::AbstractString) = all(ch -> is_bmp(ch), str)
is_unicode(str::AbstractString) = all(ch -> is_unicode(ch), str)
| {"hexsha": "55b5d27e53be0536209192c3e3d7a0a7b68625d7", "size": 9531, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/unicode.jl", "max_stars_repo_name": "oxinabox/Strs.jl", "max_stars_repo_head_hexsha": "fe071fd34fc8de23076f2d8e7dcbae5efd8d9011", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/unicode.jl", "max_issues_repo_name": "oxinabox/Strs.jl", "max_issues_repo_head_hexsha": "fe071fd34fc8de23076f2d8e7dcbae5efd8d9011", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/unicode.jl", "max_forks_repo_name": "oxinabox/Strs.jl", "max_forks_repo_head_hexsha": "fe071fd34fc8de23076f2d8e7dcbae5efd8d9011", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.2771084337, "max_line_length": 100, "alphanum_fraction": 0.6035043542, "num_tokens": 3076} |
# coding=utf-8
"""Given image and homography matrix, visualize the homograph."""
from __future__ import print_function
import argparse
import cv2
import os
import numpy as np
# Command-line interface: input image, homography file, output image path.
parser = argparse.ArgumentParser()
parser.add_argument("image")  # path to the input image
parser.add_argument("homography")  # path to a 3x3 comma-separated matrix file
parser.add_argument("new_image")  # path to write the visualization to
def read_h(h_file):
  """Read a 3x3 homography matrix from a text file.

  Each of the first three lines must hold three comma-separated floats.

  Args:
    h_file: path to the homography text file.

  Returns:
    (3, 3) float numpy array.
  """
  h_matrix = np.zeros((3, 3), dtype="float")
  # Context manager closes the file deterministically (the original left the
  # handle open); stop after three rows so trailing lines cannot overflow
  # the matrix index.
  with open(h_file, "r") as f:
    for i, line in enumerate(f):
      if i >= 3:
        break
      h_matrix[i, :] = [float(x) for x in line.strip().split(",")]
  return h_matrix
if __name__ == "__main__":
args = parser.parse_args()
image = cv2.imread(args.image)
h, w, c = image.shape
print(h, w)
overlay = np.zeros((h, w, 3), dtype="uint8")
H = read_h(args.homography)
_, H_inv = cv2.invert(H)
# [x1, y1, x2, y2]
image_box = (0, 0), (h, 0), (0, w), (h, w)
world_box = []
for x, y in image_box:
w_x, w_y, w_z = np.tensordot(H, np.array([x, y, 1]), axes=1)
world_box.append((w_x/w_z, w_y/w_z))
xy = np.float32(image_box).reshape(-1, 1, 2)
world_box_xy = cv2.perspectiveTransform(
xy, H)
# these are the same
print(world_box)
print(world_box_xy)
world_x1, world_y1 = world_box_xy[0, 0, :]
world_x2, world_y2 = world_box_xy[-1, 0, :]
world_x1, world_y1 = -10, -10
world_x2, world_y2 = 100, 100
step = 100
step_size_x = (world_x2 - world_x1) / step
step_size_y = (world_y2 - world_y1) / step
for step_x in range(step):
for step_y in range(step):
this_world_x = world_x1 + step_x * step_size_x
this_world_y = world_y1 + step_y * step_size_y
image_x, image_y, z = np.tensordot(
H_inv, np.array([this_world_x, this_world_y, 1]), axes=1)
image_x /= z
image_y /= z
# image_xy = cv2.perspectiveTransform(np.array([[[this_world_x, this_world_y]]]), H_inv)
# image_x, image_y = np.squeeze(image_xy)
if (image_x >= 0) and (image_x < w) and (image_y >= 0) and (image_y < h):
overlay[int(image_y), int(image_x), 2] = 255
new_image = cv2.addWeighted(image, 1.0, overlay, 1.0, 0)
cv2.imwrite(args.new_image, new_image)
| {"hexsha": "b925baa05173cdfe96c43739efe80aeea964816e", "size": 2136, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/baselines/vis_homography.py", "max_stars_repo_name": "JunweiLiang/next-prediction", "max_stars_repo_head_hexsha": "0b7f78321fd43037fa1e6582715eb734000c2cf8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2019-08-23T08:19:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-22T01:59:01.000Z", "max_issues_repo_path": "code/baselines/vis_homography.py", "max_issues_repo_name": "JunweiLiang/next-prediction", "max_issues_repo_head_hexsha": "0b7f78321fd43037fa1e6582715eb734000c2cf8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/baselines/vis_homography.py", "max_forks_repo_name": "JunweiLiang/next-prediction", "max_forks_repo_head_hexsha": "0b7f78321fd43037fa1e6582715eb734000c2cf8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-07-15T01:44:19.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-11T09:18:19.000Z", "avg_line_length": 27.3846153846, "max_line_length": 94, "alphanum_fraction": 0.6507490637, "include": true, "reason": "import numpy", "num_tokens": 702} |
! H0 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
! H0 X
! H0 X libAtoms+QUIP: atomistic simulation library
! H0 X
! H0 X Portions of this code were written by
! H0 X Albert Bartok-Partay, Silvia Cereda, Gabor Csanyi, James Kermode,
! H0 X Ivan Solt, Wojciech Szlachta, Csilla Varnai, Steven Winfield.
! H0 X
! H0 X Copyright 2006-2010.
! H0 X
! H0 X These portions of the source code are released under the GNU General
! H0 X Public License, version 2, http://www.gnu.org/copyleft/gpl.html
! H0 X
! H0 X If you would like to license the source code under different terms,
! H0 X please contact Gabor Csanyi, gabor@csanyi.net
! H0 X
! H0 X Portions of this code were written by Noam Bernstein as part of
! H0 X his employment for the U.S. Government, and are not subject
! H0 X to copyright in the USA.
! H0 X
! H0 X
! H0 X When using this software, please cite the following reference:
! H0 X
! H0 X http://www.libatoms.org
! H0 X
! H0 X Additional contributions by
! H0 X Alessio Comisso, Chiara Gattinoni, and Gianpietro Moras
! H0 X
! H0 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
!X
!X Minimization module
!X
!% This module contains subroutines to perform conjugate gradient and
!% damped MD minimisation of an objective function.
!% The conjugate gradient minimiser can use various different line minimisation routines.
!X
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#include "error.inc"
module minimization_module
use error_module
use system_module
use linearalgebra_module
implicit none
private
SAVE
public :: minim, n_minim, fire_minim, test_gradient, n_test_gradient, precon_data, preconminim, precondimer
public :: KahanSum, DoubleKahanSum
real(dp),parameter:: DXLIM=huge(1.0_dp) !% Maximum amount we are willing to move any component in a linmin step
! parameters from Numerical Recipes */
real(dp),parameter:: GOLD=1.618034_dp
integer, parameter:: MAXFIT=5
real(dp),parameter:: TOL=1e-2_dp
real(dp),parameter:: CGOLD=0.3819660_dp
real(dp),parameter:: ZEPS=1e-10_dp
integer, parameter:: ITER=50
type precon_data
logical :: multI = .FALSE.
logical :: diag = .FALSE.
logical :: dense = .FALSE.
integer, allocatable :: preconrowlengths(:)
integer, allocatable :: preconindices(:,:)
real(dp), allocatable :: preconcoeffs(:,:,:)
character(10) :: precon_id
integer :: nneigh,mat_mult_max_iter,max_sub
real(dp) :: energy_scale,length_scale,cutoff,res2
logical :: has_fixed = .FALSE.
real(dp) :: cell_coeff = 1.0_dp
real(dp) :: bulk_modulus, number_density, mu
end type precon_data
integer, parameter :: E_FUNC_BASIC=1, E_FUNC_KAHAN=2, E_FUNC_DOUBLEKAHAN=3
interface minim
module procedure minim
end interface
interface preconminim
module procedure preconminim
end interface
interface precondimer
module procedure precondimer
end interface
interface test_gradient
module procedure test_gradient
end interface
interface n_test_gradient
module procedure n_test_gradient
end interface
interface smartmatmul
module procedure smartmatmulmat, smartmatmulvec
end interface
! LBFGS stuff
external LB2
integer::MP,LP
real(dp)::GTOL,STPMIN,STPMAX
common /lb3/MP,LP,GTOL,STPMIN,STPMAX
CONTAINS
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
!
!% Smart line minimiser, adapted from Numerical Recipes.
!% The objective function is 'func'.
!
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
function linmin(x0,xdir,y,epsilon,func,data)
real(dp) :: x0(:) !% Starting position
real(dp) :: xdir(:)!% Direction of gradient at 'x0'
real(dp) :: y(:) !% Finishing position returned in 'y' after 'linmin'
real(dp)::epsilon !% Initial step size
INTERFACE
function func(x,data)
use system_module
real(dp)::x(:)
character(len=1),optional::data(:)
real(dp)::func
end function func
END INTERFACE
character(len=1),optional::data(:)
integer:: linmin
integer::i,it,sizeflag,N, bracket_it
real(dp)::Ea,Eb,Ec,a,b,c,r,q,u,ulim,Eu,fallback
real(dp)::v,w,x,Ex,Ev,Ew,e,d,xm,tol1,tol2,p,etmp
! Dynamically allocate to avoid stack overflow madness with ifort
real(dp), dimension(:), allocatable :: tmpa, tmpb, tmpc, tmpu
!%RV Number of linmin steps taken, or zero if an error occured
N=size(x0)
allocate(tmpa(N), tmpb(N), tmpc(N), tmpu(N))
tmpa=x0
y=x0
#ifndef _OPENMP
call verbosity_push_decrement()
#endif
Ea=func(tmpa,data)
#ifndef _OPENMP
call verbosity_pop()
#endif
call print(" Linmin: Ea = " // Ea // " a = " // 0.0_dp, PRINT_NORMAL)
Eb= Ea + 1.0_dp !just to start us off
a=0.0_dp
b=2.0_dp*epsilon
! lets figure out if we can go downhill at all
it = 2
do while( (Eb.GT.Ea) .AND. (.NOT.(Eb.FEQ.Ea)))
b=b*0.5_dp
tmpb(:)= x0(:)+b*xdir(:)
#ifndef _OPENMP
call verbosity_push_decrement()
#endif
Eb=func(tmpb,data)
#ifndef _OPENMP
call verbosity_pop()
#endif
call print(" Linmin: Eb = " // Eb // " b = " // b, PRINT_VERBOSE)
it = it+1
if(b.LT.1.0e-20) then
write(line,*) " Linmin: Direction points the wrong way\n" ; call print(line, PRINT_NORMAL)
epsilon=0.0_dp
linmin=0
return
end if
end do
! does it work in fortran?
! two: if(isnan(Eb)) then
! write(global_err%unit,*) "linmin: got a NaN!"
! epsilon=0
! linmin=0
! return
! end if two
if(Eb.FEQ.Ea) then
epsilon=b
linmin=1
call print(" Linmin: Eb.feq.Ea, returning after one step", PRINT_VERBOSE)
return
end if
! we now have Ea > Eb */
fallback = b ! b is the best point so far, make that the fallback
write(line,*) " Linmin: xdir is ok.."; call print(line,PRINT_VERBOSE)
c = b + GOLD*b !first guess for c */
tmpc = x0 + c*xdir
#ifndef _OPENMP
call verbosity_push_decrement()
#endif
Ec = func(tmpc,data)
#ifndef _OPENMP
call verbosity_pop()
#endif
call print(" Linmin: Ec = " // Ec // " c = " // c, PRINT_VERBOSE)
it = it + 1
! ! does it work in fortran?
! four: if(isnan(Ec)) then
! write(global_err%unit,*) "linmin: Ec is a NaN!"
! epsilon=0
! linmin=0
! return
! end if four
!let's bracket the minimum
do while(Eb.GT.Ec)
write(line,*) a,Ea; call print(line,PRINT_VERBOSE)
write(line,*) b,Eb; call print(line,PRINT_VERBOSE)
write(line,*) c,Ec; call print(line,PRINT_VERBOSE)
! compute u by quadratic fit to a, b, c
!inverted ?????????????????????
r = (b-a)*(Eb-Ec)
q = (b-c)*(Eb-Ea)
u = b-((b-c)*q-(b-a)*r)/(2.0_dp*max(abs(q-r), 1.0e-20_dp)*sign(q-r))
ulim = b+MAXFIT*(c-b)
write(line,*) "u= ",u ; call print(line,PRINT_VERBOSE)
if((u-b)*(c-u).GT. 0) then ! b < u < c
write(line,*)"b < u < c" ; call print(line,PRINT_VERBOSE)
tmpu = x0 + u*xdir
#ifndef _OPENMP
call verbosity_push_decrement()
#endif
Eu = func(tmpu,data)
#ifndef _OPENMP
call verbosity_pop()
#endif
call print("linmin got one Eu " // Eu // " " // u, PRINT_NERD)
it = it + 1
if(Eu .LT. Ec) then ! Eb > Eu < Ec
a = b
b = u
Ea = Eb
Eb = Eu
exit !break?
else if(Eu .GT. Eb) then ! Ea > Eb < Eu
c = u
Ec = Eu
exit
end if
!no minimum found yet
u = c + GOLD*(c-b)
tmpu = x0 + u*xdir
#ifndef _OPENMP
call verbosity_push_decrement()
#endif
Eu = func(tmpu,data)
#ifndef _OPENMP
call verbosity_pop()
#endif
call print("linmin got second Eu " // Eu // " " // u, PRINT_NERD)
it = it + 1
else if((u-c)*(ulim-u) .GT. 0) then ! c < u < ulim
write(line,*) " c < u < ulim= ", ulim; call print(line,PRINT_VERBOSE)
tmpu = x0 + u*xdir
#ifndef _OPENMP
call verbosity_push_decrement()
#endif
Eu = func(tmpu,data)
#ifndef _OPENMP
call verbosity_pop()
#endif
call print("linmin got one(2) Eu " // Eu // " " // u, PRINT_NERD)
it = it + 1
if(Eu .LT. Ec) then
b = c
c = u
u = c + GOLD*(c-b)
Eb = Ec
Ec = Eu
tmpu = x0 + u*xdir
#ifndef _OPENMP
call verbosity_push_decrement()
#endif
Eu = func(tmpu,data)
#ifndef _OPENMP
call verbosity_pop()
#endif
call print("linmin got second(2) Eu " // Eu // " " // u, PRINT_NERD)
it = it + 1
end if
else ! c < ulim < u or u is garbage (we are in a linear regime)
write(line,*) " ulim=",ulim," < u or u garbage"; call print(line,PRINT_VERBOSE)
u = ulim
tmpu = x0 + u*xdir
#ifndef _OPENMP
call verbosity_push_decrement()
#endif
Eu = func(tmpu,data)
#ifndef _OPENMP
call verbosity_pop()
#endif
it = it + 1
call print("linmin got one(3) Eu " // Eu // " " // u, PRINT_NERD)
end if
write(line,*) " "; call print(line,PRINT_VERBOSE)
write(line,*) " "; call print(line,PRINT_VERBOSE)
! test to see if we change any component too much
do i = 1,N
if(abs(c*xdir(i)) .GT. DXLIM)then
tmpb = x0+b*xdir
#ifndef _OPENMP
call verbosity_push_decrement()
#endif
Eb = func(tmpb,data)
#ifndef _OPENMP
call verbosity_pop()
#endif
call print("linmin got new Eb " // Eb // " " // b, PRINT_NERD)
it = it + 1
y = tmpb
write(line,*) " bracket: step too big", b, Eb
call print(line, PRINT_VERBOSE)
write(line,'("I= ",I4," C= ",F16.12," xdir(i)= ",F16.12," DXLIM =",F16.12)')&
i, c, xdir(i), DXLIM
call print(line, PRINT_VERBOSE)
epsilon = b
linmin= it
return
end if
end do
a = b
b = c
c = u
Ea = Eb
Eb = Ec
Ec = Eu
end do
epsilon = b
fallback = b
bracket_it = it
call print(" Linmin: bracket OK in "//bracket_it//" steps", PRINT_VERBOSE)
! ahhh.... now we have a minimum between a and c, Ea > Eb < Ec
write(line,*) " "; call print(line,PRINT_VERBOSE)
write(line,*) a,Ea; call print(line,PRINT_VERBOSE)
write(line,*) b,Eb; call print(line,PRINT_VERBOSE)
write(line,*) c,Ec; call print(line,PRINT_VERBOSE)
write(line,*) " "; call print(line,PRINT_VERBOSE)
!********************************************************************
! * primitive linmin
! * do a quadratic fit to a, b, c
! *
!
! r = (b-a)*(Eb-Ec);
! q = (b-c)*(Eb-Ea);
! u = b-((b-c)*q-(b-a)*r)/(2*Mmax(fabs(q-r), 1e-20)*sign(q-r));
! y = x0 + u*xdir;
! Eu = (*func)(y);
!
! if(Eb < Eu){ // quadratic fit was bad
! if(current_verbosity() > MUMBLE) logger(" Quadratic fit was bad, returning 'b'\n");
! u = b;
! y = x0 + u*xdir;
! Eu = (*func)(y);
! }
!
! if(current_verbosity() > PRINT_SILENT)
! logger(" simple quadratic fit: %25.16e%25.16e\n\n", u, Eu);
!
! //return(u);
! *
! * end primitive linmin
!**********************************************************************/
! now we need a<b as the two endpoints
b=c
Eb=Ec
v = b; w = b; x = b
Ex = Eb; Ev = Eb; Ew = Eb
e = 0.0_dp
d = 0.0_dp
sizeflag = 0
call print("linmin got bracket", PRINT_NERD)
! main loop for parabolic search
DO it = 1, ITER
xm = 0.5_dp*(a+b)
tol1 = TOL*abs(x)+ZEPS
tol2 = 2.0_dp*tol1
! are we done?
if((abs(x-xm) < tol2-0.5_dp*(b-a)) .OR.(sizeflag.gt.0)) then
tmpa = x0 + x*xdir
y = tmpa
#ifndef _OPENMP
call verbosity_push_decrement()
#endif
Ex = func(y,data)
#ifndef _OPENMP
call verbosity_pop()
#endif
call print("linmin got new Ex " // Ex // " " // x, PRINT_NERD)
if(sizeflag.gt.0) call print('Linmin: DXlim exceeded')
if( Ex .GT. Ea )then ! emergency measure, linmin screwed up, use fallback
call print(' Linmin screwed up! current Ex= '//Ex//' at x= '//x//' is worse than bracket')
call print(' Linmin variables: a='//a//' b='//b//' Ev='//Ev//' v='//v//' Ew='//Ew//' Eu='//Eu//' u='//u);
x = fallback
epsilon = fallback
tmpa = x0 + x*xdir
#ifndef _OPENMP
call verbosity_push_decrement()
#endif
Ex = func(tmpa,data)
#ifndef _OPENMP
call verbosity_pop()
#endif
call print("linmin got new Ex " // Ex // " " // x, PRINT_NERD)
y = tmpa
else
epsilon = x
end if
call print(" Linmin done "//bracket_it//" bracket and "//it//" steps Ex= "//Ex//" x= "//x)
linmin=it
return
endif
! try parabolic fit on subsequent steps
if(abs(e) > tol1) then
r = (x-w)*(Ex-Ev)
q = (x-v)*(Ex-Ew)
p = (x-v)*q-(x-w)*r
q = 2.0_dp*(q-r)
if(q > 0.0_dp) p = -p
q = abs(q)
etmp = e
e = d
! is the parabolic fit acceptable?
if(abs(p) .GE. abs(0.5_dp*q*etmp) .OR.&
p .LE. q*(a-x) .OR. p .GE. q*(b-x)) then
! no, take a golden section step
call print(' Linmin: Taking Golden Section step', PRINT_VERBOSE+1)
if(x .GE. xm) then
e = a-x
else
e = b-x
end if
d = CGOLD*e
else
call print(' Linmin: Taking parabolic step', PRINT_VERBOSE+1)
! yes, take the parabolic step
d = p/q
u = x+d
if(u-a < tol2 .OR. b-u < tol2) d = tol1 * sign(xm-x)
end if
else
! on the first pass, do golden section
if(x .GE. xm) then
e = a-x
else
e = b-x
end if
d = CGOLD*e
end if
! construct new step
if(abs(d) > tol1) then
u = x+d
else
u = x + tol1*sign(d)
end if
! evaluate function
tmpa = u*xdir
#ifndef _OPENMP
call verbosity_push_decrement()
#endif
Eu = func(x0+tmpa,data)
#ifndef _OPENMP
call verbosity_pop()
#endif
call print(' Linmin: new point u= '//u//' Eu= '//Eu, PRINT_VERBOSE)
if(any(abs(tmpa) > DXLIM)) then
if(sizeflag .EQ. 0) then
call print(' Linmin: an element of x moved more than '//DXLIM)
end if
sizeflag = 1
endif
! analyse new point result
if(Eu .LE. Ex) then
call print(' Linmin: new point best so far', PRINT_VERBOSE+1)
if(u .GE. x) then
a = x
else
b = x
end if
v=w;w=x;x=u
Ev=Ew;Ew=Ex;Ex=Eu
else
call print(' Linmin: new point is no better', PRINT_VERBOSE+1)
if(u < x) then
a = u
else
b = u
endif
if(Eu .LE. Ew .OR. w .EQ. x) then
v=w;w=u
Ev = Ew; Ew = Eu
else if(Eu .LE. Ev .OR. v .EQ. x .OR. v .EQ. w) then
v = u
Ev = Eu
end if
end if
call print(' Linmin: a='//a//' b='//b//' x='//x, PRINT_VERBOSE+1)
end do
write(line,*) " Linmin: too many iterations"; call print(line, PRINT_NORMAL)
y = x0 + x*xdir
epsilon = x
linmin = it
deallocate(tmpa, tmpb, tmpc, tmpu)
return
end function linmin
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
!
!% Simple (fast, dumb) line minimiser. The 'func' interface is
!% the same as for 'linmin' above.
!
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
function linmin_fast(x0,fx0,xdir,y,epsilon,func,data) result(linmin)
real(dp)::x0(:) !% Starting vector
real(dp)::fx0 !% Value of 'func' at 'x0'
real(dp)::xdir(:) !% Direction
real(dp)::y(:) !% Result
real(dp)::epsilon !% Initial step
INTERFACE
function func(x,data)
use system_module
real(dp)::x(:)
character(len=1),optional::data(:)
real(dp)::func
end function func
END INTERFACE
character(len=1),optional::data(:)
integer::linmin
! Dynamically allocate to avoid stack overflow madness with ifort
real(dp),allocatable::xb(:),xc(:)
real(dp)::r,q,new_eps,a,b,c,fxb,fxc,ftol
integer::n
logical::reject_quadratic_extrap
!%RV Number of linmin steps taken, or zero if an error occured
N=size(x0)
new_eps=-1.0_dp
a=0.0_dp
ftol=1.e-13
allocate(xb(N))
allocate(xc(N))
call print("Welcome to linmin_fast", PRINT_NORMAL)
reject_quadratic_extrap = .true.
do while(reject_quadratic_extrap)
b = a+epsilon
c = b+GOLD*epsilon
xb = x0+b*xdir
xc = x0+c*xdir
#ifndef _OPENMP
call verbosity_push_decrement()
#endif
fxb = func(xb,data)
fxc = func(xc,data)
#ifndef _OPENMP
call verbosity_pop()
#endif
write(line,*) " abc = ", a, b, c; call print(line, PRINT_NORMAL)
write(line,*) " f = ",fx0, fxb, fxc; call print(line, PRINT_NORMAL)
if(abs(fx0-fxb) < abs(ftol*fx0))then
write(line,*) "*** fast_linmin is stuck, returning 0"
call print(line,PRINT_SILENT)
linmin=0
return
end if
!WARNING: find a way to do the following
! probably we will need to wrap isnan
!if(isnan(fxb) .OR. isnan(fxc))then
! write(global_err%unit,*)" Got a NaN!!!"
! linmin=0
! return
!end if
! if(.NOT.finite(fxb) .OR. .NOT.finite(fxc))then
! write(global_err%unit,*)" Got a INF!!!"
! linmin=0
! return
! end if
r = (b-a)*(fxb-fxc)
q = (b-c)*(fxb-fx0)
new_eps = b-((b-c)*q-(b-a)*r)/(2.0*max(abs(q-r), 1.0_dp-20)*sign(1.0_dp,q-r))
write(line,*) " neweps = ", new_eps; call print(line, PRINT_NORMAL)
if (abs(fxb) .GT. 100.0_dp*abs(fx0) .OR. abs(fxc) .GT. 100.0_dp*abs(fx0)) then ! extrapolation gave stupid results
epsilon = epsilon/10.0_dp
reject_quadratic_extrap = .true.
else if(new_eps > 10.0_dp*(b-a))then
write(line,*) "*** new epsilon > 10.0 * old epsilon, capping at factor of 10.0 increase"
call print(line, PRINT_NORMAL)
epsilon = 10.0_dp*(b-a)
a = c
fx0 = fxc
reject_quadratic_extrap = .true.
else if(new_eps < 0.0_dp) then
! new proposed minimum is behind us!
if(fxb < fx0 .and. fxc < fx0 .and. fxc < fxb) then
! increase epsilon
epsilon = epsilon*2.0_dp
call print("*** quadratic extrapolation resulted in backward step, increasing epsilon: "//epsilon, PRINT_NORMAL)
else
epsilon = epsilon/10.0_dp
write(line,*)"*** xdir wrong way, reducing epsilon ",epsilon
call print(line, PRINT_NORMAL)
endif
reject_quadratic_extrap = .true.
else if(new_eps < epsilon/10.0_dp) then
write(line,*) "*** new epsilon < old epsilon / 10, reducing epsilon by a factor of 2"
epsilon = epsilon/2.0_dp
reject_quadratic_extrap = .true.
else
reject_quadratic_extrap = .false.
end if
end do
y = x0+new_eps*xdir
epsilon = new_eps
linmin=1
deallocate(xb, xc)
return
end function linmin_fast
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
!
!% Line minimizer that uses the derivative and extrapolates
!% its projection onto the search direction to zero. Again,
!% the 'func' interface is the same as for 'linmin'.
!
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
  function linmin_deriv(x0, xdir, dx0, y, epsilon, dfunc, data) result(linmin)
    !% Derivative-only line minimiser: evaluates the gradient at a trial
    !% step along 'xdir' and linearly extrapolates its projection onto the
    !% search direction to zero. The step is reduced (by factors of 5) while
    !% the trial point has a larger projected gradient magnitude than the
    !% starting point. On success 'epsilon' holds the accepted step and 'y'
    !% the corresponding point.
    real(dp)::x0(:) !% Starting vector
    real(dp)::xdir(:)!% Search direction
    real(dp)::dx0(:) !% Initial gradient
    real(dp)::y(:)   !% Result
    real(dp)::epsilon!% Initial step
    INTERFACE
       function dfunc(x,data)
         use system_module
         real(dp)::x(:)
         character(len=1),optional::data(:)
         real(dp)::dfunc(size(x))
       end function dfunc
    END INTERFACE
    character(len=1),optional::data(:)
    integer::linmin
    ! local vars
    integer::N
    real(dp), allocatable::x1(:), dx1(:)
    real(dp)::dirdx0, dirdx1, gamma, new_eps
    !%RV Number of linmin steps taken, or zero if an error occured
    N=size(x0)
    ! sentinel: guarantees at least one pass of the loop below
    dirdx1 = 9.9e30_dp
    new_eps = epsilon
    allocate(x1(N), dx1(N))
    linmin = 0
    dirdx0 = xdir .DOT. dx0
    if( dirdx0 > 0.0_dp) then ! should be negative
       ! xdir is not a descent direction; bail out with linmin = 0
       call print("WARNING: linmin_deriv: xdir*dx0 > 0 !!!!!", PRINT_ALWAYS)
       return
    endif
    do while(abs(dirdx1) > abs(dirdx0))
       x1 = x0+new_eps*xdir
#ifndef _OPENMP
       call verbosity_push_decrement()
#endif
       dx1 = dfunc(x1,data)
#ifndef _OPENMP
       call verbosity_pop()
#endif
       dirdx1 = xdir .DOT. dx1
       if(abs(dirdx1) > abs(dirdx0)) then ! this eps leads to a point with larger abs gradient
          call print("WARNING: linmin_deriv: |dirdx1| > |dirdx0|, reducing epsilon by factor of 5", PRINT_NORMAL)
          new_eps = new_eps/5.0_dp
          if(new_eps < 1.0e-12_dp) then
             ! step underflowed to numerically meaningless size: give up
             call print("WARNING: linmin_deriv: new_eps < 1e-12 !!!!!", PRINT_NORMAL)
             linmin = 0
             return
          endif
       else
          ! secant extrapolation of the projected gradient to zero,
          ! capped at a doubling of the step
          gamma = dirdx0/(dirdx0-dirdx1)
          if(gamma > 2.0_dp) then
             call print("*** gamma > 2.0, capping at 2.0", PRINT_NORMAL)
             gamma = 2.0_dp
          endif
          ! NOTE(review): gamma scales the original 'epsilon' rather than the
          ! (possibly reduced) 'new_eps' at which dirdx1 was measured —
          ! looks inconsistent after a reduction; confirm intent.
          new_eps = gamma*epsilon
       endif
       linmin = linmin+1
    end do
    write (line,'(a,e10.2,a,e10.2)') ' gamma = ', gamma, ' new_eps = ', new_eps
    call Print(line, PRINT_NORMAL)
    y = x0+new_eps*xdir
    epsilon = new_eps
    deallocate(x1, dx1)
    return
  end function linmin_deriv
  !% Iterative version of 'linmin_deriv' that avoids taking large steps
  !% by iterating the extrapolation to zero gradient.
  function linmin_deriv_iter(x0, xdir, dx0, y, epsilon, dfunc,data,do_line_scan) result(linmin)
    !% Iterative derivative-only line minimiser. Maintains a bracket
    !% [eps1, eps2] on the step length, with dirdx1/dirdx2 the projected
    !% gradients at the two ends, and repeatedly secant-interpolates the
    !% projected gradient to zero, shrinking the bracket from both sides
    !% each outer iteration. Gives up after 'max_extrap_steps' consecutive
    !% extrapolations without bracketing.
    real(dp)::x0(:) !% Starting vector
    real(dp)::xdir(:)!% Search direction
    real(dp)::dx0(:) !% Initial gradient
    real(dp)::y(:)   !% Result
    real(dp)::epsilon!% Initial step; on success, the accepted step
    logical, optional :: do_line_scan !% If true, print a 50-point scan of the projected gradient first
    INTERFACE
       function dfunc(x,data)
         use system_module
         real(dp)::x(:)
         character(len=1),optional::data(:)
         real(dp)::dfunc(size(x))
       end function dfunc
    END INTERFACE
    character(len=1),optional :: data(:)
    integer::linmin
    ! local vars
    integer::N, extrap_steps,i
    real(dp), allocatable::xn(:), dxn(:)
    real(dp)::dirdx1, dirdx2, dirdx_new, eps1, eps2, new_eps, eps11, step, old_eps
    integer, parameter :: max_extrap_steps = 50
    logical :: extrap
    !%RV Number of linmin steps taken, or zero if an error occured
    N=size(x0)
    allocate(xn(N), dxn(N))
    ! optional diagnostic: log the projected gradient along the direction
    ! at geometrically growing steps (no effect on the minimisation state)
    if (present(do_line_scan)) then
      if (do_line_scan) then
        call print('line scan:', PRINT_NORMAL)
        new_eps = 1.0e-5_dp
        do i=1,50
           xn = x0 + new_eps*xdir
#ifndef _OPENMP
           call verbosity_push_decrement()
#endif
           dxn = dfunc(xn,data)
#ifndef _OPENMP
           call verbosity_pop()
#endif
           dirdx_new = xdir .DOT. dxn
           call print(new_eps//' '//dirdx_new//' <-- LS', PRINT_NORMAL)
           new_eps = new_eps*1.15
        enddo
      end if
    end if
    ! bracket starts as [0, epsilon]; dirdx1/dirdx2 are the projected
    ! gradients at eps1/eps2 (both set from the starting gradient)
    eps1 = 0.0_dp
    eps2 = epsilon
    new_eps = epsilon
    linmin = 0
    dirdx1 = xdir .DOT. dx0
    dirdx2 = dirdx1
    if( dirdx1 > 0.0_dp) then ! should be negative
       ! xdir is not a descent direction; bail out with linmin = 0
       call print("WARNING: linmin_deriv_iter: xdir*dx0 > 0 !!!!!", PRINT_ALWAYS)
       return
    endif
    extrap_steps = 0
    ! outer loop: until the bracket collapses to relative width TOL
    do while ( (abs(eps1-eps2) > TOL*abs(eps1)) .and. extrap_steps < max_extrap_steps)
       ! inner loop: evaluate at new_eps and update the bracket
       do
          xn = x0 + new_eps*xdir
#ifndef _OPENMP
          call verbosity_push_decrement()
#endif
          dxn = dfunc(xn,data)
#ifndef _OPENMP
          call verbosity_pop()
#endif
          dirdx_new = xdir .DOT. dxn
          linmin = linmin + 1
          call print('eps1 = '//eps1//' eps2 = '//eps2//' new_eps = '//new_eps, PRINT_NORMAL)
          call print('dirdx1 = '//dirdx1//' dirdx2 = '//dirdx2//' dirdx_new = '//dirdx_new, PRINT_NORMAL)
          extrap = .false.
          if (dirdx_new > 0.0_dp) then
             if(abs(dirdx_new) < 2.0_dp*abs(dirdx1)) then
                ! projected gradient at new point +ve, but not too large.
                call print("dirdx_new > 0, but not too large", PRINT_NORMAL)
                eps2 = new_eps
                dirdx2 = dirdx_new
                extrap_steps = 0
                ! we're straddling the minimum well, so gamma < 2 and we can interpolate to the next step
                ! let's try to bring in eps1
                ! (bisect from eps1 towards new_eps until the projected
                ! gradient there is negative again)
                step = 0.5_dp*(new_eps-eps1)
                dirdx1 = 1.0_dp
                do while (dirdx1 > 0.0_dp)
                   eps11 = eps1+step
                   call print("Trying to bring in eps1: "//eps11, PRINT_NORMAL)
                   xn = x0 + eps11*xdir
#ifndef _OPENMP
                   call verbosity_push_decrement()
#endif
                   dxn = dfunc(xn,data)
#ifndef _OPENMP
                   call verbosity_pop()
#endif
                   dirdx1 = xdir .DOT. dxn
                   step = step/2.0_dp
                end do
                eps1 = eps11
                exit
             else
                ! projected gradient at new point +ve and large
                ! let's decrease the step we take
                call print("*** reducing trial epsilon by factor of 2, making eps2=current new_eps", PRINT_NORMAL)
                eps2 = new_eps
                dirdx2 = dirdx_new
                new_eps = (eps1+new_eps)/2.0_dp
                ! check if we are just fiddling around
                if(new_eps < 1.0e-12_dp) then
                   call print("WARNING: linmin_deriv_iter: total_eps < 1e-12 !!!!!", PRINT_NORMAL)
                   linmin = 0
                   return
                endif
             end if
          else ! projected gradient is -ve
             if(abs(dirdx_new) <= abs(dirdx1)) then
                ! projected gradient smaller than at x1
                if(dirdx2 > 0.0_dp) then
                   ! we have good bracketing, so interpolate
                   call print("dirdx_new <= 0, and we have good bracketing", PRINT_NORMAL)
                   eps1 = new_eps
                   dirdx1 = dirdx_new
                   extrap_steps = 0
                   ! let's try to bring in eps2
                   ! (bisect from eps2 towards new_eps until the projected
                   ! gradient there is positive again)
                   step = 0.5_dp*(eps2-new_eps)
                   dirdx2 = -1.0_dp
                   do while(dirdx2 < 0.0_dp)
                      eps11 = eps2-step
                      call print("Trying to bring in eps2: "//eps11, PRINT_NORMAL)
                      xn = x0 + eps11*xdir
#ifndef _OPENMP
                      call verbosity_push_decrement()
#endif
                      dxn = dfunc(xn,data)
#ifndef _OPENMP
                      call verbosity_pop()
#endif
                      dirdx2 = xdir .DOT. dxn
                      step = step/2.0_dp
                   end do
                   eps2 = eps11
                   exit
                else
                   ! we have not bracketed yet, but can take this point and extrapolate to a bigger stepsize
                   old_eps = new_eps
                   if(abs(dirdx_new-dirdx1) .fne. 0.0_dp) new_eps = eps1-dirdx1/(dirdx_new-dirdx1)*(new_eps-eps1)
                   call print("we have not bracketed yet, extrapolating: "//new_eps, PRINT_NORMAL)
                   extrap_steps = extrap_steps + 1
                   if(new_eps > 5.0_dp*old_eps) then
                      ! extrapolation is too large, let's just move closer
                      call print("capping extrapolation at "//2.0_dp*old_eps, PRINT_NORMAL)
                      eps1 = old_eps
                      new_eps = 2.0_dp*old_eps
                   else
                      ! accept the extrapolation
                      eps1 = old_eps
                      dirdx1 = dirdx_new
                   end if
                endif
             else
                if (dirdx2 < 0.0_dp) then
                   ! have not bracketed yet, and projected gradient too big - minimum is behind us! lets move forward
                   call print("dirdx2 < 0.0_dp and projected gradient too big, closest stationary point is behind us!", PRINT_NORMAL)
                   eps1 = new_eps
                   dirdx1 = dirdx_new
                   new_eps = eps1*2.0_dp
                   eps2 = new_eps
                   extrap_steps = extrap_steps+1
                else
                   call print("abs(dirdx_new) > abs(dirdx1) but dirdx2 > 0, should only happen when new_eps is converged. try to bring in eps2", PRINT_NORMAL)
                   eps2 = 0.5_dp*(new_eps+eps2)
                   xn = x0 + eps2*xdir
#ifndef _OPENMP
                   call verbosity_push_decrement()
#endif
                   dxn = dfunc(xn,data)
#ifndef _OPENMP
                   call verbosity_pop()
#endif
                   dirdx2 = xdir .DOT. dxn
                   exit
                endif
             endif
          end if
       end do
       ! secant interpolation of the projected gradient across the bracket
       new_eps = eps1 - dirdx1/(dirdx2-dirdx1)*(eps2-eps1)
    end do
    if (extrap_steps == max_extrap_steps) then
       call Print('*** linmin_deriv: max consequtive extrapolation steps exceeded', PRINT_ALWAYS)
       linmin = 0
       return
    end if
    call print('linmin_deriv_iter done in '//linmin//' steps')
    epsilon = new_eps
    y = x0 + epsilon*xdir
    deallocate(xn, dxn)
    return
  end function linmin_deriv_iter
  !% Simplified iterative version of 'linmin_deriv' that avoids taking large
  !% steps by iterating the extrapolation to zero gradient. It does not try to
  !% reduce the bracketing interval on both sides at every step.
  function linmin_deriv_iter_simple(x0, xdir, dx0, y, epsilon, dfunc,data,do_line_scan) result(linmin)
    !% Simplified variant of 'linmin_deriv_iter': keeps a bracket
    !% [eps1, eps2] on the step with projected gradients dirdx1/dirdx2 and
    !% secant-interpolates the projected gradient to zero, but does not
    !% bisect to tighten the bracket from both ends each iteration.
    !% Terminates when successive steps agree to relative tolerance TOL.
    real(dp)::x0(:) !% Starting vector
    real(dp)::xdir(:)!% Search direction
    real(dp)::dx0(:) !% Initial gradient
    real(dp)::y(:)   !% Result
    real(dp)::epsilon!% Initial step; on success, the accepted step
    logical, optional :: do_line_scan !% If true, print a 50-point scan of the projected gradient first
    INTERFACE
       function dfunc(x,data)
         use system_module
         real(dp)::x(:)
         character(len=1),optional::data(:)
         real(dp)::dfunc(size(x))
       end function dfunc
    END INTERFACE
    character(len=1),optional :: data(:)
    integer::linmin
    ! local vars
    integer::N, extrap_steps, i
    real(dp), allocatable::xn(:), dxn(:)
    real(dp)::dirdx1, dirdx2, dirdx_new, eps1, eps2, new_eps, old_eps
    integer, parameter :: max_extrap_steps = 50
    logical :: extrap
    !%RV Number of linmin steps taken, or zero if an error occured
    N=size(x0)
    allocate(xn(N), dxn(N))
    ! optional diagnostic: log the projected gradient along the direction
    ! at geometrically growing steps (no effect on the minimisation state)
    if (present(do_line_scan)) then
      if (do_line_scan) then
        call print('line scan:', PRINT_NORMAL)
        new_eps = 1.0e-5_dp
        do i=1,50
           xn = x0 + new_eps*xdir
#ifndef _OPENMP
           call verbosity_push_decrement()
#endif
           dxn = dfunc(xn,data)
#ifndef _OPENMP
           call verbosity_pop()
#endif
           dirdx_new = xdir .DOT. dxn
           call print(new_eps//' '//dirdx_new, PRINT_NORMAL)
           new_eps = new_eps*1.15
        enddo
      end if
    end if
    ! bracket starts as [0, epsilon]; dirdx2 = 0 flags "not bracketed yet"
    eps1 = 0.0_dp
    eps2 = epsilon
    old_eps = 0.0_dp
    new_eps = epsilon
    linmin = 0
    dirdx1 = xdir .DOT. dx0
    dirdx2 = 0.0_dp
    if( dirdx1 > 0.0_dp) then ! should be negative
       ! xdir is not a descent direction; bail out with linmin = 0
       call print("WARNING: linmin_deriv_iter_simple: xdir*dx0 > 0 !!!!!", PRINT_ALWAYS)
       return
    endif
    extrap_steps = 0
    ! main loop: until successive trial steps agree to relative TOL
    do while ( (abs(old_eps-new_eps) > TOL*abs(new_eps)) .and. extrap_steps < max_extrap_steps)
       xn = x0 + new_eps*xdir
#ifndef _OPENMP
       call verbosity_push_decrement()
#endif
       dxn = dfunc(xn,data)
#ifndef _OPENMP
       call verbosity_pop()
#endif
       dirdx_new = xdir .DOT. dxn
       linmin = linmin + 1
       call print('eps1 = '//eps1//' eps2 = '//eps2//' new_eps = '//new_eps, PRINT_NORMAL)
       call print('dirdx1 = '//dirdx1//' dirdx2 = '//dirdx2//' dirdx_new = '//dirdx_new, PRINT_NORMAL)
       extrap = .false.
       if (dirdx_new > 0.0_dp) then
          if(abs(dirdx_new) < 10.0_dp*abs(dirdx1)) then
             ! projected gradient at new point +ve, but not too large.
             call print("dirdx_new > 0, but not too large", PRINT_NORMAL)
             eps2 = new_eps
             dirdx2 = dirdx_new
             extrap_steps = 0
             ! we're straddling the minimum well, so gamma < 2 and we can interpolate to the next step
          else
             ! projected gradient at new point +ve and large
             ! let's decrease the step we take
             call print("*** reducing trial epsilon by factor of 2, making eps2=current new_eps", PRINT_NORMAL)
             eps2 = new_eps
             dirdx2 = dirdx_new
             old_eps = new_eps
             new_eps = (eps1+new_eps)/2.0_dp
             ! check if we are just fiddling around
             if(new_eps < 1.0e-12_dp) then
                call print("WARNING: linmin_deriv_iter_simple: new_eps < 1e-12 !!!!!", PRINT_NORMAL)
                linmin = 0
                return
             endif
             cycle
          end if
       else ! projected gradient is -ve
          if(abs(dirdx_new) <= abs(dirdx1)) then
             ! projected gradient smaller than at x1
             if(dirdx2 > 0.0_dp) then
                ! we have good bracketing, so interpolate
                call print("dirdx_new <= 0, and we have good bracketing", PRINT_NORMAL)
                eps1 = new_eps
                dirdx1 = dirdx_new
                extrap_steps = 0
             else
                ! we have not bracketed yet, but can take this point and extrapolate to a bigger stepsize
                old_eps = new_eps
                if(abs(dirdx_new-dirdx1) .fne. 0.0_dp) new_eps = eps1-dirdx1/(dirdx_new-dirdx1)*(new_eps-eps1)
                call print("we have not bracketed yet, extrapolating: "//new_eps, PRINT_NORMAL)
                if(new_eps > 5.0_dp*old_eps) then
                   ! extrapolation is too large, let's just move closer
                   call print("capping extrapolation at "//2.0_dp*old_eps, PRINT_NORMAL)
                   eps1 = old_eps
                   new_eps = 2.0_dp*old_eps
                else
                   ! accept the extrapolation
                   eps1 = old_eps
                   dirdx1 = dirdx_new
                end if
                extrap_steps = extrap_steps + 1
                cycle
             endif
          else
             ! dirdx2 == 0 means the far end has never seen a +ve gradient,
             ! i.e. we have not bracketed yet
             if (dirdx2 .eq. 0.0_dp) then
                ! have not bracketed yet, and projected gradient too big - minimum is behind us! lets move forward
                call print("dirdx2 < 0.0_dp and projected gradient too big, closest stationary point is behind us!", PRINT_NORMAL)
                eps1 = new_eps
                dirdx1 = dirdx_new
                old_eps = new_eps
                new_eps = eps1*2.0_dp
                eps2 = new_eps
                extrap_steps = extrap_steps+1
                cycle
             else
                call print("dirdx_new < 0, abs(dirdx_new) > abs(dirdx1) but dirdx2 > 0, function not monotonic?", PRINT_NORMAL)
                eps1 = new_eps
                dirdx1 = dirdx_new
             endif
          endif
       end if
       ! secant interpolation of the projected gradient across the bracket
       old_eps = new_eps
       new_eps = eps1 - dirdx1/(dirdx2-dirdx1)*(eps2-eps1)
    end do
    if (extrap_steps == max_extrap_steps) then
       call Print('*** linmin_deriv_iter_simple: max consequtive extrapolation steps exceeded', PRINT_ALWAYS)
       linmin = 0
       return
    end if
    epsilon = new_eps
    y = x0 + epsilon*xdir
    deallocate(xn, dxn)
    return
  end function linmin_deriv_iter_simple
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
!
!% Damped molecular dynamics minimiser. The objective
!% function is 'func(x)' and its gradient is 'dfunc(x)'.
!
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
function damped_md_minim(x,mass,func,dfunc,damp,tol,max_change,max_steps,data)
real(dp)::x(:) !% Starting vector
real(dp)::mass(:) !% Effective masses of each degree of freedom
INTERFACE
function func(x,data)
use system_module
real(dp)::x(:)
character(len=1),optional::data(:)
real(dp)::func
end function func
end INTERFACE
INTERFACE
function dfunc(x,data)
use system_module
real(dp)::x(:)
character(len=1),optional::data(:)
real(dp)::dfunc(size(x))
end function dfunc
END INTERFACE
real(dp)::damp !% Velocity damping factor
real(dp)::tol !% Minimisation is taken to be converged when 'normsq(force) < tol'
real(dp)::max_change !% Maximum position change per time step
integer:: max_steps !% Maximum number of MD steps
character(len=1),optional::data(:)
integer :: N,i
integer :: damped_md_minim
real(dp),allocatable::velo(:),acc(:),force(:)
real(dp)::dt,df2, f
!%RV Returns number of MD steps taken.
N=size(x)
allocate(velo(N),acc(N),force(N))
call print("Welcome to damped md minim()")
write(line,*)"damping = ", damp ; call print(line)
velo=0.0_dp
#ifndef _OPENMP
call verbosity_push_decrement()
#endif
acc = -dfunc(x,data)/mass
#ifndef _OPENMP
call verbosity_pop()
#endif
dt = sqrt(max_change/maxval(abs(acc)))
write(line,*)"dt = ", dt ; call print(line)
do I=0,max_steps
velo(:)=velo(:) + (0.5*dt)*acc(:)
#ifndef _OPENMP
call verbosity_push_decrement()
#endif
force(:)= -dfunc(X,data)
#ifndef _OPENMP
call verbosity_pop()
#endif
df2=normsq(force)
if(df2 .LT. tol) then
write(line,*) I," force^2 =",df2 ; call print(line)
write(line,*)"Converged in ",i," steps!!" ; call print(line)
exit
else if (mod(i,100) .EQ. 0) then
write(line,*)i," f = ", func(x,data), "df^2 = ", df2, "max(abs(df)) = ",maxval(abs(force)); call print(line)
end if
acc(:)=force(:)/mass(:)
velo(:)=velo(:) + (0.5*dt)*acc(:)
velo(:)=velo(:) * (1.0-damp)/(1.0+damp)
x(:)=x(:)+dt*velo(:)
x(:)=x(:)+0.5*dt*dt*acc(:)
#ifndef _OPENMP
call verbosity_push_decrement()
#endif
f = func(x,data)
#ifndef _OPENMP
call verbosity_pop()
#endif
call print("f=" // f, PRINT_VERBOSE)
call print(x, PRINT_NERD)
end do
!
if(i .EQ. max_steps) then
write(line,*) "Failed to converge in ",i," steps" ; call print(line)
end if
damped_md_minim = i
return
end function damped_md_minim
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
!
!% Gradient descent minimizer, using either the conjugate gradients or
!% steepest descent methods. The objective function is
!% 'func(x)' and its gradient is 'dfunc(x)'.
!% There is an additional 'hook' interface which is called at the
!% beginning of each gradient descent step.
!
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
function minim(x_in,func,dfunc,method,convergence_tol,max_steps, linminroutine, hook, hook_print_interval, &
eps_guess, always_do_test_gradient, data, status)
real(dp), intent(inout) :: x_in(:) !% Starting position
INTERFACE
function func(x,data)
use system_module
real(dp)::x(:)
character(len=1),optional::data(:)
real(dp)::func
end function func
end INTERFACE
INTERFACE
function dfunc(x,data)
use system_module
real(dp)::x(:)
character(len=1),optional::data(:)
real(dp)::dfunc(size(x))
end function dfunc
END INTERFACE
character(*), intent(in) :: method !% 'cg' for conjugate gradients or 'sd' for steepest descent
real(dp), intent(in) :: convergence_tol !% Minimisation is treated as converged once $|\mathbf{\nabla}f|^2 <$
!% 'convergence_tol'.
integer, intent(in) :: max_steps !% Maximum number of 'cg' or 'sd' steps
integer::minim
character(*), intent(in), optional :: linminroutine !% Name of the line minisation routine to use.
!% This should be one of 'NR_LINMIN', 'FAST_LINMIN' and
!% 'LINMIN_DERIV'.
!% If 'FAST_LINMIN' is used and problems with the line
!% minisation are detected, 'minim' automatically switches
!% to the more reliable 'NR_LINMIN', and then switches back
!% once no more problems have occurred for some time.
!% the default is NR_LINMIN
optional :: hook
INTERFACE
subroutine hook(x,dx,E,done,do_print,data)
use system_module
real(dp), intent(in) ::x(:)
real(dp), intent(in) ::dx(:)
real(dp), intent(in) ::E
logical, intent(out) :: done
logical, optional, intent(in) :: do_print
character(len=1),optional, intent(in) ::data(:)
end subroutine hook
end INTERFACE
integer, intent(in), optional :: hook_print_interval
real(dp), intent(in), optional :: eps_guess
logical, intent(in), optional :: always_do_test_gradient
character(len=1), optional, intent(inout) :: data(:)
integer, optional, intent(out) :: status
!%RV Returns number of gradient descent steps taken during minimisation
integer, parameter:: max_bad_cg = 5
integer, parameter:: max_bad_iter = 5
integer, parameter:: convergence_window = 3
integer:: convergence_counter
integer:: main_counter
integer:: bad_cg_counter
integer:: bad_iter_counter
integer:: resetflag
integer:: exit_flag
integer:: lsteps
integer:: i, extra_report
real(dp), parameter:: stuck_tol = NUMERICAL_ZERO
real(dp):: linmin_quality
real(dp):: eps, alpha
real(dp):: oldeps
real(dp), parameter :: default_eps_guess = 0.1_dp ! HACK
real(dp) :: my_eps_guess
real(dp):: f, f_new
real(dp):: gg, dgg, hdirgrad_before, hdirgrad_after
real(dp):: dcosine, gdirlen, gdirlen_old, normsqgrad_f, normsqgrad_f_old
real(dp):: obj, obj_new
logical:: do_sd, do_sd2, do_cg, do_pcg, do_lbfgs
integer:: fast_linmin_switchback
logical:: do_fast_linmin
logical:: do_linmin_deriv
logical:: dumbool, done
logical:: do_test_gradient
integer :: my_hook_print_interval
! working arrays
! Dynamically allocate to avoid stack overflow madness with ifort
real(dp),dimension(:), allocatable :: x, y, hdir, gdir, gdir_old, grad_f, grad_f_old, x_old
! for lbfgs
real(dp), allocatable :: lbfgs_work(:), lbfgs_diag(:)
integer :: lbfgs_flag
integer, parameter :: lbfgs_M = 40
if (current_verbosity() >= PRINT_VERBOSE) then
my_hook_print_interval = optional_default(1, hook_print_interval)
else if (current_verbosity() >= PRINT_NORMAL) then
my_hook_print_interval = optional_default(10, hook_print_interval)
else
my_hook_print_interval = optional_default(100000, hook_print_interval)
endif
call system_timer("minim")
call system_timer("minim/init")
allocate(x(size(x_in)))
x = x_in
allocate(y(size(x)), hdir(size(x)), gdir(size(x)), gdir_old(size(x)), &
grad_f(size(x)), grad_f_old(size(x)))
extra_report = 0
if (present(status)) status = 0
call print("Welcome to minim()", PRINT_NORMAL)
call print("space is "//size(x)//" dimensional", PRINT_NORMAL)
do_sd = .false.
do_sd2 = .false.
do_cg = .false.
do_pcg = .false.
do_lbfgs = .false.
if(trim(method).EQ."sd") then
do_sd = .TRUE.
call print("Method: Steepest Descent", PRINT_NORMAL)
else if(trim(method).EQ."sd2")then
do_sd2 = .TRUE.
call print("Method: Two-Point Step Size Gradient Methods, J Barzilai and JM Borwein, IMA J Num Anal (1988) 8, 141-148", PRINT_NORMAL)
allocate(x_old(size(x)))
y=x
x_old = x
else if(trim(method).EQ."cg")then
do_cg = .TRUE.
call print("Method: Conjugate Gradients", PRINT_NORMAL)
else if(trim(method).EQ."pcg")then
do_cg = .TRUE.
do_pcg = .TRUE.
call print("Method: Preconditioned Conjugate Gradients", PRINT_NORMAL)
else if(trim(method).EQ."lbfgs") then
do_lbfgs = .TRUE.
call print("Method: LBFGS by Jorge Nocedal, please cite D. Liu and J. Nocedal, Mathematical Programming B 45 (1989) 503-528", PRINT_NORMAL)
allocate(lbfgs_diag(size(x)))
allocate(lbfgs_work(size(x)*(lbfgs_M*2+1)+2*lbfgs_M))
call print("Allocating LBFGS work array: "//(size(x)*(lbfgs_M*2+1)+2*lbfgs_M)//" bytes")
lbfgs_flag = 0
! set units
MP = mainlog%unit
LP = errorlog%unit
!
y=x ! this is reqired for lbfgs on first entry into the main loop, as no linmin is done
else
call System_abort("Invalid method in optimize: '"//trim(method)//"'")
end if
do_fast_linmin = .FALSE.
do_linmin_deriv = .FALSE.
if(present(linminroutine)) then
if(do_lbfgs) &
call print("Minim warning: a linminroutine was specified for use with LBFGS")
if(do_sd2) &
call print("Minim warning: a linminroutine was specified for use with two-point steepest descent SD2")
if(trim(linminroutine) .EQ. "FAST_LINMIN") then
do_fast_linmin =.TRUE.
call print("Using FAST_LINMIN linmin", PRINT_NORMAL)
else if(trim(linminroutine).EQ."NR_LINMIN") then
call print("Using NR_LINMIN linmin", PRINT_NORMAL)
else if(trim(linminroutine).EQ."LINMIN_DERIV") then
do_linmin_deriv = .TRUE.
call print("Using LINMIN_DERIV linmin", PRINT_NORMAL)
else
call System_abort("Invalid linminroutine: "//linminroutine)
end if
end if
if (current_verbosity() .GE. PRINT_NERD .and. .not. do_linmin_deriv) then
dumbool=test_gradient(x, func, dfunc,data=data)
end if
! initial function calls
if (.not. do_linmin_deriv) then
#ifndef _OPENMP
call verbosity_push_decrement(2)
#endif
f = func(x,data)
#ifndef _OPENMP
call verbosity_pop()
#endif
else
f = 0.0_dp
end if
#ifndef _OPENMP
call verbosity_push_decrement(2)
#endif
grad_f = dfunc(x,data)
#ifndef _OPENMP
call verbosity_pop()
#endif
if (my_hook_print_interval > 0) then
if (present(hook)) then
call hook(x, grad_f, f, done, .true., data)
if (done) then
call print('hook reports that minim finished, exiting.', PRINT_NORMAL)
exit_flag = 1
end if
else
call print("hook is not present", PRINT_VERBOSE)
end if
endif
grad_f_old = grad_f
gdir = (-1.0_dp)*grad_f
hdir = gdir
gdir_old = gdir
normsqgrad_f = normsq(grad_f)
normsqgrad_f_old = normsqgrad_f
gdirlen = sqrt(normsqgrad_f)
gdirlen_old = gdirlen
! quality monitors
dcosine = 0.0_dp
linmin_quality = 0.0_dp
bad_cg_counter = 0
bad_iter_counter = 0
convergence_counter = 0
resetflag = 0
exit_flag = 0
fast_linmin_switchback = -1
lsteps = 0
do_test_gradient = optional_default(.false., always_do_test_gradient)
my_eps_guess = optional_default(default_eps_guess, eps_guess)
eps = my_eps_guess
!********************************************************************
!*
!* MAIN CG LOOP
!*
!**********************************************************************
if(normsqgrad_f .LT. convergence_tol)then
call print("Minimization is already converged!")
call print(trim(method)//" iter = "// 0 //" df^2 = " // normsqgrad_f // " f = " // f &
&// " "//lsteps//" linmin steps eps = "//eps,PRINT_VERBOSE)
exit_flag = 1
end if
call system_timer("minim/init")
call system_timer("minim/main_loop")
main_counter=1 ! incremented at the end of the loop
do while((main_counter .LT. max_steps) .AND. (.NOT.(exit_flag.gt.0)))
call system_timer("minim/main_loop/"//main_counter)
if ((current_verbosity() >= PRINT_ANALYSIS .or. do_test_gradient) &
.and. .not. do_linmin_deriv) then
dumbool=test_gradient(x, func, dfunc,data=data)
if (.not. dumbool) call print("Gradient test failed")
end if
!**********************************************************************
!*
!* Print stuff
!*
!**********************************************************************/
#ifndef _OPENMP
if (my_hook_print_interval == 1 .or. mod(main_counter,my_hook_print_interval) == 1) call verbosity_push_increment()
#endif
call print(trim(method)//" iter = "//main_counter//" df^2 = "//normsqgrad_f//" f = "//f// &
' max(abs(df)) = '//maxval(abs(grad_f)),PRINT_VERBOSE)
if(.not. do_lbfgs) &
call print(" dcos = "//dcosine//" q = " //linmin_quality,PRINT_VERBOSE)
#ifndef _OPENMP
if (my_hook_print_interval == 1 .or. mod(main_counter,my_hook_print_interval) == 1) call verbosity_pop()
#endif
! call the hook function
if (present(hook)) then
call hook(x, grad_f, f, done, (mod(main_counter-1,my_hook_print_interval) == 0), data)
if (done) then
call print('hook reports that minim finished, exiting.', PRINT_NORMAL)
exit_flag = 1
call system_timer("minim/main_loop/"//main_counter)
cycle
end if
else
call print("hook is not present", PRINT_VERBOSE)
end if
!**********************************************************************
!*
!* test to see if we've converged, let's quit
!*
!**********************************************************************/
!
if(normsqgrad_f < convergence_tol) then
convergence_counter = convergence_counter + 1
!call print("Convergence counter = "//convergence_counter)
else
convergence_counter = 0
end if
if(convergence_counter == convergence_window) then
call print("Converged after step " // main_counter)
call print(trim(method)//" iter = " // main_counter // " df^2= " // normsqgrad_f // " f= " // f)
exit_flag = 1 ! while loop will quit
call system_timer("minim/main_loop/"//main_counter)
cycle !continue
end if
if( (.not. do_lbfgs) .and. (.not. do_sd2) ) then
!**********************************************************************
!*
!* do line minimization
!*
!**********************************************************************/
oldeps = eps
! no output from linmin unless level >= PRINT_VERBOSE
call system_timer("minim/main_loop/"//main_counter//"/linmin")
#ifndef _OPENMP
call verbosity_push_decrement()
#endif
if(do_fast_linmin) then
lsteps = linmin_fast(x, f, hdir, y, eps, func,data)
if (lsteps .EQ.0) then
call print("Fast linmin failed, calling normal linmin at step " // main_counter)
lsteps = linmin(x, hdir, y, eps, func,data)
end if
else if(do_linmin_deriv) then
lsteps = linmin_deriv_iter_simple(x, hdir, grad_f, y, eps, dfunc,data)
else
lsteps = linmin(x, hdir, y, eps, func,data)
end if
if ((oldeps .fne. my_eps_guess) .and. (eps > oldeps*2.0_dp)) then
eps = oldeps*2.0_dp
endif
#ifndef _OPENMP
call verbosity_pop()
#endif
call system_timer("minim/main_loop/"//main_counter//"/linmin")
!**********************************************************************
!*
!* check the result of linmin
!*
!**********************************************************************
if(lsteps .EQ. 0) then ! something very bad happenned, gradient is bad?
call print("*** LINMIN returned 0, RESETTING CG CYCLE and eps at step " // main_counter)
#ifndef _OPENMP
call verbosity_push_increment()
#endif
extra_report = extra_report + 1
hdir = -1.0 * grad_f
eps = my_eps_guess
if (current_verbosity() >= PRINT_NERD) call line_scan(x, hdir, func, .not. do_linmin_deriv, dfunc, data)
bad_iter_counter = bad_iter_counter + 1
if(bad_iter_counter .EQ. max_bad_iter) then
call print("*** BAD linmin counter reached maximum, exiting " // max_bad_iter)
exit_flag = 1
if (present(status)) status = 1
end if
call system_timer("minim/main_loop/"//main_counter)
cycle !continue
end if
end if ! .not. lbfgs .and. .not. sd2
!**********************************************************************
!*
!* Evaluate function at new position
!*
!**********************************************************************
if (.not. do_linmin_deriv) then
#ifndef _OPENMP
call verbosity_push_decrement(2)
#endif
f_new = func(y,data)
#ifndef _OPENMP
call verbosity_pop()
#endif
else
f_new = 0.0_dp
end if
! if(!finite(f_new)){
! logger(ERROR, "OUCH!!! f_new is not finite!\n");
! return -1; // go back screaming
! end if
!********************************************************************
! Is everything going normal?
!*********************************************************************/
! let's make sure we are progressing
! obj is the thing we are trying to minimize
! are we going down,and enough?
if(.not. do_lbfgs .and. .not. do_sd2) then
if (.not. do_linmin_deriv) then
obj = f
obj_new = f_new
else
! let's pretend that linmin_deriv never goes uphill!
obj = 0.0_dp
obj_new = -1.0_dp
end if
if(obj-obj_new > abs(stuck_tol*obj_new)) then
! everything is fine, clear some monitoring flags
bad_iter_counter = 0
do i=1, extra_report
#ifndef _OPENMP
call verbosity_pop()
#endif
end do
extra_report = 0
if(present(linminroutine)) then
if(trim(linminroutine) .EQ. "FAST_LINMIN" .and. .not. do_fast_linmin .and. &
main_counter > fast_linmin_switchback) then
call print("Switching back to FAST_LINMIN linmin")
do_fast_linmin = .TRUE.
end if
end if
!**********************************************************************
!* Otherwise, diagnose problems
!**********************************************************************
else if(obj_new > obj)then ! are we not going down ? then things are very bad
if( abs(obj-obj_new) < abs(stuck_tol*obj_new))then ! are we stuck ?
call print("*** Minim is stuck, exiting")
exit_flag = 1
if (present(status)) status = 0
call system_timer("minim/main_loop/"//main_counter)
cycle !continue
end if
call print("*** Minim is not going down at step " // main_counter //" ==> eps /= 10")
eps = oldeps / 10.0_dp
if (current_verbosity() >= PRINT_NERD) call line_scan(x, hdir, func, .not. do_linmin_deriv, dfunc, data)
if(current_verbosity() >= PRINT_NERD .and. .not. do_linmin_deriv) then
if(.NOT.test_gradient(x, func, dfunc,data=data)) then
call print("*** Gradient test failed!!")
end if
end if
if(do_fast_linmin) then
do_fast_linmin = .FALSE.
fast_linmin_switchback = main_counter+5
call print("Switching off FAST_LINMIN (back after " //&
(fast_linmin_switchback - main_counter) // " steps if all OK")
end if
call print("Resetting conjugacy")
resetflag = 1
main_counter=main_counter-1
bad_iter_counter=bad_iter_counter+1 ! increment BAD counter
else ! minim went downhill, but not a lot
call print("*** Minim is stuck at step " // main_counter // ", trying to unstick", PRINT_VERBOSE)
#ifndef _OPENMP
if (current_verbosity() >= PRINT_NORMAL) then
call verbosity_push_increment()
extra_report = extra_report + 1
end if
#endif
if (current_verbosity() >= PRINT_NERD) call line_scan(x, hdir, func, .not. do_linmin_deriv, dfunc, data)
!**********************************************************************
!*
!* do gradient test if we need to
!*
!**********************************************************************/
!
if(current_verbosity() >= PRINT_NERD .and. .not. do_linmin_deriv) then
if(.NOT.test_gradient(x, func, dfunc,data=data)) then
call print("*** Gradient test failed!! Exiting linmin!")
exit_flag = 1
if (present(status)) status = 1
call system_timer("minim/main_loop/"//main_counter)
cycle !continue
end if
end if
bad_iter_counter=bad_iter_counter+1 ! increment BAD counter
eps = my_eps_guess ! try to unstick
call print("resetting eps to " // eps,PRINT_VERBOSE)
resetflag = 1 ! reset CG
end if
if(bad_iter_counter == max_bad_iter)then
call print("*** BAD iteration counter reached maximum " // max_bad_iter // " exiting")
exit_flag = 1
if (present(status)) status = 1
call system_timer("minim/main_loop/"//main_counter)
cycle !continue
end if
end if ! .not. do_bfgs and .not. do_sd2
!**********************************************************************
!*
!* accept iteration, get new gradient
!*
!**********************************************************************/
f = f_new
x = y
! Removed resetting
! if(mod(main_counter,50) .EQ. 0) then ! reset CG every now and then regardless
! resetflag = 1
! end if
! measure linmin_quality
if(.not. do_lbfgs) hdirgrad_before = hdir.DOT.grad_f
grad_f_old = grad_f
#ifndef _OPENMP
call verbosity_push_decrement(2)
#endif
grad_f = dfunc(x,data)
#ifndef _OPENMP
call verbosity_pop()
#endif
if( (.not. do_lbfgs) .and. (.not. do_sd2) ) then
hdirgrad_after = hdir.DOT.grad_f
if (hdirgrad_after /= 0.0_dp) then
linmin_quality = hdirgrad_before/hdirgrad_after
else
linmin_quality = HUGE(1.0_dp)
endif
end if
normsqgrad_f_old = normsqgrad_f
normsqgrad_f = normsq(grad_f)
!**********************************************************************
!* Choose minimization method
!**********************************************************************/
if(do_sd) then !steepest descent
hdir = -1.0_dp * grad_f
elseif(do_sd2) then
if(main_counter == 1) then
alpha = 1.0e-6_dp
else
alpha = dot_product(x-x_old,grad_f-grad_f_old) / dot_product(grad_f-grad_f_old,grad_f-grad_f_old)
endif
x_old = x
x = x - alpha * grad_f
y = x
else if(do_cg)then ! conjugate gradients
if( bad_cg_counter == max_bad_cg .OR.(resetflag > 0)) then ! reset the conj grad cycle
if( bad_cg_counter .EQ. max_bad_cg) then
call print("*** bad_cg_counter == "// max_bad_cg, PRINT_VERBOSE)
end if
call print("*** Resetting conjugacy", PRINT_VERBOSE)
hdir = -1.0_dp * grad_f
bad_cg_counter = 0
resetflag = 0
else ! do CG
gdirlen = 0.0
dcosine = 0.0
gg = normsq(gdir)
if(.NOT.do_pcg) then ! no preconditioning
dgg = max(0.0_dp, (gdir + grad_f).DOT.grad_f) ! Polak-Ribiere formula
gdir = (-1.0_dp) * grad_f
! the original version was this, I had to change because intrinsic does'nt return allocatables.
! dgg = (grad_f + gdir).DOT.grad_f
! gdir = (-1.0_dp) * grad_f
else ! precondition
call System_abort("linmin: preconditioning not implemented")
dgg = 0.0 ! STUPID COMPILER
! //dgg = (precond^grad_f+gdir)*grad_f;
! //gdir = -1.0_dp*(precond^grad_f);
end if
if(gg .ne. 0.0_dp) then
hdir = gdir+hdir*(dgg/gg)
else
hdir = gdir
endif
! calculate direction cosine
dcosine = gdir.DOT.gdir_old
gdir_old = gdir
gdirlen = norm(gdir)
if(gdirlen .eq. 0.0_dp .or. gdirlen_old .eq. 0.0_dp) then
dcosine = 0.0_dp
else
dcosine = dcosine/(gdirlen*gdirlen_old)
endif
gdirlen_old = gdirlen
if(abs(dcosine) > 0.2) then
bad_cg_counter= bad_cg_counter +1
else
bad_cg_counter = 0
end if
end if
else if(do_lbfgs)then ! LBFGS method
y = x
call LBFGS(size(x),lbfgs_M,y, f, grad_f, .false., lbfgs_diag, (/-1,0/), 1e-12_dp, 1e-12_dp, lbfgs_work, lbfgs_flag)
! do while(lbfgs_flag == 2)
! call LBFGS(size(x),lbfgs_M,y, f, grad_f, .false., lbfgs_diag, (/-1,0/), 1e-12_dp, 1e-12_dp, lbfgs_work, lbfgs_flag)
! end do
if(lbfgs_flag < 0) then ! internal LBFGS error
call print('LBFGS returned error code '//lbfgs_flag//', exiting')
exit_flag = 1
if (present(status)) status = 1
call system_timer("minim/main_loop/"//main_counter)
cycle
end if
else
call System_abort("minim(): c'est ci ne pas une erreur!")
end if
call system_timer("minim/main_loop/"//main_counter)
main_counter=main_counter + 1
end do
call system_timer("minim/main_loop")
if(main_counter >= max_steps) then
call print("Iterations exceeded " // max_steps)
end if
call print("Goodbye from minim()")
call print("")
minim = main_counter-1
x_in = x
deallocate(x)
deallocate(y, hdir, gdir, gdir_old, grad_f, grad_f_old)
if(allocated(x_old)) deallocate(x_old)
if(do_lbfgs) then
deallocate(lbfgs_diag)
deallocate(lbfgs_work)
end if
! just in case extra pushes weren't popped
#ifndef _OPENMP
do i=1, extra_report
call verbosity_pop()
end do
#endif
call system_timer("minim")
end function minim
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
!
! test_gradient
!
!% Test a function against its gradient by evaluating the gradient from the
!% function by finite differences. We can only test the gradient if energy and force
!% functions are pure in that they do not change the input vector
!% (e.g. no capping of parameters). The interface for 'func(x)' and 'dfunc(x)'
!% are the same as for 'minim' above.
!
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
! Compare the analytic directional derivative from dfunc against one-sided
! finite differences of func along a single direction (dir if supplied,
! otherwise the normalized downhill direction -dfunc(x)/|dfunc(x)|).
! The step eps is swept from 1e-1 down to 1e-20 and the ratio
! (f(x+eps*d)-f(x)) / (eps * d.grad) is required to approach 1 before
! floating-point precision runs out.
function test_gradient(xx,func,dfunc, dir,data)
real(dp),intent(in)::xx(:) !% Position
INTERFACE
function func(x,data)
use system_module
real(dp)::x(:)
character(len=1),optional::data(:)
real(dp)::func
end function func
end INTERFACE
INTERFACE
function dfunc(x,data)
use system_module
real(dp)::x(:)
character(len=1),optional::data(:)
real(dp)::dfunc(size(x))
end function dfunc
END INTERFACE
real(dp), intent(in), optional, target :: dir(:) !% direction along which to test
character(len=1),optional::data(:)
!%RV Returns true if the gradient test passes, or false if it fails
logical :: test_gradient
! loopend controls the two-pass scheme: at high verbosity we print on the
! first (only) pass; otherwise the sweep runs silently first and is repeated
! with printing enabled only if it failed.
integer:: N,I,loopend
logical::ok,printit,monitor_ratio
real(dp)::f0, f, tmp, eps, ratio, previous_ratio
! Dynamically allocate to avoid stack overflow madness with ifort
real(dp),dimension(:), allocatable ::x,dx,x_0
real(dp), allocatable :: my_dir(:)
N=size(xx)
allocate(x(N), dx(N), x_0(N))
x=xx
if(current_verbosity() > PRINT_VERBOSE) then
printit = .TRUE.
loopend=0
else
printit = .FALSE.
loopend=1
end if
if(printit) then
write(line,*)" "; call print(line)
write(line,*)" " ; call print(line)
write(line,*) "Gradient test"; call print(line)
write(line,*)" " ; call print(line)
end if
if(printit) then
write(line, *) "Calling func(x)"; call print(line)
end if
!f0 = param_penalty(x_0);
! reference value f0 = func(x); verbosity is lowered so the evaluation
! itself stays quiet
#ifndef _OPENMP
call verbosity_push_decrement()
#endif
f0 = func(x,data)
#ifndef _OPENMP
call verbosity_pop()
#endif
!!//logger("f0: %24.16f\n", f0);
if(printit) then
write(line, *) "Calling dfunc(x)"; call print(line)
end if
! analytic gradient at the starting point, also evaluated quietly
#ifndef _OPENMP
call verbosity_push_decrement()
#endif
dx = dfunc(x,data)
#ifndef _OPENMP
call verbosity_pop()
#endif
!!//dx = param_penalty_deriv(x);
! choose and normalize the probe direction; a zero-length direction cannot
! be normalized, so abort in that case
allocate(my_dir(N))
if (present(dir)) then
if (norm(dir) .feq. 0.0_dp) &
call system_abort("test_gradient got dir = 0.0, can't use as normalized direction for test")
my_dir = dir/norm(dir)
else
if (norm(dx) == 0.0_dp) &
call system_abort("test_gradient got dfunc = 0.0, can't use as normalized direction for test")
my_dir = (-1.0_dp)*dx/norm(dx)
endif
!//my_dir.zero();
!//my_dir.randomize(0.1);
!//my_dir.x[4] = 0.1;
!//logger("dx: ");dx.print(logger_stream);
! tmp is the analytic directional derivative along my_dir
tmp = my_dir.DOT.dx
x_0(:) = x(:)
!//logger("x0: "); x_0.print(logger_stream);
if(printit) then
call print("GT eps (f-f0)/(eps*df) f")
end if
ok = .FALSE.
ratio = 0.0
do i=0,loopend ! do it twice, print second time if not OK
previous_ratio = 0.0
monitor_ratio = .FALSE.
eps=1.0e-1_dp
! shrink eps by factors of 10; the finite-difference ratio should approach 1
do while(eps>1.e-20)
x = x_0 + eps*my_dir
!//logger("x: "); x.print(logger_stream);
!//f = param_penalty(x);
#ifndef _OPENMP
call verbosity_push_decrement()
#endif
f = func(x,data)
#ifndef _OPENMP
call verbosity_pop()
#endif
!//logger("f: %24.16f\n", f);
previous_ratio = ratio
ratio = (f-f0)/(eps*tmp)
if(printit) then
write(line,'("GT ",e8.2,f22.16,e24.16)') eps, ratio, f;
call print(line)
end if
! once the ratio is within 1% of 1, start monitoring for the point where
! the sequence stops improving (the floating-point precision floor)
if(abs(ratio-1.0_dp) .LT. 1e-2_dp) monitor_ratio = .TRUE.
if(.NOT.monitor_ratio) then
if(abs((f-f0)/f0) .LT. NUMERICAL_ZERO) then ! we ran out of precision, gradient is really bad
call print("(f-f0)/f0 " // ((f-f0)/f0) // " ZERO " // NUMERICAL_ZERO, PRINT_ANALYSIS)
call print("ran out of precision, quitting loop", PRINT_ANALYSIS)
exit
end if
end if
if(monitor_ratio) then
if( abs(ratio-1.0_dp) > abs(previous_ratio-1.0_dp) )then ! sequence broke
if(abs((f-f0)/f0*(ratio-1.0_dp)) < 1e-10_dp) then ! lets require 10 digits of precision
ok = .TRUE.
!//break;
end if
end if
end if
eps=eps/10.0
end do
if(.NOT.ok) then
printit = .TRUE. ! go back and print it
else
exit
end if
end do
if(printit) then
write(line,*)" "; call print(line, PRINT_NORMAL)
end if
if(ok) then
write(line,*)"Gradient test OK"; call print(line, PRINT_VERBOSE)
end if
test_gradient= ok
deallocate(x, dx, x_0)
deallocate(my_dir)
end function test_gradient
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
!
! n_test_gradient
!
!% Test a function against its gradient by evaluating the gradient from the
!% function by symmetric finite differences. We can only test the gradient if
!% energy and force functions are pure in that they do not change the input
!% vector (e.g. no capping of parameters). The interface for 'func(x)' and
!% 'dfunc(x)'are the same as for 'minim' above.
!
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
! Diagnostic-only check of dfunc against symmetric finite differences of
! func along a single probe direction, printed for step sizes 1e-2..1e-10.
! Unlike test_gradient, no pass/fail verdict is produced.
subroutine n_test_gradient(xx,func,dfunc, dir,data)
real(dp),intent(in)::xx(:) !% Position
INTERFACE
function func(x,data)
use system_module
real(dp)::x(:)
character(len=1),optional::data(:)
real(dp)::func
end function func
end INTERFACE
INTERFACE
function dfunc(x,data)
use system_module
real(dp)::x(:)
character(len=1),optional::data(:)
real(dp)::dfunc(size(x))
end function dfunc
END INTERFACE
real(dp), intent(in), optional, target :: dir(:) !% direction along which to test
character(len=1),optional::data(:)
integer :: ndim, p
real(dp) :: step, deriv_proj, f_plus, f_minus
real(dp), allocatable :: xt(:), grad0(:)
real(dp), pointer :: test_dir(:)
ndim = size(xx)
allocate(xt(ndim), grad0(ndim))
xt = xx
! analytic gradient at the starting point
grad0 = dfunc(xt,data)
! choose and normalize the probe direction: caller-supplied dir if present,
! otherwise the downhill direction -grad0/|grad0|
if (present(dir)) then
if (norm(dir) == 0.0_dp) &
call system_abort("n_test_gradient got dir = 0.0, can't use as normalized direction for test")
allocate(test_dir(ndim))
test_dir = dir/norm(dir)
else
if (norm(grad0) == 0.0_dp) &
call system_abort("n_test_gradient got dfunc = 0.0, can't use as normalized direction for test")
allocate(test_dir(ndim))
test_dir = (-1.0_dp)*grad0/norm(grad0)
endif
! projection of the analytic gradient onto the probe direction
deriv_proj = test_dir.DOT.grad0
! symmetric finite differences: step = 10^-2 .. 10^-10
do p=2, 10
step = 10.0_dp**(-p)
xt = xx + step*test_dir
f_plus = func(xt,data)
xt = xx - step*test_dir
f_minus = func(xt,data)
call print ("fp " // f_plus // " fm " // f_minus)
call print("GT_N eps " // step // " D " // deriv_proj // " FD " // ((f_plus-f_minus)/(2.0_dp*step)) // " diff " // (deriv_proj-(f_plus-f_minus)/(2.0_dp*step)))
end do
deallocate(xt, grad0)
deallocate(test_dir)
end subroutine n_test_gradient
!% FIRE MD minimizer from Bitzek et al., \emph{Phys. Rev. Lett.} {\bfseries 97} 170201.
!% Beware, this algorithm is patent pending in the US.
! FIRE (Fast Inertial Relaxation Engine) minimizer.
! Runs damped MD on x with adaptive time step dt and velocity mixing alpha:
! while the power P = v.F stays positive the step grows (up to dt_max) and
! the mixing decays; when P turns negative the velocity is zeroed and dt
! halved. Converged when |gradient|^2 < tol.
! Returns the number of steps taken; status (if present) is 0 on success,
! 1 on failure to converge within max_steps.
function fire_minim(x, mass, func, dfunc, dt0, tol, max_steps, hook, hook_print_interval, data, dt_max, status)
real(dp), intent(inout), dimension(:) :: x !% position vector, minimized in place
real(dp), intent(in) :: mass !% fictitious mass for the damped dynamics
interface
function func(x,data)
use system_module
real(dp)::x(:)
character(len=1),optional::data(:)
real(dp)::func
end function func
function dfunc(x,data)
use system_module
real(dp)::x(:)
character(len=1),optional::data(:)
real(dp)::dfunc(size(x))
end function dfunc
end interface
real(dp), intent(in) :: dt0 !% initial time step
real(dp), intent(in) :: tol !% convergence tolerance on |gradient|^2
integer, intent(in) :: max_steps !% maximum number of MD steps
optional :: hook
interface
subroutine hook(x,dx,E,done,do_print,data)
use system_module
real(dp), intent(in) ::x(:)
real(dp), intent(in) ::dx(:)
real(dp), intent(in) ::E
logical, intent(out) :: done
logical, optional, intent(in) :: do_print
character(len=1),optional, intent(in) ::data(:)
end subroutine hook
end interface
integer, optional :: hook_print_interval
character(len=1),optional::data(:)
real(dp), intent(in), optional :: dt_max !% cap on the time step (default 20*dt0)
integer, optional, intent(out) :: status !% 0 on convergence, 1 on failure
integer :: fire_minim !% number of steps taken
integer :: my_hook_print_interval
real(dp), allocatable, dimension(:) :: velo, acc, force
real(dp) :: f, df2, alpha_start, alpha, P, dt, my_dt_max
integer :: i, Pcount
logical :: done
if (present(status)) status = 0
! NOTE(review): assumes current_verbosity() >= PRINT_SILENT always holds,
! otherwise my_hook_print_interval would be left undefined -- confirm the
! verbosity level ordering in system_module
if (current_verbosity() >= PRINT_VERBOSE) then
my_hook_print_interval = optional_default(1, hook_print_interval)
else if (current_verbosity() >= PRINT_NORMAL) then
my_hook_print_interval = optional_default(10, hook_print_interval)
else if (current_verbosity() >= PRINT_SILENT) then
my_hook_print_interval = optional_default(100000, hook_print_interval)
endif
allocate (velo(size(x)), acc(size(x)), force(size(x)))
alpha_start = 0.1_dp
alpha = alpha_start
dt = dt0
my_dt_max = optional_default(20.0_dp*dt, dt_max)
Pcount = 0
call Print('Welcome to fire_minim', PRINT_NORMAL)
call Print('Attempting to find minimum with tolerance '// tol, PRINT_NORMAL)
velo = 0
acc = -dfunc(x,data)/mass
do i = 1, max_steps
! first half-kick of the velocity-Verlet step
velo = velo + (0.5_dp*dt)*acc
force = dfunc(x,data) ! keep original sign of gradient for now as hook() expects gradient, not force
df2 = normsq(force)
if (df2 < tol) then
write (line, '(i4,a,e10.2)') i, ' force^2 = ', df2
call Print(line, PRINT_NORMAL)
write (line, '(a,i0,a)') 'Converged in ', i, ' steps.'
call Print(line, PRINT_NORMAL)
exit
else if(mod(i, my_hook_print_interval) == 0) then
f = func(x,data)
write (line, '(i4,a,e24.16,a,e24.16,a,f0.3,a,e24.16)') i,' f=',f,' df^2=',df2,' dt=', dt, 'max(abs(df))=', maxval(abs(force))
call Print(line, PRINT_NORMAL)
if(present(hook)) then
call hook(x, force, f, done, (mod(i-1,my_hook_print_interval) == 0), data)
if (done) then
call print('hook reports that fire_minim is finished, exiting.', PRINT_NORMAL)
exit
end if
end if
end if
force = -force ! convert from gradient of energy to force
acc = force/mass
! second half-kick, then FIRE velocity mixing toward the force direction
velo = velo + (0.5_dp*dt)*acc
P = velo .dot. force
velo = (1.0_dp-alpha)*velo + (alpha*norm(velo)/norm(force))*force
if (P > 0) then
! uphill power positive for >5 consecutive steps: accelerate
if(Pcount > 5) then
dt = min(dt*1.1_dp, my_dt_max)
alpha = alpha*0.99_dp
else
Pcount = Pcount + 1
end if
else
! moving uphill: freeze, shrink the step and reset the mixing
dt = dt*0.5_dp
velo = 0.0_dp
alpha = alpha_start
Pcount = 0
end if
x = x + dt*velo
x = x + (0.5_dp*dt*dt)*acc
if(current_verbosity() >= PRINT_NERD) then
write (line, '(a,e24.16)') 'E=', func(x,data)
call print(line,PRINT_NERD)
call print(x,PRINT_NERD)
end if
end do
! BUG FIX: a Fortran DO loop that runs to completion leaves its index at
! max_steps+1, so the original test (i == max_steps) never fired on a real
! non-convergence (status was left 0) and fired spuriously when convergence
! happened exactly at the last step. Test i > max_steps instead.
if(i > max_steps) then
write (line, '(a,i0,a)') 'fire_minim: Failed to converge in ', max_steps, ' steps.'
call Print(line, PRINT_ALWAYS)
if (present(status)) status = 1
end if
fire_minim = i
deallocate(velo, acc, force)
end function fire_minim
!%%%%
!% Noam's line minimizer
!%
!% args
!% x: input vector, flattened into 1-D double array
!% bothfunc: input pointer to function computing value and gradient
!% neg_gradient: input negative of gradient (i.e. force) for input x
!% E: input value at x
!% search_dir: input vector search direction, not necessarily normalized
!% new_x: output x at minimum
!% new_neg_gradient: output negative of gradient at new_x
!% new_E: output value at new_x
!% max_step_size: input max step size (on _normalized_ search dir), output actual step size for this linmin
!% accuracy: input desired accuracy on square of L2 norm of projected gradient
!% N_evals: input initial number of function evals so far, output final number of function evaluations
!% max_N_evals: input max number of function evaluations before giving up
!% hook: input pointer to function to call after each evaluation
!% data: input pointer to other data needed for calc, flattened to 1-D character array with transfer()
!% error: output error state
!%%%%
! Line minimizer: first brackets a point of zero directional derivative along
! search_dir by stepping outward with an adaptively grown est_step_size, then
! refines the bracket by secant (optionally cubic) interpolation until the
! projected gradient satisfies the accuracy criterion or the evaluation
! budget runs out.
subroutine n_linmin(x, bothfunc, neg_gradient, E, search_dir, &
new_x, new_neg_gradient, new_E, &
max_step_size, accuracy, N_evals, max_N_evals, hook, hook_print_interval, &
data, error)
real(dp), intent(inout) :: x(:)
real(dp), intent(in) :: neg_gradient(:)
interface
subroutine bothfunc(x,E,f,data,error)
use system_module
real(dp)::x(:)
real(dp)::E
real(dp)::f(:)
character(len=1),optional::data(:)
integer,optional :: error
end subroutine bothfunc
end interface
real(dp), intent(inout) :: E
real(dp), intent(inout) :: search_dir(:)
real(dp), intent(out) :: new_x(:), new_neg_gradient(:)
real(dp), intent(out) :: new_E
real(dp), intent(inout) :: max_step_size
real(dp), intent(in) :: accuracy
integer, intent(inout) :: N_evals
integer, intent(in) :: max_N_evals
optional :: hook
interface
subroutine hook(x,dx,E,done,do_print,data)
use system_module
real(dp), intent(in) ::x(:)
real(dp), intent(in) ::dx(:)
real(dp), intent(in) ::E
logical, intent(out) :: done
logical, optional, intent(in) :: do_print
character(len=1),optional, intent(in) ::data(:)
end subroutine hook
end interface
integer, intent(in), optional :: hook_print_interval
character(len=1),optional::data(:)
integer, intent(out), optional :: error
logical :: do_print
real(dp) search_dir_mag
real(dp) p0_dot, p1_dot, new_p_dot
real(dp), allocatable :: p0(:), p1(:), p0_ng(:), p1_ng(:), new_p(:), new_p_ng(:), t_projected(:)
real(dp) p0_E, p1_E, new_p_E
real(dp) p0_pos, p1_pos, new_p_pos
real(dp) tt
real(dp) t_a, t_b, t_c, Ebar, pbar, soln_1, soln_2
integer :: n_pure_linesearch
integer :: max_pure_linesearch = 10
logical done, use_cubic, got_valid_cubic
integer l_error
real(dp) est_step_size
INIT_ERROR(error)
do_print = .false.
! BUG FIX: 'done' is only assigned by the optional hook (intent(out)), but it
! is read in the bracketing loop below; when no hook is passed it was used
! uninitialized. Initialize it explicitly.
done = .false.
allocate(p0(size(x)))
allocate(p1(size(x)))
allocate(p0_ng(size(x)))
allocate(p1_ng(size(x)))
allocate(new_p(size(x)))
allocate(new_p_ng(size(x)))
allocate(t_projected(size(x)))
call print ("n_linmin starting line minimization", PRINT_NERD)
call print ("n_linmin initial |x| " // norm(x) // " |neg_gradient| " // &
norm(neg_gradient) // " |search_dir| " // norm(search_dir), PRINT_NERD)
! work with a normalized search direction; the magnitude is restored on exit
search_dir_mag = norm(search_dir)
! search_dir = search_dir / search_dir_mag
search_dir = search_dir / search_dir_mag
p0_dot = neg_gradient .dot. search_dir
t_projected = search_dir*p0_dot
! informational only: the caller may already be converged along this line
if (normsq(t_projected) .lt. accuracy) then
call print ("n_linmin initial config is apparently converged " // norm(t_projected) // " " // accuracy, PRINT_NERD)
endif
! p0 is the lower bracket endpoint, initialized at the current position
p0_pos = 0.0_dp
p0 = x
p0_ng = neg_gradient
p0_E = E
p0_dot = p0_ng .dot. search_dir
call print("initial p0_dot " // p0_dot, PRINT_NERD)
! orient so that the projected (negative) gradient at p0 is positive
if (p0_dot .lt. 0.0_dp) then
p0_ng = -p0_ng
p0_dot = -p0_dot
endif
call print("cg_n " // p0_pos // " " // p0_e // " " // p0_dot // " " // &
normsq(p0_ng) // " " // N_evals // " bracket starting", PRINT_VERBOSE)
! initial guess for the bracketing step, capped by max_step_size if set
est_step_size = 4.0_dp*maxval(abs(p0_ng))**2/p0_dot
if (max_step_size .gt. 0.0_dp .and. est_step_size .gt. max_step_size) then
est_step_size = max_step_size
endif
p1_pos = est_step_size
p1 = x + p1_pos*search_dir
p1_ng = p0_ng
! first trial evaluation; on evaluation failure halve the step and retry
l_error = 1
do while (l_error .ne. 0)
N_evals = N_evals + 1
l_error=0
call bothfunc(p1, p1_e, p1_ng, data, error=l_error); p1_ng = -p1_ng
if (present(hook_print_interval)) do_print = (mod(N_evals, hook_print_interval) == 0)
if (present(hook)) call hook(p1,p1_ng,p1_E,done,do_print,data)
if (l_error .ne. 0) then
call print("cg_n " // p1_pos // " " // p1_e // " " // 0.0_dp // " " // &
0.0_dp // " " // N_evals // " bracket first step ERROR", PRINT_ALWAYS)
est_step_size = est_step_size*0.5_dp
p1_pos = est_step_size
p1 = x + p1_pos*search_dir
endif
if (N_evals .gt. max_N_evals) then
RAISE_ERROR_WITH_KIND(ERROR_MINIM_NOT_CONVERGED, "n_linmin ran out of iterations", error)
endif
end do
p1_dot = p1_ng .dot. search_dir
call print("cg_n " // p1_pos // " " // p1_e // " " // p1_dot // " " // &
normsq(p1_ng) // " " // N_evals // " bracket first step", PRINT_VERBOSE)
t_projected = search_dir*p1_dot
! if (object_norm(t_projected,norm_type) .lt. accuracy) then
! ! search_dir = search_dir * search_dir_mag
! call scalar_selfmult (search_dir, search_dir_mag)
! minimize_along = 0
! call print ("returning from minimize_along, t_projected is_converged")
! return
! endif
call print ("starting bracketing loop", PRINT_NERD)
! bracket solution: march outward until the projected derivative changes sign
do while (p1_dot .ge. 0.0_dp)
p0 = p1
p0_ng = p1_ng
p0_E = p1_E
p0_pos = p1_pos
p0_dot = p1_dot
p1_pos = p1_pos + est_step_size
call print ("checking bracketing for " // p1_pos, PRINT_NERD)
p1 = x + p1_pos*search_dir
l_error = 1
do while (l_error .ne. 0)
N_evals = N_evals + 1
l_error = 0
call bothfunc (p1, p1_E, p1_ng, data, error=l_error); p1_ng = -p1_ng
if (present(hook_print_interval)) do_print = (mod(N_evals, hook_print_interval) == 0)
if (present(hook)) call hook(p1,p1_ng,p1_E,done,do_print,data)
! the hook may declare the whole minimization finished
if (done) then
call print("hook reported done", PRINT_NERD)
search_dir = search_dir * search_dir_mag
new_x = p1
new_neg_gradient = p1_ng
new_E = p1_E
return
endif
if (l_error .ne. 0) then
call print("cg_n " // p0_pos // " " // p0_e // " " // 0.0_dp // " " // &
0.0_dp // " " // N_evals // " bracket loop ERROR", PRINT_ALWAYS)
call print ("Error in bracket loop " // l_error // " stepping back", PRINT_ALWAYS)
p1_pos = p1_pos - est_step_size
est_step_size = est_step_size*0.5_dp
p1_pos = p1_pos + est_step_size
p1 = x + p1_pos*search_dir
endif
if (N_evals .gt. max_N_evals) then
search_dir = search_dir * search_dir_mag
RAISE_ERROR_WITH_KIND(ERROR_MINIM_NOT_CONVERGED, "n_linmin ran out of iterations", error)
endif
end do
p1_dot = p1_ng .dot. search_dir
! tt = -p0_dot/(p1_dot-p0_dot)
! if (1.5D0*tt*(p1_pos-p0_pos) .lt. 10.0*est_step_size) then
! est_step_size = 1.5_dp*tt*(p1_pos-p0_pos)
! else
est_step_size = est_step_size*2.0_dp
! end if
call print("cg_n " // p1_pos // " " // p1_e // " " // p1_dot // " " // &
normsq(p1_ng) // " " // N_evals // " bracket loop", PRINT_VERBOSE)
end do
call print ("bracketed by" // p0_pos // " " // p1_pos, PRINT_NERD)
! refinement: shrink the bracket by secant (or cubic) steps
done = .false.
t_projected = 2.0_dp*sqrt(accuracy)
!new_p_dot = accuracy*2.0_dp
n_pure_linesearch = 0
do while (n_pure_linesearch < max_pure_linesearch .and. normsq(t_projected) .ge. accuracy .and. (.not. done))
n_pure_linesearch = n_pure_linesearch + 1
call print ("n_linmin starting true minimization loop", PRINT_NERD)
! the cubic-interpolation branch is currently disabled
use_cubic = .false.
if (use_cubic) then
!!!! fit to cubic polynomial
Ebar = p1_E-p0_E
pbar = p1_pos-p0_pos
t_a = (-p0_dot)
t_c = (pbar*((-p1_dot)-(-p0_dot)) - 2.0_dp*Ebar + 2*(-p0_dot)*pbar)/pbar**3
t_b = (Ebar - (-p0_dot)*pbar - t_c*pbar**3)/pbar**2
soln_1 = (-2.0_dp*t_b + sqrt(4.0_dp*t_b**2 - 12.0_dp*t_a*t_c))/(6.0_dp*t_c)
soln_2 = (-2.0_dp*t_b - sqrt(4.0_dp*t_b**2 - 12.0_dp*t_a*t_c))/(6.0_dp*t_c)
if (soln_1 .ge. 0.0_dp .and. soln_1 .le. pbar) then
new_p_pos = p0_pos + soln_1
got_valid_cubic = .true.
else if (soln_2 .ge. 0.0_dp .and. soln_2 .le. pbar) then
new_p_pos = p0_pos + soln_2
got_valid_cubic = .true.
else
call print ("n_linmin warning: no valid solution for cubic", PRINT_ALWAYS)
!!!! use only derivative information to find pt. where derivative = 0
tt = -p0_dot/(p1_dot-p0_dot)
new_p_pos = p0_pos + tt*(p1_pos-p0_pos)
done = .false.
endif
else
!!!! use only derivative information to find pt. where derivative = 0
tt = -p0_dot/(p1_dot-p0_dot)
new_p_pos = p0_pos + tt*(p1_pos-p0_pos)
! done = .true.
done = .false.
endif
new_p = x + new_p_pos*search_dir
N_evals = N_evals + 1
! NOTE(review): the optional 'error' is passed through and then tested
! directly; if the caller omitted 'error' this reference is invalid under
! the Fortran standard -- confirm the error-macro expansion covers this
call bothfunc (new_p, new_p_E, new_p_ng, data, error); new_p_ng = -new_p_ng
if (error .ne. 0) then
call system_abort("n_linmin: Error in line search " // error)
endif
if (N_evals .gt. max_N_evals) done = .true.
! if (inner_prod(new_p_ng,new_p_ng) .lt. 0.1 .and. got_valid_cubic) done = .true.
! if (got_valid_cubic) done = .true.
new_p_dot = new_p_ng .dot. search_dir
call print("cg_n " // new_p_pos // " " // new_p_E // " " // new_p_dot // " " // &
normsq(new_p_ng) // " " // N_evals // " during line search", PRINT_VERBOSE)
! keep the bracket: positive projected derivative replaces p0, else p1
if (new_p_dot .gt. 0) then
p0 = new_p
p0_pos = new_p_pos
p0_dot = new_p_dot
p0_ng = new_p_ng
p0_E = new_p_E
else
p1 = new_p
p1_pos = new_p_pos
p1_dot = new_p_dot
p1_ng = new_p_ng
p1_E = new_p_E
endif
t_projected = search_dir*new_p_dot
end do
new_x = new_p
new_neg_gradient = new_p_ng
new_E = new_p_E
max_step_size = new_p_pos
! restore the caller's search direction magnitude
search_dir = search_dir * search_dir_mag
call print ("done with line search", PRINT_NERD)
end subroutine n_linmin
!%%%%
!% Noam's minimizer with preconditioning from Christoph Ortner
!% return value: number of function evaluations
!% args
!% x_i: input vector, flattened into a 1-D double array
!% bothfunc: input pointer to function that returns value and gradient
!% can apply simple constraints and external (body) forces
!% use_precond: input logical controlling preconditioning
!% apply_precond_func: input pointer to function that applies preconditioner to a vector
!% initial_E: output initial value
!% final_E: output final value
!% expected reduction: input expected reduction in value used to estimate initial step
!% max_N_evals: max number of function evaluations before giving up
!% accuracy: desired accuracy on square of L2 norm of gradient
!% hook: pointer to function passed to linmin and called once per CG step
!% does stuff like print configuration, and can also apply other ending conditions,
!% although latter capability isn't used
!% hook_print_interval: how often to call hook, depending on verbosity level
!% data: other data both_func will need to actually do calculation, flattened into
!% character array by transfer() function. May be replaced with F2003 pointer soon
!% error: output error state
!%%%%
function n_minim(x_i, bothfunc, use_precond, apply_precond_func, initial_E, final_E, &
expected_reduction, max_N_evals, accuracy, hook, hook_print_interval, data, error) result(N_evals)
real(dp), intent(inout) :: x_i(:)
interface
subroutine bothfunc(x,E,f,data,error)
use system_module
real(dp)::x(:)
real(dp)::E
real(dp)::f(:)
character(len=1),optional::data(:)
integer, optional :: error
end subroutine bothfunc
end interface
logical :: use_precond
interface
subroutine apply_precond_func(x,g,P_g,data,error)
use system_module
real(dp)::x(:),g(:),P_g(:)
character(len=1),optional::data(:)
integer, optional :: error
end subroutine apply_precond_func
end interface
real(dp), intent(out) :: initial_E, final_E
real(dp), intent(inout) :: expected_reduction
integer, intent(in) :: max_N_evals
real(dp), intent(in) :: accuracy
optional :: hook
integer :: N_evals
interface
subroutine hook(x,dx,E,done,do_print,data)
use system_module
real(dp), intent(in) ::x(:)
real(dp), intent(in) ::dx(:)
real(dp), intent(in) ::E
logical, intent(out) :: done
logical, optional, intent(in) :: do_print
character(len=1),optional, intent(in) ::data(:)
end subroutine hook
end interface
integer, optional :: hook_print_interval
character(len=1),optional::data(:)
integer, optional, intent(out) :: error
! E_i/E_ip1: value at current / trial iterate
real(dp) :: E_i, E_ip1
logical :: done, hook_done
integer :: iter
real(dp) :: max_step_size, initial_step_size
! g_i holds the NEGATIVE gradient throughout (sign flipped after each bothfunc call)
real(dp), allocatable :: g_i(:)
real(dp), allocatable :: x_ip1(:), g_ip1(:)
! h_i: current (preconditioned) CG search direction
real(dp), allocatable :: h_i(:)
! P_g: preconditioner applied to the negative gradient
real(dp), allocatable :: P_g(:)
real(dp) :: g_i_dot_g_i, g_ip1_dot_g_i, g_ip1_dot_g_ip1
real(dp) :: gamma_i
integer :: l_error
integer :: my_hook_print_interval
INIT_ERROR(error)
! choose hook printing frequency from the global verbosity level
! NOTE(review): assumes current_verbosity() is always >= PRINT_SILENT so one
! branch fires and my_hook_print_interval is defined -- confirm
if (current_verbosity() >= PRINT_VERBOSE) then
my_hook_print_interval = optional_default(1, hook_print_interval)
else if (current_verbosity() >= PRINT_NORMAL) then
my_hook_print_interval = optional_default(10, hook_print_interval)
else if (current_verbosity() >= PRINT_SILENT) then
my_hook_print_interval = optional_default(100000, hook_print_interval)
endif
allocate(g_i(size(x_i)))
allocate(x_ip1(size(x_i)))
allocate(g_ip1(size(x_i)))
allocate(h_i(size(x_i)))
! initial value and gradient; negate so g_i is the downhill direction
N_evals = 1
call bothfunc(x_i, E_i, g_i, data, error=error); g_i = -g_i
if (present(hook)) call hook(x_i, g_i, E_i, done, .true., data)
PASS_ERROR_WITH_INFO("n_minim first evaluation", error)
initial_E = E_i
allocate(P_g(size(x_i)))
! need to get P into the routine somehow
if (use_precond) then
call apply_precond_func(x_i, g_i, P_g, data, error=error)
PASS_ERROR_WITH_INFO("n_miniCGinitial preconditioning call", error)
else
P_g = g_i
endif
call print("#cg_n use_precond="//use_precond)
call print("#cg_n " // " x" // &
" val" // &
" -grad.dir" // &
" |grad|^2" // " n_evals")
call print("cg_n " // 0.0_dp // " " // E_i // " " // (g_i.dot.g_i) // " " // &
normsq(g_i) // " " // N_evals // " INITIAL_VAL")
! bail out early if the starting configuration is already converged
if (normsq(g_i) .lt. accuracy) then
call print("cg_n " // 0.0_dp // " " // E_i // " " // (g_i.dot.g_i) // " " // &
normsq(g_i) // " " // N_evals // " FINAL_VAL")
call print ("n_minim initial config is converged " // norm(g_i) // " " // accuracy, PRINT_VERBOSE)
final_E = initial_E
return
endif
! first search direction: (preconditioned) steepest descent
h_i = P_g
iter = 1
done = .false.
do while (N_evals .le. max_N_evals .and. (.not.(done)))
!! max_step_size = 4.0_dp*expected_reduction / norm(g_i)
max_step_size = 1.0_dp * expected_reduction / (g_i .dot. (h_i/norm(h_i))) ! dividing by norm(h_i) because n_linmin will normalize h_i
! if (max_step_size .gt. 1.0_dp) then
! max_step_size = 1.0_dp
! endif
call print("max_step_size "//max_step_size, verbosity=PRINT_VERBOSE)
call print("cg_n " // 0.0_dp // " " // E_i // " " // (g_i.dot.h_i) // " " // &
normsq(g_i) // " " // N_evals // " n_minim pre linmin")
l_error = ERROR_NONE
call n_linmin(x_i, bothfunc, g_i, E_i, h_i, &
x_ip1, g_ip1, E_ip1, &
max_step_size, accuracy, N_evals, max_N_evals, hook, hook_print_interval, data, l_error)
if (l_error == ERROR_MINIM_NOT_CONVERGED) then
if (N_evals > max_N_evals) then
final_E = E_i
RAISE_ERROR_WITH_KIND(l_error, "linmin: n_minim didn't converge", error)
endif
! we're just going to continue after an unconverged linmin,
CLEAR_ERROR()
else
PASS_ERROR_WITH_INFO("linmin: n_minim error", error)
endif
call print("cg_n " // 0.0_dp // " " // E_ip1 // " " // (g_ip1.dot.h_i) // " " // &
normsq(g_ip1) // " " // N_evals // " n_minim post linmin")
! an uphill linmin step indicates inconsistent forces/energy; warn but continue
if (E_ip1 > E_i) then
final_E = E_i
call print("WARNING:n_minim: n_limin stepped uphill - forces may not be consistent with energy", verbosity=PRINT_ALWAYS)
! RAISE_ERROR("n_minim: n_limin stepped uphill - forces may not be consistent with energy", error)
endif
if (normsq(g_ip1) .lt. accuracy) then
call print("n_minim is converged", PRINT_VERBOSE)
E_i = E_ip1
x_i = x_ip1
g_i = g_ip1
done = .true.
endif
! gamma_i = sum(g_ip1*g_ip1)/sum(g_i*g_i) ! Fletcher-Reeves
! gamma_i = sum((g_ip1-g_i)*g_ip1)/sum(g_i*g_i) ! Polak-Ribiere
! preconditioned Polak-Ribiere-style coefficient: dot products taken against
! P_g from the previous step, then P_g is refreshed at the new iterate
g_i_dot_g_i = g_i .dot. P_g
g_ip1_dot_g_i = g_ip1 .dot. P_g
!! perhaps have some way of telling apply_precond_func to update/not update preconitioner?
if (use_precond) then
call apply_precond_func(x_ip1, g_ip1, P_g, data, error=error)
PASS_ERROR_WITH_INFO("n_minim in-loop preconditioning call", error)
else
P_g = g_ip1
endif
g_ip1_dot_g_ip1 = g_ip1 .dot. P_g
gamma_i = (g_ip1_dot_g_ip1 - g_ip1_dot_g_i)/g_i_dot_g_i
! steepest descent
! gamma_i = 0.0_dp
h_i = gamma_i*h_i + P_g
! shrink the expected reduction used to size the next trial step
! (aggressively after the first iteration, gently afterwards)
if (iter .eq. 1) then
expected_reduction = abs(E_i - E_ip1)/10.0_dp
else
expected_reduction = abs(E_i - E_ip1)/2.0_dp
endif
E_i = E_ip1
x_i = x_ip1
g_i = g_ip1
! P_g is already P_g_ip1 from gamma_i evaluation code
if (present(hook)) then
call hook(x_i,g_i,E_i,hook_done,(mod(iter-1,my_hook_print_interval) == 0), data)
if (hook_done) done = .true.
endif
call print("cg_n " // 0.0_dp // " " // E_i // " " // (g_i.dot.h_i) // " " // &
normsq(g_i) // " " // N_evals // " n_minim with new dir")
call print("n_minim loop end, N_evals " // N_evals // " max_N_evals " // max_N_evals // &
" done " // done, PRINT_VERBOSE)
iter = iter + 1
end do
! final hook call with printing forced on
if (present(hook)) then
call hook(x_i,g_i,E_i,hook_done,.true.,data)
if (hook_done) done = .true.
endif
final_E = E_i
call print("cg_n " // 0.0_dp // " " // final_E // " " // (g_i.dot.h_i) // " " // &
normsq(g_i) // " " // N_evals // " FINAL_VAL")
end function n_minim
!% Debugging aid: scan the objective and its directional derivative along
!% direction 'xdir' starting from 'x0', printing one
!% "LINE_SCAN eps f dir.grad" line per sample. 50 samples are taken with
!% step sizes starting at 1e-5 and growing geometrically by a factor 1.15.
subroutine line_scan(x0, xdir, func, use_func, dfunc, data)
real(dp)::x0(:) !% Starting vector
real(dp)::xdir(:)!% Search direction
INTERFACE
function func(x,data)
use system_module
real(dp)::x(:)
character(len=1),optional::data(:)
real(dp)::func
end function func
END INTERFACE
logical :: use_func !% if true, also evaluate and print the value of func
INTERFACE
function dfunc(x,data)
use system_module
real(dp)::x(:)
character(len=1),optional::data(:)
real(dp)::dfunc(size(x))
end function dfunc
END INTERFACE
character(len=1), optional::data(:)
integer :: i
real(dp) :: new_eps
real(dp) :: fn, dirdx_new
real(dp), allocatable :: xn(:), dxn(:)
allocate(xn(size(x0)))
allocate(dxn(size(x0)))
fn = 0.0_dp
call print('line scan:', PRINT_NORMAL)
new_eps = 1.0e-5_dp
do i=1,50
xn = x0 + new_eps*xdir
#ifndef _OPENMP
call verbosity_push_decrement()
#endif
if (use_func) fn = func(xn,data)
dxn = dfunc(xn,data)
#ifndef _OPENMP
call verbosity_pop()
#endif
dirdx_new = xdir .DOT. dxn
call print('LINE_SCAN ' // new_eps//' '//fn// ' '//dirdx_new, PRINT_NORMAL)
new_eps = new_eps*1.15
enddo
! free both work arrays; previously only xn was deallocated explicitly
! and dxn relied on automatic deallocation at end of scope
deallocate(xn)
deallocate(dxn)
end subroutine line_scan
!% Thin dispatcher around the objective 'func'.  In the basic
!% energy-function mode the per-site energy array is not forwarded to
!% 'func'; instead, if the caller asked for it, it is synthesised here
!% (total value in element 1, zeros elsewhere).  All other modes pass
!% 'local_energy' straight through.
function func_wrapper(func, x, data, local_energy, gradient, doefunc)
INTERFACE
function func(x,data,local_energy,gradient)
use system_module
real(dp)::x(:)
character(len=1),optional::data(:)
real(dp), intent(inout),optional :: local_energy(:)
real(dp), intent(inout),optional :: gradient(:)
real(dp)::func
end function func
end INTERFACE
real(dp)::x(:)
character(len=1),optional::data(:)
real(dp), intent(inout),optional :: local_energy(:)
real(dp), intent(inout),optional :: gradient(:)
integer, intent(in) :: doefunc
real(dp)::func_wrapper
if (doefunc /= E_FUNC_BASIC) then
! non-basic summation modes: let func fill the local energies itself
func_wrapper = func(x, data, local_energy=local_energy, gradient=gradient)
else
! basic mode: call without local_energy, then fake the array if requested
func_wrapper = func(x, data, gradient=gradient)
if (present(local_energy)) then
local_energy = 0.0_dp
local_energy(1) = func_wrapper
end if
end if
end function func_wrapper
! Interface is made to imitate the existing interface.
!% Preconditioned minimiser driving several methods (preconditioned CG/SD,
!% linesearching L-BFGS, and dogleg/Steihaug trust-region variants of
!% L-BFGS / L-SR1 / finite-difference-Hessian).  Returns the number of
!% iterations performed in 'preconminim'.  The preconditioner 'pr' is
!% rebuilt via 'build_precon' every accepted step.
function preconminim(x_in,func,dfunc,build_precon,pr,method,convergence_tol,max_steps,efuncroutine,LM, linminroutine, hook, &
hook_print_interval, am_data, status,writehessian,gethessian,getfdhconnectivity,infoverride,infconvext)
implicit none
real(dp), intent(inout) :: x_in(:) !% Starting position
INTERFACE
function func(x,data,local_energy,gradient)
use system_module
real(dp)::x(:)
character(len=1),optional::data(:)
real(dp), intent(inout),optional :: local_energy(:)
real(dp), intent(inout),optional :: gradient(:)
real(dp)::func
end function func
end INTERFACE
INTERFACE
function dfunc(x,data)
use system_module
real(dp)::x(:)
character(len=1),optional::data(:)
real(dp)::dfunc(size(x))
end function dfunc
END INTERFACE
INTERFACE
subroutine build_precon(pr,am_data)
use system_module
import precon_data
type(precon_data),intent(inout) ::pr
character(len=1)::am_data(:)
end subroutine
END INTERFACE
type(precon_data):: pr
character(*), intent(in) :: method !% 'cg' for conjugate gradients or 'sd' for steepest descent
real(dp), intent(in) :: convergence_tol !% Minimisation is treated as converged once $|\mathbf{\nabla}f|^2 <$
!% 'convergence_tol'.
integer, intent(in) :: max_steps !% Maximum number of 'cg' or 'sd' steps
character(*), intent(in), optional :: efuncroutine !% Control of the objective function evaluation
character(*), intent(in), optional :: linminroutine !% Name of the line minisation routine to use.
integer, optional :: LM !% history length for the limited-memory methods (default 20)
optional :: hook
INTERFACE
subroutine hook(x,dx,E,done,do_print,data)
use system_module
real(dp), intent(in) ::x(:)
real(dp), intent(in) ::dx(:)
real(dp), intent(in) ::E
logical, intent(out) :: done
logical, optional, intent(in) :: do_print
character(len=1),optional, intent(in) ::data(:)
end subroutine hook
end INTERFACE
integer, intent(in), optional :: hook_print_interval
character(len=1), optional, intent(inout) :: am_data(:)
integer, optional, intent(out) :: status
optional :: writehessian
INTERFACE
subroutine writehessian(x,data,filename)
use system_module
real(dp) :: x(:)
character(len=1)::data(:)
character(*) :: filename
end subroutine writehessian
end INTERFACE
optional :: gethessian
INTERFACE
subroutine gethessian(x,data,FDHess)
use system_module
real(dp),intent(in):: x(:)
character(len=1),intent(in)::data(:)
real(dp),intent(inout) :: FDHess(:,:)
end subroutine gethessian
end INTERFACE
optional :: getfdhconnectivity
INTERFACE
subroutine getfdhconnectivity(rows,columns,rn,data)
use system_module
integer, intent(inout) :: rows(:), columns(:)
integer, intent(out) :: rn
character(len=1), intent(in)::data(:)
end subroutine
end INTERFACE
! result
real(dp), optional :: infoverride !% optional override passed to calc_amax
logical, optional :: infconvext !% if true, converge on |g|_infty instead of |g|^2
integer::preconminim
! method selection flags (exactly one is set from 'method')
logical :: doFD,doSD, doCG,doLBFGS,doDLLBFGS,doSHLBFGS,doSHLSR1,doGHLBFGS,doGHLSR1,doGHFD,doSHFD, doGHFDH, doprecon,done
! linesearch selection flags (exactly one is set from 'linminroutine')
logical :: doLSbasic,doLSbasicpp,doLSstandard, doLSnone,doLSMoreThuente,doLSunit
integer :: doefunc
! current/previous iterates, search directions, gradients, preconditioned gradients
real(dp),allocatable :: x(:),xold(:),s(:),sold(:),g(:),gold(:),pg(:),pgold(:),xcand(:),gcand(:)
real(dp) :: alpha,alphamax, beta,betanumer,betadenom,f
integer :: n_iter,N, abortcount
! per-iteration step length and directional-derivative history
real(dp),allocatable :: alpvec(:),dirderivvec(:)
integer :: my_hook_print_interval
real(dp) :: normsqgrad, normsqs
integer :: this_ls_count, total_ls_count
real(dp) :: amax
real(dp),allocatable :: local_energy(:),local_energyold(:),local_energycand(:)
real(dp) :: dotpgout
type (precon_data) :: hess
! limited-memory BFGS storage: s/y pairs, two-loop recursion work vectors,
! and the running matrix of s^T y inner products (LBFGSdlr)
real(dp),allocatable :: LBFGSs(:,:), LBFGSy(:,:), LBFGSa(:,:), LBFGSb(:,:), LBFGSalp(:), LBFGSbet(:), LBFGSrho(:), LBFGSq(:), LBFGSz(:), LBFGSbuf1(:), LBFGSbuf2(:)
real(dp),allocatable :: LBFGSdlr(:,:)
integer :: LBFGSm, LBFGScount
integer :: I, n_back,thisind
integer :: k_out
! trust-region state: candidate step, actual/predicted reduction, radius
real(dp), allocatable :: TRcandg(:),TRBs(:),TRyk(:)
real(dp) :: TRared,TRpred,TRdelta,fcand,TRrho,ftest
type(precon_data) :: TRB
! NOTE(review): initialised declarations carry the SAVE attribute; both are
! only ever read (TRr appears unused below), so this is harmless here
real(dp) :: TReta = 0.25
real(dp) :: TRr = 10.0**(-8)
real(dp), allocatable :: FDhess(:,:)
integer, allocatable :: IPIV(:)
real(dp), allocatable :: LBFGSd(:,:), LBFGSl(:,:), SR1testvec(:)
integer :: SR1I,SR1J
integer :: INFO
real(dp) :: SR1testLHS,SR1testRHS
logical :: SR1doupdate
logical :: term2norm
N = size(x_in)
!allocate NLCG vectors
allocate(x(N))
allocate(xold(N))
allocate(s(N))
allocate(sold(N))
allocate(g(N))
allocate(gold(N))
allocate(pg(N))
allocate(pgold(N))
!allocate linesearch history vectors
allocate(alpvec(max_steps))
allocate(dirderivvec(max_steps))
! NOTE(review): sizing assumes x holds 9 cell degrees of freedom plus 3 per
! atom, i.e. (size(x)-9)/3 local energies -- confirm against caller
allocate(local_energy( (size(x) - 9)/3 ))
allocate(local_energyold( (size(x) - 9)/3 ))
! hook printing frequency follows the global verbosity unless overridden
if (current_verbosity() >= PRINT_VERBOSE) then
my_hook_print_interval = optional_default(1, hook_print_interval)
else if (current_verbosity() >= PRINT_NORMAL) then
my_hook_print_interval = optional_default(10, hook_print_interval)
else
my_hook_print_interval = optional_default(100000, hook_print_interval)
endif
! decode the requested method into one flag
doFD = .false.
doCG = .FALSE.
doSD = .FALSE.
doLBFGS = .false.
doDLLBFGS = .false.
doSHLBFGS = .false.
doSHLSR1 = .false.
doGHLBFGS = .false.
doGHLSR1 = .false.
doGHFD = .false.
doGHFDH = .false.
doSHFD = .false.
if (trim(method) == 'preconCG') then
doCG = .TRUE.
call print("Using preconditioned Polak-Ribiere Conjugate Gradients")
else if (trim(method) == 'preconSD') then
doSD = .TRUE.
call print("Using preconditioned Steepest Descent")
else if (trim(method) == 'preconLBFGS') then
doLBFGS = .TRUE.
call print ("Using linesearching limited memory BFGS")
else if (trim(method) == 'preconDLLBFGS') then
doDLLBFGS = .true.
call print ("Using dogleg trust region limited memory BFGS")
else if (trim(method) == 'preconSHLBFGS') then
doSHLBFGS = .true.
call print ("Using Steihaug trust region limited memory BFGS")
else if (trim(method) == 'preconSHLSR1') then
doSHLSR1 = .true.
call print ("Using Steihaug trust region limited memory SR1")
else if (trim(method) == 'preconSHFD') then
doSHFD = .true.
call print ("Using Steihaug trust region with FD based Hessian")
else if (trim(method) == 'FD') then
doFD = .true.
else
call print('Unrecognized minim method, exiting')
call exit()
end if
! allocate limited-memory storage for any quasi-Newton variant
if (doLBFGS .or. doDLLBFGS .or. doSHLBFGS .or. doSHLSR1 .or. doSHFD) then
LBFGSm = 20
if ( present(LM) ) LBFGSm = LM
allocate(LBFGSs(N,LBFGSm))
allocate(LBFGSy(N,LBFGSm))
allocate(LBFGSalp(LBFGSm))
allocate(LBFGSbet(LBFGSm))
allocate(LBFGSrho(LBFGSm))
allocate(LBFGSz(N))
allocate(LBFGSq(N))
allocate(LBFGSbuf1(N))
allocate(LBFGSdlr(LBFGSm,LBFGSm))
LBFGSbuf1 = 0.0
LBFGScount = 0
LBFGSy = 0.0_dp
LBFGSs = 0.0_dp
LBFGSdlr = 0.0_dp
LBFGSrho = 0.0
end if
! extra workspace for the trust-region variants
if(doDLLBFGS .or. doSHLBFGS .or. doSHLSR1 .or. doSHFD ) then
allocate(xcand(N))
allocate(gcand(N))
allocate(LBFGSb(N,LBFGSm))
allocate(TRBs(N))
end if
if(doSHLSR1) then
allocate(SR1testvec(N))
end if
if(doFD .or. doSHFD) then
allocate(FDHess(N,N))
allocate(IPIV(N))
end if
! decode the requested energy-summation mode
doLSbasic = .FALSE.
doLSbasicpp = .FALSE.
doLSstandard = .FALSE.
doLSnone = .FALSE.
doLSMoreThuente = .false.
doLSunit = .false.
doefunc = 0
if ( present(efuncroutine) ) then
if (trim(efuncroutine) == 'basic') then
doefunc = E_FUNC_BASIC
call print('Using naive summation of local energies')
elseif (trim(efuncroutine) == 'kahan') then
doefunc = E_FUNC_KAHAN
allocate(local_energycand((size(x)-9)/3))
call print('Using Kahan summation of local energies')
elseif (trim(efuncroutine) == 'doublekahan') then
doefunc = E_FUNC_DOUBLEKAHAN
allocate(local_energycand((size(x)-9)/3))
call print('Using double Kahan summation of local energies with quicksort')
else
call print('Unrecognized efuncroutine, normally use "basic" or "kahan", aborting for safety')
call exit()
end if
else
doefunc = E_FUNC_BASIC
call print('Using naive summation of local energies by default')
end if
! the linesearch choice only matters for the first-order / linesearch methods
if (doSD .or. doCG .or. doLBFGS) then
if ( present(linminroutine) ) then
if (trim(linminroutine) == 'basic') then
call print('Using basic backtracking linesearch')
doLSbasic = .TRUE.
elseif (trim(linminroutine) == 'basicpp') then
call print('Using backtracking linesearch with cubic interpolation')
doLSbasicpp = .TRUE.
elseif (trim(linminroutine) == 'standard') then
call print('Using standard two-stage linesearch with cubic interpolation in the zoom phase, with bisection as backup')
doLSstandard = .TRUE.
elseif (trim(linminroutine) == 'none') then
call print('Using no linesearch method (relying on init_alpha to make good guesses)')
doLSnone = .TRUE.
elseif (trim(linminroutine) == 'morethuente') then
call print('Using More & Thuente minpack linesearch')
doLSMoreThuente = .TRUE.
elseif (trim(linminroutine) == 'unit') then
call print('Using fixed stepsize of 1')
doLSunit = .true.
else
call print('Unrecognized linmin routine')
call exit()
end if
else
call print('Defaulting to basic linesearch')
doLSbasic = .TRUE.
end if
end if
! term2norm: .true. -> converge on |g|^2; .false. -> converge on |g|_infty
term2norm = .true.
if (present(infconvext)) then
if (infconvext .eqv. .true.) then
term2norm = .false.
end if
end if
x = x_in
!Main Loop
this_ls_count = 0
total_ls_count = 0
n_iter = 1
call system_timer("preconminim/func")
#ifndef _OPENMP
call verbosity_push_decrement(2)
#endif
f = func_wrapper(func,x,am_data,local_energy,g,doefunc=doefunc)
#ifndef _OPENMP
call verbosity_pop()
#endif
call system_timer("preconminim/func")
abortcount = 0
alpha = 0
this_ls_count = 0
dotpgout = 0
do
! ---- first-order / linesearch methods (SD, CG, L-BFGS, dense FD Newton) ----
if(doSD .or. doCG .or. doLBFGS .or. doFD) then
normsqgrad = smartdotproduct(g,g,doefunc)
! convergence tests (2-norm or infinity-norm depending on term2norm)
if ( normsqgrad < convergence_tol .and. term2norm) then
call print('Extended minim completed with |df|^2 = '// normsqgrad // ' < tolerance = ' // convergence_tol // ' total linesearch iterations = '// total_ls_count)
! call print(trim(method)//" iter = "//n_iter//" f = "//f// ' |df|^2 = '// normsqgrad// ' max(abs(df)) = '//maxval(abs(g))//' last alpha = '//alpha)
call print(trim(method)//" iter = "//n_iter//" f = "//f// ' |g|^2 = '// normsqgrad// ' sg/(|s||g|) = '//dotpgout//' last alpha = '//alpha//' max(abs(g)) = '//maxval(abs(g)) &
// ' last ls_iter = ' // this_ls_count)
exit
elseif ( maxval(abs(g)) < convergence_tol .and. .not. term2norm) then
call print('Extended minim completed with |df|_infty = '// maxval(abs(g)) // ' < tolerance = ' // convergence_tol // ' total linesearch iterations = '// total_ls_count)
! call print(trim(method)//" iter = "//n_iter//" f = "//f// ' |df|^2 = '// normsqgrad// ' max(abs(df)) = '//maxval(abs(g))//' last alpha = '//alpha)
call print(trim(method)//" iter = "//n_iter//" f = "//f// ' |g|^2 = '// normsqgrad// ' sg/(|s||g|) = '//dotpgout//' last alpha = '//alpha//' max(abs(g)) = '//maxval(abs(g)) &
// ' last ls_iter = ' // this_ls_count)
exit
end if
call print(trim(method)//" iter = "//n_iter//" f = "//f// ' |g|^2 = '// normsqgrad// ' sg/(|s||g|) = '//dotpgout //' last alpha = '//alpha//' max(abs(g)) = '//maxval(abs(g)) &
// ' last ls_iter = ' // this_ls_count,PRINT_NORMAL)
! call the hook function
if (present(hook)) then
call hook(x, g, f, done, (mod(n_iter-1,my_hook_print_interval) == 0), am_data)
else
call print("hook is not present", PRINT_VERBOSE)
end if
!if(n_iter == 1) call build_precon(pr,am_data)
call build_precon(pr,am_data)
!call writeprecon(pr,'pr')
!call exit()
! apply the preconditioner to the gradient (warm-started after iter 1)
if (doCG .or. doSD) then
pgold = pg
if (n_iter > 1) then
pg = apply_precon(g,pr,doefunc,init=pgold)
elseif (n_iter == 1) then
pg = apply_precon(g,pr,doefunc)
end if
end if
sold = s
! --- compute the search direction s for the active method ---
if (n_iter > 1 .AND. doCG) then
! preconditioned Polak-Ribiere beta, clipped at zero (restart)
betanumer = smartdotproduct(pg, (g - gold),doefunc)
betadenom = smartdotproduct(pgold,gold,doefunc)
beta = betanumer/betadenom
if (beta > 0) then
beta = 0
end if
s = -pg + beta*sold
elseif (doLBFGS) then
!call print(LBFGSrho)
! shift the (s,y) history left and append the newest pair
if (n_iter > 1) then
LBFGSs(1:N,1:(LBFGSm-1)) = LBFGSs(1:N,2:LBFGSm)
LBFGSy(1:N,1:(LBFGSm-1)) = LBFGSy(1:N,2:LBFGSm)
LBFGSrho(1:(LBFGSm-1)) = LBFGSrho(2:LBFGSm)
LBFGSs(1:N,LBFGSm) = x - xold
LBFGSy(1:N,LBFGSm) = g - gold
LBFGSrho(LBFGSm) = 1.0/smartdotproduct(LBFGSs(1:N,LBFGSm),LBFGSy(1:N,LBFGSm),doefunc)
end if
n_back = min(LBFGSm,LBFGScount,n_iter-1)
!n_back = 0
! standard two-loop recursion with the preconditioner as initial H_0
LBFGSq = g
do I = 1,n_back
thisind = LBFGSm - I + 1
LBFGSalp(thisind) = LBFGSrho(thisind)*smartdotproduct(LBFGSs(1:N,thisind),LBFGSq,doefunc)
!call print(LBFGSy(1:N,thisind))
LBFGSq = LBFGSq - LBFGSalp(thisind)*LBFGSy(1:N,thisind)
end do
if (n_iter == 1) then
LBFGSz = apply_precon(LBFGSq,pr,doefunc,k_out=k_out)
else
LBFGSz = apply_precon(LBFGSq,pr,doefunc,init=LBFGSbuf1,k_out=k_out)
end if
LBFGSbuf1 = LBFGSz
do I = 1,n_back
thisind = LBFGSm - n_back + I
LBFGSbet(thisind) = LBFGSrho(thisind)*smartdotproduct(LBFGSy(1:N,thisind),LBFGSz,doefunc)
LBFGSz = LBFGSz + LBFGSs(1:N,thisind)*(LBFGSalp(thisind) - LBFGSbet(thisind))
end do
s = -LBFGSz
elseif (doFD) then
! dense Newton step: solve FDHess * s = -g with LAPACK
call gethessian(x,am_data,FDHess)
!call writemat(FDHess,'densehess' // n_iter)
s = -g
call dgesv(size(x),1,FDHess,size(x),IPIV,s,size(x),INFO)
else
s = -pg
end if
dirderivvec(n_iter) = smartdotproduct(g,s,doefunc)
dotpgout = -dirderivvec(n_iter)/(norm(g)*norm(s))
! a non-descent direction: restart (and give up after 5 in a row)
if (dirderivvec(n_iter) > 0) then
call print('Problem, directional derivative of search direction = '// dirderivvec(n_iter))
if(doLBFGS) then
call print('Restarting LBFGS')
LBFGScount = 0
end if
abortcount = abortcount + 1
if (abortcount >= 5) then
call print(' Extended Minim aborted due to multiple bad search directions, possibly reached machine precision')
call print(' |df|^2 = '// normsqgrad // ', tolerance = ' // convergence_tol // ' total linesearch iterations = '// total_ls_count)
!call writehessian(x,am_data,'outfinal')
exit
end if
cycle
else
if(doLBFGS) then
LBFGScount = LBFGScount + 1
end if
abortcount = 0
end if
!initial guess of alpha
alpha = init_alpha(alpvec,dirderivvec,n_iter)
if(n_iter == 1 .and. (doCG .or. doSD)) then
alpha = calc_amax(s,pr,doefunc)
elseif (doLBFGS .and. (pr%precon_id == 'C1' .or. pr%precon_id == 'LJ')) then
alpha = 1.0
else
alpha = init_alpha(alpvec,dirderivvec,n_iter)
if (pr%precon_id == 'ID') then
alpha = alpha*2.0
end if
end if
gold = g
amax = calc_amax(s,pr,doefunc,infoverride)
call print('Beginning linesearch, initial alpha = ' //alpha// ', alpha_max = ' //amax ,PRINT_VERBOSE)
! --- run the selected linesearch to fix alpha ---
if (doLSbasic) then
alpha = linesearch_basic(x,s,f,g,local_energy,alpha,func,doefunc,am_data,dirderivvec(n_iter),this_ls_count,amaxin=amax)
!call print('moo1')
elseif (doLSbasicpp) then
alpha = linesearch_basic_pp(x,s,f,alpha,func,doefunc,am_data,dirderivvec(n_iter),this_ls_count)
!call print('moo2')
elseif (doLSstandard) then
alpha = linesearch_standard(x,s,f,g,local_energy,alpha,func,doefunc,am_data,dirderivvec(n_iter),this_ls_count,amaxin=amax)
!call print('moo2')
elseif (doLSMoreThuente) then
alpha = linesearch_morethuente(x,s,f,local_energy,alpha,func,doefunc,am_data,dirderivvec(n_iter),this_ls_count,amaxin=amax)
elseif (doLSunit) then
alpha = 1.0
call system_timer("preconminim/func")
f = func_wrapper(func,x+s,am_data,doefunc=doefunc)
call system_timer("preconminim/func")
elseif (doLSnone) then
!do nothing
this_ls_count = 0
end if
total_ls_count = total_ls_count + this_ls_count
alpvec(n_iter) = alpha
xold = x
! a vanishing step means the linesearch failed along a descent direction
if (alpha < 10.0_dp**(-14) ) then
call print(' Extended Minim aborted due to being unable to find a step along the given descent direction, probably your dE is not accurate else extremely badly conditioned')
call print(' |df|^2 = '// normsqgrad // ', tolerance = ' // convergence_tol // ' total linesearch iterations = '// total_ls_count)
exit
end if
x = x + alpha*s
! ---- trust-region methods (dogleg / Steihaug) ----
elseif (doDLLBFGS .or. doSHLBFGS .or. doSHLSR1 .or. doSHFD ) then
if (n_iter == 1) then
call system_timer("preconminim/func")
#ifndef _OPENMP
call verbosity_push_decrement(2)
#endif
f = func_wrapper(func,x,am_data,local_energy,g,doefunc=doefunc)
normsqgrad = smartdotproduct(g,g,doefunc)
TRDelta = 1.0
call build_precon(pr,am_data)
#ifndef _OPENMP
call verbosity_pop()
#endif
call system_timer("preconminim/func")
call print(trim(method)//" iter = 0 f = "//f// ' |g|^2 = '// normsqgrad,PRINT_NORMAL)
end if
n_back = min(LBFGSm,LBFGScount)
! solve the trust-region subproblem for the candidate step s
if (doSHLBFGS) then
s = steihaug(x,g,pr,TRDelta,doefunc,n_back,LBFGSs,LBFGSy,LBFGSdlr,doBFGS=.true.)
elseif (doSHLSR1) then
s = steihaug(x,g,pr,TRDelta,doefunc,n_back,LBFGSs,LBFGSy,LBFGSdlr,doSR1=.true.)
elseif (doSHFD) then
call gethessian(x,am_data,FDHess)
s = steihaug(x,g,pr,TRDelta,doefunc,n_back,LBFGSs,LBFGSy,LBFGSdlr,doFD=.true.,FDhess=FDHess)
elseif (doDLLBFGS) then
s = LBFGSdogleg(x,g,pr,TRDelta,doefunc,n_back,LBFGSs,LBFGSy,LBFGSdlr)
end if
xcand = x + s
normsqs = Pdotproduct(s,s,pr,doefunc)
call system_timer("preconminim/func")
#ifndef _OPENMP
call verbosity_push_decrement(2)
#endif
fcand = func_wrapper(func,xcand,am_data,local_energycand,gcand,doefunc=doefunc)
#ifndef _OPENMP
call verbosity_pop()
#endif
call system_timer("preconminim/func")
!call print(f // ' '//fcand)
! predicted reduction uses the model Hessian B applied to s
if (doDLLBFGS .or. doSHLBFGS) then
TRBs = calc_LBFGS_Bk_mult_v(LBFGSs(1:N,(LBFGSm-n_back+1):LBFGSm),LBFGSy(1:N,(LBFGSm-n_back+1):LBFGSm),LBFGSl,LBFGSd,s,pr,doefunc)
elseif (doSHLSR1) then
TRBs = calc_LSR1_Bk_mult_v(LBFGSs(1:N,(LBFGSm-n_back+1):LBFGSm),LBFGSy(1:N,(LBFGSm-n_back+1):LBFGSm),LBFGSl,LBFGSd,s,pr,doefunc)
elseif ( doSHFD) then
TRBs = smartmatmul(FDHess,s,doefunc)
end if
TRared = calcdeltaE(doefunc,f,fcand,local_energy,local_energycand)
TRpred = -( smartdotproduct(g,s,doefunc) + 0.5*(smartdotproduct(s,TRBs,doefunc)) )
TRrho = TRared/TRpred
! count consecutive failed models; give up after 15
if (TRrho < 0) then
abortcount = abortcount+1
else
abortcount = 0
end if
if (abortcount >= 15) then
call print(' Extended Minim aborted due to multiple bad trust region models, possibly reached machine precision')
exit
end if
! standard trust-region radius update
if (TRrho < 0.25) then
TRDelta = 0.25*TRdelta
else if (TRrho > 0.75 .and. abs(sqrt(normsqs) - TRDelta) < 10.0**(-2.0)) then
TRDelta = 2.0*TRDelta
end if
! accept the step if the reduction ratio beats TReta
if (TRrho > TReta) then
xold = x
gold = g
x = xcand
f = fcand
local_energy = local_energycand
g = gcand
normsqgrad = smartdotproduct(g,g,doefunc)
call build_precon(pr,am_data)
end if
! SR1 pairs are only stored when the standard well-definedness test passes
SR1doupdate = .false.
if (doSHLSR1) then
SR1testvec = g - gold - calc_LSR1_Bk_mult_v(LBFGSs(1:N,(LBFGSm-n_back+1):LBFGSm),LBFGSy(1:N,(LBFGSm-n_back+1):LBFGSm),LBFGSl,LBFGSd,x-xold,pr,doefunc)
SR1testLHS = abs(smartdotproduct(x-xold,SR1testvec,doefunc))
SR1testRHS = 10.0**(-8.0)*sqrt(Pdotproduct(x-xold,x-xold,pr,doefunc))*sqrt(Pdotproduct(SR1testvec,SR1testvec,pr,doefunc))
if(SR1testLHS >= SR1testRHS) SR1doupdate = .true.
end if
! update the limited-memory history and the D/L submatrices of S^T Y
if (doLBFGS .or. doDLLBFGS .or. doSHLBFGS .or. SR1doupdate) then
LBFGSs(1:N,1:(LBFGSm-1)) = LBFGSs(1:N,2:LBFGSm)
LBFGSy(1:N,1:(LBFGSm-1)) = LBFGSy(1:N,2:LBFGSm)
LBFGSdlr(1:(LBFGSm-1),1:(LBFGSm-1)) = LBFGSdlr(2:LBFGSm,2:LBFGSm)
LBFGSs(1:N,LBFGSm) = x - xold
LBFGSy(1:N,LBFGSm) = g - gold
LBFGScount = LBFGScount + 1
n_back = min(LBFGSm,LBFGScount)
do I = 1,n_back
thisind = LBFGSm - I + 1
LBFGSdlr(LBFGSm,thisind) = smartdotproduct(LBFGSs(1:N,LBFGSm),LBFGSy(1:N,thisind),doefunc)
LBFGSdlr(thisind,LBFGSm) = smartdotproduct(LBFGSs(1:N,thisind),LBFGSy(1:N,LBFGSm),doefunc)
! NOTE(review): LBFGSd/LBFGSl are reallocated and refilled on every pass
! of this I loop; only the final pass matters -- looks hoistable, confirm
if(allocated(LBFGSd)) deallocate(LBFGSd)
if(allocated(LBFGSl)) deallocate(LBFGSl)
allocate(LBFGSd(n_back,n_back))
allocate(LBFGSl(n_back,n_back))
LBFGSd = 0.0
LBFGSl = 0.0
do SR1I = 1,n_back
do SR1J = 1,n_back
if (SR1I == SR1J) LBFGSd(SR1I,SR1J) = LBFGSdlr(LBFGSm-n_back+SR1I,LBFGSm-n_back+SR1J)
if (SR1I > SR1J) LBFGSl(SR1I,SR1J) = LBFGSdlr(LBFGSm-n_back+SR1I,LBFGSm-n_back+SR1J)
end do
end do
end do
end if
if ( normsqgrad < convergence_tol ) then
call print('Extended minim completed with |df|^2 = '// normsqgrad // ' < tolerance = ' // convergence_tol)
! call print(trim(method)//" iter = "//n_iter//" f = "//f// ' |df|^2 = '// normsqgrad// ' max(abs(df)) = '//maxval(abs(g))//' last alpha = '//alpha)
call print(trim(method)//" iter = "//n_iter//" f = "//f// ' |g|^2 = '// normsqgrad // ' |s|^2 = ' //normsqs //' rho = ' // TRrho// ' Delta^2 = '// TRDelta**2)
exit
end if
call print(trim(method)//" iter = "//n_iter//" f = "//f// ' |g|^2 = '// normsqgrad // ' |s|^2 = ' //normsqs //' rho = ' // TRrho// ' Delta^2 = '// TRDelta**2,PRINT_NORMAL)
end if
if (n_iter >= max_steps) then
exit
end if
n_iter = n_iter+1
end do
x_in = x
end function preconminim
!% Multiply vector 'x' by a sparse symmetric matrix stored as packed
!% column-ordered entries: FDH_H holds the values, FDH_rows(i) the row of
!% entry i, and FDH_diag(j) the index in FDH_H where column j starts.
!% Each entry contributes both (row,col) and the mirrored (col,row) term.
!% NOTE(review): a diagonal entry (row==col) is accumulated twice here --
!% presumably the storage halves the diagonal; confirm against the builder.
function fdhmultiply(x,FDH_rows,FDH_H,FDH_diag)
real(dp) :: x(:), FDH_H(:)
integer :: FDH_rows(:), FDH_diag(:)
real(dp) :: fdhmultiply(size(x))
integer :: n_dof, entry_i, n_entries, row_i, col_i
n_dof = size(x)
n_entries = size(FDH_H)
fdhmultiply = 0.0
col_i = 1
do entry_i = 1,n_entries
! advance to the next column once its first packed index is reached
if (col_i < n_dof) then
if (entry_i == FDH_diag(col_i+1)) col_i = col_i + 1
end if
row_i = FDH_rows(entry_i)
fdhmultiply(row_i) = fdhmultiply(row_i) + FDH_H(entry_i)*x(col_i)
fdhmultiply(col_i) = fdhmultiply(col_i) + FDH_H(entry_i)*x(row_i)
end do
end function
!% Dogleg trust-region step for limited-memory BFGS: combine the scaled
!% steepest-descent (Cauchy) point 'su' and the quasi-Newton point 'sqn',
!% constrained to the trust radius 'Delta' measured in the P-norm of the
!% preconditioner 'pr'.
function LBFGSdogleg(x,g,pr,Delta,doefunc,n_back,LBFGSs,LBFGSy,LBFGSdlr) result(s)
implicit none
real(dp) :: x(:),g(:)
type(precon_data) :: pr
real(dp) :: Delta
integer :: doefunc
integer :: n_back
real(dp):: LBFGSs(:,:), LBFGSy(:,:), LBFGSdlr(:,:)
real(dp) :: s(size(x))
real(dp) :: deltak,gammak
real(dp) :: sqn(size(x)), ssd(size(x)), LBFGSl(n_back,n_back), LBFGSr(n_back,n_back), LBFGSrinv(n_back,n_back),LBFGSd(n_back,n_back), su(size(x)),Bg(size(x))
integer :: N, INFO, IPIV(n_back), I, J, LBFGSm
real(dp) :: WORK(n_back), sunorm, sqnnorm,a,b,c,tau, Pg(size(x))
N = size(x)
! gammak = 1/deltak scales the initial inverse Hessian in the compact form
deltak = pr%energy_scale
gammak = 1.0/deltak
LBFGSm = size(LBFGSs,dim=2)
! extract R (upper), D (diagonal) and L (strict lower) pieces of S^T Y
LBFGSr = 0.0
LBFGSd = 0.0
LBFGSl = 0.0
LBFGSrinv = 0.0
do I = 1,n_back
do J = 1,n_back
if (I <= J) LBFGSr(I,J) = LBFGSdlr(LBFGSm-n_back+I,LBFGSm-n_back+J)
if (I == J) LBFGSd(I,J) = LBFGSdlr(LBFGSm-n_back+I,LBFGSm-n_back+J)
if (I > J) LBFGSl(I,J) = LBFGSdlr(LBFGSm-n_back+I,LBFGSm-n_back+J)
end do
end do
! Cauchy point: minimiser of the model along -g
Bg = calc_LBFGS_Bk_mult_v(LBFGSs(1:N,(LBFGSm-n_back+1):LBFGSm),LBFGSy(1:N,(LBFGSm-n_back+1):LBFGSm),LBFGSl,LBFGSd,g,pr,doefunc)
su = -(smartdotproduct(g,g,doefunc)/smartdotproduct(g,Bg,doefunc))*g
sunorm = sqrt(Pdotproduct(su,su,pr,doefunc))
if (sunorm >= Delta) then
! even the Cauchy point leaves the region: truncate it to the boundary
s = su*Delta/sunorm
else
! invert R via LAPACK LU factorise + invert for the compact H_k formula
LBFGSrinv = LBFGSr
if (n_back >= 1) then
call dgetrf(n_back,n_back,LBFGSrinv,n_back,IPIV,INFO)
call dgetri(n_back,LBFGSrinv,n_back,IPIV,WORK,n_back,INFO)
end if
sqn = -calc_LBFGS_Hk_mult_v(LBFGSs(1:N,(LBFGSm-n_back+1):LBFGSm),LBFGSy(1:N,(LBFGSm-n_back+1):LBFGSm),LBFGSd,LBFGSrinv,gammak,g,pr,doefunc)
sqnnorm = sqrt(Pdotproduct(sqn,sqn,pr,doefunc))
if (sqnnorm <= Delta) then
! full quasi-Newton step fits inside the region
s = sqn
else
! dogleg leg: find tau with ||su + tau*(sqn-su)||_P = Delta
! (positive root of the quadratic a*tau^2 + b*tau + c = 0)
a = Pdotproduct(sqn-su,sqn-su,pr,doefunc)
b = 2.0*Pdotproduct(su,sqn-su,pr,doefunc)
c = Pdotproduct(su,su,pr,doefunc) - Delta**2.0
tau = (-b + sqrt(b**2.0 -4.0*a*c))/(2.0*a)
s = su + tau*(sqn-su)
end if
end if
!call exit()
end function
!% Steihaug-Toint truncated conjugate-gradient solution of the trust-region
!% subproblem min_s g.s + 0.5 s.B.s subject to ||s||_P <= Delta, where B is
!% an L-SR1, L-BFGS or dense finite-difference Hessian model and the CG is
!% preconditioned via apply_precon_gs.
!% NOTE(review): the model flags are tested with present(), not their
!% values -- callers must pass exactly one of doSR1/doBFGS/doFD, and
!% passing e.g. doSR1=.false. would still select SR1; confirm intent.
function steihaug(x,g,pr,Delta,doefunc,n_back,LBFGSs,LBFGSy,LBFGSdlr,doSR1,doBFGS,doFD,FDHess) result(s)
implicit none
real(dp) :: x(:),g(:)
type(precon_data) :: pr
real(dp) :: Delta
integer :: doefunc
integer :: n_back
real(dp):: LBFGSs(:,:), LBFGSy(:,:), LBFGSdlr(:,:)
logical, optional :: doSR1,doBFGS,doFD
real(dp), optional :: FDHess(:,:)
real(dp) :: s(size(x))
real(dp) :: a,b,c,tau
real(dp) :: alpn,alpd,betn,alp,bet,normzcand,normr
integer :: thisind,I,J,K,N,thisind2
integer :: LBFGSm
real(dp) :: LBFGSd(n_back,n_back), LBFGSl(n_back,n_back)
! fixed CG tolerance on the P-norm of the residual
real(dp) :: eps = 10.0**(-3)
real(dp) :: d(size(x)), Bd(size(x)), r(size(x)), z(size(x))
real(dp) :: rtilde(size(x))
real(dp) :: zcand(size(x))
real(dp) :: LBFGSbufinterior(size(x))
real(dp) :: deltak
! NOTE(review): first_cg and cg_iter_count are set/declared but never used below
logical :: first_cg
integer :: cg_iter_count
LBFGSm = size(LBFGSs,dim=2)
N = size(x)
first_cg = .true.
deltak=pr%energy_scale
!Extract submatrices of S^T*Y
LBFGSd = 0.0_dp
LBFGSl = 0.0_dp
do I = 1,n_back
do J = 1,n_back
if (I == J) LBFGSd(I,J) = LBFGSdlr(LBFGSm-n_back+I,LBFGSm-n_back+J)
if (I > J) LBFGSl(I,J) = LBFGSdlr(LBFGSm-n_back+I,LBFGSm-n_back+J)
end do
end do
!Main Steihaug loop
! z: current CG iterate, r: residual, rtilde: preconditioned residual, d: CG direction
z = 0.0_dp
r = -g
rtilde = apply_precon_gs(r,pr,doefunc,force_k=10)
d = rtilde
do
! apply the selected Hessian model to the CG direction
if(present(doSR1)) then
Bd = calc_LSR1_Bk_mult_v(LBFGSs(1:N,(LBFGSm-n_back+1):LBFGSm),LBFGSy(1:N,(LBFGSm-n_back+1):LBFGSm),LBFGSl,LBFGSd,d,pr,doefunc)
elseif(present(doBFGS)) then
Bd = calc_LBFGS_Bk_mult_v(LBFGSs(1:N,(LBFGSm-n_back+1):LBFGSm),LBFGSy(1:N,(LBFGSm-n_back+1):LBFGSm),LBFGSl,LBFGSd,d,pr,doefunc)
elseif(present(doFD)) then
BD = smartmatmul(FDhess,d,doefunc)
end if
alpd = smartdotproduct(d,Bd,doefunc)
! negative curvature: follow d to the trust-region boundary and stop
if (alpd <= 0.0) then
a = Pdotproduct(d,d,pr,doefunc)
b = 2.0*Pdotproduct(z,d,pr,doefunc)
c = Pdotproduct(z,z,pr,doefunc) - Delta**2.0
tau = (-b + sqrt(b**2.0 -4.0*a*c))/(2.0*a)
s = z + tau*d
exit
end if
alpn = smartdotproduct(r,rtilde,doefunc)
alp = alpn/alpd
zcand = z + alp*d
normzcand = sqrt(Pdotproduct(zcand,zcand,pr,doefunc))
! step would leave the region: truncate to the boundary and stop
if (normzcand >= Delta) then
a = Pdotproduct(d,d,pr,doefunc)
b = 2.0*Pdotproduct(z,d,pr,doefunc)
c = Pdotproduct(z,z,pr,doefunc) - Delta**2.0
tau = (-b + sqrt(b**2.0 -4.0*a*c))/(2.0*a)
s = z + tau*d
exit
end if
z = zcand
r = r - alp*Bd
normr = sqrt(Pdotproduct(r,r,pr,doefunc))
! residual small enough: interior CG solution
if (normr < eps) then
s = z
exit
endif
! preconditioned CG direction update
rtilde = apply_precon_gs(r,pr,doefunc,force_k=20)
betn = smartdotproduct(r,rtilde,doefunc)
bet = betn/alpn
d = rtilde + bet*d
end do
end function
! P-weighted inner product: returns v1 . (P v2), where P is the
! preconditioner matrix held in pr.
function Pdotproduct(v1,v2,pr,doefunc)
implicit none
real(dp) :: v1(:),v2(:)   ! input vectors; v2 is multiplied by P first
type(precon_data) :: pr   ! preconditioner supplying the matrix P
integer :: doefunc        ! summation-mode flag forwarded to the helpers
real(dp) :: Pdotproduct
real(dp) :: weighted(size(v2))
! Form P*v2, then take the (possibly compensated) dot product with v1.
weighted = do_mat_mult_vec(pr,v2,doefunc)
Pdotproduct = smartdotproduct(v1,weighted,doefunc)
end function
! Apply the L-BFGS approximate Hessian B_k to a vector v using the compact
! (matrix) representation, with the preconditioner P playing the role of
! the initial matrix B_0:
!   B_k v = P v - [P S, Y] M^{-1} [S^T P v ; Y^T v],
! where M is assembled blockwise from S^T P S, L and D below.
function calc_LBFGS_Bk_mult_v(LBFGSs,LBFGSy,LBFGSl,LBFGSd,v,pr,doefunc) result(Bkv)
implicit none
! S and Y correction-pair histories; L = strict lower triangle of S^T Y,
! D = diagonal of S^T Y; v is the vector to multiply.
real(dp) :: LBFGSs(:,:), LBFGSy(:,:), LBFGSl(:,:), LBFGSd(:,:), v(:)
type(precon_data) :: pr ! preconditioner acting as B_0
integer :: doefunc      ! summation-mode flag forwarded to smart* helpers
real(dp) :: Bkv(size(v))
!
integer :: n_back       ! number of stored correction pairs
integer :: INFO         ! dgesv status; NOTE(review): not checked on return
integer, allocatable :: IPIV(:)
real(dp), allocatable :: midmat(:,:), midvec(:)
n_back = size(LBFGSs,dim=2)
! Start from the initial-matrix product B_0 v = P v.
Bkv = do_mat_mult_vec(pr,v,doefunc)
if (n_back>=1) then
allocate(IPIV(n_back*2))
allocate(midmat(2*n_back,2*n_back),midvec(2*n_back))
! Right-hand side [S^T P v ; Y^T v].
midvec(1:n_back) = smartmatmul(transpose(LBFGSs),do_mat_mult_vec(pr,v,doefunc),doefunc)
midvec((n_back+1):) = smartmatmul(transpose(LBFGSy),v,doefunc)
! Middle matrix M, assembled blockwise.
midmat(1:n_back,1:n_back) = smartmatmul(transpose(LBFGSs),do_mat_mult_vecs(pr,LBFGSs,doefunc),doefunc)
midmat((n_back+1):,1:n_back) = LBFGSl
midmat(1:n_back,(n_back+1):) = transpose(LBFGSl)
midmat((n_back+1):,(n_back+1):) = -LBFGSd
! Solve M * w = rhs in place (midvec becomes w).
call dgesv(n_back*2,1,midmat,n_back*2,IPIV,midvec,n_back*2,INFO)
Bkv = Bkv - do_mat_mult_vec(pr,smartmatmul(LBFGSs,midvec(1:n_back),doefunc),doefunc) - smartmatmul(LBFGSy,midvec((n_back+1):),doefunc)
end if
end function
! Apply the inverse L-BFGS approximation H_k to a vector v via the classic
! two-loop recursion, using the preconditioner inverse (apply_precon) as
! the initial matrix H_0.
! NOTE(review): the dummy arguments gammak, LBFGSd and LBFGSrinv, and the
! commented-out compact-representation implementation below, are unused by
! the active code path -- confirm whether gammak was meant to scale H_0.
function calc_LBFGS_Hk_mult_v(LBFGSs,LBFGSy,LBFGSd,LBFGSrinv,gammak,v,pr,doefunc) result(Hkv)
implicit none
! ----- retired compact-representation implementation (kept for reference) -----
! real(dp) :: LBFGSs(:,:), LBFGSy(:,:), LBFGSd(:,:), LBFGSrinv(:,:), gammak, v(:)
! real(dp) :: Hkv(size(v))
! type(precon_data) :: pr
! logical :: doefunc(:)
!
! integer :: n_back
! integer :: INFO
! real(dp), allocatable :: midmat(:,:), midvec(:)
!
! n_back = size(LBFGSs,dim=2)
! Hkv = apply_precon_gs(v,pr,doefunc,force_k=20)
! if (n_back>=1) then
! allocate(midmat(2*n_back,2*n_back),midvec(2*n_back))
! midvec(1:n_back) = smartmatmul(transpose(LBFGSs),v,doefunc)
! midvec((n_back+1):2*n_back) = smartmatmul(transpose(LBFGSy),apply_precon_gs(v,pr,doefunc,force_k=20),doefunc)
!
! midmat = 0.0
! midmat(1:n_back,1:n_back) = smartmatmul(transpose(LBFGSrinv),smartmatmul(LBFGSd + smartmatmul(transpose(LBFGSy),apply_precon_vecs(LBFGSy,pr,doefunc),doefunc),LBFGSrinv,doefunc),doefunc)
! midmat((n_back+1):,1:n_back) = -LBFGSrinv
! midmat(1:n_back,(n_back+1):) = -transpose(LBFGSrinv)
!
! midvec = smartmatmul(midmat,midvec,doefunc)
!
! Hkv = Hkv + smartmatmul(LBFGSs,midvec(1:n_back),doefunc) + apply_precon_gs(smartmatmul(LBFGSy,midvec((n_back+1):),doefunc),pr,doefunc,force_k=20)
! end if
! ------------------------------------------------------------------------------
real(dp) :: LBFGSs(:,:), LBFGSy(:,:), LBFGSd(:,:), LBFGSrinv(:,:), gammak, v(:)
real(dp) :: Hkv(size(v))
type(precon_data) :: pr
integer :: doefunc
integer :: I,J,n_back,thisind,N
real(dp), allocatable :: LBFGSq(:),LBFGSz(:),LBFGSalp(:),LBFGSbet(:),LBFGSrho(:)
N = size(LBFGSs,dim=1)
n_back = size(LBFGSs,dim=2)
allocate(LBFGSq(N),LBFGSz(N),LBFGSbet(n_back),LBFGSalp(n_back),LBFGSrho(n_back))
! rho_i = 1 / (s_i . y_i)
do I =1,n_back
LBFGSrho(I) = 1.0/smartdotproduct(LBFGSs(1:N,I),LBFGSy(1:N,I),doefunc)
end do
LBFGSq = v
! First loop: newest pair to oldest.
do I = 1,n_back
thisind = n_back - I + 1
LBFGSalp(thisind) = LBFGSrho(thisind)*smartdotproduct(LBFGSs(1:N,thisind),LBFGSq,doefunc)
LBFGSq = LBFGSq - LBFGSalp(thisind)*LBFGSy(1:N,thisind)
end do
! Initial matrix: z = H_0 q = P^{-1} q via CG.
LBFGSz = apply_precon(LBFGSq,pr,doefunc)
! Second loop: oldest pair to newest.
do I = 1,n_back
thisind = I
LBFGSbet(thisind) = LBFGSrho(thisind)*smartdotproduct(LBFGSy(1:N,thisind),LBFGSz,doefunc)
LBFGSz = LBFGSz + LBFGSs(1:N,thisind)*(LBFGSalp(thisind) - LBFGSbet(thisind))
end do
Hkv = LBFGSz
end function
! Apply the limited-memory SR1 approximate Hessian B_k to a vector v,
! using the preconditioner P as the initial matrix B_0:
!   B_k v = P v + (Y - P S) M^{-1} (Y - P S)^T v,
! with M = D + L + L^T - S^T P S  (compact SR1 representation).
!
! BUG FIX / CONSISTENCY: the dummy arguments previously appeared in the
! order (...,LBFGSd,LBFGSl,...) while the caller in the Steihaug loop and
! the sibling routine calc_LBFGS_Bk_mult_v both pass (...,LBFGSl,LBFGSd,...).
! With the old order the caller's L landed in the D slot and vice versa,
! so the middle matrix was assembled as L + 2D instead of D + L + L^T.
! The dummy order is now (L,D), matching the positional callers.
function calc_LSR1_Bk_mult_v(LBFGSs,LBFGSy,LBFGSl,LBFGSd,v,pr,doefunc) result(Bkv)
implicit none
! S, Y histories; L = strict lower triangle of S^T Y; D = diag(S^T Y).
real(dp) :: LBFGSs(:,:), LBFGSy(:,:), LBFGSl(:,:), LBFGSd(:,:), v(:)
type(precon_data) :: pr ! preconditioner acting as B_0
integer :: doefunc      ! summation-mode flag for smart* helpers
real(dp) :: Bkv(size(v))
integer :: n_back, INFO
real(dp), allocatable :: midmat(:,:), midvec(:)
integer, allocatable :: IPIV(:)
n_back = size(LBFGSs,dim=2)
! B_0 v = P v
Bkv = do_mat_mult_vec(pr,v,doefunc)
if (n_back >= 1) then
allocate(IPIV(n_back))
allocate(midmat(n_back,n_back),midvec(n_back))
! rhs = (Y - P S)^T v
midvec = smartmatmul(transpose(LBFGSy - do_mat_mult_vecs(pr,LBFGSs,doefunc)),v,doefunc)
! M = D + L + L^T - S^T P S
midmat = LBFGSd + LBFGSl + transpose(LBFGSl) - smartmatmul(transpose(LBFGSs),do_mat_mult_vecs(pr,LBFGSs,doefunc),doefunc)
! Solve M * w = rhs in place. NOTE(review): INFO is not checked.
call dgesv(n_back,1,midmat,n_back,IPIV,midvec,n_back,INFO)
Bkv = Bkv + smartmatmul(LBFGSy-do_mat_mult_vecs(pr,LBFGSs,doefunc),midvec,doefunc)
end if
end function
! Maximum sensible initial step along the (negative) gradient g: the
! smaller of pr%length_scale in the P-norm of g and infcoeff over the
! infinity norm of g.
!
! BUG FIX: "real(dp) :: infcoeff = 0.5" gave infcoeff the implicit SAVE
! attribute, so one call with infoverride present permanently replaced
! the 0.5 default used by every later call. The default is now assigned
! at run time on each call.
function calc_amax(g,pr,doefunc,infoverride)
implicit none
real(dp) :: g(:)                 ! gradient at the current point
type(precon_data) :: pr          ! supplies length_scale and the P-metric
real(dp) :: calc_amax
real(dp), optional :: infoverride ! per-call override of the inf-norm coefficient
integer :: doefunc               ! summation-mode flag for Pdotproduct
real(dp) :: P_amax, inf_amax
real(dp) :: infcoeff
infcoeff = 0.5_dp
if (present(infoverride)) then
infcoeff = infoverride
end if
! Cap from the preconditioner metric: length_scale / |g|_P.
P_amax = pr%length_scale/sqrt(pdotproduct(g,g,pr,doefunc))
! Cap so that no single coordinate moves more than infcoeff.
inf_amax = infcoeff/maxval(abs(g))
calc_amax = min(P_amax,inf_amax)
end function
!basic linear backtracking linesearch, relies on changing initial alpha to increase step size
! Armijo backtracking: grow the incoming alpha by 4, then shrink by 4 until
! the sufficient-decrease condition deltaE < C*alpha*d0 holds (deltaE is
! computed through calcdeltaE so the Kahan energy modes are supported).
! On return f, g and local_energy hold the values at the accepted step.
function linesearch_basic(x,s,f,g,local_energy,alpha,func,doefunc,data,d0,n_iter_final,amaxin)
implicit none
real(dp) :: x(:)                           ! current position
real(dp) :: s(:)                           ! search direction
real(dp), intent(inout) :: f               ! in: f(x); out: f at accepted step
real(dp), intent(inout) :: g(:)            ! in: grad f(x); out: grad at accepted step
real(dp), intent(inout) :: local_energy(:) ! per-term energies, updated on return
real(dp) :: alpha                          ! trial step; modified in place
INTERFACE
function func(x,data,local_energy,gradient)
use system_module
real(dp)::x(:)
character(len=1),optional::data(:)
real(dp), intent(inout),optional :: local_energy(:)
real(dp), intent(inout),optional :: gradient(:)
real(dp)::func
end function func
end INTERFACE
integer :: doefunc                         ! energy-summation mode flag
character(len=1)::data(:)
real(dp) :: linesearch_basic               ! accepted step length
integer, optional, intent(out) :: n_iter_final
real(dp) , optional :: amaxin              ! optional cap on the step length
integer,parameter :: ls_it_max = 1000
real(dp),parameter :: C = 10.0_dp**(-4.0)  ! Armijo sufficient-decrease constant
integer :: ls_it
real(dp) :: f1, f0, d0, g1(size(g))        ! d0 = directional derivative at alpha=0
real(dp) :: amax
real(dp) :: local_energy0(size(local_energy)),local_energy1(size(local_energy))
real(dp) :: deltaE,deltaE2                 ! NOTE(review): deltaE2 is unused
! Optimistic growth: try a step 4x larger than last time first.
alpha = alpha*4.0
if (present(amaxin)) then
amax = amaxin
else
amax = 4.1*alpha
end if
if(alpha>amax) alpha = amax
f0 = f
local_energy0 = local_energy
ls_it = 0
do
!f1 = func_wrapper(func,x+alpha*s,data,doefunc=doefunc)
call system_timer("preconminim/linesearch_basic/func")
#ifndef _OPENMP
call verbosity_push_decrement()
#endif
f1 = func_wrapper(func,x+alpha*s,data,local_energy1,g1,doefunc=doefunc)
#ifndef _OPENMP
call verbosity_pop()
#endif
call system_timer("preconminim/linesearch_basic/func")
call print("linesearch_basic loop "//" iter = "//ls_it//" f = "//f1// ' |g|^2 = '// normsq(g1)//' last alpha = '//alpha)
deltaE = calcdeltaE(doefunc,f1,f0,local_energy1,local_energy0)
!call print(deltaE)
ls_it = ls_it + 1
! Armijo condition satisfied (d0 < 0 for a descent direction).
if ( deltaE < C*alpha*d0) then
exit
end if
! Give up: iteration budget exhausted.
if(ls_it>ls_it_max) then
exit
end if
! Backtrack by the same factor used to grow.
alpha = alpha/4.0_dp
! Give up: step underflowed to numerical noise.
if (alpha <1.0e-15) then
exit
end if
end do
call print("linesearch_basic returning "//" iter = "//ls_it//" f = "//f1// ' |g|^2 = '// normsq(g1)//' last alpha = '//alpha)
linesearch_basic = alpha
f = f1
g = g1
local_energy = local_energy1
!linesearch_basic = 15.0
if(present(n_iter_final)) n_iter_final = ls_it
end function
! Backtracking linesearch with cubic min
! Like linesearch_basic, but each backtracking step minimizes a cubic fit
! through (0,f0,d0) and (a1,f1,d1), with the candidate clamped to
! [0.1*a1, 0.8*a1]. Only f is updated on return (no gradient or
! local-energy output); the plain f1-f0 difference is used for the
! Armijo test rather than calcdeltaE.
function linesearch_basic_pp(x,s,f,alpha,func,doefunc,data,d0,n_iter_final,amaxin)
implicit none
real(dp) :: x(:)            ! current position
real(dp) :: s(:)            ! search direction
real(dp) :: f               ! in: f(x); out: f at accepted step
real(dp) :: alpha           ! incoming step scale; grown by 4 on entry
INTERFACE
function func(x,data,local_energy,gradient)
use system_module
real(dp)::x(:)
character(len=1),optional::data(:)
real(dp)::func
real(dp), intent(inout),optional :: local_energy(:)
real(dp), intent(inout),optional :: gradient(:)
end function func
end INTERFACE
integer, intent(in) :: doefunc
character(len=1)::data(:)
real(dp) :: linesearch_basic_pp ! accepted step length
integer, optional, intent(out) :: n_iter_final
real(dp), optional :: amaxin
integer,parameter :: ls_it_max = 1000
real(dp),parameter :: C = 10.0_dp**(-4.0) ! Armijo sufficient-decrease constant
integer :: ls_it
real(dp) :: f1, f0, d0, d1, a1, acand
real(dp) :: g1(size(x))    ! gradient at the trial point, used only for the slope d1
real(dp) :: amax           ! NOTE(review): when amaxin is absent, amax is set but never used
real(dp) :: deltaE         ! NOTE(review): unused
!real(dp) :: local_energy0(size(local_energy)),local_energy1(size(local_energy))
alpha = alpha*4.0
if (present(amaxin)) then
amax = amaxin
if(alpha>amax) alpha = amax
else
amax = 100.0_dp*alpha
end if
a1 = alpha
f0 = f
ls_it = 1
do
!f1 = func_wrapper(func,x+alpha*s,data,doefunc=doefunc)
call system_timer("preconminim/linesearch_basic_pp/func")
#ifndef _OPENMP
call verbosity_push_decrement()
#endif
f1 = func_wrapper(func,x+a1*s,data,gradient=g1,doefunc=doefunc)
#ifndef _OPENMP
call verbosity_pop()
#endif
call system_timer("preconminim/linesearch_basic_pp/func")
call print("linesearch_basic_pp loop "//" iter = "//ls_it//" f = "//f1// ' |g|^2 = '// normsq(g1)//' last alpha = '//a1)
! Slope along the search direction at the trial step.
d1 = dot_product(g1,s)
!call print(alpha)
! Armijo sufficient-decrease test.
if ( f1-f0 < C*a1*d0) then
exit
end if
if(ls_it>ls_it_max) then
exit
end if
ls_it = ls_it + 1
! Cubic-interpolated candidate for the next (smaller) trial step.
acand = cubic_min(0.0_dp,f0,d0,a1,f1,d1)
!acand = quad_min(0.0_dp,a1,f0,f1,d0)
!call print(a1 //' '// f0//' ' //f1// ' ' //d0)
!call print('a1=' // a1 // ' acand=' // acand)
!if ( acand == 0.0_dp) acand = a1/4.0_dp
! Safeguard: keep the new step within [0.1, 0.8] of the old one.
acand = max(0.1*a1, min(acand,0.8*a1) )
a1 = acand
!call print(a1)
end do
call print("linesearch_basic_pp returning "//" iter = "//ls_it//" f = "//f1// ' |g|^2 = N/A '//' last alpha = '//a1)
linesearch_basic_pp = a1
f = f1
if(present(n_iter_final)) n_iter_final = ls_it
end function
! standard two stage lineseach from N&W
! Standard two-stage (bracket + zoom) strong-Wolfe linesearch after
! Nocedal & Wright, Algorithms 3.5/3.6, with energy differences evaluated
! through calcdeltaE so the Kahan-summed energy modes are supported.
! Falls back to linesearch_basic when bracketing fails or the iteration
! budget is exhausted.
!
! BUG FIXES in this revision:
!  * n_iter_final is OPTIONAL intent(out) but was read/written in several
!    paths without a present() guard (invalid if the caller omits it);
!    all accesses are now guarded.
!  * dozoom was never set on the "d1 >= 0" bracket exit, and its
!    declaration initializer gave it the implicit SAVE attribute, so that
!    exit path reused a stale value from a previous call to decide
!    whether to zoom. It is now initialized at run time and set .TRUE.
!    on that path (the branch builds a bracket, so zooming is intended).
function linesearch_standard(x,s,f,g,local_energy,alpha,func,doefunc,data,d,n_iter_final,amaxin)
implicit none
real(dp) :: x(:)                           ! current position
real(dp) :: s(:)                           ! search direction
real(dp), intent(inout) :: f               ! in: f(x); out: f at accepted step
real(dp), intent(inout) :: g(:)            ! in: grad f(x); out: grad at accepted step
real(dp), intent(inout) :: local_energy(:) ! per-term energies, updated on return
real(dp) :: alpha                          ! initial trial step
INTERFACE
function func(x,data,local_energy,gradient)
use system_module
real(dp)::x(:)
character(len=1),optional::data(:)
real(dp), intent(inout),optional :: local_energy(:)
real(dp), intent(inout),optional :: gradient(:)
real(dp)::func
end function func
end INTERFACE
integer :: doefunc                         ! energy-summation mode flag
character(len=1)::data(:)
real(dp) :: d                              ! directional derivative g.s at alpha=0
real(dp) :: linesearch_standard            ! accepted step length
integer, optional, intent(out) :: n_iter_final
real(dp), optional :: amaxin               ! optional cap on the step length
integer, parameter :: ls_it_max = 20
real(dp), parameter :: C1 = 10.0_dp**(-4.0) ! sufficient-decrease constant
real(dp), parameter :: C2 = 0.9             ! curvature constant
real(dp) :: amax
real(dp), parameter :: amin = 10.0_dp**(-5.0) ! minimum zoom bracket width
real(dp) :: f0, f1, ft, a0, a1, a2, at, d0, d1, dt
real(dp) :: flo, fhi, alo, ahi, dlo, dhi
integer :: ls_it
real(dp) :: g1(size(x)), gt(size(x))
logical :: dozoom
real (dp) :: deltaE,deltaE0, deltaET, deltaETlo
real(dp) :: local_energy0(size(local_energy)),local_energy1(size(local_energy)),local_energyT(size(local_energy)),local_energylo(size(local_energy)),local_energyhi(size(local_energy))
dozoom = .FALSE.
a0 = 0.0_dp
f0 = f
local_energy0 = local_energy
d0 = d
a1 = alpha
if (present(amaxin)) then
amax = amaxin
if(a1>amax) a1 = amax
else
amax = 100.0_dp*alpha
end if
!begin bracketing
ls_it = 0
do
call system_timer("preconminim/linesearch_standard/func")
#ifndef _OPENMP
call verbosity_push_decrement()
#endif
f1 = func_wrapper(func,x+a1*s,data,local_energy1,g1,doefunc=doefunc)
#ifndef _OPENMP
call verbosity_pop()
#endif
call system_timer("preconminim/linesearch_standard/func")
ls_it = ls_it + 1
call print("linesearch_standard bracket "//" iter = "//ls_it//" f = "//f1// ' |g|^2 = '// normsq(g1)//' last alpha = '//a1)
d1 = smartdotproduct(s,g1,doefunc)
deltaE = calcdeltaE(doefunc,f1,f,local_energy1,local_energy)
deltaE0 = calcdeltaE(doefunc,f1,f0,local_energy1,local_energy0)
! Armijo violated, or no longer decreasing: bracket found -> zoom.
if ( deltaE > C1*a1*d .OR. (deltaE0 >= 0.0 .AND. ls_it > 1 )) then
dozoom = .TRUE.
alo = a0
ahi = a1
flo = f0
local_energylo = local_energy0
fhi = f1
local_energyhi = local_energy1
dlo = d0
dhi = d1
exit
end if
! Strong-Wolfe curvature condition satisfied: accept a1 directly.
if ( abs(d1) <= C2*abs(d) ) then
dozoom = .FALSE.
exit
end if
! Slope turned non-negative: bracket found with endpoints swapped -> zoom.
if ( d1 >= 0.0_dp) then
dozoom = .TRUE. ! BUG FIX: previously left unset on this path
alo = a1
ahi = a0
flo = f1
local_energylo = local_energy1
fhi = f0
local_energyhi = local_energy0
dlo = d1
dhi = d0
exit
end if
if(ls_it>ls_it_max) then
call print('linesearch_standard Ran out of line search iterations in phase 1')
a1 = linesearch_basic(x,s,f,g,local_energy,alpha,func,doefunc,data,d,n_iter_final,amax)
! *1 quantities will be copied into function arguments for return at end
f1 = f
g1 = g
local_energy1 = local_energy
if(present(n_iter_final)) n_iter_final = n_iter_final + ls_it
dozoom = .FALSE.
exit
end if
if(a1 >= amax) then
call print('linesearch_standard Bracketing failed to find an interval, reverting to basic linesearch')
a1 = linesearch_basic(x,s,f,g,local_energy,alpha,func,doefunc,data,d,n_iter_final,amax)
! *1 quantities will be copied into function arguments for return at end
f1 = f
g1 = g
local_energy1 = local_energy
if(present(n_iter_final)) n_iter_final = n_iter_final + ls_it
dozoom = .FALSE.
exit
end if
! No bracket yet: expand the trial interval and continue.
a2 = min(a1 + 4.0*(a1-a0),amax)
a0 = a1
a1 = a2
f0 = f1
local_energy0 = local_energy1
d0 = d1
!call print(a1)
end do
if ( dozoom ) then
!ls_it = ls_it+1
do
! Cubic interpolation of the current bracket for the next trial step.
at = cubic_min(alo,flo,dlo,ahi,fhi,dhi)
call system_timer("preconminim/linesearch_standard/func")
#ifndef _OPENMP
call verbosity_push_decrement()
#endif
ft = func_wrapper(func,x+at*s,data,local_energyT,gt,doefunc=doefunc)
#ifndef _OPENMP
call verbosity_pop()
#endif
call system_timer("preconminim/linesearch_standard/func")
ls_it = ls_it + 1
call print("linesearch_standard zoom "//" iter = "//ls_it//" f = "//ft// ' |g|^2 = '// normsq(gt)//' last alpha = '//at)
deltaET = calcdeltaE(doefunc,ft,f0,local_energyT,local_energy0)
deltaETlo = calcdeltaE(doefunc,ft,flo,local_energyT,local_energylo)
! Trial fails sufficient decrease (or is above the low point): shrink from above.
if ( deltaET > C1*at*d .OR. deltaETlo >= 0.0) then
ahi = at
fhi = ft
local_energyhi = local_energyT
else
dt = dot_product(gt,s)
! Both Wolfe conditions hold at the trial point: accept it.
if ( abs(dt) <= C2*abs(d) ) then
a1 = at
f1 = ft
g1 = gt
local_energy1 = local_energyT
exit
end if
if ( dt*(ahi-alo) >= 0.0 ) then
ahi = alo
fhi = flo
local_energyhi = local_energylo
end if
alo = at
flo = ft
local_energylo = local_energyT
end if
if (abs(ahi - alo) < amin) then
call print('Bracket got small without satisfying curvature condition')
deltaET = calcdeltaE(doefunc,ft,f,local_energyT,local_energy)
if ( deltaET < C1*at*d) then
call print('Bracket lowpoint satisfies sufficient decrease, using that')
a1 = at
f1 = ft
g1 = gt
exit
else
call print('Bracket lowpoint no good, doing a step of basic linesearch with original initial inputs')
a1 = linesearch_basic(x,s,f,g,local_energy,alpha,func,doefunc,data,d,n_iter_final,amax)
f1 = ft
g1 = gt
if(present(n_iter_final)) n_iter_final = n_iter_final + ls_it
exit
end if
end if
if(ls_it>ls_it_max) then
call print('Ran out of line search iterations in phase 2')
a1 = linesearch_basic(x,s,f,g,local_energy,alpha,func,doefunc,data,d,n_iter_final,amax)
f1 = f
g1 = g
local_energy1 = local_energy
if(present(n_iter_final)) n_iter_final = n_iter_final + ls_it
exit
end if
end do ! zoom loop
end if
call print("linesearch_standard returning "//" iter = "//ls_it//" f = "//f1// ' |g|^2 = '// normsq(g1)//' last alpha = '//a1)
!call print('boo ' // ls_it)
! NOTE(review): this overwrites any accumulated count from the fallback
! paths above -- preserved from the original logic; confirm intent.
if(present(n_iter_final)) n_iter_final = ls_it
linesearch_standard = a1
f = f1
local_energy = local_energy1
g = g1
end function linesearch_standard
! More-Thuente linesearch (after MINPACK-2 dcsrch): brackets and
! interpolates a step satisfying sufficient decrease (ftol) and curvature
! (gtol), using cstep for the safeguarded trial-step update. Stage 1 works
! on the modified function psi(stp) = f(stp) - finit - stp*gtest.
!
! FIXES in this revision:
!  * stpmin/stpmax were not declared in this scope (they resolved against
!    an outer-scope variable, which this routine silently overwrote);
!    they are now local.
!  * local_energy1 was dimensioned size(x) (3N+9) instead of
!    size(local_energy), so calcE could read elements func never wrote.
!  * local_energy and n_iter_final are now updated on return, consistent
!    with the other linesearch routines.
!  * The loop is now capped at ls_it_max iterations (the parameter
!    previously existed but was never checked), and the dead nfev counter
!    and a redundant ftest computation were removed.
function linesearch_morethuente(x,s,finit,local_energy,alpha,func,doefunc,data,d,n_iter_final,amaxin)
implicit none
real(dp) :: x(:)                           ! current position
real(dp) :: s(:)                           ! search direction
real(dp), intent(inout) :: finit           ! in: f(x); out: f at accepted step
real(dp), intent(inout) :: local_energy(:) ! per-term energies, updated on return
real(dp) :: alpha                          ! initial trial step
INTERFACE
function func(x,data,local_energy,gradient)
use system_module
real(dp)::x(:)
character(len=1),optional::data(:)
real(dp), intent(inout),optional :: local_energy(:)
real(dp), intent(inout),optional :: gradient(:)
real(dp)::func
end function func
end INTERFACE
integer :: doefunc                         ! energy-summation mode flag
character(len=1)::data(:)
real(dp) :: d                              ! directional derivative at alpha=0 (< 0 expected)
real(dp) :: linesearch_morethuente         ! accepted step length
integer, optional, intent(out) :: n_iter_final
real(dp), optional :: amaxin               ! optional cap on the step length
integer, parameter :: ls_it_max = 20
real(dp), parameter :: ftol = 10.0_dp**(-4.0) ! sufficient decrease
real(dp), parameter :: gtol = 0.9 ! curvature
real(dp), parameter :: xtol = 10.0_dp**(-5.0) ! bracket size
real(dp) :: amax
real(dp), parameter :: amin = 10.0_dp**(-10.0)
logical :: brackt
integer :: stage
real(dp) :: f, g, ftest, fm, fx, fxm, fy, fym, ginit, gtest, gm, gx, gxm, gy, gym, stx, sty, stmin, stmax, width, width1,stp
real(dp) :: stpmin,stpmax ! hard lower/upper bounds on the step (BUG FIX: now local)
real(dp) :: f1,g1(size(x)),local_energy1(size(local_energy)) ! BUG FIX: was size(x)
integer :: ls_it
real(dp), parameter :: xtrapl = 1.1_dp, xtrapu=4.0_dp, p5 = 0.5_dp, p66 = 0.66_dp
stp = alpha
stpmin = amin
if (present(amaxin)) then
stpmax = amaxin
if(stp>stpmax) stp = stpmax
else
stpmax = 100.0_dp*stp
end if
brackt = .false.
stage = 1
ginit = d
gtest = ftol*d
width = stpmax-stpmin
width1 = width/p5
! (stx,fx,gx) = best step so far; (sty,fy,gy) = other endpoint.
stx = 0.0_dp
fx = finit
gx = ginit
sty = 0.0_dp
fy = finit
gy = ginit
stmin = 0.0_dp
stmax = stp + xtrapu*stp
ls_it = 0
do
ls_it = ls_it + 1
call system_timer("preconminim/linesearch_morethuente/func")
#ifndef _OPENMP
call verbosity_push_decrement()
#endif
f1 = func_wrapper(func,x+stp*s,data,local_energy1,gradient=g1,doefunc=doefunc)
#ifndef _OPENMP
call verbosity_pop()
#endif
call system_timer("preconminim/linesearch_morethuente/func")
f = calcE(doefunc,f1,local_energy1)
g = smartdotproduct(g1,s,doefunc)
ftest = finit + stp*gtest
! Enter stage 2 once psi <= 0 with non-negative slope.
if (stage .eq. 1 .and. f .le. ftest .and. g .ge. 0.0_dp) stage = 2
! Termination diagnostics (mirroring dcsrch's warning exits).
if (brackt .and. (stp .le. stmin .or. stp .ge. stmax)) then
call print("Rounding errors in linesearch")
exit
end if
if (brackt .and. stmax-stmin .le. xtol*stmax) then
call print("Bracket too small")
exit
end if
if (stp .eq. stpmax .and. f .le. ftest .and. g .le. gtest) then
call print("Maximum stepsize reached")
exit
end if
if (stp .eq. stpmin .and. (f .gt. ftest .or. g .ge. gtest)) then
call print("Minimum stepsize reached")
exit
end if
! Strong Wolfe conditions satisfied: done.
if (f .le. ftest .and. abs(g) .le. gtol*(-ginit)) exit
! Stage 1 updates the bracket for the modified function psi.
if (stage .eq. 1 .and. f .le. fx .and. f .gt. ftest) then
fm = f - stp*gtest
fxm = fx - stx*gtest
fym = fy - sty*gtest
gm = g - gtest
gxm = gx - gtest
gym = gy - gtest
call cstep(stx,fxm,gxm,sty,fym,gym,stp,fm,gm,brackt,stmin,stmax)
fx = fxm + stx*gtest
fy = fym + sty*gtest
gx = gxm + gtest
gy = gym + gtest
else
call cstep(stx,fx,gx,sty,fy,gy,stp,f,g,brackt,stmin,stmax)
end if
! Bisect if the bracket is not shrinking fast enough.
if (brackt) then
if (abs(sty-stx) .ge. p66*width1) stp = stx + p5*(sty-stx)
width1 = width
width = abs(sty-stx)
else
stmin = stp + xtrapl*(stp-stx)
stmax = stp + xtrapu*(stp-stx)
end if
stp = max(stp,stpmin)
stp = min(stp,stpmax)
if (brackt .and. (stp .le. stmin .or. stp .ge. stmax) .or. (brackt .and. stmax-stmin .le. xtol*stmax)) stp = stx
if (ls_it >= ls_it_max) then
call print("linesearch_morethuente ran out of iterations")
exit
end if
call print(stx// ' '//sty// ' '//stp)
end do
finit = f
local_energy = local_energy1
if (present(n_iter_final)) n_iter_final = ls_it
linesearch_morethuente = stp
end function
! Safeguarded cubic/quadratic trial-step update for the More-Thuente
! linesearch (after MINPACK-2 dcstep). Given the best step stx, the other
! bracket endpoint sty, and a new trial step stp with value fp and
! derivative dpp, compute the next trial step (returned in stp) and update
! the bracket and brackt flag.
!
! BUG FIXES in this revision:
!  * sgnd (sign agreement between the new and best-point derivatives) was
!    read in two branch conditions but never computed; it is now set to
!    dpp*(dx/abs(dx)) as in the reference dcstep.
!  * In case 1 the denominator q used "dp" (the real-kind parameter)
!    where the derivative argument "dpp" was intended.
subroutine cstep(stx,fx,dx,sty,fy,dy,stp,fp,dpp,brackt,stpmin,stpmax)
implicit none
real(dp),intent(inout) :: stx,fx,dx,sty,fy,dy,stp,fp,dpp ! (stx,fx,dx)=best step; (sty,fy,dy)=other endpoint; (stp,fp,dpp)=trial in / next trial out
logical, intent(inout) :: brackt       ! whether the minimizer is bracketed
real(dp), intent(in) :: stpmin,stpmax  ! hard bounds on the step
integer :: info                        ! which of the 4 cases fired (diagnostic)
real(dp), parameter :: p66 = 0.66
logical :: bound                       ! whether the cubic step was safeguarded
real(dp) :: sgnd,theta,s,gamm,p,q,r,stpc,stpq,stpf
info = 0
! Sanity checks (diagnostic only; the first two abort the update).
if(brackt .and. (stp<=min(stx,sty) .or. stp>=max(stx,sty))) then
call print("cstep received mixed information about the bracketing")
return
end if
if (stpmax<stpmin) then
call print("cstep received strange information about min/max step sizes")
return
end if
if ( dx*(stp-stx)>=0.0) then
call print("cstep didn't receive a descent direction")
!return
end if
! BUG FIX: sgnd was previously used uninitialized.
sgnd = dpp*(dx/abs(dx))
if (fp > fx) then
! Case 1: higher value at the trial -- minimizer bracketed in [stx,stp].
! Take the closer of the cubic and quadratic steps (biased toward cubic).
info = 1
bound = .true.
theta = 3.0*(fx - fp)/(stp - stx) + dx + dpp
s = max(abs(theta),abs(dx),abs(dpp))
gamm = s*sqrt((theta/s)**2.0 - (dx/s)*(dpp/s))
if (stp .lt. stx) then
gamm = -gamm
end if
p = (gamm - dx) + theta
q = ((gamm - dx) + gamm) + dpp ! BUG FIX: was "+ dp" (the kind parameter)
r = p/q
stpc = stx + r*(stp-stx)
stpq = stx + ((dx/((fx-fp)/(stp-stx)+dx))/2.0)*(stp-stx)
if( abs(stpc-stx) .lt. abs(stpq-stx)) then
stpf = stpc
else
stpf = stpc + (stpq-stpc)/2.0
end if
brackt = .true.
elseif (sgnd<0.0) then
! Case 2: lower value but derivatives of opposite sign -- bracketed;
! take the closer... in fact the farther of the cubic and secant steps.
info = 2
bound = .false.
theta = 3.0*(fx - fp)/(stp - stx) + dx + dpp
s = max( abs(theta),abs(dx),abs(dpp))
gamm = s*sqrt((theta/s)**2.0 - (dx/s)*(dpp/s))
if (stp .gt. stx) then
gamm = -gamm
end if
p = (gamm - dpp) + theta
q = ((gamm - dpp) + gamm) + dx
r = p/q
stpc = stp + r*(stx-stp)
stpq = stp + (dpp/(dpp-dx))*(stx-stp)
if( abs(stpc-stp) .gt. abs(stpq-stp)) then
stpf = stpc
else
stpf = stpq
end if
brackt = .true.
elseif (abs(dpp) .lt. abs(dx))then
! Case 3: lower value, same sign, decreasing derivative magnitude --
! cubic step may not exist; safeguard toward stpmin/stpmax.
info = 3
bound = .true.
theta = 3.0*(fx - fp)/(stp - stx) + dx + dpp
s = max(abs(theta),abs(dx),abs(dpp))
gamm = s*sqrt(max(0.0,(theta/s)**2.0 - (dx/s)*(dpp/s)))
if (stp .gt. stx) then
gamm = -gamm
end if
p = (gamm - dpp) + theta
q = (gamm + (dx-dpp)) + gamm
r = p/q
if(r .lt. 0.0 .and. gamm .ne. 0.0_dp) then
stpc = stp + r*(stx-stp)
elseif (stp > stx)then
stpc = stpmax
else
stpc = stpmin
end if
stpq = stp + (dpp/(dpp-dx))*(stx-stp)
if (brackt) then
if(abs(stpc-stp) .lt. abs(stpq-stp))then
stpf = stpc
else
stpf = stpq
end if
if(stp .gt. stx) then
stpf = min(stp+p66*(sty-stp),stpf)
else
stpf = max(stp+p66*(sty-stp),stpf)
end if
else
if(abs(stpc-stp) .gt. abs(stpq-stp)) then
stpf = stpc
else
stpf = stpq
end if
stpf = min(stpmax,stpf)
stpf = max(stpmin,stpf)
end if
else
! Case 4: lower value, same sign, derivative not decreasing --
! interpolate toward sty if bracketed, otherwise jump to a bound.
info = 4
bound = .false.
if (brackt) then
theta = 3.0*(fp-fy)/(sty - stp) + dy + dpp
s = max(abs(theta),abs(dy),abs(dpp))
gamm = s*sqrt((theta/s)**2.0 - (dy/s)*(dpp/s))
if (stp .gt. sty ) then
gamm = -gamm
end if
p = (gamm - dpp) + theta
q = ((gamm - dpp) + gamm) + dy
r = p/q
stpc = stp + r*(sty-stp)
stpf = stpc
elseif(stp.gt.stx)then
stpf = stpmax
else
stpf = stpmin
end if
end if
! Update the bracket endpoints from the trial point just examined.
if (fp .gt. fx) then
sty = stp
fy = fp
dy = dpp
else
if (sgnd .lt. 0.0) then
sty = stx
fy = fx
dy = dx
end if
stx = stp
fx = fp
dx = dpp
end if
! Clamp and return the new trial step.
stpf = min(stpmax,stpf)
stpf = max(stpmin,stpf)
stp = stpf
end subroutine
! Minimizer of the cubic interpolant through (a0,f0) and (a1,f1) with end
! derivatives d0 and d1 (Nocedal & Wright eq. 3.59). Degenerate fits
! (non-positive discriminant or vanishing denominator) fall back to the
! interval midpoint.
function cubic_min(a0,f0,d0,a1,f1,d1)
implicit none
real(dp) :: a0,f0,d0,a1,f1,d1
real(dp) :: cubic_min
real(dp) :: t1,t2,disc,numer,denom
t1 = d0 + d1 - 3.0_dp*(f0-f1)/(a0-a1)
disc = t1**2.0 - d0*d1
! Discriminant too small: cubic has no usable interior minimum.
if (disc <= 10.0**(-10.0)*abs(a1-a0)) then
cubic_min = (a0 + a1)/2.0_dp
return
end if
t2 = sign(1.0_dp,a1-a0)*sqrt(disc)
numer = d1 + t2 - t1
denom = d1 - d0 + 2.0*t2
! Denominator numerically vanishing: fall back to the midpoint.
if (abs(denom) <= 10.0**(-8.0)*abs(numer)) then
cubic_min = (a0 + a1)/2.0_dp
else
cubic_min = a1 - (a1-a0)*(numer/denom)
end if
end function cubic_min
! Minimizer of the quadratic interpolant fitted to the value/derivative
! pair (a1,f1,g1) and the value f2 at a2: solve for the parabola's
! coefficients, then return the stationary point.
function quad_min(a1,a2,f1,f2,g1)
implicit none
real(dp) :: a1, a2, f1, f2, g1
real(dp) :: quad_min
real(dp) :: gap2, curv, lin
gap2 = (a1-a2)**2.0
! Leading (quadratic) coefficient of the fit.
curv = (f2 - f1 + a1*g1 - a2*g1)/gap2
! Linear coefficient of the fit.
lin = (2.0_dp*a1*(f1 - f2) - g1*gap2)/gap2
! Stationary point of curv*x^2 - lin*x + const.
quad_min = lin/(2.0_dp*curv)
end function quad_min
!function to choose initial guess of steplength
! Heuristic initial trial step: average alpha_j * d_j / d_{j+1} over up to
! the last five iterations; a fixed 0.01 is used on the first iteration.
function init_alpha(alpvec,dirderivvec,n_iter)
implicit none
real(dp) :: alpvec(:)       ! history of accepted step lengths
real(dp) :: dirderivvec(:)  ! history of directional derivatives
integer :: n_iter           ! current iteration number (1-based)
integer :: n_hist,back
real(dp) :: init_alpha
! No history yet: conservative fixed guess.
if (n_iter <= 1) then
init_alpha = 0.01_dp
return
end if
n_hist = min(n_iter-1,5)
init_alpha = 0.0
do back = 1,n_hist
init_alpha = init_alpha+alpvec(n_iter-back)*dirderivvec(n_iter-back)/dirderivvec(n_iter-back+1)
end do
init_alpha = init_alpha/n_hist
end function
! Approximately solve P * z = g by (restarted) conjugate gradient, i.e.
! apply the inverse of the preconditioner matrix to g. The recursion is a
! CG restart mechanism: after my_max_sub inner iterations the current
! iterate is fed back in through init, carrying the iteration count via
! init_k. If CG loses precision before converging, the best iterate (or,
! if that is garbage, the unmodified input g) is returned.
recursive function apply_precon(g,pr,doefunc,init,res2,max_iter,init_k,max_sub_iter,k_out,force_k) result (ap_result)
implicit none
real(dp) :: g(:) !to apply to
type (precon_data) :: pr
integer :: doefunc                 ! summation-mode flag for smartdotproduct
real(dp),optional :: init(size(g)) ! warm-start iterate (used on restarts)
real(dp),optional :: res2          ! squared-residual tolerance (default pr%res2)
integer,optional :: max_iter       ! total CG iteration cap (default pr%mat_mult_max_iter)
integer,optional :: init_k         ! iteration count carried across restarts
integer,optional :: max_sub_iter   ! iterations between restarts (default pr%max_sub)
integer,optional,intent(out) :: k_out ! total iterations used
integer,optional :: force_k        ! if present, run exactly force_k iterations
real(dp) :: ap_result(size(g))
integer :: k_out_internal
logical :: do_force_k
real(dp) :: x(size(g))             ! current iterate
real(dp) :: r(size(g))             ! residual g - P x
real(dp) :: p(size(g))             ! search direction
real(dp) :: Ap(size(g))
real(dp) :: passx(size(g))
real(dp) :: alpn,alpd,alp,betn,bet,betnold
real(dp) :: my_res2
integer :: my_max_iter, gs, my_max_sub
integer :: k,subk                  ! total / since-restart iteration counters
real(dp),parameter :: betnstop = 10.0_dp**(-10)  ! residual floor
real(dp),parameter :: alpdstop = 10.0_dp**(-14)  ! curvature floor
real(dp),parameter :: alpdsubbstop = 10.0_dp**(-1) ! "result still usable" threshold
call system_timer("apply_precon")
do_force_k = .false.
if(present(force_k)) then
do_force_k = .true.
end if
subk = 0
k = 0
if ( present(init_k) ) k = init_k
!call print(pr%mat_mult_max_iter)
gs = size(g)
my_res2 = optional_default(pr%res2,res2)
my_max_iter = optional_default(pr%mat_mult_max_iter,max_iter)
my_max_sub = optional_default(pr%max_sub,max_sub_iter)
! Initial residual: r = g - P x on a warm start, r = g on a cold start.
if ( present(init) ) then
x = init
r = g - do_mat_mult_vec(pr,x,doefunc)
p = r
else
x = 0.0_dp
r = g
p = r
end if
betn = smartdotproduct(r,r,doefunc)
!call print(p)
!call print(pr%preconrowlengths)
!call exit()
do
Ap = do_mat_mult_vec(pr,p,doefunc)
!call print(' ')
!call print(Ap)
alpn = smartdotproduct(r,r,doefunc)
alpd = smartdotproduct(p,Ap,doefunc)
! Curvature too small to trust: bail out with whatever we have.
if (alpd <= alpdstop) then
if ( betn < alpdsubbstop) then
call print("Gave up inverting matrix due to lack of precision, result may be of some use though")
ap_result = x
else
call print("Gave up inverting matrix due to lack of precision, result was garbage returning input")
ap_result = g
end if
if(present(k_out)) k_out = k
exit
end if
alp = alpn/alpd
!call print(alp)
x = x + alp*p
r = r - alp*Ap
betnold = betn
betn = smartdotproduct(r,r,doefunc)
!Usual exit condition
! NOTE(review): .and. binds tighter than .or., so this reads
! (betn<res2 .and. k>=10) .or. (betn<res2 .and. betn>betnold) -- confirm
! the second clause (converged but residual grew) is intended.
if ( ((betn < my_res2 .and. k >= 10) .or. betn < my_res2 .and. betn > betnold) .and. .not. do_force_k ) then
ap_result = x
if(present(k_out)) k_out = k
exit
end if
!Force iteration count search direction
if (do_force_k) then
if(k == force_k) then
ap_result = x
exit
endif
endif
! Safeguard on search direction
if (betn < betnstop ) then
ap_result = x
if(present(k_out)) k_out = k
exit
end if
!CG is not converging
! if (betn>betnold) then
! ap_result= x
! if(present(k_out)) k_out = k
! call print("CG failed to invert the preconditioner and aborted with |r|^2 = " // betn)
! exit
! end if
bet = betn/alpn
p = r + bet*p
k = k+1
subk = subk + 1
! Periodic restart: recurse with the current iterate as warm start.
if (subk >= my_max_sub) then
!call print("Restarting preconditioner inverter")
passx = x
ap_result = apply_precon(g,pr,doefunc,init=passx,res2=res2,max_iter=max_iter,init_k=k,max_sub_iter=my_max_sub,k_out=k_out_internal)
if (present(k_out)) then
k_out = k_out_internal
end if
exit
end if
if (k >= my_max_iter) then
if ( betn < 10.0**(-1)) then
call print("Gave up inverting preconditioner afer "// k // " iterations of CG, result may be of some use though")
ap_result = x
else
call print("Gave up inverting preconditioner afer "// k // " iterations of CG, result was garbage returning input")
ap_result = g
end if
if(present(k_out)) then
k_out = k
end if
exit
end if
end do
!call print(k)
call system_timer("apply_precon")
end function apply_precon
! Approximately invert the preconditioner by Gauss-Seidel-style sweeps:
! each sweep solves atom-row by atom-row using the scalar ("multI")
! coefficients, then the true residual r = b - P x is measured with
! do_mat_mult_vec. The first 9 components (cell degrees of freedom) are
! copied straight through; NOTE(review): this assumes pr%cell_coeff acts
! as the identity on that block -- confirm.
function apply_precon_gs(b,pr,doefunc,init,res2,max_iter,k_out,force_k) result (ap_result)
real(dp) :: b(:)
type(precon_data) :: pr
integer :: doefunc               ! E_FUNC_BASIC selects plain sums, otherwise Kahan
real(dp), optional :: init(:), res2
integer, optional :: max_iter, k_out, force_k
real(dp) :: ap_result(size(b))
integer :: my_max_iter, N, I, J, thisind, k
real(dp) :: my_res2, scoeff, r2
real(dp) :: x(size(b)), r(size(b))
integer :: target_elements(3), row_elements(3)
real(dp) :: Y(3),T(3),C(3)       ! Kahan compensated-summation workspace
logical :: do_force_k
N = size(b)
my_res2 = optional_default(pr%res2,res2)
my_max_iter = optional_default(pr%mat_mult_max_iter,max_iter)
if ( present(init) ) then
if (size(init) == size(b)) then
x = init
else
call print("init vector of incorrect dimension")
endif
else
x = 0.0_dp
end if
do_force_k = .false.
if(present(force_k)) then
do_force_k = .true.
end if
k=0
x(1:9) = b(1:9)
do
! One sweep over all atoms (3 DOF per atom, offset by the 9 cell DOFs).
do I = 1,size(pr%preconindices,DIM=2)
C = 0.0
target_elements = (/ I*3-2+9, I*3-1+9, I*3+9 /)
if (pr%multI) then
! Diagonal solve for this row block.
x(target_elements) = b(target_elements)/pr%preconcoeffs(1,I,1)
end if
!call print(pr%preconcoeffs(1,I,1))
! Subtract off-diagonal contributions (entry 1 of each row is the diagonal).
do J = 2,(pr%preconrowlengths(I))
thisind = pr%preconindices(J,I)
row_elements = (/ thisind*3-2+9, thisind*3-1+9, thisind*3+9/)
if (pr%multI) then
scoeff = pr%preconcoeffs(J,I,1)
if(doefunc == E_FUNC_BASIC) then
x(target_elements) = x(target_elements) - scoeff*x(row_elements)/pr%preconcoeffs(1,I,1)
else
! Same update, Kahan-compensated accumulation.
Y = -scoeff*x(row_elements)/pr%preconcoeffs(1,I,1) - C
T = x(target_elements) + Y
C = (T - x(target_elements)) - Y
x(target_elements) = T
endif
endif
end do
end do
!call print(x)
!call exit()
k=k+1
r = b - do_mat_mult_vec(pr,x,doefunc)
r2 = smartdotproduct(r,r,doefunc)
!call print(k // ' '// r2)
! NOTE(review): r2 is already |r|^2, so r2**2.0 compares |r|^4 against
! my_res2 while the give-up test below uses r2 directly -- confirm which
! power is intended.
if(r2**2.0<my_res2 .and. .not. do_force_k) then
if(present(k_out)) then
k_out = k
end if
ap_result = x
exit
end if
! Fixed-sweep mode: ignore the residual and run exactly force_k sweeps.
if( do_force_k) then
if(k== force_k) then
ap_result = x
!call print(r2)
exit
end if
end if
if (k >= my_max_iter) then
if ( r2 < 10.0**(-1)) then
call print("Gave up inverting preconditioner afer "// k // " iterations of GS, result may be of some use though")
ap_result = x
else
call print("Gave up inverting preconditioner afer "// k // " iterations of GS, result was garbage returning input")
ap_result = b
end if
if(present(k_out)) then
k_out = k
end if
exit
end if
end do
end function
! Semi-iterative preconditioner solve.
! NOTE(review): this routine appears unfinished and must not be called as
! written:
!  * yk, ykm1 and omega are read before ever being assigned;
!  * the loop body contains no exit statement, so it never terminates;
!  * "integer :: iter = 1" gives iter the implicit SAVE attribute, so it
!    would persist across calls;
!  * alpha is set but never used; beta, init, res2, max_iter, iter_out
!    and force_iter are never used.
function apply_precon_csi(b,pr,doefunc,init,res2,max_iter,iter_out,force_iter) result (ap_result)
implicit none
real(dp) :: b(:)
type(precon_data) :: pr
integer :: doefunc
real(dp), optional :: init(:), res2
integer, optional :: max_iter, iter_out, force_iter
real(dp) :: ap_result(size(b))
integer :: iter = 1
real(dp) :: ykp1(size(b)), yk(size(b)), ykm1(size(b)), zk(size(b))
real(dp) :: omega, gamm, alpha, beta
alpha = 0.0
do
! Residual of the current iterate, then a weighted three-term update.
zk = b - do_mat_mult_vec(pr,yk,doefunc)
gamm = 1.0
ykp1 = omega*(yk - ykm1 + gamm*zk) + ykm1
end do
ap_result = ykp1
end function
! Apply the preconditioner inverse (via CG in apply_precon) to each
! column of the matrix x independently.
function apply_precon_vecs(x,pr,doefunc)
real(dp) :: x(:,:)        ! columns are the vectors to precondition
type(precon_data) :: pr
integer :: doefunc        ! summation-mode flag passed through
real(dp) :: apply_precon_vecs(size(x,dim=1),size(x,dim=2))
integer :: col,ncols
ncols = size(x,dim=2)
apply_precon_vecs = 0.0_dp
do col = 1,ncols
apply_precon_vecs(1:,col) = apply_precon(x(1:,col),pr,doefunc)
end do
end function
! Apply the Gauss-Seidel preconditioner inverse to each column of x,
! running exactly force_k sweeps per column.
function apply_precon_vecs_gs(x,pr,doefunc,force_k)
real(dp) :: x(:,:)        ! columns are the vectors to precondition
type(precon_data) :: pr
integer :: doefunc        ! summation-mode flag passed through
integer :: force_k        ! fixed sweep count forwarded to apply_precon_gs
real(dp) :: apply_precon_vecs_gs(size(x,dim=1),size(x,dim=2))
integer :: col,ncols
ncols = size(x,dim=2)
apply_precon_vecs_gs = 0.0_dp
do col = 1,ncols
apply_precon_vecs_gs(1:,col) = apply_precon_gs(x(1:,col),pr,doefunc,force_k=force_k)
end do
end function
! Multiply the preconditioner matrix P by each column of x independently
! (columnwise wrapper around do_mat_mult_vec).
function do_mat_mult_vecs(pr,x,doefunc)
real(dp) :: x(:,:)        ! columns are the vectors to multiply
type(precon_data) :: pr
integer :: doefunc        ! summation-mode flag passed through
real(dp) :: do_mat_mult_vecs(size(x,dim=1),size(x,dim=2))
integer :: col,ncols
ncols = size(x,dim=2)
do_mat_mult_vecs = 0.0_dp
do col = 1,ncols
do_mat_mult_vecs(1:,col) = do_mat_mult_vec(pr,x(1:,col),doefunc)
end do
end function
! Multiply the preconditioner matrix P by a vector x. The first 9
! components are the cell degrees of freedom (scaled by pr%cell_coeff);
! atom I occupies components (3*I-2..3*I)+9. Rows with no stored
! neighbours act as the identity. Supports scalar ("multI") coefficients
! (with optional Kahan-compensated accumulation) and dense symmetric 3x3
! blocks stored as 6 unique entries (pr%dense).
function do_mat_mult_vec(pr,x,doefunc)
implicit none
real(dp) :: x(:)
type(precon_data) :: pr
integer :: doefunc               ! E_FUNC_BASIC selects plain sums, otherwise Kahan
real(dp) :: do_mat_mult_vec(size(x))
integer :: I,J,thisind,K,L
real(dp) :: scoeff               ! scalar coefficient for the multI case
real(dp) :: dcoeffs(6)           ! unique entries of a symmetric 3x3 block
integer,dimension(3) :: target_elements, row_elements
real(dp) :: C(3), T(3), Y(3)     ! Kahan compensated-summation workspace
do_mat_mult_vec = 0.0_dp
! Cell block: simple diagonal scaling.
do_mat_mult_vec(1:9) = pr%cell_coeff*x(1:9)
do I = 1,size(pr%preconindices,DIM=2)
!call print(pr%preconindices(1:pr%preconrowlengths(I),I))
target_elements = (/ I*3-2+9, I*3-1+9, I*3+9 /)
C = 0.0_dp
if (pr%preconrowlengths(I) >= 1) then
! Accumulate contributions from every stored neighbour of row I.
do J = 1,(pr%preconrowlengths(I))
thisind = pr%preconindices(J,I)
row_elements = (/ thisind*3-2+9, thisind*3-1+9, thisind*3+9/)
!call print(target_elements)
if (pr%multI) then
!call print(target_elements)
!call print(row_elements)
!call print(I // ' ' // thisind)
!call exit()
scoeff = pr%preconcoeffs(J,I,1)
if(doefunc == E_FUNC_BASIC) then
do_mat_mult_vec(target_elements) = do_mat_mult_vec(target_elements) + scoeff*x(row_elements)
else
! Same update, Kahan-compensated accumulation.
Y = scoeff*x(row_elements) - C
T = do_mat_mult_vec(target_elements) + Y
C = (T - do_mat_mult_vec(target_elements)) - Y
do_mat_mult_vec(target_elements) = T
endif
elseif(pr%dense) then
!call print(size(pr%preconcoeffs(J,I,1:)))
!call exit()
! Symmetric 3x3 block: 1,2,3 = first row; 4,5 = (2,2),(2,3); 6 = (3,3).
dcoeffs(1) = pr%preconcoeffs(J,I,1)
dcoeffs(2) = pr%preconcoeffs(J,I,2)
dcoeffs(3) = pr%preconcoeffs(J,I,3)
dcoeffs(4) = pr%preconcoeffs(J,I,4)
dcoeffs(5) = pr%preconcoeffs(J,I,5)
dcoeffs(6) = pr%preconcoeffs(J,I,6)
!dcoeffs(6) =0.0! pr%preconcoeffs(J,I,6)
!call print(dcoeffs)
!call exit()
!call writevec(dcoeffs,'dcoeffs.dat')
!call writevec(do_mat_mult(target_elements),'t1.dat')
!call writevec(x(row_elements),'r1.dat')
do_mat_mult_vec(target_elements(1)) = do_mat_mult_vec(target_elements(1)) + dcoeffs(1)*x(row_elements(1))
do_mat_mult_vec(target_elements(1)) = do_mat_mult_vec(target_elements(1)) + dcoeffs(2)*x(row_elements(2))
do_mat_mult_vec(target_elements(1)) = do_mat_mult_vec(target_elements(1)) + dcoeffs(3)*x(row_elements(3))
do_mat_mult_vec(target_elements(2)) = do_mat_mult_vec(target_elements(2)) + dcoeffs(2)*x(row_elements(1))
do_mat_mult_vec(target_elements(2)) = do_mat_mult_vec(target_elements(2)) + dcoeffs(4)*x(row_elements(2))
do_mat_mult_vec(target_elements(2)) = do_mat_mult_vec(target_elements(2)) + dcoeffs(5)*x(row_elements(3))
do_mat_mult_vec(target_elements(3)) = do_mat_mult_vec(target_elements(3)) + dcoeffs(3)*x(row_elements(1))
do_mat_mult_vec(target_elements(3)) = do_mat_mult_vec(target_elements(3)) + dcoeffs(5)*x(row_elements(2))
do_mat_mult_vec(target_elements(3)) = do_mat_mult_vec(target_elements(3)) + dcoeffs(6)*x(row_elements(3))
!call writevec(do_mat_mult(target_elements),'t2.dat')
!call exit()
end if
end do
else
! No stored neighbours: identity on this row block.
do_mat_mult_vec(target_elements) = x(target_elements)
end if
end do
end function
! Expand a scalar (one-coefficient-per-neighbour) preconditioner into the
! "genericdense" layout, in which each (row,neighbour) pair carries the 6
! unique entries of a symmetric 3x3 block.  The scalar coefficient is copied
! onto the block diagonal (slots 1, 4 and 6); off-diagonal slots (2, 3, 5)
! stay zero, i.e. each block becomes coeff * Identity(3).
!
! Fix: removed fifteen local declarations (multI, diag, dense,
! preconrowlengths, preconindices, preconcoeffs, precon_id, nneigh,
! mat_mult_max_iter, max_sub, energy_scale, length_scale, cutoff, res2,
! has_fixed) that shadowed the precon_data fields and were never used.
function convert_mat_to_dense(pr) result(prout)
  type(precon_data) :: pr      ! input preconditioner (scalar coefficients)
  type(precon_data) :: prout   ! output, with prout%dense = .true.
  integer :: M, N

  prout%dense = .true.
  prout%precon_id = "genericdense"

  ! Copy scalar metadata verbatim.
  prout%nneigh = pr%nneigh
  prout%mat_mult_max_iter = pr%mat_mult_max_iter
  prout%max_sub = pr%max_sub
  prout%energy_scale = pr%energy_scale
  prout%length_scale = pr%length_scale
  prout%cutoff = pr%cutoff
  prout%res2 = pr%res2
  prout%has_fixed = pr%has_fixed

  ! Deep-copy the sparsity structure.
  M = size(pr%preconrowlengths)
  allocate(prout%preconrowlengths(M))
  prout%preconrowlengths(1:M) = pr%preconrowlengths(1:M)
  M = size(pr%preconindices,dim=1)
  N = size(pr%preconindices,dim=2)
  allocate(prout%preconindices(M,N))
  prout%preconindices(1:M,1:N) = pr%preconindices(1:M,1:N)

  ! Expand coefficients: one scalar -> 6-entry symmetric block.
  M = size(pr%preconcoeffs,dim=1)
  N = size(pr%preconcoeffs,dim=2)
  allocate(prout%preconcoeffs(M,N,6))
  prout%preconcoeffs = 0.0
  prout%preconcoeffs(1:M,1:N,1) = pr%preconcoeffs(1:M,1:N,1)
  prout%preconcoeffs(1:M,1:N,4) = pr%preconcoeffs(1:M,1:N,1)
  prout%preconcoeffs(1:M,1:N,6) = pr%preconcoeffs(1:M,1:N,1)
end function
! Energy difference between two states, evaluated with the summation scheme
! selected by doefunc.  The basic scheme uses the pre-summed totals f1 - f0;
! the Kahan variants re-sum the per-atom local-energy differences (le1 - le0)
! with compensation to reduce floating-point cancellation.
! Fix: removed the unused local automatic array sorted0.
function calcdeltaE(doefunc,f1,f0,le1,le0)
  integer :: doefunc          ! E_FUNC_BASIC / E_FUNC_KAHAN / E_FUNC_DOUBLEKAHAN
  real(dp) :: f1, f0          ! total energies (used by the basic scheme only)
  real(dp) :: le1(:), le0(:)  ! local (per-atom) energies of the two states
  real(dp) :: sorted1(size(le1))
  real(dp) :: calcdeltaE
  if (doefunc == E_FUNC_BASIC) then
    calcdeltaE = f1 - f0
  elseif (doefunc == E_FUNC_KAHAN) then
    calcdeltaE = KahanSum(le1 - le0)
  elseif (doefunc == E_FUNC_DOUBLEKAHAN) then
    ! Order terms by decreasing magnitude before compensated summation.
    sorted1 = qsort(le1-le0)
    calcdeltaE = DoubleKahanSum(sorted1)
  endif
end function
! Total energy of a single state under the summation scheme selected by
! doefunc: either the pre-summed total f0, or a (double-)Kahan compensated
! re-summation of the per-atom local energies le0.
function calcE(doefunc,f0,le0)
  integer :: doefunc
  real(dp) :: f0
  real(dp) :: le0(:)
  real(dp) :: sorted0(size(le0))
  real(dp) :: calcE
  select case (doefunc)
  case (E_FUNC_BASIC)
    calcE = f0
  case (E_FUNC_KAHAN)
    calcE = KahanSum(le0)
  case (E_FUNC_DOUBLEKAHAN)
    ! Sort by decreasing magnitude, then sum with double compensation.
    sorted0 = qsort(le0)
    calcE = DoubleKahanSum(sorted0)
  end select
end function
! Dot product of v1 and v2 evaluated with the summation scheme selected by
! doefunc.  Aborts the program if the vectors differ in length.
function smartdotproduct(v1,v2,doefunc)
  integer :: doefunc
  real(dp) :: v1(:),v2(:)
  real(dp) :: vec(size(v1)),sorted(size(v1))
  real(dp) :: smartdotproduct
  if (size(v1) /= size(v2)) then
    call print("Dot Product called with mismatching vector sizes, exiting")
    call exit()
  end if
  ! Elementwise products, then sum with the requested compensation level.
  vec = v1*v2
  select case (doefunc)
  case (E_FUNC_BASIC)
    smartdotproduct = sum(vec)
  case (E_FUNC_KAHAN)
    smartdotproduct = KahanSum(vec)
  case (E_FUNC_DOUBLEKAHAN)
    sorted = qsort(vec)
    smartdotproduct = DoubleKahanSum(sorted)
  end select
end function
! Matrix-matrix product computed entry-by-entry with smartdotproduct, so the
! selected summation scheme (doefunc) is applied to every inner product.
function smartmatmulmat(m1,m2,doefunc) result(prod)
  real(dp) :: m1(:,:), m2(:,:)
  integer :: doefunc
  real(dp) :: prod(size(m1,dim=1),size(m2,dim=2))
  integer :: row, col
  do col = 1, size(m2,dim=2)
    do row = 1, size(m1,dim=1)
      prod(row,col) = smartdotproduct(m1(row,1:), m2(1:,col), doefunc)
    end do
  end do
end function
! Matrix-vector product computed row-by-row with smartdotproduct, so the
! selected summation scheme (doefunc) is applied to each inner product.
function smartmatmulvec(m1,m2,doefunc) result(prod)
  real(dp) :: m1(:,:), m2(:)
  integer :: doefunc
  real(dp) :: prod(size(m1,dim=1))
  integer :: row
  do row = 1, size(m1,dim=1)
    prod(row) = smartdotproduct(m1(row,1:), m2, doefunc)
  end do
end function
! Compensated (Kahan) summation of vec: a running compensation term captures
! the low-order bits lost in each addition and re-injects them into the next
! term, greatly reducing accumulated round-off.
function KahanSum(vec)
  real(dp) :: vec(:)
  real(dp) :: KahanSum
  integer :: I
  real(dp) :: comp, tot, term
  KahanSum = 0.0
  comp = 0.0
  do I = 1, size(vec)
    term = vec(I) - comp
    tot = KahanSum + term
    comp = (tot - KahanSum) - term   ! rounding error recovered from this add
    KahanSum = tot
  end do
end function
! Doubly compensated (Kahan-Babuska style) summation of vec: compensates the
! rounding error of both the term addition and the total addition, giving
! better accuracy than plain Kahan when partial sums vary in magnitude.
function DoubleKahanSum(vec)
  real(dp) :: vec(:)
  real(dp) :: DoubleKahanSum
  integer :: I
  real(dp) :: carry, s1, e1, s2, e2, errsum
  DoubleKahanSum = 0.0
  carry = 0.0
  do I = 1, size(vec)
    s1 = carry + vec(I)
    e1 = vec(I) - (s1 - carry)            ! error of adding the new term
    s2 = s1 + DoubleKahanSum
    e2 = s1 - (s2 - DoubleKahanSum)       ! error of adding to the total
    errsum = e1 + e2
    DoubleKahanSum = s2 + errsum
    carry = errsum - (DoubleKahanSum - s2)
  end do
end function
! Recursive quicksort of *data* into DECREASING order of absolute value
! (largest-magnitude entries first), the ordering wanted by the
! double-Kahan summation routines.
recursive function qsort( data ) result( sorted )
  real(dp), dimension(:), intent(in) :: data
  real(dp), dimension(1:size(data)) :: sorted
  if ( size(data) <= 1 ) then
    sorted = data
  else
    ! Pivot on the first element; partition the rest by |.| around it.
    sorted = (/ qsort( pack( data(2:), abs(data(2:)) > abs(data(1)) ) ), &
                data(1), &
                qsort( pack( data(2:), abs(data(2:)) <= abs(data(1)) ) ) /)
  endif
end function
! Finite-difference check of the analytic gradient dfunc against central
! differences of func.  For every degree of freedom, the central difference
! is evaluated at 'levels' step sizes (eps = 1, 0.1, ..., 1e-9); the analytic
! gradient goes in row 1 of 'grads' and the FD estimates in rows 2..levels+1,
! and the whole matrix is dumped to 'gradsGab.dat'.
! NOTE(review): local-energy arrays are sized (size(x)-9)/3 -- presumably
! 9 cell DOFs followed by 3 coordinates per atom; confirm against callers.
subroutine precongradcheck(x,func,dfunc,data)
real(dp) :: x(:)
INTERFACE
function func(x,data,local_energy)
use system_module
real(dp)::x(:)
character(len=1),optional::data(:)
real(dp), intent(inout),optional :: local_energy(:)
real(dp)::func
end function func
end INTERFACE
INTERFACE
function dfunc(x,data)
use system_module
real(dp)::x(:)
character(len=1),optional::data(:)
real(dp)::dfunc(size(x))
end function dfunc
END INTERFACE
character(len=1) :: data(:)
integer :: N, I, J, levels
real(dp) :: eps,initeps
real(dp) :: f1,f2,deltaE
real(dp), allocatable :: grads(:,:), le1(:),le2(:),xp(:),xm(:)
initeps = 1.0
levels = 10
N = size(x)
! Row 1: analytic gradient; rows 2..levels+1: FD estimates per step size.
allocate(grads(levels+1,N))
allocate(le1( (size(x) - 9)/3))
allocate(le2( (size(x) - 9)/3))
allocate(xp(N))
allocate(xm(N))
grads(1,1:N) = dfunc(x,data)
do I = 1,N
eps = initeps
do J = 1,levels
call print(I // " of "//N// '; '//J // ' of ' //levels)
! Perturb a single coordinate by +/- eps and form the central difference.
xp = x
xm = x
xp(I) = xp(I) + eps
xm(I) = xm(I) - eps
f1 = func(xp,data,le1)
f2 = func(xm,data,le2)
! Kahan-summed energy difference avoids cancellation at small eps.
deltaE = calcdeltaE(E_FUNC_KAHAN ,f1,f2,le1,le2)
grads(J+1,I) = deltaE/(2.0*eps)
eps = eps/10.0
end do
end do
call writemat(grads,'gradsGab.dat')
end subroutine
! Sanity check of the energy along the steepest-descent direction: evaluates
! the (Kahan-summed) local energy at N = 1000 points x - I*stepsize*g for the
! analytic gradient g and writes the profile to 'sanity.dat' for plotting.
subroutine sanity(x,func,dfunc,data)
real(dp) :: x(:)
INTERFACE
function func(x,data,local_energy,gradient)
use system_module
real(dp)::x(:)
character(len=1),optional::data(:)
real(dp), intent(inout),optional :: local_energy(:)
real(dp), intent(inout),optional :: gradient(:)
real(dp)::func
end function func
end INTERFACE
INTERFACE
function dfunc(x,data)
use system_module
real(dp)::x(:)
character(len=1),optional::data(:)
real(dp)::dfunc(size(x))
end function dfunc
END INTERFACE
character(len=1) :: data(:)
real(dp), allocatable :: output(:),le(:)
integer :: I
real(dp),parameter :: stepsize = 10**(-3.0)
integer :: N = 1000
real(dp) :: g(size(x))
real(dp) :: f
! Descent direction: negative of the analytic gradient at x.
g = dfunc(x,data)
! Local-energy array sized as in precongradcheck: (size(x)-9)/3 entries.
allocate(le( (size(x)-9)/3 ))
allocate(output(N))
do I = 1,N
call print(I // ' of ' // N)
! NOTE(review): f (the total returned by func) is discarded; the profile
! stored is the Kahan re-summation of the local energies.
f = func(x-I*stepsize*g,data,le)
output(I) = KahanSum(le)
end do
call writevec(output,'sanity.dat')
end subroutine
! Infinity norm of v: the maximum of |v(i)| over all elements.
! Bug fix: the running maximum previously stored the SIGNED value v(I)
! instead of abs(v(I)), so a large-magnitude negative entry could make the
! returned "norm" wrong (even negative).
function infnorm(v)
  real(dp) :: v(:)
  real(dp) :: infnorm, temp
  integer :: l, I
  l = size(v)
  temp = abs(v(1))
  do I = 2,l
    if ( abs(v(I)) > temp ) temp = abs(v(I))
  end do
  infnorm = temp
end function
! Debug utility: overwrite *filename* with the contents of a real vector
! using list-directed formatting.
subroutine writevec(vec,filename)
  real(dp) :: vec(:)
  character(*) :: filename
  integer :: fileunit = 10
  open(unit=fileunit, file=filename, status="replace", action="write")
  write(fileunit,*) vec
  close(fileunit)
end subroutine
! Debug utility: overwrite *filename* with the contents of an integer vector
! using list-directed formatting.
subroutine writeveci(vec,filename)
  integer :: vec(:)
  character(*) :: filename
  integer :: fileunit = 10
  open(unit=fileunit, file=filename, status="replace", action="write")
  write(fileunit,*) vec
  close(fileunit)
end subroutine
! Debug utility: overwrite *filename* with a real matrix, written in
! Fortran's native column-major order via list-directed formatting.
subroutine writemat(mat,filename)
  real(dp) :: mat(:,:)
  character(*) :: filename
  integer :: fileunit = 10
  open(unit=fileunit, file=filename, status="replace", action="write")
  write(fileunit,*) mat
  close(fileunit)
end subroutine
! Dump all three components of a preconditioner -- coefficients, neighbour
! indices and row lengths -- to files named by appending 'coeffs',
! 'indices' and 'lengths' to *filename*.
subroutine writeprecon(precon,filename)
type(precon_data) :: precon
character(*) :: filename
call writepreconcoeffs(precon,filename // 'coeffs')
call writepreconindices(precon,filename // 'indices')
call writepreconrowlengths(precon,filename // 'lengths')
end subroutine
! Debug utility: overwrite *filename* with the preconditioner's coefficient
! array using list-directed formatting.
subroutine writepreconcoeffs(precon,filename)
  type(precon_data) :: precon
  character(*) :: filename
  integer :: fileunit = 10
  open(unit=fileunit, file=filename, status="replace", action="write")
  write(fileunit,*) precon%preconcoeffs
  close(fileunit)
end subroutine
! Debug utility: overwrite *filename* with the preconditioner's neighbour
! index array using list-directed formatting.
subroutine writepreconindices(precon,filename)
  type(precon_data) :: precon
  character(*) :: filename
  integer :: fileunit = 10
  open(unit=fileunit, file=filename, status="replace", action="write")
  write(fileunit,*) precon%preconindices
  close(fileunit)
end subroutine
! Debug utility: overwrite *filename* with the preconditioner's row-length
! array using list-directed formatting.
subroutine writepreconrowlengths(precon,filename)
  type(precon_data) :: precon
  character(*) :: filename
  integer :: fileunit = 10
  open(unit=fileunit, file=filename, status="replace", action="write")
  write(fileunit,*) precon%preconrowlengths
  close(fileunit)
end subroutine
! Debug utility: dump the most recent n_back history columns of the L-BFGS
! step matrix (s) and gradient-difference matrix (y) to the files
! <filename>s and <filename>y respectively.
subroutine writeLBFGS(LBFGSs,LBFGSy,n_back,filename)
  real(dp):: LBFGSs(:,:), LBFGSy(:,:)
  integer :: n_back
  character(*) :: filename
  integer :: sunit = 10
  integer :: yunit = 11
  integer :: ncols
  ncols = size(LBFGSs,dim=2)
  ! Only the last n_back columns of each history matrix are written.
  open(unit=sunit, file=(filename//'s'), status="replace", action="write")
  write(sunit,*) LBFGSs(1:,(ncols-n_back+1):)
  close(sunit)
  open(unit=yunit, file=(filename//'y'), status="replace", action="write")
  write(yunit,*) LBFGSy(1:,(ncols-n_back+1):)
  close(yunit)
end subroutine
! Preconditioned dimer (saddle-point search) method.
! Translates x_in towards a saddle point of func while rotating the dimer
! orientation v_in into a low-curvature mode.  The preconditioner built by
! build_precon is used both as a metric (Pdotproduct) and to precondition
! the translation/rotation forces (apply_precon).  Energies are compared
! with the summation scheme selected by efuncroutine (basic/kahan/
! doublekahan).  On exit x_in holds the final position.
! NOTE(review): `neval` is incremented before it is ever initialised, so
! the printed function-evaluation counts start from an undefined value.
! NOTE(review): the integer function result `precondimer` is never
! assigned before return.
! NOTE(review): `method`, `dfunc`, `convergence_tol` (used), `LM`,
! `linminroutine` (only triggers a message), `hook`, `hook_print_interval`,
! `status`, `writehessian`, `gethessian` and `infoverride` are accepted but
! not otherwise used in this routine.
function precondimer(x_in,v_in,func,dfunc,build_precon,pr,method,convergence_tol,max_steps,efuncroutine,LM, linminroutine, hook, hook_print_interval, am_data, status,writehessian,gethessian,infoverride)
implicit none
real(dp), intent(inout) :: x_in(:) !% Starting position
real(dp), intent(inout) :: v_in(:) !% Starting dimer orientation
INTERFACE
function func(x,data,local_energy,gradient)
use system_module
real(dp)::x(:)
character(len=1),optional::data(:)
real(dp), intent(inout),optional :: local_energy(:)
real(dp), intent(inout),optional :: gradient(:)
real(dp)::func
end function func
end INTERFACE
INTERFACE
function dfunc(x,data)
use system_module
real(dp)::x(:)
character(len=1),optional::data(:)
real(dp)::dfunc(size(x))
end function dfunc
END INTERFACE
INTERFACE
subroutine build_precon(pr,am_data)
use system_module
import precon_data
type(precon_data),intent(inout) ::pr
character(len=1)::am_data(:)
end subroutine
END INTERFACE
type(precon_data):: pr
character(*), intent(in) :: method !% 'cg' for conjugate gradients or 'sd' for steepest descent
real(dp), intent(in) :: convergence_tol !% Minimisation is treated as converged once $|\mathbf{\nabla}f|^2 <$
!% 'convergence_tol'.
integer, intent(in) :: max_steps !% Maximum number of 'cg' or 'sd' steps
integer::precondimer
character(*), intent(in), optional :: efuncroutine !% Control of the objective function evaluation
character(*), intent(in), optional :: linminroutine !% Name of the line minisation routine to use.
integer, optional :: LM
optional :: hook
INTERFACE
subroutine hook(x,dx,E,done,do_print,data)
use system_module
real(dp), intent(in) ::x(:)
real(dp), intent(in) ::dx(:)
real(dp), intent(in) ::E
logical, intent(out) :: done
logical, optional, intent(in) :: do_print
character(len=1),optional, intent(in) ::data(:)
end subroutine hook
end INTERFACE
integer, intent(in), optional :: hook_print_interval
character(len=1), optional, intent(inout) :: am_data(:)
integer, optional, intent(out) :: status
optional :: writehessian
INTERFACE
subroutine writehessian(x,data,filename)
use system_module
real(dp) :: x(:)
character(len=1)::data(:)
character(*) :: filename
end subroutine writehessian
end INTERFACE
optional :: gethessian
INTERFACE
subroutine gethessian(x,data,FDHess)
use system_module
real(dp),intent(in):: x(:)
character(len=1),intent(in)::data(:)
real(dp),intent(inout) :: FDHess(:,:)
end subroutine gethessian
end INTERFACE
real(dp), optional :: infoverride
integer :: N,k,k2,k3,kmax,k2max
! h: finite-difference half-length of the dimer.
real(dp) :: h = 1e-3_dp
real(dp), parameter :: pi = 4.0_dp*datan(1.0_dp)
real(dp), parameter :: TOLvdefault = 10.0_dp**(-1)
real(dp), allocatable :: x(:), F1(:), F2(:), F10(:), F20(:), v(:), vstar(:), Gd(:), s(:), sl2(:), Gv(:), Gvp(:), Gx(:), Gxp(:), Gdl2(:), Gvpold(:), Gxpold(:), Gs(:), Qx(:)
real(dp), allocatable :: local_energy1(:),local_energy2(:),local_energy10(:),local_energy20(:),alpvec(:),dirderivvec(:)
real(dp) :: alpha_x,crit,avn,dC,delE,e1,e10,e2,e20,lam,res_v,res_v_rot,res_x,rotC,traC,TOLv,dt
logical :: rotationfailed, totalfailure
logical :: doLSbasic, doLSstandard
integer :: doefunc
logical :: noimprove
real(dp) :: res_x_hist(5) = 1000.0_dp
integer :: neval
! Sufficient-decrease constants for the rotation (rotC) and translation
! (traC) backtracking tests; alpha_x is the (adaptive) translation step.
alpha_x = 0.01
rotC = 10.0_dp**(-3)
traC = 10.0_dp**(-3)
TOLv = TOLvdefault
kmax = max_steps
k2max = 5
if ( present(linminroutine) ) then
call print('linmin options not currently supported by dimer')
! if (trim(linminroutine) == 'basic') then
! call print('Using basic backtracking linesearch')
! doLSbasic = .TRUE.
! elseif (trim(linminroutine) == 'standard') then
! call print('Using standard two-stage linesearch with cubic interpolation in the zoom phase, with bisection as backup')
! call print('Not recommended for dimer method!!!')
! doLSstandard = .TRUE.
! end if
! else
! doLSbasic = .true.
end if
N = size(x_in)
! Local-energy arrays sized (N-9)/3: presumably 9 cell DOFs plus 3
! coordinates per atom -- TODO confirm against callers.
allocate(local_energy1((N-9)/3),local_energy2((N-9)/3),local_energy10((N-9)/3),local_energy20((N-9)/3))
allocate(x(N),F1(N),F2(N),F10(N),F20(N),v(N),vstar(N),Gd(N),s(N),sl2(N),Gv(N),Gvp(N),Gx(N),Gxp(N),Gdl2(N),Gvpold(N),Gxpold(N),Qx(N),gs(N))
allocate(alpvec(max_steps))
allocate(dirderivvec(max_steps))
!open(1,file='dimerplot.dat',status='replace',access='stream',action='write')
! Select the energy-summation scheme (default: naive summation).
doefunc = E_FUNC_BASIC
if ( present(efuncroutine) ) then
if (trim(efuncroutine) == 'basic') then
doefunc = E_FUNC_BASIC
call print('Using naive summation of local energies')
elseif (trim(efuncroutine) == 'kahan') then
doefunc = E_FUNC_KAHAN
! allocate(local_energycand((size(x)-9)/3))
call print('Using Kahan summation of local energies')
elseif (trim(efuncroutine) == 'doublekahan') then
doefunc = E_FUNC_DOUBLEKAHAN
! allocate(local_energycand((size(x)-9)/3))
call print('Using double Kahan summation of local energies with quicksort')
end if
else
doefunc = E_FUNC_BASIC
call print('Using naive summation of local energies by default')
end if
x = x_in
v = v_in
!call random_number(d)
! avn: current trial rotation angle, reset never; starts at pi/4.
avn = pi/4.0
k = 0
! ---- Main dimer loop: one rotation + one translation per iteration ----
do
call build_precon(pr,am_data)
! Normalise the dimer direction in the preconditioner metric.
v = v/sqrt(Pdotproduct(v,v,pr,doefunc))
call writeprecon(pr,'dimerpr')
#ifndef _OPENMP
call verbosity_push_decrement(2)
#endif
! Energies/forces at the two dimer images x +/- h*v.
e1 = func_wrapper(func,x+h*v,am_data,local_energy=local_energy1,gradient=F1,doefunc=doefunc)
e2 = func_wrapper(func,x-h*v,am_data,local_energy=local_energy2,gradient=F2,doefunc=doefunc)
! NOTE(review): neval has not been initialised before this increment.
neval = neval + 2
#ifndef _OPENMP
call verbosity_pop()
#endif
Gxpold = Gxp
! Translational force: average of the two image gradients.
Gx = 0.5_dp*(F1 + F2)
if (k > 0) then
Gxp = apply_precon(Gx,pr,doefunc,init=Gxpold)
else
Gxp = apply_precon(Gx,pr,doefunc)
!call exit()
end if
res_x = sqrt(smartdotproduct(Gxp,Gx,doefunc))
! Keep a 5-entry history of the translation residual (for the disabled
! stagnation check below).
res_x_hist(1:4) = res_x_hist(2:5)
res_x_hist(5) = res_x
!call print(abs(sum(res_x_hist)/5.0_dp) )
! noimprove = .false.
! if ( abs(sum(res_x_hist)/5.0_dp - res_x) < 10.0_dp**(-2)) then
! noimprove = .true.
! end if
!
! if (noimprove) then
! call print("Residual does not seem to be improving, switching to simple dimer method")
! k = simpleprecondimer(x,v,h,func,am_data,build_precon,pr,doefunc,0.005_dp,0.005_dp)
! exit
! end if
Gvpold = Gvp
! Rotational force: finite-difference gradient along the dimer axis;
! lam approximates the curvature along v.
Gv = (F1 - F2)/(2.0_dp*h)
if (k > 0) then
Gvp = apply_precon(Gv,pr,doefunc,init=Gvpold)
else
Gvp = apply_precon(Gv,pr,doefunc)
end if
lam = smartdotproduct(Gv,v,doefunc)
! Gd: preconditioned rotational force projected off the dimer axis.
Gd = Gvp - lam*v
Gdl2 = Gv - lam*v
res_v = sqrt(Pdotproduct(Gd,Gd,pr,doefunc))
call print('precon_dimer n_iter = ' // k // ', res_x = ' // res_x // ', res_v = ' // res_v // ', lam = ' // lam //', alpha = '// alpha_x // ', nf = '//neval)
if (res_x < convergence_tol) then
call print('Precon Dimer exiting with translation residual = ' //res_x)
exit
end if
rotationfailed = .false.
totalfailure = .false.
k2 = 0
! ---- Rotation phase: backtrack on the angle avn until the dimer
! energy decreases enough (sufficient-decrease test with rotC) ----
do while (res_v > max(TOLv,res_x))
s = -Gd
sl2 = -Gdl2
dC = smartdotproduct(s,sl2,doefunc)
e10 = e1
e20 = e2
F10 = F1
F20 = F2
local_energy10 = local_energy1
local_energy20 = local_energy2
if (rotationfailed .eqv. .false.) then
k3 = 0
do
crit = -h*h*rotC*dC*avn
! Trial orientation: rotate v towards s by angle avn, renormalise.
vstar = cos(avn)*v + sin(avn)*s
vstar = vstar/sqrt(Pdotproduct(vstar,vstar,pr,doefunc))
#ifndef _OPENMP
call verbosity_push_decrement(2)
#endif
e1 = func_wrapper(func,x+h*vstar,am_data,local_energy=local_energy1,gradient=F1,doefunc=doefunc)
#ifndef _OPENMP
call verbosity_pop()
#endif
#ifndef _OPENMP
call verbosity_push_decrement(2)
#endif
e2 = func_wrapper(func,x-h*vstar,am_data,local_energy=local_energy2,gradient=F2,doefunc=doefunc)
#ifndef _OPENMP
call verbosity_pop()
#endif
neval = neval + 2
delE = calcdeltaE(doefunc,e1,e10,local_energy1,local_energy10) + calcdeltaE(doefunc,e2,e20,local_energy2,local_energy20)
call print("precon_dimer rotation inner, theta = " // avn // ", delE = "// delE//" crit = "//crit)
! call print(avn // ' '// delE // ' ' //crit//' '//norm(v-vstar))
!call print(e2// ' '//e20)
if (delE < crit) then
v = vstar
exit
end if
! Give up the rotation once delE is at floating-point noise level.
if (abs(delE) < 10.0**(-12) .and. delE < 10.0**(-15)) then
rotationfailed = .true.
call print('Ran out of precision in objective based rotation')
exit
else
avn = avn/2.0_dp
end if
end do
else
exit
end if
if (rotationfailed .eqv. .false.) then
! Recompute rotational force/curvature at the accepted orientation.
Gvpold = Gvp;
Gv = (F1 - F2)/(2.0_dp*h)
Gvp = apply_precon(Gv,pr,doefunc,init=Gvpold)
lam = smartdotproduct(Gv,v,doefunc)
Gd = Gvp - lam*v
Gdl2 = Gv - lam*v
res_v = sqrt(Pdotproduct(Gd,Gd,pr,doefunc))
end if
call print('precon_dimer rotating, iter = '// k2 //',theta = '//avn// ',delE = ' //delE //',res_v = '//res_v)
k2 = k2 + 1
if (res_v < TOLv .or. totalfailure .eqv. .true. .or. k2 > k2max) then
!call print(res_v // ' ' //k2)
exit
end if
end do
#ifndef _OPENMP
call verbosity_push_decrement(2)
#endif
! Re-evaluate both images at the (possibly) new orientation before
! translating.
e1 = func_wrapper(func,x+h*v,am_data,local_energy=local_energy1,gradient=F1,doefunc=doefunc)
#ifndef _OPENMP
call verbosity_pop()
#endif
#ifndef _OPENMP
call verbosity_push_decrement(2)
#endif
e2 = func_wrapper(func,x-h*v,am_data,local_energy=local_energy2,gradient=F2,doefunc=doefunc)
#ifndef _OPENMP
call verbosity_pop()
#endif
neval = neval + 2
Gx = 0.5*(F1+F2)
Gxpold = Gxp
Gxp = apply_precon(Gx,pr,doefunc,init=Gxpold)
! Effective translation force: gradient with the component along v
! reversed (the modified/dimer force), both raw (gs) and preconditioned (s).
gs = -Gx + 2.0*smartdotproduct(v,Gx,doefunc)*v
s = -Gxp + 2.0*smartdotproduct(v,Gx,doefunc)*v
dt = -smartdotproduct(gs,s,doefunc)
if (dt > 0) then
gs = -gs
dt = -dt
end if
!alpha_x = 2.0*init_alpha(alpvec,dirderivvec,k)
! Optimistically double the previous step, then backtrack below.
alpha_x = 2.0*alpha_x
e10 = e1
e20 = e2
F10 = F1
F20 = F2
local_energy10 = local_energy1
local_energy20 = local_energy2
res_v_rot = res_v
k2 = 0
! ---- Translation phase: backtracking line search on alpha_x using the
! dimer energy model in dimerdelE ----
do
delE = dimerdelE(x+alpha_x*s,v,h,lam,x,Gx,pr,func,am_data,doefunc,e10,e20,local_energy10,local_energy20)
crit = dt*traC*alpha_x
neval = neval + 2
call print('precon_dimer translating, iter = '// k2 //', alpha = '//alpha_x// ', delE = ' //delE)
if (delE < crit + 10.0**(-14) .and. res_v < 10.0*res_v_rot) then
x = x + alpha_x*s
exit
end if
alpha_x = alpha_x/2.0
k2 = k2 + 1
end do
k = k + 1
dirderivvec(k) = dt
alpvec(k) = alpha_x
if ( k >= max_steps) then
exit
end if
end do
x_in = x
end function
! Model energy change used by the dimer translation line search: the mean
! change of the two image energies at the trial point x, corrected by the
! first-order term along the dimer axis and the curvature (lam) term for
! the displacement component Qx parallel to v.  Reference energies
! e10/e20 (and local energies) are those at the current position x0.
function dimerdelE(x,v,h,lam,x0,gx0,pr,func,am_data,doefunc,e10,e20,local_energy10,local_energy20) result(delE)
real(dp) :: x(:),v(:),x0(:),gx0(:)
type(precon_data) :: pr
real(dp) :: h,lam
character(len=1), intent(inout) :: am_data(:)
INTERFACE
function func(x,data,local_energy,gradient)
use system_module
real(dp)::x(:)
character(len=1),optional::data(:)
real(dp), intent(inout),optional :: local_energy(:)
real(dp), intent(inout),optional :: gradient(:)
real(dp)::func
end function func
end INTERFACE
real(dp) :: delE
integer :: doefunc
real(dp) :: e10,e20,local_energy10(:),local_energy20(:)
real(dp) :: e1,e2,local_energy1(size(local_energy10)),local_energy2(size(local_energy20))
real(dp) :: Qx(size(x))
real(dp) :: F1(size(x)), F2(size(x))
! Component of the trial displacement parallel to the dimer axis.
Qx = smartdotproduct(v,x-x0,doefunc)*v
#ifndef _OPENMP
call verbosity_push_decrement(2)
#endif
! Energies of the two dimer images at the trial position.
e1 = func_wrapper(func,x+h*v,am_data,local_energy=local_energy1,gradient=F1,doefunc=doefunc)
e2 = func_wrapper(func,x-h*v,am_data,local_energy=local_energy2,gradient=F2,doefunc=doefunc)
#ifndef _OPENMP
call verbosity_pop()
#endif
delE = (calcdeltaE(doefunc,e1,e10,local_energy1,local_energy10) + calcdeltaE(doefunc,e2,e20,local_energy2,local_energy20))/2.0_dp &
-2.0*Pdotproduct(v,x-x0,pr,doefunc)*smartdotproduct(v,gx0,doefunc) - lam*Pdotproduct(Qx,Qx,pr,doefunc)
! Gv = (F1 - F2)/(2.0_dp*h)
! if (k > 0) then
! Gvp = apply_precon(Gv,pr,doefunc,init=Gvpold)
! else
! Gvp = apply_precon(Gv,pr,doefunc)
! end if
! lam = smartdotproduct(Gv,v,doefunc)
! Gd = Gvp - lam*v
! res_v = sqrt(Pdotproduct(Gd,Gd,pr,doefunc))
!
end function
! Simple (fixed-step) preconditioned dimer iteration: at every step the
! orientation v takes a plain gradient step (-alpha_v*Gvp) and the position
! x a modified-force step, with no line searches.  Runs for kmax = 1000
! iterations and returns the iteration count k.
! NOTE(review): Gxp and Gvp are copied into Gxpold/Gvpold before their
! first assignment (iterations k = 0 and 1 use the else branches, so the
! uninitialised copies are never read -- but the pattern is fragile).
! NOTE(review): alpvec, dirderivvec, F10, F20, local_energy10/20 and
! several other locals are declared/allocated but never used; e1 and e2
! are computed but unused.
function simpleprecondimer(x,v,h,func,am_data,build_precon,pr,doefunc,alpha_x,alpha_v) result(k)
real(dp), intent(inout) :: x(:),v(:)
real(dp) :: h
INTERFACE
function func(x,data,local_energy,gradient)
use system_module
real(dp)::x(:)
character(len=1),optional::data(:)
real(dp), intent(inout),optional :: local_energy(:)
real(dp), intent(inout),optional :: gradient(:)
real(dp)::func
end function func
end INTERFACE
character(len=1), intent(inout) :: am_data(:)
INTERFACE
subroutine build_precon(pr,am_data)
use system_module
import precon_data
type(precon_data),intent(inout) ::pr
character(len=1)::am_data(:)
end subroutine
END INTERFACE
type(precon_data):: pr
integer :: doefunc
real(dp) :: alpha_x,alpha_v
integer :: k,N
integer :: kmax = 1000
real(dp) :: e1,e2,lam,res_x,res_v
real(dp), allocatable :: F1(:), F2(:), F10(:), F20(:), vstar(:), Gd(:), s(:), sl2(:), Gv(:), Gvp(:), Gx(:), Gxp(:), Gdl2(:), Gvpold(:), Gxpold(:), Gs(:), Qx(:)
real(dp), allocatable :: local_energy1(:),local_energy2(:),local_energy10(:),local_energy20(:),alpvec(:),dirderivvec(:)
N = size(x,1)
allocate(local_energy1((N-9)/3),local_energy2((N-9)/3))
allocate(F1(N),F2(N),F10(N),F20(N),vstar(N),Gd(N),s(N),sl2(N),Gv(N),Gvp(N),Gx(N),Gxp(N),Gdl2(N),Gvpold(N),Gxpold(N),Qx(N),gs(N))
k = 0
do
call build_precon(pr,am_data)
! Normalise the dimer direction in the preconditioner metric.
v = v/sqrt(Pdotproduct(v,v,pr,doefunc))
#ifndef _OPENMP
call verbosity_push_decrement(2)
#endif
! Forces at the two dimer images x +/- h*v.
e1 = func_wrapper(func,x+v*h,am_data,local_energy=local_energy1,gradient=F1,doefunc=doefunc)
#ifndef _OPENMP
call verbosity_pop()
#endif
#ifndef _OPENMP
call verbosity_push_decrement(2)
#endif
e2 = func_wrapper(func,x-v*h,am_data,local_energy=local_energy2,gradient=F2,doefunc=doefunc)
#ifndef _OPENMP
call verbosity_pop()
#endif
Gxpold = Gxp
! Translational force (image average) and its preconditioned form.
Gx = 0.5_dp*(F1 + F2);
if (k > 1) then
Gxp = apply_precon(Gx,pr,doefunc,init=Gxpold)
else
Gxp = apply_precon(Gx,pr,doefunc)
end if
res_x = sqrt(smartdotproduct(Gxp,Gx,doefunc))
Gvpold = Gvp
! Rotational force and approximate curvature lam along v.
Gv = (F1 - F2)/(2.0_dp*h)
if (k > 1) then
Gvp = apply_precon(Gv,pr,doefunc,init=Gvpold)
else
Gvp = apply_precon(Gv,pr,doefunc)
end if
lam = smartdotproduct(Gv,v,doefunc)
Gd = Gvp - lam*v
Gdl2 = Gv - lam*v
res_v = sqrt(Pdotproduct(Gd,Gd,pr,doefunc))
call print('n_iter = ' // k // ', res_x = ' // res_x // ', res_v = ' // res_v // ', lam = ' // lam //', alpha = '// alpha_x)
! Fixed-step updates: rotate v down the rotational force, translate x
! along the modified (component-reversed) force.
v = v - alpha_v*Gvp
x = x - alpha_x*(Gxp - 2.0*smartdotproduct(v,Gx,doefunc)*v)
k = k + 1
if (k>=kmax) then
exit
end if
end do
end function
! Debug helper: print the atomic-coordinate part of the DOF vector x
! (entries 10..end -- the first 9 entries are presumably cell DOFs, TODO
! confirm) reshaped to 3 x Natoms, matching MATLAB's column-major display.
subroutine printlikematlab(x)
real(dp) :: x(:)
integer :: vl
vl = size(x)
call print(reshape(x(10:vl),(/ 3 , (vl-9)/3 /)))
end subroutine
end module minimization_module
| {"hexsha": "0328fec23af9a5f00c64aa90311677a820445b27", "size": 197231, "ext": "f95", "lang": "FORTRAN", "max_stars_repo_path": "src/libAtoms/minimization.f95", "max_stars_repo_name": "albapa/QUIP", "max_stars_repo_head_hexsha": "ecde1e332c6bd62c238d3cd90e31dba4fb390313", "max_stars_repo_licenses": ["NRL"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/libAtoms/minimization.f95", "max_issues_repo_name": "albapa/QUIP", "max_issues_repo_head_hexsha": "ecde1e332c6bd62c238d3cd90e31dba4fb390313", "max_issues_repo_licenses": ["NRL"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/libAtoms/minimization.f95", "max_forks_repo_name": "albapa/QUIP", "max_forks_repo_head_hexsha": "ecde1e332c6bd62c238d3cd90e31dba4fb390313", "max_forks_repo_licenses": ["NRL"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.572114615, "max_line_length": 204, "alphanum_fraction": 0.5843300495, "num_tokens": 59909} |
Require Import Category.Lib.
Require Import Category.Instance.Lambda.Ltac.
Require Import Category.Instance.Lambda.Ty.
Require Import Category.Instance.Lambda.Exp.
Require Import Category.Instance.Lambda.Sub.
From Equations Require Import Equations.
Set Equations With UIP.
Generalizable All Variables.
Section Log.

(* Logical-relation machinery for the embedded simply-typed lambda calculus:
   a predicate [P] (resp. relation [R]) on well-typed expressions is
   strengthened at function and product types so that it can be used in
   type-directed induction proofs over [Exp Γ τ]. *)

Context {A : Type}.
Context `{L : HostExprs A}.
Context {Γ : Env}.

Variable P : ∀ {τ}, Exp Γ τ → Type.

(** [ExpP] is a logical predicate that permits type-directed induction on
    expressions. *)
Equations ExpP `(e : Exp Γ τ) : Type :=
  ExpP (τ:=_ ⟶ _) e := P e ∧ (∀ x, ExpP x → ExpP (APP e x));
  ExpP (τ:=_ × _) e := P e ∧ ExpP (Fst e) ∧ ExpP (Snd e);
  ExpP e := P e.

(* [SubP] lifts [ExpP] pointwise over substitutions. *)
Inductive SubP : ∀ {Γ'}, Sub Γ Γ' → Type :=
  | NoSubP : SubP (NoSub (Γ:=Γ))
  | PushP {Γ' τ} (e : Exp Γ τ) (s : Sub Γ Γ') :
      ExpP e → SubP s → SubP (Push e s).

Derive Signature for SubP.

(* Every [ExpP] proof contains a proof of the underlying predicate [P]. *)
Lemma ExpP_P {τ} {e : Γ ⊢ τ} : ExpP e → P e.
Proof. intros; induction τ; simpl in *; simp ExpP in X; now reduce. Qed.

Variable R : ∀ {τ}, Exp Γ τ → Exp Γ τ → Type.

(** [ExpR] is a logical predicate that permits type-directed induction on
    expressions. *)
Equations ExpR {τ} (e1 e2 : Exp Γ τ) : Type :=
  ExpR (τ:=_ ⟶ _) f1 f2 :=
    R f1 f2 ∧ (∀ x1 x2, ExpR x1 x2 → ExpR (APP f1 x1) (APP f1 x2));
  ExpR (τ:=_ × _) e1 e2 :=
    R e1 e2 ∧ ExpR (Fst e1) (Fst e2) ∧ ExpR (Snd e1) (Snd e2);
  ExpR e1 e2 := R e1 e2.

(* Every [ExpR] proof contains a proof of the underlying relation [R]. *)
Lemma ExpR_R {τ} {e1 e2 : Γ ⊢ τ} : ExpR e1 e2 → R e1 e2.
Proof. intros; induction τ; simpl in *; simp ExpR in X; now reduce. Qed.

(* [SubR] lifts [ExpR] pointwise over substitutions. *)
Inductive SubR : ∀ {Γ'}, Sub Γ Γ' → Type :=
  | NoSubR : SubR (NoSub (Γ:=Γ))
  | PushR {Γ' τ} (e e' : Exp Γ τ) (s : Sub Γ Γ') :
      ExpR e e' → SubR s → SubR (Push e s).

Derive Signature for SubR.

End Log.
| {"author": "jwiegley", "repo": "category-theory", "sha": "5376e32a4eeace4a84674820083bc2985a2a593f", "save_path": "github-repos/coq/jwiegley-category-theory", "path": "github-repos/coq/jwiegley-category-theory/category-theory-5376e32a4eeace4a84674820083bc2985a2a593f/Instance/Lambda/Log.v"} |
!! Phase-field solidification model on a uniform 2-d grid.
!! Each time step solves a coupled pair of Helmholtz problems for the
!! field u and the phase field phi with fishpack's hstcrt, relaxing the
!! coupling with block Jacobi or Gauss-Seidel iterations until the largest
!! pointwise update falls below tol.
!!
!! Fixes relative to the previous revision:
!!  * wsize_t was computed from mx and my BEFORE they were read from the
!!    input file (undefined values); the check now runs after the read.
!!  * The wsize_t formula applied the log2 factor inside the parenthesis,
!!    contradicting the documented "13M + 4N + M*INT(LOG2(N))" estimate.
!!  * On a from-scratch run, iframe_start (used for the first output frame
!!    and to seed the frame counter) was never initialised; only iframe
!!    was.  iframe_start is now set to 0 in the non-restart branch.
program phase_iterative
   implicit none

   !! # Grid parameters
   integer maxmx, maxmy
   parameter(maxmx = 2**10, maxmy = 2**10)

   !! # Input parameters
   double precision ax_in, ay_in, dx_in, dy_in, t_in, tstart
   double precision domain_length
   integer mx_in, my_in, nstp, izero, nchar
   character*100 fname
   logical restart

   double precision phi(maxmx, maxmy)
   double precision u(maxmx, maxmy)

   !! # More grid parameters
   integer mx, my, i, j, n, nout, nstep
   double precision ax, ay, bx, by, dx, dy, dt, t
   double precision x(maxmx), y(maxmy)

   !! # Misc
   integer iframe, iframe_start
   double precision pi
   integer mbdcnd, nbdcnd, ierror, idimf
   double precision lambda, lambda_phi, pertrb
   double precision bda(maxmy), bdb(maxmy)
   double precision bdc(maxmx), bdd(maxmx)
   double precision f(maxmx, maxmy)

   !! # Temporary variables
   double precision u_n, phi_n, g0, g
   double precision phik(maxmx, maxmy)
   double precision uk(maxmx, maxmy)
   double precision phikp1(maxmx, maxmy)
   double precision ukp1(maxmx, maxmy)
   double precision f1(maxmx, maxmy)
   double precision f2(maxmx, maxmy)
   double precision S1(maxmx, maxmy)
   double precision S2(maxmx, maxmy)
   double precision S3(maxmx, maxmy)
   double precision err(2), errmax(2), tol

   !! # Model parameters
   double precision S, alpha, mparm, xi, gamma
   double precision r0, x0, y0, kanio, pdel
   common /parm_comm/ r0, x0, y0, kanio, pdel

   !! Temporary variables
   double precision beta, Tinv

   integer wsize, wsize_t
   !! 13M + 4N + M*INT(LOG2(N))
   !! log2(N) <= log2(maxmy) ~ 10
   parameter(wsize = 4*maxmy + (13 + 12)*maxmx)
   double precision work(wsize)

   double precision xlow, ylow, w
   integer k, kmax, ktotal, avg_iterations, method
   logical prt_iterations

   integer jacobi, gauss_seidel
   parameter(jacobi=0, gauss_seidel=1)

   pi = 4.d0*datan(1.d0)

   !! # --------------------------------
   !! # Set up grid
   !! # --------------------------------
   open(10,file='phase_iterative.dat')
   read(10,*) mx, my
   read(10,*) domain_length
   read(10,*) nstep, nout
   read(10,*) dt

   if (mx > maxmx .or. my > maxmy) then
      write(6,*) 'mx or my too big'
      stop
   endif

   !! Work-array estimate for hstcrt on the ACTUAL grid (see formula
   !! above).  Must come after mx, my are read.
   wsize_t = 4*my + &
        (13 + int(log(real(my))/log(2.d0)))*mx
   if (wsize < wsize_t) then
      write(6,*) 'Increase wsize from ', wsize, ' to ',wsize_t
      stop
   endif

   !! Square domain of side domain_length, centred at the origin;
   !! cell-centred coordinates.
   ax = -domain_length/2
   bx = domain_length/2
   ay = -domain_length/2
   by = domain_length/2

   dx = (bx - ax)/mx
   dy = (by - ay)/my

   do i = 1,mx
      x(i) = ax + (i-0.5)*dx
   enddo

   do j = 1,my
      y(j) = ay + (j-0.5)*dy
   enddo

   !! # ------------------------------------
   !! # Numerical parameters - Jacobi method
   !! # ------------------------------------
   read(10,*) kmax
   read(10,*) tol
   read(10,*) method
   read(10,*) prt_iterations

   !! # ----------------------------
   !! # model parameters
   !! # ----------------------------
   read(10,*) S
   read(10,*) alpha
   read(10,*) mparm
   read(10,*) xi
   read(10,*) kanio
   read(10,*) r0
   read(10,*) pdel
   read(10,*) gamma

   !! # ----------------------------
   !! # Get restart information
   !! # ----------------------------
   read(10,*) restart
   if (restart) then
      read(10,*) fname
   endif
   close(10)

   write(6,*) 'Parameters used for this run : '
   write(6,*) '---------------------------'
   write(6,*) 'Non-dimensional parameters'
   write(6,*) '---------------------------'
   write(6,200) 'S = ', S
   write(6,200) 'alpha = ', alpha
   write(6,200) 'm = ', mparm
   write(6,200) 'xi = ', xi
   write(6,200) 'k = ', kanio
   write(6,200) 'r0 = ', r0
   write(6,200) 'pdel = ', pdel
   write(6,200) 'gamma = ', gamma
   write(6,*) ' '
   write(6,*) '---------------------------'
   write(6,*) 'Grid parameters'
   write(6,*) '---------------------------'
   write(6,220) 'mx = ', mx
   write(6,220) 'my = ', my
   write(6,200) 'ax = ', ax
   write(6,200) 'bx = ', bx
   write(6,200) 'ay = ', ay
   write(6,200) 'by = ', by
   write(6,200) 'dx = ', dx
   write(6,200) 'dy = ', dy
   write(6,220) 'kmax = ',kmax
   write(6,225) 'tol = ',tol

   if (restart) then
      write(6,*) 'Restart from file ', fname
   else
      write(6,*) 'Initializing run from scratch...'
   endif

200 format(a,F10.5)
210 format(a,E12.6)
225 format(a,E12.1)
220 format(a,I5)

   !! # --------------------------------
   !! # Initialize phi
   !! # --------------------------------
   write(6,*) 'Initializing phi and u...'
   if (restart) then
      open(20,file=fname)
      read(20,*) mx_in
      read(20,*) my_in
      read(20,*) ax_in
      read(20,*) ay_in
      read(20,*) dx_in
      read(20,*) dy_in
      read(20,*) t_in

      if (mx_in .ne. mx .or. my_in .ne. my) then
         write(6,*) 'Problem with restart: mx or my not right.'
         stop
      elseif (ax_in .ne. ax .or. ay_in .ne. ay) then
         write(6,*) 'Problem with restart: ax or ay not right'
         stop
      endif

      !! Decode the 4-digit frame number embedded in fname(7:10).
      izero = iachar('0')
      tstart = t_in
      iframe_start = 0
      nstp = 1
      do i = 10,7,-1
         nchar = iachar(fname(i:i)) - izero
         iframe_start = iframe_start + nchar*nstp
         nstp = nstp*10
      enddo

      do j = 1,my
         do i = 1,mx
            read(20,*) phi(i,j), u(i,j)
         enddo
      enddo
      close(20)
   else
      x0 = (ax + bx)/2.d0
      y0 = (ay + by)/2.d0

      tstart = 0.0
      !! Bug fix: seed the frame counter here (previously only iframe was
      !! set, leaving iframe_start undefined on a from-scratch run).
      iframe_start = 0
      do j = 1,my
         do i = 1,mx
            !! cellave returns the fraction w of cell (i,j) inside the
            !! interface defined by fdisc; u in [-1,0], phi in [0,1].
            xlow = ax + (i-1)*dx
            ylow = ay + (j-1)*dy
            call cellave(xlow,ylow,dx,dy,w)
            u(i,j) = w-1.d0
            phi(i,j) = 1-w
         enddo
      enddo
      write(6,*) 'Done with initialization'

      !! # Output frame only if we have initialized data from scratch.
      write(6,600) 0,tstart,0
      call out2(maxmx,maxmy,mx,my,ax,ay,dx,dy,phi,u,tstart, &
           iframe_start)
   endif

   !! % --------------------------------------
   !! % Time step through results
   !! % --------------------------------------

   !! Homogeneous Neumann data on all four sides (mbdcnd = nbdcnd = 3).
   do j = 1,my
      bda(j) = 0.d0
      bdb(j) = 0.d0
   enddo

   do i = 1,mx
      bdc(i) = 0.d0
      bdd(i) = 0.d0
   enddo

   lambda = -1.d0/dt
   idimf = maxmx
   mbdcnd = 3
   nbdcnd = 3

   beta = xi**2/mparm
   Tinv = 1.d0/xi**2
   lambda_phi = lambda*beta*Tinv

   avg_iterations = 0
   iframe = iframe_start
   do n = 1,nout
      t = tstart + n*dt
100   format('Step',I5,' at time t = ',1PE12.5)

      !! Form the fixed part of the right hand side for the block system.
      do j = 1,my
         do i = 1,mx
            phi_n = phi(i,j)
            u_n = u(i,j)
            g0 = phi_n*(1-phi_n)
            g = g0*g0
            S1(i,j) = 30*g/S
            S2(i,j) = 30*g*xi*alpha*S
            S3(i,j) = g0*(phi_n - 0.5d0)
            f1(i,j) = lambda*(u_n + S1(i,j)*phi_n)
            f2(i,j) = lambda*beta*phi_n - S3(i,j)
         end do
      end do

      !! # Start Jacobi iterations
      do j = 1,my
         do i = 1,mx
            uk(i,j) = u(i,j)
            phik(i,j) = phi(i,j)
         end do
      end do

      do k = 1,kmax
         !! ----------------------------------------------
         !! # Set up right hand side for u
         !! ----------------------------------------------
         do j = 1,my
            do i = 1,mx
               f(i,j) = -lambda*S1(i,j)*phik(i,j) + f1(i,j)
            end do
         end do

         !! # Solve equation for u; solution will be in f
         call hstcrt(ax,bx,mx,mbdcnd,bda,bdb,ay,by,my,nbdcnd, &
              bdc,bdd,lambda,f,idimf,pertrb,ierror,work)
         if (work(1) .gt. wsize) then
            write(6,*) 'hstcrt : Allocate more work for wsize; ', wsize, work(1)
         endif
         if (ierror .ne. 0) then
            write(6,*) 'hstcrt : ierror /= 0', ierror
            stop
         endif
         if (abs(pertrb) .gt. 0) then
            write(6,*) 'pertrb > 0; ', pertrb
            stop
         endif

         do j = 1,my
            do i = 1,mx
               ukp1(i,j) = f(i,j)
            end do
         end do

         !! ----------------------------------------------
         !! Set-up right hand side for phi
         !! ----------------------------------------------
         do j = 1,my
            do i = 1,mx
               !! Gauss-Seidel couples to the freshly computed u; Jacobi
               !! to the previous iterate.
               if (method .eq. jacobi) then
                  f(i,j) = Tinv*(-S2(i,j)*uk(i,j) + f2(i,j))
               else
                  f(i,j) = Tinv*(-S2(i,j)*ukp1(i,j) + f2(i,j))
               endif
            end do
         end do

         !! # Solve equation for phi; solution will be in f
         call hstcrt(ax,bx,mx,mbdcnd,bda,bdb,ay,by,my,nbdcnd, &
              bdc,bdd,lambda_phi,f,idimf,pertrb,ierror,work)
         if (ierror .ne. 0) then
            write(6,*) 'hstcrt : ierror /= 0; ', ierror
            stop
         endif

         do j = 1,my
            do i = 1,mx
               phikp1(i,j) = f(i,j)
            end do
         end do

         !! Sup-norm of the iteration update for both fields.
         errmax(1) = 0
         errmax(2) = 0
         do j = 1,my
            do i = 1,mx
               err(1) = abs(ukp1(i,j) - uk(i,j))
               err(2) = abs(phikp1(i,j) - phik(i,j))
               errmax(1) = max(errmax(1),err(1))
               errmax(2) = max(errmax(2),err(2))
            end do
         end do

         ktotal = k
         if (max(errmax(1), errmax(2)) < tol) then
            exit !! Exit Jacobi iteration
         endif

         !! Swap k and kp1
         do j = 1,my
            do i = 1,mx
               uk(i,j) = ukp1(i,j)
               phik(i,j) = phikp1(i,j)
            end do
         end do
      end do

      if (prt_iterations) then
         write(6,601) 'Step ', n, 'Iterations/step ',ktotal,'Residual(u)', errmax(1), &
              'Residual(phi)', errmax(2)
      endif
      avg_iterations = avg_iterations + ktotal

      !! # ---------------------------------
      !! # Output results
      !! # ---------------------------------
      if (nstep*(n/nstep) == n) then
         iframe = iframe + 1
         write(6,600) iframe, t, avg_iterations/nstep
         call out2(maxmx,maxmy,mx,my,ax,ay,dx,dy,phi,u,t,iframe)
         avg_iterations = 0
      endif

600   format('Writing frame ',I5,' at time t = ',1PE12.5,' (avg. iterations = ',I3,')')
601   format(A,I5,A20,I4,A15,E12.4,A15,E12.4)

      !! Update time level n solutions
      do j = 1,my
         do i = 1,mx
            u(i,j) = ukp1(i,j)
            phi(i,j) = phikp1(i,j)
         end do
      end do
   end do
end program phase_iterative
!! Signed distance from (x,y) to a perturbed-circle interface:
!! negative inside r(theta) = r0*(1 + pdel*cos(kanio*theta)), positive outside.
double precision function fdisc(x,y)
    implicit none
    double precision x, y

    !! Interface parameters supplied by the host program via common block.
    double precision r0, x0, y0, kanio, pdel
    common /parm_comm/ r0, x0, y0, kanio, pdel

    double precision dxc, dyc, radius, angle, r_interface

    dxc = x - x0
    dyc = y - y0
    radius = sqrt(dxc*dxc + dyc*dyc)
    angle = atan2(dyc, dxc)

    r_interface = r0*(1.d0 + pdel*cos(kanio*angle))
    fdisc = radius - r_interface
end function fdisc
| {"hexsha": "13eae360a390429df695c5684fc59d0a18b273c1", "size": 12883, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "applications/elliptic/phasefield/fishpack/phase_iterative.f90", "max_stars_repo_name": "ECLAIRWaveS/ForestClaw", "max_stars_repo_head_hexsha": "0a18a563b8c91c55fb51b56034fe5d3928db37dd", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 34, "max_stars_repo_stars_event_min_datetime": "2017-09-26T13:39:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T08:56:23.000Z", "max_issues_repo_path": "applications/elliptic/phasefield/fishpack/phase_iterative.f90", "max_issues_repo_name": "ECLAIRWaveS/ForestClaw", "max_issues_repo_head_hexsha": "0a18a563b8c91c55fb51b56034fe5d3928db37dd", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 75, "max_issues_repo_issues_event_min_datetime": "2017-08-02T19:56:00.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T12:36:32.000Z", "max_forks_repo_path": "applications/elliptic/phasefield/fishpack/phase_iterative.f90", "max_forks_repo_name": "ECLAIRWaveS/ForestClaw", "max_forks_repo_head_hexsha": "0a18a563b8c91c55fb51b56034fe5d3928db37dd", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 23, "max_forks_repo_forks_event_min_datetime": "2018-02-21T00:10:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-14T19:08:36.000Z", "avg_line_length": 27.7650862069, "max_line_length": 91, "alphanum_fraction": 0.4353023364, "num_tokens": 4034} |
import numpy as np
import torch
import torch.nn as nn
from torch.nn import Parameter
class Self_Attn(nn.Module):
    """Self-attention layer (SAGAN style).

    Source: https://github.com/heykeetae/Self-Attention-GAN/blob/master/sagan_models.py
    """

    def __init__(self, in_dim, activation, with_attention=False):
        super(Self_Attn, self).__init__()
        self.chanel_in = in_dim
        self.activation = activation
        self.with_attention = with_attention

        # Query/key project to a reduced channel dim; value keeps full channels.
        self.query_conv = nn.Conv2d(in_dim, in_dim // 8, kernel_size=1)
        self.key_conv = nn.Conv2d(in_dim, in_dim // 8, kernel_size=1)
        self.value_conv = nn.Conv2d(in_dim, in_dim, kernel_size=1)
        # Learned residual gate; starts at 0 so the layer is initially the identity.
        self.gamma = nn.Parameter(torch.zeros(1))
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        """
        inputs :
            x : input feature maps (B X C X W X H)
        returns :
            out : self attention value + input feature
            attention: B X N X N (N is Width*Height), only if with_attention
        """
        batch, channels, width, height = x.size()
        n = width * height

        queries = self.query_conv(x).view(batch, -1, n).permute(0, 2, 1)  # B x N x C'
        keys = self.key_conv(x).view(batch, -1, n)                        # B x C' x N
        attention = self.softmax(torch.bmm(queries, keys))                # B x N x N
        values = self.value_conv(x).view(batch, -1, n)                    # B x C x N

        attended = torch.bmm(values, attention.permute(0, 2, 1))
        attended = attended.view(batch, channels, width, height)
        out = self.gamma * attended + x

        if self.with_attention:
            return out, attention
        return out
def l2normalize(v, eps=1e-12):
    """Normalize v by its L2 norm; eps guards against division by zero."""
    norm = v.norm()
    return v / (norm + eps)
def spectral_norm(module, mode=True):
    """Wrap ``module`` with spectral normalization when ``mode`` is truthy,
    otherwise return it unchanged."""
    if not mode:
        return module
    return nn.utils.spectral_norm(module)
class SwitchNorm2d(nn.Module):
    """Switchable Normalization for 4D inputs (N, C, H, W).

    Learns softmax-weighted mixtures of Instance-Norm, Layer-Norm and
    (optionally) Batch-Norm statistics, then applies a per-channel affine
    transform (weight/bias).
    """

    def __init__(self, num_features, eps=1e-5, momentum=0.9, using_moving_average=True, using_bn=True,
                 last_gamma=False):
        super(SwitchNorm2d, self).__init__()
        self.eps = eps
        self.momentum = momentum
        self.using_moving_average = using_moving_average
        self.using_bn = using_bn
        self.last_gamma = last_gamma
        # Per-channel affine parameters (gamma / beta).
        self.weight = nn.Parameter(torch.ones(1, num_features, 1, 1))
        self.bias = nn.Parameter(torch.zeros(1, num_features, 1, 1))
        # Mixture logits over the statistics: (IN, LN, BN) or (IN, LN).
        if self.using_bn:
            self.mean_weight = nn.Parameter(torch.ones(3))
            self.var_weight = nn.Parameter(torch.ones(3))
        else:
            self.mean_weight = nn.Parameter(torch.ones(2))
            self.var_weight = nn.Parameter(torch.ones(2))
        if self.using_bn:
            self.register_buffer('running_mean', torch.zeros(1, num_features, 1))
            self.register_buffer('running_var', torch.zeros(1, num_features, 1))
        self.reset_parameters()

    def reset_parameters(self):
        """Zero the running statistics and reset the affine parameters."""
        if self.using_bn:
            self.running_mean.zero_()
            self.running_var.zero_()
        if self.last_gamma:
            # Zero-init gamma (useful for the last norm of a residual branch).
            self.weight.data.fill_(0)
        else:
            self.weight.data.fill_(1)
        self.bias.data.zero_()

    def _check_input_dim(self, input):
        if input.dim() != 4:
            raise ValueError('expected 4D input (got {}D input)'
                             .format(input.dim()))

    def forward(self, x):
        self._check_input_dim(x)
        N, C, H, W = x.size()
        x = x.view(N, C, -1)
        # Instance-norm statistics: per (sample, channel).
        mean_in = x.mean(-1, keepdim=True)
        var_in = x.var(-1, keepdim=True)

        # Layer-norm statistics: per sample, via E[x^2] - E[x]^2.
        mean_ln = mean_in.mean(1, keepdim=True)
        temp = var_in + mean_in ** 2
        var_ln = temp.mean(1, keepdim=True) - mean_ln ** 2

        if self.using_bn:
            if self.training:
                # Batch-norm statistics: per channel over the batch.
                mean_bn = mean_in.mean(0, keepdim=True)
                var_bn = temp.mean(0, keepdim=True) - mean_bn ** 2
                if self.using_moving_average:
                    self.running_mean.mul_(self.momentum)
                    self.running_mean.add_((1 - self.momentum) * mean_bn.data)
                    self.running_var.mul_(self.momentum)
                    self.running_var.add_((1 - self.momentum) * var_bn.data)
                else:
                    # Accumulate raw sums (caller is expected to average later).
                    self.running_mean.add_(mean_bn.data)
                    self.running_var.add_(mean_bn.data ** 2 + var_bn.data)
            else:
                # Fix: torch.autograd.Variable has been deprecated (a no-op
                # wrapper) since PyTorch 0.4 -- use the buffers directly.
                mean_bn = self.running_mean
                var_bn = self.running_var

        softmax = nn.Softmax(0)
        mean_weight = softmax(self.mean_weight)
        var_weight = softmax(self.var_weight)

        if self.using_bn:
            mean = mean_weight[0] * mean_in + mean_weight[1] * mean_ln + mean_weight[2] * mean_bn
            var = var_weight[0] * var_in + var_weight[1] * var_ln + var_weight[2] * var_bn
        else:
            mean = mean_weight[0] * mean_in + mean_weight[1] * mean_ln
            var = var_weight[0] * var_in + var_weight[1] * var_ln

        x = (x - mean) / (var + self.eps).sqrt()
        x = x.view(N, C, H, W)
        return x * self.weight + self.bias
class PartialConv(nn.Module):
    """Partial convolution (Liu et al., "Image Inpainting for Irregular
    Holes Using Partial Convolutions").

    The input is masked before convolving; outputs are renormalized by the
    local mask coverage, and an updated mask marks which outputs are valid.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True):
        # Fix: the original called super(PartialConv).__init__(), which
        # initializes the *super object* rather than nn.Module, so assigning
        # submodules below raised AttributeError.
        super(PartialConv, self).__init__()
        self.input_conv = nn.Conv2d(in_channels, out_channels, kernel_size,
                                    stride, padding, dilation, groups, bias)
        self.mask_conv = nn.Conv2d(in_channels, out_channels, kernel_size,
                                   stride, padding, dilation, groups, False)
        # self.input_conv.apply(weights_init('kaiming'))
        torch.nn.init.constant_(self.mask_conv.weight, 1.0)

        # The mask convolution only counts valid input pixels; never trained.
        for param in self.mask_conv.parameters():
            param.requires_grad = False

    def forward(self, input, mask):
        """Return (output, updated_mask); mask is 1 at valid pixels, 0 in holes."""
        output = self.input_conv(input * mask)
        if self.input_conv.bias is not None:
            output_bias = self.input_conv.bias.view(1, -1, 1, 1).expand_as(
                output)
        else:
            output_bias = torch.zeros_like(output)

        with torch.no_grad():
            # Coverage: how many valid input pixels feed each output location.
            output_mask = self.mask_conv(mask)

        no_update_holes = output_mask == 0
        # Avoid division by zero at fully-hole locations (zeroed out below).
        mask_sum = output_mask.masked_fill_(no_update_holes, 1.0)

        # Renormalize by coverage, re-apply the bias, zero hole locations.
        output_pre = (output - output_bias) / mask_sum + output_bias
        output = output_pre.masked_fill_(no_update_holes, 0.0)

        new_mask = torch.ones_like(output)
        new_mask = new_mask.masked_fill_(no_update_holes, 0.0)
        return output, new_mask
class ResnetBlock(nn.Module):
    """Residual block: output = x + (pad-conv-norm-relu-pad-conv-norm)(x)."""

    def __init__(self, dim, padding_type, norm_layer, use_bias):
        super(ResnetBlock, self).__init__()
        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_bias)

    def build_conv_block(self, dim, padding_type, norm_layer, use_bias):
        """Assemble the two conv-norm stages with the requested padding mode."""

        def padding():
            # Returns (explicit padding layers, implicit conv padding).
            if padding_type == 'reflect':
                return [nn.ReflectionPad2d(1)], 0
            if padding_type == 'replicate':
                return [nn.ReplicationPad2d(1)], 0
            if padding_type == 'zero':
                return [], 1
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)

        layers = []

        pad_layers, p = padding()
        layers += pad_layers
        layers += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
                   norm_layer(dim),
                   nn.ReLU(True)]

        pad_layers, p = padding()
        layers += pad_layers
        layers += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
                   norm_layer(dim)]

        return nn.Sequential(*layers)

    def forward(self, x):
        """Apply the residual branch and add the skip connection."""
        return x + self.conv_block(x)
| {"hexsha": "809354fc2954f9dd4c49ce41ab1dbfda91208a64", "size": 8249, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/modules/modules.py", "max_stars_repo_name": "7568/Shift-Net_pytorch", "max_stars_repo_head_hexsha": "4863127301862457030cb8027cbe567c33aa90b2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "models/modules/modules.py", "max_issues_repo_name": "7568/Shift-Net_pytorch", "max_issues_repo_head_hexsha": "4863127301862457030cb8027cbe567c33aa90b2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "models/modules/modules.py", "max_forks_repo_name": "7568/Shift-Net_pytorch", "max_forks_repo_head_hexsha": "4863127301862457030cb8027cbe567c33aa90b2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 8249.0, "max_line_length": 8249, "alphanum_fraction": 0.5907382713, "include": true, "reason": "import numpy", "num_tokens": 2215} |
# Originally by adamb70 from https://github.com/adamb70/Python-Spherical-Projection
# Modified to be used with Source Engine cubemaps.
# Converted to numpy to achieve reasonable performance.
import numpy
from numpy import ndarray
from enum import IntEnum
from typing import Tuple
def spherical_coordinates(i: ndarray, j: ndarray, w: float, h: float) -> Tuple[ndarray, ndarray]:
    """Map output-image pixel coordinates to spherical angles.

    Pixel coordinates are normalized into [-1, 1) and scaled so that the
    returned pair is (phi, theta) = (latitude in [-pi/2, pi/2),
    longitude in [-pi, pi)).
    """
    lon_frac = 2 * i / w - 1
    lat_frac = 2 * j / h - 1
    return lat_frac * (numpy.pi / 2), lon_frac * numpy.pi
def vector_coordinates(phi: ndarray, theta: ndarray) -> Tuple[ndarray, ndarray, ndarray]:
    """Convert spherical angles (phi=lat, theta=long) to a unit 3D vector."""
    cos_lat = numpy.cos(phi)
    x = cos_lat * numpy.cos(theta)
    y = numpy.sin(phi)
    z = cos_lat * numpy.sin(theta)
    return x, y, z
class CubemapFace(IntEnum):
    """Indices of the six cube-map faces."""
    LEFT = 0
    RIGHT = 1
    TOP = 2
    BOTTOM = 3
    FRONT = 4
    BACK = 5


def get_face(x: ndarray, y: ndarray, z: ndarray) -> ndarray:
    """Map each 3D direction vector to the cube face it points at.

    The face is the axis with the largest absolute component; ties are
    broken in x, y, z order (numpy.select takes the first matching
    condition).
    """
    abs_x, abs_y, abs_z = numpy.abs(x), numpy.abs(y), numpy.abs(z)
    largest = numpy.maximum.reduce((abs_x, abs_y, abs_z))

    on_x = largest - abs_x < 1e-9
    on_y = largest - abs_y < 1e-9
    on_z = largest - abs_z < 1e-9

    conditions = (
        on_x & (x < 0), on_x & ~(x < 0),
        on_y & (y < 0), on_y & ~(y < 0),
        on_z & (z < 0), on_z & ~(z < 0),
    )
    faces = (
        CubemapFace.LEFT, CubemapFace.RIGHT,
        CubemapFace.TOP, CubemapFace.BOTTOM,
        CubemapFace.BACK, CubemapFace.FRONT,
    )
    return numpy.select(conditions, faces)
def raw_face_coordinates(face: ndarray, x: ndarray, y: ndarray, z: ndarray) -> Tuple[ndarray, ndarray, ndarray]:
    """
    Per-face (s, t, major-axis) coordinates with the sign conventions of the
    OpenGL specification (chapter 3.8.10)
    https://www.opengl.org/registry/doc/glspec41.core.20100725.pdf
    """
    # Face order is fixed to match the choice tuples below.
    masks = tuple(
        face == f
        for f in (CubemapFace.FRONT, CubemapFace.BACK, CubemapFace.BOTTOM,
                  CubemapFace.TOP, CubemapFace.LEFT, CubemapFace.RIGHT)
    )
    xc = numpy.select(masks, (-x, x, z, z, -z, z))
    yc = numpy.select(masks, (y, y, -x, x, y, y))
    ma = numpy.select(masks, (z, z, y, y, x, x))
    return xc, yc, ma
def raw_coordinates(xc: ndarray, yc: ndarray, ma: ndarray) -> Tuple[ndarray, ndarray]:
    """
    2D coordinates on the selected face relative to its bottom-left corner
    (OpenGL cube-map spec): divide by the major-axis magnitude, then map
    [-1, 1] onto [0, 1].
    """
    magnitude = numpy.abs(ma)
    return (xc / magnitude + 1) / 2, (yc / magnitude + 1) / 2
def normalized_coordinates(face: ndarray, x: ndarray, y: ndarray, n: int) -> Tuple[ndarray, ndarray]:
    """Scale face-relative [0, 1] coordinates to pixel indices in an n-pixel
    face, clamped to the valid range. (``face`` is unused here but kept for
    API compatibility.)"""
    col = (x * n).clip(0, n - 1)
    row = (y * n).clip(0, n - 1)
    return col, row
def find_corresponding_pixels(width: int, height: int, out_dim: int) -> Tuple[ndarray, Tuple[ndarray, ndarray]]:
    """For every pixel of a width x height output image, return the cube face
    index and the input-image pixel coordinates it samples from."""
    rows, cols = numpy.mgrid[0:height, 0:width]
    # Image rows grow downward; flip so latitude increases upward.
    rows = rows[::-1]

    phi, theta = spherical_coordinates(cols, rows, width, height)
    vx, vy, vz = vector_coordinates(phi, theta)
    face = get_face(vx, vy, vz)
    xc, yc, ma = raw_face_coordinates(face, vx, vy, vz)
    u, v = raw_coordinates(xc, yc, ma)
    return face, normalized_coordinates(face, u, v, out_dim)
| {"hexsha": "407b6e4b8455e58a330ae6b3135d49e3be5ec388", "size": 4365, "ext": "py", "lang": "Python", "max_stars_repo_path": "io_import_vmf/cube2equi.py", "max_stars_repo_name": "lasa01/io_import_vmf", "max_stars_repo_head_hexsha": "3341b8e2d0be77cba8f3ec30812f6c859f9b9a83", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 153, "max_stars_repo_stars_event_min_datetime": "2020-04-19T20:57:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T18:36:27.000Z", "max_issues_repo_path": "io_import_vmf/cube2equi.py", "max_issues_repo_name": "lasa01/io_import_vmf", "max_issues_repo_head_hexsha": "3341b8e2d0be77cba8f3ec30812f6c859f9b9a83", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 112, "max_issues_repo_issues_event_min_datetime": "2020-04-20T08:20:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-14T00:59:33.000Z", "max_forks_repo_path": "io_import_vmf/cube2equi.py", "max_forks_repo_name": "lasa01/io_import_vmf", "max_forks_repo_head_hexsha": "3341b8e2d0be77cba8f3ec30812f6c859f9b9a83", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 23, "max_forks_repo_forks_event_min_datetime": "2020-04-28T18:41:00.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T12:58:09.000Z", "avg_line_length": 32.3333333333, "max_line_length": 120, "alphanum_fraction": 0.6288659794, "include": true, "reason": "import numpy,from numpy", "num_tokens": 1187} |
#ifndef JHMI_UTILITY_LOAD_PROTOBUF_HPP_NRC_20160520
#define JHMI_UTILITY_LOAD_PROTOBUF_HPP_NRC_20160520
#include "utility/scope_exit.hpp"
#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/io/gzip_stream.h>
#include <google/protobuf/io/zero_copy_stream_impl.h>
#include <boost/filesystem.hpp>
#ifndef WIN32
#include <sys/stat.h>
#include <fcntl.h>
#endif
namespace jhmi {
  // Load a gzip-compressed protobuf message of type T from `filename`.
  // Throws std::runtime_error if the file cannot be opened or parsed.
  template <typename T>
  T load_protobuf(boost::filesystem::path const& filename) {
    namespace io = google::protobuf::io;
#ifndef WIN32
    // Fix: open() returns -1 on failure and 0 is a valid descriptor
    // (stdin), so the original `if (!fd)` check let failures through.
    auto fd = open(filename.string().c_str(), O_RDONLY, S_IREAD);
    if (fd < 0)
      throw std::runtime_error("open failed on input file");
    auto ex = scope_exit([&] { close(fd); });
    auto file_stream = std::make_unique<io::FileInputStream>(fd);
#else
    auto f = std::ifstream{filename.string(), std::ios::binary};
    auto file_stream = std::make_unique<io::IstreamInputStream>(&f);
#endif
    io::GzipInputStream gzip_stream{file_stream.get()};
    auto cs = std::make_unique<io::CodedInputStream>(&gzip_stream);
    // Raise the default parse limit to 1 GB for large messages.
    cs->SetTotalBytesLimit(1024*1024*1024, 1024*1024*1024);
    T vt;
    if (!vt.ParseFromCodedStream(cs.get()))
      throw std::runtime_error("Invalid pb file");
    return vt;
  }
}
#endif
| {"hexsha": "fb516aa9975e27d293503aed46262e522b48afe2", "size": 1253, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "utility/load_protobuf.hpp", "max_stars_repo_name": "ncrookston/liver_source", "max_stars_repo_head_hexsha": "9876ac4e9ea57d8e23767af9be061a9b10c6f1e5", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utility/load_protobuf.hpp", "max_issues_repo_name": "ncrookston/liver_source", "max_issues_repo_head_hexsha": "9876ac4e9ea57d8e23767af9be061a9b10c6f1e5", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utility/load_protobuf.hpp", "max_forks_repo_name": "ncrookston/liver_source", "max_forks_repo_head_hexsha": "9876ac4e9ea57d8e23767af9be061a9b10c6f1e5", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.325, "max_line_length": 68, "alphanum_fraction": 0.7190742219, "num_tokens": 341} |
import numpy as np
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
import warnings
warnings.filterwarnings('ignore')
from src.data_handler.sharable_dataset import SharableListDataset, SharableMultiVariateDataset
def evaluation(train, test, predictor=None, estimator=None,
               verbose=False, metric='MSE', target_dim=1):
    """
    Score a GluonTS predictor or estimator on the testing data.

    Exactly one of ``predictor`` / ``estimator`` should be supplied: a
    predictor class is instantiated directly, while an estimator is first
    trained on ``train``.

    Args:
        - predictor: GluonTS Predictor class - e.g., prophet.ProphetPredictor
        - estimator: GluonTS Estimator - e.g., DeepAREstimator
        - train: the training dataset aka. X
        - test: the testing dataset aka. Y
    Returns:
        - the requested aggregate metric (default 'MSE') between
          prediction and ground truth.
    """
    assert isinstance(train, (SharableListDataset, SharableMultiVariateDataset))
    assert isinstance(test, (SharableListDataset, SharableMultiVariateDataset))

    train = train.to_local()
    test = test.to_local()

    if predictor is not None:
        assert estimator is None
        # Instantiate the predictor with the test horizon as its
        # prediction length.
        horizon = list(test)[0]['target'].shape[0]
        predictor = predictor(freq="D", prediction_length=horizon)
    else:
        assert (predictor is None) and (estimator is not None)
        predictor = estimator.train(training_data=train, num_workers=0)
    if verbose:
        print('[evaluation] predictor created!')

    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test,          # test dataset
        predictor=predictor,   # predictor
        num_samples=100,       # number of sample paths for evaluation
    )
    if verbose:
        print('[evaluation] make evaluation!')

    actuals = list(ts_it)
    predictions = list(forecast_it)

    if target_dim > 1:
        evaluator = MultivariateEvaluator(
            target_agg_funcs={'sum': np.mean},
            num_workers=0,
        )
    elif target_dim == 1:
        evaluator = Evaluator(num_workers=0)
    else:
        raise ValueError('target_dim should not be less than 1')

    agg_metrics, _ = evaluator(
        iter(actuals),
        iter(predictions),
        num_series=len(test),
    )
    if verbose:
        print(f'Metrics calculated in Agg_Metrics: {agg_metrics.keys()}')
    return agg_metrics[metric]
| {"hexsha": "34814a3abe839235174629b7a67a78885ac14121", "size": 2391, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/evaluator.py", "max_stars_repo_name": "jeffrey82221/gluonts_fund_price_forecast", "max_stars_repo_head_hexsha": "fed7c484c4dba663201f9cf96aa86ca98119b54c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-21T13:27:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-21T13:27:32.000Z", "max_issues_repo_path": "src/evaluator.py", "max_issues_repo_name": "jeffrey82221/gluonts_fund_price_forecast", "max_issues_repo_head_hexsha": "fed7c484c4dba663201f9cf96aa86ca98119b54c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2022-02-20T10:36:38.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-22T03:47:13.000Z", "max_forks_repo_path": "src/evaluator.py", "max_forks_repo_name": "jeffrey82221/gluonts_fund_price_forecast", "max_forks_repo_head_hexsha": "fed7c484c4dba663201f9cf96aa86ca98119b54c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.6865671642, "max_line_length": 94, "alphanum_fraction": 0.6658301966, "include": true, "reason": "import numpy", "num_tokens": 541} |
% main.tex
\documentclass{report}
\setcounter{secnumdepth}{5}
\begin{document}
\chapter{A}
a
\section{AA}
aa
\subsection{AAA}
\subsubsection*{AAAA}
aaaa
\paragraph{AAAAA}
aaaaa
\subparagraph{AAAAAA}
aaaaaa
\end{document} | {"hexsha": "cdd1f4c548ede2e0bbaf0564d90e51095f06e500", "size": 220, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "tex/test_misc/main copy 5.tex", "max_stars_repo_name": "imagingbook/latextree", "max_stars_repo_head_hexsha": "272ee1594b3bdea39a043fb2ac2b86ac9a1728e8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-02-16T22:41:21.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-16T22:41:21.000Z", "max_issues_repo_path": "tex/test_misc/main copy 5.tex", "max_issues_repo_name": "imagingbook/latextree", "max_issues_repo_head_hexsha": "272ee1594b3bdea39a043fb2ac2b86ac9a1728e8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tex/test_misc/main copy 5.tex", "max_forks_repo_name": "imagingbook/latextree", "max_forks_repo_head_hexsha": "272ee1594b3bdea39a043fb2ac2b86ac9a1728e8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2017-09-11T09:38:25.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-11T15:30:21.000Z", "avg_line_length": 13.75, "max_line_length": 27, "alphanum_fraction": 0.7681818182, "num_tokens": 77} |
"""Automatic verification of ANUGA flows.
See functions exercised by this wrapper for more details
"""
import unittest
import os
import numpy
import anuga
indent = anuga.indent
args = anuga.get_args()
verbose = args.verbose
class Test_results(unittest.TestCase):
    """Automated validation of the dry-avalanche flow.

    Runs the numerical simulation script, then compares stage, x-momentum
    and x-velocity against the analytical solution, requiring the relative
    L^1 errors to stay below fixed tolerances.
    """

    def setUp(self):
        # Remove output artefacts from any previous run so the simulation
        # starts from a clean working directory.
        for file in os.listdir('.'):
            if file.endswith('.stdout') or\
               file.endswith('.sww') or\
               file.endswith('.msh') or\
               file.endswith('.png'):
                os.remove(file)

    def tearDown(self):
        pass

    def test_avalanche_dry(self):
        """Run the simulation and check accuracy against the analytical solution."""
        if verbose:
            print()
            print(indent+'Running simulation script')

        # Run basic script (can be run in parallel if -np is used in the
        # call to this script)
        s = 'numerical_avalanche_dry.py'
        res = anuga.run_anuga_script(s,args=args)

        # Test that script runs ok
        assert res == 0

        if verbose:
            print(indent+'Testing accuracy')

        # Imported here: only needed once the simulation has produced output.
        import anuga.utilities.plot_utils as util
        from analytical_avalanche_dry import analytical_sol

        p_st = util.get_output('avalanche.sww')
        p2_st=util.get_centroids(p_st)

        # Pick one row of centroids (constant y) and compare along x.
        v = p2_st.y[10]
        v2=(p2_st.y==v)

        x_n = p2_st.x[v2]

        # Analytical solution at output slices 0, 10 and 30; tuple appears
        # to be (velocity, depth, stage, elevation, pressure) -- TODO confirm
        # against analytical_avalanche_dry.
        u0,h0,w0,z0,p0 = analytical_sol(x_n, p2_st.time[0])
        u10,h10,w10,z10,p10 = analytical_sol(x_n, p2_st.time[10])
        u30,h30,w30,z30,p30 = analytical_sol(x_n, p2_st.time[30])

        # Numerical stage at the same slices.
        w0_n = p2_st.stage[0,v2]
        w10_n = p2_st.stage[10,v2]
        w30_n = p2_st.stage[30,v2]

        z_n = p2_st.elev[v2]

        # Numerical x-momentum.
        uh0_n = p2_st.xmom[0,v2]
        uh10_n = p2_st.xmom[10,v2]
        uh30_n = p2_st.xmom[30,v2]

        # Numerical x-velocity.
        u0_n = p2_st.xvel[0,v2]
        u10_n = p2_st.xvel[10,v2]
        u30_n = p2_st.xvel[30,v2]

        #Test stages
        # Relative L^1 error at the times corresponding to slices 10 and 30
        eh10 = numpy.sum(numpy.abs(w10_n-w10))/numpy.sum(numpy.abs(w10))
        eh30 = numpy.sum(numpy.abs(w30_n-w30))/numpy.sum(numpy.abs(w30))

        print()
        print(indent+'Errors in stage: ', eh10, eh30)

        # Test xmomenta
        # Relative L^1 error at the times corresponding to slices 10 and 30
        euh10 = numpy.sum(numpy.abs(uh10_n-u10*h10))/numpy.sum(numpy.abs(u10*h10))
        euh30 = numpy.sum(numpy.abs(uh30_n-u30*h30))/numpy.sum(numpy.abs(u30*h30))

        print(indent+'Errors in xmomentum: ', euh10, euh30)

        #Test xvelocity
        # Relative L^1 error at the times corresponding to slices 10 and 30
        eu10 = numpy.sum(numpy.abs(u10_n-u10))/numpy.sum(numpy.abs(u10))
        eu30 = numpy.sum(numpy.abs(u30_n-u30))/numpy.sum(numpy.abs(u30))

        print(indent+'Errors in xvelocity: ', eu10, eu30)

        assert eh10 < 0.01, 'L^1 error %g greater than 1 percent'% eh10
        assert eh30 < 0.01, 'L^1 error %g greater than 1 percent'% eh30

        assert euh10 < 0.025, 'L^1 error %g greater than 2.5 percent'% euh10
        assert euh30 < 0.025, 'L^1 error %g greater than 2.5 percent'% euh30

        assert eu10 < 0.2, 'L^1 error %g greater than 20 percent'% eu10
        assert eu30 < 0.2, 'L^1 error %g greater than 20 percent'% eu30
#-------------------------------------------------------------
if __name__ == '__main__':
    # Fix: unittest.makeSuite() was deprecated in Python 3.11 and removed
    # in 3.13; TestLoader.loadTestsFromTestCase is the direct replacement
    # (it uses the same default 'test' method-name prefix).
    suite = unittest.TestLoader().loadTestsFromTestCase(Test_results)
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite)
| {"hexsha": "6dd50f3cf112e72664e53084bc911f4d072e1b10", "size": 3499, "ext": "py", "lang": "Python", "max_stars_repo_path": "validation_tests/analytical_exact/avalanche_dry/validate_avalanche_dry.py", "max_stars_repo_name": "samcom12/anuga_core", "max_stars_repo_head_hexsha": "f4378114dbf02d666fe6423de45798add5c42806", "max_stars_repo_licenses": ["Python-2.0", "OLDAP-2.7"], "max_stars_count": 136, "max_stars_repo_stars_event_min_datetime": "2015-05-07T05:47:43.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-16T03:07:40.000Z", "max_issues_repo_path": "validation_tests/analytical_exact/avalanche_dry/validate_avalanche_dry.py", "max_issues_repo_name": "samcom12/anuga_core", "max_issues_repo_head_hexsha": "f4378114dbf02d666fe6423de45798add5c42806", "max_issues_repo_licenses": ["Python-2.0", "OLDAP-2.7"], "max_issues_count": 184, "max_issues_repo_issues_event_min_datetime": "2015-05-03T09:27:54.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-20T04:22:48.000Z", "max_forks_repo_path": "validation_tests/analytical_exact/avalanche_dry/validate_avalanche_dry.py", "max_forks_repo_name": "samcom12/anuga_core", "max_forks_repo_head_hexsha": "f4378114dbf02d666fe6423de45798add5c42806", "max_forks_repo_licenses": ["Python-2.0", "OLDAP-2.7"], "max_forks_count": 70, "max_forks_repo_forks_event_min_datetime": "2015-03-18T07:35:22.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-01T07:07:29.000Z", "avg_line_length": 26.9153846154, "max_line_length": 82, "alphanum_fraction": 0.5838811089, "include": true, "reason": "import numpy", "num_tokens": 1011} |
#!/usr/bin/env python3
import struct
import time
import numpy as np
import pandas as pd
from getpass import getpass
from bluepy.btle import Peripheral, DefaultDelegate
# BLE MAC address of the target Romi robot.
addr = 'C0:98:E5:51:EE:C5'
# A BLE address is six octets rendered as "AA:BB:CC:DD:EE:FF" (17 chars).
if len(addr) != 17:
    raise ValueError("Invalid address supplied")
# Define UUIDs for BLE connection (one service plus four characteristics)
SERVICE_UUID = "4607eda0-f65e-4d59-a9ff-84420d87a4ca" # Romi BLE service
DRIVE_COMMAND_UUID = "4607eda1-f65e-4d59-a9ff-84420d87a4ca"# Romi drive command
DATA_UUID = "4607eda2-f65e-4d59-a9ff-84420d87a4ca"# Data received from Romi
ACK_UUID = "4607eda3-f65e-4d59-a9ff-84420d87a4ca"# Acknowledgement from Romi
DATA_READY_UUID = "4607eda4-f65e-4d59-a9ff-84420d87a4ca"# Data ready to be read from Romi
class RobotDelegate(DefaultDelegate):
    """Receives BLE notifications from the robot and logs them."""

    def __init__(self, controller):
        # Keep a back-reference so notification handling can reach the
        # controller's characteristics if needed.
        self.robot_controller = controller
        DefaultDelegate.__init__(self)

    def handleNotification(self, cHandle, data):
        """Called by bluepy whenever the peripheral sends a notification.

        Fix: the original read ``self.ack`` here, an attribute that does not
        exist on the delegate (the characteristic lives on the controller),
        so every notification raised AttributeError. The read value was
        unused, so the broken statement is dropped.
        """
        print(cHandle)
        print(data)
class RobotController():
    """Runs drive trials on a BLE-connected Romi robot and logs odometry.

    For each trial: wait until the robot is idle (ack == 0), send a drive
    command, stream odometry chunks while it drives, and write the collected
    samples to a CSV once the robot reports idle again.
    """

    def __init__(self, address):
        # Fix: the original ignored the `address` parameter and used the
        # module-level `addr` instead.
        self.robot = Peripheral(address)
        self.robot.setDelegate(RobotDelegate(self))
        print("connected")

        # Get the service and characteristic handles from the robot.
        self.sv = self.robot.getServiceByUUID(SERVICE_UUID)
        # Fix: the original referenced an undefined name CHAR_UUID here
        # (NameError); the drive-command characteristic is DRIVE_COMMAND_UUID.
        self.drive_command = self.sv.getCharacteristics(DRIVE_COMMAND_UUID)[0]
        self.data = self.sv.getCharacteristics(DATA_UUID)[0]
        self.ack = self.sv.getCharacteristics(ACK_UUID)[0]
        self.data_ready = self.sv.getCharacteristics(DATA_READY_UUID)[0]
        self.data_list = []

        # Set trial parameters
        ls = float(input("Left Speed: "))
        rs = float(input("Right Speed: "))
        t = float(input("Time (float seconds): "))
        name = "l_{}_r_{}".format(int(ls), int(rs))
        n = int(input("Num trials: "))

        for i in range(n):
            while True:
                ack = struct.unpack("B", self.ack.read())[0]
                if ack == 0:
                    # Robot is idle. If data was collected, the trial is
                    # finished: write it out and move on to the next one.
                    # (Fix: the original's indentation left the send/ack
                    # sequence below as unreachable dead code.)
                    if self.data_list:
                        self.write_data(ls, rs, "{}_{}".format(name, i))
                        self.data_list = []
                        break
                    # Send the command once the user confirms.
                    input("Ready?")
                    self.send_command(ls, rs, t)
                    # Wait until the robot acknowledges before proceeding.
                    # (Fix: the original busy-waited on a stale ack value
                    # and never re-read it, spinning forever.)
                    while struct.unpack("B", self.ack.read())[0] != 1:
                        continue
                else:
                    data_ready = struct.unpack("B", self.data_ready.read())[0]
                    # Read a chunk of odometry when the robot has one ready.
                    if data_ready:
                        data = struct.unpack("f" * 30, self.data.read())
                        self.data_list.extend(data)
                        self.data_ready.write(struct.pack('B', *[False]))
                    else:
                        print("Waiting...")

    def send_command(self, ls, rs, t):
        """Tell the Romi to drive at (ls, rs) wheel speeds for t seconds."""
        # Fix: the original wrote to self.ch, which is never defined; the
        # drive-command characteristic is self.drive_command.
        self.drive_command.write(struct.pack('fff', *[ls, rs, t]))

    def write_data(self, ls, rs, name):
        """Write one trial's collected samples to data/<name>.csv."""
        # Samples arrive as repeating (left_dist, right_dist, time) triples.
        left_input = [ls] * (len(self.data_list) // 3)
        right_input = [rs] * (len(self.data_list) // 3)
        left_dists = self.data_list[::3]
        right_dists = self.data_list[1::3]
        times = self.data_list[2::3]

        header = ["left_input", "right_input", "left_distance", "right_distance", "time"]
        all_data = [left_input, right_input, left_dists, right_dists, times]
        df = pd.DataFrame(all_data).transpose()
        df.columns = header
        print(df)
        df.to_csv("data/{}.csv".format(name), index=False)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Always release the BLE connection on context exit.
        self.robot.disconnect()
# Script entry point: connect to the robot (the interactive trial loop runs
# in RobotController.__init__) and disconnect cleanly when the block exits.
with RobotController(addr) as robot:
    # getpass is used only to pause without echoing the typed input.
    getpass('Input waypoints for car')
| {"hexsha": "bb712194c0716a240b35cfed02e4de622db63532", "size": 4059, "ext": "py", "lang": "Python", "max_stars_repo_path": "software/apps/romi_sysid/robot_control.py", "max_stars_repo_name": "aparande/Robo-AR", "max_stars_repo_head_hexsha": "797b7375e59e049b9b2c21ff926cc42286d39b05", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-12-17T06:07:44.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-17T06:07:44.000Z", "max_issues_repo_path": "software/apps/romi_sysid/robot_control.py", "max_issues_repo_name": "aparande/Robo-AR", "max_issues_repo_head_hexsha": "797b7375e59e049b9b2c21ff926cc42286d39b05", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-12-12T14:07:57.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-12T14:07:57.000Z", "max_forks_repo_path": "software/apps/romi_sysid/robot_control.py", "max_forks_repo_name": "aparande/Robo-AR", "max_forks_repo_head_hexsha": "797b7375e59e049b9b2c21ff926cc42286d39b05", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.2924528302, "max_line_length": 89, "alphanum_fraction": 0.5865976842, "include": true, "reason": "import numpy", "num_tokens": 989} |
import matplotlib.ticker as ticker
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import numpy.linalg as linalg
import numpy as np
import pathlib
path = pathlib.Path().absolute()
from E import *
def plot_matrices(N, J, h, PBC=True):
    """Plot the Hamiltonian and translation operator, transformed into their
    (approximately) common eigenbasis, as annotated 2D images.

    The basis comes from diagonalizing H + 0.001*T: the small T perturbation
    splits degenerate energy levels by momentum, so the eigenvectors
    (block-)diagonalize both operators at once.
    """
    H = HeisenbergHamiltonian(N, J, h, PBC=PBC)
    T = getT(N)

    HT = H + 0.001 * T
    U1, U2 = linalg.eig(HT)
    # Order the basis by the real part of the perturbed eigenvalues.
    idx = U1.real.argsort()[::+1]
    U2 = U2[:, idx]
    # (Dead code removed: the original also diagonalized H and T separately,
    # sorted their spectra and identified -pi with pi, but never used any of
    # those results in this function.)

    Hd = np.dot(np.dot(U2.conj().T, H), U2).real
    Td = np.dot(np.dot(U2.conj().T, T), U2).real
    matrices = [Hd, Td]

    fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(22, 10))
    titles = ['Hamiltonian', 'Translation Operator']
    for ax, txt, M in zip(ax.flat, titles, matrices):
        ax.set(xlabel='state')
        ax.set(ylabel='state')
        ax.xaxis.set_major_locator(MaxNLocator(integer=True))
        ax.yaxis.set_major_locator(MaxNLocator(integer=True))
        ax.yaxis.label.set_size(24)
        ax.xaxis.label.set_size(24)
        im = ax.imshow(M.real)
        ax.set_title("{}".format(txt), fontdict={'fontsize': 24})
        fig.colorbar(im, ax=ax)
        # Annotate non-negligible entries with their rounded values.
        # NOTE(review): text is placed at (i, j) for the value M[i][j]; with
        # imshow's (x=col, y=row) convention this displays the transpose.
        # Kept as-is to preserve the original output -- confirm intent.
        for i in range(len(M)):
            for j in range(len(M)):
                if np.abs(M[i][j]) > 1E-10:
                    c = np.round(M[i][j].real, 4)
                    ax.text(i, j, c, va='center', ha='center', color='#71ACB3', fontsize=14)

    plt.savefig(str(path) + '/trials/matrices.png')
    plt.show()
def plot_scatter(N, J, h, PBC):
    """Plot the dispersion relation E(p) and highlight the ground state.

    Diagonalizes H + 0.001*T, reads energies and momenta off the diagonals of
    the transformed operators, and scatter-plots E against p/pi. The
    minimum-energy state is drawn with a larger marker.

    Parameters
    ----------
    N : int
        Number of spins.
    J : sequence of 3 floats
        Coupling constants (Jx, Jy, Jz).
    h : sequence of 3 floats
        Field components (hx, hy, hz).
    PBC : bool
        Periodic boundary conditions.
    """
    H = HeisenbergHamiltonian(N, J, h, PBC=PBC)
    T = getT(N)
    # Small T admixture selects simultaneous eigenvectors of H and T inside
    # degenerate energy subspaces.
    HT = H + 0.001 * T
    eigvals, eigvecs = linalg.eig(HT)
    order = eigvals.real.argsort()
    eigvecs = eigvecs[:, order]
    # Energies and momenta are the (near-)diagonal entries of the transformed
    # operators; the momentum is the phase of the translation eigenvalue.
    energies = np.diag(np.dot(np.dot(eigvecs.conj().T, H), eigvecs)).real
    momenta = np.angle(np.diag(np.dot(np.dot(eigvecs.conj().T, T), eigvecs)))
    ind = energies.argsort()
    energies = energies[ind]
    momenta = momenta[ind]
    # Identify -pi with pi so the spectrum lies on a single branch.
    ind2 = np.where(np.abs(momenta + np.pi) < 1E-6)
    momenta[ind2] = -1. * momenta[ind2]

    # Plot the dispersion.
    fig, ax = plt.subplots(figsize=(7, 5), tight_layout=True)
    ax.title.set_text("N={},$J_x$={},$J_y$={},$J_z$={},$h_x$={},$h_y$={},$h_z$={}".format(
        N, J[0], J[1], J[2], h[0], h[1], h[2]))
    ax.set(xlabel=r'p/$\pi$', ylabel='E(p)')
    ax.scatter(momenta / np.pi, energies.real)
    # First index attaining the minimum energy (the ground state).
    gs = int(np.argmin(energies))
    ax.scatter(momenta[gs] / np.pi, energies[gs].real, marker="o", s=100)
    plt.savefig(str(path) + '/trials/scatter.png')
    plt.show()
| {"hexsha": "d95e30c2dff576a13bd5d0962eeac8bc0be6ac6e", "size": 3109, "ext": "py", "lang": "Python", "max_stars_repo_path": "matrix_plot.py", "max_stars_repo_name": "giopolykra/Transverse_Ising", "max_stars_repo_head_hexsha": "1d8bf96112ae6e89fecc35d0f9c4bd820804da07", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "matrix_plot.py", "max_issues_repo_name": "giopolykra/Transverse_Ising", "max_issues_repo_head_hexsha": "1d8bf96112ae6e89fecc35d0f9c4bd820804da07", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "matrix_plot.py", "max_forks_repo_name": "giopolykra/Transverse_Ising", "max_forks_repo_head_hexsha": "1d8bf96112ae6e89fecc35d0f9c4bd820804da07", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.7263157895, "max_line_length": 123, "alphanum_fraction": 0.5982631071, "include": true, "reason": "import numpy", "num_tokens": 994} |
#include "VariableSpace.h"
#include "ContainerImpl.h"
#include "../API/Yap/VdfParser.h"
#include <vector>
#include <sstream>
#include <boost/lexical_cast.hpp>
using namespace Yap;
using namespace std;
// IMPLEMENT_VARIABLE_NO_ENABLE injects the boilerplate common to every
// IVariable implementation: the type/id/title/description accessors plus
// their backing fields. A macro (rather than a base class) is used so each
// class keeps its own field layout for the member-initializer lists below.
#define IMPLEMENT_VARIABLE_NO_ENABLE public: \
    virtual int GetType() const override { return _type;} \
    virtual const wchar_t * GetId() const override { return _id.c_str(); }\
    virtual void SetId(const wchar_t * id) override { _id = id; } \
    virtual const wchar_t * GetTitle() const override { return _title.c_str();}\
    virtual void SetTitle(const wchar_t * title) override { _title = title;}\
    virtual const wchar_t * GetDescription() const override { return _description.c_str(); } \
    virtual void SetDescription(const wchar_t * description) override { _description = description; }\
protected: \
    std::wstring _id; \
    std::wstring _description; \
    std::wstring _title;\
    int _type;

// IMPLEMENT_VARIABLE additionally injects the enable/disable flag and its
// accessors. NOTE(review): classes using this macro must initialize _enabled
// in their constructors; the macro does not. Enable() lacks the `override`
// specifier that IsEnabled() has — presumably both override IVariable
// members; confirm against the IVariable declaration.
#define IMPLEMENT_VARIABLE IMPLEMENT_VARIABLE_NO_ENABLE \
public: \
    virtual void Enable(bool enable) { _enabled = enable;} \
    virtual bool IsEnabled() const override { return _enabled;} \
protected: \
    bool _enabled;
namespace Yap
{
namespace _details
{
// variable_store_type maps the interface-level element type to the concrete
// type used internally for storage, for both scalars and arrays.
template<typename T> struct variable_store_type
{
    typedef T type;
    typedef std::vector<T> vector_type;
};

// bool: stored as-is. Note std::vector<bool> is the packed specialization,
// whose elements are proxy objects rather than bool&.
template <> struct variable_store_type<bool>
{
    typedef bool type;
    typedef std::vector<bool> vector_type;
};

// Strings: the interface trades in raw wide-char pointers, but storage owns
// the text via std::wstring.
template <> struct variable_store_type<const wchar_t * const>
{
    typedef std::wstring type;
    typedef std::vector<std::wstring> vector_type;
};

// Nested variables: stored through reference-counted smart pointers.
template <> struct variable_store_type<IVariable*>
{
    typedef SmartPtr<IVariable> type;
    typedef std::vector<type> vector_type;
};
// Scalar variable holding a single int, double, bool or string value.
template <typename TYPE>
class SimpleVariable : public ISimpleVariable<TYPE>
{
    static_assert(std::is_same<TYPE, int>::value ||
                  std::is_same<TYPE, double>::value ||
                  std::is_same<TYPE, bool>::value ||
                  std::is_same<TYPE, const wchar_t * const>::value,
                  "You can only use one of the following types: int, double, bool, const wchar_t * const");

    typedef typename variable_store_type<TYPE>::type type;

    IMPLEMENT_SHARED(SimpleVariable<TYPE>)
    IMPLEMENT_VARIABLE

public:
    SimpleVariable(
        const wchar_t * id,
        const wchar_t * description) :
        _id{ id },
        _description{ description != nullptr ? description : L"" },
        _type{ variable_type_id<TYPE>::type_id },
        _enabled{ false },  // the IMPLEMENT_VARIABLE macro does not initialize this
        _value{ TYPE(0) }
    {
    }

    SimpleVariable(const SimpleVariable<TYPE>& rhs) :
        _id{ rhs._id },
        _description{ rhs._description },
        _type{ rhs._type },
        _enabled{ rhs._enabled },
        _value{ rhs._value }
    {
    }

    // Copy from any implementation of the scalar interface.
    SimpleVariable(ISimpleVariable<TYPE> & rhs) :
        _id{ rhs.GetId() },
        _description{ rhs.GetDescription() },
        _type{ rhs.GetType() },
        _enabled{ rhs.IsEnabled() },
        _value{ rhs.Get() }
    {
    }

    virtual TYPE Get() const override
    {
        return _value;
    }

    virtual void Set(TYPE value) override
    {
        _value = value;
    }

    // Parse a value from the head of value_string using stream extraction.
    // Returns the number of characters consumed, or 0 on failure.
    virtual size_t FromString(const wchar_t * value_string) override
    {
        std::wistringstream input(value_string);
        input >> _value;
        if (input.fail())
            return 0;   // nothing parsable at the head of the string

        if (input.eof())
        {
            // Extraction consumed the whole string. tellg() would return -1
            // here (eofbit makes the stream sentry fail in C++11), so report
            // the full length explicitly instead of computing it from tellg().
            return std::wstring(value_string).size();
        }

        return static_cast<size_t>(input.tellg());
    }

    virtual const wchar_t * const ToString() override
    {
        _value_string = boost::lexical_cast<wstring>(_value);
        return _value_string.c_str();
    }

private:
    type _value;
    std::wstring _value_string;   // owns the buffer returned by ToString()
};
// Begin specialization for string type.
// Stores the text in an owned std::wstring; ToString/FromString use a
// double-quoted representation.
template <>
class SimpleVariable<const wchar_t* const> : public ISimpleVariable<const wchar_t* const>
{
    IMPLEMENT_SHARED(SimpleVariable<const wchar_t* const>)
    IMPLEMENT_VARIABLE

public:
    SimpleVariable(
        const wchar_t * id,
        const wchar_t * description) :
        _id{ id },
        _description{ description != nullptr ? description : L"" },
        _type{ VariableString },
        _enabled{ false },  // the IMPLEMENT_VARIABLE macro does not initialize this
        _value{ L"" }
    {
    }

    SimpleVariable(const SimpleVariable<const wchar_t* const>& rhs) :
        _id{ rhs._id },
        _description{ rhs._description },
        _type{ rhs._type },
        _enabled{ rhs._enabled },
        _value{ rhs._value }
    {
    }

    // Copy from any implementation of the scalar string interface.
    SimpleVariable(ISimpleVariable<const wchar_t* const> & rhs) :
        _id{ rhs.GetId() },
        _description{ rhs.GetDescription() },
        _type{ rhs.GetType() },
        _enabled{ rhs.IsEnabled() },
        _value{ rhs.Get() }
    {
    }

    // Hand out the internal buffer; invalidated by the next Set/FromString.
    virtual const wchar_t * const Get() const override
    {
        return _value.c_str();
    }

    virtual void Set(const wchar_t * const value) override
    {
        _value = value;
    }

    // Serialize as "value" (always double-quoted).
    virtual const wchar_t * const ToString() override
    {
        _value_string = L'\"';
        _value_string += _value + L'\"';
        return _value_string.c_str();
    }

    // Parse a double-quoted string from the head of value_string.
    // Returns characters consumed (including the closing quote), 0 on failure.
    virtual size_t FromString(const wchar_t * value_string) override
    {
        assert(value_string != nullptr);
        _value_string = value_string;
        assert(!_value_string.empty());

        auto first_quote_pos = _value_string.find_first_not_of(L" \t\n\r");
        if (first_quote_pos == wstring::npos)
            return 0;

        if (_value_string[first_quote_pos] != L'\"')
            return 0;

        auto second_quote_pos = _value_string.find(L'\"', first_quote_pos + 1);
        if (second_quote_pos == wstring::npos)
            return 0;

        _value = _value_string.substr(first_quote_pos + 1, second_quote_pos - first_quote_pos - 1);
        return second_quote_pos + 1;
    }

private:
    std::wstring _value;
    std::wstring _value_string;   // owns the buffer returned by ToString()
};
// template <>
// const wchar_t * const SimpleVariable<const wchar_t * const>::Get() const
// {
// return _value.c_str();
// }
//
// template <>
// void SimpleVariable<const wchar_t * const>::Set(const wchar_t * const value)
// {
// _value = value;
// }
//
// template <>
// const wchar_t * const SimpleVariable<const wchar_t * const>::ToString()
// {
// _value_string = L'\"';
// _value_string += _value + L'\"';
// return _value_string.c_str();
// }
//
// template <>
// size_t SimpleVariable<const wchar_t * const>::FromString(const wchar_t * value_string)
// {
// assert(value_string != nullptr);
// _value_string = value_string;
// assert(!_value_string.empty());
// _value = _value_string;
// return _value_string.size();
//
// // auto first_quote_pos = _value_string.find_first_not_of(L" \t\n\r");
// // if (first_quote_pos == wstring::npos)
// // return 0;
// //
// // if (_value_string[first_quote_pos] != L'\"')
// // return 0;
// //
// // auto second_quote_pos = _value_string.find(L'\"', first_quote_pos + 1);
// // if (second_quote_pos == wstring::npos)
// // return 0;
// //
// // _value = _value_string.substr(first_quote_pos + 1, second_quote_pos - first_quote_pos - 1);
// // return second_quote_pos + 1;
// }
// End specialization for SimpleVariable<wchar_t *>.
// Array variable whose elements are stored by value (see variable_store_type).
// Serialization uses the form [e1, e2, ...].
template <typename T>
class ValueArrayVariable : public virtual IValueArrayVariable<T>
{
    static_assert(std::is_same<T, int>::value ||
                  std::is_same<T, double>::value ||
                  std::is_same<T, bool>::value ||
                  std::is_same<T, const wchar_t * const>::value ||
                  std::is_same<T, IVariable*>::value,
                  "You can only use one of the following types: int, double, bool, const wchar_t * const");

    typedef typename variable_store_type<T>::vector_type vector_type;
    typedef typename variable_store_type<T>::type type;

    IMPLEMENT_SHARED(ValueArrayVariable<T>)
    IMPLEMENT_VARIABLE

public:
    // Create an array holding `size` copies of `value`.
    ValueArrayVariable(size_t size, T value, const wchar_t * id, const wchar_t * title = nullptr, const wchar_t * description = nullptr) :
        _id{ id },
        _title{ title != nullptr ? title : L"" },    // was wrongly taking `description`
        _description{ description != nullptr ? description : L"" },
        _type{ variable_type_id<T>::array_type_id },
        _enabled{ false }
    {
        _elements.resize(size, value);
    }

    ValueArrayVariable(const ValueArrayVariable<T>& rhs) :
        _elements{ rhs._elements },
        _id{ rhs._id },
        _title{ rhs._title },
        _description{ rhs._description },
        _type{ rhs._type },
        _enabled{ rhs._enabled }
    {
    }

    // Copy from any implementation of the array interface.
    ValueArrayVariable(IValueArrayVariable<T>& rhs) :
        _id{ rhs.GetId() },
        _title{ rhs.GetTitle() },
        _type{ variable_type_id<T>::array_type_id },   // was variable_type_id<VALUE_TYPE>, an undeclared name
        _description{ rhs.GetDescription() },
        _enabled{ rhs.IsEnabled() }
    {
        auto source_elements = rhs.Data();
        _elements.resize(rhs.GetSize());
        for (size_t i = 0; i < rhs.GetSize(); ++i) {
            _elements[i] = source_elements[i];
        }
    }

    virtual size_t GetSize() const override
    {
        return _elements.size();
    }

    virtual void SetSize(size_t size) override
    {
        _elements.resize(size);
    }

    virtual T Get(size_t index) const override
    {
        assert(index < _elements.size());
        return _elements[index];
    }

    virtual void Set(size_t index, T value) override
    {
        assert(index < _elements.size());
        _elements[index] = value;
    }

    // Serialize as [e1, e2, ...].
    virtual const wchar_t * const ToString() override
    {
        _value_string = L'[';

        bool first = true;
        for (auto element : _elements)
        {
            if (!first)
            {
                _value_string += L", ";
            }
            else
            {
                first = false;
            }
            _value_string += ElementToString(element);
        }
        _value_string += L']';

        return _value_string.c_str();
    }

    // Parse [e1, e2, ...]; returns characters consumed, 0 on failure.
    // Note: an empty array "[]" is rejected — at least one element is required.
    virtual size_t FromString(const wchar_t * value_string) override
    {
        _elements.clear();
        _value_string = value_string;

        auto left_bracket_pos = _value_string.find_first_not_of(L" \t\n\r");
        if (left_bracket_pos == wstring::npos)
            return 0;

        if (_value_string[left_bracket_pos] != L'[' || _value_string.size() < left_bracket_pos + 3)
            return 0;

        size_t begin = left_bracket_pos + 1;
        for (;;)
        {
            type element;
            auto chars_consumed_by_element = ElementFromString(element, _value_string.c_str() + begin);
            if (chars_consumed_by_element == 0)
                return 0;

            _elements.push_back(element);

            auto separator_pos = _value_string.find_first_not_of(L" \t\n\r", begin + chars_consumed_by_element);
            if (separator_pos == wstring::npos)
                return 0;

            if (_value_string[separator_pos] == L']')
            {
                return separator_pos + 1;
            }
            else if (_value_string[separator_pos] == L',')
            {
                begin = separator_pos + 1;
                if (begin >= _value_string.size())
                    return 0;
            }
            else
            {
                return 0;
            }
        }
    }

protected:
    std::wstring ElementToString(type element)
    {
        std::wostringstream output;
        output << element;
        return output.str();
    }

    // Parse one element from the head of value_string.
    // Returns characters consumed, or 0 on failure.
    size_t ElementFromString(type& element, const wchar_t * value_string)
    {
        wistringstream input(value_string);
        input >> element;
        if (input.fail())
            return 0;   // previously tellg() returned -1 here, yielding a huge size_t

        if (input.eof())
            return std::wstring(value_string).size();   // consumed to the end; tellg() would be -1

        return static_cast<size_t>(input.tellg());
    }

    vector_type _elements;
    std::wstring _value_string;   // owns the buffer returned by ToString()
};
// Array that additionally exposes mutable references to its elements, on top
// of the value get/set interface inherited from ValueArrayVariable.
template <typename T>
class ReferenceArrayVariable : public ValueArrayVariable<T>, public IElementReference<T>
{
    IMPLEMENT_SHARED(ReferenceArrayVariable<T>)

public:
    using ValueArrayVariable<T>::ValueArrayVariable;   // inherit all constructors

    // Mutable reference to the element at index (bounds-checked in debug only).
    virtual T& Element(size_t index) override
    {
        assert(index < _elements.size());
        return _elements[index];
    }
};
// Array that additionally exposes its contiguous storage as a raw pointer.
// Only meaningful for element types whose storage type equals T (int, double);
// not usable for bool (packed vector) or strings.
template <typename T>
class RawArrayVariable : public ReferenceArrayVariable<T>, public IRawArray<T>
{
    IMPLEMENT_SHARED(RawArrayVariable<T>)

public:
    using ReferenceArrayVariable<T>::ReferenceArrayVariable;   // inherit all constructors

    // Pointer to the contiguous element storage; invalidated by SetSize.
    virtual T * Data() override
    {
        return _elements.data();
    }
};
// Specialization for ValueArrayVariable<wchar_t *>.
// String elements are stored as std::wstring; hand out the internal buffer.
// NOTE(review): the returned pointer is invalidated by a later Set/SetSize.
template <>
const wchar_t * const ValueArrayVariable<const wchar_t * const>::Get(size_t index) const
{
    assert(index < _elements.size());
    return _elements[index].c_str();
}
// Copy the given text into the owned std::wstring element.
template <>
void ValueArrayVariable<const wchar_t * const>::Set(size_t index, const wchar_t * const value)
{
    assert(index < _elements.size());
    _elements[index] = value;
}
// Elements are SmartPtr<IVariable>; return the raw pointer without giving up
// ownership. The const_cast works around Get() being const while the
// element access / SmartPtr::get() presumably is not.
template <>
IVariable * ValueArrayVariable<IVariable *>::Get(size_t index) const
{
    assert(index < _elements.size());
    return (const_cast<ValueArrayVariable<IVariable*>*>(this))->_elements[index].get();
}
// Take shared ownership of the given variable (no clone is made).
template <>
void ValueArrayVariable<IVariable*>::Set(size_t index, IVariable * value)
{
    assert(index < _elements.size());
    _elements[index] = YapShared(value);
}
// Begin specialization for ValueArrayVariable<bool>.
// Booleans serialize as the unquoted keywords "true"/"false".
template <>
std::wstring ValueArrayVariable<bool>::ElementToString(bool element)
{
    if (element)
        return L"true";

    return L"false";
}
// Parse a single bool element ("true"/"false") from the head of value_string.
// Returns the number of characters consumed, or 0 if neither keyword matches.
template <>
size_t ValueArrayVariable<bool>::ElementFromString(bool& element, const wchar_t * value_string)
{
    // The string to inspect must be built from value_string; a
    // default-constructed (empty) wstring here made parsing always fail.
    wstring str(value_string);
    if (str.substr(0, 4) == L"true")
    {
        element = true;
        return 4;
    }
    else if (str.substr(0, 5) == L"false")
    {
        element = false;
        return 5;
    }
    else
    {
        return 0;
    }
}
// Begin specialization for ArrayVariable<SmartPtr<IVariable>>
template <>
ValueArrayVariable<IVariable*>::ValueArrayVariable(size_t size,
IVariable* value, const wchar_t * id, const wchar_t * title, const wchar_t * description)
{
assert(size >= 1 && "There should be at least one element in the array.");
assert(value != nullptr && "The template could not be null.");
_type = variable_type_id<IVariable*>::array_type_id;
_elements.resize(size);
for (auto& element : _elements)
{
element = YapShared(value->Clone());
}
}
// Deep-copy: each element of the source array is cloned, so the two arrays
// share no variable instances afterwards.
// NOTE(review): _enabled is not copied here (nor initialized by the macro) —
// confirm whether the flag is meant to survive copying.
template <>
ValueArrayVariable<IVariable*>::ValueArrayVariable(const ValueArrayVariable<IVariable*>& rhs) :
    _id{ rhs._id },
    _title{rhs._title},
    _description{ rhs._description }
{
    _type = rhs._type;
    _elements.reserve(rhs.GetSize());
    for (auto element : rhs._elements)
    {
        _elements.push_back(YapShared(element->Clone()));
    }
}
// Expose the element slot as a mutable IVariable* reference.
// NOTE(review): this binds an IVariable*& to the result of SmartPtr::get();
// that only compiles (and stays valid) if get() returns a non-const pointer
// lvalue — verify against the SmartPtr implementation, otherwise this hands
// out a dangling reference.
template <>
IVariable *& ReferenceArrayVariable<IVariable*>::Element(size_t index)
{
    assert(index < _elements.size());
    return _elements[index].get();
}
// Growing the array clones element 0 into each newly created slot so every
// slot holds a valid struct instance (the constructor guarantees size >= 1,
// so _elements[0] exists). Shrinking simply drops the tail.
template <>
void ReferenceArrayVariable<IVariable*>::SetSize(size_t size)
{
    size_t old_size = _elements.size();
    _elements.resize(size);
    for (size_t i = old_size; i < size; ++i)
    {
        _elements[i] = YapShared(_elements[0]->Clone());
    }
}
// Struct elements serialize via their own ToString().
// NOTE(review): ElementToString is declared in ValueArrayVariable, not in
// ReferenceArrayVariable, and the call site in ToString() binds non-virtually
// in the base — confirm this specialization is actually selected.
template <>
std::wstring ReferenceArrayVariable<IVariable*>::ElementToString(type element)
{
    return element->ToString();
}
// Struct elements parse themselves; returns characters consumed (0 = failure).
// NOTE(review): ElementFromString is declared in ValueArrayVariable, not in
// ReferenceArrayVariable — confirm this specialization on the derived class
// is actually picked up by the base-class parser.
template <>
size_t ReferenceArrayVariable<IVariable*>::ElementFromString(type& element, const wchar_t * value_string)
{
    return element->FromString(value_string);
}
struct StructVariable : public IStructVariable
{
IMPLEMENT_SHARED(StructVariable)
IMPLEMENT_VARIABLE_NO_ENABLE
public:
StructVariable(const wchar_t * id, const wchar_t * title = nullptr, const wchar_t * description = nullptr) :
_id{ id },
_title{title != nullptr ? title : L""},
_description{ description != nullptr ? description : L"" },
_type{ VariableStruct },
_enabled{false},
_members{ YapShared(new PtrContainerImpl<IVariable>) }
{
}
StructVariable(const StructVariable& rhs) :
_id{ rhs._id },
_title{rhs._title},
_description{ rhs._description },
_type{ rhs._type },
_enabled{rhs._enabled},
_members{ YapShared( rhs._members->Clone()) }
{
}
StructVariable(IStructVariable& rhs) :
_id{ rhs.GetId() },
_title {rhs.GetTitle()},
_description{ rhs.GetDescription() },
_type{ VariableStruct },
_enabled{ rhs.IsEnabled() },
_members{ YapShared(rhs.Members()->Clone()) }
{
}
StructVariable(IPtrContainer<IVariable> * variables, const wchar_t * id, const wchar_t * title = nullptr, const wchar_t * description = nullptr) :
_members{ YapShared(variables) },
_id{ id },
_title{ title != nullptr ? title : L"" },
_description{ description != nullptr ? description : L"" },
_enabled{false},
_type{ VariableStruct }
{
}
virtual IPtrContainer<IVariable> * Members() override
{
return _members.get();
}
virtual void Enable(bool enable)
{
_enabled = enable;
assert(_members);
auto iter = _members->GetIterator();
for (auto member = iter->GetFirst(); member != nullptr; member = iter->GetNext())
{
member->Enable(enable);
}
}
virtual bool IsEnabled() const override
{
return _enabled;
}
virtual const wchar_t * const ToString()
{
assert(_members);
_value_string = L'{';
auto iter = _members->GetIterator();
bool first = true;
for (auto member = iter->GetFirst(); member != nullptr; member = iter->GetNext())
{
if (!first)
{
_value_string += L", ";
}
else
{
first = false;
}
_value_string += L'\"';
_value_string += member->GetId();
_value_string += L"\"=";
_value_string += member->ToString();
}
_value_string += L'}';
return _value_string.c_str();
}
virtual size_t FromString(const wchar_t * value_string) override
{
assert(_members);
_value_string = value_string;
auto pos = _value_string.find_first_not_of(L" \t\n\r");
if (pos == wstring::npos)
return 0;
if (_value_string[pos] != L'{')
return 0;
for (;;)
{
if (pos + 1 >= _value_string.size())
return 0;
pos = _value_string.find_first_not_of(L" \t\n\r", pos + 1);
if (pos == wstring::npos || _value_string[pos] != L'\"')
return 0;
if (pos + 1 >= _value_string.size())
return 0;
auto quote_pos = _value_string.find(L'\"', pos + 1);
if (quote_pos == wstring::npos)
return 0;
auto member_id = _value_string.substr(pos + 1, quote_pos - pos - 1);
pos = quote_pos;
if (pos + 1 > _value_string.size())
return 0;
pos = _value_string.find_first_not_of(L" \t\n\r", pos + 1);
if (_value_string[pos] != L'=')
return 0;
auto member = _members->Find(member_id.c_str());
auto char_consumed = member->FromString(_value_string.c_str() + pos + 1);
if (char_consumed == 0)
return 0;
pos = pos + 1 + char_consumed;
pos = _value_string.find_first_not_of(L" \t\n\r", pos);
if (pos == wstring::npos)
return 0;
if (_value_string[pos] == L',')
{
if (pos + 1 >= _value_string.size())
return 0;
}
else if (_value_string[pos] == L'}')
{
return pos + 1;
}
else
{
return 0;
}
}
}
private:
SmartPtr<PtrContainerImpl<IVariable>> _members;
bool _enabled;
wstring _value_string;
};
}; // end Yap::_details
using namespace _details;
// Create an empty variable space with the built-in types registered.
VariableSpace::VariableSpace() :
    _variables(YapShared(new PtrContainerImpl<IVariable>))
{
    InitTypes();
}
// Wrap an existing variable container (takes shared ownership; no copy).
VariableSpace::VariableSpace(IVariableContainer * variables) :
    _variables(YapShared(variables))
{
    InitTypes();
}
// Deep-copy: the variable container is cloned.
// NOTE(review): _types is already copied by the member initializer, and the
// emplace calls inside InitTypes() are no-ops for existing keys — the
// InitTypes() call here is redundant but harmless.
VariableSpace::VariableSpace(const VariableSpace& rhs) :
    _variables{ YapShared(rhs.Variables()->Clone()) },
    _types {rhs._types}
{
    InitTypes();
}
// Move: steals the variable container and leaves the source empty.
// _types is copied, not moved.
VariableSpace::VariableSpace(VariableSpace&& rhs) :
    _variables{rhs._variables},
    _types{rhs._types}
{
    rhs._variables.reset();
}
// All members clean up through their own destructors.
VariableSpace::~VariableSpace()
{
}
// Copy-assignment clones the source container. Self-assignment is safe:
// the clone is completed before _variables is rebound.
const VariableSpace& VariableSpace::operator = (const VariableSpace& rhs)
{
    _variables = YapShared(rhs.Variables()->Clone());
    _types = rhs._types;

    return *this;
}
// Move-assignment shares, then releases, the source's container.
// NOTE(review): self-move-assignment would leave this object with an empty
// container — confirm callers never do that.
const VariableSpace& VariableSpace::operator = (VariableSpace&& rhs)
{
    _variables = rhs._variables;
    _types = rhs._types;
    rhs._variables.reset();

    return *this;
}
// Register the built-in scalar type prototypes and the size-1 prototype
// arrays that Add()/AddArray() clone when creating new variables.
// Returns true unconditionally.
bool VariableSpace::InitTypes()
{
    // Scalar prototypes, keyed by the textual type name.
    _types.emplace(L"int", YapShared(new SimpleVariable<int>(L"int", nullptr)));
    _types.emplace(L"float", YapShared(new SimpleVariable<double>(L"float", nullptr)));
    _types.emplace(L"string", YapShared(new SimpleVariable<const wchar_t * const>(L"string", nullptr)));
    _types.emplace(L"bool", YapShared(new SimpleVariable<bool>(L"bool", nullptr)));

    // Array prototypes, keyed by element type name.
    _basic_array_types.emplace(L"int", YapShared(new RawArrayVariable<int>(1, 0, L"array<int>", nullptr)));
    _basic_array_types.emplace(L"float", YapShared(new RawArrayVariable<double>(1, 0.0, L"array<float>", nullptr)));
    _basic_array_types.emplace(L"bool", YapShared(new ValueArrayVariable<bool>(1, false, L"array<bool>", nullptr)));
    _basic_array_types.emplace(L"string", YapShared(new ValueArrayVariable<const wchar_t * const>(1, L"", L"array<string>", nullptr)));

    return true;
}
using namespace _details;
// Convenience overload: translate a numeric type id to its textual name and
// delegate to the string-based Add(). Only the four scalar types are
// supported here (enforced by the assert in debug builds).
bool VariableSpace::Add(int type, const wchar_t * name, const wchar_t * description)
{
    static map<int, wstring> type_to_string{
        {VariableInt, L"int"},
        {VariableFloat, L"float"},
        {VariableBool, L"bool"},
        {VariableString, L"string"},};

    assert(type_to_string.find(type) != type_to_string.end());
    return Add(type_to_string[type].c_str(), name, description);
}
// Create a variable of the named (built-in or user-registered) type by
// cloning the registered prototype. Returns false if the type is unknown or
// the clone does not yield an IVariable.
bool VariableSpace::Add(const wchar_t * type,
    const wchar_t * id,
    const wchar_t * description)
{
    auto iter = _types.find(type);
    if (iter == _types.end())
        return false;

    if (!iter->second)
        return false;

    auto new_variable = dynamic_cast<IVariable*>(iter->second->Clone());
    if (new_variable == nullptr)
        return false;

    new_variable->SetId(id);
    new_variable->SetDescription(description);
    return _variables->Add(id, new_variable);
}
// Create an array variable. For basic element types the registered prototype
// array is cloned; for registered struct types a ReferenceArrayVariable of
// size 1 is built around a cloned template element.
// Returns false when the element type is unknown or cloning fails.
bool VariableSpace::AddArray(const wchar_t * element_type_id,
    const wchar_t * id,
    const wchar_t * description)
{
    IVariable * new_variable = nullptr;
    auto basic_array_type = _basic_array_types.find(element_type_id);
    if (basic_array_type != _basic_array_types.end())
    {
        new_variable = dynamic_cast<IVariable*>(basic_array_type->second->Clone());
        if (new_variable == nullptr)
            return false;

        new_variable->SetId(id);
    }
    else
    {
        auto iter = _types.find(element_type_id);
        if (iter == _types.end() || !iter->second)
            return false;

        auto template_element = dynamic_cast<IVariable*>(iter->second->Clone());
        if (template_element == nullptr)
            return false;   // Clone failed or produced an unrelated type

        // NOTE(review): template_element is cloned into the array but never
        // released afterwards — confirm ownership against the ctor's contract.
        new_variable = new ReferenceArrayVariable<IVariable*>(1, template_element, id);
    }

    new_variable->SetDescription(description);
    return _variables->Add(id, new_variable);
}
// Add an existing variable, keyed by its own id (takes shared ownership).
bool VariableSpace::Add(IVariable* variable)
{
    assert(variable != nullptr);
    return _variables->Add(variable->GetId(), variable);
}
// Mutable access to the underlying variable container.
IVariableContainer* VariableSpace::Variables()
{
    return _variables.get();
}
// Read-only access to the underlying variable container.
const IVariableContainer* VariableSpace::Variables() const
{
    return _variables.get();
}
/**
    @param id Specify the id of the variable to be enabled or disabled. If it's null,
    then all variables will be enabled or disabled.
    @param enable @a true to enable, @a false to disable.
*/
// Returns false only when a specific id was given and not found.
bool VariableSpace::Enable(const wchar_t * id, bool enable)
{
    if (id != nullptr)
    {
        auto variable = GetVariable(id);
        if (variable == nullptr)
            return false;

        variable->Enable(enable);
    }
    else
    {
        // No id given: toggle every top-level variable.
        auto iter = _variables->GetIterator();
        for (auto variable = iter->GetFirst(); variable != nullptr; variable = iter->GetNext())
        {
            variable->Enable(enable);
        }
    }

    return true;
}
// Report the enabled flag of the named variable; false if it does not exist.
// NOTE(review): GetVariable may throw VariableException for dotted ids that
// fail to resolve rather than returning nullptr — confirm that is intended
// for a simple query like this.
bool VariableSpace::IsEnabled(const wchar_t * id) const
{
    auto variable = GetVariable(const_cast<VariableSpace*>(this)->_variables.get(), id);
    return (variable != nullptr) ? variable->IsEnabled() : false;
}
// Resolve a (possibly nested) variable id against the given container.
//
// Supported forms:
//   "id"              plain lookup in `variables`
//   "outer.inner"     struct member access (recursive)
//   "arr[3].member"   element of a struct array, then member access
//
// @param type expected type bitmask; VariableAllTypes disables the check.
// @return the variable, or nullptr when a plain id is not found.
// @throws VariableException for malformed ids, unresolvable dotted paths,
//         out-of-range indices, or a type-mask mismatch.
IVariable * VariableSpace::GetVariable(IVariableContainer * variables,
    const wchar_t * id,
    int type)
{
    assert(variables != nullptr);
    assert(id != nullptr);

    wstring variable_id{ id };
    auto dot_pos = variable_id.find_first_of(L'.');
    if (dot_pos != wstring::npos)
    {
        IVariable * lhs_variable = nullptr;
        auto left_square_pos = variable_id.find_first_of(L'[');
        if (left_square_pos != wstring::npos && left_square_pos < dot_pos)
        {
            // "array[i].rest": resolve the indexed struct element first.
            auto right_square_pos = variable_id.find_first_of(L']', left_square_pos);
            if (right_square_pos == wstring::npos || right_square_pos > dot_pos)
                throw VariableException(id, VariableException::InvalidId);

            auto variable = GetVariable(variables, variable_id.substr(0, left_square_pos).c_str(), VariableStructArray);
            if (variable == nullptr)
                throw VariableException(id, VariableException::VariableNotFound);

            auto index = _wtoi(variable_id.substr(left_square_pos + 1, right_square_pos - left_square_pos - 1).c_str());
            auto array_variable = dynamic_cast<IValueArrayVariable<IVariable*>*>(variable);
            assert(array_variable != nullptr);
            if (index < 0 || index >= int(array_variable->GetSize()))
                throw VariableException(id, VariableException::OutOfRange);

            auto element_reference = dynamic_cast<IElementReference<IVariable*>*>(variable);
            assert(element_reference != nullptr);
            lhs_variable = element_reference->Element(index);
        }
        else
        {
            auto lhs = variable_id.substr(0, dot_pos); // left hand side of the dot operator.
            lhs_variable = variables->Find(lhs.c_str());
        }

        if (!lhs_variable)
            throw VariableException(id, VariableException::VariableNotFound);

        if (lhs_variable->GetType() != VariableStruct)
            throw VariableException(id, VariableException::NotAStruct);

        auto struct_variable = dynamic_cast<IStructVariable*>(lhs_variable);
        assert(struct_variable != nullptr);
        auto members = struct_variable->Members();
        assert(members != nullptr);

        // Recurse into the struct with the remainder of the id.
        return GetVariable(members, variable_id.substr(dot_pos + 1).c_str(), type);
    }
    else
    {
        auto variable = variables->Find(id);
        if (variable == nullptr)
            return nullptr;   // not found; callers such as VariableExists() rely on this
                              // (previously the null check was commented out and
                              // variable->GetType() dereferenced nullptr)

        if (type != VariableAllTypes && ((variable->GetType() | type) != type))
            throw VariableException(id, VariableException::TypeNotMatch);

        return variable;
    }
}
// Resolve an id (possibly dotted / indexed) against the top-level container.
IVariable * VariableSpace::GetVariable(const wchar_t * id, int expected_type)
{
    assert(id != nullptr && id[0] != 0);
    return GetVariable(_variables.get(), id, expected_type);
}
// Build a variable space from a .vdf definition file.
shared_ptr<VariableSpace> VariableSpace::Load(const wchar_t * path)
{
    VdfParser parser;
    return parser.CompileFile(path);
}
// Discard all variables; the registered types are kept.
void VariableSpace::Reset()
{
    _variables = YapShared(new PtrContainerImpl<IVariable>);
}
// Check whether a type with the given id has been registered.
bool VariableSpace::TypeExists(const wchar_t * type) const
{
    return _types.count(type) > 0;
}
bool VariableSpace::VariableExists(const wchar_t *variable_id) const
{
auto This = const_cast<VariableSpace*>(this);
return This->GetVariable(variable_id) != nullptr;
}
// Look up a registered type prototype; nullptr if not registered.
const IVariable * VariableSpace::GetType(const wchar_t * type) const
{
    auto iter = _types.find(type);
    return (iter != _types.end()) ? iter->second.get() : nullptr;
}
// Register a struct type whose members are the given variables.
// Returns false on allocation failure.
// NOTE(review): if the inner AddType rejects a duplicate id, the freshly
// created StructVariable is leaked — confirm duplicate ids cannot occur here.
bool VariableSpace::AddType(const wchar_t * type_id, IPtrContainer<IVariable> * variables)
{
    try
    {
        auto variable = new StructVariable(variables, type_id, nullptr);
        return AddType(type_id, variable);
    }
    catch(bad_alloc&)
    {
        return false;
    }
}
// Register a prototype variable under a new type id.
// Duplicate ids are rejected: assert in debug builds, false in release.
bool VariableSpace::AddType(const wchar_t * type_id, IVariable *type)
{
    assert(_types.find(type_id) == _types.end());
    assert(type != nullptr);
    assert(type_id != nullptr);

    if (_types.find(type_id) != _types.end())
        return false;

    _types.insert({type_id, YapShared(type)});
    return true;
}
// Resize a named array variable.
// Throws VariableException when the id resolves to a non-array type, and may
// propagate exceptions from GetVariable when the id cannot be resolved.
// NOTE(review): string arrays (VariableStringArray) are excluded from both
// the lookup mask and the type check — confirm whether that is intentional.
bool VariableSpace::ResizeArray(const wchar_t * id, size_t size)
{
    auto array = GetVariable(id, VariableBoolArray | VariableFloatArray | VariableIntArray | VariableStructArray);
    if (array == nullptr)
        return false;

    if (!(array->GetType() == VariableBoolArray || array->GetType() == VariableIntArray ||
        array->GetType() == VariableFloatArray || array->GetType() == VariableStructArray))
    {
        throw VariableException(id, VariableException::NotAnArray);
    }

    auto array_base = dynamic_cast<IArrayBase*>(array);
    assert(array_base != nullptr);
    array_base->SetSize(size);

    return true;
}
} // end Yap
| {"hexsha": "2d3b42f5e7e3459763421b5da1a2a8e962b70447", "size": 26698, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "Shared/Implement/VariableSpace.cpp", "max_stars_repo_name": "yangshadip/YAP-SELF", "max_stars_repo_head_hexsha": "c715baa61c9504304629f28c05fd0f70b629f32a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Shared/Implement/VariableSpace.cpp", "max_issues_repo_name": "yangshadip/YAP-SELF", "max_issues_repo_head_hexsha": "c715baa61c9504304629f28c05fd0f70b629f32a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Shared/Implement/VariableSpace.cpp", "max_forks_repo_name": "yangshadip/YAP-SELF", "max_forks_repo_head_hexsha": "c715baa61c9504304629f28c05fd0f70b629f32a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.4024738344, "max_line_length": 148, "alphanum_fraction": 0.6789647165, "num_tokens": 7053} |
[STATEMENT]
lemma lcp_ext_right_conv: "\<not> r \<bowtie> r' \<Longrightarrow> (r \<cdot> u) \<and>\<^sub>p (r' \<cdot> v) = r \<and>\<^sub>p r'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<not> r \<bowtie> r' \<Longrightarrow> r \<cdot> u \<and>\<^sub>p r' \<cdot> v = r \<and>\<^sub>p r'
[PROOF STEP]
by (induct r r' rule: list_induct2', simp+) | {"llama_tokens": 151, "file": "Combinatorics_Words_CoWBasic", "length": 1} |
!==============================================================================!
  subroutine Cpu_Timer_Mod_Stop(f_name)
!------------------------------------------------------------------------------!
!   Stops timing the named function and accumulates its elapsed CPU time.      !
!------------------------------------------------------------------------------!
  implicit none
!-----------------------------------[Locals]-----------------------------------!
  character(len=*) :: f_name
  integer          :: f, f_stop
!==============================================================================!

  !-------------------------------------!
  !   Find which function is stopping   !
  !-------------------------------------!

  ! Browse through stored functions; zero means the name was not registered
  f_stop = 0
  do f = 1, n_funct
    if(f_name .eq. funct_name(f)) then
      f_stop = f
      exit
    end if
  end do

  ! If the name was never stored, Cpu_Timer_Start wasn't invoked for it
  if(f_stop .eq. 0) then
    print *, 'CRITICAL ERROR in ''Cpu_Timer_End'':'
    print *, 'For function ''', trim(f_name), ''', ''Cpu_Ti' //  &
             'mer_Start'' wasn''t invoked. Exiting!'
    stop
  end if

  !-------------------------------------------------------------!
  !   Update the time for the function which is being stopped   !
  !-------------------------------------------------------------!
  time_prev = time_curr      ! store the last time which was recorded
  call cpu_time(time_curr)   ! refresh the value of time_curr
  funct_time(f_stop) = funct_time(f_stop) + time_curr - time_prev

  end subroutine
| {"hexsha": "329666661bf2235eface8f6d085a53acc7a7107b", "size": 1378, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "Sources/Shared/Cpu_Timer_Mod/Stop.f90", "max_stars_repo_name": "Dundj/Convex_Geomotry", "max_stars_repo_head_hexsha": "38507824d97270b3e4ead194a16148ff6158b59f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 64, "max_stars_repo_stars_event_min_datetime": "2018-05-29T09:39:50.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T13:59:18.000Z", "max_issues_repo_path": "Sources/Shared/Cpu_Timer_Mod/Stop.f90", "max_issues_repo_name": "EdinSmartLab/T-Flows", "max_issues_repo_head_hexsha": "5a7f70421f18069453977142e6515cdc959a9e50", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 124, "max_issues_repo_issues_event_min_datetime": "2018-05-28T12:58:20.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-03T11:12:31.000Z", "max_forks_repo_path": "Sources/Shared/Cpu_Timer_Mod/Stop.f90", "max_forks_repo_name": "EdinSmartLab/T-Flows", "max_forks_repo_head_hexsha": "5a7f70421f18069453977142e6515cdc959a9e50", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 39, "max_forks_repo_forks_event_min_datetime": "2018-05-28T13:13:06.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-27T17:41:08.000Z", "avg_line_length": 36.2631578947, "max_line_length": 80, "alphanum_fraction": 0.4078374456, "num_tokens": 275} |
"""
Title :Base_tester.py
Description :Base class for dataset benchmarks
Author :Ilke Cugu
Date Created :16-01-2020
Date Modified :02-05-2020
version :1.0.2
python_version :3.6.6
"""
import keras
import numpy as np
class Base_tester:
	"""Base class for dataset benchmark testers.

	Subclasses load a dataset into ``x_train``/``y_train`` (and the val/test
	counterparts) and override :meth:`run` / :meth:`get_n_classes` as needed.
	"""

	def __init__(self, wait=False):
		# NOTE(review): `wait` is accepted but not stored/used here — confirm intent.
		# Base class variables
		self.base_lr = 1e-3
		self.dataset = None
		self.x_train = None
		self.y_train = None
		self.x_val = None
		self.y_val = None
		self.x_test = None
		self.y_test = None
		self.dim_x = None
		self.dim_y = None
		self.training_set_size = None
		self.val_set_size = None
		self.test_set_size = None

	def preprocess_dataset(self):
		"""Scale pixels to [0, 1] and center train/test by the training mean."""
		# Normalize the data
		self.x_train = self.x_train.astype('float32') / 255.0
		self.x_test = self.x_test.astype('float32') / 255.0
		# Center both splits using statistics from the training set only
		x_train_mean = np.mean(self.x_train, axis=0)
		self.x_train -= x_train_mean
		self.x_test -= x_train_mean

	def get_optimizer(self, optimizer, lr=None, momentum=None, decay=None):
		"""Return a Keras optimizer instance by name, or None if unknown.

		:param optimizer: one of "adam", "sgd", "rmsprop", "adagrad"
		:param lr: learning rate
		:param momentum: only honored by SGD (RMSprop/Adagrad constructors in
			Keras do not accept a `momentum` argument, so it is ignored there;
			passing it previously raised TypeError)
		:param decay: learning-rate decay
		"""
		if optimizer == "adam":
			return keras.optimizers.Adam(lr=lr)
		elif optimizer == "sgd":
			return keras.optimizers.SGD(lr=lr, momentum=momentum, decay=decay)
		elif optimizer == "rmsprop":
			return keras.optimizers.RMSprop(lr=lr, decay=decay)
		elif optimizer == "adagrad":
			return keras.optimizers.Adagrad(lr=lr, decay=decay)
		else:
			return None

	def get_n_classes(self):
		"""Number of target classes; subclasses must override (base returns None)."""
		return None

	def get_input_shape(self):
		"""Shape of a single training sample (without the batch dimension)."""
		return self.x_train.shape[1:]

	def get_y_test(self):
		"""Ground-truth labels of the test split."""
		return self.y_test

	def evaluate(self, model):
		"""Evaluate `model` on the test split; returns Keras evaluate() output."""
		return model.evaluate(self.x_test, self.y_test, verbose=0)

	def predict(self, model):
		"""Return `model` predictions on the test split."""
		return model.predict(self.x_test, verbose=0)

	def run(self, model,
			optimizer="adam",
			lr=1e-3,
			momentum=None,
			decay=None,
			loss='categorical_crossentropy',
			batch_size=128,
			epochs=200,
			verbose=0,
			callbacks=None,
			schedule_lr=True,
			custom_lr_scheduler=None):
		"""
		Runs the benchmark

		# Arguments
			:param model: Keras model (including MicroResNet)
			:param optimizer: (string) name of the selected Keras optimizer
			:param lr: (float) learning rate
			:param momentum: (float) only relevant for the optimization algorithms that use momentum
			:param decay: (float) only relevant for the optimization algorithms that use weight decay
			:param loss: (string) name of the selected Keras loss function
			:param batch_size: (int) # of inputs in a mini-batch
			:param epochs: (int) # of full training passes
			:param verbose: (int) Keras verbose argument
			:param callbacks: list of Keras callbacks
			:param schedule_lr: (bool) enable/disable learning rate scheduler (default or custom)
			:param custom_lr_scheduler: user defined learning rate scheduler
			:return: (history, score)
		"""
		# Base class performs no training; subclasses implement the real run.
		hist = None
		score = None
		return hist, score
// Copyright Abel Sinkovics (abel@sinkovics.hu) 2015.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <boost/metaparse/v1/impl/next_digit.hpp>
#include <boost/mpl/assert.hpp>
#include <boost/mpl/equal_to.hpp>
#include <boost/mpl/int.hpp>
#include "test_case.hpp"
BOOST_METAPARSE_TEST_CASE(next_digit)
{
  namespace mpl = boost::mpl;
  using boost::metaparse::v1::impl::next_digit;

  // next_digit::apply<acc, d> appends a decimal digit: result == 10 * acc + d.
  BOOST_MPL_ASSERT((mpl::equal_to<mpl::int_< 0>, next_digit::apply<mpl::int_<0>, mpl::int_<0> > >));
  BOOST_MPL_ASSERT((mpl::equal_to<mpl::int_<10>, next_digit::apply<mpl::int_<1>, mpl::int_<0> > >));
  BOOST_MPL_ASSERT((mpl::equal_to<mpl::int_<13>, next_digit::apply<mpl::int_<1>, mpl::int_<3> > >));
}
| {"hexsha": "ca7be64124e4e0089e7944ab6bf357fc93382b11", "size": 787, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "deps/src/boost_1_65_1/libs/metaparse/test/next_digit.cpp", "max_stars_repo_name": "shreyasvj25/turicreate", "max_stars_repo_head_hexsha": "32e84ca16aef8d04aff3d49ae9984bd49326bffd", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 11356.0, "max_stars_repo_stars_event_min_datetime": "2017-12-08T19:42:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T16:55:25.000Z", "max_issues_repo_path": "deps/src/boost_1_65_1/libs/metaparse/test/next_digit.cpp", "max_issues_repo_name": "shreyasvj25/turicreate", "max_issues_repo_head_hexsha": "32e84ca16aef8d04aff3d49ae9984bd49326bffd", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2402.0, "max_issues_repo_issues_event_min_datetime": "2017-12-08T22:31:01.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T19:25:52.000Z", "max_forks_repo_path": "deps/src/boost_1_65_1/libs/metaparse/test/next_digit.cpp", "max_forks_repo_name": "shreyasvj25/turicreate", "max_forks_repo_head_hexsha": "32e84ca16aef8d04aff3d49ae9984bd49326bffd", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1343.0, "max_forks_repo_forks_event_min_datetime": "2017-12-08T19:47:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-26T11:31:36.000Z", "avg_line_length": 31.48, "max_line_length": 80, "alphanum_fraction": 0.7128335451, "num_tokens": 235} |
from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
import time
# Converts a Tensor into an image array (numpy)
# |imtype|: the desired type of the converted numpy array
def tensor2im(input_image, imtype=np.uint8):
    """Convert a Tensor into an image array (numpy).

    :param input_image: a torch.Tensor of shape [batch, C, H, W]; the first
        element of the batch is converted. Non-tensor inputs are returned
        unchanged.
    :param imtype: the desired dtype of the converted numpy array.
    :return: an H x W x 3 array in [0, 255] cast to `imtype`.
    """
    if isinstance(input_image, torch.Tensor):
        image_tensor = input_image.data
    else:
        return input_image
    image_numpy = image_tensor[0].cpu().float().numpy()
    if image_numpy.shape[0] == 1:
        # Single-channel: rescale to [-1, 1] using the image's own range,
        # then replicate to 3 channels.
        lo = np.min(image_numpy)
        hi = np.max(image_numpy)
        # Guard against a constant image (hi == lo), which previously
        # produced a division by zero and NaN output.
        denom = (hi - lo) if hi > lo else 1.0
        image_numpy = (image_numpy - lo) / denom
        image_numpy = image_numpy * 2 - 1
        image_numpy = np.tile(image_numpy, (3, 1, 1))
    # CHW in [-1, 1] -> HWC in [0, 255]
    image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
    image_numpy = np.clip(image_numpy, 0.0, 255.0)
    return image_numpy.astype(imtype)
def diagnose_network(net, name='network'):
    """Print `name` followed by the mean absolute gradient of `net`'s parameters.

    Parameters without a gradient are skipped; if none have gradients,
    0.0 is printed.
    """
    grad_means = [torch.mean(torch.abs(p.grad.data))
                  for p in net.parameters() if p.grad is not None]
    mean = sum(grad_means) / len(grad_means) if grad_means else 0.0
    print(name)
    print(mean)
def save_image(image_numpy, image_path):
    """Save a numpy image array to `image_path` via PIL."""
    Image.fromarray(image_numpy).save(image_path)
def print_numpy(x, val=True, shp=False):
    """Print summary statistics of a numpy array.

    :param x: array to summarize (converted to float64 first)
    :param val: if True, print mean/min/max/median/std
    :param shp: if True, print the array's shape
    """
    x = x.astype(np.float64)
    if shp:
        print('shape,', x.shape)
    if not val:
        return
    x = x.flatten()
    print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
        np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
def mkdirs(paths):
    """Create each directory in `paths`, which may be a single path string
    or a list of path strings."""
    is_list = isinstance(paths, list) and not isinstance(paths, str)
    for p in (paths if is_list else [paths]):
        mkdir(p)
def mkdir(path):
    """Create directory `path` (and parents) if it does not already exist.

    Uses ``exist_ok=True`` so a concurrent creation between a check and the
    ``makedirs`` call cannot raise — the previous exists-then-create sequence
    was racy (TOCTOU).
    """
    os.makedirs(path, exist_ok=True)
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
    """Return True if `a` and `b` are equal within a relative or absolute tolerance."""
    tolerance = max(rel_tol * max(abs(a), abs(b)), abs_tol)
    return abs(a - b) <= tolerance
class Timer(object):
    """Wall-clock timer, usable directly or as a context manager.

    Args:
        name: optional label printed alongside the elapsed time.
        acc: if True, keep accumulating time across start/stop pairs until
            reset() is called explicitly; if False, each stop() prints and
            clears the total.
        avg: if True, print total / number-of-stops instead of the raw total.

    NOTE(review): reset() clears `total` but not `iters`, so with acc=False
    and avg=True the printed value divides a single interval by the
    cumulative stop count — confirm this is intended.
    """

    def __init__(self, name=None, acc=False, avg=False):
        self.name = name
        self.acc = acc
        self.avg = avg
        self.total = 0.0   # accumulated elapsed seconds
        self.iters = 0     # number of stop() calls so far

    def __enter__(self):
        # Context-manager entry: begin timing.
        self.start()

    def __exit__(self, type, value, traceback):
        # Context-manager exit: stop timing (prints unless acc=True).
        self.stop()

    def start(self):
        # Record the start timestamp; must be called before stop().
        self.tstart = time.time()

    def stop(self):
        # Accumulate the elapsed interval; print immediately unless accumulating.
        self.iters += 1
        self.total += time.time() - self.tstart
        if not self.acc:
            self.reset()

    def reset(self):
        # Print the (average) elapsed time, then clear the accumulated total.
        name_string = ''
        if self.name:
            name_string = '[' + self.name + '] '
        value = self.total
        msg = 'Elapsed'
        if self.avg:
            value /= self.iters
            msg = 'Avg Elapsed'
        print('%s%s: %.4f' % (name_string, msg, value))
        self.total = 0.0
| {"hexsha": "1e14248506f744064ed0fa14883747215e11725b", "size": 2677, "ext": "py", "lang": "Python", "max_stars_repo_path": "util/util.py", "max_stars_repo_name": "VMReyes/keypointgan", "max_stars_repo_head_hexsha": "17b6f6f43430d532603d25edb2f42c087119986e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2021-01-12T00:58:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-14T19:05:24.000Z", "max_issues_repo_path": "util/util.py", "max_issues_repo_name": "VMReyes/keypointgan", "max_issues_repo_head_hexsha": "17b6f6f43430d532603d25edb2f42c087119986e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "util/util.py", "max_forks_repo_name": "VMReyes/keypointgan", "max_forks_repo_head_hexsha": "17b6f6f43430d532603d25edb2f42c087119986e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2021-01-21T07:00:48.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-16T02:02:42.000Z", "avg_line_length": 25.9902912621, "max_line_length": 103, "alphanum_fraction": 0.6100112066, "include": true, "reason": "import numpy", "num_tokens": 763} |
# General imports and utility functions
from imports import *
from utils import *
# Training environment
from parameters import par, update_dependencies
from stimulus import Stimulus
from optimizers import Standard, AdamOpt
import plotting_functions as pf
import copy
import cupy.linalg as LA
# Network/cell model functions
from spike_models import run_spike_model
from dynamics_adex import calculate_dynamics as adex_dynamics
from dynamics_izhi import calculate_dynamics as izhi_dynamics
class Model:
	""" Spiking recurrent network model (cupy/GPU).

	Holds GPU-resident constants, trainable variables, eligibility traces and
	Clopath-rule traces; runs the spiking dynamics over a trial and
	accumulates gradient updates that are applied in optimize(). """

	def __init__(self):
		# Move parameters to the GPU and set up optimizer / eligibility state
		self.init_constants()
		self.init_variables()
		self.init_optimizer()
		self.init_eligibility()

		# Running membrane-potential trace mean, carried across iterations
		self.v_trace_mean = cp.zeros([1, 1, par['n_hidden']])
		# Broadcasting helper of shape [batch x 1 x hidden]
		self.size_ref = cp.ones([par['batch_size'], 1, par['n_hidden']])

		# Select model dynamics
		if par['spike_model'] == 'adex':
			self.dynamics = adex_dynamics
		elif par['spike_model'] == 'izhi':
			self.dynamics = izhi_dynamics

	def init_constants(self):
		""" Import constants from CPU to GPU """

		constants = ['dt', 'dt_sec', 'adex', 'lif', 'izhi', 'w_init', 'v_init']
		constants += ['EI_vector', 'EI_matrix', 'EI_mask_exh', 'EI_mask_inh']
		constants += ['W_in_mask', 'W_rnn_mask', 'W_out_mask', 'b_out_mask']
		constants += ['clopath', 'EE_mask', 'XE_mask']

		# Short-term plasticity constants are only needed when STP is enabled
		if par['use_stp']:
			constants += ['alpha_stf', 'alpha_std', 'U', 'syn_x_init', 'syn_u_init']

		self.con_dict = {}
		for c in constants:
			self.con_dict[c] = to_gpu(par[c])

	def init_variables(self):
		""" Import variables from CPU to GPU, and apply any one-time
			variable operations """

		self.var_names = ['W_in', 'W_out', 'W_rnn', 'b_out']
		self.var_dict = {}
		self.grad_dict = {}
		for v in self.var_names:
			self.var_dict[v] = to_gpu(par[v+'_init'])
			self.grad_dict[v] = cp.zeros_like(self.var_dict[v])

		# Extra accumulators for the local (EI-balance) gradient terms
		self.grad_dict['W_rnn_exc_local'] = cp.zeros_like(self.var_dict['W_rnn'])
		self.grad_dict['W_rnn_inh_local'] = cp.zeros_like(self.var_dict['W_rnn'])
		self.grad_dict['W_in_local'] = cp.zeros_like(self.var_dict['W_in'])

	def init_optimizer(self):
		""" Initialize the optimizer to be used for this model """

		if par['optimizer'] == 'standard':
			self.optimizer = Standard(self.var_dict, par['learning_rate'])
		elif par['optimizer'] == 'adam':
			self.optimizer = AdamOpt(self.var_dict, par['learning_rate'], \
				par['adam_beta1'], par['adam_beta2'], par['adam_epsilon'])
		else:
			raise Exception('Optimizer "{}" not available.'.format(par['optimizer']))

	def init_eligibility(self):
		""" Make eligibility trace variables """

		# eps holds per-synapse eligibility traces, split by input ('inp')
		# and recurrent ('rec') connections
		self.eps = {}
		self.eps['inp'] = {}
		self.eps['rec'] = {}
		for s in ['v', 'w', 'ia']:
			self.eps['inp'][s] = cp.zeros([par['batch_size'], par['n_input'], par['n_hidden']])
		for s in ['v', 'w', 'ir', 'sx', 'su']:
			self.eps['rec'][s] = cp.zeros([par['batch_size'], par['n_hidden'], par['n_hidden']])

		# kappa holds the low-pass-filtered eligibilities used for the updates
		self.kappa = {}
		self.kappa['inp'] = cp.zeros([par['batch_size'], par['n_input'], par['n_hidden']])
		self.kappa['rec'] = cp.zeros([par['batch_size'], par['n_hidden'], par['n_hidden']])
		self.kappa['out'] = cp.zeros([par['batch_size'], par['n_hidden'], 1])

	def zero_state(self):
		""" Set all gradient and epsilon arrays to zero """
		""" Runs every iteration"""

		for v in self.var_names:
			self.grad_dict[v] = cp.zeros_like(self.grad_dict[v])

		for v in self.eps.keys():
			for s in self.eps[v].keys():
				self.eps[v][s] = cp.zeros_like(self.eps[v][s]) if not 'prev' in s else None

		# prev_v is a rolling buffer of the last `latency` voltage eligibilities
		self.eps['inp']['prev_v'] = [cp.zeros([par['batch_size'], par['n_input'], par['n_hidden']]) for _ in range(par['latency'])]
		self.eps['rec']['prev_v'] = [cp.zeros([par['batch_size'], par['n_hidden'], par['n_hidden']]) for _ in range(par['latency'])]

		for k in self.kappa.keys():
			self.kappa[k] = cp.zeros_like(self.kappa[k])

	def apply_variable_rules(self):
		""" Apply rules to the variables that must be applied every
			time the model is run """

		self.eff_var = {}

		# Send input and output weights to effective variables
		self.eff_var['W_in'] = cp.clip(self.var_dict['W_in'], 0., 10.)
		self.eff_var['W_out'] = self.var_dict['W_out']
		self.eff_var['b_out'] = self.var_dict['b_out']

		# Send recurrent weights, with appropriate changes, to effective variables
		if par['EI_prop'] != 1.:
			eff = cp.clip(self.var_dict['W_rnn'], 0., 4.)
			self.eff_var['W_rnn'] = apply_EI(eff, self.con_dict['EI_matrix'])
		else:
			self.eff_var['W_rnn'] = self.var_dict['W_rnn']

		# Apply masks for each weight, then divide by the current divider
		# to ensure correct conductance regime
		for k in self.eff_var.keys():
			self.eff_var[k] *= self.con_dict[k+'_mask']

	def run_model(self, trial_info, testing=False):
		""" Run the model by:
			- Loading trial data
			- Setting initial states
			- Iterating over trial data
			- Collecting states and updates over time """

		# Load the input data, target data, and mask to GPU
		trial_info = to_gpu(trial_info)
		self.input_data = trial_info['neural_input']
		self.output_data = trial_info['desired_output']
		self.output_mask = trial_info['train_mask']

		# Establish variable rules
		self.apply_variable_rules()

		# Clear gradients and epsilons
		self.zero_state()

		# Establish internal state recording
		self.v = cp.zeros([par['num_time_steps'], par['batch_size'], 1, par['n_hidden']])
		self.w = cp.zeros([par['num_time_steps'], par['batch_size'], 1, par['n_hidden']])
		self.sx = cp.zeros([par['num_time_steps'], par['batch_size'], par['n_hidden'], 1])
		self.su = cp.zeros([par['num_time_steps'], par['batch_size'], par['n_hidden'], 1])

		# Initialize cell states
		v = self.con_dict['v_init'] * self.size_ref
		w = self.con_dict['w_init'] * self.size_ref

		# Initialize synaptic plasticity
		sx = self.con_dict['syn_x_init'] if par['use_stp'] else 1.
		su = self.con_dict['syn_u_init'] if par['use_stp'] else 1.

		# Record other parts of the model as well
		self.z = cp.zeros([par['num_time_steps'], par['batch_size'], par['n_hidden']])
		self.h = cp.zeros([par['num_time_steps'], par['batch_size'], par['n_hidden']])
		self.y = cp.zeros([par['num_time_steps'], par['batch_size'], par['n_output']])
		self.eps_v_rec = cp.zeros([par['num_time_steps'], par['n_hidden']])
		self.eps_w_rec = cp.zeros([par['num_time_steps'], par['n_hidden']])
		self.eps_ir_rec = cp.zeros([par['num_time_steps'], par['n_hidden']])

		# Initialize input trace
		ia = cp.zeros([par['batch_size'], par['n_input'], par['n_hidden']])
		ir = cp.zeros([par['batch_size'], par['n_hidden'], par['n_hidden']])

		# Initialize Clopath traces
		self.x_trace = cp.zeros([par['batch_size'], par['n_input'], 1])
		self.z_trace = cp.zeros([par['batch_size'], par['n_hidden'], 1])
		self.Vp_trace = cp.zeros([par['batch_size'], 1, par['n_hidden']])
		self.Vm_trace = cp.zeros([par['batch_size'], 1, par['n_hidden']])
		self.clopath_W_in = cp.zeros([par['n_input'], par['n_hidden']])
		self.clopath_W_rnn = cp.zeros([par['n_hidden'], par['n_hidden']])
		self.new_v_trace_mean = cp.zeros([1, 1, par['n_hidden']])
		self.I_sqr = 0

		# Make state dictionary
		state_dict = {'v':v, 'w':w, 'ia':ia, 'ir':ir, 'ja':copy.copy(ia), 'jr':copy.copy(ir), 'sx':sx, 'su':su}

		# Loop across time
		for t in range(par['num_time_steps']):

			# Run cell step
			state_dict, I = self.recurrent_cell(state_dict, t)

			# Update Clopath traces
			z_L = self.z[t-par['latency'],:,:,cp.newaxis]
			x = self.input_data[t,:,:,cp.newaxis]
			post = self.z[t,:,cp.newaxis,:]
			cl = self.con_dict['clopath']
			# Effective voltage: spike threshold where a spike occurred,
			# membrane potential elsewhere
			V_eff = state_dict['v'] * (1-post) + self.con_dict[par['spike_model']]['Vth'] * post
			self.Vp_trace += self.con_dict['clopath']['alpha_+'] * (-self.Vp_trace + V_eff)
			self.Vm_trace += self.con_dict['clopath']['alpha_-'] * (-self.Vm_trace + V_eff)
			self.z_trace += self.con_dict['clopath']['alpha_x'] * (-self.z_trace + z_L)
			self.x_trace += self.con_dict['clopath']['alpha_x'] * (-self.x_trace + x)
			self.new_v_trace_mean += cp.mean(0.5*(self.Vp_trace + self.Vm_trace)/par['num_time_steps'], axis=0, keepdims=True)

			# LTD/LTP thresholding terms of the Clopath rule
			th_min = relu(self.Vm_trace - cl['theta-'])
			th_plu = relu(V_eff-cl['theta+']) * relu(self.Vp_trace-cl['theta-'])
			LTD = cl['A_LTD'] * (1 + 100.*((self.v_trace_mean-self.con_dict[par['spike_model']]['V_r'])/self.con_dict[par['spike_model']]['V_r'])**2)
			self.clopath_W_rnn += cp.mean(cl['dt'] * (-LTD*z_L*th_min + cl['A_LTP']*self.z_trace*th_plu), axis=0)
			self.clopath_W_in += cp.mean(cl['dt'] * (-LTD*x *th_min + cl['A_LTP']*self.x_trace*th_plu), axis=0)

			# Identify I squared
			self.I_sqr += (1/par['num_time_steps']) * cp.mean(cp.square(cp.sum(I, axis=1)))

			# Record cell state
			self.v[t,...] = state_dict['v']
			self.w[t,...] = state_dict['w']
			self.sx[t,...] = state_dict['sx']
			self.su[t,...] = state_dict['su']

			# Advance the rolling buffer of delayed voltage eligibilities
			self.eps['inp']['prev_v'] = self.eps['inp']['prev_v'][1:]
			self.eps['rec']['prev_v'] = self.eps['rec']['prev_v'][1:]
			self.eps['inp']['prev_v'].append(self.eps['inp']['v'])
			self.eps['rec']['prev_v'].append(self.eps['rec']['v'])

			# Only run updates if training
			if not testing:
				# Update eligibilities and traces
				self.update_eligibility(state_dict, I, t)

				# Update pending weight changes
				self.calculate_weight_updates(t)

		self.v_trace_mean = self.new_v_trace_mean

	def recurrent_cell(self, st, t):
		""" Compute one iteration of the recurrent network, progressing the
			internal state by one time step. """

		# Presynaptic spikes from `latency` steps ago drive this step
		z = self.z[t-par['latency'],..., cp.newaxis]
		x = self.input_data[t,:,:,cp.newaxis]

		# Update the input traces based on presynaptic spikes
		curr_beta = self.con_dict[par['spike_model']]['beta']
		st['ia'] = curr_beta * st['ia'] + (1-curr_beta) * self.eff_var['W_in'] * x
		st['ir'] = curr_beta * st['ir'] + (1-curr_beta) * self.eff_var['W_rnn'] * st['sx'] * st['su'] * z
		# ja/jr are the same traces without the weight factor (used by the
		# local EI-balance updates)
		st['ja'] = curr_beta * st['ja'] + (1-curr_beta) * x
		st['jr'] = curr_beta * st['jr'] + (1-curr_beta) * st['sx'] * st['su'] * z
		#print( 'I', cp.mean(st['ia']), cp.mean(st['ir']))

		# Update the synaptic plasticity state (recurrent only; input is static)
		st['sx'], st['su'] = \
			synaptic_plasticity(st['sx'], st['su'], z, self.con_dict, par['use_stp'])

		# Sum the input currents into shape [batch x postsynaptic]
		I = cp.sum(st['ia'], axis=1, keepdims=True) + cp.sum(st['ir'], axis=1, keepdims=True)

		# Update the AdEx cell state with the input current
		st['v'], st['w'], self.z[t,...] = run_spike_model(st['v'], st['w'], I, par['spike_model'], self.con_dict[par['spike_model']])

		# Update output trace based on postsynaptic cell state (Eq. 12)
		self.y[t,...] = self.con_dict[par['spike_model']]['kappa'] * self.y[t-1,...] + self.z[t,...] @ self.eff_var['W_out'] + self.eff_var['b_out']

		# Calculate h, the pseudo-derivative (Eq. 5, ~24, 20/21)
		# Bellec et al., 2018b
		if par['spike_model'] == 'adex':
			T = self.con_dict['adex']['V_T'] + par['betagrad']
		elif par['spike_model'] == 'izhi':
			T = self.con_dict['izhi']['c'] + par['betagrad']
		else:
			raise Exception('Unimplemented pseudo-derivative.')
		self.h[t,...] = cp.squeeze(par['gamma_psd'] * cp.maximum(0., \
			1 - cp.abs(st['v'] - T)/par['pseudo_th']))
		#h = par['gamma_psd'] * cp.maximum(0., 1 - cp.abs((st['v'] + 40e-3)/par['pseudo_th']))
		#h = par['gamma_psd'] * cp.ones_like(h)

		return st, I

	def update_eligibility(self, state_dict, I, t):
		""" Advance the eligibility traces one step using the selected
			model dynamics, and fold them into the filtered kappa terms. """

		# Calculate the model dynamics and generate new epsilons
		self.eps = self.dynamics(self.eps, state_dict, self.input_data, self.z, self.h, \
			self.sx, self.su, self.con_dict, self.eff_var, self.var_dict, t)

		# Update and modulate e's
		e_inp = self.h[t,:,cp.newaxis,:] * self.eps['inp']['v']
		e_rec = self.h[t,:,cp.newaxis,:] * self.eps['rec']['v']
		e_out = self.z[t,...,cp.newaxis]

		# Record batch-element-0 eligibilities for plotting/diagnostics
		self.eps_v_rec[t,:] = cp.mean(self.eps['rec']['v'][0,:,:], axis=0)
		self.eps_w_rec[t,:] = cp.mean(self.eps['rec']['w'][0,:,:], axis=0)
		self.eps_ir_rec[t,:] = cp.mean(self.eps['rec']['ir'][0,:,:], axis=0)

		# Increment kappa arrays forward in time (Eq. 42-45, k^(t-t') terms)
		self.kappa['inp'] = self.con_dict[par['spike_model']]['kappa']*self.kappa['inp'] + e_inp
		self.kappa['rec'] = self.con_dict[par['spike_model']]['kappa']*self.kappa['rec'] + e_rec
		self.kappa['out'] = self.con_dict[par['spike_model']]['kappa']*self.kappa['out'] + e_out

		# EI balance
		if par['balance_EI_training']:
			c = self.con_dict[par['spike_model']]
			h = self.h[t,...]
			z = self.z[t,...]
			const = c['mu']
			beta = par['weight_decay']
			gamma = beta/4

			self.grad_dict['W_rnn_exc_local'] += cp.mean((const * h * (1 - z))[:,np.newaxis,:] * state_dict['jr'], axis=0)
			self.grad_dict['W_rnn_exc_local'][:par['n_exc'],:] -= gamma*self.eff_var['W_rnn'][:par['n_exc'],:]
			self.grad_dict['W_in_local'] += cp.mean((const * h * (1 - z))[:,np.newaxis,:] * state_dict['ja'], axis=0)
			self.grad_dict['W_in_local'] -= gamma* self.eff_var['W_in']

			total_input = cp.sum(self.eff_var['W_rnn'][:par['n_exc'],:], axis=0, keepdims=True) + cp.sum(self.eff_var['W_in'], axis=0, keepdims=True)
			total_input /= (par['n_exc'] + par['n_input'])
			self.grad_dict['W_rnn_exc_local'][:par['n_exc'],:] -= beta*total_input
			self.grad_dict['W_in_local'] -= beta*total_input

			self.grad_dict['W_rnn_inh_local'] += cp.mean(I * state_dict['jr'], axis = 0)

	def calculate_weight_updates(self, t):
		""" Accumulate the pending gradient updates for this time step from
			the output error and the filtered eligibility traces. """

		# Calculate output error
		output_error = self.output_mask[t,:,cp.newaxis] * (self.output_data[t] - softmax(self.y[t]))

		# L_hid back-projects the output error through W_out onto the hidden units
		L_hid = cp.sum(self.eff_var['W_out'][cp.newaxis,:,:] * output_error[:,cp.newaxis,:], axis=-1)
		L_out = output_error

		# Update pending weight changes
		if True or par['train_input_weights']:
			self.grad_dict['W_in'] += cp.mean(L_hid[:,cp.newaxis,:] * self.kappa['inp'], axis=0)
		self.grad_dict['W_rnn'] += cp.mean(L_hid[:,cp.newaxis,:] * self.kappa['rec'], axis=0)
		self.grad_dict['W_out'] += cp.mean(L_out[:,cp.newaxis,:] * self.kappa['out'], axis=0)
		self.grad_dict['b_out'] += cp.mean(L_out[:,cp.newaxis,:], axis=0)

		# Mix in the local EI-balance terms, then clear their accumulators
		if par['balance_EI_training']:
			self.grad_dict['W_rnn'] += par['local_rate']*self.con_dict['EI_mask_exh'] @ self.grad_dict['W_rnn_exc_local']
			self.grad_dict['W_rnn'] += 2*par['local_rate']*self.con_dict['EI_mask_inh'] @ self.grad_dict['W_rnn_inh_local']
			self.grad_dict['W_in'] += par['local_rate']*self.grad_dict['W_in_local']
			self.grad_dict['W_rnn_exc_local'] *= 0.
			self.grad_dict['W_rnn_inh_local'] *= 0.
			self.grad_dict['W_in_local'] *= 0.

	def optimize(self):
		""" Optimize the model -- apply any collected updates """

		# Rescale the Clopath updates to match the gradient magnitude, and
		# restrict them to the E->E (recurrent) and X->E (input) connections
		cl = self.clopath_W_rnn * self.con_dict['EE_mask']
		g_scale = cp.mean(cp.abs(self.grad_dict['W_rnn']))
		c_scale = cp.mean(cp.abs(cl))
		self.clopath_W_rnn = cl * (g_scale/c_scale)
		self.grad_dict['W_rnn'] += self.clopath_W_rnn

		cl = self.clopath_W_in * self.con_dict['XE_mask']
		g_scale = cp.mean(cp.abs(self.grad_dict['W_in']))
		c_scale = cp.mean(cp.abs(cl))
		self.clopath_W_in = cl * (g_scale/c_scale)
		self.grad_dict['W_in'] += self.clopath_W_in

		self.grad_dict['W_in'] *= self.con_dict['W_in_mask']
		self.grad_dict['W_rnn'] *= self.con_dict['W_rnn_mask']
		self.grad_dict['W_out'] *= self.con_dict['W_out_mask']

		# Calculate task loss
		self.task_loss = cross_entropy(self.output_mask, self.output_data, self.y)

		# Apply gradient updates using the chosen optimizer
		self.var_dict = self.optimizer.apply_gradients(self.grad_dict)

	def get_weights(self):
		"""Return all trainable variables as CPU arrays."""
		return to_cpu({name:self.var_dict[name] for name in self.var_dict.keys()})

	def get_losses(self):
		"""Return the recorded losses (CPU)."""
		return to_cpu({'task':self.task_loss})

	def get_mean_spiking(self):
		"""Return the mean network firing rate in Hz (CPU scalar)."""
		z_mean = cp.mean(self.z, axis=(1,2))
		spiking = cp.sum(z_mean*1000/par['trial_length'])
		return to_cpu(spiking)

	def get_performance(self):
		"""Return (task_accuracy, full_accuracy) for the last run (CPU)."""
		self.task_accuracy = accuracy(self.y, self.output_data, self.output_mask)
		self.full_accuracy = accuracy(self.y, self.output_data, self.output_mask, inc_fix=True)
		return to_cpu(self.task_accuracy), to_cpu(self.full_accuracy)

	def visualize_delta(self, i):
		# Plotting hook, currently disabled.
		return None
		#pf.visualize_delta(i, self.var_dict, self.grad_dict)

	def show_output_behavior(self, it, trial_info):
		"""Plot the network output against the target for this iteration."""
		pf.output_behavior(it, trial_info, softmax(self.y))
def main():
	"""Train the spiking model: build model and stimulus, then iterate
	batches, applying updates and periodically saving plots/weights."""

	# Start the model run by loading the network controller and stimulus
	print('\nLoading model...')
	model = Model()
	stim = Stimulus()
	t0 = time.time()

	print('Starting training.\n')

	# Per-iteration records for plotting/analysis
	full_acc_record = []
	task_acc_record = []
	iter_record = []
	I_sqr_record = []
	W_rnn_grad_sum_record = []
	W_rnn_grad_norm_record = []

	# Run the training loop
	for i in range(par['iterations']):

		# Process a batch of stimulus using the current models
		trial_info = stim.make_batch()
		model.run_model(trial_info)
		model.optimize()

		losses = model.get_losses()
		mean_spiking = model.get_mean_spiking()
		task_accuracy, full_accuracy = model.get_performance()

		full_acc_record.append(full_accuracy)
		task_acc_record.append(task_accuracy)
		iter_record.append(i)
		I_sqr_record.append(model.I_sqr)
		W_rnn_grad_sum_record.append(cp.sum(model.var_dict['W_rnn']))
		W_rnn_grad_norm_record.append(LA.norm(model.grad_dict['W_rnn']))

		W_exc_mean = cp.mean(cp.maximum(0, model.var_dict['W_rnn'][:par['n_exc'], :]))
		W_inh_mean = cp.mean(cp.maximum(0, model.var_dict['W_rnn'][par['n_exc']:, :]))

		info_str0 = 'Iter {:>5} | Task Loss: {:5.3f} | Task Acc: {:5.3f} | '.format(i, losses['task'], task_accuracy)
		info_str1 = 'Full Acc: {:5.3f} | Mean Spiking: {:6.3f} Hz'.format(full_accuracy, mean_spiking)
		print('Aggregating data...', end='\r')

		# Periodic diagnostics: plots, weight snapshot, training curve
		if i%20==0:
			# print('Mean EXC w_rnn ', W_exc_mean, 'mean INH w_rnn', W_inh_mean)
			if par['plot_EI_testing']:
				pf.EI_testing_plots(i, I_sqr_record, W_rnn_grad_sum_record, W_rnn_grad_norm_record)
				pf.run_pev_analysis(trial_info['sample'], to_cpu(model.su*model.sx), \
					to_cpu(model.z), to_cpu(cp.stack(I_sqr_record)), i)
				weights = to_cpu(model.var_dict['W_rnn'])
				fn = './savedir/{}_weights.pkl'.format(par['savefn'])
				data = {'weights':weights, 'par': par}
				pickle.dump(data, open(fn, 'wb'))

			pf.activity_plots(i, model)
			pf.clopath_update_plot(i, model.clopath_W_in, model.clopath_W_rnn, \
				model.grad_dict['W_in'], model.grad_dict['W_rnn'])
			pf.plot_grads_and_epsilons(i, trial_info, model, model.h, model.eps_v_rec, model.eps_w_rec, model.eps_ir_rec)

			if i != 0:
				pf.training_curve(i, iter_record, full_acc_record, task_acc_record)

		# Less-frequent diagnostics: full data dump and test-mode behavior plot
		if i%100 == 0:
			model.visualize_delta(i)

			if par['save_data_files']:
				data = {'par' : par, 'weights' : to_cpu(model.var_dict)}
				pickle.dump(data, open('./savedir/{}_data_iter{:0>6}.pkl'.format(par['savefn'], i), 'wb'))

			trial_info = stim.make_batch(var_delay=False)
			model.run_model(trial_info, testing=True)
			model.show_output_behavior(i, trial_info)

		# Print output info (after all saving of data is complete)
		print(info_str0 + info_str1)

		# Early stopping once accuracy plateaus above threshold
		if i%100 == 0:
			if np.mean(task_acc_record[-100:]) > 0.9:
				print('\nMean accuracy greater than 0.9 over last 100 iters.\nMoving on to next model.\n')
				break
if __name__ == '__main__':
	try:
		main()
	except KeyboardInterrupt:
		# Allow a clean Ctrl-C exit without a traceback.
		quit('\nQuit by KeyboardInterrupt.\n')
| {"hexsha": "bc6b421270fe175b21e6b89852526e458ea5184c", "size": 19142, "ext": "py", "lang": "Python", "max_stars_repo_path": "model.py", "max_stars_repo_name": "gdgrant/Spiking-RNN", "max_stars_repo_head_hexsha": "47c1e822f20096080ff35692dc8a4de673d4222a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-02-01T10:32:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-11T12:18:06.000Z", "max_issues_repo_path": "model.py", "max_issues_repo_name": "gdgrant/Spiking-RNN", "max_issues_repo_head_hexsha": "47c1e822f20096080ff35692dc8a4de673d4222a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-10-28T18:55:30.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-28T18:55:30.000Z", "max_forks_repo_path": "model.py", "max_forks_repo_name": "gdgrant/Spiking-RNN", "max_forks_repo_head_hexsha": "47c1e822f20096080ff35692dc8a4de673d4222a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-10-29T08:13:58.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-29T08:13:58.000Z", "avg_line_length": 35.9812030075, "max_line_length": 142, "alphanum_fraction": 0.6633058197, "include": true, "reason": "import cupy", "num_tokens": 5787} |
# A cell counts as done when it is disabled, or when it is neither queued
# nor currently running.
function _is_cell_done(cell)
    cell.running_disabled && return true
    return !(cell.queued || cell.running)
end
"""
_is_notebook_done(notebook::Notebook)
Return whether all cells in the `notebook` have executed.
This method is more reliable than using `notebook.executetoken` because Pluto.jl drops that lock also after installing packages.
"""
function _notebook_done(notebook::Notebook)
cells = [last(elem) for elem in notebook.cells_dict]
return all(_is_cell_done, cells)
end
"""
parallel_build!(
dir,
files;
print_log=true,
session=ServerSession()
)
Build HTML files in parallel and write output to files with a ".html" extension.
This can be useful to speed up the build locally or in CI.
"""
function parallel_build!(
dir,
files;
print_log=true,
session=ServerSession()
)
# Start all the notebooks in parallel with async enabled.
# This way, Pluto handles concurrency.
notebooks = map(files) do in_file
in_path = joinpath(dir, in_file)
@assert isfile(in_path) "Expected .jl file at $in_path"
@info "Starting evaluation of Pluto notebook at $in_file"
notebook = SessionActions.open(session, in_path; run_async=true)
return notebook
end
for (in_file, notebook) in zip(files, notebooks)
while !_notebook_done(notebook)
sleep(1)
end
without_extension, _ = splitext(in_file)
out_file = "$(without_extension).html"
out_path = joinpath(dir, out_file)
html = notebook2html(notebook)
SessionActions.shutdown(session, notebook)
write(out_path, html)
end
return nothing
end
# Convenience method: build every Pluto notebook (*.jl) found in `dir`.
function parallel_build!(dir; print_log=true)
    notebook_files = filter(file -> endswith(file, ".jl"), readdir(dir))
    parallel_build!(dir, notebook_files; print_log)
    return nothing
end
| {"hexsha": "5fe0dd0fdaf6a3b6fe0e500642dc3caee6717c5f", "size": 1906, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/build.jl", "max_stars_repo_name": "ctrekker/PlutoStaticHTML.jl", "max_stars_repo_head_hexsha": "7ec26a63af6bb32feb3b05f22496b0cec74a3445", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/build.jl", "max_issues_repo_name": "ctrekker/PlutoStaticHTML.jl", "max_issues_repo_head_hexsha": "7ec26a63af6bb32feb3b05f22496b0cec74a3445", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/build.jl", "max_forks_repo_name": "ctrekker/PlutoStaticHTML.jl", "max_forks_repo_head_hexsha": "7ec26a63af6bb32feb3b05f22496b0cec74a3445", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.8450704225, "max_line_length": 128, "alphanum_fraction": 0.6689401889, "num_tokens": 438} |
While named after our neighbors to the north, the Canada Goose is a variety of goose that spends quite a bit of its life around the Davis area. There are a few places where these birds seem to have taken up year-round residence.
The California National Primate Research Center Primate Center front lawn had a flock at one point.
Often there is a large flock between the Yolo County Airport and the Yolo Sportsmens Association. One has to wonder what kind of a giant bird would want to live between an airport and a shooting range.
Read more at the Wikipedia article on the Canada Goose.
Photos
(request: if you have good local photos of Canada Geese, please add them here)
| {"hexsha": "ecfa396cc825677c9fc26421b17d6b730bf570fb", "size": 724, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Canada_Geese.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Canada_Geese.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Canada_Geese.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 80.4444444444, "max_line_length": 260, "alphanum_fraction": 0.7955801105, "num_tokens": 154} |
# Importing libraries.
import numpy as np
import pandas as pd
from sklearn.impute import SimpleImputer
# Importing the data set: features are every column but the last,
# target y is taken from column index 3.
dataset = pd.read_csv('Data.csv')
x = dataset.iloc[:,:-1].values
y = dataset.iloc[:,3].values
# Handling missing values: replace NaNs in feature columns 1-2
# (slice 1:3, upper bound exclusive) with each column's mean.
imputer= SimpleImputer(missing_values=np.nan, strategy='mean')
# fit() learns the per-column means; transform() writes them in place of NaNs.
imputer=imputer.fit(x[:,1:3])
x[:,1:3]= imputer.transform(x[:,1:3])
| {"hexsha": "7198099921d87af7efcc07439f8dfe2e2c9de591", "size": 374, "ext": "py", "lang": "Python", "max_stars_repo_path": "Handling Missing values/Imputer.py", "max_stars_repo_name": "Anish-AV/Data-Preprocessing", "max_stars_repo_head_hexsha": "0b3165dc2934c2d9578de1d1629a583d6479d3ba", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-04-30T23:08:50.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-30T23:08:50.000Z", "max_issues_repo_path": "Handling Missing values/Imputer.py", "max_issues_repo_name": "Anish-AV/Data-Preprocessing", "max_issues_repo_head_hexsha": "0b3165dc2934c2d9578de1d1629a583d6479d3ba", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Handling Missing values/Imputer.py", "max_forks_repo_name": "Anish-AV/Data-Preprocessing", "max_forks_repo_head_hexsha": "0b3165dc2934c2d9578de1d1629a583d6479d3ba", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.375, "max_line_length": 62, "alphanum_fraction": 0.7406417112, "include": true, "reason": "import numpy", "num_tokens": 104} |
import numpy as np
import cv2
def flo(img):
    """Return the centered 2-D FFT of *img* and its normalized magnitude.

    Args:
        img: 2-D array-like image.

    Returns:
        Tuple ``(fsimg, afsimg)``: the fft-shifted complex spectrum, and its
        absolute value scaled so the maximum equals 1.
    """
    spectrum = np.fft.fftshift(np.fft.fft2(img))
    magnitude = np.abs(spectrum)
    return spectrum, magnitude / np.max(magnitude)
def getpic(x_coefficient, y_coefficient, linenum):
    """Build a 256x256 cosine grating, display it with OpenCV, and return it.

    Pixel (x, y) is ``0.5 + 0.5*cos((a*x + b*y) * pi * linenum / 512)``,
    normalized so the maximum value is 1.

    Args:
        x_coefficient: frequency coefficient multiplying the row index.
        y_coefficient: frequency coefficient multiplying the column index.
        linenum: line count controlling the grating frequency.

    Returns:
        The normalized 256x256 float image.
    """
    # Vectorized replacement for the original per-pixel double loop:
    # broadcasting a (256, 1) column of row indices against a (1, 256) row of
    # column indices evaluates the same cosine over the whole grid at C speed.
    xs = np.arange(256).reshape(-1, 1)
    ys = np.arange(256).reshape(1, -1)
    pic = 0.5 + 0.5 * np.cos((x_coefficient * xs + y_coefficient * ys) * np.pi * linenum / 512)
    pic = pic / np.max(pic)
    cv2.imshow('x:{},y:{}'.format(x_coefficient, y_coefficient), pic)
    return pic
def main():
    """Compose two gratings, show their pixel-wise minimum and its spectrum,
    and compare against the spectrum of a separately generated grating."""
    pic1 = getpic(3, 4, 16)
    pic2 = getpic(4, 3, 16)
    # pixel-wise minimum of the two gratings (same as np.minimum below)
    pic3 = np.array((pic1, pic2)).min(axis=0)
    # pic3 = np.minimum(pic1, pic2)
    cv2.imshow('real space', pic3)
    pic3f = flo(pic3)
    # flo returns (complex spectrum, normalized magnitude); show the magnitude
    cv2.imshow('fourier space', pic3f[1])
    picmoire = getpic(1, -1, 16)
    picmoiref = flo(picmoire)
    cv2.imshow('moirefour space', picmoiref[1])
    # print the non-zero spectrum coordinates of both images for comparison
    print('expect:', np.nonzero(pic3f[1]))
    print('moire3:', np.nonzero(picmoiref[1]))
    cv2.waitKey(0)
if __name__ == '__main__':
    main()
| {"hexsha": "2e70e5115802873a9bd396fe24fd71a8ced69101", "size": 1128, "ext": "py", "lang": "Python", "max_stars_repo_path": "MoireTest/Moirefft.py", "max_stars_repo_name": "YuLingFengSCNU2017/MoireFitting", "max_stars_repo_head_hexsha": "8fb72f4892e8cf2eb5bdb03474c42dedebfd8640", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-23T08:45:53.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-23T08:45:53.000Z", "max_issues_repo_path": "MoireTest/Moirefft.py", "max_issues_repo_name": "YuLingFengSCNU2017/MoireFitting", "max_issues_repo_head_hexsha": "8fb72f4892e8cf2eb5bdb03474c42dedebfd8640", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MoireTest/Moirefft.py", "max_forks_repo_name": "YuLingFengSCNU2017/MoireFitting", "max_forks_repo_head_hexsha": "8fb72f4892e8cf2eb5bdb03474c42dedebfd8640", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-28T06:52:41.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-28T06:52:41.000Z", "avg_line_length": 27.512195122, "max_line_length": 108, "alphanum_fraction": 0.5718085106, "include": true, "reason": "import numpy", "num_tokens": 367} |
(*
Author: René Thiemann
License: LGPL
*)
section \<open>Show for Complex Numbers\<close>
text \<open>We print complex numbers as real and imaginary parts. Note that by transitivity, this theory
demands that an implementations for \textit{show-real} is available, e.g., by using
one of the theories \textit{Show-Real-Impl} or \textit{../Algebraic-Numbers/Show-Real-...}.\<close>
theory Show_Complex
imports
Complex
Show_Real
begin
(* Render a complex number as "re", "imi", or "(re+imi)", omitting a zero part. *)
definition "show_complex x = (
let r = Re x; i = Im x in
if (i = 0) then show_real r else if
r = 0 then show_real i @ ''i'' else
''('' @ show_real r @ ''+'' @ show_real i @ ''i)'')"
(* The precedence argument p is unused: show_complex already parenthesizes. *)
definition showsp_complex :: "complex showsp"
where
"showsp_complex p x y =
(show_complex x @ y)"
lemma show_law_complex [show_law_intros]:
"show_law showsp_complex r"
by (rule show_lawI) (simp add: showsp_complex_def show_law_simps)
lemma showsp_complex_append [show_law_simps]:
"showsp_complex p r (x @ y) = showsp_complex p r x @ y"
by (intro show_lawD show_law_intros)
(* Register the custom show function so that derive uses it for type complex. *)
local_setup {*
Show_Generator.register_foreign_showsp @{typ complex} @{term "showsp_complex"} @{thm show_law_complex}
*}
derive "show" complex
end
| {"author": "glimonta", "repo": "thesis", "sha": "1ef0e434ea7e98c4eb29ffe7bde668cb1951e4ed", "save_path": "github-repos/isabelle/glimonta-thesis", "path": "github-repos/isabelle/glimonta-thesis/thesis-1ef0e434ea7e98c4eb29ffe7bde668cb1951e4ed/src/Lib/Show/Show_Complex.thy"} |
# Model brings together the network, the loss function, the feed of
# training images, and a training loop
import tensorflow as tf
from PIL import Image
import numpy as np
import os
from feed import Feed
from architecture import GAN
from utils import pixels01, pixels11, tile
# Print *x* and flush stdout right away so output is visible in real time
# even when stdout is block-buffered (e.g. redirected to a log file).
def printnow(x, end='\n'):
    print(x, end=end, flush=True)
# safe create directories
def makedirs(d):
    """Create directory *d* (including parents) if it does not already exist.

    Uses ``exist_ok=True`` instead of the original exists()-then-makedirs
    pair, which was racy: another process could create *d* between the check
    and the call, making ``os.makedirs`` raise ``FileExistsError``.
    """
    os.makedirs(d, exist_ok=True)
# This model uses the same loss function as DCGAN
class Model:
    """Ties together the GAN architecture, its loss functions, the feed of
    training images, and the training loop."""

    def __init__(self, feed, batch_size=64, img_shape=(64, 64),
        G_lr=0.0004, D_lr=0.0004, G_beta1=0.5, D_beta1=0.5,
        zsize=128, save_freq=10, output_cols=4, output_rows=4,
        sess=None, checkpoints_path=None):
        """
        Args:
            feed: data feed object providing training minibatches.
            batch_size: images per minibatch.
            img_shape: (height, width); each dimension must be divisible by 32.
            G_lr, D_lr: Adam learning rates for generator / discriminator.
            G_beta1, D_beta1: Adam beta1 for generator / discriminator.
            zsize: length of the latent vector fed to the generator.
            save_freq: save session and examples every this many batches.
            output_cols, output_rows: tile layout for saved example images.
            sess: optional existing TensorFlow session; created if None.
            checkpoints_path: optional explicit checkpoint file path.

        Raises:
            ValueError: if either image dimension is not divisible by 32.
        """
        self.batch_size = batch_size

        if ((img_shape[0] % 32 != 0) or (img_shape[1] % 32 != 0)):
            # was `raise ValueException`, an undefined name that would have
            # raised NameError instead of the intended error
            raise ValueError("Image dimensions need to be divisible by 32. \
                Dimensions received was %s." % img_shape)

        self.img_shape = img_shape + (3,) # add (r,g,b) channels dimension

        # learning rates for Adam optimizer
        self.G_lr = G_lr
        self.D_lr = D_lr
        self.G_beta1 = G_beta1
        self.D_beta1 = D_beta1

        # size of latent vector
        self.zsize = zsize
        # save session and examples after this many batches
        self.save_freq = int(save_freq)
        # cols and rows of output image tile
        self.output_cols = output_cols
        self.output_rows = output_rows

        pwd = os.getcwd()
        self.dirs = {
            'output': os.path.join(pwd, 'output'),
            'logs': os.path.join(pwd, 'logs'),
            'checkpoints': os.path.join(pwd, 'checkpoints')
        }

        # set or create tensorflow session
        self.sess = sess
        if not self.sess:
            self.sess = tf.InteractiveSession()

        # create directories if they don't exist
        makedirs(self.dirs['logs'])
        makedirs(self.dirs['output'])
        makedirs(self.dirs['checkpoints'])

        self.checkpoints_path = checkpoints_path or os.path.join(self.dirs['checkpoints'], 'checkpoint.ckpt')

        # get number of files in output so we can continue where a previous process
        # left off without overwriting
        self.output_img_idx = len([f for f in os.listdir(self.dirs['output']) \
            if os.path.isfile(os.path.join(self.dirs['output'], f))])

        # data feed for training
        self.feed = feed

        # bool used by batch normalization. BN behavior is different when training
        # vs predicting
        self.is_training = tf.placeholder(tf.bool)
        # was hard-coded zsize=128, which silently ignored the constructor's
        # zsize argument; honor the parameter instead
        self.arch = GAN(self.is_training, img_shape=self.img_shape, zsize=self.zsize)

        # how many times to train discriminator per minibatch
        # This is a hyperparameter that can be tuned, it's >1 in wgans
        self.D_train_iters = 2

    # Build the network
    def build_model(self):
        """Create placeholders and wire generator/discriminator graphs."""
        # real image inputs from training data feed for training the
        # discriminator
        self.X = tf.placeholder(tf.float32, (None,) + self.img_shape)
        # for feeding random draws of z (latent variable) to the generator
        self.Z = tf.placeholder(tf.float32, (None, self.zsize))

        # Instantiate a generator network. It takes an input
        # of a latent vector
        self.Gz = self.arch.generator(self.Z)

        # discriminator connected to real image input (X)
        self.Dreal, self.Dreal_logits, self.Dreal_similarity = \
            self.arch.discriminator(self.X)

        # create a second instance of the discriminator and connect to the
        # output of the generator. reuse=True means this second instance will
        # share the same network weights and biases as the first instance. We want
        # this because training the discriminator weights happens using a loss
        # function that is a function of both the discriminator applied to a generated
        # image and the discriminator applied to a real image.
        self.Dz, self.Dz_logits, _ = \
            self.arch.discriminator(self.Gz, reuse=True)

    # Build the loss function.
    def build_losses(self):
        """Define DCGAN-style cross-entropy losses for D and G."""
        # real labels are smoothed to 0.75 (1 - 0.25) rather than hard 1s
        self.Dreal_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            labels=(tf.ones_like(self.Dreal_logits) - 0.25),
            logits=self.Dreal_logits))

        self.Dz_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.zeros_like(self.Dz_logits),
            logits=self.Dz_logits))

        # discriminator loss function from DCGAN
        # discriminator wants to label real images with 1, generated with 0
        self.D_loss = self.Dreal_loss + self.Dz_loss

        # generator loss function. Make the generator think generated images
        # are real
        self.G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.ones_like(self.Dz_logits),
            logits=self.Dz_logits))

    def build_optimizers(self):
        """Create Adam optimizers that train G and D weights separately."""
        # explicitly grab lists of variables for each type of network. This is used
        # below to set up TF operations that train only one part of the network at
        # a time (either generator or discriminator)
        G_vars = [i for i in tf.trainable_variables() if 'generator' in i.name]
        D_vars = [i for i in tf.trainable_variables() if 'discriminator' in i.name]

        # Create optimizers.
        G_opt = tf.train.AdamOptimizer(learning_rate=self.G_lr, beta1=self.G_beta1)
        D_opt = tf.train.AdamOptimizer(learning_rate=self.D_lr, beta1=self.D_beta1)

        # In tensor flow, you set up training by handing an optimizer object a tensor
        # this is the output of a loss function, and (in this case) a set of variables
        # that can be changed. You get back a training operation that you then run
        # (see below) to take a step in training.
        # pass var_list explicitly so that during training of (e.g.) generator, discriminator
        # weights and biases aren't updated.
        self.G_train = G_opt.minimize(self.G_loss, var_list=G_vars)
        self.D_train = D_opt.minimize(self.D_loss, var_list=D_vars)

    def setup_session(self):
        """Restore a saved session if possible, otherwise initialize fresh."""
        # store epoch as tf variable so we can save it in the session
        # this is nice for logging so that restarting the process doesn't reset the
        # epoch count.
        self.epoch = tf.get_variable('epoch', dtype='int32', initializer=tf.constant(0))

        # random numbers to generate outputs. Store in tf variable so it gets
        # stored in session. This is useful so that generated images that are saved
        # during training come from the same latent variable inputs. This lets you
        # see the gradual change / improvement of outputs even if the process dies
        # and gets restarted
        self.example_noise = tf.get_variable('noise', dtype='float32',
            initializer=tf.constant(np.random.normal(size=(self.batch_size, self.zsize)).astype('float32')))

        self.saver = tf.train.Saver()
        try:
            print('trying to restore session from %s' % self.checkpoints_path)
            self.saver.restore(self.sess, self.checkpoints_path)
            print('restored session')
        except Exception:
            # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; restore failure falls back to a new session
            print('failed to restore session, creating a new one')
            tf.global_variables_initializer().run()

    # log some basic data for tensorboard
    def setup_logging(self):
        """Register scalar summaries for TensorBoard."""
        self.writer = tf.summary.FileWriter(self.dirs['logs'], self.sess.graph)
        self.G_stats = tf.summary.merge([
            tf.summary.scalar('G_loss', self.G_loss)
        ])

        Dreal_mean = tf.reduce_mean(tf.sigmoid(self.Dreal_logits))
        Dz_mean = tf.reduce_mean(tf.sigmoid(self.Dz_logits))

        self.D_stats = tf.summary.merge([
            tf.summary.scalar('Dreal_out', Dreal_mean),
            tf.summary.scalar('Dz_out', Dz_mean),
            tf.summary.scalar('D_loss', self.D_loss)
        ])

    def train(self):
        """Run the (endless) training loop, periodically saving progress."""
        batches = self.feed.nbatches()
        printnow('training with %s batches per epoch' % batches)
        printnow('saving session and examples every %s batches' % self.save_freq)

        # order the logged data for tensorboard
        logcounter = 0

        epoch = self.epoch.eval() # have to do this b/c self.epoch is a tensorflow var
        while True:
            for batch in range(batches):
                # training image pixel values are [0,1] but DCGAN and it seems most
                # GAN architectures benefit from / use [-1,1]
                xfeed = pixels11(self.feed.feed(batch)) # convert to [-1, 1]
                zfeed = np.random.normal(size=(self.batch_size, self.zsize)).astype('float32')

                # train discriminator (possibly more than once) by running
                # the training operation inside the session
                for i in range(self.D_train_iters):
                    _, summary = self.sess.run(
                        [ self.D_train, self.D_stats ],
                        feed_dict={ self.X: xfeed, self.Z: zfeed, self.is_training: True })
                    self.writer.add_summary(summary, logcounter)

                # train generator
                _, summary = self.sess.run(
                    [ self.G_train, self.G_stats],
                    feed_dict={ self.X: xfeed, self.Z: zfeed, self.is_training: True })
                self.writer.add_summary(summary, logcounter)

                logcounter += 1

                if (batch % self.save_freq == 0):
                    printnow('Epoch %s, batch %s/%s, saving session and examples' % (epoch, batch, batches))
                    # update TF epoch variable so restart of process picks up at same
                    # epoch where it died
                    self.sess.run(self.epoch.assign(epoch))
                    self.save_session()
                    self.output_examples()
            epoch += 1

    def save_session(self):
        """Persist the current TensorFlow session to the checkpoint path."""
        self.saver.save(self.sess, self.checkpoints_path)

    def output_examples(self):
        """Generate a tiled image of samples from the fixed latent vectors
        and write it to the output directory."""
        cols = self.output_cols
        rows = self.output_rows
        nimgs = cols*rows
        zfeed = self.example_noise.eval() # need to eval to get value since it's a tf variable
        imgs = self.sess.run(self.Gz, feed_dict={ self.Z: zfeed, self.is_training: False })
        imgs = imgs[:nimgs]
        # convert [-1,1] back to [0,1] before saving
        imgs = pixels01(imgs)
        path = os.path.join(self.dirs['output'], '%06d.jpg' % self.output_img_idx)
        tiled = tile(imgs, (rows, cols))
        as_ints = (tiled * 255.0).astype('uint8')
        Image.fromarray(as_ints).save(path)
        self.output_img_idx += 1
| {"hexsha": "078f35c9ac565bfbf84ec370c729386c5ed2f189", "size": 10780, "ext": "py", "lang": "Python", "max_stars_repo_path": "model.py", "max_stars_repo_name": "ReidWilliams/GANs", "max_stars_repo_head_hexsha": "e04cc40953bb9d2a173f9f1c066081beed95f563", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2017-12-31T20:48:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-22T05:33:42.000Z", "max_issues_repo_path": "model.py", "max_issues_repo_name": "dhruvramani/GANs", "max_issues_repo_head_hexsha": "e04cc40953bb9d2a173f9f1c066081beed95f563", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-12-01T10:07:14.000Z", "max_issues_repo_issues_event_max_datetime": "2017-12-21T22:12:58.000Z", "max_forks_repo_path": "model.py", "max_forks_repo_name": "dhruvramani/GANs", "max_forks_repo_head_hexsha": "e04cc40953bb9d2a173f9f1c066081beed95f563", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2017-12-31T20:48:14.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-19T01:06:48.000Z", "avg_line_length": 41.6216216216, "max_line_length": 109, "alphanum_fraction": 0.6233766234, "include": true, "reason": "import numpy", "num_tokens": 2439} |
# The doc build is expected to run with exactly 20 Julia threads.
Threads.nthreads() == 20 ||
    error("Doc build on Noctua should be run with 20 Julia threads!")

println("--- :julia: Instantiating project")
# Instantiate the parent package environment first, then the docs environment.
using Pkg
Pkg.activate("..")
Pkg.instantiate()
Pkg.activate(".")
Pkg.instantiate()

# Make the parent package loadable from the docs environment.
push!(LOAD_PATH, joinpath(@__DIR__, ".."))
# NOTE(review): removes the second LOAD_PATH entry (normally the default
# "@v#.#" user environment) — presumably to avoid picking up globally
# installed packages; confirm this is still required.
deleteat!(LOAD_PATH, 2)

println("+++ :julia: Building documentation")
include("make.jl")
"""
CallbackFunction()
Set a generic Xpress callback function.
"""
struct CallbackFunction <: MOI.AbstractCallback end
function MOI.set(model::Optimizer, ::CallbackFunction, ::Nothing)
if model.callback_data !== nothing
removecboptnode(model.inner, C_NULL, C_NULL)
model.callback_data = nothing
end
model.has_generic_callback = false
return
end
# Install `f` as the generic callback, replacing any existing optnode
# callback first.  The wrapper sets the callback state to CB_GENERIC for
# the duration of the user function.
function MOI.set(model::Optimizer, ::CallbackFunction, f::Function)
    if model.callback_data !== nothing
        Xpress.removecboptnode(model.inner, C_NULL, C_NULL)
        model.callback_data = nothing
    end
    model.has_generic_callback = true
    # Starting with this callback to test
    model.callback_data = set_callback_optnode!(model.inner, (cb_data) -> begin
        model.callback_state = CB_GENERIC
        f(cb_data)
        model.callback_state = CB_NONE
    end)
    return
end
MOI.supports(::Optimizer, ::CallbackFunction) = true
# Refresh the cached LP solution buffers from the current callback node so
# that MOI attribute getters can read primal/dual values during the callback.
function get_cb_solution(model::Optimizer, model_inner::XpressProblem)
    reset_callback_cached_solution(model)
    # Fills the four preallocated vectors with the node LP solution.
    Xpress.Lib.XPRSgetlpsol(model_inner,
                model.callback_cached_solution.variable_primal,
                model.callback_cached_solution.linear_primal,
                model.callback_cached_solution.linear_dual,
                model.callback_cached_solution.variable_dual)
    return
end
# Reload all cuts from the cut pool into the node problem.
# Returns `true` if at least one cut was loaded.
function applycuts(opt::Optimizer, model::XpressProblem)
    itype = Cint(1)
    interp = Cint(-1) # Get all cuts
    delta = 0.0#Xpress.Lib.XPRS_MINUSINFINITY
    ncuts = Array{Cint}(undef,1)  # out-parameter: number of cuts retrieved
    size = Cint(length(opt.cb_cut_data.cutptrs))
    mcutptr = Array{Xpress.Lib.XPRScut}(undef,size)
    dviol = Array{Cdouble}(undef,size)
    getcpcutlist(model, itype, interp, delta, ncuts, size, mcutptr, dviol) # requires an available solution
    loadcuts(model, itype, interp, ncuts[1], mcutptr)
    return ncuts[1] > 0
end
# ==============================================================================
# MOI callbacks
# ==============================================================================
# Build the default callback installed when MOI-level callbacks (heuristic,
# user cut, lazy constraint) are present.  The returned closure runs inside
# the Xpress optnode callback and dispatches to whichever callbacks are set,
# updating `model.callback_state` so submissions can be validated.
function default_moi_callback(model::Optimizer)
    return (cb_data) -> begin
        # cache the node LP solution so CallbackVariablePrimal can read it
        get_cb_solution(model, cb_data.model)
        if model.heuristic_callback !== nothing
            model.callback_state = CB_HEURISTIC
            # only allow one heuristic solution per LP optimal node
            if Xpress.getintattrib(cb_data.model, Xpress.Lib.XPRS_CALLBACKCOUNT_OPTNODE) > 1
                return
            end
            model.heuristic_callback(cb_data)
        end
        if model.user_cut_callback !== nothing
            model.callback_state = CB_USER_CUT
            # apply stored cuts if any
            if length(model.cb_cut_data.cutptrs) > 0
                added = applycuts(model, cb_data.model)
                if added
                    return
                end
            end
            # only allow one user cut solution per LP optimal node
            # limiting two calls to guarantee the user has a chance to add
            # a cut. if the user cut is loose the problem will be resolved anyway.
            if Xpress.getintattrib(cb_data.model, Xpress.Lib.XPRS_CALLBACKCOUNT_OPTNODE) > 2
                return
            end
            model.user_cut_callback(cb_data)
        end
        if model.lazy_callback !== nothing
            model.callback_state = CB_LAZY
            # add previous cuts if any
            # to gurantee the user is dealing with a optimal solution
            # feasibile for exisitng cuts
            if length(model.cb_cut_data.cutptrs) > 0
                added = applycuts(model, cb_data.model)
                if added
                    return
                end
            end
            model.lazy_callback(cb_data)
        end
    end
    return
end
# Report whether the current callback node's solution is integer feasible.
# MIPINFEAS == 0 means no integer infeasibilities, i.e. an integer solution.
function MOI.get(model::Optimizer, attr::MOI.CallbackNodeStatus{CallbackData})
    if check_moi_callback_validity(model)
        mip_infeas = Xpress.getintattrib(attr.callback_data.model, Xpress.Lib.XPRS_MIPINFEAS)
        if mip_infeas == 0
            return MOI.CALLBACK_NODE_STATUS_INTEGER
        elseif mip_infeas > 0
            return MOI.CALLBACK_NODE_STATUS_FRACTIONAL
        end
    end
    return MOI.CALLBACK_NODE_STATUS_UNKNOWN
end
# Value of variable `x` in the LP solution cached by get_cb_solution.
function MOI.get(
    model::Optimizer,
    ::MOI.CallbackVariablePrimal{CallbackData},
    x::MOI.VariableIndex
)
    return model.callback_cached_solution.variable_primal[_info(model, x).column]
end
# ==============================================================================
# MOI.UserCutCallback & MOI.LazyConstraint
# ==============================================================================
# Store the user-provided callback closures; they are invoked from
# default_moi_callback during the solve.
function MOI.set(model::Optimizer, ::MOI.UserCutCallback, cb::Function)
    model.user_cut_callback = cb
    return
end
function MOI.set(model::Optimizer, ::MOI.LazyConstraintCallback, cb::Function)
    model.lazy_callback = cb
    return
end

MOI.supports(::Optimizer, ::MOI.UserCutCallback) = true
MOI.supports(::Optimizer, ::MOI.UserCut{CallbackData}) = true
MOI.supports(::Optimizer, ::MOI.LazyConstraintCallback) = true
MOI.supports(::Optimizer, ::MOI.LazyConstraint{CallbackData}) = true
# Submit a user cut or lazy constraint from within a callback.  Submissions
# from the wrong callback context (or with a nonzero function constant) are
# rejected by caching an exception and interrupting the solve; otherwise the
# cut is stored in the cut pool and loaded into the node problem.
function MOI.submit(
    model::Optimizer,
    cb::CB,
    f::MOI.ScalarAffineFunction{Float64},
    s::Union{MOI.LessThan{Float64}, MOI.GreaterThan{Float64}, MOI.EqualTo{Float64}}
) where CB <: Union{MOI.UserCut{CallbackData},MOI.LazyConstraint{CallbackData}}

    model_cb = cb.callback_data.model
    model.cb_cut_data.submitted = true
    # Context validation: cuts may not be submitted from a heuristic
    # callback, user cuts not from a lazy callback, and vice versa.
    if model.callback_state == CB_HEURISTIC
        cache_exception(model,
            MOI.InvalidCallbackUsage(MOI.HeuristicCallback(), cb))
        Xpress.interrupt(model_cb, Xpress.Lib.XPRS_STOP_USER)
        return
    elseif model.callback_state == CB_LAZY && CB <: MOI.UserCut{CallbackData}
        cache_exception(model,
            MOI.InvalidCallbackUsage(MOI.LazyConstraintCallback(), cb))
        Xpress.interrupt(model_cb, Xpress.Lib.XPRS_STOP_USER)
        return
    elseif model.callback_state == CB_USER_CUT && CB <: MOI.LazyConstraint{CallbackData}
        cache_exception(model,
            MOI.InvalidCallbackUsage(MOI.UserCutCallback(), cb))
        Xpress.interrupt(model_cb, Xpress.Lib.XPRS_STOP_USER)
        return
    elseif !iszero(f.constant)
        cache_exception(model,
            MOI.ScalarFunctionConstantNotZero{Float64, typeof(f), typeof(s)}(f.constant))
        Xpress.interrupt(model_cb, Xpress.Lib.XPRS_STOP_USER)
        return
    end
    indices, coefficients = _indices_and_coefficients(model, f)
    sense, rhs = _sense_and_rhs(s)

    mtype = Int32[1] # Cut type
    mstart = Int32[0, length(indices)]
    mindex = Array{Xpress.Lib.XPRScut}(undef,1)  # out: handle of the stored cut
    ncuts = Cint(1)
    ncuts_ptr = Cint[0]
    nodupl = Cint(2) # Duplicates are excluded from the cut pool, ignoring cut type
    sensetype = Cchar[Char(sense)]
    drhs = Float64[rhs]
    # convert 1-based Julia column indices to the C API's 0-based indices
    indices .-= 1
    mcols = Cint.(indices)
    interp = Cint(-1) # Load all cuts

    ret = Xpress.storecuts(model_cb, ncuts, nodupl, mtype, sensetype, drhs, mstart, mindex, mcols, coefficients)
    Xpress.loadcuts(model_cb, mtype[], interp, ncuts, mindex)
    push!(model.cb_cut_data.cutptrs, mindex[1])
    # NOTE(review): the bare expression below is a no-op (its value is
    # discarded); it looks like leftover debugging code.
    model.cb_cut_data.cutptrs
    return
end
# ==============================================================================
# MOI.HeuristicCallback
# ==============================================================================
# Store the heuristic callback closure; invoked from default_moi_callback.
function MOI.set(model::Optimizer, ::MOI.HeuristicCallback, cb::Function)
    model.heuristic_callback = cb
    return
end
MOI.supports(::Optimizer, ::MOI.HeuristicCallback) = true
# Submit a heuristic (partial or complete) solution from within a callback.
# Submissions from cut/lazy callback contexts are rejected by caching an
# exception and interrupting the solve.
function MOI.submit(
    model::Optimizer,
    cb::MOI.HeuristicSolution{CallbackData},
    variables::Vector{MOI.VariableIndex},
    values::MOI.Vector{Float64}
)
    model_cb = cb.callback_data.model::Xpress.XpressProblem
    model_cb2 = cb.callback_data.model_root::Xpress.XpressProblem
    if model.callback_state == CB_LAZY
        cache_exception(model,
            MOI.InvalidCallbackUsage(MOI.LazyConstraintCallback(), cb))
        Xpress.interrupt(model_cb, Xpress.Lib.XPRS_STOP_USER)
        return
    elseif model.callback_state == CB_USER_CUT
        cache_exception(model,
            MOI.InvalidCallbackUsage(MOI.UserCutCallback(), cb))
        Xpress.interrupt(model_cb, Xpress.Lib.XPRS_STOP_USER)
        return
    end
    ilength = length(variables)
    # NaN-filled placeholders, overwritten entry-by-entry below.
    mipsolval = fill(NaN,ilength)
    mipsolcol = fill(NaN,ilength)
    count = 1
    for (var, value) in zip(variables, values)
        # 0-based column index for the C API
        mipsolcol[count] = convert(Cint,_info(model, var).column - 1)
        mipsolval[count] = value
        count += 1
    end
    mipsolcol = Cint.(mipsolcol)
    # NOTE(review): values are narrowed to Cfloat (Float32) here; confirm
    # against the Xpress API whether double precision is expected.
    mipsolval = Cfloat.(mipsolval)
    if ilength == MOI.get(model, MOI.NumberOfVariables())
        # all variables supplied: pass C_NULL for the column list
        # (complete-solution convention — TODO confirm in the Xpress docs)
        mipsolcol = C_NULL
    end
    addmipsol(model_cb, ilength, mipsolval, mipsolcol, C_NULL)
    # Xpress gives no immediate acceptance feedback at this point.
    return MOI.HEURISTIC_SOLUTION_UNKNOWN
end
MOI.supports(::Optimizer, ::MOI.HeuristicSolution{CallbackData}) = true
# Record an exception raised inside a callback on the model so it can be
# inspected after control returns from the solver.
cache_exception(model::Optimizer, e::Exception) = (model.cb_exception = e; nothing)
"""
The model inference code in this file is modified from
https://gist.github.com/fyr91/83a392ffd22342d4e5f8866b01fafb30 Thanks to the
original authur: fyr91
"""
from onnx_tf.backend import prepare
import cv2
import numpy as np
import onnx
import onnxruntime as ort
def area_of(left_top, right_bottom):
    """Compute the areas of axis-aligned rectangles given opposite corners.

    Args:
        left_top (N, 2): left-top corners.
        right_bottom (N, 2): right-bottom corners.

    Returns:
        area (N): rectangle areas; inverted/degenerate boxes yield 0.
    """
    # negative extents (inverted boxes) are clamped to zero before multiplying
    extents = np.clip(right_bottom - left_top, 0.0, None)
    return extents[..., 0] * extents[..., 1]
def iou_of(boxes0, boxes1, eps=1e-5):
    """Return intersection-over-union (Jaccard index) of boxes.

    Args:
        boxes0 (N, 4): ground-truth boxes in corner form.
        boxes1 (N or 1, 4): predicted boxes in corner form.
        eps: small constant guarding against division by zero.

    Returns:
        iou (N): IoU values.
    """
    # intersection rectangle corners (empty intersections give 0 area)
    inter_lt = np.maximum(boxes0[..., :2], boxes1[..., :2])
    inter_rb = np.minimum(boxes0[..., 2:], boxes1[..., 2:])
    intersection = area_of(inter_lt, inter_rb)
    union = (area_of(boxes0[..., :2], boxes0[..., 2:])
             + area_of(boxes1[..., :2], boxes1[..., 2:])
             - intersection)
    return intersection / (union + eps)
def hard_nms(box_scores, iou_threshold, top_k=-1, candidate_size=200):
    """
    Perform hard non-maximum-suppression to filter out boxes with iou greater
    than threshold
    Args:
        box_scores (N, 5): boxes in corner-form and probabilities.
        iou_threshold: intersection over union threshold.
        top_k: keep top_k results. If k <= 0, keep all the results.
        candidate_size: only consider the candidates with the highest scores.
    Returns:
        the kept rows of box_scores (k, 5), highest score first.
    """
    scores = box_scores[:, -1]
    boxes = box_scores[:, :-1]
    picked = []
    # ascending sort by score, then keep only the best `candidate_size`
    indexes = np.argsort(scores)
    indexes = indexes[-candidate_size:]
    while len(indexes) > 0:
        # the highest-scoring remaining box is always kept
        current = indexes[-1]
        picked.append(current)
        # chained comparison: stop when exactly top_k boxes were picked
        # (only if top_k > 0) or when no other candidates remain
        if 0 < top_k == len(picked) or len(indexes) == 1:
            break
        current_box = boxes[current, :]
        indexes = indexes[:-1]
        rest_boxes = boxes[indexes, :]
        iou = iou_of(rest_boxes, np.expand_dims(current_box, axis=0),)
        # drop candidates overlapping the kept box beyond the threshold
        indexes = indexes[iou <= iou_threshold]
    return box_scores[picked, :]
def predict(
    width, height, confidences, boxes, prob_threshold, iou_threshold=0.5, top_k=-1
):
    """
    Select boxes that contain human faces.

    Args:
        width: original image width in pixels (boxes arrive normalized).
        height: original image height in pixels.
        confidences (1, N, 2): per-box class confidence array.
        boxes (1, N, 4): boxes array in corner-form, normalized to [0, 1].
        prob_threshold: minimum class probability to keep a box.
        iou_threshold: intersection over union threshold for NMS.
        top_k: keep top_k results. If k <= 0, keep all the results.
    Returns:
        boxes (k, 4): int32 array of kept boxes scaled to pixel coordinates.
        labels (k): an array of class labels for each kept box.
        probs (k): an array of probabilities for each kept box.
    """
    # strip the batch dimension
    boxes = boxes[0]
    confidences = confidences[0]
    picked_box_probs = []
    picked_labels = []
    # class index 0 is skipped (assumed background) — start from class 1
    for class_index in range(1, confidences.shape[1]):
        probs = confidences[:, class_index]
        mask = probs > prob_threshold
        probs = probs[mask]
        if probs.shape[0] == 0:
            continue
        subset_boxes = boxes[mask, :]
        # append scores as a 5th column so hard_nms can sort by them
        box_probs = np.concatenate([subset_boxes, probs.reshape(-1, 1)], axis=1)
        box_probs = hard_nms(box_probs, iou_threshold=iou_threshold, top_k=top_k,)
        picked_box_probs.append(box_probs)
        picked_labels.extend([class_index] * box_probs.shape[0])
    if not picked_box_probs:
        return np.array([]), np.array([]), np.array([])
    picked_box_probs = np.concatenate(picked_box_probs)
    # scale normalized coordinates back to pixel space
    picked_box_probs[:, 0] *= width
    picked_box_probs[:, 1] *= height
    picked_box_probs[:, 2] *= width
    picked_box_probs[:, 3] *= height
    return (
        picked_box_probs[:, :4].astype(np.int32),
        np.array(picked_labels),
        picked_box_probs[:, 4],
    )
# Load the face-detection ONNX model and create an ONNX Runtime session.
onnx_path = "ultra_light_320.onnx"
onnx_model = onnx.load(onnx_path)
# NOTE(review): `predictor` (the onnx-tf backend) is not used in the code
# visible here — inference below goes through onnxruntime only; confirm it
# is needed elsewhere before removing.
predictor = prepare(onnx_model, device="GPU")
ort_session = ort.InferenceSession(onnx_path)
input_name = ort_session.get_inputs()[0].name
def detect_first_face(frame):
    """The function will detect and return the first found face in camera frame.
    Args:
        frame: the camera frame (BGR, HxWx3) for face detection.
    Returns:
        (x, y): returns the center of the face coordinate in NDC space [-1, 1].
        The function will return None if no face is found.
    """
    h, w, _ = frame.shape

    # Preprocess the image for the model input.
    img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # Convert BGR to RGB
    img = cv2.resize(img, (320, 240))  # Resize to the model's input resolution
    # Mean-subtract and scale pixels (model-specific normalization).
    img_mean = np.array([127, 127, 127])
    img = (img - img_mean) / 128
    # HWC -> CHW, then add the batch dimension expected by the network.
    img = np.transpose(img, [2, 0, 1])
    img = np.expand_dims(img, axis=0)
    img = img.astype(np.float32)

    confidences, boxes = ort_session.run(None, {input_name: img})
    # Keep faces with confidence above 0.7, scaled back to frame pixels.
    boxes, _, _ = predict(w, h, confidences, boxes, 0.7)

    for i in range(boxes.shape[0]):
        box = boxes[i, :]
        x1, y1, x2, y2 = box

        # Draw box to visualize the face position.
        cv2.rectangle(frame, (x1, y1), (x2, y2), (80, 18, 236), 2)
        cv2.rectangle(frame, (x1, y2 - 20), (x2, y2), (80, 18, 236), cv2.FILLED)
        cv2.putText(
            frame,
            "Face",
            (x1 + 6, y2 - 6),
            cv2.FONT_HERSHEY_DUPLEX,
            0.5,
            (255, 255, 255),
            1,
        )
        # Return the first box's center mapped from pixels to [-1, 1].
        return (((x1 + x2) * 0.5 / w - 0.5) * 2.0, ((y1 + y2) * 0.5 / h - 0.5) * 2.0)
    return None
| {"hexsha": "fa89991638d1a4078a068da92613bbbb0205b185", "size": 5793, "ext": "py", "lang": "Python", "max_stars_repo_path": "face-tracking/face_detection.py", "max_stars_repo_name": "rossning92/rpi-robot", "max_stars_repo_head_hexsha": "c92ae66e27533a5f80ba6ac3ecfe2351337681fa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 106, "max_stars_repo_stars_event_min_datetime": "2020-06-14T13:36:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T06:12:43.000Z", "max_issues_repo_path": "face-tracking/face_detection.py", "max_issues_repo_name": "kcqnly/rpi-robot", "max_issues_repo_head_hexsha": "838802f8db5bb1a68311002dc9ea167014b0b7ec", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-11-13T18:55:25.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T01:55:26.000Z", "max_forks_repo_path": "face-tracking/face_detection.py", "max_forks_repo_name": "kcqnly/rpi-robot", "max_forks_repo_head_hexsha": "838802f8db5bb1a68311002dc9ea167014b0b7ec", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 38, "max_forks_repo_forks_event_min_datetime": "2020-06-14T13:37:19.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-18T14:29:19.000Z", "avg_line_length": 33.2931034483, "max_line_length": 89, "alphanum_fraction": 0.615397894, "include": true, "reason": "import numpy", "num_tokens": 1606} |
import matplotlib.pyplot as plt
import numpy as np
import os
def write_data(file):
    """Collect the timing values of every ``Time = <x>`` line into an array.

    ``file`` is any iterable of text lines (typically an open results file).
    For each line whose first space-separated token is ``"Time"``, the third
    token is parsed as a float; the values are returned as a numpy array.
    """
    times = [float(tokens[2])
             for tokens in (line.split(" ") for line in file)
             if tokens[0] == "Time"]
    return np.array(times)
# Directory containing this script; all input/output paths are relative to it.
my_path = os.path.abspath(os.path.dirname(__file__))


def _result_file(name):
    """Open JavaProject/results/<name>.txt for reading."""
    return open(os.path.join(my_path, "JavaProject", "results", name + ".txt"), "r")


def _decorate(title, x_tick):
    """Apply the shared tick labels, axis labels, title and legend."""
    plt.xticks(np.linspace(0, 68, 17), x_tick)
    plt.xlabel("Size of graph (nodes)")
    plt.ylabel("Time (s)")
    plt.title(title)
    plt.legend()


def _save(figure_name):
    """Save the current figure under relazioneAA/relazioneAA/imgs/ and close it."""
    plt.savefig(os.path.join(my_path, "relazioneAA", "relazioneAA", "imgs",
                             figure_name + ".png"))
    plt.close()


prim_n = _result_file("Prim_N")
naivekruskal_n = _result_file("NaiveKruskal_N")
kruskal_n = _result_file("Kruskal_N")
prim_f = _result_file("Prim_F")
naivekruskal_f = _result_file("NaiveKruskal_F")
kruskal_f = _result_file("Kruskal_F")

# BUG FIX: the original condition checked prim_n/naivekruskal_n/kruskal_n
# twice and never the *_f handles; check every handle exactly once.
if all(f.mode == "r" for f in (prim_n, naivekruskal_n, kruskal_n,
                               prim_f, naivekruskal_f, kruskal_f)):
    data_prim_n = write_data(prim_n)
    data_naivekruskal_n = write_data(naivekruskal_n)
    data_kruskal_n = write_data(kruskal_n)
    data_prim_f = write_data(prim_f)
    data_naivekruskal_f = write_data(naivekruskal_f)
    data_kruskal_f = write_data(kruskal_f)

    x_tick = ["10", "20", "40", "80", "100", "200", "400", "800", "1k", "2k",
              "4k", "8k", "10k", "20k", "40k", "80k", "100k"]
    xs = range(68)  # one sample per benchmark run

    # Per-algorithm comparison of the two implementations.
    plt.figure("kruskal_g")
    plt.plot(xs, data_kruskal_n, label="Nicola performance")
    plt.plot(xs, data_kruskal_f, label="Federico performance")
    _decorate("Kruskal algorithm performance", x_tick)
    _save("kruskal_g")

    plt.plot(xs, data_prim_n, label="Nicola performance")
    plt.plot(xs, data_prim_f, label="Federico performance")
    _decorate("Prim algorithm performance", x_tick)
    _save("prim_g")

    plt.plot(xs, data_naivekruskal_n, label="Nicola performance")
    # Typo fix: the legend used to read "Fedrico performance".
    plt.plot(xs, data_naivekruskal_f, label="Federico performance")
    _decorate("Naive Kruskal Algorithm performance", x_tick)
    _save("naivekruskal_g")

    # Per-implementation comparison of the three algorithms (log time axis).
    plt.plot(xs, data_kruskal_n, label="Kruskal")
    plt.plot(xs, data_prim_n, label="Prim")
    plt.plot(xs, data_naivekruskal_n, label="Naive Kruskal")
    plt.yscale("log")
    _decorate("Nicola algorithms performance", x_tick)
    _save("compare_n")

    plt.plot(xs, data_kruskal_f, label="Kruskal")
    plt.plot(xs, data_prim_f, label="Prim")
    plt.plot(xs, data_naivekruskal_f, label="Naive Kruskal")
    plt.yscale("log")
    _decorate("Federico algorithms performance", x_tick)
    _save("compare_f")

prim_n.close()
naivekruskal_n.close()
kruskal_n.close()
prim_f.close()
naivekruskal_f.close()
kruskal_f.close()
# Authors: Pearu Peterson, Pauli Virtanen, John Travers
"""
First-order ODE integrators.
User-friendly interface to various numerical integrators for solving a
system of first order ODEs with prescribed initial conditions::
d y(t)[i]
--------- = f(t,y(t))[i],
d t
y(t=0)[i] = y0[i],
where::
i = 0, ..., len(y0) - 1
class ode
---------
A generic interface class to numeric integrators. It has the following
methods::
integrator = ode(f,jac=None)
integrator = integrator.set_integrator(name,**params)
integrator = integrator.set_initial_value(y0,t0=0.0)
integrator = integrator.set_f_params(*args)
integrator = integrator.set_jac_params(*args)
y1 = integrator.integrate(t1,step=0,relax=0)
flag = integrator.successful()
class complex_ode
-----------------
This class has the same generic interface as ode, except it can handle complex
f, y and Jacobians by transparently translating them into the equivalent
real valued system. It supports the real valued solvers (i.e not zvode) and is
an alternative to ode with the zvode solver, sometimes performing better.
"""
# Shared "Available integrators" documentation.  It is appended to the module
# docstring below and to the ode class docstring.  Two typos fixed:
# "step sixe" -> "step size" and "first 0rder" -> "first order".
integrator_info = \
"""
Available integrators
---------------------
vode
~~~~
Real-valued Variable-coefficient Ordinary Differential Equation
solver, with fixed-leading-coefficient implementation. It provides
implicit Adams method (for non-stiff problems) and a method based on
backward differentiation formulas (BDF) (for stiff problems).
Source: http://www.netlib.org/ode/vode.f
This integrator accepts the following parameters in set_integrator()
method of the ode class:
- atol : float or sequence
  absolute tolerance for solution
- rtol : float or sequence
  relative tolerance for solution
- lband : None or int
- rband : None or int
  Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+rband.
  Setting these requires your jac routine to return the jacobian
  in packed format, jac_packed[i-j+lband, j] = jac[i,j].
- method: 'adams' or 'bdf'
  Which solver to use, Adams (non-stiff) or BDF (stiff)
- with_jacobian : bool
  Whether to use the jacobian
- nsteps : int
  Maximum number of (internally defined) steps allowed during one
  call to the solver.
- first_step : float
- min_step : float
- max_step : float
  Limits for the step sizes used by the integrator.
- order : int
  Maximum order used by the integrator,
  order <= 12 for Adams, <= 5 for BDF.

zvode
~~~~~
Complex-valued Variable-coefficient Ordinary Differential Equation
solver, with fixed-leading-coefficient implementation. It provides
implicit Adams method (for non-stiff problems) and a method based on
backward differentiation formulas (BDF) (for stiff problems).
Source: http://www.netlib.org/ode/zvode.f
This integrator accepts the same parameters in set_integrator()
as the "vode" solver.
:Note:
    When using ZVODE for a stiff system, it should only be used for
    the case in which the function f is analytic, that is, when each f(i)
    is an analytic function of each y(j). Analyticity means that the
    partial derivative df(i)/dy(j) is a unique complex number, and this
    fact is critical in the way ZVODE solves the dense or banded linear
    systems that arise in the stiff case. For a complex stiff ODE system
    in which f is not analytic, ZVODE is likely to have convergence
    failures, and for this problem one should instead use DVODE on the
    equivalent real system (in the real and imaginary parts of y).

dopri5
~~~~~~
Numerical solution of a system of first order
ordinary differential equations y'=f(x,y).
this is an explicit runge-kutta method of order (4)5
due to Dormand & Prince (with stepsize control and
dense output).

Authors: E. Hairer and G. Wanner
         Universite de Geneve, Dept. de Mathematiques
         CH-1211 Geneve 24, Switzerland
         e-mail:  ernst.hairer@math.unige.ch
                  gerhard.wanner@math.unige.ch

This code is described in:
E. Hairer, S.P. Norsett and G. Wanner, Solving Ordinary
Differential Equations i. Nonstiff Problems. 2nd edition.
Springer Series in Computational Mathematics,
Springer-Verlag (1993)

This integrator accepts the following parameters in set_integrator()
method of the ode class:
- atol : float or sequence
  absolute tolerance for solution
- rtol : float or sequence
  relative tolerance for solution
- nsteps : int
  Maximum number of (internally defined) steps allowed during one
  call to the solver.
- first_step : float
- max_step : float
- safety : float
  Safety factor on new step selection (default 0.9)
- ifactor : float
- dfactor : float
  Maximum factor to increase/decrease step size by in one step
- beta : float
  Beta parameter for stabilised step size control.

dop853
~~~~~~
Numerical solution of a system of first order
ordinary differential equations y'=f(x,y).
this is an explicit runge-kutta method of order 8(5,3)
due to Dormand & Prince (with stepsize control and
dense output).
Options and references the same as dopri5.
"""

if __doc__:
    __doc__ += integrator_info
# XXX: Integrators must have:
# ===========================
# cvode - C version of vode and vodpk with many improvements.
# Get it from http://www.netlib.org/ode/cvode.tar.gz
# To wrap cvode to Python, one must write extension module by
# hand. Its interface is too much 'advanced C' that using f2py
# would be too complicated (or impossible).
#
# How to define a new integrator:
# ===============================
#
# class myodeint(IntegratorBase):
#
# runner = <odeint function> or None
#
# def __init__(self,...): # required
# <initialize>
#
# def reset(self,n,has_jac): # optional
# # n - the size of the problem (number of equations)
# # has_jac - whether user has supplied its own routine for Jacobian
# <allocate memory,initialize further>
#
# def run(self,f,jac,y0,t0,t1,f_params,jac_params): # required
# # this method is called to integrate from t=t0 to t=t1
# # with initial condition y0. f and jac are user-supplied functions
# # that define the problem. f_params,jac_params are additional
# # arguments
# # to these functions.
# <calculate y1>
#        if <calculation was unsuccessful>:
# self.success = 0
# return t1,y1
#
# # In addition, one can define step() and run_relax() methods (they
# # take the same arguments as run()) if the integrator can support
# # these features (see IntegratorBase doc strings).
#
# if myodeint.runner:
# IntegratorBase.integrator_classes.append(myodeint)
# Names exported by a star-import; ode and complex_ode are the public API.
__all__ = ['ode', 'complex_ode']
# Keyword placeholder expanded by the (historical) revision-control system.
__version__ = "$Id$"
__docformat__ = "restructuredtext en"
import re
import warnings
from numpy import asarray, array, zeros, int32, isscalar, real, imag
import vode as _vode
import _dop
#------------------------------------------------------------------------------
# User interface
#------------------------------------------------------------------------------
class ode(object):
    """\
    A generic interface class to numeric integrators.
    See also
    --------
    odeint : an integrator with a simpler interface based on lsoda from ODEPACK
    quad : for finding the area under a curve
    Examples
    --------
    A problem to integrate and the corresponding jacobian:
    >>> from scipy import eye
    >>> from scipy.integrate import ode
    >>>
    >>> y0, t0 = [1.0j, 2.0], 0
    >>>
    >>> def f(t, y, arg1):
    >>>     return [1j*arg1*y[0] + y[1], -arg1*y[1]**2]
    >>> def jac(t, y, arg1):
    >>>     return [[1j*arg1, 1], [0, -arg1*2*y[1]]]
    The integration:
    >>> r = ode(f, jac).set_integrator('zvode', method='bdf', with_jacobian=True)
    >>> r.set_initial_value(y0, t0).set_f_params(2.0).set_jac_params(2.0)
    >>> t1 = 10
    >>> dt = 1
    >>> while r.successful() and r.t < t1:
    >>>     r.integrate(r.t+dt)
    >>>     print r.t, r.y
    """
    if __doc__:
        __doc__ += integrator_info

    def __init__(self, f, jac=None):
        """
        Define equation y' = f(y,t) where (optional) jac = df/dy.
        Parameters
        ----------
        f : f(t, y, *f_args)
            Rhs of the equation. t is a scalar, y.shape == (n,).
            f_args is set by calling set_f_params(*args)
        jac : jac(t, y, *jac_args)
            Jacobian of the rhs, jac[i,j] = d f[i] / d y[j]
            jac_args is set by calling set_f_params(*args)
        """
        self.stiff = 0  # NOTE(review): not referenced anywhere else in this file
        self.f = f
        self.jac = jac
        self.f_params = ()
        self.jac_params = ()
        # Empty until set_initial_value()/set_integrator() install a state.
        self.y = []

    def set_initial_value(self, y, t=0.0):
        """Set initial conditions y(t) = y."""
        # Promote a scalar initial condition to a length-1 vector.
        if isscalar(y):
            y = [y]
        n_prev = len(self.y)
        if not n_prev:
            # First call: lazily pick a default integrator so that the
            # scalar type used for the state below is defined.
            self.set_integrator('') # find first available integrator
        self.y = asarray(y, self._integrator.scalar)
        self.t = t
        self._integrator.reset(len(self.y),self.jac is not None)
        return self

    def set_integrator(self, name, **integrator_params):
        """
        Set integrator by name.
        Parameters
        ----------
        name : str
            Name of the integrator.
        integrator_params :
            Additional parameters for the integrator.
        """
        integrator = find_integrator(name)
        if integrator is None:
            # FIXME: this really should be raise an exception. Will that break
            # any code?
            warnings.warn('No integrator name match with %r or is not '
                          'available.' % name)
        else:
            self._integrator = integrator(**integrator_params)
            if not len(self.y):
                # No initial value yet: install a dummy scalar state so
                # reset() has a problem size to work with.
                self.t = 0.0
                self.y = array([0.0], self._integrator.scalar)
            self._integrator.reset(len(self.y),self.jac is not None)
        return self

    def integrate(self, t, step=0, relax=0):
        """Find y=y(t), set y as an initial condition, and return y."""
        # Choose the most specific capability the integrator advertises:
        # single step, relaxed run (stop at any t >= target), or plain run.
        if step and self._integrator.supports_step:
            mth = self._integrator.step
        elif relax and self._integrator.supports_run_relax:
            mth = self._integrator.run_relax
        else:
            mth = self._integrator.run
        # The no-op lambda is a placeholder when no Jacobian was supplied.
        self.y,self.t = mth(self.f,self.jac or (lambda :None),
                            self.y,self.t,t,
                            self.f_params,self.jac_params)
        return self.y

    def successful(self):
        """Check if integration was successful."""
        try:
            self._integrator
        except AttributeError:
            # No integrator chosen yet; fall back to the default one.
            self.set_integrator('')
        return self._integrator.success==1

    def set_f_params(self,*args):
        """Set extra parameters for user-supplied function f."""
        self.f_params = args
        return self

    def set_jac_params(self,*args):
        """Set extra parameters for user-supplied function jac."""
        self.jac_params = args
        return self
class complex_ode(ode):
    """ A wrapper of ode for complex systems.
    For usage examples, see `ode`.
    """

    def __init__(self, f, jac=None):
        """
        Define equation y' = f(y,t), where y and f can be complex.
        Parameters
        ----------
        f : f(t, y, *f_args)
            Rhs of the equation. t is a scalar, y.shape == (n,).
            f_args is set by calling set_f_params(*args)
        jac : jac(t, y, *jac_args)
            Jacobian of the rhs, jac[i,j] = d f[i] / d y[j]
            jac_args is set by calling set_f_params(*args)
        """
        self.cf = f
        self.cjac = jac
        # The real-valued solvers see a system of twice the size: even
        # entries hold real parts, odd entries imaginary parts.
        if jac is not None:
            ode.__init__(self, self._wrap, self._wrap_jac)
        else:
            ode.__init__(self, self._wrap, None)

    def _wrap(self, t, y, *f_args):
        # Reassemble the complex state from interleaved real/imag parts,
        # evaluate the user's complex rhs, then split the result again.
        f = self.cf(*((t, y[::2] + 1j*y[1::2]) + f_args))
        self.tmp[::2] = real(f)
        self.tmp[1::2] = imag(f)
        return self.tmp

    def _wrap_jac(self, t, y, *jac_params):
        # Real 2n x 2n Jacobian of the interleaved system, laid out in 2x2
        # blocks as [[Re J, -Im J], [Im J, Re J]].
        jac = self.cjac(*((t, y[::2] + 1j*y[1::2]) + jac_params))
        self.jac_tmp[1::2,1::2] = self.jac_tmp[::2,::2] = real(jac)
        self.jac_tmp[1::2,::2] = imag(jac)
        self.jac_tmp[::2,1::2] = -self.jac_tmp[1::2,::2]
        return self.jac_tmp

    def set_integrator(self, name, **integrator_params):
        """
        Set integrator by name.
        Parameters
        ----------
        name : str
            Name of the integrator
        integrator_params :
            Additional parameters for the integrator.
        """
        # zvode handles complex y natively; going through this real-valued
        # wrapper would double-wrap the system.
        if name == 'zvode':
            # BUG FIX: the message used to read "not zode", which names a
            # non-existent class; it should refer to complex_ode itself.
            raise ValueError("zvode should be used with ode, not complex_ode")
        return ode.set_integrator(self, name, **integrator_params)

    def set_initial_value(self, y, t=0.0):
        """Set initial conditions y(t) = y."""
        y = asarray(y)
        self.tmp = zeros(y.size*2, 'float')
        self.tmp[::2] = real(y)
        self.tmp[1::2] = imag(y)
        if self.cjac is not None:
            self.jac_tmp = zeros((y.size*2, y.size*2), 'float')
        return ode.set_initial_value(self, self.tmp, t)

    def integrate(self, t, step=0, relax=0):
        """Find y=y(t), set y as an initial condition, and return y."""
        y = ode.integrate(self, t, step, relax)
        # Fold the interleaved real solution back into complex form.
        return y[::2] + 1j*y[1::2]
#------------------------------------------------------------------------------
# ODE integrators
#------------------------------------------------------------------------------
def find_integrator(name):
    """Return the first registered integrator class whose name matches
    *name* as a case-insensitive regular expression, or None if none does.
    """
    candidates = (klass for klass in IntegratorBase.integrator_classes
                  if re.match(name, klass.__name__, re.I))
    return next(candidates, None)
class IntegratorBase(object):
    """Common interface implemented by every concrete ODE integrator.

    Class attributes act as capability flags: ``runner`` holds the
    low-level routine (None when the backend is unavailable), ``success``
    becomes 1 after a successful call, and ``supports_step`` /
    ``supports_run_relax`` advertise the optional step()/run_relax()
    features.  ``integrator_classes`` is the shared registry consulted by
    find_integrator().
    """

    runner = None               # low-level routine; None => not available
    success = None              # 1 after a successful integrator call
    supports_run_relax = None
    supports_step = None
    integrator_classes = []     # registry of available integrator classes
    scalar = float              # scalar type of the state vector

    def reset(self, n, has_jac):
        """Prepare integrator for call: allocate memory, set flags, etc.

        ``n`` is the number of equations; ``has_jac`` tells whether the
        user supplied a routine for evaluating the Jacobian.
        """

    def run(self, f, jac, y0, t0, t1, f_params, jac_params):
        """Integrate from t=t0 to t=t1 with initial condition y0.

        Returns a 2-tuple ``(y1, t1)`` where ``y1`` is the result at the
        stoppage coordinate ``t1``.
        """
        raise NotImplementedError('all integrators must define '
                                  'run(f,jac,t0,t1,y0,f_params,jac_params)')

    def step(self, f, jac, y0, t0, t1, f_params, jac_params):
        """Make one integration step and return ``(y1, t1)``."""
        raise NotImplementedError('%s does not support step() method' %
                                  self.__class__.__name__)

    def run_relax(self, f, jac, y0, t0, t1, f_params, jac_params):
        """Integrate from t=t0 to some t>=t1 and return ``(y1, t)``."""
        raise NotImplementedError('%s does not support run_relax() method' %
                                  self.__class__.__name__)
class vode(IntegratorBase):
    """Wrapper for the Fortran DVODE solver (implicit Adams / BDF methods).

    Only usable when the compiled ``_vode`` extension provides ``dvode``.
    """
    runner = getattr(_vode,'dvode',None)

    # Diagnostic texts keyed by the negative ISTATE codes returned by the
    # Fortran routine (see run()).
    messages = {-1:'Excess work done on this call. (Perhaps wrong MF.)',
                -2:'Excess accuracy requested. (Tolerances too small.)',
                -3:'Illegal input detected. (See printed message.)',
                -4:'Repeated error test failures. (Check all input.)',
                -5:'Repeated convergence failures. (Perhaps bad'
                ' Jacobian supplied or wrong choice of MF or tolerances.)',
                -6:'Error weight became zero during problem. (Solution'
                ' component i vanished, and ATOL or ATOL(i) = 0.)'
                }
    supports_run_relax = 1
    supports_step = 1

    def __init__(self,
                 method = 'adams',
                 with_jacobian = 0,
                 rtol=1e-6,atol=1e-12,
                 lband=None,uband=None,
                 order = 12,
                 nsteps = 500,
                 max_step = 0.0, # corresponds to infinite
                 min_step = 0.0,
                 first_step = 0.0, # determined by solver
                 ):
        # 'adams' -> meth=1 (non-stiff), 'bdf' -> meth=2 (stiff); the method
        # string is treated as a case-insensitive regular expression.
        if re.match(method,r'adams',re.I):
            self.meth = 1
        elif re.match(method,r'bdf',re.I):
            self.meth = 2
        else:
            raise ValueError('Unknown integration method %s' % method)
        self.with_jacobian = with_jacobian
        self.rtol = rtol
        self.atol = atol
        self.mu = uband
        self.ml = lband
        self.order = order
        self.nsteps = nsteps
        self.max_step = max_step
        self.min_step = min_step
        self.first_step = first_step
        self.success = 1

    def reset(self,n,has_jac):
        # Calculate parameters for Fortran subroutine dvode.
        # miter selects the iteration method; combined with meth it forms
        # the method flag mf = 10*meth + miter passed to the solver.
        if has_jac:
            if self.mu is None and self.ml is None:
                miter = 1  # user-supplied full Jacobian
            else:
                if self.mu is None: self.mu = 0
                if self.ml is None: self.ml = 0
                miter = 4  # user-supplied banded Jacobian
        else:
            if self.mu is None and self.ml is None:
                if self.with_jacobian:
                    miter = 2  # internally generated full Jacobian
                else:
                    miter = 0  # functional iteration, no Jacobian
            else:
                if self.mu is None: self.mu = 0
                if self.ml is None: self.ml = 0
                if self.ml==self.mu==0:
                    miter = 3  # internally generated diagonal Jacobian
                else:
                    miter = 5  # internally generated banded Jacobian
        mf = 10*self.meth + miter
        # Real workspace length lrw for each mf value -- presumably the
        # sizes required by the DVODE documentation (confirm against
        # http://www.netlib.org/ode/vode.f).
        if mf==10:
            lrw = 20 + 16*n
        elif mf in [11,12]:
            lrw = 22 + 16*n + 2*n*n
        elif mf == 13:
            lrw = 22 + 17*n
        elif mf in [14,15]:
            lrw = 22 + 18*n + (3*self.ml+2*self.mu)*n
        elif mf == 20:
            lrw = 20 + 9*n
        elif mf in [21,22]:
            lrw = 22 + 9*n + 2*n*n
        elif mf == 23:
            lrw = 22 + 10*n
        elif mf in [24,25]:
            lrw = 22 + 11*n + (3*self.ml+2*self.mu)*n
        else:
            raise ValueError('Unexpected mf=%s' % mf)
        # Integer workspace length.
        if miter in [0,3]:
            liw = 30
        else:
            liw = 30 + n
        rwork = zeros((lrw,), float)
        # Optional step-size inputs occupy rwork[4:7] (Fortran RWORK(5..7)).
        rwork[4] = self.first_step
        rwork[5] = self.max_step
        rwork[6] = self.min_step
        self.rwork = rwork
        iwork = zeros((liw,), int32)
        if self.ml is not None:
            iwork[0] = self.ml
        if self.mu is not None:
            iwork[1] = self.mu
        iwork[4] = self.order
        iwork[5] = self.nsteps
        iwork[6] = 2 # mxhnil
        self.iwork = iwork
        # Arguments appended after (f, jac, y, t, tout) in run():
        # rtol, atol, itask=1, istate=1, rwork, iwork, mf.
        self.call_args = [self.rtol,self.atol,1,1,self.rwork,self.iwork,mf]
        self.success = 1

    def run(self,*args):
        # args = (f, jac, y0, t0, t1, f_params, jac_params); call_args are
        # spliced in between t1 and f_params.
        y1,t,istate = self.runner(*(args[:5]+tuple(self.call_args)+args[5:]))
        if istate <0:
            warnings.warn('vode: ' + self.messages.get(istate,'Unexpected istate=%s'%istate))
            self.success = 0
        else:
            self.call_args[3] = 2 # upgrade istate from 1 to 2
        return y1,t

    def step(self,*args):
        # Temporarily switch the itask slot of call_args to 2 to request a
        # single internal step (see IntegratorBase.step), then restore it.
        itask = self.call_args[2]
        self.call_args[2] = 2
        r = self.run(*args)
        self.call_args[2] = itask
        return r

    def run_relax(self,*args):
        # Temporarily switch the itask slot to 3 for the relaxed run
        # (stop at the first point at or beyond t1), then restore it.
        itask = self.call_args[2]
        self.call_args[2] = 3
        r = self.run(*args)
        self.call_args[2] = itask
        return r
# Register vode only when the compiled _vode extension provides dvode.
if vode.runner is not None:
    IntegratorBase.integrator_classes.append(vode)
class zvode(vode):
    """Complex-valued variant wrapping the Fortran ZVODE solver.

    Reuses vode's option handling, messages and step()/run_relax(); only
    the workspace layout differs (an extra complex array ZWORK).
    """
    runner = getattr(_vode,'zvode',None)

    supports_run_relax = 1
    supports_step = 1
    scalar = complex  # state vectors are complex-valued

    def reset(self, n, has_jac):
        # Calculate parameters for Fortran subroutine zvode.
        # Same miter/mf selection as vode.reset().
        if has_jac:
            if self.mu is None and self.ml is None:
                miter = 1
            else:
                if self.mu is None: self.mu = 0
                if self.ml is None: self.ml = 0
                miter = 4
        else:
            if self.mu is None and self.ml is None:
                if self.with_jacobian:
                    miter = 2
                else:
                    miter = 0
            else:
                if self.mu is None: self.mu = 0
                if self.ml is None: self.ml = 0
                if self.ml==self.mu==0:
                    miter = 3
                else:
                    miter = 5
        mf = 10*self.meth + miter
        # Complex workspace length lzw per mf value.  The negative-mf
        # branches are unreachable here (meth is 1 or 2 and miter >= 0,
        # so mf >= 10); all reachable values are covered, hence no else.
        if mf in (10,):
            lzw = 15*n
        elif mf in (11, 12):
            lzw = 15*n + 2*n**2
        elif mf in (-11, -12):
            lzw = 15*n + n**2
        elif mf in (13,):
            lzw = 16*n
        elif mf in (14,15):
            lzw = 17*n + (3*self.ml + 2*self.mu)*n
        elif mf in (-14,-15):
            lzw = 16*n + (2*self.ml + self.mu)*n
        elif mf in (20,):
            lzw = 8*n
        elif mf in (21, 22):
            lzw = 8*n + 2*n**2
        elif mf in (-21,-22):
            lzw = 8*n + n**2
        elif mf in (23,):
            lzw = 9*n
        elif mf in (24, 25):
            lzw = 10*n + (3*self.ml + 2*self.mu)*n
        elif mf in (-24, -25):
            lzw = 9*n + (2*self.ml + self.mu)*n
        lrw = 20 + n
        if miter in (0, 3):
            liw = 30
        else:
            liw = 30 + n
        zwork = zeros((lzw,), complex)
        self.zwork = zwork
        rwork = zeros((lrw,), float)
        # Optional step-size inputs, as in vode.reset().
        rwork[4] = self.first_step
        rwork[5] = self.max_step
        rwork[6] = self.min_step
        self.rwork = rwork
        iwork = zeros((liw,), int32)
        if self.ml is not None:
            iwork[0] = self.ml
        if self.mu is not None:
            iwork[1] = self.mu
        iwork[4] = self.order
        iwork[5] = self.nsteps
        iwork[6] = 2 # mxhnil
        self.iwork = iwork
        # As vode.call_args, with the complex zwork inserted before rwork.
        self.call_args = [self.rtol,self.atol,1,1,
                          self.zwork,self.rwork,self.iwork,mf]
        self.success = 1

    def run(self,*args):
        y1,t,istate = self.runner(*(args[:5]+tuple(self.call_args)+args[5:]))
        if istate < 0:
            warnings.warn('zvode: ' +
                self.messages.get(istate, 'Unexpected istate=%s'%istate))
            self.success = 0
        else:
            self.call_args[3] = 2 # upgrade istate from 1 to 2
        return y1, t
# Register zvode only when the compiled _vode extension provides it.
if zvode.runner is not None:
    IntegratorBase.integrator_classes.append(zvode)
class dopri5(IntegratorBase):
    """Explicit Runge-Kutta integrator of order (4)5 (Dormand & Prince)
    wrapping the Fortran DOPRI5 routine.

    Only usable when the compiled ``_dop`` extension provides ``dopri5``.
    """
    runner = getattr(_dop,'dopri5',None)
    name = 'dopri5'

    # Diagnostic texts keyed by the IDID return code checked in run().
    messages = { 1 : 'computation successful',
                 2 : 'comput. successful (interrupted by solout)',
                -1 : 'input is not consistent',
                -2 : 'larger nmax is needed',
                -3 : 'step size becomes too small',
                -4 : 'problem is probably stiff (interrupted)',
               }

    def __init__(self,
                 rtol=1e-6, atol=1e-12,
                 nsteps=500,
                 max_step=0.0,
                 first_step=0.0,  # determined by solver
                 safety=0.9,
                 ifactor=10.0,
                 dfactor=0.2,
                 beta=0.0,
                 method=None  # accepted for interface symmetry; unused here
                 ):
        self.rtol = rtol
        self.atol = atol
        self.nsteps = nsteps
        self.max_step = max_step
        self.first_step = first_step
        self.safety = safety
        self.ifactor = ifactor
        self.dfactor = dfactor
        self.beta = beta
        self.success = 1

    def reset(self, n, has_jac):
        # work[1:7] carry the step-control options (Fortran WORK(2..7)).
        work = zeros((8*n+21,), float)
        work[1] = self.safety
        work[2] = self.dfactor
        work[3] = self.ifactor
        work[4] = self.beta
        work[5] = self.max_step
        work[6] = self.first_step
        self.work = work
        iwork = zeros((21,), int32)
        iwork[0] = self.nsteps
        self.iwork = iwork
        self.call_args = [self.rtol, self.atol, self._solout,
                          self.work, self.iwork]
        self.success = 1

    def run(self, f, jac, y0, t0, t1, f_params, jac_params):
        """Integrate from t0 to t1; return ``(y1, t1)``.

        The Jacobian arguments are accepted for interface compatibility
        and ignored (DOPRI5 is an explicit method).
        """
        # BUG FIX: f_params used to be silently dropped, so set_f_params()
        # had no effect with dopri5/dop853 (unlike vode, which forwards
        # them).  Bind the extra parameters into the rhs before calling
        # the Fortran routine.
        if f_params:
            def _rhs(t, y, f=f, f_params=f_params):
                return f(t, y, *f_params)
        else:
            _rhs = f
        x, y, iwork, idid = self.runner(*((_rhs, t0, y0, t1) +
                                          tuple(self.call_args)))
        if idid < 0:
            warnings.warn(self.name + ': ' +
                          self.messages.get(idid, 'Unexpected idid=%s' % idid))
            self.success = 0
        return y, x

    def _solout(self, *args):
        # Dummy solout callback required by the Fortran interface.
        pass
# Register dopri5 only when the compiled _dop extension provides it.
if dopri5.runner is not None:
    IntegratorBase.integrator_classes.append(dopri5)
class dop853(dopri5):
    """Explicit Runge-Kutta integrator of order 8(5,3) (Dormand & Prince)
    wrapping the Fortran DOP853 routine.

    Shares all option handling with dopri5; only the default step-control
    factors and the workspace size differ.
    """
    runner = getattr(_dop,'dop853',None)
    name = 'dop853'

    def __init__(self,
                 rtol=1e-6, atol=1e-12,
                 nsteps=500,
                 max_step=0.0,
                 first_step=0.0,  # determined by solver
                 safety=0.9,
                 ifactor=6.0,
                 dfactor=0.3,
                 beta=0.0,
                 method=None
                 ):
        # The option set is identical to dopri5's -- only the defaults for
        # ifactor/dfactor differ -- so delegate instead of duplicating all
        # the attribute assignments.
        dopri5.__init__(self, rtol, atol, nsteps, max_step, first_step,
                        safety, ifactor, dfactor, beta, method)

    def reset(self, n, has_jac):
        # DOP853 needs a larger real workspace than dopri5 (11*n+21 vs
        # 8*n+21); the option slots work[1:7] are laid out identically.
        work = zeros((11*n+21,), float)
        work[1] = self.safety
        work[2] = self.dfactor
        work[3] = self.ifactor
        work[4] = self.beta
        work[5] = self.max_step
        work[6] = self.first_step
        self.work = work
        iwork = zeros((21,), int32)
        iwork[0] = self.nsteps
        self.iwork = iwork
        self.call_args = [self.rtol, self.atol, self._solout,
                          self.work, self.iwork]
        self.success = 1
# Register dop853 only when the compiled _dop extension provides it.
if dop853.runner is not None:
    IntegratorBase.integrator_classes.append(dop853)
| {"hexsha": "76efa8c617de3bacaae4d2bd792b4f984c976560", "size": 25735, "ext": "py", "lang": "Python", "max_stars_repo_path": "scipy/integrate/ode.py", "max_stars_repo_name": "lesserwhirls/scipy-cwt", "max_stars_repo_head_hexsha": "ee673656d879d9356892621e23ed0ced3d358621", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2015-10-07T00:37:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-21T17:02:33.000Z", "max_issues_repo_path": "scipy/integrate/ode.py", "max_issues_repo_name": "lesserwhirls/scipy-cwt", "max_issues_repo_head_hexsha": "ee673656d879d9356892621e23ed0ced3d358621", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scipy/integrate/ode.py", "max_forks_repo_name": "lesserwhirls/scipy-cwt", "max_forks_repo_head_hexsha": "ee673656d879d9356892621e23ed0ced3d358621", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2015-05-09T14:23:57.000Z", "max_forks_repo_forks_event_max_datetime": "2018-11-15T05:56:00.000Z", "avg_line_length": 31.8897149938, "max_line_length": 93, "alphanum_fraction": 0.5587332427, "include": true, "reason": "from numpy,from scipy", "num_tokens": 6868} |
#include "f1_datalogger/udp_logging/common/rebroadcast_handler_2018.h"
#include <boost/bind.hpp>
#include <iostream>
// Default constructor.  Only announces construction for debugging; the
// socket/endpoint/io_service members are created later in init().
deepf1::RebroadcastHandler2018::RebroadcastHandler2018()
{
  std::cout << "Constructing Rebroadcast Handler" << std::endl;
}
// Trivial destructor: the pointer members assigned via reset() in init()
// release the socket, endpoint and io_service automatically.
deepf1::RebroadcastHandler2018::~RebroadcastHandler2018()
{
}
// Completion handler shared by every async rebroadcast: on failure, log the
// error together with the metadata string describing the packet that could
// not be sent.  Success is silent.
void handle_send(const std::string metadata,
                const boost::system::error_code& error,
                std::size_t bytes_transferred)
{
  if(error.failed())
  {
    // BUG FIX: bytes_transferred is std::size_t, which was printed with
    // "%lu"; on platforms where size_t is not unsigned long (e.g. 64-bit
    // Windows) that is undefined behavior.  "%zu" is the portable
    // conversion specifier for size_t.
    std::printf("Failed to rebroadcast %zu bytes. Error code: %d. Error Message: %s\n",
      bytes_transferred, error.value(), error.message().c_str());
    std::printf("Message metadata: %s\n", metadata.c_str());
  }
}
// Render the packet header fields as one "<label>: <value>\n" line each,
// used to annotate error logs emitted by handle_send().
std::string getMetadata(const deepf1::twenty_eighteen::PacketHeader& header)
{
  std::string meta;
  meta += "Frame id: " + std::to_string(header.m_frameIdentifier) + "\n";
  meta += "Packet Format: " + std::to_string(header.m_packetFormat) + "\n";
  meta += "Packet id: " + std::to_string(header.m_packetId) + "\n";
  meta += "Packet version: " + std::to_string(header.m_packetVersion) + "\n";
  meta += "Player Car Index: " + std::to_string(header.m_playerCarIndex) + "\n";
  meta += "Session Time: " + std::to_string(header.m_sessionTime) + "\n";
  meta += "Session UID: " + std::to_string(header.m_sessionUID) + "\n";
  return meta;
}
// Rebroadcast one car-setup packet: asynchronously send the raw struct bytes
// to the configured endpoint, then pump the io_service once so the
// completion callback (handle_send) runs and can report errors.
// NOTE(review): the asio buffer refers to the caller-owned data.data; this
// assumes the send is completed within the run_one() call -- confirm before
// making this path fully asynchronous.
void deepf1::RebroadcastHandler2018::handleData(const deepf1::twenty_eighteen::TimestampedPacketCarSetupData& data)
{
  std::string metadata= getMetadata(data.data.m_header);
  socket_->async_send_to(boost::asio::buffer(&(data.data), sizeof(data.data)), *remote_endpoint_, 0,
  boost::bind(&handle_send, metadata,
    boost::asio::placeholders::error,
    boost::asio::placeholders::bytes_transferred));
  io_service_->run_one();
}
// Rebroadcast one car-status packet; same send-then-run_one pattern as the
// car-setup overload.
void deepf1::RebroadcastHandler2018::handleData(const deepf1::twenty_eighteen::TimestampedPacketCarStatusData& data)
{
  std::string metadata= getMetadata(data.data.m_header);
  socket_->async_send_to(boost::asio::buffer(&(data.data), sizeof(data.data)), *remote_endpoint_, 0,
  boost::bind(&handle_send, metadata,
    boost::asio::placeholders::error,
    boost::asio::placeholders::bytes_transferred));
  io_service_->run_one();
}
// Rebroadcast one car-telemetry packet; same send-then-run_one pattern as
// the car-setup overload.
void deepf1::RebroadcastHandler2018::handleData(const deepf1::twenty_eighteen::TimestampedPacketCarTelemetryData& data)
{
  std::string metadata= getMetadata(data.data.m_header);
  socket_->async_send_to(boost::asio::buffer(&(data.data), sizeof(data.data)), *remote_endpoint_, 0,
  boost::bind(&handle_send, metadata,
    boost::asio::placeholders::error,
    boost::asio::placeholders::bytes_transferred));
  io_service_->run_one();
}
// Rebroadcast one event packet; same send-then-run_one pattern as the
// car-setup overload.
void deepf1::RebroadcastHandler2018::handleData(const deepf1::twenty_eighteen::TimestampedPacketEventData& data)
{
  std::string metadata= getMetadata(data.data.m_header);
  socket_->async_send_to(boost::asio::buffer(&(data.data), sizeof(data.data)), *remote_endpoint_, 0,
  boost::bind(&handle_send, metadata,
    boost::asio::placeholders::error,
    boost::asio::placeholders::bytes_transferred));
  io_service_->run_one();
}
// Rebroadcast one lap-data packet; same send-then-run_one pattern as the
// car-setup overload.
void deepf1::RebroadcastHandler2018::handleData(const deepf1::twenty_eighteen::TimestampedPacketLapData& data)
{
  std::string metadata= getMetadata(data.data.m_header);
  socket_->async_send_to(boost::asio::buffer(&(data.data), sizeof(data.data)), *remote_endpoint_, 0,
  boost::bind(&handle_send, metadata,
    boost::asio::placeholders::error,
    boost::asio::placeholders::bytes_transferred));
  io_service_->run_one();
}
// Rebroadcast one motion packet; same send-then-run_one pattern as the
// car-setup overload.
void deepf1::RebroadcastHandler2018::handleData(const deepf1::twenty_eighteen::TimestampedPacketMotionData& data)
{
  std::string metadata= getMetadata(data.data.m_header);
  socket_->async_send_to(boost::asio::buffer(&(data.data), sizeof(data.data)), *remote_endpoint_, 0,
  boost::bind(&handle_send, metadata,
    boost::asio::placeholders::error,
    boost::asio::placeholders::bytes_transferred));
  io_service_->run_one();
}
// Rebroadcast one participants packet; same send-then-run_one pattern as
// the car-setup overload.
void deepf1::RebroadcastHandler2018::handleData(const deepf1::twenty_eighteen::TimestampedPacketParticipantsData& data)
{
  std::string metadata= getMetadata(data.data.m_header);
  socket_->async_send_to(boost::asio::buffer(&(data.data), sizeof(data.data)), *remote_endpoint_, 0,
  boost::bind(&handle_send, metadata,
    boost::asio::placeholders::error,
    boost::asio::placeholders::bytes_transferred));
  io_service_->run_one();
}
// Rebroadcast one session packet; same send-then-run_one pattern as the
// car-setup overload.
void deepf1::RebroadcastHandler2018::handleData(const deepf1::twenty_eighteen::TimestampedPacketSessionData& data)
{
  std::string metadata= getMetadata(data.data.m_header);
  socket_->async_send_to(boost::asio::buffer(&(data.data), sizeof(data.data)), *remote_endpoint_, 0,
  boost::bind(&handle_send, metadata,
    boost::asio::placeholders::error,
    boost::asio::placeholders::bytes_transferred));
  io_service_->run_one();
}
bool deepf1::RebroadcastHandler2018::isReady()
{
return bool(remote_endpoint_) && bool(socket_) && bool(io_service_);
}
// Create the io_service, open an IPv4 UDP socket, and resolve the
// rebroadcast destination.
// host  -- address the packets are re-sent to.
// port  -- port the game streams on; packets are rebroadcast to port + 1,
//          presumably so the rebroadcast does not collide with the
//          original listener -- confirm this is intentional.
// begin -- unused here; kept for the common handler interface.
void deepf1::RebroadcastHandler2018::init(const std::string& host, unsigned int port, const std::chrono::high_resolution_clock::time_point& begin)
{
  io_service_.reset(new boost::asio::io_service);
  socket_.reset(new boost::asio::ip::udp::socket(*io_service_));
  socket_->open(boost::asio::ip::udp::v4());
  remote_endpoint_.reset(new boost::asio::ip::udp::endpoint(boost::asio::ip::address::from_string(host), port + 1));
}
| {"hexsha": "6a48efdeb8c835a275c5a30e97472aff987466ca", "size": 5223, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "data-logger/src/udp_logging/common/rebroadcast_handler_2018.cpp", "max_stars_repo_name": "linklab-uva/deepracing", "max_stars_repo_head_hexsha": "fc25c47658277df029e7399d295d97a75fe85216", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 11.0, "max_stars_repo_stars_event_min_datetime": "2020-06-29T15:21:37.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-12T00:42:26.000Z", "max_issues_repo_path": "data-logger/src/udp_logging/common/rebroadcast_handler_2018.cpp", "max_issues_repo_name": "linklab-uva/deepracing", "max_issues_repo_head_hexsha": "fc25c47658277df029e7399d295d97a75fe85216", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data-logger/src/udp_logging/common/rebroadcast_handler_2018.cpp", "max_forks_repo_name": "linklab-uva/deepracing", "max_forks_repo_head_hexsha": "fc25c47658277df029e7399d295d97a75fe85216", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-01-23T23:36:57.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-02T00:18:37.000Z", "avg_line_length": 42.8114754098, "max_line_length": 146, "alphanum_fraction": 0.7465058396, "num_tokens": 1348} |
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import function_node
from chainer.functions.math import sum as _sum
from chainer.utils import array
from chainer.utils import type_check
class BatchL2NormSquared(function_node.FunctionNode):
    """Per-sample squared L2 norm.

    Flattens each input of shape ``(B, ...)`` to ``(B, D)`` and returns the
    1-D array of row-wise sums of squares.
    """

    def check_type_forward(self, in_types):
        # Exactly one float32 input with a batch axis plus at least one
        # feature axis.
        type_check.expect(in_types.size() == 1)
        x_type, = in_types
        type_check.expect(
            x_type.dtype == numpy.float32,
            x_type.ndim >= 2,
        )
    def forward_cpu(self, inputs):
        # Input is retained for the backward pass (gradient needs x).
        self.retain_inputs((0,))
        x = array.as_mat(inputs[0])
        return (x * x).sum(axis=1),
    def forward_gpu(self, inputs):
        self.retain_inputs((0,))
        x = array.as_mat(inputs[0])
        # Custom reduction: map each element to x*x and sum rows on device.
        l2normsquared_kernel = cuda.cupy.ReductionKernel(
            'T x', 'T y', 'x * x', 'a + b', 'y = a', '0', 'l2normsquared'
        )
        return l2normsquared_kernel(x, axis=1),
    def backward(self, indexes, gy):
        # Delegate to a dedicated FunctionNode so double-backprop works;
        # apply() returns a tuple, matching the expected gradients tuple.
        x = self.get_retained_inputs()
        return BatchL2NormSquaredGrad().apply((x[0], gy[0]))
class BatchL2NormSquaredGrad(function_node.FunctionNode):
    """Gradient of :class:`BatchL2NormSquared`: computes ``gx = 2 * x * gy``.

    ``gy`` arrives with shape ``(B,)`` and is reshaped so it broadcasts
    against ``x`` of shape ``(B, ...)``.
    """

    def forward_cpu(self, inputs):
        self.retain_inputs((0, 1))
        x, gy0 = inputs
        # Reshape (B,) -> (B, 1, ..., 1) so the product broadcasts over x.
        gy0 = gy0.reshape(-1, *((1,) * (x.ndim - 1)))
        gx = 2 * x * gy0
        return gx,
    def forward_gpu(self, inputs):
        self.retain_inputs((0, 1))
        x, gy0 = inputs
        gy0 = gy0.reshape(-1, *((1,) * (x.ndim - 1)))
        # Fused elementwise kernel for 2 * x * gy on device.
        kernel = cuda.elementwise(
            'T x, T gy', 'T gx', 'gx = 2 * x * gy',
            'l2normsquared_bwd')
        gx = kernel(x, gy0)
        return gx,
    def backward(self, indexes, grad_outputs):
        # Second-order gradients: d/dx and d/dgy0 of (2 * x * gy0).
        x, gy0 = self.get_retained_inputs()
        gy0 = gy0.reshape(-1, *((1,) * (x.ndim - 1)))
        gy0 = chainer.functions.broadcast_to(gy0, x.shape)
        gg2 = 2 * grad_outputs[0]
        gx = gg2 * gy0
        ggy0 = gg2 * x
        # ggy0 must be reduced back to gy0's original (B,) shape.
        return gx, _sum.sum(ggy0, axis=tuple(six.moves.range(1, ggy0.ndim)))
def batch_l2_norm_squared(x):
    """L2 norm (a.k.a.\\ Euclidean norm) squared.

    This function implements the square of L2 norm on a vector. No reduction
    along batch axis is done.

    Args:
        x (~chainer.Variable): Input variable. The first dimension is assumed
            to be the *minibatch dimension*. If ``x`` has more than two
            dimensions all but the first dimension are flattened to one
            dimension.

    Returns:
        ~chainer.Variable: One-dimensional output variable holding the
        squared norm of each sample in the minibatch (the forward pass
        sums over ``axis=1`` of the flattened ``(B, D)`` input).
    """
    return BatchL2NormSquared().apply((x,))[0]
| {"hexsha": "8a7df8e3f5c62150cb3912ea62d7a8c636b6dbad", "size": 2593, "ext": "py", "lang": "Python", "max_stars_repo_path": "chainer/functions/math/batch_l2_norm_squared.py", "max_stars_repo_name": "LuoYuanke/PrivChainer", "max_stars_repo_head_hexsha": "758d765c7903f6913cfd58c21db069d5f2a12203", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "chainer/functions/math/batch_l2_norm_squared.py", "max_issues_repo_name": "LuoYuanke/PrivChainer", "max_issues_repo_head_hexsha": "758d765c7903f6913cfd58c21db069d5f2a12203", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chainer/functions/math/batch_l2_norm_squared.py", "max_forks_repo_name": "LuoYuanke/PrivChainer", "max_forks_repo_head_hexsha": "758d765c7903f6913cfd58c21db069d5f2a12203", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.8045977011, "max_line_length": 77, "alphanum_fraction": 0.5989201697, "include": true, "reason": "import numpy", "num_tokens": 707} |
'''
FastText Recommender Module
'''
import numpy as np
from gensim.models import FastText
from gensim import matutils
class Recommender:
    """FastText recommender: similarity and embedding helpers over a
    trained gensim FastText model."""
    def __init__(self, path):
        # Load a model previously saved with FastText.save().
        self.model = FastText.load(path)
    def doc2words(self, doc, num=10):
        '''
        Return the ``num`` semantically closest words to the given
        token or list of tokens.
        '''
        return self.model.wv.most_similar(doc, topn=num)
    def vec2words(self, vec, num=10):
        '''
        Return the ``num`` semantically closest words to the given
        embedding vector.
        '''
        return self.model.wv.similar_by_vector(vec, topn=num)
    def doc2vec(self, doc):
        '''Return the unit-normalized mean embedding vector for a token
        or list of tokens.'''
        if isinstance(doc, str):
            doc = [doc]
        if doc == []:
            # Korean message: "an empty list cannot be vectorized."
            raise RuntimeError("빈 리스트는 벡터화시킬 수 없습니다.")
        v = [self.model.wv[word] for word in doc]
        return matutils.unitvec(np.array(v).mean(axis=0))
    def vec_sim(self, vec_A, vec_B):
        '''Return the semantic similarity (dot product) of two embedding
        vectors; equals cosine similarity when both are unit vectors.'''
        return np.dot(vec_A, vec_B)
    def doc_sim(self, doc_A, doc_B):
        '''Return the semantic similarity between two tokens or token
        lists.'''
        if isinstance(doc_A, str):
            doc_A = [doc_A]
        if isinstance(doc_B, str):
            doc_B = [doc_B]
        return self.model.wv.n_similarity(doc_A, doc_B)
    def is_in_dict(self, word):
        '''Return whether the token is in the model vocabulary.'''
        return word in self.model.wv.vocab
    def make_test_report(self, path='./word_sim_test.md'):
        '''Probe the model with a fixed keyword list (Korean campus-life
        terms) and write the nearest neighbors as a Markdown report.'''
        words = [
            "공모전", "it", "컴퓨터", "취업", "진로", "장학",
            "근무", "학교", "공부", "용돈", "국제", "토익",
            "회계", "월급", "창업", "파이썬", "python", "java",
            "디자인", "웹", "영상", "디자이너", "구매", "상품",
            "데이터", "서버", "병원", "의료", "아르바이트", "연애",
            "자유", "일본", "코트", "칭찬", "동아리", "새내기", "영어",
            "채용", "학점", "수강", "생각", "스트레스", "행복", "수학",
            "elp", "교수", "과제", "수업", "점수", "출판", "선생님",
            "코딩", "물리", "군대", "대회", "세종대", "뉴스", "드라마",
            "소식", "작품", "교양", "세미나", "특강", "복학", "휴학",
            "장학금", "성적", "등록금", "질문", "후문", "학교", "알바",
            "선생", "과외", "과학"
        ]
        with open(path, 'w', encoding='utf-8') as f:
            # Header is Korean: "model word test".
            f.write("# 모델 단어 테스트\n\n")
            for ex in words:
                if not ex:
                    continue
                # Skip keywords the model has never seen.
                if not self.is_in_dict(ex):
                    print("Skipped:", ex)
                    continue
                f.write("### " + str(ex) + " \n")
                f.write(" **" + str(self.doc2words(ex)) + "** \n")
f.write("\n") | {"hexsha": "aae8aa41787421dadfadaac7d19f7ede02338bb7", "size": 2605, "ext": "py", "lang": "Python", "max_stars_repo_path": "SIGNUS/modules/recommender/fasttext/__init__.py", "max_stars_repo_name": "837477/SIGNUS", "max_stars_repo_head_hexsha": "cd395dfd45d2c36d09ec9a8069e6e52e19f058e8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "SIGNUS/modules/recommender/fasttext/__init__.py", "max_issues_repo_name": "837477/SIGNUS", "max_issues_repo_head_hexsha": "cd395dfd45d2c36d09ec9a8069e6e52e19f058e8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "SIGNUS/modules/recommender/fasttext/__init__.py", "max_forks_repo_name": "837477/SIGNUS", "max_forks_repo_head_hexsha": "cd395dfd45d2c36d09ec9a8069e6e52e19f058e8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.5625, "max_line_length": 66, "alphanum_fraction": 0.4618042226, "include": true, "reason": "import numpy", "num_tokens": 1054} |
[STATEMENT]
lemma degree_leI:
assumes "(\<And>i. pdevs_apply y i = 0 \<Longrightarrow> pdevs_apply x i = 0)"
shows "degree x \<le> degree y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. degree x \<le> degree y
[PROOF STEP]
proof cases
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. ?P \<Longrightarrow> degree x \<le> degree y
2. \<not> ?P \<Longrightarrow> degree x \<le> degree y
[PROOF STEP]
assume "degree x \<noteq> 0"
[PROOF STATE]
proof (state)
this:
degree x \<noteq> 0
goal (2 subgoals):
1. ?P \<Longrightarrow> degree x \<le> degree y
2. \<not> ?P \<Longrightarrow> degree x \<le> degree y
[PROOF STEP]
from degree_least_nonzero[OF this]
[PROOF STATE]
proof (chain)
picking this:
pdevs_apply x (degree x - 1) \<noteq> (0::'b)
[PROOF STEP]
have "pdevs_apply y (degree x - 1) \<noteq> 0"
[PROOF STATE]
proof (prove)
using this:
pdevs_apply x (degree x - 1) \<noteq> (0::'b)
goal (1 subgoal):
1. pdevs_apply y (degree x - 1) \<noteq> (0::'a)
[PROOF STEP]
by (auto simp: assms split: if_split_asm)
[PROOF STATE]
proof (state)
this:
pdevs_apply y (degree x - 1) \<noteq> (0::'a)
goal (2 subgoals):
1. ?P \<Longrightarrow> degree x \<le> degree y
2. \<not> ?P \<Longrightarrow> degree x \<le> degree y
[PROOF STEP]
from degree_gt[OF this]
[PROOF STATE]
proof (chain)
picking this:
degree x - 1 < degree y
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
degree x - 1 < degree y
goal (1 subgoal):
1. degree x \<le> degree y
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
degree x \<le> degree y
goal (1 subgoal):
1. \<not> degree x \<noteq> 0 \<Longrightarrow> degree x \<le> degree y
[PROOF STEP]
qed simp | {"llama_tokens": 709, "file": "Affine_Arithmetic_Affine_Form", "length": 9} |
"""Handle Fast Fourier Transform (FFT) for filter parameterization."""
import numpy as np
from astropy import units as u
from astropy.modeling.models import custom_model, Sine1D
from astropy.table import Table
from synphot.compat import NUMPY_LT_1_17
from synphot.models import Empirical1D
from synphot.spectrum import SpectralElement
from synphot.units import validate_quantity
__all__ = ['filter_to_fft', 'filter_from_fft', 'analytical_model_from_fft',
'filters_to_fft_table']
def _simplified_wavelength(n_lambda, lambda_0, delta_lambda):
    """Build a uniform wavelength grid covering ``n_lambda`` samples.

    The grid starts at ``lambda_0`` with ``delta_lambda`` spacing, expressed
    in synphot's internal wavelength unit (Angstrom, matching what tynt
    assumed).
    """
    wave_unit = SpectralElement._internal_wave_unit
    start = validate_quantity(lambda_0, wave_unit, equivalencies=u.spectral())
    step = validate_quantity(
        delta_lambda, wave_unit, equivalencies=u.spectral())
    # One extra step past n_lambda guarantees the grid is long enough.
    stop = start + (n_lambda + 1) * step
    return np.arange(start.value, stop.value, step.value) * wave_unit
def filter_to_fft(bp, wavelengths=None, n_terms=10):
    """Calculate filter parameters using FFT.

    Parameters
    ----------
    bp : `~synphot.spectrum.SpectralElement`
        Filter to parameterize.
    wavelengths : array-like or `~astropy.units.quantity.Quantity`
        Wavelength values for sampling.
        If not a Quantity, assumed to be in Angstrom.
        If `None`, ``waveset`` is used.
    n_terms : int
        Number of FFT parameters to keep.

    Returns
    -------
    n_lambda : int
        Number of sampled wavelengths.
    lambda_0 : `~astropy.units.quantity.Quantity`
        Minimum sampled wavelength.
    delta_lambda : `~astropy.units.quantity.Quantity`
        Median wavelength spacing.
    tr_max : `~astropy.units.quantity.Quantity`
        Maximum of the sampled transmittance.
    fft_parameters : list of complex
        List of complex values that are FFT parameters to keep.

    """
    wave = bp._validate_wavelengths(wavelengths)
    thru = bp(wave)

    # Characterize the sampling; zero-width steps are excluded from the
    # median spacing.
    steps = np.diff(wave)
    delta_lambda = np.nanmedian(steps[steps != 0])
    lambda_0 = wave.min()
    n_lambda = len(wave)

    # Resample the transmittance onto a uniform grid before taking the DFT.
    grid = _simplified_wavelength(n_lambda, lambda_0, delta_lambda)
    tr_max = thru.max()
    resampled = np.interp(grid, wave, thru)

    # Keep only the leading n_terms Fourier coefficients.
    coeffs = np.fft.fft(resampled)[:n_terms]
    if isinstance(coeffs, u.Quantity):
        fft_parameters = coeffs.value.tolist()
    else:  # Older Numpy does not return Quantity
        fft_parameters = coeffs.tolist()

    return n_lambda, lambda_0, delta_lambda, tr_max, fft_parameters
def filter_from_fft(n_lambda, lambda_0, delta_lambda, tr_max, fft_parameters):
    """Reconstruct a filter from given FFT parameters.

    The inputs for this function can be obtained from :func:`filter_to_fft`.

    Parameters
    ----------
    n_lambda : int
        Number of elements in original wavelength array.
    lambda_0 : float or `~astropy.units.quantity.Quantity`
        Minimum value of original wavelength array.
        If not a Quantity, assumed to be in Angstrom.
    delta_lambda : float or `~astropy.units.quantity.Quantity`
        Median delta wavelength of original wavelength array.
        If not a Quantity, assumed to be in Angstrom.
    tr_max : float or `~astropy.units.quantity.Quantity`
        Maximum value of transmittance curve.
        If a Quantity, must be unitless.
    fft_parameters : list of complex
        List of complex values that are FFT parameters representing the
        filter transmittance curve.

    Returns
    -------
    bp : `~synphot.spectrum.SpectralElement`
        Reconstructed filter.

    """
    wavelength = _simplified_wavelength(n_lambda, lambda_0, delta_lambda)
    n_wave = len(wavelength)
    ifft = np.fft.ifft(fft_parameters, n=n_wave)
    # Rescale the (real part of the) reconstruction to [0, tr_max].
    # np.ptp(...) is used instead of the ndarray.ptp() method, which was
    # removed in NumPy 2.0.
    real = ifft.real
    transmittance = (real - real.min()) * tr_max / np.ptp(real)
    return SpectralElement(
        Empirical1D, points=wavelength, lookup_table=transmittance)
def analytical_model_from_fft(n_lambda, lambda_0, delta_lambda, tr_max,
                              fft_parameters):
    """Similar to :func:`filter_from_fft` except that this returns
    an analytical model.

    .. note::

        This model needs to be sampled using the full range of
        wavelength. See https://github.com/bmorris3/tynt/issues/9 .

    Returns
    -------
    astropy_model : `~astropy.modeling.CompoundModel`
        A compound model that consists of
        `~astropy.modeling.functional_models.Sine1D` models.

    """
    wavelength = _simplified_wavelength(n_lambda, lambda_0, delta_lambda)
    n_wave = len(wavelength)
    n_fft_pars = len(fft_parameters)
    # Inverse DFT expressed analytically: a sum of sines for the real parts
    # (phase 0.25 turns sine into cosine) minus a sum of sines for the
    # imaginary parts, one pair per kept Fourier coefficient.
    m = (np.sum([Sine1D(amplitude=fft_parameters[i].real / n_wave,
                        frequency=i / n_wave, phase=0.25)
                 for i in range(n_fft_pars)]) -
         np.sum([Sine1D(amplitude=fft_parameters[i].imag / n_wave,
                        frequency=i / n_wave)
                 for i in range(n_fft_pars)]))

    @custom_model
    def fft_model(x):
        """Approximate Fourier reconstruction of an astronomical filter.

        Parameters
        ----------
        x : `~astropy.units.quantity.Quantity`
            Full wavelength range that samples the filter.

        Returns
        -------
        transmittance : array-like or `~astropy.units.quantity.Quantity`
            Transmittance curve. If ``tr_max`` is a Quantity, this will
            be a Quantity as well.

        """
        wave_unit = SpectralElement._internal_wave_unit
        x = validate_quantity(x, wave_unit, equivalencies=u.spectral())
        # Evaluate the sine sum in grid-index coordinates, then rescale
        # to [0, tr_max].
        # NOTE(review): mo.ptp() relies on the ndarray.ptp() method, which
        # was removed in NumPy 2.0 -- consider np.ptp(mo) here.
        mo = m((x - wavelength.min()) / (wavelength[1] - wavelength[0]))
        return (mo - mo.min()) * tr_max / mo.ptp()

    return fft_model()
def filters_to_fft_table(filters_mapping, n_terms=10):
    """Run :func:`filter_to_fft` on a list of filters
    and store results in a table.

    Parameters
    ----------
    filters_mapping : dict
        Dictionary mapping human-readable filter name to its
        `~synphot.spectrum.SpectralElement` and wavelengths, if applicable.
        If the filter object has a valid ``waveset``, just provide `None`
        for wavelengths; otherwise provide a Quantity array for sampling.
        For example::

            {'JOHNSON/V': (<SpectralElement ...>, None),
             'Flat': (<SpectralElement ...>, <Quantity [1000., ..., 9999.] Angstrom>)}

    n_terms : int
        Number of FFT parameters to keep.

    Returns
    -------
    fft_table : `~astropy.table.Table`
        Table storing FFT parameterization for the given filters.
        Use its ``write`` method to save it to file.

    """  # noqa
    wave_unit = SpectralElement._internal_wave_unit
    colnames = (['filter', 'n_lambda', 'lambda_0', 'delta_lambda', 'tr_max']
                + [f'fft_{i}' for i in range(n_terms)])

    rows = []
    for filter_name, (bp, wavelengths) in filters_mapping.items():
        n_lambda, lambda_0, delta_lambda, tr_max, fft_pars = filter_to_fft(
            bp, wavelengths=wavelengths, n_terms=n_terms)
        if NUMPY_LT_1_17:
            # Numpy 1.16 cannot handle unit here.
            scalars = [filter_name, n_lambda, lambda_0.value,
                       delta_lambda.value, tr_max.value]
        else:
            scalars = [filter_name, n_lambda, lambda_0, delta_lambda, tr_max]
        rows.append(tuple(scalars + fft_pars))

    fft_table = Table(rows=rows, names=colnames)
    fft_table['lambda_0'].unit = wave_unit
    fft_table['delta_lambda'].unit = wave_unit
    return fft_table
| {"hexsha": "909ec88904e91ae26c9a405270f56b01e4ed488d", "size": 7798, "ext": "py", "lang": "Python", "max_stars_repo_path": "synphot/filter_parameterization/filter_fft.py", "max_stars_repo_name": "spacetelescope/pysynphot_DONOTUSE", "max_stars_repo_head_hexsha": "2a382d7bdf29cc4a1e6b69e59d5c1d0f82dabffc", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "synphot/filter_parameterization/filter_fft.py", "max_issues_repo_name": "spacetelescope/pysynphot_DONOTUSE", "max_issues_repo_head_hexsha": "2a382d7bdf29cc4a1e6b69e59d5c1d0f82dabffc", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "synphot/filter_parameterization/filter_fft.py", "max_forks_repo_name": "spacetelescope/pysynphot_DONOTUSE", "max_forks_repo_head_hexsha": "2a382d7bdf29cc4a1e6b69e59d5c1d0f82dabffc", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.3247863248, "max_line_length": 86, "alphanum_fraction": 0.6590151321, "include": true, "reason": "import numpy,from astropy", "num_tokens": 1837} |
Require Import Algebra.Utils Algebra.SetoidCat Algebra.Monad Algebra.Monoid Algebra.Alternative Algebra.NearSemiRing Algebra.Monad.ContT Algebra.Alternative Algebra.Functor Algebra.Applicative PairUtils SetoidUtils Tactics Algebra.SetoidCat.UnitUtils Algebra.Monoid.ArrUtils Algebra.Monad.Utils.
Require Import RelationClasses Relation_Definitions Morphisms SetoidClass.
Open Scope type_scope.
(* StoreHeap: a continuation monad over heap-and-store state, where results
   are collected in an arbitrary Alternative functor [l] (e.g. lists),
   giving nondeterminism with choice and failure. *)
Section StoreHeap.
Context
{H : Type} {SH : Setoid H}
{R : Type} {SR : Setoid R}
{l lS}
{alt : @Alternative l lS}
.
(* A storeHeap computation maps a heap and a store to an updated heap plus
   a collection of (store, result) pairs. *)
Definition storeHeap A {AS} := SH ~> SR ~~> SH ~*~ lS (R * A) (SR ~*~ AS).
Instance storeHeapS {A} AS : Setoid (@storeHeap A AS) := SH ~~> SR ~~> SH ~*~ lS (R * A) (SR ~*~ AS).
(* [sh] is storeHeap wrapped in the continuation monad transformer contT. *)
Definition sh {B} (BS : Setoid B) A {SA : Setoid A} := contT BS (@storeHeapS) A.
Definition shS {B} (BS : Setoid B) { A} (SA : Setoid A):= contTS BS (@storeHeapS) SA.
Definition runSh {A B} {SA : Setoid A} {BS : Setoid B} : shS BS SA ~> (SA ~~> storeHeapS BS) ~~> storeHeapS BS := runContT.
Existing Instance contT_Monad.
(* Failure: leave the heap untouched and return the empty collection. *)
Definition storeHeap_empty {A} {AS : Setoid A} : storeHeap A.
simple refine (injF (fun h : H => constS SR @ (h, empty)) _).
exact lS.
exact alt.
Lemma storeHeap_empty_1: forall {A} {AS : Setoid A}, Proper (equiv ==> equiv) (fun h : H => constS SR @ (h, empty)).
Proof.
intros. solve_proper.
Qed.
apply storeHeap_empty_1.
Defined.
(* Choice: thread the heap through s1 then s2, merging result collections
   with the underlying Alternative's <|>. *)
Definition storeHeap_append {A} {AS : Setoid A} : storeHeapS AS ~> storeHeapS AS ~~> storeHeapS AS.
simple refine (injF4 (fun (s1 s2 : storeHeap A) h r =>
let (h', l1) := s1 @ h @ r in
let (h'', l2) := s2 @ h' @ r in
(h'', l1 <|> l2)) _).
exact lS.
exact alt.
Lemma storeHeap_append_1 : forall {A} {AS : Setoid A}, Proper (equiv ==> equiv ==> equiv ==> equiv ==> equiv)
(fun (s1 s2 : storeHeap A) (h : H) (r : R) =>
let (h', l1) := s1 @ h @ r in
let (h'', l2) := s2 @ h' @ r in (h'', l1 <|> l2)).
Proof.
autounfold. intros. simpl_let. split. rewritesr. rewritesr.
Qed.
apply storeHeap_append_1.
Defined.
(* storeHeap inherits the Alternative laws from the underlying functor. *)
Instance storeHeap_Alternative : @Alternative (@storeHeap) (@storeHeapS).
Proof.
exists (@storeHeap_empty) (@storeHeap_append).
intros. simpl. arrequiv. simpl_let. destruct (a @ a0 @ a1). rewrite left_unit_alt. split. reflexivity. reflexivity.
intros. simpl. arrequiv. simpl_let. destruct (a @ a0 @ a1). rewrite right_unit_alt. split. reflexivity. reflexivity.
intros. simpl. arrequiv. simpl_let. destruct (a @ a0 @ a1). simpl. destruct (b @ h @ a1). simpl. split . reflexivity. rewrite associativity_alt. reflexivity.
Defined.
Instance sh_Monad {B} {BS : Setoid B} : @Monad (@sh B BS) (@shS B BS) := contT_Monad BS (@storeHeapS).
Instance sh_Alternative {B} {BS : Setoid B} : @Alternative (@sh B BS) (@shS B BS) := contT_Alternative BS (@storeHeapS).
Context
{func : @Functor l lS}
{app : @Applicative l lS func}.
(* State readers: pass the current store / heap to the continuation. *)
Definition getStore {B} {BS : Setoid B} : sh BS R.
simple refine (injF3 (fun (c : SR ~> storeHeapS BS) (h : H) (s : R) => c @ s @ h @ s) _).
Lemma getStore_1 : forall {B} {BS : Setoid B}, Proper (equiv ==> equiv ==> equiv ==> equiv)
(fun (c : SR ~> storeHeapS BS) (h : H) (s : R) => c @ s @ h @ s).
Proof.
intros. solve_proper.
Qed.
apply getStore_1.
Defined.
Definition getHeap {B} {BS : Setoid B} : sh BS H.
simple refine (injF3 (fun (c : SH ~> storeHeapS BS) (h : H) (s : R) => c @ h @ h @ s) _).
Lemma getHeap_1 : forall {B} {BS : Setoid B}, Proper (equiv ==> equiv ==> equiv ==> equiv)
(fun (c : SH ~> storeHeapS BS) (h : H) (s : R) => c @ h @ h @ s).
Proof.
intros. solve_proper.
Qed.
apply getHeap_1.
Defined.
(* State writers: discard the old store / heap and continue with the new. *)
Definition putStore {B} {BS : Setoid B} : SR ~> shS BS unitS.
simple refine (injF4 (fun (s : R) (c : unitS ~> storeHeapS BS) (h : H) (_ : R) => c @ tt @ h @ s) _).
Lemma putStore_1 : forall {B} {BS : Setoid B}, Proper (equiv ==> equiv ==> equiv ==> equiv ==> equiv)
(fun (s : R) (c : unitS ~> storeHeapS BS) (h : H) (_ : R) =>
c @ tt @ h @ s).
Proof.
autounfold. intros. rewritesr.
Qed.
apply putStore_1.
Defined.
(* update* = get, apply the function, then put. *)
Definition updateStore {B} {BS : Setoid B} : (SR ~~> SR) ~> shS BS unitS := (bind @ getStore) ∘ (flipS @ compS @ putStore).
Definition putHeap {B} {BS : Setoid B} : SH ~> shS BS unitS.
simple refine (injF4 (fun (h : H) (c : unitS ~> storeHeapS BS) (_ : H) (s : R) => c @ tt @ h @ s) _).
Lemma putHeap_1 : forall {B} {BS : Setoid B}, Proper (equiv ==> equiv ==> equiv ==> equiv ==> equiv)
(fun (h : H) (c : unitS ~> storeHeapS BS) (_ : H) (s : R) =>
c @ tt @ h @ s).
Proof.
autounfold. intros. rewritesr.
Qed.
apply putHeap_1.
Defined.
Definition updateHeap {B} {BS : Setoid B} : (SH ~~> SH) ~> shS BS unitS := (bind @ getHeap) ∘ (flipS @ compS @ putHeap).
Existing Instance arr_Monoid.
Existing Instance contT_A_Monoid.
(* Unit-valued sh computations form a near-semiring: sequencing (andThen)
   as multiplication, monoid append (choice) as addition. *)
Section Sh_SS_unitS_NearSemiRing.
Definition sh_times {S : Type} {SS : Setoid S} : shS SS unitS ~> shS SS unitS ~~> shS SS unitS := andThen.
Definition sh_plus {S : Type} {SS : Setoid S} : shS SS unitS ~> shS SS unitS ~~> shS SS unitS := mappend.
Definition sh_zero {S : Type} {SS : Setoid S} : sh SS unit := mempty.
Definition sh_one {S : Type} {SS : Setoid S}: sh SS unit := ret @ tt.
Instance sh_NearSemiRing {S : Type} {SS : Setoid S} : @NearSemiRing _ (shS SS unitS).
Proof.
exists (sh_one) (sh_zero) (sh_times) (sh_plus).
intros. simpl. arrequiv.
intros. unfold sh_times, sh_one. unfold andThen. normalizecomp. unfold constS. normalize. apply (@right_unit_equiv (sh SS) (@shS _ SS) sh_Monad). simpl. arrequiv. destruct a0. reflexivity.
intros. unfold sh_times. unfold andThen. normalizecomp. rewrite (@associativity (@sh S SS) (@shS _ SS) sh_Monad _ _ _ _ _ _ a (constS unitS @ b) (constS unitS @ c)). evalproper. simpl_equiv. reflexivity.
intros. apply left_unit_monoid.
intros. apply right_unit_monoid.
intros. apply associativity_monoid.
intros. simpl. arrequiv.
intros. unfold sh_times at 1. unfold sh_plus at 1. unfold andThen. normalizecomp. rewrite (@contT_left_distributivity _ _ _ _ _ _ _ _ _ a b (constS _ @ c)). reflexivity.
Grab Existential Variables.
solve_proper.
Defined.
End Sh_SS_unitS_NearSemiRing.
(* Dead code below: previously attempted lemmas kept for reference. *)
(* Proof. *)
(* Proof. *)
(*
Lemma concatMapM_cons : forall m0 (mnd : @Monad m0) A B (SA : Setoid A) (SB : Setoid B) (f : SA ~> m (listS SB)) (a : A) (l : list A), concatMapM @ f @ (a :: l) == appProper <$> f @ a <*> concatMapM @ f @ l.
Proof.
intros. simpl. repeat rewrite associativity_2. bindproper. unfold compM, injF2. simpl. arrequiv.
rewrite left_unit. simpl. normalize_monad. bindproper. normalize_monad. reflexivity.
Grab Existential Variables.
solve_proper.
solve_proper.
Qed.
Lemma sequence_map_ret : forall m0 (mnd : @Monad m0) A B (SA : Setoid A) (SB : Setoid B) (l : list A) (f : A -> B), sequence (map (fun a => ret @ f a) l) == ret @ map f l.
Proof.
intros. induction l.
simpl. reflexivity.
intros. simpl. normalize_monad. rewrite IHl. rewrite left_unit. simpl. reflexivity.
Qed.
Lemma concatMapM_ret : forall m0 (mnd : @Monad m0) A (SA : Setoid A) pr (l : list A) (s : H), injective (listS SA) (m (listS SA)) ret -> concatMapM @ injF (fun a => ret @ (a :: nil)) pr == ret .
Proof.
intros. simpl. arrequiv. induction a. simpl. normalize_monad. reflexivity.
simpl. normalize_monad. rewrite <- associativity0. rewrite ret_ret. rewrite <- ret_ret with (f:=@concat A) (g:=app (a::nil)).
assert (forall pr, injF (fun a1 => ret @ concat a1) pr == ret ∘ concatProper). intros. apply fun_ext. intros. reflexivity. rewrite H1.
rewrite IHa. normalize_monad. reflexivity. solve_proper. solve_proper. solve_proper. solve_proper.
Grab Existential Variables.
solve_proper.
solve_proper.
solve_proper.
solve_proper.
solve_proper.
Qed.
Lemma concatMapM_nil : forall m0 (mnd : @Monad m0) A B (SA : Setoid A) (SB : Setoid B) (f : SA ~> m (listS SB)), concatMapM @ f @ nil == ret @ nil.
Proof.
intros. simpl. rewrite left_unit. simpl. reflexivity.
Qed. *)
End StoreHeap.
| {"author": "xu-hao", "repo": "CertifiedQueryArrow", "sha": "8db512e0ebea8011b0468d83c9066e4a94d8d1c4", "save_path": "github-repos/coq/xu-hao-CertifiedQueryArrow", "path": "github-repos/coq/xu-hao-CertifiedQueryArrow/CertifiedQueryArrow-8db512e0ebea8011b0468d83c9066e4a94d8d1c4/Algebra/Monad/StoreHeap.v"} |
import numpy as np
import pandas as pd
import unittest
import io
import sys
from context import grama as gr
## Test cohort shapley
##################################################
class TestCohortShapley(unittest.TestCase):
    """Tests for gr.tran_shapley_cohort."""

    def setUp(self):
        pass

    def test_cohort_shapley(self):
        # 2x2 factorial design with additive response f = x0 + x1;
        # each variable should receive a +/-0.5 attribution.
        df_data = gr.df_make(x0=[0, 0, 1, 1], x1=[0, 1, 0, 1], f=[0, 1, 1, 2],)
        df_expected = gr.df_make(
            f_x0=[-0.5, -0.5, +0.5, +0.5],
            f_x1=[-0.5, +0.5, -0.5, +0.5],
        )

        df_result = gr.tran_shapley_cohort(df_data, var=["x0", "x1"], out=["f"])

        self.assertTrue(gr.df_equal(df_expected, df_result))
| {"hexsha": "e3be4d6b0e42ab23fbfd8796d6326d40ba3e4c67", "size": 633, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_shapley.py", "max_stars_repo_name": "natalia-rubio/py_grama", "max_stars_repo_head_hexsha": "968c1c0238d7165de3b1b96534791feacc4aa960", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2020-02-24T16:51:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T18:56:55.000Z", "max_issues_repo_path": "tests/test_shapley.py", "max_issues_repo_name": "natalia-rubio/py_grama", "max_issues_repo_head_hexsha": "968c1c0238d7165de3b1b96534791feacc4aa960", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 78, "max_issues_repo_issues_event_min_datetime": "2019-12-30T19:13:21.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-23T18:17:54.000Z", "max_forks_repo_path": "tests/test_shapley.py", "max_forks_repo_name": "natalia-rubio/py_grama", "max_forks_repo_head_hexsha": "968c1c0238d7165de3b1b96534791feacc4aa960", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2020-10-19T17:49:25.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-15T20:46:52.000Z", "avg_line_length": 25.32, "max_line_length": 80, "alphanum_fraction": 0.5466034755, "include": true, "reason": "import numpy", "num_tokens": 219} |
# Written by Jimmy Zhong (zhongj2@carleton.edu), Carleton '23 under Professor Rika Anderson; date: August 31nd 2021
'''
cd /workspace/data/zhongj/Transposase_Project/integron_finder_tara_contig
python3
exec(open('find_cassette_function_and_pnps_august24.py').read())
'''
import glob
import os
import subprocess
import sys
import csv
import time
import random
import pandas as pd
import numpy as np
from collections import Counter
# Ocean-region abbreviations used throughout the pipeline; each region has a
# matching "<ocean>.integrons" file produced by Integron Finder and renamed
# with the mv commands recorded below.
ocean_list = ["ARS", "CPC", "EAC", "IN", "NAT", "MED", "NP", "RS", "SAT", "SP"]
# mv tara_arabian_SECONDARY_contigs.integrons ARS.integrons
# mv tara_chileperucoastal_SECONDARY_contigs.integrons CPC.integrons
# mv tara_eafricacoastal_SECONDARY_contigs.integrons EAC.integrons
# mv tara_indianmonsoon_SECONDARY_contigs.integrons IN.integrons
# mv tara_mediterranean_SECONDARY_contigs.integrons MED.integrons
# mv tara_northatlantic_SECONDARY_contigs.integrons NAT.integrons
# mv tara_northpacific_SECONDARY_contigs.integrons NP.integrons
# mv tara_redsea_SECONDARY_contigs.integrons RS.integrons
# mv tara_southatlantic_SECONDARY_contigs.integrons SAT.integrons
# mv tara_southpacific_SECONDARY_contigs.integrons SP.integrons
# Column order of the per-ocean merged table: anvi'o gene-call fields,
# pN/pS and COG annotations, then the Integron Finder cassette fields.
integron_merged_colnames = ['gene_callers_id', 'contig', 'start', 'stop', 'direction', 'partial', 'call_type',
'pNpS', 'accession', 'gene_function', 'e_value', 'category', 'element', 'cassette_begin', 'cassette_end',
'type_molecule', 'from_integron_type']
def within(start1, start2, end1, end2):
    """Return True when both coordinate pairs agree to within 100 bp.

    Used to decide whether an anvi'o gene call (``start2``/``end2``) occupies
    roughly the same contig span as an integron cassette element
    (``start1``/``end1``).  Arguments may be ints or numeric strings.
    """
    # Return the boolean expression directly instead of if/else True/False.
    return (abs(int(start1) - int(start2)) < 100
            and abs(int(end1) - int(end2)) < 100)
def parse_anvio_files(ocean):
    """Load and join the anvi'o exports for one ocean region.

    Reads the gene calls, COG functions, COG categories, and pN/pS tables
    from ../Tara_Oceans_Bins/<ocean>_bins_v2/, merges them on
    ``gene_callers_id``, and returns the joined table as a list of rows.
    """
    all_gene_calls = pd.read_csv(f"../Tara_Oceans_Bins/{ocean}_bins_v2/all-gene-calls.txt", sep='\t')
    all_gene_calls.drop(columns = ['source','version'], inplace = True)
    func = pd.read_csv(f"../Tara_Oceans_Bins/{ocean}_bins_v2/all-COG-functions.txt", sep='\t')
    func.drop(columns = ['source'], inplace = True)
    category = pd.read_csv(f"../Tara_Oceans_Bins/{ocean}_bins_v2/all-COG-categories.txt", sep="\t")
    category.drop(columns = ['source','e_value', 'accession'], inplace = True)
    pnps_file = f"../Tara_Oceans_Bins/{ocean}_bins_v2/DNA-pn-ps/pNpS.txt" # I only compute integron contigs for EAC, SP, and SAT. Computational restriction, not enough memory
    # Fall back to the integron-only pN/pS table when the full one is absent.
    if not os.path.isfile(pnps_file):
        pnps_file = f"../Tara_Oceans_Bins/{ocean}_bins_v2/integron-pn-ps/pNpS.txt"
    pnps = pd.read_csv(pnps_file, sep='\t')
    # "Unnamed: 0" is the stray index column written by a previous to_csv.
    pnps.drop(columns = ["Unnamed: 0", "sample_id"], inplace = True)
    pnps.rename(columns={"corresponding_gene_call":"gene_callers_id", "pNpS_gene_reference":"pNpS"}, inplace=True)
    print(pnps)
    print(all_gene_calls)
    # Left-join pN/pS (genes without pN/pS keep NaN), then outer-join the
    # COG function and category annotations.
    all_gene_calls = all_gene_calls.merge(pnps, on="gene_callers_id", how='left')
    gene_call_func_category = (all_gene_calls.merge(func, how='outer', on="gene_callers_id")).merge(category, on="gene_callers_id", how='outer')
    # Both annotation tables contribute a "function" column; disambiguate.
    gene_call_func_category.rename(columns={"function_y": "category", "function_x": "gene_function"}, inplace = True)
    return gene_call_func_category.values.tolist()
def parse_integrons(ocean):
    """Parse an Integron Finder ``<ocean>.integrons`` file.

    Skips the two header lines and keeps, for every record with enough
    columns: [contig, element, pos_begin, pos_end, type_molecule,
    from_integron_type].  anvi'o names contigs c_1, c_2, ..., hence the
    "c_" prefix on the contig id.
    """
    with open(f"{ocean}.integrons") as handle:
        records = handle.readlines()[2:]

    parsed = []
    for record in records:
        fields = record.split("\t")
        if len(fields) > 10:
            parsed.append(["c_" + fields[1], fields[2], fields[3],
                           fields[4], fields[7], fields[10]])
    return parsed
# For each ocean: intersect anvi'o gene calls with Integron Finder cassette
# elements (same contig, coordinates within 100 bp) and write the merged
# per-gene table to <ocean>_merged_integrons.csv.
for ocean in ocean_list:
    integrons = parse_integrons(ocean)
    # Fast membership test on contig names of integron-bearing contigs.
    integrons_set = set(integron[0] for integron in integrons)
    gene_call_func_category_list = parse_anvio_files(ocean)
    out = []
    for anvio_gene_call in gene_call_func_category_list:
        contig_name = anvio_gene_call[1]
        if contig_name in integrons_set: # check whether it's one of the integron containing contigs
            gene_call_start, gene_call_end = anvio_gene_call[2], anvio_gene_call[3]
            for cassette in integrons:
                if contig_name == cassette[0]:
                    cassette_start, cassette_end = cassette[2], cassette[3]
                    # Keep the gene call if it overlaps this cassette span.
                    if within(cassette_start, gene_call_start, cassette_end, gene_call_end):
                        out.append(anvio_gene_call + cassette[1:])
    merged_integrons = pd.DataFrame(out, columns = integron_merged_colnames)
    merged_integrons.to_csv(path_or_buf=f'{ocean}_merged_integrons.csv', sep=',', index=False)
def cat_all_ocean_integron(ocean_list):
    """Concatenate every per-ocean merged-integron CSV into one DataFrame.

    Each row is tagged with an ``ocean`` column.  Collect frames and call
    ``pd.concat`` once: ``DataFrame.append`` was removed in pandas 2.0, and
    repeated appends were quadratic anyway.
    """
    # Seed frame pins the canonical column order even if ocean_list is empty.
    frames = [pd.DataFrame(columns = integron_merged_colnames)]
    for ocean in ocean_list:
        additional = pd.read_csv(f"{ocean}_merged_integrons.csv")
        additional['ocean'] = ocean
        print(ocean + ", length: " + str(len(additional.index)))
        frames.append(additional)
    return pd.concat(frames)
def cat_all_ocean_random(ocean_list):
    """Concatenate the all-COG-categories tables for every ocean.

    The anvi'o ``function`` column is renamed to ``category`` to match the
    integron tables.  Uses a single ``pd.concat`` instead of the removed
    (pandas 2.0) and quadratic ``DataFrame.append`` loop.
    """
    frames = []
    for ocean in ocean_list:
        normal_all = pd.read_csv(
            f"../Tara_Oceans_Bins/{ocean}_bins_v2/all-COG-categories.txt",
            sep='\t').rename(columns={"function": "category"})
        frames.append(normal_all)
    # Empty ocean_list -> empty DataFrame, matching the old behavior.
    return pd.concat(frames) if frames else pd.DataFrame()
# Background pool (all gene calls) and the integron-cassette pool.
normal_gene_call = cat_all_ocean_random(ocean_list)
all_ocean_integron = cat_all_ocean_integron(ocean_list)
all_ocean_integron.to_csv(path_or_buf='all_ocean_merged_integrons.csv', sep=',', index=False)
# filter out the integrase of the cassette sequence, ~ negates condition
all_ocean_integron = all_ocean_integron[~all_ocean_integron['from_integron_type'].str.contains("In")]
# Also drop attC recombination sites, which are not protein-coding cargo.
all_ocean_integron = all_ocean_integron[~all_ocean_integron['type_molecule'].str.contains("attC")]
def count_cog_proprotion(pd_dataframe):
    """Tally COG categories in the ``category`` column of *pd_dataframe*.

    Null categories are ignored; entries joined with "!!!" are split into
    separate categories first. Returns ``(rows, total)`` where each row is
    ``[category, count, proportion]`` as strings and *total* is the number
    of (split) category occurrences.
    """
    annotated = pd_dataframe[pd_dataframe.category.notnull()]
    # "!!!" separates multiple categories assigned to a single gene call
    categories = [
        part
        for entry in annotated['category'].tolist()
        for part in entry.split("!!!")
    ]
    total = len(categories)
    counts = Counter(categories)
    rows = [[str(tag), str(n), str(n / total)] for tag, n in counts.items()]
    return rows, total
# Compare COG functional-category composition of integron cassettes against
# all gene calls, and write the side-by-side table.
# FIX: the original called count_cog_proprotion(all_ocean_integron) twice
# (identical duplicate line removed).
integron_count, total_integron = count_cog_proprotion(all_ocean_integron)
# SAT_gene_call = pd.read_csv(f"../Tara_Oceans_Bins/SAT_bins_v2/all-COG-categories.txt", sep='\t').rename(columns={"function": "category"})
normal_count, total_normal = count_cog_proprotion(normal_gene_call)
integron_df = pd.DataFrame(integron_count, columns = ['COG_function','integron_count','integron_prop'])
normal_df = pd.DataFrame(normal_count, columns = ['COG_function','normal_count','normal_prop'])
merged = integron_df.merge(normal_df, on="COG_function", how='inner')
# FIX: sort_values returns a new frame; the original discarded the result,
# leaving the CSV unsorted. Assign it so the sort takes effect.
merged = merged.sort_values(by = "COG_function", axis=0)
# Totals row appended after sorting so it stays last ('~' also sorts last).
merged.loc[len(merged.index)] = ['~Total', total_integron, "1", total_normal, "1"]
merged.to_csv(path_or_buf="all_integron_func_category_count.csv", sep=',', index=False)
# Write a cleaned (no-NaN) pNpS/category table for the integron gene calls.
all_integron = pd.read_csv("all_ocean_merged_integrons.csv")
all_integron = all_integron[["pNpS", "category", 'type_molecule', 'from_integron_type']]
non_null_integron = all_integron.dropna()
non_null_integron.to_csv(path_or_buf="integron_pnps_clean.csv", sep=',', index=False)
# pd.options.display.max_colwidth = 100
# pd.options.display.max_rows = 50
# non_null_integron['category']=non_null_integron['category'].str.split('!!!')
# non_null_integron.explode('category')
print("finish the first half, now calculate the mean bin pnps for all bins")
# Build a per-gene table joining pNpS values, bin membership, and COG
# functions across all oceans.
# FIX: collect per-ocean frames and pd.concat once instead of the deprecated
# (removed in pandas 2.0), quadratic DataFrame.append loop; positional
# .any(1) (deprecated) spelled as .any(axis=1).
_pnps_frames = []
for ocean in ocean_list:
    all_gene_calls = pd.read_csv(f"./{ocean}_bins_v2/all-gene-calls.txt", sep='\t')
    all_gene_calls = all_gene_calls[["gene_callers_id", "contig"]]
    bin_pnps_file = f"./{ocean}_bins_v2/DNA-pn-ps/pNpS.txt"  # I only compute integron contigs for EAC, SP, and SAT. Computational restriction, not enough memory
    if not os.path.isfile(bin_pnps_file):
        bin_pnps_file = bin_pnps_file.replace("DNA", "bin")
    pnps = pd.read_csv(bin_pnps_file, sep='\t')  # pnps=pd.read_csv("bin-pn-ps/pNpS.txt", sep='\t')
    pnps.drop(columns = ["Unnamed: 0", "sample_id"], inplace = True)
    pnps.rename(columns={"corresponding_gene_call": "gene_callers_id", "pNpS_gene_reference": "pNpS"}, inplace=True)
    tmp = all_gene_calls.merge(pnps, how="inner", on="gene_callers_id")
    binning_info = pd.read_csv(f"./{ocean}_bins_v2/{ocean}_binning_info_anvio.txt", names=["contig", "bin_name"], sep='\t')
    tmp = tmp.merge(binning_info, on="contig")
    func = pd.read_csv(f"./{ocean}_bins_v2/all-COG-functions.txt", sep='\t')
    func.drop(columns = ['source'], inplace = True)
    tmp = tmp.merge(func, how='outer', on="gene_callers_id")
    tmp = tmp[~tmp.isin([np.nan, np.inf, -np.inf]).any(axis=1)]  # filter out inf, -inf, and nan
    _pnps_frames.append(tmp)
pnps_bin_contig = pd.concat(
    [pd.DataFrame(columns = ["gene_callers_id", "contig", "pNpS", "bin_name"])] + _pnps_frames)
# # get bins' median pnps
# bin_median_pnps = pnps_bin_contig.groupby(["bin_name"])['pNpS'].agg(mean_bin_pnps='mean', median_bin_pnps='median', count='size').reset_index()
# df_mask=bin_median_pnps['count']>=10
# bin_median_pnps = bin_median_pnps[df_mask]
# bin_median_pnps.rename(columns={"bin_name":"bin"}, inplace=True)
# bin_median_pnps.to_csv('bin_median_pnps.csv', sep=',', index=False)
# show that bins with high pnps does not also have a ton of transposases
# show that bins with high pnps does not also have a ton of transposases
all_integrons = pd.read_csv('all_ocean_merged_integrons.csv')
# Rebuild the per-gene pNpS/bin table, then summarize pNpS per bin.
# FIX: append loop -> single pd.concat; .any(1) -> .any(axis=1); and the COG
# function merge (present in the first build above) was missing here, so the
# downstream pnps_bin_contig['function'] filters would raise KeyError.
_pnps_frames = []
for ocean in ocean_list:
    # NOTE(review): integrons/integrons_set are computed but never used in
    # this loop; kept so the integron files are still parsed as before.
    integrons = parse_integrons(ocean)
    integrons_set = set(integron[0] for integron in integrons)
    all_gene_calls = pd.read_csv(f"./{ocean}_bins_v2/all-gene-calls.txt", sep='\t')
    all_gene_calls = all_gene_calls[["gene_callers_id", "contig"]]
    bin_pnps_file = f"./{ocean}_bins_v2/DNA-pn-ps/pNpS.txt"  # I only compute integron contigs for EAC, SP, and SAT. Computational restriction, not enough memory
    if not os.path.isfile(bin_pnps_file):
        bin_pnps_file = bin_pnps_file.replace("DNA", "bin")
    pnps = pd.read_csv(bin_pnps_file, sep='\t')  # pnps=pd.read_csv("bin-pn-ps/pNpS.txt", sep='\t')
    pnps.drop(columns = ["Unnamed: 0", "sample_id"], inplace = True)
    pnps.rename(columns={"corresponding_gene_call": "gene_callers_id", "pNpS_gene_reference": "pNpS"}, inplace=True)
    tmp = all_gene_calls.merge(pnps, how="inner", on="gene_callers_id")
    binning_info = pd.read_csv(f"./{ocean}_bins_v2/{ocean}_binning_info_anvio.txt", names=["contig", "bin_name"], sep='\t')
    tmp = tmp.merge(binning_info, on="contig")
    func = pd.read_csv(f"./{ocean}_bins_v2/all-COG-functions.txt", sep='\t')
    func.drop(columns = ['source'], inplace = True)
    tmp = tmp.merge(func, how='outer', on="gene_callers_id")
    tmp = tmp[~tmp.isin([np.nan, np.inf, -np.inf]).any(axis=1)]  # filter out inf, -inf, and nan
    _pnps_frames.append(tmp)
pnps_bin_contig = pd.concat(
    [pd.DataFrame(columns = ["gene_callers_id", "contig", "pNpS", "bin_name"])] + _pnps_frames)
# Per-bin mean/median pNpS; bins with fewer than 10 genes are dropped.
bin_median_pnps = pnps_bin_contig.groupby(["bin_name"])['pNpS'].agg(mean_bin_pnps='mean', median_bin_pnps='median', count='size').reset_index()
df_mask = bin_median_pnps['count'] >= 10
bin_median_pnps = bin_median_pnps[df_mask]
bin_median_pnps.rename(columns={"bin_name": "bin"}, inplace=True)
bin_median_pnps.to_csv('bin_median_pnps.csv', sep=',', index=False)
def get_upper_range(df):
    """Return the Tukey upper fence (Q3 + 1.5*IQR) of df's ``pNpS`` column."""
    q1 = np.quantile(df['pNpS'], 0.25)
    q3 = np.quantile(df['pNpS'], 0.75)
    return q3 + 1.5 * (q3 - q1)
# Outlier fence on pNpS; values above it are excluded from the stats below.
upper_range = get_upper_range(pnps_bin_contig)
# get pnps for all transposases in bins
transposase_in_bins = pnps_bin_contig[pnps_bin_contig['function'].str.contains("transposase")]
non_transposase = pnps_bin_contig[~pnps_bin_contig['function'].str.contains("transposase")]
# Summary statistics for transposase pNpS (fence-filtered).
print(transposase_in_bins.query('pNpS < @upper_range').pNpS.describe())
# Two-sample t-test: non-transposase vs transposase pNpS (both fence-filtered).
print(scipy.stats.ttest_ind(non_transposase.query('pNpS < @upper_range').pNpS, transposase_in_bins.query('pNpS < @upper_range').pNpS))
| {"hexsha": "040938787749717596c96c1494d950570e71d00a", "size": 11411, "ext": "py", "lang": "Python", "max_stars_repo_path": "pn-ps-and-mapping/metagenome_function_and_pnps.py", "max_stars_repo_name": "carleton-spacehogs/transposase-deep-ocean", "max_stars_repo_head_hexsha": "cf782acec39f902c563ff83f6e74c2200bf7f743", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pn-ps-and-mapping/metagenome_function_and_pnps.py", "max_issues_repo_name": "carleton-spacehogs/transposase-deep-ocean", "max_issues_repo_head_hexsha": "cf782acec39f902c563ff83f6e74c2200bf7f743", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pn-ps-and-mapping/metagenome_function_and_pnps.py", "max_forks_repo_name": "carleton-spacehogs/transposase-deep-ocean", "max_forks_repo_head_hexsha": "cf782acec39f902c563ff83f6e74c2200bf7f743", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 52.1050228311, "max_line_length": 171, "alphanum_fraction": 0.7579528525, "include": true, "reason": "import numpy", "num_tokens": 3445} |
      SUBROUTINE SET_DIR( DIR_OUT )

!***********************************************************************
!* Sets an Output Directory if Specified or if Local is Read-Only
!*
!* Search order when DIR_OUT is blank:
!*   1. the current (local) directory
!*   2. the DIR_OUT environment variable (created if necessary)
!*   3. default X:\OUTPUT, walking drive letters C..Z
!* A non-blank DIR_OUT is used as given (created if necessary).
!* On any unrecoverable failure a message is written and the program STOPs.
!*
!* Language: Fortran
!*
!* Platform: Windows
!*
!* Compiler: Fortran
!*
!* Author:   Stuart G. Mentzer
!*           Andrew Orndorff
!*
!* Date:     2004/10/25
!***********************************************************************

      ! Headers
      INCLUDE 'uds_fxn.fi'

      ! Arguments ______________________________________________________

      CHARACTER*(*) DIR_OUT ! Output directory (in/out): blank on input
                            ! means "pick one"; holds the directory used

      ! Variables ______________________________________________________

      INTEGER IOS, IOS_W    ! directory-probe status / WRITE status (ignored)

      ! Set output directory
      IF ( BLANK( DIR_OUT ) ) THEN ! Look for valid output directory

        ! Try local directory
        IOS = TRY_DIR( DIR_OUT )
        IF ( IOS .NE. 0 ) THEN ! Look for non-local directory

          ! Check DIR_OUT environment variable
          CALL GET_ENV( 'DIR_OUT', DIR_OUT )
          IF ( .NOT. BLANK( DIR_OUT ) ) THEN ! Use DIR_OUT path
            IOS = TRY_DIR( DIR_OUT )
            IF ( IOS .NE. 0 ) THEN ! Try creating the directory
              IOS = MAKE_DIR( DIR_OUT )
              IF ( IOS .EQ. 0 ) IOS = TRY_DIR( DIR_OUT )
              IF ( IOS .NE. 0 ) THEN
                ! Fatal: environment-specified directory unusable
                WRITE( *, *, IOSTAT=IOS_W )
     &             '*** Output failed to DIR_OUT = ',
     &             DIR_OUT(:L_TRIM(DIR_OUT))
                STOP ' '
              END IF
            END IF
          ELSE ! Use a default output directory
            DIR_OUT = 'C:\OUTPUT\'

            ! Create output subdirectory if necessary
            ! (advance the drive letter C..Z until one accepts it)
            IOS = TRY_DIR( DIR_OUT )
            DO WHILE ( ( IOS .NE. 0 ) .AND.
     &         ( DIR_OUT(1:1) .LE. 'Z' ) )
              IOS = MAKE_DIR( DIR_OUT )
              IF ( IOS .EQ. 0 ) IOS = TRY_DIR( DIR_OUT )
              IF ( IOS .NE. 0 )
     &           DIR_OUT(1:1) = CHAR( ICHAR( DIR_OUT(1:1) ) + 1 )
            END DO
            IF ( IOS .NE. 0 ) THEN
              ! Fatal: no drive accepted the default output directory
              WRITE( *, * ) '*** Output failed to C-Z:\OUTPUT'
              STOP ' '
            END IF
          END IF
        END IF
      ELSE ! Try specified directory
        IOS = TRY_DIR( DIR_OUT )
        IF ( IOS .NE. 0 ) THEN
          ! Caller-specified directory not usable - try to create it
          IOS = MAKE_DIR( DIR_OUT )
          IF ( IOS .EQ. 0 ) IOS = TRY_DIR( DIR_OUT )
          IF ( IOS .NE. 0 ) THEN
            WRITE( *, *, IOSTAT=IOS_W )
     &         '*** Output failed to DIR_OUT = ',
     &         DIR_OUT(:L_TRIM(DIR_OUT))
            STOP ' '
          END IF
        END IF
      END IF

      RETURN
      END
      INTEGER FUNCTION TRY_DIR( DIR_OUT )

!***********************************************************************
!* Tries a Specified Directory for Output
!*
!* Probes writability by opening (and immediately deleting) a scratch
!* file in the directory. Returns the OPEN status: 0 means writable.
!* Side effect: appends a trailing '\' to DIR_OUT if no path separator
!* is present.
!*
!* Language: Fortran
!*
!* Platform: Windows
!*
!* Compiler: Fortran
!*
!* Author:   Stuart G. Mentzer
!*
!* Date:     1999/10/18
!***********************************************************************

      ! Arguments ______________________________________________________

      CHARACTER*(*) DIR_OUT ! Output directory (may gain a trailing '\')

      ! Variables ______________________________________________________

      INTEGER LUN, IOS, IOC, LD, LD1
      CHARACTER FILE_NAME*255

      ! Functions ______________________________________________________

      LOGICAL ANY_CHARS
      EXTERNAL ANY_CHARS

      ! Try DIR_OUT for output
      LD = LEN_TRIM( DIR_OUT )
      LD1 = MAX( LD, 1 ) ! guards substring index when DIR_OUT is blank
      IF ( ( LD .GT. 0 ) .AND.
     &   ( .NOT. ANY_CHARS( DIR_OUT(LD1:LD1), '\/:' ) ) ) THEN
        ! Add path separator
        LD = LD + 1
        DIR_OUT(LD:LD) = '\'
      END IF
      ! Open a scratch file to test writability, then delete it
      FILE_NAME = DIR_OUT(:LD)//'try_dir.out'
      CALL FN_INCR( FILE_NAME )
      CALL OPEN_FS( LUN, FILE_NAME, 'W', IOS )
      CLOSE( UNIT=LUN, STATUS='DELETE', IOSTAT=IOC )
      TRY_DIR = IOS ! 0 => directory is writable

      RETURN
      END
| {"hexsha": "50acf9a5642dbd1e30d41d25faff3ad848720626", "size": 3822, "ext": "for", "lang": "FORTRAN", "max_stars_repo_path": "src/lib/Windows/set_dir.for", "max_stars_repo_name": "DeadParrot/NHTSA-Tools", "max_stars_repo_head_hexsha": "e8de2d5aa3d6de96a858ae70ecc4e75fa3d80ac4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2017-03-14T03:50:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-02T21:45:12.000Z", "max_issues_repo_path": "src/lib/Windows/set_dir.for", "max_issues_repo_name": "DeadParrot/NHTSA-Tools", "max_issues_repo_head_hexsha": "e8de2d5aa3d6de96a858ae70ecc4e75fa3d80ac4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/lib/Windows/set_dir.for", "max_forks_repo_name": "DeadParrot/NHTSA-Tools", "max_forks_repo_head_hexsha": "e8de2d5aa3d6de96a858ae70ecc4e75fa3d80ac4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-10-31T23:57:05.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-31T23:57:05.000Z", "avg_line_length": 27.3, "max_line_length": 72, "alphanum_fraction": 0.4984301413, "num_tokens": 953} |
import torch.nn as nn
import numpy as np
from box import Box
from pathlib import Path
from src.model.nets.base_net import BaseNet
class MyNet(BaseNet):
    """Minimal two-layer convolutional network used to exercise BaseNet."""

    def __init__(self, in_channels, out_channels, **kwargs):
        super().__init__(**kwargs)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.conv1 = nn.Conv2d(self.in_channels, 10, kernel_size=3)
        self.conv2 = nn.Conv2d(10, self.out_channels, kernel_size=3)

    def forward(self, x):
        """Apply the two convolutions in sequence and return the result."""
        features = self.conv1(x)
        return self.conv2(features)
def test_base_net():
    """Test to build `BaseNet`.

    FIX: the original only constructed the object (the result was an unused
    local and the test asserted nothing); now it also checks the instance.
    """
    net = BaseNet()
    assert isinstance(net, BaseNet)
def test_my_net():
    """Test to build the derived network from the test config.

    FIX: the original only constructed the object (unused local, no
    assertion); now it also checks the instance.
    """
    cfg = Box.from_yaml(filename=Path("test/configs/test_config.yaml"))
    net = MyNet(**cfg.net.kwargs)
    assert isinstance(net, MyNet)
| {"hexsha": "9d89da2616af9b9edb554d87417c37343b677cc7", "size": 846, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/model/test_net.py", "max_stars_repo_name": "Tung-I/nips2019_template", "max_stars_repo_head_hexsha": "a1fcf35b7633d192d2706a533731cb8c457ac230", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-02-01T07:19:32.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-10T13:55:49.000Z", "max_issues_repo_path": "test/model/test_net.py", "max_issues_repo_name": "Tung-I/nips2019_template", "max_issues_repo_head_hexsha": "a1fcf35b7633d192d2706a533731cb8c457ac230", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 19, "max_issues_repo_issues_event_min_datetime": "2019-05-21T12:48:37.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-01T09:56:42.000Z", "max_forks_repo_path": "test/model/test_net.py", "max_forks_repo_name": "Tung-I/nips2019_template", "max_forks_repo_head_hexsha": "a1fcf35b7633d192d2706a533731cb8c457ac230", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-08-09T17:06:56.000Z", "max_forks_repo_forks_event_max_datetime": "2019-08-09T17:06:56.000Z", "avg_line_length": 24.8823529412, "max_line_length": 71, "alphanum_fraction": 0.6548463357, "include": true, "reason": "import numpy", "num_tokens": 215} |
#! /usr/bin/python3
from numpy.random import rand
from mat_util import load, save, save_text
# Configuration --------------------------------------------------------------
newmat = True      # generate a fresh random system instead of loading one
datadir = 'data'
p = 23             # number of helper processes

# we use a tall thin matrix.
if newmat:
    m = p * 500    # p helpers will each solve an mxn system each iteration
    n = 150
    A = rand(m, n)
    b = rand(m, 1)
    save('A', A, datadir)
    save('b', b, datadir)
else:
    A = load('A', datadir)
    b = load('b', datadir)
    m, n = A.shape

print(f"m = {m}, n = {n}")

# pre-process and store the A matrix and b vector for use by the p processes.
# each process will just need to load its own data from disk
for i in range(p):
    rank = i + 1
    rows_per_proc = m // p
    save(f"A{rank}", A[i * rows_per_proc:(i + 1) * rows_per_proc, :], directory=datadir)
    save(f"b{rank}", b[i * rows_per_proc:(i + 1) * rows_per_proc], directory=datadir)
| {"hexsha": "379720672b2331f3599823a380a4c734bda59c13", "size": 806, "ext": "py", "lang": "Python", "max_stars_repo_path": "admm/pre_proc.py", "max_stars_repo_name": "ddrake/convex_m", "max_stars_repo_head_hexsha": "6e506133c03bb1e0cf38143a907ac595082d524c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "admm/pre_proc.py", "max_issues_repo_name": "ddrake/convex_m", "max_issues_repo_head_hexsha": "6e506133c03bb1e0cf38143a907ac595082d524c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "admm/pre_proc.py", "max_forks_repo_name": "ddrake/convex_m", "max_forks_repo_head_hexsha": "6e506133c03bb1e0cf38143a907ac595082d524c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.0, "max_line_length": 77, "alphanum_fraction": 0.6153846154, "include": true, "reason": "from numpy", "num_tokens": 259} |
#
__doc__ = """
Defines miscellaneous functions
"""
import numpy as np
from configobj import ConfigObj
import copy
import os
import sys
import inspect
import re
import string
import random
import yaml
import datetime
from collections import OrderedDict
# FILE ------------------------------------------
def assert_file(filename, strict=False, logger=None):
''' assert if file exists '''
if strict:
assert os.path.exists(filename), 'No %s file exists' % filename
else: # display warning to logger
if not os.path.exists(filename) and logger:
logger.info('No %s file exists' % filename)
def get_abspath(filename, do_assert=True, logger=None):
''' return absolute path with possible assertion '''
val = os.path.abspath(os.path.expandvars(os.path.expanduser(filename)))
assert_file(val, do_assert, logger)
return val
def make_dir(dirname, logger=None, force=False):
'''
Make a directory. Return True if it is created
If force is True and the directory already exists, this will force to create the directory after renaming the existing one to a directory with a random suffix.
'''
if not os.path.exists(dirname):
os.system('mkdir -p %s' % dirname)
if logger: logger.debug('A directory %s is created.' % dirname)
return True
else:
if logger: logger.debug('Directory %s already exists.' % dirname)
if force:
old_dirname = dirname + generate_random_str('_old_', 5)
os.rename(dirname, old_dirname)
os.system('mkdir -p %s' % dirname)
if logger:
logger.debug('The existing directory is renamed to %s.' % old_dirname)
logger.debug('A directory %s is created.' % dirname)
return True
return False
def split_filename(filename):
''' For a given filename,
Return a flag if it exists, directory name, and base filename
'''
assert_file(filename)
(dirname,basename) = os.path.split(os.path.abspath(filename))
return os.path.exists(filename), dirname, basename
def get_dirname(filename):
''' get directory name only '''
return split_filename(filename)[1]
def get_basename(filename):
''' get basename only '''
return split_filename(filename)[2]
def rmfile(filename):
''' delete a file after checking it is a file '''
if os.path.isfile(filename):
os.remove(filename)
# Binary Vector ------------------------------------------
def dec2bin(value, bw):
''' convert a decimal number(value) to a binary string w/ given bit width(bw) '''
return "".join(str((int(value)>>i) & 1) for i in range(bw-1,-1,-1))
def bin2dec(binstr):
''' Convert binary string to unsigned decimal number '''
n = len(binstr)-1
return int(sum(1<<n-i for i,bit in enumerate(binstr) if bit=='1'))
def invbin(binstr):
''' invert binary string '''
return "".join(str(int(i)^1) for i in binstr)
def bin2thermdec(binstr):
''' convert binary string(binstr) to thermometer code '''
n = len(binstr)-1
return int(sum(1 for i,bit in enumerate(binstr) if bit=='1'))
def all_gray(bitw, invert=False, dtype='int'):
''' returns a list of all possible gray codes for given bit width 'bitw' '''
G=lambda n:n and['0'+x for x in G(n-1)]+['1'+x for x in G(n-1)[::-1]]or['']
val = G(bitw)
val = val if not invert else [invbin(v) for v in val]
return [bin2dec(v) for v in val]
def all_bin(bitw, invert=False, dtype='int'):
val = [dec2bin(v, bitw) for v in range(2**bitw)]
val = val if not invert else [invbin(v) for v in val]
return [bin2dec(v) for v in val]
def all_therm(bitw, invert=False, dtype='int'): # all thermometer codes including 0
val = ['0'*(bitw-v)+'1'*v for v in range(bitw+1)]
val = val if not invert else [invbin(v) for v in val]
return [bin2dec(v) for v in val]
def all_onehot(bitw, include_zero = False, invert=False, dtype='int'):
val = ['0'*(bitw-v-1)+'1'+'0'*v for v in range(bitw)]
val = val + ['0'*bitw] if include_zero else val
val = val if not invert else [invbin(v) for v in val]
return [bin2dec(v) for v in val]
def therm2bin(thermstr):
''' convert thermometer-coded string(binstr) to thermometer code '''
pass
def therm2dec(thermstr):
''' convert thermometer code to decimal value '''
return bin2dec(therm2bin(thermstr))
def is_thermometer(in_str):
return in_str == 'thermometer'
def is_gray(in_str):
return in_str == 'gray'
def is_binary(in_str):
return in_str == 'binary'
# MISC MATH ---------------------------------------
def strictly_increasing(L):
return all(x<y for x, y in zip(L, L[1:]))
def strictly_decreasing(L):
return all(x>y for x, y in zip(L, L[1:]))
def non_increasing(L):
return all(x>=y for x, y in zip(L, L[1:]))
def non_decreasing(L):
return all(x<=y for x, y in zip(L, L[1:]))
def all_positive(data):
''' returns True if all elements are >0.0 '''
return all(x>0.0 for x in data)
def all_negative(data):
''' returns True if all elements are <0.0 '''
return all(x<0.0 for x in data)
def all_zero(data):
''' returns True if all elements are == 0.0 '''
return all(x==0.0 for x in data)
def get_absmax(values):
''' return a value with the largest absolute value in a list '''
res_neg = min(values)
res_pos = max(values)
return res_pos if abs(res_pos) >= abs(res_neg) else res_neg
def get_snr(signal,noise,mode='power'):
''' calculate SNR
mode: either power or signal
'''
return 10*np.log10(signal/noise) if mode=='power' else 20*np.log10(signal/noise)
# CONVERSION/REPLACEMENT ---------------------------------------
def from_engr (value):
''' convert engineering notation to a floating number '''
suffix = {'a':1e-18,'f':1e-15,'p':1e-12,'n':1e-9,'u':1e-6,'m':1e-3, \
'k':1e3,'M':1e6,'G':1e9,'T':1e12,'P':1e15,'E':1e18}
try:
return float(value[0:-1]) * suffix[value[-1]]
except:
return value
def to_engr (value,dtype=float):
''' convert a floating number to engineering notation
if value < 1e-18 , it returns 0.0
'''
suffix = [('a',1e-18),('f',1e-15),('p',1e-12),('n',1e-9),('u',1e-6),('m',1e-3), \
('',1.0),('k',1e3),('M',1e6),('G',1e9),('T',1e12),('P',1e15),('E',1e18)]
try:
m = abs(value)
if m < suffix[0][1]: # if less than 1e-18
return '0.0'
elif m >= suffix[-1][1]: # if larger than 1e18
return '%.3f'%(dtype(value/suffix[-1][1]))+suffix[-1][0]
else:
for p,v in enumerate(suffix):
if m/v[1] < 1.0:
return '%.3f'%(dtype(value/suffix[p-1][1]))+suffix[p-1][0]
except:
return None
def str2num(value,dtype=int):
if value[0] == 'b': # binary representation
try:
x = bin2dec(value[1:])
except:
raise ValueError('Binary number representation, %s, is wrong in test configuration file' %value)
else:
x = value
return dtype(float(x)) if dtype==int else dtype(x)
def interpolate_env(value, logger=None):
'''
Interpolate environment variables if exist. An environment variable is expressed as ${VAR} where VAR is the environment variable name.
'''
newvalue = copy.deepcopy(value)
envs = re.findall('\$\{\w+\}', value)
for e in envs:
evar = e.strip("$").strip("{").strip("}")
try:
newvalue = newvalue.replace(e, os.environ[evar])
except:
msg = "Environement variable (%s) does not exist !!!" % evar
if logger:
logger.warn(msg)
else:
print msg
return newvalue
def eval_str(value, dtype=int):
if type(value) != str:
return dtype(value)
if value[0] == 'b': # binary representation
try:
return bin2dec(value[1:])
except:
raise ValueError('Binary number representation, %s, is wrong in test configuration file' % value)
else:
return dtype(float(value)) if dtype==int else dtype(value)
# LIST ---------------------------------------
def flatten_list(l):
''' flatten list of lists '''
return [item for sublist in l for item in sublist]
def merge_list(a,b):
''' merge two lists, a and b, while removing any duplication '''
return a+list(set(b)-set(a))
def force_list(val):
if not isinstance(val, (list,tuple)):
val = [val]
return val
def add_column_list(srcl,dstl,idx=0):
''' assuming that dstl is a list of lists,
this function adds srcl list to the column of dstl at the left of the assigned index(idx).
'''
lofl = copy.deepcopy(dstl)
for i, row in enumerate(lofl):
row.insert(idx,srcl[i])
return lofl
def swap_item_list(data,i,j):
''' swap i(th) and j(th) item of the list,data '''
_tmp = data[i]
data[i] = data[j]
data[j] = _tmp
return data
# MISC ---------------------------------------
def printToday():
today = datetime.date.today()
return datetime.date.today().strftime('%b-%d-%Y')
def generate_random_str(prefix,N):
''' generate random string with a length of N(>1, including len(prefix)), it starts with X '''
char_set = string.ascii_uppercase + string.digits
return prefix+''.join(random.sample(char_set,N-1))
def print_section(msg, level=1, leading_newline=True):
''' print a message with a section deliminator at the top/bottom of the message '''
nc = len(msg)
newline = '\n' if leading_newline else None
if level==1:
return filter(None, [newline, '='*nc, msg, '='*nc])
elif level==2:
return filter(None, [newline, msg, '*'*nc])
elif level==3:
return filter(None, [newline, msg, '-'*nc])
else:
return filter(None, [newline, msg, '^'*nc])
def print_end_msg(msg, char='=', leading_newline=True):
newline = '\n' if leading_newline else None
return filter(None, [newline, '%s %s %s' %(char, msg, char)])
def get_letter_index(index, upper=False):
''' index from 1 '''
start = ord('A' if upper else 'a')
return chr(start+index-1)
def scriptinfo():
'''
Returns a dictionary with information about the running top level Python
script:
---------------------------------------------------------------------------
dir: directory containing script or compiled executable
name: name of script or executable
source: name of source code file
---------------------------------------------------------------------------
"name" and "source" are identical if and only if running interpreted code.
When running code compiled by py2exe or cx_freeze, "source" contains
the name of the originating Python script.
If compiled by PyInstaller, "source" contains no meaningful information.
'''
#---------------------------------------------------------------------------
# scan through call stack for caller information
#---------------------------------------------------------------------------
for teil in inspect.stack():
# skip system calls
if teil[1].startswith("<"):
continue
if teil[1].upper().startswith(sys.exec_prefix.upper()):
continue
trc = teil[1]
# trc contains highest level calling script name
# check if we have been compiled
if getattr(sys, 'frozen', False):
scriptdir, scriptname = os.path.split(sys.executable)
return {"dir": scriptdir,
"name": scriptname,
"source": trc}
# from here on, we are in the interpreted case
scriptdir, trc = os.path.split(trc)
# if trc did not contain directory information,
# the current working directory is what we need
if not scriptdir:
scriptdir = os.getcwd()
scr_dict ={"name": trc,
"source": trc,
"dir": scriptdir}
return scr_dict
def featureinfo(check=False):
''' return running script '''
if check:
scr = scriptinfo()
return os.path.splitext(scr['source'])[0]
else:
return 'mProbo'
def read_yaml(filename,default={}):
''' Read yaml '''
val=OrderedDict()
val.update(default)
f = get_abspath(filename)
val.update(yaml.load(open(f,'r')))
for k in default.keys():
if not val[k]: val.update({k:default[k]})
return val
def print_order(val):
val = str(val)
digit1 = val[-1]
if digit1 == '1' and val != '11':
return val + 'st'
elif digit1 == '2' and val != '12':
return val + 'nd'
elif digit1 == '3' and val != '13':
return val + 'rd'
else:
return val + 'th'
def get_class(klass):
''' get a class object from klass str '''
fields = klass.split('.')
module = '.'.join(fields[:-1])
m = __import__(module)
for comp in fields[1:]:
m = getattr(m, comp)
return m
def isNone(val):
    ''' Return True when *val* is None.

    Uses an identity check, the idiomatic (and warning-free) alternative
    to comparing with ``== None``.
    '''
    return val is None
| {"hexsha": "671fe39283e3eb71255b0c1c6843245b8ae9e690", "size": 12394, "ext": "py", "lang": "Python", "max_stars_repo_path": "DaVE/dave/common/misc.py", "max_stars_repo_name": "upscale-project/hslink_phy", "max_stars_repo_head_hexsha": "741f78da673d2e633da05d292aa6645125ebae32", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2019-07-16T19:40:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-05T02:33:00.000Z", "max_issues_repo_path": "DaVE/dave/common/misc.py", "max_issues_repo_name": "upscale-project/hslink_phy", "max_issues_repo_head_hexsha": "741f78da673d2e633da05d292aa6645125ebae32", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-06-06T02:15:54.000Z", "max_issues_repo_issues_event_max_datetime": "2019-07-19T23:32:40.000Z", "max_forks_repo_path": "DaVE/dave/common/misc.py", "max_forks_repo_name": "upscale-project/hslink_phy", "max_forks_repo_head_hexsha": "741f78da673d2e633da05d292aa6645125ebae32", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-06-20T19:19:50.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-28T08:20:57.000Z", "avg_line_length": 30.8308457711, "max_line_length": 161, "alphanum_fraction": 0.6236888817, "include": true, "reason": "import numpy", "num_tokens": 3355} |
Bulk Mailing is an incredibly complicated process that companies and the odd individual go through to send people things in the mail at discounted prices.
To do a Bulk Mailing you first need a license: http://pe.usps.com/businessmail101/postage/mailingPermit.htm
You then need to decide what kind of bulk mailing best fits your needs. There are several different ways to do a bulk mailing depending on whether you are doing a saturation mailing (mailing to at least 85% of the people on a given postal route) or mailing people in the same zipcode or district or if you are mailing to people all over the state or country. Monica Hernandez (the Bulk Mail Clerk) is an excellent resource for helping you decide what will best fit your needs and price range. This website is also touted as a guide to helping you make those kinds of decisions: http://pe.usps.com/businessmail101/decisiontree/welcome.htm
Pricing is always changing. This calculator is set up to help you figure out how much your mailing will cost: http://dbcalc.usps.gov/CalculatorSetPage.aspx
Given how confusing and postal-code-based it is, it is always a good idea to check with a clerk to make sure the price will be what you think it is.
In the last year a new system has come out to try and simplify bulk mailing; it is called Every Door Direct: http://www.usps.com/promotions/everydoordirectmail.htm
One of the biggest perks of this system is that you do not have to get the addresses and barcodes for all of the people you plan to mail. It does have very strict rules about what is eligible, so be sure to follow the guidelines.
When doing a bulk mail it is always a good idea to bring in a prototype of the item you wish to mail and have the bulk mail clerk (Monica) check it for size, droop, tabbing, and correct address and labeling before you print a whole shipment.
| {"hexsha": "25d9391dec4475c83203bedc3b6f819d0fb43568", "size": 1845, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Bulk_Mail_Center.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Bulk_Mail_Center.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Bulk_Mail_Center.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 123.0, "max_line_length": 638, "alphanum_fraction": 0.7956639566, "num_tokens": 403} |
! ###################################################################
! Copyright (c) 2013-2022, Marc De Graef Research Group/Carnegie Mellon University
! All rights reserved.
!
! Redistribution and use in source and binary forms, with or without modification, are
! permitted provided that the following conditions are met:
!
! - Redistributions of source code must retain the above copyright notice, this list
! of conditions and the following disclaimer.
! - Redistributions in binary form must reproduce the above copyright notice, this
! list of conditions and the following disclaimer in the documentation and/or
! other materials provided with the distribution.
! - Neither the names of Marc De Graef, Carnegie Mellon University nor the names
! of its contributors may be used to endorse or promote products derived from
! this software without specific prior written permission.
!
! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
! DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
! SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
! CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
! OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
! USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
! ###################################################################
module mod_kvectors
!! author: MDG
!! version: 1.0
!! date: 02/02/20
!!
!! variables and types needed to determine lists of wave vectors
use mod_kinds
use mod_global
IMPLICIT NONE
private
! linked list of wave vectors (used by all diffraction programs)
type, public :: kvectorlist
integer(kind=irg) :: i,j,hs ! (i,j) image coordinates of this beam; hs flags the hemisphere (used by Lambert modes)
real(kind=dbl) :: kt(3) ! tangential component of wavevector
real(kind=dbl) :: kn ! normal component
real(kind=dbl) :: k(3) ! full wave vector (reciprocal space, divided by wavelength)
type(kvectorlist),pointer :: next ! connection to next wave vector node in the singly linked list
end type kvectorlist
type, public :: kvectors_T
private
type(kvectorlist), pointer :: klist ! head of the linked list of wave vectors
real(kind=dbl) :: kinp(3) ! input (incident) wave vector direction
real(kind=dbl) :: ktmax ! maximum tangential component
integer(kind=irg) :: numk ! number of k-vectors currently in the list
integer(kind=irg) :: isym ! sampling/symmetry type parameter (see Calckvectors select case)
character(fnlen) :: mapmode ! k-vector mapping mode (checked against the list in check_mapmode_)
real(kind=dbl) :: delta ! grid step size in nm^-1 (or Lambert step for RoscaLambert mode)
real(kind=dbl) :: gan(3) ! normalized "horizontal" reciprocal lattice vector ga
real(kind=dbl) :: gperp(3) ! normalized g_perp = ga x k
real(kind=dbl) :: kstar(3) ! normalized incident beam direction in reciprocal space
contains
private
procedure, pass(self) :: MakeRefList_
procedure, pass(self) :: Calckvectors_
procedure, pass(self) :: CalckvectorsSymmetry_
procedure, pass(self) :: get_ListHead_
procedure, pass(self) :: check_mapmode_
procedure, pass(self) :: get_numk_
procedure, pass(self) :: get_mapmode_
procedure, pass(self) :: set_kinp_
procedure, pass(self) :: set_ktmax_
procedure, pass(self) :: set_SamplingType_
procedure, pass(self) :: set_mapmode_
procedure, pass(self) :: Add_knode_
procedure, pass(self) :: AddkVector_
procedure, pass(self) :: Delete_kvectorlist_
procedure, pass(self) :: CalckvectorsECP_
procedure, pass(self) :: CalckvectorsPrecession_
procedure, pass(self) :: CalckvectorsGPU_
final :: kvectors_destructor
! public generic bindings exposing the private implementations above
generic, public :: MakeRefList => MakeRefList_
generic, public :: Calckvectors => Calckvectors_
generic, public :: CalckvectorsSymmetry => CalckvectorsSymmetry_
generic, public :: get_ListHead => get_ListHead_
generic, public :: get_numk => get_numk_
generic, public :: set_kinp => set_kinp_
generic, public :: set_ktmax => set_ktmax_
generic, public :: set_SamplingType => set_SamplingType_
generic, public :: check_mapmode => check_mapmode_
generic, public :: set_mapmode => set_mapmode_
generic, public :: get_mapmode => get_mapmode_
generic, public :: Add_knode => Add_knode_
generic, public :: AddkVector => AddkVector_
generic, public :: Delete_kvectorlist => Delete_kvectorlist_
generic, public :: CalckvectorsECP => CalckvectorsECP_
generic, public :: CalckvectorsPrecession => CalckvectorsPrecession_
generic, public :: CalckvectorsGPU => CalckvectorsGPU_
end type kvectors_T
! the constructor routine for this class; allows "KVec = kvectors_T()" syntax
interface kvectors_T
module procedure kvectors_constructor
end interface kvectors_T
contains
!--------------------------------------------------------------------------
type(kvectors_T) function kvectors_constructor( ) result(KVec)
!DEC$ ATTRIBUTES DLLEXPORT :: kvectors_constructor
!! author: MDG
!! version: 1.0
!! date: 02/02/20
!!
!! constructor for the kvectors_T Class; returns an object with an empty,
!! freshly allocated head node and a zero k-vector count
IMPLICIT NONE
integer(kind=irg) :: nref
! simply initialize the reflist; nref will be 0 but is not needed in calling program
nullify(KVec%klist)
KVec%numk = 0
call KVec%MakeRefList(nref)
end function kvectors_constructor
!--------------------------------------------------------------------------
subroutine kvectors_destructor(self)
!DEC$ ATTRIBUTES DLLEXPORT :: kvectors_destructor
!! author: MDG
!! version: 1.0
!! date: 02/02/20
!!
!! destructor for the kvectors_T Class; currently only reports destruction,
!! the actual list cleanup is disabled (see note below)
IMPLICIT NONE
type(kvectors_T), INTENT(INOUT) :: self
call reportDestructor('kvectors_T')
! 02-06-2020 Clement Lafond : causes Fortran error 157 on Windows; commented out
! for the moment as it does not modify program behaviour (callers may invoke
! Delete_kvectorlist explicitly instead)
!call self%Delete_kvectorlist()
end subroutine kvectors_destructor
!--------------------------------------------------------------------------
recursive subroutine Delete_kvectorlist_(self)
!DEC$ ATTRIBUTES DLLEXPORT :: Delete_kvectorlist_
!! author: MDG
!! version: 1.0
!! date: 02/02/20
!!
!! delete the entire linked list of wave vectors and reset the counter
IMPLICIT NONE
class(kvectors_T), INTENT(INOUT) :: self
type(kvectorlist),pointer :: kcur, knext
! walk the list and deallocate every node, including the head node;
! the previous version only deallocated nodes when the head had a
! successor, so a single-entry list leaked its head node
kcur => self%klist
do while (associated(kcur))
knext => kcur%next
deallocate(kcur)
kcur => knext
end do
nullify(self%klist)
self%numk = 0
end subroutine Delete_kvectorlist_
!--------------------------------------------------------------------------
recursive subroutine MakeRefList_(self, nref)
!DEC$ ATTRIBUTES DLLEXPORT :: MakeRefList_
!! author: MDG
!! version: 1.0
!! date: 02/04/20
!!
!! allocate and initialize the linked reflection list; any pre-existing
!! list is deleted first, and nref is reset to 0
use mod_io
IMPLICIT NONE
class(kvectors_T), INTENT(INOUT) :: self
integer(kind=irg),INTENT(INOUT) :: nref
type(IO_T) :: Message
type(kvectorlist),pointer :: rltail
integer(kind=irg) :: istat
! release any previous list so we always start from a clean state
if (associated(self%klist)) then
call self%Delete_kvectorlist()
end if
! create it if it does not already exist
! (after the delete above, klist is unassociated, so this always executes)
if (.not.associated(self%klist)) then
nref = 0
allocate(self%klist,stat=istat)
if (istat.ne.0) call Message%printError('MakeRefList:',' unable to allocate pointer')
rltail => self%klist ! tail points to new value
nullify(rltail%next) ! nullify next in new value
end if
end subroutine MakeRefList_
!--------------------------------------------------------------------------
recursive function Kdelta(i,j) result(res)
!DEC$ ATTRIBUTES DLLEXPORT :: Kdelta
!! author: MDG
!! version: 1.0
!! date: 02/02/20
!!
!! Kronecker delta function: 1 when the two indices are equal, 0 otherwise
IMPLICIT NONE
integer(kind=irg),INTENT(IN) :: i,j
integer(kind=irg) :: res
! merge() selects its first argument when the mask is .TRUE.
res = merge(1, 0, i.eq.j)
end function Kdelta
!--------------------------------------------------------------------------
recursive function check_mapmode_(self, mp) result(ok)
!DEC$ ATTRIBUTES DLLEXPORT :: check_mapmode_
!! author: MDG
!! version: 1.0
!! date: 02/02/20
!!
!! check whether the requested mapmode (argument mp, if present) or the
!! currently stored self%mapmode is one of the supported modes
IMPLICIT NONE
class(kvectors_T),INTENT(INOUT) :: self
character(fnlen),INTENT(IN),OPTIONAL :: mp
logical :: ok
integer(kind=irg) :: i
character(20) :: modes(6) = (/ 'Conical             ', &
'ECCI                ', &
'Standard            ', &
'StandardConical     ', &
'RoscaLambert        ', &
'RoscaLambertLegendre' /)
ok = .FALSE.
! loop over size(modes) rather than a hard-coded 5: the previous version
! never scanned the 6th entry, so 'RoscaLambertLegendre' could never validate
if (present(mp)) then
do i = 1, size(modes)
if (trim(modes(i)).eq.trim(mp)) ok = .TRUE.
end do
else
do i = 1, size(modes)
if (trim(modes(i)).eq.trim(self%mapmode)) ok = .TRUE.
end do
end if
end function check_mapmode_
!--------------------------------------------------------------------------
recursive subroutine set_mapmode_(self, mp)
!DEC$ ATTRIBUTES DLLEXPORT :: set_mapmode_
!! author: MDG
!! version: 1.0
!! date: 02/12/20
!!
!! set the k-vector map mode; aborts with a printError if the mode is
!! not one of the modes accepted by check_mapmode_
use mod_io
IMPLICIT NONE
class(kvectors_T), INTENT(INOUT) :: self
character(*), INTENT(IN) :: mp
type(IO_T) :: Message
self%mapmode = trim(mp)
! validate after assignment; check_mapmode_ reads self%mapmode when called without arguments
if (.not.self%check_mapmode()) then
call Message%printError('set_mapmode','kvector mapping mode '//trim(mp)//' not known')
end if
end subroutine set_mapmode_
!--------------------------------------------------------------------------
recursive function get_mapmode_(self) result(mp)
!DEC$ ATTRIBUTES DLLEXPORT :: get_mapmode_
!! author: MDG
!! version: 1.0
!! date: 02/13/20
!!
!! get the current k-vector map mode string (trimmed)
IMPLICIT NONE
class(kvectors_T), INTENT(INOUT) :: self
character(fnlen) :: mp
mp = trim(self%mapmode)
end function get_mapmode_
!--------------------------------------------------------------------------
recursive subroutine set_kinp_(self, k)
!DEC$ ATTRIBUTES DLLEXPORT :: set_kinp_
!! author: MDG
!! version: 1.0
!! date: 02/12/20
!!
!! set the input (incident) wave vector direction (3 components, double precision)
IMPLICIT NONE
class(kvectors_T), INTENT(INOUT) :: self
real(kind=dbl), INTENT(IN) :: k(3)
self%kinp = k
end subroutine set_kinp_
!--------------------------------------------------------------------------
recursive subroutine set_ktmax_(self, k)
!DEC$ ATTRIBUTES DLLEXPORT :: set_ktmax_
!! author: MDG
!! version: 1.0
!! date: 02/12/20
!!
!! set the maximum tangential wave vector component (used to size the sampling grid)
IMPLICIT NONE
class(kvectors_T), INTENT(INOUT) :: self
real(kind=dbl), INTENT(IN) :: k
self%ktmax = k
end subroutine set_ktmax_
!--------------------------------------------------------------------------
recursive subroutine set_SamplingType_(self, i)
!DEC$ ATTRIBUTES DLLEXPORT :: set_SamplingType_
!! author: MDG
!! version: 1.0
!! date: 02/12/20
!!
!! set the sampling type parameter isym (selects the symmetry case in Calckvectors)
IMPLICIT NONE
class(kvectors_T), INTENT(INOUT) :: self
integer(kind=irg), INTENT(IN) :: i
self%isym = i
end subroutine set_SamplingType_
!--------------------------------------------------------------------------
recursive function get_numk_(self) result(numk)
!DEC$ ATTRIBUTES DLLEXPORT :: get_numk_
!! author: MDG
!! version: 1.0
!! date: 02/12/20
!!
!! return the number of k-vectors currently stored in the linked list
IMPLICIT NONE
class(kvectors_T), INTENT(INOUT) :: self
integer(kind=irg) :: numk
numk = self%numk
end function get_numk_
!--------------------------------------------------------------------------
recursive function get_ListHead_(self) result(klist)
!DEC$ ATTRIBUTES DLLEXPORT :: get_ListHead_
!! author: MDG
!! version: 1.0
!! date: 02/12/20
!!
!! return a pointer to the head node of the k-vector linked list
!! (the original doc comment incorrectly said "number of k-vectors")
IMPLICIT NONE
class(kvectors_T), INTENT(INOUT) :: self
type(kvectorlist), pointer :: klist
klist => self%klist
end function get_ListHead_
!--------------------------------------------------------------------------
recursive subroutine Calckvectors_(self,cell,SG,Diff,ga,npx,npy,ijmax,usehex,LegendreArray)
!DEC$ ATTRIBUTES DLLEXPORT :: Calckvectors_
!! author: MDG
!! version: 1.0
!! date: 02/02/20
!!
!! create a linked list of wave vectors
!!
!! This is a new version that combines several older routines. The most important
!! aspects of this routine are a) linked list can use regular mapping or modified Lambert mapping;
!! b) list makes use of crystal symmetry (although that feature can be turned off); c) routine
!! has been cleaned up, and there is now a Delete_kvectorlist function as well. This is a very
!! complex routine so make sure you fully understand it before you attempt to modify or add anything!
!!
!! todo: The Standard and RoscaLambert mapmodes have different considerations of the
!! Laue groups; this needs to be verified and, if necessary, simplified to a single set of
!! conditions. This might also allow Addkvector and Add_knode to become a single routine.
use mod_io
use mod_diffraction
use mod_crystallography
use mod_symmetry
IMPLICIT NONE
class(kvectors_T),INTENT(INOUT) :: self
type(Cell_T),INTENT(INOUT) :: cell
type(SpaceGroup_T),INTENT(INOUT) :: SG
type(Diffraction_T),INTENT(INOUT) :: Diff
real(kind=dbl),INTENT(IN) :: ga(3)
!! "horizontal" reciprocal lattice vector
integer(kind=irg),INTENT(IN) :: npx
!! number of kvectors along x
integer(kind=irg),INTENT(IN) :: npy
!! number of kvectors along y
integer(kind=irg),INTENT(INOUT) :: ijmax
!! max parameter used for Conical and StandardConical modes
real(kind=dbl),INTENT(IN),OPTIONAL :: LegendreArray(0:2*npx)
!! Legendre lattitude grid points for spherical indexing
logical,INTENT(IN),OPTIONAL :: usehex
!! hexagonal mode for RoscaLambert mapmode
type(IO_T) :: Message
integer(kind=irg) :: istat,i,j,istart,iend,jstart,jend, imin, imax, jmin, jmax, ii, jj, sqring
real(kind=dbl) :: glen, xy(2), xx, yy, eps
logical :: hexgrid = .FALSE., yes = .TRUE., flip = .TRUE., check
character(3) :: grid
type(kvectorlist),pointer :: ktail, ktmp
real(kind=sgl) :: xytest(2), xxtest, yytest
! first, if self%klist already exists, delete it
if (associated(self%klist)) then ! deallocate the entire linked list
call self%Delete_kvectorlist()
end if
! do we know this mapmode ?
if ( self%check_mapmode().eqv..FALSE.) then
call Message%printError('Calckvectors','mapmode unknown')
end if
if (trim(self%mapmode).eq.'ECCI') then ! used for ECCI without symmetry application, including EMZAdefect
! compute geometrical factors
glen = cell%CalcLength(ga,'r') ! length of ga
self%gan = ga/glen ! normalized ga
!self%delta = 2.0*self%ktmax*glen/(2.0*float(npx)+1.0)
self%delta = self%ktmax*glen/dble(npx)
! print*,self%delta, self%gan ! grid step size in nm-1
call cell%TransSpace(self%kinp,self%kstar,'d','r') ! transform incident direction to reciprocal space
call cell%CalcCross(ga,self%kstar,self%gperp,'r','r',0)! compute g_perp = ga x k
call cell%NormVec(self%gperp,'r') ! normalize g_perp
call cell%NormVec(self%kstar,'r') ! normalize reciprocal beam vector
! allocate the head and tail of the linked list
allocate(self%klist,stat=istat) ! allocate new value
if (istat.ne.0) call Message%printError('Calckvectors','unable to allocate self%klist pointer')
ktail => self%klist ! tail points to new value
nullify(ktail%next) ! nullify next in new value
self%numk = 1 ! keep track of number of k-vectors so far
ktail%i = 0 ! i-index of beam
ktail%j = 0 ! j-index of beam
ktail%hs = 0 ! spare index
ktail%kt = (/0.0,0.0,0.0/) ! no tangential component for central beam direction
ktail%k = self%kstar/Diff%getWaveLength() ! divide by wavelength
ktail%kn = cell%CalcDot(ktail%k,self%kstar,'r') ! normal component
! set the loop limits
imin = -npx; imax = npx; jmin = -npy; jmax = npy;
! and loop over the entire range (without symmetry considerations
do i=imin,imax
do j=jmin,jmax
if (.not.((i.eq.0).and.(j.eq.0))) then ! the point (0,0) has already been taken care of
if ((i**2+j**2).le.ijmax) then ! only directions inside the incident cone
call self%Add_knode(cell,Diff,ktail,i,j,(/ 0.0,0.0/)) ! add k-vector to linked list
end if
end if
end do
end do
end if ! mapmode = ECCI
if (trim(self%mapmode).eq.'Conical') then ! used for CBED without symmetry application, including EMZAdefect
! compute geometrical factors
glen = cell%CalcLength(ga,'r') ! length of ga
self%gan = ga/glen ! normalized ga
self%delta = 2.0*self%ktmax*glen/(2.0*float(npx)+1.0) ! grid step size in nm-1
call cell%TransSpace(self%kinp,self%kstar,'d','r') ! transform incident direction to reciprocal space
call cell%CalcCross(ga,self%kstar,self%gperp,'r','r',0)! compute g_perp = ga x k
call cell%NormVec(self%gperp,'r') ! normalize g_perp
call cell%NormVec(self%kstar,'r') ! normalize reciprocal beam vector
! allocate the head and tail of the linked list
allocate(self%klist,stat=istat) ! allocate new value
if (istat.ne.0) call Message%printError('Calckvectors','unable to allocate self%klist pointer')
ktail => self%klist ! tail points to new value
nullify(ktail%next) ! nullify next in new value
self%numk = 1 ! keep track of number of k-vectors so far
ktail%i = 0 ! i-index of beam
ktail%j = 0 ! j-index of beam
ktail%kt = (/0.0,0.0,0.0/) ! no tangential component for central beam direction
ktail%k = self%kstar/Diff%getWaveLength() ! divide by wavelength
ktail%kn = cell%CalcDot(ktail%k,self%kstar,'r') ! normal component
! set the loop limits
imin = -npx; imax = npx; jmin = -npy; jmax = npy;
! and loop over the entire range (without symmetry considerations
do i=imin,imax
do j=jmin,jmax
if (.not.((i.eq.0).and.(j.eq.0))) then ! the point (0,0) has already been taken care of
if ((i**2+j**2).le.ijmax) then ! only directions inside the incident cone
call self%Add_knode(cell,Diff,ktail,i,j,(/ 0.0,0.0/)) ! add k-vector to linked list
end if
end if
end do
end do
end if ! mapmode = Conical
! standard or standard-conical kvector list, as used by CBED and other programs
if ( (self%mapmode.eq.'Standard').or.(self%mapmode.eq.'StandardConical') ) then
! for standard mode, we want to make sure that ijmax, which need not be defined by
! the calling program for this mode, is set to a large value
if (self%mapmode.eq.'Standard') then
ijmax = (5*npx)**2
end if
! compute geometrical factors
glen = cell%CalcLength(ga,'r') ! length of ga
self%gan = ga/glen ! normalized ga
self%delta = 2.0*self%ktmax*glen/(2.0*float(npx)+1.0) ! grid step size in nm-1
call cell%TransSpace(self%kinp,self%kstar,'d','r') ! transform incident direction to reciprocal space
call cell%CalcCross(ga,self%kstar,self%gperp,'r','r',0)! compute g_perp = ga x k
call cell%NormVec(self%gperp,'r') ! normalize g_perp
call cell%NormVec(self%kstar,'r') ! normalize reciprocal beam vector
! allocate the head and tail of the linked list
allocate(self%klist,stat=istat) ! allocate new value
if (istat.ne.0) call Message%printError('Calckvectors','unable to allocate self%klist pointer')
ktail => self%klist ! tail points to new value
nullify(ktail%next) ! nullify next in new value
self%numk = 1 ! keep track of number of k-vectors so far
ktail%i = 0 ! i-index of beam
ktail%j = 0 ! j-index of beam
ktail%kt = (/0.0,0.0,0.0/) ! no tangential component for central beam direction
ktail%k = self%kstar/Diff%getWaveLength() ! divide by wavelength
ktail%kn = cell%CalcDot(ktail%k,self%kstar,'r') ! normal component
! implement symmetry Table 7.3 from EM book
select case(self%isym) ! negative values -> systematic row; positive -> zone axis
case(-1) ! centrosymmetric systematic row
imin = 0; imax = npx; grid = 'srw'
case(-2) ! non-centrosymmetric systematic row
imin = -npx; imax = npx; grid = 'srw'
case(1) ! 2D Group 1
imin = -npx; imax = npx; jmin = -npy; jmax = npy; grid = 'sqa'
case(2) ! 2D Group 2
imin = -npx; imax = npx; jmin = 0; jmax = npy; grid = 'sqb'
case(3) ! 2D Group m
imin = -npx; imax = npx; jmin = 0; jmax = npy; grid = 'sqa'
case(4) ! 2D Group 2mm
imin = 0; imax = npx; jmin = 0; jmax = npy; grid = 'sqa'
case(5) ! 2D Group 4
imin = 1; imax = npx; jmin = 0; jmax = npy; grid = 'sqa'
case(6) ! 2D Group 4mm
imin = 0; imax = npx; jmin = 0; jmax = npy; grid = 'sqc'
case(7) ! 2D Group 3 (cubic version)
grid = 'hxa'; hexgrid=.TRUE.
case(8) ! 2D Group 31m (cubic version)
grid = 'hxb'; hexgrid=.TRUE.
case(9) ! 2D Group 6
grid = 'hxe'; hexgrid=.TRUE.
case(10) ! 2D Group 6mm
grid = 'hxf'; hexgrid=.TRUE.
case(11) ! 2D Group 3 (hexagonal setting)
grid = 'hxc'; hexgrid=.TRUE.
case(12) ! 2D Group 31m (hexagonal setting)
grid = 'hxd'; hexgrid=.TRUE.
case(13) ! 2D Group 3m1 (cubic setting)
grid = 'hxg'; hexgrid=.TRUE.
case(14) ! 2D Group 3m1 (hexagonal setting)
grid = 'hxh'; hexgrid=.TRUE.
case default ! we should never get here
call Message%printError('Calckvectors','unknown isym value')
end select
! now do the real work for standard sets of wave vectors
select case(grid)
case('srw') ! systematic row incident beam orientations
do i=imin,imax
if (i.ne.0) then ! the point (0,0) has already been taken care of
call self%Add_knode(cell,Diff,ktail,i,0,(/ 0.0,0.0/))
end if
end do
case('sqa') ! from here on, all orientations are zone axis cases for all Laue groups
do i=imin,imax
do j=jmin,jmax
if (.not.((i.eq.0).and.(j.eq.0))) then ! the point (0,0) has already been taken care of
if (i**2+j**2.le.ijmax) then
call self%Add_knode(cell,Diff,ktail,i,j,(/ 0.0,0.0/))
end if
end if
end do
end do
case('sqb')
do i=imin,imax
jloop_sqb: do j=jmin,jmax
if ((j.eq.0).and.(i.lt.0)) cycle jloop_sqb ! skip the points (i<0,0)
if (.not.((i.eq.0).and.(j.eq.0))) then ! the point (0,0) has already been taken care of
if (i**2+j**2.le.ijmax) then
call self%Add_knode(cell,Diff,ktail,i,j,(/ 0.0,0.0/))
end if
end if
end do jloop_sqb
end do
case('sqc')
do j=0,jmax
do i=j,imax
if (.not.((i.eq.0).and.(j.eq.0))) then ! the point (0,0) has already been taken care of
if (i**2+j**2 .le. ijmax) then
call self%Add_knode(cell,Diff,ktail,i,j,(/ 0.0,0.0/))
end if
end if
end do
end do
case('hxa')
do j=0,npy
do i=1-Kdelta(j,0),npx
if (.not.((i.eq.0).and.(j.eq.0))) then ! the point (0,0) has already been taken care of
if (i**2+j**2.le.ijmax) then
call self%Add_knode(cell,Diff,ktail,i,j,(/ 0.0,0.0/),hexgrid)
end if
end if
end do
end do
case('hxb')
do j=0,npy
do i=j,npx
if (.not.((i.eq.0).and.(j.eq.0))) then ! the point (0,0) has already been taken care of
if (i**2+j**2.le.ijmax) then
call self%Add_knode(cell,Diff,ktail,i,j,(/ 0.0,0.0/),hexgrid)
end if
end if
end do
end do
case('hxc')
do j=0,npy
do i=1-Kdelta(j,0)-j,npx-j
if (.not.((i.eq.0).and.(j.eq.0))) then ! the point (0,0) has already been taken care of
if (i**2+j**2.le.ijmax) then
call self%Add_knode(cell,Diff,ktail,i,j,(/ 0.0,0.0/),hexgrid)
end if
end if
end do
end do
case('hxd')
do j=0,npy
do i=0,npx-j
if (.not.((i.eq.0).and.(j.eq.0))) then ! the point (0,0) has already been taken care of
if (i**2+j**2.le.ijmax) then
call self%Add_knode(cell,Diff,ktail,i,j,(/ 0.0,0.0/),hexgrid)
end if
end if
end do
end do
case('hxe')
do j=0,npy-1
do i=1-Kdelta(j,0),npx-j
if (.not.((i.eq.0).and.(j.eq.0))) then ! the point (0,0) has already been taken care of
if (i**2+j**2.le.ijmax) then
call self%Add_knode(cell,Diff,ktail,i,j,(/ 0.0,0.0/),hexgrid)
end if
end if
end do
end do
case('hxf')
do j=0,npy/2
do i=j,npx-j
if (.not.((i.eq.0).and.(j.eq.0))) then ! the point (0,0) has already been taken care of
if (i**2+j**2.le.ijmax) then
call self%Add_knode(cell,Diff,ktail,i,j,(/ 0.0,0.0/),hexgrid)
end if
end if
end do
end do
case('hxg')
do j=0,npy
do i=j/2,min(2*j,npy)
if (.not.((i.eq.0).and.(j.eq.0))) then ! the point (0,0) has already been taken care of
if (i**2+j**2.le.ijmax) then
call self%Add_knode(cell,Diff,ktail,i,j,(/ 0.0,0.0/),hexgrid)
end if
end if
end do
end do
case('hxh')
do j=0,npy
do i=-j/2,min(j,npy-1)
if (.not.((i.eq.0).and.(j.eq.0))) then ! the point (0,0) has already been taken care of
if (i**2+j**2.le.ijmax) then
call self%Add_knode(cell,Diff,ktail,i,j,(/ 0.0,0.0/),hexgrid)
end if
end if
end do
end do
case default ! we should never get here
call Message%printError('Calckvectors:','unknown grid type value')
end select ! grid value
end if ! mapmode.eq.'Standard' or 'StandardConical'
! the next type of grid is the one used for the modified Lambert maps in the dynamical EBSD
! programs; this requires some special care, since these mappings are a little trickier than
! those of the standard mapmode. While it is possible to use a plain Lambert projection as
! well, here we only allow for the RoscaLambert mode.
if (self%mapmode.eq.'RoscaLambert') then
self%delta = 1.D0 / dble(npx)
if (usehex) then ! hexagonal grid
hexgrid = .TRUE.
else ! square grid
hexgrid = .FALSE.
end if
! allocate the head of the linked list
allocate(self%klist,stat=istat) ! allocate new value
if (istat.ne.0) call Message%printError('Calckvectors',' unable to allocate self%klist pointer')
ktail => self%klist ! tail points to new value
nullify(ktail%next) ! nullify next in new value
self%numk = 1 ! keep track of number of k-vectors so far
ktail%hs = 1 ! this lies in the Northern Hemisphere
ktail%i = 0 ! i-index of beam
ktail%j = 0 ! j-index of beam
self%kstar = (/ 0.D0, 0.D0, 1.D0 /) ! we always use c* as the center of the RoscaLambert projection
call cell%NormVec(self%kstar,'c') ! normalize incident direction
self%kstar = self%kstar/Diff%getWaveLength() ! divide by wavelength
! and transform to reciprocal crystal space using the direct structure matrix
ktail%k = matmul(transpose(cell%getdsm()),self%kstar)
ktail%kn = 1.0/Diff%getWaveLength()
! MDG: as of 8/25/15, we no longer use the Laue groups to determine the set of independent wave vectors,
! but instead we use the complete point group symmetry, as it should be. Upon reflection, using
! the Laue groups was equivalent to implicitly using Friedel's law, which makes all diffraction patterns
! centrosymmetric, and that is not correct for EBSD. So, the symbol isym now encodes the full point group,
! not the Laue group. This will require a modification in each calling program as well.
! in addition, the modified Lambert projection will now require two hemispheres (NH and SH). We can handle this
! by means of an optional argument to the AddkVector routine; when the argument is present, the -k_z version
! of the direction is also added to the list.
! deal with each point group symmetry separately or in sets, depending on the value of isym
select case (self%isym)
case (1) ! triclinic 1
istart = -npx
iend = npx
jstart = -npy
jend = npy
do j=jstart,jend
do i=istart,iend !
xy = (/ dble(i), dble(j) /) * self%delta
call self%AddkVector(cell,Diff,ktail,xy,i,j,addSH = yes)
end do
end do
case (2) ! triclinic -1
istart = -npx
iend = npx
jstart = -npy
jend = npy
do j=jstart,jend
do i=istart,iend !
xy = (/ dble(i), dble(j) /) * self%delta
call self%AddkVector(cell,Diff,ktail,xy,i,j)
end do
end do
case (3) ! monoclinic 2
istart = 0
iend = npx
jstart = -npy
jend = npy
do j=jstart,jend
do i=istart,iend !
xy = (/ dble(i), dble(j) /) * self%delta
call self%AddkVector(cell,Diff,ktail,xy,i,j, addSH = yes)
end do
end do
case (4) ! monoclinic m
istart = -npx
iend = npx
jstart = 0
jend = npy
do j=jstart,jend
do i=istart,iend !
xy = (/ dble(i), dble(j) /) * self%delta
call self%AddkVector(cell,Diff,ktail,xy,i,j, addSH = yes)
end do
end do
case (5) ! monoclinic 2/m, orthorhombic 222, mm2, tetragonal 4, -4
istart = 0
iend = npx
jstart = 0
jend = npy
do j=jstart,jend
do i=istart,iend !
xy = (/ dble(i), dble(j) /) * self%delta
call self%AddkVector(cell,Diff,ktail,xy,i,j, addSH = yes)
end do
end do
case (6) ! orthorhombic mmm, tetragonal 4/m, 422, -4m2, cubic m-3, 432 (for now)
istart = 0
iend = npx
jstart = 0
jend = npy
do j=jstart,jend
do i=istart,iend !
xy = (/ dble(i), dble(j) /) * self%delta
call self%AddkVector(cell,Diff,ktail,xy,i,j)
end do
end do
case (7) ! tetragonal 4mm
istart = 0
iend = npx
jstart = 0
jend = npx
do i=istart,iend
do j=jstart,i !
xy = (/ dble(i), dble(j) /) * self%delta
call self%AddkVector(cell,Diff,ktail,xy,i,j, addSH = yes)
end do
end do
case (8) ! tetragonal -42m, cubic -43m (for now)
istart = 0
iend = npx
jstart = -npx
jend = npx
do i=istart,iend
do j=-i, i !
xy = (/ dble(i), dble(j) /) * self%delta
call self%AddkVector(cell,Diff,ktail,xy,i,j)
end do
end do
case (9) ! tetragonal 4/mmm, cubic m-3m (for now)
istart = 0
iend = npx
jstart = 0
jend = npx
do i=istart,iend
do j=jstart,i !
xy = (/ dble(i), dble(j) /) * self%delta
call self%AddkVector(cell,Diff,ktail,xy,i,j)
end do
end do
! cases 10 through 19 are all on a hexagonal grid...
! for now (08/31/15), we have not yet implemented the rhombohedral setting of the trigonal space groups;
! this case is truly a pain in the neck to implement...
case (10) ! hexagonal 3
istart = 0
iend = npx
jstart = 0
jend = npx
do j=jstart,jend
do i=istart,iend !
xy = (/ dble(i), dble(j) /) * self%delta
if (InsideHexGrid(xy)) call self%AddkVector(cell,Diff,ktail,xy,i,j,hexgrid, addSH = yes)
end do
end do
case (11) ! rhombohedral 3
call Message%printError('Calckvectors: ','rhombohedral setting not yet implemented, use hexagonal setting instead')
! istart = 0
! iend = npx
! jstart = 0
! jend = npx
! do j=jstart,jend
! do i=istart,iend !
! ii = 2*j-i
! jj = j-2*i
! xy = (/ dble(ii), dble(jj) /) * delta * LPs%isrt
! if (InsideHexGrid(xy)) call self%AddkVector(cell,Diff,ktail,xy,ii,jj,hexgrid, addSH = yes)
! end do
! end do
case (12) ! hexagonal -3, 321, -6; [not implemented: rhombohedral 32]
if ((SG%getSpaceGrouptrigonal()).and.(SG%getSpaceGroupsecond())) then
call Message%printError('Calckvectors: ','rhombohedral setting not yet implemented, use hexagonal setting instead')
else
istart = 0
iend = npx
jstart = 0
jend = npx
do j=jstart,jend
do i=istart,iend !
xy = (/ dble(i), dble(j) /) * self%delta
if (InsideHexGrid(xy)) call self%AddkVector(cell,Diff,ktail,xy,i,j,hexgrid)
end do
end do
end if
case (13) ! [not implemented: rhombohedral -3], hexagonal 312 [ modified 7/31/18, MDG ]
if ((SG%getSpaceGrouptrigonal()).and.(SG%getSpaceGroupsecond())) then
call Message%printError('Calckvectors: ','rhombohedral setting not yet implemented, use hexagonal setting instead')
else
istart = 0
iend = npx
jstart = 0
jend = -npx
do j=jstart,jend,-1
do i=istart+j/2,iend !
xy = (/ dble(i), dble(j) /) * self%delta
if (InsideHexGrid(xy)) then
! call self%AddkVector(cell,Diff,ktail,xy,-i,-j,hexgrid)
! ktail%k(2) = -ktail%k(2)
call self%AddkVector(cell,Diff,ktail,xy,i,j,hexgrid)
end if
end do
end do
istart = 0
iend = npx
jstart = 0
jend = npx
do i=istart,iend !
do j=jstart,i/2
xy = (/ dble(i), dble(j) /) * self%delta
if (InsideHexGrid(xy)) then
call self%AddkVector(cell,Diff,ktail,xy,i,j,hexgrid)
! call self%AddkVector(cell,Diff,ktail,xy,-i,-j,hexgrid)
! ktail%k(2) = -ktail%k(2)
end if
end do
end do
end if
case (14) ! hexagonal 3m1, [not implemented: rhombohedral 3m]
if ((SG%getSpaceGrouptrigonal()).and.(SG%getSpaceGroupsecond())) then
call Message%printError('Calckvectors: ','rhombohedral setting not yet implemented, use hexagonal setting instead')
else
istart = 1
iend = npx
jstart = 1
jend = npx
do j=jstart,jend
do i=istart+(j-1)/2,2*j
xy = (/ dble(i), dble(j) /) * self%delta
if (InsideHexGrid(xy)) then
call self%AddkVector(cell,Diff,ktail,xy,i,j,hexgrid, addSH = yes)
end if
end do
end do
end if
case (15) ! hexagonal 31m, 6
istart = 0
iend = npx
jstart = 1
jend = npx
do j=jstart,jend
do i=istart+j,jend
xy = (/ dble(i), dble(j) /) * self%delta
if (InsideHexGrid(xy)) then
call self%AddkVector(cell,Diff,ktail,xy,i,j,hexgrid, addSH = yes)
end if
end do
end do
case (16) ! hexagonal -3m1, 622, -6m2 [not implemented: rhombohedral -3m]
if ((SG%getSpaceGrouptrigonal()).and.(SG%getSpaceGroupsecond())) then
call Message%printError('Calckvectors: ','rhombohedral setting not yet implemented, use hexagonal setting instead')
else
istart = 0
iend = npx
jstart = 0
jend = npx
eps = 1.0D-4
do j=jstart,jend
do i=istart,iend
xy = (/ dble(i), dble(j) /) * self%delta
xx = dble(i)-dble(j)/2.D0
yy = dble(j)*LPs%srt
check = .TRUE.
if (xx.lt.0.D0) then
check = .FALSE.
else
if (xx.ge.0.D0) then
yy = datan2(yy,xx)
if (yy .lt. (LPs%Pi/6.D0-eps)) check = .FALSE.
end if
end if
if (InsideHexGrid(xy).and.(check)) call self%AddkVector(cell,Diff,ktail,xy,i,j,hexgrid)
end do
end do
end if
case (17) ! hexagonal -31m, 6/m, -62m
istart = 0
iend = npx
jstart = 0
jend = npx
eps = 1.0D-4
do j=jstart,jend
do i=istart,iend
xy = (/ dble(i), dble(j) /) * self%delta
xx = dble(i)-dble(j)/2.D0
yy = dble(j)*LPs%srt
check = .TRUE.
if (xx.lt.0.D0) then
check = .FALSE.
else
if (xx.ge.0.D0) then
yy = datan2(yy,xx)
if (yy.gt.(cPi/3.D0+eps)) check = .FALSE.
end if
end if
if (InsideHexGrid(xy).and.(check)) call self%AddkVector(cell,Diff,ktail,xy,i,j,hexgrid)
end do
end do
case (18) ! hexagonal 6mm
istart = 0
iend = npx
jstart = 0
jend = npx
eps = 1.0D-4
do j=jstart,jend
do i=istart,iend
xy = (/ dble(i), dble(j) /) * self%delta
xx = dble(i)-dble(j)/2.D0
yy = dble(j)*LPs%srt
check = .TRUE.
if (xx.lt.0.D0) then
check = .FALSE.
else
if (xx.ge.0.D0) then
yy = datan2(yy,xx)
if (yy.gt.(cPi/6.D0+eps)) check = .FALSE.
end if
end if
if (InsideHexGrid(xy).and.(check)) call self%AddkVector(cell,Diff,ktail,xy,i,j,hexgrid, addSH = yes)
end do
end do
case (19) ! hexagonal 6/mmm
istart = 0
iend = npx
jstart = 0
jend = npx
eps = 1.0D-4
do j=jstart,jend
do i=istart,iend
xy = (/ dble(i), dble(j) /) * self%delta
xx = dble(i)-dble(j)/2.D0
yy = dble(j)*LPs%srt
check = .TRUE.
if (xx.lt.0.D0) then
check = .FALSE.
else
if (xx.ge.0.D0) then
yy = datan2(yy, xx)
if (yy.gt.(LPs%Pi/6.D0+eps)) check = .FALSE.
end if
end if
if (InsideHexGrid(xy).and.(check)) call self%AddkVector(cell,Diff,ktail,xy,i,j,hexgrid)
end do
end do
end select
end if
! the next type of grid is the one used for the modified Lambert maps with Legendre lattitude grid values in the
! dynamical EBSD programs; this requires some special care, since these mappings are a little
! trickier than those of the standard mapmode. While it is possible to use a plain Lambert
! projection as well, here we only allow for the RoscaLambert mode with modified lattitudinal angles.
if (self%mapmode.eq.'RoscaLambertLegendre') then
self%delta = 1.D0 / dble(npx)
if (usehex) then ! hexagonal grid
hexgrid = .TRUE.
else ! square grid
hexgrid = .FALSE.
end if
! allocate the head of the linked list
allocate(self%klist,stat=istat) ! allocate new value
if (istat.ne.0) call Message%printError('Calckvectors',' unable to allocate self%klist pointer')
ktail => self%klist ! tail points to new value
nullify(ktail%next) ! nullify next in new value
self%numk = 1 ! keep track of number of k-vectors so far
ktail%hs = 1 ! this lies in the Northern Hemisphere
ktail%i = 0 ! i-index of beam
ktail%j = 0 ! j-index of beam
self%kstar = (/ 0.0, 0.0, 1.0 /) ! we always use c* as the center of the RoscaLambert projection
call cell%NormVec(self%kstar,'c') ! normalize incident direction
self%kstar = self%kstar/Diff%getWaveLength() ! divide by wavelength
! and transform to reciprocal crystal space using the structure matrix
ktail%k = matmul(transpose(cell%getdsm()),self%kstar)
ktail%kn = 1.0/Diff%getWaveLength()
! MDG: as of 8/25/15, we no longer use the Laue groups to determine the set of independent wave vectors,
! but instead we use the complete point group symmetry, as it should be. Upon reflection, using
! the Laue groups was equivalent to implicitly using Friedel's law, which makes all diffraction patterns
! centrosymmetric, and that is not correct for EBSD. So, the symbol isym now encodes the full point group,
! not the Laue group. This will require a modification in each calling program as well.
! in addition, the modified Lambert projection will now require two hemispheres (NH and SH). We can handle this
! by means of an optional argument to the AddkVector routine; when the argument is present, the -k_z version
! of the direction is also added to the list.
! The main difference with the regular RoscaLambert case is the fact that the latitudinal value of the direction
! cosines needs to be replaced by the one from the Legendre array, and then the in-plane direction cosines need to
! be properly scaled; this type of master pattern is then used for Spherical Indexing in the EMSphInx program.
! deal with each point group symmetry separately or in sets, depending on the value of isym
select case (self%isym)
case (1) ! triclinic 1
istart = -npx
iend = npx
jstart = -npy
jend = npy
do j=jstart,jend
do i=istart,iend !
xy = (/ dble(i), dble(j) /) * self%delta
sqring = maxval( (/ abs(i), abs(j) /) )
call self%AddkVector(cell,Diff,ktail,xy,i,j,addSH = yes,LegendreLattitude=LegendreArray(sqring))
end do
end do
case (2) ! triclinic -1
istart = -npx
iend = npx
jstart = -npy
jend = npy
do j=jstart,jend
do i=istart,iend !
xy = (/ dble(i), dble(j) /) * self%delta
sqring = maxval( (/ abs(i), abs(j) /) )
call self%AddkVector(cell,Diff,ktail,xy,i,j,LegendreLattitude=LegendreArray(sqring))
end do
end do
case (3) ! monoclinic 2
istart = 0
iend = npx
jstart = -npy
jend = npy
do j=jstart,jend
do i=istart,iend !
xy = (/ dble(i), dble(j) /) * self%delta
sqring = maxval( (/ abs(i), abs(j) /) )
call self%AddkVector(cell,Diff,ktail,xy,i,j, addSH = yes,LegendreLattitude=LegendreArray(sqring))
end do
end do
case (4) ! monoclinic m
istart = -npx
iend = npx
jstart = 0
jend = npy
do j=jstart,jend
do i=istart,iend !
xy = (/ dble(i), dble(j) /) * self%delta
sqring = maxval( (/ abs(i), abs(j) /) )
call self%AddkVector(cell,Diff,ktail,xy,i,j, addSH = yes,LegendreLattitude=LegendreArray(sqring))
end do
end do
case (5) ! monoclinic 2/m, orthorhombic 222, mm2, tetragonal 4, -4
istart = 0
iend = npx
jstart = 0
jend = npy
do j=jstart,jend
do i=istart,iend !
xy = (/ dble(i), dble(j) /) * self%delta
sqring = maxval( (/ abs(i), abs(j) /) )
call self%AddkVector(cell,Diff,ktail,xy,i,j, addSH = yes,LegendreLattitude=LegendreArray(sqring))
end do
end do
case (6) ! orthorhombic mmm, tetragonal 4/m, 422, -4m2, cubic m-3, 432 (for now)
istart = 0
iend = npx
jstart = 0
jend = npy
do j=jstart,jend
do i=istart,iend !
xy = (/ dble(i), dble(j) /) * self%delta
sqring = maxval( (/ abs(i), abs(j) /) )
call self%AddkVector(cell,Diff,ktail,xy,i,j,LegendreLattitude=LegendreArray(sqring))
end do
end do
case (7) ! tetragonal 4mm
istart = 0
iend = npx
jstart = 0
jend = npx
do i=istart,iend
do j=jstart,i !
xy = (/ dble(i), dble(j) /) * self%delta
sqring = maxval( (/ abs(i), abs(j) /) )
call self%AddkVector(cell,Diff,ktail,xy,i,j, addSH = yes,LegendreLattitude=LegendreArray(sqring))
end do
end do
case (8) ! tetragonal -42m, cubic -43m (for now)
istart = 0
iend = npx
jstart = -npx
jend = npx
do i=istart,iend
do j=-i, i !
xy = (/ dble(i), dble(j) /) * self%delta
sqring = maxval( (/ abs(i), abs(j) /) )
call self%AddkVector(cell,Diff,ktail,xy,i,j,LegendreLattitude=LegendreArray(sqring))
end do
end do
case (9) ! tetragonal 4/mmm, cubic m-3m (for now)
istart = 0
iend = npx
jstart = 0
jend = npx
do i=istart,iend
do j=jstart,i !
xy = (/ dble(i), dble(j) /) * self%delta
sqring = maxval( (/ abs(i), abs(j) /) )
call self%AddkVector(cell,Diff,ktail,xy,i,j,LegendreLattitude=LegendreArray(sqring))
end do
end do
! cases 10 through 19 are all on a hexagonal grid...
! for now (08/31/15), we have not yet implemented the rhombohedral setting of the trigonal space groups;
! this case is truly a pain in the neck to implement...
case (10) ! hexagonal 3
istart = 0
iend = npx
jstart = 0
jend = npx
do j=jstart,jend
do i=istart,iend !
xy = (/ dble(i), dble(j) /) * self%delta
sqring = maxval( (/ abs(i), abs(j) /) )
if (InsideHexGrid(xy)) call self%AddkVector(cell,Diff,ktail,xy,i,j,hexgrid, addSH = yes, &
LegendreLattitude=LegendreArray(sqring))
end do
end do
case (11) ! rhombohedral 3
call Message%printError('Calckvectors: ','rhombohedral setting not yet implemented, use hexagonal setting instead')
! istart = 0
! iend = npx
! jstart = 0
! jend = npx
! do j=jstart,jend
! do i=istart,iend !
! ii = 2*j-i
! jj = j-2*i
! xy = (/ dble(ii), dble(jj) /) * delta * LPs%isrt
! if (InsideHexGrid(xy)) call self%AddkVector(cell,Diff,ktail,xy,ii,jj,hexgrid, addSH = yes)
! end do
! end do
case (12) ! hexagonal -3, 321, -6; [not implemented: rhombohedral 32]
if ((SG%getSpaceGrouptrigonal()).and.(SG%getSpaceGroupsecond())) then
call Message%printError('Calckvectors: ','rhombohedral setting not yet implemented, use hexagonal setting instead')
else
istart = 0
iend = npx
jstart = 0
jend = npx
do j=jstart,jend
do i=istart,iend !
xy = (/ dble(i), dble(j) /) * self%delta
sqring = maxval( (/ abs(i), abs(j) /) )
if (InsideHexGrid(xy)) call self%AddkVector(cell,Diff,ktail,xy,i,j,hexgrid, &
LegendreLattitude=LegendreArray(sqring))
end do
end do
end if
case (13) ! [not implemented: rhombohedral -3], hexagonal 312 [ modified 7/31/18, MDG ]
if ((SG%getSpaceGrouptrigonal()).and.(SG%getSpaceGroupsecond())) then
call Message%printError('Calckvectors: ','rhombohedral setting not yet implemented, use hexagonal setting instead')
else
istart = 0
iend = npx
jstart = 0
jend = -npx
do j=jstart,jend,-1
do i=istart+j/2,iend !
xy = (/ dble(i), dble(j) /) * self%delta
if (InsideHexGrid(xy)) then
! call self%AddkVector(cell,Diff,ktail,xy,-i,-j,hexgrid)
! ktail%k(2) = -ktail%k(2)
sqring = maxval( (/ abs(i), abs(j) /) )
call self%AddkVector(cell,Diff,ktail,xy,i,j,hexgrid,LegendreLattitude=LegendreArray(sqring))
end if
end do
end do
istart = 0
iend = npx
jstart = 0
jend = npx
do i=istart,iend !
do j=jstart,i/2
xy = (/ dble(i), dble(j) /) * self%delta
if (InsideHexGrid(xy)) then
sqring = maxval( (/ abs(i), abs(j) /) )
call self%AddkVector(cell,Diff,ktail,xy,i,j,hexgrid,LegendreLattitude=LegendreArray(sqring))
! call self%AddkVector(cell,Diff,ktail,xy,-i,-j,hexgrid)
! ktail%k(2) = -ktail%k(2)
end if
end do
end do
end if
case (14) ! hexagonal 3m1, [not implemented: rhombohedral 3m]
if ((SG%getSpaceGrouptrigonal()).and.(SG%getSpaceGroupsecond())) then
call Message%printError('Calckvectors: ','rhombohedral setting not yet implemented, use hexagonal setting instead')
else
istart = 1
iend = npx
jstart = 1
jend = npx
do j=jstart,jend
do i=istart+(j-1)/2,2*j
xy = (/ dble(i), dble(j) /) * self%delta
sqring = maxval( (/ abs(i), abs(j) /) )
if (InsideHexGrid(xy)) then
call self%AddkVector(cell,Diff,ktail,xy,i,j,hexgrid, addSH = yes,LegendreLattitude=LegendreArray(sqring))
end if
end do
end do
end if
case (15) ! hexagonal 31m, 6
istart = 0
iend = npx
jstart = 1
jend = npx
do j=jstart,jend
do i=istart+j,jend
xy = (/ dble(i), dble(j) /) * self%delta
sqring = maxval( (/ abs(i), abs(j) /) )
if (InsideHexGrid(xy)) then
call self%AddkVector(cell,Diff,ktail,xy,i,j,hexgrid, addSH = yes,LegendreLattitude=LegendreArray(sqring))
end if
end do
end do
case (16) ! hexagonal -3m1, 622, -6m2 [not implemented: rhombohedral -3m]
if ((SG%getSpaceGrouptrigonal()).and.(SG%getSpaceGroupsecond())) then
call Message%printError('Calckvectors: ','rhombohedral setting not yet implemented, use hexagonal setting instead')
else
istart = 0
iend = npx
jstart = 0
jend = npx
eps = 1.0D-4
do j=jstart,jend
do i=istart,iend
xy = (/ dble(i), dble(j) /) * self%delta
xx = dble(i)-dble(j)/2.D0
yy = dble(j)*LPs%srt
check = .TRUE.
if (xx.lt.0.D0) then
check = .FALSE.
else
if (xx.ge.0.D0) then
yy = datan2(yy,xx)
if (yy .lt. (LPs%Pi/6.D0-eps)) check = .FALSE.
end if
end if
sqring = maxval( (/ abs(i), abs(j) /) )
if (InsideHexGrid(xy).and.(check)) call self%AddkVector(cell,Diff,ktail,xy,i,j,hexgrid,&
LegendreLattitude=LegendreArray(sqring))
end do
end do
end if
case (17) ! hexagonal -31m, 6/m, -62m
istart = 0
iend = npx
jstart = 0
jend = npx
eps = 1.0D-4
do j=jstart,jend
do i=istart,iend
xy = (/ dble(i), dble(j) /) * self%delta
xx = dble(i)-dble(j)/2.D0
yy = dble(j)*LPs%srt
check = .TRUE.
if (xx.lt.0.D0) then
check = .FALSE.
else
if (xx.ge.0.D0) then
yy = datan2(yy,xx)
if (yy.gt.(cPi/3.D0+eps)) check = .FALSE.
end if
end if
sqring = maxval( (/ abs(i), abs(j) /) )
if (InsideHexGrid(xy).and.(check)) call self%AddkVector(cell,Diff,ktail,xy,i,j,hexgrid, &
LegendreLattitude=LegendreArray(sqring))
end do
end do
case (18) ! hexagonal 6mm
istart = 0
iend = npx
jstart = 0
jend = npx
eps = 1.0D-4
do j=jstart,jend
do i=istart,iend
xy = (/ dble(i), dble(j) /) * self%delta
xx = dble(i)-dble(j)/2.D0
yy = dble(j)*LPs%srt
check = .TRUE.
if (xx.lt.0.D0) then
check = .FALSE.
else
if (xx.ge.0.D0) then
yy = datan2(yy,xx)
if (yy.gt.(cPi/6.D0+eps)) check = .FALSE.
end if
end if
sqring = maxval( (/ abs(i), abs(j) /) )
if (InsideHexGrid(xy).and.(check)) call self%AddkVector(cell,Diff,ktail,xy,i,j,hexgrid, &
addSH = yes,LegendreLattitude=LegendreArray(sqring))
end do
end do
case (19) ! hexagonal 6/mmm
istart = 0
iend = npx
jstart = 0
jend = npx
eps = 1.0D-4
do j=jstart,jend
do i=istart,iend
xy = (/ dble(i), dble(j) /) * self%delta
xx = dble(i)-dble(j)/2.D0
yy = dble(j)*LPs%srt
check = .TRUE.
if (xx.lt.0.D0) then
check = .FALSE.
else
if (xx.ge.0.D0) then
yy = datan2(yy, xx)
if (yy.gt.(LPs%Pi/6.D0+eps)) check = .FALSE.
end if
end if
sqring = maxval( (/ abs(i), abs(j) /) )
if (InsideHexGrid(xy).and.(check)) call self%AddkVector(cell,Diff,ktail,xy,i,j,hexgrid,&
LegendreLattitude=LegendreArray(sqring))
end do
end do
end select
end if
end subroutine Calckvectors_
recursive subroutine CalckvectorsPrecession_(self,cell,Diff,k,ga,precangle,prechalfwidth,precsample,precazimuthal,numk)
!DEC$ ATTRIBUTES DLLEXPORT :: CalckvectorsPrecession_
!> @author Marc De Graef, Carnegie Mellon University
!
!> @brief create a linked list of wave vectors for precession electron diffraction
!
!> @details This is a new version to test whether or not we can use the whole pattern
!> symmetry to determine the relevant list of incident wave vectors; this should be a
!> general routine, so that we do not need to consider each symmetry case separately.
!> This will require a floating point version of the Apply2DPGSymmetry routine in symmetry.f90.
!
!> @param cell unit cell pointer
!> @param Diff diffraction object (supplies the electron wavelength)
!> @param k central wave vector (direct space)
!> @param ga reciprocal lattice vector normal to k
!> @param precangle main precession angle [mrad]
!> @param prechalfwidth precession beam half width [mrad]
!> @param precsample number of samples in half width (2*precsample+1 concentric circles)
!> @param precazimuthal number of samples around each precession circle
!> @param numk total number of wave vectors in list (returned to the caller)
!
!> @date 03/05/14 MDG 1.0 original
!> @date 11/28/14 MDG 2.0 rewrite without global variables

use mod_global
use mod_io
use mod_diffraction
use mod_crystallography
use mod_Lambert

IMPLICIT NONE

class(kvectors_T),INTENT(INOUT)     :: self
type(Cell_T)       ,INTENT(IN)      :: cell
type(Diffraction_T),INTENT(INOUT)   :: Diff
type(io_T)                          :: Message
real(kind=dbl),INTENT(IN)           :: k(3)          !< initial wave vector
real(kind=dbl),INTENT(IN)           :: ga(3)         !< "horizontal" reciprocal lattice vector
real(kind=sgl),INTENT(IN)           :: precangle     !< precession angle in [mrad]
real(kind=sgl),INTENT(IN)           :: prechalfwidth !< halfwidth of tilted beam [mrad]
integer(kind=irg),INTENT(IN)        :: precsample    !< number of kvectors along beam tilt
integer(kind=irg),INTENT(IN)        :: precazimuthal !< number of kvectors along circumference
integer(kind=irg),INTENT(OUT)       :: numk          !< total number of kvectors in linked list

type(kvectorlist),pointer           :: ktail
integer(kind=irg)                   :: istat, i, il, ith
real(kind=dbl)                      :: gp, dgp, glen, gan(3), gperp(3), kstar(3), dth
! mLambda is a plain scalar; it was previously (incorrectly) declared inside an
! allocatable array declaration list, relying on F2003 auto-allocation on assignment.
real(kind=dbl)                      :: mLambda
real(kind=dbl),allocatable          :: gw(:), ct(:), st(:), th(:)
real(kind=sgl)                      :: kt(3),kr(3)
real(kind=sgl)                      :: ktlen

! compute geometrical factors
glen = cell%CalcLength(ga,'r')                       ! length of ga
gan = ga/glen                                        ! normalized ga
mLambda = Diff%getWaveLength()                       ! electron wavelength
! NOTE(review): gp multiplies by mLambda while dgp divides by mLambda and glen;
! the two expressions look dimensionally inconsistent — confirm the intended
! units against the reference implementation before relying on precsample > 0.
gp = 2.0*sin(precangle/1000.0)*mLambda               ! precession angle converted to reciprocal length
dgp = 0.0
if (precsample.gt.0) then
  dgp = 2.0*sin(0.001*(precangle-prechalfwidth))/mLambda/glen/float(precsample) ! half width step size
end if
allocate(gw(2*precsample+1))                         ! sampling radii, one per concentric circle
gw = gp + dgp * (/ (i,i=-precsample,precsample) /)

! pre-compute cosines and sines of the azimuthal angles
allocate(ct(precazimuthal),st(precazimuthal), th(precazimuthal))
dth = 2.D0*cPi / dble(precazimuthal)
th = (/ (i-1,i=1,precazimuthal) /) * dth
ct = cos(th)
st = sin(th)

call cell%TransSpace(k,kstar,'d','r')                ! transform incident direction to reciprocal space
call cell%CalcCross(ga,kstar,gperp,'r','r',0)        ! compute g_perp = ga x k
call cell%NormVec(gperp,'r')                         ! normalize g_perp
call cell%NormVec(kstar,'r')                         ! normalize reciprocal beam vector

! first, if self%klist already exists, delete the entire linked list
if (associated(self%klist)) then
  call self%Delete_kvectorlist()
end if

! allocate the head of the linked list
allocate(self%klist,stat=istat)
if (istat.ne.0) call Message%printError('Calckvectors',' unable to allocate self%klist pointer')
ktail => self%klist
nullify(ktail%next)
self%numk = 0                                        ! the head node is populated in the first loop pass

! loop around each of the precession circles
do il = 1,2*precsample+1                             ! number of concentric circles
  do ith = 1,precazimuthal                           ! number of points along each circle
! make a new node in the list, except for the first point, which fills the head node
    if (self%numk.ne.0) then
      allocate(ktail%next,stat=istat)
      if (istat.ne.0) call Message%printError('Add_knode:',' unable to allocate pointer')
      ktail => ktail%next
      nullify(ktail%next)
    end if
! and populate the fields
    kt = - gw(il)*ct(ith)*gan - gw(il)*st(ith)*gperp ! tangential component of k
    ktail%kt = kt                                    ! store tangential component of k
    ktlen = cell%CalcLength(kt,'r')**2               ! squared length of tangential component
    kr = kt + sqrt(1.0/mLambda**2 - ktlen)*kstar     ! complete wave vector
    ktail%k = kr                                     ! store in pointer list
    ktail%kn = cell%CalcDot(ktail%k,kstar,'r')       ! normal component of k
    self%numk = self%numk + 1
  end do
end do

! BUGFIX: numk is INTENT(OUT) but was never assigned, leaving it undefined in
! the caller; return the actual number of wave vectors generated.
numk = self%numk

end subroutine CalckvectorsPrecession_
!--------------------------------------------------------------------------
recursive function InsideHexGrid(xy) result(res)
!DEC$ ATTRIBUTES DLLEXPORT :: InsideHexGrid
!! author: MDG
!! version: 1.0
!! date: 02/02/20
!!
!! test whether a 2D point falls inside the standard hexagonal Lambert grid
!!
!! The reference hexagon has unit edge length with one vertex at (1,0). The point
!! is folded into the first quadrant of the sheared hexagonal frame; it lies inside
!! the hexagon when it fits in the bounding box (1, sqrt(3)/2) AND sits on the inner
!! side of the slanted edge |x| + |y|/sqrt(3) = 1.

IMPLICIT NONE

real(kind=dbl),INTENT(IN)   :: xy(2)
logical                     :: res

real(kind=dbl)              :: u, v

! fold the point into the first quadrant of the sheared hexagonal frame
u = abs(xy(1)-0.5D0*xy(2))
v = abs(xy(2)*LPs%srt)

! bounding-box acceptance followed by the slanted-edge test
res = .FALSE.
if ((u.le.1.D0).and.(v.le.LPs%srt)) then
  if (u+v*LPs%isrt .le. 1.D0) res = .TRUE.
end if

end function InsideHexGrid
!--------------------------------------------------------------------------
recursive subroutine CalckvectorsSymmetry_(self,cell,Diff,TDPG,ga,npx,npy,ijmax,klaue,debug)
!DEC$ ATTRIBUTES DLLEXPORT :: CalckvectorsSymmetry_
!! create a linked list of incident wave vectors, using the 2D whole-pattern
!! point group symmetry (TDPG) to keep only symmetry-inequivalent beam
!! directions when the Laue center coincides with the zone axis (klaue = 0);
!! otherwise every grid point inside the conical truncation is added.

use mod_io
use mod_diffraction
use mod_crystallography
use mod_Lambert
use mod_symmetry2D

IMPLICIT NONE

class(kvectors_T),INTENT(INOUT)     :: self
type(Cell_T),INTENT(INOUT)          :: cell
type(Diffraction_T),INTENT(INOUT)   :: Diff
type(symdata2D),INTENT(INOUT)       :: TDPG
!f2py intent(in,out) ::  TDPG
real(kind=dbl),INTENT(IN)           :: ga(3)
 !! "horizontal" reciprocal lattice vector
integer(kind=irg),INTENT(IN)        :: npx
 !! number of kvectors along x
integer(kind=irg),INTENT(IN)        :: npy
 !! number of kvectors along y
integer(kind=irg),INTENT(INOUT)     :: ijmax
 !! max parameter used for Conical and StandardConical modes
 !! NOTE(review): compared against i*i+j*j below, so this is a SQUARED radius;
 !! declared INOUT but only read in this routine — confirm callers rely on that.
real(kind=sgl),INTENT(IN)           :: klaue(2)
 !! fractional Laue center coordinates
logical,INTENT(IN),OPTIONAL         :: debug
 !! currently unused in this routine

type(IO_T)                          :: Message
! kselected tracks the state of each candidate grid point:
!   0 = not yet considered, 1 = marked as a symmetry equivalent of an added
!   point, 2 = explicitly added to the linked list
integer(kind=irg),allocatable       :: kselected(:,:)
integer(kind=irg)                   :: istat,i,j, iequiv(2,12), nequiv, jj, nx, ny
real(kind=dbl)                      :: glen, Lauexy(2)
logical                             :: hexgrid = .FALSE.   ! NOTE(review): unused here
real(kind=sgl)                      :: kt(3),kr(3)
real(kind=sgl)                      :: ktlen
type(kvectorlist),pointer           :: ktail

nx = 2*npx
ny = 2*npy
allocate(kselected(-nx:nx,-ny:ny))

! initialize the kselected array to 0 (no points considered yet)
kselected = 0

! compute geometrical factors
 glen = cell%CalcLength(ga,'r')                     ! length of ga
 Lauexy = glen * klaue                              ! scaled Laue center coordinates
 self%gan = ga/glen                                 ! normalized ga
 self%delta = 2.0*self%ktmax*glen/(2.0*float(npx)+1.0)  ! grid step size in nm-1
 call cell%TransSpace(self%kinp,self%kstar,'d','r') ! transform incident direction to reciprocal space
 call cell%CalcCross(ga,self%kstar,self%gperp,'r','r',0)! compute g_perp = ga x k
 call cell%NormVec(self%gperp,'r')                  ! normalize g_perp
 call cell%NormVec(self%kstar,'r')                  ! normalize reciprocal beam vector

! allocate the head and tail of the linked list
 allocate(self%klist,stat=istat)                    ! allocate new value
 if (istat.ne.0) call Message%printError('Calckvectors','unable to allocate self%klist pointer')
 ktail => self%klist                                ! tail points to new value
 nullify(ktail%next)                                ! nullify next in new value
 self%numk = 1                                      ! keep track of number of k-vectors so far
 ktail%i = 0                                        ! i-index of beam
 ktail%j = 0                                        ! j-index of beam

! use the Laue center coordinates to define the tangential component of the incident wave vector
 kt = - Lauexy(1)*self%gan - Lauexy(2)*self%gperp   ! tangential component of k
 ktail%kt = kt                                      ! store tangential component of k
 ktlen = cell%CalcLength(kt,'r')**2                 ! squared length of tangential component

 kr = kt + sqrt(1.0/Diff%getWaveLength()**2 - ktlen)*self%kstar ! complete wave vector
 ktail%k = kr                                       ! store in pointer list
 ktail%kn = cell%CalcDot(ktail%k,self%kstar,'r')    ! normal component of k

! the central beam (0,0) is the head node and is flagged as explicitly added
 kselected(0,0) = 2

 if (maxval(abs(klaue)).eq.0.0) then                ! zone axis orientation, so we should use symmetry
! we scan over the entire range of potential beam directions, defined by npx and npy along with
! the conical truncation parameter ijmax; for each point we check whether or not it has been considered
! before; if it has, we move on, if it hasn't, then we add this point to the linked list in the usual way.
! we do this by computing the equivalent (i,j) using the Whole Pattern symmetry.
   do i=-nx,nx
     do j=-ny,ny
       if (kselected(i,j).eq.0) then
         if ((i*i+j*j).le.ijmax) then
! first of all, add the present point to the linked list (zero Laue shift in this branch)
           call self%Add_knode(cell,Diff,ktail,i,j,(/ 0.0,0.0/))
! then compute the equivalent points and flag all of them in kselected
           call Apply2DPGSymmetry(TDPG,i,j,self%isym,iequiv,nequiv)
           kselected(iequiv(1,1),iequiv(2,1)) = 2
           if (nequiv.gt.1) then
             do jj=2,nequiv
! equivalents are marked 1 so they are skipped but distinguishable from added points
               kselected(iequiv(1,jj),iequiv(2,jj)) = 1
             end do
           end if
         end if
       end if
     end do
   end do
 else                                               ! not a zone axis, so no symmmetry
   do i=-nx,nx
     do j=-ny,ny
       if (kselected(i,j).eq.0) then
         if ((i*i+j*j).le.ijmax) then
! first of all, add the present point to the linked list (with the Laue shift applied)
           call self%Add_knode(cell,Diff,ktail,i,j,sngl(Lauexy))
           kselected(i,j) = 2
         end if
       end if
     end do
   end do
 end if

! and clean up the kselected array
 deallocate(kselected)

end subroutine CalckvectorsSymmetry_
!--------------------------------------------------------------------------
recursive subroutine Add_knode_(self,cell,Diff,ktail,i,j,klaue,hexgrid)
!DEC$ ATTRIBUTES DLLEXPORT :: Add_knode_
!! author: MDG
!! version: 1.0
!! date: 02/02/20
!!
!! add one entry to the linked wave vector list (standard mode)
!!
!! On exit, ktail points to the newly appended node and self%numk has been
!! incremented. The tangential component kt is computed on a square grid
!! (offset by the Laue center klaue) or, when hexgrid is supplied and true,
!! on a hexagonal grid.

use mod_io
use mod_crystallography
use mod_diffraction

IMPLICIT NONE

class(kvectors_T),INTENT(INOUT)     :: self
type(Cell_T),INTENT(INOUT)          :: cell
type(Diffraction_T),INTENT(INOUT)   :: Diff
type(kvectorlist),pointer           :: ktail
integer(kind=irg),INTENT(IN)        :: i
integer(kind=irg),INTENT(IN)        :: j
real(kind=sgl),INTENT(IN)           :: klaue(2)
logical,INTENT(IN),OPTIONAL         :: hexgrid

type(IO_T)                          :: Message
real(kind=sgl)                      :: kt(3),kr(3)
real(kind=sgl)                      :: ktlen
integer(kind=irg)                   :: istat
logical                             :: usehx

! BUGFIX: the original tested only present(hexgrid), so an explicit
! hexgrid=.FALSE. argument still selected the hexagonal branch; we now honor
! the value of the flag when it is supplied (omitting it still means square).
usehx = .FALSE.
if (present(hexgrid)) usehx = hexgrid

allocate(ktail%next,stat=istat)                  ! allocate new value
if (istat.ne.0) call Message%printError('Add_knode:',' unable to allocate pointer')
ktail => ktail%next                              ! tail points to new value
nullify(ktail%next)                              ! nullify next in new value
self%numk = self%numk + 1                        ! keep track of number of k-vectors so far
ktail%i = i                                      ! i-index of beam
ktail%j = j                                      ! j-index of beam

! is it a square or hexagonal grid ?
if (usehx) then
  kt = -(float(i)-float(j)*0.5)*self%delta*self%gan - float(j)*self%delta*self%gperp*0.5*sqrt(3.0)  ! tangential component of k
else
  kt = -(klaue(1)+float(i)*self%delta)*self%gan - (klaue(2)+float(j)*self%delta)*self%gperp         ! tangential component of k
end if

ktail%kt = kt                                    ! store tangential component of k
ktlen = cell%CalcLength(kt,'r')**2               ! squared length of tangential component

kr = kt + sqrt(1.0/Diff%getWaveLength()**2 - ktlen)*self%kstar ! complete wave vector
ktail%k = kr                                     ! store in pointer list
ktail%kn = cell%CalcDot(ktail%k,self%kstar,'r')  ! normal component of k

end subroutine Add_knode_
!--------------------------------------------------------------------------
recursive function GetSextant(x,y) result(res)
!DEC$ ATTRIBUTES DLLEXPORT :: GetSextant
!! author: MDG
!! version: 1.0
!! date: 02/02/20
!!
!! return the index (0..5) of the sextant containing the point (x,y)
!! (used for the RoscaLambert mapmode); the sextant boundaries are the
!! lines y = +/- sqrt(3) x and the x axis.

IMPLICIT NONE

real(kind=dbl),INTENT(IN):: x, y

real(kind=dbl),parameter :: srt = 1.732050808  ! sqrt(3.D0)
integer(kind=irg)        :: res
real(kind=dbl)           :: xx

! |x| sqrt(3); comparing |y| against this decides the axis-straddling sextants
xx = dabs(x*srt)

if (y.ge.0) then
! upper half plane: sextant 0 straddles the +y axis; 1 and 5 flank it
  if (y.ge.xx) then
    res = 0
  else if (x.gt.0.D0) then
    res = 1
  else
    res = 5
  end if
else
! lower half plane: sextant 3 straddles the -y axis; 2 and 4 flank it
  if (dabs(y).ge.xx) then
    res = 3
  else if (x.gt.0.D0) then
    res = 2
  else
    res = 4
  end if
end if

end function GetSextant
!--------------------------------------------------------------------------
recursive subroutine AddkVector_(self,cell,Diff,ktail,xyval,i,j,usehex,addSH,LegendreLattitude)
!DEC$ ATTRIBUTES DLLEXPORT :: AddkVector_
!! author: MDG
!! version: 1.0
!! date: 02/02/20
!!
!! add a k-vector for square or hexagonal grid sampling mode (used for RoscaLambert mapmode)
!!
!! The 2D grid point xyval is projected onto the unit sphere with the inverse
!! (square or hexagonal) Lambert mapping; optionally the lattitudinal component
!! is snapped to a Legendre grid value, and optionally the Southern-hemisphere
!! partner (-k_z) is appended to the list as well.

use mod_io
use mod_diffraction
use mod_crystallography
use mod_Lambert

IMPLICIT NONE

class(kvectors_T),INTENT(INOUT)     :: self
type(Cell_T),INTENT(INOUT)          :: cell
type(Diffraction_T),INTENT(INOUT)   :: Diff
type(kvectorlist),pointer           :: ktail
real(kind=dbl),INTENT(IN)           :: xyval(2)
integer(kind=irg),INTENT(IN)        :: i
integer(kind=irg),INTENT(IN)        :: j
logical,INTENT(IN),OPTIONAL         :: usehex
logical,INTENT(IN),OPTIONAL         :: addSH
real(kind=dbl),INTENT(IN),OPTIONAL  :: LegendreLattitude

type(Lambert_T)                     :: Lambert
type(IO_T)                          :: Message
integer(kind=irg)                   :: istat, ierr
real(kind=dbl)                      :: kstar(3), p
logical                             :: hex

! BUGFIX: the original tested only present(usehex), so an explicit
! usehex=.FALSE. argument still selected the hexagonal mapping; we now honor
! the value of the flag when supplied (consistent with the addSH test below).
hex = .FALSE.
if (present(usehex)) hex = usehex

! project the coordinate up to the sphere, to get a unit 3D vector kstar in cartesian space
Lambert = Lambert_T( xyd = xyval )
if (hex) then
  ierr = Lambert%LambertHexToSphere(kstar)
else
  ierr = Lambert%LambertSquareToSphere(kstar)
end if

! do we need to modify the direction cosines to coincide with the Legendre lattitudinal grid values?
if (present(LegendreLattitude)) then
  if (kstar(3).ne.1.D0) then
! the factor p rescales the x and y components of kstar to maintain a unit vector
    p = sqrt((1.D0-LegendreLattitude**2)/(1.D0-kstar(3)**2))
    kstar = (/ p*kstar(1), p*kstar(2), LegendreLattitude /)
  end if
end if

! append the Northern-hemisphere vector to the linked list
allocate(ktail%next,stat=istat)                  ! allocate new value
if (istat.ne.0) call Message%printError('Addkvector:',' unable to allocate ktail pointer')
ktail => ktail%next                              ! tail points to new value
nullify(ktail%next)                              ! nullify next in new value
self%numk = self%numk + 1                        ! keep track of number of k-vectors so far
ktail%hs = 1                                     ! which hemisphere (Northern = 1, Southern = -1)
ktail%i = i                                      ! i-index of beam
ktail%j = j                                      ! j-index of beam
call cell%NormVec(kstar,'c')                     ! normalize incident direction in cartesian space
kstar = kstar/Diff%getWaveLength()               ! divide by wavelength
! and transform to reciprocal crystal space using the direct structure matrix
call cell%TransSpace(kstar, ktail%k, 'c', 'r')
ktail%kn = 1.0/Diff%getWaveLength()

! do we also need to add the Southern-hemisphere partner ?
if (present(addSH)) then
  if (addSH.eqv..TRUE.) then
    allocate(ktail%next,stat=istat)              ! allocate new value
    if (istat.ne.0) call Message%printError('Addkvector:',' unable to allocate ktail pointer')
    ktail => ktail%next                          ! tail points to new value
    nullify(ktail%next)                          ! nullify next in new value
    self%numk = self%numk + 1                    ! keep track of number of k-vectors so far
    ktail%hs = -1                                ! which hemisphere (Northern = 1, Southern = -1)
    ktail%i = i                                  ! i-index of beam
    ktail%j = j                                  ! j-index of beam
! get the Southern hemisphere version of kstar
    kstar(3) = -kstar(3)
! and transform to reciprocal crystal space using the direct structure matrix
    call cell%TransSpace(kstar, ktail%k, 'c', 'r')
    ktail%kn = 1.0/Diff%getWaveLength()
  end if
end if

end subroutine AddkVector_
!--------------------------------------------------------------------------
recursive subroutine CalckvectorsECP_(self,cell,Diff,rotmat,thetac,npx,npy,FN)
!DEC$ ATTRIBUTES DLLEXPORT :: CalckvectorsECP_
!! author: Saransh Singh
!! version: 1.0
!! date: 02/03/20
!!
!! create a linked list of wave vectors for conical incidence in ECP
!!
!! The incident directions form a (2*npx+1) x (2*npy+1) grid of tangential
!! components covering a cone of semi-angle thetac around the optic axis;
!! each direction is rotated by rotmat, converted to reciprocal space, and
!! appended to the linked list self%klist.

use mod_io
use mod_diffraction
use mod_crystallography
use mod_Lambert

IMPLICIT NONE

class(kvectors_T),INTENT(INOUT) :: self
type(Cell_T),INTENT(INOUT) :: cell
type(Diffraction_T),INTENT(INOUT) :: Diff
real(kind=sgl),INTENT(IN) :: rotmat(3,3) !< initial wave vector
real(kind=sgl),INTENT(IN) :: thetac !< half angle of cone of incident beam directions in degrees
integer(kind=irg),INTENT(IN) :: npx !< number of kvectors along x
integer(kind=irg),INTENT(IN) :: npy !< number of kvectors along y
real(kind=sgl),INTENT(IN) :: FN(3) ! foil normal in reciprocal frame

type(IO_T) :: Message
type(kvectorlist),pointer :: ktail
real(kind=sgl) :: thetacr
integer(kind=irg) :: i,j,imin,imax,jmin,jmax,ijmax,istat
real(kind=sgl) :: kk(3),krec(3)
real(kind=sgl) :: k(3),kcart(3),kkk ! NOTE(review): kcart (and ijmax below) are assigned/declared but never used here

! start from a fresh, empty list
if (associated(self%klist)) then ! deallocate the entire linked list
call self%Delete_kvectorlist()
end if

! central wave vector along the optic axis, scaled to 1/lambda
k = (/0.0,0.0,1.0/)
k = k/Diff%getWaveLength()
thetacr = dtor*thetac
kk = k
! maximum tangential component of the cone and the corresponding grid step
self%ktmax = tan(thetacr)*sqrt(sum(kk**2))
self%delta = 2.0*self%ktmax/(2.0*float(npx)+1.0)

imin = -npx
imax = npx
jmin = -npy
jmax = npy
ijmax = npx**2

! allocate the head and tail of the linked list
allocate(self%klist,stat=istat) ! allocate new value
if (istat.ne.0) call Message%printError('Calckvectors','unable to allocate self%klist pointer')
ktail => self%klist ! tail points to new value
nullify(ktail%next) ! nullify next in new value
self%numk = 0
! head node holds the central (untilted) beam direction
ktail%i = 0 ! i-index of beam
ktail%j = 0 ! j-index of beam
ktail%kt = (/0.0,0.0,0.0/) ! no tangential component for central beam direction
ktail%k = matmul(rotmat,kk)
k = ktail%k
call cell%TransSpace(k,krec,'c','r')
ktail%k = krec
kkk = cell%CalcDot(sngl(ktail%k),FN,'r') ! normal component
ktail%kn = kkk

! loop over the full tangential grid.  NOTE(review): (i,j) = (0,0) is generated
! again inside this loop, so the central direction appears twice in the list,
! while self%numk counts only the loop-generated nodes -- confirm intended.
do i = imin,imax
do j = jmin,jmax
allocate(ktail%next)
ktail => ktail%next
nullify(ktail%next)
self%numk = self%numk + 1
ktail%i = i
ktail%j = j
ktail%kt = (/self%delta*i,self%delta*j,0.D0/)
! full wave vector = tangential component + central beam, renormalized to 1/lambda
ktail%k = ktail%kt + kk
call cell%NormVec(ktail%k,'c')
ktail%k = ktail%k/Diff%getWaveLength()
! rotate both vectors by rotmat
ktail%kt = matmul(rotmat,ktail%kt)
ktail%k = matmul(rotmat,ktail%k)
! and transform to reciprocal crystal space
k = ktail%k
call cell%TransSpace(k,krec,'c','r')
ktail%k = krec
k = ktail%kt
call cell%TransSpace(k,krec,'c','r')
ktail%kt = krec
! component of k along the foil normal
kkk = cell%CalcDot(sngl(ktail%k),FN,'r')
ktail%kn = kkk
end do
end do

end subroutine CalckvectorsECP_
!--------------------------------------------------------------------------
recursive subroutine CalckvectorsGPU_(self,cell,Diff,npx,npix,centralpix,usehex)
!DEC$ ATTRIBUTES DLLEXPORT :: CalckvectorsGPU_
!! author: Saransh Singh
!! version: 1.0
!! date: 02/03/20
!!
!! create a linked list of wave vectors for approximate master pattern calculation
!!
!! this subroutine calculates the list of k vectors in a small patch of
!! size npix*npix in lambert space. This central beam is used to calculate the list
!! of g vectors for the whole patch, and the k vectors are used to calculate the
!! diagonal components i.e. the sg values.

use mod_io
use mod_diffraction
use mod_crystallography
use mod_Lambert

IMPLICIT NONE

class(kvectors_T),INTENT(INOUT) :: self
type(cell_T),INTENT(INOUT) :: cell
type(Diffraction_T),INTENT(INOUT) :: Diff
integer(kind=irg),INTENT(IN) :: npx ! 2*npx+1 is size of master pattern
integer(kind=irg),INTENT(IN) :: npix ! the small patch will be 4*npix*npix
integer(kind=irg),INTENT(IN) :: centralpix(2)
logical,INTENT(IN),OPTIONAL :: usehex

type(IO_T) :: Message
type(kvectorlist),pointer :: ktail
logical :: verbose,switchmirror,hex ! NOTE(review): verbose and switchmirror are never used; hex is set but never read
integer(kind=irg) :: i,j,isym,pgnum,ks,istat
integer(kind=irg) :: istart,iend,jstart,jend
integer(kind=irg),parameter :: LaueTest(11) = (/ 149, 151, 153, 156, 158, 160, 161, 164, 165, 166, 167 /) ! space groups with 2 or mirror at 30 degrees
real(kind=dbl) :: x,y,q,XX,YY,xp,yp,rr, xy(2)

! allocate the head node; it will hold the central pixel's wave vector
allocate(self%klist,stat=istat) ! allocate new value
if (istat.ne.0) call Message%printError('Calckvectors',' unable to allocate self%klist pointer')
ktail => self%klist ! tail points to new value
nullify(ktail%next)
self%numk = 1

! we deal with the symmetry of the master pattern in the main subroutine

! bounds of the 2*npix x 2*npix patch centred on centralpix
istart = centralpix(1)-npix
iend = centralpix(1)+npix-1
jstart = centralpix(2)-npix
jend = centralpix(2)+npix-1

if (present(usehex)) then
if (usehex) then
! hexagonal grid step size
self%delta = 1.D0/dble(npx)
ktail%i = centralpix(1)
ktail%j = centralpix(2)
! NOTE(review): i and j have not been assigned at this point in the routine
! (the loops that set them come later); centralpix(1)/centralpix(2) were
! presumably intended here -- confirm before relying on this branch.
x = (i - j*0.5)*self%delta
y = j*self%delta*LPs%srt
rr = x*x+y*y ! NOTE(review): rr is computed but never used
hex = .TRUE.
! inverse hexagonal Lambert mapping: pick the sextant and map (x,y) to the
! unit-sphere direction kstar
ks = GetSextant(x,y)
select case (ks)
case (0,3)
XX = LPs%preb*y*dcos(x*LPs%prec/y)
YY = LPs%preb*y*dsin(x*LPs%prec/y)
case (1,4)
xp = y+LPs%rtt*x
yp = y*LPs%pred/xp
XX = LPs%prea*xp*dsin(yp)
YY = LPs%prea*xp*dcos(yp)
case (2,5)
xp = y-LPs%rtt*x
yp = y*LPs%pred/xp
XX = LPs%prea*xp*dsin(yp)
YY = -LPs%prea*xp*dcos(yp)
end select
q = XX**2+YY**2
self%kstar = (/ 0.5D0*XX*dsqrt(4.D0-q), 0.5D0*YY*dsqrt(4.D0-q),1.D0-0.5D0*q /)
ktail%i = centralpix(1) - centralpix(2)/2+mod(centralpix(2),2)/2 ! i-index of beam
ktail%j = centralpix(2) ! j-index of beam
call cell%NormVec(self%kstar,'c')
self%kstar = self%kstar/Diff%getWaveLength()
! and transform to reciprocal crystal space using the direct structure matrix
ktail%k = matmul(transpose(cell%getdsm()),self%kstar)
ktail%kn = 1.0/Diff%getWaveLength()
else
! square Lambert grid (usehex present but .FALSE.)
self%delta = 1.D0/dble(npx)
ktail%i = centralpix(1)
ktail%j = centralpix(2)
x = centralpix(1)*self%delta
y = centralpix(2)*self%delta
rr = x*x+y*y
hex = .FALSE.
! inverse square Lambert mapping of (x,y) to the unit direction kstar
if (maxval(abs((/x,y/))).eq.0.0) then
self%kstar = (/0.D0,0.D0,1.D0/)
else
if (dabs(x).le.dabs(y)) then
q = 2.D0*y*LPs%iPi*dsqrt(cPi-y*y)
self%kstar = (/ q*dsin(x*LPs%Pi*0.25D0/y), q*dcos(x*LPs%Pi*0.25D0/y), 1.D0-2.D0*y*y*LPs%iPi /)
else
q = 2.D0*x*LPs%iPi*dsqrt(cPi-x*x)
self%kstar = (/ q*dcos(y*LPs%Pi*0.25D0/x), q*dsin(y*LPs%Pi*0.25D0/x), 1.D0-2.D0*x*x*LPs%iPi /)
end if
end if
ktail%i = centralpix(1)
ktail%j = centralpix(2)
call cell%NormVec(self%kstar,'c')
self%kstar = self%kstar/Diff%getWaveLength()
! and transform to reciprocal crystal space using the direct structure matrix
ktail%k = matmul(transpose(cell%getdsm()),self%kstar)
ktail%kn = 1.0/Diff%getWaveLength()
end if
else
! usehex absent: square Lambert grid with step LPs%ap/npx
! (duplicates the square branch above except for self%delta)
self%delta = LPs%ap/dble(npx)
ktail%i = centralpix(1)
ktail%j = centralpix(2)
x = centralpix(1)*self%delta
y = centralpix(2)*self%delta
rr = x*x+y*y
hex = .FALSE.
if (maxval(abs((/x,y/))).eq.0.0) then
self%kstar = (/0.D0,0.D0,1.D0/)
else
if (dabs(x).le.dabs(y)) then
q = 2.D0*y*LPs%iPi*dsqrt(cPi-y*y)
self%kstar = (/ q*dsin(x*LPs%Pi*0.25D0/y), q*dcos(x*LPs%Pi*0.25D0/y), 1.D0-2.D0*y*y*LPs%iPi /)
else
q = 2.D0*x*LPs%iPi*dsqrt(cPi-x*x)
self%kstar = (/ q*dcos(y*LPs%Pi*0.25D0/x), q*dsin(y*LPs%Pi*0.25D0/x), 1.D0-2.D0*x*x*LPs%iPi /)
end if
end if
ktail%i = centralpix(1)
ktail%j = centralpix(2)
call cell%NormVec(self%kstar,'c')
self%kstar = self%kstar/Diff%getWaveLength()
! and transform to reciprocal crystal space using the direct structure matrix
ktail%k = matmul(transpose(cell%getdsm()),self%kstar)
ktail%kn = 1.0/Diff%getWaveLength()
end if

! append every other pixel of the patch to the linked list
do j=jstart,jend
do i=istart,iend
if (.not.((i .eq. centralpix(1)) .and. (j .eq. centralpix(2)))) then ! central pixel already taken care of
if (present(usehex)) then
xy = (/ dble(i), dble(j) /) * self%delta
call self%AddkVector(cell,Diff,ktail,xy,i,j,usehex)
else
xy = (/ dble(i), dble(j) /) * self%delta
call self%AddkVector(cell,Diff,ktail,xy,i,j)
end if
end if
end do
end do

end subroutine CalckvectorsGPU_
end module mod_kvectors
| {"hexsha": "1c1485567a2ec78ac9ad7eb523c7abaa8fc0b131", "size": 88577, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "Source/EMsoftOOLib/mod_kvectors.f90", "max_stars_repo_name": "EMsoft-org/EMsoftOO", "max_stars_repo_head_hexsha": "052aefc32a603b0dcb830901fcf14535afc15676", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-02-07T15:41:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-16T22:25:55.000Z", "max_issues_repo_path": "Source/EMsoftOOLib/mod_kvectors.f90", "max_issues_repo_name": "EMsoft-org/EMsoftOO", "max_issues_repo_head_hexsha": "052aefc32a603b0dcb830901fcf14535afc15676", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Source/EMsoftOOLib/mod_kvectors.f90", "max_forks_repo_name": "EMsoft-org/EMsoftOO", "max_forks_repo_head_hexsha": "052aefc32a603b0dcb830901fcf14535afc15676", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.3151353751, "max_line_length": 165, "alphanum_fraction": 0.537983901, "num_tokens": 25199} |
import numpy as np
from scipy.io import loadmat
from scipy.optimize import minimize
from matplotlib import pyplot as plt
import pandas as pd
import seaborn as sn
from sklearn.svm import SVC
import timeit
def preprocess():
    """Load MNIST from 'mnist_all.mat' and build train/validation/test splits.

    The .mat file holds one matrix per digit and split ('train0'..'train9',
    'test0'..'test9').  The first 1000 rows of each training class form the
    validation set; the rest form the training set.

    Output:
        train_data: matrix of training set; each row is an image's feature vector
        train_label: N x 1 vector of labels for the training set
        validation_data: matrix of validation set
        validation_label: N x 1 vector of labels for the validation set
        test_data: matrix of test set
        test_label: N x 1 vector of labels for the test set
    """
    mat = loadmat('mnist_all.mat')  # loads the MAT object as a Dictionary

    n_feature = mat.get("train1").shape[1]
    # total number of training samples over all ten digit classes
    n_sample = sum(mat.get("train" + str(i)).shape[0] for i in range(10))
    n_validation = 1000  # validation samples taken per class
    n_train = n_sample - 10 * n_validation

    # Validation split: first n_validation rows of each class.
    validation_data = np.zeros((10 * n_validation, n_feature))
    for i in range(10):
        validation_data[i * n_validation:(i + 1) * n_validation, :] = \
            mat.get("train" + str(i))[0:n_validation, :]

    # Validation labels: block k is all k's.
    validation_label = np.ones((10 * n_validation, 1))
    for i in range(10):
        validation_label[i * n_validation:(i + 1) * n_validation, :] = i * np.ones((n_validation, 1))

    # Training split: the remaining rows of each class, packed contiguously.
    train_data = np.zeros((n_train, n_feature))
    train_label = np.zeros((n_train, 1))
    temp = 0
    for i in range(10):
        size_i = mat.get("train" + str(i)).shape[0]
        train_data[temp:temp + size_i - n_validation, :] = mat.get("train" + str(i))[n_validation:size_i, :]
        train_label[temp:temp + size_i - n_validation, :] = i * np.ones((size_i - n_validation, 1))
        temp = temp + size_i - n_validation

    # Test split: all test rows of each class.
    n_test = sum(mat.get("test" + str(i)).shape[0] for i in range(10))
    test_data = np.zeros((n_test, n_feature))
    test_label = np.zeros((n_test, 1))
    temp = 0
    for i in range(10):
        size_i = mat.get("test" + str(i)).shape[0]
        test_data[temp:temp + size_i, :] = mat.get("test" + str(i))
        test_label[temp:temp + size_i, :] = i * np.ones((size_i, 1))
        temp = temp + size_i

    # Delete near-constant features (std <= 0.001): they carry no useful
    # information for the classifiers.  Vectorized replacement for the old
    # per-feature np.append loop.
    sigma = np.std(train_data, axis=0)
    index = np.where(sigma > 0.001)[0]
    train_data = train_data[:, index]
    validation_data = validation_data[:, index]
    test_data = test_data[:, index]

    # Scale pixel intensities to [0, 1].
    train_data /= 255.0
    validation_data /= 255.0
    test_data /= 255.0

    return train_data, train_label, validation_data, validation_label, test_data, test_label
def sigmoid(z):
    """Elementwise logistic function: maps z to 1 / (1 + exp(-z))."""
    denom = 1.0 + np.exp(np.negative(z))
    return np.reciprocal(denom)
def blrObjFunction(initialWeights, *args):
    """2-class logistic-regression cross-entropy error and its gradient.

    Input:
        initialWeights: weight vector of size (D + 1,), bias weight first
        args: (train_data, labeli) where train_data is N x D and labeli is
              N x 1 with entries in {0, 1}
    Output:
        error: scalar cross-entropy error averaged over the N samples
        error_grad: gradient vector of size (D + 1,)
    """
    train_data, labeli = args
    n_data = train_data.shape[0]
    n_features = train_data.shape[1]

    # Prepend the bias column of ones.
    X = np.hstack((np.ones((n_data, 1)), train_data))

    # Posterior P(y=1 | x, w) for every sample, shape (N, 1).
    w = initialWeights.reshape((n_features + 1, 1))
    theta = 1.0 / (1.0 + np.exp(-X.dot(w)))

    # Average binary cross-entropy over the N samples.
    error = -np.mean(labeli * np.log(theta) + (1.0 - labeli) * np.log(1.0 - theta))

    # Gradient: (1/N) * X^T (theta - y), flattened to (D + 1,).
    error_grad = (X.T.dot(theta - labeli) / n_data).ravel()

    # (A leftover per-iteration debug print of the error was removed here;
    # the adjacent comment said "uncomment ... to visualize" yet it was active.)
    return error, error_grad
def blrPredict(W, data):
    """Predict class labels with one-vs-all binary logistic regression.

    Input:
        W: (D + 1) x K weight matrix; column k holds classifier k's weights
           (bias weight first).
        data: N x D feature matrix.
    Output:
        label: N x 1 array; entry i is the index of the classifier with the
               highest score for sample i.
    """
    # Prepend the bias column of ones.
    X = np.hstack((np.ones((data.shape[0], 1)), data))

    # The sigmoid/normalisation applied previously is monotone in the linear
    # score, so the argmax over raw scores equals the argmax over posteriors;
    # computing exp() here was redundant and could overflow.
    scores = X.dot(W)

    # np.argmax also fixes the old `label[i] = np.where(...)` assignment,
    # which raised ValueError whenever two classes tied for the maximum.
    return np.argmax(scores, axis=1).reshape(-1, 1)
def mlrObjFunction(params, *args):
    """Multi-class logistic regression (softmax) error and its gradient.

    Input:
        params: flattened weight matrix of size ((D + 1) * K,)
        args: (train_data, labeli) where train_data is N x D and labeli is
              the N x K one-hot label matrix
    Output:
        error: scalar cross-entropy averaged over N * K entries
        error_grad: flattened gradient of size ((D + 1) * K,)
    """
    train_data, labeli = args
    n_data = train_data.shape[0]
    n_feat = train_data.shape[1]
    # Number of classes is taken from the label matrix rather than the
    # module-level n_class global, making the function self-contained
    # (backward compatible: labeli has always been N x n_class here).
    num_class = labeli.shape[1]

    W = params.reshape((n_feat + 1, num_class))

    # Prepend the bias column of ones.
    X = np.hstack((np.ones((n_data, 1)), train_data))

    # Softmax posteriors; shifting by the row max is mathematically a no-op
    # (it cancels in the normalisation) but prevents exp() overflow.
    scores = X.dot(W)
    scores = scores - scores.max(axis=1, keepdims=True)
    expo = np.exp(scores)
    theta = expo / expo.sum(axis=1, keepdims=True)

    # Cross-entropy; equivalent to the old trace(Y^T log(theta)) formulation
    # but without forming the K x K product.
    error = -np.sum(labeli * np.log(theta)) / (n_data * num_class)

    # Gradient X^T (theta - Y) / (N*K), flattened row-major like params.
    error_grad = (X.T.dot(theta - labeli) / (n_data * num_class)).ravel()

    # (A leftover per-iteration debug print of the error was removed here.)
    return error, error_grad
def mlrPredict(W, data):
    """Predict class labels with multi-class (softmax) logistic regression.

    Input:
        W: (D + 1) x K weight matrix; column k holds class k's weights
           (bias weight first).
        data: N x D feature matrix.
    Output:
        label: N x 1 array; entry i is the class with the highest score
               for sample i.
    """
    # Prepend the bias column of ones.
    X = np.hstack((np.ones((data.shape[0], 1)), data))

    # Softmax is monotone in the linear score, so argmax over raw scores
    # equals argmax over posteriors; the old exp/normalise step was
    # redundant and overflow-prone.
    scores = X.dot(W)

    # np.argmax also fixes the old `label[i] = np.where(...)` assignment,
    # which raised ValueError whenever two classes tied for the maximum.
    return np.argmax(scores, axis=1).reshape(-1, 1)
def confusionMatrix(label, predict):
    """Build and plot a 10x10 confusion matrix for digit predictions.

    Side effect: draws a seaborn heatmap of the matrix in a new figure.

    Input:
        label: N x 1 array of true labels (values 0..9)
        predict: N x 1 array of predicted labels (values 0..9)
    Output:
        CF: 10x10 int array; CF[t, p] counts samples with true label t
            predicted as p
        classAccuracy: length-10 array of per-class accuracy percentages
    """
    # Tally each (true, predicted) pair.
    CF = np.zeros([10,10])
    for i in range(0,len(label)):
        CF[int(label[i]),int(predict[i])] += 1
    # Per-class accuracy: diagonal count over the row total, as a percentage.
    classAccuracy = np.zeros(10)
    for i in range (0,10):
        classAccuracy[i]= CF[i,i] * 100/ np.sum(CF[i,:])
    print("Confusion Matrix:")
    CF = CF.astype(int)
    # Render the matrix as an annotated heatmap labelled with digits 0-9.
    df_cm = pd.DataFrame(CF.astype(int), index = [i for i in "0123456789"],
                         columns = [i for i in "0123456789"])
    plt.figure(figsize = (10,7))
    sn.heatmap(df_cm, annot=True)
    return CF, classAccuracy
"""
Script for Logistic Regression
"""
start = timeit.default_timer()
train_data, train_label, validation_data, validation_label, test_data, test_label = preprocess()
# number of classes
n_class = 10
# number of training samples
n_train = train_data.shape[0]
# number of features
n_feature = train_data.shape[1]
Y = np.zeros((n_train, n_class))
for i in range(n_class):
Y[:, i] = (train_label == i).astype(int).ravel()
Accuracy_List = []
"""
Script for Binomial Logistic Regression
"""
W = np.zeros((n_feature + 1, n_class))
initialWeights = np.zeros((n_feature + 1, 1))
opts = {'maxiter': 100}
for i in range(n_class):
labeli = Y[:, i].reshape(n_train, 1)
args = (train_data, labeli)
nn_params = minimize(blrObjFunction, initialWeights, jac=True, args=args, method='CG', options=opts)
W[:, i] = nn_params.x.reshape((n_feature + 1,))
print("\n\n--------Binomail LR -----------")
# Find the accuracy on Training Dataset
predicted_label = blrPredict(W, train_data)
print('\n Training set Accuracy:' + str(100 * np.mean((predicted_label == train_label).astype(float))) + '%')
# Find the accuracy on Validation Dataset
predicted_label = blrPredict(W, validation_data)
print('\n Validation set Accuracy:' + str(100 * np.mean((predicted_label == validation_label).astype(float))) + '%')
# Find the accuracy on Testing Dataset
predicted_label = blrPredict(W, test_data)
print('\n Testing set Accuracy:' + str(100 * np.mean((predicted_label == test_label).astype(float))) + '%')
[confMat_BLR, ClassAccBLR] = confusionMatrix(test_label, predicted_label)
stop = timeit.default_timer()
time = stop - start
print ("Time Taken = " + str(time) +" seconds")
"""
Script for Support Vector Machine
"""
##################
# SVM Code Begins here
##################
print('\n\n--------------SVM-------------------\n\n')
## For Linear Kernel
print('\n **** SVM with linear kernel ****\n')
start = timeit.default_timer()
svmModel = SVC(kernel = "linear")
svmModel.fit(train_data, train_label)
#train_predicted = svmModel.predict(train_data)
#validation_predicted = svmModel.predict(validation_data)
#test_predicted = svmModel.predict(test_data)
Accuracy_train = svmModel.score(train_data,train_label)
Accuracy_validation = svmModel.score(validation_data,validation_label)
Accuracy_test = svmModel.score(test_data, test_label)
print("Accuracy of train data in SVM: " +str(Accuracy_train))
print("Accuracy of validation data in SVM: " +str(Accuracy_validation))
print("Accuracy of test data in SVM: " +str(Accuracy_test))
Accuracy_List.append(["Linear","Gamme = default","c = 1.0","Training_Accuracy:" + str(Accuracy_train), "Validation_Accuracy:" + str(Accuracy_validation), "Test_Accuracy:" + str(Accuracy_test)])
stop = timeit.default_timer()
time = stop - start
print ("Time Taken = " + str(time) +" seconds")
#For Radial bias Kernel with Gamma =1
start = timeit.default_timer()
print('\n **** Radial Bias SVM with gamma = 1 ****\n') ## gamma = 1
svmModel = SVC(kernel = "rbf", gamma = 1)
svmModel.fit(train_data, train_label)
#train_predicted = svmModel.predict(train_data)
#validation_predicted = svmModel.predict(validation_data)
#test_predicted = svmModel.predict(test_data)
Accuracy_train = svmModel.score(train_data,train_label)
Accuracy_validation = svmModel.score(validation_data,validation_label)
Accuracy_test = svmModel.score(test_data, test_label)
Accuracy_List.append(["Radial","Gamme = default","c = 1.0","Training_Accuracy:" + str(Accuracy_train), "Validation_Accuracy:" + str(Accuracy_validation), "Test_Accuracy:" + str(Accuracy_test)])
print("Accuracy of train data in SVM: " +str(Accuracy_train))
print("Accuracy of validation data in SVM: " +str(Accuracy_validation))
print("Accuracy of test data in SVM: " +str(Accuracy_test))
stop = timeit.default_timer()
time = stop - start
### For Radial bias Kernel with Gamma = default('auto')
print('\n **** Radial Bias SVM with Default Gamma setting ****\n') ## gamma = default
start = timeit.default_timer()
svmModel = SVC(kernel = "rbf")
svmModel.fit(train_data, train_label)
#train_predicted = svmModel.predict(train_data)
#validation_predicted = svmModel.predict(validation_data)
#test_predicted = svmModel.predict(test_data)
Accuracy_train = svmModel.score(train_data,train_label)
Accuracy_validation = svmModel.score(validation_data,validation_label)
Accuracy_test = svmModel.score(test_data, test_label)
Accuracy_List.append(["Radial","Gamme = 1.0","c = 1.0","Training_Accuracy:" + str(Accuracy_train), "Validation_Accuracy:" + str(Accuracy_validation), "Test_Accuracy:" + str(Accuracy_test)])
print("Accuracy of train data in SVM: " +str(Accuracy_train))
print("Accuracy of validation data in SVM: " +str(Accuracy_validation))
print("Accuracy of test data in SVM: " +str(Accuracy_test))
stop = timeit.default_timer()
time = stop - start
print ("Time Taken = " + str(time) +" seconds")
## For Radial bias with varying values of C
print('\n **** Radial Bias SVM with varying C values ****\n') ## gamma = default
Flag = True;
C = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
for c in C:
start = timeit.default_timer()
svmModel = SVC(C = c, kernel = "rbf")
svmModel.fit(train_data, train_label)
#train_predicted = svmModel.predict(train_data)
#validation_predicted = svmModel.predict(validation_data)
#test_predicted = svmModel.predict(test_data)
Accuracy_train = svmModel.score(train_data,train_label)
Accuracy_validation = svmModel.score(validation_data,validation_label)
Accuracy_test = svmModel.score(test_data, test_label)
Accuracy_List.append(["Radial","Gamme:default","c = " + str(c),"Training_Accuracy:" + str(Accuracy_train), "Validation_Accuracy:" + str(Accuracy_validation), "Test_Accuracy:" + str(Accuracy_test)])
print('C value: ' + str(c))
print("Accuracy of train data in SVM: " +str(Accuracy_train))
print("Accuracy of validation data in SVM: " +str(Accuracy_validation))
print("Accuracy of test data in SVM: " +str(Accuracy_test))
stop = timeit.default_timer()
time = stop - start
print ("Time Taken = " + str(time) +" seconds")
file = open("output.csv",'w+')
for line in Accuracy_List:
file.write("\n" + str(line))
file.close()
##################
# Multinomail Logistic Regression Code Begins here
##################
print('\n\n--------------Multimomial Logistic Regression-------------------\n\n')
start = timeit.default_timer()

"""
Script for Extra Credit Part
"""
## FOR EXTRA CREDIT ONLY
# Train all 10 softmax classifiers jointly: the (D+1) x 10 weight matrix is
# optimized as one flattened vector with conjugate gradient.
W_b = np.zeros((n_feature + 1, n_class))
initialWeights_b = np.zeros((n_feature + 1, n_class))
opts_b = {'maxiter': 100}
args_b = (train_data, Y)
nn_params = minimize(mlrObjFunction, initialWeights_b, jac=True, args=args_b, method='CG', options=opts_b)
W_b = nn_params.x.reshape((n_feature + 1, n_class))

# Find the accuracy on Training Dataset
predicted_label_b = mlrPredict(W_b, train_data)
print('\n Training set Accuracy:' + str(100 * np.mean((predicted_label_b == train_label).astype(float))) + '%')

# Find the accuracy on Validation Dataset
predicted_label_b = mlrPredict(W_b, validation_data)
print('\n Validation set Accuracy:' + str(100 * np.mean((predicted_label_b == validation_label).astype(float))) + '%')

# Find the accuracy on Testing Dataset
predicted_label_b = mlrPredict(W_b, test_data)
print('\n Testing set Accuracy:' + str(100 * np.mean((predicted_label_b == test_label).astype(float))) + '%')

stop = timeit.default_timer()
time = stop - start
print ("Time Taken = " + str(time) +" seconds")

# Confusion matrix and per-class accuracy on the test set.
[confMat_MLR, ClassAccMLR] = confusionMatrix(test_label, predicted_label_b)
#######################################################################
# Copyright (C) 2017 Shangtong Zhang(zhangshangtong.cpp@gmail.com) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
import numpy as np
import torch
from baselines.common.running_mean_std import RunningMeanStd
class BaseNormalizer:
    """No-op base class for normalizers.

    Tracks a single ``read_only`` flag that subclasses consult to decide
    whether their running statistics may still be updated.
    """

    def __init__(self, read_only=False):
        # When True, subclasses should stop updating their statistics.
        self.read_only = read_only

    def set_read_only(self):
        """Freeze the normalizer."""
        self.read_only = True

    def unset_read_only(self):
        """Unfreeze the normalizer."""
        self.read_only = False

    def state_dict(self):
        """The base class carries no state, so there is nothing to serialize."""
        return None

    def load_state_dict(self, _):
        """The base class carries no state; the payload is ignored."""
        return
class MeanStdNormalizer(BaseNormalizer):
    """Normalizes inputs to zero mean / unit variance with running statistics.

    Statistics are created lazily on the first call (the input shape is not
    known until then) using baselines' RunningMeanStd, and are frozen while
    read_only is True.
    """

    def __init__(self, read_only=False, clip=10.0, epsilon=1e-8):
        BaseNormalizer.__init__(self, read_only)
        self.read_only = read_only
        self.rms = None          # RunningMeanStd instance, created on first __call__
        self.clip = clip         # clamp normalized values to [-clip, clip]; None disables
        self.epsilon = epsilon   # added to the variance to avoid division by zero

    def __call__(self, x):
        """Update running stats with x (unless read-only) and return normalized x."""
        x = np.asarray(x)
        # Lazy init: stats are tracked per-feature, broadcast over axis 0.
        if self.rms is None:
            self.rms = RunningMeanStd(shape=(1,) + x.shape[1:])
        if not self.read_only:
            self.rms.update(x)
        if self.clip is None:
            return (x - self.rms.mean) / np.sqrt(self.rms.var + self.epsilon)
        else:
            return np.clip((x - self.rms.mean) / np.sqrt(self.rms.var + self.epsilon),
                           -self.clip, self.clip)

    def state_dict(self):
        # NOTE(review): raises AttributeError if called before the first
        # __call__ (self.rms is still None) -- confirm callers only save
        # after the normalizer has been used.
        return {'mean': self.rms.mean,
                'var': self.rms.var}

    def load_state_dict(self, saved):
        # NOTE(review): likewise assumes self.rms already exists; loading
        # into a fresh, never-called normalizer will fail.
        self.rms.mean = saved['mean']
        self.rms.var = saved['var']
class RescaleNormalizer(BaseNormalizer):
    """Multiplies its input by a fixed coefficient."""

    def __init__(self, coef=1.0):
        super().__init__()
        # Constant scale factor applied to every input.
        self.coef = coef

    def __call__(self, x):
        # Torch tensors pass through as-is; anything else becomes an ndarray.
        if not isinstance(x, torch.Tensor):
            x = np.asarray(x)
        return self.coef * x
class ImageNormalizer(RescaleNormalizer):
    """Rescales pixel values from [0, 255] down to [0, 1]."""

    def __init__(self):
        super().__init__(1.0 / 255)
class SignNormalizer(BaseNormalizer):
    """Maps each element of the input to its sign (-1, 0, or +1)."""

    def __call__(self, x):
        return np.sign(x)
| {"hexsha": "ed0c3ab8b493b3820e04082d138f2337e696311d", "size": 2213, "ext": "py", "lang": "Python", "max_stars_repo_path": "deep_rl/utils/normalizer.py", "max_stars_repo_name": "Louis-Bagot/DeepRL", "max_stars_repo_head_hexsha": "0b152c52bbba90362c8276c223fee3f9a464eb32", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "deep_rl/utils/normalizer.py", "max_issues_repo_name": "Louis-Bagot/DeepRL", "max_issues_repo_head_hexsha": "0b152c52bbba90362c8276c223fee3f9a464eb32", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "deep_rl/utils/normalizer.py", "max_forks_repo_name": "Louis-Bagot/DeepRL", "max_forks_repo_head_hexsha": "0b152c52bbba90362c8276c223fee3f9a464eb32", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.5066666667, "max_line_length": 86, "alphanum_fraction": 0.5661997289, "include": true, "reason": "import numpy", "num_tokens": 518} |
#pragma once
#include <polyfem/Types.hpp>
#include <Eigen/Dense>
#include <Eigen/Sparse>
namespace polyfem {
// Show some stats about the matrix M: det, singular values, condition number, etc
void show_matrix_stats(const Eigen::MatrixXd &M);

// Determinant of a small square matrix (1x1, 2x2 or 3x3), expanded by hand
// so the expression remains valid for generic scalar types T (e.g. autodiff).
// Larger or non-square inputs only trip the assert in debug builds and
// return T(0) in release builds.
template<typename T>
T determinant(const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, 0, 3, 3> &mat)
{
	assert(mat.rows() == mat.cols());

	if(mat.rows() == 1)
		return mat(0);
	else if(mat.rows() == 2)
		return mat(0, 0) * mat(1, 1) - mat(0, 1) * mat(1, 0);
	else if(mat.rows() == 3)
		// cofactor expansion along the first row
		return mat(0,0)*(mat(1,1)*mat(2,2)-mat(1,2)*mat(2,1))-mat(0,1)*(mat(1,0)*mat(2,2)-mat(1,2)*mat(2,0))+mat(0,2)*(mat(1,0)*mat(2,1)-mat(1,1)*mat(2,0));

	assert(false);
	return T(0);
}

// Declaration only; presumably reads a dense matrix of T from the file at
// `path` into `mat` -- NOTE(review): confirm against the implementation file.
template<typename T>
void read_matrix(const std::string &path, Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> &mat);

// Returns four spectral characteristics of the stiffness matrix.
// NOTE(review): "specturm" is a typo for "spectrum", but the name is part of
// the public API -- renaming it would break callers.
Eigen::Vector4d compute_specturm(const StiffnessMatrix &mat);
} // namespace polyfem
| {"hexsha": "036ebd67b368d514d2aac3e11560f7f49294cd4b", "size": 949, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/utils/MatrixUtils.hpp", "max_stars_repo_name": "ldXiao/polyfem", "max_stars_repo_head_hexsha": "d4103af16979ff67d461a9ebe46a14bbc4dc8c7c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/utils/MatrixUtils.hpp", "max_issues_repo_name": "ldXiao/polyfem", "max_issues_repo_head_hexsha": "d4103af16979ff67d461a9ebe46a14bbc4dc8c7c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/utils/MatrixUtils.hpp", "max_forks_repo_name": "ldXiao/polyfem", "max_forks_repo_head_hexsha": "d4103af16979ff67d461a9ebe46a14bbc4dc8c7c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.3611111111, "max_line_length": 151, "alphanum_fraction": 0.645943098, "num_tokens": 331} |
'''
Organize nav curves of multiple funds into the multi-timeseries objects offered by gluonts.
'''
import os
import inspect
import sys

# Make the parent directory importable when this file is run as a script.
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from gluonts.dataset.common import ListDataset
import numpy as np
import pandas as pd
from gluonts.dataset.multivariate_grouper import MultivariateGrouper
from gluonts.dataset.repository.datasets import get_dataset

# --- Example 1: group a benchmark dataset shipped with gluonts ---
# Other dataset names that can be passed to get_dataset:
# exchange_rate_nips, electricity_nips, traffic_nips, solar_nips,
# wiki-rolling_nips, ## taxi_30min is buggy still
dataset = get_dataset("electricity_nips", regenerate=True)
# Cap the multivariate target dimension at 2000 (or the dataset's cardinality).
train_grouper = MultivariateGrouper(max_target_dim=min(
    2000, int(dataset.metadata.feat_static_cat[0].cardinality)))
test_grouper = MultivariateGrouper(num_test_dates=int(len(dataset.test) / len(dataset.train)),
                                   max_target_dim=min(2000, int(dataset.metadata.feat_static_cat[0].cardinality)))
# Collapse the univariate series into single multivariate datasets.
dataset_train = train_grouper(dataset.train)
dataset_test = test_grouper(dataset.test)
print('Example Success')

# --- Example 2: build and group a synthetic dataset of ten daily series ---
# Series i has length 300 + i, so the grouper must align unequal lengths.
ts_jsons = []
for i in range(10):
    ts_jsons.append(
        {
            "start": pd.Timestamp('2021-01-01', freq='D'),
            "target": np.arange(300 + i),
        }
    )
dataset = ListDataset(ts_jsons, freq='D')
print(next(iter(dataset)))
train_grouper = MultivariateGrouper(max_target_dim=10)
grouped_dataset = train_grouper(dataset)
print(len(grouped_dataset))
print(next(iter(grouped_dataset)))
print('Own version success')
| {"hexsha": "a330ae777fcf35f5219a4dd18cb0c852d3719fde", "size": 1571, "ext": "py", "lang": "Python", "max_stars_repo_path": "reference/multivariate_dataset_examples.py", "max_stars_repo_name": "jeffrey82221/gluonts_fund_price_forecast", "max_stars_repo_head_hexsha": "fed7c484c4dba663201f9cf96aa86ca98119b54c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-21T13:27:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-21T13:27:32.000Z", "max_issues_repo_path": "reference/multivariate_dataset_examples.py", "max_issues_repo_name": "jeffrey82221/gluonts_fund_price_forecast", "max_issues_repo_head_hexsha": "fed7c484c4dba663201f9cf96aa86ca98119b54c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2022-02-20T10:36:38.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-22T03:47:13.000Z", "max_forks_repo_path": "reference/multivariate_dataset_examples.py", "max_forks_repo_name": "jeffrey82221/gluonts_fund_price_forecast", "max_forks_repo_head_hexsha": "fed7c484c4dba663201f9cf96aa86ca98119b54c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.152173913, "max_line_length": 114, "alphanum_fraction": 0.7530235519, "include": true, "reason": "import numpy", "num_tokens": 379} |
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import numpy as np
import iris
from matplotlib.path import Path
from lagranto.trajectory import load
import datetime
from mymodule import grid, convert, interpolate
import matplotlib.pyplot as plt
from lagranto import caltra, trajectory
import random
from matplotlib.dates import DayLocator, HourLocator, DateFormatter
# Make iris return datetime objects for time cells (pre-iris-2 future flag).
iris.FUTURE.cell_datetime_objects=True
# The two bare strings below are author notes (no runtime effect):
"I can't do 3D trajectories because I can't find the 'altitude' coordinate to start them off"
"As 3D trajectories require altitude instead of potential temperature"
# Root of the NAWDEX forecast data on the shared filesystem.
ben_datadir = '/storage/silver/NCAS-Weather/ben/nawdex/mi-ar482/'
def outflow_grid(k = 0, levels = 3, hoursafterinit = [42, 36, 42, 24], thlevs = [[320, 325, 330], [325, 330, 335], [310, 315, 320], [310, 315, 320]], folder = 'IOP3/T42', strapp = ''):
    """Build and save the 3D grid of trajectory start points inside the
    outflow region at the time the region is defined.

    For each isentropic level of case `k`, the gridpoints enclosed by the
    back-trajectory circuit are collected together with their altitude,
    plotted for inspection, and saved both on altitude (initial_grid.npy)
    and on theta levels (initial_grid_thlevs.npy).
    """
    "This presently defines the grid of points from the same time that the region is defined"
    "But by loading the forward trajectories this could easily be adapted to"
    "define the grid at any time along the trajectory"
    #save_dir = '/glusterfs/msc/users_2018/bn826011/NAWDEX/Final/'
    save_dir = '/storage/silver/scenario/bn826011/WCB_outflow/Final/'
    # Forecast base times for the four cases (Py2: leading-zero 03 == 3)
    basetime = [datetime.datetime(2016, 9, 22, 12), datetime.datetime(2016, 9, 26, 12), datetime.datetime(2016, 9, 30, 12), datetime.datetime(2016, 10, 03, 12)]
    basetime_str = basetime[k].strftime('%Y%m%d_%H')
    # Backward trajectory ensemble defining the outflow circuit
    TrB = load(save_dir + folder + '/{}_TrajectoryEnsemble_backward'.format(basetime_str) + strapp)
    start_time = [datetime.timedelta(hours = 0)]
    # trajectory at definition time
    datadir = '/export/cloud/migrated-NCASweather/ben/nawdex/mi-ar482/{}/'.format(basetime_str)
    #hoursafterinit = [42, 42, 42, 72, 96]#42
    # Outflow definition time for this case
    time = basetime[k] + datetime.timedelta(hours = hoursafterinit[k])
    mapping = {}
    leadtimehr = np.int((time - basetime[k]).total_seconds()) / 3600
    # Model output files come in 12-hourly chunks (Py2 integer division)
    fn = 'prodm_op_gl-mn_{0}_d{1:03d}_thgrid.pp'.\
        format(basetime_str, 12 * (leadtimehr / 12))
    mapping[time] = datadir + fn
    # dexs = iris.load('/export/cloud/NCASweather/ben/nawdex/mi-ar482/20160922_12/' +
    #                  'prodm_op_gl-mn_20160922_12_d*_thsfcs_5K.nc', 'dimensionless_exner_function')
    # dexs[-1] = iris.util.new_axis(dexs[-1], 'time')
    # dex = dexs.concatenate_cube()
    #
    # temps = iris.load('/export/cloud/NCASweather/ben/nawdex/mi-ar482/20160922_12/' +
    #                   'prodm_op_gl-mn_20160922_12_d*_thsfcs_5K.nc', 'air_temperature')
    # temps[-1] = iris.util.new_axis(temps[-1], 'time')
    # temp = temps.concatenate_cube()
    #
    # lvls = ('air_potential_temperature', thlevs[k])
    #
    # altd = convert.calc('altitude', iris.cube.CubeList([dex, temp]), levels = lvls)
    # # 3D caltra works on altitude not theta levels
    # # however I don't know how to get altitude?!
    # # I think leo's data must've have altitude as a coordinate
    cubes = iris.load(mapping[time], iris.Constraint(time=time))
    plt.figure(figsize = (10, 14))
    # Altitude on 5K theta surfaces, all lead times except the last
    zc = iris.load('/export/cloud/migrated-NCASweather/ben/nawdex/mi-ar482/' + basetime_str +
                   '/prodm_op_gl-mn_' + basetime_str + '_d*_thsfcs_5K.nc', 'altitude')
    zc = zc[:-1].concatenate_cube()
    # the last time step has different metadata?
    for kl in xrange(levels):
        theta_level = thlevs[k][kl]
        # Circuit points on this theta level at the definition time
        trajectories = TrB.select(
            'air_potential_temperature', '==', theta_level, time = start_time)
        x = trajectories.x[:, 0]
        y = trajectories.y[:, 0]
        #
        # tlev_cstrnt = iris.Constraint(air_potential_temperature = theta_level)
        #
        # altdth = altd.extract(tlev_cstrnt)
        lvls = ('air_potential_temperature', [theta_level])
        w = convert.calc('upward_air_velocity', cubes, levels = lvls)
        # I think fairly arbitrary cube (only used to obtain the x/y grid)
        # z = grid.make_cube(w, 'altitude')
        # # altitude
        #
        # print z
        #
        # lvls = ('air_potential_temperature', [theta_level])
        #
        # coord_name, values = lvls
        # I now need some way to interpolate altitude to desired theta level
        # z = interpolate.to_level(z, **{coord_name: values})
        # z = convert.calc('altitude', iris.cube.CubeList([z]), levels = lvls)
        glon, glat = grid.get_xy_grids(w)
        gridpoints = np.array([glon.flatten(), glat.flatten()]).transpose()
        points = np.array([x, y]).transpose()
        pth = Path(points)
        # Mask all points that are not contained in the circuit
        mask = np.logical_not(pth.contains_points(gridpoints).reshape(glat.shape))
        tlev_cstrnt = iris.Constraint(air_potential_temperature = theta_level)
        time_cstrnt = iris.Constraint(time = time)
        # try this for altitude
        z = zc.extract(tlev_cstrnt & time_cstrnt)
        # Left-hand panel: the mask itself
        plt.subplot(levels, 2, 2*kl+1)
        plt.contourf(mask, cmap = 'gray')
        masked_lon = []
        masked_lat = []
        alt_list = []
        # Collect lon/lat/altitude of every unmasked (inside-circuit) point
        for i, col in enumerate(mask):
            for j, point in enumerate(col):
                if point == False:
                    lat = glat[i, j]
                    lon = glon[i, j]
                    alt = z.data[i, j]
                    masked_lon.append(lon)
                    masked_lat.append(lat)
                    alt_list.append(alt)
        # Right-hand panel: scatter of the retained points
        plt.subplot(levels, 2, 2*kl+2)
        plt.scatter(masked_lon, masked_lat, s = 2, c = 'k', marker = '.', edgecolor = 'k')
        lt = len(masked_lon)
        # (lon, lat, altitude) start points for 3D trajectories
        points3d = np.zeros([lt, 3])
        points3d[:, 0] = np.array(masked_lon)
        points3d[:, 1] = np.array(masked_lat)
        points3d[:, 2] = np.array(alt_list)
        # (lon, lat, theta) equivalent on isentropic levels
        pointsth = np.zeros([lt, 3])
        pointsth[:, 0] = np.array(masked_lon)
        pointsth[:, 1] = np.array(masked_lat)
        pointsth[:, 2] = theta_level*np.ones([lt])
        if kl == 0:
            outflow_volume = points3d
            outflow_volume_th = pointsth
        else:
            outflow_volume = np.concatenate((outflow_volume, points3d))
            outflow_volume_th = np.concatenate((outflow_volume_th, pointsth))
    np.save('/storage/silver/scenario/bn826011/WCB_outflow/Final/' + folder + '/inflow/' + basetime_str + strapp + 'initial_grid.npy', outflow_volume)
    np.save('/storage/silver/scenario/bn826011/WCB_outflow/Final/' + folder + '/inflow/' + basetime_str + strapp + 'initial_grid_thlevs.npy', outflow_volume_th)
    #plt.savefig('/glusterfs/msc/users_2018/bn826011/NAWDEX/Final/' + folder + '/' + basetime_str + strapp + '_masks.jpg')
    plt.show()
def inflow_caltra(k = 0, levs = 3, hoursafterinit = [42, 36, 42, 24], folder = 'IOP3/T42', strapp = ''):
    """Compute backward 3D trajectories from the saved outflow grid of case
    `k`, carrying PV-tracer diagnostics, and save the resulting ensemble.

    Returns the trajectory ensemble produced by caltra.
    """
    # Create a mapping from datetime objects to filenames
    basetime = [datetime.datetime(2016, 9, 22, 12), datetime.datetime(2016, 9, 26, 12), datetime.datetime(2016, 9, 30, 12), datetime.datetime(2016, 10, 03, 12)]
    basetime_str = basetime[k].strftime('%Y%m%d_%H')
    #datadir = '/export/cloud/migrated-NCASweather/ben/nawdex/mi-ar482/{}/'.format(basetime_str)
    datadir = '/storage/silver/NCAS-Weather/ben/nawdex/mi-ar482/{}/'.format(basetime_str)
    timestep = datetime.timedelta(hours=6)
    # 6-hourly times from basetime up to the outflow time (Py2 int division)
    times = [basetime[k] + timestep * i for i in range(hoursafterinit[k]/6 + 1)]
    print times[-1]
    #creates mapping up to and including selected outflow time
    mapping = {}
    for t in times:
        leadtimehr = np.int((t - basetime[k]).total_seconds()) / 3600
        # Each time maps to a pair of files: theta-grid (d, 12-hourly chunks)
        # and the 'c' stream (6-hourly chunks) holding the PV tracers
        fn = [datadir + 'prodm_op_gl-mn_{0}_d{1:03d}_thgrid.pp'.\
              format(basetime_str, 12 * (leadtimehr / 12)),
              datadir + 'prodm_op_gl-mn_{0}_c{1:03d}.pp'.\
              format(basetime_str, 6 * (leadtimehr / 6))] # new addition of c to mapping
        mapping[t] = fn#datadir + fn
    #trainp_th = np.load('/glusterfs/msc/users_2018/bn826011/NAWDEX/Final/' + folder + '/inflow/' + basetime_str + strapp + 'initial_grid.npy')
    trainp_th = np.load('/storage/silver/scenario/bn826011/WCB_outflow/Final/' + folder + '/inflow/' + basetime_str + strapp + 'initial_grid.npy')
    #trajectory input on theta levels
    tracers = ['air_potential_temperature', 'air_pressure', 'derived_pv', 'dPV_tot', 'adv_only_PV', 'dPV_LW', 'dPV_mic', 'dPV_conv', 'dPV_adv', 'dPV_SW', 'dPV_ph1', 'dPV_bl', 'dPV_cld', 'dPV_mass']# 'specific_humidity', 'mass_fraction_of_cloud_ice_in_air', 'mass_fraction_of_cloud_liquid_water_in_air', 'derived_pv']
    #need these for circulation integrals
    # fbflag=-1: integrate backwards in time from the outflow grid
    traout = caltra.caltra(trainp_th, mapping, fbflag=-1, nsubs = 12, tracers = tracers)
    # 12 steps between files = 30 mins apart
    traout.save('/storage/silver/scenario/bn826011/WCB_outflow/Final/' + folder + '/inflow/{}_3DTrajectoryEnsemble_dPV'.format(basetime_str) + strapp)
    return traout
def inflow_caltra_fw(k = 0, levs = 3, hoursafterinit = [42, 42, 42, 24], hoursuntil = [90, 78, 156, 84], folder = 'IOP3/T42', strapp = ''):
    """Forward counterpart of inflow_caltra: compute forward 3D trajectories
    from the outflow grid of case `k`, from the outflow time up to
    hoursuntil[k], and save the resulting ensemble.
    """
    # Create a mapping from datetime objects to filenames
    basetime = [datetime.datetime(2016, 9, 22, 12), datetime.datetime(2016, 9, 26, 12), datetime.datetime(2016, 9, 30, 12), datetime.datetime(2016, 10, 03, 12)]
    basetime_str = basetime[k].strftime('%Y%m%d_%H')
    datadir = '/export/cloud/migrated-NCASweather/ben/nawdex/mi-ar482/{}/'.format(basetime_str)
    timestep = datetime.timedelta(hours=6)
    # 6-hourly times from the outflow time until hoursuntil (Py2 int division)
    times = [basetime[k] + timestep * i for i in range(hoursafterinit[k]/6, hoursuntil[k]/6 + 1)]
    print times[0]
    #creates mapping up to and including selected outflow time
    mapping = {}
    for t in times:
        leadtimehr = np.int((t - basetime[k]).total_seconds()) / 3600
        fn = 'prodm_op_gl-mn_{0}_d{1:03d}_thgrid.pp'.\
            format(basetime_str, 12 * (leadtimehr / 12))
        mapping[t] = datadir + fn
    #trainp_th = np.load('/glusterfs/msc/users_2018/bn826011/NAWDEX/Final/' + folder + '/inflow/' + basetime_str + strapp + 'initial_grid.npy')
    trainp_th = np.load('/storage/silver/scenario/bn826011/WCB_outflow/Final/' + folder + '/inflow/' + basetime_str + strapp + 'initial_grid.npy')
    #trajectory input on theta levels
    tracers = ['air_potential_temperature', 'air_pressure']#, 'specific_humidity', 'mass_fraction_of_cloud_ice_in_air', 'mass_fraction_of_cloud_liquid_water_in_air']
    #need these for circulation integrals
    # fbflag=1: integrate forwards in time
    traout = caltra.caltra(trainp_th, mapping, fbflag=1, nsubs = 12, tracers = tracers)
    # 12 steps between files = 30 mins apart
    #traout.save('/glusterfs/msc/users_2018/bn826011/NAWDEX/Final/' + folder + '/inflow/{}_3DTrajectoryEnsemble_fw'.format(basetime_str) + strapp)
    traout.save('/storage/silver/scenario/bn826011/WCB_outflow/Final/' + folder + '/inflow/{}_3DTrajectoryEnsemble_fw'.format(basetime_str) + strapp)
    return traout
def heating_hist(k = 0, levs = 3, theta_0 = [320, 325, 310, 310], t = 0, folder = 'IOP3/T42', strapp = ''):
    """Plot histograms of the potential-temperature change (delta theta)
    along 3D back trajectories between time index `t` and the outflow time,
    for all trajectories and for each starting theta surface.

    Returns the (counts, bin_edges, patches) tuple of the all-surfaces
    histogram, as produced by plt.hist.
    """
    basetime = [datetime.datetime(2016, 9, 22, 12), datetime.datetime(2016, 9, 26, 12), datetime.datetime(2016, 9, 30, 12), datetime.datetime(2016, 10, 03, 12)]
    basetime_str = basetime[k].strftime('%Y%m%d_%H')
    #Tr3 = load('/glusterfs/msc/users_2018/bn826011/NAWDEX/Final/' + folder + '/inflow/{}_3DTrajectoryEnsemble'.format(basetime_str) + strapp)
    Tr3 = load('/storage/silver/scenario/bn826011/WCB_outflow/Final/' + folder + '/inflow/{}_3DTrajectoryEnsemble'.format(basetime_str) + strapp)
    bins = np.linspace(-10, 30, 21)
    plt.figure(figsize = (13, 9))
    # -1000 is the fill value for trajectories that left the domain
    Tall = Tr3.select('air_potential_temperature', '!=', -1000)
    #only those which remain in domain
    plt.subplot(int(levs/2)+1, 2, 1)
    # Column 3 of the trajectory data is air_potential_temperature
    # (assumed from tracer order — TODO confirm against the ensemble names)
    curve = plt.hist(Tall.data[:, 0, 3] - Tall.data[:, -(t+1), 3], bins = bins)
    #plt.title(folder + '_' + strapp)
    plt.title('Started at all surfaces')
    plt.xlabel('Delta theta')
    for i in xrange(levs):
        theta = theta_0[k] + i*5
        # Trajectories starting within +/-2.5K of this theta surface
        Tt = Tall.select('air_potential_temperature', '>=', theta - 2.5, time = [datetime.timedelta(hours = 0)])
        Tt = Tt.select('air_potential_temperature', '<', theta + 2.5, time = [datetime.timedelta(hours = 0)])
        # those which remain in domain that start on desired theta surface
        plt.subplot(int(levs/2)+1, 2, 2+i)
        plt.hist(Tt.data[:, 0, 3] - Tt.data[:, -(t+1), 3], bins = bins)
        plt.title('Started at ' + str(theta) + 'K surface')
        plt.xlabel('Delta theta')
    # NOTE(review): this still writes to the old glusterfs path while the rest
    # of the file has migrated to /storage/silver — confirm intended.
    plt.savefig('/glusterfs/msc/users_2018/bn826011/NAWDEX/Final/' + folder + '/inflow/delta_theta_histograms_t=' + str(t) + strapp + '.png')
    #plt.savefig('IOP3_T42_2x2_hist_t=' + str(t) + '.png')
    plt.show()
    return curve
def random_3D_trajs(k = 0, levs = 3, theta_0 = [320, 325, 310, 310], trajs = 50, folder = 'IOP3/T42', strapp = '', var_idxs = [8, 9, 10]):
    """Plot ensemble-mean PV-tracer time series along 3D back trajectories,
    binned by total ascent (delta theta), as a 2x2 panel figure.

    k        -- case index selecting the forecast base time (0-3)
    levs     -- number of isentropic levels (kept for interface compatibility)
    theta_0  -- lowest outflow theta surface per case (unused in active code)
    trajs    -- number of random trajectories (unused in active code)
    folder   -- case subdirectory, e.g. 'IOP3/T42'
    strapp   -- suffix appended to trajectory-ensemble file names
    var_idxs -- column indices of the trajectory data to average and plot
    """
    basetime = [datetime.datetime(2016, 9, 22, 12), datetime.datetime(2016, 9, 26, 12), datetime.datetime(2016, 9, 30, 12), datetime.datetime(2016, 10, 3, 12)]
    basetime_str = basetime[k].strftime('%Y%m%d_%H')
    Tr2 = load('/storage/silver/scenario/bn826011/WCB_outflow/Final/' + folder + '/inflow/{}_3DTrajectoryEnsemble_dPV'.format(basetime_str) + strapp)
    plt.figure(figsize = (12, 8))
    # One panel per ascent band (delta theta), from strong ascent to descent
    for j, threshs in enumerate([[10, 60], [5, 10], [0, 5], [-60, 0]]):
        thresh_lo = threshs[0]
        thresh_hi = threshs[1]
        # -1000 is the fill value for trajectories that left the domain;
        # keep only those which remain in the domain
        Tall = Tr2.select('air_potential_temperature', '!=', -1000)
        Tasc = Tall.select('air_potential_temperature', '>', thresh_lo, time=[Tall.relative_times[0], Tall.relative_times[-1]])
        # BUG FIX: refine the previous selection (was `Tall.select`, which
        # discarded the lower-bound filter applied on the line above)
        Tasc = Tasc.select('air_potential_temperature', '<', thresh_hi, time=[Tall.relative_times[0], Tall.relative_times[-1]])
        plt.subplot(2, 2, 1+j)
        for var_idx in var_idxs:
            # Ensemble mean of this tracer over all selected trajectories
            Tmean = np.mean(Tasc.data[:, :, var_idx], axis = 0)
            plt.plot(Tasc.times, Tmean, linewidth = 3, label = Tasc.names[var_idx])
        plt.title('Started at all surfaces, ' + str(thresh_lo) + ' < dtheta < ' + str(thresh_hi))
        plt.ylabel('PVU')
        plt.legend(loc = 'upper left')
        plt.ylim(-.25, .25)
        # Date axis: major ticks on days, minor ticks every 6 hours
        pg = plt.gca()
        fmt = DateFormatter('\n%m/%d')
        fmt2 = DateFormatter('%H')
        majorLocator = DayLocator(interval=1)
        minorLocator = HourLocator(range(0, 24, 6))
        pg.xaxis.set_major_formatter(fmt)
        pg.xaxis.set_minor_formatter(fmt2)
        pg.xaxis.set_minor_locator(minorLocator)
        pg.xaxis.set_major_locator(majorLocator)
    plt.savefig('/home/users/bn826011/NAWDEX/From N Drive/2019_figs/dPV/' + folder[:4]
                + '_3D_PV_trajectories_all.png')
    plt.show()
def inflow_time(k = 0, levs = 3, theta_0 = [320, 325, 310, 310], hoursafterinit = [42, 42, 42, 24], folder = 'IOP3/T42', strapp = ''):
    """Overlay the delta-theta histogram curves from heating_hist for every
    6-hourly time before the outflow time, showing how the theta-change
    distribution evolves through the forecast.
    """
    xax = []
    curve = []
    # One histogram per 6-hour step up to the outflow time (Py2 int division)
    for t in xrange(hoursafterinit[k]/6):
        hists = heating_hist(k = k, levs = levs, theta_0 = theta_0, t=t, folder = folder, strapp = strapp)
        # Bin centres from the bin edges returned by plt.hist
        xax.append((hists[1][:-1]+hists[1][1:])/2)
        curve.append(hists[0])
    for t in xrange(hoursafterinit[k]/6):
        plt.plot(xax[t], curve[t], label = str(t*6), linewidth = 2)
    #plt.legend()
    plt.legend(title = 'Hours since start of forecast')
    plt.xlabel('Delta theta')
    plt.title('Difference in theta between time and outflow time')
    plt.ylabel('Number of trajectories')
    plt.savefig('IOP3_T42_inflow_time.png')
    #plt.savefig('/glusterfs/msc/users_2018/bn826011/NAWDEX/Final/' + folder + '/inflow/theta_level_distribution_all_times' + strapp + '.png')
    plt.show()
| {"hexsha": "33c1ccfefd57f8693e96e5e20cf48d4c60426045", "size": 19459, "ext": "py", "lang": "Python", "max_stars_repo_path": "wcb_outflow/jakeb_old_code/July1.py", "max_stars_repo_name": "LSaffin/wcb_airmass", "max_stars_repo_head_hexsha": "996a8907fb2eaedb3e9e27e182fca19e5c2db9bd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "wcb_outflow/jakeb_old_code/July1.py", "max_issues_repo_name": "LSaffin/wcb_airmass", "max_issues_repo_head_hexsha": "996a8907fb2eaedb3e9e27e182fca19e5c2db9bd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "wcb_outflow/jakeb_old_code/July1.py", "max_forks_repo_name": "LSaffin/wcb_airmass", "max_forks_repo_head_hexsha": "996a8907fb2eaedb3e9e27e182fca19e5c2db9bd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-09-07T12:01:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-16T04:53:08.000Z", "avg_line_length": 45.2534883721, "max_line_length": 317, "alphanum_fraction": 0.5999280539, "include": true, "reason": "import numpy", "num_tokens": 5675} |
/**
* Copyright (C) 2015 Dato, Inc.
* All rights reserved.
*
* This software may be modified and distributed under the terms
* of the BSD license. See the LICENSE file for details.
*/
#ifndef GRAPHLAB_SFRAME_SARRAY_FILE_FORMAT_V1_HPP
#define GRAPHLAB_SFRAME_SARRAY_FILE_FORMAT_V1_HPP
#include <string>
#include <memory>
#include <typeinfo>
#include <map>
#include <parallel/mutex.hpp>
#include <boost/algorithm/string/predicate.hpp>
#include <timer/timer.hpp>
#include <logger/logger.hpp>
#include <sframe/sarray_file_format_interface.hpp>
#include <sframe/sarray_index_file.hpp>
#include <fileio/general_fstream.hpp>
#include <fileio/temp_files.hpp>
#include <serialization/serialization_includes.hpp>
#include <sframe/sarray_index_file.hpp>
#include <sframe/sarray_v1_block_manager.hpp>
#include <cppipc/server/cancel_ops.hpp>
namespace graphlab {
// anonymous namespace: tuning constants local to this translation unit
namespace {
/**
 * The default size of each block in the file (pre-compression), in bytes.
 */
static const size_t DEFAULT_BLOCK_SIZE = 512 * 1024 /*512K*/;

/**
 * The maximum number of blocks that can be maintained in a reader's cache
 * before the least-recently-used blocks are evicted.
 */
static const size_t MAX_BLOCKS_IN_CACHE = 512;

/**
 * The maximum number of prefetch entries for read_segment()
 */
static const size_t MAX_ROWS_IN_CACHE_PER_READ_SEGMENT = 655360;
}
/**
* This class implements the version 1 file format.
* See below for the file format design and specification.
*
* ## version 1 file format design
 * Like the version 0 file format, the version 1 file format is designed to be
 * \b simple, but extensible.
*
* An sarray on disk is held as a collection of files with a common prefix.
* There is an index file, followed by a collection of data files.
*
* - Index File: [prefix].sidx
* - Data Files: [prefix].0000 [prefix].0001 [prefix].0002 etc.
*
* ### Index Files
* The index file has a suffix \c .sidx and contains basic information about
* the data files. The format of the index file is the Microsoft INI format
* http://en.wikipedia.org/wiki/INI_file, with the following sections and keys.
* All sections and keys are required
*
* \verbatim
* [sarray]
* ; The version of the file format. Required.
* version=1
* ; Number of segments in the array. Required
* num_segments=16
* ; The C++ mangled typeid name of the contents. Optional.
* content_type=i
* ; The block size in bytes
* block_size=512768
*
* [segment_sizes]
* ;For each segment, the number of elements in the segment
* ;There must be num_segments segments ranging from 0000 to num_segments-1
* 0000=67
* 0001=24
* 0002=57
*
* \endverbatim
*
* ### Data Files
* Data files are numbered with a four numeral (base 10) suffix. Numberings are
* sequential beginning at 0000. Each data file corresponds to one segment.
* Each data file is represented as a collection of blocks.
* Each block begins with a 24 byte header:
* - 8 bytes - number of elements in this block
* - 8 bytes - number of bytes in the block (excluding this header)
* - 8 bytes - reserved for flags.
* The remaining contents of each block is then no more than a sequential
* serialization of objects of type T.
*
* Each data file then has a variable length footer which is
* - all the block headers repeated in sequential order.
* - 8 bytes - integer containing the length of the above structure.
* (i.e. length of the complete footer - 8)
* The size of block on disk can be variable (due to compression).
*/
template <typename T>
class sarray_format_reader_v1: public sarray_format_reader<T> {
 public:
  /// Default Constructor
  inline sarray_format_reader_v1() {}

  /**
   * Destructor. Also closes the sarray if open.
   */
  inline ~sarray_format_reader_v1() {
    close();
  }

  /// deleted copy constructor
  sarray_format_reader_v1(const sarray_format_reader_v1& other) = delete;

  /// deleted assignment
  sarray_format_reader_v1& operator=(const sarray_format_reader_v1& other) = delete;

  /**
   * Open has to be called before any of the other functions are called.
   * This overload opens from an already-parsed index structure.
   * Throws a string exception if it is unable to open the index file, or if
   * there is a format error in the sarray index file.
   *
   * Will throw an exception if a file set is already open.
   */
  void open(index_file_information index) {
    // no backing .sidx file when opening from an in-memory index
    index_file = "";
    ASSERT_MSG(!array_open, "sarray already open");
    index_info = index;
    initialize();
  }

  /**
   * Open has to be called before any of the other functions are called.
   * This overload reads and parses the given .sidx file.
   * Throws a string exception if it is unable to open the index file, or if
   * there is a format error in the sarray index file.
   *
   * Will throw an exception if a file set is already open.
   */
  void open(std::string sidx_file) {
    index_file = sidx_file;
    ASSERT_MSG(!array_open, "sarray already open");
    index_info = read_index_file(index_file);
    initialize();
  }

  /**
   * Closes an sarray file set. No-op if the array is already closed.
   */
  void close() {
    // no-op if already closed
    if (!array_open) return;
    index_info = index_file_information();
    // mark array as closed
    array_open = false;
  }

  /**
   * Return the number of segments in the sarray
   */
  size_t num_segments() const {
    ASSERT_MSG(array_open, "sarray not open");
    return index_info.nsegments;
  }

  /**
   * Returns the number of elements in a given segment.
   */
  size_t segment_size(size_t segmentid) const {
    ASSERT_MSG(array_open, "sarray not open");
    ASSERT_LT(segmentid, index_info.nsegments);
    return index_info.segment_sizes[segmentid];
  }

  /**
   * Gets the contents of the index file information read from the index file
   */
  const index_file_information& get_index_info() const {
    return index_info;
  }

  /**
   * Returns the index_file of the array (the argument in \ref open).
   * Empty if the array was opened from an in-memory index.
   */
  std::string get_index_file() const {
    return index_file;
  }

  /**
   * sframe_rows overload: delegates to the base-class implementation,
   * which in turn calls the std::vector<T> overload below.
   */
  size_t read_rows(size_t row_start,
                   size_t row_end,
                   sframe_rows& out_obj) {
    return sarray_format_reader<T>::read_rows(row_start, row_end, out_obj);
  }

  /**
   * Reads a collection of rows, storing the result in out_obj.
   * This function is independent of the open_segment/read_segment/close_segment
   * functions, and can be called anytime. This function is also fully
   * concurrent.
   * \param row_start First row to read
   * \param row_end one past the last row to read (i.e. EXCLUSIVE). row_end can
   *                be beyond the end of the array, in which case,
   *                fewer rows will be read.
   * \param out_obj The output array
   * \returns Actual number of rows read. Return (size_t)(-1) on failure.
   *
   * \note This function is currently only optimized for "mostly" sequential
   * reads. i.e. we are expecting read_rows(a, b), to be soon followed by
   * read_rows(b,c), etc.
   */
  size_t read_rows(size_t row_start,
                   size_t row_end,
                   std::vector<T>& out_obj) {
    out_obj.clear();
    // prefetch every block overlapping [row_start, row_end) into the cache
    cache_rows(row_start, row_end);
    // then decode block by block; each call stops at a block boundary
    while(row_start < row_end) {
      size_t cur_rows_read = add_rows_from_block(row_start, row_end, out_obj);
      if (cur_rows_read == (size_t)(-1)) break;
      row_start += cur_rows_read;
      if (cur_rows_read == 0) break;
      if(cppipc::must_cancel()) {
        throw(std::string("Cancelled by user."));
      }
    }
    return out_obj.size();
  }

 private:
  typedef v1_block_impl::block_reader::block_id block_id;

  bool array_open = false;  // true while the array is open for reading
  std::string index_file;
  index_file_information index_info;
  v1_block_impl::block_reader block_reader;

  /*
   * Datastructures to manage read_rows
   */
  // guards block_cache (each cache entry additionally has its own lock)
  mutex block_cache_lock;
  /**
   * In memory cache of a single decompressed block
   */
  struct block_cache_data {
    mutex lock;
    // the block address cached
    block_id block_address{0,0};
    // The raw (serialized) block contents
    std::vector<char> buffer;
    // known / cached rows to buffer offset addresses; used to resume
    // decoding mid-block without re-reading from the block start
    std::map<size_t, size_t> row_to_offset;
    // timestamp of last use; drives LRU eviction in uncache_oldest()
    double last_access_time = 0.0;
  };
  std::map<block_id, std::shared_ptr<block_cache_data> > block_cache;
  timer current_time;

  /**
   * Initializes the internal data structures based on the information in the
   * index_info
   */
  void initialize() {
    // open the block reader
    block_reader.init(index_info);
    // array is now open for reading
    array_open = true;
  }

  /**
   * Caches all the blocks which cover the range of rows requested.
   * Blocks already resident in the cache are not re-read.
   */
  void cache_rows(size_t row_start, size_t row_end) {
    std::vector<block_id> blocks_to_cache;
    // probe the first row up front so an out-of-range start fails loudly
    block_id block_address = block_reader.block_containing_row(row_start);
    if (block_address.first == (size_t)(-1)) {
      log_and_throw("Unable to read row at " + std::to_string(row_start));
    }
    while(row_start < row_end) {
      block_id block_address = block_reader.block_containing_row(row_start);
      if (block_address.first == (size_t)(-1)) return;
      block_cache_lock.lock();
      size_t in_cache = block_cache.count(block_address);
      block_cache_lock.unlock();
      // if not in cache, we need to cache it
      if (!in_cache) {
        blocks_to_cache.push_back(block_address);
      }
      // advance row_start to the first row of the next block
      size_t first_row_in_block = block_reader.first_row_of_block(block_address);
      size_t rows_in_block = block_reader.num_elem_in_block(block_address);
      size_t last_row_in_block = first_row_in_block + rows_in_block;
      size_t last_row_to_read = std::min(row_end, last_row_in_block);
      row_start = last_row_to_read;
    }
    cache_blocks(blocks_to_cache);
  }

  /**
   * Caches a collection of blocks into the cache, and also returns a reference
   * to the blocks. Throws (a std::string) if any block fails to read.
   */
  std::vector<std::shared_ptr<block_cache_data> >
      cache_blocks(const std::vector<block_id>& blocks) {
    std::vector<std::shared_ptr<block_cache_data> > entries(blocks.size());
    if (blocks.size() == 0) return entries;
    std::vector<char*> buffers;
    // prepare the read_blocks call. Allocate the buffers, and the entries
    for(auto& entry: entries) {
      entry.reset(new block_cache_data);
      entry->buffer.resize(index_info.block_size);
      buffers.push_back(entry->buffer.data());
    }
    auto lens = block_reader.read_blocks(blocks, buffers);
    DASSERT_EQ(lens.size(), blocks.size());
    for (size_t i = 0; i < blocks.size(); ++i) {
      if (lens[i] == (size_t)(-1)) {
        throw("Failed to fetch block " +
              std::to_string(blocks[i].first) + ":" +
              std::to_string(blocks[i].second));
      } else {
        // shrink the buffer to the actual number of bytes read
        entries[i]->buffer.resize(lens[i]);
      }
      add_new_entry_to_cache(blocks[i], entries[i]);
    }
    return entries;
  }

  /**
   * Inserts a new cache entry into the cache
   */
  void add_new_entry_to_cache(block_id block_address,
                              std::shared_ptr<block_cache_data> entry) {
    // fill the start and end into the row_to_offset cache
    // this helps the look up later by setting left and right
    // boundaries.
    size_t first_row_in_block = block_reader.first_row_of_block(block_address);
    size_t rows_in_block = block_reader.num_elem_in_block(block_address);
    entry->row_to_offset[first_row_in_block] = 0;
    entry->row_to_offset[first_row_in_block + rows_in_block] = entry->buffer.size();
    entry->last_access_time = current_time.current_time();
    // acquire lock and stick it into the cache
    std::unique_lock<mutex> lock(block_cache_lock);
    block_cache[block_address] = entry;
  }

  /**
   * Retrieves a block either from the cache, or from the block reader
   * (updating its LRU timestamp on a cache hit).
   */
  std::shared_ptr<block_cache_data> fetch_block(block_id block_address) {
    std::unique_lock<mutex> lock(block_cache_lock);
    if (block_cache.size() > MAX_BLOCKS_IN_CACHE) {
      // evict before searching; uncache_oldest takes the lock itself
      lock.unlock();
      uncache_oldest();
      lock.lock();
    }
    auto iter = block_cache.find(block_address);
    if (iter == block_cache.end()) {
      // data is not in the cache!
      // fetch it from disk. unlock the lock first
      lock.unlock();
      auto entry = cache_blocks(std::vector<block_id>{block_address})[0];
      return entry;
    } else {
      iter->second->last_access_time = current_time.current_time();
      return iter->second;
    }
  }

  /// Removes a single block from the cache (no-op if absent).
  void uncache(v1_block_impl::block_reader::block_id block_address) {
    std::lock_guard<mutex> lock(block_cache_lock);
    block_cache.erase(block_address);
  }

  /// Evicts least-recently-used blocks until the cache fits the size cap.
  void uncache_oldest() {
    std::lock_guard<mutex> lock(block_cache_lock);
    // look for the oldest block (linear scan per eviction)
    while(block_cache.size() > MAX_BLOCKS_IN_CACHE) {
      auto oldest_iter = block_cache.begin();
      auto iter = block_cache.begin();
      while(iter != block_cache.end()) {
        if (iter->second->last_access_time <
            oldest_iter->second->last_access_time) {
          oldest_iter = iter;
        }
        ++iter;
      }
      block_cache.erase(oldest_iter);
    }
  }

  /**
   * Internal function. Reads a collection of rows, \b appending the result in
   * out_obj, but \b stopping at a block boundary.
   * This function is independent of the open_segment/read_segment/close_segment
   * functions, and can be called anytime. This function is also fully
   * concurrent.
   * \param row_start First row to read
   * \param row_end one past the last row to read (i.e. EXCLUSIVE). row_end can
   *                be beyond the end of the array, in which case,
   *                fewer rows will be read.
   * \param out_obj The output array
   * \returns Actual number of rows read. Return (size_t)(-1) on failure.
   */
  size_t add_rows_from_block(size_t row_start,
                             size_t row_end,
                             std::vector<T>& out_obj) {
    // find the block
    block_id block_address = block_reader.block_containing_row(row_start);
    // failure. start row not found
    if (block_address.first == (size_t)(-1)) return (size_t)(-1);
    // get some basic information about the block that we will need
    size_t first_row_in_block = block_reader.first_row_of_block(block_address);
    size_t rows_in_block = block_reader.num_elem_in_block(block_address);
    size_t last_row_in_block = first_row_in_block + rows_in_block;
    // fetch the block
    std::shared_ptr<block_cache_data> block = fetch_block(block_address);
    std::unique_lock<mutex> guard(block->lock);
    auto iter = block->row_to_offset.lower_bound(row_start);
    // impossible: add_new_entry_to_cache always inserts the one-past-the-end
    // row, so lower_bound cannot run off the map
    ASSERT_TRUE(iter != block->row_to_offset.end());
    // lower_bound returns the first element >= to the row_start
    // if iter->first != row_start (i.e. greater)
    // we jump to the previous known position, so we can seek forward from there.
    if (iter->first > row_start) --iter;
    // read out the iterator
    size_t currow = iter->first;
    size_t curoffset = iter->second;
    // we can release the lock here. We are done with the lookup
    guard.unlock();
    iarchive iarc(block->buffer.data() + curoffset,
                  block->buffer.size() - curoffset);
    // deserialize and discard all the rows before row_start
    T temp;
    while(currow < row_start) {
      iarc >> temp;
      ++currow;
    }
    size_t last_row_to_read = std::min(row_end, last_row_in_block);
    size_t rows_read = last_row_to_read - currow;
    while(currow < last_row_to_read) {
      iarc >> temp;
      out_obj.push_back(std::move(temp));
      ++currow;
    }
    if (row_end < last_row_in_block) {
      // add a cache of the position, so a subsequent sequential read
      // can resume decoding from row_end without re-scanning the block
      guard.lock();
      block->row_to_offset[row_end] = curoffset + iarc.off;
    } else {
      // block has been completely read. Uncache it
      uncache(block_address);
    }
    return rows_read;
  }
};
/**
 * The format v1 writer. See the format reader for details on the file format.
 *
 * The array is split into a fixed number of segments, each written as a
 * sequence of LZ4-compressed blocks of roughly block_size bytes
 * (pre-compression). An index file (.sidx) records the segment files and
 * per-segment element counts. Usage: open() -> open_segment() ->
 * write_segment()* -> close_segment() -> close().
 */
template <typename T>
class sarray_format_writer_v1 {
 public:
  /// Default constructor. The array starts closed; call \ref open first.
  sarray_format_writer_v1():array_open(false) { }

  /**
   * Destructor. Also closes the sarray if open.
   */
  ~sarray_format_writer_v1() {
    close();
  }

  /// deleted copy constructor
  sarray_format_writer_v1(const sarray_format_writer_v1& other) = delete;

  /// deleted assignment
  sarray_format_writer_v1& operator=(const sarray_format_writer_v1& other) = delete;

  /**
   * Open has to be called before any of the other functions are called.
   * Throws a string exception if it is unable to open the file set, or
   * if the file set already exists. Will also throw an exception if a file
   * set is already open. It will be created with a block size of
   * DEFAULT_BLOCK_SIZE.
   *
   * \param sidx_file The sarray index file to write
   * \param segments_to_create The number of segments the sarray is split into
   */
  void open(std::string sidx_file,
            size_t segments_to_create) {
    open(sidx_file, segments_to_create, DEFAULT_BLOCK_SIZE);
  }

  /**
   * Open has to be called before any of the other functions are called.
   * Throws a string exception if it is unable to open the file set, or
   * if the file set already exists. Will also throw an exception if a file
   * set is already open.
   *
   * \param sidx_file The sarray index file to write
   * \param segments_to_create The number of segments the sarray is split into
   * \param block_size The size of each block within the segment. Note that this
   * only affects the pre-compression size (i.e. it tries to size the serialized
   * block this block_size. But after writing to disk, the size may change).
   */
  void open(std::string sidx_file,
            size_t segments_to_create,
            size_t block_size) {
    ASSERT_MSG(!array_open, "sarray already open");
    ASSERT_MSG(boost::algorithm::ends_with(sidx_file, ".sidx"),
               "Index file must end with .sidx");
    array_open = true;
    index_file = sidx_file;
    // reset the index metadata for the new file set
    index_info = index_file_information();
    index_info.version = 1;
    index_info.nsegments = segments_to_create;
    index_info.segment_sizes.resize(index_info.nsegments, 0);
    index_info.segment_files.resize(index_info.nsegments);
    // record the mangled C++ type name of the element type for sanity checks
    index_info.content_type = typeid(T).name();
    index_info.block_size = block_size;
    // one write buffer and one pending-element counter per segment
    segment_data.resize(index_info.nsegments);
    last_block_size.resize(index_info.nsegments);
    writer.set_num_segments(index_info.nsegments);
  }

  /**
   * Closes an sarray file set. No-op if the array is already closed.
   * Also commits the index file.
   */
  void close() {
    // no-op if already closed
    if (!array_open) return;
    // close all writers, flushing any pending blocks
    for (size_t i = 0;i < segment_data.size(); ++i) {
      close_segment(i);
    }
    write_index_file();
    // done!
    // clear all variables
    array_open = false;
    index_file = "";
    index_info = index_file_information();
    segment_data.clear();
  }

  /**
   * Returns the number of parallel output segments
   * Throws an exception if the array is not open.
   */
  size_t num_segments() const {
    ASSERT_MSG(array_open, "sarray not open");
    return index_info.nsegments;
  }

  /**
   * Returns the size of each block inside the SArray
   */
  size_t block_size() const {
    return index_info.block_size;
  }

  /**
   * Returns the number of elements written to a segment so far.
   * should throw an exception if the segment ID does not exist,
   * NOTE(review): no bounds check is actually performed here — an invalid
   * segmentid indexes segment_sizes out of range.
   */
  virtual size_t segment_size(size_t segmentid) const {
    return index_info.segment_sizes[segmentid];
  }

  /**
   * Makes a particular segment writable with \ref write_segment.
   * Should throw an exception if the segment is already open, or if
   * the segment ID does not exist. Each segment should only be open once.
   */
  void open_segment(size_t segmentid) {
    log_func_entry();
    ASSERT_MSG(array_open, "sarray not open");
    ASSERT_LT(segmentid, index_info.nsegments);
    ASSERT_MSG(!segment_data[segmentid], "Segment already open");
    std::string filename;
    // put it in the same location as the index file
    // generate a prefix for the file. if segmentid is 1, this generates 0001
    // if segmentid is 2 this generates 0002, etc.
    std::stringstream strm;
    // strip the ".sidx" suffix (5 chars) and append ".<zero-padded id>"
    strm << index_file.substr(0, index_file.length() - 5) << ".";
    strm.fill('0'); strm.width(4);
    strm << segmentid;
    filename = strm.str();
    logstream(LOG_DEBUG) << "Open segment " << segmentid
                         << " for write on " << filename << std::endl;
    writer.open_segment(segmentid, filename);
    // update the index information
    index_info.segment_files[segmentid] = filename;
    // set up the in memory buffer
    segment_data[segmentid].reset(new oarchive);
    segment_data[segmentid]->expand_buf(index_info.block_size);
  }

  /**
   * Writes an object to the segment.
   * Should throw an exception if the segment is not opened with open_segment.
   */
  void write_segment(size_t segmentid, const T& t) {
    DASSERT_MSG(array_open, "sarray not open");
    DASSERT_LT(segmentid, index_info.nsegments);
    DASSERT_MSG(segment_data[segmentid], "Segment not open");
    // remember the offset so we can undo the serialization if the
    // element overflows the block
    size_t prevlen = segment_data[segmentid]->off;
    (*segment_data[segmentid]) << t;
    // have we exceeded the block size?
    if (segment_data[segmentid]->off > index_info.block_size) {
      // yup!, revert.
      segment_data[segmentid]->off = prevlen;
      // flush_block clears the last_block_size[segmentid]
      flush_block(segmentid);
      // re-serialize the element as the first entry of the new block
      (*segment_data[segmentid]) << t;
    }
    ++last_block_size[segmentid];
  }

  /**
   * Rvalue overload of \ref write_segment. Same block-overflow handling as
   * the const-ref overload.
   * NOTE(review): t is serialized, not moved, so this behaves identically
   * to the const-ref overload.
   */
  void write_segment(size_t segmentid, T&& t) {
    DASSERT_MSG(array_open, "sarray not open");
    DASSERT_LT(segmentid, index_info.nsegments);
    DASSERT_MSG(segment_data[segmentid], "Segment not open");
    size_t prevlen = segment_data[segmentid]->off;
    (*segment_data[segmentid]) << t;
    // have we exceeded the block size?
    if (segment_data[segmentid]->off > index_info.block_size) {
      // yup!, revert.
      segment_data[segmentid]->off = prevlen;
      // flush_block clears the last_block_size[segmentid]
      flush_block(segmentid);
      (*segment_data[segmentid]) << t;
    }
    ++last_block_size[segmentid];
  }

  /** Closes a segment.
   * After a segment is closed, writing to the segment will throw an error.
   * Silently ignores an out-of-range or never-opened segmentid.
   */
  void close_segment(size_t segmentid) {
    if (segmentid < index_info.nsegments) {
      // if there is an output file on the segment, close it
      if (segment_data[segmentid]) {
        // flush any partially-filled block before closing
        flush_block(segmentid);
        writer.close_segment(segmentid);
        // oarchive buffers are malloc-allocated; release explicitly
        free(segment_data[segmentid]->buf);
        segment_data[segmentid].reset();
      }
    }
  }

  /**
   * Returns the index file of the sarray files are living on.
   * \see get_file_names()
   */
  std::string get_index_file() const {
    return index_file;
  }

  /// Mutable access to the in-memory index metadata.
  index_file_information& get_index_info() {
    return index_info;
  }

  /**
   * Flushes the index_file_information to disk
   */
  virtual void write_index_file() {
    graphlab::write_index_file(index_file, index_info);
  }

 private:
  /// True between open() and close().
  bool array_open = false;
  /// Path of the .sidx index file being written.
  std::string index_file;
  /// In-memory copy of the index metadata committed by write_index_file().
  index_file_information index_info;
  // The serialization write buffers for each open segment
  // Stores in memory, the last block that has not been written.
  // When the block has been written, the archive is cleared.
  std::vector<std::unique_ptr<oarchive> > segment_data;
  // The number of elements written to the segment_data that has not been
  // flushed to disk
  std::vector<size_t> last_block_size;
  /// Low-level block writer shared by all segments.
  v1_block_impl::block_writer writer;

  /**
   * Flushes the current contents of segment_data[segment_id] to disk
   * as a block.
   */
  void flush_block(size_t segmentid) {
    // if there is no data to write, skip
    if (last_block_size[segmentid] == 0) return;
    writer.write_block(segmentid,
                       segment_data[segmentid]->buf,
                       segment_data[segmentid]->off,
                       last_block_size[segmentid],
                       v1_block_impl::LZ4_COMPRESSION /* flags */);
    // Reset all buffers so that the next block can be started
    // increment the data counter
    index_info.segment_sizes[segmentid] += last_block_size[segmentid];
    // reset the serialization buffer
    segment_data[segmentid]->off = 0;
    // reset the data counter
    last_block_size[segmentid] = 0;
  }
};
} // namespace graphlab
#endif
| {"hexsha": "7fed8b278d0758412f70f7a585f3054c490ee11a", "size": 24257, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "oss_src/sframe/sarray_file_format_v1.hpp", "max_stars_repo_name": "parquette/ParFrame", "max_stars_repo_head_hexsha": "0522aa6afdf529b3e91505b70e918f1500aae886", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "oss_src/sframe/sarray_file_format_v1.hpp", "max_issues_repo_name": "parquette/ParFrame", "max_issues_repo_head_hexsha": "0522aa6afdf529b3e91505b70e918f1500aae886", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "oss_src/sframe/sarray_file_format_v1.hpp", "max_forks_repo_name": "parquette/ParFrame", "max_forks_repo_head_hexsha": "0522aa6afdf529b3e91505b70e918f1500aae886", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6902777778, "max_line_length": 84, "alphanum_fraction": 0.6804221462, "num_tokens": 5969} |
C Copyright(C) 1999-2020 National Technology & Engineering Solutions
C of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with
C NTESS, the U.S. Government retains certain rights in this software.
C
C See packages/seacas/LICENSE for details
C=======================================================================
      subroutine mapev(nold, nnew, nvar, map, vars, scr)
C=======================================================================
C --*** MAPEV *** (GREPOS) Map values from old array to new.
C --
C --MAPEV reorders the data in VARS based on the map in MAP
C --
C --Parameters:
C --   NOLD - IN - number of old values,
C --   NNEW - IN - number of new values
C --   NVAR - IN - the number of variables
C --   MAP  - IN - map from new value to old MAP(NEW) = OLD
C --               size is 'nnew'
C --   VARS - IN/OUT - the values. On input in old order,
C --                   on output in new order
C --   SCR  - TMP - temporary storage area, at least NNEW reals

      integer map(*)
      real vars(*)
c      real vars(nold, nvar)
      real scr(*)

C ... VARS is treated as a doubly-dimensioned array (NOLD, NVAR),
C     on input and (NNEW, NVAR) on output.
C     The dimensions need to be in this order so we can read them
C     in using exgev and exgnv.  Note that dbist2 ensures that there
C     are values for all elements for each variable even if the
C     variable does not exist for a particular block.  This makes it
C     easier to transfer variables from old to new.  We don't need to
C     worry about the truth table in this routine; just transfer
C     all variables from old to new and the output takes care
C     of truth table handling.  This is a little extra work
C     done in this routine, but permits modifying truth table or
C     combining element blocks with different truth tables.

      do 30 ivar = 1, nvar
C ... Gather the IVAR-th variable into SCR in the new element order;
C     column IVAR of the (NOLD,NVAR) input layout starts at NOLD*(IVAR-1).
        do 10 i = 1, nnew
          scr(i) = vars(map(i) + nold * (ivar-1))
 10     continue
C ... Scatter back using the (NNEW,NVAR) output layout.
        do 20 i = 1, nnew
          vars(i + nnew * (ivar-1)) = scr(i)
 20     continue
 30   continue

      return
      end
| {"hexsha": "af74de7fc0729321ada721e6c4fa871b38c01976", "size": 2145, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "packages/seacas/applications/grepos/gp_mapev.f", "max_stars_repo_name": "jschueller/seacas", "max_stars_repo_head_hexsha": "14c34ae08b757cba43a3a03ec0f129c8a168a9d3", "max_stars_repo_licenses": ["Python-2.0", "Zlib", "BSD-2-Clause", "MIT", "NetCDF", "BSL-1.0", "X11", "BSD-3-Clause"], "max_stars_count": 82, "max_stars_repo_stars_event_min_datetime": "2016-02-04T18:38:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T03:01:49.000Z", "max_issues_repo_path": "packages/seacas/applications/grepos/gp_mapev.f", "max_issues_repo_name": "jschueller/seacas", "max_issues_repo_head_hexsha": "14c34ae08b757cba43a3a03ec0f129c8a168a9d3", "max_issues_repo_licenses": ["Python-2.0", "Zlib", "BSD-2-Clause", "MIT", "NetCDF", "BSL-1.0", "X11", "BSD-3-Clause"], "max_issues_count": 206, "max_issues_repo_issues_event_min_datetime": "2015-11-20T01:57:47.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T21:12:04.000Z", "max_forks_repo_path": "packages/seacas/applications/grepos/gp_mapev.f", "max_forks_repo_name": "jschueller/seacas", "max_forks_repo_head_hexsha": "14c34ae08b757cba43a3a03ec0f129c8a168a9d3", "max_forks_repo_licenses": ["Python-2.0", "Zlib", "BSD-2-Clause", "MIT", "NetCDF", "BSL-1.0", "X11", "BSD-3-Clause"], "max_forks_count": 68, "max_forks_repo_forks_event_min_datetime": "2016-01-13T22:46:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T06:25:05.000Z", "avg_line_length": 39.0, "max_line_length": 72, "alphanum_fraction": 0.5981351981, "num_tokens": 578} |
__author__ = 'Tadas'
import cv2
import numpy as np
import glob

# Root directory holding one sub-directory of PNG frames per video clip.
# where to find the results.mat:
yt_dir = r"C:\Users\Tadas\Dropbox\AAM\test data\ytceleb_annotations_CVPR2014"

# Clips to re-encode (each name is both the frame folder and the output stem).
vids = ["0035_02_003_adam_sandler",
        "0042_02_010_adam_sandler",
        "0292_02_002_angelina_jolie",
        "0293_02_003_angelina_jolie",
        "0294_02_004_angelina_jolie",
        "0502_01_005_bruce_willis",
        "0504_01_007_bruce_willis",
        "1198_01_012_julia_roberts",
        "1786_02_006_sylvester_stallone"]

for vid in vids:
    img_dir = yt_dir + "/" + vid + "/"
    # Bug fix: glob returns files in arbitrary order; sort so frames are
    # written to the video in sequence.
    yt_imgs = sorted(glob.glob(img_dir + '*.png'))

    # OpenCV 2.x API; on OpenCV 3+ this would be cv2.VideoWriter_fourcc(*'DIVX').
    fourcc = cv2.cv.CV_FOURCC('D', 'I', 'V', 'X')
    # Bug fix: the computed fourcc was previously discarded and -1 was passed,
    # which pops up an interactive codec-selection dialog on Windows.
    out = cv2.VideoWriter(yt_dir + "/" + vid + ".avi", fourcc, 30.0, (320, 240))

    for img_path in yt_imgs:
        # Renamed from `img` to avoid shadowing the path with the decoded frame.
        frame = cv2.imread(img_path)
        cv2.imshow("test", frame)
        cv2.waitKey(10)
        out.write(frame)
    out.release()
from pylearn2.train_extensions import TrainExtension
from pylearn2.datasets.preprocessing import CentralWindow
from pylearn2.utils.rng import make_np_rng
from skimage.transform import AffineTransform, warp, resize
import skimage
import numpy as np
from pylearn2.datasets import preprocessing
import random
import math
class RealtimeAugment(TrainExtension):
    """Train extension that re-randomizes dataset augmentations during training.

    At setup time the original topological views of the datasets listed in
    `randomize` and `randomize_once` are stored, and each dataset's view is
    replaced with randomly warped (rotated / scaled / sheared / translated)
    windows.  On every monitor step only the `randomize` datasets are
    re-augmented from their stored originals.
    """

    def __init__(self, window_shape, center_shape=None, central_window_shape=None, randomize=None, randomize_once=None, center=None, rotate=True,
                 scale_diff=0.0, rng=(2013, 0o2, 20), shear=0.0, translation=0.0, preprocess=None):
        """Store augmentation parameters.

        Parameters
        ----------
        window_shape : tuple
            (height, width) of the warped output window.
        center_shape : tuple, optional
            If given, datasets in `center` are cropped to this central window.
        central_window_shape : tuple, optional
            Central crop applied inside the preprocessing pipeline.
        randomize : list of datasets re-augmented on every monitor step.
        randomize_once : list of datasets augmented only once at setup.
        center : list of datasets that only get the central crop.
        rotate : bool
            # NOTE(review): stored but never consulted; rotation is always
            # applied in randomize_datasets — confirm intended behavior.
        scale_diff, shear, translation : float
            Half-widths of the uniform sampling ranges for the affine params.
        rng : seed for the numpy RNG.
            # NOTE(review): self._rng is created but the warps below draw from
            # the global np.random — confirm whether seeding was intended.
        preprocess : list of extra pylearn2 preprocessors, or None.
        """
        self._window_shape = window_shape
        self._center_shape = center_shape
        self._central_window_shape = central_window_shape
        self._randomize = randomize if randomize else []
        self._randomize_once = randomize_once if randomize_once else []
        self._center = center if center else []
        self._rotate = rotate
        self._scale_diff = scale_diff
        self._shear = shear
        self._translation = translation
        self._preprocess = preprocess
        self._rng = make_np_rng(rng, which_method="random_integers")

    def setup(self, model, dataset, algorithm):
        """Crop the `center` datasets, snapshot originals, and run the
        first round of augmentation on `randomize` + `randomize_once`."""
        if self._center_shape is not None:
            preprocessor = CentralWindow(self._center_shape)
            for data in self._center:
                preprocessor.apply(data)

        randomize_now = self._randomize + self._randomize_once
        # Keep pristine copies so each augmentation round starts from the
        # original pixels rather than compounding warps.
        self._original = dict((data,
            data.get_topological_view()) for data in randomize_now)

        self.randomize_datasets(randomize_now)

    def randomize_datasets(self, datasets):
        """Replace each dataset's topological view with freshly warped windows."""
        # Warp about the window center: shift to origin, transform, shift back.
        center_shift = np.array(self._window_shape) / 2. - 0.5
        tform_center = skimage.transform.SimilarityTransform(translation=-center_shift)
        tform_uncenter = skimage.transform.SimilarityTransform(translation=center_shift)

        if self._preprocess is not None:
            pipeline = preprocessing.Pipeline()
            # window the rotations to get rid of the uniform background
            if self._central_window_shape is not None:
                print('adding window')
                pipeline.items.append(CentralWindow(self._central_window_shape))
            for item in self._preprocess:
                pipeline.items.append(item)

        for d_idx, dataset in enumerate(datasets):
            data = self._original[dataset]
            print(data.shape)
            arr = np.empty((data.shape[0], self._window_shape[0], self._window_shape[1], data.shape[3]), dtype=np.float32)
            for idx, example in enumerate(data):
                scale_x = np.random.uniform(1 - self._scale_diff, 1 + self._scale_diff)
                scale_y = np.random.uniform(1 - self._scale_diff, 1 + self._scale_diff)
                # NOTE(review): translations are sampled around 1, not 0, so a
                # constant ~1px shift is always applied — confirm intended.
                translation_x = np.random.uniform(1 - self._translation, 1 + self._translation)
                translation_y = np.random.uniform(1 - self._translation, 1 + self._translation)
                shear = np.random.uniform(0. - self._shear, 0. + self._shear)
                rotation = np.random.uniform(0, 360)

                tform = AffineTransform(scale=(scale_x, scale_y), rotation=np.deg2rad(rotation),
                                        translation=(translation_x, translation_y), shear=shear)
                tform = tform_center + tform + tform_uncenter
                img = warp(example, tform, output_shape=self._window_shape)
                arr[idx] = img

            dataset.set_topological_view(arr, axes=dataset.view_converter.axes)

            # assumes self._randomize is in order of [train, valid/test]:
            # fit preprocessor statistics on the first dataset only.
            if self._preprocess is not None:
                can_fit = (d_idx != 1)
                dataset.apply_preprocessor(preprocessor=pipeline, can_fit=can_fit)

    def on_monitor(self, model, dataset, algorithm):
        """Called after each monitoring step; arguments are unused.

        Removed the dead `model = None` / `dataset = None` / `algorithm = None`
        rebindings — they only rebound locals and had no effect.
        """
        self.randomize_datasets(self._randomize)
| {"hexsha": "f020436143fdca6485b2bea4e9290c1d07ec68f1", "size": 4298, "ext": "py", "lang": "Python", "max_stars_repo_path": "data/external/repositories_2to3/132209/datasciencebowl-master/realtime_augment.py", "max_stars_repo_name": "Keesiu/meta-kaggle", "max_stars_repo_head_hexsha": "87de739aba2399fd31072ee81b391f9b7a63f540", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "data/external/repositories_2to3/132209/datasciencebowl-master/realtime_augment.py", "max_issues_repo_name": "Keesiu/meta-kaggle", "max_issues_repo_head_hexsha": "87de739aba2399fd31072ee81b391f9b7a63f540", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data/external/repositories_2to3/132209/datasciencebowl-master/realtime_augment.py", "max_forks_repo_name": "Keesiu/meta-kaggle", "max_forks_repo_head_hexsha": "87de739aba2399fd31072ee81b391f9b7a63f540", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-12-04T08:23:33.000Z", "max_forks_repo_forks_event_max_datetime": "2019-12-04T08:23:33.000Z", "avg_line_length": 47.2307692308, "max_line_length": 146, "alphanum_fraction": 0.630525826, "include": true, "reason": "import numpy", "num_tokens": 924} |
# This test code was written by the `hypothesis.extra.ghostwriter` module
# and is provided under the Creative Commons Zero public domain dedication.
import numpy as np
import pytest
import torch
from hypothesis import assume, given, settings
from hypothesis import strategies as st
from hypothesis.extra.numpy import arrays
import privacyraven.extraction.synthesis
import privacyraven.utils.query
from libs.PrivacyRaven.src.privacyraven.models.victim import train_four_layer_mnist_victim
from libs.PrivacyRaven.src.privacyraven.utils import model_creation
# Establish strategies
# NOTE(review): the victim model is trained at import time, so merely
# collecting these tests triggers training — confirm this is intended.
device = torch.device("cpu")  # appears unused below — presumably pins intent to CPU; verify
model = train_four_layer_mnist_victim(gpus=torch.cuda.device_count())  # victim queried by all tests
def query_mnist(input_data):
    """Query the module-level MNIST victim model with *input_data*."""
    mnist_input_size = (1, 28, 28, 1)
    return privacyraven.utils.query.get_target(model, input_data, mnist_input_size)
def valid_query():
    """Hypothesis strategy that always yields the MNIST query function."""
    query_strategy = st.just(query_mnist)
    return query_strategy
def valid_data():
    """Hypothesis strategy generating float64 batches of 10 MNIST-shaped images."""
    batch_shape = (10, 28, 28, 1)
    return arrays(np.float64, batch_shape, st.floats())
@given(query_func=valid_query(), input_size=st.just((1, 28, 28, 1)))
def test_fuzz_establish_query(query_func, input_size):
    """establish_query should hand back a callable query function."""
    established = privacyraven.utils.query.establish_query(
        query_func=query_func, input_size=input_size
    )
    assert callable(established) is True
@settings(deadline=None)
@given(
    model=st.just(model), input_data=valid_data(), input_size=st.just((1, 28, 28, 1))
)
def test_fuzz_get_target(model, input_data, input_size):
    """get_target should yield a class index within the MNIST label range."""
    tensor_data = torch.from_numpy(input_data)
    target = privacyraven.utils.query.get_target(
        model=model, input_data=tensor_data, input_size=input_size
    )
    predicted_class = torch.argmax(target)
    assert predicted_class >= 0
    assert predicted_class < 10
@settings(deadline=None)
@given(
    input_data=valid_data(),
    input_size=st.just((1, 28, 28, 1)),
    single=st.just(False),
    warning=st.just(False),
)
def test_fuzz_reshape_input(input_data, input_size, single, warning):
    """reshape_input should run cleanly on fuzzed MNIST-shaped batches."""
    reshaped = privacyraven.utils.query.reshape_input(
        input_data=input_data, input_size=input_size, single=single, warning=warning
    )
    # assert reshaped.size() == torch.Size([1, 28, 28, 1])
import math
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
def make_upconv_net(in_channels, in_h, upconv_specs):
    """Build a stack of ConvTranspose2d -> [BatchNorm2d] -> ReLU layers.

    Args:
        in_channels: channel count of the input feature map.
        in_h: spatial height (== width) of the square input feature map.
        upconv_specs: dict with parallel lists 'kernel_sizes', 'num_channels',
            'strides', 'paddings', 'output_paddings', plus bool 'use_bn'.
            Convolutions carry a bias only when batch-norm is disabled.

    Returns:
        (nn.Sequential, final_channels, final_height) — the height is tracked
        with the standard transposed-conv size formula. Each layer's resulting
        shape is printed as a side effect.
    """
    use_bn = upconv_specs['use_bn']
    layer_params = zip(upconv_specs['kernel_sizes'],
                       upconv_specs['num_channels'],
                       upconv_specs['strides'],
                       upconv_specs['paddings'],
                       upconv_specs['output_paddings'])

    layers = []
    cur_ch, cur_h = in_channels, in_h
    for kernel, out_ch, stride, pad, out_pad in layer_params:
        layers.append(nn.ConvTranspose2d(cur_ch, out_ch, kernel, stride=stride,
                                         padding=pad, output_padding=out_pad,
                                         bias=not use_bn))
        if use_bn:
            layers.append(nn.BatchNorm2d(out_ch))
        layers.append(nn.ReLU())

        cur_ch = out_ch
        # Transposed-conv output size: (H - 1)*s - 2*p + k + op
        cur_h = (cur_h - 1) * stride - 2 * pad + kernel + out_pad
        print('--> %dx%dx%d' % (cur_ch, cur_h, cur_h))

    return nn.Sequential(*layers), cur_ch, cur_h
def make_conv_net(in_channels, in_h, conv_specs):
    """Build a stack of Conv2d -> [BatchNorm2d] -> ReLU layers.

    Args:
        in_channels: channel count of the input feature map.
        in_h: spatial height (== width) of the square input feature map.
        conv_specs: dict with parallel lists 'kernel_sizes', 'num_channels',
            'strides', 'paddings', plus bool 'use_bn'. Convolutions carry a
            bias only when batch-norm is disabled.

    Returns:
        (nn.Sequential, final_channels, final_height) — the height follows the
        standard conv size formula. Each layer's resulting shape is printed.
    """
    use_bn = conv_specs['use_bn']
    layer_params = zip(conv_specs['kernel_sizes'],
                       conv_specs['num_channels'],
                       conv_specs['strides'],
                       conv_specs['paddings'])

    layers = []
    cur_ch, cur_h = in_channels, in_h
    for kernel, out_ch, stride, pad in layer_params:
        layers.append(nn.Conv2d(cur_ch, out_ch, kernel, stride=stride,
                                padding=pad, bias=not use_bn))
        if use_bn:
            layers.append(nn.BatchNorm2d(out_ch))
        layers.append(nn.ReLU())

        cur_ch = out_ch
        # Conv output size: floor(1 + (H + 2*p - k)/s)
        cur_h = int(math.floor(1 + (cur_h + 2 * pad - kernel) / stride))
        print('--> %dx%dx%d' % (cur_ch, cur_h, cur_h))

    return nn.Sequential(*layers), cur_ch, cur_h
def make_fc_net(in_size, fc_specs):
    """Build a stack of Linear -> [BatchNorm1d] -> ReLU layers.

    Args:
        in_size: feature dimension of the input.
        fc_specs: dict with list 'hidden_sizes' and bool 'use_bn'. Linear
            layers carry a bias only when batch-norm is disabled.

    Returns:
        (nn.Sequential, output_size). Bug fix: the original returned the loop
        variable `out_size`, which was unbound (NameError) when
        'hidden_sizes' was empty; now an empty spec returns an identity
        Sequential and `in_size` unchanged.
    """
    fc_hidden_sizes = fc_specs['hidden_sizes']
    use_bn = fc_specs['use_bn']

    fc_list = nn.ModuleList()
    out_size = in_size  # defined even when there are no hidden layers
    for hidden_size in fc_hidden_sizes:
        seq = [nn.Linear(in_size, hidden_size, bias=not use_bn)]
        if use_bn:
            seq.append(nn.BatchNorm1d(hidden_size))
        seq.append(nn.ReLU())
        fc_list.extend(seq)
        in_size = hidden_size
        out_size = hidden_size

    fc_seq = nn.Sequential(*fc_list)
    return fc_seq, out_size
| {"hexsha": "dbaa2e4afcec1663746b07e9f0de657e75a541c4", "size": 2440, "ext": "py", "lang": "Python", "max_stars_repo_path": "gen_models/__init__.py", "max_stars_repo_name": "yifan-you-37/rl_swiss", "max_stars_repo_head_hexsha": "8b0ee7caa5c1fa93860916004cf4fd970667764f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 56, "max_stars_repo_stars_event_min_datetime": "2019-10-20T03:09:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T09:21:40.000Z", "max_issues_repo_path": "gen_models/__init__.py", "max_issues_repo_name": "yifan-you-37/rl_swiss", "max_issues_repo_head_hexsha": "8b0ee7caa5c1fa93860916004cf4fd970667764f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-10-01T07:33:51.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-12T03:40:57.000Z", "max_forks_repo_path": "gen_models/__init__.py", "max_forks_repo_name": "yifan-you-37/rl_swiss", "max_forks_repo_head_hexsha": "8b0ee7caa5c1fa93860916004cf4fd970667764f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2019-11-04T16:56:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T09:21:41.000Z", "avg_line_length": 31.6883116883, "max_line_length": 105, "alphanum_fraction": 0.6483606557, "include": true, "reason": "import numpy", "num_tokens": 685} |
__author__ = "Md. Ahsan Ayub"
__license__ = "GPL"
__credits__ = ["Ayub, Md. Ahsan", "Johnson, Will",
"Siraj, Ambareen"]
__maintainer__ = "Md. Ahsan Ayub"
__email__ = "mayub42@students.tntech.edu"
__status__ = "Prototype"
# Modular function to apply decision tree classifier
def DT_classifier(X, Y, numFold):
    """Run a decision tree through stratified K-fold CV and report metrics.

    For each fold: fit, predict, and accumulate accuracy / precision /
    recall / F1. For binary problems it additionally draws per-fold and
    mean ROC curves and saves the figure as an EPS file.

    Parameters:
        X : feature matrix (indexable by the fold index arrays).
        Y : label vector; binary vs. multi-class is detected via np.unique.
        numFold : number of stratified folds.
    """
    # Initialization of the figure
    myFig = plt.figure(figsize=[12,10])

    # Stratified K-Folds cross-validator
    cv = StratifiedKFold(n_splits=numFold,random_state=None, shuffle=False)

    # Initialization of the decision tree classifier
    classifier = tree.DecisionTreeClassifier()

    # Per-fold metric accumulators
    acc_scores = []
    precision_scores = []
    recall_scores = []
    f1_scores = []

    # ROC bookkeeping (binary case only): interpolated TPRs and per-fold AUCs
    tprs = []
    aucs = []
    mean_fpr = np.linspace(0, 1, 100)

    i = 1  # 1-based fold counter, used for ROC legend labels
    for train, test in cv.split(X, Y):
        # Spliting the dataset
        X_train, X_test, Y_train, Y_test = X[train], X[test], Y[train], Y[test]

        # Fitting the classifier into training set
        classifier = classifier.fit(X_train, Y_train)

        # Breakdown of statistical measure based on classes
        Y_pred = classifier.predict(X_test)
        print(classification_report(Y_test, Y_pred, digits=4))

        # Compute the model's performance
        acc_scores.append(accuracy_score(Y_test, Y_pred))

        if(len(np.unique(Y)) > 2):
            # Multi-class: average the per-class scores for this fold
            f1_scores_temp = []
            f1_scores_temp.append(f1_score(Y_test, Y_pred, average=None))
            f1_scores.append(np.mean(f1_scores_temp))
            del f1_scores_temp

            precision_scores_temp = []
            precision_scores_temp.append(precision_score(Y_test, Y_pred, average=None))
            precision_scores.append(np.mean(precision_scores_temp))
            del precision_scores_temp

            recall_scores_temp = []
            recall_scores_temp.append(recall_score(Y_test, Y_pred, average=None))
            recall_scores.append(np.mean(recall_scores_temp))
            del recall_scores_temp
        else:
            # Binary: score the positive class directly
            f1_scores.append(f1_score(Y_test, Y_pred, average='binary'))
            precision_scores.append(precision_score(Y_test, Y_pred, average='binary'))
            recall_scores.append(recall_score(Y_test, Y_pred, average='binary'))

        if(len(np.unique(Y)) == 2):
            # ROC for this fold, interpolated onto the common FPR grid
            # NOTE(review): `interp` is not defined in this block — presumably
            # `from scipy import interp` (or np.interp) at the top of the file.
            probas_ = classifier.predict_proba(X_test)
            fpr, tpr, thresholds = roc_curve(Y_test, probas_[:, 1])
            tprs.append(interp(mean_fpr, fpr, tpr))
            tprs[-1][0] = 0.0  # force the curve to start at the origin
            roc_auc = auc(fpr, tpr)
            aucs.append(roc_auc)
            plt.plot(fpr, tpr, lw=1, color='black', alpha=0.5,
                     label='ROC fold %d (AUC = %0.3f)' % (i, roc_auc))

        print("Iteration ongoing inside DT method - KFold step: ", i)
        i += 1

    if(len(np.unique(Y)) == 2):
        # Chance-level diagonal plus the mean ROC over all folds
        plt.plot([0,1],[0,1],linestyle = '--',lw = 1, alpha=0.5, color = 'black')
        mean_tpr = np.mean(tprs, axis=0)
        mean_tpr[-1] = 1.0  # force the curve to end at (1, 1)
        mean_auc = auc(mean_fpr, mean_tpr)
        plt.plot(mean_fpr, mean_tpr, color='black',
                 label=r'Mean ROC (AUC = %0.3f)' % (mean_auc),
                 lw=2, alpha=0.8)

        plt.xlim([-0.05, 1.05])
        plt.ylim([-0.05, 1.05])
        plt.xlabel('False Positive Rate', fontsize=18, weight='bold')
        plt.ylabel('True Positive Rate', fontsize=18, weight='bold')
        plt.title('Receiver Operating Characteristic (ROC) Curve\nDecision Tree', fontsize=20, fontweight='bold')
        plt.legend(loc="lower right",fontsize=14)
        plt.xticks(fontsize=16)
        plt.yticks(fontsize=16)
        plt.show()

        fileName = 'Decision_Tree_ROC_' + str(numFold) + '_Fold.eps'
        # Saving the figure
        myFig.savefig(fileName, format='eps', dpi=1200)

    # Statistical measurement of the model (means over folds)
    print("Accuracy: ", np.mean(acc_scores))
    print("Precision: ", np.mean(precision_scores))
    print("Recall: ", np.mean(recall_scores))
    print("F1: ", np.mean(f1_scores))

    if(len(np.unique(Y)) == 2):
        # Per-fold raw scores, printed for the binary case only
        print(acc_scores)
        print(precision_scores)
        print(recall_scores)
        print(f1_scores)
# Modular function to apply artificial neural network
def ANN_classifier(X, Y, batchSize, epochCount):
    """Train and evaluate a feed-forward neural network classifier.

    Builds a Keras Sequential network with two hidden layers, trains it
    on a stratified 80/20 train/test split with early stopping, prints
    evaluation metrics, and saves accuracy/loss (and, for the binary
    case, ROC) plots to EPS files.

    Parameters:
        X: feature matrix, shape (n_samples, n_features).
        Y: label vector; binary vs. multi-class behaviour is selected
           from len(np.unique(Y)).
        batchSize: mini-batch size used during training.
        epochCount: maximum number of training epochs.
    """
    myFig = plt.figure(figsize=[12, 10])
    # Split the dataset into training and test sets (stratified so both
    # keep the original class proportions).
    from sklearn.model_selection import train_test_split
    X_train, X_test, Y_train, Y_test = train_test_split(
        X, Y, test_size=0.2, random_state=42, stratify=Y)
    # Initializing the ANN.
    classifier = Sequential()
    # Input layer and first hidden layer (half as many units as inputs).
    classifier.add(Dense(output_dim=round(X.shape[1] / 2), init='uniform',
                         activation='relu', input_dim=X.shape[1]))
    # Second hidden layer.
    classifier.add(Dense(output_dim=round(X.shape[1] / 2), init='uniform',
                         activation='relu'))
    # Optional dropout layer (disabled).
    # classifier.add(Dropout(0.4))
    if len(np.unique(Y)) > 2:  # Multi-class classification task.
        # One softmax unit per class; sparse loss takes integer labels.
        classifier.add(Dense(output_dim=len(np.unique(Y)), init='uniform',
                             activation='softmax'))
        classifier.compile(optimizer='adam',
                           loss='sparse_categorical_crossentropy',
                           metrics=['accuracy'])
    else:  # Binary classification task.
        classifier.add(Dense(output_dim=1, init='uniform',
                             activation='sigmoid'))
        classifier.compile(optimizer='adam', loss='binary_crossentropy',
                           metrics=['accuracy'])
    # Stop training once the validation loss stops improving.
    callbacks = [EarlyStopping(monitor='val_loss', patience=2)]
    # Fitting the ANN to the training set (10% held out for validation).
    history = classifier.fit(X_train,
                             Y_train,
                             callbacks=callbacks,
                             validation_split=0.1,
                             batch_size=batchSize,
                             epochs=epochCount,
                             shuffle=True)
    print(history.history)
    print(classifier.summary())
    # ------ Evaluation -------
    print("Artificial Neural Network")
    # Predicting the test set class labels.
    Y_pred = classifier.predict_classes(X_test)
    # BUGFIX: threshold only in the binary case. predict_classes already
    # returns integer class indices, so (Y_pred > 0.5) would collapse
    # every multi-class label above 0 into a single class.
    if len(np.unique(Y)) == 2:
        Y_pred = (Y_pred > 0.5)
    # Breakdown of statistical measures per class.
    print(classification_report(Y_test, Y_pred, digits=4))
    # Confusion matrix and aggregate scores.
    cm = confusion_matrix(Y_test, Y_pred)
    print("Confusion Matrix:\n", cm)
    print("Accuracy: ", accuracy_score(Y_test, Y_pred))
    if len(np.unique(Y)) == 2:
        print("F1: ", f1_score(Y_test, Y_pred, average='binary'))
        print("Precison: ", precision_score(Y_test, Y_pred, average='binary'))
        print("Recall: ", recall_score(Y_test, Y_pred, average='binary'))
    else:
        # Report the unweighted mean of the per-class scores.
        f1_scores = f1_score(Y_test, Y_pred, average=None)
        print("F1: ", np.mean(f1_scores))  # BUGFIX: was printed twice.
        precision_scores = precision_score(Y_test, Y_pred, average=None)
        print("Precison: ", np.mean(precision_scores))
        recall_scores = recall_score(Y_test, Y_pred, average=None)
        print("Recall: ", np.mean(recall_scores))
    # ------------ Print Accuracy over Epoch --------------------
    plt.plot(history.history['accuracy'], linestyle=':', lw=2, alpha=0.8, color='black')
    plt.plot(history.history['val_accuracy'], linestyle='--', lw=2, alpha=0.8, color='black')
    plt.title('Accuracy over Epoch\nArtificial Neural Network', fontsize=20, weight='bold')
    plt.ylabel('Accuracy', fontsize=18, weight='bold')
    plt.xlabel('Epoch', fontsize=18, weight='bold')
    plt.legend(['Train', 'Validation'], loc='lower right', fontsize=14)
    plt.xticks(ticks=range(0, len(history.history['accuracy'])))
    plt.yticks(fontsize=16)
    plt.show()
    if len(np.unique(Y)) == 2:
        fileName = 'ANN_Accuracy_over_Epoch_Binary_Classification.eps'
    else:
        fileName = 'ANN_Accuracy_over_Epoch_Multiclass_Classification.eps'
    # Saving the figure.
    myFig.savefig(fileName, format='eps', dpi=1200)
    # ------------ Print Loss over Epoch --------------------
    # Clear figure and start a fresh one for the loss curves.
    plt.clf()
    myFig = plt.figure(figsize=[12, 10])
    plt.plot(history.history['loss'], linestyle=':', lw=2, alpha=0.8, color='black')
    plt.plot(history.history['val_loss'], linestyle='--', lw=2, alpha=0.8, color='black')
    plt.title('Loss over Epoch\nArtificial Neural Network', fontsize=20, weight='bold')
    plt.ylabel('Loss', fontsize=18, weight='bold')
    plt.xlabel('Epoch', fontsize=18, weight='bold')
    plt.legend(['Train', 'Validation'], loc='upper right', fontsize=14)
    plt.xticks(ticks=range(0, len(history.history['loss'])))
    plt.yticks(fontsize=16)
    plt.show()
    if len(np.unique(Y)) == 2:
        fileName = 'ANN_Loss_over_Epoch_Binary_Classification.eps'
    else:
        fileName = 'ANN_Loss_over_Epoch_Multiclass_Classification.eps'
    # Saving the figure.
    myFig.savefig(fileName, format='eps', dpi=1200)
    # ------------ ROC Curve (binary case only) --------------------
    plt.clf()
    myFig = plt.figure(figsize=[12, 10])
    if len(np.unique(Y)) == 2:
        fpr, tpr, _ = roc_curve(Y_test, Y_pred)
        plt.plot(fpr, tpr, color='black',
                 label=r'ROC (AUC = %0.3f)' % (auc(fpr, tpr)),
                 lw=2, alpha=0.8)
        plt.xlim([-0.05, 1.05])
        plt.ylim([-0.05, 1.05])
        plt.xlabel('False Positive Rate', fontsize=18, weight='bold')
        plt.ylabel('True Positive Rate', fontsize=18, weight='bold')
        plt.title('Receiver Operating Characteristic (ROC) Curve\nArtificial Neural Network', fontsize=20, fontweight='bold')
        plt.legend(loc="lower right", fontsize=14)
        plt.xticks(fontsize=16)
        plt.yticks(fontsize=16)
        plt.show()
        fileName = 'ANN_Binary_Classification_ROC.eps'
        # Saving the figure.
        myFig.savefig(fileName, format='eps', dpi=1200)
# Importing the libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Libraries relevant to performance metrics
from sklearn.metrics import roc_curve, auc, confusion_matrix, classification_report, accuracy_score, precision_score, recall_score, f1_score
from sklearn.model_selection import StratifiedKFold
from scipy import interp
from sklearn.preprocessing import MinMaxScaler
# Libraries relevant to supervised learning
from sklearn import tree
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Input
from keras.callbacks import EarlyStopping
#importing the data set (ARFF format, parsed via scipy)
from scipy.io import arff
data = arff.loadarff('../TRAbID/probe_known_attacks.arff')
dataset = pd.DataFrame(data[0])
print(dataset.head())
# Some manual processing on the dataframe.
# NOTE(review): scipy's loadarff returns nominal columns as bytes; the
# str comparisons in the sampling loop below may never match without a
# .str.decode('utf-8') step -- verify against the actual file.
dataset = dataset.dropna()
dataset = dataset.drop(['Flow_ID', '_Source_IP', '_Destination_IP', '_Timestamp'], axis = 1)
dataset['Flow_Bytes/s'] = dataset['Flow_Bytes/s'].astype(float)
dataset['_Flow_Packets/s'] = dataset['_Flow_Packets/s'].astype(float)
# NOTE(review): this is an alias, not a copy -- writes to dataset_sample
# in the sampling loop also mutate dataset. Use dataset.copy() if that
# is unintended.
dataset_sample = dataset
#dataset_sample = dataset_sample.truncate(before=0, after=0)
#dataset_sample.loc[dataset.index[1]] = dataset.iloc[5475]
# Per-class sampling caps: keep up to max_count-1 rows of each attack
# class; iBenign counts DOWN from 20000 kept BENIGN rows.
max_count = 1501
iBenign = 20000
iDDoS = 0
iBot = 0
iDoS_slowloris = 0
iDoS_Slowhttptest = 0
iDoS_Hulk = 0
iDoS_GoldenEye = 0
iHeartbleed = 0
iFTP_Patator = 0
iSSH_Patator = 0
iWeb_Attack_BF = 0
iWeb_Attack_XSS = 0
iWeb_Attack_SQL = 0
iInfiltration = 0
# Write cursor into dataset_sample for the next kept row.
iIndex = 0
# Down-sample the dataset row by row: copy kept rows to the head of
# dataset_sample (which aliases dataset -- see note above).
# NOTE(review): the hard-coded 948995 upper bound presumably matches the
# raw row count -- confirm, or use len(dataset).
# NOTE(review): the 'Web Attack ñ ...' labels look like a mis-decoded
# en dash from the original data; they must match the source labels
# byte-for-byte, so they are kept verbatim here.
for i in range(0,948995):
    try:
        if(dataset.iloc[[i]]['Label'].values == 'BENIGN'):
            iBenign = iBenign - 1
            if(iBenign >= 0):
                #dataset = dataset.drop([i], axis=0)
                dataset_sample.loc[dataset.index[iIndex]] = dataset.iloc[i]
                iIndex = iIndex + 1
        elif(dataset.iloc[[i]]['Label'].values == 'DDoS'):
            iDDoS = iDDoS + 1
            if(iDDoS < max_count):
                #dataset = dataset.drop([i], axis=0)
                dataset_sample.loc[dataset.index[iIndex]] = dataset.iloc[i]
                iIndex = iIndex + 1
        elif(dataset.iloc[[i]]['Label'].values == 'Bot'):
            iBot = iBot + 1
            if(iBot < max_count):
                #dataset = dataset.drop([i], axis=0)
                dataset_sample.loc[dataset.index[iIndex]] = dataset.iloc[i]
                iIndex = iIndex + 1
        elif(dataset.iloc[[i]]['Label'].values == 'DoS slowloris'):
            iDoS_slowloris = iDoS_slowloris + 1
            if(iDoS_slowloris < max_count):
                #dataset = dataset.drop([i], axis=0)
                dataset_sample.loc[dataset.index[iIndex]] = dataset.iloc[i]
                iIndex = iIndex + 1
        elif(dataset.iloc[[i]]['Label'].values == 'DoS Slowhttptest'):
            iDoS_Slowhttptest = iDoS_Slowhttptest + 1
            if(iDoS_Slowhttptest < max_count):
                #dataset = dataset.drop([i], axis=0)
                dataset_sample.loc[dataset.index[iIndex]] = dataset.iloc[i]
                iIndex = iIndex + 1
        elif(dataset.iloc[[i]]['Label'].values == 'DoS Hulk'):
            iDoS_Hulk = iDoS_Hulk + 1
            if(iDoS_Hulk < max_count):
                #dataset = dataset.drop([i], axis=0)
                dataset_sample.loc[dataset.index[iIndex]] = dataset.iloc[i]
                iIndex = iIndex + 1
        elif(dataset.iloc[[i]]['Label'].values == 'DoS GoldenEye'):
            iDoS_GoldenEye = iDoS_GoldenEye + 1
            if(iDoS_GoldenEye < max_count):
                #dataset = dataset.drop([i], axis=0)
                dataset_sample.loc[dataset.index[iIndex]] = dataset.iloc[i]
                iIndex = iIndex + 1
        elif(dataset.iloc[[i]]['Label'].values == 'Heartbleed'):
            iHeartbleed = iHeartbleed + 1
            if(iHeartbleed < max_count):
                #dataset = dataset.drop([i], axis=0)
                dataset_sample.loc[dataset.index[iIndex]] = dataset.iloc[i]
                iIndex = iIndex + 1
        elif(dataset.iloc[[i]]['Label'].values == 'FTP-Patator'):
            iFTP_Patator = iFTP_Patator + 1
            if(iFTP_Patator < max_count):
                #dataset = dataset.drop([i], axis=0)
                dataset_sample.loc[dataset.index[iIndex]] = dataset.iloc[i]
                iIndex = iIndex + 1
        elif(dataset.iloc[[i]]['Label'].values == 'SSH-Patator'):
            iSSH_Patator = iSSH_Patator + 1
            if(iSSH_Patator < max_count):
                #dataset = dataset.drop([i], axis=0)
                dataset_sample.loc[dataset.index[iIndex]] = dataset.iloc[i]
                iIndex = iIndex + 1
        elif(dataset.iloc[[i]]['Label'].values == 'Web Attack ñ Brute Force'):
            iWeb_Attack_BF = iWeb_Attack_BF + 1
            if(iWeb_Attack_BF < max_count):
                #dataset = dataset.drop([i], axis=0)
                dataset_sample.loc[dataset.index[iIndex]] = dataset.iloc[i]
                iIndex = iIndex + 1
        elif(dataset.iloc[[i]]['Label'].values == 'Web Attack ñ XSS'):
            iWeb_Attack_XSS = iWeb_Attack_XSS + 1
            if(iWeb_Attack_XSS < max_count):
                #dataset = dataset.drop([i], axis=0)
                dataset_sample.loc[dataset.index[iIndex]] = dataset.iloc[i]
                iIndex = iIndex + 1
        elif(dataset.iloc[[i]]['Label'].values == 'Web Attack ñ Sql Injection'):
            iWeb_Attack_SQL = iWeb_Attack_SQL + 1
            if(iWeb_Attack_SQL < max_count):
                #dataset = dataset.drop([i], axis=0)
                dataset_sample.loc[dataset.index[iIndex]] = dataset.iloc[i]
                iIndex = iIndex + 1
        elif(dataset.iloc[[i]]['Label'].values == 'Infiltration'):
            iInfiltration = iInfiltration + 1
            if(iInfiltration < max_count):
                #dataset = dataset.drop([i], axis=0)
                dataset_sample.loc[dataset.index[iIndex]] = dataset.iloc[i]
                iIndex = iIndex + 1
        else:
            continue
        print(i)
    # NOTE(review): bare except silently swallows every error (including
    # KeyboardInterrupt); narrowing it to e.g. IndexError/KeyError would
    # stop it from hiding real bugs.
    except:
        print("Exception")
        continue
# Persist the down-sampled dataset for reuse.
dataset_sample.to_csv('sample_dataset.csv', index = None, header=True)
# Creating X and Y from the dataset.
# BUGFIX: Y_class must be extracted from the dataframe *before* the
# label encoder is fitted -- the original called le.fit(Y_class) on a
# not-yet-defined name (NameError). A print of the undefined name
# Y_attack was removed for the same reason.
dataset['class'] = dataset['class'].astype(str)
Y_class = dataset.iloc[:,-1].values
Y_class = Y_class.astype(str)
# Encode string class labels to integers.
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
le.fit(Y_class)
Y_class = le.transform(Y_class)
print(list(le.classes_))
# Feature matrix: the first 43 columns, cast to int.
X = dataset.iloc[:,0:43].values
X = X.astype(int)
# Scale features into [0, 1].
scaler = MinMaxScaler().fit(X)
X_scaled = np.array(scaler.transform(X))
# 5-fold cross-validated decision tree evaluation.
DT_classifier(X_scaled, Y_class, 5)
#coding=utf-8
import rospy
from std_msgs.msg import Header
from sensor_msgs.msg import Image, NavSatFix
from map_generator.msg import tjy
from nav_msgs.msg import Path
import numpy as np
import time
from googleplaces import GooglePlaces
import googlemaps
import time
import sys
import math
from math import cos,sin,tan,sqrt
from visualization_msgs.msg import Marker
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
C_EARTH = 6378137.0
class GoogleMaps(object):
    """Thin wrapper around the Google Places / Geocoding clients that
    pages through nearby-search results and collects place summaries."""

    def __init__(self):
        # NOTE(review): the API key is hard-coded in source; consider
        # loading it from an environment variable instead.
        self._GOOGLE_MAPS_KEY = "AIzaSyD4gxyxClNkmTXbTkTseNvFwA16YR9NAPE"
        self._Google_Places = GooglePlaces(self._GOOGLE_MAPS_KEY)
        self._Google_Geocod = googlemaps.Client(key=self._GOOGLE_MAPS_KEY)

    def _nearby_search(self, lng, lat, language, radius, result=None):
        """Fetch one page of nearby-search results.

        With result=None the first page is requested; otherwise the page
        following `result` is fetched, or None when no pages remain.
        """
        location = {'lat': lat, 'lng': lng}
        if result is None:
            return self._Google_Places.nearby_search(
                language=language, lat_lng=location, radius=radius)
        if not result.has_next_page_token:
            return None
        #print(result.next_page_token)
        return self._Google_Places.nearby_search(
            pagetoken=result.next_page_token, lat_lng=location, radius=radius)

    def get_all_data(self, lng, lat, language='en', radius=100):
        """Collect {"name", "lng", "lat"} dicts for every place found
        near (lat, lng), walking all result pages."""
        count = 0
        collected = []
        page = self._nearby_search(lng, lat, language, radius)
        while page is not None:
            for place in page.places:
                # Returned places from a query are place summaries.
                print(place.name)
                print(place.geo_location['lng'])
                print(place.geo_location['lat'])
                print(count)
                count += 1
                collected.append({"name": place.name,
                                  "lng": place.geo_location['lng'],
                                  "lat": place.geo_location['lat']})
                # Further attributes (details, phone numbers, website,
                # url) would need an extra get_details() API call.
            # Advance to the next page (None once exhausted).
            page = self._nearby_search(lng, lat, language, radius, page)
        return collected
class Transform(object):
    """Estimate and apply a similarity transform (rotation + uniform
    scale + translation) mapping one 3-D point set onto another, using
    the SVD-based Kabsch/Umeyama procedure.
    """

    def __init__(self):
        # Cached parameters, set by transform_3D_RT():
        self.R = None  # 3x3 rotation matrix with the scale folded in
        self.t = None  # (3, 1) translation vector

    def centroid_point(self, samples):
        """Return the centroid (column-wise mean) of an (n, 3) array."""
        means = np.mean(samples, axis=0)
        return means

    def transform_lamda(self, A, B):
        """Estimate the squared scale ratio between centered point sets.

        Returns mean(|a_i|^2 / |b_i|^2) over paired points.
        NOTE(review): a point coinciding with its centroid makes the
        per-point ratio divide by zero -- callers must avoid that.
        """
        A_norm = np.sum(A*A, axis=1)
        B_norm = np.sum(B*B, axis=1)
        lam = np.mean(A_norm/B_norm)
        return lam

    def transform_3D_RT(self, A, B):
        """Fit R, t such that B ~= R . A^T + t.

        A is the original point set, B the target; both are (n, 3).
        Stores the fitted parameters on self and returns (R, t).
        """
        assert A.shape == B.shape
        centroidA = self.centroid_point(A)
        centroidB = self.centroid_point(B)
        A_move = A - centroidA
        B_move = B - centroidB
        # Cross-covariance of the centered sets: H = sum_i a_i b_i^T.
        H = np.dot(A_move.T, B_move)
        lam = self.transform_lamda(A_move, B_move)
        U, S, Vt = np.linalg.svd(H)
        # BUGFIX: np.linalg.svd returns the *transposed* right singular
        # vectors (Vt). The optimal rotation is R = V U^T = Vt.T @ U.T;
        # the original computed Vt @ U.T, yielding a wrong rotation.
        R = np.dot(Vt.T, U.T)
        if np.linalg.det(R) < 0:
            # Reflection detected: negate the last singular direction,
            # i.e. a row of Vt (a column of V), and recompute.
            Vt[2, :] = -Vt[2, :]
            R = np.dot(Vt.T, U.T)
        # Fold the uniform scale 1/sqrt(lam) into R, then solve for t
        # from the centroids.
        R = R/sqrt(lam)
        t = - np.dot(R, centroidA.T) + centroidB.T
        self.R = R
        self.t = t.reshape((3,1))
        return R, t

    def transform(self, A, R = None, t = None):
        """Apply B = R . A^T + t; defaults to the fitted parameters."""
        if R is None:
            R = self.R
            t = self.t
        B = np.dot(R, A.T) + t
        return B
class NearbySearch(object):
    """ROS node glue: subscribes to /trajectory (SLAM poses + raw GPS
    fixes), fits a GPS->SLAM similarity transform from paired samples,
    and publishes nearby points of interest as RViz markers."""

    def __init__(self):
        # Trajectory messages carry both SLAM poses and GPS fixes.
        self._sub = rospy.Subscriber('/trajectory',tjy, self.callback, queue_size=100)
        #self._pub = rospy.Publisher('/nearby_gps', NavSatFix, queue_size = 100)
        #self._pub1 = rospy.Publisher('/car_gps', NavSatFix, queue_size = 100)
        self._pub = rospy.Publisher('/location', Marker, queue_size=1000)
        self.google_maps = GoogleMaps()
        self.count = 0          # running marker-id counter (2 ids per POI)
        self.gps_result = []    # accumulated POI records
        self.new_gps = []       # POIs first seen in the latest query
        #self.xyz_temp = NavSatFix()
        self._timenum = 0       # number of callbacks processed so far
        # First GPS fix / first SLAM pose, used as the local origin.
        self.init_lat =0.0
        self.init_lng = 0.0
        self.init_x = 0.0
        self.init_y = 0.0
        self.init_z = 0.0
        self.init = True        # True until the first message arrives
        self.init_pose_x = 0.0
        self.init_pose_y = 0.0
        self.init_pose_z = 0.0
        self.number_limit = 20  # sliding-window length for sample lists
        self.sample_num = 30    # archive window means every N callbacks
        self.xyz_samples = []   # recent GPS-derived local XYZ points
        self.pose_samples = []  # recent SLAM poses, reordered (z, x, y)
        self.ave_xyz_samples = []   # archived window means (GPS side)
        self.ave_pose_samples = []  # archived window means (pose side)
        self.transform = Transform()  # fitted GPS->SLAM transform
        self.display_freq = 10  # refresh POI markers every N callbacks
        self.marker_scale = 0.2
        self.marker_lifetime = 8 # 0 is forever
        self.marker_id = 0
        self.marker_ns = 'building'+str(self.marker_id)
        self.marker_color = {'r': 1.0, 'g': 0.7, 'b': 1.0, 'a': 1.0}
        #self.marker_init()

    def add_point(self, samples, point):
        """Append `point` to `samples`, keeping at most number_limit
        entries by dropping the oldest."""
        if len(samples)== self.number_limit:
            samples.remove(samples[0])
        samples.append(point)
        return samples

    def ave_append(self, ave, sample):
        """Archive the mean of a *full* sliding window into `ave`
        (no-op while the window is still filling)."""
        if len(sample) == self.number_limit:
            sam = np.mean(np.array(sample).reshape((-1,3)), axis=0)
            ave.append([sam[0], sam[1], sam[2]])
        return ave

    def add_samples(self, samples, ave_samples):
        """Concatenate archived averages with the current window and
        return them as an (n, 3) array."""
        if not len(ave_samples) == 0:
            new = ave_samples + samples
        else:
            new = samples
        print("current groundtrue sample: ", len(new))
        return np.array(new).reshape((-1,3))

    def marker_init(self, markers):
        """Fill the shared fields of a cube (building) marker in the
        sensor frame and return it."""
        self.marker_ns = 'building'+str(self.marker_id)
        markers.ns = self.marker_ns
        markers.id = self.marker_id
        markers.type = Marker.CUBE
        markers.action = Marker.ADD
        markers.lifetime = rospy.Duration(self.marker_lifetime)
        markers.scale.x = self.marker_scale
        markers.scale.y = self.marker_scale
        markers.scale.z = self.marker_scale
        markers.color.r = self.marker_color['r']
        markers.color.g = self.marker_color['g']
        markers.color.b = self.marker_color['b']
        markers.color.a = self.marker_color['a']
        markers.header.frame_id = "sensor_frame"
        markers.header.stamp = rospy.Time.now()
        markers.frame_locked = True
        #markers.points = list()
        return markers

    def name_init(self, markers):
        """Fill the shared fields of a text-label marker (uses id + 1
        relative to the matching cube marker) and return it."""
        self.marker_ns = 'name'+str(self.marker_id+1)
        markers.ns = self.marker_ns
        markers.id = self.marker_id+1
        markers.type = Marker.TEXT_VIEW_FACING
        markers.action = Marker.ADD
        markers.lifetime = rospy.Duration(self.marker_lifetime)
        markers.scale.x = self.marker_scale
        markers.scale.y = self.marker_scale
        markers.scale.z = self.marker_scale
        markers.color.r = self.marker_color['r']
        markers.color.g = self.marker_color['g'] - 0.4
        markers.color.b = self.marker_color['b']
        markers.color.a = self.marker_color['a']
        markers.header.frame_id = "sensor_frame"
        markers.header.stamp = rospy.Time.now()
        markers.frame_locked = True
        #markers.points = list()
        return markers

    def combine(self, result, new):
        """Merge `new` POI records into `result`; returns the merged
        list plus the records not seen before."""
        newgps = []
        for line in new:
            if not line in result:
                result.append(line)
                newgps.append(line)
        print("current result: ",len(result))
        return result, newgps

    '''
    def gps_xyz_convert(self, lat, lng, alt=0.0):
        deltalat = lat - self.init_lat
        deltalng = lng - self.init_lng
        deltax = deltalat * C_EARTH
        deltay = deltalng * C_EARTH * cos(lat)
        return x,y,z
    '''

    def gps_xyz_convert(self, lat, lng, alt=0.0):
        """Project a WGS-84 (lat, lng) fix to Gauss-Krueger plane
        coordinates in metres; returns (x, y - 38000000, alt).

        NOTE(review): under Python 2 the float(3/4)-style coefficients
        use integer division and evaluate to 0.0, degrading the series
        expansion -- confirm the intended interpreter or add
        `from __future__ import division`.
        """
        Datum=84 # projection datum: 54 = Beijing-54, 80 = Xi'an-80, 84 = WGS-84
        prjno=0 # projection zone number
        zonewide=3 # zone width in degrees (3-degree zones here)
        IPI=0.0174532925199433333333 # degrees -> radians, 3.1415926535898/180.0
        B=lat # latitude
        L=lng # longitude
        if zonewide==6:
            prjno=(int)(L/zonewide)+1
            L0=prjno*zonewide-3
        else:
            prjno=(int)((L-1.5)/3)+1
            L0=prjno*3
        # Ellipsoid parameters for the selected datum.
        if(Datum==54):
            a=6378245
            f=1/298.3
        elif(Datum==84):
            a=6378137
            f=1/298.257223563
        # Convert to radians and evaluate the meridian-arc series.
        L0=L0*IPI
        L=L*IPI
        B=B*IPI
        e2=2*f-f*f
        l=L-L0
        t=tan(B)
        m=l * cos(B)
        N=a/sqrt(1-e2* sin(B) * sin(B))
        q2=e2/(1-e2)* cos(B)* cos(B)
        a1=1+float(3/4)*e2+float(45/64)*e2*e2+float(175/256)*e2*e2*e2+float(11025/16384)*e2*e2*e2*e2+float(43659/65536)*e2*e2*e2*e2*e2
        a2=float(3/4)*e2+float(15/16)*e2*e2+float(525/512)*e2*e2*e2+float(2205/2048)*e2*e2*e2*e2+float(72765/65536)*e2*e2*e2*e2*e2
        a3=float(15/64)*e2*e2+float(105/256)*e2*e2*e2+float(2205/4096)*e2*e2*e2*e2+float(10359/16384)*e2*e2*e2*e2*e2
        a4=float(35/512)*e2*e2*e2+float(315/2048)*e2*e2*e2*e2+float(31185/13072)*e2*e2*e2*e2*e2
        b1=a1*a*(1-e2)
        b2=float(-1/2)*a2*a*(1-e2)
        b3=float(1/4)*a3*a*(1-e2)
        b4=float(-1/6)*a4*a*(1-e2)
        c0=b1
        c1=2*b2+4*b3+6*b4
        c2=-(8*b3+32*b4)
        c3=32*b4
        # Meridian arc length and Gauss-Krueger forward projection.
        s=c0*B+cos(B)*(c1*sin(B)+c2*sin(B)*sin(B)*sin(B)+c3*sin(B)*sin(B)*sin(B)*sin(B)*sin(B))
        x=s+float(1/2)*N*t*m*m+float(1/24)*(5-t*t+9*q2+4*q2*q2)*N*t*m*m*m*m+float(1/720)*(61-58*t*t+t*t*t*t)*N*t*m*m*m*m*m*m
        y=N*m+float(1/6)*(1-t*t+q2)*N*m*m*m+float(1/120)*(5-18*t*t+t*t*t*t-14*q2-58*q2*t*t)*N*m*m*m*m*m
        # False easting plus zone-number prefix (Chinese convention).
        y=y+1000000*prjno+500000
        return x, y-38000000, alt

    def callback(self, msg):
        """Per-trajectory-message handler.

        The first message fixes the local origin in both coordinate
        systems; later messages maintain paired GPS/pose sample windows,
        periodically re-fit the GPS->SLAM transform, and republish POI
        markers in the sensor frame.
        """
        currentpose = msg.tjy.poses[-1]
        currentgps = msg.gps[-1]
        lat = currentgps.latitude
        lng = currentgps.longitude
        alt = currentgps.altitude
        if self.init:
            # First fix: remember the origin in both frames and seed the
            # sample windows with the zero point.
            self.init_lat = lat
            self.init_lng = lng
            self.init_x, self.init_y, self.init_z = self.gps_xyz_convert(lat, lng)
            #self.init_x, self.init_y, self.init_z = lat, lng, alt
            print("init xyz ", self.init_x, self.init_y, self.init_z)
            self.init_pose_x = currentpose.pose.position.x
            self.init_pose_y = currentpose.pose.position.y
            self.init_pose_z = currentpose.pose.position.z
            print("init pose ", self.init_pose_x, self.init_pose_y, self.init_pose_z)
            self.xyz_samples = self.add_point(self.xyz_samples,[0,0,0])
            #self.pose_samples = np.array([self.init_pose_x, self.init_pose_y, self.init_pose_z]).reshape((1,3))
            self.pose_samples = self.add_point(self.pose_samples, [0, 0, 0])
            self.init = False
        else:
            # Express the new GPS fix relative to the origin.
            temp_x, temp_y, temp_z = self.gps_xyz_convert(lat, lng)
            #temp_x, temp_y, temp_z = lat, lng, alt
            temp_x = temp_x - self.init_x
            temp_y = temp_y - self.init_y
            temp_z = temp_z - self.init_z
            self.xyz_samples = self.add_point(self.xyz_samples,[temp_x, temp_y, temp_z])
            #self.xyz_samples = np.concatenate((self.xyz_samples , np.array([temp_x, temp_y, temp_z]).reshape((1,3))),axis=0)
            temp_pose_x = currentpose.pose.position.x
            temp_pose_y = currentpose.pose.position.y
            temp_pose_z = currentpose.pose.position.z
            #self.pose_samples = np.concatenate((self.pose_samples, np.array([temp_pose_x, temp_pose_y, temp_pose_z]).reshape((1,3))), axis=0)
            #self.pose_samples = np.concatenate((self.pose_samples, np.array([temp_pose_x, temp_pose_y, 0.0]).reshape((1,3))), axis=0)
            # Pose axes are stored reordered (z, x, y) to line up with
            # the projected GPS axes.
            self.pose_samples = self.add_point(self.pose_samples, [temp_pose_z, temp_pose_x, temp_pose_y])
            if self._timenum%self.sample_num == 0:
                # Periodically archive window means as long-term
                # correspondence samples.
                self.ave_pose_samples = self.ave_append(self.ave_pose_samples, self.pose_samples)
                self.ave_xyz_samples = self.ave_append(self.ave_xyz_samples, self.xyz_samples)
            if self._timenum%self.display_freq == 1:
                print("latitude: {0}, longitude: {1}, altitude: {2}".format(lat, lng, alt))
                #list_return_info = self.google_maps.get_all_data(lng, lat)
                # POIs currently come from a cached file, not a live
                # Google Places query.
                list_return_info = temp_read()
                print("find gps info")
                self.gps_result ,self.new_gps = self.combine(self.gps_result, list_return_info)
                #if not len(self.new_gps)==0:
                #self.count = len(self.gps_result)
                #R, t = self.transform.transform_3D_RT(self.xyz_samples, self.pose_samples)
                # Re-fit the GPS->SLAM similarity transform on all
                # available correspondences (window + archive).
                R, t = self.transform.transform_3D_RT(self.add_samples(self.xyz_samples, self.ave_xyz_samples), self.add_samples(self.pose_samples, self.ave_pose_samples))
                #R, t = self.transform.transform_3D_RT(np.array([[0,0,0],[temp_x, temp_y, temp_z]]).reshape((-1,3)), np.array([[0,0,0],[temp_pose_z, temp_pose_x, temp_pose_y]]).reshape((-1,3)))
                print("R ",R)
                print("t ",t)
                # Residual between the transformed GPS point and the
                # actual SLAM pose, as a fit-quality indicator.
                temp_pose = self.transform.transform(np.array([temp_x, temp_y, temp_z]).reshape((1,3)))
                #print("temp_pose: ",temp_pose)
                #print("true pose: ",np.array([temp_pose_z, temp_pose_x, temp_pose_y]).reshape((3,1)))
                print("distance",self.distance(temp_pose.reshape((3,1)), np.array([temp_pose_z, temp_pose_x, temp_pose_y]).reshape((3,1))))
                # Republish one cube + one text marker per known POI.
                for point in self.gps_result:
                    name = point["name"]
                    gps_lat = point["lat"]
                    gps_lng = point["lng"]
                    ros_x, ros_y, ros_z = self.gps_xyz_convert(gps_lat, gps_lng)
                    #ros_x, ros_y, ros_z = gps_lat, gps_lng,alt
                    ros_x = ros_x - self.init_x
                    ros_y = ros_y - self.init_y
                    ros_z = ros_z - self.init_z
                    #print("name: {0}, latitude: {1}, longitude: {2}, altitude: {3}".format(name, gps_lat, gps_lng, alt))
                    ros_pose = self.transform.transform(np.array([ros_x,ros_y,ros_z]).reshape((1,3)))
                    #print("ros_pose", ros_pose)
                    # Two marker ids per POI: cube and its text label.
                    self.marker_id = self.count
                    self.count += 2
                    marker = Marker()
                    namemarker = Marker()
                    marker = self.marker_init(marker)
                    namemarker = self.name_init(namemarker)
                    namemarker.text = name
                    # Map the (z, x, y)-ordered result back to ROS axes.
                    marker.pose.position.x = ros_pose[1]
                    marker.pose.position.y = ros_pose[2]
                    marker.pose.position.z = ros_pose[0]
                    marker.pose.orientation.y = 0.0
                    marker.pose.orientation.z = 0.0
                    marker.pose.orientation.w = 1.0
                    self._pub.publish(marker)
                    # Text label slightly offset below the cube.
                    namemarker.pose.position.x = ros_pose[1]
                    namemarker.pose.position.y = ros_pose[2] - 0.3
                    namemarker.pose.position.z = ros_pose[0]
                    namemarker.pose.orientation.y = 0.0
                    namemarker.pose.orientation.z = 0.0
                    namemarker.pose.orientation.w = 1.0
                    self._pub.publish(namemarker)
                self.new_gps = []
        self._timenum = self._timenum + 1

    def distance(self, ros, pose):
        """Euclidean distance between two 3-vectors."""
        dis = np.sqrt(np.sum((ros.reshape((3,1))-pose.reshape((3,1)))**2))
        return dis

    def main(self):
        """Spin the node; at shutdown, dump the transformed POI
        coordinates to building_xyz.txt."""
        rospy.spin()
        # NOTE(review): save_xyz is defined and registered only *after*
        # rospy.spin() returns (i.e. already at shutdown); registering
        # the hook before spinning is probably what was intended.
        def save_xyz():
            # Write "name;x;y;z" per POI in SLAM-frame coordinates.
            f = open("building_xyz.txt", "w")
            for point in self.gps_result:
                name = point["name"]
                gps_lat = point["lat"]
                gps_lng = point["lng"]
                ros_x, ros_y, ros_z = self.gps_xyz_convert(gps_lat, gps_lng)
                ros_x = ros_x - self.init_x
                ros_y = ros_y - self.init_y
                ros_z = ros_z - self.init_z
                ros_pose = self.transform.transform(np.array([ros_x,ros_y,ros_z]).reshape((1,3)))
                f.write(name+";%f" %ros_pose[1]+";%f" %ros_pose[2]+";%f" %ros_pose[0]+"\n")
            f.close()
        rospy.on_shutdown(save_xyz)
def temp_read(info_path="../../05/info.txt"):
    """Read building info lines of the form "name,lng,lat" into dicts.

    Parameters
    ----------
    info_path : str
        Path to the comma-separated info file. The default keeps the
        original hard-coded location, so existing callers are unaffected.

    Returns
    -------
    list of dict
        One dict per line with keys "name" (str), "lng" (float), "lat" (float).
    """
    data = []
    # "with" guarantees the handle is closed even if float() raises on a
    # malformed line (the original leaked the handle in that case).
    with open(info_path, "r") as f:
        for line in f:
            line = line.strip('\n')
            line = line.strip('\r')
            fields = line.split(',')
            # Drop a stray blank field produced by a trailing ", " in the
            # input (only the first occurrence, matching original behavior).
            if " " in fields:
                fields.remove(" ")
            data.append({"name": fields[0], "lng": float(fields[1]), "lat": float(fields[2])})
    return data
if __name__ == '__main__':
    # Start the ROS node and hand control to NearbySearch until shutdown.
    rospy.init_node("NearbySearch", anonymous=True)
    nearby = NearbySearch()
    nearby.main()
# Dead code: the module-level string literal below is a never-executed usage
# example for the Transform helper, kept for reference.
'''
transform = Transform()
a = np.array([[0,0,0],[1.1,5.6,0],[2.5,3.2,0],[6.9,8.4,0]]).reshape((-1,3))
b = np.array([[0,0,0],[0.11, 0.56,0.0],[0.25,0.32,0],[0.69,0.84,0]]).reshape((-1,3))
R, t = transform.transform_3D_RT(a,b)
m = np.array([1.1,5.6,0]).reshape((1,3))
n = np.array([10.1, 11.6, 0]).reshape((1,3))
mb = transform.transform(m)
nb = transform.transform(n)
print(mb)
print(nb)
'''
| {"hexsha": "c10e6776d7422824d2c9657228b1b2eac16126fe", "size": 17565, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/nearbyGPS.py", "max_stars_repo_name": "germal/Semantic_SLAM-1", "max_stars_repo_head_hexsha": "0284b3f832ca431c494f9c134fe46c40ec86ee38", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 213, "max_stars_repo_stars_event_min_datetime": "2019-04-19T07:44:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T00:57:04.000Z", "max_issues_repo_path": "src/nearbyGPS.py", "max_issues_repo_name": "DingYikang/Semantic_SLAM", "max_issues_repo_head_hexsha": "0284b3f832ca431c494f9c134fe46c40ec86ee38", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-03-06T13:09:56.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-06T05:12:43.000Z", "max_forks_repo_path": "src/nearbyGPS.py", "max_forks_repo_name": "DingYikang/Semantic_SLAM", "max_forks_repo_head_hexsha": "0284b3f832ca431c494f9c134fe46c40ec86ee38", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 48, "max_forks_repo_forks_event_min_datetime": "2019-12-12T13:46:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-26T01:04:26.000Z", "avg_line_length": 37.8556034483, "max_line_length": 189, "alphanum_fraction": 0.5738115571, "include": true, "reason": "import numpy", "num_tokens": 4833} |
% !TEX program = xelatex
\documentclass{resume}
\usepackage{zh_CN-Adobefonts_external} % Simplified Chinese Support using external fonts (./fonts/zh_CN-Adobe/)
\usepackage{zh_CN-Adobefonts_internal} % Simplified Chinese Support using system fonts
\begin{document}
\pagenumbering{gobble} % suppress displaying page number
\name{Yifan Wu}
\basicInfo{
\email{yifanwu@pku.edu.cn} \textperiodcentered\
\phone{(+86) 184-8207-1800} \textperiodcentered\
\github[wuyifan18]{https://github.com/wuyifan18}}
\section{教育经历}
\datedsubsection{\textbf{Peking University}}{Sep. 2019 -- Present}
Ph.D. student in Software Engineering\\
Advisor: Prof. \href{http://www.ss.pku.edu.cn/index.php/teacherteam/teacherlist/1674-%E6%9D%8E%E5%BD%B1}{Ying Li}
\datedsubsection{\textbf{University of Electronic Science and Technology of China}}{Sep. 2015 -- Jun. 2019}
B.E. in Software Engineering\\
GPA: 3.86/4.0\\
Rank: 3/132
\section{研究兴趣}
Graph Computing, Machine Learning Systems, Distributed Systems
\section{项目经历}
\datedsubsection{\textbf{LogFlash}}{Sep. 2019 -- Present}
\textit{Submitted} to anonymous peer-review, Second author\\
It is important to detect and diagnose anomalies accurately and in a timely manner for large-scale software systems. Thus, we propose LogFlash, an online self-updating anomaly detection and diagnosis approach that enables model training and anomaly diagnosis in real time. LogFlash considers anomaly detection and diagnosis as a real-time streaming processing task where each log entry is processed only once without any iterations or intermediate storage. Experimental results show that LogFlash reduces training and detection time by over 5 times compared with state-of-the-art works while maintaining the capability of accurate problem diagnosis.
\begin{itemize}
\item Implementation of log template mining module based on Flink
\item Implementation of front-end page including task management and config management
\item Deployment of LogFlash based on docker
\end{itemize}
\datedsubsection{\textbf{Data annotation platform}}{Apr. 2019 -- Aug. 2019}
\role{SDE Intern at Infimind}{}
Data annotation platform composed of multiple modules such as user login, project and task management, annotation statistics and review, annotation operation.
\begin{itemize}
\item Implementation of front-end page including user based on Vue.js
\end{itemize}
\datedsubsection{\textbf{Crawler project}}{Mar. 2019 -- Apr. 2019}
\role{SDE Intern at Infimind}{}
A crawler tool for China Judgements Online.
\begin{itemize}
\item Support IP proxy
\item Support multiple processes
\item Support full crawling
\item Divide data according to decision time, region and court
\end{itemize}
\datedsubsection{\textbf{Xlearn Project}}{Feb. 2018 -- May. 2018}
\role{SDE Intern at School of Software, Tsinghua University}{Supervised by Mingsheng Long, Zhongyi Pei}
A platform where non-machine learning experts can run machine learning applications such as radar extrapolation.
\begin{itemize}
\item Implementation of backend based on Flask and Nginx
\item Deployment of algorithms such as radar extrapolation
\item Implementation of front-end page
\end{itemize}
\section{主要论文}
\begin{enumerate}[parsep=0.5ex]
\item How Far Have We Come in Detecting Anomalies in Distributed Systems? An Empirical Study with a Statement-level Fault Injection Method\\
Yong Yang, \textbf{Yifan Wu}, Karthik Pattabiraman, Long Wang, Ying Li\\
The 31st International Symposium on Software Reliability Engineering (\textbf{ISSRE'20}) (CCF B)
\item LogFlash: Online Anomaly Detection and Diagnosis from System Logs for Large-scale Software Systems (Submitted to anonymous peer-review)\\
Tong Jia, Ying Li, \textbf{Yifan Wu}\\
\end{enumerate}
\section{主要奖励}
\begin{itemize}[parsep=0.5ex]
\item \datedline{Outstanding Graduates of UESTC}{2019}
\item \datedline{Second Award, People's Scholarship}{2018}
\item \datedline{Suzhou Industrial Park Scholarship}{2017}
\item \datedline{Outstanding League Member of UESTC}{2017}
\item \datedline{First Award, People's Scholarship}{2016}
\end{itemize}
\section{专业技能}
\begin{itemize}[parsep=0.5ex]
\item Languages: Java, Python, C, Go, Markdown
\item Systems: Flink, Pytorch, Docker, TensorFlow, PowerGraph, Vue.js
\end{itemize}
%% Reference
%\newpage
%\bibliographystyle{IEEETran}
%\bibliography{mycite}
\end{document}
| {"hexsha": "dbc1be6627f91d262b0af216b0a0439579a986a5", "size": 4369, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "resume-zh_CN.tex", "max_stars_repo_name": "wuyifan18/resume", "max_stars_repo_head_hexsha": "5fa002530cb40b5540d424fbebcf59707d6f5430", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "resume-zh_CN.tex", "max_issues_repo_name": "wuyifan18/resume", "max_issues_repo_head_hexsha": "5fa002530cb40b5540d424fbebcf59707d6f5430", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "resume-zh_CN.tex", "max_forks_repo_name": "wuyifan18/resume", "max_forks_repo_head_hexsha": "5fa002530cb40b5540d424fbebcf59707d6f5430", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.4787234043, "max_line_length": 635, "alphanum_fraction": 0.7821011673, "num_tokens": 1201} |
# 扩散映射(DiffusionMaps)
## 符号定义
|符号|概念|
|:--:|:--:|
|$\pmb{x}$|样本点|
|$X$|样本集合|
|$N$|样本总数|
|$G$|有限图|
|$S$|有限图元素集合|
|$W$|权重矩阵|
|$D$|度矩阵|
|$P$|转移矩阵|
|$M$|距离矩阵|
|$d$|距离|
|$m$|降维后维度|
## 概念
ISOMAP通过替换欧氏距离为最短路径距离实现了比较好的降维效果,但是ISOMAP有一个非常明显的缺陷:对噪声较敏感。噪声很有可能改变两个点之间的最短路径以至于影响相当多样本对的距离度量,从而得到错误的降维结果。
为了抵抗噪声的影响,一个非常简单的思路就是取多条路径的平均值(若两个点由多条短路径相连则可以断定这两个是相近的)。Diffusion Maps就有着类似的想法,不同的是,Diffusion Maps是以图的形式来审视所有数据。Diffusion Maps定义相应的距离度量用于衡量图中节点之间的连接程度,节点间相互连接的总量则作为两节点的相似程度。Difuusion Maps在随机游走以及扩散过程的基础上定义扩散距离,并找到了扩散距离与低维欧氏距离的联系,从而实现将高维空间的数据嵌入到低维空间。
Diffusion Maps主要有如下工作:
1. 定义一个有限图$G=(S, E)$,其中$S$为所有数据点构成的元素集合,$E$为所有边的集合
2. 在有限图的基础上,定义相应的转移矩阵$P$,并进行随机游走。
3. 寻找当随机游走步数$t\rightarrow \infty$时的平稳分布$\mu$
4. 定义扩散距离并实现数据的低维空间嵌入
## 推导
* **权重矩阵**
首先需要定义一个权重矩阵,权重矩阵的定义方式可以由实际情况决定,仅需要满足如下的条件:
1. 对称性
$$
\begin{equation}
W = W^T
\end{equation}
$$
2. 元素非负
$$
\begin{equation}
w(i, j) \geq 0
\end{equation}
$$
在下述推导中采用高斯核进行定义,即
$$
\begin{equation}
\begin{split}
W_{ij}
&= w(i, j) \\
&= \kappa(\pmb{x_i}, \pmb{x_j}) \\
&= \exp(-\gamma||\pmb{x_i}-\pmb{x_j}||^2)
\end{split}
\end{equation}
$$
为了调整节点空间位置的影响,对权重矩阵进行归一化。
为了方便计算,首先引入度矩阵$D$,度矩阵为对角阵
$$
\begin{equation}
D_{ii} = \sum_{j=1}^N W(\pmb{x_i}, \pmb{x_j})
\end{equation}
$$
这里采用对称归一化,即$D^{-\frac{1}{2}}WD^{-\frac{1}{2}}$,这样处理后权重矩阵依然为对称矩阵。
* **随机游走**
上述使用核函数获得的两两点之间的相似性度量实际上仅描述了局部信息。为了进一步获得全局上的信息,Diffusion Maps引入了图上的随机游走。从某一个特定的节点$\pmb{x_i}$出发,经过一个时间步后,相较于较远的节点,显然更容易转移到较近的点。因此上述由高斯核函数定义的权重矩阵能够用于构造转移矩阵。
$$
\begin{equation}
P = D^{-1}W
\end{equation}
$$
上述仅为一个时间步的转移,Diffusion Maps引入了多个时间步的转移。设$P^t$表示t个时间步的转移矩阵,其元素$P^t_{ij}$表示经过t个时间步从节点$\pmb{x_i}$转移到节点$\pmb{x_j}$的概率。显然多个时间步的转移能够体现节点间的内在联系。实际上,当$t\rightarrow\infty$时,会得到一个平稳分布。
$$
\begin{equation}
\mu(\pmb{x_i}) = \frac{D_{ii}}{\sum_{j=1}^ND_{jj}}
\end{equation}
$$
详细证明可以参考参考资料[3]
* **扩散距离**
在随机游走的基础上,若两个节点的t个时间步的转移矩阵相似,我们可以认为这两个节点的相似度比较高,反之则可以认为这两个节点的相似度低,Diffusion Maps正是基于此定义了一个相当有效的节点相似度度量方法。其定义如下
$$
\begin{equation}
d^2_t(\pmb{x_i}, \pmb{x_j}) = \sum_{z=1}^N\frac{(P^t_{iz}-P^t_{jz})^2}{\mu(\pmb{x_z})}
\end{equation}
$$
上式是一个带权重的$l^2$距离,节点分布密集的区域$\mu(\pmb{x_z})$较大,此时权重较小,而节点分布稀疏的区域$\mu(\pmb{x_z})$较小,此时权重较大。
* **转移矩阵的特征分解**
这里直接给结论,详细推导见参考资料[4]
对于$P^t$有如下分解
$$
\begin{equation}
P^t = \sum_{i=1}^N\lambda_i^t\pmb{\varphi_i}\pmb{\psi_i}
\end{equation}
$$
其中$\lambda$为特征值,$\varphi$为左特征向量,$\psi$为右特征向量。左特征向量关于$\frac{1}{\mu}$归一化,右特征向量关于$\mu$归一化,并按照特征值的大小降序排列,可以得到
$$
\begin{equation}
P^t_{ij} = \sum_{z=1}^N\lambda_z^t\varphi_z(i)\psi_z(j)
\end{equation}
$$
其中$P^t_{ij}$为矩阵$P^t$中位置为$ij$的值,$\varphi_z(i)$为第z个左特征向量的第i个位置的值
将式9带入式7可以得到
$$
\begin{equation}
\begin{split}
d^2_t(\pmb{x_i}, \pmb{x_j})
&= \sum_{z=1}^N\frac{(P^t_{iz}-P^t_{jz})^2}{\mu(\pmb{x_z})} \\
&= \sum_{z=1}^N\frac{(\sum_{l=1}^N\lambda_l^t\varphi_l(i)\psi_l(z)-\sum_{l=1}^N\lambda_l^t\varphi_l(j)\psi_l(z))^2}{\mu(\pmb{x_z})} \\
&= \sum_{z=1}^N\frac{\sum_{l=1}^N\lambda_l^{2t}\psi_l^2(z)(\varphi_l(i)-\varphi_l(j))^2}{\mu(\pmb{x_z})} \\
&= \sum_{l=1}^N\sum_{z=1}^N\frac{\psi_l^2(z)}{\mu(\pmb{x_z})}\lambda_l^{2t}(\varphi_l(i)-\varphi_l(j))^2 \\
&= \sum_{l=1}^N\lambda_l^{2t}(\varphi_l(i)-\varphi_l(j))^2 \\
&= \sum_{l=1}^N(\lambda_l^{t}\varphi_l(i) - \lambda_l^{t}\varphi_l(j))^2
\end{split}
\end{equation}
$$
考虑到$\pmb{\varphi_1} \equiv \pmb{1}$因此忽略第一项,最终得到
$$
\begin{equation}
d^2_t(\pmb{x_i}, \pmb{x_j}) = \sum_{l=2}^N(\lambda_l^{t}\varphi_l(i) - \lambda_l^{t}\varphi_l(j))^2
\end{equation}
$$
考虑到特征值的衰减,取前几项即可达到一定的精度,因此上述定义的距离可以由前$m$项近似表示
$$
\begin{equation}
\begin{split}
d^2_t(\pmb{x_i}, \pmb{x_j})
&= \sum_{l=2}^N(\lambda_l^{t}\varphi_l(i) - \lambda_l^{t}\varphi_l(j))^2 \\
&\simeq \sum_{l=2}^{m+1}(\lambda_l^{t}\varphi_l(i) - \lambda_l^{t}\varphi_l(j))^2
\end{split}
\end{equation}
$$
上述定义的距离显然可以视为在$\mathcal{R^m}$上的欧式距离。在式-12的基础上定义如下映射
$$
\begin{equation}
\Phi_t: \pmb{x_i} \longmapsto [\lambda_2^{t}\varphi_2(i), \lambda_3^{t}\varphi_3(i), \cdots, \lambda_{m+1}^{t}\varphi_{m+1}(i)]^T
\end{equation}
$$
可以得到
$$
\begin{equation}
\begin{split}
d^2_t(\pmb{x_i}, \pmb{x_j})
&= \sum_{l=2}^N(\lambda_l^{t}\varphi_l(i) - \lambda_l^{t}\varphi_l(j))^2 \\
&\simeq ||\Phi_t(\pmb{x_i}) - \Phi_t(\pmb{x_j})||^2
\end{split}
\end{equation}
$$
式-14指出在原空间定义的扩散距离可以由低维空间$\mathcal{R^m}$中的欧式距离近似表示。这一低维表示由随机游走时间$t$以及转移矩阵决定。
## 算法步骤
## 参考资料
[1] https://en.wikipedia.org/wiki/Isomap
[2] https://www.cnblogs.com/EIPsilly/p/15732378.html
[3] https://www.cs.yale.edu/homes/spielman/561/lect10-18.pdf
[4] https://ocw.mit.edu/courses/electrical-engineering-and-computer-science/6-262-discrete-stochastic-processes-spring-2011/video-lectures/lecture-8-markov-eigenvalues-and-eigenvectors/MIT6_262S11_lec08.pdf
| {"hexsha": "009d25d93c40efd85ddf494fe423bcdca9e2b6f3", "size": 6923, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "10_DiffusionMaps/DiffusionMaps.ipynb", "max_stars_repo_name": "koolo233/dimensionality_reduction_python", "max_stars_repo_head_hexsha": "452a927772c546f68d6a63e96cdb017b23e4077c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "10_DiffusionMaps/DiffusionMaps.ipynb", "max_issues_repo_name": "koolo233/dimensionality_reduction_python", "max_issues_repo_head_hexsha": "452a927772c546f68d6a63e96cdb017b23e4077c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "10_DiffusionMaps/DiffusionMaps.ipynb", "max_forks_repo_name": "koolo233/dimensionality_reduction_python", "max_forks_repo_head_hexsha": "452a927772c546f68d6a63e96cdb017b23e4077c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.0509259259, "max_line_length": 258, "alphanum_fraction": 0.4977610862, "converted": true, "num_tokens": 3085} |
from flask import Flask, request
from gensim import corpora, models, similarities
import csv
import numpy as np
import logging
import os
import sys
import gzip
import pkg_resources
from pkg_resources import DistributionNotFound
import pathlib
# Log to a file next to this script; mode "w" truncates on every start so the
# log reflects only the current server run.
logging.basicConfig(
    handlers=[logging.FileHandler(__file__ + ".log", "w", "utf-8")],
    format="%(asctime)s %(levelname)s:%(message)s",
    level=logging.INFO,
)
# default boilerplate code
app = Flask(__name__)
# cache of active gensim models keyed by path (learning/relearning possible)
active_models = {}
# cache of active gensim vector files keyed by path (just consumption)
active_vectors = {}
@app.route("/melt_ml.html")
def display_server_status():
    """Health-check endpoint; can be used to verify the server is running
    (also works when opened in a Web browser).

    Returns
    -------
    str
        A fixed message confirming the server is up.
    """
    status_message = "MELT ML Server running. Ready to accept requests."
    return status_message
@app.route("/check-requirements", methods=["GET"])
def check_requirements() -> str:
    """Check whether every requirement from a requirements file is installed.

    The path to the requirements file is read from the "requirements_file"
    request header; each requirement is checked with pkg_resources.

    Returns
    -------
    str
        A human-readable report listing installed and missing requirements.
    """
    requirements_file = request.headers.get("requirements_file")
    logging.info(f"received requirements file path: {requirements_file}")
    with pathlib.Path(requirements_file).open() as requirements_txt:
        requirements = pkg_resources.parse_requirements(requirements_txt)
        ok_requirements = []
        missing_requirements = []
        for requirement in requirements:
            requirement = str(requirement)
            print(f"Checking {requirement}")
            try:
                pkg_resources.require(requirement)
                ok_requirements.append(requirement)
            except Exception as error:
                # Broad catch kept on purpose: pkg_resources raises several
                # types (DistributionNotFound, VersionConflict, ...). The
                # reason is now logged instead of stored in an unused local.
                logging.info(f"Requirement {requirement} not satisfied: {error}")
                missing_requirements.append(requirement)
    message = "Dependency Check"
    if len(ok_requirements) > 0:
        message += "\nInstalled Requirements:"
        for r in ok_requirements:
            message += "\n\t" + r
    if len(missing_requirements) > 0:
        message += "\nMissing Requirements:"
        for r in missing_requirements:
            message += "\n\t" + r
    else:
        message += "\n=> Everything is installed. You are good to go!"
    print(message)
    logging.info(message)
    return message
class MySentences(object):
    """Memory-friendly iterator over whitespace-separated walk files.

    Iterates over the lines of a single file, or of every file in a
    directory; files whose names end in "gz" are transparently read with
    gzip. Each line is yielded as a list of tokens.
    """

    def __init__(self, file_or_directory_path):
        """Constructor

        Parameters
        ----------
        file_or_directory_path : str
            The path to the file containing the walks or the path to the
            directory which contains multiple walk files.
        """
        self.file_or_directory_path = file_or_directory_path

    @staticmethod
    def _read_sentences(file_path):
        """Yield one token list per line of the given (optionally gzipped) file.

        Shared by the directory and single-file branches of __iter__; the
        "with" block also guarantees handles are closed (the original left
        them open).
        """
        if file_path.endswith("gz"):
            logging.info("Gzip file detected! Using gzip.open().")
            opener = gzip.open
        else:
            opener = open
        with opener(file_path, mode="rt", encoding="utf-8") as handle:
            for line in handle:
                yield line.rstrip("\n").split(" ")

    def __iter__(self):
        try:
            if os.path.isdir(self.file_or_directory_path):
                logging.info("Directory detected.")
                for file_name in os.listdir(self.file_or_directory_path):
                    logging.info("Processing file: " + file_name)
                    yield from self._read_sentences(
                        os.path.join(self.file_or_directory_path, file_name)
                    )
            else:
                logging.info("Processing file: " + self.file_or_directory_path)
                yield from self._read_sentences(self.file_or_directory_path)
        except Exception:
            # Deliberately swallow after logging so one unreadable file does
            # not kill a long-running training job; iteration ends early.
            logging.error("Failed reading file:")
            logging.error(self.file_or_directory_path)
            logging.exception("Stack Trace:")
@app.route("/w2v-to-kv", methods=["GET"])
def w2v_to_kv() -> str:
    """Convert a word2vec-format vector file into a saved gensim KeyedVectors file.

    Header parameters: "w2v_path" (source file in word2vec format) and
    "new_file" (destination for the saved KeyedVectors).

    Returns
    -------
    str (representing a boolean)
        'True' as string if operation was successful, else 'False' (as string).
    """
    from gensim.models import KeyedVectors

    try:
        w2v_path = request.headers.get("w2v_path")
        new_file = request.headers.get("new_file")
        result = KeyedVectors.load_word2vec_format(w2v_path, unicode_errors='ignore')
        result.save(new_file)
        # NOTE(review): entries are cached under os.path.realpath(new_file),
        # but get_vectors() looks them up by the raw path it receives --
        # confirm both sides agree, otherwise these entries are never hit.
        active_models[os.path.realpath(new_file)] = result
        # NOTE(review): result is already a KeyedVectors instance; ".wv" relies
        # on a deprecated gensim alias -- verify against the gensim version.
        active_vectors[os.path.realpath(new_file)] = result.wv
        return "True"
    except Exception:
        logging.exception("An exception occurred.")
        return "False"
@app.route("/train-word2vec", methods=["GET"])
def train_word_2_vec() -> str:
    """Train a word2vec model given one file (or directory of files) of walks.

    All hyper-parameters (paths, vector dimension, thread count, window size,
    iterations, negatives, sg/cbow mode, min_count, sample, epochs) are read
    from the request headers.

    Returns
    -------
    str (representing a boolean)
        'True' as string if operation was successful, else 'False' (as string).
    """
    try:
        model_path = request.headers.get("model_path")  # where the model will be stored
        vector_path = request.headers.get(
            "vector_path"
        )  # where the vector file will be stored
        file_path = request.headers.get("file_path")
        vector_dimension = request.headers.get("vector_dimension")
        number_of_threads = request.headers.get("number_of_threads")
        window_size = request.headers.get("window_size")
        iterations = request.headers.get("iterations")
        negatives = request.headers.get("negatives")
        cbow_or_sg = request.headers.get("cbow_or_sg")
        min_count = request.headers.get("min_count")
        sample = request.headers.get("sample")
        epochs = request.headers.get("epochs")

        sentences = MySentences(file_path)
        logging.info("Sentences object (" + file_path + ") initialized.")

        # Both training modes share every parameter except sg/cbow_mean, so
        # build the keyword set once instead of duplicating the whole call.
        model_kwargs = dict(
            sample=float(sample),
            min_count=int(min_count),
            size=int(vector_dimension),
            workers=int(number_of_threads),
            window=int(window_size),
            negative=int(negatives),
            iter=int(iterations),
        )
        if cbow_or_sg == "sg":
            model_kwargs["sg"] = 1
        else:
            model_kwargs["sg"] = 0
            model_kwargs["cbow_mean"] = 1
        model = models.Word2Vec(**model_kwargs)

        logging.info("Model object initialized. Building Vocabulary...")
        model.build_vocab(sentences)
        logging.info("Vocabulary built. Training now...")
        model.train(
            sentences=sentences, total_examples=model.corpus_count, epochs=int(epochs)
        )
        logging.info("Model trained.")

        model.save(model_path)
        model.wv.save(vector_path)
        active_models[os.path.realpath(model_path)] = model
        active_vectors[os.path.realpath(vector_path)] = model.wv
        return "True"
    except Exception:
        logging.exception("An exception occurred.")
        return "False"
@app.route("/is-in-vocabulary", methods=["GET"])
def is_in_vocabulary():
    """Check whether there is a vector for the given concept.

    Returns
    -------
    str
        "True" if the concept is in the model vocabulary, else "False".
    """
    concept = request.headers.get("concept")
    vectors = get_vectors(
        request.headers.get("model_path"), request.headers.get("vector_path")
    )
    return str(concept in vectors.vocab)
@app.route("/get-vocabulary-size", methods=["GET"])
def get_vocab_size():
    """Return the number of entries in the model vocabulary as a string."""
    vectors = get_vectors(
        request.headers.get("model_path"), request.headers.get("vector_path")
    )
    return str(len(vectors.vocab))
def get_vectors(model_path, vector_path):
    """Resolve gensim vectors from either a model file or a vector file.

    Exactly one of model_path / vector_path is expected to be set (the Java
    backend guarantees this). Loaded models and vectors are cached in the
    module-level dictionaries so repeated requests avoid disk I/O.

    Returns
    -------
    gensim vectors for further operations.
    """
    if vector_path is None:
        cached_model = active_models.get(model_path)
        if cached_model is None:
            cached_model = models.Word2Vec.load(model_path)
            active_models[model_path] = cached_model
        return cached_model.wv
    cached_vectors = active_vectors.get(vector_path)
    if cached_vectors is None:
        cached_vectors = models.KeyedVectors.load(vector_path, mmap="r")
        active_vectors[vector_path] = cached_vectors
    return cached_vectors
@app.route("/get-similarity", methods=["GET"])
def get_similarity_given_model():
    """Compute the similarity between two concepts.

    Header parameters: "concept_1", "concept_2" and one of
    "model_path"/"vector_path".

    Returns
    -------
    str
        The similarity as a string, or a message starting with "ERROR!"
        when the request cannot be served.
    """
    concept_1 = request.headers.get("concept_1")
    concept_2 = request.headers.get("concept_2")
    model_path = request.headers.get("model_path")
    vector_path = request.headers.get("vector_path")
    vectors = get_vectors(model_path=model_path, vector_path=vector_path)
    if vectors is None:
        # A Flask view must return a str/Response; the previous "return 0.0"
        # (a float) would have raised a TypeError inside Flask.
        message = "ERROR! Could not instantiate vectors."
        logging.error(message)
        return message
    if concept_1 is None or concept_2 is None:
        message = (
            "ERROR! concept_1 and/or concept_2 not found in header. "
            "Similarity cannot be calculated."
        )
        print(message)
        logging.error(message)
        return message
    if concept_1 not in vectors.vocab:
        message = "ERROR! concept_1 not in the vocabulary."
        print(message)
        logging.error(message)
        return message
    if concept_2 not in vectors.vocab:
        message = "ERROR! concept_2 not in the vocabulary."
        print(message)
        logging.error(message)
        return message
    similarity = vectors.similarity(concept_1, concept_2)
    return str(similarity)
@app.route("/get-vocabulary-terms", methods=["GET"])
def get_vocabulary_terms():
    """Return every vocabulary term, one per line (each followed by a newline)."""
    vectors = get_vectors(
        request.headers.get("model_path"), request.headers.get("vector_path")
    )
    return "".join(word + "\n" for word in vectors.vocab)
@app.route("/get-vector", methods=["GET"])
def get_vector_given_model():
    """Return the vector of a concept as space-separated numbers.

    Header parameters: "concept" and one of "model_path"/"vector_path".

    Returns
    -------
    str
        The vector components joined by single spaces, or a message
        starting with "ERROR!" when the request cannot be served.
    """
    concept = request.headers.get("concept")
    model_path = request.headers.get("model_path")
    vector_path = request.headers.get("vector_path")
    vectors = get_vectors(model_path=model_path, vector_path=vector_path)
    if vectors is None:
        # A Flask view must return a str/Response; the previous "return 0.0"
        # (a float) would have raised a TypeError inside Flask.
        message = "ERROR! Could not instantiate vectors."
        logging.error(message)
        return message
    if concept is None:
        message = "ERROR! concept not found in header. Vector cannot be retrieved."
        print(message)
        logging.error(message)
        return message
    if concept not in vectors.vocab:
        message = "ERROR! Concept '" + str(concept) + "' not in the vocabulary."
        print(message)
        logging.error(message)
        return message
    # Space-join the components; equivalent to the former manual
    # concatenation that stripped the leading separator afterwards.
    return " ".join(str(element) for element in vectors.word_vec(concept))
# TF-IDF and LSI models
@app.route("/train-vector-space-model", methods=["GET"])
def train_vector_space_model():
    """Build a TF-IDF vector space model from a CSV file and cache it.

    Header parameters: "input_file_path" (CSV rows of the form "id,text")
    and "model_path" (the key under which the (corpus, index) pair is
    cached in active_models for later queries).

    Returns
    -------
    str
        'True' once the model has been built and cached.
    """
    input_file_path = request.headers.get("input_file_path")
    model_path = request.headers.get("model_path")
    dictionary = __createDictionary(input_file_path)
    corpus = CsvCorpus(dictionary, input_file_path)
    tfidf = models.TfidfModel(dictionary=dictionary)
    tfidf_corpus = tfidf[corpus]
    # NOTE(review): the shard prefix "index.index" is relative to the working
    # directory and shared across calls -- confirm concurrent trainings do
    # not clobber each other's index shards.
    index = similarities.Similarity(
        "index.index", tfidf_corpus, num_features=len(dictionary)
    )
    # index = similarities.SparseMatrixSimilarity(tfidf_corpus, num_features=len(dictionary))
    # index = similarities.MatrixSimilarity(tfidf_corpus, num_features=len(dictionary))
    active_models[model_path] = (corpus, index)
    return "True"
@app.route("/query-vector-space-model", methods=["GET"])
def query_vector_space_model():
    """Query a TF-IDF vector space model previously built by train_vector_space_model.

    Header parameters: "model_path" (cache key used at training time),
    "document_id_one" and optionally "document_id_two". With one id, the
    10 most similar documents are returned; with two ids, the similarity
    between the two documents is returned.

    Returns
    -------
    The similarity value as a string, a list of (docid, similarity) pairs,
    or an error/exception message as a string.
    """
    try:
        model_path = request.headers.get("model_path")
        document_id_one = request.headers.get("document_id_one")
        document_id_two = request.headers.get("document_id_two")  # can be None
        model = active_models.get(model_path)
        if model is None:
            return "ERROR! Model not active"
        (corpus, index) = model
        pos_one = corpus.id2pos.get(document_id_one)
        if pos_one is None:
            return "ERROR! document_id_one not in the vocabulary."
        sims = index.similarity_by_id(pos_one)
        if document_id_two is None:
            # NOTE(review): this returns a Python list of tuples from a Flask
            # view -- confirm the installed Flask version accepts it (older
            # versions raise a TypeError for non-str/Response return values).
            return __sims2scores(sims, corpus.pos2id, 10)
        else:
            pos_two = corpus.id2pos.get(document_id_two)
            if pos_two is None:
                return "ERROR! document_id_two not in the vocabulary."
            test = sims[pos_two]
            return str(test)
    except Exception as e:
        # Broad catch: any failure is reported back to the caller as text.
        return str(e)
# English stop words used as the default filter when building the TF-IDF
# dictionary (contracted forms and single letters included).
english_stopwords = set(
    """
    has mightn me here other very but ours he his
    there you some don such under their themselves mustn't had
    shan't she's yourselves by about needn re weren't any herself
    don't am hadn what each weren hadn't between both in
    can the does too shouldn once when s it as
    same haven hasn't didn't wasn't on shan they of was
    aren't out before our aren ourselves wouldn we didn having
    above just below why against wouldn't were yours few m
    doesn my nor then you'll your isn't haven't him doesn't
    i wasn who will that'll if hasn been myself d
    where into t ain couldn't being how y which you've
    an or from no ma doing through all most theirs
    than are to while shouldn't that so and only until
    ve isn should her yourself have over because you'd be
    more a himself those these not its own for she
    down hers you're whom after this at do ll it's
    up couldn with itself again off is during further mustn
    won did mightn't needn't should've them now o won't
    """.split()
)
def __createDictionary(file_path, stopwords=english_stopwords):
    """Build a gensim Dictionary from the text column (index 1) of a CSV file.

    Stop words and tokens that appear in only a single document are removed,
    and the id sequence is compacted afterwards.
    """
    with open(file_path, encoding="utf-8") as csv_file:
        rows = csv.reader(csv_file, delimiter=",")
        dictionary = corpora.Dictionary(row[1].lower().split() for row in rows)
    stop_ids = [
        dictionary.token2id[word] for word in stopwords if word in dictionary.token2id
    ]
    once_ids = [token_id for token_id, doc_freq in dictionary.dfs.items() if doc_freq == 1]
    # Drop stop words and hapax tokens, then close the gaps in the id sequence.
    dictionary.filter_tokens(stop_ids + once_ids)
    dictionary.compactify()
    return dictionary
def __sims2scores(sims, pos2id, topsims, eps=1e-7):
    """Convert raw similarity vector to a list of (docid, similarity) results."""
    scores = []
    # TODO or maybe clip? are opposite vectors "similar" or "dissimilar"?!
    magnitudes = abs(sims)
    for position in np.argsort(magnitudes)[::-1]:
        # Skip deleted/rewritten documents and near-zero similarities.
        if position in pos2id and magnitudes[position] > eps:
            scores.append((pos2id[position], magnitudes[position]))
            if len(scores) == topsims:
                break
    return scores
class CsvCorpus(object):
    """Streaming gensim corpus over a CSV file of "id,text" rows.

    While iterating, the id<->position mappings are (re)built so that
    similarity results can be translated back to document ids later.
    """

    def __init__(self, dictionary, file_path):
        self.dictionary = dictionary
        self.file_path = file_path
        self.id2pos = {}  # map document id (string) to index position (integer)
        self.pos2id = {}  # map index position (integer) to document id (string)

    def __iter__(self):
        with open(self.file_path, encoding="utf-8") as csv_file:
            for position, row in enumerate(csv.reader(csv_file, delimiter=",")):
                doc_id = row[0]
                if doc_id in self.id2pos:
                    logging.info(
                        "Document ID %s already in file - the last one is used only",
                        doc_id,
                    )
                self.id2pos[doc_id] = position
                self.pos2id[position] = doc_id
                yield self.dictionary.doc2bow(row[1].lower().split())
def _vector_to_line(concept, vector):
    """Format one concept and its vector as "concept v1 v2 ... vn " (each
    component followed by a single space), without a trailing newline."""
    line = concept + " "
    for element in np.nditer(vector):
        line += str(element) + " "
    return line


@app.route("/write-model-as-text-file", methods=["GET"])
def write_vectors_as_text_file():
    """
    Writes all vectors of the model to a text file: one vector per line.

    Header parameters: "model_path"/"vector_path" select the vectors,
    "file_to_write" is the destination, and the optional "entity_file"
    restricts the output to the concepts listed in that file.

    Returns
    -------
    boolean
        'True' as string if operation was successful, else 'False' (as string).
    """
    model_path = request.headers.get("model_path")
    vector_path = request.headers.get("vector_path")
    file_to_write = request.headers.get("file_to_write")
    entity_file = request.headers.get("entity_file")
    vectors = get_vectors(model_path=model_path, vector_path=vector_path)
    print("Writing the vectors as text file.")
    with open(file_to_write, "w+") as f:
        count = 0
        if entity_file is None:
            logging.info("Classic mode: Writing the full vocabulary.")
            number_of_vectors_as_str = str(len(vectors.vocab))
            logging.info("Processing " + number_of_vectors_as_str + " vectors...")
            for concept in vectors.vocab:
                if concept.strip() == "":
                    # Skip whitespace-only concepts; they would produce
                    # unparseable lines.
                    continue
                count += 1
                f.write(_vector_to_line(concept, vectors.get_vector(concept)) + "\n")
                if count % 10000 == 0:
                    logging.info(
                        "Vectors processed: "
                        + str(count)
                        + " of "
                        + number_of_vectors_as_str
                    )
        else:
            concepts = read_concept_file(entity_file)
            number_of_vectors_as_str = str(len(concepts))
            logging.info("Light mode: Writing subset to text vector file.")
            logging.info("Processing " + number_of_vectors_as_str + " vectors...")
            for concept in concepts:
                count += 1
                if concept in vectors.vocab:
                    line_to_write = _vector_to_line(concept, vectors.get_vector(concept))
                else:
                    # An empty line keeps the output aligned with the input
                    # concept list even when a concept has no vector.
                    logging.info(
                        "WARN: The following concept has not been found in the vector space: "
                        + concept
                    )
                    line_to_write = ""
                f.write(line_to_write + "\n")
                if count % 10000 == 0:
                    # Log progress (previously print) so it reaches the log
                    # file, consistent with the classic mode above.
                    logging.info(
                        "Vectors processed: "
                        + str(count)
                        + " of "
                        + number_of_vectors_as_str
                    )
    return "True"
def read_concept_file(path_to_concept_file):
    """Read one concept per line from the given file.

    Any carriage-return/newline characters are stripped from each line;
    undecodable bytes in the file are ignored.
    """
    concepts = []
    with open(path_to_concept_file, errors="ignore") as concept_handle:
        for raw_line in concept_handle:
            concepts.append(raw_line.replace("\n", "").replace("\r", ""))
    logging.info("Concept file read: " + str(path_to_concept_file))
    return concepts
@app.route("/hello", methods=["GET"])
def hello_demo():
    """A demo endpoint that greets the name given in the "name" header.

    Returns
    -------
    greeting : str
        A simple greeting.
    """
    requested_name = request.headers.get("name")
    print(requested_name)
    return "Hello " + str(requested_name) + "!"
if __name__ == "__main__":
    # Determine the port: default to 1808 unless a valid positive integer is
    # passed as the single command-line argument. The default is assigned
    # up front -- previously, when no argument was given, the try body did
    # nothing, the except never fired, and "port" was unbound (NameError).
    port = 1808
    try:
        if len(sys.argv) == 2:
            logging.info("Received argument: " + sys.argv[1])
            int_port = int(sys.argv[1])
            if int_port > 0:
                port = int_port
    except Exception as e:
        # Fall back to the default on any parse error (e.g. non-numeric arg).
        logging.info("Exception occurred. Using default port: 1808")
        logging.error(e)
    logging.info(f"Starting server using port {port}")
    app.run(debug=False, port=port)
| {"hexsha": "a123178fcbbae70143c6ca1d867f644b951ca7c4", "size": 23132, "ext": "py", "lang": "Python", "max_stars_repo_path": "rdf2vec/src/main/resources/python_server.py", "max_stars_repo_name": "EDAO-Project/DBpediaEmbedding", "max_stars_repo_head_hexsha": "457b66beab5bbfc37b55cab7534ab66c7d00c7bf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 22, "max_stars_repo_stars_event_min_datetime": "2020-03-25T16:23:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-14T16:06:07.000Z", "max_issues_repo_path": "rdf2vec/src/main/resources/python_server.py", "max_issues_repo_name": "EDAO-Project/DBpediaEmbedding", "max_issues_repo_head_hexsha": "457b66beab5bbfc37b55cab7534ab66c7d00c7bf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 47, "max_issues_repo_issues_event_min_datetime": "2020-03-24T10:32:20.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-24T06:56:21.000Z", "max_forks_repo_path": "src/main/resources/python_server.py", "max_forks_repo_name": "vemonet/jRDF2Vec", "max_forks_repo_head_hexsha": "c32f8213524b1cafd6d7be587a6e9ee195d28dcd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-04-27T19:26:35.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-28T09:11:45.000Z", "avg_line_length": 30.1590612777, "max_line_length": 118, "alphanum_fraction": 0.5684333391, "include": true, "reason": "import numpy", "num_tokens": 5099} |
import os
import sys
sys.path.append(os.getcwd())
from model import ValueNetwork
from env import JointState
import torch
import numpy as np
def test_rotate():
    """Check ValueNetwork.rotate for both the holonomic and the kinematic
    configuration against precomputed rotated joint states."""
    joint_state = JointState(2, 2, 0, 1, 0.3, 2, 4, 1, 0, 4, 2, 2, 0, 0.3)
    cases = [
        (False, [2, 1, 1, 0, 0.3, 0, 0, -2, 0, -2, 0.3, 0.6, 1, 0, 2]),
        (True, [2, 1, 1, 0, 0.3, -np.pi / 2, 0, -2, 0, -2, 0.3, 0.6, 0, -1, 2]),
    ]
    for kinematic, expected in cases:
        network = ValueNetwork(14, [150, 150, 100], kinematic=kinematic)
        batch = torch.Tensor(joint_state).expand(1, 14)
        rotated = network.rotate(batch, torch.device('cpu')).squeeze().numpy()
        assert np.allclose(rotated, expected, atol=1e-06)
test_rotate() | {"hexsha": "d6d4bca7889a87f778231654c84e8dd55ca25e8a", "size": 896, "ext": "py", "lang": "Python", "max_stars_repo_path": "CADRL-master/test/test_model.py", "max_stars_repo_name": "NeuEIRG/Collision-Avoidance-with-DRL", "max_stars_repo_head_hexsha": "7b641410eba6fe7cba9ada29307c4a2a73e3d0d8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 32, "max_stars_repo_stars_event_min_datetime": "2019-03-15T07:50:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-20T06:10:58.000Z", "max_issues_repo_path": "CADRL-master/test/test_model.py", "max_issues_repo_name": "yufengzhe1/Collision-Avoidance-with-DRL", "max_issues_repo_head_hexsha": "7b641410eba6fe7cba9ada29307c4a2a73e3d0d8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2019-04-27T06:32:38.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-24T15:05:09.000Z", "max_forks_repo_path": "CADRL-master/test/test_model.py", "max_forks_repo_name": "NeuEIRG/Collision-Avoidance-with-DRL", "max_forks_repo_head_hexsha": "7b641410eba6fe7cba9ada29307c4a2a73e3d0d8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2020-02-11T17:33:13.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-17T13:30:08.000Z", "avg_line_length": 37.3333333333, "max_line_length": 112, "alphanum_fraction": 0.6205357143, "include": true, "reason": "import numpy", "num_tokens": 382} |
(*
File: Anonymous_PAPP.thy
Author: Manuel Eberl, University of Innsbruck
*)
section \<open>Anonymous Party Approval Rules\<close>
theory Anonymous_PAPP
imports Complex_Main "Randomised_Social_Choice.Order_Predicates" PAPP_Multiset_Extras
begin
text \<open>
In this section we will define (anonymous) P-APP rules and some basic desirable properties
of P-APP rules.
\<close>
subsection \<open>Definition of the General Setting\<close>
text \<open>
The following locale encapsulates an anonymous \<^emph>\<open>party approval election\<close>; that is:
\<^item> a number of voters
\<^item> a set of parties
\<^item> the size of the desired committee
The number of parties and voters is assumed to be finite and non-zero.
As a modelling choice, we do not distinguish the voters at all; there is no explicit set
of voters. We only care about their number.
\<close>
(* Election context: finitely many (non-zero) parties, a positive number of
   voters, and a fixed target committee size. *)
locale anon_papp_election =
fixes n_voters :: nat and parties :: "'a set" and committee_size :: nat
assumes finite_parties [simp, intro]: "finite parties"
assumes n_voters_pos: "n_voters > 0"
assumes nonempty_parties [simp]: "parties \<noteq> {}"
begin
text \<open>
The result of a P-APP election is a committee, i.e.\ a multiset of parties with
the desired size.
\<close>
(* A committee is a multiset over the declared parties of exactly the
   requested size. *)
definition is_committee :: "'a multiset \<Rightarrow> bool" where
"is_committee W \<longleftrightarrow> set_mset W \<subseteq> parties \<and> size W = committee_size"
end
text \<open>
A \<^emph>\<open>preference profile\<close> for a P-APP election consists of one approval list (i.e.\ a set of
approved parties) for each voter. Since we are in an anonymous setting, this means that
we have a \<^emph>\<open>multiset\<close> consisting of \<open>n\<close> sets of parties (where \<open>n\<close> is the number of voters).
Moreover, we make the usual assumption that the approval lists must be non-empty.
\<close>
(* A valid anonymous profile: one non-empty approval set per voter, each
   drawn from the declared parties; the multiset has one entry per voter. *)
locale anon_papp_profile = anon_papp_election +
fixes A :: "'a set multiset"
assumes A_subset: "\<And>X. X \<in># A \<Longrightarrow> X \<subseteq> parties"
assumes A_nonempty: "{} \<notin># A"
assumes size_A: "size A = n_voters"
begin
(* Since there is at least one voter, a profile is never the empty multiset. *)
lemma A_nonempty': "A \<noteq> {#}"
using size_A n_voters_pos by auto
end
context anon_papp_election
begin
abbreviation
is_pref_profile where "is_pref_profile \<equiv> anon_papp_profile n_voters parties"
(* Characterisation of valid profiles without going through the locale. *)
lemma is_pref_profile_iff:
"is_pref_profile A \<longleftrightarrow> set_mset A \<subseteq> Pow parties - {{}} \<and> size A = n_voters"
unfolding anon_papp_profile_def anon_papp_profile_axioms_def
using anon_papp_election_axioms by auto
lemma not_is_pref_profile_empty [simp]: "\<not>is_pref_profile {#}"
using anon_papp_profile.A_nonempty'[of n_voters]
by auto
text \<open>
The following relation is a key definition: it takes an approval list \<open>A\<close> and turns it
into a preference relation on committees. A committee is to be at least as good as another
if the number of approved parties in it is at least as big.
This relation is reflexive, transitive, and total.
\<close>
(* Compare committees by the number of their members approved in A. *)
definition committee_preference :: "'a set \<Rightarrow> 'a multiset relation" ("Comm") where
"W1 \<preceq>[Comm(A)] W2 \<longleftrightarrow> size {# x\<in>#W1. x \<in> A #} \<le> size {# x\<in>#W2. x \<in> A #}"
lemma not_strict_Comm [simp]: "\<not>(W1 \<prec>[Comm(A)] W2) \<longleftrightarrow> W1 \<succeq>[Comm(A)] W2"
by (auto simp: committee_preference_def strongly_preferred_def)
lemma not_weak_Comm [simp]: "\<not>(W1 \<preceq>[Comm(A)] W2) \<longleftrightarrow> W1 \<succ>[Comm(A)] W2"
by (auto simp: committee_preference_def strongly_preferred_def)
sublocale Comm: preorder "Comm(A)" "\<lambda>x y. x \<prec>[Comm(A)] y"
by standard (auto simp: committee_preference_def strongly_preferred_def)
lemma strong_committee_preference_iff:
"W1 \<prec>[Comm(A)] W2 \<longleftrightarrow> size {# x\<in>#W1. x \<in> A #} < size {# x\<in>#W2. x \<in> A #}"
by (auto simp: committee_preference_def strongly_preferred_def)
text \<open>
We also define the Pareto ordering on parties induced by a given preference profile:
One party is at least as good (in the Pareto relation) as another if all voters agree that
it is at least as good. That is, $y \succeq x$ in the Pareto ordering if all voters who
approve $x$ also approve $y$.
This relation is also reflexive and transitive.
\<close>
(* y dominates x weakly if every approval set containing x also contains y. *)
definition Pareto :: "'a set multiset \<Rightarrow> 'a relation" where
"x \<preceq>[Pareto(A)] y \<longleftrightarrow> x \<in> parties \<and> y \<in> parties \<and> (\<forall>X\<in>#A. x \<in> X \<longrightarrow> y \<in> X)"
sublocale Pareto: preorder_on parties "Pareto A"
by standard (auto simp: Pareto_def)
text \<open>
Pareto losers are parties that are (strictly) Pareto-dominated, i.e.\ there
exists some other party that all voters consider to be at least as good and at least
one voter considers it to be strictly better.
\<close>
definition pareto_losers :: "'a set multiset \<Rightarrow> 'a set" where
"pareto_losers A = {x. \<exists>y. y \<succ>[Pareto(A)] x}"
end
subsection \<open>P-APP rules and Desirable Properties\<close>
text \<open>
The following locale describes a P-APP rule. This is simply a function that maps every
preference profile to a committee of the desired size.
Note that in our setting, a P-APP rule has a fixed number of voters, a fixed set of parties,
and a fixed desired committee size.
\<close>
(* A P-APP rule r: maps every valid preference profile to a well-formed
   committee of the required size. *)
locale anon_papp = anon_papp_election +
fixes r :: "'a set multiset \<Rightarrow> 'a multiset"
assumes rule_wf: "is_pref_profile A \<Longrightarrow> is_committee (r A)"
subsection \<open>Efficiency\<close>
text \<open>
Efficiency is a common notion in Social Choice Theory. The idea is that if a party is
``obviously bad'', then it should not be chosen. What ``obviously bad'' means depends on the
precise notion of Efficiency that is used. We will talk about two notions: Weak Efficiency and
Pareto Efficiency.
A P-APP rule is \<^emph>\<open>weakly efficient\<close> if a party that is approved by no one is never
part of the output committee.
Note that approval lists must be non-empty, so there is always at least one party that
is approved by at least one voter.
\<close>
(* Weak Efficiency: a party approved by no voter never enters the committee. *)
locale weakly_efficient_anon_papp = anon_papp +
assumes weakly_efficient: "is_pref_profile A \<Longrightarrow> \<forall>X\<in>#A. x \<notin> X \<Longrightarrow> x \<notin># r A"
text \<open>
A P-APP rule is \<^emph>\<open>Pareto-efficient\<close> if a Pareto-dominated party is never part of
the output committee.
\<close>
(* Pareto Efficiency: a strictly Pareto-dominated party never enters the
   committee. *)
locale pareto_optimal_anon_papp = anon_papp +
assumes pareto_optimal: "is_pref_profile A \<Longrightarrow> x \<in> pareto_losers A \<Longrightarrow> x \<notin># r A"
begin
text \<open>
Pareto-efficiency implies weak efficiency:
\<close>
sublocale weakly_efficient_anon_papp
proof
fix A x
assume A: "is_pref_profile A" and x: "\<forall>X\<in>#A. x \<notin> X"
interpret anon_papp_profile n_voters parties committee_size A
by fact
(* Obtain some voter's approval set X and some approved party y \<in> X. *)
have "A \<noteq> {#}"
using A_nonempty' .
then obtain X where X: "X \<in># A"
by auto
with A_nonempty have "X \<noteq> {}"
by auto
then obtain y where y: "y \<in> X"
by auto
show "x \<notin># r A"
proof (cases "x \<in> parties")
case False
thus ?thesis
using rule_wf[OF A] by (auto simp: is_committee_def)
next
case True
(* Since nobody approves x but somebody approves y, y dominates x. *)
have "y \<succ>[Pareto(A)] x"
unfolding Pareto_def using X x y True A_subset[of X]
by (auto simp: strongly_preferred_def)
hence "x \<in> pareto_losers A"
by (auto simp: pareto_losers_def)
thus ?thesis
using pareto_optimal[OF A] by auto
qed
qed
end
subsection \<open>Strategyproofness\<close>
text \<open>
Strategyproofness is another common notion in Social Choice Theory that generally encapsulates
the notion that a voter should not be able to manipulate the outcome of an election in their
favour by (unilaterally) submitting fake preferences; i.e.\ reporting one's preferences
truthfully should always be the optimal choice.
A P-APP rule is called \<^emph>\<open>cardinality-strategyproof\<close> if a voter cannot obtain a better committee
(i.e.\ one that contains strictly more of their approved parties) by submitting an approval
list that is different from their real approval list.
\<close>
text \<open>
To make the definition simpler, we first define the notion of \<^emph>\<open>manipulability\<close>: in the context
of a particular P-APP rule \<open>r\<close>, a preference profile \<open>A\<close> is said to be manipulable by the voter
\<open>i\<close> with the fake preference list \<open>Y\<close> if \<open>r(A(i := Y))\<close> contains strictly more parties
approved by \<open>i\<close> than \<open>r(A)\<close>.
Since we have anonymous profiles and do not talk about particular voters, we replace \<open>i\<close> with
their approval list \<open>X\<close>. Since \<open>A\<close> is a multiset, the definition of manipulability
becomes $r(A-\{X\}+\{Y\}) \succ_{X} r(A)$.
\<close>
(* A profile A is manipulable by a voter with true list X via fake list Y
   if swapping X for Y yields a strictly better committee w.r.t. X. *)
definition (in anon_papp) card_manipulable where
"card_manipulable A X Y \<longleftrightarrow>
is_pref_profile A \<and> X \<in># A \<and> Y \<noteq> {} \<and> Y \<subseteq> parties \<and> r (A - {#X#} + {#Y#}) \<succ>[Comm(X)] r A"
text \<open>
A technical (and fairly obvious) lemma: replacing a voter's approval list with a different
approval list again yields a valid preference profile.
\<close>
(* Replacing one approval list by another valid one preserves profile
   validity (the size is unchanged and the new list is admissible). *)
lemma (in anon_papp) is_pref_profile_replace:
assumes "is_pref_profile A" and "X \<in># A" and "Y \<noteq> {}" and "Y \<subseteq> parties"
shows "is_pref_profile (A - {#X#} + {#Y#})"
proof -
interpret anon_papp_profile n_voters parties committee_size A
by fact
show ?thesis
using assms A_subset A_nonempty unfolding is_pref_profile_iff
by (auto dest: in_diffD simp: size_Suc_Diff1)
qed
(* Cardinality-strategyproofness: no profile is manipulable by any voter. *)
locale card_stratproof_anon_papp = anon_papp +
assumes not_manipulable: "\<not>card_manipulable A X Y"
begin
text \<open>
The two following alternative versions of non-manipulability are somewhat nicer to use
in practice.
\<close>
(* Symmetric formulation: two valid profiles differing in a single ballot
   (X swapped for Y) never give a strictly better outcome w.r.t. X. *)
lemma not_manipulable':
assumes "is_pref_profile A" "is_pref_profile A'" "A + {#Y#} = A' + {#X#}"
shows "\<not>(r A' \<succ>[Comm(X)] r A)"
proof (cases "X = Y")
case True
thus ?thesis
using assms by (simp add: strongly_preferred_def)
next
case False
interpret A: anon_papp_profile n_voters parties committee_size A
by fact
interpret A': anon_papp_profile n_voters parties committee_size A'
by fact
from assms(3) False have *: "Y \<in># A'" "X \<in># A"
by (metis add_mset_add_single insert_noteq_member)+
have "\<not>card_manipulable A X Y"
by (intro not_manipulable)
hence "\<not>r (A - {#X#} + {#Y#}) \<succ>[Comm(X)] r A"
using assms * A.A_subset A'.A_subset A.A_nonempty A'.A_nonempty
by (auto simp: card_manipulable_def)
also have "A - {#X#} + {#Y#} = A'"
using assms(3) False by (metis add_eq_conv_diff add_mset_add_single)
finally show ?thesis .
qed
lemma not_manipulable'':
assumes "is_pref_profile A" "is_pref_profile A'" "A + {#Y#} = A' + {#X#}"
shows "r A' \<preceq>[Comm(X)] r A"
using not_manipulable'[OF assms] by simp
end
subsection \<open>Representation\<close>
text \<open>
\<^emph>\<open>Representation\<close> properties are in a sense the opposite of Efficiency properties: if a
sufficiently large number of voters agree that certain parties are good, then these should, to some
extent, be present in the result. For instance, if we have 20 voters and 5 of them approve
parties \<open>A\<close> and \<open>B\<close>, then if the output committee has size 4, we would expect either
\<open>A\<close> or \<open>B\<close> to be in the committee to ensure that these voters' preferences are represented fairly.
Weak representation is a particularly weak variant of this that states that if at least one $k$-th
of the voters (where $k$ is the size of the output committee) approve only a single
party \<open>x\<close>, then \<open>x\<close> should be in the committee at least once:
\<close>
(* Weak Representation: if at least n/k voters uniquely approve party x,
   then x is in the committee (stated multiplicatively to avoid division). *)
locale weak_rep_anon_papp =
anon_papp n_voters parties committee_size r
for n_voters and parties :: "'alt set" and committee_size :: nat and r +
assumes weak_representation:
"is_pref_profile A \<Longrightarrow> committee_size * count A {x} \<ge> n_voters \<Longrightarrow> x \<in># r A"
text \<open>
The following alternative definition of Weak Representation is a bit closer to the definition
given in the paper.
\<close>
(* Division-based restatement of Weak Representation, matching the paper. *)
lemma weak_rep_anon_papp_altdef:
"weak_rep_anon_papp n_voters parties committee_size r \<longleftrightarrow>
anon_papp n_voters parties committee_size r \<and> (committee_size = 0 \<or>
(\<forall>A x. anon_papp_profile n_voters parties A \<longrightarrow>
count A {x} \<ge> n_voters / committee_size \<longrightarrow> x \<in># r A))"
by (cases "committee_size = 0")
(auto simp: field_simps weak_rep_anon_papp_def
weak_rep_anon_papp_axioms_def
anon_papp_def anon_papp_axioms_def anon_papp_election_def
simp flip: of_nat_mult)
text \<open>
\<^emph>\<open>Justified Representation\<close> is a stronger notion which demands that if there is a subgroup of
voters that comprises at least one $k$-th of all voters and for which the intersection of their
approval lists is some nonempty set \<open>X\<close>, then at least one of the parties approved by at least
one voter in that subgroup must be in the result committee.
\<close>
(* Justified Representation: any 1/k-sized voter group with a common
   approved party gets at least one approved committee member. *)
locale justified_rep_anon_papp =
anon_papp n_voters parties committee_size r
for n_voters and parties :: "'alt set" and committee_size :: nat and r +
assumes justified_representation:
"is_pref_profile A \<Longrightarrow> G \<subseteq># A \<Longrightarrow> committee_size * size G \<ge> n_voters \<Longrightarrow>
(\<Inter>X\<in>set_mset G. X) \<noteq> {} \<Longrightarrow> \<exists>X x. X \<in># G \<and> x \<in> X \<and> x \<in># r A"
begin
text \<open>
Any rule that satisfies Justified Representation also satisfies Weak Representation
\<close>
sublocale weak_rep_anon_papp
proof
fix A x
assume *: "is_pref_profile A" "n_voters \<le> committee_size * count A {x}"
(* The witnesses: the group of all voters whose list is exactly {x}. *)
define G where "G = replicate_mset (count A {x}) {x}"
have [simp]: "size G = count A {x}"
by (auto simp: G_def)
have **: "set_mset G \<subseteq> {{x}}"
by (auto simp: G_def)
have ***: "G \<subseteq># A"
unfolding G_def by (meson count_le_replicate_mset_subset_eq order_refl)
have "\<exists>X x. X \<in># G \<and> x \<in> X \<and> x \<in># r A"
by (rule justified_representation) (use * ** *** in auto)
thus "x \<in># r A"
using ** by auto
qed
end
(* Combination: strategyproofness together with Weak Representation. *)
locale card_stratproof_weak_rep_anon_papp =
card_stratproof_anon_papp + weak_rep_anon_papp
subsection \<open>Proportional Representation\<close>
text \<open>
The notions of Representation we have seen so far are fairly weak in that they only demand
that certain parties be in the committee at least once if enough voters approve them. Notions of
Proportional Representation strengthen this by demanding that if a sufficiently large subgroup of
voters approve some parties, then these voters must be represented in the result committee not
just once, but to a degree proportional to the size of that subgroup of voters.
For Weak Representation, the proportional generalization is fairly simple: if a fraction of
at least $\frac{ln}{k}$ of the voters uniquely approve a party \<open>x\<close>, then \<open>x\<close> must be in the
committee at least \<open>l\<close> times.
\<close>
(* Weak Proportional Representation: l*n/k unique approvals for x force x
   into the committee at least l times. *)
locale weak_prop_rep_anon_papp =
anon_papp n_voters parties committee_size r
for n_voters and parties :: "'alt set" and committee_size :: nat and r +
assumes weak_proportional_representation:
"is_pref_profile A \<Longrightarrow> committee_size * count A {x} \<ge> l * n_voters \<Longrightarrow> count (r A) x \<ge> l"
begin
(* The special case l = 1 is exactly Weak Representation. *)
sublocale weak_rep_anon_papp
proof
fix A x
assume "is_pref_profile A" "n_voters \<le> committee_size * count A {x}"
thus "x \<in># r A"
using weak_proportional_representation[of A 1] by auto
qed
end
text \<open>
Similarly, Justified \<^emph>\<open>Proportional\<close> Representation demands that if the approval lists of
a subgroup of at least $\frac{ln}{k}$ voters have a non-empty intersection, then at least \<open>l\<close>
parties in the result committee are each approved by at least one of the voters in the subgroup.
\<close>
(* Justified Proportional Representation: an l/k-fraction group with a
   common approved party is represented by at least l committee members
   approved somewhere in the group. *)
locale justified_prop_rep_anon_papp =
anon_papp n_voters parties committee_size r
for n_voters and parties :: "'alt set" and committee_size :: nat and r +
assumes justified_proportional_representation:
"is_pref_profile A \<Longrightarrow> G \<subseteq># A \<Longrightarrow> committee_size * size G \<ge> l * n_voters \<Longrightarrow>
(\<Inter>X\<in>set_mset G. X) \<noteq> {} \<Longrightarrow> size {# x \<in># r A. x \<in> (\<Union>X\<in>set_mset G. X) #} \<ge> l"
begin
(* The case l = 1 yields Justified Representation. *)
sublocale justified_rep_anon_papp
proof
fix A G
assume "is_pref_profile A" "G \<subseteq># A" "n_voters \<le> committee_size * size G"
"(\<Inter>X\<in>set_mset G. X) \<noteq> {}"
hence "size {#x \<in># r A. \<exists>X\<in>#G. x \<in> X#} \<ge> 1"
using justified_proportional_representation[of A G 1] by auto
hence "{#x \<in># r A. \<exists>X\<in>#G. x \<in> X#} \<noteq> {#}"
by auto
thus "\<exists>X x. X \<in># G \<and> x \<in> X \<and> x \<in># r A"
by fastforce
qed
(* Instantiating the group as all voters approving exactly {x} yields
   Weak Proportional Representation. *)
sublocale weak_prop_rep_anon_papp
proof
fix A l x
assume *: "is_pref_profile A" "l * n_voters \<le> committee_size * count A {x}"
define G where "G = replicate_mset (count A {x}) {x}"
from * have "size {#x \<in># r A. x \<in> (\<Union>X\<in>set_mset G. X)#} \<ge> l"
by (intro justified_proportional_representation)
(auto simp: G_def simp flip: count_le_replicate_mset_subset_eq)
also have "size {#x \<in># r A. x \<in> (\<Union>X\<in>set_mset G. X)#} \<le> count (r A) x"
by (auto simp: G_def)
finally show "count (r A) x \<ge> l" .
qed
end
(* Combination: strategyproofness plus Weak Proportional Representation. *)
locale card_stratproof_weak_prop_rep_anon_papp =
card_stratproof_anon_papp + weak_prop_rep_anon_papp
end
| {"author": "isabelle-prover", "repo": "mirror-afp-devel", "sha": "c84055551f07621736c3eb6a1ef4fb7e8cc57dd1", "save_path": "github-repos/isabelle/isabelle-prover-mirror-afp-devel", "path": "github-repos/isabelle/isabelle-prover-mirror-afp-devel/mirror-afp-devel-c84055551f07621736c3eb6a1ef4fb7e8cc57dd1/thys/PAPP_Impossibility/Anonymous_PAPP.thy"} |
"""
parameters lat/long bounding box (top/left bottom/right) and base
convert to row/col limits - get decimal lat long min /max multiple y 3 and round to integer
use numpy to read in the hgt files
extract the subarrays required, merge into a single array, add base, flip and output to stdout
"""
import sys, math, os
from numpy import *
def load_hgt(fn):
    """Load an SRTM ``.hgt`` tile as a square array of elevations.

    Parameters
    ----------
    fn : str
        Tile file name inside the ``srtm90/`` directory.

    Returns
    -------
    numpy.ndarray
        A ``(dim, dim)`` array of big-endian 16-bit integers, where ``dim``
        is inferred from the file size (2 bytes per sample).

    Raises
    ------
    ValueError
        If the file size is not twice a perfect square (the previously
        commented-out assert, reinstated as an explicit check).
    """
    path = "srtm90/" + fn
    siz = os.path.getsize(path)
    dim = int(math.sqrt(siz // 2))
    if dim * dim * 2 != siz:
        raise ValueError("Invalid file size for %s: %d bytes" % (path, siz))
    return fromfile(path, dtype('>i2'), dim * dim).reshape((dim, dim))
# --- Constants ----------------------------------------------------------
nm = 1850  # metres per nautical mile as used by this script
           # NOTE(review): the standard value is 1852 m -- confirm intent
resolution = 3  # SRTM3 tiles: 3 arc-second resolution
# Bug fix: use integer division. Under Python 3, `3600 / resolution` is a
# float, which propagated into the slice indices below and raised TypeError.
samples = 3600 // resolution  # samples per degree of latitude/longitude
rscale = nm * 60 * resolution // 3600  # metres per sample (currently unused)

# --- Command-line arguments: bounding box corners and base offset -------
lat_tl = float(sys.argv[1])   # top-left latitude
long_tl = float(sys.argv[2])  # top-left longitude
lat_br = float(sys.argv[3])   # bottom-right latitude
long_br = float(sys.argv[4])  # bottom-right longitude
base = float(sys.argv[5])     # height added to every sample

lat_min = min(lat_tl, lat_br)
lat_max = max(lat_tl, lat_br)
long_min = min(long_tl, long_br)
long_max = max(long_tl, long_br)
lat_mid = (lat_min + lat_max) / 2  # currently unused

# Whole-degree tile indices covering the bounding box.
lat_min_d = int(math.floor(lat_min))
lat_max_d = int(math.ceil(lat_max)) - 1
long_min_d = int(math.floor(long_min))
long_max_d = int(math.ceil(long_max)) - 1

for latd in range(lat_min_d, lat_max_d + 1):
    # Bug fix: SRTM tiles at latitude 0 are named "N00...", so 0 counts as
    # north (the original `> 0` produced a nonexistent "S00" name).
    latDir = "N" if latd >= 0 else "S"
    for longd in range(long_min_d, long_max_d + 1):
        # Likewise, longitude 0 is east ("E000").
        longDir = "E" if longd >= 0 else "W"
        tile = (latDir + "%02u" + longDir + "%03u.hgt") % (abs(latd), abs(longd))
        h = load_hgt(tile)
        # Convert the fractional overlap of the bounding box with this tile
        # into row/column limits. Rows in .hgt files run north to south,
        # hence the `samples - ...` inversion for the latitude indices.
        tile_lat_min = samples - int(min(1.0, lat_tl - latd) * samples)
        tile_lat_max = samples - int(max(0.0, lat_br - latd) * samples)
        tile_long_max = int(min(1.0, long_br - longd) * samples)
        tile_long_min = int(max(0.0, long_tl - longd) * samples)
        he = h[tile_lat_min:tile_lat_max, tile_long_min:tile_long_max]
        # Stitch tiles west-to-east into a strip, then strips south-to-north.
        if longd == long_min_d:
            strip = he
        else:
            strip = hstack([strip, he])
    if latd == lat_min_d:
        surface = strip
    else:
        surface = vstack([strip, surface])

surface = surface + base
surface = flipud(surface)  # row 0 becomes the southernmost row
savetxt(sys.stdout, surface, "%d")
| {"hexsha": "d4b8bac27acac3900472c658dc41ad4415511661", "size": 2351, "ext": "py", "lang": "Python", "max_stars_repo_path": "extract_strm90.py", "max_stars_repo_name": "KitWallace/terrain", "max_stars_repo_head_hexsha": "c9a1a7e11fbcc470e659c4b2c5b46d3e4e2c93cc", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "extract_strm90.py", "max_issues_repo_name": "KitWallace/terrain", "max_issues_repo_head_hexsha": "c9a1a7e11fbcc470e659c4b2c5b46d3e4e2c93cc", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": 21, "max_issues_repo_issues_event_min_datetime": "2015-10-12T04:28:38.000Z", "max_issues_repo_issues_event_max_datetime": "2019-02-19T18:01:17.000Z", "max_forks_repo_path": "extract_strm90.py", "max_forks_repo_name": "KitWallace/terrain", "max_forks_repo_head_hexsha": "c9a1a7e11fbcc470e659c4b2c5b46d3e4e2c93cc", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.5324675325, "max_line_length": 96, "alphanum_fraction": 0.6410038282, "include": true, "reason": "from numpy", "num_tokens": 719} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.