text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
'''
SMART HMI magnetogram .fits processing code
=========================================
Written by Sophie A. Murray, code originally developed by Paul Higgins (ar_processmag.pro).
Developed under Python 3 and Sunpy 0.8.3
- Python 3.6.1 |Anaconda custom (x86_64)| (default, May 11 2017, 13:04:09)
Inputs:
- inmap: processed magnetogram
- cosmicthresh: a hard threshold for detecting cosmic rays
- medfiltwidth: the width of a box to use to perform median filter on magnetogram
Optional keywords:
- medianfilter: if TRUE, 3x3 median filter noisy values (default FALSE in original code)
Steps:
- Cosmic ray removal
- Non-finite removal
- Zero off-limb pixels
- Rotate solar north = up
- Cosine correction
- Median filter
Notes:
- sunpy.wcs has been deprecated and needs to be replaced
- there has to be a better way for median filtering a.l.a filter_image.pro
'''
import numpy as np
import scipy
from scipy import interpolate
#import sunpy.wcs.wcs as wcs
from configparser import ConfigParser
import sunpy.map
import astropy.units as u
from astropy.convolution import convolve
def main(inmap, medianfilter):
    """Load input parameters, remove cosmic rays and NaNs,
    then make all off-limb pixels zero, clean up the limb,
    rotate the map and do a cosine correction.

    Parameters
    ----------
    inmap : sunpy.map.Map
        Processed magnetogram map.
    medianfilter : bool
        If True, apply the smoothing step (boxcar, see median_filter).

    Returns
    -------
    tuple
        (map, cosine map, limb mask).
    """
    ## Load configuration file
    config = ConfigParser()
    config.read("config.ini")
    ## Rotate solar north = up, then downsample to 1024x1024
    inmap = inmap.rotate(angle=int(inmap.meta['crota2'])*u.deg)
    inmap = inmap.resample(u.Quantity([1024, 1024], u.pixel))
    inmap.meta['crota2'] = 0.
    # I commented out above as it was just adding way too much limb noise, so instead doing a hacky way
    # NOTE(review): crota2 was just set to 0 above, so this branch is
    # unreachable -- presumably the rotate/resample lines were meant to be
    # commented out whenever this flip path was in use. Confirm intent.
    if (inmap.meta['crota2'] >= 100.):
        data = np.flip(inmap.data, 1)[::-1]
        inmap = sunpy.map.Map(data, inmap.meta)
        inmap.meta['crota2'] = 0.
    # Already floats in numpy data array so skipped first line converting double to float
    ## Cosmic ray removal
    # np.float was removed in NumPy 1.24; the builtin float is equivalent.
    data = cosmicthresh_remove(inmap.data, float(config.get('processing', 'cosmicthresh')))
    ## Clean NaNs
    # Higgo used bilinear interpoaltion
    data = remove_nans(data)
    ## Zero off-limb pixels
    # Clean edge - make all pixels off limb equal to 0. Has been commented out as done during nan removal above!
    # data = edge_remove(data)
    ## Create cosine map
    inmap = sunpy.map.Map(data, inmap.meta)
    cosmap, rrdeg, limbmask = ar_cosmap(inmap)
    ## Fix remaining limb issues
    data, limbmask = fix_limb(inmap.data, rrdeg, limbmask)
    ## Median filter noisy values
    # Higgo uses an IDL rotine called filter_image so need to get python version (not urgent as not used default)
    if medianfilter is True:
        data = median_filter(data, float(config.get('processing', 'medfiltwidth')))
    ## Magnetic field cosine correction
    inmap = sunpy.map.Map(data, inmap.meta)
    # NOTE(review): the corrected 'data' returned here is never placed back
    # into the returned map -- confirm whether callers expect the corrected
    # array inside inmap.
    data, cosmap = cosine_correction(inmap, cosmap)
    return inmap, cosmap, limbmask
def cosmicthresh_remove(data, cosmicthresh):
    """
    Search for cosmic rays using hard threshold defined in config file.
    Remove a candidate if it is a 3-sigma detection above its neighbouring
    pixels, replacing it by the neighbourhood mean.

    Parameters
    ----------
    data : numpy.ndarray
        2D magnetogram array (modified in place).
    cosmicthresh : float
        Hard threshold above which a pixel is a cosmic-ray candidate.

    Returns
    -------
    numpy.ndarray
        The cleaned array.
    """
    wcosmic = np.where(data > cosmicthresh)
    ncosmic = len(wcosmic[0])
    print('Cosmic Ray Candidates Found: ', ncosmic)
    if ncosmic == 0:
        return data
    wcx = wcosmic[0]
    wcy = wcosmic[1]
    # Offsets of the 8 surrounding pixels. The original code reused a single
    # offset array for both axes, which sampled only the centre pixel and
    # the two diagonals instead of the full 8-neighbourhood.
    x_neighbours = np.int_([-1, 0, 1, 1, 1, 0, -1, -1])
    y_neighbours = np.int_([1, 1, 1, 0, -1, -1, -1, 0])
    for i in range(ncosmic):
        wcx_neighbours = wcx[i] + x_neighbours
        wcy_neighbours = wcy[i] + y_neighbours
        neighbour_vals = data[wcx_neighbours, wcy_neighbours]
        # 3-sigma detection threshold relative to the neighbourhood.
        wc_logic = 3 * np.std(neighbour_vals) + np.mean(neighbour_vals)
        if data[wcx[i], wcy[i]] >= wc_logic:
            data[wcx[i], wcy[i]] = np.mean(neighbour_vals)
    return data
def remove_nans(array):
    """
    Replace NaNs by interpolating from the surrounding valid pixels.
    Points that cannot be interpolated (outside the convex hull of valid
    samples, e.g. off-limb) are filled with zero.
    """
    nrows, ncols = array.shape[0], array.shape[1]
    col_idx, row_idx = np.meshgrid(np.arange(ncols), np.arange(nrows))
    ## Mask invalid values
    masked = np.ma.masked_invalid(array)
    valid = ~masked.mask
    # Cubic interpolation built from the valid samples only.
    filled = interpolate.griddata(
        (col_idx[valid], row_idx[valid]),
        masked[valid].ravel(),
        (col_idx, row_idx),
        method='cubic',
        fill_value=0.,
    )
    return filled
def edge_remove(data):
    """
    Zero out the arbitrary fill values beyond the limb.
    Every pixel equal to the corner value (taken as the off-limb fill
    value) is set to zero, in place.
    """
    fill_value = data[0, 0]
    data[data == fill_value] = 0.
    return data
def ar_cosmap(inmap):
    """
    Get the cosine map and off-limb pixel map using WCS.
    Generate a map of the solar disk that is 1 at disk center and goes radially outward as the cos(angle to LOS), which
    is = 2 at 60 degrees from LOS.
    Other outputs:
    - rrdeg: gives degrees from disk center
    - offlimb: map of 1=on-disk and 0=off-disk
    """
    ## Take off an extra half percent from the disk to get rid of limb effects
    fudge=0.999
    #
    ## Get helioprojective_coordinates
    # Below is deprecated so commented out and updated
    # xx, yy = wcs.convert_pixel_to_data(inmap.data.shape,
    #                                    [inmap.meta["CDELT1"], inmap.meta["CDELT2"]],
    #                                    [inmap.meta["CRPIX1"], inmap.meta["CRPIX2"]],
    #                                    [inmap.meta["CRVAL1"], inmap.meta["CRVAL2"]])
    x, y = (np.meshgrid(*[np.arange(v.value) for v in inmap.dimensions]) * u.pixel)
    hpc = inmap.pixel_to_world(x, y)#NEED TO CHECK RE WHAT ORIGIN TO USE, origin=1)
    # Helioprojective offsets from disk centre along each axis (arcsec).
    xx = hpc.Tx.value
    yy = hpc.Ty.value
    # Radial distance from disk centre (arcsec).
    rr = ((xx**2.) + (yy**2.))**(0.5)
    #
    coscor = np.copy(rr)
    # NOTE(review): despite the name/docstring, rrdeg is in radians
    # (np.arcsin); fix_limb converts it to degrees -- confirm.
    rrdeg = np.arcsin(coscor / inmap.meta["RSUN_OBS"])
    coscor = 1. / np.cos(rrdeg)
    # Apply no correction beyond the (slightly shrunken) limb.
    wgt = np.where(rr > (inmap.meta["RSUN_OBS"]*fudge))
    coscor[wgt] = 1.
    #
    # Binary limb mask: 1 on-disk, 0 off-disk.
    offlimb = np.copy(rr)
    wgtrr = np.where(rr >= (inmap.meta["RSUN_OBS"]*fudge))
    offlimb[wgtrr] = 0.
    wltrr = np.where(rr < (inmap.meta["RSUN_OBS"]*fudge))
    offlimb[wltrr] = 1.
    #
    return coscor, rrdeg, offlimb
def fix_limb(data, rrdeg, limbmask):
    """
    Zero every pixel beyond 90 degrees from the line of sight.
    (The original IDL code used 80 degrees, which cut off too much limb.)

    Returns the masked data and the updated limb mask.
    """
    maxlimb = 90.
    # Convert the radial-angle map from radians to degrees.
    angle_deg = (rrdeg/(2.*np.pi))*360.
    beyond_limb = angle_deg > maxlimb
    data[beyond_limb] = 0.
    limbmask[beyond_limb] = 0.
    return data*limbmask, limbmask
def median_filter(data, medfiltwidth):
    """
    Smooth noisy values. See here for inspiration:
    http://docs.sunpy.org/en/stable/generated/gallery/image_bright_regions_gallery_example.html

    NOTE(review): despite the name this is a boxcar convolution via
    astropy convolve, not a true median filter like IDL's filter_image.
    """
    # np.int was removed in NumPy 1.24; the builtin int is equivalent here.
    kernel = np.ones((int(medfiltwidth), int(medfiltwidth)))  # medfiltwidth should be three
    return convolve(data, kernel)
    # return scipy.ndimage.gaussian_filter(data, medfiltwidth)
def cosine_correction(inmap, cosmap):
    """
    Apply the cosine correction to the magnetic field data.
    The correction factor is capped at the value corresponding to one
    pixel at the edge of the Sun -- the maximum factor of pixel area
    covered by a single pixel at the solar limb relative to disk centre.
    """
    # Angle at which one pixel still fits inside the limb.
    thetalim = np.arcsin(1. - inmap.meta["CDELT1"] / inmap.meta["RSUN_OBS"])
    coscorlim = 1. / np.cos(thetalim)
    # Cap the correction factor (in place) so limb pixels do not blow up.
    cosmap[np.where(cosmap > coscorlim)] = coscorlim
    return inmap.data * cosmap, cosmap
def myround(x, base=5):
    """Round ``x`` to the nearest multiple of ``base`` (default 5)."""
    multiples = round(float(x) / base)
    return int(base * multiples)
if __name__ == '__main__':
    # NOTE(review): main() requires (inmap, medianfilter) arguments, so
    # running this module directly raises TypeError -- confirm whether a
    # CLI entry point was intended here.
    main()
|
{"hexsha": "9f82f29a90650d93a74696a90ee59a8a7b7242ef", "size": 7774, "ext": "py", "lang": "Python", "max_stars_repo_path": "process_magnetogram.py", "max_stars_repo_name": "mo-sb/smart_python", "max_stars_repo_head_hexsha": "e754c4e382e81c9658d9d5accea39b64c5e95c17", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-09-11T09:59:10.000Z", "max_stars_repo_stars_event_max_datetime": "2018-09-11T09:59:10.000Z", "max_issues_repo_path": "process_magnetogram.py", "max_issues_repo_name": "mo-sb/smart_python", "max_issues_repo_head_hexsha": "e754c4e382e81c9658d9d5accea39b64c5e95c17", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2018-06-20T12:41:41.000Z", "max_issues_repo_issues_event_max_datetime": "2018-09-28T09:03:31.000Z", "max_forks_repo_path": "process_magnetogram.py", "max_forks_repo_name": "mo-sb/smart_python", "max_forks_repo_head_hexsha": "e754c4e382e81c9658d9d5accea39b64c5e95c17", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-09-14T13:39:43.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-26T10:03:30.000Z", "avg_line_length": 37.1961722488, "max_line_length": 119, "alphanum_fraction": 0.6416259326, "include": true, "reason": "import numpy,import scipy,from scipy,import astropy,from astropy", "num_tokens": 2181}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
The program provides neural networks for recommendation
or modification of cryptographically random numbers.
"""
from argparse import ArgumentParser
from os import path
import matplotlib.pyplot as plt
import numpy as np
from models import Limiter, Modifier, Recommender
from test_suite import test_number
__version__ = '1.0'
__author__ = 'Semyon Makhaev'
__email__ = 'semenmakhaev@yandex.ru'
# Maps the CLI model_type choice to its model class.
MODELS = {
    'limiter': Limiter,
    'modifier': Modifier,
    'recommender': Recommender
}
def parse_args():
    """Command-line arguments parsing.

    Returns the parsed argparse namespace.
    """
    parser = ArgumentParser(prog='main.py',
                            description='Program providing neural networks for recommendation \
                            or modification of cryptographically random numbers',
                            epilog='Semyon Makhaev, 2019.')
    parser.add_argument('model_type',
                        type=str,
                        choices=['limiter', 'modifier', 'recommender'],
                        # Help text previously said 'Model filename' -- a
                        # copy-paste error from the option below.
                        help='Model type')
    parser.add_argument('--model_filename', '-m', type=str, help='Model filename')
    parser.add_argument('--plot_filename', '-p', type=str, help='Plot filename')
    parser.add_argument('-t', '--tests_path', type=str, help='NIST statistical test suite path')
    parser.add_argument('--summary', '-s', action='store_true', help='Print model summary')
    parser.add_argument('--fit', '-f', action='store_true', help='Fit model')
    return parser.parse_args()
def initialize_model(model_type, model_filename):
    """Model initialization.

    Raises NameError when model_type is missing or unknown.
    """
    if not model_type or model_type not in MODELS:
        raise NameError(f'Model {model_type} does not exist')
    model = MODELS[model_type]()
    # Load saved weights when the file exists; otherwise build the model.
    if model_filename and path.exists(model_filename):
        model.load(model_filename)
    else:
        model.create()
        model.compile()
    return model
def postprocess_predictions(predictions):
    """Splits data to binary numbers.

    Each prediction vector is thresholded at 0.5 and the resulting bits
    are read as one binary integer.
    """
    numbers = []
    for prediction in predictions:
        bit_string = ''.join('1' if value >= 0.5 else '0' for value in prediction)
        numbers.append(int(bit_string, 2))
    return numbers
def evaluate_prediction(numbers, test_suite_path):
    """Run NIST tests to determine randomness of predicted numbers."""
    # One NIST result per predicted number.
    results = [test_number(number, test_suite_path) for number in numbers]
    success_measure = np.mean(results)
    print(f'Evaluation: {success_measure * 100}%')
    # NOTE(review): plt.show() blocks until the window closes -- confirm
    # this is acceptable for unattended runs.
    plt.plot(results)
    plt.show()
def main():
    """Running tools for model creating, training and evaluating."""
    args = parse_args()
    model = initialize_model(args.model_type, args.model_filename)
    dataset = model.get_dataset()
    if args.plot_filename:
        model.plot(args.plot_filename)
    if args.fit:
        model.fit(dataset)
        # NOTE(review): model_filename may be None here; confirm that
        # model.save handles a missing filename.
        model.save(args.model_filename)
    if args.summary:
        model.summary()
    # dataset is unpacked as a 2-tuple whose second element is the test
    # split -- presumably (train, test); verify against the model API.
    _, x_test = dataset
    predictions = model.predict(x_test)
    if args.tests_path and model.is_evaluable:
        numbers = postprocess_predictions(predictions)
        evaluate_prediction(numbers, args.tests_path)
    else:
        print(f'Predictions: {predictions}')
if __name__ == '__main__':
main()
|
{"hexsha": "7363dffc6b8afda0ce048cdcbc0ab154228352fb", "size": 3330, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "SemyonMakhaev/personal-auth", "max_stars_repo_head_hexsha": "32fe00db7c6acc16e49b20178c08a6d2364307d1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main.py", "max_issues_repo_name": "SemyonMakhaev/personal-auth", "max_issues_repo_head_hexsha": "32fe00db7c6acc16e49b20178c08a6d2364307d1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-11-13T18:28:47.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T00:23:02.000Z", "max_forks_repo_path": "main.py", "max_forks_repo_name": "SemyonMakhaev/personal-auth", "max_forks_repo_head_hexsha": "32fe00db7c6acc16e49b20178c08a6d2364307d1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.2950819672, "max_line_length": 96, "alphanum_fraction": 0.6573573574, "include": true, "reason": "import numpy", "num_tokens": 716}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 30 08:42:40 2020
@author: ibarlow
Script to fill the 96WPs with 3 doses of each drug from the Prestwick C elegans
library that contains 240 drugs
"""
import pandas as pd
from pathlib import Path
import numpy as np
import itertools
import math
import warnings
# Master CSV of the Prestwick C. elegans 240-drug library.
PRESTWICK_LIBRARY = Path('/Users/ibarlow/OneDrive - Imperial College London/'+\
                         'Documents/DrugScreening/DrugLibraries/' +\
                         'Prestwick_CelegansLibrary/'+\
                         'Celegans_Library_240_IBedits.csv')
SAVE_TO = PRESTWICK_LIBRARY.parent / '2020PrestwickLibraryPlates3doses.csv'
# Number of control wells per plate, by control type.
CONTROL_DICT = {'DMSO': 5,
                'NoCompound': 4}
NO_CONTROLS = CONTROL_DICT['DMSO'] + CONTROL_DICT['NoCompound']
MIN_VOLUME_REQUIRED_ul = 20
# Stock concentrations (molar), highest first; the first entry is used as
# the maximum concentration per drug.
STOCK_CONCENTRATIONS_M = [0.1, 0.01, 0.001]
MAX_NUMBER_CONCENTRATIONS = 3
# 96well plate format
COLUMNS96WP = np.arange(1, 13)
ROWS96WP = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']
# Well names 'A01'..'H12', row-major order.
WELLS96WP = [''.join([i[0], str(i[1]).zfill(2)]) for i in
             list(itertools.product(ROWS96WP, COLUMNS96WP))]
# The last NO_CONTROLS wells of each plate are reserved for controls.
CONTROL_WELLS = WELLS96WP[-NO_CONTROLS:]
if __name__ == '__main__':
    prestwick_drugs = pd.read_csv(PRESTWICK_LIBRARY)
    # Every drug gets the same dose ladder and replicate count.
    prestwick_drugs['number_concentrations'] = MAX_NUMBER_CONCENTRATIONS
    prestwick_drugs['maximum_concentration_M'] = STOCK_CONCENTRATIONS_M[0]
    prestwick_drugs['number_replicates'] = 4
    # Volume of DMSO needed to dissolve the supplied mass at the maximum
    # concentration; presumably g / (g/mol) = mol, then scaled to ul at the
    # stock molarity -- TODO verify the unit factors.
    prestwick_drugs['vol_DMSO_to_add_for_max_concentration_ul'] =[
        ((r.mass_supplied_g/r.mol_weight_structure)*1000/r.maximum_concentration_M)*1000
        for i,r in prestwick_drugs.iterrows()]
    #%%
    # Each drug occupies number_concentrations wells; controls take the
    # remainder of each plate.
    number_conditions = sum(prestwick_drugs['number_concentrations'])
    number_plates = math.ceil(number_conditions /
                              (len(WELLS96WP) - NO_CONTROLS))
    libraryDF = pd.DataFrame(columns=['drug_type',
                                      'drug_code',
                                      'drug_concentration'
                                      ])
    # One row per well across all plates, plates numbered from 1.
    libraryDF['library_plate_number'] = sum([len(WELLS96WP)*[p] for p in
                                             range(1, number_plates+1)], [])
    libraryDF['well_name'] = WELLS96WP * number_plates
    # loop through the drugs and assign to the wells
    well_counter = 0
    for i, r in prestwick_drugs.iterrows():
        # NOTE(review): relies on lexicographic comparison of well names to
        # detect reaching the control block at the end of a plate -- confirm.
        if libraryDF.loc[well_counter].well_name >= CONTROL_WELLS[0]:
            # Fill the control wells, then place this drug on the next plate.
            libraryDF.loc[well_counter:well_counter + CONTROL_DICT['DMSO']-1,
                          ['drug_type',
                           'drug_code',
                           'drug_concentration']] = 'DMSO', 'DMSO', 0.001
            well_counter += CONTROL_DICT['DMSO']
            libraryDF.loc[well_counter:well_counter + CONTROL_DICT['NoCompound']-1,
                          ['drug_type',
                           'drug_code',
                           'drug_concentration']] = 'NoCompound', 'NoCompound', 0
            well_counter += CONTROL_DICT['NoCompound']
            libraryDF.loc[well_counter:well_counter+r.number_concentrations-1,
                          ['drug_type',
                           'drug_code',
                           'drug_concentration']] = r.chemical_name,r.Compound_Identifying_Number, STOCK_CONCENTRATIONS_M
            well_counter += r.number_concentrations
        else:
            libraryDF.loc[well_counter:well_counter+r.number_concentrations-1,
                          ['drug_type',
                           'drug_code',
                           'drug_concentration']] = r.chemical_name, r.Compound_Identifying_Number, STOCK_CONCENTRATIONS_M
            well_counter += r.number_concentrations
    # Attach the per-drug metadata to the plate layout.
    library_export = libraryDF.merge(prestwick_drugs[['Compound_Identifying_Number',
                                                      'chemical_name',
                                                      'mol_weight_structure',
                                                      'mass_supplied_g',
                                                      'maximum_concentration_M',
                                                      'vol_DMSO_to_add_for_max_concentration_ul'
                                                      ]],
                                     left_on='drug_code',
                                     right_on='Compound_Identifying_Number',
                                     how='outer')
    library_export.drop(columns=['Compound_Identifying_Number',
                                 'chemical_name'],
                        inplace=True)
    library_export.sort_values(by=['library_plate_number',
                                   'well_name'],
                               inplace=True)
    if SAVE_TO.exists():
        # NOTE(review): the message says 'Sygenta' but this script builds the
        # Prestwick library -- likely copied from a sibling script.
        warnings.warn('Sygenta 3 dose .csv file already exists')
    else:
        library_export.to_csv(SAVE_TO, index=False)
|
{"hexsha": "57dfcd1d4d8eadc8c866a5950a4ce906ccb9334b", "size": 4937, "ext": "py", "lang": "Python", "max_stars_repo_path": "druglibrary/Prestwick_library_plates.py", "max_stars_repo_name": "ilbarlow/PrestwickScreen", "max_stars_repo_head_hexsha": "b1c7f045aba600746a8de133d25582135f789d75", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "druglibrary/Prestwick_library_plates.py", "max_issues_repo_name": "ilbarlow/PrestwickScreen", "max_issues_repo_head_hexsha": "b1c7f045aba600746a8de133d25582135f789d75", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "druglibrary/Prestwick_library_plates.py", "max_forks_repo_name": "ilbarlow/PrestwickScreen", "max_forks_repo_head_hexsha": "b1c7f045aba600746a8de133d25582135f789d75", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.814516129, "max_line_length": 122, "alphanum_fraction": 0.5519546283, "include": true, "reason": "import numpy", "num_tokens": 1098}
|
#!/usr/bin/env python3
import os, json, argparse
from threading import Thread
from queue import Queue
import numpy as np
from scipy.misc import imread, imresize
import h5py
from random import shuffle
import sys
"""
Create an HDF5 file of video frames, optical flow and certainty masks for training a feedforward video style transfer model.
"""
# Command-line configuration for the dataset builder.
parser = argparse.ArgumentParser()
parser.add_argument('--input_dir', default='')
parser.add_argument('--output_file', default='video-364.h5')
parser.add_argument('--height', type=int, default=256)
parser.add_argument('--width', type=int, default=384)
# Negative max_images means "no limit".
parser.add_argument('--max_images', type=int, default=-1)
parser.add_argument('--num_workers', type=int, default=2)
parser.add_argument('--include_val', type=int, default=1)
parser.add_argument('--max_resize', default=16, type=int)
parser.add_argument('--sequence_length', default=2, type=int)
args = parser.parse_args()
def read_flow(filename):
    """Read a Middlebury .flo optical-flow file.

    Parameters
    ----------
    filename : str
        Path to the .flo file; empty/one-character names are skipped.

    Returns
    -------
    numpy.ndarray or None
        (h, w, 2) float32 flow array, or None when the name is too short
        or the magic-number check fails.
    """
    if len(filename) <= 1:
        return None
    with open(filename, 'rb') as f:
        # .flo files start with the float 202021.25 as a sanity check.
        magic = np.fromfile(f, np.float32, count=1)
        if len(magic) == 0 or magic[0] != 202021.25:
            print('Magic number incorrect. Invalid .flo file')
            return None
        w = np.fromfile(f, np.int32, count=1)[0]
        h = np.fromfile(f, np.int32, count=1)[0]
        data = np.fromfile(f, np.float32, count=2 * w * h)
        # Reshape data into 3D array (columns, rows, bands).
        # reshape (not np.resize) so a truncated file raises instead of
        # silently repeating data.
        return data.reshape(h, w, 2)
def add_data(h5_file, image_dir, prefix, args):
    """Scan image_dir for frame sequences with optical flow and copy the
    frames, flow fields and certainty masks into HDF5 datasets created
    under ``prefix`` (e.g. 'train' or 'val').
    """
    # Make a list of all images in the source directory
    image_list = []
    # NOTE(review): unused, and the trailing comma makes this a 1-tuple
    # containing a set -- presumably leftover from an earlier version.
    image_extensions = {'.jpg', '.jpeg', '.JPG', '.JPEG', '.png', '.PNG'},
    for item in os.listdir(image_dir):
        full_path = os.path.join(image_dir, item)
        sub_folder = os.path.join(full_path, "flow")
        if os.path.exists(sub_folder):
            for filename in os.listdir(sub_folder):
                if filename.endswith(".flo"):
                    frames = (os.path.splitext(filename)[0]).split('_')
                    # Test if start of sequence
                    if frames[0] == "s":
                        if int(frames[1]) < int(frames[2]):
                            image_list.append( ( full_path, int(frames[1]) ) )
    num_images = len(image_list)
    print("Found %d images" % num_images)
    shuffle(image_list)
    # Resize all images and copy them into the hdf5 file
    # We'll bravely try multithreading
    dset_imgs1_name = os.path.join(prefix, 'frames1')
    # NOTE(review): dset_imgs2_name is never used below -- confirm leftover.
    dset_imgs2_name = os.path.join(prefix, 'frames2')
    dset_flow_name = os.path.join(prefix, 'flow')
    dset_cert_name = os.path.join(prefix, 'cert')
    dset_size3 = (num_images, args.sequence_length, 3, args.height, args.width)
    dset_size2 = (num_images, args.sequence_length-1, 2, args.height, args.width)
    dset_size1 = (num_images, args.sequence_length-1, args.height, args.width)
    imgs_dset = h5_file.create_dataset(dset_imgs1_name, dset_size3, np.uint8)
    flow_dset = h5_file.create_dataset(dset_flow_name, dset_size2, np.float32)
    cert_dset = h5_file.create_dataset(dset_cert_name, dset_size1, np.uint8)
    # input_queue stores (idx, filename) tuples,
    # output_queue stores (idx, resized_img) tuples
    input_queue = Queue()
    output_queue = Queue()
    # Read workers pull images off disk and resize them
    def read_worker():
        while True:
            imgs = []
            flows = []
            certs = []
            idx, frame_paths, flow_paths, cert_paths = input_queue.get()
            # NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2;
            # imageio.imread is the usual replacement.
            for frame_path in frame_paths:
                imgs.append(imread(frame_path))
            for flow_path in flow_paths:
                flows.append(read_flow(flow_path))
            for cert_path in cert_paths:
                certs.append(imread(cert_path))
            input_queue.task_done()
            output_queue.put((idx, imgs, flows, certs))
    # Write workers write resized images to the hdf5 file
    def write_worker():
        num_written = 0
        while True:
            idx, imgs, flows, certs = output_queue.get()
            # RGB image, transpose from H x W x C to C x H x W
            if imgs[0].ndim == 3:
                for i, img in enumerate(imgs):
                    imgs_dset[idx,i] = img.transpose(2, 0, 1)
            elif imgs[0].ndim == 2:
                # Grayscale image; it is H x W so broadcasting to C x H x W will just copy
                # grayscale values into all channels.
                for i, img in enumerate(imgs):
                    imgs_dset[idx,i] = img
            for i, flow in enumerate(flows):
                flow_dset[idx,i] = flow.transpose(2, 0, 1)
            for i, cert in enumerate(certs):
                cert_dset[idx,i] = cert.transpose(0, 1)
            output_queue.task_done()
            num_written = num_written + 1
            if num_written % 100 == 0:
                print('Copied %d / %d image sequences' % (num_written, num_images))
    # Start the read workers.
    for i in range(args.num_workers):
        t = Thread(target=read_worker)
        t.daemon = True
        t.start()
    # h5py locks internally, so we can only use a single write worker =(
    t = Thread(target=write_worker)
    t.daemon = True
    t.start()
    # Enqueue one (frames, flows, certainty masks) job per sequence.
    for idx, tuple in enumerate(image_list):
        # NOTE(review): 'tuple' shadows the builtin -- rename when safe.
        filesTuple = []
        certsTuple = []
        flowsTuple = []
        for i in range(0, args.sequence_length):
            filesTuple.append(os.path.join(tuple[0], "frame_{:04d}.png".format(tuple[1]+i)))
        flowsTuple.append(os.path.join(tuple[0], "flow/s_{:04d}_{:04d}.flo".format(tuple[1]+1, tuple[1])))
        for i in range(1, args.sequence_length-1):
            flowsTuple.append(os.path.join(tuple[0], "flow/{:04d}_{:04d}.flo".format(tuple[1]+i+1, tuple[1]+i)))
        for i in range(0, args.sequence_length-1):
            certsTuple.append(os.path.join(tuple[0], "flow/reliable_{:04d}_{:04d}.pgm".format(tuple[1]+i+1, tuple[1]+i)))
        if args.max_images > 0 and idx >= args.max_images: break
        input_queue.put((idx, filesTuple, flowsTuple, certsTuple))
    input_queue.join()
    output_queue.join()
if __name__ == '__main__':
    # Build the training split, and optionally a validation split, in one
    # HDF5 file. NOTE(review): both splits scan the same input_dir --
    # confirm whether 'val' should use a different directory.
    with h5py.File(args.output_file, 'w') as f:
        add_data(f, args.input_dir, 'train', args)
        if args.include_val != 0:
            add_data(f, args.input_dir, 'val', args)
|
{"hexsha": "2b8d40bf8da855b6a754ae35ddd72922f647ac72", "size": 5945, "ext": "py", "lang": "Python", "max_stars_repo_path": "video_dataset/make_video_dataset.py", "max_stars_repo_name": "deform-lab/fast-artistic-videos", "max_stars_repo_head_hexsha": "47ed2a9934c6d91a6d000c050ac3f327897a972f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 341, "max_stars_repo_stars_event_min_datetime": "2018-02-10T23:49:43.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T16:53:00.000Z", "max_issues_repo_path": "video_dataset/make_video_dataset.py", "max_issues_repo_name": "deform-lab/fast-artistic-videos", "max_issues_repo_head_hexsha": "47ed2a9934c6d91a6d000c050ac3f327897a972f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 32, "max_issues_repo_issues_event_min_datetime": "2018-02-13T23:00:56.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-22T04:19:22.000Z", "max_forks_repo_path": "video_dataset/make_video_dataset.py", "max_forks_repo_name": "deform-lab/fast-artistic-videos", "max_forks_repo_head_hexsha": "47ed2a9934c6d91a6d000c050ac3f327897a972f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 49, "max_forks_repo_forks_event_min_datetime": "2018-02-11T20:23:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T12:26:58.000Z", "avg_line_length": 36.6975308642, "max_line_length": 124, "alphanum_fraction": 0.6613961312, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1634}
|
import os
import cv2
import numpy as np
import pandas as pd
from torchvision.transforms import transforms
from torch.utils.data import Dataset
from datasets.base_dataset import BaseDataset
from utils.augmenters.augment import seg
# FER2013 label index -> emotion name.
EMOTION_DICT = {
    0: "angry",
    1: "disgust",
    2: "fear",
    3: "happy",
    4: "sad",
    5: "surprise",
    6: "neutral",
}
class FER2013Dataset(BaseDataset):
    """
    FER2013 facial-expression dataset loaded from a per-stage CSV file.

    Input params:
        stage: The stage of training.
        configuration: Configuration dictionary.
    """
    def __init__(self, configuration):
        super().__init__(configuration)
        self._stage = configuration["stage"]
        # Whether to apply the 'seg' augmentation in __getitem__.
        self.affine = configuration["affine"]
        self._image_size = tuple(configuration["input_size"])
        # CSV named after the stage, e.g. <dataset_path>/train.csv.
        self._data = pd.read_csv(os.path.join(configuration["dataset_path"], "{}.csv".format(self._stage)))
        # self._data = self._data[self._data.emotion != 3]
        self._pixels = self._data["pixels"].tolist()
        # One-hot encoding of the labels; idxmax recovers the class index.
        self._emotions = pd.get_dummies(self._data["emotion"])
        self._transform = transforms.Compose(
            [
                transforms.ToPILImage(),
                transforms.ToTensor(),
            ]
        )
    def __getitem__(self, index):
        """Return (image tensor, integer emotion label) for one sample."""
        pixels = self._pixels[index]
        # Pixels are stored as a space-separated string of grayscale values.
        pixels = list(map(int, pixels.split(" ")))
        image = np.asarray(pixels).reshape(48, 48)
        image = image.astype(np.uint8)
        # print(self._image_size)
        image = cv2.resize(image, self._image_size)
        # Single-channel stack; the 3-channel variant is kept commented out.
        image = np.dstack([image] * 1)
        # image = np.dstack([image] * 3)
        # if self._stage == "train":
        if self.affine:
            image = seg(image=image)
        # if self._stage == "test" and self._tta == True:
        #     images = [seg(image=image) for i in range(self._tta_size)]
        #     # images = [image for i in range(self._tta_size)]
        #     images = list(map(self._transform, images))
        #     target = self._emotions.iloc[idx].idxmax()
        #     return images, target
        image = self._transform(image)
        target = self._emotions.iloc[index].idxmax()
        return image, target
    def __len__(self):
        # return the size of the dataset
        return len(self._pixels)
    def get_emotion(self, index):
        """Map a label index to its emotion name, or "error" if unknown."""
        if (index in EMOTION_DICT):
            return EMOTION_DICT[index]
        else:
            return "error"
|
{"hexsha": "30619d8623e9b1800612cc2c7706faf432bdab9b", "size": 2409, "ext": "py", "lang": "Python", "max_stars_repo_path": "datasets/FER2013_dataset.py", "max_stars_repo_name": "And1210/FER_SSL", "max_stars_repo_head_hexsha": "6cad839261667dce30a8b9db9638ef7334953063", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "datasets/FER2013_dataset.py", "max_issues_repo_name": "And1210/FER_SSL", "max_issues_repo_head_hexsha": "6cad839261667dce30a8b9db9638ef7334953063", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "datasets/FER2013_dataset.py", "max_forks_repo_name": "And1210/FER_SSL", "max_forks_repo_head_hexsha": "6cad839261667dce30a8b9db9638ef7334953063", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.6896551724, "max_line_length": 107, "alphanum_fraction": 0.5944375259, "include": true, "reason": "import numpy", "num_tokens": 587}
|
import os
import time
import pickle
import numpy as np
from timecast.learners import AR, PredictLast, PredictConstant
from timecast.utils.losses import MeanSquareError
from timecast import load_learner
from fusion_data import FusionData
from utils import experiment
# Sacred experiment handle for the "baseline" run.
ex = experiment("baseline")
@ex.config
def config():
    # Sacred config scope: these local assignments become experiment config
    # entries and are injected into main() below.
    # Default: PredictLast baseline on a single input/output channel.
    learner_type = "PredictLast"
    learner_path = None
    data_dir = "FRNN_1d_sample"
    data_keys = "test_list.npy"
    shot_data = "shot_data.npz"
    input_dim = 1
    input_index = 3
    output_dim = 1
    output_index = 3
    window_size = 1
    filter_size = 1
    batch_size = 1
    warning = 30
    model_path = None
    result_path = "results.pkl"
    fit_intercept = False
    constrain = False
    normalize = True
@ex.named_config
def config_ar():
    # Named config (select with `with config_ar`): AR learner warm-started
    # from a previously trained run, on the full 142-channel input.
    learner_type = "AR"
    learner_path = "sacred/experiments/train_ar/89/results.pkl"
    data_dir = "FRNN_1d_sample"
    data_keys = "test_list.npy"
    shot_data = "shot_data.npz"
    input_dim = 142
    input_index = 3
    output_dim = 1
    output_index = 3
    window_size = 1
    filter_size = 128
    batch_size = 128
    warning = 30
    model_path = "FRNN_1D_sample.h5"
    result_path = "results.pkl"
    fit_intercept = True
    constrain = False
    normalize = True
@ex.automain
def main(
    learner_type,
    learner_path,
    data_dir,
    data_keys,
    shot_data,
    input_dim,
    input_index,
    output_dim,
    output_index,
    window_size,
    filter_size,
    batch_size,
    warning,
    model_path,
    result_path,
    fit_intercept,
    constrain,
    normalize,
):
    """Run a baseline learner over fusion shot data and pickle the
    per-shot predictions, ground truth and MSE.

    All arguments are injected by sacred from the config scopes above.
    """
    with ex.open_resource(os.path.join(data_dir, shot_data), "rb") as data_file:
        with ex.open_resource(os.path.join(data_dir, data_keys), "rb") as keys_file:
            with ex.open_resource(os.path.join(data_dir, model_path), "rb") as model_file:
                # NOTE(review): model_file is unused; FusionData receives
                # model_path and presumably opens it itself -- the
                # open_resource call may exist only for sacred bookkeeping.
                data = FusionData(
                    data_file,
                    keys_file,
                    input_dim=input_dim,
                    input_index=input_index,
                    output_dim=output_dim,
                    output_index=output_index,
                    warning=warning,
                    filter_size=filter_size,
                    batch_size=batch_size,
                    normalize=normalize,
                    model_path=os.path.join(data_dir, model_path),
                )
    # Resolve the learner class from its config name.
    if learner_type == "PredictLast":
        learner_type = PredictLast
    elif learner_type == "PredictConstant":
        learner_type = PredictConstant
    elif learner_type == "AR":
        learner_type = AR
    if learner_path is None:
        learner = learner_type(
            # Input dimension may have gotten transformed
            input_dim=int(data.input_dim),
            output_dim=output_dim,
            window_size=window_size,
            fit_intercept=fit_intercept,
            constrain=constrain,
            # Data loader will handle normalization
            normalize=False,
        )
    else:
        learner = load_learner(learner_path)
    print(learner.to_dict())
    pred = {}
    true = {}
    mse = {}
    # Online loop: predict each shot, then update and reset the learner.
    for i, (X, y, shot) in enumerate(data.featurize()):
        print("Processing shot {}: {} ({}, {})".format(i, shot, X.shape, y.shape))
        # if shot not in pred:
        start = time.time()
        pred[shot] = learner.predict(X)
        print(" ...took {} seconds".format(time.time() - start))
        true[shot] = y
        learner.update(X, y)
        mse[shot] = MeanSquareError().compute(pred[shot], true[shot])
        learner.reset()
    # Save the results under the second sacred observer's run directory.
    path = os.path.join(ex.observers[1].dir, result_path)
    print("Saving data to {}".format(path))
    # NOTE(review): the file handle passed to pickle.dump is never closed
    # explicitly -- consider a with-block.
    pickle.dump({"pred": pred, "true": true, "mse": mse}, open(path, "wb"))
|
{"hexsha": "548260f2eeda1c75b3c1376111d75b1c28aa92c6", "size": 3744, "ext": "py", "lang": "Python", "max_stars_repo_path": "skgaip/fusion/main.py", "max_stars_repo_name": "danielsuo/toy_flood", "max_stars_repo_head_hexsha": "471d3c4091d86d4a00fbf910937d4e60fdaf79a1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-04-30T07:42:12.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-30T07:42:12.000Z", "max_issues_repo_path": "skgaip/fusion/main.py", "max_issues_repo_name": "danielsuo/toy_flood", "max_issues_repo_head_hexsha": "471d3c4091d86d4a00fbf910937d4e60fdaf79a1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-09-25T22:37:57.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-09T23:38:23.000Z", "max_forks_repo_path": "skgaip/fusion/main.py", "max_forks_repo_name": "danielsuo/toy_flood", "max_forks_repo_head_hexsha": "471d3c4091d86d4a00fbf910937d4e60fdaf79a1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.5531914894, "max_line_length": 90, "alphanum_fraction": 0.6079059829, "include": true, "reason": "import numpy", "num_tokens": 898}
|
import os
import gzip
import numpy as np
import struct
import urllib
from urllib import request
# load compressed MNIST gz files and return numpy arrays
def load_data(filename, label = False):
    """Load a gzipped MNIST idx file and return a numpy array.

    Parameters
    ----------
    filename : str
        Path to the .gz file.
    label : bool
        True for label files (one value per item), False for image files.

    Returns
    -------
    numpy.ndarray
        uint8 array of shape (n_items, rows*cols) for images, or
        (n_items, 1) for labels.
    """
    with gzip.open(filename) as gz:
        # idx header fields are big-endian; the original read the magic with
        # native byte order ('I'), which is wrong on little-endian machines
        # (harmless only because the value was unused).
        magic_number = struct.unpack('>I', gz.read(4))
        n_items = struct.unpack('>I', gz.read(4))
        if not label:
            n_rows = struct.unpack('>I', gz.read(4))[0]
            n_cols = struct.unpack('>I', gz.read(4))[0]
            res = np.frombuffer(gz.read(n_items[0] * n_rows * n_cols), dtype = np.uint8)
            res = res.reshape(n_items[0], n_rows * n_cols)
        else:
            res = np.frombuffer(gz.read(n_items[0]), dtype = np.uint8)
            res = res.reshape(n_items[0], 1)
    return res
# one-hot encode a 1-D array
def one_hot_encode(array, num_of_classes):
    """One-hot encode a 1-D integer array into shape (len(array), num_of_classes)."""
    flat_labels = array.reshape(-1)
    identity = np.eye(num_of_classes)
    return identity[flat_labels]
def prepare_data(dataset, data_folder):
    """Download MNIST (if not already present) into ``data_folder/dataset`` and load it.

    Args:
        dataset: subdirectory name for this dataset (e.g. ``'mnist'``).
        data_folder: root directory where datasets are stored.

    Returns:
        Tuple ``(X_train, X_test, y_train, y_test)`` with images scaled to [0, 1]
        and labels as flat 1-D arrays.
    """
    data_folder = os.path.join(data_folder, dataset)
    print('making data directory ' + data_folder + '...')
    os.makedirs(data_folder, exist_ok = True)

    def download_data(url, filename):
        # Skip the download when a previous run already fetched the file.
        if not os.path.isfile(filename):
            print('downloading ' + url)
            urllib.request.urlretrieve(url, filename = filename)
        else:
            print(filename + ' exists, using it')

    # BUG FIX: downloads previously targeted the hard-coded './data/mnist/'
    # directory while the load_data calls below read from `data_folder`;
    # both now use the same location.
    print('downloading training data ...')
    download_data('http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz', os.path.join(data_folder, 'train-images.gz'))
    download_data('http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz', os.path.join(data_folder, 'train-labels.gz'))
    print('done.')
    print('downloading testing data ...')
    download_data('http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz', os.path.join(data_folder, 'test-images.gz'))
    download_data('http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz', os.path.join(data_folder, 'test-labels.gz'))
    print('done.')
    print('Prepared training dataset is stored here:', data_folder)
    X_train = load_data(os.path.join(data_folder, 'train-images.gz'), False) / 255.0
    X_test = load_data(os.path.join(data_folder, 'test-images.gz'), False) / 255.0
    y_train = load_data(os.path.join(data_folder, 'train-labels.gz'), True).reshape(-1)
    y_test = load_data(os.path.join(data_folder, 'test-labels.gz'), True).reshape(-1)
    print(X_train.shape, y_train.shape, X_test.shape, y_test.shape, sep = '\n')
    return X_train, X_test, y_train, y_test
|
{"hexsha": "3e9376ebd6191f7cd387d92cdd3a8fb347e5e7cf", "size": 2445, "ext": "py", "lang": "Python", "max_stars_repo_path": "mnist-vscode-docs-sample/utils.py", "max_stars_repo_name": "luisquintanilla/vscode-tools-for-ai", "max_stars_repo_head_hexsha": "45ce66e84c854a544554cc8e42ddc00922cda195", "max_stars_repo_licenses": ["CC-BY-4.0", "MIT"], "max_stars_count": 218, "max_stars_repo_stars_event_min_datetime": "2017-09-22T22:26:23.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-05T12:42:40.000Z", "max_issues_repo_path": "mnist-vscode-docs-sample/utils.py", "max_issues_repo_name": "luisquintanilla/vscode-tools-for-ai", "max_issues_repo_head_hexsha": "45ce66e84c854a544554cc8e42ddc00922cda195", "max_issues_repo_licenses": ["CC-BY-4.0", "MIT"], "max_issues_count": 1342, "max_issues_repo_issues_event_min_datetime": "2019-05-07T09:02:58.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T17:34:10.000Z", "max_forks_repo_path": "mnist-vscode-docs-sample/utils.py", "max_forks_repo_name": "isabella232/vscode-tools-for-ai", "max_forks_repo_head_hexsha": "1098e68d6a503ac8e9935bddde808b75a7b05d14", "max_forks_repo_licenses": ["CC-BY-4.0", "MIT"], "max_forks_count": 49, "max_forks_repo_forks_event_min_datetime": "2019-05-07T02:20:02.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-14T03:53:20.000Z", "avg_line_length": 41.4406779661, "max_line_length": 112, "alphanum_fraction": 0.6609406953, "include": true, "reason": "import numpy", "num_tokens": 642}
|
import sys
import os
if __name__ == "__main__":
sys.path.append("../pyscatwave")
from itertools import product
import math
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.cuda
from global_const import *
from torch.autograd import Variable, grad
try:
from scattering.scattering1d.utils import modulus
from scattering.scattering1d import filter_bank
from scattering.scattering1d.fft_wrapper import fft1d_c2c, ifft1d_c2c_normed
except ImportError:
from scatwave.scattering1d.utils import modulus
from scatwave.scattering1d import filter_bank
from scatwave.scattering1d.fft_wrapper import fft1d_c2c, ifft1d_c2c_normed
from tqdm import tqdm
from global_const import DATAPATH, Tensor
import metric
import optim
from utils import make_dir_if_not_there
def figure(x=1, y=1):
    """Create a matplotlib figure with x:y aspect ratio whose longer side is 12 inches."""
    scale = 12 / max(x, y)
    return plt.figure(figsize=(x * scale, y * scale))
def smooth_signal(x):
    """Smooth `x` by damping its spectrum with an energy-normalized Gaussian window."""
    sigma_hat = 0.15
    num_samples = x.shape[-1]
    freqs = np.fft.fftfreq(num_samples)
    # Gaussian low-pass window in the frequency domain.
    window = np.exp(- 0.5 * freqs ** 2 / sigma_hat ** 2)
    # Normalize window energy so the smoothed signal keeps a comparable scale.
    window = window / (np.sqrt(np.mean(window ** 2, axis=-1) * 2 * np.pi))
    spectrum = np.fft.fft(x)
    return np.real(np.fft.ifft(spectrum * window))
def solve_border(x0, opt):
    """Apply a border-handling scheme to the 1-D signal `x0`.

    opt is one of:
        None     -- return the signal unchanged;
        'padd'   -- zero-pad by half the length on each side;
        'smooth' -- taper both ends with a sin^2 ramp of length T//8;
        'mirror' -- append the reversed signal (even extension).
    Raises ValueError for any other value.
    """
    T = np.size(x0)
    if opt is None:
        return x0
    if opt == "padd":
        pad = np.zeros(T // 2)
        return np.concatenate((pad, x0, pad))
    if opt == "smooth":
        ramp = np.sin(np.linspace(0, np.pi / 2, T // 8)) ** 2
        taper = np.concatenate((ramp, np.ones(3 * T // 4), ramp[::-1]))
        return x0 * taper
    if opt == "mirror":
        return np.concatenate((x0, x0[::-1]))
    err = "opt should be one of [None, 'padd', 'smooth', 'mirror'], but got '{}'".format(opt)
    raise ValueError(err)
def diracs(T, n_dirac):
    """Return a (1, 1, T) tensor holding n_dirac random-amplitude spikes.

    Spike positions are drawn without replacement from [0, T//2) and shifted
    by T//4, so every spike lands in the central half of the support.
    """
    signal = torch.zeros(1, 1, T)
    positions = np.random.choice(T // 2, size=n_dirac, replace=False)
    amplitudes = np.random.randn(n_dirac)
    for pos, amp in zip(positions, amplitudes):
        signal[0, 0, pos + T // 4] = amp
    return signal
def staircase(T, n_dirac, compact=True, zero_mean=True, smooth=True):
    """Random piecewise-constant signal: the cumulative sum of random diracs.

    When `compact` is set, the staircase occupies the central half of the
    support and is zero-padded by T//4 on each side.
    """
    support = T // 2 if compact else T
    steps = diracs(support, n_dirac).numpy()[0, 0, :]
    steps = np.cumsum(steps, axis=0)
    if zero_mean:
        steps = steps - np.mean(steps)
    if compact:
        pad = np.zeros(T // 4)
        steps = np.concatenate((pad, steps, pad), axis=0)
    if smooth:
        steps = smooth_signal(steps)
    return Tensor(steps[None, None, :])
def locally_smooth(T, n_ensemble, compact=True, zero_mean=False, smooth=True, per=2.):
    """Random piecewise-constant plateaus modulated by a cosine of period `per`.

    NOTE(review): `compact` is forwarded to solve_border, which only accepts
    None/'padd'/'smooth'/'mirror'; the default True would raise there — confirm
    intended call sites pass a border mode string.
    """
    support = T // 2 if compact in ("padd", "mirror") else T
    signal = np.zeros(support)
    # Each consecutive pair of sorted edges delimits one constant plateau.
    edges = np.sort(np.random.choice(support, size=2 * n_ensemble, replace=False))
    levels = np.random.randn(n_ensemble)
    for idx, level in enumerate(levels):
        start, stop = edges[2 * idx], edges[2 * idx + 1]
        signal[start:stop] = level
    modulation = np.cos(np.linspace(0, 2 * np.pi, support, endpoint=False) * per)
    signal = signal * modulation
    if zero_mean:
        signal = signal - np.mean(signal)
    signal = solve_border(signal, compact)
    if smooth:
        signal = smooth_signal(signal)
    return Tensor(signal[None, None, :])
def single_freq_modulated(T, per0=11., per1=127., compact=True, zero_mean=False, smooth=True):
    """Sine carrier at period factor `per0` modulated by a randomly-phased cosine at `per1`."""
    support = T // 2 if compact else T
    grid = np.linspace(0, 2 * np.pi, support)
    phase = np.random.rand() * 2 * np.pi
    signal = np.sin(grid * per0) * np.cos(grid * per1 + phase)
    if zero_mean:
        signal = signal - np.mean(signal)
    if compact:
        # Center the oscillation and zero-pad by a quarter on each side.
        pad = np.zeros(T // 4)
        signal = np.concatenate((pad, signal, pad), axis=0)
    if smooth:
        signal = smooth_signal(signal)
    return Tensor(signal[None, None, :])
def single_freq_modulated_bis(T, per0=5., per1=127., compact=True, zero_mean=False, smooth=True):
    """Non-negative low-frequency envelope (1 - cos) times a randomly-phased cosine carrier.

    Unlike single_freq_modulated, border handling is delegated to solve_border,
    so `compact` must be one of its accepted modes.
    """
    support = T // 2 if compact else T
    grid = np.linspace(0, 2 * np.pi, support)
    phase = np.random.rand() * 2 * np.pi
    # -(cos(t*per0) - 1) == 1 - cos(t*per0): an envelope in [0, 2].
    signal = -(np.cos(grid * per0) - 1.) * np.cos(grid * per1 + phase)
    if zero_mean:
        signal = signal - np.mean(signal)
    signal = solve_border(signal, compact)
    if smooth:
        signal = smooth_signal(signal)
    return Tensor(signal[None, None, :])
def fourier_diracs(T, n_dirac):
    """Real signal whose spectrum carries n_dirac random complex spikes.

    Spikes are placed on non-negative frequency bins only; the spectrum is then
    Hermitian-symmetrized so the inverse FFT is (numerically) real.
    """
    spikes = np.zeros(T, dtype=complex)
    bins = np.random.choice(T // 2 + 1, size=n_dirac, replace=False)
    re_parts = np.random.randn(n_dirac)
    im_parts = np.random.randn(n_dirac)
    for b, re, im in zip(bins, re_parts, im_parts):
        spikes[b] = re + 1j * im
    # Build the negative-frequency mirror and average with its conjugate.
    shifted = np.fft.fftshift(spikes)
    mirrored = np.fft.ifftshift(np.concatenate((shifted[0:1], shifted[:0:-1])))
    hermitian = (spikes + np.conj(mirrored)) / 2
    x0_np = np.fft.ifft(hermitian)
    return Tensor(np.real(x0_np)[None, None, :])
def fourier_staircase(T, n_dirac):
    """Real signal whose spectrum is the cumulative sum of random complex spikes.

    Same construction as fourier_diracs, except the spike spectrum is
    integrated (cumsum) before Hermitian symmetrization.
    """
    spikes = np.zeros(T, dtype=complex)
    bins = np.random.choice(T // 2 + 1, size=n_dirac, replace=False)
    re_parts = np.random.randn(n_dirac)
    im_parts = np.random.randn(n_dirac)
    for b, re, im in zip(bins, re_parts, im_parts):
        spikes[b] = re + 1j * im
    spectrum = np.cumsum(spikes)
    shifted = np.fft.fftshift(spectrum)
    mirrored = np.fft.ifftshift(np.concatenate((shifted[0:1], shifted[:0:-1])))
    hermitian = (spectrum + np.conj(mirrored)) / 2
    return Tensor(np.real(np.fft.ifft(hermitian))[None, None, :])
def lena_line(line_idx=None, compact="padd", zero_mean=True, smooth=True):
    """Extract one horizontal line of the Lena test image as a 1-D signal.

    Args:
        line_idx: row to extract; if None, a random row from the central half
            of the image is chosen.
        compact: border mode forwarded to solve_border ('padd' by default).
        zero_mean: subtract the mean of the extracted line.
        smooth: apply the Gaussian spectral smoothing of smooth_signal.

    Returns:
        Tuple ``(x0, line_idx)`` where x0 is a (1, 1, T') tensor normalized to
        max absolute value 1, and line_idx is the row actually used.
    """
    # lena = plt.imread(os.path.join(DATAPATH, "gen_phaseexp_inv/lena512c.jpg"))
    lena_rgb = plt.imread(os.path.join(DATAPATH, "gen_phaseexp_inv/lena1024c.jpg"))
    # Collapse RGB to luminance with the standard (Rec. 601-style) weights.
    lena = np.dot(lena_rgb[..., :3], [0.299, 0.587, 0.114])
    T = lena.shape[-1]
    if line_idx is None:
        # Avoid the border quarters of the image.
        line_idx = np.random.randint(T // 4, 3 * T // 4)
    x0 = np.array(lena[line_idx, :])
    if zero_mean:
        x0 = x0 - np.mean(x0)
    x0 = solve_border(x0, compact)
    if smooth:
        x0 = smooth_signal(x0)
    x0 = Tensor(x0[None, None, :])
    # Normalize to unit peak amplitude.
    x0 = x0 / torch.max(torch.abs(x0))
    return x0, line_idx
def make_cantor(n, a1, a2, b1, b2, compact=True, zero_mean=True, smooth=True):
    """Recursively generates a Cantor distribution.
    Inputs:
    n: size of output
    a1: size factor for left cantor (left sub-interval is [n1, n1 + (n2-n1)*a1))
    a2: size factor for right cantor (right sub-interval is [n1 + (n2-n1)*a2, n2))
    b1: weight factor for left cantor
    b2: weight factor for right cantor
    compact: if True, build the measure on n//2 samples and zero-pad by n//4 on each side
    zero_mean: subtract the mean before padding
    smooth: apply smooth_signal at the end
    Returns a (1, 1, n') tensor normalized to unit peak amplitude.
    """
    def make_cantor_rec(x, n1, n2, p):
        # Recursion on (possibly fractional) borders n1 < n2, spreading mass p.
        if n2 < n1:
            raise ValueError("Left or right border misplaced.")
        elif n2 - n1 <= 6:
            # Base case: spread the remaining mass uniformly over the cells.
            x[int(n1):int(n2)] = p / (int(n2) - int(n1))
        else:
            # Split into left/right sub-intervals with weights b1 and b2.
            make_cantor_rec(x, n1, n1 + (n2 - n1) * a1, p * b1)
            make_cantor_rec(x, n1 + (n2 - n1) * a2, n2, p * b2)
    if compact:
        n_used = n // 2
    else:
        n_used = n
    x0 = np.zeros(n_used)
    make_cantor_rec(x0, 0, n_used, 1.)
    if zero_mean:
        x0 = x0 - np.mean(x0)
    if compact:
        zero = np.zeros(n // 4)
        x0 = np.concatenate((zero, x0, zero), axis=0)
    if smooth:
        x0 = smooth_signal(x0)
    x0 = Tensor(x0[None, None, :])
    x0 = x0 / torch.max(torch.abs(x0))
    return x0
def binomial_cascade(p, **kwargs):
    """Binomial cascade: split each interval in half with weights p and 1 - p.

    The output size must be supplied as a keyword argument (e.g. ``n=1024``);
    it is forwarded to make_cantor together with compact/zero_mean/smooth.
    """
    b1, b2 = p, 1 - p
    # BUG FIX: the previous call `make_cantor(0.5, 0.5, b1, b2, **kwargs)` bound
    # 0.5 to `n`, b1 to `a2` and b2 to `b1`, leaving `b2` missing (TypeError).
    # Bind the split factors and weights by keyword instead; `n` comes in via
    # **kwargs from the caller.
    return make_cantor(a1=0.5, a2=0.5, b1=b1, b2=b2, **kwargs)
def random_notes(n, n_freq, n_harm, time_lim, sr):
    """Synthesize a sequence of random 'notes' with harmonics and smooth envelopes.

    Args:
        n: total number of output samples.
        n_freq: number of distinct fundamental frequencies to draw.
        n_harm: number of harmonics per note (amplitudes halve per harmonic).
        time_lim: (tmin, tmax) bounds on note length in samples.
        sr: sample rate.

    Returns:
        A (1, 1, n) tensor normalized to unit peak amplitude.
    """
    tmin, tmax = time_lim
    min_freq = 10 * sr / tmin  # at least 10 periods at the same frequency
    max_freq = sr / (2 * n_harm)  # all harmonies must be within the signal's quality
    n_oct = np.log2(max_freq / min_freq)
    n_note = int(n_oct * 12)  # number of possible fundamental frequencies
    assert(max_freq > min_freq)
    # Fundamentals drawn on a 12-notes-per-octave grid.
    fund_idxs = np.random.choice(n_note, size=n_freq, replace=False)
    fund_freqs = min_freq * np.power(2, fund_idxs / 12)
    # One note per tmax-sized slot.
    n_notes = int(n / tmax)
    length_notes = np.random.randint(tmin, tmax, size=n_notes)
    idx_notes = np.random.randint(n_freq, size=n_notes)
    x0 = np.zeros((n,))
    harm = np.arange(1, n_harm + 1)[None]
    # Harmonic amplitudes: 1, 1/2, 1/4, ...
    ampl = np.power(2., 1 - harm)
    for i, (length, idx_freq) in enumerate(zip(length_notes, idx_notes)):
        # get time range
        idx_start = i * tmax
        idx_stop = idx_start + length
        # get amplitude decrease over time (attack / plateau / release ramps)
        increase = np.cos(np.linspace(-np.pi / 2, 0, length // 12)) ** 2
        plateau = np.ones(length // 2 - length // 12)
        decrease = np.cos(np.linspace(0, np.pi / 2, length - length // 2)) ** 2
        envelope = np.concatenate((increase, plateau, decrease))
        # find fundamental frequency and its harmonies
        fund_freq = fund_freqs[idx_freq]
        freqs = fund_freq * harm
        # make oscillation
        t = np.arange(0, length)[:, None]
        osc = np.sum(np.sin(2 * np.pi * freqs * t / sr) * ampl, axis=-1)
        # multiply by envelope and write on signal
        x0[idx_start:idx_stop] = osc * envelope
    x0 = Tensor(x0[None, None, :])
    x0 = x0 / torch.max(torch.abs(x0))
    return x0
def offset(x, x0, phi, order=2):
    """Estimate the integer shift between `x` and `x0` from filtered energy centroids.

    Both signals are filtered (in Fourier) with one of `phi`'s wavelets; the
    difference between the centroids of the filtered energies is returned,
    rounded to the nearest integer.

    NOTE(review): assumes `phi.filt_hat[order, ...]` is a torch tensor whose
    last axis holds (real, imag) parts of the filter's spectrum — confirm
    against the metric module.
    """
    wav = phi.filt_hat[order, ...].data.cpu().numpy()
    # Recombine the stored (real, imag) pair into a complex spectrum.
    wav = wav[..., 0] + 1j * wav[..., 1]
    x0_hat = np.fft.fft(x0)
    x_hat = np.fft.fft(x)
    x0_filt_hat = x0_hat * wav
    x_filt_hat = x_hat * wav
    x0_filt = np.fft.ifft(x0_filt_hat)
    x_filt = np.fft.ifft(x_filt_hat)
    T = np.size(wav)
    t = np.arange(T)
    # Energy-weighted centroid (center of mass) of each filtered signal.
    x0_center = np.sum(t * np.abs(x0_filt) ** 2) / np.sum(np.abs(x0_filt) ** 2)
    x_center = np.sum(t * np.abs(x_filt) ** 2) / np.sum(np.abs(x_filt) ** 2)
    return int(round(x0_center - x_center))
def offset_greed_search(x, x0, order=2):
    """Exhaustively find the circular shift of `x` that best matches `x0`.

    Returns the shift minimizing ``||x0 - roll(x, shift)||_order``; ties are
    broken in favor of the smallest shift.
    """
    n_samples = np.size(x)

    def misfit(shift):
        return np.linalg.norm(x0 - np.roll(x, shift), ord=order)

    return min(range(n_samples), key=misfit)
if __name__ == "__main__":
    # Experiment driver: reconstructs a reference signal from second-order
    # phase-harmonic statistics, once per hyperparameter combination below,
    # and saves each run's signals/losses to an .npz file.
    nb_exp = 2  # number of iterations
    seed = [np.random.randint(10 ** 6) for _ in range(nb_exp)]
    seed = [411602]  # NOTE(review): overrides the random seeds above with one fixed seed
    # seed = [354934]  # use for graphics
    wavelet_types = ["battle_lemarie"]  # ["battle_lemarie", "morlet"]
    do_fst_order = [True]  # [True, False]
    n_octave = list((1, 6))  # number of octaves of interactions kept in covariance matrix
    loss_types = ['MSE']  # ['RelativeMSE', 'MSE', 'L1]
    cuda = True
    zero_means = [True]  #
    smooths = [False]
    nscales_list = [12]
    Qs = list((1, 8))
    # Qs = [2]
    high_freq_bl = 0.5
    Ks = [1 + 2 ** k for k in range(1, 6)]  # numbers of phase harmonics to sweep
    # save_dir = "order1_usefulness"
    # save_dir_root = "lena/error_curve_2"
    save_dir_root = "graphics"
    #signal_name = "lena"
    signal_name = "cantor"
    #signal_name = "staircase"
    detail = True
    Ts = [2 ** 10]  # signal lengths
    n_diracs_l = [4]
    for rs in seed:
        # Sweep the full Cartesian product of hyperparameters for this seed.
        for wavelet_type, fst_order, nscales,K, Q, noct, loss_type, zero_mean, smooth, T, n_diracs \
                in product(wavelet_types, do_fst_order, nscales_list, Ks, Qs, n_octave, \
                           loss_types, zero_means, smooths, Ts, n_diracs_l):
            # for wavelet_type, fst_order, K, ndiag, loss_type, zero_mean, smooth, T, n_diracs \
            #     in product(wavelet_types, do_fst_order, Ks, do_reduce, loss_types, \
            #         zero_means, smooths, Ts, n_diracs_l):
            # manual seed
            np.random.seed(rs)
            torch.manual_seed(rs + 1)
            torch.cuda.manual_seed(rs + 2)
            print("Random seed: {}".format(rs))
            # some hyperparameters deduced from others
            ndiag = int(Q * noct + 1)
            # K = max(3, noct + 1)
            # K = 3
            save_dir = save_dir_root
            # save_dir = save_dir_root + '_' + str(T)
            # NOTE(review): `join` and `RESPATH` are not imported explicitly in this
            # file; presumably they come from `from global_const import *` — confirm.
            save_path = join(RESPATH, join("gen_phaseexp_inv/", save_dir))
            make_dir_if_not_there(save_path)
            # choose signal to be reconstructed
            if signal_name == 'lena':
                signal, line_idx = lena_line(zero_mean=zero_mean)
                signal_info = 'lena' + str(line_idx)
            elif signal_name == 'cantor':
                signal = make_cantor(T, 0.3333, 0.6666, 0.5, 0.5, zero_mean=zero_mean, smooth=smooth)
                signal_info = 'cantor'
            elif signal_name == 'staircase':
                signal = staircase(T, n_diracs, zero_mean=zero_mean, smooth=smooth)
                signal_info = 'staircase'
            else:
                raise ValueError("Unknown signal name: {}".format(signal_name))
            x0 = signal
            # The generators may pad the signal, so refresh T from the result.
            T = x0.size(-1)
            # initialize metric
            tol = 2.0
            # phi_fst = metric.PhaseHarmonicAvg(
            #     nscales, Q, K, T, wav_type=wavelet_type, check_for_nan=False)
            phi_scd = metric.PhaseHarmonicCov(
                nscales, Q, K, T, wav_type=wavelet_type, ndiag=ndiag,
                fst_order=fst_order, tolerance=tol, multi_same=False,
                high_freq=high_freq_bl, check_for_nan=False)
            reduced = "reduced" + str(noct) + "oct"
            # initialize and print save name
            high_freq_info = (str(high_freq_bl) if wavelet_type == "battle_lemarie" else "")
            wavelet_info = wavelet_type.replace("_", "-") + high_freq_info
            if zero_mean:
                signal_info = signal_info + "-0mean"
            if smooth:
                signal_info = signal_info + "-smooth"
            save_name = signal_info + "_{}o2_{}-{}coeff_{}_{}_N{}_Q{}_K{}_seed{}".format(
                "o1" if fst_order else "", reduced, phi_scd.shape_effect(),
                wavelet_info, loss_type, phi_scd.N, phi_scd.Q, phi_scd.K, rs)
            print(save_name)
            # initialize solvers
            # algo_fst = optim.AdamDescentFst(loss_type, detail=detail)
            algo_scd = optim.AdamDescentScd(loss_type, detail=detail)
            # move to GPU
            if cuda:
                # algo_fst = algo_fst.cuda()
                algo_scd = algo_scd.cuda()
                # phi_fst = phi_fst.cuda()
                phi_scd = phi_scd.cuda()
                x0 = x0.cuda()
                print("Working on GPU")
            else:
                # algo_fst = algo_scd.cpu()
                # NOTE(review): assigns `algo_fst`, not `algo_scd` — looks like a
                # typo (the CPU path never moves algo_scd); confirm.
                algo_fst = algo_scd.cpu()
                # phi_fst = phi_fst.cpu()
                phi_scd = phi_scd.cpu()
                x0 = x0.cpu()
                print("Working on CPU")
            x = None
            # First, optimize first order coefficients
            # x, logs_fst = algo_fst(x0, phi_fst, niter=8000, print_freq=1000, lr=0.01)
            # stops_fst = [len(logs_fst[0])]
            # print("First Order Loss: {}".format(logs_fst[0][-1]))
            # Second, optimize covariance matrix
            niter = 32000
            lr0 = 0.1
            gamma = 0.9  # learning-rate decay factor applied at each milestone
            # milestones = []
            milestones = [1000 * i for i in range(1, 32)]
            x, logs_scd = algo_scd(x0, phi_scd, niter=niter, print_freq=1000, past_x=x,
                                   lr=lr0, milestones=milestones, gamma=gamma)
            print("Second Order Loss: {}".format(logs_scd[0][-1]))
            # compute final loss
            final_loss = logs_scd[0][-1]
            save_name = save_name + "_loss{:.1E}".format(final_loss)
            # convert to numpy
            x0_np = x0.cpu().squeeze(1).squeeze(0).numpy()
            x_np = x.cpu().squeeze(1).squeeze(0).numpy()
            # quick align: reconstructions are translation-ambiguous, so find the
            # circular shift that best matches the reference
            t = np.arange(T)
            # off = offset(x_np, x0_np, phi_scd, order=Q + 1)
            off = offset_greed_search(x_np, x0_np, order=2)
            x_np_centered = np.roll(x_np, off)
            # NOTE(review): err uses the unaligned x_np, not x_np_centered — confirm.
            err = np.linalg.norm(x0_np - x_np, ord=2) / np.linalg.norm(x0_np, ord=2)
            # # plot in time
            # fig = plt.figure(figsize=(12, 12))
            # ax = plt.subplot2grid((4, 1), (0, 0), rowspan=3)
            # ax.plot(x0_np, 'r')
            # ax.plot(x_np_centered, 'b')
            # ax.set_xticklabels([])
            # plt.title("Original (red) and reconstructed (blue) signals")
            # ax = plt.subplot2grid((4, 1), (3, 0), rowspan=1)
            # ax.plot(x0_np - x_np_centered, 'r')
            # plt.title("Error")
            # plt.savefig(join(save_path, save_name + "_time.pdf"))
            # # plot in fourier
            # fig = plt.subplot(2, 1, 2)
            # ax = plt.subplot2grid((5, 1), (0, 0), rowspan=3)
            # ax.semilogy(np.abs(np.fft.fftshift(np.fft.fft(x0_np))), 'r')
            # ax.semilogy(np.abs(np.fft.fftshift(np.fft.fft(x_np))), 'b')
            # ax.set_xticklabels([])
            # plt.title("Original (red) and reconstructed (blue) Fourier spectra")
            # ax = plt.subplot2grid((5, 1), (3, 0), rowspan=1)
            # ax.semilogy(np.abs(np.fft.fftshift(np.fft.fft(x0_np) - np.fft.fft(x_np))), 'r')
            # ax.set_xticklabels([])
            # plt.title("Fourier Error")
            # ax = plt.subplot2grid((5, 1), (4, 0), rowspan=1)
            # ax.semilogy(np.abs(np.fft.fftshift(np.fft.fft(x0_np) - np.fft.fft(x_np))) / np.abs(np.fft.fftshift(np.fft.fft(x0_np))), 'r')
            # plt.title("Fourier Relative Error")
            # plt.savefig(join(save_path, save_name + "_fourier.pdf"))
            # save experiment
            if detail:
                logs1 = np.stack(logs_scd[1], axis=-1)
                logs2 = np.stack(logs_scd[2], axis=-1)
            else:
                logs1 = None
                logs2 = None
            save_var = {
                'x0': x0_np, 'x': x_np, 'err': err, 'order': 2, 'seed': rs,
                'N': nscales, 'Q': Q, 'K': K, 'ndiag': ndiag, 'T': T, 'tol': phi_scd.tol,
                'fst_order': phi_scd.fst_order, 'wav_type': wavelet_type,
                'multi_same': phi_scd.multi_same,
                # 'logs_prepare': np.array(logs_fst[0]),
                # 'logs_prepare_detail': np.stack(logs_fst[1], axis=-1) if detail else None,
                'logs': np.array(logs_scd[0]),
                'logs_fst': logs1,
                'logs_scd': logs2,
            }
            np.savez(join(save_path, save_name + "_x.npz"), **save_var)
            # fig_loss.savefig(join(save_path, save_name + "_loss.pdf"))
            print('saved as "{}_[signal.pdf, loss.pdf, x.npz]"'.format(join(save_path, save_name)))
            print(save_name + "_time")
            print("\n--------------------------------------------\n")
|
{"hexsha": "ae5038f880423f1a3060320eaca011cbd4bca588", "size": 19224, "ext": "py", "lang": "Python", "max_stars_repo_path": "kymatio/phaseexp1d/phaseexp/make_figs.py", "max_stars_repo_name": "sixin-zh/kymatio_wph", "max_stars_repo_head_hexsha": "237c0d2009766cf83b2145420a14d3c6e90dc983", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "kymatio/phaseexp1d/phaseexp/make_figs.py", "max_issues_repo_name": "sixin-zh/kymatio_wph", "max_issues_repo_head_hexsha": "237c0d2009766cf83b2145420a14d3c6e90dc983", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kymatio/phaseexp1d/phaseexp/make_figs.py", "max_forks_repo_name": "sixin-zh/kymatio_wph", "max_forks_repo_head_hexsha": "237c0d2009766cf83b2145420a14d3c6e90dc983", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.8260869565, "max_line_length": 138, "alphanum_fraction": 0.5647107782, "include": true, "reason": "import numpy,import scipy", "num_tokens": 5782}
|
# ==================================================================================================
# A toy code example that tests extracting the TSDF voxel centers from a TSDF
#
# Please run script from repository root, i.e.:
# python3 ./tsdf_management/extract_voxel_centers_test.py
#
# Copyright 2021 Gregory Kramida
# ==================================================================================================
import os
import sys
import open3d as o3d
import open3d.core as o3c
import numpy as np
from settings import process_arguments, PathParameters
from data import camera
from tsdf.default_voxel_grid import make_default_tsdf_voxel_grid
PROGRAM_EXIT_SUCCESS = 0
def main():
    """Fuse one RGB-D frame into a TSDF volume and dump its voxel centers to .npy.

    Reads a fixed frame (sequence 14, frame 200) from the dataset, optionally
    masks depth/color with the frame's object mask, integrates into an Open3D
    TSDF voxel grid on the GPU, then extracts and saves the voxel centers.
    """
    use_mask = True
    #####################################################################################################
    # === open3d device, image paths, camera intrinsics, volume ===
    #####################################################################################################

    # === device config ===
    device = o3d.core.Device('cuda:0')

    # === compile image paths ===
    # TODO: instead of using real data, generate toy color & image data of a plane at a fixed distance from the camera
    sequence_number = 14
    frame_index = 200
    split = "val"
    segment = None
    frames_directory = os.path.join(PathParameters.dataset_base_directory.value, "{:s}/seq{:03d}/".format(split, sequence_number))
    color_image_filename_mask = frames_directory + "color/{:06d}.jpg"
    color_image_path = color_image_filename_mask.format(frame_index)
    depth_image_filename_mask = frames_directory + "depth/{:06d}.png"
    depth_image_path = depth_image_filename_mask.format(frame_index)
    mask_image_folder = frames_directory + "mask"
    if segment is None:
        # Infer the segment name from the first mask file ("<frame>_<segment>.png").
        segment = os.path.splitext(os.listdir(mask_image_folder)[0])[0].split('_')[1]
    mask_image_path = os.path.join(mask_image_folder, "{:06d}_{:s}.png".format(frame_index, segment))

    # === handle intrinsics ===
    depth_intrinsics_path = os.path.join(PathParameters.dataset_base_directory.value, "val/seq014/intrinsics.txt")
    intrinsics_open3d_cpu, _ = camera.load_open3d_intrinsics_from_text_4x4_matrix_and_image(depth_intrinsics_path, depth_image_path)
    intrinsics_open3d_gpu = o3d.core.Tensor(intrinsics_open3d_cpu.intrinsic_matrix, o3d.core.Dtype.Float32, device)
    # Identity extrinsics: camera frame == world frame.
    extrinsics_numpy = np.eye(4)
    extrinsics_open3d_gpu = o3d.core.Tensor(extrinsics_numpy, o3d.core.Dtype.Float32, device)

    # === open3d volume ===
    volume = make_default_tsdf_voxel_grid(device)
    #####################################################################################################
    # === load images, fuse into TSDF, extract & visualize mesh ===
    #####################################################################################################
    # === images & TSDF integration/tsdf_management ===
    if use_mask:
        depth_image_numpy = np.array(o3d.io.read_image(depth_image_path))
        color_image_numpy = np.array(o3d.io.read_image(color_image_path))
        mask_image_numpy = np.array(o3d.io.read_image(mask_image_path))
        # Replicate the single-channel mask across the three color channels.
        mask_image_numpy_color = np.dstack((mask_image_numpy, mask_image_numpy, mask_image_numpy)).astype(np.uint8)
        # apply mask (bitwise AND zeroes out everything outside the mask;
        # NOTE(review): assumes the mask is stored as 0/255 values — confirm)
        depth_image_numpy &= mask_image_numpy
        color_image_numpy &= mask_image_numpy_color
        depth_image_open3d_legacy = o3d.geometry.Image(depth_image_numpy)
        color_image_open3d_legacy = o3d.geometry.Image(color_image_numpy)
    else:
        depth_image_open3d_legacy = o3d.io.read_image(depth_image_path)
        color_image_open3d_legacy = o3d.io.read_image(color_image_path)
    depth_image_gpu: o3d.t.geometry.Image = o3d.t.geometry.Image.from_legacy_image(depth_image_open3d_legacy, device=device)
    color_image_gpu: o3d.t.geometry.Image = o3d.t.geometry.Image.from_legacy_image(color_image_open3d_legacy, device=device)
    # Integrate the frame; 1000.0 is the depth scale, 3.0 the max depth (meters).
    volume.integrate(depth_image_gpu, color_image_gpu, intrinsics_open3d_gpu, extrinsics_open3d_gpu, 1000.0, 3.0)
    voxel_centers: o3c.Tensor = volume.extract_voxel_centers()
    voxel_centers_np = voxel_centers.cpu().numpy()
    print(voxel_centers_np)
    np.save(os.path.join(PathParameters.output_directory.value, "voxel_centers_000200_red_shorts.npy"), voxel_centers_np)
    return PROGRAM_EXIT_SUCCESS
if __name__ == "__main__":
    process_arguments()
    sys.exit(main())
|
{"hexsha": "c88d051cdb09df0dc56d169c0f817840c2bf3d9b", "size": 4437, "ext": "py", "lang": "Python", "max_stars_repo_path": "subprocedure_examples/extract_voxel_centers_test.py", "max_stars_repo_name": "Algomorph/NeuralTracking", "max_stars_repo_head_hexsha": "6312be8e18828344c65e25a423c239efcd3428dd", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-04-18T04:23:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-01T08:37:51.000Z", "max_issues_repo_path": "subprocedure_examples/extract_voxel_centers_test.py", "max_issues_repo_name": "Algomorph/NeuralTracking", "max_issues_repo_head_hexsha": "6312be8e18828344c65e25a423c239efcd3428dd", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 24, "max_issues_repo_issues_event_min_datetime": "2021-05-28T21:59:11.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-03T16:09:41.000Z", "max_forks_repo_path": "subprocedure_examples/extract_voxel_centers_test.py", "max_forks_repo_name": "Algomorph/NeuralTracking", "max_forks_repo_head_hexsha": "6312be8e18828344c65e25a423c239efcd3428dd", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-03-10T02:56:16.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-14T06:04:50.000Z", "avg_line_length": 42.6634615385, "max_line_length": 132, "alphanum_fraction": 0.6441289159, "include": true, "reason": "import numpy", "num_tokens": 973}
|
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import matplotlib.pyplot as plt
def plot_eis(frequencies, impedance, title=None, cmap='tab10'):
    """ Creates a single figure w/ both Bode and Nyquist plots of EIS spectra.
    Plots multiple spectra in distinct colors when `impedance` holds one
    spectrum per entry (i.e. its shape differs from `frequencies`).
    Args:
        frequencies (np.ndarray): numpy array of frequency values. Real, positive numbers
        impedance (np.ndarray): numpy array of impedance values. Imaginary numbers
        title (str): A figure title. Defaults to None.
        cmap (str): name of a matplotlib colormap for coloring multiple lines
    """
    # FIX: docstring previously named the first parameter "frequency";
    # also merged the duplicated shape check and removed the dead
    # `colors = cmap(0)` assignment in the single-spectrum branch.
    fig, ax = plt.subplots(1, 3, figsize=(12,4))
    cmap = plt.get_cmap(cmap)
    if impedance.shape != frequencies.shape:
        # Multiple spectra: one distinct color per spectrum on every axis.
        colors = cmap(np.linspace(0,1,impedance.size))
        colors = colors[:,0:3]
        ax[0].set_prop_cycle(color=colors)
        ax[1].set_prop_cycle(color=colors)
        ax[2].set_prop_cycle(color=colors)
        # Broadcast frequencies column-wise so each spectrum plots as its own line.
        frequencies = np.repeat(frequencies, impedance.size).reshape((frequencies.size, impedance.size))
        impedance = np.vstack(impedance).transpose()
    # Bode Plot (1): magnitude vs frequency
    ax[0].semilogx(frequencies, np.abs(impedance), "o")
    ax[0].set_title("Bode, |Z| vs. frequency")
    ax[0].set_xlabel("Freq [Hz]")
    ax[0].set_ylabel(r"|Z| [$\Omega$]", color="k")
    # Bode Plot (2): phase angle vs frequency
    ax[1].semilogx(frequencies, np.angle(impedance, deg=True), "o")
    ax[1].set_title(r"Bode, $\angle$Z vs. frequency")
    ax[1].set_xlabel("Freq [Hz]")
    ax[1].set_ylabel(r"$\angle$Z [$^\circ$]", color="k")
    # Nyquist Plot
    # NOTE(review): plotting -Im(Z) and then inverting the y-axis undoes the
    # conventional Nyquist orientation — confirm this is intended.
    ax[2].plot(np.real(impedance), -np.imag(impedance), "o")
    ax[2].set_aspect("equal")
    ax[2].invert_yaxis()
    ax[2].set_title("Nyquist")
    ax[2].set_xlabel(r"Re(Z) [$\Omega$]", color="k")
    ax[2].set_ylabel(r"Im(Z) [$\Omega$]", color="k")
    if title is not None:
        fig.suptitle(title)
    fig.tight_layout()
    plt.show()
if __name__ == "__main__":
    ...
|
{"hexsha": "d4c5bad7bdda1b3c8ca476c369ffd192481a2eb6", "size": 2094, "ext": "py", "lang": "Python", "max_stars_repo_path": "predict_capacity_from_eis/eisplot.py", "max_stars_repo_name": "battery-data-commons/mrs-sp22-tutorial", "max_stars_repo_head_hexsha": "64b420d2365f2ff26b6ea50617923db3a80c819b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "predict_capacity_from_eis/eisplot.py", "max_issues_repo_name": "battery-data-commons/mrs-sp22-tutorial", "max_issues_repo_head_hexsha": "64b420d2365f2ff26b6ea50617923db3a80c819b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "predict_capacity_from_eis/eisplot.py", "max_forks_repo_name": "battery-data-commons/mrs-sp22-tutorial", "max_forks_repo_head_hexsha": "64b420d2365f2ff26b6ea50617923db3a80c819b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.3278688525, "max_line_length": 104, "alphanum_fraction": 0.6322827125, "include": true, "reason": "import numpy", "num_tokens": 598}
|
# Standard imports
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Evaluation
from sklearn import metrics
from sklearn.model_selection import train_test_split
# Scale
from sklearn.preprocessing import StandardScaler
# Models
import statsmodels.api as sm
from sklearn import linear_model
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import GridSearchCV
class Regressor:
    """Shared data preparation and evaluation for the rental-price regressors.

    Builds scaled feature/target matrices from a dataframe with a "price"
    column, standardizes a hard-coded sample case for prediction, and holds a
    fixed 80/20 train/test split used by all subclasses.
    """
    def __init__(self, df):
        self.df = df
        # NOTE(review): despite the name, these are the feature (independent)
        # columns — everything except "price"; confirm intended naming.
        self.dependent_vars = list(self.df.drop(["price"], axis=1))

        # Simulated test case for prediction (to be populated with user input)
        self.x_case = {
            "sq_feet": 5000,
            "bedrooms": 3,
            "baths": 1,
            "dogs": 0,
            "cable": 0,
            "Quadrant_SW-Central": 0,
            "type_Basement": 0,
            "type_Condo": 0,
            "type_House": 1,
            "type_Shared": 0,
            "community_Beltline": 0,
            "community_Downtown": 0,
            "community_Eau Claire": 0,
            "community_Victoria Park": 0,
            "den 1": 0,
        }
        # Features = everything but price; target = price as a column vector.
        self.X = self.df.drop(["price"], axis=1).values
        self.y = self.df["price"].values
        self.y = self.y.reshape(-1, 1)
        # Scale objects (kept so predictions can be transformed back)
        self.scalerX = StandardScaler().fit(self.X)
        self.scalery = StandardScaler().fit(self.y)
        # Scale the data (mean 0 , SD 1)
        self.X = self.scalerX.transform(self.X)
        self.y = self.scalery.transform(self.y)
        # Scale the test case with the same feature scaler
        # (assumes dict insertion order matches the feature column order).
        vals = np.fromiter(self.x_case.values(), dtype=float).reshape(1, -1)
        self.x_case = self.scalerX.transform(vals)
        # Split data (fixed random_state for reproducibility)
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
            self.X, self.y, test_size=0.2, random_state=1
        )

    def evaluate(self, name, model, y_pred):
        """Print MAE/MSE/RMSE and R-squared for `y_pred` against the held-out split.

        Error measures are reported in original price units (inverse-scaled);
        `name` labels the model in the printout.
        """
        rescaled_y_test = self.scalery.inverse_transform(self.y_test)
        rescaled_y_pred = self.scalery.inverse_transform(y_pred)
        rescaled_y_train = self.scalery.inverse_transform(self.y_train)
        mae = round(metrics.mean_absolute_error(rescaled_y_test, rescaled_y_pred), 2)
        mse = round(metrics.mean_squared_error(rescaled_y_test, rescaled_y_pred), 2)
        rmse = round(
            np.sqrt(metrics.mean_squared_error(rescaled_y_test, rescaled_y_pred)), 2
        )
        r2 = round(metrics.r2_score(self.y_test, y_pred), 2)
        inSampleR2 = round(model.score(self.X_train, self.y_train), 2)
        print("")
        print("")
        print("Error Measures: ", name)
        print("")
        print("Mean Absolute Error:", mae)
        print("Mean Squared Error:", mse)
        print("Out of sample R-square value: ", r2)
        print("In Sample R2 Score: {0}".format(inSampleR2))
        print("")
        print("Important!")
        print("Root Mean Squared Error:", rmse)
        print("MAX: ", np.max(rescaled_y_train))
        print("MIN: ", np.min(rescaled_y_train))
        print("")
        return 1

    def sample_backwards(self, y_pred):
        """Print an actual-vs-predicted table in original price units."""
        rescaled_y_test = self.scalery.inverse_transform(self.y_test)
        rescaled_y_pred = self.scalery.inverse_transform(y_pred)
        rescaled_y_test = rescaled_y_test.ravel()
        rescaled_y_pred = rescaled_y_pred.ravel()
        print("")
        __df = pd.DataFrame({"Actual": rescaled_y_test, "Predicted": rescaled_y_pred})
        print(__df)
        return 1

    def predict(self, model):
        """Fit `model` on the full scaled data and print the price for the stored test case."""
        model.fit(self.X, self.y)
        # Make prediction
        y_pred = model.predict(self.x_case)
        rescaled_y_pred = self.scalery.inverse_transform(y_pred)
        print(rescaled_y_pred)
        return 1
class Linear(Regressor):
    """Linear-regression variants built on the shared Regressor pipeline."""

    def __init__(self, df):
        # Delegate data loading, scaling and splitting to the base class.
        super().__init__(df)

    def smlinear_w_constant(self):
        """Fit a statsmodels OLS model with an intercept and print its summary."""
        # Prepend a constant column so the model estimates beta_0.
        self.X_train = sm.add_constant(self.X_train)
        fitted = sm.OLS(self.y_train, self.X_train).fit()
        print(fitted.summary())
        return 1

    def sklinear(self, predict=False):
        """Train and evaluate a scikit-learn linear model, or — with
        ``predict=True`` — predict the stored user case instead."""
        lm = linear_model.LinearRegression()
        if predict:
            super().predict(lm)
        else:
            lm.fit(self.X_train, self.y_train)
            y_pred = lm.predict(self.X_test)
            super().evaluate("Multiple Linear", lm, y_pred)
            super().sample_backwards(y_pred)
        return 1
class EnsembleTree(Regressor):
    """Gradient-boosted regression trees on the shared Regressor pipeline."""

    def __init__(self, df):
        # Delegate data loading, scaling and splitting to the base class.
        super().__init__(df)

    def skEnsemble(self, predict=False):
        """Train and evaluate a GradientBoostingRegressor, or — with
        ``predict=True`` — predict the stored user case instead."""
        regr = GradientBoostingRegressor()
        if predict:
            super().predict(regr)
        else:
            regr.fit(self.X_train, self.y_train)
            y_pred = regr.predict(self.X_test)
            super().evaluate("Ensemble Tree", regr, y_pred)
            super().sample_backwards(y_pred)
        return 1
class Knn(Regressor):
    """k-nearest-neighbours regression on the shared Regressor pipeline."""

    def __init__(self, df):
        # Delegate data loading, scaling and splitting to the base class.
        super().__init__(df)

    def skKnn(self, predict=False):
        """Train and evaluate a 5-neighbour KNN regressor, or — with
        ``predict=True`` — predict the stored user case instead."""
        neigh = KNeighborsRegressor(n_neighbors=5)
        if predict:
            super().predict(neigh)
        else:
            neigh.fit(self.X_train, self.y_train)
            y_pred = neigh.predict(self.X_test)
            # Compare predictions against the held-out test targets.
            super().evaluate("KNN", neigh, y_pred)
            super().sample_backwards(y_pred)
        return 1
class Mlp(Regressor):
    """Multi-layer perceptron regression on the shared Regressor pipeline."""

    def __init__(self, df):
        # Delegate data loading, scaling and splitting to the base class.
        super().__init__(df)

    def skMlp(self, predict=False):
        """Train and evaluate an MLPRegressor, or — with ``predict=True`` —
        predict the stored user case instead.

        Returns:
            1 on completion (matching the other model wrappers).
        """
        # Multi-layer Perceptron regressor; hyperparameters unchanged.
        mlp = MLPRegressor(
            hidden_layer_sizes=(30, 10),
            activation="tanh",
            solver="adam",
            learning_rate="adaptive",
            max_iter=1000,
            learning_rate_init=0.001,
            warm_start=True,
            alpha=0.01,
        )
        if not predict:
            # Fit the network to the training split.
            mlp.fit(self.X_train, self.y_train)
            y_pred = mlp.predict(self.X_test)
            # Fix: the report label was misspelled "MPL".
            super().evaluate("MLP", mlp, y_pred)
            # Compare predictions against the held-out test targets.
            super().sample_backwards(y_pred)
        else:
            # Predict the target for the user-specified sample input.
            super().predict(mlp)
        return 1
|
{"hexsha": "c9a620f12a5768af92114e13d7ec247f1a168285", "size": 7133, "ext": "py", "lang": "Python", "max_stars_repo_path": "Regressors.py", "max_stars_repo_name": "agmoss/rental_regression_analysis", "max_stars_repo_head_hexsha": "1b6aeba571ba70ccc6fed02ab2290b14425cc92f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-03-04T21:46:30.000Z", "max_stars_repo_stars_event_max_datetime": "2019-03-04T21:46:30.000Z", "max_issues_repo_path": "Regressors.py", "max_issues_repo_name": "agmoss/rental_regression_analysis", "max_issues_repo_head_hexsha": "1b6aeba571ba70ccc6fed02ab2290b14425cc92f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Regressors.py", "max_forks_repo_name": "agmoss/rental_regression_analysis", "max_forks_repo_head_hexsha": "1b6aeba571ba70ccc6fed02ab2290b14425cc92f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.2251908397, "max_line_length": 92, "alphanum_fraction": 0.6024113276, "include": true, "reason": "import numpy,import statsmodels", "num_tokens": 1646}
|
from distutils.core import setup
from setuptools import find_packages
from Cython.Build import cythonize
from distutils.extension import Extension
import numpy

# details on installing python packages can be found here
# https://docs.python.org/3.7/install/

# Cython extension modules for Pylians3. Modules using OpenMP pass -fopenmp
# to both the compiler and the linker; numeric kernels additionally enable
# -O3/-ffast-math/-march=native.
ext_modules = [
    Extension("MAS_library.MAS_library", ["MAS_library/MAS_library.pyx",
                                          "MAS_library/MAS_c.c"],
              extra_compile_args=['-O3', '-ffast-math', '-march=native', '-fopenmp'],
              extra_link_args=['-fopenmp'], libraries=['m']),
    Extension("Pk_library.Pk_library", ["Pk_library/Pk_library.pyx"],
              extra_compile_args=['-O3', '-ffast-math', '-march=native', '-fopenmp']),
    Extension("Pk_library.bispectrum_library",
              ["Pk_library/bispectrum_library.pyx"]),
    Extension("MAS_library.field_properties",
              ["MAS_library/field_properties.pyx"]),
    Extension("redshift_space_library.redshift_space_library",
              ["redshift_space_library/redshift_space_library.pyx"]),
    Extension("smoothing_library.smoothing_library",
              ["smoothing_library/smoothing_library.pyx"],
              extra_compile_args=['-O3', '-ffast-math', '-march=native', '-fopenmp'],
              extra_link_args=['-fopenmp'], libraries=['m']),
    Extension("void_library.void_library",
              ["void_library/void_library.pyx",
               "void_library/void_openmp_library.c"],
              extra_compile_args=['-O3', '-ffast-math', '-march=native', '-fopenmp'],
              extra_link_args=['-fopenmp'], libraries=['m']),
    Extension("integration_library.integration_library",
              ["integration_library/integration_library.pyx",
               "integration_library/integration.c",
               "integration_library/runge_kutta.c"],
              extra_compile_args=["-O3", "-ffast-math", "-march=native"]),
    Extension("density_field_library.density_field_library",
              ["density_field_library/density_field_library.pyx"]),
    Extension("sorting_library.sorting_library",
              ["sorting_library/sorting_library.pyx"],
              extra_compile_args=['-O3', '-ffast-math', '-march=native']),
    Extension("HI_library.HI_library",
              ["HI_library/HI_library.pyx"]),
    Extension("HI_clusters_library.HI_clusters_library",
              ["HI_clusters_library/HI_clusters_library.pyx"]),
]

setup(
    name='Pylians3',
    version="3.0",
    author='Francisco Villaescusa-Navarro',
    author_email='villaescusa.francisco@gmail.com',
    # Cythonize under Python-3 semantics; include_path lets .pyx files
    # cimport the listed library directories.
    ext_modules=cythonize(ext_modules,
                          compiler_directives={'language_level': "3"},
                          include_path=['MAS_library/', 'void_library/',
                                        'integration_library/']),
    # NumPy headers are needed by the compiled extensions.
    include_dirs=[numpy.get_include()],
    packages=find_packages(),
    # Pure-Python helper modules shipped alongside the extensions.
    py_modules=['bias_library', 'CAMB_library', 'correlation_function_library',
                'cosmology_library', 'HI_image_library/HI_image_library',
                'HOD_library', 'IM_library', 'mass_function_library',
                'plotting_library', 'readfof', 'readgadget', 'readsnapHDF5',
                'readsnap', 'readsnap2', 'readsnap_mpi',
                'readsubf', 'routines', 'units_library']
)
|
{"hexsha": "70997e5451638a27b37d9369171c916b2ffb7550", "size": 3288, "ext": "py", "lang": "Python", "max_stars_repo_path": "library/setup.py", "max_stars_repo_name": "GabrieleParimbelli/Pylians3", "max_stars_repo_head_hexsha": "03b6f497c084c6a1c795e8b8f8d1e9c71c5e80cd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 32, "max_stars_repo_stars_event_min_datetime": "2019-12-06T10:48:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T14:13:49.000Z", "max_issues_repo_path": "library/setup.py", "max_issues_repo_name": "GabrieleParimbelli/Pylians3", "max_issues_repo_head_hexsha": "03b6f497c084c6a1c795e8b8f8d1e9c71c5e80cd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-02-04T00:20:22.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-16T13:27:06.000Z", "max_forks_repo_path": "library/setup.py", "max_forks_repo_name": "GabrieleParimbelli/Pylians3", "max_forks_repo_head_hexsha": "03b6f497c084c6a1c795e8b8f8d1e9c71c5e80cd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 15, "max_forks_repo_forks_event_min_datetime": "2019-10-30T21:41:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-05T07:02:34.000Z", "avg_line_length": 39.1428571429, "max_line_length": 79, "alphanum_fraction": 0.6386861314, "include": true, "reason": "import numpy", "num_tokens": 736}
|
#define _FILE_OFFSET_BITS 64
#include <iostream>
#include <fstream>
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <expat.h>
#include <boost/regex.hpp>
#include <boost/tokenizer.hpp>
#include <boost/foreach.hpp>
#include "wiki_scanner.h"
using namespace std;
using namespace boost;
/* Typedefs */
/* Parser state threaded through the expat callbacks as user_data. */
typedef struct {
    char* cdata;        /* accumulation buffer for character data (heap-owned) */
    size_t cdata_len;   /* number of bytes currently stored in cdata */
    int in_title;       /* nonzero while inside a <title> element */
    char* title;        /* completed title text (ownership taken from cdata) */
    int in_text;        /* nonzero while inside a <text> element */
    char* text;         /* completed article text (ownership taken from cdata) */
} xml_progress_t;

/* expat callbacks: element open/close and character-data accumulation. */
static void XMLCALL load_start(void *user_data, const char *name, const char **attr);
static void XMLCALL load_end(void *user_data, const char *name);
static void XMLCALL character_data_handler(void* user_data, const XML_Char *s, int len);
static void parse_outlinks(xml_progress_t*);

/* Globlals */
/* Matches "#REDIRECT [[Target]]" at the start of an article body. */
static const regex redirect_rx("^#REDIRECT\\s+\\[\\[(.*?)\\]\\]",boost::regex::perl);
/* NOTE(review): declared but not written to in this file — presumably used
   by wiki_scanner.h code; confirm before removing. */
static ofstream output("/tmp/titles");
/* Stream-parse the wiki articles XML given on the command line, feeding the
 * expat handlers above. Returns 0 on success, nonzero on usage error. */
int main(int argc,char** argv)
{
    if (argc < 2) {
        cerr << "Please specify an articles.xml" << endl;
        exit(-1);
    }
    char buf[BUFSIZ];
    FILE* xml_file = NULL;
    xml_progress_t buffer = {0};

    /* Wire the push parser to the element and character-data handlers. */
    XML_Parser parser = XML_ParserCreate(NULL);
    XML_SetUserData(parser, &buffer);
    XML_SetElementHandler(parser, load_start, load_end);
    XML_SetCharacterDataHandler(parser,character_data_handler);

    int done = 0;
    xml_file = fopen(argv[1],"r");
    if (xml_file) {
        do {
            size_t len = fread(buf, 1, sizeof(buf),xml_file);
            done = len < sizeof(buf);
            if (XML_Parse(parser, buf, len, done) == XML_STATUS_ERROR) {
                fprintf(stderr, "%s at line %lu\n", XML_ErrorString(XML_GetErrorCode(parser)), XML_GetCurrentLineNumber(parser));
                /* Fix: expat cannot resume after a parse error — stop
                 * feeding it data instead of looping on the same error. */
                break;
            }
        } while (!done);
        fclose(xml_file);  /* Fix: input file was never closed. */
    } else {
        printf("Error opening file: %s\n",strerror(errno));
        fprintf(stderr,"Cannot find file\n");
    }
    XML_ParserFree(parser);
    return 0;
}
/* Static Private Implementation */
/* Element-open callback: record whether we have entered <title> or <text>
 * so that character_data_handler knows to accumulate. */
static void XMLCALL
load_start(void *user_data, const char *name, const char **attr)
{
    xml_progress_t* prog = (xml_progress_t*) user_data;
    if (strcmp(name, "title") == 0) {
        prog->in_title = 1;
    } else if (strcmp(name, "text") == 0) {
        prog->in_text = 1;
    }
}
/* Element-close callback. Transfers ownership of the accumulated cdata
 * buffer to title/text, and on </page> processes and frees both.
 * The final `prog->cdata = NULL` is what hands ownership over: the next
 * character-data callback will then start a fresh buffer. */
static void XMLCALL
load_end(void *user_data, const char *name)
{
    xml_progress_t* prog = (xml_progress_t*) user_data;
    if (!strcmp(name,"title")) {
        /* cdata currently holds the page title; take ownership. */
        prog->title = prog->cdata;
        prog->in_title = 0;
    } else if (!strcmp(name,"text")) {
        /* cdata currently holds the article body; take ownership.
         * cdata_len is left as the body length for parse_outlinks. */
        prog->text = prog->cdata;
        prog->in_text = 0;
    } else if (!strcmp(name,"page")) {
        /* End of one article: scan its text, then release both buffers. */
        parse_outlinks(prog);
        free(prog->title);
        free(prog->text);
    }
    prog->cdata = NULL;
}
/* Character-data callback: append `len` chars of `s` to the NUL-terminated
 * accumulation buffer, but only while inside <title> or <text>.
 * expat may deliver one element's text in many small chunks. */
static void XMLCALL
character_data_handler(void* user_data, const XML_Char *s, int len)
{
    xml_progress_t* prog = (xml_progress_t*) user_data;
    if (len <= 0 || !(prog->in_title || prog->in_text))
        return;

    /* realloc(NULL, n) behaves as malloc(n), so one path covers both the
     * first chunk and subsequent appends. Note cdata_len may be stale when
     * cdata is NULL (reset in load_end), so derive the old length here. */
    size_t old_len = prog->cdata ? prog->cdata_len : 0;
    size_t new_len = old_len + (size_t)len;
    XML_Char* grown = (XML_Char*) realloc(prog->cdata,
                                          sizeof(XML_Char) * (new_len + 1));
    if (!grown) {
        /* Fix: the original never checked the allocation result; on failure
         * it would memcpy through a NULL (or dangling) pointer. */
        fprintf(stderr, "out of memory accumulating character data\n");
        exit(-1);
    }
    memcpy(grown + old_len, s, (size_t)len * sizeof(XML_Char));
    grown[new_len] = '\0';
    prog->cdata = grown;
    prog->cdata_len = new_len;
}
/* Here we search for outlinks and output them */
static void parse_outlinks(xml_progress_t* prog)
{
char* p = prog->text;
char* pe = p + prog->cdata_len;
wiki_token_t token;
scan(&token,NULL,p,pe);
while(token.type != END_OF_FILE) {
cout << "Token Scanned: " << token.type << endl;
scan(&token,&token,NULL,pe);
}
}
|
{"hexsha": "8e061c2219c4f908bcd0626ff8c7a7e2a3b6d2fa", "size": 3617, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "c/wikipedia/article_xml_converter.cpp", "max_stars_repo_name": "mmonto7/small-world-graph", "max_stars_repo_head_hexsha": "8ea1015c24065cb71875620b28c66ffb8348dcae", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2016-05-31T07:23:27.000Z", "max_stars_repo_stars_event_max_datetime": "2018-02-16T00:06:04.000Z", "max_issues_repo_path": "c/wikipedia/article_xml_converter.cpp", "max_issues_repo_name": "mmonto7/small-world-graph", "max_issues_repo_head_hexsha": "8ea1015c24065cb71875620b28c66ffb8348dcae", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2020-08-31T20:51:20.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-30T18:05:25.000Z", "max_forks_repo_path": "c/wikipedia/article_xml_converter.cpp", "max_forks_repo_name": "mmonto7/small-world-graph", "max_forks_repo_head_hexsha": "8ea1015c24065cb71875620b28c66ffb8348dcae", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2015-01-17T07:31:25.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-31T20:49:41.000Z", "avg_line_length": 26.2101449275, "max_line_length": 121, "alphanum_fraction": 0.6524744263, "num_tokens": 1014}
|
#!usr/bin/env python
#-*- coding:utf-8 _*-
"""
@author:yaoli
@file: 05_back_propagation.py  (back propagation)
@time: 2018/12/06
"""
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops

ops.reset_default_graph()
sess = tf.Session()

# A regression example. The inputs are 100 random numbers with mean 1.0 and
# standard deviation 0.1; the targets are 100 copies of the constant 10.0.
# We fit the linear model x_data * A = target_values, so A should converge
# towards 10.
x_vals = np.random.normal(1, 0.1, 100)
y_vals = np.repeat(10., 100)
x_data = tf.placeholder(shape=[1], dtype=tf.float32)
y_target = tf.placeholder(shape=[1], dtype=tf.float32)

# Create the model parameter A.
A = tf.Variable(tf.random_normal(shape=[1]))

# Add the model operation to the graph.
my_output = tf.multiply(x_data, A)

# Add the L2 loss function to the graph.
loss = tf.square(my_output - y_target)

# Initialize variables.
init = tf.global_variables_initializer()
sess.run(init)

# Create the optimizing operation: gradient descent minimizing the loss.
# The learning rate (0.02) controls the step size; too large a rate can
# prevent convergence.
my_opt = tf.train.GradientDescentOptimizer(0.02)
train_step = my_opt.minimize(loss)

# Run the regression loop on randomly drawn single samples (SGD).
for i in range(1400):
    rand_index = np.random.choice(100)
    rand_x = [x_vals[rand_index]]
    rand_y = [y_vals[rand_index]]
    sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
    if (i + 1) % 200 == 0:
        # Fix: a space was missing before "A =" (output read "Step #200A =").
        print('Step #' + str(i + 1) + ' A = ' + str(sess.run(A)))
        # Fix: str() was called with no argument, so the loss always printed
        # as an empty string; evaluate the loss on the current sample.
        print('Loss = ' + str(sess.run(loss,
                                       feed_dict={x_data: rand_x,
                                                  y_target: rand_y})))
|
{"hexsha": "e55d476a1b1c70983c74beef0bef4faa854f2a0a", "size": 1370, "ext": "py", "lang": "Python", "max_stars_repo_path": "02_TensorFlow_Way/05_back_propagation.py", "max_stars_repo_name": "GeneralLi95/TensorFlow_cookbook", "max_stars_repo_head_hexsha": "f1102cc0cd0b2f641346664d601e01f315a8b437", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "02_TensorFlow_Way/05_back_propagation.py", "max_issues_repo_name": "GeneralLi95/TensorFlow_cookbook", "max_issues_repo_head_hexsha": "f1102cc0cd0b2f641346664d601e01f315a8b437", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "02_TensorFlow_Way/05_back_propagation.py", "max_forks_repo_name": "GeneralLi95/TensorFlow_cookbook", "max_forks_repo_head_hexsha": "f1102cc0cd0b2f641346664d601e01f315a8b437", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.8490566038, "max_line_length": 95, "alphanum_fraction": 0.7138686131, "include": true, "reason": "import numpy", "num_tokens": 541}
|
[STATEMENT]
lemma maximum_fst_prefixes_are_prefixes :
assumes "xys \<in> list.set (maximum_fst_prefixes t xs ys)"
shows "map fst xys = take (length xys) xs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. map fst xys = take (length xys) xs
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
xys \<in> list.set (maximum_fst_prefixes t xs ys)
goal (1 subgoal):
1. map fst xys = take (length xys) xs
[PROOF STEP]
proof (induction xys arbitrary: t xs)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>t xs. [] \<in> list.set (maximum_fst_prefixes t xs ys) \<Longrightarrow> map fst [] = take (length []) xs
2. \<And>a xys t xs. \<lbrakk>\<And>t xs. xys \<in> list.set (maximum_fst_prefixes t xs ys) \<Longrightarrow> map fst xys = take (length xys) xs; a # xys \<in> list.set (maximum_fst_prefixes t xs ys)\<rbrakk> \<Longrightarrow> map fst (a # xys) = take (length (a # xys)) xs
[PROOF STEP]
case Nil
[PROOF STATE]
proof (state)
this:
[] \<in> list.set (maximum_fst_prefixes t xs ys)
goal (2 subgoals):
1. \<And>t xs. [] \<in> list.set (maximum_fst_prefixes t xs ys) \<Longrightarrow> map fst [] = take (length []) xs
2. \<And>a xys t xs. \<lbrakk>\<And>t xs. xys \<in> list.set (maximum_fst_prefixes t xs ys) \<Longrightarrow> map fst xys = take (length xys) xs; a # xys \<in> list.set (maximum_fst_prefixes t xs ys)\<rbrakk> \<Longrightarrow> map fst (a # xys) = take (length (a # xys)) xs
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
[] \<in> list.set (maximum_fst_prefixes t xs ys)
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
[] \<in> list.set (maximum_fst_prefixes t xs ys)
goal (1 subgoal):
1. map fst [] = take (length []) xs
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
map fst [] = take (length []) xs
goal (1 subgoal):
1. \<And>a xys t xs. \<lbrakk>\<And>t xs. xys \<in> list.set (maximum_fst_prefixes t xs ys) \<Longrightarrow> map fst xys = take (length xys) xs; a # xys \<in> list.set (maximum_fst_prefixes t xs ys)\<rbrakk> \<Longrightarrow> map fst (a # xys) = take (length (a # xys)) xs
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a xys t xs. \<lbrakk>\<And>t xs. xys \<in> list.set (maximum_fst_prefixes t xs ys) \<Longrightarrow> map fst xys = take (length xys) xs; a # xys \<in> list.set (maximum_fst_prefixes t xs ys)\<rbrakk> \<Longrightarrow> map fst (a # xys) = take (length (a # xys)) xs
[PROOF STEP]
case (Cons xy xys)
[PROOF STATE]
proof (state)
this:
xys \<in> list.set (maximum_fst_prefixes ?t ?xs ys) \<Longrightarrow> map fst xys = take (length xys) ?xs
xy # xys \<in> list.set (maximum_fst_prefixes t xs ys)
goal (1 subgoal):
1. \<And>a xys t xs. \<lbrakk>\<And>t xs. xys \<in> list.set (maximum_fst_prefixes t xs ys) \<Longrightarrow> map fst xys = take (length xys) xs; a # xys \<in> list.set (maximum_fst_prefixes t xs ys)\<rbrakk> \<Longrightarrow> map fst (a # xys) = take (length (a # xys)) xs
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
xys \<in> list.set (maximum_fst_prefixes ?t ?xs ys) \<Longrightarrow> map fst xys = take (length xys) ?xs
xy # xys \<in> list.set (maximum_fst_prefixes t xs ys)
[PROOF STEP]
have "xs \<noteq> []"
[PROOF STATE]
proof (prove)
using this:
xys \<in> list.set (maximum_fst_prefixes ?t ?xs ys) \<Longrightarrow> map fst xys = take (length xys) ?xs
xy # xys \<in> list.set (maximum_fst_prefixes t xs ys)
goal (1 subgoal):
1. xs \<noteq> []
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
xs \<noteq> []
goal (1 subgoal):
1. \<And>a xys t xs. \<lbrakk>\<And>t xs. xys \<in> list.set (maximum_fst_prefixes t xs ys) \<Longrightarrow> map fst xys = take (length xys) xs; a # xys \<in> list.set (maximum_fst_prefixes t xs ys)\<rbrakk> \<Longrightarrow> map fst (a # xys) = take (length (a # xys)) xs
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
xs \<noteq> []
[PROOF STEP]
obtain x xs' where "xs = x#xs'"
[PROOF STATE]
proof (prove)
using this:
xs \<noteq> []
goal (1 subgoal):
1. (\<And>x xs'. xs = x # xs' \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using list.exhaust
[PROOF STATE]
proof (prove)
using this:
xs \<noteq> []
\<lbrakk>?y = [] \<Longrightarrow> ?P; \<And>x21 x22. ?y = x21 # x22 \<Longrightarrow> ?P\<rbrakk> \<Longrightarrow> ?P
goal (1 subgoal):
1. (\<And>x xs'. xs = x # xs' \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
xs = x # xs'
goal (1 subgoal):
1. \<And>a xys t xs. \<lbrakk>\<And>t xs. xys \<in> list.set (maximum_fst_prefixes t xs ys) \<Longrightarrow> map fst xys = take (length xys) xs; a # xys \<in> list.set (maximum_fst_prefixes t xs ys)\<rbrakk> \<Longrightarrow> map fst (a # xys) = take (length (a # xys)) xs
[PROOF STEP]
obtain m where *:"t = PT m"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>m. t = PT m \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using finite_tree.cases
[PROOF STATE]
proof (prove)
using this:
(\<And>m. ?x = PT m \<Longrightarrow> ?P) \<Longrightarrow> ?P
goal (1 subgoal):
1. (\<And>m. t = PT m \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
t = PT m
goal (1 subgoal):
1. \<And>a xys t xs. \<lbrakk>\<And>t xs. xys \<in> list.set (maximum_fst_prefixes t xs ys) \<Longrightarrow> map fst xys = take (length xys) xs; a # xys \<in> list.set (maximum_fst_prefixes t xs ys)\<rbrakk> \<Longrightarrow> map fst (a # xys) = take (length (a # xys)) xs
[PROOF STEP]
have "is_leaf (PT m) = False"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. is_leaf (PT m) = False
[PROOF STEP]
using Cons.prems
[PROOF STATE]
proof (prove)
using this:
xy # xys \<in> list.set (maximum_fst_prefixes t xs ys)
goal (1 subgoal):
1. is_leaf (PT m) = False
[PROOF STEP]
unfolding * \<open>xs = x#xs'\<close>
[PROOF STATE]
proof (prove)
using this:
xy # xys \<in> list.set (maximum_fst_prefixes (PT m) (x # xs') ys)
goal (1 subgoal):
1. is_leaf (PT m) = False
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
is_leaf (PT m) = False
goal (1 subgoal):
1. \<And>a xys t xs. \<lbrakk>\<And>t xs. xys \<in> list.set (maximum_fst_prefixes t xs ys) \<Longrightarrow> map fst xys = take (length xys) xs; a # xys \<in> list.set (maximum_fst_prefixes t xs ys)\<rbrakk> \<Longrightarrow> map fst (a # xys) = take (length (a # xys)) xs
[PROOF STEP]
have "(xy#xys) \<in> list.set (concat (map (\<lambda> y . map ((#) (x,y)) (maximum_fst_prefixes (the (m (x,y))) xs' ys)) (filter (\<lambda> y . (m (x,y) \<noteq> None)) ys)))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. xy # xys \<in> list.set (concat (map (\<lambda>y. map ((#) (x, y)) (maximum_fst_prefixes (the (m (x, y))) xs' ys)) (filter (\<lambda>y. m (x, y) \<noteq> None) ys)))
[PROOF STEP]
using Cons.prems
[PROOF STATE]
proof (prove)
using this:
xy # xys \<in> list.set (maximum_fst_prefixes t xs ys)
goal (1 subgoal):
1. xy # xys \<in> list.set (concat (map (\<lambda>y. map ((#) (x, y)) (maximum_fst_prefixes (the (m (x, y))) xs' ys)) (filter (\<lambda>y. m (x, y) \<noteq> None) ys)))
[PROOF STEP]
unfolding * \<open>xs = x#xs'\<close> \<open>is_leaf (PT m) = False\<close> maximum_fst_prefixes.simps
[PROOF STATE]
proof (prove)
using this:
xy # xys \<in> list.set (if False then [[]] else concat (map (\<lambda>y. map ((#) (x, y)) (maximum_fst_prefixes (the (m (x, y))) xs' ys)) (filter (\<lambda>y. m (x, y) \<noteq> None) ys)))
goal (1 subgoal):
1. xy # xys \<in> list.set (concat (map (\<lambda>y. map ((#) (x, y)) (maximum_fst_prefixes (the (m (x, y))) xs' ys)) (filter (\<lambda>y. m (x, y) \<noteq> None) ys)))
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
xy # xys \<in> list.set (concat (map (\<lambda>y. map ((#) (x, y)) (maximum_fst_prefixes (the (m (x, y))) xs' ys)) (filter (\<lambda>y. m (x, y) \<noteq> None) ys)))
goal (1 subgoal):
1. \<And>a xys t xs. \<lbrakk>\<And>t xs. xys \<in> list.set (maximum_fst_prefixes t xs ys) \<Longrightarrow> map fst xys = take (length xys) xs; a # xys \<in> list.set (maximum_fst_prefixes t xs ys)\<rbrakk> \<Longrightarrow> map fst (a # xys) = take (length (a # xys)) xs
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
xy # xys \<in> list.set (concat (map (\<lambda>y. map ((#) (x, y)) (maximum_fst_prefixes (the (m (x, y))) xs' ys)) (filter (\<lambda>y. m (x, y) \<noteq> None) ys)))
[PROOF STEP]
obtain y where "y \<in> list.set (filter (\<lambda> y . (m (x,y) \<noteq> None)) ys)"
and "(xy#xys) \<in> list.set (map ((#) (x,y)) (maximum_fst_prefixes (the (m (x,y))) xs' ys))"
[PROOF STATE]
proof (prove)
using this:
xy # xys \<in> list.set (concat (map (\<lambda>y. map ((#) (x, y)) (maximum_fst_prefixes (the (m (x, y))) xs' ys)) (filter (\<lambda>y. m (x, y) \<noteq> None) ys)))
goal (1 subgoal):
1. (\<And>y. \<lbrakk>y \<in> list.set (filter (\<lambda>y. m (x, y) \<noteq> None) ys); xy # xys \<in> list.set (map ((#) (x, y)) (maximum_fst_prefixes (the (m (x, y))) xs' ys))\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
y \<in> list.set (filter (\<lambda>y. m (x, y) \<noteq> None) ys)
xy # xys \<in> list.set (map ((#) (x, y)) (maximum_fst_prefixes (the (m (x, y))) xs' ys))
goal (1 subgoal):
1. \<And>a xys t xs. \<lbrakk>\<And>t xs. xys \<in> list.set (maximum_fst_prefixes t xs ys) \<Longrightarrow> map fst xys = take (length xys) xs; a # xys \<in> list.set (maximum_fst_prefixes t xs ys)\<rbrakk> \<Longrightarrow> map fst (a # xys) = take (length (a # xys)) xs
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
y \<in> list.set (filter (\<lambda>y. m (x, y) \<noteq> None) ys)
xy # xys \<in> list.set (map ((#) (x, y)) (maximum_fst_prefixes (the (m (x, y))) xs' ys))
[PROOF STEP]
have "xy = (x,y)" and "xys \<in> list.set (maximum_fst_prefixes (the (m (x,y))) xs' ys)"
[PROOF STATE]
proof (prove)
using this:
y \<in> list.set (filter (\<lambda>y. m (x, y) \<noteq> None) ys)
xy # xys \<in> list.set (map ((#) (x, y)) (maximum_fst_prefixes (the (m (x, y))) xs' ys))
goal (1 subgoal):
1. xy = (x, y) &&& xys \<in> list.set (maximum_fst_prefixes (the (m (x, y))) xs' ys)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
xy = (x, y)
xys \<in> list.set (maximum_fst_prefixes (the (m (x, y))) xs' ys)
goal (1 subgoal):
1. \<And>a xys t xs. \<lbrakk>\<And>t xs. xys \<in> list.set (maximum_fst_prefixes t xs ys) \<Longrightarrow> map fst xys = take (length xys) xs; a # xys \<in> list.set (maximum_fst_prefixes t xs ys)\<rbrakk> \<Longrightarrow> map fst (a # xys) = take (length (a # xys)) xs
[PROOF STEP]
have **: "take (length ((x, y) # xys)) (x # xs') = x # (take (length xys) xs')"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. take (length ((x, y) # xys)) (x # xs') = x # take (length xys) xs'
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
take (length ((x, y) # xys)) (x # xs') = x # take (length xys) xs'
goal (1 subgoal):
1. \<And>a xys t xs. \<lbrakk>\<And>t xs. xys \<in> list.set (maximum_fst_prefixes t xs ys) \<Longrightarrow> map fst xys = take (length xys) xs; a # xys \<in> list.set (maximum_fst_prefixes t xs ys)\<rbrakk> \<Longrightarrow> map fst (a # xys) = take (length (a # xys)) xs
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. map fst (xy # xys) = take (length (xy # xys)) xs
[PROOF STEP]
using Cons.IH[OF \<open>xys \<in> list.set (maximum_fst_prefixes (the (m (x,y))) xs' ys)\<close>]
[PROOF STATE]
proof (prove)
using this:
map fst xys = take (length xys) xs'
goal (1 subgoal):
1. map fst (xy # xys) = take (length (xy # xys)) xs
[PROOF STEP]
unfolding \<open>xy = (x,y)\<close> \<open>xs = x#xs'\<close> **
[PROOF STATE]
proof (prove)
using this:
map fst xys = take (length xys) xs'
goal (1 subgoal):
1. map fst ((x, y) # xys) = x # take (length xys) xs'
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
map fst (xy # xys) = take (length (xy # xys)) xs
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 5203, "file": "FSM_Tests_Prefix_Tree", "length": 39}
|
Require Import Crypto.Arithmetic.PrimeFieldTheorems.
Require Import Crypto.Specific.montgomery64_2e130m5_3limbs.Synthesis.

(* TODO : change this to field once field isomorphism happens *)
(* Synthesized Montgomery-form multiplication on 3-limb 64-bit words for the
   prime 2^130 - 5. The sigma type packages the operation together with its
   correctness proof: projecting through phiM_small commutes with field
   multiplication F.mul. *)
Definition mul :
  { mul : feBW_small -> feBW_small -> feBW_small
  | forall a b, phiM_small (mul a b) = F.mul (phiM_small a) (phiM_small b) }.
Proof.
  (* Ltac profiling and Time are instrumentation only; the implementation
     and proof are produced by the synthesize_mul tactic. *)
  Set Ltac Profiling.
  Time synthesize_mul ().
  Show Ltac Profile.
Time Defined.

(* Sanity check: list any axioms the synthesized term depends on. *)
Print Assumptions mul.
|
{"author": "anonymous-code-submission-01", "repo": "sp2019-54-code", "sha": "8867f5bed0821415ec99f593b1d61f715ed4f789", "save_path": "github-repos/coq/anonymous-code-submission-01-sp2019-54-code", "path": "github-repos/coq/anonymous-code-submission-01-sp2019-54-code/sp2019-54-code-8867f5bed0821415ec99f593b1d61f715ed4f789/src/Specific/montgomery64_2e130m5_3limbs/femul.v"}
|
#!/usr/bin/env python3
# a timing script for FFTs and convolutions using OpenMP
import sys, getopt
import numpy as np
from math import *
import subprocess
import os
import re # regexp package
import shutil
import tempfile
usage = '''A timing script for rocfft
Usage:
\ttiming.py
\t\t-w <string> set working directory for rocfft-rider
\t\t-i <string> directory for dloaded libs (appendable)
\t\t-o <string> name of output file (appendable for dload)
\t\t-D <-1,1> default: -1 (forward). Direction of transform
\t\t-I make transform in-place
\t\t-N <int> number of tests per problem size
\t\t-R set transform to be real/complex or complex/real
\t\t-d <1,2,3> default: dimension of transform
\t\t-x <int> minimum problem size in x direction
\t\t-X <int> maximum problem size in x direction
\t\t-y <int> minimum problem size in y direction
\t\t-Y <int> maximum problem size in Y direction
\t\t-z <int> minimum problem size in z direction
\t\t-Z <int> maximum problem size in Z direction
\t\t-f <string> precision: float(default) or double
\t\t-b <int> batch size
\t\t-g <int> device number
'''
def runcase(workingdir,
            dload, libdir,
            length, direction, rcfft, inplace, ntrial,
            precision, nbatch, devicenum, logfilename):
    """Run one rocfft-rider (or dyna-rocfft-rider) benchmark case.

    Builds the rider command line, runs it with output captured to a temp
    file, appends command + output to the log file, and parses the
    "Execution gpu time:" lines.

    Returns:
        A list of lists of execution times in seconds (one inner list per
        timing line in the rider output), or [] if the rider failed.

    NOTE(review): `inplace` is accepted but never forwarded to the rider —
    confirm whether a placement flag should be added.
    """
    progname = "dyna-rocfft-rider" if dload else "rocfft-rider"
    prog = os.path.join(workingdir, progname)

    # Assemble the rider command line.
    cmd = []
    cmd.append(prog)
    cmd.append("--verbose")
    cmd.append("0")
    if dload:
        # The dyna rider takes the libraries to load as extra arguments.
        cmd.append("--lib")
        for val in libdir:
            cmd.append(val)
    cmd.append("-N")
    cmd.append(str(ntrial))
    cmd.append("--length")
    for val in length:
        cmd.append(str(val))
    print(precision)
    if precision == "double":
        cmd.append("--double")
    cmd.append("-b")
    cmd.append(str(nbatch))
    cmd.append("--device")
    cmd.append(str(devicenum))

    # Map direction / real-complex flags onto the rider's transform (-t),
    # input (--itype) and output (--otype) type codes.
    ttype = -1
    itype = ""
    otype = ""
    if rcfft:
        if (direction == -1):
            ttype = 2
            itype = 2
            otype = 3
        if (direction == 1):
            ttype = 3
            itype = 3
            otype = 2
    else:
        itype = 0
        otype = 0
        if (direction == -1):
            ttype = 0
        if (direction == 1):
            ttype = 1
    cmd.append("-t")
    cmd.append(str(ttype))
    cmd.append("--itype")
    cmd.append(str(itype))
    cmd.append("--otype")
    cmd.append(str(otype))

    print(cmd)
    print(" ".join(cmd))

    # Fix: the temp file was leaked on the failure path (early `return []`
    # before fout.close()); context managers close both files on every path.
    with tempfile.TemporaryFile(mode="w+") as fout:
        proc = subprocess.Popen(cmd, cwd=os.path.join(workingdir, "..", ".."),
                                stdout=fout, stderr=fout,
                                env=os.environ.copy())
        proc.wait()
        rc = proc.returncode

        fout.seek(0)
        cout = fout.read()

    with open(logfilename, "a") as logfile:
        logfile.write(" ".join(cmd))
        logfile.write(cout)

    if rc != 0:
        print("\twell, that didn't work")
        print(rc)
        print(" ".join(cmd))
        return []

    vals = []
    searchstr = "Execution gpu time: "
    for line in cout.split("\n"):
        if line.startswith(searchstr):
            # Line ends with "ms", so drop that suffix and convert to seconds.
            ms_string = line[len(searchstr):-2]
            vals.append([1e-3 * float(val) for val in ms_string.split()])
    print("seconds: ", vals)
    return vals
def main(argv):
    """Parse command-line options, sweep FFT problem sizes, and record timings.

    For each output file given with -o, runs `runcase` over a geometric
    progression of lengths (multiplied by `radix` each step) and appends one
    tab-separated row of timing samples per case.
    """
    # Options to determine which binary is to be run:
    workingdir = "."
    libdir = []
    outfilename = []
    logfilename = "timing.log"

    # GPU device number:
    devicenum = 0

    # Experiment parameters:
    ntrial = 10

    # Problem size parameters:
    direction = -1
    inplace = False
    rcfft = False
    precision = "float"
    dimension = 1
    xmin = 2
    xmax = 1024
    ymin = 2
    ymax = 1024
    zmin = 2
    zmax = 1024
    radix = 2
    nbatch = 1

    try:
        opts, args = getopt.getopt(argv,"hb:d:i:D:IN:o:Rw:x:X:y:Y:z:Z:f:r:g:")
    except getopt.GetoptError:
        print("error in parsing arguments.")
        print(usage)
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-h"):
            print(usage)
            exit(0)
        elif opt in ("-w"):
            workingdir = arg
        elif opt in ("-o"):
            outfilename.append(arg)
        elif opt in ("-i"):
            libdir.append(arg)
        elif opt in ("-g"):
            devicenum = int(arg)
        elif opt in ("-N"):
            ntrial = int(arg)
        elif opt in ("-D"):
            if(int(arg) in [-1,1]):
                direction = int(arg)
            else:
                print("invalid direction: " + arg)
                print(usage)
                sys.exit(1)
        elif opt in ("-I"):
            inplace = True
        elif opt in ("-R"):
            rcfft = True
        elif opt in ("-f"):
            if arg not in ["float", "double"]:
                print("precision must be float or double")
                print(usage)
                sys.exit(1)
            precision = arg
        elif opt in ("-d"):
            dimension = int(arg)
            if not dimension in {1,2,3}:
                print("invalid dimension")
                print(usage)
                sys.exit(1)
        elif opt in ("-x"):
            xmin = int(arg)
        elif opt in ("-X"):
            xmax = int(arg)
        elif opt in ("-y"):
            ymin = int(arg)
        elif opt in ("-Y"):
            ymax = int(arg)
        elif opt in ("-z"):
            zmin = int(arg)
        elif opt in ("-Z"):
            zmax = int(arg)
        elif opt in ("-b"):
            nbatch = int(arg)
        elif opt in ("-r"):
            radix = int(arg)

    # A non-empty libdir selects the dyna-rider (dynamically loaded libs).
    dload = len(libdir) > 0

    if dload:
        print("Using dyna-rider")
    else:
        print("Using normal rider")

    # Echo the effective configuration for the log.
    print("workingdir: "+ workingdir)
    print("outfilename: "+ ",".join(outfilename))
    print("libdir: "+ ",".join(libdir))
    print("device number: " + str(devicenum))
    print("ntrial: " + str(ntrial))
    print("dimension: " + str(dimension))
    print("xmin: "+ str(xmin) + " xmax: " + str(xmax))
    if dimension > 1:
        print("ymin: "+ str(ymin) + " ymax: " + str(ymax))
    if dimension > 2:
        print("zmin: "+ str(zmin) + " zmax: " + str(zmax))
    print("direction: " + str(direction))
    print("real/complex FFT? " + str(rcfft))
    print("in-place? " + str(inplace))
    print("batch-size: " + str(nbatch))
    print("radix: " + str(radix))

    progname = "dyna-rocfft-rider" if dload else "rocfft-rider"
    prog = os.path.join(workingdir, progname)
    if not os.path.isfile(prog):
        print("**** Error: unable to find " + prog)
        sys.exit(1)

    # Header written to both the .log file and the data file: the invoking
    # command line, then the tab-separated column names.
    metadatastring = "# " + " ".join(sys.argv) + "\n"
    metadatastring += "# "
    metadatastring += "dimension"
    metadatastring += "\txlength"
    if(dimension > 1):
        metadatastring += "\tylength"
    if(dimension > 2):
        metadatastring += "\tzlength"
    metadatastring += "\tnbatch"
    metadatastring += "\tnsample"
    metadatastring += "\tsamples ..."
    metadatastring += "\n"

    # The log file is stored alongside each data output file.
    for idx in range(len(outfilename)):
        logfilename = outfilename[idx] + ".log"
        if not os.path.exists(os.path.dirname(logfilename)):
            os.makedirs(os.path.dirname(logfilename))
        print("log filename: " + logfilename)
        logfile = open(logfilename, "w+")
        logfile.write(metadatastring)
        logfile.close()

        outfile = open(outfilename[idx], "w+")
        outfile.write(metadatastring)
        outfile.close()

    # NOTE(review): maxtrial feeds only the commented-out adaptive-N formula
    # below; with N = ntrial it is currently unused.
    maxtrial = ntrial * xmax * ymax * zmax

    # Sweep sizes geometrically: every active dimension is scaled by `radix`
    # each iteration until any of them exceeds its maximum.
    xval = xmin
    yval = ymin
    zval = zmin
    while(xval <= xmax and yval <= ymax and zval <= zmax):
        print(xval)
        length = [xval]
        if dimension > 1:
            length.append(yval)
        if dimension > 2:
            length.append(zval)
        #N = max(ntrial, min(maxtrial // (xval * yval * zval), 20)) # FIXME: set upper bound to higher
        N = ntrial
        print(N)
        seconds = runcase(workingdir,
                          dload, libdir,
                          length, direction, rcfft, inplace, N,
                          precision, nbatch, devicenum, logfilename)
        #print(seconds)
        # One data file per dload library; row: sizes, nbatch, nsample, samples.
        for idx, vals in enumerate(seconds):
            with open(outfilename[idx], 'a') as outfile:
                outfile.write(str(dimension))
                outfile.write("\t")
                outfile.write(str(xval))
                outfile.write("\t")
                if(dimension > 1):
                    outfile.write(str(yval))
                    outfile.write("\t")
                if(dimension > 2):
                    outfile.write(str(zval))
                    outfile.write("\t")
                outfile.write(str(nbatch))
                outfile.write("\t")
                outfile.write(str(len(seconds[idx])))
                for second in seconds[idx]:
                    outfile.write("\t")
                    outfile.write(str(second))
                outfile.write("\n")
        xval *= radix
        if dimension > 1:
            yval *= radix
        if dimension > 2:
            zval *= radix

# Script entry point.
if __name__ == "__main__":
    main(sys.argv[1:])
|
{"hexsha": "4ae879ec47bcef304eba83e5846fc8c88e32e57f", "size": 9839, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/perf/timing.py", "max_stars_repo_name": "mhbliao/rocFFT", "max_stars_repo_head_hexsha": "f10ee7d8baba4bc2b87a6136cfebfe0f01e1535a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/perf/timing.py", "max_issues_repo_name": "mhbliao/rocFFT", "max_issues_repo_head_hexsha": "f10ee7d8baba4bc2b87a6136cfebfe0f01e1535a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/perf/timing.py", "max_forks_repo_name": "mhbliao/rocFFT", "max_forks_repo_head_hexsha": "f10ee7d8baba4bc2b87a6136cfebfe0f01e1535a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.8725212465, "max_line_length": 102, "alphanum_fraction": 0.5052342718, "include": true, "reason": "import numpy", "num_tokens": 2433}
|
[STATEMENT]
lemma "minit \<phi>\<^sub>e\<^sub>x = \<lparr>
mstate_i = 0,
mstate_m =
MAnd (MPred ''A'' [MFOTL.Var 0]) False
(MUntil True (MRel {[None]}) (interval 1 2) (MExists (MPred ''B'' [MFOTL.Var 1, MFOTL.Var 0]))
([], []) [] [])
([], []),
mstate_n = 1\<rparr>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. minit \<phi>\<^sub>e\<^sub>x = \<lparr>mstate_i = 0, mstate_m = MAnd (MPred ''A'' [trm.Var 0]) False (MUntil True (MRel {[None]}) (interval 1 2) (MExists (MPred ''B'' [trm.Var 1, trm.Var 0])) ([], []) [] []) ([], []), mstate_n = 1\<rparr>
[PROOF STEP]
by eval
|
{"llama_tokens": 292, "file": "MFOTL_Monitor_Examples", "length": 1}
|
from abc import ABC, abstractmethod
import numpy as np
class Model(ABC):
    """Abstract interface for a triple-scoring (link-prediction) model.

    Concrete subclasses must implement construction, training on
    positive/negative triples, ranked prediction, and serialization.
    """

    @abstractmethod
    def __init__(self):
        ...

    @abstractmethod
    def train(self, pos_triples: np.ndarray, neg_triples: np.ndarray):
        """Train the model on positive and negative example triples.

        Note: annotations fixed from ``np.array`` (a function, not a type)
        to ``np.ndarray``.
        """
        ...

    @abstractmethod
    def get_ranked_and_sorted_predictions(self, examples):
        """Return predictions for *examples*, ranked and sorted by score."""
        ...

    @abstractmethod
    def output_model(self, path):
        """Persist the trained model to *path*."""
        ...

    def _split_list_in_batches(self, input_list, batch_size):
        """Split *input_list* into consecutive chunks of at most *batch_size*.

        The final chunk may be shorter; an empty list yields no chunks.
        """
        return [input_list[i:i + batch_size] for i in range(0, len(input_list), batch_size)]

    # TODO: check if num embedding == #nodes
|
{"hexsha": "90abfe429b7843561129d4c93f56596bb3eb86e4", "size": 586, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/openbiolink/evaluation/models/model.py", "max_stars_repo_name": "cthoyt/OpenBioLink", "max_stars_repo_head_hexsha": "c5f85b99f9104f70493136c343e4554261e990a5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/openbiolink/evaluation/models/model.py", "max_issues_repo_name": "cthoyt/OpenBioLink", "max_issues_repo_head_hexsha": "c5f85b99f9104f70493136c343e4554261e990a5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/openbiolink/evaluation/models/model.py", "max_forks_repo_name": "cthoyt/OpenBioLink", "max_forks_repo_head_hexsha": "c5f85b99f9104f70493136c343e4554261e990a5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.7037037037, "max_line_length": 92, "alphanum_fraction": 0.662116041, "include": true, "reason": "import numpy", "num_tokens": 138}
|
# Return the package root directory: the parent of the directory that
# contains this source file.
function module_dir()
    return joinpath(@__DIR__, "..")
end
|
{"hexsha": "db8e8ec15074dcff695b9274c86ea02b80867872", "size": 59, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/utils.jl", "max_stars_repo_name": "simon-lc/AlgamesPlots.jl", "max_stars_repo_head_hexsha": "18851ea53168bbd1ab5c1c7f1116f8194d2c3091", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-02-04T08:25:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-06T10:20:17.000Z", "max_issues_repo_path": "src/utils.jl", "max_issues_repo_name": "simon-lc/AlgamesPlots.jl", "max_issues_repo_head_hexsha": "18851ea53168bbd1ab5c1c7f1116f8194d2c3091", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-02-05T21:00:57.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-13T04:42:49.000Z", "max_forks_repo_path": "src/utils.jl", "max_forks_repo_name": "simon-lc/AlgamesPlots.jl", "max_forks_repo_head_hexsha": "18851ea53168bbd1ab5c1c7f1116f8194d2c3091", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-03-11T08:02:01.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-28T06:42:53.000Z", "avg_line_length": 14.75, "max_line_length": 32, "alphanum_fraction": 0.7118644068, "num_tokens": 15}
|
import mathutils
import bpy
import numpy as np
class RegionManager():
    """Manage a single Blender material and the mesh faces ("region") that use it.

    `storage_pointer` (kept as ``self.bsp``) is a project-side property-group
    object; from the usage below it carries at least ``default_color``,
    ``name``, ``material``, ``local_material_index`` and ``context_object``
    — TODO confirm the full contract against the declaring module.
    The sentinel value 2147483647 (INT_MAX) in ``local_material_index``
    means "material not found".
    """
    def __init__(self, storage_pointer, context=None):
        # Fall back to the global Blender context when none is supplied.
        self.context = bpy.context if context is None else context
        self.material_index = 0
        self.bsp = storage_pointer
        # Set color
        # Initialize the faces
        self.faces = []
        self.color = mathutils.Color(self.bsp.default_color)
        # Create the constituent material
        self.create_material()
        #else:
        #    self.bsp.material = material
        #    self.bsp.name = material.name
        #    self.bsp.dc = material.diffuse_color
        #    self.color = mathutils.Color((dc[0],dc[1],dc[2]))
        # Find all the faces
        self.update_faces_with_material()
        # Set the mathematical components
        self.get_face_mathematical_components()

    def create_material(self):
        """Create a material named after self.bsp, attach it to the active
        object, color it with self.color, and store it on self.bsp."""
        # create a new material
        material = bpy.data.materials.new(name=str(self.bsp.name))
        # add the material to the object
        self.context.active_object.data.materials.append(material)
        # set the color
        material.diffuse_color = (self.color.r, self.color.g, self.color.b, 1)
        self.bsp.material = material
        # return the object
        return self.bsp.material

    def get_material_index(self):
        """Refresh self.material_index from self.bsp and return it.

        Returns None (implicitly) when the index is the INT_MAX
        "not found" sentinel.
        """
        # get the index of the given material.
        self.material_index = self.bsp.local_material_index
        if self.material_index != 2147483647:
            return self.material_index
        else:
            pass
            #Attempt restore and if not, delete parent pair

    def apply_to_faces_by_face_index(self, face_indexes):
        """Assign this region's material to the polygons in *face_indexes*.

        Temporarily switches the current area to VIEW_3D and restores it
        afterwards — presumably required for the assignment to take effect;
        verify against Blender's context rules.
        """
        original_area = bpy.context.area.type
        bpy.context.area.type = 'VIEW_3D'
        self.get_material_index()
        for index in face_indexes:
            bpy.context.object.data.polygons[index].material_index = self.material_index
        bpy.context.area.type = original_area

    def update_faces_with_material(self):
        """Return the list of polygons currently using this region's material."""
        self.get_material_index()
        faces = []
        for face in bpy.context.active_object.data.polygons:
            if face.material_index == self.material_index:
                faces.append(face)
        return faces

    def get_face_mathematical_components(self):
        """Return (normals, centers) numpy arrays for this region's faces."""
        faces = self.update_faces_with_material()
        normals = []
        centers = []
        for f in faces:
            normals.append(f.normal)
            centers.append(f.center)
        normals = np.array(normals)
        centers = np.array(centers)
        return normals, centers

    def set_color_from_MU_object(self):
        """Push self.color onto the material's diffuse color (opaque alpha)."""
        self.bsp.material.diffuse_color = (self.color.r, self.color.g, self.color.b, 1.0)

    def check_existance(self, callback):
        """Invoke *callback* only when this region's material is missing from
        the active object's material slots (i.e. it needs re-creation)."""
        for i in range(len(bpy.context.active_object.material_slots)):
            if bpy.context.active_object.material_slots[i].name == self.bsp.material.name_full:
                return
        callback()

    def destroy(self):
        """Remove this region: re-point its faces at the base region's
        material index, pop its material slot, and delete the material
        datablock if it still exists."""
        cpi = bpy.context.active_object.cs_individual_VG_
        p = self.bsp.context_object.data.polygons
        bm_index = cpi.base_region.local_material_index
        # Resets the material patches to remove the
        l_index = self.bsp.local_material_index
        if l_index != 2147483647: # l_index is set to the largest index if the material is not found
            li = [None]*len(p)
            p.foreach_get('material_index', li)
            for i, l in enumerate(li):
                if l_index == l:
                    li[i] = bm_index
            p.foreach_set('material_index', li)
            bpy.context.active_object.data.materials.pop(index=l_index)
        # Removes the material from the data if it hasn't been already
        g_index = self.bsp.local_material_index
        if g_index != 2147483647:
            bpy.data.materials.remove(material=self.bsp.material)
        #cpi.material_regions.remove(self.bsp)

    def apply_all(self):
        """Assign this region's material to every polygon of the context object."""
        material_index = self.get_material_index()
        p = self.bsp.context_object.data.polygons
        li = [material_index]*len(p)
        p.foreach_set('material_index', li)
|
{"hexsha": "fb5a88823a513e2a997c506c9d9dcc11fa1623c4", "size": 4536, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/MaterialManagers/RegionManager.py", "max_stars_repo_name": "paigeco/VirtualGoniometer", "max_stars_repo_head_hexsha": "536e7e77fbb036ad8d777b42e751a0f3e80b8242", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-22T02:53:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-22T02:53:26.000Z", "max_issues_repo_path": "src/MaterialManagers/RegionManager.py", "max_issues_repo_name": "paigeco/VirtualGoniometer", "max_issues_repo_head_hexsha": "536e7e77fbb036ad8d777b42e751a0f3e80b8242", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:15:34.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-11T20:16:00.000Z", "max_forks_repo_path": "src/MaterialManagers/RegionManager.py", "max_forks_repo_name": "paigeco/VirtualGoniometer", "max_forks_repo_head_hexsha": "536e7e77fbb036ad8d777b42e751a0f3e80b8242", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.170212766, "max_line_length": 100, "alphanum_fraction": 0.5921516755, "include": true, "reason": "import numpy", "num_tokens": 934}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Convert an osc file to multiple csv files.
Accepts file.osc with contents:
____________________________________________________________________________________________________
osc_time |path |types |packets
e17775e1.21044f19 |/muse/eeg |ffffff |825.201477 825.201477 825.201477 825.201477 nan 825.201477
e17775e1.21173fb7 |/muse/acc |fff |0.000000 0.000000 0.000000
----------------------------------------------------------------------------------------------------
Parses and saves to multiple `path`.csv files.
The first column is the corresponding timestamp.
osc_time is in a weird format:
x.y where x and y are hex numbers.
x = seconds since 1 Jan 1900
y = 2**-32 fraction of a second
Subtract 2,208,988,800 seconds to get unix timestamp
"""
import os
from argparse import ArgumentParser
import numpy as np
from collections import defaultdict
# ---- command-line interface ----
# (renamed from the misspelled "eaxmple_usage"; the message itself is unchanged)
example_usage = 'Usage: python osc_to_csv.py file.osc -f folder_path'
parser = ArgumentParser(
    description='Convert an osc file to multiple csv files.',
    epilog=example_usage)
parser.add_argument(
    'file_name', type=str,
    help='osc file captured via: ```oscdump [port] > file.osc```')
parser.add_argument(
    '-f', '--folder', type=str, dest='folder_path',
    help='A folder to place files in.')
args = parser.parse_args()

file_name = args.file_name
folder_path = args.folder_path

# Maps OSC path -> list of [timestamp, v1, v2, ...] rows.
# (defaultdict(list) replaces the equivalent defaultdict(lambda: [], {}).)
path_dict = defaultdict(list)

debug = False
num = 0
with open(file_name, 'r') as osc_file:
    for line in osc_file.readlines():
        line = line.strip('\n')
        osc_time, path, types, packets = line.split(' ', 3)
        # osc_time is "x.y" with x, y in hex: x = seconds since 1 Jan 1900,
        # y = fraction of a second in units of 2**-32.
        x, y = osc_time.split('.')
        float_time = int(x, 16) + int(y, 16) / 2**32
        path_dict[path].append([float_time] + [float(pack) for pack in packets.split(' ')])
        # debug with smaller file size
        if debug:
            if num < 50:
                num = num + 1
            else:
                break

# Write one CSV per OSC path; slashes in the path become underscores.
for path, path_list in path_dict.items():
    path = path[1:] if path[0] == '/' else path
    file_name = path.replace('/', '_') + '.csv'
    if folder_path:
        if not os.path.isdir(folder_path):
            os.makedirs(folder_path)
        file_name = os.path.join(folder_path, file_name)
    np.savetxt(file_name, np.array(path_list), delimiter=',')
    print(f'Saved: {file_name}')
print('Done!')
|
{"hexsha": "ed82b20338bee752a8292e403159f5df02808018", "size": 2452, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/osc_to_csv.py", "max_stars_repo_name": "oishefarhan/OSC-recorder", "max_stars_repo_head_hexsha": "7379912b68f4e9e96edabe953e9090e0f00e14a4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-05-25T12:21:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-01T11:27:55.000Z", "max_issues_repo_path": "scripts/osc_to_csv.py", "max_issues_repo_name": "oishefarhan/OSC-recorder", "max_issues_repo_head_hexsha": "7379912b68f4e9e96edabe953e9090e0f00e14a4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-10-23T09:14:58.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-19T13:49:04.000Z", "max_forks_repo_path": "scripts/osc_to_csv.py", "max_forks_repo_name": "oishefarhan/OSC-recorder", "max_forks_repo_head_hexsha": "7379912b68f4e9e96edabe953e9090e0f00e14a4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.5890410959, "max_line_length": 100, "alphanum_fraction": 0.6219412724, "include": true, "reason": "import numpy", "num_tokens": 610}
|
#include <boost/atomic.hpp>
#include <iostream>
int main()
{
  // Print booleans as true/false rather than 1/0.
  std::cout << std::boolalpha;

  // Report whether atomic operations on each integer width are lock-free
  // on this platform.
  boost::atomic<short> s;
  std::cout << s.is_lock_free() << '\n';

  boost::atomic<int> i;
  std::cout << i.is_lock_free() << '\n';

  boost::atomic<long> l;
  std::cout << l.is_lock_free() << '\n';

  return 0;
}
|
{"hexsha": "f3de02ff07a030ec5cbddcd83cef0373c36eec44", "size": 303, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "Example/atomic_02/main.cpp", "max_stars_repo_name": "KwangjoJeong/Boost", "max_stars_repo_head_hexsha": "29c4e2422feded66a689e3aef73086c5cf95b6fe", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Example/atomic_02/main.cpp", "max_issues_repo_name": "KwangjoJeong/Boost", "max_issues_repo_head_hexsha": "29c4e2422feded66a689e3aef73086c5cf95b6fe", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Example/atomic_02/main.cpp", "max_forks_repo_name": "KwangjoJeong/Boost", "max_forks_repo_head_hexsha": "29c4e2422feded66a689e3aef73086c5cf95b6fe", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.9375, "max_line_length": 40, "alphanum_fraction": 0.5940594059, "num_tokens": 96}
|
#!/usr/bin/env python
import rospy
from acl_msgs.msg import ViconState
from gazebo_msgs.msg import ModelStates
from geometry_msgs.msg import PointStamped
from acl_msgs.msg import FloatStamped
import numpy as np
# Flight status codes published on the "flight_status" topic.
IN_PROGRESS = 0
SUCCESS = 1
FAIL = 2
class collisionDetector:
    """Monitor a simulated flight in Gazebo and publish its status.

    Subscribes to the vehicle pose, the Gazebo model list (for obstacle
    positions), and the global goal; publishes IN_PROGRESS / SUCCESS / FAIL
    on "flight_status".  Success = within 5 m of the goal; failure = within
    0.5 m of any obstacle.
    """
    def __init__(self):
        self.init = False
        self.collisionDetected = False
        self.reachedGoal = False
        self.gotObstacles = False
        self.gotGoal = False
        self.poseSub = rospy.Subscriber('/LQ02s/vicon', ViconState, self.poseCB)
        self.obstSub = rospy.Subscriber('/gazebo/model_states', ModelStates, self.obstCB)
        # BUG FIX: this subscriber was previously assigned to self.obstSub,
        # silently overwriting the obstacle-subscriber handle above.
        self.goalSub = rospy.Subscriber('/LQ02s/global_goal', PointStamped, self.goalCB)
        self.statusPub = rospy.Publisher("flight_status", FloatStamped, queue_size=1)
        self.obst = np.array([])
        self.init = True
        rospy.loginfo("Monitoring status...")

    def obstCB(self, data):
        """Cache (x, y) positions of all obstacle models, once.

        Models 0 and 1 are skipped — presumably ground plane and vehicle;
        confirm against the Gazebo world.
        """
        if self.init:
            if not self.gotObstacles:
                for i in range(2, len(data.name)):
                    if i == 2:
                        self.obst = np.array([data.pose[i].position.x, data.pose[i].position.y])
                    else:
                        self.obst = np.vstack((self.obst, np.array([data.pose[i].position.x, data.pose[i].position.y])))
                self.gotObstacles = True

    def poseCB(self, data):
        """On each pose update, re-check collision/goal and publish status."""
        if self.init:
            if self.gotObstacles and not self.collisionDetected:
                self.pose = np.array([data.pose.position.x, data.pose.position.y])
                self.checkForCollision()
                if self.gotGoal:
                    self.checkForGoal()
                self.publishStatus()

    def goalCB(self, data):
        """Latch the first received global goal as (x, y)."""
        if self.init:
            if not self.gotGoal:
                self.goal = np.array([data.point.x, data.point.y])
                self.gotGoal = True

    def publishStatus(self):
        """Publish the current status; reset latches after a terminal state."""
        if self.collisionDetected:
            self.status = FAIL
        elif self.reachedGoal:
            self.status = SUCCESS
        else:
            self.status = IN_PROGRESS
        flightStatus = FloatStamped()
        flightStatus.header.stamp = rospy.get_rostime()
        flightStatus.data = self.status
        self.statusPub.publish(flightStatus)
        # After SUCCESS/FAIL, re-arm so the next flight can be monitored.
        if self.status != IN_PROGRESS:
            self.collisionDetected = False
            self.reachedGoal = False
            self.gotObstacles = False

    def checkForGoal(self):
        """Mark the goal reached when within 5 m (2-D Euclidean distance)."""
        if np.linalg.norm(self.pose - self.goal) < 5:
            self.reachedGoal = True
            rospy.logwarn("Reached goal!")

    def checkForCollision(self):
        """Mark a collision when within 0.5 m of any cached obstacle."""
        for i in range(0, len(self.obst)):
            if np.linalg.norm(self.pose - self.obst[i]) < 0.5:
                self.collisionDetected = True
                rospy.logfatal("Collision detected!")
# Entry point: start the ROS node and spin until shutdown.
if __name__ == '__main__':
    rospy.init_node("collisionCheck")
    c = collisionDetector()
    rospy.spin()
|
{"hexsha": "bf13ee6a8373280e560a608bb881644671881c1e", "size": 2485, "ext": "py", "lang": "Python", "max_stars_repo_path": "acl_sim/scripts/flightStatus.py", "max_stars_repo_name": "betaBison/acl-gazebo", "max_stars_repo_head_hexsha": "d21792505bdaabc6d17a1eeb9da4134df7297b0f", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "acl_sim/scripts/flightStatus.py", "max_issues_repo_name": "betaBison/acl-gazebo", "max_issues_repo_head_hexsha": "d21792505bdaabc6d17a1eeb9da4134df7297b0f", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "acl_sim/scripts/flightStatus.py", "max_forks_repo_name": "betaBison/acl-gazebo", "max_forks_repo_head_hexsha": "d21792505bdaabc6d17a1eeb9da4134df7297b0f", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.6111111111, "max_line_length": 100, "alphanum_fraction": 0.7106639839, "include": true, "reason": "import numpy", "num_tokens": 675}
|
[STATEMENT]
lemma list_case_refine[refine]:
assumes "(li,l)\<in>\<langle>S\<rangle>list_rel"
assumes "fni \<le>\<Down>R fn"
assumes "\<And>xi x xsi xs. \<lbrakk> (xi,x)\<in>S; (xsi,xs)\<in>\<langle>S\<rangle>list_rel; li=xi#xsi; l=x#xs \<rbrakk> \<Longrightarrow> fci xi xsi \<le>\<Down>R (fc x xs)"
shows "(case li of [] \<Rightarrow> fni | xi#xsi \<Rightarrow> fci xi xsi) \<le> \<Down>R (case l of [] \<Rightarrow> fn | x#xs \<Rightarrow> fc x xs)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (case li of [] \<Rightarrow> fni | xi # xsi \<Rightarrow> fci xi xsi) \<le> \<Down> R (case l of [] \<Rightarrow> fn | x # xs \<Rightarrow> fc x xs)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
(li, l) \<in> \<langle>S\<rangle>list_rel
fni \<le> \<Down> R fn
\<lbrakk>(?xi, ?x) \<in> S; (?xsi, ?xs) \<in> \<langle>S\<rangle>list_rel; li = ?xi # ?xsi; l = ?x # ?xs\<rbrakk> \<Longrightarrow> fci ?xi ?xsi \<le> \<Down> R (fc ?x ?xs)
goal (1 subgoal):
1. (case li of [] \<Rightarrow> fni | xi # xsi \<Rightarrow> fci xi xsi) \<le> \<Down> R (case l of [] \<Rightarrow> fn | x # xs \<Rightarrow> fc x xs)
[PROOF STEP]
by (auto split: list.split)
|
{"llama_tokens": 498, "file": "Refine_Monadic_Refine_Basic", "length": 2}
|
export sortpermFast
"""
    sortpermFast(A) -> (perm, sorted)

Return the permutation `perm` that sorts `A` together with the sorted copy
(`sorted == A[perm]`).  `A` itself is left unmodified.
"""
function sortpermFast(A::Vector)
    len = length(A)
    perm = collect(1:len)
    sorted = copy(A)
    quicksort!(sorted, perm, 1, len)
    return perm, sorted   # sorted = A[perm]
end # function sortpermFast
#----------------------------------------------------
# Sort `A` ascending, permute `D` the same way, and drop duplicate `A`
# values keeping the entry with the smallest `D`.
# NOTE: unlike the 1-argument method, this sorts `A` IN PLACE before
# deduplicating.  Returns the deduplicated `(A, D)` pair.
function sortpermFast(A::Vector, D::Vector)
    # Sort A and permute D according to A.
    # For duplicate values in A, keep only values corresponding to
    # the SMALLEST D.
    n = length(A)
    if length(D) != n
        error("Lengths of A and D must be the same.")
    end
    ii = collect(1:n)
    quicksort!(A, ii, 1,n)
    D = D[ii]
    if allunique(A)
        return A, D
    end
    # Walk back over each run of equal A values and clear the keep flag on
    # every entry except the one with the smallest D.
    idxkeep = trues(n)
    for j = 2 : n
        for k = j-1 : -1 : 1
            if A[j] != A[k]
                break
            end
            if D[j] < D[k]
                idxkeep[k] = false
            else
                idxkeep[j] = false
            end
        end # k
    end # j
    A = A[idxkeep]
    D = D[idxkeep]
    return A, D
end # function sortpermFast
#----------------------------------------------------
# In-place quicksort of `A` over the index range `i:j`, applying the same
# swaps to the companion vector `order` so it tracks the permutation.
function quicksort!(A, order, i=1,j=length(A))
    # modified from:
    # http://rosettacode.org/wiki/Sorting_algorithms/Quicksort#Julia
    @inbounds begin
    if j > i
        if j - i <= 10
            # Insertion sort for small groups is faster than Quicksort
            InsertionSort!(A,order, i,j)
            return A
        end

        #pivot = A[rand(i:j)] # random element of A
        # Deterministic middle-element pivot.
        pivot = A[ div(i+j,2) ]
        left, right = i, j
        # Hoare-style partition: advance both cursors, swapping out-of-place
        # pairs (in both A and order) until the cursors cross.
        while left <= right
            while A[left] < pivot
                left += 1
            end
            while A[right] > pivot
                right -= 1
            end
            if left <= right
                A[left], A[right] = A[right], A[left]
                order[left], order[right] = order[right], order[left]
                left += 1
                right -= 1
            end
        end # left <= right

        # Recurse into both partitions.
        quicksort!(A,order, i, right)
        quicksort!(A,order, left,j)
    end # j > i
    end
    return A
end # function quicksort!
#----------------------------------------------------
# In-place insertion sort of `A` over `ii:jj`, mirroring every move in the
# companion vector `order`.  Used by quicksort! for small sub-ranges.
function InsertionSort!(A, order, ii=1, jj=length(A))
    @inbounds begin
    for i = ii+1 : jj
        j = i - 1
        temp = A[i]
        itemp = order[i]
        # Shift elements larger than temp one slot to the right, then drop
        # temp (and its order entry) into the gap.
        while true
            if j == ii-1
                break
            end
            if A[j] <= temp
                break
            end
            A[j+1] = A[j]
            order[j+1] = order[j]
            j -= 1
        end
        A[j+1] = temp
        order[j+1] = itemp
    end # i
    end
    return
end # function InsertionSort!
|
{"hexsha": "da4cce6990677c7a00052e6121dfd7611f065eec", "size": 2670, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Utils/sortpermFast.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/jInv.jl-3dacf901-f8cd-5544-86ed-7a705f85c244", "max_stars_repo_head_hexsha": "2e7305f231a29bd8e1e803b82cc2bc8e9b7a205a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 40, "max_stars_repo_stars_event_min_datetime": "2016-04-11T22:51:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-15T21:58:53.000Z", "max_issues_repo_path": "src/Utils/sortpermFast.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/jInv.jl-3dacf901-f8cd-5544-86ed-7a705f85c244", "max_issues_repo_head_hexsha": "2e7305f231a29bd8e1e803b82cc2bc8e9b7a205a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 52, "max_issues_repo_issues_event_min_datetime": "2016-03-23T18:24:31.000Z", "max_issues_repo_issues_event_max_datetime": "2020-02-08T15:52:47.000Z", "max_forks_repo_path": "src/Utils/sortpermFast.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/jInv.jl-3dacf901-f8cd-5544-86ed-7a705f85c244", "max_forks_repo_head_hexsha": "2e7305f231a29bd8e1e803b82cc2bc8e9b7a205a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 31, "max_forks_repo_forks_event_min_datetime": "2016-03-23T16:52:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-08T17:04:41.000Z", "avg_line_length": 20.6976744186, "max_line_length": 69, "alphanum_fraction": 0.4389513109, "num_tokens": 748}
|
#!/usr/bin/env python
import numpy as num
from e2rh import e2rh
from e2mr import e2mr
from e2dp import e2dp
from rh2mr import rh2mr
from rh2dp import rh2dp
from rh2e import rh2e
from mr2dp import mr2dp
from mr2e import mr2e
from mr2rh import mr2rh
from dp2e import dp2e
from dp2rh import dp2rh
from dp2mr import dp2mr
# Round-trip consistency checks for the humidity conversion helpers.
# Each section starts from one quantity (e, rh, mr, dp) at p = 1000 (mb,
# presumably) and t = 260 K, converts to all the others and back, printing
# every intermediate value.  Tconvert is the freezing point used to switch
# between water/ice saturation formulas — TODO confirm in the helpers.

# --- Case 1: start from vapor pressure e ---
p = num.array((1000.0,))
t = num.array((260.0,))
e = num.array((1.0,))
print(p[0])
print(t[0])
print(e[0])
Tconvert = 273.15
rh = e2rh(p,t,e,Tconvert)
print(rh[0][0])
mr = e2mr(p,e)
print(mr[0])
dp = e2dp(e,t,Tconvert)
print(dp[0])
mr = rh2mr(p,t,rh[0],Tconvert)
print(mr[0][0])
dp = rh2dp(p,t,rh[0],Tconvert)
print(dp[0][0])
e = rh2e(p,t,rh[0],Tconvert)
print(e[0][0])
dp = mr2dp(p,t,mr[0],Tconvert)
print(dp[0])
e = mr2e(p,mr[0])
print(e[0])
rh = mr2rh(p,t,mr[0],Tconvert)
print(rh[0][0])
e = dp2e(t,dp,Tconvert)
print(e[0])
rh = dp2rh(p,t,dp,Tconvert)
print(rh[0][0])
mr = dp2mr(p,t,dp,Tconvert)
print(mr[0])
print('OK')

# --- Case 2: start from relative humidity rh ---
p = num.array((1000.0,))
t = num.array((260.0,))
rh = num.array((50.0,))
print(p[0])
print(t[0])
print(rh[0])
Tconvert = 273.15
e = rh2e(p,t,rh,Tconvert)
print(e[0][0])
mr = rh2mr(p,t,rh,Tconvert)
print(mr[0][0])
dp = rh2dp(p,t,rh,Tconvert)
print(dp[0][0])
rh = e2rh(p,t,e[0],Tconvert)
print(rh[0][0])
mr = e2mr(p,e[0])
print(mr[0])
dp = e2dp(e[0],t,Tconvert)
print(dp[0])
dp = mr2dp(p,t,mr[0],Tconvert)
print(dp[0])
e = mr2e(p,mr[0])
print(e[0])
rh = mr2rh(p,t,mr[0],Tconvert)
print(rh[0][0])
e = dp2e(t,dp,Tconvert)
print(e[0])
rh = dp2rh(p,t,dp,Tconvert)
print(rh[0][0])
mr = dp2mr(p,t,dp,Tconvert)
print(mr[0])
print('OK')

# --- Case 3: start from mixing ratio mr ---
p = num.array((1000.0,))
t = num.array((260.0,))
mr = num.array((1.0,))
print(p[0])
print(t[0])
print(mr[0])
Tconvert = 273.15
dp = mr2dp(p,t,mr,Tconvert)
print(dp[0])
e = mr2e(p,mr)
print(e[0])
rh = mr2rh(p,t,mr,Tconvert)
print(rh[0][0])
e = rh2e(p,t,rh[0],Tconvert)
print(e[0][0])
mr = rh2mr(p,t,rh[0],Tconvert)
print(mr[0][0])
dp = rh2dp(p,t,rh[0],Tconvert)
print(dp[0][0])
rh = e2rh(p,t,e[0],Tconvert)
print(rh[0][0])
mr = e2mr(p,e[0])
print(mr[0])
dp = e2dp(e[0],t,Tconvert)
print(dp[0])
e = dp2e(t,dp,Tconvert)
print(e[0])
rh = dp2rh(p,t,dp,Tconvert)
print(rh[0][0])
mr = dp2mr(p,t,dp,Tconvert)
print(mr[0])
print('OK')

# --- Case 4: start from dew point dp ---
p = num.array((1000.0,))
t = num.array((260.0,))
dp = num.array((255.0,))
print(p[0])
print(t[0])
print(dp[0])
Tconvert = 273.15
e = dp2e(t,dp,Tconvert)
print(e[0])
rh = dp2rh(p,t,dp,Tconvert)
print(rh[0][0])
mr = dp2mr(p,t,dp,Tconvert)
print(mr[0])
dp = mr2dp(p,t,mr[0],Tconvert)
print(dp[0])
e = mr2e(p,mr[0])
print(e[0])
rh = mr2rh(p,t,mr[0],Tconvert)
print(rh[0][0])
e = rh2e(p,t,rh[0],Tconvert)
print(e[0][0])
mr = rh2mr(p,t,rh[0],Tconvert)
print(mr[0][0])
dp = rh2dp(p,t,rh[0],Tconvert)
print(dp[0][0])
rh = e2rh(p,t,e[0],Tconvert)
print(rh[0][0])
mr = e2mr(p,e[0])
print(mr[0])
dp = e2dp(e[0],t,Tconvert)
print(dp[0])
|
{"hexsha": "99b768b461d762040ad8540cb619aeeef2b19754", "size": 2987, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyuwphysret/common/pyfiles/atmos/testing123.py", "max_stars_repo_name": "graziano-giuliani/pythoncode", "max_stars_repo_head_hexsha": "4e505af5be3e32519cf4e62b85c101a63c885f77", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pyuwphysret/common/pyfiles/atmos/testing123.py", "max_issues_repo_name": "graziano-giuliani/pythoncode", "max_issues_repo_head_hexsha": "4e505af5be3e32519cf4e62b85c101a63c885f77", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyuwphysret/common/pyfiles/atmos/testing123.py", "max_forks_repo_name": "graziano-giuliani/pythoncode", "max_forks_repo_head_hexsha": "4e505af5be3e32519cf4e62b85c101a63c885f77", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-07-24T02:45:47.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-24T02:45:47.000Z", "avg_line_length": 20.0469798658, "max_line_length": 31, "alphanum_fraction": 0.5972547707, "include": true, "reason": "import numpy", "num_tokens": 1280}
|
import csv
import numpy as np
def getDataSource(data_path):
    """Read the marks and attendance columns from the CSV at *data_path*.

    Returns a dict with "x" (marks, as floats) and "y" (days present,
    as floats), in file order.
    """
    marks = []
    attendance = []
    with open(data_path) as source_file:
        for record in csv.DictReader(source_file):
            marks.append(float(record["Marks In Percentage"]))
            attendance.append(float(record["Days Present"]))
    return {"x": marks, "y": attendance}
def findCorrelation(datasource):
    """Print the Pearson correlation between the "x" and "y" series of *datasource*."""
    coeffs = np.corrcoef(datasource["x"], datasource["y"])
    # off-diagonal entry of the 2x2 correlation matrix is the x/y correlation
    print("correlation between percentage and days present is :", coeffs[0, 1])
def setup():
    """Entry point: load ./data.csv and report the marks/attendance correlation."""
    findCorrelation(getDataSource("./data.csv"))
setup()
|
{"hexsha": "d9caadbda896bf351bd26159211f07cd539636c0", "size": 760, "ext": "py", "lang": "Python", "max_stars_repo_path": "class1.py", "max_stars_repo_name": "khushmax/corelation", "max_stars_repo_head_hexsha": "40f89c6736d9b6cb93a6aa12931ed3b9d8d7715f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "class1.py", "max_issues_repo_name": "khushmax/corelation", "max_issues_repo_head_hexsha": "40f89c6736d9b6cb93a6aa12931ed3b9d8d7715f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "class1.py", "max_forks_repo_name": "khushmax/corelation", "max_forks_repo_head_hexsha": "40f89c6736d9b6cb93a6aa12931ed3b9d8d7715f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.3333333333, "max_line_length": 83, "alphanum_fraction": 0.6513157895, "include": true, "reason": "import numpy", "num_tokens": 165}
|
import cv2 as cv
import numpy as np
def grid(base, dimensions, images, scale=0.5):
    """Tile *images* into a (rows, cols) mosaic of cells sized like *base*.

    :param base: reference image; every cell takes its (scaled) shape
    :param dimensions: (rows, cols) of the mosaic
    :param images: list of images; grayscale ones are promoted to BGR,
        missing cells are filled with black
    :param scale: resize factor applied to base and every image
    :return: single stacked mosaic image
    :raises Exception: if rows*cols is smaller than len(images)
    """
    # Scale everything to the common cell size.
    base = cv.resize(base, (0, 0), fx=scale, fy=scale)
    images = [cv.resize(im, (0, 0), fx=scale, fy=scale) for im in images]
    # Promote grayscale images to 3 channels so all cells share one shape.
    for idx, im in enumerate(images):
        if len(im.shape) < 3:
            images[idx] = cv.cvtColor(im, cv.COLOR_GRAY2BGR)
    # Pad with black tiles up to rows*cols cells.
    missing = dimensions[0] * dimensions[1] - len(images)
    if missing < 0:
        raise Exception('Wrong grid dimensions')
    images.extend(np.zeros(base.shape, dtype=np.uint8) for _ in range(missing))
    cells = np.array(images).reshape(
        (dimensions[0], dimensions[1], base.shape[0], base.shape[1], base.shape[2]))
    # Stack each row horizontally, then all rows vertically.
    return np.vstack([np.hstack(row[:]) for row in cells])
def getBoxesOffset(im, boxes):
    """Return each box centre as normalized offsets from the image centre.

    :param im: image array; only im.shape[:2] (height, width) is used
    :param boxes: iterable of (x, y, w, h) pixel boxes
    :return: list of (x_off, y_off) tuples in [-1, 1], (0, 0) at the image
        centre; y_off grows upward (screen y grows downward, hence inverted)
    """
    # Fix: the original computed im_w/im_h but then re-read im.shape inline;
    # use the locals so the intent is explicit.
    height, width = im.shape[0], im.shape[1]
    offsets = []
    for x, y, w, h in boxes:
        xc = x + int(w / 2)
        yc = y + int(h / 2)
        offsets.append((2 * xc / width - 1, 1 - 2 * yc / height))
    return offsets
def drawBoxes(im, boxes):
    """Draw a light rectangle around every (x, y, w, h) box, in place.

    :param im: BGR image to draw on (mutated)
    :param boxes: iterable of (x, y, w, h) pixel boxes
    :return: the same image, for chaining
    """
    # Fix: the original also computed the box centre (xc, yc) per box but
    # never used it; the dead computation is removed.
    for x, y, w, h in boxes:
        cv.rectangle(im, (x, y), (x + w, y + h), (255, 250, 255), 2)
    return im
def drawBoxesPos(im, boxes):
    """Draw the image centre axes plus each box centre and its projections.

    :param im: BGR image to draw on (mutated)
    :param boxes: iterable of (x, y, w, h) pixel boxes
    :return: the same image, for chaining
    """
    h, w = im.shape[0], im.shape[1]
    axis_color = (0, 255, 0)
    mark_color = (130, 250, 255)
    # Horizontal and vertical centre lines.
    cv.line(im, (0, int(h / 2)), (w, int(h / 2)), axis_color, 2)
    cv.line(im, (int(w / 2), 0), (int(w / 2), h), axis_color, 2)
    for x, y, bw, bh in boxes:
        xc = x + int(bw / 2)
        yc = y + int(bh / 2)
        # Box centre, then its projection onto each centre axis.
        cv.circle(im, (xc, yc), 1, mark_color, 2)
        cv.line(im, (xc, yc), (int(w / 2), yc), mark_color, 2)
        cv.line(im, (xc, yc), (xc, int(h / 2)), mark_color, 2)
    return im
|
{"hexsha": "a76af3d89a37bee8ff9ecea615c843fbd67e9755", "size": 1775, "ext": "py", "lang": "Python", "max_stars_repo_path": "cv_recon/cv_tools.py", "max_stars_repo_name": "AguilarLagunasArturo/cam-recon-tools", "max_stars_repo_head_hexsha": "32866dddf855658833b8aded2288613f31ce0d98", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-27T09:51:39.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-27T09:51:39.000Z", "max_issues_repo_path": "cv_recon/cv_tools.py", "max_issues_repo_name": "AguilarLagunasArturo/cv-recon", "max_issues_repo_head_hexsha": "32866dddf855658833b8aded2288613f31ce0d98", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cv_recon/cv_tools.py", "max_forks_repo_name": "AguilarLagunasArturo/cv-recon", "max_forks_repo_head_hexsha": "32866dddf855658833b8aded2288613f31ce0d98", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.8939393939, "max_line_length": 99, "alphanum_fraction": 0.5954929577, "include": true, "reason": "import numpy", "num_tokens": 680}
|
(*
Copyright 2014 Cornell University
Copyright 2015 Cornell University
Copyright 2016 Cornell University
Copyright 2017 Cornell University
This file is part of VPrl (the Verified Nuprl project).
VPrl is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
VPrl is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with VPrl. If not, see <http://www.gnu.org/licenses/>.
Websites: http://nuprl.org/html/verification/
http://nuprl.org/html/Nuprl2Coq
https://github.com/vrahli/NuprlInCoq
Authors: Abhishek Anand & Vincent Rahli
*)
Require Export sequents2.
Require Export sequents_lib.
Require Export rules_useful.
Require Export per_props_equality.
Require Export per_props_union.
Require Export per_props_cequiv.
Require Export per_props_squash.
Require Export subst_tacs.
Require Export sequents_equality.
Require Export sequents_tacs2.
(* A disjunction [mkc_or A B] is inhabited iff both [A] and [B] are types
   and at least one of them is inhabited; witnesses are wrapped in
   [mkc_inl]/[mkc_inr]. *)
Lemma inhabited_mkc_or {o} :
  forall lib (A B : @CTerm o),
    inhabited_type lib (mkc_or A B)
    <=> (type lib A
         # type lib B
         # (inhabited_type lib A {+} inhabited_type lib B)).
Proof.
  introv.
  unfold inhabited_type.
  split; introv h; exrepnd.
  - apply equality_mkc_or in h0; exrepnd; dands; auto.
    repndors; exrepnd.
    + left; exists a1.
      apply equality_refl in h0; auto.
    + right; exists b1.
      apply equality_refl in h0; auto.
  - repndors; exrepnd.
    + exists (mkc_inl t).
      apply equality_mkc_or; dands; auto.
      left.
      exists t t; dands; auto; spcast;
        apply computes_to_valc_refl; eauto 3 with slow.
    + exists (mkc_inr t).
      apply equality_mkc_or; dands; auto.
      right.
      exists t t; dands; auto; spcast;
        apply computes_to_valc_refl; eauto 3 with slow.
Qed.
(* Equality in [mkc_base] is computational equivalence, which any type
   respects: so a Base-equal pair with one member in [T] is equal in [T]. *)
Lemma equality_base_implies_equality {o} :
  forall lib (x y T : @CTerm o),
    type lib T -> equality lib x y mkc_base -> member lib x T -> equality lib x y T.
Proof.
  intros.
  rw @equality_in_base_iff in H0.
  spcast.
  eapply equality_respects_cequivc_right; eauto.
Qed.
(* The next four lemmas show that two equality types are equal in [U(i)]
   when their type components are equal in the universe and the term
   components are pairwise equal either in the type itself or in Base
   (the four combinations: equal/base, equal/equal, base/equal, base/base). *)

(* a1=b1 holds in A, a2=b2 only in Base (i.e. a2 ~ b2 computationally). *)
Lemma equality_mkc_equality_in_uni_equal_base {o} :
  forall (lib : library) (i : nat) (a1 a2 b1 b2 A B : @CTerm o),
    equality lib A B (mkc_uni i) ->
    equality lib a1 b1 A ->
    equality lib a2 b2 mkc_base ->
    equality lib (mkc_equality a1 a2 A) (mkc_equality b1 b2 B) (mkc_uni i) .
Proof. intros. dup H0 as h1. dup H0 as h2.
  apply equality_refl in h1. apply equality_sym in h2. apply equality_refl in h2.
  dup H1 as e. apply equality_in_base in e. spcast.
  apply equality_mkc_equality2_sp_in_uni; dands; sp; try (split; intro).
  - pose proof (equality_respects_cequivc lib a2 b2 A e H2).
    apply equality_sym in H3. apply equality_refl in H3. auto.
  - apply cequivc_sym in e.
    pose proof (equality_respects_cequivc lib b2 a2 A e H2).
    apply equality_sym in H3. apply equality_refl in H3. auto.
  - apply equality_respects_cequivc; auto.
Qed.

(* Both pairs equal in A itself — the straightforward case. *)
Lemma equality_mkc_equality_in_uni_equal_equal {o} :
  forall (lib : library) (i : nat) (a1 a2 b1 b2 A B : @CTerm o),
    equality lib A B (mkc_uni i) ->
    equality lib a1 b1 A ->
    equality lib a2 b2 A ->
    equality lib (mkc_equality a1 a2 A) (mkc_equality b1 b2 B) (mkc_uni i) .
Proof. intros. dup H0 as h1. dup H0 as h2.
  apply equality_refl in h1. apply equality_sym in h2. apply equality_refl in h2.
  dup H1 as r1. dup H1 as r2.
  apply equality_refl in r1. apply equality_sym in r2. apply equality_refl in r2.
  apply equality_mkc_equality2_sp_in_uni; dands; sp; try (split; intro).
Qed.

(* Mirror of equal_base: a1=b1 only in Base, a2=b2 in A. *)
Lemma equality_mkc_equality_in_uni_base_equal {o} :
  forall (lib : library) (i : nat) (a1 a2 b1 b2 A B : @CTerm o),
    equality lib A B (mkc_uni i) ->
    equality lib a1 b1 mkc_base ->
    equality lib a2 b2 A ->
    equality lib (mkc_equality a1 a2 A) (mkc_equality b1 b2 B) (mkc_uni i) .
Proof. intros. dup H1 as h1. dup H1 as h2.
  apply equality_refl in h1. apply equality_sym in h2. apply equality_refl in h2.
  dup H0 as e. apply equality_in_base in e. spcast.
  apply equality_mkc_equality2_sp_in_uni; dands; sp; try (split; intro).
  - pose proof (equality_respects_cequivc lib a1 b1 A e H2).
    apply equality_sym in H3. apply equality_refl in H3. auto.
  - apply cequivc_sym in e.
    pose proof (equality_respects_cequivc lib b1 a1 A e H2).
    apply equality_sym in H3. apply equality_refl in H3. auto.
  - apply equality_respects_cequivc; auto.
Qed.

(* Both pairs equal only in Base: lift both cequivc facts into A. *)
Lemma equality_mkc_equality_in_uni_base_base {o} :
  forall (lib : library) (i : nat) (a1 a2 b1 b2 A B : @CTerm o),
    equality lib A B (mkc_uni i) ->
    equality lib a1 b1 mkc_base ->
    equality lib a2 b2 mkc_base ->
    equality lib (mkc_equality a1 a2 A) (mkc_equality b1 b2 B) (mkc_uni i) .
Proof. intros.
  apply equality_in_base in H0. spcast.
  apply equality_in_base in H1. spcast.
  dup H0 as e0. apply cequivc_sym in e0.
  dup H1 as e1. apply cequivc_sym in e1.
  pose proof (equality_respects_cequivc lib a1 b1 A H0) as X0.
  pose proof (equality_respects_cequivc lib b1 a1 A e0) as Y0.
  pose proof (equality_respects_cequivc lib a2 b2 A H1) as X1.
  pose proof (equality_respects_cequivc lib b2 a2 A e1) as Y1.
  apply equality_mkc_equality2_sp_in_uni; dands; sp; try (split; intro).
  - apply X0 in H2.
    apply equality_sym in H2. apply equality_refl in H2. auto.
  - apply Y0 in H2.
    apply equality_sym in H2. apply equality_refl in H2. auto.
  - apply X1 in H2.
    apply equality_sym in H2. apply equality_refl in H2. auto.
  - apply Y1 in H2.
    apply equality_sym in H2. apply equality_refl in H2. auto.
Qed.
(* Conclusion sequent of the equalityEquality rules:
   H |- (a1 = a2 in A) = (b1 = b2 in B) in U(i), with axiom extract. *)
Definition rule_equality_equality_concl {o} (H : @bhyps o) a1 a2 b1 b2 A B i :=
  mk_baresequent
    H
    (mk_conclax (mk_equality
                   (mk_equality a1 a2 A)
                   (mk_equality b1 b2 B)
                   (mk_uni i))).

(* Subgoal: the two type components are equal in the universe. *)
Definition rule_equality_equality_hyp1 {o} (H : @bhyps o) A B i e :=
  mk_baresequent H (mk_concl (mk_equality A B (mk_uni i)) e).

(* Subgoal: two term components are equal in the type A. *)
Definition rule_equality_equality_hyp2 {o} (H : @bhyps o) a b A e :=
  mk_baresequent H (mk_concl (mk_equality a b A) e).

(**
<<
   H |- (a1 = a2 in A) = (b1 = b2 in B) in U(i)
     By equalityEquality
     H |- A = B in U(i)
     H |- a1 = b1 in A
     H |- a2 = b2 in A
>>
*)
Definition rule_equality_equality {o}
           (H : @barehypotheses o)
           (A B a1 a2 b1 b2 e1 e2 e3 : NTerm)
           (i : nat) :=
  mk_rule
    (rule_equality_equality_concl H a1 a2 b1 b2 A B i)
    [ rule_equality_equality_hyp1 H A B i e1,
      rule_equality_equality_hyp2 H a1 b1 A e2,
      rule_equality_equality_hyp2 H a2 b2 A e3
    ]
    [].
(* Validity (rule_true3) of equalityEquality: discharge well-formedness,
   reduce the conclusion with [teq_and_eq_if_equality], then combine the
   three hypotheses via [equality_mkc_equality_in_uni_equal_equal]. *)
Lemma rule_equality_equality_true3 {o} :
  forall lib (H : @barehypotheses o)
         (A B a1 a2 b1 b2 e1 e2 e3 : NTerm)
         (i : nat),
    rule_true3 lib (rule_equality_equality H A B a1 a2 b1 b2 e1 e2 e3 i).
Proof.
  unfold rule_equality_equality, rule_true3, wf_bseq, closed_type_baresequent, closed_extract_baresequent; simpl.
  intros.
  clear cargs.
  (* We prove the well-formedness of things *)
  destseq; allsimpl.
  dLin_hyp.
  destruct Hyp as [wf1 hyp1].
  destruct Hyp0 as [wf2 hyp2].
  destruct Hyp1 as [wf3 hyp3].
  destseq; allsimpl; proof_irr; GC.
  match goal with
  | [ |- sequent_true2 _ ?s ] => assert (wf_csequent s) as wfc
  end.
  {
    clear hyp1 hyp2 hyp3.
    unfold wf_csequent, wf_sequent, wf_concl; simpl.
    dands; auto.
    - apply wf_axiom.
    - unfold closed_extract; simpl; auto.
  }
  exists wfc.
  destseq; simpl in *.
  (* We prove some simple facts on our sequents *)
  (* done with proving these simple facts *)
  vr_seq_true.
  lsubst_tac.
  rw <- @member_equality_iff.
  pose proof (teq_and_eq_if_equality
                lib (mk_uni i) (mk_equality a1 a2 A) (mk_equality b1 b2 B)
                s1 s2 H wT w1 w2 c1 c6 c2 c7 cT cT2
                eqh sim) as eqp.
  lsubst_tac.
  repeat (autodimp eqp hyp);[apply tequality_mkc_uni|].
  clear dependent s1.
  clear dependent s2.
  introv hf sim.
  lsubst_tac.
  (* Instantiate the three hypotheses on the same pair of substitutions. *)
  vr_seq_true in hyp1.
  pose proof (hyp1 s1 s2 hf sim) as X; clear hyp1; exrepnd.
  lsubst_tac.
  vr_seq_true in hyp2.
  pose proof (hyp2 s1 s2 hf sim) as Y; clear hyp2; exrepnd.
  lsubst_tac.
  vr_seq_true in hyp3.
  pose proof (hyp3 s1 s2 hf sim) as Z; clear hyp3; exrepnd.
  lsubst_tac.
  rw @tequality_mkc_equality in X0;
    rw @tequality_mkc_equality in Y0;
    rw @tequality_mkc_equality in Z0.
  apply equality_in_mkc_equality in X1;
    apply equality_in_mkc_equality in Y1;
    apply equality_in_mkc_equality in Z1.
  repnd.
  dup Z6 as ZZ1. apply equality_refl in ZZ1.
  dup Z6 as ZZ2. apply equality_sym in ZZ2. apply equality_refl in ZZ2.
  dimp Z4. auto. clear Z4.
  dimp Z0. auto. clear Z0.
  dup Y6 as YY1. apply equality_refl in YY1.
  dup Y6 as YY2. apply equality_sym in YY2. apply equality_refl in YY2.
  dimp Y4. auto. clear Y4.
  dimp Y0. auto. clear Y0.
  apply @equality_mkc_equality_in_uni_equal_equal.
  - eapply equality_trans. exact X6. apply X0.
    apply equality_sym in X6; apply equality_refl in X6; auto.
  - eapply equality_trans. exact Y6. auto.
  - eapply equality_trans. exact Z6. auto.
Qed.

(* Lift the rule_true3 result to the extended-library notion of validity. *)
Lemma rule_equality_equality_true_ext_lib {o} :
  forall lib (H : @barehypotheses o)
         (A B a1 a2 b1 b2 e1 e2 e3 : NTerm)
         (i : nat),
    rule_true_ext_lib lib (rule_equality_equality H A B a1 a2 b1 b2 e1 e2 e3 i).
Proof.
  introv.
  apply rule_true3_implies_rule_true_ext_lib.
  introv.
  apply rule_equality_equality_true3.
Qed.

(* Well-formedness of the rule's subgoals (wf_rule2): each of the three
   hypotheses is well-formed and closed whenever the conclusion is. *)
Lemma rule_equality_equality_wf2 {o} :
  forall (H : @barehypotheses o)
         (A B a1 a2 b1 b2 e1 e2 e3 : NTerm)
         (i : nat),
    wf_rule2 (rule_equality_equality H A B a1 a2 b1 b2 e1 e2 e3 i).
Proof.
  introv wf m; allsimpl.
  repndors; subst; tcsp;
    allunfold @wf_bseq; allsimpl; repnd; dands; auto.
  - allrw <- @wf_equality_iff; tcsp.
  - unfold closed_type_baresequent in *; simpl in *.
    unfold closed_type in *; simpl in *.
    allrw @covered_equality; tcsp.
  - allrw <- @wf_equality_iff; tcsp.
  - unfold closed_type_baresequent in *; simpl in *.
    unfold closed_type in *; simpl in *.
    allrw @covered_equality; tcsp.
  - allrw <- @wf_equality_iff; tcsp.
  - unfold closed_type_baresequent in *; simpl in *.
    unfold closed_type in *; simpl in *.
    allrw @covered_equality; tcsp.
Qed.
(* Subgoal variant: two term components equal in Base instead of in A. *)
Definition rule_equality_equality_hyp3 {o} (H : @bhyps o) a b e :=
  mk_baresequent H (mk_concl (mk_equality a b mk_base) e).

(**
<<
   H |- (a1 = a2 in A) = (b1 = b2 in B) in U(i)
     By equalityEqualityBase
     H |- A = B in U(i)
     H |- a1 = b1 in Base
     H |- a2 = b2 in Base
>>
*)
Definition rule_equality_equality_base {o}
           (H : @barehypotheses o)
           (A B a1 a2 b1 b2 e1 e2 e3 : NTerm)
           (i : nat) :=
  mk_rule
    (rule_equality_equality_concl H a1 a2 b1 b2 A B i)
    [ rule_equality_equality_hyp1 H A B i e1,
      rule_equality_equality_hyp3 H a1 b1 e2,
      rule_equality_equality_hyp3 H a2 b2 e3
    ]
    [].
(* Validity of equalityEqualityBase: same skeleton as the main rule, but
   both term hypotheses live in Base, so the final step uses
   [equality_mkc_equality_in_uni_base_base]. *)
Lemma rule_equality_equality_base_true3 {o} :
  forall lib (H : @barehypotheses o)
         (A B a1 a2 b1 b2 e1 e2 e3 : NTerm)
         (i : nat),
    rule_true3 lib (rule_equality_equality_base H A B a1 a2 b1 b2 e1 e2 e3 i).
Proof.
  unfold rule_equality_equality_base, rule_true3, wf_bseq, closed_type_baresequent, closed_extract_baresequent; simpl.
  intros.
  clear cargs.
  (* We prove the well-formedness of things *)
  destseq; allsimpl.
  dLin_hyp.
  destruct Hyp as [wf1 hyp1].
  destruct Hyp0 as [wf2 hyp2].
  destruct Hyp1 as [wf3 hyp3].
  destseq; allsimpl; proof_irr; GC.
  match goal with
  | [ |- sequent_true2 _ ?s ] => assert (wf_csequent s) as wfc
  end.
  {
    clear hyp1 hyp2 hyp3.
    unfold wf_csequent, wf_sequent, wf_concl; simpl.
    dands; auto.
    - apply wf_axiom.
    - unfold closed_extract; simpl; auto.
  }
  exists wfc.
  destseq; simpl in *.
  (* We prove some simple facts on our sequents *)
  (* done with proving these simple facts *)
  vr_seq_true.
  lsubst_tac.
  rw <- @member_equality_iff.
  pose proof (teq_and_eq_if_equality
                lib (mk_uni i) (mk_equality a1 a2 A) (mk_equality b1 b2 B)
                s1 s2 H wT w1 w2 c1 c6 c2 c7 cT cT2
                eqh sim) as eqp.
  lsubst_tac.
  repeat (autodimp eqp hyp);[apply tequality_mkc_uni|].
  clear dependent s1.
  clear dependent s2.
  introv hf sim.
  lsubst_tac.
  vr_seq_true in hyp1.
  pose proof (hyp1 s1 s2 hf sim) as X; clear hyp1; exrepnd.
  lsubst_tac.
  vr_seq_true in hyp2.
  pose proof (hyp2 s1 s2 hf sim) as Y; clear hyp2; exrepnd.
  lsubst_tac.
  vr_seq_true in hyp3.
  pose proof (hyp3 s1 s2 hf sim) as Z; clear hyp3; exrepnd.
  lsubst_tac.
  rw @tequality_mkc_equality in X0;
    rw @tequality_mkc_equality in Y0;
    rw @tequality_mkc_equality in Z0.
  apply equality_in_mkc_equality in X1;
    apply equality_in_mkc_equality in Y1;
    apply equality_in_mkc_equality in Z1.
  repnd.
  (* Membership side conditions for the Base hypotheses are trivial. *)
  dimp Z0. apply member_base. clear Z0 Z5.
  dimp Z4. apply member_base. clear Z3 Z4.
  dup Y6 as YY1. apply equality_refl in YY1.
  dup Y6 as YY2. apply equality_sym in YY2. apply equality_refl in YY2.
  dimp Y4. auto. clear Y4.
  dimp Y0. auto. clear Y0.
  apply @equality_mkc_equality_in_uni_base_base.
  - eapply equality_trans. exact X6. apply X0.
    apply equality_sym in X6; apply equality_refl in X6; auto.
  - eapply equality_trans. exact Y6. auto.
  - eapply equality_trans. exact Z6. auto.
Qed.

(* Extended-library validity, via the rule_true3 result above. *)
Lemma rule_equality_equality_base_true_ext_lib {o} :
  forall lib (H : @barehypotheses o)
         (A B a1 a2 b1 b2 e1 e2 e3 : NTerm)
         (i : nat),
    rule_true_ext_lib lib (rule_equality_equality_base H A B a1 a2 b1 b2 e1 e2 e3 i).
Proof.
  introv.
  apply rule_true3_implies_rule_true_ext_lib.
  introv.
  apply rule_equality_equality_base_true3.
Qed.

(* Well-formedness of the Base variant's subgoals. *)
Lemma rule_equality_equality_base_wf2 {o} :
  forall (H : @barehypotheses o)
         (A B a1 a2 b1 b2 e1 e2 e3 : NTerm)
         (i : nat),
    wf_rule2 (rule_equality_equality_base H A B a1 a2 b1 b2 e1 e2 e3 i).
Proof.
  introv wf m; allsimpl.
  repndors; subst; tcsp;
    allunfold @wf_bseq; allsimpl; repnd; dands; auto.
  - allrw <- @wf_equality_iff; tcsp.
  - unfold closed_type_baresequent in *; simpl in *.
    unfold closed_type in *; simpl in *.
    allrw @covered_equality; tcsp.
  - allrw <- @wf_equality_iff; tcsp.
  - unfold closed_type_baresequent in *; simpl in *.
    unfold closed_type in *; simpl in *.
    allrw @covered_equality; tcsp.
  - allrw <- @wf_equality_iff; tcsp.
  - unfold closed_type_baresequent in *; simpl in *.
    unfold closed_type in *; simpl in *.
    allrw @covered_equality; tcsp.
Qed.
(**
<<
   H |- (a1 = a2 in A) = (b1 = b2 in B) in U(i)
     By equalityEqualityBase1
     H |- A = B in U(i)
     H |- a1 = b1 in Base
     H |- a2 = b2 in A
>>
*)
(* Mixed variant: first pair in Base, second pair in A. *)
Definition rule_equality_equality_base1 {o}
           (H : @barehypotheses o)
           (A B a1 a2 b1 b2 e1 e2 e3 : NTerm)
           (i : nat) :=
  mk_rule
    (rule_equality_equality_concl H a1 a2 b1 b2 A B i)
    [ rule_equality_equality_hyp1 H A B i e1,
      rule_equality_equality_hyp3 H a1 b1 e2,
      rule_equality_equality_hyp2 H a2 b2 A e3
    ]
    [].
(* Validity of equalityEqualityBase1: same skeleton; the Base hypothesis is
   the first pair, so the final step uses
   [equality_mkc_equality_in_uni_base_equal]. *)
Lemma rule_equality_equality_base1_true3 {o} :
  forall lib (H : @barehypotheses o)
         (A B a1 a2 b1 b2 e1 e2 e3 : NTerm)
         (i : nat),
    rule_true3 lib (rule_equality_equality_base1 H A B a1 a2 b1 b2 e1 e2 e3 i).
Proof.
  unfold rule_equality_equality_base1, rule_true3, wf_bseq, closed_type_baresequent, closed_extract_baresequent; simpl.
  intros.
  clear cargs.
  (* We prove the well-formedness of things *)
  destseq; allsimpl.
  dLin_hyp.
  destruct Hyp as [wf1 hyp1].
  destruct Hyp0 as [wf2 hyp2].
  destruct Hyp1 as [wf3 hyp3].
  destseq; allsimpl; proof_irr; GC.
  match goal with
  | [ |- sequent_true2 _ ?s ] => assert (wf_csequent s) as wfc
  end.
  {
    clear hyp1 hyp2 hyp3.
    unfold wf_csequent, wf_sequent, wf_concl; simpl.
    dands; auto.
    - apply wf_axiom.
    - unfold closed_extract; simpl; auto.
  }
  exists wfc.
  destseq; simpl in *.
  (* We prove some simple facts on our sequents *)
  (* done with proving these simple facts *)
  vr_seq_true.
  lsubst_tac.
  rw <- @member_equality_iff.
  pose proof (teq_and_eq_if_equality
                lib (mk_uni i) (mk_equality a1 a2 A) (mk_equality b1 b2 B)
                s1 s2 H wT w1 w2 c1 c6 c2 c7 cT cT2
                eqh sim) as eqp.
  lsubst_tac.
  repeat (autodimp eqp hyp);[apply tequality_mkc_uni|].
  clear dependent s1.
  clear dependent s2.
  introv hf sim.
  lsubst_tac.
  vr_seq_true in hyp1.
  pose proof (hyp1 s1 s2 hf sim) as X; clear hyp1; exrepnd.
  lsubst_tac.
  vr_seq_true in hyp2.
  pose proof (hyp2 s1 s2 hf sim) as Y; clear hyp2; exrepnd.
  lsubst_tac.
  vr_seq_true in hyp3.
  pose proof (hyp3 s1 s2 hf sim) as Z; clear hyp3; exrepnd.
  lsubst_tac.
  rw @tequality_mkc_equality in X0;
    rw @tequality_mkc_equality in Y0;
    rw @tequality_mkc_equality in Z0.
  apply equality_in_mkc_equality in X1;
    apply equality_in_mkc_equality in Y1;
    apply equality_in_mkc_equality in Z1.
  repnd.
  (* Here the first pair's side conditions come for free from Base. *)
  dimp Y0. apply member_base. clear Y0 Y5.
  dimp Y4. apply member_base. clear Y3 Y4.
  dup Z6 as ZZ1. apply equality_refl in ZZ1.
  dup Z6 as ZZ2. apply equality_sym in ZZ2. apply equality_refl in ZZ2.
  dimp Z4. auto. clear Z4.
  dimp Z0. auto. clear Z0.
  apply @equality_mkc_equality_in_uni_base_equal.
  - eapply equality_trans. exact X6. apply X0.
    apply equality_sym in X6; apply equality_refl in X6; auto.
  - eapply equality_trans. exact Y6. auto.
  - eapply equality_trans. exact Z6. auto.
Qed.

(* Extended-library validity for the Base1 variant. *)
Lemma rule_equality_equality_base1_true_ext_lib {o} :
  forall lib (H : @barehypotheses o)
         (A B a1 a2 b1 b2 e1 e2 e3 : NTerm)
         (i : nat),
    rule_true_ext_lib lib (rule_equality_equality_base1 H A B a1 a2 b1 b2 e1 e2 e3 i).
Proof.
  introv.
  apply rule_true3_implies_rule_true_ext_lib.
  introv.
  apply rule_equality_equality_base1_true3.
Qed.

(* Well-formedness of the Base1 variant's subgoals. *)
Lemma rule_equality_equality_base1_wf2 {o} :
  forall (H : @barehypotheses o)
         (A B a1 a2 b1 b2 e1 e2 e3 : NTerm)
         (i : nat),
    wf_rule2 (rule_equality_equality_base1 H A B a1 a2 b1 b2 e1 e2 e3 i).
Proof.
  introv wf m; allsimpl.
  repndors; subst; tcsp;
    allunfold @wf_bseq; allsimpl; repnd; dands; auto.
  - allrw <- @wf_equality_iff; tcsp.
  - unfold closed_type_baresequent in *; simpl in *.
    unfold closed_type in *; simpl in *.
    allrw @covered_equality; tcsp.
  - allrw <- @wf_equality_iff; tcsp.
  - unfold closed_type_baresequent in *; simpl in *.
    unfold closed_type in *; simpl in *.
    allrw @covered_equality; tcsp.
  - allrw <- @wf_equality_iff; tcsp.
  - unfold closed_type_baresequent in *; simpl in *.
    unfold closed_type in *; simpl in *.
    allrw @covered_equality; tcsp.
Qed.
(**
<<
   H |- (a1 = a2 in A) = (b1 = b2 in B) in U(i)
     By equalityEqualityBase2
     H |- A = B in U(i)
     H |- a1 = b1 in A
     H |- a2 = b2 in Base
>>
*)
(* Mixed variant: first pair in A, second pair in Base. *)
Definition rule_equality_equality_base2 {o}
           (H : @barehypotheses o)
           (A B a1 a2 b1 b2 e1 e2 e3 : NTerm)
           (i : nat) :=
  mk_rule
    (rule_equality_equality_concl H a1 a2 b1 b2 A B i)
    [ rule_equality_equality_hyp1 H A B i e1,
      rule_equality_equality_hyp2 H a1 b1 A e2,
      rule_equality_equality_hyp3 H a2 b2 e3
    ]
    [].
(* Validity of equalityEqualityBase2: the Base hypothesis is the second
   pair, so the final step uses [equality_mkc_equality_in_uni_equal_base],
   with [equality_base_implies_equality] to move the Base fact into A. *)
Lemma rule_equality_equality_base2_true3 {o} :
  forall lib (H : @barehypotheses o)
         (A B a1 a2 b1 b2 e1 e2 e3 : NTerm)
         (i : nat),
    rule_true3 lib (rule_equality_equality_base2 H A B a1 a2 b1 b2 e1 e2 e3 i).
Proof.
  unfold rule_equality_equality_base2, rule_true3, wf_bseq, closed_type_baresequent, closed_extract_baresequent; simpl.
  intros.
  clear cargs.
  (* We prove the well-formedness of things *)
  destseq; allsimpl.
  dLin_hyp.
  destruct Hyp as [wf1 hyp1].
  destruct Hyp0 as [wf2 hyp2].
  destruct Hyp1 as [wf3 hyp3].
  destseq; allsimpl; proof_irr; GC.
  match goal with
  | [ |- sequent_true2 _ ?s ] => assert (wf_csequent s) as wfc
  end.
  {
    clear hyp1 hyp2 hyp3.
    unfold wf_csequent, wf_sequent, wf_concl; simpl.
    dands; auto.
    - apply wf_axiom.
    - unfold closed_extract; simpl; auto.
  }
  exists wfc.
  destseq; simpl in *.
  (* We prove some simple facts on our sequents *)
  (* done with proving these simple facts *)
  vr_seq_true.
  lsubst_tac.
  rw <- @member_equality_iff.
  pose proof (teq_and_eq_if_equality
                lib (mk_uni i) (mk_equality a1 a2 A) (mk_equality b1 b2 B)
                s1 s2 H wT w1 w2 c1 c6 c2 c7 cT cT2
                eqh sim) as eqp.
  lsubst_tac.
  repeat (autodimp eqp hyp);[apply tequality_mkc_uni|].
  clear dependent s1.
  clear dependent s2.
  introv hf sim.
  lsubst_tac.
  vr_seq_true in hyp1.
  pose proof (hyp1 s1 s2 hf sim) as X; clear hyp1; exrepnd.
  lsubst_tac.
  vr_seq_true in hyp2.
  pose proof (hyp2 s1 s2 hf sim) as Y; clear hyp2; exrepnd.
  lsubst_tac.
  vr_seq_true in hyp3.
  pose proof (hyp3 s1 s2 hf sim) as Z; clear hyp3; exrepnd.
  lsubst_tac.
  rw @tequality_mkc_equality in X0;
    rw @tequality_mkc_equality in Y0;
    rw @tequality_mkc_equality in Z0.
  apply equality_in_mkc_equality in X1;
    apply equality_in_mkc_equality in Y1;
    apply equality_in_mkc_equality in Z1.
  repnd.
  (* Side conditions of the second (Base) pair come for free. *)
  dimp Z0. apply member_base. clear Z0 Z5.
  dimp Z4. apply member_base. clear Z3 Z4.
  dup Y6 as YY1. apply equality_refl in YY1.
  dup Y6 as YY2. apply equality_sym in YY2. apply equality_refl in YY2.
  dimp Y4. auto. clear Y4.
  dimp Y0. auto. clear Y0.
  apply @equality_mkc_equality_in_uni_equal_base.
  - eapply equality_trans. exact X6. apply X0.
    apply equality_sym in X6; apply equality_refl in X6; auto.
  - eapply equality_trans. exact Y6. auto.
  - apply equality_base_implies_equality. apply equality_refl in X6; auto.
    eapply equality_trans. exact Z6. auto.
    apply member_base.
Qed.

(* Extended-library validity for the Base2 variant. *)
Lemma rule_equality_equality_base2_true_ext_lib {o} :
  forall lib (H : @barehypotheses o)
         (A B a1 a2 b1 b2 e1 e2 e3 : NTerm)
         (i : nat),
    rule_true_ext_lib lib (rule_equality_equality_base2 H A B a1 a2 b1 b2 e1 e2 e3 i).
Proof.
  introv.
  apply rule_true3_implies_rule_true_ext_lib.
  introv.
  apply rule_equality_equality_base2_true3.
Qed.

(* Well-formedness of the Base2 variant's subgoals. *)
Lemma rule_equality_equality_base2_wf2 {o} :
  forall (H : @barehypotheses o)
         (A B a1 a2 b1 b2 e1 e2 e3 : NTerm)
         (i : nat),
    wf_rule2 (rule_equality_equality_base2 H A B a1 a2 b1 b2 e1 e2 e3 i).
Proof.
  introv wf m; allsimpl.
  repndors; subst; tcsp;
    allunfold @wf_bseq; allsimpl; repnd; dands; auto.
  - allrw <- @wf_equality_iff; tcsp.
  - unfold closed_type_baresequent in *; simpl in *.
    unfold closed_type in *; simpl in *.
    allrw @covered_equality; tcsp.
  - allrw <- @wf_equality_iff; tcsp.
  - unfold closed_type_baresequent in *; simpl in *.
    unfold closed_type in *; simpl in *.
    allrw @covered_equality; tcsp.
  - allrw <- @wf_equality_iff; tcsp.
  - unfold closed_type_baresequent in *; simpl in *.
    unfold closed_type in *; simpl in *.
    allrw @covered_equality; tcsp.
Qed.
|
{"author": "vrahli", "repo": "NuprlInCoq", "sha": "0c3d7723836d3f615ea47f56e58b2ea6173e7d98", "save_path": "github-repos/coq/vrahli-NuprlInCoq", "path": "github-repos/coq/vrahli-NuprlInCoq/NuprlInCoq-0c3d7723836d3f615ea47f56e58b2ea6173e7d98/rules/rules_equality3.v"}
|
% !TEX root = frideswide.tex
\chapter{Introduction}
|
{"hexsha": "1e2c770c51b571eeb8cc9e0f057ce418585d5e3e", "size": 53, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "introduction.tex", "max_stars_repo_name": "OpenBookPublishers/dunning-2pp-book", "max_stars_repo_head_hexsha": "ada7a8b62343b9f72ec0e2ef4493508cc4916989", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "introduction.tex", "max_issues_repo_name": "OpenBookPublishers/dunning-2pp-book", "max_issues_repo_head_hexsha": "ada7a8b62343b9f72ec0e2ef4493508cc4916989", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "introduction.tex", "max_forks_repo_name": "OpenBookPublishers/dunning-2pp-book", "max_forks_repo_head_hexsha": "ada7a8b62343b9f72ec0e2ef4493508cc4916989", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 13.25, "max_line_length": 28, "alphanum_fraction": 0.7358490566, "num_tokens": 15}
|
# Coder: Wenxin Xu
# Github: https://github.com/wenxinxu/resnet_in_tensorflow
# ==============================================================================
# This code was modified from the code in the link above.
#from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
def horizontal_flip(image):
    '''
    Mirror an image left-right with 50% probability.

    :param image: a 3 dimensional numpy array representing an image
    :return: the image, flipped horizontally half the time
    '''
    # one draw from {0, 1}; flip on 0 so the chance is exactly 50%
    if np.random.randint(low=0, high=2) == 0:
        image = np.fliplr(image)
    return image
def pad_images(data, padding_size):
    '''
    Reflect-pad a 4D image batch (N, H, W, C) by padding_size pixels on
    each spatial side; batch and channel axes are left untouched.
    '''
    spatial = (padding_size, padding_size)
    return np.pad(data, pad_width=((0, 0), spatial, spatial, (0, 0)), mode='reflect')
def random_crop_and_flip(batch_data, padding_size):
    '''
    Randomly crop each padded image back to its original size, then
    randomly flip it horizontally.

    :param batch_data: 4D array (N, H+2p, W+2p, C) of padded images
    :param padding_size: number of padding pixels p on each spatial side
    :return: 4D float array (N, H, W, C) of cropped/flipped images
    '''
    n = len(batch_data)
    height = batch_data.shape[1] - 2 * padding_size
    width = batch_data.shape[2] - 2 * padding_size
    depth = batch_data.shape[3]
    cropped = np.zeros(n * height * width * depth).reshape(n, height, width, depth)
    for idx in range(n):
        # independent crop origin per image, anywhere within the padding
        dx = np.random.randint(low=0, high=2 * padding_size + 1, size=1)[0]
        dy = np.random.randint(low=0, high=2 * padding_size + 1, size=1)[0]
        window = batch_data[idx, ...][dx:dx + height, dy:dy + width, :]
        cropped[idx, ...] = horizontal_flip(image=window)
    return cropped
|
{"hexsha": "c186e152f928db599685cfee374b629a9a32ae11", "size": 1895, "ext": "py", "lang": "Python", "max_stars_repo_path": "preprocessing.py", "max_stars_repo_name": "MinhyungCho/riemannian-batch-normalization", "max_stars_repo_head_hexsha": "d1ac938ca5af8af1b7c1d4f708c1aacd2d8cbab9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2017-10-28T13:11:10.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-08T10:40:07.000Z", "max_issues_repo_path": "preprocessing.py", "max_issues_repo_name": "hujiangpku/riemannian-batch-normalization", "max_issues_repo_head_hexsha": "d1ac938ca5af8af1b7c1d4f708c1aacd2d8cbab9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-10-19T04:13:01.000Z", "max_issues_repo_issues_event_max_datetime": "2017-10-19T04:13:01.000Z", "max_forks_repo_path": "preprocessing.py", "max_forks_repo_name": "hujiangpku/riemannian-batch-normalization", "max_forks_repo_head_hexsha": "d1ac938ca5af8af1b7c1d4f708c1aacd2d8cbab9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2017-12-08T19:27:35.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-13T07:36:25.000Z", "avg_line_length": 35.0925925926, "max_line_length": 92, "alphanum_fraction": 0.6569920844, "include": true, "reason": "import numpy", "num_tokens": 478}
|
import numpy as np
class GridMap:
    '''
    Mapping of variables from ranges defined by min-max and scale to a
    0-1 unit hypercube: each coordinate of a point in [0, 1]^d is decoded
    into the corresponding variable's native value.
    '''
    def __init__(self, variables):
        """Validate the variable specs and count the dimensions.

        Each spec is a dict with a 'type' of 'int', 'float' or 'enum';
        anything else raises.
        """
        self.cardinality = 0
        for spec in variables:
            self.cardinality += 1
            if spec['type'] not in ['int', 'float', 'enum']:
                raise Exception("Unknown parameter type.")
        self.variables = variables
        print("Optimizing over %d dimensions\n" % (self.cardinality))
    def get_params(self, u):
        """Decode a unit-hypercube point *u* into parameter values.

        Returns a list of {'name': ..., 'val': ...} dicts, one per variable,
        in the order the variables were declared.
        """
        if u.shape[0] != self.cardinality:
            raise Exception("Hypercube dimensionality is incorrect.")
        params = []
        for spec, coord in zip(self.variables, u):
            kind = spec['type']
            if kind == 'int':
                span = spec['max'] - spec['min'] + 1
                val = spec['min'] + self._index_map(coord, span)
            elif kind == 'float':
                val = spec['min'] + coord * (spec['max'] - spec['min'])
                # scale is optional and defaults to log (value is 10**val)
                if spec.get('scale', 'log') == 'log':
                    val = 10 ** val
            elif kind == 'enum':
                val = spec['options'][self._index_map(coord, len(spec['options']))]
            else:
                raise Exception("Unknown parameter type.")
            params.append({'name': spec['name'], 'val': val})
        return params
    def card(self):
        """Number of dimensions of the hypercube."""
        return self.cardinality
    def _index_map(self, u, items):
        """Map u in [0, 1] (clamped) to an index in range(items)."""
        clamped = np.min((np.max((u, 0.0)), 1.0))
        # the (1 - eps) factor keeps u == 1.0 inside the last bucket
        return int(np.floor((1 - np.finfo(float).eps) * clamped * float(items)))
|
{"hexsha": "bd53657c72425d9ecd5d6857d141bf7f627187f1", "size": 1772, "ext": "py", "lang": "Python", "max_stars_repo_path": "gp_families/grid.py", "max_stars_repo_name": "jclevesque/gp_families", "max_stars_repo_head_hexsha": "3c24b0ec60231c6110e0060d6e2471683718615e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gp_families/grid.py", "max_issues_repo_name": "jclevesque/gp_families", "max_issues_repo_head_hexsha": "3c24b0ec60231c6110e0060d6e2471683718615e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gp_families/grid.py", "max_forks_repo_name": "jclevesque/gp_families", "max_forks_repo_head_hexsha": "3c24b0ec60231c6110e0060d6e2471683718615e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.7450980392, "max_line_length": 94, "alphanum_fraction": 0.539503386, "include": true, "reason": "import numpy", "num_tokens": 407}
|
import os
import torch
import argparse
import pytorch_lightning as pl
from utils import read_config, get_early_stopper, get_checkpoint_callback, final_logs, print_dict
from train import Model
from dataset import DatasetModule
import numpy as np
from models.model import pDNN
# Command-line interface: path to the INI config file and the number of GPUs
# to use (0 = CPU only).
parser = argparse.ArgumentParser()
parser.add_argument("--config", default="config.ini")
parser.add_argument("--num_gpus", type=int, default=0)
if __name__ == "__main__":
args = parser.parse_args()
filename = args.config
gpus = args.num_gpus if args.num_gpus is not None else 0
params = read_config(filename)
use_gpu = (args.num_gpus > 0)
if params["JOB_TYPE"] == "train":
if not os.path.exists(params["SAVE_DIR"]):
os.makedirs(params["SAVE_DIR"])
if not os.path.exists(params["LOG_DIR"]):
os.makedirs(params["LOG_DIR"])
if not os.path.exists(params["CHECKPOINTS_DIR"]):
os.makedirs(params["CHECKPOINTS_DIR"])
type2id = dict(
zip(params["BKG_LIST"]+params["SIG_LIST"], range(len(params["BKG_LIST"])+len(params["SIG_LIST"]))))
if params["MISSING_TRAIN"] :
temp = params["BKG_LIST"]+list(set(params["SIG_LIST"]) | set(params["MISSING_SIG"]))
type2id = dict(zip(temp, range(len(temp))))
print_dict(type2id, "Types")
dataset = DatasetModule(root_path=params["ROOT_PATH"],
campaigns=params["CAMPAIGN"],
channel=params["CHANNEL"],
norm_array=params["NORM_ARRAY"],
sig_sum=params["SIG_SUM"],
bkg_sum=params["BKG_SUM"],
bkg_list=params["BKG_LIST"],
sig_list=params["SIG_LIST"],
data_list=params["DATA_LIST"],
selected_features=params["FEATURES"],
reset_feature=params["RESET_FEATURE"],
reset_feature_name=params["RESET_FEATURE_NAME"],
rm_negative_weight_events=params["NEGATIVE_WT"],
cut_features=params["CUT_FEATURES"],
cut_values=params["CUT_VALUES"],
cut_types=params["CUT_TYPES"],
test_rate=params["TEST_SPLIT"],
val_split=params["VAL_SPLIT"],
batch_size=params["BATCH_SIZE"],
id_dict=type2id,
missing_train=params["MISSING_TRAIN"],
missing_sig=params["MISSING_SIG"],
use_PCA= params["USE_PCA"],
pca_components=params["PCA_COMPONENTS"])
early_stopping, logger, model_checkpoint = None, None, None
if params["EARLY_STOP"]:
early_stopping = get_early_stopper(
monitor=params["ES_MONITOR"], min_delta=params["ES_DELTA"], patience=params["ES_PATIENCE"], mode=params["ES_MODE"])
if params["SAVE_TB_LOGS"]:
logger = pl.loggers.TensorBoardLogger(
save_dir=params["LOG_DIR"], log_graph=False)
if params["SAVE_MODEL"]:
model_checkpoint = get_checkpoint_callback(
PATH=params["CHECKPOINTS_DIR"], monitor='val_loss', save_last=params["CHECK_EPOCH"]) #
loss_fn, output_fn = None, None
if params["LOSS"] == "bce_loss":
loss_fn = torch.nn.BCELoss()
output_fn = torch.nn.Sigmoid()
elif params["LOSS"] == "hinge_loss":
loss_fn = torch.nn.HingeLoss()
output_fn = torch.nn.Tanh()
model = Model(momentum=params["MOMENTUM"],
nesterov=params["NESTEROV"],
learn_rate=params["LEARN_RATE"],
learn_rate_decay=params["LR_DECAY"],
sig_class_weight=params["SIG_WT"],
bkg_class_weight=params["BKG_WT"],
threshold=params["THRESHOLD"],
optimizer=params["OPT"],
loss_fn=loss_fn,
output_fn=output_fn,
layers=params["LAYERS"],
nodes=params["NODES"],
dropout=params["DROPOUT"],
activation=params["ACTIVATION"],
input_size=params["PCA_COMPONENTS"] if params["USE_PCA"] else len(params["FEATURES"]),
id_dict=type2id,
save_tb_logs=params["SAVE_TB_LOGS"],
save_metrics=params["METRICS"],
save_wt_metrics=params["WT_METRICS"])
trainer = pl.Trainer(early_stop_callback=early_stopping,
checkpoint_callback=model_checkpoint,
logger=logger,
max_epochs=params["EPOCHS"],
gpus=gpus)
'''training the model'''
trainer.fit(model, dataset)
test_dataset = dataset.test_dataloader()
training_metrics = model.metrics
best_model = model
if params["ES_RESTORE"]:
best_model.load_state_dict(torch.load(model_checkpoint.best_model_path)['state_dict'])
final_logs(best_model.dnn, test_dataset,
params["THRESHOLD"], output_fn, type2id, gpus, training_metrics, params["LOG_DIR"])
elif params["JOB_TYPE"] == "test":
if not os.path.exists(params["LOAD_DIR"]):
raise Exception("Model doesnt exist")
type2id = dict(
zip(params["BKG_LIST"]+params["SIG_LIST"], range(len(params["BKG_LIST"])+len(params["SIG_LIST"]))))
print_dict(type2id, "Types")
dataset = DatasetModule(root_path=params["ROOT_PATH"],
campaigns=params["CAMPAIGN"],
channel=params["CHANNEL"],
norm_array=params["NORM_ARRAY"],
sig_sum=params["SIG_SUM"],
bkg_sum=params["BKG_SUM"],
bkg_list=params["BKG_LIST"],
sig_list=params["SIG_LIST"],
data_list=params["DATA_LIST"],
selected_features=params["FEATURES"],
reset_feature=params["RESET_FEATURE"],
reset_feature_name=params["RESET_FEATURE_NAME"],
rm_negative_weight_events=params["NEGATIVE_WT"],
cut_features=params["CUT_FEATURES"],
cut_values=params["CUT_VALUES"],
cut_types=params["CUT_TYPES"],
test_rate=1.0,
val_split=0.0,
batch_size=params["BATCH_SIZE"],
id_dict=type2id)
early_stopping, logger, model_checkpoint = None, None, None
loss_fn, output_fn = None, None
if params["LOSS"] == "bce_loss":
loss_fn = torch.nn.BCELoss()
output_fn = torch.nn.Sigmoid()
elif params["LOSS"] == "hinge_loss":
loss_fn = torch.nn.HingeLoss()
output_fn = torch.nn.Tanh()
model = Model(momentum=params["MOMENTUM"],
nesterov=params["NESTEROV"],
learn_rate=params["LEARN_RATE"],
learn_rate_decay=params["LR_DECAY"],
sig_class_weight=params["SIG_WT"],
bkg_class_weight=params["BKG_WT"],
threshold=params["THRESHOLD"],
optimizer=params["OPT"],
loss_fn=loss_fn,
output_fn=output_fn,
layers=params["LAYERS"],
nodes=params["NODES"],
dropout=params["DROPOUT"],
activation=params["ACTIVATION"],
input_size=len(params["FEATURES"]),
id_dict=type2id,
save_tb_logs=params["SAVE_TB_LOGS"],
save_metrics=params["METRICS"],
save_wt_metrics=params["WT_METRICS"])
dataset.prepare_data()
dataset.setup("test")
test_dataset = dataset.test_dataloader()
training_metrics = model.metrics
model.load_state_dict(torch.load(
params["LOAD_DIR"])['state_dict'], strict=False)
final_logs(model, test_dataset,
params["THRESHOLD"], output_fn, type2id, gpus, None, params["LOG_DIR"])
|
{"hexsha": "e92af6d21ff2011bb245da0ed7c218dbc4b3ae8b", "size": 8913, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "sriyash421/pDNN", "max_stars_repo_head_hexsha": "80276e046dfa21567a380502d187b928ec01147b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main.py", "max_issues_repo_name": "sriyash421/pDNN", "max_issues_repo_head_hexsha": "80276e046dfa21567a380502d187b928ec01147b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "sriyash421/pDNN", "max_forks_repo_head_hexsha": "80276e046dfa21567a380502d187b928ec01147b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.4402173913, "max_line_length": 131, "alphanum_fraction": 0.5168854482, "include": true, "reason": "import numpy", "num_tokens": 1661}
|
import helpers
import re
import numpy as np
'''
Possible types:
header/footer
- has_words
- is_top_or_bottom
- small_text?
- n_lines <= 3
body
- has_words
- normal_word_separation
- normal_word_coverage
- !overlaps
- !small_text
- !very_separated_words
- !mostly_blank
- !little_word_coverage
- proportion_alpha > 0.7
- !offset_words
- n_lines > 1
- !is_line
graphic
- mostly_blank
- gaps!
- little_word_coverage
- overlaps?
- n_lines > 1
- !is_line
graphic_caption
- has_words
- small_text?
- overlaps?
- normal_word_coverage
- proportion_alpha > 0.7
- !is_line
-
reference
- has_words
- small_text?
- small_leading?
- normal_word_coverage
- proportion_alpha > 0.5...????
- offset_words?
- n_lines > 1
- !is_line
other
- doesn't match other criteria
'''
# Does the area have any words?
def has_words(area):
    """True when the area contains at least one recognized word."""
    return area['words'] > 0
# Does this area intersect a line
def line_intersect(area, all_areas):
    """Return True when ``area`` overlaps any separator-line area in ``all_areas``."""
    separators = [candidate for candidate in all_areas if is_line(candidate)]
    for separator in separators:
        if helpers.rectangles_intersect(area, separator):
            return True
    return False
# Is the text contained in this area smaller than "normal" (mean word height is +/- 0.25 of the document avg)
def small_text(area, doc_stats):
    """True when the area's mean word height falls more than a quarter of a
    standard deviation below the document-wide mean word height."""
    threshold = doc_stats['word_height_avg'] - (doc_stats['word_height_avg_std'] / 4)
    return area['word_height_avg'] < threshold
# Is the line height smaller than "normal" (mean leading is +/- 0.5 of the document avg)?
def small_leading(area, doc_stats):
    """True when the area's mean leading (line height) lies within half a
    standard deviation of the document mean.

    NOTE(review): despite the name, this tests "normal leading", not small
    leading -- True means the leading is inside the +/- 0.5 std band.
    Behavior preserved; confirm against callers before renaming/inverting.
    """
    mean_area_leading = np.nanmean(area['line_heights'])
    lower = doc_stats['line_height_avg'] - (doc_stats['line_height_std'] / 2)
    upper = doc_stats['line_height_avg'] + (doc_stats['line_height_std'] / 2)
    return lower <= mean_area_leading <= upper
# Is this area only one line and contain no text?
#
# Separator lines
#   1 line
#   0 words
#   word separation index === 0
#   word height index === 0
#   word height average === 0
def is_line(area):
    """True when the area is a graphical separator line: a single "line" with
    no words and all word metrics zeroed."""
    return (area['lines'] == 1
            and area['words'] == 0
            and area['word_separation_index'] == 0
            and area['word_height_index'] == 0
            and area['word_height_avg'] == 0)
# Is the area the first or last area (in y space) on the page?
def is_top_or_bottom(area, page_areas):
    """True when the area's top edge is within 10px of the highest or lowest
    top edge on the page.

    NOTE(review): both extremes use y1 (top edge), so "bottom" means the area
    whose *top* is lowest, not the lowest bottom edge (y2) -- confirm intended.
    """
    tops = [a['y1'] for a in page_areas]
    min_y, max_y = min(tops), max(tops)
    return abs(area['y1'] - min_y) <= 10 or abs(area['y1'] - max_y) <= 10
# Does the area contain much more white space than other areas?
#
# Giant blank areas are *probably* tables:
#   average line height > (document average line height + 100)
#   area > 250000
def mostly_blank(area, doc_stats):
    """True when the area's mean line height far exceeds the document mean
    (by > 100px) and the area itself is large (> 250000 px^2)."""
    mean_line_height = np.nanmean(area['line_heights'])
    return mean_line_height > doc_stats['line_height_avg'] + 100 and area['area'] > 250000
# Tables
#   very_separated_words == True
#   little_word_coverage == True
#   n_lines > 1
#   mostly_blank
#   overlaps
# Is the separation of words in the area much greater than others?
# word separation index >= (document median word separation index + 1 standard deviation)
def very_separated_words(area, doc_stats):
    """True when the area's word separation is at least one standard deviation
    above the document median."""
    cutoff = doc_stats['word_separation_index_median'] + doc_stats['word_separation_index_std']
    return area['word_separation_index'] >= cutoff
# area covered by words <= (document word area median - 1 standard deviation)
def little_word_coverage(area, doc_stats):
    """Return True when word coverage is ABOVE (median - std).

    NOTE(review): the return values look inverted relative to the name and the
    comment above (True means the area does NOT have little coverage).
    Behavior is preserved here because classify()/classify_list() consumers
    and any trained models may depend on it -- confirm before flipping.
    """
    return area['word_area_index'] > (doc_stats['word_area_index_median'] - doc_stats['word_area_index_std'])
# Is the separation of words in the area "normal"?
def normal_word_separation(area, doc_stats):
    """True when word separation is below median + one standard deviation
    (the strict complement of very_separated_words)."""
    cutoff = doc_stats['word_separation_index_median'] + doc_stats['word_separation_index_std']
    return area['word_separation_index'] < cutoff
def normal_word_coverage(area, doc_stats):
    """True when the area's word coverage lies strictly within half a standard
    deviation of the document median."""
    median = doc_stats['word_area_index_median']
    half_std = doc_stats['word_area_index_std'] / float(2)
    return median - half_std < area['word_area_index'] < median + half_std
def best_caption(area):
    """True when the area's *first* OCR line starts with a caption keyword
    followed by a number (e.g. "Figure 3", "Table 2a")."""
    lines = area['soup'].find_all('span', 'ocr_line')
    if not lines:
        return False
    clean_line = lines[0].getText().strip().replace('\n', ' ').replace(' ', ' ').lower()
    match = re.match(r'(table|figure|fig|map|appendix|app|appx|tbl)(?:\.)? (?:(\d+\w+(?:\.)?)|(\d+))',
                     clean_line, flags=re.IGNORECASE | re.MULTILINE)
    return match is not None
def good_caption(area):
    """True when the first OCR line contains a caption keyword + number
    anywhere in it (re.findall, vs. best_caption's anchored re.match)."""
    lines = area['soup'].find_all('span', 'ocr_line')
    if not lines:
        return False
    clean_line = lines[0].getText().strip().replace('\n', ' ').replace(' ', ' ').lower()
    found = re.findall(r'(table|figure|fig|map|appendix|app|appx|tbl)(?:\.)? (?:(\d+\w+(?:\.)?)|(\d+))',
                       clean_line, flags=re.IGNORECASE | re.MULTILINE)
    return bool(found)
def ok_caption(area):
    r"""True when any OCR line starts with a short word followed by a number
    and that word is similar to a caption keyword (per helpers).

    BUG FIX: the pattern was the non-raw string '(\b\w{1,6}) \d+', in which
    '\b' is a literal backspace character (0x08), so the regex could
    essentially never match. A raw string restores the intended word-boundary
    anchor.
    """
    lines = area['soup'].find_all('span', 'ocr_line')
    for line in lines:
        clean_line = line.getText().strip().replace('\n', ' ').replace(' ', ' ').lower()
        matches = re.match(r'(\b\w{1,6}) \d+', clean_line, flags=re.IGNORECASE | re.MULTILINE)
        if matches is not None and helpers.similar_to_keyword(matches.groups()[0]):
            return True
    return False
# Does the area intersect with any other areas on the page?
def overlap(area, all_areas):
    """True when ``area`` intersects a candidate that shares *none* of its
    four corner coordinates (which also excludes the area itself)."""
    for candidate in all_areas:
        shares_a_coord = (candidate['x1'] == area['x1'] or candidate['y1'] == area['y1']
                          or candidate['x2'] == area['x2'] or candidate['y2'] == area['y2'])
        if helpers.rectangles_intersect(area, candidate) and not shares_a_coord:
            return True
    return False
# For a given area, what proportion of the characters are [a-z]
def proportion_alpha(area):
    """Return the fraction of non-space characters in the area's text that are
    ASCII letters (0 when the area has no text)."""
    area_words = ' '.join(filter(None, area['soup'].getText().strip().replace('\n', ' ').replace(' ', ' ').split(' ')))
    # Count characters, not words -- the original local name `total_words`
    # was misleading.
    total_chars = len(area_words.replace(' ', ''))
    if total_chars == 0:
        return 0
    return len(re.findall('[a-zA-Z]', area_words)) / float(total_chars)
# Is there a high standard deviation in the x position of the first word of each line?
def offset_words(area):
    """True when the x positions of each line's first word vary widely
    (standard deviation > 5), suggesting offset/hanging indentation."""
    leading_xs = []
    for line in area['soup'].find_all('span', 'ocr_line'):
        words = line.find_all('span', 'ocrx_word')
        if len(words) > 0:
            box = helpers.extractbbox(words[0].get('title'))
            leading_xs.append(box['x1'])
    return np.nanstd(leading_xs) > 5
def classify(area, doc_stats, all_areas):
    """Bundle every heuristic for ``area`` into a single feature dict."""
    return dict(
        has_words=has_words(area),
        line_intersect=line_intersect(area, all_areas),
        small_text=small_text(area, doc_stats),
        small_leading=small_leading(area, doc_stats),
        is_line=is_line(area),
        is_top_or_bottom=is_top_or_bottom(area, all_areas),
        mostly_blank=mostly_blank(area, doc_stats),
        very_separated_words=very_separated_words(area, doc_stats),
        little_word_coverage=little_word_coverage(area, doc_stats),
        normal_word_separation=normal_word_separation(area, doc_stats),
        normal_word_coverage=normal_word_coverage(area, doc_stats),
        best_caption=best_caption(area),
        good_caption=good_caption(area),
        ok_caption=ok_caption(area),
        overlap=overlap(area, all_areas),
        proportion_alpha=proportion_alpha(area),
        offset_words=offset_words(area),
        n_gaps=len(area['gaps']),
        n_lines=area['lines'],
        x1=area['x1'],
        y1=area['y1'],
        x2=area['x2'],
        y2=area['y2'],
        area=area['area'],
    )
def classify_list(area, doc_stats, all_areas):
    """Same heuristics as classify(), flattened to a numeric feature vector:
    16 boolean flags cast to 0/1, then four continuous features (the size
    features normalized by the document maxima)."""
    boolean_features = [
        has_words(area),
        line_intersect(area, all_areas),
        small_text(area, doc_stats),
        small_leading(area, doc_stats),
        is_line(area),
        is_top_or_bottom(area, all_areas),
        mostly_blank(area, doc_stats),
        very_separated_words(area, doc_stats),
        little_word_coverage(area, doc_stats),
        normal_word_separation(area, doc_stats),
        normal_word_coverage(area, doc_stats),
        best_caption(area),
        good_caption(area),
        ok_caption(area),
        overlap(area, all_areas),
        offset_words(area),
    ]
    numeric_features = [
        proportion_alpha(area),
        area['area'] / float(doc_stats['max_area']),
        len(area['gaps']) / float(doc_stats['max_gaps']),
        area['lines'] / float(doc_stats['max_lines']),
    ]
    return [int(flag) for flag in boolean_features] + numeric_features
|
{"hexsha": "c567961d10467e8733e02ed32ee6a0169082d65d", "size": 9407, "ext": "py", "lang": "Python", "max_stars_repo_path": "heuristics.py", "max_stars_repo_name": "iross/blackstack", "max_stars_repo_head_hexsha": "4e44679f889d86626cd7cd263a0b770e1d5e9e64", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-10-08T11:07:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-15T18:34:40.000Z", "max_issues_repo_path": "heuristics.py", "max_issues_repo_name": "iross/blackstack", "max_issues_repo_head_hexsha": "4e44679f889d86626cd7cd263a0b770e1d5e9e64", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-03-19T23:56:37.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-19T23:56:37.000Z", "max_forks_repo_path": "heuristics.py", "max_forks_repo_name": "iross/blackstack", "max_forks_repo_head_hexsha": "4e44679f889d86626cd7cd263a0b770e1d5e9e64", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.4578754579, "max_line_length": 233, "alphanum_fraction": 0.6281492506, "include": true, "reason": "import numpy", "num_tokens": 2412}
|
import sys
from scipy.stats import hypergeom
# Read a CSV-ish gene list and print the first gene id of each line, quoted
# and comma-separated.
gene_file = sys.argv[1]

output = ""
try:
    # Use a context manager so the file is always closed.  The original code
    # called fgene.close() inside the except handler, where fgene is unbound
    # whenever open() itself raised -- masking the IOError with a NameError.
    with open(gene_file, "r") as fgene:
        for gline in fgene:
            gline = gline.rstrip()
            geneids = gline.split(",")
            output += "\"" + geneids[0] + "\","
        print(output)
except IOError:
    print('cannot open', gene_file)
|
{"hexsha": "308f97ab00c0ab5c54e2292fd64be0dfb20e5810", "size": 347, "ext": "py", "lang": "Python", "max_stars_repo_path": "data/167/format.py", "max_stars_repo_name": "kbasecollaborations/GeneSet_Enrichment", "max_stars_repo_head_hexsha": "14a5e409019457bfbe985236ff103edb2e8896c7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "data/167/format.py", "max_issues_repo_name": "kbasecollaborations/GeneSet_Enrichment", "max_issues_repo_head_hexsha": "14a5e409019457bfbe985236ff103edb2e8896c7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data/167/format.py", "max_forks_repo_name": "kbasecollaborations/GeneSet_Enrichment", "max_forks_repo_head_hexsha": "14a5e409019457bfbe985236ff103edb2e8896c7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-01-17T20:26:32.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-17T20:26:32.000Z", "avg_line_length": 17.35, "max_line_length": 40, "alphanum_fraction": 0.5850144092, "include": true, "reason": "from scipy", "num_tokens": 94}
|
import pickle
# Our numerical workhorses
import numpy as np
import pandas as pd
# Import matplotlib stuff for plotting
import matplotlib.pyplot as plt
import matplotlib.cm as cm
# Seaborn, useful for graphics
import seaborn as sns
# Import the utils for this project
import ccutils
# Define mRNA rate (per-second mRNA degradation rate; all kinetic parameters
# below are inferred in units of this rate and rescaled by gm when printed
# in seconds^-1)
# gm = 0.00284 # s**-1
# http://bionumbers.hms.harvard.edu/bionumber.aspx?id=105717&ver=3&trm=lacZ%20mRNA%20lifetime&org=
gm = 1 / (3 * 60)
# Define cell volume
Vcell = 2.15 # fL
# Define diffusion limiting rate
k0 = 2.7E-3
# =============================================================================
# Single promoter
# =============================================================================
# Load the flat-chain (two pickled objects: samples, then log-probabilities)
with open('../../data/mcmc/lacUV5_constitutive_mRNA_prior.pkl', 'rb') as file:
    unpickler = pickle.Unpickler(file)
    gauss_flatchain = unpickler.load()
    gauss_flatlnprobability = unpickler.load()
# Generate a Pandas Data Frame with the mcmc chain
index = ['kp_on', 'kp_off', 'rm']
# Generate a data frame out of the MCMC chains
df_mcmc = pd.DataFrame(gauss_flatchain, columns=index)
# reassign the index with the new entries
index = df_mcmc.columns
# MAP value of the parameters (the chain sample with the highest log-posterior)
max_idx = np.argmax(gauss_flatlnprobability, axis=0)
kpon, kpoff, rm = df_mcmc.iloc[max_idx, :]
# ea range
# NOTE(review): ccutils.stats.hpd presumably returns the bounds of the 95%
# highest-posterior-density credible interval -- confirm against ccutils.
kpon_hpd = ccutils.stats.hpd(df_mcmc.iloc[:, 0], 0.95)
kpoff_hpd = ccutils.stats.hpd(df_mcmc.iloc[:, 1], 0.95)
rm_hpd = ccutils.stats.hpd(df_mcmc.iloc[:, 2], 0.95)
# Print results
print('Single gene copy parameters: ')
print("""
The most probable parameters for the model
------------------------------------------
kp_on = {0:.1f} -{1:0.1f} +{2:0.1f}
kp_off = {3:.1f} -{4:0.1f} +{5:0.1f}
rm = {6:.1f} -{7:0.1f} +{8:0.1f}
""".format(kpon, np.abs(kpon-kpon_hpd[0]), np.abs(kpon-kpon_hpd[1]),\
           kpoff, np.abs(kpoff-kpoff_hpd[0]), np.abs(kpoff-kpoff_hpd[1]),\
           rm, np.abs(rm-rm_hpd[0]), np.abs(rm-rm_hpd[1])))
# Print results (same parameters rescaled to seconds^-1 by the mRNA rate gm)
print("""
The most probable parameters for the model in seconds^-1
--------------------------------------------------------
kp_on = {0:.3f} -{1:0.3f} +{2:0.3f} s^-1
kp_off = {3:.3f} -{4:0.3f} +{5:0.3f} s^-1
rm = {6:.3f} -{7:0.3f} +{8:0.3f} s^-1
""".format(kpon * gm, np.abs(kpon-kpon_hpd[0]) * gm,
           np.abs(kpon-kpon_hpd[1]) * gm,
           kpoff * gm, np.abs(kpoff-kpoff_hpd[0]) * gm,
           np.abs(kpoff-kpoff_hpd[1]) * gm,
           rm * gm, np.abs(rm-rm_hpd[0]) * gm, np.abs(rm-rm_hpd[1]) * gm))
# =============================================================================
# Double promoter
# =============================================================================
# Load the flat-chain (two pickled objects: samples, then log-probabilities)
with open('../../data/mcmc/lacUV5_constitutive_mRNA_double_expo.pkl',
          'rb') as file:
    unpickler = pickle.Unpickler(file)
    gauss_flatchain = unpickler.load()
    gauss_flatlnprobability = unpickler.load()
# Generate a Pandas Data Frame with the mcmc chain
index = ['kp_on', 'kp_off', 'rm']
# Generate a data frame out of the MCMC chains
df_mcmc = pd.DataFrame(gauss_flatchain, columns=index)
# redefine the index with the new entries
index = df_mcmc.columns
# MAP value of the parameters (highest log-posterior sample)
max_idx = np.argmax(gauss_flatlnprobability, axis=0)
kpon_double, kpoff_double, rm_double = df_mcmc.iloc[max_idx, :]
# ea range (95% highest-posterior-density bounds, as above)
kpon_hpd = ccutils.stats.hpd(df_mcmc.iloc[:, 0], 0.95)
kpoff_hpd = ccutils.stats.hpd(df_mcmc.iloc[:, 1], 0.95)
rm_hpd = ccutils.stats.hpd(df_mcmc.iloc[:, 2], 0.95)
# Print results
print('Two-promoter model')
print("""
The most probable parameters for the model
------------------------------------------
kp_on = {0:.1f} -{1:0.1f} +{2:0.1f}
kp_off = {3:.1f} -{4:0.1f} +{5:0.1f}
rm = {6:.1f} -{7:0.1f} +{8:0.1f}
""".format(kpon_double, np.abs(kpon_double-kpon_hpd[0]),
           np.abs(kpon_double-kpon_hpd[1]),
           kpoff_double, np.abs(kpoff_double-kpoff_hpd[0]),
           np.abs(kpoff_double-kpoff_hpd[1]),
           rm_double, np.abs(rm_double-rm_hpd[0]),
           np.abs(rm_double-rm_hpd[1])))
# Print results (rescaled to seconds^-1 by the mRNA rate gm)
print("""
The most probable parameters for the model in seconds^-1
--------------------------------------------------------
kp_on = {0:.3f} -{1:0.3f} +{2:0.3f} s^-1
kp_off = {3:.2f} -{4:0.2f} +{5:0.2f} s^-1
rm = {6:.1f} -{7:0.1f} +{8:0.1f} s^-1
""".format(kpon_double * gm, np.abs(kpon_double-kpon_hpd[0]) * gm,
           np.abs(kpon_double-kpon_hpd[1]) * gm,
           kpoff_double * gm, np.abs(kpoff_double-kpoff_hpd[0]) * gm,
           np.abs(kpoff_double-kpoff_hpd[1]) * gm,
           rm_double * gm, np.abs(rm_double-rm_hpd[0]) * gm,
           np.abs(rm_double-rm_hpd[1]) * gm))
# =============================================================================
# Repressor rates
# =============================================================================
# Define binding energies of the different operators (in units of kBT)
energies = {'Oid': -17, 'O1': -15.3, 'O2': -13.9, 'O3': -9.7}
# Compute the rates for each repressor
# NOTE(review): kr_off_fun comes from ccutils.model -- its exact convention
# (units, dependence on k0 and the promoter rates) is defined there.
kr_offs = {key: ccutils.model.kr_off_fun(value, k0,
                                         kpon_double,
                                         kpoff_double,
                                         Vcell=Vcell) for key, value in
           energies.items()}
# Print repressor rates
print("""
The most probable parameters for the repressor in seconds^-1
------------------------------------------------------------
""")
for key, value in kr_offs.items():
    print('kr_off {0:s} = {1:.5f} s^-1'.format(key, value))
# =============================================================================
# Compute probability of each of the states
# =============================================================================
def prob_promoter(kr_on, kr_off, kp_on, kp_off, rm):
    '''
    Computes the steady-state probability of the three promoter states for a
    regulated promoter.

    Parameters
    ----------
    kr_on, kr_off : float
        Repressor binding / unbinding rates.
    kp_on, kp_off : float
        RNAP (promoter) on / off rates.
    rm : float
        mRNA production rate. Unused in the state probabilities; kept for
        interface compatibility with existing callers.

    Returns
    -------
    dict
        Keys 'P_B' (RNAP bound), 'P_E' (empty) and 'P_R' (repressor bound);
        the three probabilities sum to 1.
    '''
    # Hoist the shared normalization constant instead of recomputing it
    # three times.
    norm = kp_off * kr_off + kp_off * kr_on + kr_off * kp_on
    return {'P_B': (kr_off * kp_on) / norm,
            'P_E': (kp_off * kr_off) / norm,
            'P_R': (kp_off * kr_on) / norm}
# O1 operator with R = 22 repressors per cell
R = 22
# Repressor on-rate: diffusion-limited rate k0 scaled by repressor copy number
# NOTE(review): 0.6022 presumably converts molecules/fL to nM (Avogadro's
# number / 1e24) -- confirm the unit convention against ccutils.
kr_on = 1 / Vcell / 0.6022 * k0 * R
probs_O1 = prob_promoter(kr_on, kr_offs['O1'], kpon_double,
                         kpoff_double, rm_double)
print('''
Probability of each promoter state for O1 - R{:d}
-------------------------------------------------
'''.format(R))
for key, value in probs_O1.items():
    print('State {0:s} = {1:.5f}'.format(key, value))
{"hexsha": "76a41b6ac26a0b5b24f85bacc5004a2ef1d5c340", "size": 6555, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/theory/compute_parameters.py", "max_stars_repo_name": "RPGroup-PBoC/chann_cap", "max_stars_repo_head_hexsha": "f2a826166fc2d47c424951c616c46d497ed74b39", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-08-21T04:06:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-09T07:36:58.000Z", "max_issues_repo_path": "src/theory/compute_parameters.py", "max_issues_repo_name": "RPGroup-PBoC/chann_cap", "max_issues_repo_head_hexsha": "f2a826166fc2d47c424951c616c46d497ed74b39", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/theory/compute_parameters.py", "max_forks_repo_name": "RPGroup-PBoC/chann_cap", "max_forks_repo_head_hexsha": "f2a826166fc2d47c424951c616c46d497ed74b39", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-04-29T17:43:28.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-09T00:20:16.000Z", "avg_line_length": 34.140625, "max_line_length": 98, "alphanum_fraction": 0.5441647597, "include": true, "reason": "import numpy", "num_tokens": 1979}
|
import numpy as np
def lt_bpdecoder(signal, n, raw, max_iter=1):
    """Belief-propagation (sum-product) decoder for an LT code.

    Parameters
    ----------
    signal : array-like, length m
        Channel LLRs, one per received LT check symbol.
    n : int
        Number of source (variable) bits.
    raw : list of lists, length m
        raw[j] lists the variable-node indices attached to check node j.
    max_iter : int
        Number of message-passing iterations (default 1).

    Returns
    -------
    numpy.ndarray of int
        Hard decisions: 1 where the accumulated LLR is <= 0, else 0.
    """
    # 1. get vi and cj
    #    cji[j]: variable nodes attached to check node j
    #    vij[i]: check nodes attached to variable node i
    m = len(raw)
    cji = [raw[j] for j in range(m)]
    vij = []
    for i in range(n):
        vij.append([j for j in range(m) if i in cji[j]])
    # initial messages: all-zero LLRs
    Lch = signal
    L_hij = np.zeros(shape=(n, m))  # variable -> check messages
    L_fji = np.zeros(shape=(n, m))  # check -> variable messages
    count = 0
    while (True):
        count += 1
        # check-node update (LT code -> LDPC/source bits)
        for j in range(m):
            i_lists = cji[j]
            for i in i_lists:
                i_lists_copy = i_lists[:]
                i_lists_copy.remove(i)
                PI = np.prod(np.tanh(0.5 * L_hij[i_lists_copy, j]))
                # BUG FIX: the original used np.arctan here.  The sum-product
                # check-node rule is 2*arctanh(prod(tanh(L/2))); arctan
                # silently mis-scaled every message.  |PI * tanh(.)| < 1, so
                # arctanh is always finite.
                L_fji[i][j] = 2 * np.arctanh(PI * np.tanh(Lch[j] / 2))
        # variable-node update: sum of incoming messages except the target edge
        for i in range(n):
            j_lists = vij[i]
            for j in j_lists:
                j_lists_copy = j_lists[:]
                j_lists_copy.remove(j)
                L_hij[i][j] = sum(L_fji[i, j_lists_copy])
        if count >= max_iter:
            break
    # final beliefs: total LLR per variable node, then hard decision
    x = np.zeros([n])
    for i in range(n):
        x[i] = sum(L_fji[i, vij[i]])
    output = np.array(x <= 0).astype(int)
    return output
|
{"hexsha": "3592faa346a1b1927837fbe0eeb55766fbaeadbc", "size": 1680, "ext": "py", "lang": "Python", "max_stars_repo_path": "lt_bpdecoder.py", "max_stars_repo_name": "newlyj/LT", "max_stars_repo_head_hexsha": "d901eee99602c6c624826e33a30496262a6ac14c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lt_bpdecoder.py", "max_issues_repo_name": "newlyj/LT", "max_issues_repo_head_hexsha": "d901eee99602c6c624826e33a30496262a6ac14c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lt_bpdecoder.py", "max_forks_repo_name": "newlyj/LT", "max_forks_repo_head_hexsha": "d901eee99602c6c624826e33a30496262a6ac14c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.9655172414, "max_line_length": 94, "alphanum_fraction": 0.5142857143, "include": true, "reason": "import numpy", "num_tokens": 489}
|
# Public wavelet generators re-exported by this module.
export Berlage,
       Ormsby,
       Ricker

# Each included file defines the wavelet function of the same name.
include("Berlage.jl")
include("Ormsby.jl")
include("Ricker.jl")
|
{"hexsha": "e64ef5a67658242da64580b0ef61cdd8569db833", "size": 96, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Wavelets/Wavelets.jl", "max_stars_repo_name": "fercarozzi/myseismicjulia", "max_stars_repo_head_hexsha": "a8b184af2dca29f36176e78128503d27411f2c28", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 80, "max_stars_repo_stars_event_min_datetime": "2015-09-12T01:58:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-17T04:50:56.000Z", "max_issues_repo_path": "src/Wavelets/Wavelets.jl", "max_issues_repo_name": "fercarozzi/myseismicjulia", "max_issues_repo_head_hexsha": "a8b184af2dca29f36176e78128503d27411f2c28", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 20, "max_issues_repo_issues_event_min_datetime": "2015-09-12T02:47:00.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-02T14:43:18.000Z", "max_forks_repo_path": "src/Wavelets/Wavelets.jl", "max_forks_repo_name": "fercarozzi/myseismicjulia", "max_forks_repo_head_hexsha": "a8b184af2dca29f36176e78128503d27411f2c28", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 60, "max_forks_repo_forks_event_min_datetime": "2015-09-12T01:58:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-28T22:21:07.000Z", "avg_line_length": 12.0, "max_line_length": 21, "alphanum_fraction": 0.7395833333, "num_tokens": 33}
|
# Copyright 2018 Jörg Franke
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import os
import pickle
import urllib.request
import zipfile
import hashlib
from collections import OrderedDict
import tensorflow as tf
"""
Downloads and process glove word embeddings, applies them to a given vocabulary of a dataset.
"""
class WordEmbedding():
    """Word-embedding layer for a dataset vocabulary.

    Builds a trainable ``tf.Variable`` embedding matrix, initialized either
    uniformly at random or from pre-trained GloVe vectors (downloaded and
    cached on disk on first use).
    """

    def __init__(self, embedding_size, vocabulary_size=None, word_idx_dict=None, initialization='uniform', tmp_dir='.',
                 dtype=tf.float32, seed=123):
        """
        Args:
            embedding_size: dimensionality of each word vector (100 or 300 for GloVe).
            vocabulary_size: number of embedding rows; inferred from word_idx_dict if None.
            word_idx_dict: mapping word -> row index (required for GloVe init).
            initialization: 'uniform' or 'glove'.
            tmp_dir: directory used to cache GloVe downloads and pickled matrices.
            dtype: TensorFlow dtype of the embedding matrix.
            seed: seed for the numpy RNG used for out-of-vocabulary vectors.
        """
        self.rng = np.random.RandomState(seed)
        if vocabulary_size is None:  # was "== None"; identity check is the correct idiom
            vocabulary_size = len(word_idx_dict)
        if initialization == 'uniform':
            init_tensor = self.initialize_random(vocabulary_size, embedding_size, dtype)
        elif initialization == 'glove':
            init_tensor = self.initialize_with_glove(word_idx_dict, embedding_size, tmp_dir, dtype)
        else:
            # BUG FIX: an unknown value previously fell through and crashed
            # later with NameError on init_tensor; fail fast instead.
            raise ValueError("unknown initialization: {!r}".format(initialization))
        self.embeddings = tf.Variable(init_tensor, dtype=dtype, name='word_embedding')

    def embed(self, word_idx):
        """Look up embedding vectors for a tensor of word indices."""
        return tf.nn.embedding_lookup(self.embeddings, word_idx, name='embedding_lookup')

    @staticmethod
    def initialize_random(vocabulary_size, embedding_size, dtype):
        """Uniform random initialization in [-1, 1)."""
        return tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0, dtype=dtype)

    def initialize_with_glove(self, word_idx_dict, embedding_size, tmp_dir, dtype):
        """Return a GloVe-initialized embedding matrix for the vocabulary.

        Only 100-d (glove 6B) and 300-d (glove 42B) vectors are supported.
        """
        if embedding_size == 100:
            glove_type = '6B'
        elif embedding_size == 300:
            glove_type = '42B'
        else:
            # NOTE(review): raising a *warning class* as an exception is unusual,
            # but kept so existing callers catching UserWarning keep working.
            raise UserWarning('embedding size incompatible to glove word representations')
        return self.get_glove_embeddings(tmp_dir, glove_type, embedding_size, word_idx_dict)

    @staticmethod
    def make_dict_hash(dictionary):
        """Compute an order-independent md5 hash of a (possibly nested) dict.

        Used to key the on-disk embedding cache by vocabulary content.
        """
        pre = sorted(((k, v) for k, v in dictionary.items()))
        sort_dict = OrderedDict()
        for element in pre:
            # type(...) == dict (not isinstance) is kept on purpose: changing
            # it would alter how dict subclasses hash and invalidate caches.
            if type(element[1]) == dict:
                element_sort = OrderedDict(sorted(element[1].items()))
                for key, value in element_sort.items():
                    sort_dict[key] = sorted(value.items())
            else:
                sort_dict[element[0]] = element[1]
        hash_object = hashlib.md5(str(sort_dict).encode())
        return str(hash_object.hexdigest())

    def get_glove_embeddings(self, glove_path, glove_set, glove_dim, word_idx_dict):
        """Load cached embeddings for this vocabulary, building them if absent."""
        dict_hash = self.make_dict_hash(word_idx_dict)
        # NOTE: the '.plk' extension (sic) is kept for cache-file compatibility.
        embeddings_file = os.path.join(glove_path,
                                       'glove_embeddings_{}_{}_{}.plk'.format(dict_hash, glove_set, glove_dim))
        if os.path.isfile(embeddings_file):
            with open(embeddings_file, 'rb') as dict_file:
                embeddings = pickle.load(dict_file)
        else:
            embeddings = self.prepare_glove_embeddings(glove_path, glove_set, glove_dim, word_idx_dict)
            self.save_glove_embeddings(embeddings_file, embeddings)
        return embeddings

    @staticmethod
    def save_glove_embeddings(embeddings_file, embeddings):
        """Pickle the embedding matrix to embeddings_file."""
        with open(embeddings_file, 'wb') as dict_file:
            pickle.dump(embeddings, dict_file)

    def prepare_glove_embeddings(self, glove_path, glove_set, glove_dim, word_idx_dict):
        """Download/parse GloVe vectors and align them with the vocabulary.

        Words missing from GloVe get a uniform random vector in [-1, 1).
        """
        glove_embeddings_file = os.path.join(glove_path, "glove.{}.{}d.txt".format(glove_set, glove_dim))
        if not os.path.isfile(glove_embeddings_file):
            print("### Download GloVe Word Representation")
            if glove_set == '6B':
                url = 'http://nlp.stanford.edu/data/glove.6B.zip'
            elif glove_set == '42B':
                url = 'http://nlp.stanford.edu/data/glove.42B.300d.zip'
            elif glove_set == '840B':
                url = 'http://nlp.stanford.edu/data/glove.840B.300d.zip'
            else:
                # BUG FIX: an unknown set previously crashed with NameError on url.
                raise ValueError("unknown glove set: {!r}".format(glove_set))
            file_name = url.split('/')[-1]
            zip_path = os.path.join(glove_path, file_name)
            urllib.request.urlretrieve(url, zip_path)
            with zipfile.ZipFile(zip_path) as zip_file:
                zip_file.extractall(glove_path)
            os.remove(zip_path)
        glove_dict = OrderedDict()
        with open(glove_embeddings_file, 'r', encoding='utf-8') as gl:
            for line in gl:
                array = line.strip().split(" ")
                word = array[0]
                glove_dict[word.lower()] = np.asarray(list(map(float, array[1:])))
        # align vocabulary with GloVe vectors
        embeddings = np.empty([len(word_idx_dict), glove_dim])
        OOV = []
        for word, idx in word_idx_dict.items():
            try:
                embeddings[idx, :] = glove_dict[word]
            except (KeyError, ValueError):
                # BUG FIX: the bare `except:` also hid unrelated errors such as
                # KeyboardInterrupt; KeyError = missing word, ValueError = a
                # malformed/odd-length vector line. Both fall back to random.
                OOV.append(word)
                embeddings[idx, :] = self.rng.uniform(-1, 1, glove_dim)
        del glove_dict
        print('### Word Embeddings: Out of vocabulary words: {}'.format(len(OOV)))
        return embeddings
|
{"hexsha": "848a1cf650f9fdf06ab92329c906a7754865d549", "size": 5777, "ext": "py", "lang": "Python", "max_stars_repo_path": "adnc/model/utils/word_embedding.py", "max_stars_repo_name": "carusyte/ADNC", "max_stars_repo_head_hexsha": "4a5dfa5be1aca9f815794c2c276ec220a1eb591d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 62, "max_stars_repo_stars_event_min_datetime": "2018-07-05T13:55:13.000Z", "max_stars_repo_stars_event_max_datetime": "2019-01-03T15:58:07.000Z", "max_issues_repo_path": "adnc/model/utils/word_embedding.py", "max_issues_repo_name": "carusyte/ADNC", "max_issues_repo_head_hexsha": "4a5dfa5be1aca9f815794c2c276ec220a1eb591d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-05-26T08:29:10.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-17T12:38:28.000Z", "max_forks_repo_path": "adnc/model/utils/word_embedding.py", "max_forks_repo_name": "carusyte/ADNC", "max_forks_repo_head_hexsha": "4a5dfa5be1aca9f815794c2c276ec220a1eb591d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2019-02-03T03:14:54.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-17T22:33:41.000Z", "avg_line_length": 39.8413793103, "max_line_length": 119, "alphanum_fraction": 0.6259304137, "include": true, "reason": "import numpy", "num_tokens": 1236}
|
!> Aggregator module: re-exports small utility helpers so that client code
!> only needs a single `use futils`.
module futils
    ! string conversion helper
    use m_str, only: str
    ! 3-D vector algebra: products, normalization, perpendiculars, rotations
    use m_vector, only: dot, &
                        cross, &
                        normalize, &
                        normalized, &
                        perp_vec, &
                        rot3d_x, &
                        rot3d_y, &
                        rot3d_z
    ! default-argument resolution helper
    use m_get_default, only: get_default
    implicit none
contains
    ! no local procedures: this module exists only to bundle the re-exports
end module futils
|
{"hexsha": "3f552ac5302f03040a66dc9dbceb57810e94b098", "size": 382, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/futils.f90", "max_stars_repo_name": "Nkzono99/futils", "max_stars_repo_head_hexsha": "a5f0b2a587452e0b3f4b01feb54093a57546ed43", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/futils.f90", "max_issues_repo_name": "Nkzono99/futils", "max_issues_repo_head_hexsha": "a5f0b2a587452e0b3f4b01feb54093a57546ed43", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/futils.f90", "max_forks_repo_name": "Nkzono99/futils", "max_forks_repo_head_hexsha": "a5f0b2a587452e0b3f4b01feb54093a57546ed43", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.4705882353, "max_line_length": 38, "alphanum_fraction": 0.4293193717, "num_tokens": 83}
|
"""
Basic dataset classes for storing image bases for OCR
Datasets return dict {"image": image, "string": string}
"""
import random
import six
import lmdb
from torch.utils.data import Dataset, ConcatDataset, Subset
from torch.nn import functional as F
from PIL import Image
import numpy as np
class DataItemKeys:
    """Names of the dict keys every dataset item carries.

    A single source of truth so call sites never mistype a key.
    """

    # decoded image (numpy array or tensor depending on the wrapper)
    IMAGE = "image"
    # width of the image before any right-padding was applied
    IMAGE_WIDTH = "image_width"
    # ground-truth transcription for the image
    STRING = "string"
class LmdbDataset(Dataset):
    """
    Common lmdb-format dataset for storing image bases for OCR
    Seeks all images by keys image-XXXXXXXXX and their text labels by label-XXXXXXXXX
    Key num-samples should contain total image count
    """
    def __init__(self, root, image_format="RGB", case_sensitive=False):
        self.root = root
        self.image_format = image_format
        self.case_sensitive = case_sensitive
        # We don't keep the env on the instance here due to a pickling problem
        # with dataloader workers (https://github.com/pytorch/vision/issues/689).
        # It is (re)opened lazily via _ensure_env in each worker process.
        env = lmdb.open(root, max_readers=32, readonly=True, lock=False,
                        readahead=False, meminit=False)
        if not env:
            raise RuntimeError("cannot create lmdb from {}".format(root))
        with env.begin(write=False) as txn:
            self.n_samples = int(txn.get("num-samples".encode()))
        self.env = None

    def _ensure_env(self):
        """Open the lmdb environment lazily (once per process)."""
        if self.env is None:
            self.env = lmdb.open(self.root, max_readers=32, readonly=True, lock=False,
                                 readahead=False, meminit=False)

    def get_all_symbols(self):
        """
        Returns set of all symbols contained in dataset labels
        """
        # BUG FIX: previously read self.env directly, so calling this before
        # the first __getitem__ crashed on self.env being None.
        self._ensure_env()
        all_symbols = set()
        with self.env.begin(write=False) as txn:
            for index in range(1, self.n_samples + 1):  # lmdb keys are 1-based
                label_key = 'label-%09d'.encode() % index
                label = txn.get(label_key).decode('utf-8')
                all_symbols |= set(label)
        return all_symbols

    def __len__(self):
        return self.n_samples

    def __getitem__(self, index):
        self._ensure_env()
        # BUG FIX: the original check `index <= len(self)` allowed
        # index == len(self), which mapped to the nonexistent key n_samples+1.
        assert 0 <= index < len(self), "index range error"
        # lmdb starts indexing from 1
        index += 1
        with self.env.begin(write=False) as txn:
            label_key = "label-{:09d}".format(index).encode()
            label = txn.get(label_key).decode("utf-8")
            img_key = "image-{:09d}".format(index).encode()
            imgbuf = txn.get(img_key)
        buf = six.BytesIO()
        buf.write(imgbuf)
        buf.seek(0)
        try:
            img = Image.open(buf).convert(self.image_format)
        except IOError:
            raise RuntimeError("Corrupted image for {}".format(index))
        if not self.case_sensitive:
            label = label.lower()
        item = {
            DataItemKeys.IMAGE: np.array(img),
            DataItemKeys.STRING: label
        }
        return item
class PaddedDatasetWithTransforms(Dataset):
    """
    Wraps any existing OCR dataset and image transforms function to apply when getting elements
    Also pads smaller images on the right up to pad_width
    """
    def __init__(self, dataset, transforms, pad_width):
        self.dataset = dataset
        # transforms is called albumentations-style: transforms(image=...)["image"]
        self.transforms = transforms
        # BUG FIX: the attribute was misspelled "tranforms"; the old name is
        # kept as an alias in case external code reads it.
        self.tranforms = transforms
        self.pad_width = pad_width

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        item = self.dataset[index]
        image = self.transforms(image=item[DataItemKeys.IMAGE])["image"]
        width = image.shape[-1]
        padding_right = self.pad_width - width
        # NOTE(review): if width > pad_width this pads negatively (crops) --
        # confirm upstream guarantees width <= pad_width.
        image = F.pad(image, (0, padding_right))
        item[DataItemKeys.IMAGE] = image
        item[DataItemKeys.IMAGE_WIDTH] = width
        return item
|
{"hexsha": "46b6eab905f28ab2e717ad559b00cd0c1fe310da", "size": 3850, "ext": "py", "lang": "Python", "max_stars_repo_path": "recognition/src/data/dataset.py", "max_stars_repo_name": "AlexeyZhuravlev/OCR-experiments", "max_stars_repo_head_hexsha": "8493045054678a2e13cafce6d9e85c7581086c7a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-05-28T18:46:37.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-29T12:49:57.000Z", "max_issues_repo_path": "recognition/src/data/dataset.py", "max_issues_repo_name": "AlexeyZhuravlev/OCR-experiments", "max_issues_repo_head_hexsha": "8493045054678a2e13cafce6d9e85c7581086c7a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "recognition/src/data/dataset.py", "max_forks_repo_name": "AlexeyZhuravlev/OCR-experiments", "max_forks_repo_head_hexsha": "8493045054678a2e13cafce6d9e85c7581086c7a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.8, "max_line_length": 95, "alphanum_fraction": 0.6119480519, "include": true, "reason": "import numpy", "num_tokens": 853}
|
(*|
##########################################################
Proving decidability for a datatype that includes a vector
##########################################################
:Link: https://stackoverflow.com/q/55335098
|*)
(*|
Question
********
I'm trying to work with a datatype that represents expressions in a
sort of universal algebra context. The usual way to express this in
(pen and paper) maths is that you have a set of function symbols, F,
together with an arity function. An expression is a tree where each
node is labelled with a function symbol and it has as many children as
its arity. In this particular example, I've also got a set of atomic
variables that get injected explicitly as terms.
It's pretty clear how to write this down with Coq (I've got a snippet
of code at the bottom), but I'd like to prove some sort of
decidability result. I've managed to prove decidability for vectors
("If I have decidability on ``A``, then I can get decidability on
``VectorDef.t A n``"), but I can't work out how to do the same for my
tree type.
I tried doing an explicit recursion over the structure of a tree, but
I ended up needing to call out to my "decidable vector" function,
which doesn't get past the termination checker. This is reasonable,
since the vector function expects to be given a discriminator for
arbitrary elements of its underlying type and this obviously doesn't
bottom out!
I can't work out how to tell Coq that (by induction) I have
decidability for some terms, and these are the only terms that appear
in the vectors in question. Is there a standard trick for doing this
sort of thing?
Below, the data types in question:
|*)
Require Vectors.VectorDef.
(* Length-indexed vectors: [vec A n] holds exactly n elements of A. *)
Definition vec := VectorDef.t.
Section VTree.
  (* If it helps, I have a definition for this function *)
  (* dec_vec: equality on [vec A n] is decidable given decidable A. *)
  Variable dec_vec : forall A : Type,
      (forall x y : A, {x = y} + {x <> y}) ->
      forall (n : nat) (v v' : vec A n), {v = v'} + {v <> v'}.
  (* V: atomic variables; F: function symbols; a: arity of each symbol. *)
  Variable V : Set.
  Variable F : Set.
  Variable a : F -> nat.
  (* Universal-algebra terms: a variable, or a symbol f applied to exactly
     [a f] subterms. *)
  Inductive VTree : Type :=
  | varTerm : V -> VTree
  | funTerm (f : F) (ts : vec VTree (a f)) : VTree.
  Section DecVTree.
    Hypothesis decV : forall x y : V, {x = y} + {x <> y}.
    Hypothesis decF : forall x y : F, {x = y} + {x <> y}.
    (* The goal of the question: decidable equality on VTree. *)
    Definition decVTree : forall x y : VTree, {x = y} + {x <> y}.
    (* ??? *)
    Abort. (* .none *)
(*|
Answer (Li-yao Xia)
*******************
There are two challenging aspects to this problem.
1. Dependently typed programming with indexed types in Coq
2. Nested recursive types
Dependently typed programming with indexed types in Coq
=======================================================
By "indexed type" I am referring here specifically to inductive types
like ``Vector.t``, where the constructors refine some of the type
arguments. These arguments are called indices, and must appear between
``:`` and ``:=`` in the type signature:
|*)
(* Example of an indexed inductive type: the nat argument after the colon is
   an index refined by each constructor. *)
Inductive Vector (A : Type) : nat (* <- index *) -> Type :=
| nil : Vector A 0
| cons : A -> forall n, Vector A n -> Vector A (S n).
(*|
Indexed inductive types are very useful to define propositions, where
the terms don't matter. But for actual data, the short story here is:
don't do it. It's technically possible, but it's a very deep rabbit
hole, and overall quite a pain to work with, in large part because
dependent pattern-matching in Coq is such an unintuitive construct.
For example, see this blogpost:
https://homes.cs.washington.edu/~jrw12/dep-destruct.html
A less extreme solution is to give up on other "dependently-typed"
aspects of this program. The next candidate on the chopping block here
is ``sumbool ({ _ } + { _ })``. If the functions (and parameters)
return ``bool`` instead, this makes them reasonably easy to define (\
*cough*, see next section). Proving their correctness is still a
problem but at least you have something to compute with.
Two general alternatives to inductive indexed types are:
- Just use the flat version (``list`` instead of ``vec``), giving up
some "by construction" guarantees.
- Make the type a function of the indices as a ``Definition`` (or
``Fixpoint``), instead of ``Inductive``. Here we use ``unit`` and
``prod`` as building blocks for such types, but you may have to make
up your own for more elaborate types. A lot of dependent
pattern-matching will be necessary.
|*)
Reset Initial. (* .none *)
(* Alternative to an indexed Inductive: compute the type from the index,
   using unit for length 0 and a pair (head, tail) for S n. *)
Fixpoint vec (A : Set) (n : nat) :=
  match n with
  | O => unit | S n => (A * vec A n)%type
  end.
(*|
You might also want to reconsider the representation of the language
you want to implement. For example, do you really want to represent
arities as explicitly as a function on symbols? (That could certainly
be the case.) For example, could you not restrict this to symbols of
arities 0, 1, 2?
Nested recursive types
======================
These are recursive types whose recursive occurrences are inside other
data types (which may be recursive). To simplify the discussion, to
unclutter the code, and because of the aforementioned issues with
dependent types in Coq, consider the following type using ``list``
instead of ``vec`` and with one fewer constructor:
|*)
(* Nested recursive type: LTree occurs inside list in its own constructor. *)
Inductive LTree : Type :=
| funTerm : list LTree -> LTree.
(*|
You can define recursive functions on such a type with ``Fixpoint``,
but you have to be particularly careful about how recursive calls are
nested. Of course, this actually matters with any recursive type, but
the pattern is much more natural when the recursion is not nested, so
the problem is less noticeable.
Below is how we can decide equality for ``LTree``. We give up the
dependent ``sumbool``, returning a ``bool`` instead. The definition of
``dec_list`` is standard and generic.
|*)
Require Import List.
Import ListNotations.
Section List.
  (* decA is deliberately bound *outside* the Fixpoint: this is what lets a
     nested recursive caller (decLTree below) pass itself as decA and still
     satisfy the structural-termination checker. *)
  Context {A : Type} (decA : A -> A -> bool).
  (* Boolean equality on lists, parameterized by element equality. *)
  Fixpoint dec_list (l l' : list A) : bool :=
    match l, l' with
    | [], [] => true
    | a :: l0, a' :: l0' =>
      decA a a' && dec_list l0 l0'
    | _, _ => false
    end.
End List.
(*| Then equality of ``LTree`` looks innocent... |*)
(* Structural equality on LTree. The nested recursion is accepted because
   dec_list only applies its decA argument (here decLTree) to subterms of
   its first list argument. *)
Fixpoint decLTree (x y : LTree) : bool :=
  match x, y with
  | funTerm lx, funTerm ly =>
    dec_list decLTree lx ly
  end.
(*|
... but there are very subtle details that one needs to be aware of to convince Coq that the recursion is structurally decreasing.
The well-formedness of ``decLTree`` specifically depends in a very
delicate way on how ``dec_list`` uses its argument ``decA``, so
``dec_list`` must be a transparent definition:
1. It is only being applied to a subterm of the first list (you could
make it the second if you want, with some ``struct`` annotations).
2. ``decA`` is bound *outside* of ``Fixpoint dec_list``. The function
``decLTree`` would not be well-formed if that line instead read
``Fixpoint dec_list {A : Type} (decA : A -> A -> bool)``.
It's also possible to package these tricks up by writing some general
recursion/induction schemes for ``LTree``/``VTree``.
|*)
(*|
Answer (Rupert Swarbrick)
*************************
While Li-yao made some useful points, the dependent types aren't that
bad! It turns out that the reason my previous script didn't work is
that I'd used ``Qed`` rather than ``Defined`` to finish my
decidability proof for vectors.
Here's a complete working proof:
|*)
Reset Initial. (* .none *)
Require Vectors.VectorDef.
(* Eqdep_dec provides inj_pair2_eq_dec, needed to invert equalities of
   dependent pairs over a type with decidable equality (nat here). *)
Require Import Logic.Eqdep_dec.
Require Import PeanoNat.
Definition vec := VectorDef.t.
Section dec_vec.
  Variable A : Type.
  Hypothesis decA : forall x y : A, {x = y} + {x <> y}.
  (* Decidable equality on vectors via simultaneous induction (rect2).
     Ends with Defined (not Qed) so the term stays transparent and later
     definitions built on it can compute. *)
  Definition dec_vec {n} (v v' : vec A n) : {v = v'} + {v <> v'}.
    refine (VectorDef.rect2 (fun _ x y => {x = y} + {x <> y})
                            (left (eq_refl))
                            (fun n v v' veq a a' => _)
                            v v').
    - destruct (decA a a') as [ eqaH | neaH ].
      + rewrite <- eqaH; clear eqaH a'.
        destruct veq as [ eqvH | nevH ].
        * rewrite <- eqvH. apply left. exact eq_refl.
        * apply right. intro consH. inversion consH.
          exact (nevH (inj_pair2_eq_dec nat Nat.eq_dec (vec A) n v v' H0)).
      + apply right.
        intro consH. inversion consH. contradiction.
  Defined.
End dec_vec.
Section VTree.
  Variable V : Set.
  Variable F : Set.
  Variable a : F -> nat.
  (* Terms over symbols F (with arity function a) and variables from V. *)
  Inductive VTree : Type :=
  | varTerm : V -> VTree
  | funTerm (f : F) (ts : vec VTree (a f)) : VTree.
  Section DecVTree.
    Hypothesis decV : forall x y : V, {x = y} + {x <> y}.
    Hypothesis decF : forall x y : F, {x = y} + {x <> y}.
    (* Constructor discrimination: a variable never equals an application. *)
    Lemma varTerm_ne_funTerm v f ts : varTerm v <> funTerm f ts.
    Proof.
      intros eqH. inversion eqH.
    Qed.
    (* Decidable equality on terms; the nested call through dec_vec is
       structural because dec_vec only applies decVTree to subterms.
       NOTE(review): this ends with Qed, which makes decVTree opaque; if
       callers need it to compute, it should end with Defined like dec_vec
       above -- confirm intent. *)
    Fixpoint decVTree (x y : VTree) : {x = y} + {x <> y}.
      refine (match x, y with
              | varTerm v, varTerm v' => _
              | varTerm v, funTerm f ts => _
              | funTerm f ts, varTerm v => _
              | funTerm f ts, funTerm f' ts' => _
              end
             ).
      - destruct (decV v v') as [ eqH | neH ].
        + exact (left (f_equal varTerm eqH)).
        + enough (H: varTerm v <> varTerm v');
            try (exact (right H)).
          injection; tauto.
      - exact (right (varTerm_ne_funTerm v f ts)).
      - exact (right (not_eq_sym (varTerm_ne_funTerm v f ts))).
      - destruct (decF f f') as [ feqH | fneH ].
        + revert ts'. rewrite <- feqH. clear feqH; intro ts'.
          destruct (dec_vec VTree decVTree ts ts') as [ tseqH | tsneH ].
          * apply left. apply f_equal. exact tseqH.
          * apply right. intro funH. inversion funH.
            exact (tsneH (inj_pair2_eq_dec
                            F decF (fun f => vec VTree (a f)) f ts ts' H0)).
        + enough (H: funTerm f ts <> funTerm f' ts');
            try (exact (right H)).
          injection; tauto.
    Qed.
  End DecVTree.
End VTree.
|
{"author": "vonavi", "repo": "coq-examples", "sha": "5e76634f5a069db118df57cb869235a9e0b5c30a", "save_path": "github-repos/coq/vonavi-coq-examples", "path": "github-repos/coq/vonavi-coq-examples/coq-examples-5e76634f5a069db118df57cb869235a9e0b5c30a/examples/proving-decidability-for-a-datatype-that-includes-a-vector.v"}
|
""" This module generates notes for a midi file using the
trained neural network """
import pickle
import numpy
from music21 import instrument, note, stream, chord
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
from keras.layers import BatchNormalization as BatchNorm
from keras.layers import Activation
def generate():
    """ Generate a piano midi file """
    # restore the note corpus the network was trained on
    with open('data/notes', 'rb') as filepath:
        notes = pickle.load(filepath)

    pitchnames = sorted(set(notes))   # every distinct pitch/chord name
    n_vocab = len(set(notes))         # vocabulary size used for normalization

    # encode the corpus, rebuild the trained network, sample from it,
    # and finally write the sampled notes out as midi
    network_input, normalized_input = prepare_sequences(notes, pitchnames, n_vocab)
    model = create_network(normalized_input, n_vocab)
    prediction_output = generate_notes(model, network_input, pitchnames, n_vocab)
    create_midi(prediction_output)
def prepare_sequences(notes, pitchnames, n_vocab):
    """ Prepare the sequences used by the Neural Network """
    sequence_length = 100
    # map between notes and integers and back
    lookup = {pitch: number for number, pitch in enumerate(pitchnames)}

    network_input = []
    output = []
    # slide a fixed-size window over the corpus; the element right after
    # each window is its prediction target
    for start in range(len(notes) - sequence_length):
        window = notes[start:start + sequence_length]
        target = notes[start + sequence_length]
        network_input.append([lookup[item] for item in window])
        output.append(lookup[target])

    # reshape the input into (samples, timesteps, features) for the LSTM
    normalized_input = numpy.reshape(network_input,
                                     (len(network_input), sequence_length, 1))
    # normalize input
    normalized_input = normalized_input / float(n_vocab)
    return (network_input, normalized_input)
def create_network(network_input, n_vocab,
                   weights_path='/content/drive/MyDrive/Colab Notebooks/Mozart/weights-improvement-31-1.0257-bigger.hdf5'):
    """ create the structure of the neural network

    Args:
        network_input: array shaped (samples, timesteps, features); only its
            shape is used to size the first LSTM layer.
        n_vocab: number of distinct pitches (size of the softmax output).
        weights_path: checkpoint file to load. Parameterized so the function
            is no longer tied to one hard-coded Colab path; the old path is
            kept as the default for backward compatibility.
    """
    model = Sequential()
    model.add(LSTM(
        512,
        input_shape=(network_input.shape[1], network_input.shape[2]),
        recurrent_dropout=0.3,
        return_sequences=True
    ))
    model.add(LSTM(512, return_sequences=True, recurrent_dropout=0.3,))
    model.add(LSTM(512))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(n_vocab))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    # Load the trained weights into the freshly built architecture
    model.load_weights(weights_path)
    return model
def generate_notes(model, network_input, pitchnames, n_vocab):
    """ Generate notes from the neural network based on a sequence of notes """
    int_to_note = dict(enumerate(pitchnames))
    # pick a random sequence from the input as a starting point for the prediction
    start = numpy.random.randint(0, len(network_input)-1)
    pattern = network_input[start]
    prediction_output = []

    # generate 500 notes
    for _ in range(500):
        prediction_input = numpy.reshape(pattern, (1, len(pattern), 1))
        prediction_input = prediction_input / float(n_vocab)

        prediction = model.predict(prediction_input, verbose=0)
        index = numpy.argmax(prediction)
        prediction_output.append(int_to_note[index])

        # slide the window: append the prediction, drop the oldest note
        pattern.append(index)
        pattern = pattern[1:]

    return prediction_output
def create_midi(prediction_output):
    """ convert the output from the prediction to notes and create a midi file
    from the notes """
    output_notes = []
    offset = 0

    # create note and chord objects based on the values generated by the model
    for pattern in prediction_output:
        is_chord = ('.' in pattern) or pattern.isdigit()
        if is_chord:
            # a chord is encoded as dot-separated pitch numbers
            chord_members = []
            for current_note in pattern.split('.'):
                member = note.Note(int(current_note))
                member.storedInstrument = instrument.Piano()
                chord_members.append(member)
            new_chord = chord.Chord(chord_members)
            new_chord.offset = offset
            output_notes.append(new_chord)
        else:
            # otherwise the pattern is a single note name
            new_note = note.Note(pattern)
            new_note.offset = offset
            new_note.storedInstrument = instrument.Piano()
            output_notes.append(new_note)
        # increase offset each iteration so that notes do not stack
        offset += 0.5

    stream.Stream(output_notes).write('midi', fp='test_output.mid')
# Script entry point: only generate when run directly, not on import.
if __name__ == '__main__':
    generate()
|
{"hexsha": "8a61687b7f6503c949d63737a2503b73b288bebf", "size": 4884, "ext": "py", "lang": "Python", "max_stars_repo_path": "predict.py", "max_stars_repo_name": "NehaPendem/Mozart", "max_stars_repo_head_hexsha": "e16620ad0ec05f666b5e8a7255eee10cbea3c2dd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "predict.py", "max_issues_repo_name": "NehaPendem/Mozart", "max_issues_repo_head_hexsha": "e16620ad0ec05f666b5e8a7255eee10cbea3c2dd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "predict.py", "max_forks_repo_name": "NehaPendem/Mozart", "max_forks_repo_head_hexsha": "e16620ad0ec05f666b5e8a7255eee10cbea3c2dd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.1366906475, "max_line_length": 113, "alphanum_fraction": 0.6846846847, "include": true, "reason": "import numpy", "num_tokens": 1074}
|
import chainer
import chainer.links as L
import chainer.functions as F
import random
#import cupy as np#if gpu is used
import numpy as np
import codecs
from chainer.training import extensions,triggers
import pickle
import optuna
from pathlib import Path
import glob
import os
import time
import collections
import argparse
import sys
SIZE = 10000  # NOTE(review): not referenced anywhere in this chunk -- confirm before removing
EOS = 1  # vocabulary id of the end-of-sequence token
BOS = 0  # vocabulary id of the begin-of-sequence token
class EncoderDecoder(chainer.Chain):
    """Attention-based NStepLSTM encoder-decoder.

    Token embeddings are concatenated three at a time, so the LSTMs run on
    vectors of size ``3 * n_hidden`` and each decoder step emits scores for
    three output tokens at once (via ``W_F``).

    Args:
        n_layer: number of LSTM layers in both encoder and decoder.
        n_vocab: source vocabulary size.
        n_out: target vocabulary size.
        n_hidden: embedding size per token.
        dropout: NStepLSTM dropout ratio.
    """
    def __init__(self, n_layer, n_vocab, n_out, n_hidden, dropout):
        super(EncoderDecoder, self).__init__()
        with self.init_scope():
            self.embed_x = L.EmbedID(n_vocab, n_hidden)
            self.embed_y = L.EmbedID(n_out, n_hidden)
            self.encoder = L.NStepLSTM(
                n_layers=n_layer,
                in_size=n_hidden*3,
                out_size=n_hidden*3,
                dropout=dropout)
            self.decoder = L.NStepLSTM(
                n_layers=n_layer,
                in_size=n_hidden*3,
                out_size=n_hidden*3,
                dropout=dropout)
            self.W_C = L.Linear(2 * n_hidden, n_hidden)
            self.W_D = L.Linear(n_hidden, n_out)
            self.W_E = L.Linear(2 * n_hidden, n_out)
            # maps [decoder state; attention context] -> scores for 3 tokens
            self.W_F = L.Linear(2 * n_hidden * 3, n_out*3)
            self.n_hidden = n_hidden
            self.n_out = n_out
            # attention maps recorded during translation (for visualisation)
            self.attention_weight=[]
            self.attention_weight_one_eq = []

    def __call__(self, xs, ys):
        """Compute the softmax cross-entropy training loss for a minibatch.

        xs, ys: lists of int32 id arrays, one per sequence.
        """
        # add 3 eos markers (one per slot of a 3-token group)
        eos = self.xp.array([EOS], dtype=np.int32)
        ys_in = [F.concat((eos, y), axis=0) for y in ys]
        ys_in = [F.concat((eos, y), axis=0) for y in ys_in]
        ys_in = [F.concat((eos, y), axis=0) for y in ys_in]
        ys_out = [F.concat((y, eos), axis=0) for y in ys]
        ys_out = [F.concat((y, eos), axis=0) for y in ys_out]
        ys_out = [F.concat((y, eos), axis=0) for y in ys_out]

        # Both xs and ys_in are lists of arrays.
        exs = [self.embed_x(x) for x in xs]
        eys = [self.embed_y(y) for y in ys_in]

        # concatenate source embeddings three at a time into rows of
        # shape (1, n_hidden * 3); trailing tokens past the last full
        # triple are dropped
        exs_3_combine = []
        boolean_c = 0
        for x in exs:
            boolean_c = 0
            for i in range(len(x)):
                if i % 3 == 0 and i < len(x) - 2:
                    a = F.concat((x[i], x[i + 1]), axis=0)
                    b = F.concat((a, x[i + 2]), axis=0)
                    if boolean_c == 0:
                        c = b.__copy__()
                        c = F.reshape(c, (1, self.n_hidden * 3))
                        boolean_c = 1
                    else:
                        # NOTE(review): c is always 2-D here, so this first
                        # branch looks unreachable -- kept as-is.
                        if c.shape==(self.n_hidden,):
                            c = F.vstack([c, b])
                        else:
                            c = F.vstack([c, F.reshape(b,(1,self.n_hidden * 3))])
            exs_3_combine.append(c)

        # same triple-concatenation for the target-side embeddings
        eys_3_combine = []
        boolean_c = 0
        for x in eys:
            boolean_c = 0
            for i in range(len(x)):
                if i % 3 == 0 and i < len(x) - 2:
                    a_eys = F.concat((x[i], x[i + 1]), axis=0)
                    b_eys = F.concat((a_eys, x[i + 2]), axis=0)
                    if boolean_c == 0:
                        c_eys = b_eys.__copy__()
                        c_eys = F.reshape(c_eys, (1, self.n_hidden * 3))
                        boolean_c = 1
                    else:
                        if c_eys.shape==(self.n_hidden * 3,):
                            c_eys = F.vstack([c_eys, b_eys])
                        else:
                            c_eys = F.vstack([c_eys, F.reshape(b_eys,(1,self.n_hidden * 3))])
            eys_3_combine.append(c_eys)

        # hx:dimension x batchsize
        # cx:dimension x batchsize
        # yx:batchsize x timesize x dimension
        hx, cx, yx = self.encoder(None, None, exs_3_combine)
        _, _, os = self.decoder(hx, cx, eys_3_combine)

        loss = 0
        for o, y, ey in zip(os, yx, ys_out): #batch-wise_process
            op = self._contexts_vector(o,y)
            # BUG FIX: `n_out` was a free (undefined) variable here; the
            # per-token score width is self.n_out (W_F emits 3 * n_out).
            op_2 = F.reshape(op,(int(op.size/self.n_out),self.n_out))
            loss += F.softmax_cross_entropy(op_2, ey)
        loss /= len(yx)
        chainer.report({'loss': loss}, self)
        return loss

    def _contexts_vector(self, embedded_output, attention ):
        """Dot-product attention over the encoder states.

        For each decoder state, computes softmax attention weights over
        `attention` (the encoder outputs), records them for visualisation,
        and returns W_F([state; context]) rows stacked for all steps.
        """
        a = 0 # flag: first iteration builds b, later ones append to it
        attention_weight_one_eq_part = []
        for i in range(len(embedded_output)):
            one_hidden_vector = F.get_item(embedded_output, i)
            one_hidden_vector_rp = F.broadcast_to(one_hidden_vector, attention.shape)
            # unnormalized scores: dot product with every encoder state
            weight = attention.__mul__(one_hidden_vector_rp)
            weight = F.sum(weight, axis=1)
            # broadcast to 2 rows so softmax normalizes along the states axis
            weight = F.broadcast_to(weight, (2, int(weight.shape[0])))
            weight = F.softmax(weight)
            weight = F.get_item(weight, 0)
            weight = F.broadcast_to(weight, (1, int(weight.shape[0])))
            attention_weight_one_eq_part.append(chainer.cuda.to_cpu(weight.data))
            context = F.matmul(weight, attention)
            one_hidden_vector = F.broadcast_to(one_hidden_vector,(1, int(one_hidden_vector.shape[0])))
            if a == 0:
                b = F.concat((one_hidden_vector, context))
                a = 1
            else:
                c = F.concat((one_hidden_vector, context))
                b = F.concat((b, c), axis=0)
        self.attention_weight_one_eq.append(attention_weight_one_eq_part)
        return self.W_F(b)

    def _calculate_attention_layer_output(self, embedded_output, attention):
        """Alternative (vectorized) attention head using W_C/W_D.

        NOTE(review): not called anywhere in this chunk -- confirm before
        removing.
        """
        inner_prod = F.matmul(embedded_output, attention, transb=True)
        weights = F.softmax(inner_prod)
        contexts = F.matmul(weights, attention)
        concatenated = F.concat((contexts, embedded_output))
        new_embedded_output = F.tanh(self.W_C(concatenated))
        return self.W_D(new_embedded_output)

    def translate(self, xs, max_length=30):
        """Greedy decoding for a single source sequence `xs`."""
        with chainer.no_backprop_mode(), chainer.using_config("train", False):
            exs = self.embed_x(xs)
            hx, cx, yx = self.encoder(None, None, [exs])
            predicts = []
            eos = self.xp.array([EOS], dtype=np.int32)
            for y in yx:
                predict = []
                ys_in = [eos]
                for i in range(max_length):
                    eys = [self.embed_y(y) for y in ys_in]
                    _, _, os = self.decoder(hx, cx, eys)
                    # BUG FIX: was `self.__contexts_vector`, which Python
                    # name-mangles to _EncoderDecoder__contexts_vector and
                    # raised AttributeError at runtime.
                    op = self._contexts_vector(os[0], y)
                    word_id = int(F.argmax(F.softmax(op)).data)
                    if word_id == EOS: break
                    predict.append(word_id)
                    ys_in = [self.xp.array([word_id], dtype=np.int32)]
                predicts.append(np.array(predict))
            # NOTE(review): returns only the last sequence's prediction even
            # though `predicts` accumulates all of them -- kept for
            # compatibility; confirm intent.
            return predict

    def _translate_three_word(self, wid, hidden_states, cell_states, attentions):
        """Run one decoder step on a 3-token group `wid`.

        Returns (softmax scores for 3 tokens, new hidden, new cell).
        """
        y = np.array(wid, dtype=np.int32)
        embedded_y = self.embed_y(y)
        a_eys = F.concat((embedded_y[0], embedded_y[1]), axis=0)
        b_eys = F.concat((a_eys, embedded_y[2]), axis=0)
        c_eys = F.reshape(b_eys, (1, self.n_hidden * 3))
        hidden_states, cell_states, embedded_outputs = self.decoder(hidden_states, cell_states, [c_eys])
        output = self._contexts_vector(embedded_outputs[0], attentions[0])
        output_3words = F.reshape(output, (int(output.size / self.n_out), self.n_out))
        output_3words = F.softmax(output_3words)
        return output_3words, hidden_states, cell_states

    def translate_with_beam_search(self, sentence, max_length=30, beam_width=3):
        """Beam-search decoding for one source `sentence`.

        Returns the best token-id sequence (without the leading/trailing
        EOS triples).
        """
        with chainer.no_backprop_mode(), chainer.using_config('train', False):
            exs = [self.embed_x(sentence)]
            # same triple-concatenation of embeddings as in __call__
            exs_3_combine = []
            boolean_c = 0
            for x in exs:
                boolean_c = 0
                for i in range(len(x)):
                    if i % 3 == 0 and i < len(x) - 2:
                        a = F.concat((x[i], x[i + 1]), axis=0)
                        b = F.concat((a, x[i + 2]), axis=0)
                        if boolean_c == 0:
                            c = b.__copy__()
                            c = F.reshape(c, (1, self.n_hidden * 3))
                            boolean_c = 1
                        else:
                            if c.shape == (self.n_hidden * 3,):
                                c = F.vstack([c, b])
                            else:
                                c = F.vstack([c, F.reshape(b, (1, self.n_hidden * 3))])
                exs_3_combine.append(c)
            hidden_states, cell_states, attentions = self.encoder(None, None, exs_3_combine)

            # heaps[i]: candidate (score, translation, hidden, cell) tuples
            # of length-i prefixes; score is accumulated negative log-prob
            heaps = [[] for _ in range(max_length + 1)]
            heaps[0].append((0, [EOS, EOS, EOS], hidden_states, cell_states))
            solution = []
            solution_score = 1e8
            for i in range(max_length):
                heaps[i] = sorted(heaps[i], key=lambda t: t[0])[:beam_width]
                for score, translation, i_hidden_states, i_cell_states in heaps[i]:
                    wid = translation[-3:]
                    output, new_hidden_states, new_cell_states = \
                        self._translate_three_word(wid, i_hidden_states, i_cell_states, attentions)
                    next_translation = translation
                    for j in range(len(output)):
                        for next_wid in np.argsort(output[j].data)[::-1]:
                            if output[j].data[next_wid] < 1e-6:
                                break
                            next_score = score - np.log(output[j].data[next_wid])
                            if next_score > solution_score:
                                break
                            next_translation = next_translation + [next_wid]
                            break
                    next_item = (next_score, next_translation, new_hidden_states, new_cell_states)
                    if next_translation[-3:] == [EOS,EOS,EOS]:
                        if next_score < solution_score:
                            solution = translation[3:]
                            solution_score = next_score
                    else:
                        heaps[i + 1].append(next_item)
            self.attention_weight.append(self.attention_weight_one_eq)
            self.attention_weight_one_eq = []
            return solution
class Data(chainer.dataset.DatasetMixin):
    """Dataset of (integrand, primitive) token-id sequence pairs.

    Reads a vocabulary file with one ``id,token`` entry per line plus two
    text files of space-separated tokens, and exposes:

    - ``self.vocab``: token -> id mapping
    - ``self.vocab_inv``: id -> token mapping
    - ``self.sentence``: list of (question_ids, answer_ids) int32 array pairs

    NOTE(review): reads the module-level ``args`` namespace (set in main()),
    so instances can only be created after argument parsing.
    """
    def __init__(self,vocab,integrand_dataset,primitive_dataset):
        file_path_combined_words = vocab
        file_path_integrand = integrand_dataset
        file_path_primitive = primitive_dataset
        # Build the token<->id maps from the vocabulary file.
        f_words = codecs.open(file_path_combined_words, 'r', 'utf8')
        line = f_words.readline()
        id_to_char = {}
        char_to_id = {}
        while line:
            l = line.strip().split(',')
            if len(l) == 2:
                char_to_id[l[1]] = int(l[0])
                id_to_char[(int(l[0]))] = l[1]
            elif len(l) != 2:  # the token itself contains a comma (currently only a single comma is supported)
                char_to_id[l[1] + ',' + l[2]] = int(l[0])
                id_to_char[(int(l[0]))] = ','
            line = f_words.readline()
        f_words.close()
        self.vocab = char_to_id
        self.train_data = []
        self.test_data = []
        # lengths of the integrand and primitive-function token sequences
        questions, answers = [], []
        maximum_len_questions = 0
        maximum_len_answers = 0
        for line in open(file_path_integrand, 'r'):
            questions.append(line)
            if len(line) > maximum_len_questions:
                maximum_len_questions = len(line)
        for line in open(file_path_primitive, 'r'):
            answers.append(line)
            if len(line) > maximum_len_answers:
                maximum_len_answers = len(line)
        # Tokenise and convert each line to a list of vocabulary ids.
        x = []
        t = []
        for i, sentence in enumerate(questions):
            line_question_list = sentence.strip().split(' ')
            # drop empty strings from repeated separators
            line_question_list = [x for x in line_question_list if x]
            x.append([char_to_id[c] for c in line_question_list])
        for i, sentence in enumerate(answers):
            line_answer_list = sentence.strip().split(' ')
            line_answer_list = [x for x in line_answer_list if x]
            t.append([char_to_id[c] for c in line_answer_list])
        # For the "subtree" study the inverse vocabulary is re-read from a
        # corrected-order token file (char_to_id is intentionally left as-is).
        if not args.integrated_model and (args.study_name=="MLP_cupy_MedianPruner_epoch30_subtree_complete_correct_continue"):
            f_word_for_polish = codecs.open("../dataset/LSTM_subtree_polish_token_correct_order.txt", 'r', 'utf8')
            line_polish = f_word_for_polish.readline()
            id_to_char = {}
            while line_polish:
                l = line_polish.strip().split(',')
                if len(l) == 2:
                    #char_to_id[l[1]] = int(l[0])
                    id_to_char[(int(l[0]))] = l[1]
                elif len(l) != 2:
                    #char_to_id[l[1] + ',' + l[2]] = int(l[0])
                    id_to_char[(int(l[0]))] = ','
                line_polish = f_word_for_polish.readline()
            f_word_for_polish.close()
        if args.integrated_model and (args.study_name=="MLP_cupy_MedianPruner_epoch30_subtree_complete_correct_continue"):
            f_word_for_polish = codecs.open("../LSTM_subtree/dataset/LSTM_subtree_polish_token_correct_order.txt", 'r', 'utf8')
            line_polish = f_word_for_polish.readline()
            id_to_char = {}
            while line_polish:
                l = line_polish.strip().split(',')
                if len(l) == 2:
                    #char_to_id[l[1]] = int(l[0])
                    id_to_char[(int(l[0]))] = l[1]
                elif len(l) != 2:
                    #char_to_id[l[1] + ',' + l[2]] = int(l[0])
                    id_to_char[(int(l[0]))] = ','
                line_polish = f_word_for_polish.readline()
            f_word_for_polish.close()
        # Pair each question with its answer as int32 arrays.
        self.sentence = []
        for i in range(len(x)):
            self.sentence.append((np.array(x[i]).astype(np.int32), np.array(t[i]).astype(np.int32)))
        self.vocab_inv = {}
        self.vocab_inv = id_to_char
def flatten(nested_list):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    flat = []
    for inner_list in nested_list:
        flat.extend(inner_list)
    return flat
def convert(batch, device):
    """Converter for chainer updaters/evaluators.

    Moves every (x, y) pair of a minibatch to `device` and returns a dict
    with the sources under 'xs' and the targets under 'ys'.
    """
    xs, ys = [], []
    for source, target in batch:
        xs.append(chainer.dataset.to_device(device, source))
        ys.append(chainer.dataset.to_device(device, target))
    return {'xs': xs, 'ys': ys}
def objective(trial):
    """Objective function for Optuna hyperparameter optimization.

    Trains an EncoderDecoder on one cross-validation fold and returns the
    negative exact-match accuracy on the validation fold (negated so that
    Optuna can minimize it).

    Args:
        trial: optuna.trial.Trial

    Returns:
        float: negative validation accuracy for this trial.

    NOTE(review): `Data()` is called without arguments although
    Data.__init__ requires (vocab, integrand_dataset, primitive_dataset),
    so this function raises TypeError as written -- it appears unused by
    main(), which only calls evaluate_results(); confirm before relying
    on it.
    NOTE(review): relies on module-level names `batchsize`, `epochs`,
    `MODEL_DIRECTORY`, `GPU`, `extensions`, `triggers` being set/imported
    elsewhere; they are assigned as locals in main() and are not visible
    here.
    """
    mlp = generate_model(trial)
    # fixed seed so every trial sees identical data splits
    seed = 1984
    random.seed(seed)
    np.random.seed(seed)
    data = Data()
    global n_vocab
    n_vocab = len(data.vocab)
    global n_out
    n_out= len(data.vocab)
    # 5-fold outer split (test) and 10-fold inner split (train/validation)
    dataset = chainer.datasets.get_cross_validation_datasets_random(data.sentence, 5, seed=1984)
    k_fold_for_test = 0
    train_and_validation = dataset[k_fold_for_test][0]
    test = dataset[k_fold_for_test][1]
    divide_train_and_validation = chainer.datasets.get_cross_validation_datasets_random(train_and_validation, 10, seed=1984)
    k_fold_for_train_valid = 5
    train = divide_train_and_validation[k_fold_for_train_valid][0]
    valid = divide_train_and_validation[k_fold_for_train_valid][1]
    train_iter = chainer.iterators.SerialIterator(train, batchsize, shuffle=False)
    valid_iter = chainer.iterators.SerialIterator(valid, batchsize, repeat=False, shuffle=False)
    optimizer = create_optimizer(trial,mlp)
    optimizer.setup(mlp)
    updater = chainer.training.StandardUpdater(train_iter, optimizer, converter=convert, device=0)
    # stop early when validation loss stops improving
    stop_trigger = chainer.training.triggers.EarlyStoppingTrigger(
        monitor='validation/main/loss', check_trigger=(300, 'epoch'),
        max_trigger=(epochs, 'epoch'))
    trainer = chainer.training.Trainer(updater, stop_trigger,
                                       out=MODEL_DIRECTORY/f"model_{trial.number}")
    eval_model = mlp.copy()
    trainer.extend(
        chainer.training.extensions.Evaluator(valid_iter, eval_model, converter=convert, device=0))
    log_report_extention = chainer.training.extensions.LogReport(trigger=(10,'epoch'),log_name=None)
    trainer.extend(log_report_extention)
    trainer.extend(
        chainer.training.extensions.PrintReport(['epoch', 'main/loss', 'main/accuracy', 'validation/main/loss']))
    trainer.extend(extensions.LogReport(trigger=(1, 'epoch')))
    # snapshot the model whenever validation loss hits a new minimum
    trigger = triggers.MinValueTrigger('validation/main/loss', trigger=(1, 'epoch'))
    trainer.extend(extensions.snapshot_object(eval_model,\
        filename='best_loss_model_epoch_{.updater.epoch}'),\
        trigger=trigger)
    trainer.extend(extensions.snapshot(filename='latest_snapshot'),trigger=(1, 'epoch'))
    trainer.run()
    if GPU >= 0:
        mlp.to_gpu() #if gpu is used
    else:
        mlp.to_cpu()
    loss = log_report_extention.log[-1]['validation/main/loss']
    # exact-match accuracy over the validation fold via beam-search decoding
    count = 0
    for source, target in valid:
        #predict = mlp.translate(np.array(source,dtype=np.int32))
        start = time.time()
        predict = mlp.translate_with_beam_search(np.array(source, dtype=np.int32),max_length=100, beam_width=1)
        elapsed_time = time.time() - start
        source = ' '.join([data.vocab_inv[int(w)] for w in source if w != EOS and w != BOS])
        predict = ' '.join([data.vocab_inv[int(w)] for w in predict if w != EOS and w != BOS])
        target = ' '.join([data.vocab_inv[int(w)] for w in target if w != EOS and w != BOS])
        print("-----")
        print("source:", str(source))
        print("predict:", str(predict))
        print("elapsed_time:",str(elapsed_time))
        print("target:", str(target))
        if predict == target:
            count += 1
    print('- accuracy:',str(-(count/len(valid))))
    return -(count/len(valid))
def generate_model(trial):
    """Build an EncoderDecoder with Optuna-suggested hyperparameters.

    Args:
        trial: optuna.trial.Trial used to sample n_hidden, n_layer and
            dropout.

    Returns:
        EncoderDecoder: freshly constructed (untrained) model.

    NOTE(review): `Data()` is called without arguments although
    Data.__init__ requires (vocab, integrand_dataset, primitive_dataset);
    this raises TypeError as written. Only used from objective(), which
    main() does not call -- confirm before use.
    """
    # Suggest hyperparameters
    data = Data()
    # publish vocabulary sizes as module globals for other functions
    global n_vocab
    n_vocab = len(data.vocab)
    global n_out
    n_out= len(data.vocab)
    n_hidden = trial.suggest_int("n_hidden", 100, 1024)
    n_layer = trial.suggest_int("n_layer", 1, 3)
    dropout = trial.suggest_uniform('dropout', 0.0, 0.2)
    print('--')
    print(f"Trial: {trial.number}")
    print('Current hyperparameters:')
    print(f" The number of layers: {n_layer}")
    print(f" the dimensions of hidden vector: {n_hidden}")
    print(f" the ratio for dropout: {dropout}")
    print('--')
    mlp = EncoderDecoder(n_layer, n_vocab, n_out, n_hidden, dropout)
    return mlp
def create_optimizer(trial,model):
    """Create an Optuna-tuned chainer optimizer for `model`.

    Samples the optimizer family (Adam or MomentumSGD) plus its learning
    rate, a weight-decay coefficient and a gradient-clipping threshold.

    Args:
        trial: optuna.trial.Trial used to suggest hyperparameters.
        model: chainer.Chain the optimizer is set up on.

    Returns:
        A chainer optimizer with WeightDecay and GradientClipping hooks
        registered.
    """
    # (was `optimizer_name = optimizer_name = trial...` -- duplicated target)
    optimizer_name = trial.suggest_categorical('optimizer', ['Adam', 'MomentumSGD'])
    if optimizer_name == 'Adam':
        adam_alpha = trial.suggest_loguniform('adam_alpha', 1e-5, 1e-1)
        optimizer = chainer.optimizers.Adam(alpha=adam_alpha)
    else:
        momentum_sgd_lr = trial.suggest_loguniform('momentum_sgd_lr', 1e-5, 1e-1)
        optimizer = chainer.optimizers.MomentumSGD(lr = momentum_sgd_lr)
    weight_decay = trial.suggest_loguniform('weight_decay', 1e-10, 1e-3)
    gradient_clipping = trial.suggest_uniform('gradient_clipping', 0, 10)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(weight_decay))
    # BUG FIX: the GradientClipping hook was instantiated but never
    # registered with the optimizer, so the sampled `gradient_clipping`
    # value silently had no effect.
    optimizer.add_hook(chainer.optimizer_hooks.GradientClipping(gradient_clipping))
    return optimizer
def main():
    """Parse CLI arguments, load the saved Optuna study and evaluate the
    first trial's model on the test dataset via evaluate_results().

    Side effects: sets the module globals `args`, `GPU` and (when a GPU is
    requested) rebinds the module-global `np` to cupy, and extends sys.path.
    """
    parser = argparse.ArgumentParser(
        description='LSTM_subtree_model')
    parser.add_argument('--batchsize', '-b', type=int, default=128,
                        help='Number of equations in each mini-batch')
    parser.add_argument('--epoch', '-e', type=int, default=300,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu', '-g', type=int, default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--token_dataset', '-T',type=str, default='../dataset/LSTM_subtree_polish_token.txt')
    parser.add_argument('--Integrand_dataset', '-i',type=str, default='../dataset/LSTM_subtree_polish_test_Integrand.txt')
    parser.add_argument('--Primitive_dataset', '-p',type=str, default='../dataset/LSTM_subtree_polish_test_Primitive.txt')
    parser.add_argument('--study_name', '-s',type=str, default='MLP_cupy_MedianPruner_epoch30_subtree_complete_correct_continue')
    parser.add_argument('--learned_model', '-m',type=str, default='../model/LSTM_subtree_polish_best_model')
    parser.add_argument('--integrated_model', action='store_true',help='use as a component of Integrated All models')
    global args
    args = parser.parse_args()
    if args.gpu >=0:
        # NOTE(review): rebinds the module-global `np` to cupy so all
        # numpy-style calls elsewhere in this module run on the GPU.
        global np
        import cupy as np
    global GPU
    GPU = args.gpu
    sys.path.append('../dataset/')
    sys.path.append('../model')
    vocab = args.token_dataset
    integrand_dataset = args.Integrand_dataset
    primitive_dataset = args.Primitive_dataset
    STUDY_NAME = args.study_name
    # NOTE(review): N_TRIALS, PRUNER_INTERVAL, epochs and batchsize are
    # locals here and are not visible to objective(), which references
    # names of the same spelling.
    N_TRIALS = 100
    PRUNER_INTERVAL = 20
    epochs = args.epoch
    batchsize = args.batchsize
    MODEL_DIRECTORY = Path(args.learned_model)
    #study = optuna.create_study(study_name = STUDY_NAME,storage=f"sqlite:///{STUDY_NAME}.db",
    #                            load_if_exists=True, pruner=optuna.pruners.MedianPruner())
    study = optuna.load_study(study_name = STUDY_NAME,storage=f"sqlite:///{STUDY_NAME}.db")
    #print('=== Best Trial ===')
    if STUDY_NAME == "MLP_cupy_MedianPruner_epoch30_subtree_complete_correct_continue":
        print('=== LSTM subtree polish model ===')
    if STUDY_NAME == "MLP_cupy_MedianPruner_epoch30_subtree_Integrand_reverse_polish_Primitive_polish_continue":
        print('=== LSTM subtree IRPP model ===')
    # print(study.trials[0])
    evaluate_results(study.trials[0],vocab,integrand_dataset,primitive_dataset,MODEL_DIRECTORY)
def evaluate_results(trial,vocab,integrand_dataset,primitive_dataset,MODEL_DIRECTORY):
    """Evaluate a trained model on the full dataset and print per-equation
    predictions plus a summary exact-match accuracy.

    Args:
        trial: optuna.trial (frozen) whose params define the architecture.
        vocab: path of the vocabulary file.
        integrand_dataset: path of the integrand token file.
        primitive_dataset: path of the primitive token file.
        MODEL_DIRECTORY: glob pattern / path of saved model snapshots; the
            most recently created match is loaded.

    Returns:
        None (results are printed).
    """
    trial_number = 0#trial.number
    #print("triaL_number"+str(trial_number))
    data = Data(vocab,integrand_dataset,primitive_dataset)
    n_vocab = len(data.vocab)
    n_out = len(data.vocab)
    # rebuild the network with the trial's tuned hyperparameters
    n_hidden = trial.params['n_hidden'] #384
    mlp = EncoderDecoder(trial.params['n_layer'] #4
                         , n_vocab, n_out,
                         trial.params['n_hidden']#384
                         ,
                         trial.params['dropout']#0.17721476674236888
                         )
    snapshots = glob.glob(str(MODEL_DIRECTORY))
    latest_snapshot = max(snapshots, key=os.path.getctime) # The latest snapshot of the trial
    print(f"Loading: {latest_snapshot}")
    chainer.serializers.load_npz(latest_snapshot,mlp)
    #'/home/kubota/LSTM_subtree/models/MLP_cupy_MedianPruner_epoch30_subtree_complete_correct_continue/model_{0}/best_loss_model_epoch_{1}'.format(trial_number,147), mlp)#, path = 'updater/model:main/predictor/')
    seed = 1984
    random.seed(seed)
    np.random.seed(seed)
    test = data.sentence
    if GPU >= 0:
        mlp.to_gpu() #if gpu is used
    else:
        mlp.to_cpu()
    count = 0
    index_num = 0
    wrong_eq_list = []
    list_result_for_attention = []
    for source, target in test:
        target_original = test[index_num][1]
        start = time.time()
        # beam_width=1 reduces the beam search to greedy decoding
        predict = mlp.translate_with_beam_search(np.array(source, dtype=np.int32),max_length=100, beam_width=1)
        elapsed_time = time.time() - start
        source_str_list = [data.vocab_inv[int(w)] for w in source]
        source = ' '.join([data.vocab_inv[int(w)] for w in source])
        predict_str_list = [data.vocab_inv[int(w)] for w in predict]
        predict = ' '.join([data.vocab_inv[int(w)] for w in predict])
        target = ' '.join([data.vocab_inv[int(w)] for w in target])
        list_result_for_attention.append((source_str_list,predict_str_list))
        print("-----")
        if not args.integrated_model:
            print("eq_num:",str(index_num))
        print("Integrand(Input):", str(source))
        print("Primitive(Output):", str(predict))
        print("Correct Answer:", str(target))
        print("elapsed_time:",str(elapsed_time))
        if predict == target:
            count += 1
            #print("correct:"+str(index_num))
            print("Correct!")
            index_num+=1
        else:
            #print("wrong:"+str(index_num))
            print("Wrong")
            wrong_eq_list.append(index_num)
            index_num+=1
    #print('- accuracy:',str(-(count/len(test))))
    #with open('result_for_attention_12122_fold_{0}_test.pickle'.format(k_fold_for_train_valid),'wb') as f:
    #    pickle.dump(list_result_for_attention,f)
    #with open('attention_weight_12122_fold_{0}_test.pickle'.format(k_fold_for_train_valid),'wb') as f:
    #    pickle.dump(mlp.attention_weight,f)
    if not args.integrated_model:
        print("---Result Summary---")
        print("Total correct equation num:{}".format(str(count)))
        print("len(test):{}".format(len(test)))
        print("Complete Correct Answer Rate:{}%".format(100*count/len(test)))
        #print("wrong_eq_list:{}".format(wrong_eq_list))
# Script entry point: parse arguments and run the evaluation.
if __name__ == '__main__':
    main()
|
{"hexsha": "65ba142a104b78ad64785890cc0c0076836853e7", "size": 25716, "ext": "py", "lang": "Python", "max_stars_repo_path": "LSTM_subtree/src/LSTM_subtree_model.py", "max_stars_repo_name": "funalab/SymbolicIntegrator", "max_stars_repo_head_hexsha": "d5bc4acbe2a9d7e1b14d72bd976ec9b3e2bab653", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "LSTM_subtree/src/LSTM_subtree_model.py", "max_issues_repo_name": "funalab/SymbolicIntegrator", "max_issues_repo_head_hexsha": "d5bc4acbe2a9d7e1b14d72bd976ec9b3e2bab653", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "LSTM_subtree/src/LSTM_subtree_model.py", "max_forks_repo_name": "funalab/SymbolicIntegrator", "max_forks_repo_head_hexsha": "d5bc4acbe2a9d7e1b14d72bd976ec9b3e2bab653", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.8839427663, "max_line_length": 216, "alphanum_fraction": 0.5792891585, "include": true, "reason": "import numpy,import cupy", "num_tokens": 6219}
|
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
from functools import lru_cache
import math
import os
import yaml
import numpy as np
import torch
import torch.nn.functional as Fu
from pytorch3d.renderer import cameras
from pytorch3d.transforms import so3
from visdom import Visdom
import c3dpo
from hypercolumnet import HyperColumNet
from config import get_default_args
from tools import model_io
from tools import so3 as so3int # TODO: move random 2d rot elsewhere; use 6d from pt3d
from tools import vis_utils
import tools.eval_functions as eval_func
import tools.functions as func
from tools.loss_models import AppearanceLoss, GaussianLayer
from tools import utils
from tools.tensor_accumulator import TensorAccumulator
def conv1x1(in_planes, out_planes, init='no', cnv_args=None, std=0.01):
    """1x1 convolution with optional weight initialisation.

    Args:
        in_planes: number of input channels.
        out_planes: number of output channels.
        init: 'no' keeps torch's default initialisation; 'normal0.01'
            draws weights from N(0, std) and zeroes the bias.
        cnv_args: extra kwargs forwarded to torch.nn.Conv2d; defaults to
            {'bias': True, 'kernel_size': 1}.
        std: standard deviation used by the 'normal0.01' scheme.

    Returns:
        torch.nn.Conv2d

    Raises:
        ValueError: if `init` is not a supported scheme.
    """
    # FIX: avoid a mutable default argument (shared dict across calls)
    if cnv_args is None:
        cnv_args = {'bias': True, 'kernel_size': 1}
    cnv = torch.nn.Conv2d(in_planes, out_planes, **cnv_args)
    # init weights ...
    if init == 'no':
        pass
    elif init == 'normal0.01':
        cnv.weight.data.normal_(0., std)
        if cnv.bias is not None:
            cnv.bias.data.fill_(0.)
    else:
        # FIX: was `assert False`, which is stripped under `python -O`
        # and names neither the parameter nor the bad value
        raise ValueError(f'unknown init scheme: {init!r}')
    return cnv
# Module that predicts shape and texture parameters, along with rotation
class GlobalHead(torch.nn.Module):
    """Predicts per-image global codes from a feature map.

    Three independent 1x1-conv heads (optionally preceded by a shared
    conv + norm + ReLU layer) produce geometry coefficients, texture
    coefficients and a camera-rotation code.
    """
    def __init__(
        self,
        input_channels,
        alpha_geom_size=0,
        alpha_tex_size=0,
        camera_code_size=0,
        add_shared_layer=True,
        glob_inst_norm=False,
    ):
        super(GlobalHead, self).__init__()

        # nothing to predict at all -> leave the module empty
        if not (alpha_tex_size > 0 or alpha_geom_size >= 0 or camera_code_size > 0):
            return

        def fc_head(dimout):
            # 1x1 conv acting as a fully-connected layer over the feature map
            return conv1x1(input_channels, dimout, init='normal0.01')

        def degenerate_head(feat):
            # a conv with 0 output channels is unsupported; emulate it with
            # an empty tensor of the right batch size
            return feat.new_empty(feat.size()[0], 0, 1, 1)

        # optional layer shared by all global heads
        self.shared_layer = None
        if add_shared_layer:
            if glob_inst_norm:
                norm_layer = torch.nn.InstanceNorm2d(input_channels)
            else:
                norm_layer = torch.nn.BatchNorm2d(input_channels)
            self.shared_layer = torch.nn.Sequential(
                fc_head(input_channels),
                norm_layer,
                torch.nn.ReLU(),
            )

        if alpha_geom_size > 0:
            self.alpha_geom_layer = fc_head(alpha_geom_size)
        elif alpha_geom_size == 0:
            self.alpha_geom_layer = degenerate_head
        else:
            self.alpha_geom_layer = None

        self.alpha_tex_layer = fc_head(alpha_tex_size) if alpha_tex_size > 0 else None
        self.rot_layer = fc_head(camera_code_size) if camera_code_size else None

    def forward(self, feat):
        """Return (geom_coeffs, tex_coeffs, rot_code); each entry is None
        when the corresponding head was not constructed."""
        if self.shared_layer is not None:
            feat = self.shared_layer(feat)
        outputs = []
        for head in (self.alpha_geom_layer, self.alpha_tex_layer, self.rot_layer):
            outputs.append(head(feat)[:, :, 0, 0] if head is not None else None)
        return tuple(outputs)
class Model(torch.nn.Module):
    def __init__( self,
                  # configuration sub-dicts for the trunk net and the
                  # appearance loss (filled with each class's defaults)
                  TRUNK = get_default_args(HyperColumNet),
                  APPEARANCE_LOSS = get_default_args(AppearanceLoss),
                  nrsfm_exp_path = '',
                  huber_scaling_basis = 0.01,
                  huber_scaling_repro = 0.01,
                  photo_min_k = 6,
                  photo_reenact = False,
                  repro_loss_min_ray_length = 0.0,
                  app_mask_image = False,
                  detach_app = True,
                  uv_model_use_bn = True,
                  uv_model_l2_norm = False,
                  sampled_sil_n_samples = 1000,
                  sampled_sph_chamfer = 0,
                  spherical_embedding_radius = 1.,
                  c3dpo_flipped=True,
                  reparametrize_nrsfm_mean = True,
                  scale_aug_range = 0.2,
                  t_aug_range = 0.02,
                  rot_aug_range = 3.14/12.,
                  custom_basis_size = -1,
                  n_images_for_app_model = -1,
                  min_depth = 0.,
                  argmin_translation_min_depth = 0.,
                  argmin_translation_ray_projection = True,
                  ray_reprojection = False,
                  dilate_basis_loss = 0.,
                  EMBED_DB = get_default_args(TensorAccumulator),
                  embed_db_eval = False,
                  app_model_mask_gt = False,
                  loss_weights = {
                      'loss_basis': 1.,
                      'loss_alpha': 0.,
                      'loss_rotation': 0.,
                      'loss_repro': 0.0,
                      'loss_vgg': 0.0,
                      'loss_sph_emb_to_cam': 0.0,
                      'loss_sph_sample_mask': 0.0,
                      'loss_vgg_app': 0.0,
                      'loss_l1_app': 0.0,
                      'loss_ssim_app': 0.0,
                      'loss_repro_2d': 0.0,
                      'loss_repro_ray': 0.0,
                  },
                  log_vars=[ 'objective',
                             'loss_basis',
                             'loss_alpha',
                             'loss_rotation',
                             'loss_repro',
                             'loss_repro_2d',
                             'loss_repro_ray',
                             'loss_vgg',
                             'loss_sph_emb_to_cam',
                             'loss_sph_sample_mask',
                             'loss_vgg_app',
                             'loss_l1_app',
                             'loss_ssim_app',
                             'sig_avg',
                             # depth error metrics
                             'pclerr_dist',
                             ],
                  **kwargs ):
        """Assemble the full C3DM-style model.

        Loads and freezes a pretrained NR-SFM (C3DPO) model, builds the
        hypercolumn trunk, the global prediction heads, the optional
        texture model, the geometry-deformation MLP, the appearance loss
        and the embedding database accumulator. All constructor arguments
        are auto-assigned to attributes of the same name on `self`.
        """
        super(Model, self).__init__()
        # autoassign constructor params to self
        utils.auto_init_args(self)
        # NOTE(review): this parses as `(not uv_model_use_bn) and
        # uv_model_l2_norm`, i.e. it requires BN off AND L2-norm on;
        # confirm that is the intended constraint (the message only
        # mentions BN).
        assert not uv_model_use_bn and uv_model_l2_norm, 'Do not use BN UV network!'
        self._load_and_fix_nrsfm()
        self.alpha_bias = None
        self.basis_size = custom_basis_size if custom_basis_size >= 0 else self.nrsfm_model.shape_basis_size
        if self.basis_size == self.nrsfm_model.shape_basis_size:
            # will be able to compute basis matching loss
            basis = torch.cat((
                self.nrsfm_model.shape_layer.bias.data.view(3, -1, 1),
                self.nrsfm_model.shape_layer.weight.data.view(3, -1, self.basis_size),
            ), dim=2)
            # (basis_size+1, 3, n_keypoints), pinned to GPU 0
            self.nrsfm_model_basis = basis.permute(2,0,1).detach().cuda(0)
            self.alpha_bias = self.nrsfm_model.alpha_layer.bias[None,:,None,None,None].cuda(0)
        # trunk always predicts 3 channels per basis field
        TRUNK['dimout'] = 3
        self.trunk = HyperColumNet(**TRUNK)
        self._make_glob_layers()
        if self.trunk.dimout_glob > 0:
            self._make_texture_model()
        self._make_geom_deformation_model()
        # appearance loss
        self.appearance_loss = AppearanceLoss(**APPEARANCE_LOSS)
        # init the embed database
        EMBED_DB['db_dim'] = TRUNK['dimout']
        self.embed_db = TensorAccumulator(**EMBED_DB)
    def _load_and_fix_nrsfm(self):
        """Load the pretrained NR-SFM (C3DPO) model and freeze it.

        Disables its augmentation/canonicalization switches, raises its
        minimum perspective depth to at least `self.min_depth`, optionally
        reparametrizes the mean shape, caches the mean-shape radius, and
        turns off gradients for all of its parameters.
        """
        self.nrsfm_model = load_nrsfm_model(self.nrsfm_exp_path)
        self.nrsfm_model.z_augment = False
        self.nrsfm_model.z_equivariance = False
        self.nrsfm_model.canonicalization.use = False
        self.nrsfm_model.perspective_depth_threshold = \
            max(self.nrsfm_model.perspective_depth_threshold, self.min_depth)
        # remember the original rescale so it can be re-derived per-image later
        self.nrsfm_model_kp_rescale = float(self.nrsfm_model.keypoint_rescale)
        if self.reparametrize_nrsfm_mean:
            self.nrsfm_model.reparametrize_mean_shape()
        self.nrsfm_mean_radius = self._get_nrsfm_mean_radius()
        # freeze: the NR-SFM model only supervises, it is not trained here
        for prm in self.nrsfm_model.parameters():
            prm.requires_grad = False
        self.nrsfm_model_basis = None
        self.projection_type = self.nrsfm_model.projection_type
        assert self.nrsfm_model.keypoint_rescale == 1.0 or self.projection_type == 'orthographic'
def _make_glob_layers(self):
indim = self.trunk.get_last_layer_numchannels()
# TODO: move the relevant config params from trunk
dimout_alpha_tex = self.trunk.dimout_glob
dimout_alpha_geom = self.basis_size
self.global_head = GlobalHead(
indim,
dimout_alpha_geom,
dimout_alpha_tex,
6,
glob_inst_norm=self.trunk.glob_inst_norm,
)
def _make_texture_model(self):
# make MLP mapping basis vectors + app encoding to colors
app_dim = 3 + self.trunk.dimout_glob
app_layers = c3dpo.make_trunk(
dim_in=app_dim,
n_fully_connected=512,
n_layers=3,
use_bn=self.uv_model_use_bn,
l2_norm=self.uv_model_l2_norm,
)
app_layers.append(torch.nn.Conv2d(512, 3, 1))
self.app_model = torch.nn.Sequential(*app_layers)
    def _make_geom_deformation_model(self):
        """Create the MLP that predicts dense delta fields.

        Maps a 3-channel embedding to (basis_size+1)*3 channels: one mean
        field plus basis_size basis vector fields. Optionally re-initialises
        the final layer with the trunk's configured std/bias.
        """
        delta_layers = c3dpo.make_trunk(
            dim_in=3,
            n_fully_connected=512,
            n_layers=3,
            use_bn=self.uv_model_use_bn,
            l2_norm=self.uv_model_l2_norm,
        )
        # one 3D field for the mean plus one per basis vector
        dim_out = (self.basis_size+1)*3
        delta_layers.append( torch.nn.Conv2d(512, dim_out, 1) )
        if self.trunk.final_std != 0.01:
            # custom initialisation of the last layer
            ldelta = delta_layers[-1]
            ldelta.weight.data = \
                ldelta.weight.data.normal_(0., self.trunk.final_std)
            ldelta.bias.data = \
                ldelta.bias.data.fill_(self.trunk.final_bias)
            print('deltanet: final bias = %1.2e, final std=%1.2e' % \
                (ldelta.bias.data.mean(),
                 ldelta.weight.data.std())
            )
        # delta vectors predicted from the mean vectors
        self.delta_model = torch.nn.Sequential(*delta_layers)
def _get_nrsfm_mean_radius(self):
mu = self.nrsfm_model.get_mean_shape().cuda().detach()
mumu = mu.mean(dim=1, keepdim=True)
return ((mu - mumu) ** 2).mean() ** 0.5
@lru_cache()
def _get_image_grid(self, image_size, grid_size):
imgrid = func.image_meshgrid( ((0, image_size[0]), (0, image_size[1])),
grid_size )
imgrid = imgrid[[1,0]] # convert from yx to xy
return imgrid
    def _get_distance_from_grid(self, predicted_coord, image_size,
                                masks=None, K=None, ray_reprojection=True):
        """Huber reprojection error between predicted 3D points and the pixel grid.

        Args:
            predicted_coord: (ba, 3, H, W) predicted camera-frame points.
            image_size: (height, width) of the target image.
            masks: optional (ba, 1, H, W) weights restricting the error.
            K: optional camera calibration matrix.
            ray_reprojection: if True, compare each 3D point against its
                projection onto the pixel's viewing ray; otherwise compare
                the 2D camera projection against the pixel grid.

        Returns:
            Scalar Huber-averaged reprojection error.
        """
        ba = predicted_coord.shape[0]
        # pixel-centre grid matching the prediction's spatial resolution
        imgrid = self._get_image_grid(image_size, predicted_coord.size()[2:])
        imgrid = imgrid.type_as(predicted_coord)[None].repeat(ba,1,1,1)
        if masks is not None:
            masks = masks.view(ba, -1)
        if ray_reprojection:
            #assert self.projection_type=='perspective'
            # project each 3D point onto its pixel's viewing ray
            imgrid_proj = func.calc_ray_projection(
                predicted_coord.view(ba,3,-1),
                imgrid.view(ba,2,-1),
                K = K,
                min_depth=self.min_depth,
                min_r_len=self.repro_loss_min_ray_length,
            )
            err = func.avg_l2_huber(
                imgrid_proj,
                predicted_coord.view(ba,3,-1),
                scaling=self.huber_scaling_repro,
                mask=masks
            )
        else:
            # classic 2D reprojection error against the pixel grid
            shape_reprojected_image, _ = self.nrsfm_model.camera_projection(
                func.clamp_depth(predicted_coord, self.min_depth)
            )
            if self.projection_type=='perspective':
                imgrid = self.nrsfm_model.calibrate_keypoints(imgrid, K)
            err = func.avg_l2_huber(
                shape_reprojected_image.view(ba,2,-1),
                imgrid.view(ba,2,-1),
                scaling=self.huber_scaling_repro,
                mask=masks,
            )
        return err
def _get_mean_basis_embed(self, embed):
ba, _, he, wi = embed.shape
embed_re = embed.view(ba, self.basis_size+1, 3, he, wi)
embed_mean = embed_re[:, 0, :, :, :]
# add the bias from the alpha layer!
if self.alpha_bias is not None:
embed_mean_add = (embed_re[:,1:,:,:,:] * self.alpha_bias).sum(1)
embed_mean = embed_mean + embed_mean_add
return embed_mean
    def _get_deltas_and_concat(self, embed):
        # Predict the dense (mean + basis) delta fields from the embedding.
        return self.delta_model(embed)
def _gather_supervised_embeddings(self, embed, kp_loc, image_size):
# uses grid sampler now (grid of size KP x 1)
# outputs B x C x KP
ba = embed.shape[0]
image_size_tensor = torch.tensor(image_size).type_as(embed).flip(0)
grid_ = 2. * kp_loc / image_size_tensor[None,:,None] - 1.
grid_ = grid_.permute(0,2,1).view(ba, -1, 1, 2)
supervised_embed = Fu.grid_sample(embed, grid_, align_corners=False)[:,:,:,0]
return supervised_embed
    def _get_basis_loss(self, kp_loc, kp_vis, embed, alpha, image_size):
        """Huber loss between predicted basis fields (sampled at keypoints)
        and the frozen NR-SFM basis.

        Args:
            kp_loc: (ba, 2, KP) keypoint pixel locations.
            kp_vis: (ba, KP) keypoint visibility weights.
            embed: dense (basis) embedding map to sample from.
            alpha: unused here; kept for interface symmetry.
            image_size: (height, width) for coordinate normalisation.
        """
        assert self.nrsfm_model_basis is not None, "NRSFM basis not compatible."
        ba = kp_loc.shape[0]
        if self.dilate_basis_loss > 0.:
            # blur the embedding before sampling to soften supervision
            ga = GaussianLayer(sigma=self.dilate_basis_loss, separated=True).cuda()
            embed = ga(embed)
        kp_embed_view = self._gather_supervised_embeddings(
            embed, kp_loc, image_size
        )
        gt_basis = self.nrsfm_model_basis.reshape(
            -1, self.nrsfm_model.n_keypoints
        )[None].repeat(ba,1,1).detach()
        return func.avg_l2_huber( gt_basis, kp_embed_view,
                                  scaling=self.huber_scaling_basis,
                                  mask=kp_vis[:,None,:],
                                  reduce_dims=[],
                                )
def _get_rotation_loss(self, est_rotation, nrsfm_rotation):
rel_rotation = torch.eye(3, 3).expand_as(est_rotation)
return 1.0 - torch.mean(
so3.so3_relative_angle(est_rotation, nrsfm_rotation, cos_angle=True)
)
    def _adjust_nrsfm_model_kp_scale(self, orig_image_size, image_size):
        """Adapt the NR-SFM keypoint rescale factor to a resized image.

        For orthographic projection the rescale is multiplied by the mean
        ratio of original to current image sides; perspective projection
        needs no adjustment.
        """
        if self.projection_type=='perspective':
            # dont change ...
            pass
        elif self.projection_type=='orthographic':
            # average of the height and width scale factors
            # (orig_image_size is batched: mean(0) averages over the batch)
            rel_scale = 0.5 * sum( \
                float(orig_image_size.mean(0)[i]) / image_size[i] \
                    for i in (0,1) )
            self.nrsfm_model.keypoint_rescale = \
                self.nrsfm_model_kp_rescale * rel_scale
        else:
            raise ValueError(self.projection_type)
    def _similarity_aug(self, images, kp_loc, kp_vis, masks=None, depths=None):
        """
        augment images, depths, masks and kp_loc using random
        similarity transformation

        Keypoints are transformed forward; images/depths/masks are warped
        with the inverse transform so that both stay consistent. Keypoints
        pushed outside the image are marked invisible and zeroed.

        Returns:
            (images, kp_loc, kp_vis, masks, depths) after augmentation.
        """
        ba, _, he, wi = images.shape
        # random scale
        r_scl = images.new_zeros(ba,).uniform_(1., 1.+self.scale_aug_range)
        r_rot = so3int.random_2d_rotation(ba, images.type(), self.rot_aug_range)
        # random translation
        imdiag = float(np.sqrt(he * wi))
        r_t = images.new_zeros(ba,2).uniform_( \
            -imdiag*self.t_aug_range, imdiag*self.t_aug_range)
        # orig image grid
        grid_ = self._get_image_grid(images.shape[2:], images.shape[2:])
        grid_flat = grid_.type_as(images).repeat(ba,1,1,1).view(ba,2,-1)
        # 1st transform the keypoints
        kp_loc = torch.bmm(r_rot, kp_loc)
        kp_loc = kp_loc * r_scl[:,None,None]
        kp_loc = kp_loc - r_t[:,:,None]
        # adjust the visibilities
        ok = (kp_loc[:,0,:] >= 0.) * (kp_loc[:,1,:] >= 0.) * \
             (kp_loc[:,0,:] < wi) * (kp_loc[:,1,:] < he)
        kp_vis = kp_vis * ok.float()
        # zero-out coordinates of invisible keypoints
        kp_loc[kp_vis[:, None, :].expand_as(kp_loc) < 0.5] = 0.0
        # then the image but with inverse trans
        grid_t = torch.bmm(r_rot.permute(0,2,1), grid_flat)
        grid_t = grid_t / r_scl[:,None,None]
        grid_t = grid_t + r_t[:,:,None]
        grid_t = grid_t / torch.FloatTensor([wi,he])[None,:,None].type_as(grid_t) # norm to 0, 1
        grid_t = grid_t * 2. - 1. # norm to -1, 1
        grid_t = grid_t.view(ba,2,he,wi).permute(0,2,3,1).contiguous()
        # sample the images, depth, masks
        images = Fu.grid_sample(images, grid_t, mode='bilinear', align_corners=False)
        if depths is not None:
            # nearest-neighbour keeps depth values unblended
            depths = Fu.grid_sample(depths, grid_t, mode='nearest', align_corners=False)
        if masks is not None:
            masks = Fu.grid_sample(masks, grid_t, mode='nearest', align_corners=False)
        return images, kp_loc, kp_vis, masks, depths
    def run_on_embed_db(self, preds, texture_desc, K, masks=None, image_size=None):
        """Decode shapes (and optionally appearance) for the accumulated
        embedding database using the current image's camera parameters.

        Args:
            preds: current predictions; `preds['phi']` supplies camera params.
            texture_desc: optional texture code; when given, appearance is
                predicted as well.
            K: camera calibration matrix.
            masks, image_size: unused here; kept for interface symmetry.

        Returns:
            Dict with the dense db embedding and its canonical / camera-frame
            shapes (plus 'embed_db_app' when texture_desc is given).
        """
        embed = self.embed_db.get_db()
        # broadcast the db across the batch as a (ba, C, N, 1) "image"
        embed = embed[None,:,:,None].repeat(preds['phi']['T'].size()[0], 1, 1, 1)
        # we have to downscale the embeds to make everything well-behaved
        embed_full = self._get_deltas_and_concat(embed)
        phi_out = self._get_shapes_and_projections(embed_full, None, preds['phi'], K)
        out = dict(
            embed_db_mean=embed_full,
            embed_db_shape_canonical=phi_out['shape_canonical_dense'],
            embed_db_shape_camera_coord=phi_out['shape_camera_coord_dense'],
        )
        if texture_desc is not None:
            app = self._run_app_model(embed_full, texture_desc, embed, skip_sph_assert=True)
            out['embed_db_app'] = app
        return out
def _merge_masked_tensors(self, pcl, masks):
c = pcl.size()[1]
pcl = pcl.transpose(0, 1).reshape(1, c, -1)
if masks is not None:
pcl = pcl[..., :, masks.reshape(-1) > 0.5]
return pcl
def _assert_spherical_embed(self, embed):
norms = (embed**2).sum(1).sqrt()
# we assert that the norms are constant (std <= 0.01)
# (in case we want to have different radius of the sphere)
assert (
embed.shape[1]==3
and float(norms.std()) <= 1e-2
), 'This can only run on spherical embeds!'
    def _get_sph_embed_towards_camera_loss(self, embed, masks, R, eps=1e-8):
        """Encourage the masked average of the spherical embedding, rotated
        by R, to point towards the camera.

        Args:
            embed: (ba, 3, ...) spherical embedding.
            masks: (ba, 1, ...) foreground weights.
            R: (ba, 3, 3) rotation matrices.
            eps: numerical guard for the masked average.

        Returns:
            Scalar loss; minimal when the rotated mean embedding aligns
            with the (sign-adjusted) camera axis.
        """
        ba = embed.size()[0]
        embed = embed.reshape(ba, 3, -1)
        masks = masks.reshape(ba, 1, -1)
        # masked, renormalised mean direction of the embedding
        avg_emb = Fu.normalize((embed * masks).sum(dim=2) / (masks.sum(dim=2) + eps), dim=-1)
        # Rotated by R, it should be ideally (0, 0, 1)
        # swap - with + for the non-flipped C3DPO
        sign = -1.0 if self.c3dpo_flipped else +1.0
        loss = 1. + sign * torch.matmul(R, avg_emb[..., None])[:, 2].mean()
        return loss
    def _calc_depth_pcl_errs(self, pred, gt, masks=None):
        """Scale-invariant depth error between predicted and ground-truth maps.

        The prediction is upsampled to the ground-truth resolution before
        evaluation. Returns a dict so it merges directly into the logged
        metrics.
        """
        # reshape the predicted depth to gt size (and rescale the values too)
        pred_up = Fu.interpolate(pred, gt.shape[2:], mode='bilinear')
        errs = eval_func.eval_depth_scale_inv(
            pred_up.detach(), gt.detach(), masks=masks
        )
        return {'pclerr_dist': errs.mean()}
    def _get_canonical_shape(self, dense_basis, alpha, masks, target_std=2.0):
        """Combine the dense basis fields into a canonical shape.

        shape = mean_field + sum_k alpha_k * basis_field_k

        Args:
            dense_basis: (ba, (basis_size+1)*3, H, W) stacked fields;
                group 0 is the mean.
            alpha: (ba, basis_size) blending coefficients.
            masks: unused here.
            target_std: unused here -- NOTE(review): kept only for
                interface compatibility; confirm it can be dropped.

        Returns:
            (ba, 3, H, W) canonical shape.
        """
        ba, di, he, wi = dense_basis.size()
        basis = dense_basis.reshape(ba, -1, 3*he*wi)
        canon = basis[:, :1, :] + torch.bmm(alpha[:, None, :], basis[:, 1:, :])
        return canon.reshape(ba, 3, he, wi)
    def _argmin_translation(self, shape_camera_coord, shape_proj, shape_vis, K=None):
        """Solve for the camera translation minimising reprojection error.

        Args:
            shape_camera_coord: (ba, 3, N) rotated shape in camera frame.
            shape_proj: (ba, 2, N) target 2D projections.
            shape_vis: per-point weights/visibilities.
            K: calibration matrix (perspective only; identity if None).

        Returns:
            (ba, 3) translation vector.
        """
        if self.projection_type=='orthographic':
            # 2D least-squares fit; z-translation is unobservable, pad with 0
            projection, _ = self.nrsfm_model.camera_projection(shape_camera_coord)
            T_amin = func.argmin_translation(projection, shape_proj, v=shape_vis)
            T = Fu.pad(T_amin, (0,1), 'constant', float(0))
        elif self.projection_type=='perspective':
            ba = shape_camera_coord.size()[0]
            if K is None:
                K = torch.eye(3).type_as(shape_proj)[None].expand(ba, 3, 3)
            if self.argmin_translation_ray_projection:
                T = func.find_camera_T(
                    K, shape_camera_coord, shape_proj, v=shape_vis
                )
            else:
                T = func.minimise_2d_residual_over_T(
                    K, shape_camera_coord, shape_proj, v=shape_vis
                )
        else:
            raise ValueError(self.projection_type)
        return T
    def _argmin_camera(self, shape_canonical, masks, grid_normalised, phi):
        """Estimate the camera translation for a canonical shape.

        Centers the shape, rotates it by phi['R'], solves for the optimal
        translation against the normalised image grid, optionally clamps the
        depth, and compensates for the centering.
        """
        ba = shape_canonical.size()[0]
        # NOTE(review): dim=(0,2) also sums over the batch, producing a
        # single centroid shared by all samples -- confirm this is intended
        # rather than a per-sample centroid (dim=2).
        centre = torch.sum(
            shape_canonical.reshape(ba, 3, -1) * masks.reshape(ba, 1, -1),
            dim=(0,2,),
            keepdim=True,
        ) / masks.sum()
        shape_centered = shape_canonical.reshape(ba, 3, -1) - centre
        assert 'R' in phi, "Rotation should be given for argmin_T"
        shape_camera_rotated = torch.bmm(phi['R'], shape_centered)
        T = self._argmin_translation(
            shape_camera_rotated,
            grid_normalised.expand(shape_camera_rotated[:,:2,:].size()),
            masks.reshape(ba, -1),
            K=None, # ! points already calibrated
        )
        min_depth = self.argmin_translation_min_depth
        if min_depth > 0.:
            # keep the camera in front of the shape
            T = torch.cat((T[:,0:2], torch.clamp(T[:,2:3], min_depth)), dim=1)
        # undo the centering: translation for the original (uncentered) shape
        T = T - torch.matmul(phi['R'], centre)[:, :, 0]
        return T
    def _get_shapes_and_projections(
        self, dense_basis, masks, global_desc, K, image_repro_gt=None, alpha=None
    ):
        """Decode dense basis fields into canonical / camera-frame shapes
        and their image reprojections.

        Args:
            dense_basis: (ba, (basis_size+1)*3, H, W) predicted fields.
            masks: optional foreground weights (all-ones when None).
            global_desc: dict with at least 'R', 'shape_coeff', 'kp_mean'
                (and optionally 'T'); copied into the returned 'phi'.
            K: calibration matrix (perspective projection only).
            image_repro_gt: target pixel grid, needed only when 'T' has to
                be estimated via _argmin_camera.
            alpha: optional override for global_desc['shape_coeff'].

        Returns:
            Dict with 'phi' (camera parameters, incl. estimated 'T') and the
            dense canonical shape, camera-frame shape, depth and 2D
            reprojection, all reshaped to the input's spatial layout.
        """
        masks = (
            masks if masks is not None
            else dense_basis.new_ones(dense_basis[:, :1, ...].size())
        )
        assert len(masks.size()) == 4
        ba = dense_basis.size()[0]
        kp_mean = global_desc['kp_mean']
        # shallow copy: phi may gain a 'T' entry below
        phi = copy.copy(global_desc)
        rescale = self.nrsfm_model.keypoint_rescale
        if alpha is not None:
            phi['shape_coeff'] = alpha
        # build the pytorch3d camera matching the projection model
        if self.projection_type=='perspective':
            focal = torch.stack((K[:, 0, 0], K[:, 1, 1]), dim=1)
            p0 = K[:, :2, 2]
            camera = cameras.SfMPerspectiveCameras(
                R=phi['R'].permute(0, 2, 1),
                focal_length=focal, principal_point=p0,
                device=dense_basis.device,
            )
        else:
            camera = cameras.SfMOrthographicCameras(
                R=phi['R'].permute(0, 2, 1),
                device=dense_basis.device,
            )
        shape_canonical = self._get_canonical_shape(
            dense_basis, phi['shape_coeff'], masks
        )
        if 'T' not in phi:
            # the grid has to be calibrated (=pre-multiplied by K^{-1}) first!
            grid_im_coord = Fu.pad(
                image_repro_gt.reshape(1, 2, -1).permute(0,2,1), (0, 1), value=1.0
            ).repeat(ba, 1, 1)
            grid_im_coord = camera.unproject_points(
                grid_im_coord, world_coordinates=False
            )[:,:,:2].permute(0,2,1)
            grid_normalised = (grid_im_coord - kp_mean[:,:,None]) * rescale
            phi['T'] = self._argmin_camera(
                shape_canonical, masks, grid_normalised, phi
            )
        camera.T = phi['T']
        shape_canonical_pt3d = shape_canonical.reshape(ba, 3, -1).permute(0, 2, 1)
        shape_camera_coord = camera.get_world_to_view_transform().transform_points(
            shape_canonical_pt3d
        )
        shape_image_coord_cal_dense = shape_camera_coord
        depth_dense = shape_camera_coord[:,:,2:]
        shape_proj_image = camera.transform_points(shape_canonical_pt3d)
        shape_reprojected_image = shape_proj_image[:, :, :2]
        # correct for the kp normalisation
        if self.projection_type == 'perspective':
            shape_image_coord_cal_dense = shape_image_coord_cal_dense + Fu.pad(
                kp_mean[:,None] * shape_camera_coord[:,:,2:], (0, 1), value=0.0
            )
            shape_reprojected_image = shape_reprojected_image + (kp_mean * focal)[:, None]
        else:
            assert self.projection_type == 'orthographic'
            shape_image_coord_cal_dense = (
                shape_image_coord_cal_dense / rescale +
                Fu.pad(kp_mean[:,None], (0, 1), value=0.0)
            )
            shape_reprojected_image = (
                shape_reprojected_image / rescale + kp_mean[:, None]
            )
        return dict(
            phi=phi,
            shape_canonical_dense=shape_canonical,
            shape_camera_coord_dense=shape_camera_coord.permute(0, 2, 1).reshape_as(shape_canonical),
            depth_dense=depth_dense.reshape_as(shape_canonical[:, :1]),
            shape_reprojected_image=shape_reprojected_image.permute(0, 2, 1).reshape_as(shape_canonical[:, :2]),
            shape_image_coord_cal_dense=shape_image_coord_cal_dense.permute(0, 2, 1).reshape_as(shape_canonical),
        )
def _get_best_scale(self, preds, image_size):
    """Return the image-space dense shape rescaled/translated to best match
    the reprojection target.

    Only the orthographic camera admits a closed-form scale/translation fit;
    the perspective shape is returned unchanged.
    """
    coords = preds['shape_image_coord_cal_dense']
    if self.projection_type == 'perspective':
        # no scale opt here, won't help
        return coords
    if self.projection_type != 'orthographic':
        raise ValueError(self.projection_type)
    batch = coords.shape[0]
    # image grid serving as the fitting target
    grid = self._get_image_grid(image_size, coords.size()[2:])
    grid = grid.type_as(coords)[None].repeat(batch, 1, 1, 1)
    projection, _ = self.nrsfm_model.camera_projection(coords)
    # least-squares scale + translation, weighted by the embedding masks
    scale, trans = func.argmin_translation_scale(
        projection, grid, v=preds['embed_masks'])
    scale_b = scale[:, None, None, None]
    best_xy = scale_b * coords[:, :2] + trans[:, :, None, None]
    best_z = scale_b * coords[:, 2:]
    return torch.cat((best_xy, best_z), dim=1)
def _get_sampled_sph_loss(self, preds, K, image_size):
    """Silhouette-consistency loss on random points of the embedding sphere.

    Random unit vectors (scaled to the embedding radius) are decoded to 3D,
    reprojected into the image, and the predicted mask is sampled at those
    reprojections over a pyramid of mask scales; the loss pushes every
    sampled mask value towards 1, i.e. all reprojections must land inside
    the silhouette.
    """
    masks = preds['embed_masks']
    ba = masks.shape[0]
    # random directions on the embedding sphere
    embed_sphere = torch.randn(
        size=(ba, 3, self.sampled_sil_n_samples*10, 1),
        dtype=masks.dtype, device=masks.device)
    embed_sphere = Fu.normalize(
        embed_sphere, dim=1) * self.spherical_embedding_radius
    # adjust the mean!
    embed_full = self._get_deltas_and_concat(embed_sphere)
    dense_phi = self._get_shapes_and_projections(embed_full, masks, preds, K)
    image_coords = dense_phi['shape_reprojected_image']
    shape = dense_phi['shape_image_coord_cal_dense']
    # normalise pixel coords to the [-1, 1] range expected by grid_sample;
    # flip(0) puts the (w, h) order first as grid_sample wants x before y
    image_size_tensor = torch.FloatTensor(
        [s for s in image_size]).type_as(embed_sphere).flip(0)
    grid = 2. * (image_coords / image_size_tensor[None,:,None,None]) - 1.
    grid_prm = grid.permute(0, 2, 3, 1)
    # get all scales until the smallest side is <= 5
    samples = []
    scl = -1
    while min(masks.shape[2:]) > 4:
        scl += 1
        if scl > 0:
            # halve the mask resolution and re-binarise at each pyramid level
            masks = (Fu.interpolate(
                masks, scale_factor=0.5, mode='bilinear') > 0.).float()
        samples.append(Fu.grid_sample(masks, grid_prm, align_corners=False).view(-1))
    samples = torch.cat(samples, dim=0)
    # a perfect fit samples 1 everywhere -> zero loss
    loss = (1 - samples).mean()
    return {
        'loss_sph_sample_mask': loss,
        'sph_sample_projs': grid,
        'sph_sample_3d': shape,
    }
def _get_photometric_losses(
    self,
    images,
    image_coords,
    basis_embed,
    embed_canonical=None,
    n_min=5,
    masks=None,
    texture_desc=None,
):
    """Photometric (VGG-perceptual) loss between the first ("reference")
    image of the batch and the remaining images warped into its frame.

    Args:
        images: (ba, 3, H, W) source images; the first one is the target.
        image_coords: per-pixel reprojection targets used to build the
            warping grid.
        basis_embed: dense embedding tensor; re-rendered into appearance
            when `self.photo_reenact` is on.
        embed_canonical: canonical embedding passed to the appearance
            model for re-enactment.
        n_min: number of lowest-loss source views averaged into the loss.
        masks: optional segmentation masks weighting the per-pixel loss.
        texture_desc: global appearance descriptor; required when
            `self.photo_reenact` is on.

    Returns:
        dict with the scalar 'loss_vgg' plus detached image tensors for
        visualisation.
    """
    ba = images.shape[0]
    n_min = min(ba-1, n_min)
    assert ba > 1, 'batch_size > 1 for photo losses!'
    assert not (self.photo_reenact and texture_desc is None)
    image_size = list(images.shape[2:])
    image_size_render = list(basis_embed.shape[2:])
    # normalise pixel coords to [-1, 1] for grid_sample (x first, hence flip)
    image_size_tensor = torch.FloatTensor(image_size).type_as(basis_embed).flip(0)
    grid = 2. * (image_coords / image_size_tensor[None,:,None,None]) - 1.
    grid = grid.permute(0, 2, 3, 1)
    # image warping loss
    if self.photo_reenact:
        # re-render every image with the appearance of the reference view
        images_reenact = self._run_app_model(
            basis_embed, texture_desc[0:1].repeat(ba, 1), embed_canonical
        )
    else:
        images_reenact = images
    images_reproject = Fu.grid_sample(images_reenact, grid, align_corners=False)
    # resample ref image to images_resample resolution
    images_ref = Fu.interpolate(images[:1], size=images_reproject.shape[2:])
    images_ref = images_ref.expand_as(images_reproject)
    loss_vgg, _, _ = self.appearance_loss(images_reproject, images_ref)
    loss_vgg = loss_vgg[:, 0]
    # transplant the rendered image by tokp pooling
    assert (~torch.isnan(loss_vgg)).all(), "Some photometric loss values are NaN."
    if masks is not None:
        # weight the losses by seg masks
        loss_vgg = masks[:1, 0] * loss_vgg
    # keep the n_min-1 lowest-loss source views (reference view excluded)
    loss_topk, idx_render = torch.topk(loss_vgg[1:], n_min-1, dim=0, largest=False)
    # make sure we include the target view
    loss_vgg = (loss_topk.sum(0) + loss_vgg[0]) / n_min
    idx_render = idx_render[:,None].expand(-1, 3, -1, -1)
    im_render = {
        'loss_vgg': (
            torch.gather(images_reproject, 0, idx_render).sum(0) + images_reproject[0]
        ) / n_min
    }
    out = {}
    out['loss_vgg'] = loss_vgg.mean()
    out['images_reproject'] = images_reproject.detach()
    out['images_gt'] = images_ref.detach()
    out['image_ref_render'] = im_render
    out['images'] = Fu.interpolate(images, size=images_reproject.shape[2:]).detach()
    out['images_reenact'] = Fu.interpolate(images_reenact, size=images_reproject.shape[2:]).detach()
    return out
def _mask_gt_image(self, image, mask):
    """Replace background (mask==0) pixels with the mean foreground colour,
    then soften the seam by blending in a Gaussian-blurred version.

    Args:
        image: (ba, C, H, W) image batch.
        mask: (ba, 1, H, W) soft foreground mask in [0, 1].

    Returns:
        The image with its background filled and blurred.
    """
    # per-image mean foreground colour; clamp avoids 0/0 for empty masks
    avgcol = (image * mask).sum((2, 3)) / mask.sum((2, 3)).clamp(1)
    image_m = image * mask + (1-mask) * avgcol[:, :, None, None]
    # blur and mix; follow the input's device instead of hard-coding .cuda(),
    # so this also works on CPU and on non-default GPU devices
    ga = GaussianLayer(sigma=5., separated=True).to(image.device)
    image_mf = ga(image_m)
    image_m = mask * image_m + (1-mask) * image_mf
    return image_m
def _run_app_model(self, embed, texture_desc, embed_canonical, skip_sph_assert=False):
    """Run the appearance model on per-pixel uv-like embeddings `embed`
    combined with the global appearance descriptor `texture_desc`.

    Returns the first three output channels shifted by +0.5 (RGB image).
    """
    limit = self.n_images_for_app_model
    if limit <= 0:
        limit = embed_canonical.size()[0]
    desc = texture_desc[:limit]
    emb_can = embed_canonical[:limit]
    if not skip_sph_assert:
        self._assert_spherical_embed(emb_can)
    if self.detach_app:
        # do not backprop into the embedding through the appearance branch
        emb_can = emb_can.detach()
    spatial = list(embed.shape[2:])
    # broadcast the global descriptor over the spatial grid
    desc_map = desc[:,:,None,None].expand(-1, -1, *spatial)
    net_out = self.app_model(torch.cat((desc_map, emb_can), dim=1))
    return net_out[:, :3] + 0.5
def _get_app_model_losses(
    self,
    images,
    preds_app,
    masks=None,
    sigma=None,
):
    """VGG-perceptual and RGB losses between the appearance-model output
    `preds_app` and the input `images` resampled to the render size.

    Args:
        images: (>=ba, C, H, W) ground-truth images.
        preds_app: (ba, C, h, w) predicted appearance images.
        masks: optional segmentation masks used to weight the losses.
        sigma: optional per-pixel uncertainty forwarded to appearance_loss.

    Returns:
        dict with scalar losses ('loss_vgg', 'loss_l1', 'loss_ssim' — the
        latter is a detached zero placeholder) and image tensors for vis.
    """
    # for now this is the same
    images_pred = preds_app
    ba = images_pred.shape[0]
    # (removed unused local `image_size`; only the render size is needed)
    image_size_render = list(images_pred.shape[2:])
    if masks is not None:
        # weight the losses by seg masks
        masks = Fu.interpolate(masks[:ba], size=image_size_render, mode='nearest')
    # resample ref image to images_resample resolution
    images_gt = Fu.interpolate(images[:ba], size=image_size_render)
    # mask the images and do NN interp
    if self.app_model_mask_gt:
        images_gt = self._mask_gt_image(images_gt, masks)
    loss_vgg, loss_rgb, _ = \
        self.appearance_loss(
            images_pred,
            images_gt,
            sig=sigma,
            mask=masks if self.app_mask_image else None
        )
    if masks is not None:
        # weight the losses by seg masks
        loss_vgg, loss_rgb = \
            [ (masks * l).sum() / torch.clamp(masks.sum(), 1e-1) \
              for l in (loss_vgg, loss_rgb,) ]
    else:
        loss_vgg, loss_rgb = \
            [ l.mean() \
              for l in (loss_vgg, loss_rgb,) ]
    out = {}
    out['loss_vgg'] = loss_vgg
    out['loss_l1'] = loss_rgb
    out['loss_ssim'] = (loss_rgb * 0.0).detach() # not used
    out['images_pred'] = images_pred
    out['images_pred_clamp'] = torch.clamp(images_pred,0.,1.)
    out['images_gt'] = images_gt
    out['images'] = images_gt
    return out
def forward(
    self,
    kp_loc=None,
    kp_vis=None,
    kp_conf=None,
    images=None,
    epoch_now=None,
    orig_image_size=None,
    masks=None,
    depths=None,
    K=None,
    **kwargs
):
    """Full forward pass: trunk -> spherical embeddings -> dense shapes via
    the frozen NR-SFM prior -> all configured losses, accumulated into
    preds['objective'].

    Args (batched along dim 0):
        kp_loc, kp_vis: 2D keypoints and their visibilities.
        kp_conf: keypoint confidences (accepted but unused here).
        images: (ba, 3, H, W) input images.
        epoch_now: current epoch, forwarded to get_objective.
        orig_image_size: pre-resize image size used to rescale the NR-SFM
            keypoint model.
        masks: segmentation masks; depths: GT depths (used for metrics).
        K: camera calibration matrices.

    Returns:
        dict of predictions, losses and visualisation tensors.
    """
    ba = images.shape[0] # batch size
    image_size = images.size()[2:]
    # adjust nrsfm model scale
    self._adjust_nrsfm_model_kp_scale(orig_image_size, image_size)
    preds = {}
    preds['nrsfm_mean_shape'] = self.nrsfm_model.get_mean_shape()
    # random similarity augmentation (training only, when any range is set)
    if self.training and (
        self.scale_aug_range > 0. or
        self.t_aug_range > 0. or
        self.rot_aug_range > 0.
    ):
        images, kp_loc, kp_vis, masks, depths = \
            self._similarity_aug(images, kp_loc, kp_vis,
                                 masks=masks, depths=depths)
        preds.update(
            { 'images_aug': images, 'kp_loc_aug': kp_loc,
              'depths_aug': depths, 'masks_aug': masks }
        )
    embed, glob_features = self.trunk(
        images, kp_loc_vis = torch.cat((kp_loc, kp_vis[:,None,:]), dim=1)
    )
    # project per-pixel embeddings onto the sphere of the configured radius
    embed = Fu.normalize(embed, dim=1) * self.spherical_embedding_radius
    embed_full = self._get_deltas_and_concat(embed)
    #embed_masks = (Fu.interpolate(masks, embed.shape[2:], mode='bilinear') > 0.49).float()
    embed_masks = Fu.interpolate(masks, embed.shape[2:], mode='nearest')
    image_repro_gt = self._get_image_grid(image_size, embed_full.size()[2:])
    preds['embed'] = embed
    preds['embed_full'] = embed_full
    preds['embed_masks'] = embed_masks
    preds['embed_mean'] = self._get_mean_basis_embed(embed_full)
    preds['image_repro_gt'] = image_repro_gt
    alpha_geom, texture_desc, rotation_code = self.global_head(glob_features)
    # the NR-SFM prior stays frozen during this model's training
    self.nrsfm_model.eval()
    preds['nrsfm'] = self.nrsfm_model(
        kp_loc=kp_loc,
        kp_vis=kp_vis,
        dense_basis=None, # estimate dense Phi here
        K=K,
    )
    assert not self.nrsfm_model.camera_scale # so just ones
    assert self.nrsfm_model.argmin_translation
    #preds['kp_mean'] = preds['nrsfm']['kp_mean'] # TODO: this should go away
    # override top-level preds if regressing directly
    assert rotation_code is not None
    assert alpha_geom is not None
    global_desc = dict(
        shape_coeff=alpha_geom,
        R=so3int.so3_6d_to_rot(rotation_code),
        kp_mean=preds['nrsfm']['kp_mean'],
    )
    preds.update(self._get_shapes_and_projections(
        embed_full, embed_masks, global_desc, K, image_repro_gt
    ))
    preds['shape_image_coord_cal'] = self._gather_supervised_embeddings(
        preds['shape_image_coord_cal_dense'], # same as uncal for orthographic
        kp_loc,
        image_size,
    )
    preds['kp_reprojected_image'] = self._gather_supervised_embeddings(
        preds['shape_reprojected_image'],
        kp_loc,
        image_size,
    )
    # compute NR-SFM Prior loss
    if self.loss_weights['loss_basis'] > 0.:
        preds['loss_basis'] = self._get_basis_loss(
            kp_loc,
            kp_vis,
            embed_full,
            preds['nrsfm']['phi']['shape_coeff'],
            image_size,
        )
    # regress the shape coefficients towards the NR-SFM ones
    if self.loss_weights.loss_alpha > 0.:
        assert alpha_geom is not None
        preds['loss_alpha'] = func.huber( \
            (alpha_geom - preds['nrsfm']['phi']['shape_coeff'])**2,
            scaling=self.huber_scaling_basis,
        ).mean()
    # regress the rotations towards the NR-SFM ones
    if self.loss_weights.loss_rotation > 0.:
        preds['loss_rotation'] = self._get_rotation_loss(
            preds['phi']['R'],
            preds['nrsfm']['phi']['R'],
        )
    # compute reprojection loss
    preds['loss_repro_2d'] = self._get_distance_from_grid(
        preds['shape_image_coord_cal_dense'],
        image_size,
        masks=embed_masks,
        K=K,
        ray_reprojection=False,
    )
    # preds['loss_repro_ray'] = 0.0
    # if self.projection_type == 'perspective':
    preds['loss_repro_ray'] = self._get_distance_from_grid(
        preds['shape_image_coord_cal_dense'],
        image_size,
        masks=embed_masks,
        K=K,
        ray_reprojection=True,
    )
    preds['loss_repro'] = preds['loss_repro_ray'] if self.ray_reprojection else preds['loss_repro_2d']
    # perceptual loss
    preds['photo_out'] = None
    if self.photo_min_k > 0 and ba > 1:
        # use the first im as a loss as a target
        basis_embed_ref = embed_full[:1].expand_as(embed_full)
        masks_ref = embed_masks[:1].expand_as(embed_masks)
        phi_onto_ref = self._get_shapes_and_projections(basis_embed_ref, masks_ref, preds['phi'], K)
        preds['photo_out'] = self._get_photometric_losses(
            images,
            phi_onto_ref['shape_reprojected_image'],
            embed_full,
            texture_desc=texture_desc,
            n_min=self.photo_min_k,
            masks=embed_masks,
            embed_canonical=embed,
        )
        preds['loss_vgg'] = preds['photo_out']['loss_vgg']
    # embedding-camera alignment loss
    if self.loss_weights['loss_sph_emb_to_cam'] > 0.:
        preds['loss_sph_emb_to_cam'] = self._get_sph_embed_towards_camera_loss(
            preds['embed'], embed_masks, preds['phi']['R'].detach()
        )
    # mask sampling loss
    if self.loss_weights['loss_sph_sample_mask'] > 0.:
        preds.update(self._get_sampled_sph_loss(preds, K, images.shape[2:]))
    # appearance model
    preds['app'] = None
    if texture_desc is not None:
        n_im_use = (
            self.n_images_for_app_model
            if self.n_images_for_app_model > 0
            else ba
        )
        preds['app'] = self._run_app_model(
            embed_full[:n_im_use], texture_desc[:n_im_use], embed
        )
        preds['app_out'] = self._get_app_model_losses(
            images, preds['app'][:, :3], masks=masks,
        )
        for k in ('loss_vgg', 'loss_l1', 'loss_ssim'):
            preds[k+'_app'] = preds['app_out'][k]
    # finally get the optimization objective using self.loss_weights
    preds['objective'] = self.get_objective(preds, epoch_now=epoch_now)
    # =================
    # the rest is only for visualisation/metrics
    # run on cached embed_db
    if self.embed_db is not None and self.embed_db_eval:
        preds.update(self.run_on_embed_db(preds, texture_desc, K,
                     masks=embed_masks, image_size=image_size))
    # accumulate into embed db
    self.embed_db(embed, masks=embed_masks)
    depth_pcl_metrics = self._calc_depth_pcl_errs(
        preds['depth_dense'], depths, masks=masks
    )
    preds.update(depth_pcl_metrics)
    # find the scale of shape_image_coord that minimizes the repro loss
    preds['shape_image_coord_best_scale'] = self._get_best_scale(preds, image_size)
    preds['nrsfm_shape_image_coord'] = preds['nrsfm'][{
        'orthographic': 'shape_image_coord',
        'perspective': 'shape_image_coord_cal',
    }[self.projection_type]]
    # a hack for vis purposes
    preds['misc'] = {}
    for k in ('images', 'images_app', 'images_geom', 'embed'):
        if k in preds:
            preds['misc'][k] = preds[k].detach()
        elif k in vars():
            preds['misc'][k] = vars()[k]
    return preds
def get_objective(self, preds, epoch_now=None):
    """Combine the individual losses present in `preds` into a single
    weighted scalar objective (printing the weights once)."""
    weighted = {}
    for name, weight in self.loss_weights.items():
        # skip absent losses and zero weights (avoids adding NaN * 0)
        if name in preds and weight != 0.0:
            weighted[name] = preds[name] * float(weight)
    if not getattr(self, '_loss_weights_printed', False):
        print('-------\nloss_weights:')
        for name, weight in self.loss_weights.items():
            print('%20s: %1.2e' % (name, weight) )
        print('-------')
        print('-------\nweighted losses:')
        for name, value in weighted.items():
            print('%20s: %1.2e' % (name, value) )
        print('-------')
        self._loss_weights_printed = True
    return torch.stack(list(weighted.values())).sum()
def visualize( self, visdom_env_imgs, trainmode, \
               preds, stats, clear_env=False ):
    """Push debug visualisations of `preds` (depths, keypoint/dense
    reprojections, point clouds, photometric and appearance renders) to a
    Visdom server.

    Silently returns when no Visdom connection is available. `clear_env`
    is accepted but not used in this body.
    """
    if stats is not None:
        it = stats.it[trainmode]
        epoch = stats.epoch
        viz = vis_utils.get_visdom_connection(
            server=stats.visdom_server,
            port=stats.visdom_port,
        )
    else:
        it = 0
        epoch = 0
        viz = vis_utils.get_visdom_connection()
    if not viz.check_connection():
        print("no visdom server! -> skipping batch vis")
        return
    # only the first image of the batch is visualised
    idx_image = 0
    title="e%d_it%d_im%d"%(epoch,it,idx_image)
    # prefer augmented tensors when similarity augmentation was applied
    imvar = 'images_aug' if 'images_aug' in preds else 'images'
    dvar = 'depths_aug' if 'depths_aug' in preds else 'depths'
    mvar = 'masks_aug' if 'masks_aug' in preds else 'masks'
    # show depth
    ds = preds['depth_dense'].cpu().detach().repeat(1,3,1,1)
    ims = preds[imvar].cpu().detach()
    ims = Fu.interpolate(ims,size=ds.shape[2:])
    if mvar in preds: # mask depths, ims by masks
        masks = Fu.interpolate(preds[mvar].cpu().detach(),
                               size=ds.shape[2:], mode='nearest' )
        ims *= masks ; ds *= masks
    ds = vis_utils.denorm_image_trivial(ds)
    if 'pred_mask' in preds:
        pred_mask = torch.sigmoid(preds['pred_mask'][:, None].detach()).cpu().expand_as(ims)
        ims_ds = torch.cat( (ims, ds, pred_mask), dim=2 )
    else:
        ims_ds = torch.cat( (ims, ds), dim=2 )
    viz.images(ims_ds, env=visdom_env_imgs, opts={'title':title}, win='depth')
    # show aug images if present
    imss = []
    for k in (imvar, 'images_app', 'images_geom'):
        if k in preds:
            ims = preds[k].cpu().detach()
            ims = Fu.interpolate(ims, scale_factor=0.25)
            ims = vis_utils.denorm_image_trivial(ims)
            # annotate each image with its rotation angle relative to view 0
            # (predicted / NR-SFM)
            R, R_gt = preds['phi']['R'], preds['nrsfm']['phi']['R']
            angle_to_0 = np.rad2deg(
                so3.so3_relative_angle(R[0].expand_as(R), R).data.cpu().numpy()
            )
            angle_to_0_gt = np.rad2deg(
                so3.so3_relative_angle(R_gt[0].expand_as(R_gt), R_gt).data.cpu().numpy()
            )
            if ~np.isnan(angle_to_0).any():
                ims = np.stack([
                    vis_utils.write_into_image(
                        (im*255.).astype(np.uint8), "%d° / %d°" % (d, d_gt), color=(255,0,255)
                    ) for im, d, d_gt in zip(ims.data.numpy(), angle_to_0, angle_to_0_gt)
                ])
            else:
                ims = (ims.data.numpy()*255.).astype(np.uint8)
            imss.append(ims)
    if len(imss) > 0:
        viz.images(
            #torch.cat(imss, dim=2),
            np.concatenate(imss, axis=2).astype(np.float32)/255.,
            env=visdom_env_imgs,
            opts={'title': title},
            win='imaug',
        )
    # show reprojections
    p1 = preds['kp_loc_aug' if 'kp_loc_aug' in preds else 'kp_loc'][idx_image]
    p2 = preds['kp_reprojected_image'][idx_image,0:2]
    p3 = preds['nrsfm']['kp_reprojected_image'][idx_image]
    p = np.stack([p_.detach().cpu().numpy() for p_ in (p1, p2, p3)])
    v = preds['kp_vis'][idx_image].detach().cpu().numpy()
    vis_utils.show_projections( viz, visdom_env_imgs, p, v=v,
                                title=title, cmap__='rainbow',
                                markersize=50, sticks=None,
                                stickwidth=1, plot_point_order=False,
                                image=preds[imvar][idx_image].detach().cpu().numpy(),
                                win='projections' )
    # dense reprojections
    p1 = preds['image_repro_gt'].detach().cpu()
    p2 = preds['shape_reprojected_image'][idx_image].detach().cpu()
    # override mask with downsampled (augmentation applied if any)
    mvar = 'embed_masks'
    if mvar in preds:
        masks = preds[mvar].detach().cpu()
        #masks = Fu.interpolate(masks, size=p2.shape[1:], mode='nearest')
        p1 = p1 * masks[idx_image]
        p2 = p2 * masks[idx_image]
    # TEMP
    img = (preds[imvar][idx_image].cpu() * Fu.interpolate(
        preds[mvar].cpu()[idx_image:idx_image+1], size=preds[imvar][0, 0].size(), mode='nearest'
    )[0]).data.cpu().numpy()
    p = np.stack([p_.view(2,-1).numpy() for p_ in (p1, p2)])
    vis_utils.show_projections( viz, visdom_env_imgs, p, v=None,
                                title=title, cmap__='rainbow',
                                markersize=1, sticks=None,
                                stickwidth=1, plot_point_order=False,
                                image=img,
                                win='projections_dense' )
    vis_utils.show_flow(viz, visdom_env_imgs, p,
                        image=preds[imvar][idx_image].detach().cpu().numpy(),
                        title='flow ' + title,
                        linewidth=1,
                        win='projections_flow',
                        )
    # sphere-sampled silhouette projections (when the sph-sample loss ran)
    if 'sph_sample_projs' in preds:
        p = preds['sph_sample_projs'][idx_image].detach().cpu().view(2, -1)
        if 'sph_sample_gt' in preds:
            p_ = preds['sph_sample_gt'][idx_image].detach().cpu().view(2, -1)
            p_ = p_.repeat(1, math.ceil(p.shape[1]/p_.shape[1]))
            p = [p, p_[:, :p.shape[1]]]
        else:
            p = [p.view(2, -1)]
        # p = (torch.stack(p) + 1.) / 2.
        # map from [-1, 1] grid coords back to pixel coords
        p = (torch.stack(p) + 1.) / 2.
        imsize = preds[imvar][idx_image].shape[1:]
        p[:, 0, :] *= imsize[1]
        p[:, 1, :] *= imsize[0]
        vis_utils.show_projections(viz, visdom_env_imgs,
                                   p, v=None,
                                   title=title + '_spl_sil',
                                   cmap__='rainbow',
                                   markersize=1, sticks=None,
                                   stickwidth=1, plot_point_order=False,
                                   image=preds[imvar][idx_image].detach().cpu().numpy(),
                                   win='projections_spl_sil'
                                   )
    # fuse the whole batch with the first image's global descriptor
    merged_embed = self._merge_masked_tensors(
        preds['embed_full'], preds['embed_masks']
    )[..., None]
    gl_desc_0 = {k: v[:1] for k, v in preds['phi'].items()}
    merged_with_pivot_phis = self._get_shapes_and_projections(
        merged_embed, None, gl_desc_0, preds['K'][:1]
    )
    preds['shape_canonical_same_alphas'] = merged_with_pivot_phis[
        'shape_canonical_dense'
    ][0 ,..., 0]
    # dense 3d
    pcl_show = {}
    vis_list = ['dense3d', 'mean_shape', 'embed_db', 'batch_fused', 'sph_embed']
    if self.loss_weights['loss_sph_sample_mask'] > 0:
        vis_list.append('sph_sample_3d')
    for vis in vis_list:
        if vis=='canonical':
            pcl = preds['shape_canonical_dense']
        elif vis=='dense3d':
            pcl = preds['shape_image_coord_cal_dense']
        elif vis=='batch_fused':
            pcl = preds['shape_canonical_same_alphas'].detach().cpu()
            # duplicate rows to carry a flat colour in channels 3:6
            pcl = torch.cat((pcl, pcl), dim=0)
            pcl[3:5,:] = 0.0
            pcl[5,:] = 1.0
        elif vis=='mean_shape':
            pcl = preds['embed_mean']
        elif vis=='mean_c3dpo_shape':
            pcl = preds['nrsfm_mean_shape']
        elif vis=='shape_canonical':
            pcl = preds['shape_canonical_dense']
        elif vis == 'sph_embed':
            pcl = preds['embed'].detach().clone()
        elif vis == 'sph_sample_3d':
            pcl = preds['sph_sample_3d'][idx_image].detach().cpu().view(3, -1)
            pcl = torch.cat((pcl, pcl.clone()), dim=0)
            pcl[4:,:] = 0.0
            pcl[3,:] = 1.0
            # filtering outliers
            pcl[:3] -= pcl[:3].mean(dim=1, keepdim=True) # will be centered anyway
            std = pcl[:3].std(dim=1).max()
            pcl[:3] = pcl[:3].clamp(-2.5*std, 2.5*std)
        elif vis == 'embed_db':
            pcl = self.embed_db.get_db(uniform_sphere=False).cpu().detach().view(3, -1)
            pcl = torch.cat((pcl, pcl.clone()), dim=0)
            pcl[3:5,:] = 0.0
            pcl[4,:] = 1.0
        else:
            raise ValueError(vis)
        # attach per-point RGB sampled from the image (masked variants only)
        if vis not in ('mean_c3dpo_shape', 'batch_fused', 'sph_sample_3d', 'embed_db'):
            pcl_rgb = preds[imvar].detach().cpu()
            #pcl = Fu.interpolate(pcl.detach().cpu(), pcl_rgb.shape[2:], mode='bilinear')
            pcl_rgb = Fu.interpolate(pcl_rgb, size=pcl.shape[2:], mode='bilinear')
            if (mvar in preds):
                masks = preds[mvar].detach().cpu()
                masks = Fu.interpolate(masks, \
                    size=pcl.shape[2:], mode='nearest')
            else:
                masks = None
            pcl = pcl.detach().cpu()[idx_image].view(3,-1)
            pcl_rgb = pcl_rgb[idx_image].view(3,-1)
            pcl = torch.cat((pcl, pcl_rgb), dim=0)
            if masks is not None:
                masks = masks[idx_image].view(-1)
                pcl = pcl[:,masks>0.]
        # if vis == 'sph_embed':
        #     import pdb; pdb.set_trace()
        if pcl.numel()==0:
            continue
        pcl_show[vis] = pcl.numpy()
    vis_utils.visdom_plotly_pointclouds(viz, pcl_show, visdom_env_imgs,
                                        title=title+'_'+vis,
                                        markersize=1,
                                        sticks=None, win=vis,
                                        height=700, width=700 ,
                                        normalise=True,
                                        )
    # sparse (keypoint) 3D comparison: NR-SFM vs dense model (vs GT if given)
    var3d = {
        'orthographic': 'shape_image_coord',
        'perspective': 'shape_image_coord_cal',
    }[self.projection_type]
    sparse_pcl = {
        'nrsfm': preds['nrsfm'][var3d][idx_image].detach().cpu().numpy().copy(),
        'dense': preds['shape_image_coord_cal'][idx_image].detach().cpu().numpy().copy(),
    }
    if 'kp_loc_3d' in preds:
        sparse_pcl['gt'] = preds['kp_loc_3d'][idx_image].detach().cpu().numpy().copy()
    if 'class_mask' in preds:
        class_mask = preds['class_mask'][idx_image].detach().cpu().numpy()
        sparse_pcl = {k: v*class_mask[None] for k,v in sparse_pcl.items()}
    vis_utils.visdom_plotly_pointclouds(viz, sparse_pcl, visdom_env_imgs, \
                                        title=title+'_sparse3d', \
                                        markersize=5, \
                                        sticks=None, win='nrsfm_3d',
                                        height=500,
                                        width=500 )
    if 'photo_out' in preds and preds['photo_out'] is not None:
        # show the source images and their renders
        ims_src = preds['photo_out']['images'].detach().cpu()
        ims_repro = preds['photo_out']['images_reproject'].detach().cpu()
        ims_reenact = preds['photo_out']['images_reenact'].detach().cpu()
        ims_gt = preds['photo_out']['images_gt'].detach().cpu()
        # cat all the images
        ims = torch.cat((ims_src,ims_reenact,ims_repro,ims_gt), dim=2)
        ims = torch.clamp(ims,0.,1.)
        viz.images(ims, env=visdom_env_imgs, opts={'title':title}, win='imrepro')
        im_renders = preds['photo_out']['image_ref_render']
        for l in im_renders:
            im_gt = preds['photo_out']['images_gt'][0].detach().cpu()
            im_render = im_renders[l].detach().cpu()
            im = torch.cat((im_gt, im_render), dim=2)
            im = torch.clamp(im, 0., 1.)
            viz.image(im, env=visdom_env_imgs, \
                      opts={'title':title+'_min_render_%s' % l}, win='imrender_%s' % l)
    if 'app_out' in preds and preds['app_out'] is not None:
        # show the source images and their predictions
        ims_src = preds['app_out']['images'].detach().cpu()
        ims_pred = preds['app_out']['images_pred_clamp'].detach().cpu()
        ims = torch.cat((ims_src,ims_pred), dim=2)
        viz.images(ims, env=visdom_env_imgs, opts={'title':title}, win='impred')
def load_nrsfm_model(exp_name, get_cfg=False):
    """Load a pre-trained C3DPO NR-SFM model by name or experiment path.

    Args:
        exp_name: either a key into dataset_configs.C3DPO_MODELS or a
            filesystem path to an experiment directory; when the directory
            does not exist, the model archive is downloaded from C3DPO_URLS.
        get_cfg: when True, also return the parsed experiment config.

    Returns:
        The loaded C3DPO model, or (model, cfg) when get_cfg is True.
    """
    from dataset.dataset_configs import C3DPO_MODELS, C3DPO_URLS
    if exp_name in C3DPO_MODELS:
        exp_path = C3DPO_MODELS[exp_name]
    else:
        exp_path = exp_name
    if not os.path.exists(exp_path):
        url = C3DPO_URLS[exp_name]
        print('Downloading C3DPO model %s from %s' % (exp_name, url))
        utils.untar_to_dir(url, exp_path)
    cfg_file = os.path.join(exp_path, 'expconfig.yaml')
    assert os.path.isfile(cfg_file), 'no config for NR SFM %s!' % cfg_file
    with open(cfg_file, 'r') as f:
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary Python objects; the attribute-style `cfg.MODEL` access
        # below suggests the config relies on custom tags, so only load
        # trusted, local config files here.
        cfg = yaml.load(f)
    # exp = ExperimentConfig(cfg_file=cfg_file)
    nrsfm_model = c3dpo.C3DPO(**cfg.MODEL)
    model_path = model_io.find_last_checkpoint(exp_path)
    # BUGFIX: the message used to format `model_path`, which is always None
    # when this assert fires; report the searched directory instead.
    assert model_path is not None, "cannot find previous NR SFM model in %s" % exp_path
    print("Loading the model from", model_path)
    model_state_dict, _, _ = model_io.load_model(model_path)
    nrsfm_model.load_state_dict(model_state_dict, strict=True)
    if get_cfg:
        return nrsfm_model, cfg
    else:
        return nrsfm_model
|
{"hexsha": "ef0ba35209f98f4c695ff41d775fcc14b0a01911", "size": 44961, "ext": "py", "lang": "Python", "max_stars_repo_path": "c3dm/model.py", "max_stars_repo_name": "facebookresearch/c3dm", "max_stars_repo_head_hexsha": "cac38418e41f75f1395422200b8d7bdf6725aa43", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2020-12-04T16:40:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-06T01:35:16.000Z", "max_issues_repo_path": "c3dm/model.py", "max_issues_repo_name": "facebookresearch/c3dm", "max_issues_repo_head_hexsha": "cac38418e41f75f1395422200b8d7bdf6725aa43", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-03-16T09:05:22.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-23T12:43:37.000Z", "max_forks_repo_path": "c3dm/model.py", "max_forks_repo_name": "facebookresearch/c3dm", "max_forks_repo_head_hexsha": "cac38418e41f75f1395422200b8d7bdf6725aa43", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-04-08T00:50:29.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-06T01:35:06.000Z", "avg_line_length": 32.2070200573, "max_line_length": 104, "alphanum_fraction": 0.6848379707, "include": true, "reason": "import numpy", "num_tokens": 14050}
|
import numpy as np
from datetime import datetime
import cv2
from pathlib import Path
from skimage.filters import threshold_otsu
from skimage import filters
from scipy import ndimage
def segment_worms(g, well, well_paths):
    '''
    Segments worms to use for downstream normalization.

    For 'Sma' the worms are dark against a bright circular well, so a
    low-percentile intensity threshold within a disk mask is used and
    small debris components are removed; for all other species a Sobel
    edge magnitude thresholded with Otsu is used. Intermediate images are
    written to <work>/<plate>/<well>/img for QC.

    Args:
        g: run configuration (uses .work, .plate, .species).
        well: well identifier; well_paths: image paths (first one is used).

    Returns:
        The segmented area. NOTE(review): in the 'Sma' branch `filtered`
        holds 255 per worm pixel, so the returned sum is 255x the pixel
        count (unlike the boolean sum in the other branch) — confirm the
        downstream normalization expects this scale before changing it.
    '''
    # create a disk mask for 2X images
    def create_circular_mask(h, w, center=None, radius=None):
        if center is None:  # make the center the center of the image
            center = (int(w / 2), int(h / 2))
        if radius is None:  # make the radius the size of the image
            radius = min(center[0], center[1], w - center[0], h - center[1])
        Y, X = np.ogrid[:h, :w]
        dist_from_center = np.sqrt((X - center[0])**2 + (Y - center[1])**2)
        mask = dist_from_center <= radius
        return mask

    start_time = datetime.now()
    g.work.joinpath(g.plate, well, 'img').mkdir(
        parents=True, exist_ok=True)
    outpath = g.work.joinpath(g.plate, well, 'img')
    path = well_paths[0]
    image = cv2.imread(str(path), cv2.IMREAD_ANYDEPTH)
    if g.species == 'Sma':
        height, width = image.shape
        mask = create_circular_mask(height, width, radius=height / 2.1)
        # gaussian blur (ndimage.filters namespace is deprecated in scipy;
        # gaussian_filter lives directly on scipy.ndimage)
        blur = ndimage.gaussian_filter(image, 1.5)
        # set threshold, make binary
        # threshold = threshold_otsu(subtracted)
        threshold = np.percentile(blur, 1.5)
        binary = blur < threshold
        binary = binary * mask
        binary = ndimage.binary_closing(binary, iterations=5)
        # remove small segmented debris
        nb_components, labelled_image, stats, centroids = cv2.connectedComponentsWithStats(
            binary.astype('uint8'), connectivity=8)
        sizes = stats[1:, -1]
        nb_components = nb_components - 1
        # empirically derived minimum size
        min_size = 2500
        filtered = np.zeros((labelled_image.shape))
        for i in range(0, nb_components):
            if sizes[i] >= min_size:
                filtered[labelled_image == i + 1] = 255
        blur_png = g.work.joinpath(outpath,
                                   g.plate + "_" + well + '_blur' + ".png")
        cv2.imwrite(str(blur_png), blur * 255)
        bin_png = g.work.joinpath(outpath,
                                  g.plate + "_" + well + '_binary' + ".png")
        cv2.imwrite(str(bin_png), binary * 255)
        # BUGFIX: this file previously reused the '_binary' name and silently
        # overwrote the binary QC image written just above
        filt_png = g.work.joinpath(outpath,
                                   g.plate + "_" + well + '_filtered' + ".png")
        cv2.imwrite(str(filt_png), filtered * 255)
        # the area is the sum of all the segmented pixels
        area = np.sum(filtered)
        print("Completed in {}".
              format(datetime.now() - start_time))
    else:
        # sobel edge detection
        sobel = filters.sobel(image)
        # gaussian blur
        blur = ndimage.gaussian_filter(sobel, 1.5)
        # set threshold, make binary
        threshold = threshold_otsu(blur)
        binary = blur > threshold
        sobel_png = g.work.joinpath(outpath,
                                    g.plate + "_" + well + '_edge' + ".png")
        cv2.imwrite(str(sobel_png), sobel * 255)
        blur_png = g.work.joinpath(outpath,
                                   g.plate + "_" + well + '_blur' + ".png")
        cv2.imwrite(str(blur_png), blur * 255)
        bin_png = g.work.joinpath(outpath,
                                  g.plate + "_" + well + '_binary' + ".png")
        cv2.imwrite(str(bin_png), binary * 255)
        # the area is the sum of all the white (True) pixels
        area = np.sum(binary)
        print("Completed in {}".
              format(datetime.now() - start_time))
    return area
|
{"hexsha": "949306e9e20a43d2057fe9dcf0005d3f438610fd", "size": 3742, "ext": "py", "lang": "Python", "max_stars_repo_path": "modules/segment_worms.py", "max_stars_repo_name": "zamanianlab/wrmXpress", "max_stars_repo_head_hexsha": "a40b3e7d66c3ca4e319ad268fd5c0bf0de036d16", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "modules/segment_worms.py", "max_issues_repo_name": "zamanianlab/wrmXpress", "max_issues_repo_head_hexsha": "a40b3e7d66c3ca4e319ad268fd5c0bf0de036d16", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-01-24T17:31:03.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-24T17:31:03.000Z", "max_forks_repo_path": "modules/segment_worms.py", "max_forks_repo_name": "zamanianlab/wrmXpress", "max_forks_repo_head_hexsha": "a40b3e7d66c3ca4e319ad268fd5c0bf0de036d16", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4107142857, "max_line_length": 91, "alphanum_fraction": 0.5686798503, "include": true, "reason": "import numpy,from scipy", "num_tokens": 925}
|
import pj2_clfs_zhihu.config as conf
import numpy as np
import word2vec
def emb2npz(emb_file_path, emb_dict_path):
    """Convert a text-format word2vec embedding into a compressed .npz file.

    Stores the vector matrix as `vec` and the vocabulary hash as `word2id`,
    appending an all-zero row for the '<PAD>' token.
    NOTE(review): the original (Chinese) docstring also promised an '<UNK>'
    entry, but none is added here — unknown words are mapped to '</s>' in
    data2npz instead; confirm this is intentional.
    """
    emb = word2vec.load(emb_file_path)
    vec = emb.vectors
    word2id = emb.vocab_hash
    # register the padding token and append its (zero) vector row
    word2id['<PAD>'] = len(word2id)
    pad_row = [0] * vec.shape[1]
    vec = np.row_stack((vec, pad_row))
    np.savez_compressed(emb_dict_path, vec=vec, word2id=word2id)
    print('word size: {}'.format(len(word2id)))
    print('emb shape: {}'.format(vec.shape))
def padding(texts, max_len, pad=0):
    """Truncate or right-pad the list `texts` to exactly `max_len` items,
    filling with `pad` when too short."""
    if len(texts) > max_len:
        return texts[:max_len]
    return texts + [pad] * (max_len - len(texts))
def data2npz(src_path, dst_path):
    """Convert a tab-separated text dataset into a compressed .npz archive.

    Each line of `src_path` is: label + '\\t' + title + '\\t' + content,
    e.g. "40,6<TAB>w6061,w26959,w109<TAB>w23255,w728,...". Labels become
    multi-hot vectors of size conf.n_classes; title/content tokens are
    mapped to ids (unknown words fall back to '</s>') and padded or
    truncated to conf.title_seq_len / conf.content_seq_len.
    """
    data = np.load(conf.emb_path)
    # .item() unpacks the 0-d object array back into the original dict
    word2id = data['word2id'].item()
    del data
    labels = []
    titles = []
    contents = []
    with open(src_path, 'r', encoding='utf-8') as f:
        for i, line in enumerate(f):
            label, title, content = line.replace('\n', '').split('\t')
            label = [int(lab) for lab in label.split(',')]
            label_mat = np.zeros(conf.n_classes, dtype='int32')
            label_mat[label] = 1
            labels.append(label_mat)
            # word2id
            title = [word2id[word if word in word2id else '</s>'] for word in title.split(',') if word.rstrip()]
            content = [word2id[word if word in word2id else '</s>'] for word in content.split(',') if word.rstrip()]
            # padding
            titles.append(padding(title, conf.title_seq_len, pad=word2id['<PAD>']))
            contents.append(padding(content, conf.content_seq_len, pad=word2id['<PAD>']))
    print('data size: {}'.format(len(labels)))
    np.savez_compressed(dst_path, label=labels, title=titles, content=contents)
if __name__ == '__main__':
    import os
    # make sure the output directory exists before writing any .npz files
    out_dir = 'data/'
    os.makedirs(out_dir, exist_ok=True)
    emb2npz(conf.raw_emb_path, conf.emb_path)
    data2npz(conf.train_file, conf.train_data)
    data2npz(conf.dev_file, conf.dev_data)
|
{"hexsha": "20650ef8aa475d95536781b392b7d79920ea5615", "size": 2175, "ext": "py", "lang": "Python", "max_stars_repo_path": "pj2_clfs_zhihu/pre_data.py", "max_stars_repo_name": "AidenLong/PJ_NLP", "max_stars_repo_head_hexsha": "527e37806011235d86d4f86e3ee424f97ffffbdb", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 157, "max_stars_repo_stars_event_min_datetime": "2019-04-02T03:47:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T17:21:00.000Z", "max_issues_repo_path": "pj2_clfs_zhihu/pre_data.py", "max_issues_repo_name": "123swx/PJ_NLP", "max_issues_repo_head_hexsha": "58dacf5ec61030ced725f9200851c57ff8c5a3af", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2019-03-08T09:41:01.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-11T03:35:13.000Z", "max_forks_repo_path": "pj2_clfs_zhihu/pre_data.py", "max_forks_repo_name": "123swx/PJ_NLP", "max_forks_repo_head_hexsha": "58dacf5ec61030ced725f9200851c57ff8c5a3af", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 48, "max_forks_repo_forks_event_min_datetime": "2019-04-04T08:38:32.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-28T08:37:08.000Z", "avg_line_length": 33.984375, "max_line_length": 116, "alphanum_fraction": 0.6312643678, "include": true, "reason": "import numpy", "num_tokens": 636}
|
# TODO: The snake passes through the wall; food can spawn on top of the snake itself.
# TODO: try batch_size 500
# TODO: make every first move random
# TODO: high-score graph
# TODO: pie or heatmap chart of steps taken per score, as an alternative to the dot graph
# Keras plot model loss (https://machinelearningmastery.com/display-deep-learning-model-training-history-in-keras/)
# Keras model block-diagram plotting (https://keras.io/api/utils/model_plotting_utils/)
# !pip uninstall tensorflow
# !pip install tensorflow-gpu
# !pip install pygame
# !pip install matplotlib==3.3.4
# !pip install --upgrade pip
# !pip install opencv-contrib-python
# !pip install opencv-python-headless
# !pip install seaborn
import sys
import os
os.environ['TF_XLA_FLAGS'] = '--tf_xla_enable_xla_devices'
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# os.environ['SDL_VIDEODRIVER'] = 'dummy'
import pygame
import numpy as np
import cv2
from DDQN2 import DDQNAgent
from GraphFunc import drawing_graph
# import seaborn as sns
# import matplotlib.pyplot as plt
#
# sns.set()
# plt.style.use('seaborn-colorblind')
np.set_printoptions(threshold=sys.maxsize)
class environment_init:
    """Pygame playground for the DDQN snake agent.

    Owns the game surface, draws walls/snake/food, and converts the area
    around the snake's head (the "perspective" crop) into stacked grayscale
    frames that serve as the agent's observation.  With ``gui=True`` an
    extra debug window additionally shows the environment, the perspective
    crop and live statistics (score, epsilon, step count, ...).

    NOTE(review): methods reference the module-level globals ``snake``,
    ``run`` and ``ddqn_agent`` that are created further down in this script.
    """

    def __init__(self, env_pixel, board_size, resize_input_dims, take_graph, graph_per_episode, pers_of_kernel,
                 number_of_food, food_decay_step, input_ch_num=4, gui=True):
        # The window must map onto an integer grid of equally sized square blocks.
        assert (board_size[1] * (env_pixel[0] / env_pixel[1])) % 1 == 0, "Window width must be compatible number."
        assert env_pixel[0] % board_size[0] == 0, "Window width must be divisible by the board size."
        assert env_pixel[1] % board_size[1] == 0, "Window height must be divisible by the board size."
        pygame.init()
        self.take_graph = take_graph                # whether to collect/draw score graphs
        self.graph_per_episode = graph_per_episode  # episodes between graph redraws
        self.env_pixel = env_pixel                  # window size in pixels (w, h)
        self.number_of_food = number_of_food
        # +1 guarantees the "food count changed" check in Run.running fires once at start.
        self.old_number_of_food = number_of_food+1
        self.food_decay_step = food_decay_step
        self.pers_of_kernel = pers_of_kernel        # perspective crop size in blocks (w, h)
        self.input_ch_num = input_ch_num            # number of stacked frames fed to the network
        self.font = pygame.font.Font('freesansbold.ttf', 14)
        pygame.display.set_caption('PUNGI')
        self.restart = False
        self.board_size = board_size
        self.block_size = int(min(self.env_pixel[0], self.env_pixel[1]) / min(self.board_size[0], self.board_size[1]))
        self.do_resize = True
        self.gui = gui
        # Head position at the time of the last screenshot (skip re-capture when unchanged).
        self.state_flash_x = 0
        self.state_flash_y = 0
        # Current / next observation arrays and their debug pygame surfaces.
        self.st_flash = None
        self.new_st_flash = None
        self.st_flash_not_rot_surface = None
        self.new_st_flash_not_rot_surface = None
        self.st_flash_rot_surface = None
        self.new_st_flash_rot_surface = None
        self.st_flash_not_rot_gray_surf = None
        self.new_st_flash_not_rot_gray_surf = None
        self.pic_counter = 0
        self.pers_fix = [0, 0]  # offset keeping the perspective crop inside the window
        self.resize_input_dims = resize_input_dims
        # Skip the cv2.resize step when the crop already has the target size.
        if resize_input_dims[0] == pers_of_kernel[0] * self.block_size \
                and resize_input_dims[1] == pers_of_kernel[1] * self.block_size:
            self.do_resize = False
        if self.gui:
            self.env_window = pygame.Surface((self.env_pixel[0], self.env_pixel[1]))
            self.gui_window = pygame.display.set_mode((64 + self.env_pixel[0] + 64 + self.pers_of_kernel[0] *
                                                       self.block_size + 64 + self.pers_of_kernel[0] * self.block_size
                                                       + 64, 64 + self.env_pixel[1] + 200))
        else:
            self.env_window = pygame.display.set_mode((self.env_pixel[0], self.env_pixel[1]))

    def gui_blit(self):
        """Redraw the debug window: environment, perspective crop and statistics."""
        self.gui_window.fill((75, 75, 75))
        self.gui_window.blit(self.env_window, (64, 64))
        # On the very first step only st_flash exists; afterwards show the newest crop.
        if ddqn_agent.t == 0:
            self.gui_window.blit(self.st_flash_not_rot_surface,
                                 (64 + self.env_pixel[0] + 64,
                                  64 + (self.env_pixel[1] - self.pers_of_kernel[1] * self.block_size) / 2))
        else:
            self.gui_window.blit(self.new_st_flash_not_rot_surface,
                                 (64 + self.env_pixel[0] + 64,
                                  64 + (self.env_pixel[1] - self.pers_of_kernel[1] * self.block_size) / 2))
        score_render = self.font.render("Score: " + str(run.score), True, (255, 255, 255))
        self.gui_window.blit(score_render, (10, 2))
        high_score_render = self.font.render("Highest Score: " + str(run.highestscore), True, (255, 255, 255))
        self.gui_window.blit(high_score_render,
                             (64 + self.env_pixel[0] + 64 + self.pers_of_kernel[0] * self.block_size +
                              64 + self.pers_of_kernel[0] * self.block_size - 95, 2))
        episode_render = self.font.render("Episode Number: " + str(run.episode_number), True, (255, 255, 255))
        self.gui_window.blit(episode_render, (64, 64 + self.env_pixel[1] + 18))
        step_render = self.font.render("Step Number: " + str(ddqn_agent.t), True, (255, 255, 255))
        self.gui_window.blit(step_render, (64, 64 + self.env_pixel[1] + 18 + 30))
        average_render = self.font.render("Average Score: " + str(run.scores_pie_graph[-10:].mean()), True,
                                          (255, 255, 255))
        self.gui_window.blit(average_render, (64, 64 + self.env_pixel[1] + 18 + 60))
        epsilon_render = self.font.render("Epsilon: {:2.4f} ".format(ddqn_agent.epsilon), True, (255, 255, 255))
        self.gui_window.blit(epsilon_render, (64, 64 + self.env_pixel[1] + 18 + 90))
        remain_render = self.font.render("Max Step: " + str(run.maxIdleStep), True, (255, 255, 255))
        self.gui_window.blit(remain_render, (64, 64 + self.env_pixel[1] + 18 + 120))
        food_number = self.font.render("Food Number: " + str(self.number_of_food), True, (255, 255, 255))
        self.gui_window.blit(food_number, (64, 64 + self.env_pixel[1] + 18 + 150))
        env_render = self.font.render("Env.", True, (255, 255, 255))
        self.gui_window.blit(env_render, (64, 32 + 2))
        pers_render = self.font.render("Pers.", True, (255, 255, 255))
        self.gui_window.blit(pers_render, (64 + self.env_pixel[0] + 64, 32 + 2))

    def set_highestscore(self):
        """Track the best score so far.

        NOTE(review): when the (decaying) food count equals
        ``run.highestscore_counter`` the record is zeroed and the counter
        lowered — presumably to restart the record after each food-count
        milestone; confirm the intent.
        """
        if run.score > run.highestscore:
            run.highestscore = run.score
        if self.number_of_food == run.highestscore_counter:
            run.highestscore = 0
            run.highestscore_counter -= 1
        # highestscore_render = self.font.render("Highest Score : " + str(run.highestscore), True, (255, 255, 255))
        # self.env_window.blit(highestscore_render, (300, 0))

    def wall(self):
        """Draw the red one-block-thick border wall around the board."""
        for wall_location_x in range(0, self.env_pixel[0], self.block_size):
            pygame.draw.rect(self.env_window, (255, 0, 0), (wall_location_x, 0, self.block_size, self.block_size))
            pygame.draw.rect(self.env_window, (255, 0, 0), (wall_location_x, self.block_size * (self.board_size[1] - 1),
                                                            self.block_size, self.block_size))
        for wall_location_y in range(0, self.env_pixel[1], self.block_size):
            pygame.draw.rect(self.env_window, (255, 0, 0), (0, wall_location_y, self.block_size, self.block_size))
            pygame.draw.rect(self.env_window, (255, 0, 0), (self.block_size * (self.board_size[0] - 1), wall_location_y,
                                                            self.block_size, self.block_size))

    # drawing grid lines
    def grid_lines(self):
        """Draw a faint debug grid over the board (currently not called)."""
        for x in range(0, self.env_pixel[0], self.block_size):  # drawing vertical lines
            pygame.draw.line(self.env_window, (40, 40, 40), (x, 0), (x, self.env_pixel[1]))
        for y in range(0, self.env_pixel[1], self.block_size):  # drawing horizontal lines
            pygame.draw.line(self.env_window, (40, 40, 40), (0, y), (self.env_pixel[0], y))

    def update_window(self):
        """Flip the pygame display buffer."""
        pygame.display.update()

    def perspective_not_rotated_grayscale(self):
        """Debug helper: blit the grayscale (un-rotated) perspective crop onto the GUI."""
        if ddqn_agent.t == 0:
            # cv2.imwrite("resimler/zresim0_not_rotated_pers_gray.png", self.st_flash)
            if self.gui:
                self.st_flash_not_rot_gray_surf = pygame.surfarray.make_surface(self.st_flash.swapaxes(0, 1))
                self.gui_window.blit(self.st_flash_not_rot_gray_surf,
                                     (64 + self.env_pixel[0] + 64 + self.pers_of_kernel[0] * self.block_size + 64,
                                      64 + (self.env_pixel[1] - self.pers_of_kernel[1] * self.block_size) / 2))
        else:
            # cv2.imwrite("resimler/zresim" + str(self.pic_counter + 1) + "_not_rotated_pers_gray.png", self.new_st_flash)
            self.new_st_flash_not_rot_gray_surf = pygame.Surface((self.pers_of_kernel[0] * self.block_size,
                                                                  self.pers_of_kernel[1] * self.block_size))
            if self.gui:
                self.new_st_flash_not_rot_gray_surf = pygame.surfarray.make_surface(self.new_st_flash.swapaxes(0, 1))
                self.gui_window.blit(self.new_st_flash_not_rot_gray_surf,
                                     (64 + self.env_pixel[0] + 64 + self.pers_of_kernel[0] * self.block_size + 64,
                                      64 + (self.env_pixel[1] - self.pers_of_kernel[1] * self.block_size) / 2))

    def perspective_rotate_against_snake(self):
        """Rotate the perspective crop so the snake always appears to face up."""
        def perspective_pic_rotate(st_flash_not_rot):
            # Rotation chosen from the snake's current velocity.
            if snake.change_x < 0:  # moving left
                st_flash_rot = cv2.rotate(st_flash_not_rot, cv2.ROTATE_90_CLOCKWISE)
            elif snake.change_x > 0:  # moving right
                st_flash_rot = cv2.rotate(st_flash_not_rot, cv2.ROTATE_90_COUNTERCLOCKWISE)
            elif snake.change_y < 0:  # moving up
                st_flash_rot = st_flash_not_rot
            else:  # moving down
                st_flash_rot = cv2.rotate(st_flash_not_rot, cv2.ROTATE_180)
            return st_flash_rot

        if ddqn_agent.t == 0:
            self.st_flash = perspective_pic_rotate(self.st_flash)
            # cv2.imwrite("resimler/zresim0_rotated_pers.png", self.st_flash)
            if self.gui:
                self.st_flash_rot_surface = pygame.surfarray.make_surface(self.st_flash.swapaxes(0, 1))
                self.gui_window.blit(self.st_flash_rot_surface,
                                     (64 + self.env_pixel[0] + 64 + self.pers_of_kernel[0] * self.block_size + 64,
                                      64 + (self.env_pixel[1] - self.pers_of_kernel[1] * self.block_size) / 2))
        else:
            self.new_st_flash = perspective_pic_rotate(self.new_st_flash)
            # cv2.imwrite("resimler/zresim" + str(self.pic_counter + 1) + "_rotated_pers.png", self.new_st_flash)
            if self.gui:
                self.new_st_flash_rot_surface = pygame.surfarray.make_surface(self.new_st_flash.swapaxes(0, 1))
                self.gui_window.blit(self.new_st_flash_rot_surface,
                                     (64 + self.env_pixel[0] + 64 + self.pers_of_kernel[0] * self.block_size + 64,
                                      64 + (self.env_pixel[1] - self.pers_of_kernel[1] * self.block_size) / 2))

    def perspective_wall_prevent_overflow(self):
        """Compute an offset that keeps the perspective crop inside the window."""
        if snake.snake_head[0] < self.env_pixel[0] / 2:
            self.pers_fix[0] = (self.pers_of_kernel[0] * self.block_size) / 2 - snake.snake_head[0]
        elif snake.snake_head[0] > self.env_pixel[0] / 2:
            self.pers_fix[0] = self.env_pixel[0] - (self.pers_of_kernel[0] * self.block_size) / 2 - snake.snake_head[0]
        else:
            self.pers_fix[0] = 0
        if snake.snake_head[1] < self.env_pixel[1] / 2:
            self.pers_fix[1] = (self.pers_of_kernel[1] * self.block_size) / 2 - snake.snake_head[1]
        elif snake.snake_head[1] > self.env_pixel[1] / 2:
            self.pers_fix[1] = self.env_pixel[1] - (self.pers_of_kernel[1] * self.block_size) / 2 - snake.snake_head[1]
        else:
            self.pers_fix[1] = 0

    def first_screenshoot(self):
        """Capture the initial observation.

        Crops the area around the head, converts it to grayscale, optionally
        resizes it, then stacks the same frame ``input_ch_num`` times and
        reshapes to (1, H, W, C) for the network.
        """
        # self.perspective_wall_prevent_overflow()
        rect = pygame.Rect(-(snake.snake_head[0] - (self.pers_of_kernel[0] * self.block_size) / 2 + self.pers_fix[0]),
                           -(snake.snake_head[1] - (self.pers_of_kernel[1] * self.block_size) / 2 + self.pers_fix[1]),
                           self.pers_of_kernel[0] * self.block_size, self.pers_of_kernel[1] * self.block_size)
        self.st_flash_not_rot_surface = pygame.Surface((self.pers_of_kernel[0] * self.block_size,
                                                        self.pers_of_kernel[1] * self.block_size))
        pygame.Surface.blit(self.st_flash_not_rot_surface, self.env_window, rect)
        # pygame.image.save(self.env_window, "resimler/0env.png")
        # pygame.image.save(self.st_flash_not_rot_surface, "resimler/0not_rotated.png")
        self.st_flash = pygame.surfarray.array3d(self.st_flash_not_rot_surface)
        # self.st_flash = self.st_flash.swapaxes(0, 1)
        # cv2.imwrite("resimler/zresim0_not_rotated_pers.png", self.st_flash)
        # environment_resim = pygame.surfarray.array3d(self.env_window)
        # environment_resim = environment_resim.swapaxes(0, 1)
        # cv2.imwrite("resimler/zresim0_not_rotated_env.png", environment_resim)
        if self.gui:
            self.gui_blit()
        # # Rotating the perspective image so the snake always faces up
        # self.perspective_rotate_against_snake()
        self.st_flash = cv2.cvtColor(self.st_flash, cv2.COLOR_BGR2GRAY).astype(float)
        # self.st_flash[self.st_flash == 159] = 116  # food
        # self.st_flash[self.st_flash == 29] = 127  # wall
        # self.st_flash[self.st_flash == 150] = 63  # snake head
        # self.st_flash[self.st_flash == 73] = 127  # snake tail
        # self.perspective_not_rotated_grayscale()
        self.update_window()
        if self.do_resize:
            self.st_flash = cv2.resize(self.st_flash, (self.resize_input_dims[0], self.resize_input_dims[1]))
        # self.st_flash = np.stack((self.st_flash, self.st_flash, self.st_flash, self.st_flash),
        #                          axis=2)
        # Replicate the first frame across all input channels.
        st_flash_start_list = [self.st_flash for i in range(self.input_ch_num)]
        self.st_flash = np.stack(st_flash_start_list, axis=2)
        self.st_flash = self.st_flash.reshape(1, self.st_flash.shape[0], self.st_flash.shape[1], self.st_flash.shape[2])

    def take_screenshoot(self):
        """Capture the next observation after a move.

        Only re-captures when the head actually moved.  The newest grayscale
        frame is prepended to the channel stack and the oldest dropped, so
        the network always sees the last ``input_ch_num`` frames.
        """
        # self.perspective_wall_prevent_overflow()
        if snake.snake_head[0] != self.state_flash_x or snake.snake_head[1] != self.state_flash_y:
            rect = pygame.Rect(-(snake.snake_head[0] - (self.pers_of_kernel[0] * self.block_size) / 2 + self.pers_fix[0]),
                               -(snake.snake_head[1] - (self.pers_of_kernel[1] * self.block_size) / 2 + self.pers_fix[1]),
                               self.pers_of_kernel[0] * self.block_size, self.pers_of_kernel[1] * self.block_size)
            self.new_st_flash_not_rot_surface = pygame.Surface((self.pers_of_kernel[0] * self.block_size,
                                                                self.pers_of_kernel[1] * self.block_size))
            pygame.Surface.blit(self.new_st_flash_not_rot_surface, self.env_window, rect)
            self.new_st_flash = pygame.surfarray.array3d(self.new_st_flash_not_rot_surface)
            # self.new_st_flash = self.new_st_flash.swapaxes(0, 1)
            # cv2.imwrite("resimler/zresim" + str(self.pic_counter + 1) + "_not_rotated_pers.png", self.new_st_flash)
            # environment_resim = pygame.surfarray.array3d(self.env_window)
            # environment_resim = environment_resim.swapaxes(0, 1)
            # cv2.imwrite("resimler/zresim" + str(self.pic_counter + 1) + "_not_rotated_env.png", environment_resim)
            if self.gui:
                self.gui_blit()
            # # Rotating the perspective image so the snake always faces up
            # self.perspective_rotate_against_snake()
            self.new_st_flash = cv2.cvtColor(self.new_st_flash, cv2.COLOR_BGR2GRAY).astype(float)
            # self.new_st_flash[self.new_st_flash == 159] = 116  # food
            # self.new_st_flash[self.new_st_flash == 29] = 127  # wall
            # self.new_st_flash[self.new_st_flash == 150] = 63  # snake head
            # self.new_st_flash[self.new_st_flash == 73] = 127  # snake tail
            # self.perspective_not_rotated_grayscale()
            self.update_window()
            if self.do_resize:
                self.new_st_flash = cv2.resize(self.new_st_flash, (self.resize_input_dims[0], self.resize_input_dims[1]))
            self.new_st_flash = self.new_st_flash.reshape(1, self.new_st_flash.shape[0], self.new_st_flash.shape[1], 1)
            # Newest frame first, keep the previous C-1 frames.
            self.new_st_flash = np.append(self.new_st_flash, self.st_flash[:, :, :, :-1], axis=3)
            self.pic_counter += 1
            self.state_flash_x = snake.snake_head[0]
            self.state_flash_y = snake.snake_head[1]
class Snake(object):
    """The snake: head position, tail segments and current velocity.

    Positions are pixel coordinates snapped to the block grid of the
    module-level ``playground_init`` environment; ``change_x``/``change_y``
    hold the per-step velocity in pixels.
    """

    def __init__(self):
        self.change_x = None
        self.change_y = None
        self.snake_head = [0, 0]
        self.tails = [[0, 0]]
        self.snake_head_location_random()
        self.snake_tail_location_random()
        # Cell vacated by the last tail segment (where a new segment grows).
        self.tails_last = [self.tails[0][0], self.tails[0][1]]

    def move(self):
        """Advance one step: shift every tail segment forward, then the head."""
        self.tails_last = [self.tails[len(self.tails) - 1][0], self.tails[len(self.tails) - 1][1]]
        for i in range(len(self.tails) - 1, 0, -1):
            self.tails[i] = [self.tails[i - 1][0], self.tails[i - 1][1]]
        self.tails[0] = [self.snake_head[0], self.snake_head[1]]
        # Fixed: read the velocity from self rather than the module-level
        # ``snake`` global (equivalent for the single instance, but correct
        # for any instance and testable in isolation).
        self.snake_head[0] += self.change_x
        self.snake_head[1] += self.change_y

    def snake_head_location_random(self):
        """Spawn the head at a random grid cell away from the walls."""
        self.snake_head[0] = np.random.choice(np.arange(start=2 * playground_init.block_size,
                                                        stop=playground_init.env_pixel[0] - 3 * playground_init.block_size,
                                                        step=playground_init.block_size))
        # BUG FIX: the y coordinate must be bounded by the window *height*
        # (env_pixel[1]); the original used env_pixel[0], which only worked
        # because the default window happens to be square.
        self.snake_head[1] = np.random.choice(np.arange(start=2 * playground_init.block_size,
                                                        stop=playground_init.env_pixel[1] - 3 * playground_init.block_size,
                                                        step=playground_init.block_size))

    def snake_tail_location_random(self):
        """Pick a random start direction and place the single tail block behind the head."""
        step = playground_init.block_size
        # direction name -> (change_x, change_y); Turkish names kept from the
        # original: sol=left, sağ=right, yukarı=up, aşağı=down.
        directions = {"sol": (-step, 0), "sağ": (step, 0), "yukarı": (0, -step), "aşağı": (0, step)}
        rand_start_direction = np.random.choice(list(directions))
        self.change_x, self.change_y = directions[rand_start_direction]
        # The tail sits directly opposite the direction of travel.
        self.tails[0][0] = self.snake_head[0] - self.change_x
        self.tails[0][1] = self.snake_head[1] - self.change_y

    def snake_head_spawn(self):
        """Draw the head as a white block on the environment surface."""
        snake_head_rect = pygame.Rect(self.snake_head[0], self.snake_head[1], playground_init.block_size,
                                      playground_init.block_size)
        pygame.draw.rect(playground_init.env_window, (255, 255, 255), snake_head_rect)

    # Adding tail if the snake eats food
    def add_tail(self):
        """Grow by one segment at the cell the tail just vacated."""
        self.tails.append([self.tails_last[0], self.tails_last[1]])

    # Arranging snake's tail location
    def snake_tail_spawn(self):
        """Draw all tail segments as red blocks on the environment surface."""
        for i in range(0, len(self.tails)):
            snake_tail_rect = pygame.Rect(self.tails[i][0], self.tails[i][1], playground_init.block_size,
                                          playground_init.block_size)
            pygame.draw.rect(playground_init.env_window, (255, 45, 45), snake_tail_rect)
class Food(object):
    """Food pellets: placement, collision-free respawning and eat handling.

    ``self.food`` is a (number_of_food, 2) array of pixel coordinates.
    Uses the module-level globals ``playground_init``, ``snake`` and ``run``.
    """

    def __init__(self):
        self.food_state = True   # True while a (re)placement is pending
        self.food = np.zeros((playground_init.number_of_food, 2))
        # self.food_location()
        self.reward = 0          # reward produced by the last step (read by Run)
        self.food_counter = 0    # 0 only before the very first placement
        self.foodIndex = None    # index of the pellet currently being processed
        self.old_food = None     # snapshot used to avoid overlapping pellets

    def eat(self):
        """If the head is on a pellet: score, reward, grow, and respawn that pellet."""
        for self.foodIndex in range(0, playground_init.number_of_food):
            if (snake.snake_head[0] == self.food[self.foodIndex][0] and snake.snake_head[1] ==
                    self.food[self.foodIndex][1]):
                run.score += 1
                self.reward = 10
                snake.add_tail()
                self.food_state = True
                self.food_location()
                self.food_spawn()
                # print("food " +str(self.reward))
                # Eating extends the idle-step budget.
                run.maxIdleStep += run.foodStepIncrease
                # print("max idle: " + str(run.maxIdleStep))

    # Arranging food spawn location
    def food_location(self):
        """Randomly (re)place pellets on the grid, avoiding the snake and other pellets.

        On the very first call (``food_counter == 0``) all pellets are placed;
        afterwards only ``self.foodIndex`` (the pellet just eaten) is moved.
        """
        if self.food_state:
            if self.food_counter != 0:
                self.food[self.foodIndex][0] = np.random.choice(
                    np.arange(start=playground_init.block_size,
                              stop=playground_init.env_pixel[0] - playground_init.block_size,
                              step=playground_init.block_size))
                self.food[self.foodIndex][1] = np.random.choice(
                    np.arange(start=playground_init.block_size,
                              stop=playground_init.env_pixel[1] - playground_init.block_size,
                              step=playground_init.block_size))
            else:
                for i in range(0, playground_init.number_of_food):
                    self.food[i][0] = np.random.choice(
                        np.arange(start=playground_init.block_size,
                                  stop=playground_init.env_pixel[0] - playground_init.block_size,
                                  step=playground_init.block_size))
                    self.food[i][1] = np.random.choice(
                        np.arange(start=playground_init.block_size,
                                  stop=playground_init.env_pixel[1] - playground_init.block_size,
                                  step=playground_init.block_size))
        # Re-roll until the chosen cell(s) collide with nothing.
        if self.food_state:
            if self.food_counter != 0:
                while self.checkFoodLocation(self.food[self.foodIndex][0], self.food[self.foodIndex][1]):
                    self.food[self.foodIndex][0] = np.random.choice(
                        np.arange(start=playground_init.block_size,
                                  stop=playground_init.env_pixel[0] - playground_init.block_size,
                                  step=playground_init.block_size))
                    self.food[self.foodIndex][1] = np.random.choice(
                        np.arange(start=playground_init.block_size,
                                  stop=playground_init.env_pixel[1] - playground_init.block_size,
                                  step=playground_init.block_size))
            if self.food_counter == 0:
                # NOTE(review): on the first placement the whole food array is
                # passed as both arguments; checkFoodLocation indexes it directly.
                while self.checkFoodLocation(self.food, self.food):
                    for i in range(0, playground_init.number_of_food):
                        self.food[i][0] = np.random.choice(
                            np.arange(start=playground_init.block_size,
                                      stop=playground_init.env_pixel[0] - playground_init.block_size,
                                      step=playground_init.block_size))
                        self.food[i][1] = np.random.choice(
                            np.arange(start=playground_init.block_size,
                                      stop=playground_init.env_pixel[1] - playground_init.block_size,
                                      step=playground_init.block_size))
        self.food_state = False
        self.old_food = np.copy(self.food)

    def checkFoodLocation(self, posX, posY):
        """Return True when the proposed location(s) collide with the snake or existing food.

        After the first placement ``posX``/``posY`` are scalar coordinates of a
        single pellet; on the first placement (``food_counter == 0``) the whole
        food array is passed and checked element-wise against the snake.
        """
        if self.food_state:
            if self.food_counter != 0:
                if posX == snake.snake_head[0] and posY == snake.snake_head[1]:
                    return True
                for i in range(len(snake.tails)):
                    if posX == snake.tails[i][0] and posY == snake.tails[i][1]:
                        return True
                for i in range(0, playground_init.number_of_food):
                    if (self.old_food[i][0] == posX) and (self.old_food[i][1] == posY):
                        return True
            elif self.food_counter == 0:
                for i in range(0, playground_init.number_of_food):
                    if posX[i][0] == snake.snake_head[0] and posX[i][1] == snake.snake_head[1]:
                        return True
                for j in range(0, playground_init.number_of_food):
                    if posX[j][0] == snake.tails[0][0] and posX[j][1] == snake.tails[0][1]:
                        return True
        self.food_counter += 1
        return False

    def food_spawn(self):
        """Draw every pellet as a teal block on the environment surface."""
        for i in range(0, playground_init.number_of_food):
            food_rect = pygame.Rect(self.food[i][0], self.food[i][1], playground_init.block_size,
                                    playground_init.block_size)
            pygame.draw.rect(playground_init.env_window, (70, 170, 170), food_rect)
class Run:
    """Main game/episode controller.

    Applies agent actions, checks collisions, restarts episodes, handles
    keyboard events, and drives the training loop.  Uses the module-level
    globals ``playground_init``, ``snake``, ``food`` and ``ddqn_agent``.
    """

    def __init__(self):
        # self.run_state = True
        self.food_decay_step_counter = 1
        self.action = None
        self.done = 0               # 1 on the transition that ends an episode
        self.episode_number = 1
        self.score = 0
        self.scores = np.array([])
        self.scores_pie_graph = np.zeros(10)       # rolling window: last 10 episode scores
        self.scores_pie_graph_100 = np.zeros(100)  # rolling window: last 100 episode scores
        self.highestscore = 0
        self.highestscore_counter = playground_init.number_of_food - 1
        self.highestscores = np.array([])
        self.average_score_100 = 0.0
        self.average_scores_100 = np.array([])
        self.average_scores_100_counter = 0
        self.graph_counter = 1
        self.plot_counter = 0
        self.maxIdleStep = 400  # The max step the snake can go without eating food
        self.foodStepIncrease = 100  # The number of step food gives after eaten
        self.doRestart = False
        self.highest_average_score_100 = 80

    def take_action(self, action):
        """Translate an agent action index into a new snake velocity.

        With 3 actions the controls are relative to the current heading
        (derived from head vs. first tail segment); with 4 actions they are
        absolute, and direct reversals are ignored.
        """
        self.action = action
        # print("takeactiongirdi_action: " + str(self.action))
        # print(self.action)
        if ddqn_agent.n_actions == 3:
            # Relative controls: only turn left, turn right or keep going forward (use n_actions=3)
            if snake.snake_head[0] < snake.tails[0][0]:  # heading left
                if action == 0:  # down
                    snake.change_x = 0
                    snake.change_y = playground_init.block_size
                elif action == 1:  # up
                    snake.change_x = 0
                    snake.change_y = -playground_init.block_size
                elif action == 2:  # left
                    snake.change_x = -playground_init.block_size
                    snake.change_y = 0
            elif snake.snake_head[0] > snake.tails[0][0]:  # heading right
                if action == 0:  # up
                    snake.change_x = 0
                    snake.change_y = -playground_init.block_size
                elif action == 1:  # down
                    snake.change_x = 0
                    snake.change_y = playground_init.block_size
                elif action == 2:  # right
                    snake.change_x = playground_init.block_size
                    snake.change_y = 0
            elif snake.snake_head[1] < snake.tails[0][1]:  # heading up
                if action == 0:  # left
                    snake.change_x = -playground_init.block_size
                    snake.change_y = 0
                elif action == 1:  # right
                    snake.change_x = playground_init.block_size
                    snake.change_y = 0
                elif action == 2:  # up
                    snake.change_x = 0
                    snake.change_y = -playground_init.block_size
            elif snake.snake_head[1] > snake.tails[0][1]:  # heading down
                if action == 0:  # right
                    snake.change_x = playground_init.block_size
                    snake.change_y = 0
                elif action == 1:  # left
                    snake.change_x = -playground_init.block_size
                    snake.change_y = 0
                elif action == 2:  # down
                    snake.change_x = 0
                    snake.change_y = playground_init.block_size
        elif ddqn_agent.n_actions == 4:
            # Absolute controls: all four directions, direct reversals ignored
            if (action == 0) and (snake.change_x != playground_init.block_size):
                snake.change_x = -playground_init.block_size
                snake.change_y = 0
            elif (action == 1) and (snake.change_x != -playground_init.block_size):
                snake.change_x = playground_init.block_size
                snake.change_y = 0
            elif (action == 2) and (snake.change_y != playground_init.block_size):
                snake.change_y = -playground_init.block_size
                snake.change_x = 0
            elif (action == 3) and (snake.change_y != -playground_init.block_size):
                snake.change_y = playground_init.block_size
                snake.change_x = 0

    # Check if the snake has hit the wall
    def hit_wall(self):
        """Penalize and flag an episode restart when the head is on the border wall."""
        # Restarting game if the snake has hit the wall
        if snake.snake_head[0] == 0 or snake.snake_head[1] == 0 \
                or snake.snake_head[0] == playground_init.block_size * (playground_init.board_size[0] - 1) \
                or snake.snake_head[1] == playground_init.block_size * (playground_init.board_size[1] - 1):
            food.reward = -1
            # print("hitwall"+str(food.reward))
            self.doRestart = True
            self.done = 1

    # Check if the snake has eaten itself
    def hit_itself(self):
        """Penalize and flag an episode restart when the head overlaps a tail segment."""
        # if len(snake.tails) > 1 or len(snake.tails) > 1:
        for i in range(0, len(snake.tails) - 1):
            if snake.snake_head[0] == snake.tails[i][0] and snake.snake_head[1] == snake.tails[i][1]:
                # i=0
                food.reward = -1
                # print("hitself : " +str(food.reward))
                self.doRestart = True
                self.done = 1
                break

    def button_press(self):
        """Handle pygame events.

        QUIT / ESC save the model (when training) and exit; ENTER exits
        without saving (restoring the backup in mode 2); SPACE pauses.
        """
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                if ddqn_agent.calistir == 1:
                    ddqn_agent.save_model()
                    print("QUIT. Ağırlık kaydedildi.")
                elif ddqn_agent.calistir == 2:
                    ddqn_agent.save_model()
                    print("QUIT. Ağırlık kaydedildi.")
                elif ddqn_agent.calistir == 3:
                    print("QUIT. Ağırlık kaydolmadı.")
                self.exitgame()
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    if ddqn_agent.calistir == 1:
                        ddqn_agent.save_model()
                        print("ESC. Ağırlık kaydedildi.")
                    elif ddqn_agent.calistir == 2:
                        ddqn_agent.save_model()
                        print("ESC. Ağırlık kaydedildi.")
                    elif ddqn_agent.calistir == 3:
                        print("ESC. Ağırlık kaydolmadı.")
                    self.exitgame()
                elif event.key == pygame.K_KP_ENTER or event.key == pygame.K_RETURN:
                    if ddqn_agent.calistir == 1:
                        print("ENTER tuşuna basıldı. Model zaten " +
                              str(ddqn_agent.replace_target) + " episodeda bir kaydedildi.")
                    elif ddqn_agent.calistir == 2:
                        os.rename(ddqn_agent.model + '.backup', ddqn_agent.model)
                        print("ENTER tuşuna basıldı. Ağırlık kaydolmadı.")
                    elif ddqn_agent.calistir == 3:
                        print("ENTER tuşuna basıldı. Ağırlık kaydolmadı.")
                    self.exitgame()
                elif event.key == pygame.K_SPACE:
                    pause_press = True
                    self.pause_game(pause_press)

    def pause_game(self, pause_press):
        """Block until SPACE is pressed again."""
        while pause_press:
            for eventspace in pygame.event.get():
                if eventspace.type == pygame.KEYDOWN:
                    if eventspace.key == pygame.K_SPACE:
                        pause_press = False

    def restart(self):
        """Reset the episode: respawn snake/food, record the score into the
        rolling windows, decay the pellet count and print episode statistics."""
        # self.done = 1
        self.doRestart = False
        self.maxIdleStep = 400
        snake.tails = [[0, 0]]
        snake.snake_head_location_random()
        snake.snake_tail_location_random()
        snake.tails_last = [snake.tails[0][0], snake.tails[0][1]]
        # self.scores_pie_graph = np.append(self.scores_pie_graph, self.score)
        index = self.episode_number % 10
        self.scores_pie_graph[index] = self.score
        # NOTE(review): the 10-window indexes with episode_number % 10 but the
        # 100-window with (episode_number - 1) % 100 — confirm the off-by-one
        # difference is intentional.
        index_100 = (self.episode_number-1) % 100
        self.scores_pie_graph_100[index_100] = self.score
        self.score = 0
        playground_init.env_window.fill((0, 0, 0))
        # playground_init.grid_lines()
        playground_init.wall()
        snake.snake_tail_spawn()
        snake.snake_head_spawn()
        food.food_state = True
        food.food_counter = 0
        food.food_location()
        food.food_spawn()
        playground_init.take_screenshoot()
        # Gradually reduce the pellet count every food_decay_step agent steps.
        if ((ddqn_agent.t + 1) > playground_init.food_decay_step * self.food_decay_step_counter) and (
                playground_init.number_of_food != 1):
            playground_init.number_of_food -= 1
            self.food_decay_step_counter += 1
        average_score = self.scores_pie_graph.mean()
        self.average_score_100 = self.scores_pie_graph_100.mean()
        print("Episode Number: " + str(self.episode_number) + " Step Number: " + str(ddqn_agent.t) +
              " Highest Score: " + str(self.highestscore) +
              " Average Score: {:3.4f} ".format(average_score) +
              " Yüzlük Average Score: {:3.4f} ".format(self.average_score_100) +
              " Epsilon: {:2.4f} ".format(ddqn_agent.epsilon) +
              " Food Number: {} ".format(playground_init.number_of_food))  # + " Loss: " + str(self.avg_loss))
        ddqn_agent.episode_counter = self.episode_number
        self.episode_number += 1
        # if ddqn_agent.calistir == 1 or ddqn_agent.calistir == 2:
        #     if self.average_score_100 > self.highest_average_score_100 and playground_init.number_of_food <= 10:
        #         model_name = "67.ddqn_model_32x32_perspective16_UcResim_Food20dan1eDustu" + \
        #                      "_lr" + str(ddqn_agent.alpha) + \
        #                      "_gamma" + str(ddqn_agent.gamma) + \
        #                      "_FoodNum" + str(playground_init.number_of_food) + \
        #                      "_HundrAvSc" + str(self.average_score_100) + ".h5"
        #         ddqn_agent.q_eval.save(model_name)

    def exitgame(self):
        """Shut down pygame and terminate the process."""
        pygame.quit()
        sys.exit()

    def running(self):
        """Main loop: draw, act, store transitions, learn, and restart episodes until quit."""
        playground_init.env_window.fill((0, 0, 0))
        # playground_init.grid_lines()
        playground_init.wall()
        snake.snake_tail_spawn()
        snake.snake_head_spawn()
        food.food_location()
        food.food_spawn()
        playground_init.first_screenshoot()
        while True:
            self.button_press()
            playground_init.env_window.fill((0, 0, 0))
            # playground_init.grid_lines()
            playground_init.wall()
            food.food_spawn()
            self.take_action(ddqn_agent.choose_action(playground_init.st_flash))
            snake.move()
            food.eat()
            playground_init.set_highestscore()
            snake.snake_tail_spawn()
            snake.snake_head_spawn()
            self.hit_wall()
            self.hit_itself()
            playground_init.take_screenshoot()
            # Store the transition, then roll the state forward.
            ddqn_agent.remember(playground_init.st_flash, self.action, food.reward, playground_init.new_st_flash,
                                self.done)
            food.reward = 0
            self.done = 0
            playground_init.st_flash = playground_init.new_st_flash
            # calistir: 1 = train slowly (watchable), 2 = train at full speed, else = evaluate only.
            if ddqn_agent.calistir == 1:
                ddqn_agent.learn()
                pygame.time.wait(200)
            elif ddqn_agent.calistir == 2:
                ddqn_agent.learn()
                # pygame.time.wait(100)
            else:
                pygame.time.wait(25)
                pass
            if self.doRestart and playground_init.take_graph:
                self.average_scores_100 = np.append(self.average_scores_100, self.average_score_100)
                self.average_scores_100_counter += 1
                self.highestscores = np.append(self.highestscores, self.highestscore)
                # self.scores = np.append(self.scores, self.score)
                if self.episode_number % playground_init.graph_per_episode == 0:
                    drawing_graph(self.episode_number, self.average_scores_100, self.highestscores,
                                  ddqn_agent.gamma, ddqn_agent.alpha, ddqn_agent.epsilon_dec)
            if self.doRestart:
                self.restart()
            # Reset the model-save threshold whenever the pellet count changes.
            if (ddqn_agent.t % playground_init.food_decay_step == 0 or playground_init.number_of_food <= 10) and \
                    playground_init.old_number_of_food != playground_init.number_of_food:
                self.highest_average_score_100 = 80
                playground_init.old_number_of_food = playground_init.number_of_food
            # # To keep the snake from looping forever
            # self.maxIdleStep -= 1
            # if self.maxIdleStep == 0:
            #     # print("----MaxIdleStep 0 oldu----")
            #     self.restart()
            #     # food.reward = -1
            #     # self.maxIdleStep = 1000
# Build the environment, actors and agent, then start the main loop.
playground_init = environment_init(env_pixel=(128, 128), board_size=(32, 32),
                                   resize_input_dims=(64, 64), pers_of_kernel=(16, 16),
                                   input_ch_num=3,
                                   number_of_food=20, food_decay_step=150000,
                                   take_graph=False, graph_per_episode=1000,
                                   gui=True)
snake = Snake()
food = Food()
run = Run()
# if __name__ == '__main__':
ddqn_agent = DDQNAgent(alpha=0.000003, gamma=0.98, n_actions=3, batch_size=32, replace_target=10,
                       input_dims=(playground_init.resize_input_dims[0], playground_init.resize_input_dims[1],
                                   playground_init.input_ch_num),
                       epsilon=1.0, epsilon_dec=(2 * 10 ** (-6)), epsilon_end=0.0001,
                       mem_size=50000, observe=2000, calistir=1,
                       fname="weights/67.ddqn_model_32x32_perspective16_UcResim_Food20dan1eDustu_lr0.000003_gamma0.99.h5")
# calistir parameter >>> 1: train, do not load saved weights
# calistir parameter >>> 2: train, load saved weights
# calistir parameter >>> 3: evaluation only
# main_window = MainWindow()
run.running()
|
{"hexsha": "1b484da23ac3ee1c8a0412bb439eeafb3414b3c3", "size": 41834, "ext": "py", "lang": "Python", "max_stars_repo_path": "snake_agent.py", "max_stars_repo_name": "smlblr/Snake-Game-with-DDQN", "max_stars_repo_head_hexsha": "1b79a0d34cc07c43b121460f560bc2b8f99e591d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "snake_agent.py", "max_issues_repo_name": "smlblr/Snake-Game-with-DDQN", "max_issues_repo_head_hexsha": "1b79a0d34cc07c43b121460f560bc2b8f99e591d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "snake_agent.py", "max_forks_repo_name": "smlblr/Snake-Game-with-DDQN", "max_forks_repo_head_hexsha": "1b79a0d34cc07c43b121460f560bc2b8f99e591d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 51.2671568627, "max_line_length": 123, "alphanum_fraction": 0.5717120046, "include": true, "reason": "import numpy", "num_tokens": 9924}
|
import os
import logging
import tempfile
import nibabel
import numpy
import shutil
import dicom2nifti.image_reorientation as image_reorientation
from dicom2nifti.common import get_nifti_data
def ground_thruth_filenames(input_dir):
    """Return the ground-truth file paths derived from *input_dir*.

    Four sibling paths are built by appending fixed suffixes to the given
    prefix. Returned as a tuple:
    (nifti, reoriented nifti, bval, bvec).
    """
    base = input_dir + '_ground_truth'
    return (base + '.nii.gz',
            base + '_reoriented.nii.gz',
            base + '.bval',
            base + '.bvec')
def assert_compare_nifti(nifti_file_1, nifti_file_2):
    """Assert that two nifti files are equivalent.

    Both files are reoriented into a temporary working directory and then
    compared on affine, data dtype and voxel data (within a small tolerance).

    Raises:
        Exception: if the affines, dtypes or voxel data do not match.
    """
    logging.info("%s %s" % (nifti_file_1, nifti_file_2))
    work_dir = tempfile.mkdtemp()
    try:
        tmp_nifti_file_1 = os.path.join(work_dir, os.path.basename(nifti_file_1))
        tmp_nifti_file_2 = os.path.join(work_dir, os.path.basename(nifti_file_2))
        image_reorientation.reorient_image(nifti_file_1, tmp_nifti_file_1)
        image_reorientation.reorient_image(nifti_file_2, tmp_nifti_file_2)
        nifti_1 = nibabel.load(tmp_nifti_file_1)
        nifti_2 = nibabel.load(tmp_nifti_file_2)
        # check the affine
        if not numpy.allclose(nifti_1.affine, nifti_2.affine):
            raise Exception('affine mismatch')
        # check the data
        nifti_1_data = get_nifti_data(nifti_1)
        nifti_2_data = get_nifti_data(nifti_2)
        # in case of rgba data we should stack the data again
        rgba_dtype = [('R', 'u1'), ('G', 'u1'), ('B', 'u1'), ('A', 'u1')]
        if nifti_1.get_data_dtype() == rgba_dtype:
            nifti_1_data = numpy.stack([nifti_1_data['R'], nifti_1_data['G'], nifti_1_data['B'], nifti_1_data['A']])
        if nifti_2.get_data_dtype() == rgba_dtype:
            nifti_2_data = numpy.stack([nifti_2_data['R'], nifti_2_data['G'], nifti_2_data['B'], nifti_2_data['A']])
        if nifti_1.get_data_dtype() != nifti_2.get_data_dtype():
            raise Exception('dtype mismatch')
        if not numpy.allclose(nifti_1_data, nifti_2_data, rtol=0.01, atol=1):
            difference = get_nifti_data(nifti_1) - get_nifti_data(nifti_2)
            raise Exception('data mismatch %s ' % numpy.max(numpy.abs(difference)))
    finally:
        # Always remove the scratch directory. The original code only removed
        # it when the comparison failed, leaking one temp dir per successful
        # call; failure semantics (re-raise) are unchanged.
        shutil.rmtree(work_dir)
def assert_compare_bval(bval_file_1, bval_file_2):
    """Assert that two .bval files contain numerically identical values.

    Raises:
        Exception: if the loaded value arrays differ beyond numpy's
            default allclose tolerances.
    """
    values_1 = numpy.loadtxt(bval_file_1)
    values_2 = numpy.loadtxt(bval_file_2)
    if not numpy.allclose(values_1, values_2):
        raise Exception('bvals not equal\n%s\n%s' % (numpy.array2string(values_1),
                                                     numpy.array2string(values_2)))
def assert_compare_bvec(bvec_file_1, bvec_file_2):
    """Assert that two .bvec files contain numerically identical vectors.

    Raises:
        Exception: if the loaded vector arrays differ beyond numpy's
            default allclose tolerances.
    """
    vectors_1 = numpy.loadtxt(bvec_file_1)
    vectors_2 = numpy.loadtxt(bvec_file_2)
    if not numpy.allclose(vectors_1, vectors_2):
        raise Exception('bvecs not equal\n%s\n%s' % (numpy.array2string(vectors_1),
                                                     numpy.array2string(vectors_2)))
|
{"hexsha": "ea91357ffeb4b78417d2cd55cfbe18b59e1a66bf", "size": 2861, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_tools.py", "max_stars_repo_name": "JuanPabloMontoya271/dicom2nifti", "max_stars_repo_head_hexsha": "dfea030fbc47ed9c43d7bb1c8a468c2be963a043", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 197, "max_stars_repo_stars_event_min_datetime": "2016-04-05T15:24:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T17:37:10.000Z", "max_issues_repo_path": "tests/test_tools.py", "max_issues_repo_name": "JuanPabloMontoya271/dicom2nifti", "max_issues_repo_head_hexsha": "dfea030fbc47ed9c43d7bb1c8a468c2be963a043", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 102, "max_issues_repo_issues_event_min_datetime": "2017-05-12T07:08:48.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T00:21:54.000Z", "max_forks_repo_path": "tests/test_tools.py", "max_forks_repo_name": "JuanPabloMontoya271/dicom2nifti", "max_forks_repo_head_hexsha": "dfea030fbc47ed9c43d7bb1c8a468c2be963a043", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 60, "max_forks_repo_forks_event_min_datetime": "2016-12-13T22:11:56.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T22:55:52.000Z", "avg_line_length": 40.2957746479, "max_line_length": 116, "alphanum_fraction": 0.6875218455, "include": true, "reason": "import numpy", "num_tokens": 910}
|
import os
import sys
sys.path.append("..")
import numpy as np
import tensorflow as tf
# from octrees import *
from libs import *
class OctreeConvTest(tf.test.TestCase):
  """Checks that the fast and memory-trading octree convolution kernels
  (octree_conv_fast / octree_conv_memory) agree in forward output, input
  gradient and kernel gradient."""
  def forward_and_backward(self, kernel_size, stride, idx=0):
    """Build both conv variants on the same data/kernel and compare them.

    Args:
      kernel_size: 3-element kernel size, e.g. [3, 3, 3].
      stride: convolution stride (1 or 2).
      idx: integer giving each invocation a unique variable scope so the
        two variants share one kernel variable per call.
    """
    depth = 4
    channel= 3
    height = 152  # size of the flat octree feature axis; presumably matches the two sample octrees -- TODO confirm
    num_outputs = 5
    # octree = octree_batch([get_one_octree('octree_1'), get_one_octree('octree_2')])
    octree = octree_batch(octree_samples(['octree_1', 'octree_2']))
    data = tf.constant(np.random.uniform(-1.0, 1.0, [1, channel, height, 1]).astype('float32'))
    # forward: reuse_variables() makes both variants share the same kernel
    with tf.variable_scope('conv_%d' % idx) as scope:
      conv_fast = octree_conv_fast(data, octree, depth, num_outputs, kernel_size, stride)
      scope.reuse_variables()
      conv_mem = octree_conv_memory(data, octree, depth, num_outputs, kernel_size, stride)
    # get kernel
    # NOTE(review): `kernel` is only bound if some trainable variable name
    # contains 'conv_%d' % idx; otherwise the tf.gradients call below raises
    # NameError -- confirm this is intended to fail loudly.
    t_vars = tf.trainable_variables()
    for var in t_vars:
      if ('conv_%d' % idx) in var.name:
        kernel = var
    # backward: gradients w.r.t. the input data and the shared kernel
    grad_fast, kernel_fast = tf.gradients(conv_fast, [data, kernel])
    grad_mem, kernel_mem = tf.gradients(conv_mem, [data, kernel])
    # test: TestCase assert helpers evaluate the tensors in the session
    with self.cached_session() as sess:
      sess.run(tf.global_variables_initializer())
      # print('stride: ', stride, ', kernel_size: ', kernel_size)
      self.assertAllEqual(conv_fast, conv_mem)
      self.assertAllClose(grad_fast, grad_mem)
      self.assertAllClose(kernel_fast, kernel_mem)
  def test_forward_and_backward(self):
    """Sweep all stride/kernel-size combinations, one fresh scope each."""
    idx = 0
    stride = [1, 2]
    kernel_size = [[3, 3, 3], [2, 2, 2], [3, 1, 1], [3, 3, 1], [1, 1, 1]]
    for i in range(len(stride)):
      for j in range(len(kernel_size)):
        self.forward_and_backward(kernel_size[j], stride[i], idx)
        idx += 1
if __name__ == "__main__":
  os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # pin the test to the first GPU
  tf.test.main()
|
{"hexsha": "1dc2b9a1620c0b0a17b4d280807d242d5859adc0", "size": 1837, "ext": "py", "lang": "Python", "max_stars_repo_path": "tensorflow/test/test_octree_conv.py", "max_stars_repo_name": "pauldinh/O-CNN", "max_stars_repo_head_hexsha": "fecefd92b559bdfe94a3983b2b010645167c41a1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 299, "max_stars_repo_stars_event_min_datetime": "2019-05-27T02:18:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T15:29:20.000Z", "max_issues_repo_path": "tensorflow/test/test_octree_conv.py", "max_issues_repo_name": "pauldinh/O-CNN", "max_issues_repo_head_hexsha": "fecefd92b559bdfe94a3983b2b010645167c41a1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 100, "max_issues_repo_issues_event_min_datetime": "2019-05-07T03:17:01.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T09:02:04.000Z", "max_forks_repo_path": "tensorflow/test/test_octree_conv.py", "max_forks_repo_name": "pauldinh/O-CNN", "max_forks_repo_head_hexsha": "fecefd92b559bdfe94a3983b2b010645167c41a1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 84, "max_forks_repo_forks_event_min_datetime": "2019-05-17T17:44:06.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-14T04:32:02.000Z", "avg_line_length": 31.1355932203, "max_line_length": 95, "alphanum_fraction": 0.6570495373, "include": true, "reason": "import numpy", "num_tokens": 531}
|
%
% y = nnormn(x,dim,p)
%
% NNORMN normalizes an array x by its p-vector norms along dimension <dim>.
%
% dim: dimension along which to calculate norm. Default first nonsingleton
% p: norm-type. Default is 2.
%
% Equivalence: normc(x) == nnormn(x,1,2), normr(x) == nnormn(x,2,2)
%
% See also NORMC, NORMR, NNORM
% Created by Bill Winter December 2005
% Based on normc and normr
function x = nnormn(x,dim,p)
% Normalize array x by its p-vector norms along dimension dim.
siz = size(x);
if nargin < 2, dim = find(siz > 1,1); end   % default: first nonsingleton dimension
if nargin < 3, p = 2; end                   % default: Euclidean (2-)norm
switch p
    case inf, a = max(x,[],dim); % max
    case -inf, a = min(x,[],dim); % min
    case 1, a = sum(abs(x),dim); % manhattan
    case 2, a = sqrt(sum(abs(x).^2,dim)); % euclidean
    otherwise, a = sum(abs(x).^p,dim).^(1/p); % p-norm
end
a(a == 0) = 1;                   % avoid division by zero: zero-norm slices pass through unchanged
% Replicate the norms along dim via ones-indexing (a(N{:}) has the same
% size as x along dim), so the elementwise division below conforms.
N(1:length(siz)) = {':'};
N{dim} = ones(1,siz(dim));
x = x./a(N{:});
|
{"author": "Sable", "repo": "mcbench-benchmarks", "sha": "ba13b2f0296ef49491b95e3f984c7c41fccdb6d8", "save_path": "github-repos/MATLAB/Sable-mcbench-benchmarks", "path": "github-repos/MATLAB/Sable-mcbench-benchmarks/mcbench-benchmarks-ba13b2f0296ef49491b95e3f984c7c41fccdb6d8/11139-array-tool-set/array/nnormn.m"}
|
[STATEMENT]
lemma long_pow_exp: "r \<noteq> \<epsilon> \<Longrightarrow> m \<le> \<^bold>|r\<^sup>@m\<^bold>|"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. r \<noteq> \<epsilon> \<Longrightarrow> m \<le> \<^bold>|r \<^sup>@ m\<^bold>|
[PROOF STEP]
unfolding pow_len[of r m]
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. r \<noteq> \<epsilon> \<Longrightarrow> m \<le> m * \<^bold>|r\<^bold>|
[PROOF STEP]
using nemp_le_len[of r]
[PROOF STATE]
proof (prove)
using this:
r \<noteq> \<epsilon> \<Longrightarrow> 1 \<le> \<^bold>|r\<^bold>|
goal (1 subgoal):
1. r \<noteq> \<epsilon> \<Longrightarrow> m \<le> m * \<^bold>|r\<^bold>|
[PROOF STEP]
by simp
|
{"llama_tokens": 284, "file": "Combinatorics_Words_CoWBasic", "length": 3}
|
# -*- coding: utf-8 -*-
"""Copyright 2019 DScribe developers
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from abc import ABC, abstractmethod
import numpy as np
import sparse
from sklearn.metrics.pairwise import pairwise_kernels
class LocalSimilarityKernel(ABC):
    r"""An abstract base class for all kernels that use the similarity of local
    atomic environments to compute a global similarity measure.
    """

    def __init__(
        self,
        metric,
        gamma=None,
        degree=3,
        coef0=1,
        kernel_params=None,
        normalize_kernel=True,
    ):
        # Raw docstring: the original non-raw string made \sqrt an invalid
        # escape sequence (DeprecationWarning on modern CPython).
        r"""
        Args:
            metric(string or callable): The pairwise metric used for
                calculating the local similarity. Accepts any of the sklearn
                pairwise metric strings (e.g. "linear", "rbf", "laplacian",
                "polynomial") or a custom callable. A callable should accept
                two arguments and the keyword arguments passed to this object
                as kernel_params, and should return a floating point number.
            gamma(float): Gamma parameter for the RBF, laplacian, polynomial,
                exponential chi2 and sigmoid kernels. Interpretation of the
                default value is left to the kernel; see the documentation for
                sklearn.metrics.pairwise. Ignored by other kernels.
            degree(float): Degree of the polynomial kernel. Ignored by other
                kernels.
            coef0(float): Zero coefficient for polynomial and sigmoid kernels.
                Ignored by other kernels.
            kernel_params(mapping of string to any): Additional parameters
                (keyword arguments) for kernel function passed as callable
                object.
            normalize_kernel(boolean): Whether to normalize the final global
                similarity kernel. The normalization is achieved by dividing each
                kernel element :math:`K_{ij}` with the factor
                :math:`\sqrt{K_{ii}K_{jj}}`
        """
        self.metric = metric
        self.gamma = gamma
        self.degree = degree
        self.coef0 = coef0
        self.kernel_params = kernel_params
        self.normalize_kernel = normalize_kernel

    def create(self, x, y=None):
        """Creates the kernel matrix based on the given lists of local
        features x and y.

        Args:
            x(iterable): A list of local feature arrays for each structure.
            y(iterable): An optional second list of features. If not specified
                it is assumed that y=x.

        Returns:
            The pairwise global similarity kernel K[i,j] between the given
            structures, in the same order as given in the input, i.e. the
            similarity of structures i and j is given by K[i,j], where features
            for structure i and j were in features[i] and features[j]
            respectively.
        """
        symmetric = y is None
        if symmetric:
            y = x
        n_x = len(x)
        n_y = len(y)

        # Convert sparse.COO features to scipy CSR once, up front. The
        # original converted x[i] inside the inner loop, redoing the same
        # conversion up to n_y times per structure, and skipped the
        # conversion entirely in the self-similarity branch below.
        x = [v.tocsr() if isinstance(v, sparse.COO) else v for v in x]
        y = x if symmetric else [v.tocsr() if isinstance(v, sparse.COO) else v for v in y]

        # First calculate the "raw" pairwise similarity of atomic environments
        C_ij_dict = {}
        for i in range(n_x):
            for j in range(n_y):
                # Skip lower triangular part for symmetric matrices
                if symmetric and j < i:
                    continue
                # Save time on symmetry: on the diagonal let the pairwise
                # backend compare x[i] against itself (y=None).
                y_j = None if symmetric and j == i else y[j]
                C_ij_dict[i, j] = self.get_pairwise_matrix(x[i], y_j)

        # Calculate the global pairwise similarity between the entire
        # structures
        K_ij = np.zeros((n_x, n_y))
        for i in range(n_x):
            for j in range(n_y):
                # Skip lower triangular part for symmetric matrices
                if symmetric and j < i:
                    continue
                k_ij = self.get_global_similarity(C_ij_dict[i, j])
                K_ij[i, j] = k_ij
                # Save data also on lower triangular part for symmetric matrices
                if symmetric and j != i:
                    K_ij[j, i] = k_ij

        # Enforce kernel normalization if requested:
        # K_ij / sqrt(K_ii * K_jj)
        if self.normalize_kernel:
            if symmetric:
                # Self-similarities are already on the diagonal.
                x_k_ii_sqrt = np.sqrt(np.diagonal(K_ij))
                y_k_ii_sqrt = x_k_ii_sqrt
            else:
                x_k_ii_sqrt = np.sqrt(self._self_similarities(x))
                y_k_ii_sqrt = np.sqrt(self._self_similarities(y))
            K_ij /= np.outer(x_k_ii_sqrt, y_k_ii_sqrt)

        return K_ij

    def _self_similarities(self, features):
        """Return the global self-similarity k_ii for every structure in
        *features* as a 1D numpy array (used for kernel normalization)."""
        k_ii = np.empty(len(features))
        for i, f_i in enumerate(features):
            C_ii = self.get_pairwise_matrix(f_i)
            k_ii[i] = self.get_global_similarity(C_ii)
        return k_ii

    def get_pairwise_matrix(self, X, Y=None):
        """Calculates the pairwise similarity of atomic environments with
        scikit-learn, and the pairwise metric configured in the constructor.

        Args:
            X(np.ndarray): Feature vector for the atoms in structure A
            Y(np.ndarray): Feature vector for the atoms in structure B

        Returns:
            np.ndarray: NxM matrix of local similarities between structures A
            and B, with N and M atoms respectively.
        """
        if callable(self.metric):
            params = self.kernel_params or {}
        else:
            params = {"gamma": self.gamma, "degree": self.degree, "coef0": self.coef0}
        return pairwise_kernels(X, Y, metric=self.metric, filter_params=True, **params)

    @abstractmethod
    def get_global_similarity(self, localkernel):
        """
        Computes the global similarity between two structures A and B.

        Args:
            localkernel(np.ndarray): NxM matrix of local similarities between
                structures A and B, with N and M atoms respectively.
        Returns:
            float: Global similarity between the structures A and B.
        """
|
{"hexsha": "ea5d6c32afd2e89b25fb28c9b3e9419b1ebf438b", "size": 7264, "ext": "py", "lang": "Python", "max_stars_repo_path": "dscribe/kernels/localsimilaritykernel.py", "max_stars_repo_name": "Iximiel/dscribe", "max_stars_repo_head_hexsha": "1dd845cb918a244714f835023bdc82d95719eef1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 265, "max_stars_repo_stars_event_min_datetime": "2018-12-10T21:36:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T12:58:21.000Z", "max_issues_repo_path": "dscribe/kernels/localsimilaritykernel.py", "max_issues_repo_name": "Iximiel/dscribe", "max_issues_repo_head_hexsha": "1dd845cb918a244714f835023bdc82d95719eef1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 71, "max_issues_repo_issues_event_min_datetime": "2018-12-10T22:00:39.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T19:38:23.000Z", "max_forks_repo_path": "dscribe/kernels/localsimilaritykernel.py", "max_forks_repo_name": "Iximiel/dscribe", "max_forks_repo_head_hexsha": "1dd845cb918a244714f835023bdc82d95719eef1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 66, "max_forks_repo_forks_event_min_datetime": "2018-11-29T13:33:05.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T15:15:00.000Z", "avg_line_length": 36.8730964467, "max_line_length": 87, "alphanum_fraction": 0.5773678414, "include": true, "reason": "import numpy", "num_tokens": 1516}
|
import numpy as np
import pandas as pd
import psycopg2
from io import StringIO
from sklearn.model_selection import train_test_split
from db import db_engine
# DDL for the table that will hold both the train rows (with real responses)
# and the test rows (with blanked responses, to be predicted later).
create_table_sql = """
CREATE TABLE IF NOT EXISTS marketing (
    id serial PRIMARY KEY,
    age integer,
    job varchar(128),
    marital varchar(128),
    education varchar(128),
    default_payment varchar(128),
    balance integer,
    housing varchar(128),
    loan varchar(128),
    day integer,
    month varchar(128),
    duration real,
    campaign integer,
    pdays integer,
    previous integer,
    poutcome varchar(128),
    response varchar(128),
    predicted_response varchar(128)
)
"""
get_data_sql = """select * from marketing"""

# Load the cleaned marketing dataset and add the (initially empty) column
# that will later receive model predictions.
df = pd.read_csv("data/bank_cleaned.csv", index_col="id")
df.drop("response_binary", axis=1, inplace=True)
df["predicted_response"] = ""

test_size = 0.20  # 20% for testing
df_train, df_test = train_test_split(df, test_size=test_size, random_state=1234)
df_train.to_csv("data/train.csv")
df_test.to_csv("data/test.csv")

# Blank the target column on the test rows before loading everything into
# the database (copy first to avoid mutating the split view).
df_test = df_test.copy()
df_test["response"] = ""
df = pd.concat([df_train, df_test])

conn = None
try:
    conn = psycopg2.connect(db_engine())
    cur = conn.cursor()
    print("Create marketing table")
    cur.execute(create_table_sql)
    conn.commit()
    print("Insert train and test data into table ...")
    # Stream the frame through an in-memory CSV buffer into COPY FROM,
    # which is much faster than row-by-row INSERTs.
    buffer = StringIO()
    df.to_csv(buffer, index_label="id", header=False)
    buffer.seek(0)
    cur.copy_from(buffer, "marketing", sep=",")
    conn.commit()
    print("Insert finished.")
    cur.close()
except Exception as e:
    # Best-effort script: report and fall through to cleanup.
    print("Problems:", str(e))
finally:
    # The original never closed the connection; release it on every path.
    if conn is not None:
        conn.close()
{"hexsha": "469b28808b788585e00c01d5d81a2bd89b08bffc", "size": 1596, "ext": "py", "lang": "Python", "max_stars_repo_path": "PostgreSQL_AutoML/init_db.py", "max_stars_repo_name": "mljar/integrations", "max_stars_repo_head_hexsha": "147154dd33daa7bd478fec912e034c7e28dbc53a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-08-28T23:03:52.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-10T06:53:02.000Z", "max_issues_repo_path": "PostgreSQL_AutoML/init_db.py", "max_issues_repo_name": "shahules786/integrations", "max_issues_repo_head_hexsha": "e703dbee5ba6d8cda66ffe26c2071969dc186aab", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-10-04T11:24:58.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-04T11:24:58.000Z", "max_forks_repo_path": "PostgreSQL_AutoML/init_db.py", "max_forks_repo_name": "shahules786/integrations", "max_forks_repo_head_hexsha": "e703dbee5ba6d8cda66ffe26c2071969dc186aab", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-10-01T11:39:46.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-21T18:52:14.000Z", "avg_line_length": 25.3333333333, "max_line_length": 80, "alphanum_fraction": 0.6929824561, "include": true, "reason": "import numpy", "num_tokens": 386}
|
/* $Id$
*
* Copyright 2010 Anders Wallin (anders.e.e.wallin "at" gmail.com)
*
* This file is part of OpenCAMlib.
*
* OpenCAMlib is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* OpenCAMlib is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with OpenCAMlib. If not, see <http://www.gnu.org/licenses/>.
*/
#include <list>
// uncomment to disable assert() calls
// #define NDEBUG
#include <cassert>
#include <boost/python.hpp>
#include <boost/foreach.hpp>
#include "point.h"
#include "triangle.h"
#include "millingcutter.h"
#include "numeric.h"
#include "octree.h"
#include "ocode.h"
//#define DEBUG_BUILD_OCT
namespace ocl
{
//**************** LinOCT ********************/
/// construct an empty linear octree (clear() is redundant on a
/// default-constructed list but kept for explicitness)
LinOCT::LinOCT() {
    clist.clear();
}
/// number of Ocode nodes in the list
/// NOTE(review): returns int while std::list::size() is size_t -- narrows on huge trees
int LinOCT::size() const {
    return clist.size();
}
/// append a copy of code to the end of the list
void LinOCT::append(Ocode& code) {
    clist.push_back( code );
}
/// erase the node at the given position; the passed iterator is
/// invalidated by the erase, callers must not reuse it
void LinOCT::delete_at(std::list<Ocode>::iterator& it) {
    clist.erase( it );
}
/// replace the node at itr with its child nodes (itr->expand()).
/// On return itr points at the element just before the erased parent,
/// i.e. at the last inserted child -- init() and build() rely on this.
void LinOCT::expand_at(std::list<Ocode>::iterator& itr) {
    // note: there was a valid-index check here
    // consider checking that itr is valid?
    // if ( itr->expandable()) { // was: clist[idx].expandable()
        std::list<Ocode> newnodes = itr->expand(); // the new nodes clist[idx]
        BOOST_FOREACH( Ocode o, newnodes) {
            clist.insert(itr, o); // children are inserted before the parent
        }
        // delete old node
        std::list<Ocode>::iterator temp;
        temp = itr;
        itr--; // jump out of the way from erase()
        clist.erase(temp);
    /*} else {
        std::cout << "LinOCT::expand_at() cannot expand " << *itr << "!\n";
    }*/
    return;
}
/// initialize octree and expand min_expand times
/// NOTE(review): inside the inner loop itr is only advanced when the node
/// is expandable, so a non-expandable node stalls the scan until n runs
/// out -- confirm whether that can occur before max depth. current_end is
/// assigned but never used.
void LinOCT::init(int min_expand)
{
    // assume the list is empty.
    if (size() > 0) {
        std::cout << "cannot call LinOCT::init() on non-empty tree! \n";
        assert(0);
    }
    Ocode o = Ocode(); // create an onode, initally all "8"
    o.init();
    append(o);
    for (int m=0; m<min_expand ; m++) { // go through the list min_expand times
        std::list<Ocode>::iterator itr;
        std::list<Ocode>::iterator current_end;
        itr = clist.begin();
        current_end = clist.end();
        for(int n=0; n<size() ; n++) {
            if ( itr->expandable() ) { // if expandable
                //std::cout << " init() expanding " << *itr << "\n";
                expand_at(itr); // expand the node
                //std::cout << " after expand itr= " << *itr << "\n";
                n=n+7; // jump forward, since we have inserted new nodes
                itr++; // after expand(), itr points to the last expanded node
                // so jump forward to expand next node
                if ( itr == clist.end() )
                    itr--; // unless at the end of list
            }
        }
        // std::cout << " LinOCT:init() m=" << m << " N=" << size() << "\n";
    }
    return;
}
/// build LinOCT octree from input volume OCTVolume
/// NOTE(review): after each erase the iterator steps back one element,
/// so the (black) node before the deleted one is re-scored by calcScore()
/// on the next pass -- correct but redundantly expensive.
void LinOCT::build(OCTVolume* vol)
{ // loop through the whole list:
  // - deleting white nodes
  // - expanding grey nodes if possible
  // - skipping over black nodes (only these remain when done)
    // std::cout << size() << " nodes before build()\n";
    std::list<Ocode>::iterator it;
    std::list<Ocode>::iterator temp;
    it = clist.begin();
    int calc_calls = 0;
    while ( it != clist.end() ) {
        if ( ! (vol->isInsideBBo( *it )) ) { // nodes outside bounding-box can be deleted
            temp = it;
            if ( it != clist.begin() )
                it--; // jump back out of the way from erase()
            else
                it++;
            clist.erase(temp);
        }
        else { // this ocode contains the bounding-box
            it->calcScore( vol ); // expensive call...
            ++calc_calls;
            if ( (it->score == 9) ) { // black node
                it++; // node is black, so leave it in the list, and move forward
            } else if ( (it->score == 0) && (it->deg > 5) ) {
                temp = it;
                if ( it != clist.begin() )
                    it--; // jump out of the way from erase()
                else
                    it++;
                clist.erase(temp); // white node, delete.
            }
            else { // grey node, expand if possible, otherwise delete
                if ( it->expandable() ) {
                    temp = it;
                    bool first=false;
                    if (it == clist.begin()) {
                        first = true;
                    } else {
                        temp--;
                    }
                    expand_at(it); // iterator moves to last expanded node.
                    if (first) // so need to reset iterator to first expanded node
                        it = clist.begin();
                    else
                        it = temp;
                } else { // grey non-expandable nodes are removed
                    temp = it;
                    if ( it != clist.begin() )
                        it--; // jump out of the way from erase()
                    else
                        it++;
                    clist.erase(temp);
                }
            }
        }
    }
    std::cout << " LinOCT::build() " << calc_calls << " calcScore() calls \n";
    // std::cout << size() << " nodes after build()\n";
}
// NOTE: condense() seems to run very slowly
// >60s run time on length=138000 list.
// NOTE(review): all delete_at()/append_at() calls below are FIXME'd out,
// so this currently only *counts* duplicates/contained/collapsible nodes
// and never mutates the list. With the deletes disabled, the duplicate
// branch neither advances itr nor removes anything, so a list that
// actually contains duplicates can loop forever here -- confirm before use.
void LinOCT::condense() {
    // NOTE: list needs to be sorted before we come here.
    // NOTE: consider using std::list<> unique() to remove duplicates
    int n=0;
    int n_duplicates=0;
    int n_contained=0;
    int n_collapse=0;
    std::list<Ocode>::iterator itr;
    std::list<Ocode>::iterator next;
    itr = clist.begin();
    while ( n < (size()-1) ) {
        next=itr;
        next++;
        if ( (*itr) == (*next) ) { //( clist[n] == clist[n+1] ) { // remove duplicates
            // FIXME delete_at(n);
            n_duplicates++;
            // deleting a duplicate creates an opportunity to collapse
            // so need to jump back by 7 steps to check for collapse
            int jump=7;
            if (n<8)
                jump=n;
            n-=jump;
        }
        else if ( itr->containedIn( *next ) ) {
            // remove nodes contained in the following node
            // FIXME delete_at(n);
            if (n>0)
                n--; // jump back to check for more contained nodes
            n_contained++;
        }
        // condense nodes if all eight sub-quadrants are present
        else if ( can_collapse_at(n) ) { // can collapse the octet
            //std::cout << "collapsing at " << n << "\n";
            int deg = itr->degree();
            // construct parent node of sub-octants
            Ocode o; // parent node, all digits default to "8"
            for (int m=0;m<(deg-1); m++) {
                o.code[m] = itr->code[m]; // match code up to deg-1
            }
            //std::cout << "before collapse at " << n <<" code:" << clist[n] << "\n";
            // add parent
            // FIXME append_at(o, n);
            //std::cout << "parent insert at " << n <<" code:" << clist[n] << "\n";
            n++; // jump forward and delete the redundant sub-octants
            for (int m=0;m<8;m++) {
                //std::cout << " deleting at " << n<< " : " << clist[n] << "\n";
                // FIXME delete_at(n);
            }
            n--; // jump back to the new parent
            int jump=7;
            if (n<8)
                jump=n;
            n-=jump;
            // jump backward and see if the collapse has created
            // an opportunity for more collapsing
            // collapsable nodes can be as far back as 7 steps
            n_collapse++;
        }
        else {
            n++; // move forward in list
            itr++;
        }
    }
    if ( (n_duplicates>0) || (n_contained>0) || (n_collapse>0)) {
        std::cout << "n_duplicates="<<n_duplicates<<"\n";
        std::cout << "n_contained="<<n_contained<<"\n";
        std::cout << "n_collapse="<<n_collapse<<"\n";
    } else {
        std::cout << "condense(): nothing to do!\n";
    }
    return;
}
/// return true if eight consecutive nodes beginning at idx form a
/// complete octet (codes 0..7 at the same degree) and can be collapsed
/// into their parent. Advancing to idx is O(idx) on a std::list.
bool LinOCT::can_collapse_at(int idx) {
    std::list<Ocode>::iterator it;
    it=clist.begin();
    for (int n=0;n<idx;n++)
        it++;
    if ( (size()-idx) < 8 ) // at least 8 nodes must remain
        return false;
    int deg = it->degree();
    // check for consequtive numbers 0-7 at position deg
    Ocode o;
    //std::cout << " checking "<< idx << " to " << idx+7 << " deg=" << deg << "\n";
    for (int n=0; n < 8 ; n++) {
        o = *it; // clist[idx+n];
        //std::cout << "n=" << n << " Ocode= "<< o <<" code=" << (int)o.code[deg-1] << "\n";
        if ( (o.code[deg-1] != n) || (o.degree() != deg) ) {// code must match 0-7
            //std::cout << " no match\n";
            return false;
        }
        it++;
    }
    return true;
}
/// remove the nodes of o from this tree, in place.
/// Both lists are assumed sorted (the walk merges them by Ocode order).
/// Improvement over the original: the unused locals Q12 (never read) and
/// Hold12 (only ever null()'ed) have been removed; the merge walk itself
/// is unchanged.
void LinOCT::diff( LinOCT& o )
{
    std::list<Ocode>::iterator itr1;
    std::list<Ocode>::iterator temp;
    std::list<Ocode>::iterator itr2;
    itr1=clist.begin();
    itr2=o.clist.begin();
    while ( (itr1 != clist.end() ) && ( itr2 != o.clist.end() ) ) {
        if ( *itr1 == *itr2 ) {
            // identical nodes: remove from this, advance both
            temp = itr1;
            itr1++;
            clist.erase(temp);
            itr2++;
        }
        else if ( itr1->containedIn( *itr2 ) ) {
            // our node lies inside a node of o: remove it
            temp = itr1;
            itr1++;
            clist.erase(temp);
        }
        else if ( itr2->containedIn( *itr1 ) ) { // case 2
            // a node of o lies inside ours: split ours and re-examine
            expand_at(itr1);
            // need to jump back 7 steps
            for (int m=0;m<7;m++)
                itr1--;
        }
        else if ( *itr1 < *itr2 ) { // case 3
            itr1++;
        }
        else { // case 4: o2 < o1
            itr2++;
        }
    } // end while-loop
    return;
}
/// set-operation on two sorted linear octrees; type selects the result:
/// 1 = this - o, 2 = o - this, 3 = intersection, 4 = union (condensed).
/// All four results are computed in one merge-style walk; only the
/// requested one is returned.
/// NOTE(review): when both lists are exhausted at the same time the else
/// branch below dereferences *itr2 == end() -- confirm inputs exclude that.
LinOCT LinOCT::operation(int type, LinOCT& o)
{
    // traverse through both lists
    int idx1 = 0;
    int idx2 = 0;
    std::list<Ocode>::iterator itr1;
    std::list<Ocode>::iterator itr2;
    itr1=clist.begin();
    itr2=o.clist.begin();
    std::vector<Ocode> intersection;
    std::vector<Ocode> sum; // a.k.a. union
    std::vector<Ocode> diff12;
    std::vector<Ocode> diff21;
    std::list<Ocode> Q21;
    std::list<Ocode> Q12;
    Ocode Hold21;
    Ocode Hold12;
    Hold21.null();
    Hold12.null();
    while ( (idx1<size()) && (idx2<o.size()) ) {
        // case 0
        if ( *itr1 == *itr2 ) { //(clist[idx1] == o.clist[idx2]) { // identical nodes
            intersection.push_back( *itr1 ); //clist[idx1] );
            sum.push_back( *itr1 ); //clist[idx1] );
            idx1++;
            idx2++;
            itr1++;
            itr2++;
        }
        else if ( itr1->containedIn( *itr2 ) ) { //clist[idx1].containedIn( o.clist[idx2] ) ) { // case 1
            intersection.push_back( *itr1 ); //clist[idx1] ); // idx1 contained is in both o1 and o2
            if ( Hold21.isNull() )
                Hold21 = *itr2; // o.clist[idx2]; // remember this node for later processing
            Q21.push_back( *itr1 ); //clist[idx1] ); // these need to be removed from Hold21 later
            idx1++;
            itr1++;
        }
        else if ( itr2->containedIn( *itr1 ) ) { //o.clist[idx2].containedIn( clist[idx1] ) ) { // case 2
            intersection.push_back( *itr2 ); //o.clist[idx2] ); // o2[idx2] is in both o1 and o2
            if ( Hold12.isNull() )
                Hold12 = *itr1; //clist[idx1]; // store for later processing
            Q12.push_back( *itr2 ); //o.clist[idx2] ); // remove these later from Hold12
            idx2++;
            itr2++;
        }
        else if ( *itr1 < *itr2 ) { // clist[idx1] < o.clist[idx2] ) { // case 3
            // add o1 element to union
            sum.push_back( *itr1 ); //clist[idx1] );
            // process the difference queues, if any
            if ( Hold12 == *itr1 ) { //clist[idx1] ) { //compute difference o1-o2 Hold12 == clist[idx1]
                do_diff( Hold12, Q12, diff12 ); // function for calculating difference
                Hold12.null();
            }
            else
                diff12.push_back( *itr1 ); //clist[idx1] ); // no matching node in o2, so o1 belongs to diff
            idx1++;
            itr1++;
        }
        else { // case 4: o2 < o1
            if ( !( *itr2 < *itr1 ) ) { //o.clist[idx2] < clist[idx1]) ) {
                std::cout << " case 4 o2 < o1 not true!\n";
                // std::cout << "o2=" << *itr2 << "number=" << itr2->number() << "\n";
                // std::cout << "o1=" << *itr1 << "number=" << itr1->number() << "\n";
                assert(0);
            }
            // add o2 element to union
            sum.push_back( *itr2 ); //o.clist[idx2] );
            if ( Hold21 == *itr2 ) { //o.clist[idx2] ) { // Hold21 == o.clist[idx2]
                do_diff( Hold21, Q21, diff21);
                Hold21.null();
            }
            else
                diff21.push_back( *itr2 ); //o.clist[idx2] ); // o2 belongs to diff21
            idx2++;
            itr2++;
        }
    } // end of while-loop through elements
    // now process remaining elements, i.e. case where o1 is longer than o2 or vice versa
    if (idx1 < size()) {// process rest of o1
        int idx3 = idx1;
        std::list<Ocode>::iterator itr3;
        itr3 = itr1;
        if ( Hold12 == *itr1 ) { //clist[idx1] ) {
            do_diff( Hold12, Q12, diff12);
            Hold12.null();
            idx3++;
            itr3++;
        }
        //for (int i=idx3; i<size(); i++)
        for ( ; itr3 != clist.end() ; itr3++ )
            diff12.push_back( *itr3 );
            //diff12.push_back( clist[i] ); // o1 elements not in o2 are in diff12
        //union calc here
        //for (int i=idx1; i<size();i++)
        for ( ; itr1 != clist.end(); itr1++)
            sum.push_back( *itr1 );
    }
    else { // process rest of o2
        int idx3=idx2;
        std::list<Ocode>::iterator itr3;
        itr3 = itr2;
        if (Hold21 == *itr2 ) {
            do_diff(Hold21, Q21, diff21);
            Hold21.null();
            idx3++;
            itr3++;
        }
        for (; itr3 != o.clist.end() ; itr3++) {
            diff21.push_back( *itr3 ); // o2 elements to diff21
        }
        // union calc here
        for (; itr2 != o.clist.end(); itr2++)
            sum.push_back( *itr2 );
    }
    /*
    std::cout << " diff12= " << diff12.size() << "\n";
    std::cout << " diff21= " << diff21.size() << "\n";
    std::cout << " inters= " << intersection.size() << "\n";
    */
    LinOCT result;
    if (type==1) {
        BOOST_FOREACH( Ocode o, diff12 ) {
            result.append(o);
        }
    }
    else if (type==2) {
        BOOST_FOREACH( Ocode o, diff21 ) {
            result.append(o);
        }
    } else if (type==3) {
        BOOST_FOREACH( Ocode o, intersection) {
            result.append(o);
        }
    } else if (type==4) {
        BOOST_FOREACH( Ocode o, sum) {
            result.append(o);
        }
        result.condense();
    }
    return result;
}
// computes difference H - Q
// where H is a node that is expanded
// and Q is a queue of nodes to be subtracted from H
// the result is appended to D
// (consumes Q from the back; both Q and Q2 are used as LIFO stacks)
void LinOCT::do_diff(Ocode& H, std::list<Ocode>& Q, std::vector<Ocode>& D)
{
    // H - an expandable node
    // Q - queue of nodes contained in H
    // D - difference queue for H-Q results
    // Q2 contains expanded node
    std::list<Ocode> Q2;
    if ( !H.expandable()) {
        /*
        std::cout << " do_diff node H not expandable...\n";
        std::cout << " H=" << H << "\n";
        std::cout << " Q=\n";
        BOOST_FOREACH( Ocode o, Q) {
            std::cout << o << "\n";
        }*/
        Q2.push_back(H);
    } else {
        Q2 = H.expand();
    }
    /*
    std::cout << " H.expand() on H=" << H <<" :\n";
    BOOST_FOREACH( Ocode o, Q2) {
        std::cout << o << "\n";
    }*/
    while ( !Q2.empty() ) { // go through the expanded nodes
        if ( !Q.empty() ) {
            Ocode n = Q.back();
            Ocode n2 = Q2.back();
            if ( n == n2 ) {// matching elements
                Q2.pop_back(); //erase(Q2.begin()); // nothing to put into diff
                Q.pop_back(); // erase(Q.begin());
            } else if ( n.containedIn( n2 ) ) {// need to expand further
                // expand n2 and add to front of Q2
                std::list<Ocode> subocts = n2.expand();
                Q2.pop_back(); // delete parent
                BOOST_FOREACH( Ocode o, subocts) {
                    Q2.push_back(o); // insert new children
                }
            } else {
                // no match in Q, so push node to diff
                D.push_back( n2 );
                Q2.pop_back();
            }
        }
        else { // Q is empty
            D.push_back( Q2.back() ); // no match in Q, so push node to diff
            Q2.pop_back();
        }
    }// end while
    //Q.clear();
    //std::cout << " after do_diff Q.size() = " << Q.size() << "\n";
    return;
}
/// Merge every node of another tree into this one.
/// The other tree is left unmodified; afterwards the combined list is
/// sorted and condense() merges complete sibling groups.
void LinOCT::sum(LinOCT& other) {
    // copy the nodes over one by one, then normalize the list
    std::list<Ocode>::iterator it;
    for (it = other.clist.begin(); it != other.clist.end(); ++it) {
        append( *it );
    }
    sort();
    condense();
}
/// sort list of ocodes
// Delegates to std::list::sort(), which orders the nodes with Ocode's
// comparison operator; condense() and operation() rely on this ordering.
void LinOCT::sort() {
    clist.sort();
}
/// return list of triangles to python
/// the triangles correspond to every node/cube of the octree
boost::python::list LinOCT::get_triangles()
{
    boost::python::list tlist;
    BOOST_FOREACH( Ocode o, clist) {
        // fetch the eight corner points of this node's cube
        std::vector<Point> p(8);
        for (int m=0;m<8;++m)
            p[m]=o.corner(m);
        // these 12 triangles cover the 6 sides of the cube
        // NOTE(review): the corner pairings below assume Ocode::corner()'s
        // vertex ordering -- verify against Ocode if faces render wrong.
        tlist.append(Triangle(p[0],p[1],p[2]));
        tlist.append(Triangle(p[1],p[2],p[6]));
        tlist.append(Triangle(p[3],p[4],p[5]));
        tlist.append(Triangle(p[4],p[5],p[7]));
        tlist.append(Triangle(p[0],p[2],p[3]));
        tlist.append(Triangle(p[3],p[4],p[2]));
        tlist.append(Triangle(p[1],p[5],p[6]));
        tlist.append(Triangle(p[6],p[7],p[5]));
        tlist.append(Triangle(p[2],p[4],p[6]));
        tlist.append(Triangle(p[6],p[7],p[4]));
        tlist.append(Triangle(p[0],p[1],p[3]));
        tlist.append(Triangle(p[3],p[5],p[1]));
    }
    return tlist;
}
/// Expose every Ocode node of the tree to python as a list.
boost::python::list LinOCT::get_nodes() {
    boost::python::list nodelist;
    std::list<Ocode>::iterator it;
    for (it = clist.begin(); it != clist.end(); ++it) {
        nodelist.append( *it );
    }
    return nodelist;
}
/// string repr
// Streams a short one-line summary (node count only); used by str().
std::ostream& operator<<(std::ostream &stream, const LinOCT &l) {
    stream << "LinOCT: N="<< l.size() ;
    return stream;
}
/// print the whole list
void LinOCT::printList() {
BOOST_FOREACH( Ocode o, clist ) {
std::cout << " " << o << "\n";
}
}
/// string repr
// Renders the operator<< summary into a std::string (python __str__ hook).
std::string LinOCT::str() const {
    std::ostringstream o;
    o << *this;
    return o.str();
}
} // end namespace
// end of file octree.cpp
|
{"hexsha": "16be29c7c62f8d79bc07f44dddd0c75638db953b", "size": 20359, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "opencamlib/src/attic/octree.cpp", "max_stars_repo_name": "JohnyEngine/CNC", "max_stars_repo_head_hexsha": "e4c77250ab2b749d3014022cbb5eb9924e939993", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "opencamlib/src/attic/octree.cpp", "max_issues_repo_name": "JohnyEngine/CNC", "max_issues_repo_head_hexsha": "e4c77250ab2b749d3014022cbb5eb9924e939993", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "opencamlib/src/attic/octree.cpp", "max_forks_repo_name": "JohnyEngine/CNC", "max_forks_repo_head_hexsha": "e4c77250ab2b749d3014022cbb5eb9924e939993", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.5154798762, "max_line_length": 109, "alphanum_fraction": 0.4785107324, "num_tokens": 5467}
|
# This Python file uses the following encoding: utf-8
import numpy as np
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import matplotlib.cm as cm
from .colors import colorscale
class Brain:
    """2D cortical-surface visualization of one or two dynamic modes (Plotly)."""

    def __init__(self, df1, order, coords_2d, df2=None):
        """
        Brain Constructor.

        Parameters
        ----------
        df1 : pd.DataFrame
            Pandas DataFrame containing at least :code:`intensity` and :code:`conjugate` columns.
            The :code:`intensity` column must contain Array-like values of the length of the cortical atlas.
        order : int
            Mode order
        coords_2d : pd.DataFrame
            Pandas DataFrame containing the 2D corticial parcellation coordinates. These can be fetched from
            Decomposition.atlas.coords_2d
        df2 : pd.DataFrame, optional
            Pandas DataFrame containing at least :code:`intensity` and :code:`conjugate` columns
            The :code:`intensity` column must contain Array-like values of the length of the cortical atlas.
        """
        self.coords_2d = coords_2d
        # `order` is 1-indexed; the DataFrame rows are 0-indexed.
        self.mode1 = df1.loc[order - 1][['intensity', 'conjugate']]
        if df2 is not None:
            self.mode2 = df2.loc[order - 1][['intensity', 'conjugate']]
        else:
            self.mode2 = None
        self.order = order
        # number of cortical regions; one label is the null/background region
        self.atlas_size = np.unique(list(self.coords_2d.label)).shape[0] - 1

    @staticmethod
    def intensities(modes, imag=False):
        """
        Returns activity intensities of modes.

        Parameters
        ----------
        modes : pd.DataFrame
            Pandas DataFrame containing at least :code:`intensity` and :code:`conjugate` columns.
            The :code:`intensity` column must contain Array-like values of the length of the cortical atlas.
        imag : boolean, optional
            Retrieve the imaginary values from the activity intensities (default False)

        Returns
        -------
        rows : list
            list of range from 0 number of figure subplots
        intensity : list of Array-like
            Activity intensities for each mode
        """
        rows = []
        intensity = []
        for mode in modes:
            if not mode.conjugate or not imag:
                # extend rows list
                rows.append(rows[-1] + 1 if len(rows) != 0 else 1)
                # normalize from -0.1 -> 0.1 in modes to 0.0 -> 1.0 for colormap
                # real valued eigenvalue carry imaginary valued eigenvectors
                intensity.append(5 * np.real(mode.intensity) + 0.5)
            else:
                # conjugate mode with imag requested: one subplot row each
                # for the real and the imaginary part
                rows.extend(
                    list(
                        map(lambda x: rows[-1] + x, [1, 2])
                    ) if len(rows) != 0 else [1, 2]
                )
                # normalize from -0.1 -> 0.1 in modes to 0.0 -> 1.0 for colormap
                intensity.extend(
                    list(
                        map(lambda s: 5 * s + 0.5,
                            [np.real(mode.intensity), np.imag(mode.intensity)])
                    )
                )
        return rows, intensity

    def figure(self, imag=False, colormap='coolwarm'):
        """
        Returns Plotly Figure.

        Parameters
        ----------
        imag : boolean, optional
            Incorporate brain visualizations for imaginary activity values (default False)
        colormap : str
            Colormap supported by matplotlib, which can be found on the
            `official reference <https://matplotlib.org/3.1.1/gallery/color/colormap_reference.html>`_

        Returns
        -------
        fig : go.Figure
            Plotly figure of brain visualizations
        """
        # Analysis
        if self.mode2 is None:
            rows, intensity = self.intensities([self.mode1], imag)
            labels = ['Real'] if len(rows) == 1 else ['Real', 'Imaginary']
        # Comparison
        else:
            rows, intensity = self.intensities([self.mode1, self.mode2], imag)
            if len(rows) == 2:
                labels = ['Group 1 \n Real', 'Group 2 \n Real']
            elif len(rows) == 3:
                if self.mode1.conjugate:
                    labels = ['Group 1 \n Real', 'Group 1 \n Imaginary', 'Group 2 \n Real']
                else:
                    labels = ['Group 1 \n Real', 'Group 2 \n Real', 'Group 2 \n Imaginary']
            else:
                labels = ['Group 1 \n Real', 'Group 1 \n Imaginary', 'Group 2 \n Real', 'Group 2 \n Imaginary']

        fig = make_subplots(rows=len(rows), cols=1, horizontal_spacing=0.05, vertical_spacing=0.05)
        for row in rows:
            # regions with a non-null name map onto intensity indices
            goodroi = np.unique(
                self.coords_2d[self.coords_2d.region.notnull()]['.roi']
            )
            roimap = dict(
                zip(
                    goodroi, list(range(len(goodroi)))
                )
            )
            roidx = np.unique(
                self.coords_2d['.roi']
            )
            for hemi in ['left', 'right']:
                for roi in roidx:
                    roi_df = self.coords_2d[(self.coords_2d['.roi'] == roi) & (self.coords_2d.hemi == hemi)]
                    if not roi_df.empty:
                        # Define color and region name
                        # Check that region is a valid region
                        if not np.unique(roi_df.region.isnull())[0]:
                            region = np.unique(roi_df.region)[0]
                            indice = roimap[roi]
                            # BUGFIX: compare string values with `==`, not `is`
                            # (identity only worked via CPython interning)
                            if hemi == 'right':
                                # right-hemisphere regions occupy the second
                                # half of the intensity vector
                                indice += int(self.atlas_size / 2)
                            val = intensity[row - 1][indice]
                            col = 'rgb({0},{1},{2})'.format(*cm.get_cmap(colormap)(val)[:3])
                        else:
                            col = 'black'
                            region = 'Gingular Pole'
                        # Find traces with identifiers
                        for id in np.unique(roi_df['.id']):
                            # Add id selector
                            x = roi_df[roi_df['.id'] == id]['.long']
                            y = roi_df[roi_df['.id'] == id]['.lat']
                            fig.add_trace(go.Scatter(x=x, y=y, fill='toself', mode='lines', line=dict(color='black', width=0.5),
                                                     fillcolor=col, name=region if region else None, hoverinfo=None),
                                          row=row, col=1)
            # hemisphere captions under each brain row
            fig.add_trace(go.Scatter(x=[300, 1100], y=[-25, -25], text=['left', 'right'], mode='text'),
                          row=row, col=1)

        axis_config = dict(showgrid=False, showline=False, visible=False, ticks='',
                           showticklabels=False, zeroline=False, showspikes=False)
        fig.update_layout(paper_bgcolor='rgba(0,0,0,0)', plot_bgcolor='rgba(0,0,0,0)', showlegend=False,
                          title_text='Mode {}'.format(self.order if self.order is not None else ''),
                          height=150 + len(rows) * 200)
        for i, label in enumerate(labels):
            fig.layout['xaxis' + str(i + 1)].update(axis_config)
            fig.layout['yaxis' + str(i + 1)].update({**axis_config, **dict(scaleanchor='x' + str(i + 1), scaleratio=1,
                                                                          title=labels[i], visible=True)})
        # Hack to get a colorbar
        fig.add_trace(go.Scatter(x=[200, 200], y=[-20, -20],
                                 marker=dict(size=0.01, opacity=1, cmax=0.1, cmin=-0.1, color=[-0.1, 0.1],
                                             colorbar=dict(title="Activity", len=.7, nticks=3),
                                             colorscale=colorscale(colormap)),
                                 mode="markers"))
        return fig
|
{"hexsha": "86c1ff3c36b8d012712e11ac57d681ff245d01d9", "size": 7949, "ext": "py", "lang": "Python", "max_stars_repo_path": "nidmd/plotting/brain.py", "max_stars_repo_name": "arnauddhaene/nidmd", "max_stars_repo_head_hexsha": "e163aed0c3e80838ac37fa105b8026e535af2e5b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-08-19T00:04:11.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-19T00:04:11.000Z", "max_issues_repo_path": "nidmd/plotting/brain.py", "max_issues_repo_name": "arnauddhaene/dmd", "max_issues_repo_head_hexsha": "e163aed0c3e80838ac37fa105b8026e535af2e5b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nidmd/plotting/brain.py", "max_forks_repo_name": "arnauddhaene/dmd", "max_forks_repo_head_hexsha": "e163aed0c3e80838ac37fa105b8026e535af2e5b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-06-12T11:36:47.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-03T03:52:12.000Z", "avg_line_length": 38.033492823, "max_line_length": 128, "alphanum_fraction": 0.4974210593, "include": true, "reason": "import numpy", "num_tokens": 1802}
|
#!/usr/bin/env python
import numpy as np
import pickle
import random
from random import shuffle
from training.util import adjust_learning_rate, clip_model_grad, create_opt, load_dynamic_config
from util.evaluate import evaluate, count_overlap, evaluate_detail
from model.SemiMention import SemiMention
from config import config
from torch.autograd import Variable
import torch
import copy
import time
import pdb
# load data
# Each pickle holds the pre-batched splits: (tokens, chars, char lengths,
# POS tags, labels). Use context managers so the file handles are closed
# (the originals were left open; the config one was never closed at all).
with open(config.data_path + "_train.pkl", 'rb') as f:
    train_token_batches, train_char_batch, train_char_len_batch, train_pos_batches, train_label_batches = pickle.load(f)
with open(config.data_path + "_dev.pkl", 'rb') as f:
    dev_token_batches, dev_char_batch, dev_char_len_batch, dev_pos_batches, dev_label_batches = pickle.load(f)
with open(config.data_path + "_test.pkl", 'rb') as f:
    test_token_batches, test_char_batch, test_char_len_batch, test_pos_batches, test_label_batches = pickle.load(f)

# misc info
# TODO: get it better
with open(config.data_path + "_config.pkl", 'rb') as f:
    misc_config = pickle.load(f)
load_dynamic_config(misc_config, config)
id2label = misc_config["id2label"]

# build the model and move it to GPU when configured and available
ner_model = SemiMention(config)
if config.pre_trained:
    ner_model.load_vector()
if config.if_gpu and torch.cuda.is_available():
    ner_model = ner_model.cuda()

# only optimize parameters that require gradients (frozen embeddings excluded)
parameters = filter(lambda p: p.requires_grad, ner_model.parameters())
optimizer = create_opt(parameters, config)

print("{0} batches expected for training".format(len(train_token_batches)))
best_model = None
best_per = 0
train_all_batches = list(zip(train_token_batches, train_char_batch, train_char_len_batch, train_pos_batches, train_label_batches))
if config.if_shuffle:
    shuffle(train_all_batches)
def get_f1(model, mode):
    """Evaluate `model` on a data split and return the entity-level F1 score.

    Parameters
    ----------
    model : SemiMention
        The mention model; switched to eval mode before prediction.
    mode : str
        "dev" or "test" -- selects the corresponding pre-loaded batch globals.

    Returns
    -------
    float
        F1 score; 0.0 when precision or recall is undefined (no predictions
        or no gold mentions) instead of raising ZeroDivisionError.

    Raises
    ------
    ValueError
        If `mode` is neither "dev" nor "test".
    """
    # pred_all / recall_all accumulate the denominators (total predicted /
    # total gold mentions); pred / recall accumulate the matched counts.
    pred_all, pred, recall_all, recall = 0, 0, 0, 0
    # crossing-mention counters; the actual counting is currently disabled
    # (see the commented count_overlap calls below)
    gold_cross_num = 0
    pred_cross_num = 0
    if mode == "dev":
        batch_zip = zip(dev_token_batches, dev_char_batch, dev_char_len_batch, dev_pos_batches, dev_label_batches)
    elif mode == "test":
        batch_zip = zip(test_token_batches, test_char_batch, test_char_len_batch, test_pos_batches, test_label_batches)
    else:
        raise ValueError("mode must be 'dev' or 'test', got {0!r}".format(mode))

    for token_batch, char_batch, char_len_batch, pos_batch, label_batch in batch_zip:
        token_batch_var = Variable(torch.LongTensor(np.array(token_batch)))
        pos_batch_var = Variable(torch.LongTensor(np.array(pos_batch)))
        if config.if_gpu:
            token_batch_var = token_batch_var.cuda()
            pos_batch_var = pos_batch_var.cuda()
        model.eval()
        pred_entities = model.predict(token_batch_var, pos_batch_var)
        p_a, p, r_a, r = evaluate(label_batch, pred_entities)
        #gold_cross_num += sum(count_overlap(label_batch))
        #pred_cross_num += sum(count_overlap(pred_entities))
        pred_all += p_a
        pred += p
        recall_all += r_a
        recall += r

    print(pred_all, pred, recall_all, recall)
    # Harmonic mean of precision and recall. The original expression
    # 2 / ((pred_all / pred) + (recall_all / recall)) is algebraically the
    # same but raised ZeroDivisionError whenever a count was zero.
    precision = pred / pred_all if pred_all else 0.0
    rec = recall / recall_all if recall_all else 0.0
    f1 = 2 * precision * rec / (precision + rec) if (precision + rec) else 0.0
    print("Precision {0}, Recall {1}, F1 {2}".format(precision, rec, f1))
    # print("Prediction Crossing: ", pred_cross_num)
    # print("Gold Crossing: ", gold_cross_num)
    return f1
# Test
# f1 = get_f1(ner_model, "dev")

# Main training loop with dev-set early stopping and LR decay:
# after `lr_patience` non-improving evaluations the LR is decayed; after
# `decay_patience` decays training stops.
train_start_time = time.time()
early_counter = 0   # evaluations since dev F1 last improved
decay_counter = 0   # learning-rate decays performed so far

for e_ in range(config.epoch):
    print("Epoch: ", e_ + 1)
    batch_counter = 0
    for token_batch, char_batch, char_len_batch, pos_batch, label_batch in train_all_batches:
        batch_len = len(token_batch)
        sent_len = len(token_batch[0])
        token_batch_var = Variable(torch.LongTensor(np.array(token_batch)))
        pos_batch_var = Variable(torch.LongTensor(np.array(pos_batch)))
        if config.if_gpu:
            token_batch_var = token_batch_var.cuda()
            pos_batch_var = pos_batch_var.cuda()
        ner_model.train()
        optimizer.zero_grad()
        loss = ner_model.forward(token_batch_var, pos_batch_var, label_batch)
        loss.backward()
        clip_model_grad(ner_model, config.clip_norm)
        # NOTE(review): loss.cpu().data.numpy()[0] is the pre-0.4 PyTorch
        # scalar idiom; newer torch wants loss.item() -- confirm the pinned
        # version before changing.
        print("batch {0} with {1} instance and sentece length {2} loss {3}".format(
            batch_counter, batch_len, sent_len, loss.cpu().data.numpy()[0]))
        batch_counter += 1
        optimizer.step()

    # only evaluate every `check_every` epochs
    if (e_+1) % config.check_every != 0:
        continue

    # evaluating dev and always save the best
    cur_time = time.time()
    f1 = get_f1(ner_model, "dev")
    print("Dev step took {} seconds".format(time.time() - cur_time))

    # early stop
    if f1 > best_per:
        early_counter = 0
        best_per = f1
        del best_model
        best_model = copy.deepcopy(ner_model)
    else:
        early_counter += 1
        if early_counter > config.lr_patience:
            decay_counter += 1
            early_counter = 0
            if decay_counter > config.decay_patience:
                # patience exhausted at the lowest learning rate: stop
                break
            else:
                adjust_learning_rate(optimizer)
    print("")

print("Training step took {} seconds".format(time.time() - train_start_time))
print("Best dev acc {0}".format(best_per))
print("")

# remember to eval after loading the model. for the reason of batchnorm and dropout
cur_time = time.time()
f1 = get_f1(best_model, "test")
print("Test step took {} seconds".format(time.time() - cur_time))

# save under a randomized suffix so repeated runs do not overwrite each other
serial_number = str(random.randint(0,248))
this_model_path = config.model_path + "_" + serial_number
print("Dumping model to {0}".format(this_model_path))
torch.save(best_model.state_dict(), this_model_path)
|
{"hexsha": "e878ef558f8cb6297812bcf04465739b0f4aad1b", "size": 5588, "ext": "py", "lang": "Python", "max_stars_repo_path": "train.py", "max_stars_repo_name": "berlino/overlapping-ner-em18", "max_stars_repo_head_hexsha": "c2db301cfd88c4ab51694d816fce6c2dcb75c5b9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 26, "max_stars_repo_stars_event_min_datetime": "2018-11-15T20:36:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-07T10:46:51.000Z", "max_issues_repo_path": "train.py", "max_issues_repo_name": "berlino/overlapping-ner-em18", "max_issues_repo_head_hexsha": "c2db301cfd88c4ab51694d816fce6c2dcb75c5b9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-03-20T03:52:21.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-31T07:56:06.000Z", "max_forks_repo_path": "train.py", "max_forks_repo_name": "berlino/overlapping-ner-em18", "max_forks_repo_head_hexsha": "c2db301cfd88c4ab51694d816fce6c2dcb75c5b9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-03-23T00:43:02.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-17T06:16:09.000Z", "avg_line_length": 35.3670886076, "max_line_length": 130, "alphanum_fraction": 0.6998926271, "include": true, "reason": "import numpy", "num_tokens": 1360}
|
"""
转换pytorch版本OCR到keras
暂时只支持dense ocr ,lstm层不支持
"""
import os
import io
import argparse
import configparser
import numpy as np
def parser():
    """Build the CLI for the converter and parse sys.argv.

    Options: -weights_path (input torch .pth) and -output_path
    (destination keras .h5).
    """
    cli = argparse.ArgumentParser(description="pytorch dense ocr to keras ocr")
    for flag, sample in (('-weights_path', 'models/ocr-dense.pth'),
                         ('-output_path', 'models/ocr-dense-keras.h5')):
        cli.add_argument(flag, help=sample)
    return cli.parse_args()
def set_cnn_weight(name, keramodel, torchmodelDict):
    """Copy a torch Conv2d layer's parameters into the same-named Keras layer.

    Torch stores conv kernels as (out, in, kh, kw); Keras expects
    (kh, kw, in, out), hence the transpose. If either the weight or the
    bias is missing from the state dict, the Keras layer is left untouched.
    """
    weight = bias = None
    for key, value in torchmodelDict.items():
        if name not in key:
            continue
        if 'weight' in key:
            weight = value.numpy()
        if 'bias' in key:
            bias = value.numpy()
    if weight is None or bias is None:
        return  # incomplete parameter pair: nothing to copy
    keramodel.get_layer(name).set_weights([weight.transpose(2, 3, 1, 0), bias])
def set_bn_weight(name, keramodel, torchmodelDict):
    """Copy a torch BatchNorm layer's parameters into the same-named Keras layer.

    Keras expects BN weights in the order [gamma, beta, mean, std]
    (scale, offset, running mean, running variance).

    Parameters
    ----------
    name : str
        Layer name shared between the torch state-dict keys and the Keras layer.
    keramodel : keras.Model
        Target model; must expose the layer via get_layer(name).
    torchmodelDict : dict
        Torch state dict mapping parameter names to tensors.
    """
    gamma, beta, mean, std = None, None, None, None
    for key in torchmodelDict:
        if name in key and 'weight' in key:
            gamma = torchmodelDict[key].numpy()
        if name in key and 'bias' in key:
            beta = torchmodelDict[key].numpy()
        if name in key and 'running_mean' in key:
            mean = torchmodelDict[key].numpy()
        if name in key and 'running_var' in key:
            std = torchmodelDict[key].numpy()
    # BUGFIX: only update when the full parameter set was found -- mirrors the
    # guard in set_cnn_weight / set_dense_weight; the original passed None
    # values straight into Keras when any key was missing.
    if all(p is not None for p in (gamma, beta, mean, std)):
        keramodel.get_layer(name).set_weights([gamma, beta, mean, std])
def set_dense_weight(name, keramodel, torchmodelDict):
    """Copy a torch Linear layer's parameters into the same-named Keras Dense layer.

    Torch Linear weights are (out, in); Keras Dense expects (in, out), so the
    matrix is transposed. If the weight or the bias is missing from the state
    dict, the Keras layer is left untouched.
    """
    weight = bias = None
    for key, value in torchmodelDict.items():
        if name not in key:
            continue
        if 'weight' in key:
            weight = value.numpy()
        if 'bias' in key:
            bias = value.numpy()
    if weight is None or bias is None:
        return  # incomplete parameter pair: nothing to copy
    keramodel.get_layer(name).set_weights([np.transpose(weight), bias])
if __name__=='__main__':
    import os
    import sys
    args = parser()
    GPUID=''
    os.environ["CUDA_VISIBLE_DEVICES"] = GPUID  ## do not use the GPU
    # make the project packages (crnn.*) importable when run from tools/
    sys.path.append('..')
    sys.path.append('')
    import torch
    from collections import OrderedDict
    from crnn.keys import alphabetChinese
    from crnn.network_keras import keras_crnn
    ##ocrModel='models/ocr-dense.pth'  ## currently only the dense OCR model is supported
    ocrModel = args.weights_path  ## torch model weights (input)
    output_path =args.output_path  ## keras model weights (output)
    # build the target keras CRNN; lstmFlag=False because only the dense
    # (non-LSTM) architecture can be converted
    kerasModel = keras_crnn(32, 1, len(alphabetChinese)+1, 256, 1,lstmFlag=False)
    # load on CPU regardless of where the checkpoint was saved
    state_dict = torch.load(ocrModel,map_location=lambda storage, loc: storage)
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = k.replace('module.','') # remove `module.` (DataParallel prefix)
        new_state_dict[name] = v
    ## model conversion: copy the parameters layer by layer
    cnn = ['cnn.conv0','cnn.conv1','cnn.conv2','cnn.conv3','cnn.conv4','cnn.conv5','cnn.conv6']
    BN =['cnn.batchnorm2','cnn.batchnorm4','cnn.batchnorm6']
    linear = ['linear']
    ## CNN layers
    for cn in cnn:
        set_cnn_weight(cn,kerasModel,new_state_dict)
    ## BN layers
    for bn in BN:
        set_bn_weight(bn,kerasModel,new_state_dict)
    ## linear layers
    for lr in linear:
        set_dense_weight(lr,kerasModel,new_state_dict)
    kerasModel.save_weights(output_path)  ## save the keras weights
|
{"hexsha": "e202fdddfd7cdc608deb718008e2359f9c2e0243", "size": 3525, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/pytorch_to_keras.py", "max_stars_repo_name": "liqinnetgain/redenv", "max_stars_repo_head_hexsha": "9feb19646495b3aae2bfb5b01a7991b2b6372566", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-06-18T05:20:47.000Z", "max_stars_repo_stars_event_max_datetime": "2019-06-18T05:20:47.000Z", "max_issues_repo_path": "tools/pytorch_to_keras.py", "max_issues_repo_name": "ypw-rich/chinese-ocr", "max_issues_repo_head_hexsha": "443e2f299b1a40afd38df6d150b8a2205132a84c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-06-18T14:24:05.000Z", "max_issues_repo_issues_event_max_datetime": "2019-06-18T15:06:44.000Z", "max_forks_repo_path": "tools/pytorch_to_keras.py", "max_forks_repo_name": "ypw-rich/chinese-ocr", "max_forks_repo_head_hexsha": "443e2f299b1a40afd38df6d150b8a2205132a84c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-06-18T05:20:49.000Z", "max_forks_repo_forks_event_max_datetime": "2019-06-18T05:20:49.000Z", "avg_line_length": 31.1946902655, "max_line_length": 95, "alphanum_fraction": 0.6314893617, "include": true, "reason": "import numpy", "num_tokens": 988}
|
def init_module(model_name='model'):
    """(Re)import pymc3/theano/numpy/scipy and publish them as module globals.

    Must be called before instantiating any of the model classes below:
    they reference the globals ``pm``, ``theano``, ``floatX``, ``T``, ``np``,
    ``stats`` and ``warnings`` that this function creates. The reload() calls
    force fresh module state, presumably for per-process isolation --
    see the commented THEANO_FLAGS attempt below. `model_name` is currently
    only used by that commented-out code.
    """
    import os
    # I tried, but it doesn't work...
    # os.environ['THEANO_FLAGS'] = 'base_compiledir=~/.theano/' + model_name + str(os.getppid())
    # print(os.environ['THEANO_FLAGS'])
    from importlib import reload
    global pm
    import pymc3 as pm
    pm = reload(pm)
    global theano
    import theano
    theano = reload(theano)
    global floatX
    floatX = theano.config.floatX
    global T
    import theano.tensor as T
    global np
    import numpy as np
    global stats
    from scipy import stats
    global warnings
    import warnings
    # silence pymc3's own logging output
    pm._log.setLevel('CRITICAL')
class LinearRegression:
    """Bayesian multi-output linear regression fitted with PyMC3.

    Requires init_module() to have been called first so the module globals
    ``pm``, ``theano``, ``np`` and ``warnings`` exist.
    """

    def __init__(self, num_samples_fit=1000, num_tune_iter=500, advi_n_init=5000, num_samples_predict=1000, progressbar=False, previous_trace=None):
        # num_samples_fit: posterior draws taken in fit()
        # num_tune_iter: NUTS tuning (warm-up) iterations
        # advi_n_init: iterations for the ADVI initialization of NUTS
        # num_samples_predict: posterior-predictive draws in predict()
        # previous_trace: trace of an earlier fit; its posterior means/stds
        #   seed the priors of the next fit (simple warm start)
        self.num_samples_fit = num_samples_fit
        self.num_tune_iter = num_tune_iter
        self.advi_n_init = advi_n_init
        self.num_samples_predict = num_samples_predict
        self.progressbar = progressbar
        self.previous_trace = previous_trace

    def fit(self, X, y):
        """Build the model Y ~ Normal(alpha + X.beta, sigma) and sample it."""
        # X lives in a theano shared variable so predict() can substitute
        # new inputs without rebuilding the model graph.
        self.X = theano.shared(X)
        self.model = pm.Model()
        # Make sure y is a 2D vector with 1 column per output
        if y.ndim < 2:
            y = np.atleast_2d(y).T
        with self.model:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", FutureWarning)
                warnings.simplefilter("ignore", UserWarning)
                # Earlier hierarchical formulation, kept for reference:
                #mu_a_init = 0.
                #mu_b_init = 0.
                #if self.previous_trace is not None:
                #    mu_a_init = self.previous_trace['mu_a'].mean(axis=0)
                #    mu_b_init = self.previous_trace['mu_b'].mean(axis=0)
                ## Hyperpriors for group nodes
                #mu_a = pm.Normal('mu_a', mu=mu_a_init, sd=10000, shape=y.shape[1])
                #sigma_a = pm.HalfCauchy('sigma_a', 5, shape=y.shape[1])
                #mu_b = pm.Normal('mu_b', mu=mu_b_init, sd=10000, shape=(X.shape[1],y.shape[1]))
                #sigma_b = pm.HalfCauchy('sigma_b', 5, shape=(X.shape[1],y.shape[1]))
                alpha_init = 0.
                beta_init = 0.
                alpha_init_std = 10000.0
                beta_init_std = 10000.0
                # Warm start: centre the priors on the previous posterior and
                # widen its spread by a factor of 1.5.
                if self.previous_trace is not None:
                    alpha_init = self.previous_trace['alpha'].mean(axis=0)
                    beta_init = self.previous_trace['beta'].mean(axis=0)
                    alpha_init_std = self.previous_trace['alpha'].std(axis=0) * 1.5
                    beta_init_std = self.previous_trace['beta'].std(axis=0) * 1.5
                # Priors for unknown model parameters
                #alpha_d = pm.Normal('alpha', mu=mu_a, sd=sigma_a, shape=y.shape[1])
                #beta_d = pm.Normal('beta', mu=mu_b, sd=sigma_b, shape=(X.shape[1],y.shape[1]))
                alpha_d = pm.Normal('alpha', mu=alpha_init, sd=alpha_init_std, shape=y.shape[1])
                beta_d = pm.Normal('beta', mu=beta_init, sd=beta_init_std, shape=(X.shape[1],y.shape[1]))
                sigma_d = pm.HalfNormal('sigma', sd=10000)
                # Expected value of outcome
                mu_d = alpha_d + pm.math.dot(self.X, beta_d)
                # Likelihood (sampling distribution) of observations
                Y = pm.Normal('Y', mu=mu_d, sd=sigma_d, observed=y)
                #print("sample")
                self.trace = pm.sample(
                    self.num_samples_fit,
                    init='advi',
                    n_init=self.advi_n_init,
                    progressbar=self.progressbar,
                    cores=4,
                    tune=self.num_tune_iter)

    def predict(self, X, return_std=False):
        """Posterior-predictive mean (and optionally std) at new inputs X."""
        self.X.set_value(X)
        ppc = pm.sample_posterior_predictive(self.trace, model=self.model, samples=self.num_samples_predict, progressbar=self.progressbar)
        y = ppc['Y'].mean(axis=0)
        std = ppc['Y'].std(axis=0)
        if return_std:
            # collapse the per-output stds into one scalar per sample
            if std.ndim > 1:
                std = std.mean(axis=1)
            return y, std
        else:
            return y

    # Draft of an incremental-update API, kept for reference:
    #def update(self, X, y):
    #    self.model = Model()
    #    with model:
    #        # Priors are posteriors from previous iteration
    #        mu_a = from_posterior('mu_a', self.trace['mu_a'])
    #        sigma_a = from_posterior('sigma_a', self.trace['sigma_a'])
    #        mu_b = from_posterior('mu_b', self.trace['mu_b'])
    #        sigma_b = from_posterior('sigma_b', self.trace['sigma_b'])
    #
    #        alpha_d = pm.Normal('alpha', mu=mu_a, sd=sigma_a, shape=y.shape[1])
    #        beta_d = pm.Normal('beta', mu=mu_b, sd=sigma_b, shape=(X.shape[1],y.shape[1]))
    #        sigma_d = from_posterior('sigma_d', self.trace['sigma_d'])
    #        # Expected value of outcome
    #        mu_d = alpha_d + pm.math.dot(self.X, beta_d)
    #        # Likelihood (sampling distribution) of observations
    #        Y = pm.Normal('Y', mu=mu_d, sd=sigma_d, observed=y)
    #        # Draw posterior samples
    #        self.trace = pm.sample(self.num_samples_fit, progressbar=self.progressbar, cores=4, tune=self.num_tune_iter)
class SimpleGP:
    """One independent Matern-5/2 GP per output dimension, fitted by MAP.

    Requires init_module() to have been called first (uses the ``pm``,
    ``np`` and ``warnings`` module globals).
    """

    def __init__(self, n_out=1):
        # n_out: number of output dimensions; one GP is built per output
        self.n_out = n_out

    def fit(self, X, y):
        """Build n_out marginal-likelihood GPs over X and find the MAP point."""
        # Make sure y is a 2D vector with 1 column per output
        if y.ndim < 2:
            y = np.atleast_2d(y).T
        assert(y.shape[1] == self.n_out)
        with pm.Model() as model:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", FutureWarning)
                warnings.simplefilter("ignore", UserWarning)
                # one lengthscale, constant mean and noise level per output
                ls = [pm.Gamma("l"+str(i), alpha=2, beta=1) for i in range(self.n_out)]
                #etas = [pm.HalfCauchy("eta"+str(i), beta=5) for i in range(self.n_out)]
                means = [pm.gp.mean.Constant(pm.Normal("gp_mu"+str(i), 1, 3)) for i in range(self.n_out)]
                #covs = [etas[i]**2 * pm.gp.cov.Matern52(X.shape[1], ls[i]) for i in range(self.n_out)]
                covs = [pm.gp.cov.Matern52(X.shape[1], ls[i]) for i in range(self.n_out)]
                self.gps = [pm.gp.Marginal(mean_func=means[i], cov_func=covs[i]) for i in range(self.n_out)]
                sigmas = [pm.HalfCauchy("sigma" + str(i), beta=5) for i in range(self.n_out)]
                self.ys = [self.gps[i].marginal_likelihood("y"+str(i), X=X, y=y[:,i], noise=sigmas[i]) for i in range(self.n_out)]
                # "mp" stands for marginal posterior
                self.mp = pm.find_MAP(progressbar=False) # sample(1, njobs=1, progressbar=True) #

    def predict(self, X, return_std=False):
        """Predictive mean (and optionally std) at X, conditioned on the MAP point."""
        y = np.empty((X.shape[0], self.n_out))
        std = np.empty((X.shape[0], self.n_out))
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", FutureWarning)
            for i in range(self.n_out):
                # diag=True: only the marginal variances are needed
                mu, var = self.gps[i].predict(X, point=self.mp, diag=True) #, pred_noise=True)
                sd = np.sqrt(var)
                y[:,i] = mu
                std[:,i] = sd
        if return_std:
            # collapse the per-output stds into one scalar per sample
            return y, std.mean(axis=1)
        else:
            return y
class NeuralNet:
    """Bayesian fully-connected regression network (ReLU hidden layers)
    sampled with PyMC3.

    Requires init_module() to have been called first (uses the ``pm``,
    ``theano``, ``floatX``, ``np`` and ``warnings`` module globals).
    With n_layers <= 0 the net degenerates to Bayesian linear regression.
    """

    def __init__(self, n_layers=2, hidden_size=5,
                 num_fit_iter=30000, # only when advi is not commented out
                 num_samples_fit=1000,
                 num_tune_iter=500,
                 num_samples_predict=1000,
                 progressbar=False,
                 previous_trace=None):
        # previous_trace: earlier fit's trace; its posterior means seed the
        #   weight/bias priors of the next fit (warm start)
        self.n_layers = n_layers
        self.hidden_size = hidden_size
        self.num_fit_iter = num_fit_iter
        self.num_samples_fit = num_samples_fit
        self.num_tune_iter = num_tune_iter
        self.num_samples_predict = num_samples_predict
        self.progressbar = progressbar
        self.previous_trace = previous_trace

    def construct_regr_nn(nn_input, nn_output,
                          Xtrain, ytrain,
                          n_layers = 2,
                          hidden_size = 5,
                          previous_trace=None):
        """Build and return the PyMC3 model for the regression network.

        nn_input / nn_output are theano shared variables; Xtrain / ytrain
        only supply shapes and test values.

        NOTE(review): defined without `self` and without @staticmethod; this
        works only because it is always called as
        NeuralNet.construct_regr_nn(...) (plain-function lookup in Python 3).
        Consider adding @staticmethod.
        """
        if n_layers <= 0:
            # no hidden layers: the "output weights" act directly on the input
            hidden_size = Xtrain.shape[1]
        # Initialize random weights between each layer
        init_i = []
        for i in range(n_layers):
            if i == 0:
                init_i.append(np.random.randn(Xtrain.shape[1], hidden_size).astype(floatX))
            else:
                init_i.append(np.random.randn(hidden_size, hidden_size).astype(floatX))
        init_out = np.random.randn(hidden_size, ytrain.shape[1]).astype(floatX)
        # Initialize random biases in each layer
        init_b_i = [np.random.randn(hidden_size).astype(floatX) for _ in range(n_layers)]
        init_b_out = np.random.randn(ytrain.shape[1]).astype(floatX)
        # prior means default to 0; a previous trace recentres them (warm start)
        weights_mu_init = [0. for _ in range(n_layers+1)]
        bias_mu_init = [0. for _ in range(n_layers+1)]
        if previous_trace is not None:
            weights_mu_init = [previous_trace['w_'+str(i)+'_'+str(i+1)].mean(axis=0) for i in range(n_layers+1)]
            bias_mu_init = [previous_trace['b_'+str(i+1)].mean(axis=0) for i in range(n_layers+1)]
        with pm.Model() as neural_network:
            # Weights from input to hidden layer
            # Weights from ith to jth layer
            weights_i_j = []
            bias_i = []
            for i in range(n_layers):
                if i == 0:
                    weights_i_j.append( pm.Normal('w_0_1', mu=weights_mu_init[i], sd=10000,
                                                  shape=(Xtrain.shape[1], hidden_size),
                                                  testval=init_i[i]) )
                else:
                    weights_i_j.append( pm.Normal('w_'+str(i)+'_'+str(i+1), mu=weights_mu_init[i], sd=10000,
                                                  shape=(hidden_size, hidden_size),
                                                  testval=init_i[i]) )
                bias_i.append( pm.Normal('b_'+str(i+1), mu=bias_mu_init[i], sd=10000, shape=(hidden_size), testval=init_b_i[i]) )
            # Weights from hidden layer to output
            weights_j_out = pm.Normal('w_'+str(n_layers)+'_'+str(n_layers+1), mu=weights_mu_init[-1], sd=10000,
                                      shape=(hidden_size,ytrain.shape[1]),
                                      testval=init_out)
            bias_out = pm.Normal('b_'+str(n_layers+1), mu=bias_mu_init[-1], sd=10000, shape=(ytrain.shape[1]), testval=init_b_out)
            # Build neural-network using relu activation function
            act_i = []
            for i in range(n_layers):
                if i == 0:
                    act_i.append(pm.math.maximum(0, pm.math.dot(nn_input, weights_i_j[i]) + bias_i[i]))
                else:
                    act_i.append(pm.math.maximum(0, pm.math.dot(act_i[i-1], weights_i_j[i]) + bias_i[i]))
            if len(act_i) == 0:
                # zero hidden layers: feed the input straight to the output layer
                act_i.append(nn_input)
            act_out = pm.math.dot(act_i[-1], weights_j_out) + bias_out
            sigma_d = pm.HalfNormal('sigma', sd=10000, shape=ytrain.shape[1])
            out = pm.Normal('out', mu=act_out, sd=sigma_d, observed=nn_output, total_size=ytrain.shape)
        return neural_network

    def fit(self, X, y):
        """Build the network over (X, y) and draw posterior samples with NUTS."""
        # shared variables let predict() swap in new inputs without rebuilding
        self.ann_input = theano.shared(X)
        self.ann_output = theano.shared(y)
        self.neural_network = NeuralNet.construct_regr_nn( self.ann_input, self.ann_output, X, y, self.n_layers, self.hidden_size, self.previous_trace )
        with self.neural_network:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", FutureWarning)
                warnings.simplefilter("ignore", UserWarning)
                # Earlier variational (ADVI / SVGD) fitting path, kept for reference:
                #self.inference = pm.ADVI()
                #tracker = pm.callbacks.Tracker(
                #    mean=self.inference.approx.mean.eval, # callable that returns mean
                #    std=self.inference.approx.std.eval # callable that returns std
                #    )
                #self.approx = pm.fit(n=self.num_fit_iter,
                #                     method=self.inference,
                #                     progressbar=self.progressbar,
                #                     callbacks=[pm.callbacks.CheckParametersConvergence(diff='absolute',tolerance=0.001), tracker])
                #
                ##self.approx = pm.fit(300, method='svgd', inf_kwargs=dict(n_particles=1000), obj_optimizer=pm.sgd(learning_rate=0.01))
                #self.trace = self.approx.sample(draws=5000)
                self.trace = pm.sample(
                    self.num_samples_fit,
                    init='advi',
                    n_init=5000,
                    progressbar=self.progressbar,
                    cores=4,
                    tune=self.num_tune_iter)

    def predict(self, X, return_std=False):
        """Posterior-predictive mean (and optionally std) at new inputs X."""
        # Earlier vectorized-sampling path for the ADVI approximation, kept
        # for reference:
        ## create symbolic input
        #x = T.matrix('X')
        ## symbolic number of samples is supported, we build vectorized posterior on the fly
        #n = T.iscalar('n')
        ## Do not forget test_values or set theano.config.compute_test_value = 'off'
        #x.tag.test_value = np.empty_like(X[:10])
        #n.tag.test_value = 100
        #
        #_sample_proba = self.approx.sample_node(
        #    self.neural_network.out.distribution.mu,
        #    size=n,
        #    more_replacements={self.ann_input: x})
        #
        #sample_proba = theano.function([x, n], _sample_proba)
        #
        #y = sample_proba(X, 500).mean(0)
        #y_std = sample_proba(X, 500).std(0)
        #
        #if return_std:
        #    return y, y_std.mean(axis=1) # Mean? or sqrt((sd1^2+sd2^2)/n)?
        #else:
        #    return y
        self.ann_input.set_value(X)
        ppc = pm.sample_posterior_predictive(self.trace, model=self.neural_network, samples=self.num_samples_predict, progressbar=self.progressbar)
        y = ppc['out'].mean(axis=0)
        std = ppc['out'].std(axis=0)
        if return_std:
            # collapse the per-output stds into one scalar per sample
            if std.ndim > 1:
                std = std.mean(axis=1)
            return y, std
        else:
            return y
def test1():
    """Smoke-test SimpleGP on data drawn from a known Matern-5/2 GP.

    Requires init_module() to have been called (uses the pm/np globals).
    """
    n = 500 # The number of data points
    # X = np.linspace(0, 10, n)[:, None] # The inputs to the GP, they must be arranged as a column vector
    X = np.random.rand(n, 2)
    # Define the true covariance function and its parameters
    l_true = 1.0
    eta_true = 3.0
    cov_func = eta_true**2 * pm.gp.cov.Matern52(X.shape[1], l_true)
    # A mean function that is zero everywhere
    mean_func = pm.gp.mean.Zero()
    # The latent function values are one sample from a multivariate normal
    # Note that we have to call `eval()` because PyMC3 built on top of Theano
    f_true = np.random.multivariate_normal(mean_func(X).eval(), cov_func(X).eval() + 1e-8*np.eye(n), 2).T
    # The observed data is the latent function plus a small amount of IID Gaussian noise
    # The standard deviation of the noise is `sigma`
    sigma_true = 1.0
    y = f_true + sigma_true * np.random.randn(n, 2)
    X_new = np.random.rand(600, X.shape[1]) * 1.0
    gp = SimpleGP(y.shape[1])
    print("fit")
    gp.fit(X, y)
    print("predict")
    ypred, ypred_std = gp.predict(X_new)
    print(ypred)
    print(ypred_std)
def test2():
    """Smoke-test LinearRegression on a 2-D linear map with uniform noise."""
    n = 500  # number of training points
    X = np.random.rand(n, 2)
    coef = [[2, 3], [4, 5]]
    y = X.dot(coef) + 6 + np.random.random((X.shape[0], 2))
    # Bug fix: X_new was never defined in this function (it only existed as a
    # local inside test1), so test2 crashed with a NameError on first use.
    # Generate held-out inputs the same way test1 does.
    X_new = np.random.rand(600, X.shape[1])
    ynew = X_new.dot(coef) + 6 + np.random.random((X_new.shape[0], 2))
    model = LinearRegression(progressbar=True)
    model.fit(X, y)
    print([(name, model.trace[name].mean(axis=0)) for name in model.trace.varnames])
    ypred, std = model.predict(X_new, return_std=True)
    # One scatter figure per (input feature, output dimension) pair:
    # true targets vs predictions.
    for feat in (0, 1):
        for out in (0, 1):
            plt.figure()
            plt.scatter(X_new[:, feat], ynew[:, out])
            plt.scatter(X_new[:, feat], ypred[:, out])
    plt.show()
def test3():
    """Smoke-test NeuralNet on a noisy 5-D -> 2-D linear map."""
    features_train = np.random.rand(100, 5)
    features_test = np.random.rand(100, 5)
    beta = -np.random.rand(5, 2)
    targets_train = features_train.dot(beta) + np.random.rand(100, 2) * 0.05
    targets_test = features_test.dot(beta)
    model = NeuralNet(n_layers=0, hidden_size=5, num_fit_iter=30000, progressbar=True)
    model.fit(features_train, targets_train)
    predictions = model.predict(features_test)
    # Overlay predicted vs true 2-D outputs in one scatter plot.
    plt.scatter(predictions[:, 0], predictions[:, 1])
    plt.scatter(targets_test[:, 0], targets_test[:, 1])
    plt.show()
if __name__ == "__main__":
    # matplotlib is only needed by the plotting test drivers, so it is
    # imported lazily here; the module-level name `plt` is what test2/test3 use.
    import matplotlib.pyplot as plt
    test3()
|
{"hexsha": "cf61932ae5406980641c91eb32080557d203cbfe", "size": 17065, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/pymc3_models.py", "max_stars_repo_name": "KastnerRG/sherlock", "max_stars_repo_head_hexsha": "ba3e8a81e08315df169bb5dd76d9fdd8f2660583", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/pymc3_models.py", "max_issues_repo_name": "KastnerRG/sherlock", "max_issues_repo_head_hexsha": "ba3e8a81e08315df169bb5dd76d9fdd8f2660583", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/pymc3_models.py", "max_forks_repo_name": "KastnerRG/sherlock", "max_forks_repo_head_hexsha": "ba3e8a81e08315df169bb5dd76d9fdd8f2660583", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.097826087, "max_line_length": 152, "alphanum_fraction": 0.5556401992, "include": true, "reason": "import numpy,from scipy,import theano,import pymc3", "num_tokens": 4346}
|
# -*- coding: utf-8 -*-
"""
The program performs classification on datasets generated using sklearn as well as a image dataset provided via kaggle through a neural network.
The user can define the layers of the neural network with respect to various activations and layer sizes.
@author: Randeep
"""
import numpy as np
from imageio import imread
import os
from skimage.transform import resize
import re
from random import shuffle
from matplotlib import pyplot
from pandas import DataFrame
from sklearn.datasets import make_moons, make_circles, make_blobs
#Ratio to split data into training and test sets
#Percentage of samples assigned to the training split; the remainder are test.
SPLIT_RATIO = 60
def sigmoid(Z):
    """Element-wise logistic sigmoid, 1 / (1 + e^-Z)."""
    return 1 / (1 + np.exp(-Z))
def tanh(Z):
    """Element-wise hyperbolic tangent activation."""
    return np.tanh(Z)
def relu(Z):
    """Element-wise rectified linear unit: max(Z, 0).

    Bug fix: the original bound A to Z itself (the `A = {}` line was dead)
    and then assigned through boolean masks, so it silently mutated the
    caller's array in place. np.maximum returns a fresh array and leaves
    the input untouched; values returned for every element are identical.
    """
    return np.maximum(Z, 0)
def initialize_parameters(dimensions, initializer):
    """Create random weight matrices and zero bias vectors for every layer.

    dimensions -- layer sizes; dimensions[0] is the input feature count
    initializer -- 'Random' (unit scale), 'He' (suited to relu layers) or
                   'Xavier' (suited to sigmoid/tanh layers)
    Returns {'Weights': {layer: W}, 'Biasses': {layer: b}} keyed 1..L-1.
    """
    weights = {}
    biases = {}
    for layer in range(1, len(dimensions)):
        fan_in = dimensions[layer - 1]
        if initializer == 'Random':
            scale = 1
        elif initializer == 'He':
            scale = np.sqrt(2 / fan_in)
        elif initializer == 'Xavier':
            scale = np.sqrt(1 / fan_in)
        weights[layer] = scale * np.random.randn(dimensions[layer], fan_in)
        biases[layer] = np.zeros((dimensions[layer], 1))
    return {'Weights': weights,
            'Biasses': biases}
def linear_forward(X, W, b):
    """Affine pre-activation for one layer: W @ X + b."""
    pre_activation = np.matmul(W, X)
    pre_activation = pre_activation + b
    return pre_activation
def activation_forward(Z, activation_type):
    """Apply the named nonlinearity ('sigmoid', 'relu' or 'tanh') to Z."""
    if activation_type == 'relu':
        activated = relu(Z)
    elif activation_type == 'tanh':
        activated = tanh(Z)
    elif activation_type == 'sigmoid':
        activated = sigmoid(Z)
    return activated
def linear_backward(dZ, A_prev, W, lambd, D, probability, layer):
    #Function to compute gradients going from the linear component of a layer to the activation component of the previous layer in backward propagation
    #m refers the number of samples
    #dZ: gradient of the cost w.r.t. this layer's pre-activation
    #A_prev: activations of layer-1; W: this layer's weight matrix
    #lambd: L2 weight-decay coefficient (adds lambd*W/m to dW)
    #D: dict of binary dropout masks per layer; probability: keep-probability
    #used to undo the inverted-dropout scaling on layer-1's activations
    #NOTE(review): the correct scale here is layer-1's keep probability (the
    #one applied in the forward pass); with equal per-layer probabilities
    #(the default of all 1s) the distinction is moot — confirm before using
    #heterogeneous dropout rates.
    m = dZ.shape[1]
    dW = (np.matmul(dZ, A_prev.T)/m) + (lambd * W / m)
    db = np.sum(dZ, axis = 1, keepdims = True)/m
    dA_prev = np.matmul(W.T, dZ)
    if layer != 1:
        #The input layer (layer == 1) receives no dropout mask or rescaling.
        dA_prev = np.multiply(dA_prev, D[layer - 1])
        dA_prev = dA_prev/probability
    return dW, db, dA_prev
def activation_backward(dA, A, activation_type):
    """Backprop through an activation: return dL/dZ given dL/dA.

    Each derivative is expressed in terms of the activation output A itself.
    """
    if activation_type == 'sigmoid':
        grad = A * (1 - A)
    elif activation_type == 'tanh':
        grad = 1 - A * A
    elif activation_type == 'relu':
        grad = (A > 0).astype(int)
    else:
        # Mirror the original fall-through: unknown types multiply by A unchanged.
        grad = A
    return dA * grad
def model_forward(X, parameters, activation_types, dropout_probabilities):
    """Run one forward pass through every layer with inverted dropout.

    Returns (A, Z, D): activations keyed by layer (A[0] is the input X),
    pre-activations, and the binary dropout masks that were applied.
    """
    weights = parameters['Weights']
    biases = parameters['Biasses']
    pre_acts = {}
    acts = {0: X}
    masks = {}
    for layer in range(1, len(weights) + 1):
        keep_prob = dropout_probabilities[layer - 1]
        pre_acts[layer] = linear_forward(acts[layer - 1], weights[layer], biases[layer])
        activated = activation_forward(pre_acts[layer], activation_types[layer - 1])
        # Inverted dropout: drop units with prob (1 - keep_prob), rescale survivors.
        mask = (np.random.rand(*activated.shape) < keep_prob).astype(int)
        masks[layer] = mask
        acts[layer] = np.multiply(activated, mask) / keep_prob
    return acts, pre_acts, masks
def model_backward(Y, activation_types, A, Z, W, lambd, D, dropout_probabilities):
    """Run one full backward pass and return {'dW': ..., 'db': ...}.

    Y -- labels of shape (1, samples); A/Z/D -- activations, pre-activations
    and dropout masks from model_forward; W -- weights per layer;
    lambd -- L2 weight-decay coefficient; dropout_probabilities -- the
    keep-probabilities used in the forward pass, one per layer.

    Bug fix: the gradient flowing into layer i-1's activations must be
    rescaled by layer i-1's keep probability (the one used to scale that
    activation in the forward pass), i.e. dropout_probabilities[i-2] — the
    original passed layer i's dropout_probabilities[i-1]. With uniform
    probabilities (the default of all 1s) the result is unchanged.
    """
    Layers = len(activation_types)
    dW = {}
    db = {}
    dZ = {}
    # Derivative of binary cross-entropy w.r.t. the output activation.
    dA_prev = -(np.divide(Y, A[Layers]) - np.divide(1 - Y, 1 - A[Layers]))
    for i in reversed(range(1, Layers + 1)):
        dZ[i] = activation_backward(dA_prev, A[i], activation_types[i - 1])
        # Keep probability of the *previous* layer; linear_backward ignores
        # it when i == 1 (no dropout scaling into the input layer).
        prev_keep_prob = dropout_probabilities[i - 2] if i > 1 else 1
        dW[i], db[i], dA_prev = linear_backward(dZ[i], A[i - 1], W[i], lambd, D,
                                                prev_keep_prob, i)
    return {'dW': dW,
            'db': db}
def update_parameters(parameters, gradients, learning_rate):
    """Apply one gradient-descent step to every layer's weights and biases."""
    weights = parameters['Weights']
    biases = parameters['Biasses']
    for layer in range(1, len(weights) + 1):
        weights[layer] = weights[layer] - learning_rate * gradients['dW'][layer]
        biases[layer] = biases[layer] - learning_rate * gradients['db'][layer]
    return parameters
def compute_cost(A, Y, lambd, Weights):
    """Binary cross-entropy cost plus an L2 weight-decay penalty.

    A -- output-layer activations, shape (1, samples); Y -- labels, same
    shape; lambd -- decay strength; Weights -- dict of weight matrices.
    """
    m = Y.shape[1]
    decay = 0
    for matrix in Weights.values():
        decay += np.sum(np.square(matrix))
    decay = (lambd * decay) / (2 * m)
    cross_entropy = -(np.matmul(Y, np.log(A).T) + np.matmul(1 - Y, np.log(1 - A).T)) / m
    return np.squeeze(cross_entropy + decay)
def predict(X, model):
    """Forward-propagate X through a trained model and threshold at 0.5.

    Dropout is disabled for inference (all keep probabilities are 1).
    Returns a 0/1 integer array with one prediction per sample.
    """
    parameters = model['Parameters']
    n_layers = len(parameters['Weights'])
    no_dropout = [1] * n_layers
    activations, _, _ = model_forward(X, parameters, model['Activations'], no_dropout)
    return (activations[n_layers] > 0.5).astype(int)
def calculate_accuracy(Y, Y_predictions):
    """Percentage of 0/1 labels predicted correctly."""
    error_rate = np.mean(np.abs(Y_predictions - Y))
    return 100 - error_rate * 100
def train(X_train, Y_train, X_test, Y_test, learning_rate, num_iterations, print_iteration, activation_types, layer_dimensions, dropout_probabilities = None, lambd = 0, initializer = 'Random'):
    #Train a fully connected network with full-batch gradient descent.
    #X_train/X_test: inputs of shape (features, samples); Y_train/Y_test: labels of shape (1, samples)
    #learning_rate: gradient-descent step size; num_iterations: number of updates
    #print_iteration: interval at which the cost is recorded and printed
    #activation_types: one activation name per layer; layer_dimensions: layer sizes
    #   (layer_dimensions[0] is the input feature count, layer_dimensions[-1] the output size)
    #dropout_probabilities: keep-probability per hidden layer (output/input excluded);
    #   None disables dropout. lambd: L2 weight-decay strength (0 disables it).
    #initializer: 'Random', 'He' or 'Xavier'
    #Returns a dict with the parameters, recorded costs, predictions and accuracies.
    parameters = initialize_parameters(layer_dimensions, initializer)
    Costs = []
    if dropout_probabilities is None:
        dropout_probabilities = [1] * (len(layer_dimensions) - 2)
    else:
        #Work on a copy: the original appended to the caller's list in place,
        #so passing the same list to train() twice kept growing it.
        dropout_probabilities = list(dropout_probabilities)
    #The output layer never drops neurons, so its keep-probability is 1.
    dropout_probabilities.append(1)
    for i in range(num_iterations):
        A, Z, D = model_forward(X_train, parameters, activation_types, dropout_probabilities)
        Cost = compute_cost(A[len(layer_dimensions)-1], Y_train, lambd, parameters['Weights'])
        if i != 0 and i % print_iteration == 0:
            Costs.append(Cost)
            print ("Cost after iteration %i: %f" %(i, Cost))
        gradients = model_backward(Y_train, activation_types, A, Z, parameters['Weights'], lambd, D, dropout_probabilities)
        parameters = update_parameters(parameters, gradients, learning_rate)
    model = {'Learning Rate' : learning_rate,
             'Activations' : activation_types,
             'Layer Sizes' : layer_dimensions,
             'Iterations' : num_iterations,
             'Parameters' : parameters,
             'Costs' : Costs}
    Y_train_predictions = predict(X_train, model)
    Y_test_predictions = predict(X_test, model)
    model['Training Accuracy'] = calculate_accuracy(Y_train, Y_train_predictions)
    model['Test Accuracy'] = calculate_accuracy(Y_test, Y_test_predictions)
    model['Training Predictions'] = Y_train_predictions
    model['Test Predictions'] = Y_test_predictions
    return model
def load_images(file_path, file_count, image_size):
    #Load a cat-vs-other image folder into flattened train/test arrays.
    #file_path: directory of images; files whose names start with 'cat' get label 1
    #file_count: number of files sampled from the directory
    #image_size: images are resized to (image_size, image_size)
    #Returns (X_training, Y_training, X_test, Y_test); X has shape
    #(features, samples) and Y has shape (1, samples).
    dirs = np.array(os.listdir(file_path))
    shuffle(dirs)
    split_index = file_count * SPLIT_RATIO // 100
    indices = np.random.permutation(file_count)
    training_idx, test_idx = indices[:split_index], indices[split_index:]
    image_training_paths = dirs[training_idx]
    image_test_paths = dirs[test_idx]
    #Label from the file name; re.match anchors at the start of the string.
    Y_training = np.array([re.match('cat', s) is not None for s in image_training_paths]).astype(int)
    Y_test = np.array([re.match('cat', s) is not None for s in image_test_paths]).astype(int)
    Y_training = Y_training.reshape((1, Y_training.shape[0]))
    Y_test = Y_test.reshape((1, Y_test.shape[0]))
    image_training_paths = [file_path + '/' + s for s in image_training_paths]
    image_test_paths = [file_path + '/' + s for s in image_test_paths]
    X_training = np.array([resize(imread(s), (image_size, image_size)) for s in image_training_paths])
    X_test = np.array([resize(imread(s), (image_size, image_size)) for s in image_test_paths])
    #Flatten each image and transpose to (features, samples).
    X_training = X_training.reshape(X_training.shape[0], -1).T
    X_test = X_test.reshape(X_test.shape[0], -1).T
    #NOTE(review): skimage.transform.resize normally returns floats already
    #scaled to [0, 1], so this extra /255 looks like double normalisation —
    #kept for behaviour compatibility; confirm before changing.
    X_training = X_training/255
    X_test = X_test/255
    print('images loaded')
    return X_training, Y_training, X_test, Y_test
def plot_cost(Costs):
    #Function to plot Costs and iterations
    #Costs: the list of cost values recorded every print_iteration steps by train()
    pyplot.plot(Costs)
    pyplot.show()
def plot_dataset(X, Y):
    """Scatter-plot a 2-feature dataset coloured by its 0/1 label.

    X -- inputs of shape (2, samples); Y -- labels of shape (1, samples).
    """
    frame = DataFrame(dict(x=X[0, :], y=X[1, :], label=Y[0, :]))
    palette = {0: 'red', 1: 'blue'}
    fig, axis = pyplot.subplots()
    for label, subset in frame.groupby('label'):
        subset.plot(ax=axis, kind='scatter', x='x', y='y', label=label, color=palette[label])
    pyplot.show()
def generate_dataset(examples_count, dataset_type = 'moons'):
    """Generate a toy 2-D binary dataset and split it into train/test sets.

    dataset_type -- 'moons' (default), 'circles' or 'blobs'
    Returns (X_training, X_test, Y_training, Y_test); X has shape
    (features, samples) and Y has shape (1, samples).
    """
    if dataset_type == 'moons':
        X, y = make_moons(n_samples=examples_count, noise=0.1)
    elif dataset_type == 'circles':
        X, y = make_circles(n_samples=examples_count, noise=0.05)
    elif dataset_type == 'blobs':
        X, y = make_blobs(n_samples=examples_count, centers=2, n_features=2)
    samples = X.T
    labels = np.reshape(y, (1, y.shape[0]))
    cut = samples.shape[1] * SPLIT_RATIO // 100
    order = np.random.permutation(samples.shape[1])
    train_idx, test_idx = order[:cut], order[cut:]
    return samples[:, train_idx], samples[:, test_idx], labels[:, train_idx], labels[:, test_idx]
#Steps to classify
#1.Generate dataset using generate_dataset or load_images
#X, X_test, Y, Y_test = generate_dataset(10000, 'circles')
#2.Set the layer sizes and activations
#activations = ['relu', 'relu', 'relu', 'sigmoid']
#dimensions = [2, 20,5,7, 1]
#dimensions[0] should be the number of input features
#dimensions[-1] should be 1 to signify the output layer
#3.Train your model with tuned hyperparameters: learning rate, number of iterations, activations, dimensions, lambda, dropout probabilities and initializer
#model = train(X, Y, X_test, Y_test, 0.006, 10000, 500, activations, dimensions, initializer = 'Xavier')
#4. Plot changing cost, original dataset and predicted labels
#plot_cost(model['Costs'])
#plot_dataset(X, Y)
#plot_dataset(X, model['Training Predictions'])
#print the test and training accuracy
#print(model['Training Accuracy'])
#print(model['Test Accuracy'])
|
{"hexsha": "62c2dae24f04d3ef6ecf32e0fdb29f12634e8885", "size": 12612, "ext": "py", "lang": "Python", "max_stars_repo_path": "NeuralNet.py", "max_stars_repo_name": "monkeysforever/Neural-Net", "max_stars_repo_head_hexsha": "3bb50d97451691b21c4ade14b726cf254a135649", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "NeuralNet.py", "max_issues_repo_name": "monkeysforever/Neural-Net", "max_issues_repo_head_hexsha": "3bb50d97451691b21c4ade14b726cf254a135649", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "NeuralNet.py", "max_forks_repo_name": "monkeysforever/Neural-Net", "max_forks_repo_head_hexsha": "3bb50d97451691b21c4ade14b726cf254a135649", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.4896551724, "max_line_length": 193, "alphanum_fraction": 0.6687281954, "include": true, "reason": "import numpy", "num_tokens": 3194}
|
"""
Interface to libpq - which interfaces with PostgreSQL's backend server
All functions should be considered unsafe (will segfault with bad pointers.)
Also, pointers's need their memory freed by calling the right PQ* functions.
"""
# Generate a thin Julia wrapper around a C function:
# `@c RetType fname (ArgTypes...) lib` defines `fname(a1, a2, ...)` that
# ccalls the symbol `fname` in `lib` with the stated return/argument types.
macro c(ret_type, func, arg_types, lib)
    # One synthetic argument name (:a1, :a2, ...) per entry in the type tuple.
    local args_in = Any[ symbol(string('a',x)) for x in 1:length(arg_types.args) ]
    quote
        $(esc(func))($(args_in...)) = ccall( ($(string(func)), $(Expr(:quote, lib)) ),
            $ret_type, $arg_types, $(args_in...) )
    end
end
# Opaque libpq handle types — only ever used behind a Ptr.
abstract PGconn
abstract PGresult
abstract PGcancel
typealias ResultPtr Nullable{Ptr{Libpq.PGresult}}
typealias ConnPtr Nullable{Ptr{Libpq.PGconn}}
typealias Oid UInt32
typealias ConnStatusType UInt32
# Map libpq's ConnStatusType enum values to descriptive symbols.
# (The `const NAME = n` inside each key also defines the numeric constant.)
const connection_status = Dict(
    (const CONNECTION_OK = 0) => :ok,
    (const CONNECTION_BAD = 1) => :bad,
    (const CONNECTION_STARTED = 2) => :started,
    (const CONNECTION_MADE = 3) => :made,
    (const CONNECTION_AWAITING_RESPONSE = 4) => :awaiting_response,
    (const CONNECTION_AUTH_OK = 5) => :auth_ok,
    (const CONNECTION_SETENV = 6) => :setenv,
    (const CONNECTION_SSL_STARTUP = 7) => :ssl_startup,
    (const CONNECTION_NEEDED = 8) => :needed,
    )
typealias ExecStatusType UInt32
# Map libpq's ExecStatusType enum values (returned by PQresultStatus) to symbols.
const exec_status = Dict(
    (const PGRES_EMPTY_QUERY = 0) => :empty_query,
    (const PGRES_COMMAND_OK = 1) => :command_ok,
    (const PGRES_TUPLES_OK = 2) => :tuples_ok,
    (const PGRES_COPY_OUT = 3) => :copy_out,
    (const PGRES_COPY_IN = 4) => :copy_in,
    (const PGRES_BAD_RESPONSE = 5) => :bad_response,
    (const PGRES_NONFATAL_ERROR = 6) => :nonfatal_error,
    (const PGRES_FATAL_ERROR = 7) => :fatal_error,
    (const PGRES_COPY_BOTH = 8) => :copy_both,
    (const PGRES_SINGLE_TUPLE = 9) => :single_tuple,
    )
# Single-character field codes accepted by PQresultErrorField, keyed by a
# readable symbol.
const error_field = Dict(
    :severity => 'S', # ERROR, WARNING etc.
    :sqlstate => 'C', # see apendix A in the pg man
    :message_primary => 'M',
    :message_detail => 'D',
    :message_hint => 'H',
    :statement_position => 'P',
    :internal_position => 'p',
    :internal_query => 'q',
    :context => 'W',
    :schema_name => 's',
    :table_name => 't',
    :column_name => 'c',
    :datatype_name => 'd',
    :constraint_name => 'n',
    :source_file => 'F',
    :source_line => 'L',
    :source_function => 'R',
    )
# Leading two characters of a SQLSTATE code mapped to its error-class symbol.
const error_state = Dict(
    "00" => :successful_completion,
    "01" => :warning,
    "02" => :no_data,
    "03" => :sql_statement_not_yet_complete,
    "08" => :connection_exception,
    "09" => :triggered_action_exception,
    "0A" => :feature_not_supported,
    "0B" => :invalid_transaction_initiation,
    "0F" => :locator_exception,
    "0L" => :invalid_grantor,
    "0P" => :invalid_role_specification,
    "0Z" => :diagnostics_exception,
    "20" => :case_not_found,
    "21" => :cardinality_violation,
    "22" => :data_exception,
    "23" => :integrity_constraint_violation,
    "24" => :invalid_cursor_state,
    "25" => :invalid_transaction_state,
    "26" => :invalid_sql_statement_name,
    "27" => :triggered_data_change_violation,
    "28" => :invalid_authorization_specification,
    "2B" => :dependent_privilege_descriptors_still_exist,
    "2D" => :invalid_transaction_termination,
    "2F" => :sql_routine_exception,
    "34" => :invalid_cursor_name,
    "38" => :external_routine_exception,
    "39" => :external_routine_invocation_exception,
    "3B" => :savepoint_exception,
    "3D" => :invalid_catalog_name,
    "3F" => :invalid_schema_name,
    "40" => :transaction_rollback,
    "42" => :syntax_error_or_access_rule_violation,
    "44" => :with_check_option_violation,
    "53" => :insufficient_resources,
    "54" => :program_limit_exceeded,
    "55" => :object_not_in_prerequisite_state,
    "57" => :operator_intervention,
    "58" => :system_error,
    "F0" => :configuration_file_error,
    "HV" => :foreign_data_wrapper_error,
    "P0" => :plpgsql_error,
    "XX" => :internal_error,
    )
#### CONNECTIONS
# Each @c line declares a typed ccall wrapper named after the libpq C
# function it binds; see the `c` macro for how the wrapper is generated.
@c Ptr{PGconn} PQsetdbLogin (Ptr{UInt8}, Ptr{UInt8}, Ptr{UInt8}, Ptr{UInt8},
    Ptr{UInt8}, Ptr{UInt8}, Ptr{UInt8}) libpq
@c Ptr{PGconn} PQconnectdb (Ptr{UInt8},) libpq
@c Void PQfinish (Ptr{PGconn},) libpq
@c Ptr{UInt8} PQerrorMessage (Ptr{PGconn},) libpq
@c ExecStatusType PQresultStatus (Ptr{PGresult},) libpq
@c ConnStatusType PQstatus (Ptr{PGconn},) libpq
@c Ptr{UInt8} PQresultErrorMessage (Ptr{PGresult},) libpq
@c Ptr{UInt8} PQresultErrorField (Ptr{PGresult}, Cint) libpq
@c Void PQsetNoticeReceiver (Ptr{PGconn}, Ptr{Void}, Ptr{Void}) libpq
#### EXEC COMMANDS
@c Ptr{PGresult} PQexec (Ptr{PGconn}, Ptr{UInt8}) libpq
@c Ptr{PGresult} PQgetResult (Ptr{PGconn},) libpq # for the end of a copy command
@c Cint PQputCopyData (Ptr{PGconn}, Ptr{UInt8}, Cint) libpq
@c Cint PQputCopyEnd (Ptr{PGconn}, Ptr{UInt8}) libpq
#@c Cint PQgetCopyData (Ptr{PGconn}, Ptr{Ptr{UInt8}}, Cint) libpq
#### Results
@c Ptr{UInt8} PQgetvalue (Ptr{PGresult}, Cint, Cint) libpq
@c Cint PQgetisnull (Ptr{PGresult}, Cint, Cint) libpq
@c Void PQclear (Ptr{PGresult},) libpq
# result fields
@c Cint PQntuples (Ptr{PGresult},) libpq
@c Cint PQnfields (Ptr{PGresult},) libpq
@c Cint PQbinaryTuples (Ptr{PGresult},) libpq
@c Ptr{UInt8} PQfname (Ptr{PGresult}, Cint) libpq
@c Cint PQfnumber (Ptr{PGresult}, Ptr{UInt8}) libpq
@c Oid PQftable (Ptr{PGresult}, Cint) libpq
@c Cint PQftablecol (Ptr{PGresult}, Cint) libpq
@c Oid PQftype (Ptr{PGresult}, Cint) libpq
#for update insert etc...
@c Ptr{UInt8} PQcmdStatus (Ptr{PGresult},) libpq
@c Ptr{UInt8} PQcmdTuples (Ptr{PGresult},) libpq
#### Escaping
@c Void PQfreemem (Ptr{Void},) libpq
@c Ptr{UInt8} PQescapeLiteral (Ptr{PGconn}, Ptr{UInt8}, Cint) libpq
#@c Ptr{UInt8} PQescapeIdentifier (Ptr{PGconn}, Ptr{UInt8}, Cint) libpq
#### Canceling
@c Ptr{PGcancel} PQgetCancel (Ptr{PGconn},) libpq
@c Ptr{Void} PQfreeCancel (Ptr{PGcancel}, ) libpq
@c Cint PQcancel (Ptr{PGcancel}, Ptr{UInt8}, Cint) libpq
#### Misc
@c Cint PQprotocolVersion (Ptr{PGconn},) libpq
@c Cint PQserverVersion (Ptr{PGconn},) libpq
@c Cint PQlibVersion (Ptr{Void},) libpq
#@c Void PQreset (Ptr{PGconn},) libpq
#@c PGTransactionStatusType PQtransactionStatus (Ptr{PGconn},) libpq
#@c Ptr{Cuchar} PQescapeByteaConn (Ptr{PGconn}, Ptr{Cuchar}, Cint, Ptr{Cint}) libpq
#@c Ptr{Cuchar} PQunescapeBytea (Ptr{Cuchar}, Ptr{Cint}) libpq
#@c Ptr{Cuchar} PQescapeBytea (Ptr{Cuchar}, Cint, Ptr{Cint}) libpq
# return query as vector of vectors with all data types as strings
function bootstrap_query(ptr::Ptr{PGconn}, query::AbstractString)
    # Execute `query` and return every row as a Vector of UTF8 strings,
    # with no type conversion (all column values come back as text).
    result = PQexec(ptr, query)
    nrows = PQntuples(result)
    ncols = PQnfields(result)
    rows = Vector{Vector{UTF8String}}(nrows)
    for r in 1:nrows
        # libpq row/column indices are zero-based.
        rows[r] = UTF8String[utf8(PQgetvalue(result, r-1, c-1)) for c in 1:ncols]
    end
    PQclear(result)
    return rows
end
# not sure how to test
function interuptable_exec(ptr::Ptr{PGconn}, query::AbstractString)
    # Execute `query`, cancelling the server-side statement if the user
    # interrupts (Ctrl-C).
    try
        PQexec(ptr, query)
    catch err
        # Bug fix: the original wrote `catch InteruptException`, which merely
        # *names* the caught value — Julia's catch clause does not filter by
        # type — so every exception was swallowed as if it were an interrupt.
        # Filter explicitly and rethrow anything else.
        isa(err, InterruptException) || rethrow()
        cancel = PQgetCancel(ptr)
        msg = Array(UInt8, 256)
        status = PQcancel(cancel, msg, sizeof(msg))
        if status != 1
            error("cancel failed: $(bytestring(msg))")
        end
        PQfreeCancel(cancel)
        info("canceling statement due to user request")
        nothing
    end
end
|
{"hexsha": "39fbce66a0bfcec48130aac92f92d26787d9edf1", "size": 7895, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/libpq.jl", "max_stars_repo_name": "NCarson/Postgres", "max_stars_repo_head_hexsha": "5e263421df530a9d064451eb1ec6690b8f6c5985", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2016-02-20T22:25:44.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-11T06:09:46.000Z", "max_issues_repo_path": "src/libpq.jl", "max_issues_repo_name": "NCarson/Postgres", "max_issues_repo_head_hexsha": "5e263421df530a9d064451eb1ec6690b8f6c5985", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2016-02-19T19:57:19.000Z", "max_issues_repo_issues_event_max_datetime": "2016-05-03T23:48:30.000Z", "max_forks_repo_path": "src/libpq.jl", "max_forks_repo_name": "NCarson/Postgres", "max_forks_repo_head_hexsha": "5e263421df530a9d064451eb1ec6690b8f6c5985", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-10-01T18:46:07.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-01T18:46:07.000Z", "avg_line_length": 39.0841584158, "max_line_length": 86, "alphanum_fraction": 0.6154528182, "num_tokens": 2337}
|
# GMP version being packaged.
version = v"6.2.1"
# Shared GMP recipe: configure(version) supplies sources, script, platforms
# and products for build_tarballs below.
include("../common.jl")
# Build the tarballs
build_tarballs(ARGS, configure(version)...;
    preferred_gcc_version=v"6", julia_compat="1.7")
|
{"hexsha": "adba59203bb9d9dd3fd1a9147251a32462125853", "size": 174, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "G/GMP/GMP@6.2.1/build_tarballs.jl", "max_stars_repo_name": "waralex/Yggdrasil", "max_stars_repo_head_hexsha": "bba5443f75b221c6973d479e2c6727cf0ae3a0b3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "G/GMP/GMP@6.2.1/build_tarballs.jl", "max_issues_repo_name": "waralex/Yggdrasil", "max_issues_repo_head_hexsha": "bba5443f75b221c6973d479e2c6727cf0ae3a0b3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "G/GMP/GMP@6.2.1/build_tarballs.jl", "max_forks_repo_name": "waralex/Yggdrasil", "max_forks_repo_head_hexsha": "bba5443f75b221c6973d479e2c6727cf0ae3a0b3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-24T15:29:42.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-24T15:29:42.000Z", "avg_line_length": 19.3333333333, "max_line_length": 62, "alphanum_fraction": 0.6494252874, "num_tokens": 49}
|
import numpy as np
import torch
import torch.nn as nn
import torchvision
import os, sys
import copy
import time
import random
import ipdb
from tqdm import tqdm
import argparse
import network
sys.path.insert(0, "..")
from gflownet import get_GFlowNet
import utils_data
def makedirs(path):
    """Create *path* (including parents) if needed, logging what happened.

    Uses EAFP instead of the original exists()-then-create sequence, which
    could race with another process creating the directory in between the
    check and the os.makedirs call. Log messages are unchanged.
    """
    try:
        os.makedirs(path)
        print('creating dir: {}'.format(path))
    except FileExistsError:
        print(path, "already exist!")
class EBM(nn.Module):
    """Energy-based model: a network score plus an optional Bernoulli base.

    When *mean* is given, the log-probability of x under an independent
    Bernoulli(mean) base distribution is added to the network's output;
    otherwise the model score is the network output alone.
    """
    def __init__(self, net, mean=None):
        super().__init__()
        self.net = net
        if mean is None:
            self.mean = None
        else:
            # Frozen parameter so it moves with .to(device) but never trains.
            self.mean = nn.Parameter(mean, requires_grad=False)
            self.base_dist = torch.distributions.Bernoulli(probs=self.mean)

    def forward(self, x):
        score = self.net(x).squeeze()
        if self.mean is None:
            return score
        return score + self.base_dist.log_prob(x).sum(-1)
if __name__ == "__main__":
    # Command-line configuration for EBM + GFlowNet training.
    parser = argparse.ArgumentParser()
    # GPU index; a negative value selects CPU (see device selection below).
    parser.add_argument("--device", "--d", default=0, type=int)
    # data
    parser.add_argument('--save_dir', type=str, default="./")
    parser.add_argument('--data', type=str, default='dmnist')
    parser.add_argument("--down_sample", "--ds", default=0, type=int, choices=[0, 1])
    parser.add_argument('--ckpt_path', type=str, default=None)
    # models
    parser.add_argument('--model', type=str, default='mlp-256')
    parser.add_argument('--base_dist', "--bd", type=int, default=1, choices=[0, 1])
    parser.add_argument('--gradnorm', "--gn", type=float, default=0.0)
    parser.add_argument('--l2', type=float, default=0.0)
    # lambda-typed so scientific notation like 5e4 parses to an int
    parser.add_argument('--n_iters', "--ni", type=lambda x: int(float(x)), default=5e4)
    parser.add_argument('--batch_size', "--bs", type=int, default=100)
    parser.add_argument('--test_batch_size', type=int, default=100)
    parser.add_argument('--print_every', "--pe", type=int, default=100)
    parser.add_argument('--viz_every', "--ve", type=int, default=2000)
    parser.add_argument('--eval_every', type=int, default=2000)
    parser.add_argument('--lr', type=float, default=.0001)
    parser.add_argument("--ebm_every", "--ee", type=int, default=1, help="EBM training frequency")
    # for GFN
    parser.add_argument("--type", type=str)
    parser.add_argument("--hid", type=int, default=256)
    parser.add_argument("--hid_layers", "--hl", type=int, default=5)
    parser.add_argument("--leaky", type=int, default=1, choices=[0, 1])
    parser.add_argument("--gfn_bn", "--gbn", type=int, default=0, choices=[0, 1])
    parser.add_argument("--init_zero", "--iz", type=int, default=0, choices=[0, 1])
    parser.add_argument("--gmodel", "--gm", type=str, default="mlp")
    parser.add_argument("--train_steps", "--ts", type=int, default=1)
    parser.add_argument("--l1loss", "--l1l", type=int, default=0, choices=[0, 1], help="use soft l1 loss instead of l2")
    parser.add_argument("--with_mh", "--wm", type=int, default=0, choices=[0, 1])
    parser.add_argument("--rand_k", "--rk", type=int, default=0, choices=[0, 1])
    parser.add_argument("--lin_k", "--lk", type=int, default=0, choices=[0, 1])
    parser.add_argument("--warmup_k", "--wk", type=lambda x: int(float(x)), default=0, help="need to use w/ lin_k")
    parser.add_argument("--K", type=int, default=-1, help="for gfn back forth negative sample generation")
    parser.add_argument("--rand_coef", "--rc", type=float, default=0, help="for tb")
    parser.add_argument("--back_ratio", "--br", type=float, default=0.)
    parser.add_argument("--clip", type=float, default=-1., help="for gfn's linf gradient clipping")
    parser.add_argument("--temp", type=float, default=1)
    parser.add_argument("--opt", type=str, default="adam", choices=["adam", "sgd"])
    parser.add_argument("--glr", type=float, default=1e-3)
    parser.add_argument("--zlr", type=float, default=1e-1)
    parser.add_argument("--momentum", "--mom", type=float, default=0.0)
    parser.add_argument("--gfn_weight_decay", "--gwd", type=float, default=0.0)
    parser.add_argument('--mc_num', "--mcn", type=int, default=5)
    args = parser.parse_args()
    # ---- Device, output dir, data and model construction ----
    os.environ['CUDA_VISIBLE_DEVICES'] = "{:}".format(args.device)
    # Negative device index means CPU; otherwise the (masked) CUDA device.
    device = torch.device("cpu") if args.device < 0 else torch.device("cuda")
    args.device = device
    args.save_dir = os.path.join(args.save_dir, "test")
    makedirs(args.save_dir)
    print("Device:" + str(device))
    print("Args:" + str(args))
    before_load = time.time()
    train_loader, val_loader, test_loader, args = utils_data.load_dataset(args)
    # Helper to save a batch of samples as an image grid at path p.
    plot = lambda p, x: torchvision.utils.save_image(x.view(x.size(0), args.input_size[0],
                                                     args.input_size[1], args.input_size[2]), p, normalize=True, nrow=int(x.size(0) ** .5))
    print(f"It takes {time.time() - before_load:.3f}s to load {args.data} dataset.")
    def preprocess(data):
        # Re-binarize each batch stochastically when the dataset asks for it.
        if args.dynamic_binarization:
            return torch.bernoulli(data)
        else:
            return data
    if args.down_sample:
        assert args.model.startswith("mlp-")
    # Model spec is "<family>-<width>", e.g. mlp-256.
    if args.model.startswith("mlp-"):
        nint = int(args.model.split('-')[1])
        net = network.mlp_ebm(np.prod(args.input_size), nint)
    elif args.model.startswith("cnn-"):
        nint = int(args.model.split('-')[1])
        net = network.MNISTConvNet(nint)
    elif args.model.startswith("resnet-"):
        nint = int(args.model.split('-')[1])
        net = network.ResNetEBM(nint)
    else:
        raise ValueError("invalid model definition")
    # Per-pixel training mean (clamped away from 0/1 by eps) for the
    # Bernoulli base distribution of the EBM.
    init_batch = []
    for x, _ in train_loader:
        init_batch.append(preprocess(x))
    init_batch = torch.cat(init_batch, 0)
    eps = 1e-2
    init_mean = init_batch.mean(0) * (1. - 2 * eps) + eps
    if args.base_dist:
        model = EBM(net, init_mean)
    else:
        model = EBM(net)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    xdim = np.prod(args.input_size)
    assert args.gmodel == "mlp"
    gfn = get_GFlowNet(args.type, xdim, args, device)
    model.to(device)
    print("model: {:}".format(model))
    # ---- Joint training loop: GFlowNet sampler + EBM updates ----
    itr = 0
    while itr < args.n_iters:
        for x in train_loader:
            st = time.time()
            x = preprocess(x[0].to(device))  # -> (bs, 784)
            if args.gradnorm > 0:
                # Needed so the gradient-norm penalty can differentiate w.r.t. x.
                x.requires_grad_()
            update_success_rate = -1.
            assert "tb" in args.type
            # Train the GFlowNet against the current (frozen) EBM score.
            train_loss, train_logZ = gfn.train(args.batch_size, scorer=lambda inp: model(inp).detach(),
                                               silent=itr % args.print_every != 0, data=x, back_ratio=args.back_ratio)
            if args.rand_k or args.lin_k or (args.K > 0):
                # Negative samples via K back-and-forth steps from real data;
                # K is random, linearly warmed up, or fixed.
                if args.rand_k:
                    K = random.randrange(xdim) + 1
                elif args.lin_k:
                    K = min(xdim, int(xdim * float(itr + 1) / args.warmup_k))
                    K = max(K, 1)
                elif args.K > 0:
                    K = args.K
                else:
                    raise ValueError
                gfn.model.eval()
                x_fake, delta_logp_traj = gfn.backforth_sample(x, K)
                delta_logp_traj = delta_logp_traj.detach()
                if args.with_mh:
                    # MH step, calculate log p(x') - log p(x)
                    lp_update = model(x_fake).squeeze() - model(x).squeeze()
                    update_dist = torch.distributions.Bernoulli(logits=lp_update + delta_logp_traj)
                    updates = update_dist.sample()
                    # Keep the proposal where accepted, the original otherwise.
                    x_fake = x_fake * updates[:, None] + x * (1. - updates[:, None])
                    update_success_rate = updates.mean().item()
            else:
                # Otherwise draw negatives directly from the GFlowNet.
                x_fake = gfn.sample(args.batch_size)
            if itr % args.ebm_every == 0:
                # Contrastive EBM update: raise log p on data, lower it on fakes.
                st = time.time() - st
                model.train()
                logp_real = model(x).squeeze()
                if args.gradnorm > 0:
                    grad_ld = torch.autograd.grad(logp_real.sum(), x,
                                                  create_graph=True)[0].flatten(start_dim=1).norm(2, 1)
                    grad_reg = (grad_ld ** 2. / 2.).mean()
                else:
                    grad_reg = torch.tensor(0.).to(device)
                logp_fake = model(x_fake).squeeze()
                obj = logp_real.mean() - logp_fake.mean()
                l2_reg = (logp_real ** 2.).mean() + (logp_fake ** 2.).mean()
                loss = -obj + grad_reg * args.gradnorm + args.l2 * l2_reg
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            if itr % args.print_every == 0 or itr == args.n_iters - 1:
                print("({:5d}) | ({:.3f}s/iter) |log p(real)={:.2e}, "
                      "log p(fake)={:.2e}, diff={:.2e}, grad_reg={:.2e}, l2_reg={:.2e} update_rate={:.1f}".format(itr, st,
                       logp_real.mean().item(), logp_fake.mean().item(), obj.item(), grad_reg.item(), l2_reg.item(), update_success_rate))
            if (itr + 1) % args.eval_every == 0:
                # Periodic evaluation + checkpointing of both models.
                model.eval()
                print("GFN TEST")
                gfn.model.eval()
                gfn_test_ll = gfn.evaluate(test_loader, preprocess, args.mc_num)
                print("GFN Test log-likelihood ({}) with {} samples: {}".format(itr, args.mc_num, gfn_test_ll.item()))
                model.cpu()
                d = {}
                d['model'] = model.state_dict()
                d['optimizer'] = optimizer.state_dict()
                gfn_ckpt = {"model": gfn.model.state_dict(), "optimizer": gfn.optimizer.state_dict(),}
                gfn_ckpt["logZ"] = gfn.logZ.detach().cpu()
                torch.save(d, "{}/ckpt.pt".format(args.save_dir))
                torch.save(gfn_ckpt, "{}/gfn_ckpt.pt".format(args.save_dir))
                model.to(device)
            itr += 1
            if itr > args.n_iters:
                print("Training finished!")
                quit(0)
|
{"hexsha": "1e7ca752417e23afc4023665610f6b01aa0ea05a", "size": 9970, "ext": "py", "lang": "Python", "max_stars_repo_path": "deepebm/ebm.py", "max_stars_repo_name": "mlaugharn/EB_GFN", "max_stars_repo_head_hexsha": "2d20b5d37edb9c50e0bc0fb7feedbc390ddfefd7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "deepebm/ebm.py", "max_issues_repo_name": "mlaugharn/EB_GFN", "max_issues_repo_head_hexsha": "2d20b5d37edb9c50e0bc0fb7feedbc390ddfefd7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "deepebm/ebm.py", "max_forks_repo_name": "mlaugharn/EB_GFN", "max_forks_repo_head_hexsha": "2d20b5d37edb9c50e0bc0fb7feedbc390ddfefd7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.1983471074, "max_line_length": 136, "alphanum_fraction": 0.576328987, "include": true, "reason": "import numpy", "num_tokens": 2525}
|
import numpy as np
class GeneratedImageHook:
    """PyTorch forward-pass hook that records every n-th module output.

    On registration the hook attaches to ``module``; each forward pass it
    captures the first item of the output batch as a NumPy array. Every
    ``every_n``-th capture is appended to ``generated_images``, and the most
    recent capture is always available as ``last_image``.
    """

    def __init__(self, module, every_n=10):
        self.generated_images = []
        # Counter starts at 1, so the first pass only saves when every_n == 1.
        self.count = 1
        self.every_n = every_n
        self.last_image = None
        self.hook = module.register_forward_hook(self.save_generated_image)

    def save_generated_image(self, module, input, output):
        """Forward-hook callback: stash the first image of the output batch."""
        image = output.detach().cpu().numpy()[0]
        save_now = self.count % self.every_n == 0
        if save_now:
            self.generated_images.append(image)
            # Restart the cycle after each saved image.
            self.count = 0
        self.last_image = image
        self.count += 1

    def close(self):
        """Detach the hook from the module."""
        self.hook.remove()

    def get_images(self):
        """Return the list of images saved so far."""
        return self.generated_images
|
{"hexsha": "ce6e3367bed5cb42337180dc0c70c8694fd6a73a", "size": 725, "ext": "py", "lang": "Python", "max_stars_repo_path": "pytorch_stylegan_encoder/utilities/hooks.py", "max_stars_repo_name": "CSID-DGU/-2020-1-OSSP1-ninetynine-2", "max_stars_repo_head_hexsha": "b1824254882eeea0ee44e4e60896b72c51ef1d2c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-06-21T13:45:26.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-21T13:45:26.000Z", "max_issues_repo_path": "pytorch_stylegan_encoder/utilities/hooks.py", "max_issues_repo_name": "CSID-DGU/-2020-1-OSSP1-ninetynine-2", "max_issues_repo_head_hexsha": "b1824254882eeea0ee44e4e60896b72c51ef1d2c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pytorch_stylegan_encoder/utilities/hooks.py", "max_forks_repo_name": "CSID-DGU/-2020-1-OSSP1-ninetynine-2", "max_forks_repo_head_hexsha": "b1824254882eeea0ee44e4e60896b72c51ef1d2c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-09-02T03:18:45.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-27T08:24:05.000Z", "avg_line_length": 25.8928571429, "max_line_length": 75, "alphanum_fraction": 0.6248275862, "include": true, "reason": "import numpy", "num_tokens": 163}
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import matplotlib.pyplot as plt
import numpy as np

# Sample the identity function on [0, 2*pi] and plot it.
theta = np.linspace(0, 2 * np.pi, 1000)
fig, axis = plt.subplots()
axis.plot(theta, theta)
axis.set_xlabel('x')
axis.set_ylabel('y')
|
{"hexsha": "0fce85554b859a904edc68207169f7370f54cf3e", "size": 239, "ext": "py", "lang": "Python", "max_stars_repo_path": "python_plot.py", "max_stars_repo_name": "njhung/NU_REU_git_njh", "max_stars_repo_head_hexsha": "8327746797a05bdecc052d1d825f0cc903149025", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python_plot.py", "max_issues_repo_name": "njhung/NU_REU_git_njh", "max_issues_repo_head_hexsha": "8327746797a05bdecc052d1d825f0cc903149025", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python_plot.py", "max_forks_repo_name": "njhung/NU_REU_git_njh", "max_forks_repo_head_hexsha": "8327746797a05bdecc052d1d825f0cc903149025", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-07-08T19:15:13.000Z", "max_forks_repo_forks_event_max_datetime": "2019-07-08T19:15:13.000Z", "avg_line_length": 14.9375, "max_line_length": 32, "alphanum_fraction": 0.6652719665, "include": true, "reason": "import numpy", "num_tokens": 70}
|
import numpy as np
from pathlib import Path
import pickle
from cytoolz import identity
from .predictors import common
from ..log import logger
class Predictor(object):
    """
    Abstract predictor class which can manage scoring estimation via cross validation.

    Attributes:
        predictor (predictor object): underlying predictor object, e.g. CVModel
        scorer (callable): (X_test,Y_test,model)->score, a scoring function
        x_transform (callable): preprocessing transform on X data field
        y_transform (callable): preprocessing transform on Y data field
    """
    def __init__(self, predictor, scorer, x_transform=identity,
                 y_transform=identity):
        """
        Class to manage fitting and analysis of supervised models.

        Args:
            predictor (object): specific ./predictors/ class for predictive models
            scorer (callable): a function to compute a model score
            x_transform (optional; callable): function to apply to the
                independent variables before sending them through the predictor
            y_transform (optional; callable): function to apply to the
                dependent variables before sending them through the predictor
        Returns:
            Predictor
        """
        self.predictor = predictor
        self.scorer = scorer
        self.x_transform = x_transform
        self.y_transform = y_transform

    @classmethod
    def load(cls, filename):
        """
        Create a predictor from a saved object.

        NOTE: uses pickle -- only load files from trusted sources.

        Args:
            filename (str)
        Returns:
            Predictor
        """
        with open(filename, 'rb') as f:
            return pickle.load(f)

    def save(self, filepath, overwrite_existing=False):
        """
        Save the predictor object for future use at the given filepath.

        Args:
            filepath (string): absolute filepath to save file
            overwrite_existing (bool): whether or not to overwrite existing modelfile
        Raises:
            FileExistsError: if the file exists and overwrite_existing is False
        Returns:
            None
        """
        path = Path(filepath)
        # An `assert` is stripped under `python -O`, so the overwrite guard
        # must be an explicit check + raise.
        if path.exists() and not overwrite_existing:
            raise FileExistsError("Must allow overwriting existing files")
        with open(filepath, 'wb') as f:
            pickle.dump(self, f)

    def evaluate(self, data, labels):
        """
        Evaluate the predictor.

        Args:
            data (numpy.ndarray): the matrix of features (n_samples, n_features)
            labels (numpy.ndarray): the vector of labels (n_samples,)
        Returns:
            score (float)
        """
        X = self.x_transform(data)
        Y = self.y_transform(labels)
        return self.scorer(self.predictor, X, Y)

    def predict(self, data):
        """
        Predict labels.

        Args:
            data (numpy.ndarray): the matrix of features (n_samples, n_features)
        Returns:
            predictions (numpy.ndarray): the vector of predictions (nsamples,)
        """
        X = self.x_transform(data)
        return self.predictor.predict(X)

    def fit(self, data, labels, nfolds=5, stratified=False, **fit_kwargs):
        """
        Fit the predictor.

        Args:
            data (numpy.ndarray): the matrix of features (n_samples, n_features)
            labels (numpy.ndarray): the vector of labels (n_samples,)
            nfolds (int): number of data folds to use in predictor.fit
            stratified (bool): whether or not to use stratified KFold or not
            fit_kwargs (kwargs): keyword arguments to pass to predictor.fit
        Returns:
            None
        """
        X = self.x_transform(data)
        Y = self.y_transform(labels)
        self.predictor.fit(X, Y, scorer=self.scorer,
                           nfolds=nfolds, stratified=stratified, **fit_kwargs)

    def cross_validate(self, data, labels, outer_folds=5, inner_folds=5,
                       stratified=False, **fit_kwargs):
        """
        Estimate the performance of the predictor using cross-validation.

        Args:
            data (numpy.ndarray): the matrix of features (n_samples, n_features)
            labels (numpy.ndarray): the vector of labels
            outer_folds (optional; int): the number of folds to use for score assessment
            inner_folds (optional; int): the number of folds to use for hyperparameter
                selection
            stratified (bool): whether to use sklearn's KFold or StratifiedKFold
                for folding data in cross-validation loops
            fit_kwargs (kwargs): keyword arguments to pass to predictor.fit
        Returns:
            errors (numpy.ndarray)
        """
        errors = np.zeros((outer_folds,))
        folds = common.create_cv_folds(outer_folds, stratified)
        logger.predictor("Cross validating with {} outer folds, and {} "
                         "inner folds".format(outer_folds, inner_folds))
        # enumerate replaces the original manually-incremented counter.
        for i, (train_index, test_index) in enumerate(folds.split(data, labels)):
            X_train, X_test = data[train_index], data[test_index]
            y_train, y_test = labels[train_index], labels[test_index]
            self.fit(X_train, y_train, nfolds=inner_folds, stratified=stratified,
                     **fit_kwargs)
            errors[i] = self.evaluate(X_test, y_test)
            # Fold numbers are reported 1-based, matching the original output.
            logger.predictor("Fold {} error: {}".format(i + 1, errors[i]))
        return errors
|
{"hexsha": "65f712b7406e2a6a82f05ed5c7dc67fa89a41e54", "size": 5403, "ext": "py", "lang": "Python", "max_stars_repo_path": "representation_learning_for_transcriptomics/supervised/predictor.py", "max_stars_repo_name": "unlearnai/representation_learning_for_transcriptomics", "max_stars_repo_head_hexsha": "66e7a31471ca3ded5d46945d34c74bad8f22afbf", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-03-30T00:36:11.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-07T18:43:27.000Z", "max_issues_repo_path": "representation_learning_for_transcriptomics/supervised/predictor.py", "max_issues_repo_name": "unlearnai/representation_learning_for_transcriptomics", "max_issues_repo_head_hexsha": "66e7a31471ca3ded5d46945d34c74bad8f22afbf", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "representation_learning_for_transcriptomics/supervised/predictor.py", "max_forks_repo_name": "unlearnai/representation_learning_for_transcriptomics", "max_forks_repo_head_hexsha": "66e7a31471ca3ded5d46945d34c74bad8f22afbf", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.5590062112, "max_line_length": 95, "alphanum_fraction": 0.6074403109, "include": true, "reason": "import numpy", "num_tokens": 1101}
|
import random

import cv2
import numpy as np
import torch
from torch.nn.functional import interpolate

from utils.heatmap_tools import calc_gaussian_2d, heatmap2quad

from ..base_internode import BaseInternode
__all__ = ['CalcAffinityQuad', 'CalcHeatmapByQuad', 'RandomCropSequence']
class CalcAffinityQuad(BaseInternode):
    """Compute affinity quads linking each pair of consecutive quads.

    For every consecutive pair of quads in ``data_dict['quad']`` an affinity
    quad is built from the centers of the triangles formed by each quad's
    edges and its center point (CRAFT-style affinity boxes).
    """

    def __call__(self, data_dict):
        """Add ``data_dict['quad_affinity']``: float32 array of shape (n-1, 4, 2)."""
        n = len(data_dict['quad'])
        if n < 2:
            # Fewer than two quads: no neighbouring pairs, so no affinities.
            data_dict['quad_affinity'] = np.zeros([0, 4, 2], dtype=np.float32)
        else:
            affinities = []
            for i in range(1, n):
                bbox_1 = data_dict['quad'][i - 1]
                bbox_2 = data_dict['quad'][i]
                # Topmost/bottommost triangle centers of the left and right quads
                # become the four corners of the affinity quad.
                tl, bl = self.get_centers_of_triangles(bbox_1)
                tr, br = self.get_centers_of_triangles(bbox_2)
                affinities.append(np.array([tl, tr, br, bl]))
            data_dict['quad_affinity'] = np.array(affinities, dtype=np.float32)
        return data_dict

    def get_centers_of_triangles(self, quad):
        """Return the topmost and bottommost centers of the triangles formed
        by each edge of ``quad`` and the quad's center point.

        "Topmost"/"bottommost" are chosen by the smallest/largest y coordinate.
        """
        center = np.mean(quad, axis=0)
        centers_of_triangles = []
        for i in range(len(quad)):
            p1 = quad[i]
            p2 = quad[(i + 1) % len(quad)]
            centers_of_triangles.append(np.mean([p1, p2, center], axis=0))
        centers_of_triangles = np.array(centers_of_triangles)
        top = np.argmin(centers_of_triangles[..., 1])
        bottom = np.argmax(centers_of_triangles[..., 1])
        return centers_of_triangles[top], centers_of_triangles[bottom]

    def __repr__(self):
        return 'CalcAffinityQuad()'

    def rper(self):
        return 'CalcAffinityQuad()'
class CalcHeatmapByQuad(BaseInternode):
    """Render a gaussian heatmap over the image by perspective-warping a
    canonical 2D gaussian template onto each quad (CRAFT-style score map)."""

    def __init__(self, thresh, ratio=1):
        # ratio: downscale factor between the image and the generated heatmap.
        self.ratio = ratio
        # Canonical 2D gaussian template shared by every quad.
        self.gaussian = calc_gaussian_2d(alpha=False)
        # Threshold defining the "useful" region of the gaussian template.
        self.threshold = thresh
        # Source quad (template coordinates) used for all perspective warps.
        self.box = self.get_box()
    def get_box(self):
        """Return the axis-aligned quad (tl, tr, br, bl order) bounding the
        region of the gaussian template with value >= self.threshold."""
        h, w = self.gaussian.shape
        binary = self.gaussian >= self.threshold
        x1 = np.min(np.nonzero(binary)[1])
        y1 = np.min(np.nonzero(binary)[0])
        # +1 makes the upper bound exclusive; clamp to the template size.
        x2 = min(np.max(np.nonzero(binary)[1]) + 1, w)
        y2 = min(np.max(np.nonzero(binary)[0]) + 1, h)
        return np.array([[x1, y1], [x2, y1], [x2, y2], [x1, y2]], dtype=np.float32)
    def calc_heatmap(self, h, w, quad):
        """Warp the gaussian template onto `quad` within an (h, w) image,
        producing a single-quad heatmap at 1/ratio resolution."""
        m = cv2.getPerspectiveTransform(self.box, quad / self.ratio)
        dst = cv2.warpPerspective(self.gaussian, m, (int(w / self.ratio), int(h / self.ratio)), borderValue=0, borderMode=cv2.BORDER_CONSTANT)
        return dst
    def __call__(self, data_dict):
        """Accumulate per-quad heatmaps into data_dict['heatmap'] (a torch
        tensor) by taking the element-wise maximum over all quads."""
        assert data_dict['quad'].shape[1] == 4
        _, h, w = data_dict['image'].shape
        heatmap = np.zeros((int(h / self.ratio), int(w / self.ratio)), dtype=np.float32)
        for quad in data_dict['quad']:
            heatmap_tmp = self.calc_heatmap(h, w, quad)
            # Keep the per-pixel maximum over all quads seen so far.
            heatmap = np.where(heatmap_tmp > heatmap, heatmap_tmp, heatmap)
        data_dict['heatmap'] = torch.from_numpy(heatmap)
        return data_dict
    def reverse(self, **kwargs):
        """Undo the 1/ratio downscaling.

        In training mode the heatmap is bilinearly upsampled back to image
        scale; otherwise quads are decoded from the heatmap and rescaled.
        The 'chbq' flag guards against applying this reversal twice.
        """
        if 'chbq' in kwargs and kwargs['chbq']:
            return kwargs
        if 'training' in kwargs.keys() and kwargs['training']:
            if 'heatmap' in kwargs.keys():
                if kwargs['heatmap'].dim() == 3:
                    # (n, h, w): add a batch axis for interpolate, drop it after.
                    kwargs['heatmap'] = kwargs['heatmap'].unsqueeze(0)
                    _, n, h, w = kwargs['heatmap'].shape
                    kwargs['heatmap'] = interpolate(kwargs['heatmap'], size=(int(h * self.ratio), int(w * self.ratio)), mode='bilinear', align_corners=False)
                    kwargs['heatmap'] = kwargs['heatmap'][0]
                else:
                    # (h, w): add batch and channel axes, drop both after.
                    kwargs['heatmap'] = kwargs['heatmap'].unsqueeze(0).unsqueeze(0)
                    _, n, h, w = kwargs['heatmap'].shape
                    kwargs['heatmap'] = interpolate(kwargs['heatmap'], size=(int(h * self.ratio), int(w * self.ratio)), mode='bilinear', align_corners=False)
                    kwargs['heatmap'] = kwargs['heatmap'][0][0]
            kwargs['chbq'] = True
        else:
            if 'heatmap' in kwargs.keys():
                heatmap = kwargs.pop('heatmap').detach().cpu().numpy()
                quad = heatmap2quad(heatmap, 1)
                # Quads were decoded at heatmap scale; map back to image scale.
                kwargs['quad'] = quad * self.ratio
                kwargs['chbq'] = True
        return kwargs
    def __repr__(self):
        return 'CalcHeatmapByQuad(ratio={}, thresh={})'.format(self.ratio, self.threshold)
    def rper(self):
        return 'CalcHeatmapByQuad(ratio={})'.format(1 / self.ratio)
class RandomCropSequence(BaseInternode):
    """Randomly crop the image down to a contiguous sub-sequence of its quads.

    With probability ``p`` (and only when more than one quad is present) a
    random contiguous run of quads is kept, the quads are shifted so the
    crop's top-left corner becomes the origin, and the image is cropped to
    the bounding box of the kept quads. ``data_dict['seq']`` (if present)
    is sliced to match.

    NOTE: requires the module-level ``import random`` (missing in the
    original file's import block).
    """

    def __init__(self, p=1):
        # Explicit raise instead of `assert`: asserts are stripped under -O.
        if not 0 < p <= 1:
            raise ValueError('p must be in (0, 1]')
        self.p = p

    def __call__(self, data_dict):
        assert isinstance(data_dict['quad'], np.ndarray)
        if random.random() < self.p and len(data_dict['quad']) > 1:
            # Pick a random contiguous run [left, left + length) of quads.
            length = random.randint(1, len(data_dict['quad']))
            left = random.randint(0, len(data_dict['quad']) - length)
            data_dict['quad'] = data_dict['quad'][left:left + length]
            # Bounding box of the kept quads in image coordinates.
            pts = data_dict['quad'].reshape(-1, 2)
            xmin = np.min(pts[:, 0])
            xmax = np.max(pts[:, 0])
            ymin = np.min(pts[:, 1])
            ymax = np.max(pts[:, 1])
            # Shift quads so the crop origin is (0, 0), then crop the image.
            # data_dict['image'] appears to be a PIL image here (exposes
            # crop((l, t, r, b))) -- TODO confirm against the pipeline.
            data_dict['quad'][..., 0] -= xmin
            data_dict['quad'][..., 1] -= ymin
            data_dict['image'] = data_dict['image'].crop((xmin, ymin, xmax, ymax))
            if 'seq' in data_dict.keys():
                data_dict['seq'] = data_dict['seq'][left:left + length]
        return data_dict

    def reverse(self, **kwargs):
        # Cropping is not invertible; signal downstream to skip all reversal.
        kwargs['jump'] = 'all'
        return kwargs

    def __repr__(self):
        return 'RandomCropSequence(p={})'.format(self.p)

    def rper(self):
        return 'RandomCropSequence(not available)'
|
{"hexsha": "e84e92b35a3aefd75116acaf3b715ca838b8624b", "size": 6738, "ext": "py", "lang": "Python", "max_stars_repo_path": "datasetsnx/bamboo/misc/craft.py", "max_stars_repo_name": "ckxy/part-of-hitogata", "max_stars_repo_head_hexsha": "76402d48a336fcd964d0e64bb01d959e8f07f296", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "datasetsnx/bamboo/misc/craft.py", "max_issues_repo_name": "ckxy/part-of-hitogata", "max_issues_repo_head_hexsha": "76402d48a336fcd964d0e64bb01d959e8f07f296", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "datasetsnx/bamboo/misc/craft.py", "max_forks_repo_name": "ckxy/part-of-hitogata", "max_forks_repo_head_hexsha": "76402d48a336fcd964d0e64bb01d959e8f07f296", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.4216216216, "max_line_length": 157, "alphanum_fraction": 0.5624814485, "include": true, "reason": "import numpy", "num_tokens": 1719}
|
import os.path as osp
from functools import partial
import mmcv
import numpy as np
import pytest
import torch
from mmdet import digit_version
from mmdet.models.dense_heads import RetinaHead, YOLOV3Head
from .utils import (WrapFunction, convert_result_list, ort_validate,
verify_model)
# Directory holding the pre-generated .pkl input fixtures for these tests.
data_path = osp.join(osp.dirname(__file__), 'data')
# The ONNX Runtime export path requires torch newer than 1.5.0; skip the
# whole test module on older versions.
if digit_version(torch.__version__) <= digit_version('1.5.0'):
    pytest.skip(
        'ort backend does not support version below 1.5.0',
        allow_module_level=True)
def retinanet_config():
    """Build a small RetinaNet head (eval mode, gradients frozen) for ONNX tests."""
    anchor_generator = dict(
        type='AnchorGenerator',
        octave_base_scale=4,
        scales_per_octave=3,
        ratios=[0.5, 1.0, 2.0],
        strides=[8, 16, 32, 64, 128])
    bbox_coder = dict(
        type='DeltaXYWHBBoxCoder',
        target_means=[.0, .0, .0, .0],
        target_stds=[1.0, 1.0, 1.0, 1.0])
    head_cfg = dict(
        stacked_convs=6,
        feat_channels=2,
        anchor_generator=anchor_generator,
        bbox_coder=bbox_coder)
    test_cfg = mmcv.Config(
        dict(
            deploy_nms_pre=1000,
            min_bbox_size=0,
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100))
    model = RetinaHead(
        num_classes=4, in_channels=1, test_cfg=test_cfg, **head_cfg)
    # Freeze and switch to eval so exports/tests are deterministic.
    model.requires_grad_(False)
    model.eval()
    return model
def test_retina_head_forward_single():
    """Test RetinaNet Head single forward in torch and onnxruntime env."""
    head = retinanet_config()
    feature_map = torch.rand(1, head.in_channels, 32, 32)
    ort_validate(WrapFunction(head.forward_single), feature_map)
def test_retina_head_forward():
    """Test RetinaNet Head forward in torch and onnxruntime env."""
    head = retinanet_config()
    s = 128
    # RetinaNet expects one feature map per FPN level; spatial sizes here
    # are 32, 16, 8, 4, 2 (i.e. s // 2**(level + 2)).
    n_levels = len(head.anchor_generator.strides)
    feats = [
        torch.rand(1, head.in_channels,
                   s // (2 ** (lvl + 2)), s // (2 ** (lvl + 2)))
        for lvl in range(n_levels)
    ]
    ort_validate(WrapFunction(head.forward), feats)
def test_retinanet_head_get_bboxes():
    """Test RetinaNet Head _get_bboxes() in torch and onnxruntime env."""
    retina_model = retinanet_config()
    s = 128
    img_metas = [{
        'img_shape_for_onnx': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3),
        'img_shape': (s, s, 2)
    }]
    # The data of retina_head_get_bboxes.pkl contains two parts:
    # cls_score(list(Tensor)) and bboxes(list(Tensor)),
    # where each torch.Tensor is generated by torch.rand().
    # the cls_score's size: (1, 36, 32, 32), (1, 36, 16, 16),
    # (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2).
    # the bboxes's size: (1, 36, 32, 32), (1, 36, 16, 16),
    # (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2)
    retina_head_data = 'retina_head_get_bboxes.pkl'
    feats = mmcv.load(osp.join(data_path, retina_head_data))
    # First five tensors are classification scores, the rest are bbox preds.
    cls_score = feats[:5]
    bboxes = feats[5:]
    # Bind img_metas via partial so the wrapped callable takes tensors only,
    # which is required for ONNX tracing.
    retina_model.get_bboxes = partial(
        retina_model.get_bboxes, img_metas=img_metas)
    wrap_model = WrapFunction(retina_model.get_bboxes)
    wrap_model.cpu().eval()
    # Export the wrapped get_bboxes graph to ONNX (opset 11).
    with torch.no_grad():
        torch.onnx.export(
            wrap_model, (cls_score, bboxes),
            'tmp.onnx',
            export_params=True,
            keep_initializers_as_inputs=True,
            do_constant_folding=True,
            verbose=False,
            opset_version=11)
    # Run the exported model through onnxruntime and the original through
    # torch, then compare the two sets of outputs element-wise.
    onnx_outputs = verify_model(cls_score + bboxes)
    torch_outputs = wrap_model.forward(cls_score, bboxes)
    torch_outputs = convert_result_list(torch_outputs)
    torch_outputs = [
        torch_output.detach().numpy() for torch_output in torch_outputs
    ]
    # match torch_outputs and onnx_outputs
    for i in range(len(onnx_outputs)):
        np.testing.assert_allclose(
            torch_outputs[i], onnx_outputs[i], rtol=1e-03, atol=1e-05)
def yolo_config():
    """Build a small YOLOV3 head (eval mode, gradients frozen) for ONNX tests."""
    anchor_generator = dict(
        type='YOLOAnchorGenerator',
        base_sizes=[[(116, 90), (156, 198), (373, 326)],
                    [(30, 61), (62, 45), (59, 119)],
                    [(10, 13), (16, 30), (33, 23)]],
        strides=[32, 16, 8])
    head_cfg = dict(
        anchor_generator=anchor_generator,
        bbox_coder=dict(type='YOLOBBoxCoder'))
    test_cfg = mmcv.Config(
        dict(
            deploy_nms_pre=1000,
            min_bbox_size=0,
            score_thr=0.05,
            conf_thr=0.005,
            nms=dict(type='nms', iou_threshold=0.45),
            max_per_img=100))
    model = YOLOV3Head(
        num_classes=4,
        in_channels=[1, 1, 1],
        out_channels=[16, 8, 4],
        test_cfg=test_cfg,
        **head_cfg)
    # Freeze and switch to eval so exports/tests are deterministic.
    model.requires_grad_(False)
    model.eval()
    return model
def test_yolov3_head_forward():
    """Test Yolov3 head forward() in torch and ort env."""
    head = yolo_config()
    # YOLOV3 expects one feature map per scale; spatial sizes 16, 8, 4.
    feats = [
        torch.rand(1, 1, 64 // (2 ** (lvl + 2)), 64 // (2 ** (lvl + 2)))
        for lvl in range(len(head.in_channels))
    ]
    ort_validate(WrapFunction(head.forward), feats)
def test_yolov3_head_get_bboxes():
    """Test yolov3 head get_bboxes() in torch and ort env."""
    yolo_model = yolo_config()
    s = 128
    img_metas = [{
        'img_shape_for_onnx': (s, s, 3),
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]
    # The data of yolov3_head_get_bboxes.pkl contains
    # a list of torch.Tensor, where each torch.Tensor
    # is generated by torch.rand and each tensor size is:
    # (1, 27, 32, 32), (1, 27, 16, 16), (1, 27, 8, 8).
    yolo_head_data = 'yolov3_head_get_bboxes.pkl'
    pred_maps = mmcv.load(osp.join(data_path, yolo_head_data))
    # Bind img_metas via partial so the wrapped callable takes tensors only,
    # which is required for ONNX tracing.
    yolo_model.get_bboxes = partial(yolo_model.get_bboxes, img_metas=img_metas)
    wrap_model = WrapFunction(yolo_model.get_bboxes)
    wrap_model.cpu().eval()
    # Export the wrapped get_bboxes graph to ONNX (opset 11).
    with torch.no_grad():
        torch.onnx.export(
            wrap_model,
            pred_maps,
            'tmp.onnx',
            export_params=True,
            keep_initializers_as_inputs=True,
            do_constant_folding=True,
            verbose=False,
            opset_version=11)
    # Run the exported model through onnxruntime and the original through
    # torch, then compare the two sets of outputs element-wise.
    onnx_outputs = verify_model(pred_maps)
    torch_outputs = convert_result_list(wrap_model.forward(pred_maps))
    torch_outputs = [
        torch_output.detach().numpy() for torch_output in torch_outputs
    ]
    # match torch_outputs and onnx_outputs
    for i in range(len(onnx_outputs)):
        np.testing.assert_allclose(
            torch_outputs[i], onnx_outputs[i], rtol=1e-03, atol=1e-05)
|
{"hexsha": "ed6888f5bced4c69f030fd48fa800f1183f78548", "size": 6848, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_onnx/test_head.py", "max_stars_repo_name": "likelyzhao/Swin-Transformer-Object-Detection", "max_stars_repo_head_hexsha": "4003ea497e32be85b657a928e6b7d8f782e578ff", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 367, "max_stars_repo_stars_event_min_datetime": "2022-01-14T03:32:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T04:48:20.000Z", "max_issues_repo_path": "tests/test_onnx/test_head.py", "max_issues_repo_name": "likelyzhao/Swin-Transformer-Object-Detection", "max_issues_repo_head_hexsha": "4003ea497e32be85b657a928e6b7d8f782e578ff", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 39, "max_issues_repo_issues_event_min_datetime": "2021-08-05T07:16:46.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-14T13:23:48.000Z", "max_forks_repo_path": "tests/test_onnx/test_head.py", "max_forks_repo_name": "likelyzhao/Swin-Transformer-Object-Detection", "max_forks_repo_head_hexsha": "4003ea497e32be85b657a928e6b7d8f782e578ff", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 61, "max_forks_repo_forks_event_min_datetime": "2021-07-30T07:51:41.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T14:40:02.000Z", "avg_line_length": 30.1674008811, "max_line_length": 79, "alphanum_fraction": 0.6125876168, "include": true, "reason": "import numpy", "num_tokens": 1921}
|
"""Conversion code from CSV to NetCDF files
:author: Chris R. Vernon
:email: chris.vernon@pnnl.gov
License: BSD 2-Clause, see LICENSE and DISCLAIMER files
"""
import os
import numpy as np
import pandas as pd
class DataToArray:
    """Convert Xanthos outputs from CSV to a 3D NumPy array having a data value
    for each grid cell in the global coordinate plane per time step.

    INSTANCE VARIABLES:
    :param xanthos_reference_file: Full path with file name and extension to the input Xanthos reference
                                   file containing the x, y array index positions of each of the 67420 land
                                   cell ids; contains headers: 'grid_id', 'latitude_index', and
                                   'longitude_index'.
    :type xanthos_reference_file: str

    :param xanthos_data_csv:       Full path with file name and extension to a Xanthos output containing data
                                   associated with each 67420 land cell. Data should be in the format of a row
                                   per grid cell where the columns are the values per month or year. Contains
                                   headers: 'id', 'YYYY' or 'YYYYMM'; where 'YYYY' is a string year
                                   (e.g., '1995') and 'YYYYMM' is a string year and month (e.g., '199501').
    :type xanthos_data_csv: str

    CLASS VARIABLES:
    :param X_MIN:         Minimum X or longitude coordinate
    :param Y_MIN:         Minimum Y or latitude coordinate
    :param X_MAX:         Maximum X or longitude coordinate
    :param Y_MAX:         Maximum Y or latitude coordinate
    :param RESOLUTION:    Grid cell resolution
    :param NODATA:        NoData value of the array for non-land elements; default `np.nan`
    :param REF_COLUMNS:   Column header names used to extract fields from the reference data
    :param KEY:           Primary key identifying the grid cell id; default: `grid_id`
    :param DATA_ID_FIELD: Grid-cell id column name in the Xanthos data; default: `id`
    :param REF_X_FIELD:   Reference data field name for the X index; default: `longitude_index`
    :param REF_Y_FIELD:   Reference data field name for the Y index; default: `latitude_index`

    Example:
        >>> x = DataToArray(xanthos_reference_file='<reference file>', xanthos_data_csv='<data file>')
        >>> x.data_array  # 3D array (n_time, y, x)
    """
    # coordinate bounds for Xanthos
    X_MIN = -180.0
    Y_MIN = -90.0
    X_MAX = 180.0
    Y_MAX = 90.0

    # xanthos resolution in degrees
    RESOLUTION = 0.5

    # xanthos default NoData value
    NODATA = np.nan

    # primary key for data frames
    KEY = 'grid_id'

    # target xanthos reference columns to load and column names
    REF_X_FIELD = 'longitude_index'
    REF_Y_FIELD = 'latitude_index'
    REF_COLUMNS = [KEY, REF_Y_FIELD, REF_X_FIELD]

    # id field to rename from the input data
    DATA_ID_FIELD = 'id'

    def __init__(self, xanthos_reference_file, xanthos_data_csv):
        self._reference_file = xanthos_reference_file
        self._data_csv = xanthos_data_csv

    @staticmethod
    def check_exist(file_path):
        """Ensure the file exists.

        :param file_path: Full path with file name and extension to the input file
        :type file_path: str
        :return: Valid file path
        :raises IOError: if the file cannot be located
        """
        if os.path.isfile(file_path):
            return file_path
        else:
            # NOTE: original message was missing the closing quote.
            raise IOError(f"USAGE: File path '{file_path}' cannot be located.")

    @property
    def reference_file(self):
        """Validate reference file existence."""
        return self.check_exist(self._reference_file)

    @property
    def data_csv(self):
        """Validate data file existence."""
        return self.check_exist(self._data_csv)

    @property
    def df_reference(self):
        """Load reference file into a data frame."""
        return pd.read_csv(self.reference_file, usecols=self.REF_COLUMNS)

    @property
    def df_data(self):
        """Load data file into a data frame."""
        df = pd.read_csv(self.data_csv)
        # rename 'id' column to the shared KEY to assist join
        df.rename(columns={self.DATA_ID_FIELD: self.KEY}, inplace=True)
        return df

    @property
    def df_merge(self):
        """Create a data frame that represents merged reference data and xanthos values from the input."""
        # join on the shared KEY constant instead of a hard-coded literal
        return self.df_data.merge(self.df_reference, on=self.KEY)

    @property
    def grid_array(self):
        """Build a NoData-filled grid array for the desired extent and resolution.

        :return: 2D array matching the shape of the coordinate plane
        """
        # build coordinate arrays (upper bound inclusive)
        x_coords = np.arange(self.X_MIN, self.X_MAX + self.RESOLUTION, self.RESOLUTION)
        y_coords = np.arange(self.Y_MIN, self.Y_MAX + self.RESOLUTION, self.RESOLUTION)
        # two-dimensional array matching the shape of the coordinates
        coords = np.zeros(shape=(y_coords.shape[0], x_coords.shape[0]))
        # set the entire array to the NoData value
        coords[:] = self.NODATA
        return coords

    @property
    def data_array(self):
        """Create a multi-dimensional array housing Xanthos data for each land cell.

        :returns: 3D array of land cell values per time step as they exist on the
                  global coordinate plane. Shape: (n_time, y, x)
        """
        df = self.df_data.copy()

        # remove id column so only time-step columns remain
        df.drop(columns=self.KEY, inplace=True)

        # NumPy requires a TUPLE of index arrays for multi-axis fancy
        # indexing; passing a list is deprecated/removed in modern NumPy.
        merged = self.df_merge
        coordinate_indices = (merged[self.REF_Y_FIELD].values,
                              merged[self.REF_X_FIELD].values)

        # build the NoData template once instead of re-evaluating the
        # `grid_array` property on every loop iteration
        template = self.grid_array

        # construct initial 3D array of (n_time, y, x)
        arr = np.empty(shape=(df.shape[1], template.shape[0], template.shape[1]))

        for index, col in enumerate(df.columns):
            # copy to preserve the NoData template
            grid_array = template.copy()

            # scatter this time step's values into the global coordinate plane
            grid_array[coordinate_indices] = df[col]

            # add as a time slice to the output 3D array
            arr[index, :, :] = grid_array

        return arr
|
{"hexsha": "041618b91d540f9973ef4b5d8c339fcc3b7e23e0", "size": 7559, "ext": "py", "lang": "Python", "max_stars_repo_path": "xnetcdf/convert.py", "max_stars_repo_name": "crvernon/xnetcdf", "max_stars_repo_head_hexsha": "12aa0788888a11d03c7fd346795a0dbff3a102d5", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "xnetcdf/convert.py", "max_issues_repo_name": "crvernon/xnetcdf", "max_issues_repo_head_hexsha": "12aa0788888a11d03c7fd346795a0dbff3a102d5", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-06-15T19:45:56.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-15T20:34:57.000Z", "max_forks_repo_path": "xnetcdf/convert.py", "max_forks_repo_name": "crvernon/xnetcdf", "max_forks_repo_head_hexsha": "12aa0788888a11d03c7fd346795a0dbff3a102d5", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.5159817352, "max_line_length": 120, "alphanum_fraction": 0.5881730388, "include": true, "reason": "import numpy", "num_tokens": 1649}
|
#!/bin/env python
#
# Advent of Code Day 2020
# Day 02
#
# author: Daniel Joseph Antrim
# e-mail: dantrim1023 AT gmail DOT com
#
import sys
from argparse import ArgumentParser
from pathlib import Path
import numpy as np
def unpack_db_entry(db_entry):
    """
    Split a password-DB entry into its policy fields and the password.

    Args:
        db_entry [str] : DB entry, format: <min>-<max> <character>: <password>
    Returns:
        <min>[int], <max>[int], <character>[str], <password>[str]
    """
    pieces = db_entry.split(":")
    requirements = pieces[0].strip()
    password = pieces[1].strip()

    tokens = requirements.split()
    count_range = tokens[0]
    letter_required = tokens[1]

    min_required = int(count_range.split("-")[0])
    max_required = int(count_range.split("-")[1])

    return min_required, max_required, letter_required, password
def is_good_password_part1(db_entry):
    """
    Classify the DB entry following Part 1.

    The DB entry of the format <min>-<max> <character>: <password>
    describes the requirement that the character <character> must appear
    between <min> and <max> times (inclusive) in <password>.

    Returns:
        bool: True if the password satisfies its policy.
    """
    min_required, max_required, letter_required, password = unpack_db_entry(db_entry)
    # A chained comparison replaces the original membership test against a
    # np.arange array: O(1) instead of materializing the whole valid range.
    return min_required <= password.count(letter_required) <= max_required
def is_good_password_part2(db_entry):
    """
    Classify the DB entry following Part 2.

    The DB entry of the format <min>-<max> <character>: <password>
    requires that exactly one of the two 1-indexed positions <min> and
    <max> in the password holds <character> (exclusive or).
    """
    pos_a, pos_b, character, password = unpack_db_entry(db_entry)
    match_a = password[pos_a - 1] == character
    match_b = password[pos_b - 1] == character
    # Exactly one position may match: boolean inequality is an XOR.
    return match_a != match_b
def main(input_path):
    """Classify every DB entry under both rule sets and print the tallies.

    Reads *input_path* line by line, classifies each stripped entry with
    both Part 1 and Part 2 rules, sanity-checks that every entry was
    classified, and prints the good/bad counts for each part.
    """
    good_entries_part1, bad_entries_part1 = [], []
    good_entries_part2, bad_entries_part2 = [], []
    n_entries_total = 0
    with open(input_path, "r") as input_file:
        for raw_line in input_file:
            entry = raw_line.strip()
            n_entries_total += 1
            # part1 classification
            bucket = good_entries_part1 if is_good_password_part1(entry) else bad_entries_part1
            bucket.append(entry)
            # part2 classification
            bucket = good_entries_part2 if is_good_password_part2(entry) else bad_entries_part2
            bucket.append(entry)
    # Sanity check: every entry must land in exactly one bucket per part.
    for part, good, bad in (
        (1, good_entries_part1, bad_entries_part1),
        (2, good_entries_part2, bad_entries_part2),
    ):
        n_entries_classified = len(good) + len(bad)
        if n_entries_classified != n_entries_total:
            print(
                f"ERROR[PART {part}]: Failed to classify {n_entries_total - n_entries_classified} DB entries!"
            )
            sys.exit(1)
    print(
        f"PART 1: # of good db entries = {len(good_entries_part1)}, # of bad db entries = {len(bad_entries_part1)}"
    )
    print(
        f"PART 2: # of good db entries = {len(good_entries_part2)}, # of bad db entries = {len(bad_entries_part2)}"
    )
if __name__ == "__main__":
    # CLI entry point: validate the input path exists before running the solver.
    parser = ArgumentParser(description="AoC day #2")
    parser.add_argument("input", help="Day #2 input file")
    args = parser.parse_args()
    input_path = Path(args.input)
    if not input_path.exists() or not input_path.is_file():
        print(f'ERROR: bad input "{args.input}"')
        sys.exit(1)
    main(input_path)
|
{"hexsha": "c56aa31aff782f1765d4593eca3b695eccca34ef", "size": 4139, "ext": "py", "lang": "Python", "max_stars_repo_path": "2020/python/day_02/day_02.py", "max_stars_repo_name": "dantrim/danny_advents_of_code", "max_stars_repo_head_hexsha": "57bfe4da81db5aa34c83604eab765552a688b144", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-12-01T15:32:35.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-02T16:59:30.000Z", "max_issues_repo_path": "2020/python/day_02/day_02.py", "max_issues_repo_name": "dantrim/danny_advents_of_code", "max_issues_repo_head_hexsha": "57bfe4da81db5aa34c83604eab765552a688b144", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "2020/python/day_02/day_02.py", "max_forks_repo_name": "dantrim/danny_advents_of_code", "max_forks_repo_head_hexsha": "57bfe4da81db5aa34c83604eab765552a688b144", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.8492063492, "max_line_length": 115, "alphanum_fraction": 0.653297898, "include": true, "reason": "import numpy", "num_tokens": 1003}
|
#Ref: Microscopists
# Demo script: image smoothing / denoising with OpenCV.
# Averaging, gaussian blurring, median, bilateral filtering.
# cv2.filter2D() convolves an arbitrary user-defined kernel with the image.
import cv2
import numpy as np
from matplotlib import pyplot as plt  # NOTE(review): imported but unused here
img = cv2.imread('images/BSE_Google_noisy.jpg', 1)  # flag 1 = load as color
kernel = np.ones((5,5),np.float32)/25  # normalized 5x5 box kernel (sums to 1)
filt_2D = cv2.filter2D(img,-1,kernel) #Convolution using the kernel we provide; -1 keeps the source depth
blur = cv2.blur(img,(5,5)) #Convolution with a normalized filter. Same as above for this example.
blur_gaussian = cv2.GaussianBlur(img,(5,5),0) #Gaussian kernel is used; sigma 0 -> derived from kernel size
median_blur = median = cv2.medianBlur(img,5) #Using kernel size 5. Better on edges compared to gaussian.
bilateral_blur = cv2.bilateralFilter(img,9,75,75) #Good for noise removal but retain edge sharpness.
# Show each result in its own window; any key press closes them all.
cv2.imshow("Original", img)
cv2.imshow("2D filtered", filt_2D)
cv2.imshow("Blur", blur)
cv2.imshow("Gaussian Blur", blur_gaussian)
cv2.imshow("Median Blur", median_blur)
cv2.imshow("Bilateral", bilateral_blur)
cv2.waitKey(0)
cv2.destroyAllWindows()
#############################################################
#Edge detection:
import cv2
import numpy as np
img = cv2.imread("images/Neuron.jpg", 0)  # flag 0 = load as grayscale
edges = cv2.Canny(img,100,200) #Image, min and max hysteresis thresholds
cv2.imshow("Original Image", img)
cv2.imshow("Canny", edges)
cv2.waitKey(0)
cv2.destroyAllWindows()
#########################################################
|
{"hexsha": "eb4880f354085848f6fa8167eef61df23d9cbf86", "size": 1483, "ext": "py", "lang": "Python", "max_stars_repo_path": "10_image_processing_in_openCV_intro1-preprocessing.py", "max_stars_repo_name": "Data-Laboratory/WorkExamples", "max_stars_repo_head_hexsha": "27e58207e664da7813673e6792c0c30c0a5bf74c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-15T22:27:27.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-15T22:27:27.000Z", "max_issues_repo_path": "10_image_processing_in_openCV_intro1-preprocessing.py", "max_issues_repo_name": "Data-Laboratory/WorkExamples", "max_issues_repo_head_hexsha": "27e58207e664da7813673e6792c0c30c0a5bf74c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "10_image_processing_in_openCV_intro1-preprocessing.py", "max_forks_repo_name": "Data-Laboratory/WorkExamples", "max_forks_repo_head_hexsha": "27e58207e664da7813673e6792c0c30c0a5bf74c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.5531914894, "max_line_length": 105, "alphanum_fraction": 0.6783546864, "include": true, "reason": "import numpy", "num_tokens": 401}
|
# -*-coding:utf-8-*-
from facenet_pytorch import MTCNN, RNet
from PIL import Image, ImageDraw
import torch, mmcv, cv2, time, json, os
import numpy as np
from torch.nn.functional import interpolate
def test_mtcnn_img():
    """Run MTCNN face detection on a single test image and display the boxes."""
    mtcnn = MTCNN(image_size=640, thresholds=[0.8, 0.8, 0.6], min_face_size=40)
    img = Image.open('./test_1.jpg')
    boxes, _ = mtcnn.detect(img)
    draw = ImageDraw.Draw(img)
    # NOTE(review): mtcnn.detect returns None when no face is found (see the
    # video variants below) — this loop would then raise TypeError; confirm
    # the test image always contains a face.
    for box in boxes:
        draw.rectangle(box.tolist(), outline=(255, 0, 0), width=6)
    img.show()
def test_mtcnn_video():
    """Run per-frame MTCNN face detection over a video, drawing boxes and the
    running mean per-frame latency (ms); press Esc to quit."""
    device = torch.device('cuda')
    mtcnn = MTCNN(image_size=320, thresholds=[0.8, 0.8, 0.6], min_face_size=100, device=device)
    video = mmcv.VideoReader('video2.mp4')
    mean_ms = []  # per-frame detection latencies in milliseconds
    # fourcc = cv2.VideoWriter_fourcc(*'XVID')
    # out = cv2.VideoWriter('queen-mtcnn.avi', fourcc, 60.0, (1280, 720))
    dat = dict(dir_name="queen", bboxes=[])  # collected as [x, y, w, h] per face
    for frame in video:
        T1 = time.time()
        # mmcv yields BGR frames; MTCNN expects RGB PIL images.
        img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        boxes, _ = mtcnn.detect(img)
        frame_draw = img.copy()
        draw = ImageDraw.Draw(frame_draw)
        if boxes is None:
            # No face found: record a sentinel box.
            dat['bboxes'].append([0, 0, 0, 0])
        else:
            for bb in boxes:
                # Convert (x1, y1, x2, y2) to (x, y, w, h).
                dat['bboxes'].append([int(bb[0]), int(bb[1]), int(bb[2] - bb[0]), int(bb[3] - bb[1])])
                draw.rectangle(bb.tolist(), outline=(255, 0, 0), width=6)
        # Back to BGR for the cv2 display window.
        frame_draw = cv2.cvtColor(np.asarray(frame_draw),cv2.COLOR_RGB2BGR)
        T2 = time.time()
        mean_ms.append(round((T2 - T1)*1000, 4))
        # FPS = round(1000/np.mean(mean_ms), 4)
        frame_draw = cv2.putText(frame_draw, str(np.round(np.mean(mean_ms), 4))+"ms", (10,50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)
        # frame_draw = cv2.putText(frame_draw, str(FPS) + "FPS", (10, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)
        # out.write(frame_draw)
        cv2.imshow("MTCNN", frame_draw)
        k = cv2.waitKey(1)
        if k ==27:  # Esc key aborts playback
            cv2.destroyAllWindows()
            return
    cv2.destroyAllWindows()
    # out.release()
    # file_out = 'results/queen/MTCNN.json'
    # with open(file_out, 'w', encoding='utf-8') as fout:
    #     fout.write(json.dumps(dat))
    #     fout.write('\n')
class box:
    """Simple mutable bounding box: top-left corner (x, y) plus width/height.

    All fields start at zero; ``scale`` divides every field by a factor
    (true division, so values become floats).
    """
    def __init__(self):
        self.clear()
    def clear(self):
        """Reset all four fields to zero."""
        self.x = 0
        self.y = 0
        self.w = 0
        self.h = 0
    def scale(self, scale):
        """Shrink the box by dividing each coordinate and size by *scale*."""
        self.x /= scale
        self.y /= scale
        self.w /= scale
        self.h /= scale
class optical_flow_tracking:
    """Median-flow style tracker built on pyramidal Lucas-Kanade optical flow.

    Tracks ``last_box`` from the previous grayscale frame into the current
    one: sample a grid of points inside the box, track them forward and
    backward (forward-backward error), keep the most reliable points by
    NCC similarity and FB error medians, then predict the new box from the
    median point displacement and median pairwise scale change.
    """
    def __init__(self):
        self.last_box = box()   # box in the previous frame
        self.points1 = None     # grid points sampled in the previous frame
        self.points2 = None     # forward-tracked points in the current frame
        self.pointsFB = None    # backward-tracked points (FB error check)
        self.tbb = box()        # predicted box in the current frame
        self.tracked = False
        # BUGFIX: the criteria flags must be combined with `|`; the original
        # used `&`, and TERM_CRITERIA_EPS & TERM_CRITERIA_COUNT == 2 & 1 == 0,
        # which disables both termination conditions for calcOpticalFlowPyrLK.
        term_criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 20, 0.03)
        self.lk_params = dict(winSize=(4,4),maxLevel=5,criteria=term_criteria)
    def get_last_box(self, cbox: box):
        # BUGFIX: the annotation previously read `box()` — instantiating the
        # class at def time instead of naming the type.
        self.last_box = cbox
    def process_frame(self, last_gray,current_gray, bbnext, lastboxfound):
        """Advance the tracker by one frame.

        Returns the (possibly updated) box and whether tracking still holds.
        """
        self.points1 = None
        self.points2 = None
        if lastboxfound:
            self.track(last_gray,current_gray)
        else:
            self.tracked = False
        if self.tracked:
            bbnext = self.tbb
            self.last_box = bbnext
        else:
            lastboxfound = False
        return bbnext, lastboxfound
    def track(self, last_gray, current_gray):
        """Sample points, run forward/backward flow, and predict the new box."""
        self.bbPoints()
        if len(self.points1)<1:
            self.tracked = False
            return
        self.tracked = self.trackf2f(last_gray, current_gray)
        if self.tracked:
            self.bbPredict()
    def bbPoints(self):
        """Sample an (up to) max_pts x max_pts grid of points inside last_box."""
        max_pts = 10
        margin_h = 0
        margin_v = 0
        # BUGFIX/robustness: clamp the step to >= 1 — a box smaller than
        # max_pts pixels produced step 0, and range(..., step=0) raises
        # ValueError.
        step_x = max(1, int((self.last_box.w - 2*margin_h)/max_pts))
        step_y = max(1, int((self.last_box.h - 2*margin_v)/max_pts))
        points = []
        for y in range(self.last_box.y+margin_v, self.last_box.y+self.last_box.h-margin_v, step_y):
            for x in range(self.last_box.x+margin_h, self.last_box.x+self.last_box.w-margin_h, step_x):
                points.append((float(x), float(y)))
        # Shape (N, 1, 2) float32, as calcOpticalFlowPyrLK expects.
        self.points1 = np.expand_dims(np.array(points).astype(np.float32),1)
    def trackf2f(self, last_gray, current_gray):
        """Forward-backward LK tracking; returns True if reliable points remain."""
        # Forward pass: previous frame -> current frame.
        self.points2, status, similarity = cv2.calcOpticalFlowPyrLK(last_gray, current_gray,
                                                                    self.points1, None,
                                                                    **self.lk_params)
        # Backward pass: current frame -> previous frame, for the FB error.
        self.pointsFB, FBstatus, FBerror = cv2.calcOpticalFlowPyrLK(current_gray, last_gray,
                                                                    self.points2, None,
                                                                    **self.lk_params)
        # FB error = squared distance between the original point and its
        # forward-then-backward-tracked image.
        for idx in range(self.points1.shape[0]):
            real = self.pointsFB[idx][0][0] - self.points1[idx][0][0]
            imag = self.pointsFB[idx][0][1] - self.points1[idx][0][1]
            FBerror[idx] = pow(real, 2) + pow(imag, 2)
        status, similarity = self.normCrossCorrelation(last_gray, current_gray, status, similarity)
        return self.filterPts(status, similarity, FBerror)
    def normCrossCorrelation(self, last_gray, current_gray, status, similarity):
        """Score each tracked point by NCC of 10x10 patches around it."""
        for idx in range(self.points1.shape[0]):
            if status[idx]==1:
                rec0 = cv2.getRectSubPix(last_gray, (10,10), (int(self.points1[idx][0][0]), int(self.points1[idx][0][1])))
                rec1 = cv2.getRectSubPix(current_gray, (10,10), (int(self.points2[idx][0][0]), int(self.points2[idx][0][1])))
                res = cv2.matchTemplate(rec0, rec1, cv2.TM_CCOEFF_NORMED)
                similarity[idx] = float(res[0])
            else:
                similarity[idx] = float(0)
        return status, similarity
    def filterPts(self, status, similarity, FBerror):
        """Keep points above-median in similarity and below-median in FB error.

        Compacts points1/points2 in place; returns False if none survive.
        """
        simmed = np.median(similarity)
        k = 0
        for i in range(self.points2.shape[0]):
            if not status[i]: continue
            if similarity[i] > simmed:
                self.points1[k] = self.points1[i]
                self.points2[k] = self.points2[i]
                FBerror[k] = FBerror[i]
                k+=1
        if k == 0: return False
        self.points1 = self.points1[:k]
        self.points2 = self.points2[:k]
        FBerror = FBerror[:k]
        fbmed = np.median(FBerror)
        k = 0
        for i in range(self.points2.shape[0]):
            if not status[i]: continue
            if FBerror[i] <= fbmed:
                self.points1[k] = self.points1[i]
                self.points2[k] = self.points2[i]
                k+=1
        self.points1 = self.points1[:k]
        self.points2 = self.points2[:k]
        if k > 0: return True
        else: return False
    def bbPredict(self):
        """Predict tbb from the median displacement and median scale change."""
        npoints = self.points1.shape[0]
        xoff = np.zeros(npoints)
        yoff = np.zeros(npoints)
        for i in range(npoints):
            xoff[i] = self.points2[i][0][0] - self.points1[i][0][0]
            yoff[i] = self.points2[i][0][1] - self.points1[i][0][1]
        dx = np.median(xoff)
        dy = np.median(yoff)
        if npoints>1:
            # Scale = median ratio of pairwise squared distances between
            # tracked points (current frame vs previous frame).
            d = []
            for i in range(npoints):
                for j in range(i+1, npoints):
                    d.append((pow(self.points2[i][0][0] - self.points2[j][0][0], 2) + pow(self.points2[i][0][1] - self.points2[j][0][1], 2))/
                             (pow(self.points1[i][0][0] - self.points1[j][0][0], 2) + pow(self.points1[i][0][1] - self.points1[j][0][1], 2)))
            s = np.median(d)
        else:
            s = 1.0
        # Grow/shrink symmetrically around the box center, clamped to >= 0.
        s1 = 0.5 * (s - 1) * self.last_box.w
        s2 = 0.5 * (s - 1) * self.last_box.h
        self.tbb.x = max(int(np.round(self.last_box.x + dx -s1)), 0)
        self.tbb.y = max(int(np.round(self.last_box.y + dy - s2)), 0)
        self.tbb.w = int(np.round(self.last_box.w * s))
        self.tbb.h = int(np.round(self.last_box.h * s))
class cof:
    """MTCNN detection combined with optical-flow tracking ("COF").

    Runs the expensive MTCNN detector only when no face box is currently
    held (or tracking was lost), then follows the face with the
    optical_flow_tracking median-flow tracker, periodically re-scoring the
    tracked patch with RNet to drop stale tracks.
    """
    def __init__(self, device, image_size=640):
        # device = torch.device('cuda:0')
        self.image_size=image_size
        self.mtcnn = MTCNN(image_size=image_size, thresholds=[0.8, 0.8, 0.6], min_face_size=40, device=device)
        self.rnet = RNet()
        self.final_box = []                      # raw MTCNN detections; [] means "need re-detect"
        self.pbox = box()                        # box predicted by the tracker
        self.cbox = box()                        # box from the latest MTCNN detection
        self.status = True                       # False -> tracking lost
        self.tracking = optical_flow_tracking()
        self.skip = 5                            # frames since the last RNet re-check
        self.last_gray = None                    # previous grayscale frame for the tracker
        self.result = box()                      # box exposed to callers after detect()
        self.t0_list = []                        # MTCNN timings (ms)
        self.t1_list = []                        # tracker timings (ms)
        self.t2_list = []                        # RNet timings (ms)
    def detect(self, img):
        """Update self.result with the face box for BGR frame *img*.

        Side effects only (no return value): refreshes final_box/cbox/pbox,
        tracker state, and the timing lists.
        """
        # Re-run MTCNN when we have no detection yet or tracking failed.
        if len(self.final_box)==0 or self.status==False:
            t0 = time.time()
            self.result.clear()
            with torch.no_grad():
                self.final_box, _ = self.mtcnn.detect(img)
            self.t0_list.append(np.round((time.time()- t0)*1000, 4))
            # print(f'>>>> mtcnn time: {np.round(np.mean(self.t0_list), 4)} ms')
            if self.final_box is None:
                self.final_box = []
            if len(self.final_box)>0:
                # Keep only the first detection, converted to (x, y, w, h),
                # and seed the tracker with it.
                self.cbox.x = int(self.final_box[0][0])
                self.cbox.y = int(self.final_box[0][1])
                self.cbox.w = int(self.final_box[0][2] - self.final_box[0][0])
                self.cbox.h = int(self.final_box[0][3] - self.final_box[0][1])
                self.last_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                self.tracking.get_last_box(self.cbox)
                self.result = self.cbox
                self.status = True
        # Track the existing box into the current frame.
        if len(self.final_box) > 0:
            current_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            t1 = time.time()
            self.pbox, self.status = self.tracking.process_frame(self.last_gray,current_gray,self.pbox, self.status)
            self.t1_list.append(np.round((time.time() - t1) * 1000, 4))
            # print(f'tracking time: {np.round(np.mean(self.t1_list), 4)} ms')
            if self.status:
                # Every few frames, re-verify the tracked patch with RNet;
                # a low score clears final_box, forcing re-detection.
                if self.skip>2:
                    t2 = time.time()
                    face = img[self.pbox.y:self.pbox.y+self.pbox.h, self.pbox.x:self.pbox.x+self.pbox.w]
                    face = Image.fromarray(cv2.cvtColor(face, cv2.COLOR_BGR2RGB))
                    face = np.uint8(face)
                    rnet_in = torch.tensor(face).unsqueeze(0).permute(0, 3, 1, 2).float()
                    rnet_in = interpolate(rnet_in, (24, 24))
                    # MTCNN-style input normalization.
                    rnet_in = (rnet_in - 127.5) * 0.0078125
                    with torch.no_grad():
                        out = self.rnet(rnet_in)
                    out1 = out[1].permute(1, 0)
                    score = out1[1, :]
                    if score<0.90:
                        self.final_box = []
                    self.t2_list.append(np.round((time.time() - t2) * 1000, 4))
                    # print(f'rnet time: {np.round(np.mean(self.t2_list), 4)} ms')
                    self.skip=0
                self.result = self.pbox
                self.last_gray = current_gray
                self.skip+=1
            else:
                # Tracking failed this frame: fall back to the last detection.
                self.result = self.cbox
def cof_test():
    """Run the COF detector+tracker over a video on CPU, drawing the result
    box and the running mean per-frame latency (ms); press Esc to quit."""
    device = torch.device('cpu')
    cof_obj = cof(device)
    video = mmcv.VideoReader('data/static_human/2.mp4')
    mean_ms = []  # total per-frame latencies in milliseconds
    # dat = dict(dir_name="queen", bboxes=[])
    # fourcc = cv2.VideoWriter_fourcc(*'XVID')
    # out = cv2.VideoWriter('queen-cof.avi', fourcc, 60.0, (1280, 720))
    for frame in video:
        t = time.time()
        frame = cv2.resize(frame, (640, 480))
        cof_obj.detect(frame)  # updates cof_obj.result in place
        # dat['bboxes'].append([cof_obj.result.x, cof_obj.result.y, cof_obj.result.w, cof_obj.result.h])
        mean_ms.append(np.round((time.time() - t) * 1000, 4))
        print(f'total time: {np.round(np.mean(mean_ms), 4)} ms')
        # print('-'*30)
        cv2.rectangle(frame,
                      (cof_obj.result.x, cof_obj.result.y),
                      (cof_obj.result.x+cof_obj.result.w, cof_obj.result.y+cof_obj.result.h),
                      (255,0,0), 2)
        cv2.putText(frame, str(np.round(np.mean(mean_ms), 4)) + "ms", (10, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)
        # out.write(frame)
        cv2.imshow("COF", frame)
        k = cv2.waitKey(1)
        if k == 27:  # Esc key aborts playback
            cv2.destroyAllWindows()
            return
    cv2.destroyAllWindows()
    # out.release()
    # file_out = 'results/queen/COF.json'
    # with open(file_out, 'w', encoding='utf-8') as fout:
    #     fout.write(json.dumps(dat))
    #     fout.write('\n')
def save_cof_result_video(root_path):
    """Run COF over every video under *root_path* and dump per-frame boxes.

    For each file, writes one JSON object
    {dir_name: <path>, bboxes: [[x, y, w, h], ...]} to
    results/<dir>/COF_<name>.json. A fresh cof instance is built per video
    so tracker state does not leak between files.
    """
    device = torch.device('cuda')
    file_list = os.listdir(root_path)
    for file in file_list:
        file_out = f'results/{root_path.split("/")[1]}/COF_{file.split(".")[0]}.json'
        cof_obj = cof(device)
        video = mmcv.VideoReader(root_path+file)
        dat = dict(dir_name=root_path+file, bboxes=[])
        with open(file_out, 'w', encoding='utf-8') as fout:
            for img in video:
                cof_obj.detect(img)
                dat['bboxes'].append([cof_obj.result.x, cof_obj.result.y, cof_obj.result.w, cof_obj.result.h])
                # cv2.rectangle(img,
                #               (cof_obj.result.x, cof_obj.result.y),
                #               (cof_obj.result.x + cof_obj.result.w, cof_obj.result.y + cof_obj.result.h),
                #               (255, 0, 0), 2)
                # cv2.imshow(root_path+file, img)
                # k = cv2.waitKey(1)
                # if k == 27:
                #     cv2.destroyAllWindows()
                #     return
            print(len(dat['bboxes']))
            cv2.destroyAllWindows()
            fout.write(json.dumps(dat))
            fout.write('\n')
        del cof_obj
def save_mtcnn_result_video(root_path):
    """Run plain per-frame MTCNN over every video under *root_path* and dump
    per-frame boxes.

    For each file, writes one JSON object
    {dir_name: <path>, bboxes: [[x, y, w, h], ...]} to
    results/<dir>/MTCNN_<name>.json; frames with no detection record
    [0, 0, 0, 0]. Only the first detected face per frame is kept.
    """
    device = torch.device('cuda')
    file_list = os.listdir(root_path)
    for file in file_list:
        file_out = f'results/{root_path.split("/")[1]}/MTCNN_{file.split(".")[0]}.json'
        video = mmcv.VideoReader(root_path + file)
        mtcnn = MTCNN(image_size=1080, thresholds=[0.9, 0.9, 0.7], min_face_size=40, device=device)
        dat = dict(dir_name=root_path + file, bboxes=[])
        with open(file_out, 'w', encoding='utf-8') as fout:
            for img in video:
                boxes, _ = mtcnn.detect(img)
                if boxes is None:
                    dat['bboxes'].append([0,0,0,0])
                else:
                    # Convert the first (x1, y1, x2, y2) box to (x, y, w, h).
                    bb = boxes[0]
                    dat['bboxes'].append([int(bb[0]), int(bb[1]), int(bb[2]-bb[0]), int(bb[3]-bb[1])])
                    # cv2.rectangle(img,
                    #               (int(bb[0]), int(bb[1])),
                    #               (int(bb[2]), int(bb[3])),
                    #               (255, 0, 0), 2)
                    # cv2.imshow(file, img)
                    # k = cv2.waitKey(30)
                    # if k == 27:
                    #     cv2.destroyAllWindows()
                    #     return
            print(len(dat['bboxes']))
            cv2.destroyAllWindows()
            fout.write(json.dumps(dat))
            fout.write('\n')
        del mtcnn
if __name__=='__main__':
    # Interactive demo entry point.
    cof_test()
    # test_mtcnn_video()  # (fixed comment typo: was "test_mtchh_video")
|
{"hexsha": "07031bd156241c033e55cbecd8c480df5ceb9403", "size": 15323, "ext": "py", "lang": "Python", "max_stars_repo_path": "cof_main.py", "max_stars_repo_name": "HandsomeHans/Face-Tracking-Using-Optical-Flow-and-CNN-Pytorch", "max_stars_repo_head_hexsha": "b12cb26cd4d038d9763a9be0910154be2ec91d9a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2021-06-07T11:13:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-30T11:05:08.000Z", "max_issues_repo_path": "cof_main.py", "max_issues_repo_name": "HandsomeHans/Face-Tracking-Using-Optical-Flow-and-CNN-Pytorch", "max_issues_repo_head_hexsha": "b12cb26cd4d038d9763a9be0910154be2ec91d9a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cof_main.py", "max_forks_repo_name": "HandsomeHans/Face-Tracking-Using-Optical-Flow-and-CNN-Pytorch", "max_forks_repo_head_hexsha": "b12cb26cd4d038d9763a9be0910154be2ec91d9a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-08T01:33:38.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-08T01:33:38.000Z", "avg_line_length": 41.9808219178, "max_line_length": 141, "alphanum_fraction": 0.5289434184, "include": true, "reason": "import numpy", "num_tokens": 4126}
|
# Register two endpoints that serve the static example pages shipped next
# to this file.
# NOTE(review): `readstring` was deprecated in Julia 0.7 and removed in 1.0
# (modern replacement: `read(path, String)`) — confirm the Julia version
# this package targets before relying on these handlers.
Endpoint("/examples") do request::HTTP.Request
readstring(joinpath(dirname(@__FILE__),"examples.html"))
end
Endpoint("/examples/pages") do request::HTTP.Request
readstring(joinpath(dirname(@__FILE__),"pages.html"))
end
# Pull in the individual example definitions.
include("plotly.jl")
include("requests.jl")
# include("mwe.jl")
# Start the Pages server in the background, give it a moment to bind,
# then open the examples index in the default browser.
function examples()
@async Pages.start()
sleep(2.0)
Pages.launch("http://localhost:$(Pages.port)/examples")
end
|
{"hexsha": "ecdb8aa2dd6867953e901913d064d69689ae69af", "size": 408, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/examples.jl", "max_stars_repo_name": "minggu24/Pages.jl", "max_stars_repo_head_hexsha": "6b187312a3bc3b19108a500032fe5a0ecda613a5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/examples.jl", "max_issues_repo_name": "minggu24/Pages.jl", "max_issues_repo_head_hexsha": "6b187312a3bc3b19108a500032fe5a0ecda613a5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/examples.jl", "max_forks_repo_name": "minggu24/Pages.jl", "max_forks_repo_head_hexsha": "6b187312a3bc3b19108a500032fe5a0ecda613a5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.4, "max_line_length": 60, "alphanum_fraction": 0.7254901961, "num_tokens": 103}
|
from __future__ import annotations
from typing import Callable
import numpy as np
from numpy.typing import ArrayLike
from ._helpers import (
Info,
LinearOperator,
asrlinearoperator,
clip_imag,
get_default_inner,
wrap_inner,
)
def cgls(
    A: LinearOperator,
    b: ArrayLike,
    inner: Callable[[np.ndarray, np.ndarray], np.ndarray] | None = None,
    x0: ArrayLike | None = None,
    tol: float = 1e-5,
    atol: float = 1.0e-15,
    maxiter: int | None = None,
    callback: Callable[[int, np.ndarray, np.ndarray], None] | None = None,
    tol_inner_real: float = 1.0e-15,
):
    """Conjugate gradient on the normal equations (CGLS).

    Basically CG, but the residual is taken from the normal equation

        s = A^H r = A^H b - A^H A x

    Args:
        A: linear operator of shape (m, n); matvec and rmatvec are used.
        b: right-hand side; first axis of length m.
        inner: custom inner product; defaults to one matching b's shape.
        x0: initial guess; the zero vector is used if None.
        tol: relative tolerance on the normal-equation residual norm.
        atol: absolute floor for the convergence criterion.
        maxiter: iteration cap; defaults to the number of rows of A.
        callback: called as callback(k, x, r) after each iteration.
        tol_inner_real: tolerance for clipping tiny imaginary parts of
            inner products (guards against complex round-off).

    Returns:
        (x or None, Info): the solution when converged (else None), plus an
        Info record with the success flag, final iterate, iteration count,
        and the history of normal-residual norms.
    """

    def _norm(y):
        # Norm induced by the (possibly custom) inner product.
        return np.sqrt(clip_imag(_inner(y, y), tol_inner_real))

    A = asrlinearoperator(A)
    b = np.asarray(b)

    assert len(A.shape) == 2
    assert A.shape[0] == b.shape[0]

    N = A.shape[0]

    _inner = get_default_inner(b.shape) if inner is None else wrap_inner(inner)

    maxiter = N if maxiter is None else maxiter

    # get initial residual
    if x0 is None:
        x_shape = (A.shape[1], *b.shape[1:])
        x = np.zeros(x_shape, dtype=b.dtype)
        r = np.copy(b)
    else:
        x = np.copy(x0)
        r = b - A @ x

    if callback is not None:
        callback(0, x, r)

    # Search direction starts as the normal-equation residual A^H r.
    p = A.rmatvec(r)
    # rhos holds the previous and current value of <s, s>; the leading None
    # is overwritten on the first iteration.
    rhos = [None, clip_imag(_inner(p, p), tol_inner_real)]
    nresnorms = [np.sqrt(rhos[-1])]

    # iterate
    k = 0
    success = False
    criterion = np.maximum(tol * nresnorms[0], atol)
    while True:
        if np.all(nresnorms[-1] <= criterion):
            # Recompute the residual explicitly before declaring victory —
            # the recurrence-updated norm can drift from the true one.
            r = b - A @ x
            nresnorms[-1] = _norm(A.rmatvec(r))
            if np.all(nresnorms[-1] <= criterion):
                success = True
            break

        if k == maxiter:
            break

        Ap = A @ p
        # Step length minimizing the normal-equation residual along p.
        alpha = rhos[-1] / clip_imag(_inner(Ap, Ap), tol_inner_real)

        # update solution and residual
        x += alpha * p
        r -= alpha * Ap
        s = A.rmatvec(r)

        rhos[0] = rhos[-1]
        rhos[-1] = clip_imag(_inner(s, s), tol_inner_real)
        # np.where guards against division by a zero previous rho.
        beta = rhos[-1] / np.where(rhos[-2] != 0, rhos[-2], 1.0)
        p = s + beta * p

        if callback is not None:
            callback(k + 1, x, r)

        nresnorms.append(np.sqrt(rhos[-1]))
        k += 1

    return x if success else None, Info(success, x, k, nresnorms=np.array(nresnorms))
|
{"hexsha": "51c15c1d35b49a86250534db00334e475c3436f8", "size": 2466, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/krylov/cgls.py", "max_stars_repo_name": "nschloe/krylov", "max_stars_repo_head_hexsha": "58813233ff732111aa56f7b1d71908fda78080be", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 36, "max_stars_repo_stars_event_min_datetime": "2020-06-17T15:51:16.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-30T04:33:11.000Z", "max_issues_repo_path": "src/krylov/cgls.py", "max_issues_repo_name": "nschloe/krylov", "max_issues_repo_head_hexsha": "58813233ff732111aa56f7b1d71908fda78080be", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 26, "max_issues_repo_issues_event_min_datetime": "2020-08-27T17:38:15.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-11T20:00:07.000Z", "max_forks_repo_path": "src/krylov/cgls.py", "max_forks_repo_name": "nschloe/krylov", "max_forks_repo_head_hexsha": "58813233ff732111aa56f7b1d71908fda78080be", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-05-20T19:47:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-03T00:20:33.000Z", "avg_line_length": 23.7115384615, "max_line_length": 85, "alphanum_fraction": 0.5563665856, "include": true, "reason": "import numpy,from numpy", "num_tokens": 743}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 18 10:05:47 2020
@author: heiko
"""
import numpy as np
from pyrsa.util.inference_util import pool_rdm
from pyrsa.rdm import compare
from .crossvalsets import sets_leave_one_out_rdm
def cv_noise_ceiling(rdms, ceil_set, test_set, method='cosine',
                     pattern_descriptor='index'):
    """ calculates the noise ceiling for crossvalidation.

    For each fold, the lower bound compares the test RDMs against the pooled
    training RDMs from ceil_set (subsampled to the fold's patterns); the
    upper bound compares them against a pool of *all* rdms, subsampled the
    same way. Both bounds are averaged over folds.

    Args:
        rdms(pyrsa.rdm.RDMs): complete data
        ceil_set(list): training folds as 2-tuples (RDMs, pattern_sample)
        test_set(list): test folds as 2-tuples (RDMs, pattern_sample)
        method(string): comparison method to use
        pattern_descriptor(string): descriptor to group patterns

    Returns:
        list: lower nc-bound, upper nc-bound
    """
    assert len(ceil_set) == len(test_set), \
        'train_set and test_set must have the same length'
    lower_bounds, upper_bounds = [], []
    for train, test in zip(ceil_set, test_set):
        test_rdms, test_patterns = test[0], test[1]
        # Lower bound: pooled training RDMs restricted to this fold's patterns.
        lower_pred = pool_rdm(train[0], method=method).subsample_pattern(
            by=pattern_descriptor, value=test_patterns)
        # Upper bound: pool over the complete data, restricted the same way.
        upper_pred = pool_rdm(rdms, method=method).subsample_pattern(
            by=pattern_descriptor, value=test_patterns)
        lower_bounds.append(np.mean(compare(lower_pred, test_rdms, method)))
        upper_bounds.append(np.mean(compare(upper_pred, test_rdms, method)))
    return np.mean(np.array(lower_bounds)), np.mean(np.array(upper_bounds))
def boot_noise_ceiling(rdms, method='cosine', rdm_descriptor='index'):
    """ calculates a noise ceiling by leave one out & full set.

    The lower bound averages, over leave-one-out folds, the comparison of the
    held-out RDMs against the pool of the remaining RDMs; the upper bound
    compares each held-out fold against the pool of the full set.

    Args:
        rdms(pyrsa.rdm.RDMs): data to calculate noise ceiling
        method(string): comparison method to use
        rdm_descriptor(string): descriptor to group rdms

    Returns:
        list: [lower nc-bound, upper nc-bound]
    """
    _, test_set, ceil_set = sets_leave_one_out_rdm(rdms, rdm_descriptor)
    pooled_all = pool_rdm(rdms, method=method)
    lower_bounds, upper_bounds = [], []
    for train, test in zip(ceil_set, test_set):
        pooled_train = pool_rdm(train[0], method=method)
        lower_bounds.append(np.mean(compare(pooled_train, test[0], method)))
        upper_bounds.append(np.mean(compare(pooled_all, test[0], method)))
    return np.mean(np.array(lower_bounds)), np.mean(np.array(upper_bounds))
|
{"hexsha": "5db153ccefc17827aeb7cd9c98bf026c9b698e8d", "size": 2909, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyrsa/inference/noise_ceiling.py", "max_stars_repo_name": "Brandon-YuHu/pyrsa", "max_stars_repo_head_hexsha": "074213cc22e79f702ebbb4f154235f8df8c111cc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pyrsa/inference/noise_ceiling.py", "max_issues_repo_name": "Brandon-YuHu/pyrsa", "max_issues_repo_head_hexsha": "074213cc22e79f702ebbb4f154235f8df8c111cc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyrsa/inference/noise_ceiling.py", "max_forks_repo_name": "Brandon-YuHu/pyrsa", "max_forks_repo_head_hexsha": "074213cc22e79f702ebbb4f154235f8df8c111cc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.9135802469, "max_line_length": 73, "alphanum_fraction": 0.6517703678, "include": true, "reason": "import numpy", "num_tokens": 696}
|
// Copyright 2020 The "Oko" project authors. All rights reserved.
// Use of this source code is governed by a MIT license that can be
// found in the LICENSE file.
#include "viewer/ui/log_files_window.h"
#include <algorithm>
#include <array>
#include <boost/algorithm/string/replace.hpp>
#include <boost/format.hpp>
#include <charconv>
#include <future>
#include <memory>
#include <unordered_set>
#include <utility>
#include "viewer/ui/color_manager.h"
#include "viewer/ui/message_window.h"
#include "viewer/ui/progress_window.h"
namespace oko {
// Fetches the file list from |files_provider| on a worker thread while a
// modal progress window keeps the UI responsive, then sorts the entries by
// name and registers the color pairs used for selection/marking. On any
// failure (or an empty listing) posts a message and sets |finished_| so the
// caller tears the window down immediately.
LogFilesWindow::LogFilesWindow(
    LogFilesProvider* files_provider,
    int start_row,
    int start_col,
    int num_rows,
    int num_columns)
    : Window(start_row, start_col, num_rows, num_columns),
      files_provider_(files_provider) {
  // Kick off the (potentially slow) listing asynchronously.
  std::future<outcome::std_result<std::vector<LogFileInfo>>> get_file_infos_async =
      std::async(
          std::launch::async,
          [files_provider]() {
            return files_provider->GetLogFileInfos();
          });
  {
    // Modal progress dialog; its completion predicate polls the future.
    ProgressWindow progress_window(
        "Retrieving file list...",
        [&get_file_infos_async] {
          return get_file_infos_async.wait_for(
              std::chrono::seconds(0)) == std::future_status::ready;
        });
    progress_window.PostSync();
    Display();
  }
  auto maybe_file_infos = get_file_infos_async.get();
  if (!maybe_file_infos) {
    MessageWindow::PostSync(boost::str(boost::format(
        "Failed retrieve file list. %1%.") %
            maybe_file_infos.error().message()));
    finished_ = true;
    return;
  }
  file_infos_.reserve(maybe_file_infos.value().size());
  file_infos_.insert(
      file_infos_.begin(),
      maybe_file_infos.value().begin(),
      maybe_file_infos.value().end());
  if (file_infos_.empty()) {
    MessageWindow::PostSync("Don't found any log files under specified path");
    finished_ = true;
    return;
  }
  // Stable presentation order: sort by file name.
  std::sort(
      file_infos_.begin(),
      file_infos_.end(),
      [](const LogFileInfo& first, const LogFileInfo& second) {
        return first.name < second.name;
      });
  ColorManager& cm = ColorManager::instance();
  selected_color_pair_ = cm.RegisterColorPair(COLOR_BLACK, COLOR_WHITE);
  selected_marked_color_pair_ = cm.RegisterColorPair(COLOR_WHITE, COLOR_RED);
  marked_color_pair_ = cm.RegisterColorPair(COLOR_YELLOW, COLOR_RED);
}
// Keyboard handling for the file list: vim-style j/k (or arrow keys) move
// the selection, 'm'/F10 toggles the mark on the current entry and advances,
// Enter fetches the selected/marked files.
void LogFilesWindow::HandleKeyPress(int key) noexcept {
  switch (key) {
    case 'j':
    case KEY_DOWN:
      if (selected_item_ + 1 < file_infos_.size()) {
        SetSelectedItem(selected_item_ + 1);
      }
      break;
    case 'k':
    case KEY_UP:
      if (selected_item_ > 0) {
        SetSelectedItem(selected_item_ - 1);
      }
      break;
    case 'm':
    case KEY_F(10):
      if (!file_infos_.empty()) {
        file_infos_[selected_item_].is_marked =
            !file_infos_[selected_item_].is_marked;
        // Advance after toggling so repeated presses mark a run of files.
        if (selected_item_ + 1 < file_infos_.size()) {
          SetSelectedItem(selected_item_ + 1);
        }
      }
      break;
    case KEY_ENTER:
    case '\n':
      Finish();
      break;
  }
}
// Draws the title row plus as many file entries as fit, coloring each row
// according to its selected/marked state.
void LogFilesWindow::DisplayImpl() noexcept {
  DisplayTitle();
  if (num_rows_ == 1) {
    // Only the title row fits; nothing else to draw.
    return;
  }
  // One row is consumed by the title, hence num_rows_ - 1 visible entries.
  const size_t limit = std::min(
      file_infos_.size(),
      first_shown_item_ + num_rows_ - 1);
  int original_bkgd = getbkgd(window_.get());
  for (size_t i = first_shown_item_, row = 1; i < limit; ++i, ++row) {
    // Pick the background for this row; order matters: selected+marked
    // takes precedence over plain selected, which beats plain marked.
    bool bg_changed = true;
    if (i == selected_item_ && file_infos_[i].is_marked) {
      wbkgdset(window_.get(), COLOR_PAIR(selected_marked_color_pair_));
    } else if (i == selected_item_) {
      wbkgdset(window_.get(), COLOR_PAIR(selected_color_pair_));
    } else if (file_infos_[i].is_marked) {
      wbkgdset(window_.get(), COLOR_PAIR(marked_color_pair_));
    } else {
      bg_changed = false;
    }
    DisplayItem(row, file_infos_[i]);
    if (bg_changed) {
      // Restore the default background for subsequent rows.
      wbkgdset(window_.get(), original_bkgd);
    }
  }
  // Clear any leftover rows below the last entry.
  wclrtobot(window_.get());
}
// Draws the header row: a horizontal rule with "Log file name" on the left
// and a right-aligned "File size" caption (omitted if the window is too
// narrow to hold it).
void LogFilesWindow::DisplayTitle() noexcept {
  mvwhline(window_.get(), 0, 0, 0, num_columns_);
  mvwaddstr(window_.get(), 0, 1, "Log file name");
  const std::string_view kFileSize{"File size"};
  // NOTE(review): signed/unsigned comparison — num_columns_ is int while
  // kFileSize.size() is size_t; fine for sane widths, but worth confirming
  // num_columns_ is never negative here.
  if (num_columns_ > kFileSize.size() + 1) {
    mvwaddstr(
        window_.get(),
        0,
        num_columns_ - kFileSize.size() - 1,
        kFileSize.data());
  }
}
// Renders one list row: file name left-aligned, size right-aligned.
void LogFilesWindow::DisplayItem(int row, const LogFileInfo& info) noexcept {
  mvwaddstr(window_.get(), row, 0, info.name.c_str());
  wclrtoeol(window_.get());
  // 22 chars: leading space + up to 20 digits of a 64-bit value + NUL.
  std::array<char, 22> buf;
  buf[0] = ' ';
  auto res = std::to_chars(
      buf.data() + 1,
      buf.data() + buf.size(),
      info.size);
  if (res.ec != std::errc()) {
    // Cannot happen with a buffer this size; keep the row without a size.
    assert(false);
    return;
  }
  *res.ptr = 0;  // to_chars does not NUL-terminate
  mvwaddstr(window_.get(),
      row, num_columns_ - std::strlen(buf.data()), buf.data());
}
// Moves the selection cursor and scrolls the viewport just enough to keep
// it visible. The two checks are deliberately sequential (not else-if):
// when num_rows_ == 1 the first branch overshoots and the second corrects.
void LogFilesWindow::SetSelectedItem(size_t new_item) noexcept {
  selected_item_ = new_item;
  const int list_height = num_rows_ - 1;  // one row is taken by the title
  if (selected_item_ >= (first_shown_item_ + list_height)) {
    // Selection moved below the viewport: scroll down to show it.
    first_shown_item_ = std::max<int>(0, selected_item_ - list_height + 1);
  }
  if (selected_item_ < first_shown_item_) {
    // Selection moved above the viewport: align it with the top.
    first_shown_item_ = selected_item_;
  }
}
// Fetches the selected file plus all marked files on a background thread,
// showing a progress window until the fetch completes, then stores the
// successfully fetched logs in |fetched_files_| and reports any failures.
void LogFilesWindow::Finish() noexcept {
  std::vector<outcome::std_result<std::unique_ptr<LogFile>>> maybe_files;
  maybe_files.reserve(file_infos_.size());
  // Background task fills |maybe_files|; the future is joined via get()
  // below before |maybe_files| is read, so there is no concurrent access.
  std::future<void> fetch_async =
      std::async(
          std::launch::async,
          [this, &maybe_files] {
            for (size_t i = 0; i < file_infos_.size(); ++i) {
              if (i == selected_item_ || file_infos_[i].is_marked) {
                auto fetch_result = files_provider_->FetchLog(
                    file_infos_[i].name);
                maybe_files.emplace_back(std::move(fetch_result));
              }
            }
          });
  {
    // Modal progress indicator; its predicate polls the future without
    // blocking (zero-second wait).
    ProgressWindow progress_window(
        "Fetching files...",
        [&fetch_async] {
          return fetch_async.wait_for(
              std::chrono::seconds(0)) == std::future_status::ready;
        });
    progress_window.PostSync();
    Display();
  }
  // Join the worker; after this |maybe_files| is safe to inspect.
  fetch_async.get();
  // Set de-duplicates identical error codes for the summary message.
  std::unordered_set<std::error_code> errors;
  fetched_files_.clear();
  if (!maybe_files.empty()) {
    for (auto& maybe_file : maybe_files) {
      if (!maybe_file) {
        errors.emplace(std::move(maybe_file.error()));
      } else {
        fetched_files_.emplace_back(std::move(maybe_file.value()));
      }
    }
  }
  if (!errors.empty()) {
    std::stringstream message_buf;
    message_buf << "Some files failed to fetch. ";
    for (const auto& error : errors) {
      message_buf << error.message() << ". ";
    }
    MessageWindow::PostSync(message_buf.str());
  }
  finished_ = true;
}
// Remembers |str| as the active search string and jumps to the first file at
// or after the current selection whose name contains it; leaves the selection
// unchanged when there is no match.
void LogFilesWindow::SearchForFilesBySubstring(std::string str) noexcept {
  if (file_infos_.empty() || str.empty()) {
    return;
  }
  string_to_search_ = std::move(str);
  const std::string& needle = string_to_search_.value();
  for (size_t i = selected_item_; i < file_infos_.size(); ++i) {
    if (file_infos_[i].name.find(needle) != std::string::npos) {
      SetSelectedItem(i);
      return;
    }
  }
}
// Advances the selection to the next file (strictly after the current one)
// whose name contains the last search string; no-op when no search was made
// yet, the list is empty, or the selection is already at the end.
void LogFilesWindow::SearchNextEntry() noexcept {
  if (file_infos_.empty() ||
      !string_to_search_ ||
      selected_item_ + 1 >= file_infos_.size()) {
    return;
  }
  const std::string& needle = string_to_search_.value();
  for (size_t i = selected_item_ + 1; i < file_infos_.size(); ++i) {
    if (file_infos_[i].name.find(needle) != std::string::npos) {
      SetSelectedItem(i);
      return;
    }
  }
}
// Moves the selection to the closest preceding file whose name contains the
// last search string. Equivalent to the reverse-iterator search, written as a
// plain descending index scan over [0, selected_item_).
void LogFilesWindow::SearchPrevEntry() noexcept {
  if (file_infos_.empty() ||
      !string_to_search_ ||
      selected_item_ == 0) {
    return;
  }
  const std::string& needle = string_to_search_.value();
  for (size_t i = selected_item_; i-- > 0;) {
    if (file_infos_[i].name.find(needle) != std::string::npos) {
      SetSelectedItem(i);
      return;
    }
  }
}
} // namespace oko
|
{"hexsha": "1a551a7403ecdde811c2b54aec2c774586436a62", "size": 8165, "ext": "cc", "lang": "C++", "max_stars_repo_path": "viewer/ui/log_files_window.cc", "max_stars_repo_name": "vchigrin/oko", "max_stars_repo_head_hexsha": "2167ae07f450b623d23b9b5a07ff5bac49347e09", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2020-06-01T16:39:19.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-01T16:39:19.000Z", "max_issues_repo_path": "viewer/ui/log_files_window.cc", "max_issues_repo_name": "vchigrin/oko", "max_issues_repo_head_hexsha": "2167ae07f450b623d23b9b5a07ff5bac49347e09", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "viewer/ui/log_files_window.cc", "max_forks_repo_name": "vchigrin/oko", "max_forks_repo_head_hexsha": "2167ae07f450b623d23b9b5a07ff5bac49347e09", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.3705035971, "max_line_length": 83, "alphanum_fraction": 0.6306184936, "num_tokens": 2120}
|
#define CATCH_CONFIG_MAIN
#include <catch2/catch.hpp>
#include <mitama/result/result.hpp>
#include <mitama/maybe/maybe.hpp>
#include <boost/xpressive/xpressive.hpp>
#include <string>
using namespace mitama;
using namespace std::string_literals;
// is_just(): true for a just(...) value, false for nothing.
TEST_CASE("is_just()", "[maybe][is_just]"){
  maybe<int> x = just(2);
  REQUIRE( x.is_just() );
  maybe<int> y = nothing<>;
  REQUIRE_FALSE( y.is_just() );
}
// is_nothing(): complement of is_just().
TEST_CASE("is_nothing()", "[maybe][is_nothing]"){
  maybe<int> x = just(2);
  REQUIRE_FALSE( x.is_nothing() );
  maybe<int> y = nothing<>;
  REQUIRE( y.is_nothing() );
}
// unwrap(): returns the contained value; on nothing it panics. The regex
// checks the panic message plus the trailing "<file>:<line>" location suffix.
TEST_CASE("unwrap()", "[maybe][unwrap]"){
  {
    auto x = just("air"s);
    REQUIRE(x.unwrap() == "air"s);
  }
  try {
    maybe<int> x = nothing<>;
    x.unwrap(); // panics
  }
  catch (runtime_panic const &p)
  {
    using namespace boost::xpressive;
    sregex re =
        as_xpr(
            "runtime panicked at 'called `maybe::unwrap()` on a `nothing` value', ") >>
        *_ >> as_xpr(":") >> +range('0', '9');
    smatch what;
    REQUIRE(regex_match(std::string{p.what()}, what, re));
  }
}
// expect(msg): like unwrap() but panics with the caller-supplied message.
// NOTE(review): the expected text below ends the message with a backtick
// (`) where unwrap()'s ends with a quote — presumably this mirrors the
// library's actual panic format; verify against maybe::expect's source.
TEST_CASE("expect()", "[maybe][expect]"){
  {
    auto x = just("air"s);
    REQUIRE(x.expect("the world is ending") == "air"s);
  }
  try {
    maybe<int> x = nothing<>;
    x.expect("the world is ending"); // panics
  }
  catch (runtime_panic const &p)
  {
    using namespace boost::xpressive;
    sregex re =
        as_xpr(
            "runtime panicked at 'the world is ending`, ") >>
        *_ >> as_xpr(":") >> +range('0', '9');
    smatch what;
    REQUIRE(regex_match(std::string{p.what()}, what, re));
  }
}
// unwrap_or(default): contained value, or the eagerly-evaluated default.
TEST_CASE("unwrap_or()", "[maybe][unwrap_or]"){
  REQUIRE(just("car"s).unwrap_or("bike"s) == "car"s);
  REQUIRE(nothing<std::string>.unwrap_or("bike"s) == "bike"s);
}
// unwrap_or_else(fn): contained value, or the lazily-computed fallback.
TEST_CASE("unwrap_or_else()", "[maybe][unwrap_or_else]"){
  int k = 10;
  REQUIRE(just(4).unwrap_or_else([k]{ return 2 * k; }) == 4);
  REQUIRE(nothing<int>.unwrap_or_else([k]{ return 2 * k; }) == 20);
}
// map(fn): applies fn to the contained value; accepts a member-function
// pointer and does not consume the source maybe.
TEST_CASE("map()", "[maybe][map]"){
  auto maybe_some_string = just("Hello, World!"s);
  // `maybe::map` takes self *by ref*,
  // *not* consuming `maybe_some_string`
  auto maybe_some_len = maybe_some_string.map(&std::string::size);
  REQUIRE(maybe_some_len == just(13u));
}
// map_or(default, fn): fn(value) for just, the default for nothing.
TEST_CASE("map_or()", "[maybe][map_or]"){
  auto x = just("foo"s);
  REQUIRE(x.map_or(42, &std::string::size) == 3);
  maybe<std::string> y = nothing<>;
  REQUIRE(y.map_or(42, &std::string::size) == 42);
}
// map_or_else(default_fn, fn): like map_or but the default is lazy.
TEST_CASE("map_or_else()", "[maybe][map_or_else]"){
  int k = 21;
  auto x = just("foo"s);
  REQUIRE(x.map_or_else([k]{ return 2 * k; }, &std::string::size) == 3);
  maybe<std::string> y = nothing<>;
  REQUIRE(y.map_or_else([k]{ return 2 * k; }, &std::string::size) == 42);
}
// ok_or(err): converts just(v) -> success(v), nothing -> failure(err);
// the zero-argument overload produces a valueless failure<>.
TEST_CASE("ok_or()", "[maybe][ok_or]"){
  auto x = just("foo"s);
  REQUIRE(x.ok_or(0) == success("foo"s));
  maybe<std::string> y = nothing<>;
  REQUIRE(y.ok_or(0) == failure(0));
  REQUIRE(y.ok_or() == failure<>());
}
// ok_or_else(fn): like ok_or but the error value is computed lazily.
TEST_CASE("ok_or_else()", "[maybe][ok_or_else]"){
  auto x = just("foo"s);
  REQUIRE(x.ok_or_else([]{ return 0; }) == success("foo"s));
  maybe<std::string> y = nothing<>;
  REQUIRE(y.ok_or_else([]{ return 0; }) == failure(0));
}
// and_then(fn): monadic bind — chains maybe-returning functions and
// short-circuits to nothing as soon as any step yields nothing.
TEST_CASE("and_then()", "[maybe][and_then]"){
  auto sq = [](int x) -> maybe<int> { return just(x * x); };
  auto nope = [](...) -> maybe<int> { return nothing<>; };
  REQUIRE(just(2).and_then(sq).and_then(sq) == just(16));
  REQUIRE(just(2).and_then(sq).and_then(nope) == nothing<>);
  REQUIRE(just(2).and_then(nope).and_then(sq) == nothing<>);
  REQUIRE(nope().and_then(sq).and_then(sq) == nothing<>);
}
// filter(pred): keeps the value only when the predicate holds.
TEST_CASE("filter()", "[maybe][filter]"){
  auto is_even = [](int n) -> bool {
    return n % 2 == 0;
  };
  REQUIRE(maybe<int>{}.filter(is_even) == nothing<>);
  REQUIRE(just(3).filter(is_even) == nothing<>);
  REQUIRE(just(4).filter(is_even) == just(4));
}
// or_else(fn): keeps a just unchanged; replaces nothing with fn()'s result.
TEST_CASE("or_else()", "[maybe][or_else]"){
  auto nobody = []() -> maybe<std::string> { return nothing<>; };
  auto vikings = []() -> maybe<std::string> { return just("vikings"s); };
  REQUIRE(just("barbarians"s).or_else(vikings) == just("barbarians"s));
  REQUIRE(maybe<std::string>{}.or_else(vikings) == just("vikings"s));
  REQUIRE(maybe<std::string>{}.or_else(nobody) == nothing<>);
}
// get_or_insert(v): inserts v into a nothing and returns a mutable
// reference into the maybe, so writes through it are observable.
TEST_CASE("get_or_insert()", "[maybe][get_or_insert]"){
  GIVEN("a nothing type of maybe<int>.") {
    maybe<int> x = nothing<>;
    WHEN("call get_or_insert with value `5` and bind to `y`."){
      auto& y = x.get_or_insert(5);
      REQUIRE(y == 5);
      WHEN("assign 7 to `y`") {
        y = 7;
        THEN("changed x to just(7)"){
          REQUIRE(x == just(7));
        }
      }
    }
  }
}
// get_or_insert_with(fn): like get_or_insert but the value is lazily built.
TEST_CASE("get_or_insert_with()", "[maybe][get_or_insert_with]"){
  GIVEN("a nothing type of maybe<int>.") {
    maybe<int> x = nothing<>;
    WHEN("call get_or_insert_with and bind to `y`."){
      auto& y = x.get_or_insert_with([]{ return 5; });
      REQUIRE(y == 5);
      WHEN("assign 7 to `y`") {
        y = 7;
        THEN("changed x to just(7)"){
          REQUIRE(x == just(7));
        }
      }
    }
  }
}
// replace(v): stores v and hands back the previous state (just or nothing).
TEST_CASE("replace()", "[maybe][replace]"){
  {
    auto x = just(2);
    auto old = x.replace(5);
    REQUIRE(x == just(5));
    REQUIRE(old == just(2));
  }
  {
    maybe<int> x = nothing<>;
    auto old = x.replace(3);
    REQUIRE(x == just(3));
    REQUIRE(old == nothing<>);
  }
}
// transpose(): swaps the nesting — maybe<result<T,E>> into result<maybe<T>,E>.
TEST_CASE("transpose()", "[maybe][transpose]"){
  result<maybe<int>, std::string> x = success(just(5));
  maybe<result<int, std::string>> y = just(mitama::in_place(success(5)));
  REQUIRE(x == y.transpose());
}
// unwrap_or_default(): nothing yields a value-initialized T (empty string).
TEST_CASE("unwrap_or_default()", "[maybe][unwrap_or_default]"){
  maybe<std::string> x = nothing<>;
  REQUIRE(x.unwrap_or_default() == ""s);
}
// flatten(): removes exactly one level of maybe nesting per call.
TEST_CASE("flatten()", "[maybe][flatten]"){
  auto x = just(just(6));
  REQUIRE(just(6) == x.flatten());
  maybe<maybe<int>> y = just(nothing<int>);
  REQUIRE(nothing<> == y.flatten());
  maybe<maybe<int>> z = nothing<>;
  REQUIRE(nothing<> == z.flatten());
  // Flattening once only removes one level of nesting:
  auto nest = just(just(just(6)));
  REQUIRE(just(just(6)) == nest.flatten());
  REQUIRE(just(6) == nest.flatten().flatten());
}
// and_finally(fn): invokes the side-effecting callback only for a just value;
// a nothing leaves the hook untouched.
TEST_CASE("and_finally()", "[maybe][and_finally]"){
  std::string hook = "default";
  maybe<std::string> x = nothing<>;
  x.and_finally([&hook](std::string v){
    hook = v;
  });
  REQUIRE(hook == "default"s);
  auto y = just("error"s);
  y.and_finally([&hook](std::string v){
    hook = v;
  });
  REQUIRE(hook == "error"s);
}
// operator<: nothing sorts strictly before any just; two nothings are
// equivalent; justs compare by contained value.
TEST_CASE("less compare", "[maybe][less]"){
  maybe<int> just1 = just(1);
  maybe<int> just2 = just(2);
  maybe<int> none1 = nothing<>;
  maybe<int> none2 = nothing<>;
  REQUIRE(just1 < just2);
  REQUIRE_FALSE(just2 < just1);
  REQUIRE_FALSE(just1 < just1);
  REQUIRE_FALSE(just2 < just2);
  REQUIRE_FALSE(none1 < none2);
  REQUIRE_FALSE(none2 < none1);
  REQUIRE_FALSE(none1 < none1);
  REQUIRE_FALSE(none2 < none2);
  REQUIRE(none1 < just1);
  REQUIRE(none1 < just2);
  REQUIRE(none2 < just1);
  REQUIRE(none2 < just2);
  REQUIRE_FALSE(just1 < none1);
  REQUIRE_FALSE(just1 < none2);
  REQUIRE_FALSE(just2 < none1);
  REQUIRE_FALSE(just2 < none2);
}
// operator<=: reflexive variant of the ordering above (nothing <= nothing).
TEST_CASE("less_or_equal compare", "[maybe][less_or_equal]"){
  maybe<int> just1 = just(1);
  maybe<int> just2 = just(2);
  maybe<int> none1 = nothing<>;
  maybe<int> none2 = nothing<>;
  REQUIRE(just1 <= just2);
  REQUIRE_FALSE(just2 <= just1);
  REQUIRE(just1 <= just1);
  REQUIRE(just2 <= just2);
  REQUIRE(none1 <= none2);
  REQUIRE(none2 <= none1);
  REQUIRE(none1 <= none1);
  REQUIRE(none2 <= none2);
  REQUIRE(none1 <= just1);
  REQUIRE(none1 <= just2);
  REQUIRE(none2 <= just1);
  REQUIRE(none2 <= just2);
  REQUIRE_FALSE(just1 <= none1);
  REQUIRE_FALSE(just1 <= none2);
  REQUIRE_FALSE(just2 <= none1);
  REQUIRE_FALSE(just2 <= none2);
}
// operator>: mirror of operator< — any just is greater than nothing.
TEST_CASE("greater compare", "[maybe][greater]"){
  maybe<int> just1 = just(1);
  maybe<int> just2 = just(2);
  maybe<int> none1 = nothing<>;
  maybe<int> none2 = nothing<>;
  REQUIRE_FALSE(just1 > just2);
  REQUIRE(just2 > just1);
  REQUIRE_FALSE(just1 > just1);
  REQUIRE_FALSE(just2 > just2);
  REQUIRE_FALSE(none1 > none2);
  REQUIRE_FALSE(none2 > none1);
  REQUIRE_FALSE(none1 > none1);
  REQUIRE_FALSE(none2 > none2);
  REQUIRE_FALSE(none1 > just1);
  REQUIRE_FALSE(none1 > just2);
  REQUIRE_FALSE(none2 > just1);
  REQUIRE_FALSE(none2 > just2);
  REQUIRE(just1 > none1);
  REQUIRE(just1 > none2);
  REQUIRE(just2 > none1);
  REQUIRE(just2 > none2);
}
// operator>=: reflexive variant of operator> (nothing >= nothing).
TEST_CASE("greater_or_equal compare", "[maybe][greater_or_equal]"){
  maybe<int> just1 = just(1);
  maybe<int> just2 = just(2);
  maybe<int> none1 = nothing<>;
  maybe<int> none2 = nothing<>;
  REQUIRE_FALSE(just1 >= just2);
  REQUIRE(just2 >= just1);
  REQUIRE(just1 >= just1);
  REQUIRE(just2 >= just2);
  REQUIRE(none1 >= none2);
  REQUIRE(none2 >= none1);
  REQUIRE(none1 >= none1);
  REQUIRE(none2 >= none2);
  REQUIRE_FALSE(none1 >= just1);
  REQUIRE_FALSE(none1 >= just2);
  REQUIRE_FALSE(none2 >= just1);
  REQUIRE_FALSE(none2 >= just2);
  REQUIRE(just1 >= none1);
  REQUIRE(just1 >= none2);
  REQUIRE(just2 >= none1);
  REQUIRE(just2 >= none2);
}
|
{"hexsha": "f7a1139d9d126cd5a0a6c6fd061cfddeffaafbf1", "size": 9163, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/Maybe_Test.cpp", "max_stars_repo_name": "agate-pris/mitama-cpp-result", "max_stars_repo_head_hexsha": "9d94f3c9b5722892496ee7c63833fe5f12392b89", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/Maybe_Test.cpp", "max_issues_repo_name": "agate-pris/mitama-cpp-result", "max_issues_repo_head_hexsha": "9d94f3c9b5722892496ee7c63833fe5f12392b89", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/Maybe_Test.cpp", "max_forks_repo_name": "agate-pris/mitama-cpp-result", "max_forks_repo_head_hexsha": "9d94f3c9b5722892496ee7c63833fe5f12392b89", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.2407407407, "max_line_length": 87, "alphanum_fraction": 0.6072247081, "num_tokens": 2799}
|
%!TEX root = ..\..\dissertation.tex
\chapter{A Platform Framework}\label{chp:pltfFramework}
\section{Supporting Production Platform Development \& Documentation}
\section{Utilisation of Platforms through Derivative System}
|
{"hexsha": "ab9db132675ab82e24c5edfb0f6913793210f71b", "size": 224, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "mainmatter/researchResults/pltfFramework.tex", "max_stars_repo_name": "Firebrazer/DevelopingManufacturingSystemPlatforms", "max_stars_repo_head_hexsha": "7b8b71e6dfbe16da3298dce0e03b62e59d3d7ae8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mainmatter/researchResults/pltfFramework.tex", "max_issues_repo_name": "Firebrazer/DevelopingManufacturingSystemPlatforms", "max_issues_repo_head_hexsha": "7b8b71e6dfbe16da3298dce0e03b62e59d3d7ae8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mainmatter/researchResults/pltfFramework.tex", "max_forks_repo_name": "Firebrazer/DevelopingManufacturingSystemPlatforms", "max_forks_repo_head_hexsha": "7b8b71e6dfbe16da3298dce0e03b62e59d3d7ae8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.3333333333, "max_line_length": 69, "alphanum_fraction": 0.7991071429, "num_tokens": 52}
|
#%%
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.model_selection import KFold
from sklearn.metrics import confusion_matrix, f1_score, accuracy_score
import numpy as np
import pprint as pprint
import math
import pandas as pd
from sklearn.metrics import roc_curve
from matplotlib import pyplot
#%%
def train_multinomNB(features, labels, classes, folds=5):
    """Train and evaluate a MultinomialNB model with k-fold cross-validation.

    Prints the most discriminative features for each class, the average
    F1-score and accuracy across folds, and the summed confusion matrix;
    also plots a ROC curve for the final fold.

    Args:
        features (pandas.DataFrame): Feature counts (may be TF-IDF weighted).
        labels (pandas.Series): True labels aligned with ``features``.
        classes (list): Possible classes; the first entry is treated as the
            positive class.
        folds (int): Number of cross-validation folds. Defaults to 5.

    Returns:
        None: All output is printed/plotted.
    """
    kf = KFold(n_splits=folds, shuffle=True, random_state=999)
    kf.get_n_splits(features)
    f1scores = []
    accuracies = []
    confusions = np.zeros((2, 2), dtype=int)
    log_ratios = []
    for train_indices, test_indices in kf.split(features):
        train_x = features.iloc[train_indices]
        train_y = labels.iloc[train_indices]
        test_x = features.iloc[test_indices]
        test_y = labels.iloc[test_indices]
        naive_bayes_classifier = MultinomialNB()
        naive_bayes_classifier.fit(train_x, train_y)
        predictions = naive_bayes_classifier.predict(test_x)
        # Probability of the positive class: look its column up explicitly
        # instead of hard-coding an index — predict_proba columns follow
        # classifier.classes_ (sorted), not the caller's `classes` ordering.
        pos_index = list(naive_bayes_classifier.classes_).index(classes[0])
        lr_probs = naive_bayes_classifier.predict_proba(test_x)[:, pos_index]
        confusions += confusion_matrix(test_y, predictions)
        f1scores.append(f1_score(test_y, predictions, pos_label=classes[0]))
        accuracies.append(accuracy_score(test_y, predictions))
        # Per-word discriminativeness: log of the ratio of the two classes'
        # (negative) log-probabilities; extreme values mark the words most
        # indicative of either class.
        pos_scores = naive_bayes_classifier.feature_log_prob_[0, :]
        neg_scores = naive_bayes_classifier.feature_log_prob_[1, :]
        log_ratios.append(np.log(neg_scores / pos_scores))
    # ROC curve of the last fold (label fixed: this is NB, not logistic).
    fpr, tpr, _ = roc_curve(test_y, lr_probs, pos_label=classes[0])
    pyplot.plot(fpr, tpr, marker='.', label='MultinomialNB')
    pyplot.xlabel('False Positive Rate')
    pyplot.ylabel('True Positive Rate')
    pyplot.legend()
    pyplot.show()
    print('Important words (Democratic):')
    log_ratios = np.array(log_ratios)
    top_features = pd.DataFrame({
        'feature': features.columns,
        'score': log_ratios.mean(axis=0)
    }).sort_values('score', ascending=False)
    print(top_features['feature'].head(n=20))
    print('\n')
    print('Important words (Republican):')
    print(top_features['feature'].tail(n=20))
    print('\n')
    print(f'Average F1-Score: {np.mean(f1scores)}')
    print('\n')
    print(f'Average Accuracy: {np.mean(accuracies)}')
    pprint.pprint(confusions)
    print('\n\n\n')
    return None
def train_bernoulliNB(features, labels, classes, folds=5):
    """Train and evaluate a BernoulliNB model with k-fold cross-validation.

    Prints the most discriminative features for each class, the average
    F1-score and accuracy across folds, and the summed confusion matrix;
    also plots a ROC curve for the final fold.

    Args:
        features (pandas.DataFrame): Feature counts (may be TF-IDF weighted).
        labels (pandas.Series): True labels aligned with ``features``.
        classes (list): Possible classes; the first entry is treated as the
            positive class.
        folds (int): Number of cross-validation folds. Defaults to 5.

    Returns:
        None: All output is printed/plotted.
    """
    kf = KFold(n_splits=folds, shuffle=True, random_state=999)
    kf.get_n_splits(features)
    f1scores = []
    accuracies = []
    confusions = np.zeros((2, 2), dtype=int)
    log_ratios = []
    for train_indices, test_indices in kf.split(features):
        train_x = features.iloc[train_indices]
        train_y = labels.iloc[train_indices]
        test_x = features.iloc[test_indices]
        test_y = labels.iloc[test_indices]
        naive_bayes_classifier = BernoulliNB()
        naive_bayes_classifier.fit(train_x, train_y)
        predictions = naive_bayes_classifier.predict(test_x)
        # Probability of the positive class: resolve the column from
        # classifier.classes_ (sorted) so it always matches pos_label below;
        # the sibling MultinomialNB routine does the same for consistency.
        pos_index = list(naive_bayes_classifier.classes_).index(classes[0])
        lr_probs = naive_bayes_classifier.predict_proba(test_x)[:, pos_index]
        confusions += confusion_matrix(test_y, predictions)
        f1scores.append(f1_score(test_y, predictions, pos_label=classes[0]))
        accuracies.append(accuracy_score(test_y, predictions))
        # Per-word discriminativeness via the log-ratio of the two classes'
        # (negative) log-probabilities.
        pos_scores = naive_bayes_classifier.feature_log_prob_[0, :]
        neg_scores = naive_bayes_classifier.feature_log_prob_[1, :]
        log_ratios.append(np.log(neg_scores / pos_scores))
    # ROC curve of the last fold (label fixed: this is NB, not logistic).
    fpr, tpr, _ = roc_curve(test_y, lr_probs, pos_label=classes[0])
    pyplot.plot(fpr, tpr, marker='.', label='BernoulliNB')
    pyplot.xlabel('False Positive Rate')
    pyplot.ylabel('True Positive Rate')
    pyplot.legend()
    pyplot.show()
    print('Important words (Democratic):')
    log_ratios = np.array(log_ratios)
    top_features = pd.DataFrame({
        'feature': features.columns,
        'score': log_ratios.mean(axis=0)
    }).sort_values('score', ascending=False)
    print(top_features['feature'].head(n=20))
    print('\n')
    print('Important words (Republican):')
    print(top_features['feature'].tail(n=20))
    print('\n')
    print(f'Average F1-Score: {np.mean(f1scores)}')
    print('\n')
    print(f'Average Accuracy: {np.mean(accuracies)}')
    pprint.pprint(confusions)
    print('\n\n\n')
    return None
|
{"hexsha": "b6a9512cc9fd4e1326f5a0a500336ff304d3d4c2", "size": 6332, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/models.py", "max_stars_repo_name": "dapluggg/politicalParty-classifier", "max_stars_repo_head_hexsha": "53f38ac5783305adbba815ab1739aab448565ee0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/models.py", "max_issues_repo_name": "dapluggg/politicalParty-classifier", "max_issues_repo_head_hexsha": "53f38ac5783305adbba815ab1739aab448565ee0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/models.py", "max_forks_repo_name": "dapluggg/politicalParty-classifier", "max_forks_repo_head_hexsha": "53f38ac5783305adbba815ab1739aab448565ee0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.227027027, "max_line_length": 103, "alphanum_fraction": 0.6550852811, "include": true, "reason": "import numpy", "num_tokens": 1544}
|
// Software License for MTL
//
// Copyright (c) 2007 The Trustees of Indiana University.
// 2008 Dresden University of Technology and the Trustees of Indiana University.
// 2010 SimuNova UG (haftungsbeschränkt), www.simunova.com.
// All rights reserved.
// Authors: Peter Gottschling and Andrew Lumsdaine
//
// This file is part of the Matrix Template Library
//
// See also license.mtl.txt in the distribution.
#include <iostream>
#include <utility>
#include <cmath>
#include <boost/test/minimal.hpp>
#include <boost/numeric/mtl/mtl.hpp>
#include <boost/numeric/itl/itl.hpp>
using namespace std;
using namespace mtl;
// Objective functor for the penalised portfolio problem:
//   f(pi) = lg*(sum(pi) - 1)^2 + lg*(dot(pi, rv) - r)^2 + pi^T * S * pi
// i.e. portfolio variance plus quadratic penalty terms softly enforcing the
// budget constraint sum(pi) = 1 and the target return dot(pi, rv) = r.
template <typename Vector, typename Matrix>
class f_ftor
{
    typedef typename Collection<Vector>::value_type value_type;
  public:
    /// Arguments: each stock's ROI, the aimed ROI, the Lagrange factor and the covariance
    f_ftor(const Vector& rv, value_type r, value_type lg, const Matrix& S)
      : rv(rv), r(r), lg(lg), S(S) {}
    // Evaluate the penalised objective at portfolio weights pi.
    value_type operator()(const Vector& pi) const
    {
	Vector S_pi(S * pi);
	return lg * sq(sum(pi) - 1) + lg * sq(dot(pi, rv) - r) + trans(pi) * S_pi;
    }
    // Square helper (kept public as in the original interface).
    value_type sq(value_type x) const { return x * x; }
  private:
    Vector rv;
    value_type r, lg;
    Matrix S;
};
// Analytic gradient of f_ftor's objective:
//   grad f(pi) = 2*lg*(sum(pi)-1)*1 + 2*lg*(dot(pi,rv)-r)*rv + 2*S*pi
// where 1 is the all-ones vector (onev).
template <typename Vector, typename Matrix>
class grad_f_ftor
{
    typedef typename Collection<Vector>::value_type value_type;
  public:
    /// Arguments: each stock's ROI, the aimed ROI, the Lagrange factor and the covariance
    grad_f_ftor(const Vector& rv, value_type r, value_type lg, const Matrix& S)
      : rv(rv), onev(size(rv), 1), r(r), lg(lg), S(S) {}
    // Evaluate the gradient at portfolio weights pi.
    Vector operator()(const Vector& pi) const
    {
	value_type f1= 2.0 * lg * (sum(pi) - 1), f2= 2.0 * lg * (dot(pi, rv) - r);
	return Vector(f1 * onev + f2 * rv + 2.0 * S * pi);
    }
  private:
    Vector rv, onev;
    value_type r, lg;
    Matrix S;
};
// Direct (exact) solver for the equality-constrained portfolio problem via
// the KKT system: builds the (n+2)x(n+2) saddle-point matrix
//   [ S   1  rv ] [pi    ]   [0]
//   [ 1^T 0  0  ] [lam1  ] = [1]
//   [ rv^T 0 0  ] [lam2  ]   [r]
// and solves it with LU decomposition; operator() returns the first n
// entries (the optimal weights), discarding the two Lagrange multipliers.
template <typename Vector, typename Matrix>
class portfolio_optimizer
{
    typedef typename Collection<Vector>::value_type value_type;
  public:
    /// Arguments: each stock's ROI, the aimed ROI and the covariance
    portfolio_optimizer(const Vector& rv, value_type r, const Matrix& S)
      : s(size(rv) + 2), A(s, s), b(s, value_type(0))
    {
	unsigned s2= size(rv);
	// Assemble the KKT blocks with MTL sub-matrix views.
	A[irange(s2)][irange(s2)]= S; A[irange(s2)][s2]= Vector(s2, 1); A[irange(s2)][s2+1]= rv;
	A[s2][irange(s2)]= trans(Vector(s2, 1)); A[irange(s2, s)][irange(s2, s)]= 0;
	A[s2+1][irange(s2)]= trans(rv);
	b[s2]= 1; b[s2+1]= r;
	cout << "A is\n" << A << "\nb is " << b << '\n';
    }
    // Solve the assembled system and return the n portfolio weights.
    Vector operator()() const { return clone(lu_solve(A, b)[irange(s-2)]); }
  private:
    unsigned s;
    Matrix A;
    Vector b;
};
// Driver: compares the exact KKT solution against a quasi-Newton (SR1 with
// Armijo line search) minimisation of the penalised objective for a small
// 4-asset example.
int test_main(int, char**)
{
    using namespace mtl;
    // Equal-weight starting portfolio and per-asset returns.
    dense_vector<double> pi(4, 0.25), rv(4);
    rv= 1.03, 1.14, 1.05, 1.08;
    dense2D<double> S(4, 4);
#if 1
    // Covariance with some negative correlations.
    S= 1.0, 0.1, 0.3, -0.2,
       0.1, 1., -0.4, 0.7,
       0.3, -0.4, 1., 0.4,
       -0.2, 0.7, 0.4, 1.;
#else
    // Alternative: all-positive correlations (kept for experimentation).
    S= 1.0, 0.1, 0.3, 0.2,
       0.1, 1., 0.4, 0.7,
       0.3, 0.4, 1., 0.4,
       0.2, 0.7, 0.4, 1.;
#endif
    // Large penalty weight so the soft constraints are enforced tightly.
    const double lagrange= 10000.0;
    f_ftor< dense_vector<double>, dense2D<double> > f(rv, 1.09, lagrange, S);
    cout << "f(pi) is " << f(pi) << '\n';
    grad_f_ftor< dense_vector<double>, dense2D<double> > grad_f(rv, 1.09, lagrange, S);
    cout << "grad_f(pi) is " << grad_f(pi) << '\n';
    // Exact solution via the KKT system.
    portfolio_optimizer< dense_vector<double>, dense2D<double> > opt(rv, 1.09, S);
    dense_vector<double> pi_opt(opt());
    cout << "Optimal portfolio is " << pi_opt << "\n";
    std::cout<< "Sum of pi is " << sum(pi_opt) << "\n";
    std::cout<< "Overall ROI is " << dot(pi_opt, rv) << "\n";
    std::cout<< "Variance is " << dot(pi_opt, dense_vector<double>(S * pi_opt)) << "\n";
#if 1
    // Iterative solution of the penalised problem for comparison.
    std::cout<< "Sum of pi is " << sum(pi) << "\n";
    std::cout<< "Overall ROI is " << dot(pi, rv) << "\n";
    std::cout<< "Variance is " << dot(pi, dense_vector<double>(S * pi)) << "\n";
    itl::cyclic_iteration<double> iter(grad_f(pi), 1000, 0, 1e-5, 10);
    quasi_newton(pi, f, grad_f, itl::armijo<>(), itl::sr1(), iter);
    iter.error_code();
    std::cout<< "pi= " << pi << "\n";
    std::cout<< "grad_f(pi)= " << grad_f(pi) << "\n";
    std::cout<< "Sum of pi is " << sum(pi) << "\n";
    std::cout<< "Overall ROI is " << dot(pi, rv) << "\n";
    std::cout<< "Variance is " << dot(pi, dense_vector<double>(S * pi)) << "\n";
#endif
    return 0;
}
|
{"hexsha": "39001db3bcf89a210f1417ca77316d7a0273ea0a", "size": 4518, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "libs/numeric/mtl/experimental/portfolio_test.cpp", "max_stars_repo_name": "lit-uriy/mtl4-mirror", "max_stars_repo_head_hexsha": "37cf7c2847165d3537cbc3400cb5fde6f80e3d8b", "max_stars_repo_licenses": ["MTLL"], "max_stars_count": 24.0, "max_stars_repo_stars_event_min_datetime": "2019-03-26T15:25:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T10:00:45.000Z", "max_issues_repo_path": "libs/numeric/mtl/experimental/portfolio_test.cpp", "max_issues_repo_name": "lit-uriy/mtl4-mirror", "max_issues_repo_head_hexsha": "37cf7c2847165d3537cbc3400cb5fde6f80e3d8b", "max_issues_repo_licenses": ["MTLL"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2020-04-17T12:35:32.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-03T15:46:25.000Z", "max_forks_repo_path": "libs/numeric/mtl/experimental/portfolio_test.cpp", "max_forks_repo_name": "lit-uriy/mtl4-mirror", "max_forks_repo_head_hexsha": "37cf7c2847165d3537cbc3400cb5fde6f80e3d8b", "max_forks_repo_licenses": ["MTLL"], "max_forks_count": 10.0, "max_forks_repo_forks_event_min_datetime": "2019-12-01T13:40:30.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-14T08:39:54.000Z", "avg_line_length": 26.4210526316, "max_line_length": 94, "alphanum_fraction": 0.5838866755, "num_tokens": 1525}
|
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 25 19:14:27 2019
@author: Browsing
"""
import numpy as np
import matplotlib.pyplot as plt
def f(x, y):
    """Right-hand side of the test ODE: dy/dx = (x + 20*y) * sin(x*y)."""
    xy = x * y
    return np.sin(xy) * (x + 20.0 * y)
def RK2(startX, startY, endX, h, a2, func=None):
    """Integrate dy/dx = func(x, y) with a generic two-stage Runge-Kutta.

    The family is parameterised by ``a2``, the weight of the second slope
    estimate: a2=0.5 gives Heun's method, a2=1 the midpoint method and
    a2=2/3 Ralston's method.

    Args:
        startX, startY: Initial condition (x0, y0).
        endX: Integration stops once x exceeds this value.
        h: Step size.
        a2: Second-stage weight (must be non-zero).
        func: Right-hand side f(x, y); defaults to the module-level ``f``.

    Returns:
        tuple[list, list]: Visited x values and the corresponding y values.
    """
    if func is None:
        func = f
    # Standard RK2 consistency conditions: a1 + a2 = 1, a2*p1 = a2*q11 = 1/2.
    a1 = 1.0 - a2
    p1 = 0.5 / a2
    q11 = 0.5 / a2
    x = []
    y = []
    while startX <= endX:
        x.append(startX)
        y.append(startY)
        k1 = func(startX, startY)
        k2 = func(startX + p1 * h, startY + q11 * k1 * h)
        startY = startY + (a1 * k1 + a2 * k2) * h
        startX = startX + h
    return x, y
def Euler(startX, startY, endX, h, func=None):
    """Integrate dy/dx = func(x, y) with the explicit (forward) Euler method.

    Args:
        startX, startY: Initial condition (x0, y0).
        endX: Integration stops once x exceeds this value.
        h: Step size.
        func: Right-hand side f(x, y); defaults to the module-level ``f``.

    Returns:
        tuple[list, list]: Visited x values and the corresponding y values.
    """
    if func is None:
        func = f
    x = []
    y = []
    while startX <= endX:
        x.append(startX)
        y.append(startY)
        startY = startY + func(startX, startY) * h
        startX = startX + h
    return x, y
def Heun( startX , startY , endX , h ):
    """Heun's method (explicit trapezoidal rule): RK2 with a2 = 1/2."""
    return RK2(startX, startY, endX, h, a2=0.5)
def MidPoint( startX , startY , endX , h ):
    """Midpoint (modified Euler) method: RK2 with a2 = 1."""
    return RK2(startX, startY, endX, h, a2=1.0)
def Ralston( startX , startY , endX , h ):
    """Ralston's minimum-truncation-error method: RK2 with a2 = 2/3."""
    return RK2(startX, startY, endX, h, a2=2.0 / 3.0)
def RK4(startX, startY, endX, h, func=None):
    """Integrate dy/dx = func(x, y) with the classical 4th-order Runge-Kutta.

    Args:
        startX, startY: Initial condition (x0, y0).
        endX: Integration stops once x exceeds this value.
        h: Step size.
        func: Right-hand side f(x, y); defaults to the module-level ``f``.

    Returns:
        tuple[list, list]: Visited x values and the corresponding y values.
    """
    if func is None:
        func = f
    x = []
    y = []
    while startX <= endX:
        x.append(startX)
        y.append(startY)
        k1 = func(startX, startY)
        k2 = func(startX + 0.5 * h, startY + 0.5 * k1 * h)
        k3 = func(startX + 0.5 * h, startY + 0.5 * k2 * h)
        k4 = func(startX + h, startY + k3 * h)
        # Simpson-like weighted average of the four slope estimates.
        startY = startY + (k1 + 2 * k2 + 2 * k3 + k4) * h / 6.0
        startX = startX + h
    return x, y
def SubPlot1(startX , startY , endX , hs , Title , func ):
    """On the current figure, overlay solver `func` applied to the same
    initial-value problem once per step size in `hs`, then add a legend."""
    plt.title(Title)
    for h in hs:
        x , y = func(startX , startY , endX , h)
#        plt.ylim(-200.0 , 200.0)
        plt.plot(x,y , label = "h = %f"%(h))
    plt.legend()
def SubPlot2(startX , startY , endX , h , Title ):
    """On the current figure, overlay all five solvers (Euler, Heun,
    midpoint, Ralston, RK4) at the single step size `h`."""
    plt.title(Title)
    x,y= Euler(startX , startY , endX , h )
    plt.plot( x, y , label = "Euler method")
    x,y= Heun(startX , startY , endX , h )
#    plt.ylim(-200.0 , 200.0)
    plt.plot( x, y , label = "Heun's Method")
    x,y= MidPoint(startX , startY , endX , h )
#    plt.ylim(-200.0 , 200.0)
    plt.plot( x, y , label = "Midpoint Method")
    x,y= Ralston(startX , startY , endX , h )
#    plt.ylim(-200.0 , 200.0)
    plt.plot( x, y , label = "Ralston’s Method")
    x,y= RK4(startX , startY , endX , h )
#    plt.ylim(-200.0 , 200.0)
    plt.plot( x, y , label = "4th order RK Method")
    plt.legend()
def Plot1(startX , startY , endX , hs):
    """Create one figure per solver; each overlays all step sizes in `hs`."""
    plt.figure(1,[10,10] )
    SubPlot1(startX,startY , endX , hs , "Euler Method" ,Euler )
    plt.figure(2,[10,10] )
    SubPlot1(startX,startY , endX , hs , "Heun's Method" ,Heun )
    plt.figure(3,[10,10] )
    SubPlot1(startX,startY , endX , hs , "Midpoint Method" ,MidPoint )
    plt.figure(4,[10,10] )
    SubPlot1(startX,startY , endX , hs , "Ralston’s Method" ,Ralston )
    plt.figure(5,[10,10] )
    SubPlot1(startX,startY , endX , hs , "4th order RK Method" ,RK4 )
def Plot2(startX , startY , endX , hs):
    """Create one figure per step size; each overlays all five solvers.
    Figure numbers start at 6 so they do not collide with Plot1's 1-5."""
    for i in range( len(hs) ):
        plt.figure(i+6,[10, 10])
        h=hs[i]
        SubPlot2(startX , startY , endX , h , "h = %f"%(h) )
if __name__ == '__main__':
    # Compare the solvers on y(0) = 4 over [0, 10] for several step sizes.
#    print(RK2(0.0 , 1.0 , 10.0 , 0.5 , 0.5))
    Plot1(0.0 , 4.0 , 10.0 ,[0.01, 0.05, 0.1, 0.5 ])
    Plot2(0.0 , 4.0 , 10.0 ,[0.01, 0.05, 0.1, 0.5 ])
|
{"hexsha": "55434b29326da2edfd6daced5c8e6d6f4efdab60", "size": 3395, "ext": "py", "lang": "Python", "max_stars_repo_path": "Numerical/Offline 5 on ODE/Numerical Offline RK method.py", "max_stars_repo_name": "mahdihasnat/2-1-kodes", "max_stars_repo_head_hexsha": "1526de08f1bce66dbe428a8b27fedaca1ec75004", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Numerical/Offline 5 on ODE/Numerical Offline RK method.py", "max_issues_repo_name": "mahdihasnat/2-1-kodes", "max_issues_repo_head_hexsha": "1526de08f1bce66dbe428a8b27fedaca1ec75004", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Numerical/Offline 5 on ODE/Numerical Offline RK method.py", "max_forks_repo_name": "mahdihasnat/2-1-kodes", "max_forks_repo_head_hexsha": "1526de08f1bce66dbe428a8b27fedaca1ec75004", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.0578512397, "max_line_length": 70, "alphanum_fraction": 0.522533137, "include": true, "reason": "import numpy", "num_tokens": 1248}
|
"""Testing for Bag-of-Words."""
import numpy as np
import pytest
import re
from pyts.bag_of_words import BagOfWords
# Toy fixture: three discretised time series of length 5 over the
# two-symbol alphabet {'a', 'b'}.
X = [['a', 'a', 'a', 'b', 'a'],
     ['a', 'a', 'b', 'b', 'a'],
     ['b', 'b', 'b', 'b', 'a']]
# Each case pairs an invalid constructor parameter with the exact exception
# type and message BagOfWords.transform is expected to raise.
@pytest.mark.parametrize(
    'params, error, err_msg',
    [({'window_size': '4'}, TypeError,
      "'window_size' must be an integer or a float."),
     ({'window_step': [0, 1]}, TypeError,
      "'window_step' must be an integer or a float."),
     ({'window_size': 0}, ValueError,
      "If 'window_size' is an integer, it must be greater than or equal to 1 "
      "and lower than or equal to n_timestamps (got 0)."),
     ({'window_size': 2.}, ValueError,
      "If 'window_size' is a float, it must be greater than 0 and lower "
      "than or equal to 1 (got {0}).".format(2.)),
     ({'window_step': 0}, ValueError,
      "If 'window_step' is an integer, it must be greater than or equal to 1 "
      "and lower than or equal to n_timestamps (got 0)."),
     ({'window_step': 2.}, ValueError,
      "If 'window_step' is a float, it must be greater than 0 and lower "
      "than or equal to 1 (got {0}).".format(2.))]
)
def test_parameter_check(params, error, err_msg):
    """Check that an invalid parameter raises the documented error.

    The message is matched verbatim (via re.escape), so wording
    regressions are caught as well as the exception type.
    """
    bow = BagOfWords(**params)
    with pytest.raises(error, match=re.escape(err_msg)):
        bow.transform(X)
# Each case pairs a BagOfWords configuration with the expected word documents
# for the shared fixture X (one space-separated string per sample).
@pytest.mark.parametrize(
    'params, arr_desired',
    [({}, ['a b a', 'a b a', 'b a']),
     ({'numerosity_reduction': False},
      ['a a a b a', 'a a b b a', 'b b b b a']),
     ({'window_size': 1, 'numerosity_reduction': False},
      ['a a a b a', 'a a b b a', 'b b b b a']),
     ({'window_size': 2, 'numerosity_reduction': False},
      ['aa aa ab ba', 'aa ab bb ba', 'bb bb bb ba']),
     ({'window_size': 0.4, 'window_step': 0.2, 'numerosity_reduction': False},
      ['aa aa ab ba', 'aa ab bb ba', 'bb bb bb ba']),
     ({'window_size': 2, 'window_step': 1, 'numerosity_reduction': True},
      ['aa ab ba', 'aa ab bb ba', 'bb ba']),
     ({'window_size': 3, 'window_step': 2, 'numerosity_reduction': False},
      ['aaa aba', 'aab bba', 'bbb bba']),
     ({'window_size': 3, 'window_step': 2, 'numerosity_reduction': True},
      ['aaa aba', 'aab bba', 'bbb bba'])]
)
def test_actual_results(params, arr_desired):
    """Check fit_transform output against hand-computed bags of words."""
    arr_actual = BagOfWords(**params).fit_transform(X)
    np.testing.assert_array_equal(arr_actual, arr_desired)
|
{"hexsha": "be08499fe417c76f9f4780eda94b39ea1baa8216", "size": 2463, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyts/bag_of_words/tests/test_bow.py", "max_stars_repo_name": "martanto/pyts", "max_stars_repo_head_hexsha": "1c0b0c9628068afaa57e036bd157fcb4ecdddee6", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pyts/bag_of_words/tests/test_bow.py", "max_issues_repo_name": "martanto/pyts", "max_issues_repo_head_hexsha": "1c0b0c9628068afaa57e036bd157fcb4ecdddee6", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyts/bag_of_words/tests/test_bow.py", "max_forks_repo_name": "martanto/pyts", "max_forks_repo_head_hexsha": "1c0b0c9628068afaa57e036bd157fcb4ecdddee6", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-10-22T03:16:42.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-22T03:16:42.000Z", "avg_line_length": 33.2837837838, "max_line_length": 78, "alphanum_fraction": 0.5895249695, "include": true, "reason": "import numpy", "num_tokens": 755}
|
import tables
import pandas as pd
import numpy as np
from scipy.interpolate import interp1d
import os
import pickle
import time
from ismore import brainamp_channel_lists
from ismore.invasive import discrete_movs_emg_classification
from ismore.noninvasive.emg_feature_extraction import EMGMultiFeatureExtractor
from ismore.common_state_lists import *
from utils.constants import *
from db import dbfunctions as dbfn
from db.tracker import models
from matplotlib import pyplot as plt
# --- Run configuration: toggles, channels and feature-extraction settings ---
saveClassifier = True  # if True, the trained classifier is pickled and registered in the db (see bottom of file)
use_scalar_fixed_var = True
dbname = 'default'
# dbname = 'tecnalia'
emg_channels = brainamp_channel_lists.emg14_bip #list of recorded channels
filt_training_data = True
## Feature Extraction
feature_names = ['WL']  # active feature set; alternatives kept below
# feature_names = ['MAV', 'VAR', 'WL', 'RMS', 'LOGVAR']#,'WAMP','ZC','SSC']
win_len = 0.500 # secs
# win_len = 1. # secs
# win_len = 2. # secs
step_len = 0.050 # secs
# step_len = 0.001 #secs
fs = 1000 # Hz
# Per-feature keyword arguments; only consulted for features that need a
# threshold (WAMP/ZC/SSC), harmless for the current feature set.
feature_fn_kwargs = {
    'WAMP': {'threshold': 30},
    'ZC': {'threshold': 30},
    'SSC': {'threshold': 700},
}
extractor_cls = EMGMultiFeatureExtractor
#set svm classifier parameters
C=1.0
gamma=0.01
# ---------------------------------- Multi-movement classification ------------------------ #
# Task-entry id lists per calibration session. H = healthy arm, P = paretic
# arm; "pre"/"post" refer to sessions before/after the intervention date in
# each comment.
# Calibration H - 2017.07.11
calibration_H_pre1 = [4737,4738,4742,4743,4746,4747] #R
# Calibration P - 2017.07.12s
calibration_P_pre1 = [4769,4770,4773,4774,4777,4778] #R
# Calibration P - 2017.07.14
calibration_P_pre2 = [4795,4796,4807,4811,4813] #R #4797 not saved
# NOTE(review): bare list expression below is a no-op (evaluated and
# discarded); presumably the GT session ids kept as a note -- confirm or
# assign to a variable.
[4767,4771,4802,4809] #GT
# Calibration H - 2017.09.20
calibration_H_post1 = [6967,6968,6971,6973,6974,6979,6980,6987,6988]
# 6976,6982,6984 --> USB connection lost with exo at some point, incomplete runs.
# Calibration P - 2017.09.18
calibration_P_post1 = [6937,6938,6946,6949,6950,6953,6954] #R
# NOTE(review): another discarded bare list of GT ids -- confirm intent.
[6935,6947,6951,] #GT
# Calibration P
# NOTE(review): 9627 breaks the otherwise ascending 94xx sequence; possibly a
# typo for 9427 -- confirm against the database.
calibration_P_post2 = [9426,9627,9429,9430,9431,9432] #R -- 2017.12.06 -- neural data also recorded, spikes and raw data
# Calibration H
calibration_H_post2 = [9690,9691,9692,9693, 9694, 9695, 9696, 9697]
#######-------------------------------------------------------- MEASUREMENTS ----------------------------------------------------########
# Train on the healthy-arm sessions, test on paretic-arm post sessions.
train_te_list = calibration_H_pre1 + calibration_H_post1 + calibration_H_post2 # all movements
test_te_list = calibration_P_post1
# #Hand movements
# select list of channels used for training
channels_2train = [
    'InterFirst',
    'AbdPolLo',
    'ExtCU',
    'ExtCarp',
    'ExtDig',
    'FlexDig',
    'FlexCarp',]
    # 'PronTer',
    # 'Biceps',
    # 'Triceps',
    # 'FrontDelt',
    # 'MidDelt']
# Single-iteration loop: maps each selected channel name to its index within
# the full emg_channels list (subset_muscles_ix feeds extractor_kwargs below).
for subset_muscles in [channels_2train]:
    subset_muscles_ix = [emg_channels.index(subset_muscles[i]) for i in range(len(subset_muscles))]
# channels_2train = emg_channels
# NOTE(review): the three assignments below are immediately overridden by the
# 4-trial-type configuration that follows -- kept, it seems, as a quick toggle.
tt2classify = ['grasp'] # trial_types to classify
movs2classify = ['rest-grasp', 'grasp-back']
movs_labels = [1,2]
# Active configuration: label 1 marks the rest->movement transition, label 2
# the movement->back transition, shared across all four grasp trial types.
tt2classify = ['grasp', 'blue_grasp', 'grasp_down', 'grasp_up'] # trial_types to classify
movs2classify = ['rest-grasp', 'grasp-back', 'rest-blue_grasp', 'blue_grasp-back' , 'rest-grasp_down', 'grasp_down-back', 'rest-grasp_up' , 'grasp_up-back' ]
movs_labels = [1,2,1,2,1,2,1,2]
# tt2classify = ['grasp', 'point'] # trial_types to classify
# movs2classify = ['rest-grasp', 'grasp-back', 'rest-point', 'point-back']
# movs_labels = [1,2,3,4]
# # tt2classify = ['grasp','point','up']
# # movs2classify = ['rest-grasp', 'grasp-back', 'rest-point', 'point-back', 'rest-up', 'up-back', 'rest-down', 'down-back']
# # movs_labels = [1,2,3,4,5,6,7,8] # if label =0, we do not consider that data for testing, only for plotting
# tt2classify = ['grasp','point','up','down']
# movs2classify = ['rest-' + tt for tt in tt2classify ]
# movs_labels = [1,2,3,4]
# tt2classify = ['red_up' , 'red_down' , 'green_point', 'blue_grasp']
# movs2classify = ['rest-' + tt for tt in tt2classify ]
# movs_labels = [1,2,3,4]
# tt2classify = ['up','down']
# movs2classify = [ 'rest-up', 'rest-down', 'down-back', 'up-back', ]
# movs_labels = [1,2,1,2]
# # movs2classify = [ 'rest-up', 'rest-down']
# # movs_labels = [1,2]
# tt2classify = ['grasp','point']
# movs2classify = ['rest-grasp','rest-point']
# movs_labels = [1,2]
# --- Arm movements
# channels_2train = [
# 'Biceps',
# 'Triceps',
# 'FrontDelt',
# 'MidDelt',
# 'TeresMajor',
# 'PectMajor']
# channels_2train = emg_channels
# tt2classify = ['red','green','blue','red to blue', 'red to green','blue to red', 'blue to green']
# tt2classify = ['red','green','blue']
# movs2classify = ['rest-' + tt for tt in B1_targets ]
# # movs2classify = [tt + '-back' for tt in B1_targets ]
# movs_labels = [1,2,3]
# # ## --------------
# B2_targets = ['grasp','point','up','down']
# B3_targets = ['grasp_up', 'grasp_down', 'point_up', 'point_down']
# # Invasive - compliant blocks
# blk1_targets = ['red', 'green', 'blue', 'red_to_blue', 'red_to_green','blue_to_red', 'blue_to_green']
# blk2_targets = B2_targets + B3_targets
# blk3_targets = ['red_up' , 'red_down' , 'green_point', 'blue_grasp']
# blk4_targets = ['red_grasp_up', 'red_point_down','green_grasp_down','blue_grasp_up']
# tt2classify = blk3_targets
# movs2classify = ['rest-' + tt for tt in tt2classify ]
# channels_2train = emg_channels
# movs_labels = [1,2,3,4]
# ## --- Combined arm-hand movements
# channels_2train = emg_channels
# tt2classify = ['red_up','green_point','blue_grasp' ]
# movs2classify = ['rest-red_up','rest-green_point', 'rest-blue_grasp']
# movs_labels = [1,2,3]
### see differences between healthy and paretic
# train_te_list = calibration_H_pre1
# train_te_list = calibration_P_pre1
# channels_2train = emg_channels
# tt2classify = ['red' ]
# movs2classify = ['rest-red']
# movs_labels = [1]
# ## --------------
# Bundle of settings handed to the EMG feature extractor / classifier; keys
# mirror the module-level configuration variables defined above.
extractor_kwargs = {
    'emg_channels': emg_channels,
    'feature_names': feature_names,
    'feature_fn_kwargs': feature_fn_kwargs,
    'win_len': win_len,
    'step_len': step_len,
    'fs': fs,
    'channels_2train': channels_2train,
    'subset_muscles_ix': subset_muscles_ix,
    'use_scalar_fixed_var': use_scalar_fixed_var,
}
# Task types - data
from db import dbfunctions as dbfn
import numpy as np
import unicodedata
def get_trial_type_te_list(te_list , mov_list, dbname):
mov_te_list = []
for idx_te, te_id in enumerate(te_list):
print 'checking te : ', te_id
try:
te = dbfn.TaskEntry(te_id)
task_name= unicodedata.normalize('NFKD', te.task.name).encode('ascii','ignore')
if task_name not in ['ismore_disable_system', 'ismore_recordGoalTargets']:
trial_types_te = np.unique(te.hdf.root.task[:]['trial_type'])
for idx_tt, tt in enumerate(trial_types_te):
print 'trial type : ', tt
if tt in mov_list:
mov_te_list.append(te_id)
te.close()
te.close_hdf()
except:
print 'data not found in storage'
pass
mov_te_list = np.unique(mov_te_list).tolist()
# mov_te_list = mov_te_list.tolist()
print 'Final mov_te_list is : ', mov_te_list
return mov_te_list
# get task entries with specific type of trial to be classified
train_hdf_ids = get_trial_type_te_list(train_te_list, tt2classify, dbname)
test_hdf_ids = get_trial_type_te_list(test_te_list, tt2classify, dbname)
# test_hdf_ids = test_hdf_ids[0:len(test_hdf_ids)/3]
# test_hdf_ids = test_hdf_ids[(len(test_hdf_ids)/3)+1:np.int(len(test_hdf_ids)*(2./3.))]
# test_hdf_ids = test_hdf_ids[np.int(len(test_hdf_ids)*(2./3.))+1:-1]
normalize_data = True
mov_classifier = discrete_movs_emg_classification.SVM_mov_EMGClassifier(channels_2train, filt_training_data,
    extractor_cls, extractor_kwargs)
# NOTE(review): 'trials' is immediately overridden -- 'windows' is the active
# classification mode; the first line looks like a kept toggle.
class_mode = 'trials'
class_mode = 'windows'
# Extract features/labels for the training entries (test-entry slots unused).
[train_data, train_label, vel_filt_train, _, _, _, _] = mov_classifier.process_data(train_hdf_ids, [], normalize_data,tt2classify,movs2classify, movs_labels,dbname, class_mode)
# [_ , _ , _ , test_data, test_label, ts_features_test, vel_filt_test] = mov_classifier.process_data([], test_hdf_ids, normalize_data,tt2classify,movs2classify, movs_labels,dbname,class_mode)
mov_classifier.train_svm(C, gamma, train_data, train_label)
# Evaluate on the training set itself (test-set evaluation is commented out).
predicted_label_train, predicted_prob_train = mov_classifier.test_svm(train_data, train_label)
# predicted_label, predicted_prob = mov_classifier.test_svm(test_data, test_label)
# training data
# Probability of the second class (column 1) drives the linear mapping below.
x_train = predicted_prob_train[:,1]
# y_train = vel_filt_train[:,3]
# slope, intercept, r_value, p_value, std_err = linregress(x_train,y_train)
rh_dof = [3,4,5]
mov_classifier.get_LM_from_train(x_train,vel_filt_train,rh_dof)
# mov_classifier.m = m
# mov_classifier.b = b
# save grasp_emg_classifier
mov_classifier.training_ids = train_hdf_ids
classifier_name = 'grasp_emg_classifier_scalarvar_%s_%s' %(str(use_scalar_fixed_var), time.strftime('%Y%m%d_%H%M'))
pkl_name = classifier_name + '.pkl'
storage_dir = '/storage/decoders'
mov_classifier.path = os.path.join(storage_dir, pkl_name)
# NOTE(review): the file handle passed to pickle.dump is never closed
# explicitly; consider a with-block.
pickle.dump(mov_classifier, open(os.path.join(storage_dir, pkl_name), 'wb'))
# #testing data
# pred_kin_test = np.zeros([len(predicted_prob),len(rh_dof)])
# for idx_dof, ind_dof in enumerate(rh_dof):
# pred_kin_test[:,idx_dof] = m[idx_dof]* predicted_prob[:,1] + b[idx_dof]
# plt.figure()
# plt.plot(pred_kin_test)
# plt.plot(vel_filt_test[:,rh_dof])
# plt.plot(test_label)
# # check rest_emg_classifier output for bmi sessions using rest_emg_classifier
# for te_id in bmi_invasive_tes:
# print te_id
# te = dbfn.TaskEntry(te_id, dbname = dbname)
# rest_emg_output = te.hdf.root.task[:]['rest_emg_output']
# print str(te.date.month) + '_' + str(te.date.day)
# plt.plot(rest_emg_output)
# plt.show()
# ### see differences between healthy and paretic
# channels_2train = emg_channels
# tt2classify = ['red' ]
# movs2classify = ['rest-red']
# normalize_data = True
# mov_classifier = discrete_movs_emg_classification.SVM_mov_EMGClassifier(channels_2train, filt_training_data, extractor_cls, extractor_kwargs)
# train_te_list = calibration_H_pre1
# movs_labels = [1]
# train_hdf_ids = get_trial_type_te_list(train_te_list, tt2classify, dbname)
# [train_data1, train_label1, test_data, test_label] = mov_classifier.process_data(train_hdf_ids, [], normalize_data,tt2classify,movs2classify, movs_labels,dbname)
# train_te_list = calibration_P_pre1
# movs_labels = [2]
# train_hdf_ids = get_trial_type_te_list(train_te_list, tt2classify, dbname)
# [train_data2, train_label2, test_data, test_label] = mov_classifier.process_data(train_hdf_ids, [], normalize_data,tt2classify,movs2classify, movs_labels,dbname)
# train_data = np.vstack([train_data1, train_data2])
# train_label = np.hstack([train_label1, train_label2])
# mov_classifier.train_svm(C, gamma, train_data, train_label)
# train_te_list = calibration_P_pre1
# # # ---------------------------------- Rest vs mov classification ------------------------ #
# # states2classify = ['rest', 'trial', 'trial_return']
# # rest_classifier = discrete_movs_emg_classification.SVM_rest_EMGClassifier(channels_2train, fs, win_len, filt_training_data, extractor_cls, extractor_kwargs, classifier_type)
# # classifier_MovNoMov.train_svm(C, gamma, train_hdf_names, test_hdf_names)
# # classifier.classifier_MovNoMov = classifier_MovNoMov
# print 'rest classifier trained'
# rest_classifier.training_ids = train_hdf_ids
# train_ids_str = str(min(train_hdf_ids)) + '_' + str(max(train_hdf_ids))
# subject_name = models.TaskEntry.objects.using(dbname).get(id=train_hdf_ids[0]).subject.name
# classifier_name = 'emg_classifier_%s_%s_%s' % (subject_name,train_ids_str, time.strftime('%Y%m%d_%H%M'))
# pkl_name = classifier_name + '.pkl'
# rest_classifier.classifier_name = classifier_name
# # --------------------
# Register the pickled classifier (written above) in the database so it can
# be selected from the task interface.
if saveClassifier:
    ## Store a record of the data file in the database
    storage_dir = '/storage/decoders'
    # NOTE(review): os.popen spawns a shell just for mkdir -p; os.makedirs
    # would do the same without a subprocess.
    if not os.path.exists(storage_dir):
        os.popen('mkdir -p %s' % storage_dir)
    #pickle.dump(mov_classifier, open(os.path.join(storage_dir, pkl_name), 'wb'))
    # Create a new database record for the decoder object if it doesn't already exist
    dfs = models.Decoder.objects.filter(name=classifier_name)
    if len(dfs) == 0:
        df = models.Decoder()
        df.path = pkl_name
        df.name = classifier_name
        # Anchor the record to the earliest training task entry.
        df.entry = models.TaskEntry.objects.using(dbname).get(id=min(train_hdf_ids))
        # # if you recorded hdf files in another machine and you want to read them in a new machine and save the classfier in this new machine:
        # #df.entry = models.TaskEntry.objects.using(dbname).get(id=an_id_in_our_current_db_where_we_used_a_decoder)
        # dbname = 'default'
        # df.entry = models.TaskEntry.objects.using(dbname).get(id=3578)
        df.save()
    elif len(dfs) == 1:
        pass # no new data base record needed
    elif len(dfs) > 1:
        print "More than one classifier with the same name! fix manually!"
# # --------------------
|
{"hexsha": "91d6a57737d43be3b58435ee562602b339b52692", "size": 13103, "ext": "py", "lang": "Python", "max_stars_repo_path": "ismore/invasive/train_movs_emg_classifier.py", "max_stars_repo_name": "DerekYJC/bmi_python", "max_stars_repo_head_hexsha": "7b9cf3f294a33688db24b0863c1035e9cc6999ea", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ismore/invasive/train_movs_emg_classifier.py", "max_issues_repo_name": "DerekYJC/bmi_python", "max_issues_repo_head_hexsha": "7b9cf3f294a33688db24b0863c1035e9cc6999ea", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2020-07-31T18:58:31.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T14:36:00.000Z", "max_forks_repo_path": "ismore/invasive/train_movs_emg_classifier.py", "max_forks_repo_name": "DerekYJC/bmi_python", "max_forks_repo_head_hexsha": "7b9cf3f294a33688db24b0863c1035e9cc6999ea", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-03-06T15:39:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-26T17:03:21.000Z", "avg_line_length": 32.3530864198, "max_line_length": 191, "alphanum_fraction": 0.6901472945, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3805}
|
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
import time
import pickle
np.random.seed(32113)
def data_preparer_ensemble(df1, df2, df3, df4, lbl='word', countries=None,
                           words=None, sample=30000, limit=5000):
    '''
    Function:
        process dataframes so they can be used for xgboost, random forest and
        other ensemble methods. Prepares data either for the image
        recognition model (lbl='word') or the country-code prediction model
        (lbl='countrycode').
    Input:
        df1,2,3,4 = dataframes with different topics (cat,dog,lion,tiger) [dataframe]
        lbl = 'word' or 'countrycode' [str]
        countries = country codes of interest; defaults to ['US','BR','RU','KR'] [list]
        words = topic words of interest; defaults to ['cat','tiger','lion','dog'] [list]
        sample = max number of rows per topic (used when lbl='word') [int]
        limit = max number of rows per country (used when lbl='countrycode') [int]
    Output:
        new_df = non-label features of the model [dataframe]
        Y = integer-encoded label column [series]
        (returns None for an unrecognized lbl)
    note: uses the global NumPy RNG (np.random.seed(32113) at module import)
    '''
    # None sentinels avoid the shared-mutable-default-argument pitfall;
    # effective defaults are unchanged.
    if countries is None:
        countries = ['US', 'BR', 'RU', 'KR']
    if words is None:
        words = ['cat', 'tiger', 'lion', 'dog']
    # if running image recognition,
    if lbl == 'word':
        # runs _df_initial_fixer for each word to prepare dataframe
        df_test1 = _df_initial_fixer(df1, words[0], sample)
        df_test2 = _df_initial_fixer(df2, words[1], sample)
        df_test3 = _df_initial_fixer(df3, words[2], sample)
        df_test4 = _df_initial_fixer(df4, words[3], sample)
        # print() with one pre-formatted argument behaves identically under
        # Python 2 and 3 (same space-separated output as the old statement).
        print('{0} {1} {2} {3}'.format(len(df_test1), len(df_test2),
                                       len(df_test3), len(df_test4)))
        # combine all 4 dataframes; new_df will be the input for XGB.
        new_df = pd.concat([df_test1, df_test2, df_test3, df_test4], axis=0)
        # pop() removes the column as a side effect; countrycode is discarded.
        yd = new_df.pop('countrycode')
        Y = new_df.pop('word')
        # Y will be the label for the XGB model: word -> integer index.
        b_loon = {}
        for i in range(len(words)):
            b_loon[words[i]] = i
        Y = Y.map(b_loon)
        return new_df, Y
    # if running country prediction,
    elif lbl == 'countrycode':
        # runs _df_initial_fixer_cc for each word to prepare dataframe
        df_test1 = _df_initial_fixer_cc(df1, words[0])
        df_test2 = _df_initial_fixer_cc(df2, words[1])
        df_test3 = _df_initial_fixer_cc(df3, words[2])
        df_test4 = _df_initial_fixer_cc(df4, words[3])
        print('{0} {1} {2} {3}'.format(len(df_test1), len(df_test2),
                                       len(df_test3), len(df_test4)))
        new_df = pd.concat([df_test1, df_test2, df_test3, df_test4], axis=0)
        # filter dataframe by selected countries
        df_cf = new_df[(new_df['countrycode'] == countries[0]) |
                       (new_df['countrycode'] == countries[1]) |
                       (new_df['countrycode'] == countries[2]) |
                       (new_df['countrycode'] == countries[3])]
        print(len(df_cf))
        # Down-sample each country to at most `limit` rows.
        df_US = _country_initial_fixer(df_cf, countries[0], limit)
        df_BR = _country_initial_fixer(df_cf, countries[1], limit)
        df_RU = _country_initial_fixer(df_cf, countries[2], limit)
        df_KR = _country_initial_fixer(df_cf, countries[3], limit)
        print("number of images for US:{}, BR:{}, RU:{}, KR:{}\n"
              .format(len(df_US), len(df_BR), len(df_RU), len(df_KR)))
        # new_df will be the input DataFrame for XGBoost and Y the label.
        new_df = pd.concat([df_US, df_BR, df_RU, df_KR], axis=0)
        Y = new_df.pop('countrycode')
        b_loon = {}
        for i in range(len(countries)):
            b_loon[countries[i]] = i
        Y = Y.map(b_loon)
        # additional feature 'word': integer code of the image topic.
        b_loon2 = {'cat': 0, 'tiger': 1, 'lion': 2, 'dog': 3}
        new_df['word'] = new_df['word'].map(b_loon2)
        return new_df, Y  # ,df_US,df_BR,df_RU,df_KR
    else:
        # Unrecognized lbl: warn and fall through (implicitly returns None).
        print("set your lbl to 'word' or 'countrycode' ")
def _df_initial_fixer(df, word, sample=60000):
'''
function:
- ramdomly select rows (image) "sample" times from the df dataframe
and delete features that are not used in ensemble method modeling
input:
df = dataframe. output of 1_feature_engineering_func. [pd.dataframe]
word = name of topic ig "cat" [str]
sample = number of sample you want to extract from df [int]
output:
new data frame!
'''
print "total number of images for df_{}: {}".format(word, len(df))
random_index = np.random.choice(list(df.index), sample, replace=False)
df = df.loc[list(random_index)]
df_test = df.drop(['drawing','key_id','timestamp','recognized','X','Y','time',\
'X_per_stroke','Y_per_stroke','time_per_stroke',\
'total_time_of_stroke','dp_per_stroke','dp_percent_per_stroke',\
'direction'], axis=1)
return df_test
def _df_initial_fixer_cc(df, word):
'''
prepares training and test X and Y for xgboost test for countrycode classifier
function:
- delete features that are not used in ensemble method modeling
input:
df = dataframe. output of 1_feature_engineering_func. [pd.dataframe]
word = name of topic ig "cat" [str]
output:
new data frame!
'''
df_test = df.drop(['drawing','key_id','timestamp','recognized','X','Y','time',\
'X_per_stroke','Y_per_stroke','time_per_stroke',\
'total_time_of_stroke','dp_per_stroke','dp_percent_per_stroke',\
'direction'], axis=1)
return df_test
def _country_initial_fixer(df,country,limit):
'''
Function:
extracts data by country and ramdomly select "limit" amount of data from that dataset
Input:
df = dataframe (should contain 'countrycode' features) [dataframe]
country = should be 2 capital letter country code[string]
limit = max number of rows (data) you want to take into the new data frame
Output:
dataframe contains data from selected country (# of data <= limit)
note: uses random.seed(32113)
'''
if df[df['countrycode']==country].count()[0] > limit:
df_c = df[df['countrycode']==country]
random_c = np.random.choice(list(df_c.index), limit, replace=False)
df_c = df_c.loc[list(random_c)]
else:
df_c = df[df['countrycode']==country]
return df_c
|
{"hexsha": "1469c10d6ed151344546e73b2489c5514241c80e", "size": 6424, "ext": "py", "lang": "Python", "max_stars_repo_path": "model/quickdraw_dis_builder/python/ensemble_method_func.py", "max_stars_repo_name": "obastani/verifair", "max_stars_repo_head_hexsha": "1d5efea041330fa9fe8d59d976bdd3ef97aff417", "max_stars_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2019-11-05T20:40:40.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-16T03:13:54.000Z", "max_issues_repo_path": "model/quickdraw_dis_builder/python/ensemble_method_func.py", "max_issues_repo_name": "obastani/verifair", "max_issues_repo_head_hexsha": "1d5efea041330fa9fe8d59d976bdd3ef97aff417", "max_issues_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "model/quickdraw_dis_builder/python/ensemble_method_func.py", "max_forks_repo_name": "obastani/verifair", "max_forks_repo_head_hexsha": "1d5efea041330fa9fe8d59d976bdd3ef97aff417", "max_forks_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.900621118, "max_line_length": 103, "alphanum_fraction": 0.6421232877, "include": true, "reason": "import numpy", "num_tokens": 1685}
|
# import h5pyprovider
import numpy as np
import pickle
import os
import sys
from pointTriangleDistance import pointTriangleDistance
# Make the repository's sibling packages importable when this file is run as
# a script: the module's own directory, its parent, the repo root and utils/.
BASE_DIR = os.path.abspath(__file__+"/../")
ROOT_DIR = os.path.dirname(os.path.dirname(BASE_DIR))
sys.path.append(BASE_DIR) # model
sys.path.append(os.path.dirname(BASE_DIR)) # parent of model (comment said "model" twice; presumably the parent dir -- confirm)
sys.path.append(ROOT_DIR) # provider
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
from show3d_balls import showpoints
'''
Positions x, y, z are in meters; x_theta, y_theta, z_theta are angles in
radians.
This function builds the transformation matrix for any generated canonical
shape: rotations about the x, y and z axes composed with a translation.
'''
def get_transfer_matrix( x, y, z, x_theta, y_theta, z_theta):
    '''
    Build the 4x4 homogeneous transform composed of rotations about the x, y
    and z axes (angles in radians) and a translation by (x, y, z) meters,
    multiplied in the order Rx . Ry . Rz . T.
    '''
    cos_x, sin_x = np.cos(x_theta), np.sin(x_theta)
    cos_y, sin_y = np.cos(y_theta), np.sin(y_theta)
    cos_z, sin_z = np.cos(z_theta), np.sin(z_theta)
    rot_x = np.asarray([
        [1.0, 0.0, 0.0, 0.0],
        [0.0, cos_x, -sin_x, 0.0],
        [0.0, sin_x, cos_x, 0.0],
        [0.0, 0.0, 0.0, 1.0]
    ], dtype=float)
    rot_y = np.asarray([
        [cos_y, 0.0, sin_y, 0.0],
        [0.0, 1.0, 0.0, 0.0],
        [-sin_y, 0.0, cos_y, 0.0],
        [0.0, 0.0, 0.0, 1.0]
    ], dtype=float)
    rot_z = np.asarray([
        [cos_z, -sin_z, 0.0, 0.0],
        [sin_z, cos_z, 0.0, 0.0],
        [0.0, 0.0, 1.0, 0.0],
        [0.0, 0.0, 0.0, 1.0]
    ], dtype=float)
    shift = np.asarray([
        [1.0, 0.0, 0.0, x],
        [0.0, 1.0, 0.0, y],
        [0.0, 0.0, 1.0, z],
        [0.0, 0.0, 0.0, 1.0]
    ], dtype=float)
    # Same composition order as the original implementation.
    return rot_x.dot(rot_y).dot(rot_z).dot(shift)
'''
Generate cube surface, pick position from 6 faces separately
Each faces we sample the point from a uniform grid, then add some random noise to the position
however, the point would be still strictly on the cube's surface.
keep_prob means the probability that we would not skip this point.
x_theta, etc. is arc angle
'''
def gen_cube_surface(center_x, center_y, center_z, l, w, h, x_theta, y_theta, z_theta,
                     unit_density = 1000, keep_prob = 1):
    '''
    Sample a point cloud from the six faces of an l x w x h cuboid centered
    at (center_x, center_y, center_z) with rotation (x_theta, y_theta,
    z_theta) in radians. Points come from a uniform grid on each face plus
    uniform jitter of at most one grid step; each candidate is kept with
    probability keep_prob. Returns an (N, 3) array (homogeneous coordinate
    stripped after the transform).
    '''
    point_cloud = []
    # Grid step from the per-unit-volume sample density.
    den_step = 1.0 / np.power(unit_density,(1/3.0))
    # generate a transformation matrix
    tran_mat = get_transfer_matrix(center_x, center_y, center_z, x_theta, y_theta, z_theta)
    # Faces z = -h/2 and z = +h/2: jitter x and y, z stays exact.
    # NOTE(review): the clamps are min(l, .) / min(w, .), not min(l/2, .) /
    # min(w/2, .), so jittered coordinates can exceed the face half-extent by
    # up to one grid step -- confirm whether this slack is intended.
    for i in np.arange(-l/2.0, l/2.0, den_step):
        for j in np.arange(-w / 2.0, w / 2.0, den_step):
            if np.random.uniform(0, 1) <= keep_prob:
                # add some random noise to x, y
                point_cloud.append([min(l,i + np.random.uniform(0,den_step)),
                                    min(w, j + np.random.uniform(0, den_step)), -h / 2, 1])
            if np.random.uniform(0, 1) <= keep_prob:
                point_cloud.append([min(l, i + np.random.uniform(0, den_step)),
                                    min(w, j + np.random.uniform(0, den_step)), h / 2, 1])
    # Faces y = -w/2 and y = +w/2: jitter x and z, y stays exact.
    for i in np.arange(-l/2.0, l/2.0, den_step):
        for k in np.arange(-h / 2.0, h / 2.0, den_step):
            if np.random.uniform(0, 1) <= keep_prob:
                # add some random noise to x, z
                point_cloud.append([min(l, i + np.random.uniform(0, den_step)), -w / 2,
                                    min(h, k + np.random.uniform(0, den_step)), 1])
            if np.random.uniform(0, 1) <= keep_prob:
                point_cloud.append([min(l, i + np.random.uniform(0, den_step)), w / 2,
                                    min(h, k + np.random.uniform(0, den_step)), 1])
    # Faces x = -l/2 and x = +l/2: jitter y and z, x stays exact.
    for j in np.arange(-w/2.0, w/2.0, den_step):
        for k in np.arange(-h / 2.0, h / 2.0, den_step):
            if np.random.uniform(0, 1) <= keep_prob:
                # add some random noise to y, z
                point_cloud.append([-l/2, min(w, j + np.random.uniform(0, den_step)),
                                    min(h, k + np.random.uniform(0, den_step)), 1])
            if np.random.uniform(0, 1) <= keep_prob:
                point_cloud.append([l/2, min(w, j + np.random.uniform(0, den_step)),
                                    min(h, k + np.random.uniform(0, den_step)), 1])
    # apply homogeneous transformation
    point_cloud = tran_mat.dot(np.transpose(point_cloud))
    return np.transpose(point_cloud[:3,:])
'''
Generate sphere surface, first pick u and a angle uniformly
please refer to http://mathworld.wolfram.com/SpherePointPicking.html
We also add noise to u and the angle, however the points would be still strictly on the sphere
x_theta, etc. is arc angle
'''
def gen_sphere_surface(center_x, center_y, center_z, r, unit_density = 1000, keep_prob = 1):
    '''
    Sample a point cloud from a sphere of radius r centered at (center_x,
    center_y, center_z) using the (u, theta) sphere-point-picking
    parameterization (see mathworld "Sphere Point Picking"): u in [-1, 1]
    maps to z/r, theta in [0, 2*pi) is the azimuth. Jitter is added to u and
    theta, so the points stay exactly on the sphere. Each candidate is kept
    with probability keep_prob. Returns an (N, 3) array.
    '''
    point_cloud = []
    # step of u
    den_u_step = 1.0 / np.power(unit_density,(1/3.0)) / r
    # step of angle
    den_angle_step = 2 * np.pi / np.power(unit_density,(2/3.0)) / r ** 2
    # print den_u_step, den_angle_step
    # Rotation is irrelevant for a sphere, so only the translation is used.
    tran_mat = get_transfer_matrix(center_x, center_y, center_z, 0, 0, 0)
    for u in np.arange(-1, 1, den_u_step):
        for theta in np.arange(0, 2 * np.pi, den_angle_step):
            if np.random.uniform(0, 1) <= keep_prob:
                # add jitter, clamped so u <= 1 and theta <= 2*pi
                ur = min(1,u + np.random.uniform(0, den_u_step))
                thetar = min(2* np.pi,theta + np.random.uniform(0, den_angle_step))
                point_cloud.append([r * np.power((1-np.power(ur,2)),0.5) * np.cos(thetar),
                                    r * np.power((1-np.power(ur,2)),0.5) * np.sin(thetar),
                                    r * ur, 1])
    point_cloud = np.asarray(point_cloud, dtype=float)
    # apply homogeneous transformation
    point_cloud = tran_mat.dot(np.transpose(point_cloud))
    return np.transpose(point_cloud[:3,:])
'''
Generate a triangle-pyramid (tetrahedron) surface: sample candidate points
from a jittered grid over the bounding cube, then keep a point only if its
minimal distance to the nearest of the four faces is within a threshold, so
the kept points lie close to the pyramid's surface.
x_theta, etc. are angles in radians.
'''
def gen_triangle_pyramid(center_x, center_y, center_z, x_theta, y_theta, z_theta,
                         a, b, c, d, threshold = 0.1 ,unit_density = 1000, keep_prob = 1):
    '''
    Sample a point cloud near the surface of the tetrahedron with vertices
    a, b, c, d (homogeneous 4-vectors). Candidates are drawn from a jittered
    grid over the vertices' bounding box and kept (with probability
    keep_prob) when their minimal distance to one of the four faces is
    <= threshold. The cloud is then posed by (center, x/y/z_theta).
    Returns an (N, 3) array.
    '''
    point_cloud = []
    den_step = 1.0 / np.power(unit_density, (1 / 3.0))
    tran_mat = get_transfer_matrix(center_x, center_y, center_z, x_theta, y_theta, z_theta)
    # Fixed pre-rotation (pi/3 about each axis) applied to the vertices
    # before sampling.
    pre_tran_mat = get_transfer_matrix(0, 0, 0, np.pi / 3.0, np.pi / 3.0, np.pi / 3.0)
    # print pre_tran_mat
    a = np.transpose(pre_tran_mat.dot(np.transpose(a))[:3])
    b = np.transpose(pre_tran_mat.dot(np.transpose(b))[:3])
    c = np.transpose(pre_tran_mat.dot(np.transpose(c))[:3])
    d = np.transpose(pre_tran_mat.dot(np.transpose(d))[:3])
    # 4 triangle surfaces
    TRIabc = np.asarray([a, b, c])
    TRIabd = np.asarray([a, b, d])
    TRIacd = np.asarray([a, c, d])
    TRIbcd = np.asarray([b, c, d])
    # Scan the axis-aligned bounding box of the transformed vertices.
    for i in np.arange(min(a[0], b[0], c[0], d[0]), max(a[0], b[0], c[0], d[0]), den_step):
        for j in np.arange(min(a[1], b[1], c[1], d[1]), max(a[1], b[1], c[1], d[1]), den_step):
            for k in np.arange(min(a[2], b[2], c[2], d[2]), max(a[2], b[2], c[2], d[2]), den_step):
                if np.random.uniform(0, 1) <= keep_prob:
                    # add random noise to x, y, z (clamped to the box maxima)
                    ir = min(max(a[0], b[0], c[0], d[0]) ,i + np.random.uniform(0, den_step))
                    jr = min(max(a[1], b[1], c[1], d[1]) ,j + np.random.uniform(0, den_step))
                    kr = min(max(a[2], b[2], c[2], d[2]) ,k + np.random.uniform(0, den_step))
                    p = np.asarray([ir,jr,kr])
                    # calculate the point and surface's distance, find the minimum
                    dis_min = np.min([pointTriangleDistance(TRIabc, p)[0],
                                      pointTriangleDistance(TRIabd, p)[0],
                                      pointTriangleDistance(TRIacd, p)[0],
                                      pointTriangleDistance(TRIbcd, p)[0]])
                    if dis_min <= threshold:
                        point_cloud.append([ir, jr, kr, 1])
    point_cloud = np.asarray(point_cloud, dtype=float)
    # apply homogeneous transformation
    point_cloud = tran_mat.dot(np.transpose(point_cloud))
    return np.transpose(point_cloud[:3, :])
'''
Return True when point B is at least distance R (L2 norm) away from every
point in As, i.e. B is "sparse" with respect to the existing shape centers.
'''
def is_sparse(As, B, R):
    '''
    Return True when candidate point B keeps at least L2 distance R from
    every point in As (vacuously True for an empty As).
    '''
    target = np.asarray(B)
    return all(np.linalg.norm(np.asarray(existing) - target) >= R
               for existing in As)
'''
function to generate individual scene
L, W, H is the scene's space range
ratio is the geometric scale of shape to the space range
min_num and max_num are minimal number of shapes and maximal number of shapes
unit_density is the sample density per unit volume
keep_prob is the probability the sample point would be kept
Sparse is to indicate whether we allowed two shapes to be close or even intersect
'''
def gen_scene(L, W, H, ratio, min_num, max_num, unit_density = 1000, keep_prob = 0.95, sparse=True):
    """Generate one random scene of cubes, spheres and triangle pyramids.

    L, W, H: extents of the scene volume (shapes are centered inside it).
    ratio: geometric scale of a shape relative to the smallest extent.
    min_num, max_num: bounds on the shape count (np.random.randint -> upper
        bound is exclusive).
    unit_density: sample density per unit volume, forwarded to the samplers.
    keep_prob: probability each sampled surface point is kept.
    sparse: if True, reject candidate centers closer than 3 * R to any
        previously placed shape.

    Returns (point_cloud, labels): an (N, 3) array of surface samples and an
    (N,) label array with 0 = cube, 1 = sphere, 2 = pyramid.
    """
    point_cloud = np.array([], dtype=np.float32).reshape(0,3)
    labels = np.array([], dtype=np.float32).reshape(0)
    centers = []
    # largest r
    R = np.min([L,W,H]) * ratio
    for i in range(np.random.randint(min_num, max_num)):
        # per-shape radius and a uniform draw deciding the shape type below
        r = np.random.uniform(R/1.5, R)
        choice = np.random.uniform(0,1)
        count = 0
        while True:
            # NOTE(review): exit() is the site-module helper; it aborts the
            # whole process after 1000 failed placement attempts.
            if count > 1000: exit("cant't find a sparse solution, please reduce num of shapes or shape geometric ratio")
            count +=1
            # center is drawn so the bounding sphere stays inside the volume
            center_x = np.random.uniform(-L / 2.0 + r, L / 2.0 - r)
            center_y = np.random.uniform(-W / 2.0 + r, W / 2.0 - r)
            center_z = np.random.uniform(-H / 2.0 + r, H / 2.0 - r)
            if not sparse: break
            # whether it's the first shape or it's distant enough to other shapes
            if len(centers) == 0 or is_sparse(centers, [center_x, center_y, center_z], R * 3):
                centers.append([center_x, center_y, center_z])
                break
        # random orientation about each axis
        x_theta = np.random.uniform(0, 2*np.pi)
        y_theta = np.random.uniform(0, 2*np.pi)
        z_theta = np.random.uniform(0, 2*np.pi)
        # 33% to be a cube
        if choice < 1 / 3.0:
            l = 2 * r * np.random.uniform(0.3, 1)
            w = 2 * r * np.random.uniform(0.3, 1)
            h = 2 * r * np.random.uniform(0.3, 1)
            cube_point = gen_cube_surface(center_x, center_y, center_z,
                       l, w, h, x_theta, y_theta, z_theta, unit_density=unit_density, keep_prob=keep_prob)
            point_cloud = np.concatenate([point_cloud, cube_point], axis= 0)
            labels = np.concatenate([labels, np.zeros(cube_point.shape[0], dtype=int)],axis=0)
        # 33% to be a sphere
        elif choice < 2 / 3.0:
            sphere_point = gen_sphere_surface(center_x, center_y, center_z,
                           r * np.random.uniform(0.7, 1), unit_density=unit_density, keep_prob=keep_prob)
            point_cloud = np.concatenate([point_cloud, sphere_point], axis= 0)
            labels = np.concatenate([labels, np.ones(sphere_point.shape[0], dtype=int)], axis=0)
        else:
            # 33% to be a triangle pyramid
            # vertices of a regular tetrahedron of circumradius r, in
            # homogeneous coordinates (4th component = 1)
            a = [0, - 3**0.5 / 2.0 * r, -1 / 3.0 * r, 1]
            b = [6**0.5/3 * r, 2**0.5 / 3.0 * r, -1 / 3.0 * r, 1]
            c = [-6**0.5/3 * r, 2**0.5 / 3.0 * r, -1 / 3.0 * r, 1]
            d = [0, 0, r, 1]
            pyramid_point = gen_triangle_pyramid(center_x, center_y, center_z, x_theta, y_theta,
                            z_theta, a, b, c, d, threshold=0.02, unit_density=unit_density*1.4, keep_prob=keep_prob)
            point_cloud = np.concatenate([point_cloud, pyramid_point], axis= 0)
            labels = np.concatenate([labels, 2 * np.ones(pyramid_point.shape[0], dtype=int)], axis=0)
    return point_cloud, labels
#
'''
generate whole dataset and save it to pickle
'''
def gen_data_set(name, num, keep_prob_min, keep_prob_max, min_num, max_num,
                 ratio, L, W, H, unit_density = 1000, sparse=True):
    """Generate *num* random scenes and pickle them to data/primatives/<name>.pickle.

    Two pickle records are written to the same file, in order: the list of
    point clouds, then the list of per-point label arrays. keep_prob is drawn
    uniformly from [keep_prob_min, keep_prob_max] for each scene; the other
    parameters are forwarded to gen_scene().
    """
    data_file = name + ".pickle"
    out_path = os.path.join(ROOT_DIR, 'data/primatives/') + data_file
    print(out_path)
    cloud = []
    labels = []
    # BUG FIX: pickle streams are binary — text mode ('w') corrupts the dump
    # on Windows and raises TypeError under Python 3. Open in 'wb'.
    with open(out_path, 'wb') as fp:
        for i in range(num):
            keep_prob = np.random.uniform(keep_prob_min, keep_prob_max)
            cloud_point, scene_labels = gen_scene(L, W, H, ratio, min_num,
                       max_num, unit_density=unit_density, keep_prob=keep_prob, sparse=sparse)
            cloud.append(cloud_point)
            labels.append(scene_labels)
            # progress indicator (one line per generated scene)
            print(i)
        pickle.dump(cloud, fp)
        pickle.dump(labels, fp)
if __name__=='__main__':
    # visualize one scene, coloring each point by its ground-truth label
    point_cloud, batch_label = gen_scene(1.5, 1.5, 3.0, 0.2, 10, 20, unit_density = 100000, keep_prob = 0.9, sparse=False)
    # BUG FIX: Python-2-only `print` statement replaced with a single-argument
    # print() call, which parses and behaves the same under Python 2 and 3.
    print("len(point_cloud) {} {}".format(len(point_cloud), batch_label))
    c_gt = np.zeros((batch_label.shape[0], 3))
    # turquoise = cube (0), crimson = sphere (1), green-yellow = pyramid (2)
    color_list = np.asarray([[64, 224, 208], [220, 20, 60], [173, 255, 47]])
    for i in range(batch_label.shape[0]):
        c_gt[i,:] = color_list[int(batch_label[i]),:]
    showpoints(point_cloud, c_gt = c_gt, normalizecolor=False)
    # generate dataset
    # gen_data_set("prim_train_overlaps_20", 2000, 0.9, 1, 10, 20, 0.2, 1.5, 1.5, 3.0, unit_density = 100000, sparse=False)
    # gen_data_set("prim_test_overlaps_20", 500, 0.9, 1, 10, 20, 0.2, 1.5, 1.5, 3.0, unit_density = 100000, sparse=False)
|
{"hexsha": "e1e92e60caab274e04699213e8464345dd4d7ea1", "size": 13350, "ext": "py", "lang": "Python", "max_stars_repo_path": "primative_seg/pre_process/generate_primative.py", "max_stars_repo_name": "Xharlie/core3d_point_net", "max_stars_repo_head_hexsha": "d1e520ddbcda4539a90f3cc51ebdc9660a79c78f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "primative_seg/pre_process/generate_primative.py", "max_issues_repo_name": "Xharlie/core3d_point_net", "max_issues_repo_head_hexsha": "d1e520ddbcda4539a90f3cc51ebdc9660a79c78f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "primative_seg/pre_process/generate_primative.py", "max_forks_repo_name": "Xharlie/core3d_point_net", "max_forks_repo_head_hexsha": "d1e520ddbcda4539a90f3cc51ebdc9660a79c78f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.5681818182, "max_line_length": 123, "alphanum_fraction": 0.5883146067, "include": true, "reason": "import numpy", "num_tokens": 3892}
|
#!/usr/bin/env python
"""
xvg_plot.py
Python script to plot XVG line charts produced by GROMACS analysis tools.
Requires:
* python2.7+
* matplotlib
* numpy
"""
from __future__ import print_function, division
__author__ = 'Joao Rodrigues'
__email__ = 'j.p.g.l.m.rodrigues@gmail.com'
import os
import re
import shlex
import sys
try:
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
except ImportError as e:
print('[!] The required Python libraries could not be imported:', file=sys.stderr)
print('\t{0}'.format(e))
sys.exit(1)
##
def parse_xvg(fname, sel_columns='all'):
    """Parses XVG file legends and data.

    Args:
        fname: path to the .xvg file.
        sel_columns: 'all' or an iterable of 1-based data-column indices to
            keep (the x-axis column is always kept).

    Returns:
        (metadata, num_data): metadata holds title/axis/series labels;
        num_data is a list of column series with num_data[0] the x-axis.

    Raises:
        IOError: if fname is not a readable file.
        ValueError: if the declared chart TYPE is not 'xy'.
    """
    _ignored = set(('legend', 'view'))
    _re_series = re.compile('s[0-9]+$')
    _re_xyaxis = re.compile('[xy]axis$')

    metadata = {}
    num_data = []

    metadata['labels'] = {}
    metadata['labels']['series'] = []

    ff_path = os.path.abspath(fname)
    if not os.path.isfile(ff_path):
        raise IOError('File not readable: {0}'.format(ff_path))

    with open(ff_path, 'r') as fhandle:
        for line in fhandle:
            line = line.strip()
            # BUG FIX: blank lines previously raised IndexError on line[0]
            if not line:
                continue
            if line.startswith('@'):
                tokens = shlex.split(line[1:])
                if tokens[0] in _ignored:
                    continue
                elif tokens[0] == 'TYPE':
                    if tokens[1] != 'xy':
                        raise ValueError('Chart type unsupported: \'{0}\'. Must be \'xy\''.format(tokens[1]))
                elif _re_series.match(tokens[0]):
                    metadata['labels']['series'].append(tokens[-1])
                elif _re_xyaxis.match(tokens[0]):
                    metadata['labels'][tokens[0]] = tokens[-1]
                elif len(tokens) == 2:
                    metadata[tokens[0]] = tokens[1]
                else:
                    print('Unsupported entry: {0} - ignoring'.format(tokens[0]), file=sys.stderr)
            # BUG FIX: also accept data lines starting with a sign or decimal
            # point (negative values were silently dropped before).
            elif line[0].isdigit() or (line[0] in '-+.' and len(line) > 1 and line[1].isdigit()):
                # BUG FIX: materialize the floats; under Python 3 map() is a
                # one-shot iterator, which broke len()/indexing further down.
                num_data.append([float(entry) for entry in line.split()])

    # transpose row records into per-column series
    # BUG FIX: list(...) — a bare zip object has no len() and cannot be
    # indexed under Python 3.
    num_data = list(zip(*num_data))

    if not metadata['labels']['series']:
        for series in range(len(num_data) - 1):
            metadata['labels']['series'].append('')

    # Column selection if asked
    if sel_columns != 'all':
        # BUG FIX: list() so the selection can be iterated twice below
        # (a map() iterator was exhausted after the first comprehension).
        sel_columns = list(map(int, sel_columns))
        x_axis = num_data[0]
        num_data = [x_axis] + [num_data[col] for col in sel_columns]
        metadata['labels']['series'] = [metadata['labels']['series'][col - 1] for col in sel_columns]

    return metadata, num_data
def running_average(data, metadata, window=10):
    """
    Append a running-average series for every data series.

    data[0] is assumed to be the x-axis and is left untouched. For each
    remaining series an averaged copy (numpy 'valid' convolution with a
    uniform window) is appended to *data*, and a matching '<label> (Av)'
    entry is appended to the series labels. Both arguments are mutated in
    place and returned.
    """
    kernel = np.ones(window) / window
    series_labels = metadata['labels']['series']

    averaged = [np.convolve(series, kernel, 'valid') for series in data[1:]]
    series_labels.extend('{0} (Av)'.format(lbl)
                         for lbl in series_labels[:len(averaged)])
    data.extend(averaged)

    return metadata, data
def plot_data(data, metadata, window=1, interactive=True, outfile=None,
              colormap='Set1', bg_color='lightgray'):
    """
    Plotting function.

    data[0] is the x-axis; every other entry is plotted as a line. Series
    whose label ends in '(Av)' are running averages that are (window - 1)
    samples shorter, so a re-centred x-axis is rebuilt for them.
    Saves to *outfile* if given and/or shows the figure if *interactive*.
    """
    n_series = len(data) - 1

    f = plt.figure()
    ax = plt.gca()

    color_map = getattr(plt.cm, colormap)
    color_list = color_map(np.linspace(0, 1, n_series))

    for i, series in enumerate(data[1:]):
        label = metadata['labels']['series'][i]
        # Adjust x-axis for running average series
        if label.endswith('(Av)'):
            # NOTE(review): assumes a uniform x step; np.arange with float
            # steps can be off by one sample versus len(series) — confirm.
            x_step = (data[0][1] - data[0][0])
            x_window = (window * x_step) / 2
            x_start = data[0][0] + x_window - x_step
            x_end = data[0][-1] - x_window + x_step
            x_data = np.arange(x_start, x_end, x_step)
        else:
            x_data = data[0]
        ax.plot(x_data, series, c=color_list[i], label=label)

    # Formatting Labels & Appearance
    ax.set_xlabel(metadata['labels'].get('xaxis', ''))
    ax.set_ylabel(metadata['labels'].get('yaxis', ''))
    ax.set_title(metadata.get('title', ''))
    # BUG FIX: Axes.set_axis_bgcolor() was removed in matplotlib 2.2;
    # use set_facecolor() when available and fall back on old versions.
    if hasattr(ax, 'set_facecolor'):
        ax.set_facecolor(bg_color)
    else:
        ax.set_axis_bgcolor(bg_color)
    # BUG FIX: grid('on') relied on a deprecated string argument;
    # modern matplotlib expects a boolean.
    ax.grid(True)

    try:
        legend = ax.legend()
        frame = legend.get_frame()
        frame.set_facecolor(bg_color)
    except AttributeError as e:
        # No legend, likely because no labels
        pass

    if outfile:
        plt.savefig(outfile)

    if interactive:
        plt.show()

    return
##
if __name__ == '__main__':
    # Command-line entry point: parse arguments, read the XVG file, optionally
    # smooth the series, then plot to screen or PDF.
    import argparse
    from argparse import RawDescriptionHelpFormatter
    ap = argparse.ArgumentParser(description=__doc__, formatter_class=RawDescriptionHelpFormatter)
    ap.add_argument('xvg_f', type=str, help='XVG input file', metavar='XVG input file')
    # exactly one of -o/-i must be given: save to file OR open a window
    io_group = ap.add_mutually_exclusive_group(required=True)
    io_group.add_argument('-o', '--output', type=str, help='PDF output file')
    io_group.add_argument('-i', '--interactive', action='store_true',
                          help='Launches an interactive matplotlib session')
    ana_group = ap.add_argument_group('Data Analysis')
    ana_group.add_argument('-s', '--selection', type=str, default='all', nargs='+',
                           help='Selects particular data series from xvg file.')
    ana_group.add_argument('-a', '--average', action='store_true',
                           help='Smoothes each series using a running average')
    ana_group.add_argument('-w', '--window', type=int, default=10,
                           help='Window size for the running average calculation [Default: 10]')
    ot_group = ap.add_argument_group('Other Options')
    ot_group.add_argument('-c', '--colormap', default='Set1',
                          help='Range of colors used for each series in the plot. For a list of all\
                          available colormaps refer to \
                          matplotlib.org/examples/color/colormaps_reference.html')
    ot_group.add_argument('-b', '--background-color', default='lightgray',
                          help='Background color used in the plot. For a list of all available \
                          colors refer to \
                          matplotlib.org/examples/color/named_colors.html')
    cmd = ap.parse_args()
    # parse, report series statistics, optionally smooth, then plot
    metadata, data = parse_xvg(cmd.xvg_f, cmd.selection)
    n_series = len(data[1:])
    n_elements = sum(map(len, data[1:]))
    print('[+] Read {0} series of data ({1} elements)'.format(n_series, n_elements))
    if cmd.average:
        print('[+] Calculating Running Averages (window size = {0})'.format(cmd.window))
        metadata, data = running_average(data, metadata, window=cmd.window)
    plot_data(data, metadata,
              window=cmd.window,
              interactive=cmd.interactive, outfile=cmd.output,
              colormap=cmd.colormap, bg_color=cmd.background_color)
|
{"hexsha": "e070423d0c7d1b9072930d8531b4fb8663cfdf0a", "size": 7013, "ext": "py", "lang": "Python", "max_stars_repo_path": "xvg_plot.py", "max_stars_repo_name": "JoaoRodrigues/gmx-tools", "max_stars_repo_head_hexsha": "3bf12e447bd1efa5f02a4eb88753075fd92ad60b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2017-12-02T11:44:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-16T23:20:39.000Z", "max_issues_repo_path": "xvg_plot.py", "max_issues_repo_name": "JoaoRodrigues/gmx-tools", "max_issues_repo_head_hexsha": "3bf12e447bd1efa5f02a4eb88753075fd92ad60b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "xvg_plot.py", "max_forks_repo_name": "JoaoRodrigues/gmx-tools", "max_forks_repo_head_hexsha": "3bf12e447bd1efa5f02a4eb88753075fd92ad60b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2017-01-25T10:22:39.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-11T21:35:10.000Z", "avg_line_length": 33.8792270531, "max_line_length": 109, "alphanum_fraction": 0.5909026094, "include": true, "reason": "import numpy", "num_tokens": 1638}
|
import os
import pprint
import random
import warnings
import torch
import numpy as np
from trainer import Trainer, Tester
from inference import Inference
from config import getConfig
warnings.filterwarnings('ignore')
args = getConfig()
def main(args):
    """Entry point: print the config, seed all RNG sources for
    reproducibility, then dispatch on ``args.action`` ('train', 'test',
    or anything else for inference)."""
    print('<---- Training Params ---->')
    pprint.pprint(args)

    # Random Seed — seed every RNG source and force deterministic cuDNN so
    # runs are reproducible.
    seed = args.seed
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # if use multi-GPU
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    # The experiment directory is identical for every action — compute it once
    # (it was previously duplicated verbatim in all three branches).
    save_path = os.path.join(args.model_path, args.dataset, f'TE{args.arch}_{str(args.exp_num)}')

    if args.action == 'train':
        # Create model directory
        os.makedirs(save_path, exist_ok=True)
        Trainer(args, save_path)

    elif args.action == 'test':
        # evaluate the trained model on every benchmark dataset in turn
        datasets = ['DUTS', 'DUT-O', 'HKU-IS', 'ECSSD', 'PASCAL-S']
        for dataset in datasets:
            args.dataset = dataset
            test_loss, test_mae, test_maxf, test_avgf, test_s_m = Tester(args, save_path).test()

            print(f'Test Loss:{test_loss:.3f} | MAX_F:{test_maxf:.4f} '
                  f'| AVG_F:{test_avgf:.4f} | MAE:{test_mae:.4f} | S_Measure:{test_s_m:.4f}')
    else:
        print('<----- Initializing inference mode ----->')
        Inference(args, save_path).test()
if __name__ == '__main__':
    # `args` is parsed once at import time by getConfig() above
    main(args)
|
{"hexsha": "6c3b6d26c5caba7e7492da7403dfd0a3175841d9", "size": 1753, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "Karel911/TRACER", "max_stars_repo_head_hexsha": "bedc653c3b725cb7e2dd6736f55911b4d24fb246", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 55, "max_stars_repo_stars_event_min_datetime": "2021-12-15T12:49:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T07:25:51.000Z", "max_issues_repo_path": "main.py", "max_issues_repo_name": "Karel911/TRACER", "max_issues_repo_head_hexsha": "bedc653c3b725cb7e2dd6736f55911b4d24fb246", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 15, "max_issues_repo_issues_event_min_datetime": "2021-12-17T03:18:41.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T06:44:17.000Z", "max_forks_repo_path": "main.py", "max_forks_repo_name": "Karel911/TRACER", "max_forks_repo_head_hexsha": "bedc653c3b725cb7e2dd6736f55911b4d24fb246", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2022-01-17T07:03:21.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T19:41:33.000Z", "avg_line_length": 31.8727272727, "max_line_length": 102, "alphanum_fraction": 0.6303479749, "include": true, "reason": "import numpy", "num_tokens": 441}
|
import numpy as np
import pandas as pd
import sklearn.decomposition
import sklearn.impute
import time
import torch
import kernels
import gaussian_process_latent_variable_model
from utils import transform_forward, transform_backward
import bayesian_optimization
torch.set_default_tensor_type(torch.FloatTensor)
fn_data = 'all_normalized_accuracy_with_pipelineID.csv'
fn_train_ix = 'ids_train.csv'
fn_test_ix = 'ids_test.csv'
fn_data_feats = 'data_feats_featurized.csv'
def get_data():
    """
    returns the train/test splits of the dataset as N x D matrices and the
    train/test dataset features used for warm-starting bayesian_optimization as D x F matrices.
    N is the number of pipelines, D is the number of datasets (in train/test),
    and F is the number of dataset features.
    """
    df = pd.read_csv(fn_data)
    # column headers after the index column are the dataset ids
    # (removed the dead `pipeline_ids` local and folded the two-step
    # string->int id conversion into one comprehension)
    dataset_ids = [int(c) for c in df.columns.tolist()[1:]]
    Y = df.values[:, 1:].astype(np.float64)

    ids_train = np.loadtxt(fn_train_ix).astype(int).tolist()
    ids_test = np.loadtxt(fn_test_ix).astype(int).tolist()

    # map each split's dataset ids onto column positions of Y
    ix_train = [dataset_ids.index(i) for i in ids_train]
    ix_test = [dataset_ids.index(i) for i in ids_test]
    Ytrain = Y[:, ix_train]
    Ytest = Y[:, ix_test]

    # dataset features live in a separate file keyed by the first column
    df = pd.read_csv(fn_data_feats)
    dataset_ids = df[df.columns[0]].tolist()
    ix_train = [dataset_ids.index(i) for i in ids_train]
    ix_test = [dataset_ids.index(i) for i in ids_test]
    Ftrain = df.values[ix_train, 1:]
    Ftest = df.values[ix_test, 1:]

    return Ytrain, Ytest, Ftrain, Ftest
def train(model, optimizer, f_callback=None, f_stop=None):
    """Run the optimization loop until *f_stop* returns True or the user
    hits Ctrl-C.

    Each iteration: zero gradients, evaluate ``model()`` (the negative log
    likelihood), backpropagate, and step *optimizer*. After every step the
    optional callbacks receive ``(model, nll, iteration, elapsed_seconds)``.
    Returns the (mutated) model.
    """
    iteration = 0
    while True:
        try:
            tic = time.time()

            optimizer.zero_grad()               # reset accumulated gradients
            loss = model()                      # forward pass -> NLL
            loss.backward()                     # compute gradients
            optimizer.step()                    # parameter update

            iteration += 1
            elapsed = time.time() - tic

            if f_callback is not None:
                f_callback(model, loss, iteration, elapsed)

            # f_stop should not be a substantial portion of total iteration time
            if f_stop is not None and f_stop(model, loss, iteration, elapsed):
                break
        except KeyboardInterrupt:
            break

    return model
def bayesian_optimization_search(model, bo_n_init, bo_n_iterations, Ytrain, Ftrain, ftest, ytest, do_print=False):
    """
    Initializes BayesianOptimization with L1 warm-start (using dataset features). Returns a
    numpy array of length bo_n_iterations holding the best performance attained
    so far per iteration (including initialization).

    bo_n_iterations includes initialization iterations, i.e., after warm-start, BayesianOptimization
    will run for bo_n_iterations - bo_n_init iterations.
    """
    predictions = bayesian_optimization.BayesianOptimization(model.dim, model.kernel, bayesian_optimization.expected_improvement,
                                                             variance=transform_forward(model.variance))
    # candidate pool: only pipelines with an observed (non-NaN) accuracy
    ix_evaluated = []
    ix_candidates = np.where(np.invert(np.isnan(ytest)))[0].tolist()
    ybest_list = []

    def _process_ix(ix, predictions, model, ytest, ix_evaluated, ix_candidates):
        # record an observation and retire the pipeline from the pool
        predictions.add(model.X[ix], ytest[ix])
        ix_evaluated.append(ix)
        ix_candidates.remove(ix)

    def _print_status(ix, bo_iteration, ytest, ybest, do_print):
        if do_print:
            print('Iteration: %d, %g [%d], Best: %g' % (bo_iteration, ytest[ix], ix, ybest))

    # warm-start order: L1 nearest training datasets by meta-features
    ix_init = bayesian_optimization.init_l1(Ytrain, Ftrain, ftest).tolist()
    for bo_iteration in range(bo_n_init):
        ix = ix_init[bo_iteration]
        # a warm-start pick may be NaN on this dataset; skip the observation
        # but still record the best-so-far for this iteration
        if not np.isnan(ytest[ix]):
            _process_ix(ix, predictions, model, ytest, ix_evaluated, ix_candidates)
        ybest = predictions.ybest
        if ybest is None:
            ybest = np.nan
        ybest_list.append(ybest)
        _print_status(ix, bo_iteration, ytest, ybest, do_print)

    # BO proper: pick the candidate maximizing expected improvement
    for bo_iteration in range(bo_n_init, bo_n_iterations):
        ix = ix_candidates[predictions.next(model.X[ix_candidates])]
        _process_ix(ix, predictions, model, ytest, ix_evaluated, ix_candidates)
        ybest = predictions.ybest
        ybest_list.append(ybest)
        _print_status(ix, bo_iteration, ytest, ybest, do_print)

    return np.asarray(ybest_list)
def random_search(bo_n_iterations, ytest, speed=1, do_print=False):
    """
    Random-search baseline: each iteration draws *speed* random pipelines
    without replacement and tracks the best observed accuracy.

    Args:
        bo_n_iterations: number of iterations (length of the returned array).
        ytest: 1-D array of pipeline accuracies; NaN entries are skipped.
        speed: how many random queries are performed per iteration.
        do_print: print per-iteration status.

    Returns:
        numpy array of best-so-far values, one per iteration (np.nan until
        the first evaluation).
    """
    ix_evaluated = []
    ix_candidates = np.where(np.invert(np.isnan(ytest)))[0].tolist()
    ybest_list = []
    ybest = np.nan
    ix = None
    for bo_iteration in range(bo_n_iterations):
        for _ in range(speed):
            # BUG FIX: guard against exhausting the candidate pool, which
            # previously raised IndexError whenever bo_n_iterations * speed
            # exceeded the number of non-NaN entries.
            if not ix_candidates:
                break
            ix = ix_candidates[np.random.permutation(len(ix_candidates))[0]]
            if np.isnan(ybest) or ytest[ix] > ybest:
                ybest = ytest[ix]
            ix_evaluated.append(ix)
            ix_candidates.remove(ix)
        ybest_list.append(ybest)
        if do_print and ix is not None:
            print('Iteration: %d, %g [%d], Best: %g' % (bo_iteration, ytest[ix], ix, ybest))
    return np.asarray(ybest_list)
if __name__ == '__main__':
    # Script entry point: train the GP-LVM surrogate on the training datasets,
    # then compare BO warm-started by it against random-search baselines.

    # TODO: make these specifiable through command line params
    # train and evaluation settings
    Q = 20 # number of latent dimensions
    batch_size = 50 # size of dataset batches
    n_epochs = 300
    lr = 1e-7 # called eta in the paper
    N_max = 1000
    bo_n_init = 5
    bo_n_iterations = 200
    save_checkpoint = False
    fn_checkpoint = None
    checkpoint_period = 50

    # train
    Ytrain, Ytest, Ftrain, Ftest = get_data()
    max_iterations = int(Ytrain.shape[1] / batch_size * n_epochs)

    def f_stop(model, negative_log_likelihood, iteration, t):
        # stop criterion passed to train(): halt after max_iterations steps
        if iteration >= max_iterations - 1:
            print('max_iterations (%d) reached' % max_iterations)
            return True
        return False

    variances = []
    log_probabilities = []
    t_list = []

    def f_callback(model, negative_log_likelihood, iteration, t):
        # per-iteration bookkeeping: noise variance, per-dataset NLL and
        # cumulative wall-clock time; optionally checkpoint the model
        variances.append(transform_forward(model.variance).item())
        log_probabilities.append(model().item()/model.D)
        if iteration == 1:
            t_list.append(t)
        else:
            t_list.append(t_list[-1] + t)
        if save_checkpoint and not (iteration % checkpoint_period):
            torch.save(model.state_dict(), fn_checkpoint + '_it%d.pt' % iteration)
        print('iteration=%d, log probability=%g, variance=%g, t: %g'
              % (iteration, log_probabilities[-1], transform_forward(model.variance), t_list[-1]))

    # create initial latent space with PCA, first imputing missing observations
    imputer = sklearn.impute.SimpleImputer(missing_values=np.nan, strategy='mean')
    X = sklearn.decomposition.PCA(Q).fit_transform(imputer.fit(Ytrain).transform(Ytrain))

    # define model
    kernel = kernels.Add(kernels.RBF(Q, lengthscale=None), kernels.White(Q))
    model = gaussian_process_latent_variable_model.GaussianProcessLatentVariableModel(Q, X, Ytrain, kernel, N_max=N_max, D_max=batch_size)

    if save_checkpoint:
        torch.save(model.state_dict(), fn_checkpoint + '_it%d.pt' % 0)

    # optimize
    print('training...')
    optimizer = torch.optim.SGD(model.parameters(), lr=lr)
    model = train(model, optimizer, f_callback=f_callback, f_stop=f_stop)

    if save_checkpoint:
        torch.save(model.state_dict(), fn_checkpoint + '_itFinal.pt')

    # evaluate model and random baselines
    print('evaluating...')
    with torch.no_grad():
        Ytest = Ytest.astype(np.float32)
        # regret = (best achievable accuracy on the dataset) - (best found),
        # one column per test dataset, one row per BO iteration
        regrets_automl = np.zeros((bo_n_iterations, Ytest.shape[1]))
        regrets_random1x = np.zeros((bo_n_iterations, Ytest.shape[1]))
        regrets_random2x = np.zeros((bo_n_iterations, Ytest.shape[1]))
        regrets_random4x = np.zeros((bo_n_iterations, Ytest.shape[1]))
        for d in np.arange(Ytest.shape[1]):
            print(d)
            ybest = np.nanmax(Ytest[:,d])
            regrets_random1x[:,d] = ybest - random_search(bo_n_iterations,
                                                          Ytest[:,d], speed=1)
            regrets_random2x[:,d] = ybest - random_search(bo_n_iterations,
                                                          Ytest[:,d], speed=2)
            regrets_random4x[:,d] = ybest - random_search(bo_n_iterations,
                                                          Ytest[:,d], speed=4)
            regrets_automl[:,d] = ybest - bayesian_optimization_search(model, bo_n_init, bo_n_iterations,
                                                                      Ytrain, Ftrain, Ftest[d,:],
                                                                      Ytest[:,d])

    # NOTE(review): `results` is assembled but never written to disk within
    # this chunk — confirm downstream code consumes it or add a save step.
    results = {
            'pmf': regrets_automl,
            'random1x': regrets_random1x,
            'random2x': regrets_random2x,
            'random4x': regrets_random4x
            }
{"hexsha": "930ace36f715d3683d8e3ccc99d35a60c5b9bad5", "size": 9210, "ext": "py", "lang": "Python", "max_stars_repo_path": "run.py", "max_stars_repo_name": "romanlutz/pmf-automl", "max_stars_repo_head_hexsha": "2600cf484658803ecd08b3c03d77eb83f675fa95", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "run.py", "max_issues_repo_name": "romanlutz/pmf-automl", "max_issues_repo_head_hexsha": "2600cf484658803ecd08b3c03d77eb83f675fa95", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "run.py", "max_forks_repo_name": "romanlutz/pmf-automl", "max_forks_repo_head_hexsha": "2600cf484658803ecd08b3c03d77eb83f675fa95", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.1370967742, "max_line_length": 138, "alphanum_fraction": 0.6426710098, "include": true, "reason": "import numpy", "num_tokens": 2164}
|
import chainer
import numpy
import pytest
import torch
from espnet.scheduler import scheduler
from espnet.scheduler.chainer import ChainerScheduler
from espnet.scheduler.pytorch import PyTorchScheduler
@pytest.mark.parametrize("name", scheduler.SCHEDULER_DICT.keys())
def test_scheduler(name):
    """Every registered scheduler builds from its name and yields float scales."""
    sched = scheduler.dynamic_import_scheduler(name).build("lr")
    assert sched.key == "lr"
    for step in (0, 1000):
        assert isinstance(sched.scale(step), float)
def test_pytorch_scheduler():
    """NoamScheduler drives a PyTorch optimizer lr up to ~1.0 at the warmup step."""
    warmup = 30000
    noam = scheduler.NoamScheduler.build("lr", warmup=warmup)

    net = torch.nn.Linear(2, 1)
    optim = torch.optim.SGD(net.parameters(), lr=1.0)
    wrapped = PyTorchScheduler([noam], optim)

    # at step 0 the lr matches the scheduler's own scale exactly
    wrapped.step(0)
    assert all(group["lr"] == noam.scale(0) for group in optim.param_groups)

    # at the warmup step the lr has climbed to ~1.0
    wrapped.step(warmup)
    for group in optim.param_groups:
        numpy.testing.assert_allclose(group["lr"], 1.0, rtol=1e-4)
def test_chainer_scheduler():
    """NoamScheduler drives a Chainer optimizer lr up to ~1.0 at the warmup step."""
    warmup = 30000
    noam = scheduler.NoamScheduler.build("lr", warmup=warmup)

    link = chainer.links.Linear(2, 1)
    optim = chainer.optimizers.SGD(lr=1.0)
    optim.setup(link)
    wrapped = ChainerScheduler([noam], optim)

    # step 0: lr matches the scheduler's own scale exactly
    wrapped.step(0)
    assert optim.lr == noam.scale(0)

    # warmup step: lr has climbed to ~1.0
    wrapped.step(warmup)
    numpy.testing.assert_allclose(optim.lr, 1.0, rtol=1e-4)
|
{"hexsha": "fadbfb94bc8adf57d2bd35bf803171e11efa2e9b", "size": 1247, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/test_scheduler.py", "max_stars_repo_name": "roshansh-cmu/espnet", "max_stars_repo_head_hexsha": "5fa6dcc4e649dc66397c629d0030d09ecef36b80", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/test_scheduler.py", "max_issues_repo_name": "roshansh-cmu/espnet", "max_issues_repo_head_hexsha": "5fa6dcc4e649dc66397c629d0030d09ecef36b80", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/test_scheduler.py", "max_forks_repo_name": "roshansh-cmu/espnet", "max_forks_repo_head_hexsha": "5fa6dcc4e649dc66397c629d0030d09ecef36b80", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.1086956522, "max_line_length": 65, "alphanum_fraction": 0.6848436247, "include": true, "reason": "import numpy", "num_tokens": 358}
|
#include "apriltag_ros/apriltag_detector.h"
#include <boost/make_shared.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <ros/ros.h>
namespace apriltag_ros {
namespace mit = apriltag_mit;
namespace umich = apriltag_umich3;
/// ================
/// ApriltagDetector
/// ================
// Base-class constructor: caches the payload size and the family/detector
// identifiers shared by both concrete implementations.
ApriltagDetector::ApriltagDetector(const DetectorType &detector_type,
                                   const TagFamily &tag_family)
    : payload_(TagFamilyToPayload(tag_family)),
      detector_type_(detector_type),
      tag_family_(tag_family),
      // BUG FIX: tag_family_str_ was initialized from
      // DetectorTypeToString(detector_type), so tag_family() (and the
      // `family` field of every published Apriltag message) reported the
      // detector type instead of the tag family.
      tag_family_str_(TagFamilyToString(tag_family)) {}
// Cache the requested border width and forward it to the concrete detector.
void ApriltagDetector::set_black_border(int black_border) {
  SetBlackBorder(black_border);
  black_border_ = black_border;
}
// Width of the tag's black border (in payload-bit units).
int ApriltagDetector::black_border() const { return black_border_; }
// Forwarded to the concrete detector, which decides whether (and how) to
// honour the request and updates the cached decimate_ itself.
void ApriltagDetector::set_decimate(int decimate) { SetDecimate(decimate); }
int ApriltagDetector::decimate() const { return decimate_; }
// Forwarded to the concrete detector, which updates the cached nthreads_.
void ApriltagDetector::set_nthreads(int nthreads) { SetNThreads(nthreads); }
int ApriltagDetector::nthreads() const { return nthreads_; }
// Delegates to the implementation-specific profiling dump.
void ApriltagDetector::print_profiling_info() const {
  PrintProfilingInfo();
}
// Number of payload bits of the configured family (e.g. 36 for 36h11).
int ApriltagDetector::payload() const { return payload_; }
// Human-readable name of the configured tag family.
const std::string &ApriltagDetector::tag_family() const {
  return tag_family_str_;
}
// Run tag detection on *image*. Accepts 8-bit mono (used as-is) or 8-bit BGR
// (converted to grayscale); any other type — or an empty image — yields an
// empty result.
ApriltagVec ApriltagDetector::Detect(const cv::Mat &image) {
  if (image.empty()) return {};

  // Check image type
  cv::Mat gray;
  if (image.type() == CV_8UC1) {
    gray = image;
  } else if (image.type() == CV_8UC3) {
    cv::cvtColor(image, gray, cv::COLOR_BGR2GRAY);
  } else {
    // unsupported pixel format
    return {};
  }

  // Detect
  return DetectImpl(gray);
}
// Factory: construct the requested detector implementation for *tag_family*.
// Throws std::invalid_argument for an unknown detector type.
ApriltagDetectorPtr ApriltagDetector::Create(const DetectorType &detector_type,
                                             const TagFamily &tag_family) {
  switch (detector_type) {
    case DetectorType::Mit:
      return boost::make_shared<ApriltagDetectorMit>(tag_family);
    case DetectorType::Umich:
      // Consistency: use boost::make_shared here too (single allocation),
      // matching the Mit branch above.
      return boost::make_shared<ApriltagDetectorUmich>(tag_family);
    default:
      throw std::invalid_argument("Invalid apriltag detector type.");
  }
}
/// ===================
/// ApriltagDetectorMit
/// ===================
// MIT-detector constructor: select the code table for the requested family,
// then build the underlying TagDetector once. Throws std::invalid_argument
// for an unsupported family.
ApriltagDetectorMit::ApriltagDetectorMit(const TagFamily &tag_family)
    : ApriltagDetector(DetectorType::Mit, tag_family) {
  const mit::TagCodes *codes = nullptr;
  switch (tag_family) {
    case TagFamily::tf36h11:
      codes = &mit::tag_codes_36h11;
      break;
    case TagFamily::tf25h9:
      codes = &mit::tag_codes_25h9;
      break;
    case TagFamily::tf16h5:
      codes = &mit::tag_codes_16h5;
      break;
    default:
      throw std::invalid_argument("Invalid tag family");
  }
  tag_detector_ = boost::make_shared<mit::TagDetector>(*codes);
}
// Forward the border width to the MIT TagDetector.
void ApriltagDetectorMit::SetBlackBorder(int black_border) {
  tag_detector_->set_black_border(black_border);
}
// The MIT detector has no decimation support: the requested value is
// ignored and the cached setting is pinned to 1.
void ApriltagDetectorMit::SetDecimate(int decimate) { decimate_ = 1; }
// The MIT detector is single-threaded: the request is ignored and the
// cached setting is pinned to 1.
void ApriltagDetectorMit::SetNThreads(int nthreads) { nthreads_ = 1; }
void ApriltagDetectorMit::PrintProfilingInfo() const {
  // TODO: implement performance analysis for MIT detector
}
// Run the MIT detector on a grayscale image and convert each TagDetection
// into an apriltag_msgs::Apriltag message.
ApriltagVec ApriltagDetectorMit::DetectImpl(const cv::Mat &image) {
  // Detection
  auto detections = tag_detector_->ExtractTags(image);

  // Convert to Apriltag message
  ApriltagVec apriltags;
  apriltags.reserve(detections.size());
  for (const mit::TagDetection &td : detections) {
    apriltag_msgs::Apriltag apriltag;
    apriltag.id = td.id;
    apriltag.bits = payload();
    apriltag.border = black_border();
    apriltag.family = tag_family();
    apriltag.hamming = td.hamming_distance;
    apriltag.center.x = td.cxy.x;
    apriltag.center.y = td.cxy.y;
    // copy the four corner points in the MIT detector's corner order
    for (size_t i = 0; i < 4; ++i) {
      apriltag.corners[i].x = td.p[i].x;
      apriltag.corners[i].y = td.p[i].y;
    }
    apriltags.push_back(apriltag);
  }
  return apriltags;
}
/// =====================
/// ApriltagDetectorUmich
/// =====================
// Umich (apriltag3) detector constructor: create the C detector object,
// create the requested family, and register the family with the detector.
// Throws std::invalid_argument for an unsupported family. Both C objects
// are owned through the smart-pointer wrappers in tag_detector_/tag_family_.
ApriltagDetectorUmich::ApriltagDetectorUmich(const TagFamily &tag_family)
    : ApriltagDetector(DetectorType::Umich, tag_family) {
  tag_detector_.reset(umich::apriltag_detector_create());
  switch (tag_family) {
    case TagFamily::tf36h11:
      tag_family_.reset(umich::tag36h11_create());
      break;
    case TagFamily::tf25h9:
      tag_family_.reset(umich::tag25h9_create());
      break;
    case TagFamily::tf16h5:
      tag_family_.reset(umich::tag16h5_create());
      break;
    default:
      throw std::invalid_argument("Invalid tag family");
  }
  apriltag_detector_add_family(tag_detector_.get(), tag_family_.get());
}
void ApriltagDetectorUmich::SetBlackBorder(int black_border) {
if (black_border != 1) {
ROS_WARN_STREAM("black_border no longer supported (must be 1)!");
}
}
void ApriltagDetectorUmich::SetDecimate(int decimate) {
tag_detector_->quad_decimate = decimate;
}
void ApriltagDetectorUmich::SetNThreads(int nthreads) {
tag_detector_->nthreads = nthreads_;
}
void ApriltagDetectorUmich::PrintProfilingInfo() const {
timeprofile_display(tag_detector_->tp);
}
// helper function for grey image conversion
// Builds a heap-allocated apriltag3 image_u8_t and copies the caller's gray
// pixel data into it. The struct's width/height/stride members are const, so
// a designated-initializer temporary is memcpy'd into the calloc'd object.
// NOTE(review): ownership of both allocations is presumed to pass to the
// caller's ImageU8Ptr deleter (image_u8_destroy) — confirm.
static umich::image_u8_t *image_u8_create_from_gray(int width, int height, uint8_t *gray) {
  // tightly packed: one byte per pixel, stride equals width
  int stride = width;
  uint8_t *buf = static_cast<uint8_t *>(calloc(height * stride, sizeof(uint8_t)));
  umich::image_u8_t tmp = {
      .width = width, .height = height, .stride = stride, .buf = buf};
  umich::image_u8_t *im = static_cast<umich::image_u8_t *>(
      calloc(1, sizeof(umich::image_u8_t)));
  memcpy(im, &tmp, sizeof(umich::image_u8_t));
  // copy the caller's pixels into the freshly allocated buffer
  memcpy(im->buf, gray, im->height * im->stride);
  return im;
}
// Run the apriltag3 detector on a grayscale image and convert each
// detection into an apriltag_msgs::Apriltag message.
ApriltagVec ApriltagDetectorUmich::DetectImpl(const cv::Mat &image) {
  // wrap the cv::Mat pixels in an apriltag3 image (copies the data)
  umich::ImageU8Ptr image_u8(
      image_u8_create_from_gray(image.cols, image.rows, image.data));

  // Detection
  umich::ZarrayPtr detections(
      apriltag_detector_detect(tag_detector_.get(), image_u8.get()));

  // Handle empty detection
  const auto num_detections = zarray_size(detections.get());
  ApriltagVec apriltags;
  apriltags.reserve(num_detections);

  for (int i = 0; i < num_detections; ++i) {
    umich::apriltag_detection_t *td;
    zarray_get(detections.get(), i, &td);
    apriltag_msgs::Apriltag apriltag;
    apriltag.id = td->id;
    apriltag.bits = payload();
    apriltag.hamming = td->hamming;
    apriltag.border = black_border();
    apriltag.center.x = td->c[0];
    apriltag.center.y = td->c[1];
    // NOTE: the inner `i` (corner index) shadows the outer detection index.
    for (size_t i = 0; i < 4; ++i) {
      // Umich's order of corners is different from mit's
      apriltag.corners[i].x = td->p[i][0];
      apriltag.corners[i].y = td->p[i][1];
    }
    apriltags.push_back(apriltag);
  }

  return apriltags;
}
// Draw one tag: its four edges, optionally filled corner dots, and its id.
void DrawApriltag(cv::Mat &image, const apriltag_msgs::Apriltag &apriltag,
                  int thickness, bool draw_corners) {
  const auto &c = apriltag.corners;
  const cv::Point2i p0(c[0].x, c[0].y);
  const cv::Point2i p1(c[1].x, c[1].y);
  const cv::Point2i p2(c[2].x, c[2].y);
  const cv::Point2i p3(c[3].x, c[3].y);
  // The two edges touching corner 0 are red and green; the rest are blue.
  cv::line(image, p0, p1, CV_RGB(255, 0, 0), thickness);
  cv::line(image, p0, p3, CV_RGB(0, 255, 0), thickness);
  cv::line(image, p2, p3, CV_RGB(0, 0, 255), thickness);
  cv::line(image, p2, p1, CV_RGB(0, 0, 255), thickness);
  const auto line_type = cv::LINE_AA;
  if (draw_corners) {
    const int r = thickness;
    cv::circle(image, p0, r, CV_RGB(255, 0, 0), -1, line_type);
    cv::circle(image, p1, r, CV_RGB(0, 255, 0), -1, line_type);
    cv::circle(image, p2, r, CV_RGB(0, 0, 255), -1, line_type);
    cv::circle(image, p3, r, CV_RGB(255, 0, 255), -1, line_type);
  }
  // Tag id, offset slightly so it sits roughly on the tag center.
  cv::putText(image, std::to_string(apriltag.id),
              cv::Point2f(apriltag.center.x - 5, apriltag.center.y + 5),
              cv::FONT_HERSHEY_SIMPLEX, 1, CV_RGB(255, 0, 255), 2, line_type);
}
// Draw every detected tag onto the image. Only two of DrawApriltag's four
// parameters are supplied here, so thickness/draw_corners presumably have
// defaults in the header declaration — verify against the header.
void DrawApriltags(cv::Mat &image, const ApriltagVec &apriltags) {
  for (const auto &apriltag : apriltags) {
    DrawApriltag(image, apriltag);
  }
}
// Map a tag family to its payload value (6, 5 or 4 — presumably the tag's
// data-grid side length, i.e. 6x6=36, 5x5=25, 4x4=16 bits; confirm usage).
int TagFamilyToPayload(const TagFamily &tag_family) {
  if (tag_family == TagFamily::tf36h11) return 6;
  if (tag_family == TagFamily::tf25h9) return 5;
  if (tag_family == TagFamily::tf16h5) return 4;
  throw std::invalid_argument("Invalid tag family");
}
// Lowercase string name for a detector backend.
std::string DetectorTypeToString(const DetectorType &detector_type) {
  if (detector_type == DetectorType::Mit) return "mit";
  if (detector_type == DetectorType::Umich) return "umich";
  throw std::invalid_argument("Invalid detector type");
}
// True iff (x, y) lies at least b pixels inside every edge of the image.
bool InsideImage(const cv::Mat &image, float x, float y, int b) {
  if (x < b || y < b) return false;
  return (x < image.cols - b) && (y < image.rows - b);
}
// Convenience overload taking the point as a cv::Point2f.
bool InsideImage(const cv::Mat &image, const cv::Point2f &p, int b) {
  return InsideImage(image, p.x, p.y, b);
}
// void RefineApriltags(const cv::Mat &image, ApriltagVec &apriltags,
// int win_size) {
// if (apriltags.empty())
// return;
// std::vector<cv::Point2f> corners;
// corners.reserve(apriltags.size() * 4);
// for (const auto &apriltag : apriltags) {
// for (const auto &corner : apriltag.corners) {
// if (InsideImage(image, corner.x, corner.y, win_size)) {
// corners.push_back(cv::Point2f(corner.x, corner.y));
// }
// }
// }
// const auto cv_win_size = cv::Size(win_size, win_size);
// const auto zero_zone = cv::Size(-1, -1);
// const auto criteria =
// cv::TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 10, 0.001);
// cv::cornerSubPix(image, corners, cv_win_size, zero_zone, criteria);
// size_t i = 0;
// for (auto &apriltag : apriltags) {
// for (auto &corner : apriltag.corners) {
// if (InsideImage(image, corner.x, corner.y, win_size)) {
// const auto &refined = corners[i++];
// corner.x = refined.x;
// corner.y = refined.y;
// }
// }
// }
//}
} // namespace apriltag_ros
|
{"hexsha": "9ad16191fab379f013663b0b190b0cf08bf03ee5", "size": 10230, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "apriltag_ros/src/apriltag_detector.cpp", "max_stars_repo_name": "versatran01/sv_fiducial", "max_stars_repo_head_hexsha": "7e054d975f4da423d1e230ec699512e6c83e3261", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 15.0, "max_stars_repo_stars_event_min_datetime": "2016-08-11T13:50:43.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-30T22:27:45.000Z", "max_issues_repo_path": "apriltag_ros/src/apriltag_detector.cpp", "max_issues_repo_name": "versatran01/sv_fiducial", "max_issues_repo_head_hexsha": "7e054d975f4da423d1e230ec699512e6c83e3261", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 6.0, "max_issues_repo_issues_event_min_datetime": "2017-08-29T12:41:29.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-07T22:06:04.000Z", "max_forks_repo_path": "apriltag_ros/src/apriltag_detector.cpp", "max_forks_repo_name": "versatran01/sv_fiducial", "max_forks_repo_head_hexsha": "7e054d975f4da423d1e230ec699512e6c83e3261", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 23.0, "max_forks_repo_forks_event_min_datetime": "2016-08-09T00:54:23.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T13:20:17.000Z", "avg_line_length": 31.96875, "max_line_length": 91, "alphanum_fraction": 0.6568914956, "num_tokens": 3037}
|
import numpy as np
import pandas as pd
from scipy.stats import wilcoxon, binomtest, f
import statsmodels.formula.api as smf
import statsmodels.api as sm
import statsmodels.tools.sm_exceptions as sme
from scipy.special import digamma,polygamma
from scipy.stats import nbinom
libmtspec = True
try:
from mtspec import mtspec
except ModuleNotFoundError:
libmtspec = False
def get_2D_matrix (psites):
    """Reshape a flat P-site array into an (ncodons, 3) matrix.

    psites: 1D array-like whose length is a multiple of 3
    returns: 2D numpy array, one row per codon, one column per frame
    """
    ncodons = len(psites) // 3
    return np.reshape(psites, (ncodons, 3))
def get_counts (psites):
    """Sum P-site counts in total and per reading frame.

    psites: 1D array-like (length divisible by 3)
    returns: dict with keys 'total', 'frame0', 'frame1', 'frame2'
    """
    # one row per codon, one column per frame
    mat = np.reshape(psites, (len(psites) // 3, 3))
    counts = {'total': np.sum(mat)}
    for frame in range(3):
        counts['frame%d' % frame] = np.sum(mat[:, frame])
    return counts
def get_taper (psites, time_bandwidth = 3, ntapers = "default", nfft = "default"):
    """Multitaper spectral F-test (as in ribotaper) at the 1/3 frequency.

    psites: 1D array (ORF length) with P-site counts
    time_bandwidth: multitaper time-bandwidth product
    ntapers: number of tapers; "default" derives 2*time_bandwidth - 1
    nfft: FFT length; "default" derives the next power of two, doubled
    returns: p-value of the F-test at frequency 1/3, or NaN for all-zero input
    """
    if sum(psites) == 0:
        return np.nan
    nfft_val = int(2 * 2 ** np.ceil(np.log2(len(psites)))) if nfft == "default" else nfft
    ntapers_val = int(2 * time_bandwidth) - 1 if ntapers == "default" else ntapers
    # Spectral estimation with F-test statistics.
    spec, freq, jackknife, fstatistics, _ = mtspec(
        data=np.array(psites), delta=1, time_bandwidth=time_bandwidth,
        number_of_tapers=ntapers_val, nfft=nfft_val, statistics=True, rshape=0)
    # Bin index of the 1/3 frequency (codon periodicity).
    third = int(np.round(nfft_val / 3))
    return f.sf(fstatistics[third], dfn=2, dfd=(2 * ntapers_val) - 2)
def get_wilcox (mat):
    """
    Paired one-sided Wilcoxon test: frame0 > mean(frame1, frame2).
    mat: 2D matrix with one row per codon and 3 frame columns
    returns: p-value, or NaN when every paired difference is zero
    """
    onframe = mat[:, 0]
    offframe = np.mean(mat[:, 1:3], axis=1)
    diffs = onframe - offframe
    if np.all(diffs == 0):
        # wilcoxon cannot handle the all-zero case
        return np.nan
    _, pval = wilcoxon(diffs, alternative="greater")
    return pval
def get_binom (mat):
    """
    Binomial test on how often frame0 holds the per-codon maximum.
    Uniform noise in [0, 0.99) is added to break ties randomly; otherwise
    argmax would systematically favor the on-frame column on draws.
    mat: 2D matrix with one row per codon and 3 frame columns
    returns: p-value, or NaN for an empty matrix
    """
    noisy = mat + np.random.uniform(low=0.0, high=0.99, size=mat.shape)
    winners = np.argmax(noisy, axis=1)
    if len(winners) == 0:
        return np.nan
    k = np.sum(winners == 0)
    return binomtest(k=k, n=len(winners), p=1/3, alternative="greater").pvalue
def get_theta_md (y, limit=20, eps = np.finfo(float).eps**.25):
    """Deviance-based Newton estimate of the NB dispersion parameter theta.

    Port of theta.md from R's MASS package. Falls back to theta = 1 (the
    default alpha in statsmodels' NB GLM) when the iteration diverges.

    y: 1D array-like of counts
    limit: maximum number of Newton steps
    eps: convergence tolerance on the step size
    returns: estimated theta
    """
    y = np.array(y)
    mu = np.mean(y)
    dfr = y.shape[0] - 2
    weights = np.ones(len(y))
    n = np.sum(weights)
    theta = n / np.sum(weights * (y / mu - 1) ** 2)
    # counts floored at 1 so the log term is defined for zeros
    floored = np.maximum(1, y)
    target = 2 * np.sum(weights * y * np.log(floored / mu)) - dfr
    step = 1
    for _ in range(1, limit):
        if not (np.abs(step) > eps) or np.isnan(theta):
            break
        theta = np.abs(theta)
        ratio = np.log((y + theta) / (mu + theta))
        numer = target - 2 * np.sum(weights * (y + theta) * ratio)
        denom = 2 * np.sum(weights * ((y - mu) / (mu + theta) - ratio))
        step = numer / denom
        theta = theta - step
    if theta <= 0 or np.isnan(theta) or np.isinf(theta):
        theta = 1  # default alpha in statsmodels nb glm
    return theta
def get_theta_ml (y, limit = 10, eps = np.finfo(float).eps**.25, trace = False):
    """Maximum-likelihood estimate of the NB dispersion parameter theta.

    Port of theta.ml from R's MASS package (Fisher scoring on the NB
    likelihood). Returns 1 (statsmodels' default NB alpha) when the iteration
    diverges, and also when the moment-based starting value is undefined
    (zero sample variance around the mean -> ZeroDivisionError).
    """
    def _score(n, th, mu, y, w):
        # first derivative of the NB log-likelihood w.r.t. theta
        return sum(w * (digamma(th + y) - digamma(th) + np.log(th) + 1 - np.log(th + mu) - (y + th) / (mu + th)))

    def _info(n, th, mu, y, w):
        # second-derivative (information) term used as the scoring denominator
        return sum(w * (-polygamma(1, th + y) + polygamma(1, th) - 1 / th + 2 / (mu + th) - (y + th) / (mu + th) ** 2))

    try:
        mu = np.mean(y)
        weights = np.ones((len(y)))
        n = np.sum(weights)
        theta = n / sum(weights * (y / mu - 1) ** 2)
        iterations = 0
        delta = 1
        if trace:
            print("theta.ml: iter", iterations, "theta", theta)
        while iterations < limit and abs(delta) > eps:
            theta = abs(theta)
            delta = _score(n, theta, mu, y, weights) / _info(n, theta, mu, y, weights)
            theta = theta + delta
            iterations = iterations + 1
        if theta <= 0 or np.isnan(theta) or np.isinf(theta):
            theta = 1
        if iterations == limit and trace:
            print("iteration limit reached")
        return theta
    except ZeroDivisionError:
        return 1
def convert_params(mu, theta):
    """
    Convert mean/dispersion parameterization of a negative binomial to the ones scipy supports
    See https://en.wikipedia.org/wiki/Negative_binomial_distribution#Alternative_formulations

    mu: mean; theta: dispersion
    returns: (n, p) tuple for scipy.stats.nbinom
    """
    variance = mu + mu ** 2 / theta
    failure_prob = (variance - mu) / variance
    return theta, 1 - failure_prob
def pmf(counts, mu, theta):
    """Negative binomial pmf in mean/dispersion parameterization.

    counts: value(s) at which to evaluate the pmf
    mu: mean; theta: dispersion
    returns: nbinom.pmf evaluated at counts
    """
    return nbinom.pmf(counts, *convert_params(mu, theta))
def get_glm (mat, remove_outliers = False):
    """
    Fits a negative binomial GLM to the p-sites with a two-class frame feature (on or off-frame) and extracts the parameter for the frame coefficient.
    mat: 2D matrix with shape (ncodons, 3); column 0 is the on-frame count
    remove_outliers: if True, drop observations improbable under a per-frame
        NB fit (BH-adjusted pmf < 0.01) before the final fit
    returns: one-tailed p-value for on-frame enrichment, NaN on fit failure
    """
    # Row-major flatten, so the frame labels repeat per codon row.
    df_glm = pd.DataFrame ({
        'counts' : mat.reshape (-1),
        'frame' : ['onframe', 'offframe', 'offframe'] * mat.shape[0]
    })
    try:
        if remove_outliers:
            # Per-frame NB fit (mean + dispersion), then discard counts whose
            # BH-adjusted pmf is below 0.01.
            theta_g = df_glm.groupby ("frame").agg ([np.mean, get_theta_ml])
            df_glm['pmf'] = pmf (df_glm.counts.values, theta_g.loc[df_glm.frame, ('counts','mean')], theta_g.loc[df_glm.frame, ('counts','get_theta_ml')])
            df_glm['adj_pmf'] = p_adjust_bh (df_glm.pmf)
            df_glm = df_glm[df_glm.adj_pmf >= 0.01]
        # statsmodels parameterizes the NB family by alpha = 1/theta
        theta = get_theta_ml (df_glm.counts.values)
        model = smf.glm(formula = "counts ~ frame", data=df_glm, family=sm.families.NegativeBinomial(alpha=1/theta)).fit()
        glm_p = model.pvalues[1] # glm_ttest.pvalue; index 1 = frame coefficient
        # converting to one-tailed
        if model.params[1] > 0: #== max (model.params):
            glm_p_onetailed = glm_p/2
        else:
            glm_p_onetailed = 1-glm_p/2
        return (glm_p_onetailed)
    except sme.PerfectSeparationError:
        return (np.nan)
    except ValueError:
        return (np.nan)
    except IndexError:
        return (np.nan)
def p_adjust_bh (p):
    """
    Benjamini-Hochberg p-value correction for multiple hypothesis testing.
    adapted from here: https://stackoverflow.com/questions/7450957/how-to-implement-rs-p-adjust-in-python to allow NaNs

    p: sequence of p-values (may contain NaNs)
    returns: numpy array of BH-adjusted p-values; NaN inputs remain NaN
    """
    # BUG FIX: np.asfarray was removed in NumPy 2.0; asarray with a float
    # dtype is the documented equivalent.
    p = np.asarray(p, dtype=float)
    nna = ~np.isnan(p)
    # output defaults to NaN; only non-NaN positions are filled below
    q = np.full(len(p), np.nan)
    pnna = p[nna]
    by_descend = pnna.argsort()[::-1]
    by_orig = by_descend.argsort()
    n = len(pnna)
    i = np.arange(n, 0, -1)
    # BH: scale by n/rank, enforce monotonicity with a running min, cap at 1
    q[nna] = np.minimum(1, np.fmin.accumulate((float(n) / i) * pnna[by_descend]))[by_orig]
    return q
def get_filtered_padj (s, pcol="p_glm", name="filtered_padj"):
    """
    Adapted from DESeq2; filtering by expression, the BH padjustment if performed solely on ORFs exceeding the expression threshold.
    Then, the threshold that maximized number of rejections (i.e. significant ORFs) are used. In contrast to DESeq2, the maximization is
    not based on lowess regression, but simply the cutoff with max rejections (lowess implementation TODO).

    s: DataFrame-like with an expression column 'n' and a p-value column pcol
    pcol: name of the p-value column to adjust
    name: name of the output column written back onto s
    returns: s with the adjusted-p column added
    """
    filter=np.array (s['n'])
    p=np.array(s[pcol])
    nrows = s.shape[0]
    if nrows < 50:
        # too few ORFs to optimize a threshold: plain BH on all of them
        s[name] = p_adjust_bh(p)
        return (s)
    # candidate cutoffs: 50 expression quantiles, starting at the fraction of
    # zero-expression ORFs (capped at the 95th percentile)
    lq = np.mean(filter == 0)
    uq = .95 if lq < .95 else 1
    r = np.array (np.linspace(start=lq, stop=uq, num=50))
    cutoffs = np.quantile (filter, r)
    result = np.empty((nrows,len(cutoffs)))
    result[:] = np.nan
    for i in range (len (cutoffs)):
        # BH-adjust only the ORFs passing this expression cutoff; the rest
        # stay NaN in this column
        use = filter >= cutoffs[i]
        if (np.any(use)):
            use_p = p[use]
            result[use, i] = p_adjust_bh(use_p)
    # keep the cutoff column yielding the most rejections at padj < 0.05
    numRej = np.sum (result < 0.05, axis=0)
    j = np.argmax(numRej)
    s[name] = result[:,j]
    return (s)
|
{"hexsha": "aeb67be00d54c264e2412a5183418be1ae77f9e1", "size": 8490, "ext": "py", "lang": "Python", "max_stars_repo_path": "ribofy/stats.py", "max_stars_repo_name": "ncrnalab/ribofy", "max_stars_repo_head_hexsha": "f0140018f322d60b87a44796358e179e52d6f837", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-10-11T09:10:12.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-11T09:10:12.000Z", "max_issues_repo_path": "ribofy/stats.py", "max_issues_repo_name": "ncrnalab/ribofy", "max_issues_repo_head_hexsha": "f0140018f322d60b87a44796358e179e52d6f837", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ribofy/stats.py", "max_forks_repo_name": "ncrnalab/ribofy", "max_forks_repo_head_hexsha": "f0140018f322d60b87a44796358e179e52d6f837", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.3, "max_line_length": 181, "alphanum_fraction": 0.5766784452, "include": true, "reason": "import numpy,from scipy,import statsmodels", "num_tokens": 2598}
|
# Copyright (C) 2020 NumS Development Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
import numpy as np
from nums.core.array import selection
from nums.core.array import utils as array_utils
from nums.core.array.base import Block, BlockArrayBase
from nums.core.array.selection import BasicSelection
from nums.core.compute.compute_manager import ComputeManager
from nums.core.grid.grid import ArrayGrid
class ArrayView:
    """Deferred view over a BlockArrayBase described by a BasicSelection.

    A view records a basic (numpy-style) selection over a source block array.
    Materialization (``create``) and assignment reuse source block references
    when the selection is aligned to the source block grid; otherwise new
    blocks are assembled from the intersecting pieces of the source blocks.

    NOTE: all ``dtype=np.int`` usages were replaced with the builtin ``int``;
    the ``np.int`` alias was removed in NumPy 1.24 and raised AttributeError.
    """

    @classmethod
    def from_block_array(cls, bab):
        """Return a view spanning the entire block array ``bab``."""
        assert isinstance(bab, BlockArrayBase)
        return cls(source=bab, block_shape=bab.block_shape)

    @classmethod
    def from_subscript(cls, bab, subscript):
        """Return a view of ``bab`` described by a basic subscript tuple."""
        assert isinstance(bab, BlockArrayBase)
        return cls(source=bab, sel=BasicSelection.from_subscript(bab.shape, subscript))

    def __init__(self, source, sel: BasicSelection = None, block_shape: tuple = None):
        """
        source: the viewed BlockArrayBase
        sel: selection over source (default: the whole array)
        block_shape: output block shape (default: derived from the selection)
        """
        self._source: BlockArrayBase = source
        self._cm: ComputeManager = self._source.cm
        if sel is None:
            sel = BasicSelection.from_shape(self._source.shape)
        # Currently, this is all we support.
        assert len(sel.axes) == len(self._source.shape)
        self.sel = sel
        self.shape: tuple = self.sel.get_output_shape()
        if block_shape is None:
            block_shape: tuple = array_utils.block_shape_from_subscript(
                self.sel.selector(), self._source.block_shape
            )
        self.block_shape = block_shape
        assert len(self.block_shape) == len(self.shape)
        self.grid: ArrayGrid = ArrayGrid(
            self.shape, self.block_shape, dtype=self._source.dtype.__name__
        )

    def __getitem__(self, item):
        """Subscript the view; only regular (basic) subscripts are accepted."""
        if isinstance(item, tuple):
            for val in item:
                assert array_utils.is_regular_subscript(val)
            return self.select(item)
        elif array_utils.is_regular_subscript(item):
            return self.select((item,))
        else:
            raise Exception("getitem failed", item)

    def select(self, subscript: tuple):
        """Dispatch a selection to the advanced or basic implementation."""
        if selection.is_advanced_selection(subscript):
            # This is not optimized.
            return self.advanced_select(subscript)
        else:
            # This is optimized.
            return self.basic_select(subscript)

    def basic_select(self, subscript: tuple):
        """Return a new ArrayView for a basic subscript over this view."""
        # No support for subscripts of subscripts.
        # We create new block arrays to deal with nested subscripts.
        assert self.shape == self._source.shape
        assert self.block_shape == self._source.block_shape
        sel: BasicSelection = BasicSelection.from_subscript(self.shape, subscript)
        result: ArrayView = ArrayView(self._source, sel)
        return result

    def create(self, concrete_cls=None) -> BlockArrayBase:
        """Materialize the view into a block array, choosing the cheapest path."""
        if self.sel.basic_steps():
            if self.sel.is_aligned(self._source.block_shape):
                # Assertion below should form a conjunction with the above condition.
                # This isn't currently an issue but an assumption that
                # may not always hold true, depending on how the ArrayView
                # is constructed.
                assert array_utils.can_broadcast_shape_to(
                    self.sel.get_broadcastable_block_shape(self.block_shape),
                    self._source.block_shape,
                )
                return self.create_references(concrete_cls)
            else:
                return self.create_basic_single_step(concrete_cls)
        else:
            return self.create_basic_multi_step(concrete_cls)

    def create_references(self, concrete_cls) -> BlockArrayBase:
        """Materialize by referencing source blocks directly (no data copies)."""
        # TODO (hme): Double check this.
        array_cls = BlockArrayBase if concrete_cls is None else concrete_cls
        dst_ba: BlockArrayBase = array_cls(self.grid, self._cm)
        if 0 in self.shape:
            return dst_ba
        grid_offset = self.sel.position().value // np.array(
            self._source.block_shape, dtype=int
        )
        dst_inflated_shape = self.sel.get_broadcastable_shape()
        dst_inflated_block_shape = self.sel.get_broadcastable_block_shape(
            self.block_shape
        )
        dst_inflated_grid: ArrayGrid = ArrayGrid(
            dst_inflated_shape, dst_inflated_block_shape, self.grid.dtype.__name__
        )
        dst_grid_entry_iterator = list(dst_ba.grid.get_entry_iterator())
        for dst_index, dst_inflated_grid_entry in enumerate(
            dst_inflated_grid.get_entry_iterator()
        ):
            dst_grid_entry = dst_grid_entry_iterator[dst_index]
            src_grid_entry = tuple(
                (np.array(dst_inflated_grid_entry, dtype=int) + grid_offset).tolist()
            )
            dst_ba.blocks[dst_grid_entry].oid = self._source.blocks[src_grid_entry].oid
            dst_ba.blocks[dst_grid_entry].transposed = self._source.blocks[
                src_grid_entry
            ].transposed
        return dst_ba

    def create_basic_single_step(self, concrete_cls) -> BlockArrayBase:
        """Materialize an unaligned single-step selection by assembling each
        destination block from the intersecting parts of source blocks."""
        array_cls = BlockArrayBase if concrete_cls is None else concrete_cls
        dst_ba: BlockArrayBase = array_cls(self.grid, self._cm)
        if 0 in self.shape:
            return dst_ba
        src_sel_arr: np.ndarray = selection.BasicSelection.block_selection(
            self._source.shape, self._source.block_shape
        )
        # TODO(hme): The following op is very slow for integer subscripts of large arrays.
        src_sel_clipped: np.ndarray = src_sel_arr & self.sel
        assert src_sel_clipped.shape == self._source.grid.grid_shape
        broadcast_shape = self.sel.get_broadcastable_shape()
        broadcast_block_shape = self.sel.get_broadcastable_block_shape(
            dst_ba.block_shape
        )
        dst_grid_bc: ArrayGrid = ArrayGrid(
            broadcast_shape, broadcast_block_shape, self.grid.dtype.__name__
        )
        dst_sel_arr: np.ndarray = selection.BasicSelection.block_selection(
            broadcast_shape, broadcast_block_shape
        )
        dst_sel_offset: np.ndarray = dst_sel_arr + self.sel.position()
        dst_entry_iterator = list(dst_ba.grid.get_entry_iterator())
        for dst_index, dst_grid_entry_bc in enumerate(dst_grid_bc.get_entry_iterator()):
            dst_sel_offset_block: BasicSelection = dst_sel_offset[dst_grid_entry_bc]
            if dst_sel_offset_block.is_empty():
                continue
            src_dst_intersection_arr = src_sel_clipped & dst_sel_offset_block
            cm: ComputeManager = self._cm
            src_oids = []
            src_params = []
            dst_params = []
            for _, src_grid_entry in enumerate(self._source.grid.get_entry_iterator()):
                src_dst_intersection_block: BasicSelection = src_dst_intersection_arr[
                    src_grid_entry
                ]
                if src_dst_intersection_block.is_empty():
                    continue
                src_block: Block = self._source.blocks[src_grid_entry]
                src_oids.append(src_block.oid)
                src_sel_block: BasicSelection = src_sel_arr[src_grid_entry]
                # selections localized to the source / destination blocks
                src_dep_sel_loc = src_dst_intersection_block - src_sel_block.position()
                src_params.append((src_dep_sel_loc.selector(), src_block.transposed))
                dst_block_sel_loc = (
                    src_dst_intersection_block - dst_sel_offset_block.position()
                )
                dst_params.append((dst_block_sel_loc.selector(), False))
            dst_block: Block = dst_ba.blocks.reshape(dst_grid_bc.grid_shape)[
                dst_grid_entry_bc
            ]
            dst_block.oid = cm.create_block(
                *src_oids,
                src_params=src_params,
                dst_params=dst_params,
                dst_shape=dst_block.shape,
                dst_shape_bc=dst_sel_offset_block.get_output_shape(),
                syskwargs={
                    "grid_entry": dst_entry_iterator[dst_index],
                    "grid_shape": self.grid.grid_shape,
                }
            )
        return dst_ba

    def create_basic_multi_step(self, concrete_cls) -> BlockArrayBase:
        """Materialize a strided selection; not yet implemented."""
        # Create each entry one by one.
        raise NotImplementedError("Only positive steps of size 1 are currently supported.")

    def advanced_select(self, subscript: tuple):
        """Advanced (fancy) indexing; not yet implemented."""
        raise NotImplementedError()

    def __setitem__(self, key, value):
        """Assign into the view; only regular subscripts / Ellipsis accepted."""
        if isinstance(key, tuple):
            for entry in key:
                assert array_utils.is_regular_subscript(entry) or isinstance(
                    entry, type(...)
                )
            return self.assign(key, value)
        elif array_utils.is_regular_subscript(key) or isinstance(key, type(...)):
            return self.assign((key,), value)
        else:
            raise Exception("setitem failed", key)

    def assign(self, subscript: Tuple, value):
        """Dispatch an assignment to the advanced or basic implementation."""
        if selection.is_advanced_selection(subscript):
            # This is not optimized.
            return self.advanced_assign(subscript, value)
        else:
            # This is optimized.
            return self.basic_assign(subscript, value)

    def basic_assign(self, subscript: tuple, value):
        """Assign ``value`` into the selected region of the source array,
        choosing the cheapest path. ``value`` is expected to be an ArrayView
        or BlockArrayBase."""
        # No support for subscripts of subscripts.
        # We create new block arrays to deal with nested subscripts.
        assert self.shape == self._source.shape
        assert self.block_shape == self._source.block_shape
        dst_ba: BlockArrayBase = self._source
        dst_sel = selection.BasicSelection.from_subscript(dst_ba.shape, subscript)
        if 0 in dst_sel.get_output_shape():
            # Nothing to do.
            return
        if dst_sel.basic_steps():
            value_is_aligned = True
            if isinstance(value, ArrayView):
                value_is_aligned = value.sel.is_aligned(value._source.block_shape)
            if (
                value_is_aligned
                and dst_sel.is_aligned(self._source.block_shape)
                and self.block_shape == value.block_shape
            ):
                # TODO (hme): Sometimes self.block_shape != value.block_shape
                #  when it is in fact equal. This happens when value
                #  is created from the last block of its source,
                #  and is being assigned to the last block of this view's source.
                #  This is a minor issue.
                return self.assign_references(dst_sel, value)
            else:
                return self.basic_assign_single_step(dst_sel, value)
        else:
            return self.basic_assign_multi_step(dst_sel, value)

    def assign_references(self, dst_sel: BasicSelection, value):
        """Assign by copying block references (no data movement)."""
        # TODO (hme): This seems overly complicated, but correct. Double check it.
        #  Also, revisit some of the variable names. They will likely
        #  be confusing in the future.
        # The destination has same block shape as value,
        # but the destination selection may not have the same shape as value.
        # May need to broadcast value to destination selection output shape.
        dst_offset = dst_sel.position().value // np.array(
            self._source.block_shape, dtype=int
        )
        # Do we need to broadcast?
        if isinstance(value, ArrayView) and (
            dst_sel.get_output_shape() != value.sel.get_output_shape()
        ):
            value = value.create()
        if isinstance(value, ArrayView):
            # This is the best case.
            # We don't need to create value to perform the reference copy.
            # No broadcasting required, so this should be okay.
            src_offset = value.sel.position().value // np.array(
                value._source.block_shape, dtype=int
            )
            src_inflated_shape = dst_sel.get_broadcastable_shape()
            src_inflated_block_shape = dst_sel.get_broadcastable_block_shape(
                value.block_shape
            )
            src_inflated_grid: ArrayGrid = ArrayGrid(
                src_inflated_shape, src_inflated_block_shape, self.grid.dtype.__name__
            )
            for src_grid_entry_inflated in src_inflated_grid.get_entry_iterator():
                # Num axes in value grid may be too small.
                dst_grid_entry = tuple(
                    (
                        np.array(src_grid_entry_inflated, dtype=int) + dst_offset
                    ).tolist()
                )
                src_grid_entry = tuple(
                    (
                        np.array(src_grid_entry_inflated, dtype=int) + src_offset
                    ).tolist()
                )
                # This is a reference assignment, and the grid properties between the
                # two blocks may differ, so retain those properties in the copy.
                dst_block: Block = self._source.blocks[dst_grid_entry]
                src_block_copy: Block = value._source.blocks[src_grid_entry].copy()
                src_block_copy.grid_entry = dst_block.grid_entry
                src_block_copy.grid_shape = dst_block.grid_shape
                src_block_copy.rect = dst_block.rect
                self._source.blocks[dst_grid_entry] = src_block_copy
        elif isinstance(value, BlockArrayBase):
            # The value has already been created, so just leverage value's existing grid iterator.
            if value.shape != dst_sel.get_output_shape():
                # Need to broadcast.
                src_ba: BlockArrayBase = value.broadcast_to(dst_sel.get_output_shape())
            else:
                src_ba: BlockArrayBase = value
            src_inflated_shape = dst_sel.get_broadcastable_shape()
            src_inflated_block_shape = dst_sel.get_broadcastable_block_shape(
                src_ba.block_shape
            )
            src_inflated_grid: ArrayGrid = ArrayGrid(
                src_inflated_shape, src_inflated_block_shape, self.grid.dtype.__name__
            )
            src_grid_entry_iterator = list(src_ba.grid.get_entry_iterator())
            for src_index, src_grid_entry_inflated in enumerate(
                src_inflated_grid.get_entry_iterator()
            ):
                src_grid_entry = src_grid_entry_iterator[src_index]
                dst_grid_entry = tuple(
                    (
                        np.array(src_grid_entry_inflated, dtype=int) + dst_offset
                    ).tolist()
                )
                # This is a reference assignment, and the grid properties between the
                # two blocks may differ, so retain those properties in the copy.
                dst_block: Block = self._source.blocks[dst_grid_entry]
                src_block_copy: Block = src_ba.blocks[src_grid_entry].copy()
                src_block_copy.grid_entry = dst_block.grid_entry
                src_block_copy.grid_shape = dst_block.grid_shape
                src_block_copy.rect = dst_block.rect
                self._source.blocks[dst_grid_entry] = src_block_copy

    def basic_assign_single_step(self, dst_sel: BasicSelection, value):
        """Assign an unaligned single-step selection by updating each
        destination block from the intersecting parts of value's blocks."""
        assert isinstance(value, (ArrayView, BlockArrayBase))
        dst_ba: BlockArrayBase = self._source
        dst_sel_arr: np.ndarray = selection.BasicSelection.block_selection(
            dst_ba.shape, dst_ba.block_shape
        )
        dst_sel_clipped: np.ndarray = dst_sel_arr & dst_sel
        assert dst_sel_clipped.shape == self._source.grid.grid_shape
        # We create value's block array, in case we need to broadcast.
        # This may not be necessary, but alternative solutions are extremely tedious.
        # The result is a block array with replicated blocks,
        # which match the output shape of dst_sel.
        if isinstance(value, ArrayView):
            src_ba_bc: BlockArrayBase = value.create().broadcast_to(
                dst_sel.get_output_shape()
            )
        elif isinstance(value, BlockArrayBase):
            src_ba_bc: BlockArrayBase = value.broadcast_to(dst_sel.get_output_shape())
        else:
            raise Exception("Unexpected value type %s." % type(value))
        # Different lengths occur when an index is used to perform
        # a selection on an axis. Numpy semantics drops such axes. To allow operations
        # between source and destination selections, dropped axes are restored with dimension 1
        # so that selections are of equal length.
        # We restore the dropped dimensions of the destination selection, because
        # the source selection must be broadcastable to the destination selection
        # for the assignment to be valid.
        src_inflated_shape = dst_sel.get_broadcastable_shape()
        # The block shapes need not be equal, but the broadcast source block shape must
        # match the block shape we obtain below, so that there's a 1-to-1 correspondence
        # between the grid entries.
        src_inflated_block_shape = dst_sel.get_broadcastable_block_shape(
            src_ba_bc.block_shape
        )
        src_inflated_grid: ArrayGrid = ArrayGrid(
            src_inflated_shape, src_inflated_block_shape, self.grid.dtype.__name__
        )
        src_sel_arr: np.ndarray = selection.BasicSelection.block_selection(
            src_inflated_shape, src_inflated_block_shape
        )
        src_sel_offset: np.ndarray = src_sel_arr + dst_sel.position()
        # The enumeration of grid entries is identical if the broadcast source grid and
        # inflated grid have the same number of blocks.
        src_grid_entry_iterator = list(src_ba_bc.grid.get_entry_iterator())
        for dst_grid_entry in dst_ba.grid.get_entry_iterator():
            dst_sel_block: BasicSelection = dst_sel_arr[dst_grid_entry]
            dst_sel_block_clipped: BasicSelection = dst_sel_clipped[dst_grid_entry]
            if dst_sel_block_clipped.is_empty():
                continue
            src_intersection_arr = src_sel_offset & dst_sel_block_clipped
            src_oids = []
            src_params = []
            dst_params = []
            dst_block: Block = dst_ba.blocks[dst_grid_entry]
            for src_index, src_grid_entry_bc in enumerate(
                src_inflated_grid.get_entry_iterator()
            ):
                src_intersection_block: BasicSelection = src_intersection_arr[
                    src_grid_entry_bc
                ]
                if src_intersection_block.is_empty():
                    continue
                src_grid_entry = src_grid_entry_iterator[src_index]
                src_block: Block = src_ba_bc.blocks[src_grid_entry]
                src_oids.append(src_block.oid)
                src_sel_block_offset: BasicSelection = src_sel_offset[src_grid_entry_bc]
                src_dep_sel_loc = (
                    src_intersection_block - src_sel_block_offset.position()
                )
                src_params.append(
                    (
                        src_dep_sel_loc.selector(),
                        src_sel_block_offset.get_output_shape(),
                        src_block.transposed,
                    )
                )
                # We're looking at intersection of dst block and src block, so the
                # location to which we assign must be offset by dst_sel_block.
                dst_block_sel_loc: BasicSelection = (
                    src_intersection_block - dst_sel_block.position()
                )
                dst_params.append((dst_block_sel_loc.selector(), dst_block.transposed))
            if len(src_oids) == 0:
                continue
            dst_block.oid = self._cm.update_block(
                dst_block.oid,
                *src_oids,
                src_params=src_params,
                dst_params=dst_params,
                syskwargs={
                    "grid_entry": dst_block.grid_entry,
                    "grid_shape": dst_block.grid_shape,
                }
            )

    def basic_assign_multi_step(self, dst_sel: BasicSelection, value):
        """Assign a strided selection; not yet implemented."""
        # Update each entry in subscript shape, one by one.
        raise NotImplementedError()

    def advanced_assign(self, subscript: tuple, value):
        """Advanced (fancy) index assignment; not yet implemented."""
        raise NotImplementedError()
|
{"hexsha": "f21b140ab0fb79e316d472e3071987f6446c72fc", "size": 20690, "ext": "py", "lang": "Python", "max_stars_repo_path": "nums/core/array/view.py", "max_stars_repo_name": "UsernameChun/nums", "max_stars_repo_head_hexsha": "3a10598cc32b9763f1f2733e9e1089399d48ef3c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nums/core/array/view.py", "max_issues_repo_name": "UsernameChun/nums", "max_issues_repo_head_hexsha": "3a10598cc32b9763f1f2733e9e1089399d48ef3c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nums/core/array/view.py", "max_forks_repo_name": "UsernameChun/nums", "max_forks_repo_head_hexsha": "3a10598cc32b9763f1f2733e9e1089399d48ef3c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.5990990991, "max_line_length": 98, "alphanum_fraction": 0.6276462059, "include": true, "reason": "import numpy", "num_tokens": 4111}
|
import random
import numpy as np
import torch
class game:
    """n-in-a-row game on a size x size board.

    Cells hold 0 (empty), +1 (current player) or -1 (opponent). After each
    move the board is negated so the player to move always sees their own
    stones as +1.
    """

    def __init__(self, size, nConnect):
        self.size = size          # board side length
        self.nConnect = nConnect  # stones in a row needed to win

    def reset(self):
        """Clear the board; returns (state, done)."""
        self.state = torch.zeros((self.size, self.size))
        return self.state, False

    def show_state(self):
        """Print the board as an integer matrix."""
        print("Current board position:")
        print(self.state.int().numpy(), "\n")

    def valid_actions(self):
        """Boolean mask of empty cells."""
        return self.state == 0

    def valid_action_indices(self):
        """Coordinates of empty cells as an (n, 2) tensor."""
        return self.valid_actions().nonzero()

    def is_valid(self, coord):
        """True if the cell at coord is empty."""
        x, y = coord
        return self.state[x, y] == 0

    def move(self, coord, alternate_players=True):
        """Place a stone for the current player at coord.

        returns: (state, reward, done); reward is +1 on a win, 0 otherwise
        raises: ValueError if the cell is already occupied
        """
        x, y = coord
        if self.is_valid(coord):
            self.state[x, y] = 1
        else:
            raise ValueError("move not valid")
        if self.is_winning_move(coord):
            return self.state, +1, True  # state, reward, done
        if len(self.valid_action_indices()) == 0:
            return self.state, +0, True  # board full: draw
        if alternate_players: self.state *= -1  # flip perspective for the opponent
        return self.state, 0, False

    def is_winning_move(self, coord):
        """True if the stone just placed at coord completes nConnect in a row."""
        x, y = coord
        # BUG FIX: the original iterated dx, dy over [0, 1] only, which covers
        # horizontal, vertical and the main diagonal but never the
        # anti-diagonal (1, -1) -- anti-diagonal wins went undetected.
        for dx, dy in ((0, 1), (1, 0), (1, 1), (1, -1)):
            nConnected = 0
            # scan a window of cells along this direction through coord
            for depth in range(-self.nConnect - 1, self.nConnect):
                new_coord = np.array([x, y]) + np.array([dx, dy]) * depth
                if (0 <= new_coord).all() and (new_coord < self.size).all():
                    if self.state[tuple(new_coord)] == 1:
                        nConnected += 1
                        if nConnected == self.nConnect:
                            return True
                    else:
                        nConnected = 0
        return False
# Play one full game with uniformly random moves, printing the board after
# every placement.  (The class name is rebound to the instance, as before.)
game = game(5, nConnect=0)
state, done = game.reset()
game.show_state()
while not done:
    open_cells = game.valid_action_indices()
    pick = open_cells[random.randint(0, len(open_cells) - 1)]
    state, reward, done = game.move(pick)
    game.show_state()
    if reward:
        print("Won!")
#
|
{"hexsha": "a175695c55ef4a6d2d81d6889bc14cc0f0a6b55f", "size": 2200, "ext": "py", "lang": "Python", "max_stars_repo_path": "game.py", "max_stars_repo_name": "lbarazza/sedano", "max_stars_repo_head_hexsha": "f45ed2fe40c81904871e0ec72ad980c1bc20e3d6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "game.py", "max_issues_repo_name": "lbarazza/sedano", "max_issues_repo_head_hexsha": "f45ed2fe40c81904871e0ec72ad980c1bc20e3d6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "game.py", "max_forks_repo_name": "lbarazza/sedano", "max_forks_repo_head_hexsha": "f45ed2fe40c81904871e0ec72ad980c1bc20e3d6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.4042553191, "max_line_length": 84, "alphanum_fraction": 0.5218181818, "include": true, "reason": "import numpy", "num_tokens": 504}
|
[STATEMENT]
lemma encode_complete:
"encode h prob = Inr err \<Longrightarrow>
\<not>(ast_problem.well_formed prob \<and> (\<forall>op \<in> set (ast_problem.ast\<delta> prob). consistent_pres_op op) \<and>
(\<forall>op \<in> set (ast_problem.ast\<delta> prob). is_standard_operator op))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. encode h prob = Inr err \<Longrightarrow> \<not> (ast_problem.well_formed prob \<and> (\<forall>op\<in>set (ast_problem.ast\<delta> prob). consistent_pres_op op) \<and> (\<forall>op\<in>set (ast_problem.ast\<delta> prob). is_standard_operator op))
[PROOF STEP]
unfolding encode_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (if ast_problem.well_formed prob then if \<forall>op\<in>set (ast_problem.ast\<delta> prob). consistent_pres_op op then if \<forall>op\<in>set (ast_problem.ast\<delta> prob). is_standard_operator op then Inl (SASP_to_DIMACS' h prob) else Inr STR ''Error: Conditional effects!'' else Inr STR ''Error: Preconditions inconsistent'' else Inr STR ''Error: Problem malformed!'') = Inr err \<Longrightarrow> \<not> (ast_problem.well_formed prob \<and> (\<forall>op\<in>set (ast_problem.ast\<delta> prob). consistent_pres_op op) \<and> (\<forall>op\<in>set (ast_problem.ast\<delta> prob). is_standard_operator op))
[PROOF STEP]
by (auto split: if_splits simp: list.pred_set
intro: planning_dimacs_complete_code'[unfolded Let_def])
|
{"llama_tokens": 482, "file": "Verified_SAT_Based_AI_Planning_Solve_SASP", "length": 2}
|
#!/usr/bin/env python
#This script plots drag around an inline oscillating cylinder for re 200 kc 10 against dutsch et als work at cycle 14
import argparse
import os
import os.path
import sys
import csv
import matplotlib
from matplotlib import pyplot as plt
import numpy
cuibmFolder = os.path.expandvars("/scratch/src/cuIBM")
validationData = '/osc_Re200_KC10_Dutsch.txt'
execPath = cuibmFolder + '/bin/cuIBM'
caseFolder = cuibmFolder + '/validation/osc/static'
validationData = cuibmFolder + '/validation-data' + validationData
print "\n"+"-"*100
print "Plotting validation for flow around inline oscillating cylinder with Re200 and KC10\n"
print "-"*100+"\n"
experiment = numpy.genfromtxt(validationData,delimiter='\t')
external = numpy.genfromtxt(caseFolder+'/externalkc10/forces',delimiter='\t')
embedded = numpy.genfromtxt(caseFolder+'/embeddedkc10/forces',delimiter='\t')
#external
plt.plot([i-13 for i in zip(*external)[0]],[i*5 for i in zip(*external)[1]],'-',color='blue',linewidth=2,label='External')
plt.plot(zip(*experiment)[0],zip(*experiment)[1],'o', color = 'red', markersize = 8, label = 'Dutsch et al')
plt.title('Drag for flow around inline oscillating cylinder Re 200, KC 10')
plt.legend(loc='lower right',numpoints=1, fancybox=True)
plt.xlabel('t/T')
plt.ylabel('Fd')
plt.ylim([-6,6])
plt.xlim([0,1])
plt.savefig('%s/External_static_kc10.pdf' % (caseFolder))
plt.clf()
#emb
plt.plot([i-13 for i in zip(*embedded)[0]],[i*5 for i in zip(*embedded)[1]],'-',color='blue',linewidth=2,label='Embedded')
plt.plot(zip(*experiment)[0],zip(*experiment)[1],'o', color = 'red', markersize = 8, label = 'Dutsch et al')
plt.title('Drag for flow around inline oscillating cylinder Re 200, KC 10')
plt.legend(loc='lower right',numpoints=1, fancybox=True)
plt.xlabel('t/T')
plt.ylabel('Fd')
plt.ylim([-6,6])
plt.xlim([0,1])
plt.savefig('%s/Embedded_static_kc10.pdf' % (caseFolder))
plt.clf()
print '\nDone plotting!\n Files saved to %s' % caseFolder
|
{"hexsha": "ccbc293e44f9e193c1aa8b2cc6e9c264d0cadafc", "size": 1972, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/validation/osc_Re200_KC10.py", "max_stars_repo_name": "Niemeyer-Research-Group/cuIBM", "max_stars_repo_head_hexsha": "0fa913a465e4f0f3432e0dbd4d3df9bc47905406", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-07-05T17:48:41.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-05T17:48:41.000Z", "max_issues_repo_path": "scripts/validation/osc_Re200_KC10.py", "max_issues_repo_name": "Niemeyer-Research-Group/cuIBM-FSI", "max_issues_repo_head_hexsha": "0fa913a465e4f0f3432e0dbd4d3df9bc47905406", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2016-05-11T16:04:33.000Z", "max_issues_repo_issues_event_max_datetime": "2016-05-12T01:40:27.000Z", "max_forks_repo_path": "scripts/validation/osc_Re200_KC10.py", "max_forks_repo_name": "Niemeyer-Research-Group/cuIBM-FSI", "max_forks_repo_head_hexsha": "0fa913a465e4f0f3432e0dbd4d3df9bc47905406", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-07-06T14:32:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-03T13:36:48.000Z", "avg_line_length": 36.5185185185, "max_line_length": 122, "alphanum_fraction": 0.7231237323, "include": true, "reason": "import numpy", "num_tokens": 564}
|
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility to map weights from default model definition to the phased execution model"""
import numpy as np
from typing import Mapping, Callable
from functools import partial
# Public API of this module.
# BUG FIX: the original __all__ listed "default_to_phased_transform" and
# "add_phased_from_default_initializers", neither of which is defined in this
# module, so `from weight_mapping import *` raised AttributeError.  The
# entries below name the functions that actually exist.
__all__ = [
    "default_to_phased_mapping",
    "phased_to_default_mapping",
    "phased_from_default_transform",
    "get_phased_initializers_from_default"
]
# Mapping of default-execution weight names -> phased-execution weight names.
# Populated here for the embedding and extended further down the module.
WEIGHT_MAPPING = {}
# Embedding layers
WEIGHT_MAPPING['Embedding/Embedding_Dict'] = "BertModel/Encoder/Embeddings/Token/weight"
WEIGHT_MAPPING['Embedding/Segment_Dict'] = "BertModel/Encoder/Embeddings/Segment/weight"
WEIGHT_MAPPING['Embedding/Positional_Dict'] = "BertModel/Encoder/Embeddings/Position/weight"
WEIGHT_MAPPING['Embedding/Gamma'] = "BertModel/Encoder/Embeddings/Norm/Gamma"
WEIGHT_MAPPING['Embedding/Beta'] = "BertModel/Encoder/Embeddings/Norm/Beta"
def layers_mapping(N):
    '''Return the default -> phased weight-name mapping for N transformer layers.'''
    # (default suffix, phased suffix) pairs shared by every encoder layer.
    per_layer = [
        ('Attention/QKV', 'Attention/QKV'),
        ('Attention/Out', 'Attention/Out'),
        ('Attention/Gamma', 'Attention/Norm/Gamma'),
        ('Attention/Beta', 'Attention/Norm/Beta'),
        ('FF/1/W', 'FF/1/Dense/Weight'),
        ('FF/1/B', 'FF/1/Dense/Bias'),
        ('FF/2/W', 'FF/2/Dense/Weight'),
        ('FF/2/B', 'FF/2/Dense/Bias'),
        ('FF/Gamma', 'FF/Norm/Gamma'),
        ('FF/Beta', 'FF/Norm/Beta'),
    ]
    return {
        f'Layer{i}/{src}': f'BertModel/Encoder/Layer{i}/{dst}'
        for i in range(N)
        for src, dst in per_layer
    }
# Task-head weights: MaskLM, NSP and SQuAD.
WEIGHT_MAPPING.update({
    # MaskLM
    'CLS/LMPredictionW': "BertModel/MLM/LMPrediction/Dense/Weight",
    'CLS/LMPredictionB': "BertModel/MLM/LMPrediction/Dense/Bias",
    'CLS/Gamma': "BertModel/MLM/LMPrediction/Norm/Gamma",
    'CLS/Beta': "BertModel/MLM/LMPrediction/Norm/Beta",
    # NSP
    'NSP/PoolW': "BertModel/NSP/Pool/Dense/Weight",
    'NSP/PoolB': "BertModel/NSP/Pool/Dense/Bias",
    'NSP/NspW': "BertModel/NSP/Classifier/Dense/Weight",
    'NSP/NspB': "BertModel/NSP/Classifier/Dense/Bias",
    # SQUAD
    'Squad/SquadW': 'BertModel/Squad/Dense/Weight',
    'Squad/SquadB': 'BertModel/Squad/Dense/Bias',
})
# MaskLM weights live on a different scope when the embedding is serialised.
SPLIT_EMBEDDING_MAPPING = {
    'CLS/LMPredictionW': "BertModel/MLMSerialised/Slice/LMPrediction/Dense/Weight",
    'CLS/LMPredictionB': "BertModel/MLMSerialised/Slice/LMPrediction/Dense/Bias",
    'CLS/Gamma': "BertModel/MLMSerialised/Slice/LMPrediction/Norm/Gamma",
    'CLS/Beta': "BertModel/MLMSerialised/Slice/LMPrediction/Norm/Beta",
}
def split_embedding_mapping(N):
    '''Map each of the N phased token-embedding splits back to the single
    default embedding table.'''
    return {
        f'BertModel/Encoder/Embeddings/Token/split{i}/weight': "Embedding/Embedding_Dict"
        for i in range(N)
    }
def default_to_phased_mapping(args) -> Mapping[str, str]:
    '''Return the weight-name mapping default -> phased.
    Split weights are not included.'''
    combined = dict(WEIGHT_MAPPING)
    combined.update(layers_mapping(args.num_layers))
    if args.embedding_serialization_vocab_steps > 1:
        # MaskLM weight names change when the vocab dimension is serialised.
        combined.update(SPLIT_EMBEDDING_MAPPING)
    return combined
def phased_to_default_mapping(args) -> Mapping[str, str]:
    '''Return the weight-name mapping phased -> default.
    Split weights are included.'''
    inverse = {phased: default
               for default, phased in default_to_phased_mapping(args).items()}
    if args.embedding_serialization_vocab_steps > 1:
        # Every token-embedding split maps back to the one default table.
        inverse.update(split_embedding_mapping(args.embedding_serialization_vocab_steps))
    return inverse
def phased_from_default_transform(args) -> Mapping[str, Callable[[np.ndarray], np.ndarray]]:
    '''Return a mapping of phased name -> transform function, where each
    function takes the default numpy weight and returns the phased one.'''
    n_splits = args.embedding_serialization_vocab_steps
    if n_splits <= 1:
        # No embedding serialisation: nothing needs transforming.
        return {}

    def take_split(idx, full_t):
        # Split along whichever axis holds the vocabulary dimension.
        vocab_axis = full_t.shape.index(args.vocab_length)
        return np.split(full_t, n_splits, axis=vocab_axis)[idx]

    return {
        f'BertModel/Encoder/Embeddings/Token/split{i}/weight': partial(take_split, i)
        for i in range(n_splits)
    }
def get_phased_initializers_from_default(args, initializers: Mapping[str, np.ndarray]) -> Mapping[str, np.ndarray]:
    '''Build an initializer mapping for phased execution from one for default
    execution, adding split weights as specified by args.'''
    name_map = phased_to_default_mapping(args)
    transforms = phased_from_default_transform(args)
    result = {}
    for phased_name, default_name in name_map.items():
        try:
            weight = initializers[default_name]
        except KeyError:
            # Default weight not supplied: skip this phased name.
            continue
        fn = transforms.get(phased_name)
        result[phased_name] = fn(weight) if fn is not None else weight
    return result
def get_default_initializers_from_phased(initializers: Mapping[str, np.ndarray]) -> Mapping[str, np.ndarray]:
    '''Return an initializer mapping for the default execution mode,
    concatenating any split weights detected in *initializers*.
    Intended to allow converting one phased init to another via the default
    mapping:
        old_phased_init = onnx.load(..)
        default_init = get_default_initializers_from_phased(old_phased_init)
        new_phased_init = add_phased_from_default_initializers(args, default_init)'''
    # Placeholder: reassembling split weights is not implemented yet.
    raise NotImplementedError()
|
{"hexsha": "5c33aa6f44991335d31e27b7b270c61509e0ce3f", "size": 6808, "ext": "py", "lang": "Python", "max_stars_repo_path": "applications/popart/bert/phased_execution/weight_mapping.py", "max_stars_repo_name": "kew96/GraphcoreExamples", "max_stars_repo_head_hexsha": "22dc0d7e3755b0a7f16cdf694c6d10c0f91ee8eb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "applications/popart/bert/phased_execution/weight_mapping.py", "max_issues_repo_name": "kew96/GraphcoreExamples", "max_issues_repo_head_hexsha": "22dc0d7e3755b0a7f16cdf694c6d10c0f91ee8eb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "applications/popart/bert/phased_execution/weight_mapping.py", "max_forks_repo_name": "kew96/GraphcoreExamples", "max_forks_repo_head_hexsha": "22dc0d7e3755b0a7f16cdf694c6d10c0f91ee8eb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.641025641, "max_line_length": 115, "alphanum_fraction": 0.732079906, "include": true, "reason": "import numpy", "num_tokens": 1753}
|
import time
import cv2
from gym.envs.atari.atari_env import AtariEnv
import numpy as np
def run_experiment(dataset, preprocess_fn):
    """Time preprocess_fn over every frame in dataset and report mean/std."""
    durations = []
    for frame in dataset:
        t0 = time.time()
        out = preprocess_fn(frame)
        durations.append(time.time() - t0)

    durations = np.asarray(durations) * 1e6  # seconds -> microseconds
    avg = np.mean(durations)
    spread = np.std(durations, ddof=1)  # sample standard deviation
    print('{:.2f}'.format(avg), '+/-', '{:.2f}'.format(spread), 'μs')

    # Save the last frame before/after preprocessing for visual inspection.
    cv2.imwrite('before.jpg', frame)
    cv2.imwrite('after.jpg', out)
def main():
    """Benchmark several grayscale/resize preprocessing configurations on
    frames collected from a Space Invaders Atari environment."""
    env = AtariEnv('space_invaders', frameskip=4, obs_type='image')
    env.seed(0)
    env.action_space.seed(0)
    state = env.reset()

    dataset = []
    for _ in range(1_000):
        image = cv2.cvtColor(state, cv2.COLOR_BGR2GRAY)
        dataset.append(image)
        # BUG FIX: the original assigned the step result to `image`, so
        # `state` never advanced and the dataset held 1000 copies of the
        # first frame (the reset-on-done branch had the same mistake).
        state, _, done, _ = env.step(env.action_space.sample())
        if done:
            state = env.reset()

    print("identity", end=': ')
    run_experiment(dataset, lambda x: x)

    for dims in [(84, 84), (80, 84), (80, 104), (80, 80)]:
        for interpolation in [cv2.INTER_LINEAR, cv2.INTER_NEAREST]:
            for crop in [False, True]:
                # Bind the loop variables as defaults so the lambdas do not
                # capture them late (safe even if the call were deferred).
                if crop:
                    # Crop one row off the top and bottom before resizing.
                    preprocess_fn = lambda x, d=dims, i=interpolation: cv2.resize(x[1:-1], d, interpolation=i)
                else:
                    preprocess_fn = lambda x, d=dims, i=interpolation: cv2.resize(x, d, interpolation=i)
                print(dims, interpolation, crop, end=': ')
                run_experiment(dataset, preprocess_fn)
# Script entry point.
if __name__ == '__main__':
    main()
|
{"hexsha": "3deed484f53ecfab3feeb86bc14f452bcbde7a4a", "size": 1551, "ext": "py", "lang": "Python", "max_stars_repo_path": "experiments/image_preprocessing.py", "max_stars_repo_name": "brett-daley/fast-dqn", "max_stars_repo_head_hexsha": "acf21e8bb193e52d73aa8e2d4e355957095bbd36", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-11-06T02:16:55.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-10T18:42:02.000Z", "max_issues_repo_path": "experiments/image_preprocessing.py", "max_issues_repo_name": "brett-daley/fast-dqn", "max_issues_repo_head_hexsha": "acf21e8bb193e52d73aa8e2d4e355957095bbd36", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "experiments/image_preprocessing.py", "max_forks_repo_name": "brett-daley/fast-dqn", "max_forks_repo_head_hexsha": "acf21e8bb193e52d73aa8e2d4e355957095bbd36", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.2, "max_line_length": 100, "alphanum_fraction": 0.5789813024, "include": true, "reason": "import numpy", "num_tokens": 400}
|
import os
import time
import numpy as np
from sklearn.utils.random import check_random_state
from ilp.experiments.base import BaseExperiment
from ilp.helpers.data_fetcher import fetch_load_data, IS_DATASET_STREAM
from ilp.helpers.params_parse import parse_yaml, experiment_arg_parser
from ilp.constants import CONFIG_DIR
from ilp.helpers.data_flow import split_labels_rest, split_burn_in_rest
from ilp.helpers.log import make_logger
logger = make_logger(__name__)
class VarSamplesLabeled(BaseExperiment):
    """Experiment that varies the number of labeled samples per class.

    For each value in ``n_labeled_values`` the training data is re-split so
    that ``value * n_classes`` labels are observed, then a single run of the
    base experiment pipeline is executed (repeated ``n_runs`` times with
    different random seeds).
    """
    def __init__(self, n_labeled_values, params, n_runs=1, isave=100):
        # n_labeled_values: labels-per-class values to sweep over.
        # params: parsed YAML experiment configuration.
        # n_runs: number of repetitions with different seeds.
        # isave: checkpoint interval forwarded to BaseExperiment.
        super(VarSamplesLabeled, self).__init__(name='n_L', config=params,
                                                isave=isave, n_runs=n_runs,
                                                plot_title=r'Influence of '
                                                           r'number of labels',
                                                multi_var=True)
        self.n_labeled_values = n_labeled_values
    def pre_single_run(self, X_run, y_run, mask_labeled, n_burn_in, seed_run,
                       X_test, y_test, n_run):
        """Build per-run output paths, log the run header and delegate to
        ``_single_run`` (inherited from BaseExperiment)."""
        config = self.config
        n_labels = config['data']['n_labels']
        # Results are grouped by label count, one stats file per repetition.
        save_dir = os.path.join(self.top_dir, 'n_L_' + str(n_labels))
        stats_file = os.path.join(save_dir, 'run_' + str(n_run))
        logger.info('\n\nExperiment: {}, n_labels = {}, run {}...\n'.
                    format(self.name.upper(), n_labels, n_run))
        # Short pause so the log line above is visible before the run starts.
        time.sleep(1)
        self._single_run(X_run, y_run, mask_labeled, n_burn_in,
                         stats_file, seed_run, X_test, y_test)
    def run(self, dataset_name, random_state=42):
        """Run the full sweep over ``self.n_labeled_values`` on a dataset.

        For stream datasets the labeled mask is sampled once per repetition;
        otherwise a burn-in prefix is carved out and the remainder is
        re-labeled/shuffled for every labels-per-class value.
        """
        config = self.config
        X_train, y_train, X_test, y_test = fetch_load_data(dataset_name)
        n_classes = len(np.unique(y_train))
        # if dataset_name == 'usps':
        #     X_train = np.concatenate((X_train, X_test))
        #     y_train = np.concatenate((y_train, y_test))
        for n_run in range(self.n_runs):
            # Reproducible, repetition-specific seed for the data split.
            seed_run = random_state * n_run
            logger.info('\n\nRANDOM SEED = {} for data split.'.format(seed_run))
            rng = check_random_state(seed_run)
            if config['dataset']['is_stream']:
                logger.info('Dataset is a stream. Sampling observed labels.')
                # Just randomly sample ratio_labeled samples for mask_labeled
                n_burn_in = config['data']['n_burn_in_stream']
                ratio_labeled = config['data']['stream']['ratio_labeled']
                n_labeled = int(ratio_labeled*len(y_train))
                ind_labeled = rng.choice(len(y_train), n_labeled,
                                         replace=False)
                mask_labeled = np.zeros(len(y_train), dtype=bool)
                mask_labeled[ind_labeled] = True
                X_run, y_run = X_train, y_train
            else:
                # Carve out a burn-in prefix with a fixed labeled/unlabeled mix.
                burn_in_params = config['data']['burn_in']
                ind_burn_in, mask_labeled_burn_in = \
                    split_burn_in_rest(y_train, shuffle=True, seed=seed_run,
                                       **burn_in_params)
                n_labeled_burn_in = sum(mask_labeled_burn_in)
                X_burn_in, y_burn_in = X_train[ind_burn_in], \
                                       y_train[ind_burn_in]
                mask_rest = np.ones(len(X_train), dtype=bool)
                mask_rest[ind_burn_in] = False
                X_rest, y_rest = X_train[mask_rest], y_train[mask_rest]
            # NOTE(review): this loop references y_rest / X_burn_in /
            # n_labeled_burn_in, which are only defined in the non-stream
            # branch above; for a stream dataset it would raise NameError.
            # Presumably the sweep is only used on non-stream datasets —
            # confirm, or move the loop into the else-branch.
            for nlpc in self.n_labeled_values:
                # Total labels for this sweep point; stored so that
                # pre_single_run can read it back from the config.
                n_labels = nlpc*n_classes
                config['data']['n_labels'] = n_labels
                # Fraction of the remaining data that must be labeled to
                # reach n_labels in total (burn-in labels already counted).
                rl = (n_labels - n_labeled_burn_in) / len(y_rest)
                assert rl >= 0
                mask_labeled_rest = split_labels_rest(y_rest, batch_size=0,
                                    seed=seed_run, shuffle=True, ratio_labeled=rl)
                # Shuffle the rest
                indices = np.arange(len(y_rest))
                rng.shuffle(indices)
                X_run = np.concatenate((X_burn_in, X_rest[indices]))
                y_run = np.concatenate((y_burn_in, y_rest[indices]))
                mask_labeled = np.concatenate((mask_labeled_burn_in,
                                               mask_labeled_rest[indices]))
                n_burn_in = len(y_burn_in)
                config['data']['n_burn_in'] = n_burn_in
                config.setdefault('options', {})
                config['options']['random_state'] = seed_run
                self.pre_single_run(X_run, y_run, mask_labeled, n_burn_in,
                                    seed_run, X_test, y_test, n_run)
if __name__ == '__main__':
    # Parse command-line options and load the sweep configuration.
    cli = experiment_arg_parser()
    cli_args = vars(cli.parse_args())
    dataset_name = cli_args['dataset'].lower()
    config = parse_yaml(os.path.join(CONFIG_DIR, 'var_n_L.yml'))

    # Record dataset info in the experiment configuration.
    config.setdefault('dataset', {})
    config['dataset']['name'] = dataset_name
    config['dataset']['is_stream'] = IS_DATASET_STREAM.get(dataset_name, False)

    n_labeled_per_class = config['data']['n_labeled_per_class'].copy()
    experiment = VarSamplesLabeled(n_labeled_per_class, params=config,
                                   n_runs=cli_args['n_runs'])

    # Either re-plot saved results or run the sweep from scratch.
    if cli_args['plot'] != '':
        experiment.load_plot(path=cli_args['plot'])
    else:
        experiment.run(dataset_name)
{"hexsha": "26f82cff850f7768ca3c6c2db5cc17e90037edc9", "size": 5475, "ext": "py", "lang": "Python", "max_stars_repo_path": "ilp/experiments/var_n_labeled.py", "max_stars_repo_name": "johny-c/incremental-label-propagation", "max_stars_repo_head_hexsha": "29c413dba023694b99e2c2708c0aa98d891d234d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2019-01-07T05:32:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-14T03:42:52.000Z", "max_issues_repo_path": "ilp/experiments/var_n_labeled.py", "max_issues_repo_name": "johny-c/incremental-label-propagation", "max_issues_repo_head_hexsha": "29c413dba023694b99e2c2708c0aa98d891d234d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ilp/experiments/var_n_labeled.py", "max_forks_repo_name": "johny-c/incremental-label-propagation", "max_forks_repo_head_hexsha": "29c413dba023694b99e2c2708c0aa98d891d234d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2019-10-09T07:30:02.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-05T02:32:23.000Z", "avg_line_length": 43.4523809524, "max_line_length": 80, "alphanum_fraction": 0.5722374429, "include": true, "reason": "import numpy", "num_tokens": 1188}
|
"""
This module provides utility functions for the reduction pipeline.
"""
import astropy.io.fits as pyfits
import numpy as np
def find_angle(loc1, loc2):
    """
    Calculate the angle between two locations on a grid.

    Inputs:
        :loc1: (tuple) first location.
        :loc2: (tuple) second location.

    Outputs:
        :angle: (float) real-valued angle (radians) between loc1 and loc2.
    """
    # BUG FIX: classic numpy has no `atan` (it is `arctan`), so the original
    # raised AttributeError on every call.
    # NOTE(review): arctan(loc1[1] / loc2[1]) looks suspect for an "angle
    # between two locations" — arctan2(y2 - y1, x2 - x1) would be the usual
    # bearing.  The formula is kept as-is (modulo the arctan fix) to
    # preserve the existing contract; confirm against callers.
    angle = np.arctan(loc1[1] / loc2[1])
    return angle
def make_filelist(directory, numlist, inst):
    """Turn a list of numbers into a list of properly formatted filenames.

    Inputs:
        :directory: (string) path leading to directory of interest.
        :numlist: (list) list of numbers corresponding to fits files.
        :inst: (Instrument object) instrument for which data is being reduced.

    Outputs:
        :filelist: (list) list of strings pertaining to files of interest.
    """
    # Filenames are <prefix><zero-padded 4-digit number>.fits in `directory`.
    template = directory + inst.file_prefix + "{:04d}.fits"
    return [template.format(num) for num in numlist]
def read_imcube(filelist):
    """Read a stack of fits files into an image cube of shape (nims, xpix, ypix).

    Inputs:
        :filelist: (list) list of strings pertaining to files of interest.

    Outputs:
        :im_array: (3D array) array of 2D arrays, one per file in filelist.
    """
    frames = [pyfits.getdata(fname, 0) for fname in filelist]
    return np.array(frames)
def image_subsection(input_image, npix, center):
    """Select a rectangular subsection of a 2D image, centred on `center`.

    Inputs:
        :input_image: (2D array) image of which a subsection is desired.
        :npix: (float or list) size of the returned image; a scalar gives a
            square cut-out, a two-element list gives (rows, cols).
        :center: (tuple) centre of the cut-out as (row, col) indices.

    Outputs:
        :subsection: (2D array) subsection of the original image.
    """
    size_spec = np.array(npix)
    if np.size(size_spec) == 1:
        # Scalar size: use the same extent along both axes.
        size_spec = [size_spec, size_spec]
    half_rows, half_cols = (int(s / 2) for s in size_spec)
    row0, col0 = center
    return input_image[row0 - half_rows: row0 + half_rows,
                       col0 - half_cols: col0 + half_cols]
def header_subsection(input_image_file, npix, center):
    """
    Read out the FITS header of an image and adjust it for a subsection.

    Inputs:
        :input_image_file: (string) path to the FITS file of interest.
        :npix: (float) size of the subsection image.
        :center: (tuple) centre of the subsection, with format (row, col).

    Outputs:
        :header: (FITS header) header of the image file, with the WCS
            reference pixel shifted into the subsection's coordinate frame.
    """
    hdr = pyfits.getheader(input_image_file)
    # Cast to float first: some headers store CRPIX* as strings.
    col_ref = float(hdr["CRPIX1"])
    row_ref = float(hdr["CRPIX2"])
    half = npix / 2
    hdr["CRPIX1"] = half - (center[1] - col_ref)  # x = column
    hdr["CRPIX2"] = half - (center[0] - row_ref)  # y = row
    hdr["NAXIS1"] = 800
    hdr["NAXIS2"] = 800
    return hdr
# def general_bad_pix(image):
# sh = np.shape(image)
# bp_im = image.copy()
# px = 5
#
# for r in range(sh[0]):
# for c in range(sh[1]):
# left = np.max([0, c-px]) #left of image, or 5 less than current pixel
# right = np.min([sh[1], c+px]) #right of image or 5 more than current pixel
# bott = np.max([0, r-px]) #bottom of image or 5 less than current pixel
# top = np.min([sh[0], c+px]) #top of image or 5 more than current pixel
#
# region = image[bott:top, left:right]
# region_size = np.size(region)
#
# nans = np.sum(np.isnan(region))
# if nans == region_size:
# #all these pixels are shitty, set value to 0
# bp_im[r,c] = 0.
# else:
# r_med = np.nanmedian(region)
# if image[r,c] > 5.*r_med or np.isnan(image[r,c]):
# bp_im[r,c] = r_med
#
# return bp_im
|
{"hexsha": "107b888d54f03b798c0c297333a84764b2aeaec7", "size": 4681, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/simmer/utils.py", "max_stars_repo_name": "arjunsavel/SImMer", "max_stars_repo_head_hexsha": "71d9bf0bf329f597426ebcd71dd0cda731592ec6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/simmer/utils.py", "max_issues_repo_name": "arjunsavel/SImMer", "max_issues_repo_head_hexsha": "71d9bf0bf329f597426ebcd71dd0cda731592ec6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/simmer/utils.py", "max_forks_repo_name": "arjunsavel/SImMer", "max_forks_repo_head_hexsha": "71d9bf0bf329f597426ebcd71dd0cda731592ec6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.4161073826, "max_line_length": 98, "alphanum_fraction": 0.610339671, "include": true, "reason": "import numpy,import astropy", "num_tokens": 1316}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.