| text (stringlengths 0 to 1.25M) | meta (stringlengths 47 to 1.89k) |
|---|---|
import copy
from ctypes import addressof
from pathlib import Path
import numpy as np
import torch
from torch.utils.data import random_split
from dataset.data_loading import BasicDataset
from networks.UNet.unet_model import UNet
from trainers.trainer import Trainer
class OutOfFoldTrainer(Trainer):
@staticmethod
def get_oof_dataset(dataset_path: Path, oof_p: float, dataset_config: BasicDataset.Config):
dataset = BasicDataset(dataset_path, dataset_config)
lens = np.floor([len(dataset) * oof_p for _ in range(3)]).astype(np.int32)
# the last split absorbs the rounding remainder so the splits cover the full dataset
lens[-1] += len(dataset) - lens.sum()
assert sum(lens) == len(dataset)
return random_split(dataset, lens.tolist())
@staticmethod
def get_validation_subset(dataset: torch.utils.data.Dataset, percent: float = 0.1):
indices = np.random.choice(range(len(dataset)), size=(len(dataset) // int(1 / percent)), replace=False)
return torch.utils.data.Subset(dataset, indices)
def __init__(
self,
trainer_id: str,
device: torch.device,
dataset_path: Path,
dataset_config: BasicDataset.Config,
oof_p: float,
initial_channels: int,
bilinear: bool,
):
super().__init__(trainer_id, device, dataset_config)
dataset_params = dict(dataset_path=dataset_path, oof_p=oof_p)
val_dataset_config = copy.deepcopy(dataset_config)
val_dataset_config.enable_augmentation = False
# train sets
self.P_1, self.P_2, self.P_test = self.get_oof_dataset(
dataset_config=dataset_config, **dataset_params)
self.P_1_and_P_2 = torch.utils.data.ConcatDataset([self.P_1, self.P_2])
# validation sets
P_1_val, P_2_val, P_test_val = self.get_oof_dataset(
dataset_config=val_dataset_config, **dataset_params)
self.P_1_val = self.get_validation_subset(P_1_val)
self.P_2_val = self.get_validation_subset(P_2_val)
self.P_test_val = self.get_validation_subset(P_test_val)
n_input_channels = dataset_config.num_in_channels()
n_output_channels = 1
self.M_11 = UNet(
n_input_channels=n_input_channels,
n_output_channels=n_output_channels,
initial_channels=initial_channels,
name='M_11'
)
self.M_12 = UNet(
n_input_channels=n_input_channels,
n_output_channels=n_output_channels,
initial_channels=initial_channels,
bilinear=bilinear,
name='M_12'
)
self.M_1 = UNet(
n_input_channels=n_input_channels,
n_output_channels=n_output_channels,
initial_channels=initial_channels,
bilinear=bilinear,
name='M_1'
)
# self.M_2 = LSTMUNet(
# n_channels=Args.n_channels,
# bilinear=Args.bilinear,
# name='M_2'
# )
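# Usage sketch (all arguments below are illustrative placeholders):
# trainer = OutOfFoldTrainer(
#     trainer_id='oof-0',
#     device=torch.device('cuda'),
#     dataset_path=Path('data/train'),
#     dataset_config=BasicDataset.Config(...),
#     oof_p=1 / 3,
#     initial_channels=64,
#     bilinear=True,
# )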
|
{"hexsha": "6ed390430918c5ce3b74c44dd8101bfc4c7270e8", "size": 2935, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/trainers/oof_trainer.py", "max_stars_repo_name": "claudius-kienle/self-supervised-depth-denoising", "max_stars_repo_head_hexsha": "4dffb30e8ef5022ef665825d26f45f67bf712cfd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-12-02T15:06:28.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-03T09:48:32.000Z", "max_issues_repo_path": "src/trainers/oof_trainer.py", "max_issues_repo_name": "claudius-kienle/self-supervised-depth-denoising", "max_issues_repo_head_hexsha": "4dffb30e8ef5022ef665825d26f45f67bf712cfd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 23, "max_issues_repo_issues_event_min_datetime": "2022-02-24T09:17:03.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-21T16:57:58.000Z", "max_forks_repo_path": "src/trainers/oof_trainer.py", "max_forks_repo_name": "alr-internship/self-supervised-depth-denoising", "max_forks_repo_head_hexsha": "4dffb30e8ef5022ef665825d26f45f67bf712cfd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.9404761905, "max_line_length": 111, "alphanum_fraction": 0.6551959114, "include": true, "reason": "import numpy", "num_tokens": 678}
|
/**
* @file semimprk.cc
* @brief NPDE homework SemImpRK code
* @author Unknown, Oliver Rietmann
* @date 04.04.2021
* @copyright Developed at ETH Zurich
*/
#include "semimprk.h"
#include <Eigen/Core>
#include <algorithm>
#include <cmath>
#include <iomanip>
#include <iostream>
#include <vector>
#include "../../../lecturecodes/helperfiles/polyfit.h"
namespace SemImpRK {
/* SAM_LISTING_BEGIN_0 */
double CvgRosenbrock() {
double cvgRate = 0.0;
// Use polyfit to estimate the rate of convergence
// for SolveRosenbrock.
//====================
// Your code goes here
//====================
return cvgRate;
}
/* SAM_LISTING_END_0 */
} // namespace SemImpRK
|
{"hexsha": "112af004c470d2b4bf989373a8ccb26854a2b49a", "size": 680, "ext": "cc", "lang": "C++", "max_stars_repo_path": "homeworks/SemImpRK/templates/semimprk.cc", "max_stars_repo_name": "0xBachmann/NPDECODES", "max_stars_repo_head_hexsha": "70a9d251033ab3d8719f0e221de4c2f4e9e8f4ea", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 15.0, "max_stars_repo_stars_event_min_datetime": "2019-04-29T11:28:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T05:10:58.000Z", "max_issues_repo_path": "homeworks/SemImpRK/templates/semimprk.cc", "max_issues_repo_name": "0xBachmann/NPDECODES", "max_issues_repo_head_hexsha": "70a9d251033ab3d8719f0e221de4c2f4e9e8f4ea", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12.0, "max_issues_repo_issues_event_min_datetime": "2020-02-29T15:05:58.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-21T13:51:07.000Z", "max_forks_repo_path": "homeworks/SemImpRK/templates/semimprk.cc", "max_forks_repo_name": "0xBachmann/NPDECODES", "max_forks_repo_head_hexsha": "70a9d251033ab3d8719f0e221de4c2f4e9e8f4ea", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 26.0, "max_forks_repo_forks_event_min_datetime": "2020-01-09T15:59:23.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T16:27:33.000Z", "avg_line_length": 19.4285714286, "max_line_length": 54, "alphanum_fraction": 0.65, "num_tokens": 187}
|
#!/usr/bin/env python3
# naive_stereo.py
# This program performs block-based matching to create a depth map
# given 2 stereo images.
# Import image-handling classes from PIL (Pillow)
from PIL import Image, ImageOps, ImageDraw
import numpy as np
import math
###########
###########
MAX_DISPARITY = 50 ### PLAY AROUND WITH THIS
WINDOW_SIZE = 13 ### PLAY AROUND WITH THIS
#Load left and right images
imL = Image.open("im0.png") ### MODIFY THIS
imR = Image.open("im1.png") ### MODIFY THIS
###########
###########
#Convert image modes
imL = ImageOps.grayscale(imL)
imR = ImageOps.grayscale(imR)
#Convert to numpy arrays
imL = np.array(imL,dtype='int64')
imR = np.array(imR,dtype='int64')
#possible disparity values
possible_disparities = range(0,MAX_DISPARITY)
#window size
safe_zone = int(WINDOW_SIZE/2)
disparity_matrix = np.zeros(imL.shape,dtype='int64')
def E_0(col,row,d):
return np.sum((imL[row - safe_zone : row + safe_zone + 1, col-safe_zone + d: col + safe_zone + 1 + d]
- imR[row - safe_zone : row + safe_zone + 1, col-safe_zone : col + safe_zone + 1])**2)
def E(disparity_matrix):
costs = np.full(imL.shape,np.inf)
for d in possible_disparities:
print("Calculating disparity",d, "/",possible_disparities[-1])
for row in range(safe_zone, imL.shape[0] - safe_zone):
for col in range(safe_zone, imL.shape[1] - safe_zone - d):
local_result = E_0(col,row,d)
if local_result < costs[row][col]:
costs[row][col] = local_result
disparity_matrix[row][col] = d
return disparity_matrix
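# A vectorized alternative (illustrative sketch, not used below): shift the left
# image by each disparity and box-filter the squared difference, avoiding the
# per-pixel Python loops. Assumes SciPy is available; note np.roll wraps around
# at the border, whereas the loop above restricts the column range instead.
# from scipy.ndimage import uniform_filter
# def E_vectorized():
#     costs = np.full(imL.shape, np.inf)
#     disparities = np.zeros(imL.shape, dtype='int64')
#     for d in possible_disparities:
#         diff2 = (np.roll(imL, -d, axis=1) - imR).astype(float) ** 2
#         ssd = uniform_filter(diff2, size=WINDOW_SIZE)
#         better = ssd < costs
#         costs[better] = ssd[better]
#         disparities[better] = d
#     return disparities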
def normalize(input_im):
base = input_im.min()
roof = input_im.max()
diff = roof - base
scale = diff/255
input_im = input_im - base
output = input_im/scale
return np.uint8(output)
disparity_matrix = normalize(E(disparity_matrix))
# save depth map as image (so that you can reuse it later!!!)
imDepth_image = Image.fromarray(disparity_matrix)
imDepth_image = imDepth_image.convert('RGB')
imDepth_image.save("depth.png")
def convolution(imR, image, weak_filter, medium_filter, strong_filter):
# Depth-dependent blur ("koech effect" in the output filenames): far pixels
# (low values in the normalized depth map) get a strong box blur, mid-range
# pixels a medium blur, and near pixels (values above 150) stay sharp.
# The try/except blocks skip border windows whose size does not match the filter.
image_padded = imR
for i in range(image.shape[1]):
for j in range(image.shape[0]):
if(image[j][i]>=101 and image[j][i]<=150):
try:imR[j][i]=(weak_filter * image_padded[j:j + len(weak_filter), i:i + len(weak_filter)]).sum()
except:pass
elif(image[j][i]>=51 and image[j][i]<=100):
try:imR[j][i]=(medium_filter * image_padded[j:j + len(medium_filter), i:i + len(medium_filter)]).sum()
except:pass
elif(image[j][i]>=0 and image[j][i]<=50):
try:imR[j][i]=(strong_filter * image_padded[j:j + len(strong_filter), i:i + len(strong_filter)]).sum()
except:pass
image_padded = imR
return Image.fromarray(imR)
imL = Image.open("im0.png")
imR = Image.open("im1.png")
imL = ImageOps.grayscale(imL)
imR = ImageOps.grayscale(imR)
image=Image.open("depth.png")
image=ImageOps.grayscale(image)
image=np.array(image)
weak_filter=(1/9)*np.ones((3,3))
medium_filter=(1/49)*np.ones((7,7))
strong_filter=(1/169)*np.ones((13,13))
convolution(np.array(imR),image,weak_filter,medium_filter,strong_filter).save("koech_effect_r.jpg")
convolution(np.array(imL),image,weak_filter,medium_filter,strong_filter).save("koech_effect_l.jpg")
|
{"hexsha": "c035be9c4d13d1f731f86af27549ab00f6923b84", "size": 3434, "ext": "py", "lang": "Python", "max_stars_repo_path": "lab5/stereo.py", "max_stars_repo_name": "nahushr/Computer-Vision", "max_stars_repo_head_hexsha": "a0f9867c416a5f734f0de4bfae11c0b6cbbd5f66", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab5/stereo.py", "max_issues_repo_name": "nahushr/Computer-Vision", "max_issues_repo_head_hexsha": "a0f9867c416a5f734f0de4bfae11c0b6cbbd5f66", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab5/stereo.py", "max_forks_repo_name": "nahushr/Computer-Vision", "max_forks_repo_head_hexsha": "a0f9867c416a5f734f0de4bfae11c0b6cbbd5f66", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.6034482759, "max_line_length": 118, "alphanum_fraction": 0.6488060571, "include": true, "reason": "import numpy", "num_tokens": 938}
|
"""
这个例子将展示如何使用决策树进行分类.
"""
import sys
import numpy as np
import pandas as pd
import classicML as cml
DATASET_PATH = './datasets/西瓜数据集.tsv'
ATTRIBUTE_NAME = ['脐部', '色泽', '根蒂', '敲声', '纹理', '触感', '密度', '含糖率']
# Read the data
dataframe = pd.read_csv(DATASET_PATH, sep='\t', index_col=0, header=0)
train_index = np.asarray([1, 2, 3, 6, 7, 10, 14, 15, 16, 17]) - 1
validation_index = np.asarray([4, 5, 8, 9, 11, 12, 13]) - 1
train_ds = cml.data.Dataset(dataset_type='train')
val_ds = cml.data.Dataset(dataset_type='val')
train_ds.from_dataframe(dataframe.iloc[train_index])
val_ds.from_dataframe(dataframe.iloc[validation_index])
# Build the model
model = cml.models.DecisionTreeClassifier(attribute_name=ATTRIBUTE_NAME)
model.compile(criterion='gain',
pruning='pre')
# Train the model
model.fit(train_ds.x, train_ds.y, val_ds.x, val_ds.y)
# Visualize the model (if you are on macOS, comment out this line; it exists for CI testing)
if sys.platform != 'darwin':
cml.plots.plot_tree(model)
|
{"hexsha": "2c71f18d66e8335e7e14bff5bf94b3e8f9736f25", "size": 929, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/decision_tree.py", "max_stars_repo_name": "sun1638650145/classicML", "max_stars_repo_head_hexsha": "7e0c2155bccb6e491a150ee689d3786526b74565", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2020-05-10T12:11:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-31T13:23:55.000Z", "max_issues_repo_path": "examples/decision_tree.py", "max_issues_repo_name": "sun1638650145/classicML", "max_issues_repo_head_hexsha": "7e0c2155bccb6e491a150ee689d3786526b74565", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/decision_tree.py", "max_forks_repo_name": "sun1638650145/classicML", "max_forks_repo_head_hexsha": "7e0c2155bccb6e491a150ee689d3786526b74565", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-01-17T06:22:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-18T14:32:51.000Z", "avg_line_length": 29.03125, "max_line_length": 72, "alphanum_fraction": 0.7082884822, "include": true, "reason": "import numpy", "num_tokens": 362}
|
import os.path
import math
import numpy as np
from typing import Tuple
from adaptiveleak.utils.constants import PERIOD, BT_FRAME_SIZE, BIG_NUMBER, LENGTH_SIZE
from adaptiveleak.utils.encryption import AES_BLOCK_SIZE, CHACHA_NONCE_LEN
from adaptiveleak.utils.data_utils import calculate_bytes, truncate_to_block
from adaptiveleak.utils.data_types import PolicyType, EncodingMode, CollectMode, EncryptionMode
from adaptiveleak.utils.file_utils import iterate_dir, read_json_gz
from .energy_systems import EnergyUnit
MARGIN = 1e-2
NUM_PADDING_FRAMES = 2
PADDING_FRAMES_FACTOR = 500
def convert_rate_to_energy(collection_rate: float, width: int, encryption_mode: EncryptionMode, collect_mode: CollectMode, seq_length: int, num_features: int) -> float:
"""
Converts a target collection rate to an energy budget per sequence. This
per-sequence energy corresponds to a power level, given the fixed period per sequence.
Args:
collection_rate: The fraction of sequence elements to collect
width: The bit width of each feature
encryption_mode: The type of encryption algorithm (block or stream)
collect_mode: The type of collection mode
seq_length: The number of elements per sequence
num_features: The number of features in each sequence element
"""
# Make the energy unit based on uniform sampling
energy_unit = EnergyUnit(policy_type=PolicyType.UNIFORM,
encoding_mode=EncodingMode.STANDARD,
collect_mode=collect_mode,
encryption_mode=encryption_mode,
seq_length=seq_length,
num_features=num_features,
period=PERIOD)
# Calculate the energy required to collect the target rate
target_collected = int(collection_rate * seq_length)
sent_bytes = calculate_bytes(width=width,
num_collected=target_collected,
num_features=num_features,
seq_length=seq_length,
encryption_mode=encryption_mode)
energy_per_seq = energy_unit.get_energy(num_collected=target_collected,
num_bytes=sent_bytes,
use_noise=False)
return energy_per_seq + MARGIN
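# Usage sketch (illustrative values): the returned value acts as the per-sequence
# energy budget that the adaptive policies must stay under.
# budget = convert_rate_to_energy(collection_rate=0.7, width=16,
#                                 encryption_mode=EncryptionMode.STREAM,
#                                 collect_mode=CollectMode.LOW,
#                                 seq_length=50, num_features=6)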
def get_group_target_bytes(width: int,
collection_rate: float,
num_features: int,
seq_length: int,
encryption_mode: EncryptionMode,
energy_unit: EnergyUnit,
target_energy: float) -> int:
"""
Calculates the number of bytes targeted by the group encoding policy.
Args:
width: The bit-width of each feature
collection_rate: The collection rate (fraction)
num_features: The number of features per measurement
seq_length: The length of a full sequence
encryption_mode: The type of encryption (block or stream)
energy_unit: The simulated energy unit
target_energy: The targeted energy consumption per sequence
Returns:
The total number of bytes to target.
"""
# Get the target number of collected elements
num_collected = int(collection_rate * seq_length)
# Calculate the number of bytes used by the standard policy
standard_num_bytes = calculate_bytes(width=width,
num_collected=num_collected,
num_features=num_features,
seq_length=seq_length,
encryption_mode=encryption_mode)
# Estimate the energy required to send the number of bytes. We start by (conservatively)
# going multiple blocks under the given limit.
rounded_bytes = standard_num_bytes
num_padding_frames = NUM_PADDING_FRAMES + int(math.floor(rounded_bytes / PADDING_FRAMES_FACTOR))
for _ in range(num_padding_frames):
rounded_bytes = truncate_to_block(rounded_bytes, block_size=BT_FRAME_SIZE) - 1
# Subtract out the meta-data bytes (be conservative here)
metadata_bytes = LENGTH_SIZE + max(CHACHA_NONCE_LEN, AES_BLOCK_SIZE)
data_bytes = rounded_bytes - metadata_bytes
# Align with block encryption padding, as the block padding may put us into
# the next communication frame. Stream ciphers wouldn't need this, but we
# use this conservative approach to equalize the simulated communication
# with the hardware results
rounded_bytes = truncate_to_block(data_bytes, block_size=AES_BLOCK_SIZE) + metadata_bytes
estimated_energy = energy_unit.get_energy(num_collected=num_collected,
num_bytes=rounded_bytes,
use_noise=False)
# Adjust the number of sent bytes until we reach a lower energy level
while (estimated_energy > target_energy) and (rounded_bytes >= BT_FRAME_SIZE):
rounded_bytes = truncate_to_block(rounded_bytes, block_size=BT_FRAME_SIZE) - 1
data_bytes = rounded_bytes - metadata_bytes
rounded_bytes = truncate_to_block(data_bytes, block_size=AES_BLOCK_SIZE) + metadata_bytes
estimated_energy = energy_unit.get_energy(num_collected=num_collected,
num_bytes=rounded_bytes,
use_noise=False)
return rounded_bytes
def get_padded_collection_rate(dataset: str,
current_rate: float,
encryption_mode: str,
policy_type: str,
collect_mode: str,
width: int,
num_features: int,
seq_length: int) -> Tuple[float, int]:
"""
Adjusts the collection rate for padded policies.
Args:
dataset: The name of the dataset
current_rate: The existing collection rate
encryption_mode: The name of the encryption type (block or stream)
policy_type: The name of the policy
collect_mode: The name of the collection mode (tiny, small, low, or high)
width: The bit width of each feature
num_features: The number of features per measurement
seq_length: The number of measurements per sequence
Returns:
The adjusted collection rate and the corresponding number of collected elements
"""
# Create the energy unit
energy_unit = EnergyUnit(policy_type=PolicyType[policy_type.upper()],
encryption_mode=EncryptionMode[encryption_mode.upper()],
collect_mode=CollectMode[collect_mode.upper()],
encoding_mode=EncodingMode.PADDED,
seq_length=seq_length,
num_features=num_features,
period=PERIOD)
# Get the target energy budget per sequence
target_energy = convert_rate_to_energy(collection_rate=current_rate,
width=width,
encryption_mode=EncryptionMode[encryption_mode.upper()],
collect_mode=CollectMode[collect_mode.upper()],
num_features=num_features,
seq_length=seq_length)
# Get the directory of the existing logs
base = os.path.dirname(__file__)
policy_name = '{0}_standard'.format(policy_type.lower())
log_folder = os.path.join(base, '..', 'saved_models', dataset, collect_mode.lower(), policy_name)
best_diff = BIG_NUMBER
best_rate = current_rate
best_collected = 0
min_rate = BIG_NUMBER
min_collected = BIG_NUMBER
did_find = False
for log_path in iterate_dir(log_folder, pattern='.*json.gz'):
standard_results = read_json_gz(log_path)
# Get the max number of elements
num_elements = max(standard_results['num_measurements'])
num_bytes = calculate_bytes(width=width,
num_collected=num_elements,
num_features=num_features,
seq_length=seq_length,
encryption_mode=EncryptionMode[encryption_mode.upper()])
avg_collected = int(math.ceil(np.average(standard_results['num_measurements'])))
estimated_energy = energy_unit.get_energy(num_collected=avg_collected,
num_bytes=num_bytes,
use_noise=False)
diff = abs(target_energy - estimated_energy)
rate = standard_results['policy']['collection_rate']
if rate < min_rate:
min_rate = rate
min_collected = num_elements
if (diff < best_diff) and (target_energy >= estimated_energy):
best_diff = diff
best_rate = rate
best_collected = num_elements
did_find = True
if did_find:
return best_rate, best_collected
return min_rate, min_collected
|
{"hexsha": "ae7b492d5d1305c0de54edbd7151204a8fef101c", "size": 9398, "ext": "py", "lang": "Python", "max_stars_repo_path": "adaptiveleak/energy_systems/conversion.py", "max_stars_repo_name": "tejaskannan/adaptive-sensor-security", "max_stars_repo_head_hexsha": "4c6dd1eb55eb30a8330c4bf3537e06c7d7802c0b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "adaptiveleak/energy_systems/conversion.py", "max_issues_repo_name": "tejaskannan/adaptive-sensor-security", "max_issues_repo_head_hexsha": "4c6dd1eb55eb30a8330c4bf3537e06c7d7802c0b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "adaptiveleak/energy_systems/conversion.py", "max_forks_repo_name": "tejaskannan/adaptive-sensor-security", "max_forks_repo_head_hexsha": "4c6dd1eb55eb30a8330c4bf3537e06c7d7802c0b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.711627907, "max_line_length": 168, "alphanum_fraction": 0.613428389, "include": true, "reason": "import numpy", "num_tokens": 1700}
|
# -*- coding: utf-8 -*-
"""Python implementation of Tapir Lab.'s Acoustic Lip Synchronization.
This program extracts required statistics for training and matching phases
Functions are divided into two sub-groups: auxiliary and main functions
Main Functions
--------------
Voice Activity Detection:
VAD(1D_array,
sampling_frequency=44100,
average_phoneme_duration=0.06,
noise_figure="constant",
show_graph="False",
)
1D_array is the voice recording.
Set show_graph to True to plot the detected activity.
Linear Predictive Coding Coefficients:
lpc(correlation_coefficients, order_of_filter)
correlation coefficients should be normalized.
Parameter Extraction:
parameter_extraction(1D_array, sampling_frequency, subsegments)
1D_array is the voice recording.
Subsegments are constructed in VAD.
The rest of the functions are called inside these three main functions.
Dependencies
------------
os
soundfile
numpy
scipy
matplotlib
math
------------
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Lip-Sync
%% -------------------
%% $Author: Halil Said Cankurtaran$,
%% $Date: December 11th, 2020$,
%% $Revision: 1.1$
%% Tapir Lab.
%% Copyright: Halil Said Cankurtaran, Tapir Lab.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
from math import ceil, floor, log
import soundfile
import numpy as np
import scipy.io as sio
import scipy.signal as signal # lfilter, firwin
import matplotlib.pyplot as plt
# %% Auxiliary functions
def load_sound(filename):
"""Load recorded .wav file.
Requires `soundfile` package to read a record.
Parameters
----------
filename : str
Returns
-------
x_t : numpy.ndarray, dtype('float64')
One-dimensional array of sound recording
sampling_frequency : int
Sampling frequency of recording which is always 44100 Hz in study
Raises
------
FileNotFoundError
If can not load the sound file.
"""
try:
x_t, sampling_frequency = soundfile.read(filename)
return x_t, sampling_frequency
except Exception as e:
raise e
def load_mat_file(filename):
"""Use to load previously recorded .mat files.
Requires `scipy.io` package to read a record. `scipy.io` imported as sio
Actually, ``scipy.io.loadmat(filename)`` loads the recorded file
Additionally, read file parsed into previously named variables.
Parameters
----------
filename : str
Exact path to .mat file
Returns
-------
x_t : numpy.ndarray, dtype('float64')
A one-dimensional array of sound recording
sampling_frequency : int
The sampling frequency of recording, which is always 44100 Hz in
the study
number_of_audio_channels : int
mono or stereo
number_of_bits : int
The number of bits to represent each sample in memory
Raises
------
FileNotFoundError
If can not load the mat file.
"""
try:
mat_file = sio.loadmat(filename) # loads .mat file
# Parse loaded file
x_t = mat_file["r_t_recorded"]
sampling_frequency = mat_file["sampling_frequency"][0][0] # save int
number_of_audio_channels = mat_file["number_of_audio_channels"]
number_of_bits = mat_file["number_of_bits"]
return [x_t,
sampling_frequency,
number_of_audio_channels,
number_of_bits,
]
except Exception as e:
raise e
def hist_bin_mid(array, neyman_pearson_bin_size=3):
# Numpy returns start and end points of bins,
# Matlab returns mid point of bins,
# This function calculates mid points of histogram bins
number_of_bins = floor(len(array)/neyman_pearson_bin_size)
tmp, edges = np.histogram(array, number_of_bins)
# Calculate center of the bins
bin_mid = [(edges[i] + edges[i+1])/2 for i in range(0, len(edges) - 1)]
return tmp, bin_mid
def normalize(r_t):
# Normalize the given signal to the [-1, 1] interval.
return r_t/max(abs(r_t))
def nextPow2(value):
# Implementation of builtin matlab function
# finds next power of two for given value
return ceil(log(abs(value), 2))
def removeDC(r_t):
# Remove the average (DC) component from the signal
return r_t - r_t.mean()
def lpFilter(x_t, sampling_frequency, cutoff, ntaps):
"""Filter given (x_t) signal with specified FIR low-pass filter.
Requires `scipy.signal` package. `scipy.signal` imported as signal
Parameters
----------
x_t : numpy.ndarray, dtype('float64')
A one-dimensional array of sound recording
sampling_frequency : int
The sampling frequency of recording, which is always 44100 Hz in
the study
cutoff : int
The cutoff frequency of the filter
ntaps : int
Number of taps
Returns
-------
filtered_signal : numpy.ndarray, dtype('float64')
"""
nyq_rate = sampling_frequency / 2
fir_coeff = signal.firwin(ntaps, cutoff/nyq_rate) # create filter coeffs
# filter x_t with all-zero low-pass filter
filtered_signal = signal.lfilter(fir_coeff, 1.0, x_t)
return filtered_signal
def reshape_signal(x_t, chunk_size):
# To process signal column-wise, result is transposed
number_of_chunks = floor(len(x_t)/chunk_size) # calculate number of chunks
# Remove residual part which may occur at the end of the signal.
x_t_resized = x_t[:chunk_size*number_of_chunks]
# Reshape signal to column vectors
xt_reshaped = x_t_resized.reshape(number_of_chunks, chunk_size).transpose()
return xt_reshaped
def avgEnergy(x_t_reshaped, scale="log"):
"""Calculate average energy of reshaped signal.
The signal is reshaped according to the following chunk_size calculation:
``energy_bin_duration = 15e-3``
``chunk_size = floor(energy_bin_duration * sampling_frequency)``
Parameters
----------
x_t_reshaped : numpy.ndarray, dtype('float64')
This array is divided into equal chunks by given calculation
scale : str, optional
Logarithmic or linear calculation of the energy of chunks
It is logarithmic by default
Returns
-------
log_energy/linear_energy : numpy.ndarray, dtype('float64')
calculated energy of chunks
"""
if scale.lower() == "log":
log_energy = 10*np.log10((abs(x_t_reshaped)**2).mean(axis=0))
return log_energy
elif scale.lower() == "linear":
linear_energy = (abs(x_t_reshaped)**2).mean(axis=0)
return linear_energy
else:
print("please set scale to \"log\" or \"linear\" "
"| default -> \"log\"")
return 0
def optimumThreshold(r_t, option):
"""Calculate optimum noise figure in the signal to detect activity.
Parameters
----------
r_t : numpy.ndarray, dtype('float64')
Signal to detect noise figure
option : str
This parameter must be given as "adaptive" or "constant"
If "constant", a fixed empirical offset of -12 dB is used
Returns
-------
noise_figure : float
"""
avg_bin_energy_in_dB = avgEnergy(r_t, "log")
avg_bin_energy = avgEnergy(r_t, "linear")
ndts = 3 # noise distribution three sigma
tmp, bin_mid = hist_bin_mid(avg_bin_energy) # returns mid points of bins
if option == "adaptive":
noise_figure_in_dB = 10 * log(bin_mid[ndts], 10)
elif option == "constant":
noise_figure_in_dB = -12
else:
print("please specify option as 'adaptive' or 'constant'")
noise_figure = (max(avg_bin_energy_in_dB) + noise_figure_in_dB)
return noise_figure
def autocorrbyfft(x_t, order=-2, mod="normalized"):
"""Calculate autocorreletaion via multiplication in frequency domain.
Autocorrelation array will be sample averaged and normalized by default.
However, if normalization is not required, a None type can be given.
Parameters
----------
x_t : numpy.ndarray, dtype('float64')
Signal to autocorrelate
order : integer, optional
Order is required to provide an adequate but shorter array
If the order is not specified, the function returns a complete array
mod : str, optional
A normalized autocorrelation array is needed in the LPC calculation
If normalization is not needed, pass None as the parameter
Returns
-------
R : numpy.ndarray, dtype('float64')
Autocorrelation array of signal.
If order is specified len(R) == order+1!
"""
length_of_signal = len(x_t) # get length of signal to calculate fft size
fft_size = nextPow2(2*length_of_signal-1) # calculate fft size
# Transform signal to frequency domain
X = np.fft.rfft(x_t, 2**fft_size)
# Calculate autocorrelation and transform signal to time domain
# Acoustic signals are real, thus use (rfft and irfft)
R = np.fft.irfft(abs(X)**2)
R = R / len(x_t) # Sample average of autocorrelation array
if mod == "normalized": # If normalized. It is default
R = R / R[0] # Normalize
return R[:order+1]
def activityMask(x_t, chunk_size, noise_figure):
"""Detect activity in the given signal which is divided into chunks.
Parameters
----------
x_t : numpy.ndarray, dtype('float64')
Signal to detect activity
chunk_size : int
length of chunk
noise_figure : str
how the noise figure is determined: "adaptive" or "constant"
Returns
-------
activity_mask_flattened : numpy.ndarray, dtype('np.int8')
An array of ones and zeros which indicates the activity in the signal
"""
# Calculate average energy of bins
avg_bin_energy_in_dB = avgEnergy(x_t, scale="log")
# Calculate optimum threshold to detect activity
if noise_figure == "constant":
opt_threshold = optimumThreshold(x_t, "constant")
if noise_figure == "adaptive":
opt_threshold = optimumThreshold(x_t, "adaptive")
# Decide activity in chunks by comparing energy level to optimum threshold
# Use comprehension to process in single line
mask = [i > opt_threshold for i in avg_bin_energy_in_dB]
# Each boolean in mask represents chunk_size samples
# Thus the following line expands the activity decision to chunk_size samples
activity_mask = np.array([[i]*chunk_size for i in mask])
# Convert boolean mask to binary array which has same size with x_t
activity_mask_flattened = (activity_mask.flatten())*1
return activity_mask_flattened
def construct_segments(activity_mask_flattened):
"""Find first and last samples of segments acc. to activity mask.
Parameters
----------
activity_mask_flattened : numpy.ndarray, dtype("np.int8")
array of 1s and 0s which shows where the activity is
Returns
-------
segments : numpy.ndarray, dtype("int64")
An array of beginning and end points of segments
Each row represents a particular segment of activity
"""
# Find indices of first and last samples in a segment
segment_start_idx = np.asarray(np.diff(activity_mask_flattened) > 0).nonzero()[0] + 1
segment_stop_idx = np.asarray(np.diff(activity_mask_flattened) < 0).nonzero()[0]
# If activity does not end before the last sample of recording
if len(segment_start_idx) > len(segment_stop_idx):
segment_stop_idx = np.insert(segment_stop_idx,
len(segment_stop_idx),
len(activity_mask_flattened))
# If activity starts with the first sample there might be a missing start
if len(segment_stop_idx) > len(segment_start_idx):
segment_start_idx = np.insert(segment_start_idx, 0, 0)
# There might be a third case where activity starts before the first sample
# and ends after the last sample; in that case the lengths would be equal,
# but the segment start and end samples would be mis-detected
segments = np.array([segment_start_idx, segment_stop_idx]).transpose()
return segments
def construct_subsegments(segments, sampling_frequency,
average_phoneme_duration):
"""Create an array of indices which represents the subsegments in segments.
Parameters
----------
segments : numpy.ndarray, dtype("int64")
An array of beginning and endpoints of segments
sampling_frequency : int
The sampling frequency of the recorded signal
average_phoneme_duration : float, optional
The length of the average phoneme duration in seconds.
Returns
-------
subsegments : numpy.ndarray, dtype("np.int8")
An array of beginning and end points of subsegments
subsegment[0] -> segment number
subsegment[1] -> subsegment number
subsegment[2] -> start of subsegment
subsegment[3] -> end of subsegment
"""
samples_per_subsegment = floor(average_phoneme_duration*sampling_frequency)
sps = samples_per_subsegment
number_of_segments = len(segments)
count_of_subsegments = []
if number_of_segments < 1:
raise ValueError('No segmentation found!')
for k in range(0, number_of_segments):
segment_length_in_samples = segments[k, 1] - segments[k, 0] + 1
number_of_subsegments = 1
if segment_length_in_samples >= sps:
number_of_subsegments = floor(segment_length_in_samples / sps)
count_of_subsegments.append(number_of_subsegments)
subsegments = []
segment_id = 0
for k in count_of_subsegments:
for i in range(0, k):
subsegments.append([
segment_id,
i,
segments[segment_id, 0] + i*sps,
segments[segment_id, 0] + (i+1)*sps,
])
segment_id += 1
return subsegments
def calculate_ffreqs(lpc_coeff, sampling_frequency):
"""Calculate formant frequencies.
Finds roots of all-pole filter and converts to frequencies
Parameters
----------
lpc_coeff : numpy.ndarray, dtype('float64')
An array of linear predictive coding coefficients
sampling_frequency : int
Sampling frequency of recorded signal
Returns
-------
ffreqs : numpy.ndarray, dtype('float64')
Formant frequencies
"""
# Formants are the roots of all-pole transfer function
# Following calculates roots of lpc coefficients
# Calculate roots
roots = np.roots(lpc_coeff) # Calculate poles of voice model
rts = roots[np.imag(roots) >= 0.01] # Select positives of conjugate roots
# Convert formant frequencies
ffreqs = sorted(np.arctan2(np.imag(rts), np.real(rts))*sampling_frequency/(2*np.pi))
return ffreqs
# %% Main functions
def VAD(x_t, sampling_frequency, avg_phn_dr, noise_fig, show_graph=False):
"""Detect voice activity and return segments, and subsegments.
Parameters
----------
x_t : numpy.ndarray, dtype('float64')
One-dimensional array of sound recording
sampling_frequency : int
Sampling frequency of recorded signal
avg_phn_dr : float
average phoneme duration
noise_fig : string
"adaptive" or "constant" to decide optimum activity threshold
show_graph : bool
A parameter to show a graph of the activity detection if needed.
Returns
-------
xt_processed_flattened : numpy.ndarray, dtype('float64')
The loaded sound after pre-processing for the next steps,
i.e. the normalized, zero-mean version of the recording
segments : numpy.ndarray, dtype("int64")
An array of beginning and end points of voice activities
Each row represents a particular part of the activity
subsegments : numpy.ndarray, dtype("np.int8")
An array of beginning and end points of subsegments
subsegment[0] -> segment number
subsegment[1] -> subsegment number
subsegment[2] -> start of subsegment
subsegment[3] -> end of subsegment
"""
# Required variables to process x_t
energy_bin_duration = 15e-3
chunk_size = floor(energy_bin_duration * sampling_frequency)
# Process x_t
xt_normalized = normalize(x_t)
xt_normalized_zero_mean = removeDC(xt_normalized)
xt_processed_reshaped = reshape_signal(xt_normalized_zero_mean, chunk_size)
# Create activity mask
activity_mask_flattened = activityMask(xt_processed_reshaped,
chunk_size,
noise_fig)
xt_processed_flattened = xt_processed_reshaped.transpose().flatten()
if show_graph:
plt.plot(xt_processed_flattened)
plt.plot(activity_mask_flattened)
segments = construct_segments(activity_mask_flattened)
subsegments = construct_subsegments(segments,
sampling_frequency,
avg_phn_dr)
return [xt_processed_flattened, segments, subsegments]
def lpc(r, n):
"""Calculate Linear Predictive Coding Coefficients with autocorrelation
method.
Parameters
----------
r : numpy.ndarray, dtype('float64')
Autocorrelation array of signal
n : int
Filter order
Returns
-------
lpc_coeff : np.ndarray, dtype('float64')
Linear predictive coding coefficients
e : np.ndarray, dtype('float64')
Estimation error array
"""
order = n + 1
# Construct required matrix
a = np.zeros([order, order])
k = np.zeros(order)
e = np.zeros(order)
# initialization of the calculation
a[:, 0] = 1.0
k[1] = r[1]/r[0]
a[1, 1] = k[1]
e[1] = (1.0 - k[1]**2)*r[0]
# Calculation of LPC coefficients (Levinson-Durbin recursion)
for j in range(2, order):
k[j] = (r[j] - sum([a[j-1, i]*r[j-i] for i in range(1, j)]))/e[j-1]
a[j, j] = k[j]
for i in range(1, j):
a[j, i] = a[j-1, i] - k[j]*a[j-1][j-i]
e[j] = (1-k[j]**2)*e[j-1]
# Return only LPC Coefficients with correct order
lpc_coeff = np.ones(order)
# The all-pole filter is 1 - sum(lpc_coeff * z^-k), so the coefficients are negated
# Keep only the coefficients of the requested order; the first coefficient is set to +1
lpc_coeff[1:] = -a[n::][0, 1:]
return [lpc_coeff, e]
def parameter_extraction(x_t, sampling_frequency, subsegments):
"""Calculate formant freqs, mean, std. dev. and normalization vector.
Parameters
----------
x_t : numpy.ndarray, dtype('float64')
One-dimensional array of sound recording
sampling_frequency : int
Sampling frequency of recorded signal
subsegments : numpy.ndarray, dtype("np.int8")
Indices of subsegments with segment number and subsegment number.
subsegment[0] -> segment number
subsegment[1] -> subsegment number
subsegment[2] -> start of subsegment
subsegment[3] -> end of subsegment
Returns
-------
formants : numpy.ndarray, dtype('float64')
Formant frequencies of subsegments
formants[0] -> segment number
formants[1] -> subsegment number
mean_lpc_freqs : numpy.ndarray, dtype('float64')
Sample mean of formant frequencies
vowel_formant_deviation_error_normalization_vector : numpy.ndarray, dtype('float64')
Weighting coefficients used in the mean square error estimation
"""
# Required variables
number_of_ffreqs = 3 # number of formant frequencies will be used
order = floor(2 + sampling_frequency/1000)
counter = 0
# Create empty matrices to store data
formants = np.zeros([len(subsegments), number_of_ffreqs + 2], np.float64)
# lpc_coeffs = np.zeros([len(subsegments), order+1])
# For each subsegment, find formant frequencies
for subsegment in subsegments:
subsegment_tmp = x_t[subsegment[-2]:subsegment[-1]] # Slice of record
r = autocorrbyfft(subsegment_tmp, order, "normalized") # ACorr. Arr.
lpc_coeff, e = lpc(r, order) # Calc. of LPC coefficients of subsegment
fftmp = calculate_ffreqs(lpc_coeff, sampling_frequency) # root -> freq
# Segment and subsegment numbers are saved as first two elements
formants[counter, 0:2] = subsegment[0], subsegment[1]
# Only the required part of the formant frequencies is returned
formants[counter, 2:] = fftmp[:number_of_ffreqs]
# lpc_coeffs[counter,:] = lpc_coeff # store coeffs of each subsegment
counter += 1
mean_lpc_freqs = np.mean(formants[:, -3:], 0) # mean of ffreqs
# ddof=1 to calculate the sample standard deviation -> 1/(N-1)
std_lpc_freqs = np.std(formants[:, -3:], 0, ddof=1) # std. dev. of ffreqs
# Calculation of the proposed weighting coefficients
tmp = min(std_lpc_freqs) / std_lpc_freqs
tmp = tmp/np.linalg.norm(tmp)
vowel_formant_deviation_error_normalization_vector = tmp
return [formants, mean_lpc_freqs,
vowel_formant_deviation_error_normalization_vector]
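# Example pipeline (a minimal sketch; "record.wav" is a placeholder path):
# x_t, fs = load_sound("record.wav")
# x_t_processed, segments, subsegments = VAD(x_t, fs, 0.06, "constant")
# formants, mean_ffreqs, weights = parameter_extraction(x_t_processed, fs, subsegments)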
|
{"hexsha": "f0a77215cc594f2517caa4a95124a52d72f41982", "size": 21836, "ext": "py", "lang": "Python", "max_stars_repo_path": "parameter_extraction.py", "max_stars_repo_name": "TapirLab/lip-sync", "max_stars_repo_head_hexsha": "f34c545597eaa946174c15bf4df0b91325deb5bd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-09-10T17:22:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-21T07:52:05.000Z", "max_issues_repo_path": "parameter_extraction.py", "max_issues_repo_name": "TapirLab/lip-sync", "max_issues_repo_head_hexsha": "f34c545597eaa946174c15bf4df0b91325deb5bd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-06-08T22:31:40.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-13T03:22:18.000Z", "max_forks_repo_path": "parameter_extraction.py", "max_forks_repo_name": "TapirLab/lip-sync", "max_forks_repo_head_hexsha": "f34c545597eaa946174c15bf4df0b91325deb5bd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-01-10T07:15:20.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-10T07:15:20.000Z", "avg_line_length": 34.7154213037, "max_line_length": 90, "alphanum_fraction": 0.6350064114, "include": true, "reason": "import numpy,import scipy", "num_tokens": 5048}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Common fit statistics used in gamma-ray astronomy.
see :ref:`fit-statistics`
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
__all__ = [
"cash",
"cstat",
"wstat",
"get_wstat_mu_bkg",
"get_wstat_gof_terms",
"chi2",
"chi2constvar",
"chi2datavar",
"chi2gehrels",
"chi2modvar",
"chi2xspecvar",
]
N_ON_MIN = 1e-25
def cash(n_on, mu_on):
r"""Cash statistic, for Poisson data.
The Cash statistic is defined as:
.. math::
C = 2 \left( \mu_{on} - n_{on} \log \mu_{on} \right)
and :math:`C = 0` where :math:`\mu_{on} <= 0`.
For more information see :ref:`fit-statistics`
Parameters
----------
n_on : array_like
Observed counts
mu_on : array_like
Expected counts
Returns
-------
stat : ndarray
Statistic per bin
References
----------
* `Sherpa statistics page section on the Cash statistic
<http://cxc.cfa.harvard.edu/sherpa/statistics/#cash>`_
* `Sherpa help page on the Cash statistic
<http://cxc.harvard.edu/sherpa/ahelp/cash.html>`_
* `Cash 1979, ApJ 228, 939
<http://adsabs.harvard.edu/abs/1979ApJ...228..939C>`_
"""
# suppress zero division warnings, they are corrected below
with np.errstate(divide="ignore", invalid="ignore"):
stat = 2 * (mu_on - n_on * np.log(mu_on))
stat = np.where(mu_on > 0, stat, 0)
return stat
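# Quick sanity check (illustrative): for n_on = mu_on = 1 the statistic is
# 2 * (1 - 1 * log(1)) = 2.
# >>> float(cash(1.0, 1.0))   # -> 2.0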
def cstat(n_on, mu_on, n_on_min=N_ON_MIN):
r"""C statistic, for Poisson data.
The C statistic is defined as
.. math::
C = 2 \left[ \mu_{on} - n_{on} + n_{on}
\left( \log(n_{on}) - \log(\mu_{on}) \right) \right]
and :math:`C = 0` where :math:`\mu_{on} <= 0`.
``n_on_min`` handles the case where ``n_on`` is 0 or less and
the log cannot be taken.
For more information see :ref:`fit-statistics`
Parameters
----------
n_on : array_like
Observed counts
mu_on : array_like
Expected counts
n_on_min : array_like
``n_on`` = ``n_on_min`` where ``n_on`` <= ``n_on_min``.
Returns
-------
stat : ndarray
Statistic per bin
References
----------
* `Sherpa stats page section on the C statistic
<http://cxc.cfa.harvard.edu/sherpa/statistics/#cstat>`_
* `Sherpa help page on the C statistic
<http://cxc.harvard.edu/sherpa/ahelp/cash.html>`_
* `Cash 1979, ApJ 228, 939
<http://adsabs.harvard.edu/abs/1979ApJ...228..939C>`_
"""
n_on = np.asanyarray(n_on, dtype=np.float64)
mu_on = np.asanyarray(mu_on, dtype=np.float64)
n_on_min = np.asanyarray(n_on_min, dtype=np.float64)
n_on = np.where(n_on <= n_on_min, n_on_min, n_on)
term1 = np.log(n_on) - np.log(mu_on)
stat = 2 * (mu_on - n_on + n_on * term1)
stat = np.where(mu_on > 0, stat, 0)
return stat
def wstat(n_on, n_off, alpha, mu_sig, mu_bkg=None, extra_terms=True):
r"""W statistic, for Poisson data with Poisson background.
For a definition of WStat see :ref:`wstat`. If ``mu_bkg`` is not provided
it will be calculated according to the profile likelihood formula.
Parameters
----------
n_on : array_like
Total observed counts
n_off : array_like
Total observed background counts
alpha : array_like
Exposure ratio between on and off region
mu_sig : array_like
Signal expected counts
mu_bkg : array_like, optional
Background expected counts
extra_terms : bool, optional
Add model independent terms to convert stat into goodness-of-fit
parameter, default: True
Returns
-------
stat : ndarray
Statistic per bin
References
----------
* `Habilitation M. de Naurois, p. 141
<http://inspirehep.net/record/1122589/files/these_short.pdf>`_
* `XSPEC page on Poisson data with Poisson background
<https://heasarc.nasa.gov/xanadu/xspec/manual/XSappendixStatistics.html>`_
"""
# Note: This is equivalent to what's defined on the XSPEC page under the
# following assumptions
# t_s * m_i = mu_sig
# t_b * m_b = mu_bkg
# t_s / t_b = alpha
n_on = np.atleast_1d(np.asanyarray(n_on, dtype=np.float64))
n_off = np.atleast_1d(np.asanyarray(n_off, dtype=np.float64))
alpha = np.atleast_1d(np.asanyarray(alpha, dtype=np.float64))
mu_sig = np.atleast_1d(np.asanyarray(mu_sig, dtype=np.float64))
if mu_bkg is None:
mu_bkg = get_wstat_mu_bkg(n_on, n_off, alpha, mu_sig)
term1 = mu_sig + (1 + alpha) * mu_bkg
# suppress zero division warnings, they are corrected below
with np.errstate(divide="ignore", invalid="ignore"):
# This is a false positive error from pylint
# See https://github.com/PyCQA/pylint/issues/2436
term2_ = -n_on * np.log(
mu_sig + alpha * mu_bkg
) # pylint:disable=invalid-unary-operand-type
# Handle n_on == 0
condition = n_on == 0
term2 = np.where(condition, 0, term2_)
# suppress zero division warnings, they are corrected below
with np.errstate(divide="ignore", invalid="ignore"):
# This is a false positive error from pylint
# See https://github.com/PyCQA/pylint/issues/2436
term3_ = -n_off * np.log(mu_bkg) # pylint:disable=invalid-unary-operand-type
# Handle n_off == 0
condition = n_off == 0
term3 = np.where(condition, 0, term3_)
stat = 2 * (term1 + term2 + term3)
if extra_terms:
stat += get_wstat_gof_terms(n_on, n_off)
return stat
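# Illustrative call (arbitrary values): per-bin WStat for an on/off measurement
# with an on/off exposure ratio alpha = 0.1; mu_bkg is profiled out automatically.
# >>> wstat(n_on=[10, 20], n_off=[40, 60], alpha=0.1, mu_sig=[5.0, 12.0])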
def get_wstat_mu_bkg(n_on, n_off, alpha, mu_sig):
"""Calculate ``mu_bkg`` for wstat
see :ref:`wstat`.
"""
n_on = np.atleast_1d(np.asanyarray(n_on, dtype=np.float64))
n_off = np.atleast_1d(np.asanyarray(n_off, dtype=np.float64))
alpha = np.atleast_1d(np.asanyarray(alpha, dtype=np.float64))
mu_sig = np.atleast_1d(np.asanyarray(mu_sig, dtype=np.float64))
# NOTE: Corner cases in the docs are all handled correctly by this formula
C = alpha * (n_on + n_off) - (1 + alpha) * mu_sig
D = np.sqrt(C ** 2 + 4 * alpha * (alpha + 1) * n_off * mu_sig)
mu_bkg = (C + D) / (2 * alpha * (alpha + 1))
return mu_bkg
def get_wstat_gof_terms(n_on, n_off):
"""Calculate goodness of fit terms for wstat
see :ref:`wstat`.
"""
term = np.zeros(len(n_on))
# suppress zero division warnings, they are corrected below
with np.errstate(divide="ignore", invalid="ignore"):
term1 = -n_on * (1 - np.log(n_on))
term2 = -n_off * (1 - np.log(n_off))
term += np.where(n_on == 0, 0, term1)
term += np.where(n_off == 0, 0, term2)
return 2 * term
def chi2(N_S, B, S, sigma2):
r"""Chi-square statistic with user-specified variance.
.. math::
\chi^2 = \frac{(N_S - B - S) ^ 2}{\sigma ^ 2}
Parameters
----------
N_S : array_like
Number of observed counts
B : array_like
Model background
S : array_like
Model signal
sigma2 : array_like
Variance
Returns
-------
stat : ndarray
Statistic per bin
References
----------
* Sherpa stats page (http://cxc.cfa.harvard.edu/sherpa/statistics/#chisq)
"""
N_S = np.asanyarray(N_S, dtype=np.float64)
B = np.asanyarray(B, dtype=np.float64)
S = np.asanyarray(S, dtype=np.float64)
sigma2 = np.asanyarray(sigma2, dtype=np.float64)
stat = (N_S - B - S) ** 2 / sigma2
return stat
def chi2constvar(N_S, N_B, A_S, A_B):
r"""Chi-square statistic with constant variance.
"""
N_S = np.asanyarray(N_S, dtype=np.float64)
N_B = np.asanyarray(N_B, dtype=np.float64)
A_S = np.asanyarray(A_S, dtype=np.float64)
A_B = np.asanyarray(A_B, dtype=np.float64)
alpha2 = (A_S / A_B) ** 2
# Need to multiply with np.ones_like(N_S) here?
sigma2 = (N_S + alpha2 * N_B).mean()
stat = chi2(N_S, A_B, A_S, sigma2)
return stat
def chi2datavar(N_S, N_B, A_S, A_B):
r"""Chi-square statistic with data variance.
"""
N_S = np.asanyarray(N_S, dtype=np.float64)
N_B = np.asanyarray(N_B, dtype=np.float64)
A_S = np.asanyarray(A_S, dtype=np.float64)
A_B = np.asanyarray(A_B, dtype=np.float64)
alpha2 = (A_S / A_B) ** 2
sigma2 = N_S + alpha2 * N_B
stat = chi2(N_S, A_B, A_S, sigma2)
return stat
def chi2gehrels(N_S, N_B, A_S, A_B):
r"""Chi-square statistic with Gehrel's variance.
"""
N_S = np.asanyarray(N_S, dtype=np.float64)
N_B = np.asanyarray(N_B, dtype=np.float64)
A_S = np.asanyarray(A_S, dtype=np.float64)
A_B = np.asanyarray(A_B, dtype=np.float64)
alpha2 = (A_S / A_B) ** 2
sigma_S = 1 + np.sqrt(N_S + 0.75)
sigma_B = 1 + np.sqrt(N_B + 0.75)
sigma2 = sigma_S ** 2 + alpha2 * sigma_B ** 2
stat = chi2(N_S, A_B, A_S, sigma2)
return stat
def chi2modvar(S, B, A_S, A_B):
r"""Chi-square statistic with model variance.
"""
S = np.asanyarray(S, dtype=np.float64)
B = np.asanyarray(B, dtype=np.float64)
A_S = np.asanyarray(A_S, dtype=np.float64)
A_B = np.asanyarray(A_B, dtype=np.float64)
stat = chi2datavar(S, B, A_S, A_B)
return stat
def chi2xspecvar(N_S, N_B, A_S, A_B):
r"""Chi-square statistic with XSPEC variance.
"""
N_S = np.asanyarray(N_S, dtype=np.float64)
N_B = np.asanyarray(N_B, dtype=np.float64)
A_S = np.asanyarray(A_S, dtype=np.float64)
A_B = np.asanyarray(A_B, dtype=np.float64)
# TODO: is this correct?
mask = (N_S < 1) | (N_B < 1)
# _stat = np.empty_like(mask, dtype='float')
# _stat[mask] = 1
stat = np.where(mask, 1, chi2datavar(N_S, N_B, A_S, A_B))
return stat
|
{"hexsha": "0c099a36aeb95c4118d5b65a38016306bd34151e", "size": 9770, "ext": "py", "lang": "Python", "max_stars_repo_path": "gammapy/stats/fit_statistics.py", "max_stars_repo_name": "contrera/gammapy", "max_stars_repo_head_hexsha": "aa0a74baa977ee2477b5c63e036075f4219792a3", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gammapy/stats/fit_statistics.py", "max_issues_repo_name": "contrera/gammapy", "max_issues_repo_head_hexsha": "aa0a74baa977ee2477b5c63e036075f4219792a3", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gammapy/stats/fit_statistics.py", "max_forks_repo_name": "contrera/gammapy", "max_forks_repo_head_hexsha": "aa0a74baa977ee2477b5c63e036075f4219792a3", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.1556195965, "max_line_length": 85, "alphanum_fraction": 0.6209825998, "include": true, "reason": "import numpy", "num_tokens": 3024}
|
"""
"""
import re
import decorator
import numpy as np
import pandas as pd
import datetime
import time
try:
import cPickle as pickle
except ImportError:
import pickle
__author__ = 'Seung Hyeon Yu'
__email__ = 'rambor12@business.kaist.ac.kr'
def _memoize(func, *args, **kw):
# should we refresh the cache?
refresh = False
refresh_kw = func.mrefresh_keyword
# kw is not always set - check args
if refresh_kw in func.__code__.co_varnames:
if args[func.__code__.co_varnames.index(refresh_kw)]:
refresh = True
# check in kw if not already set above
if not refresh and refresh_kw in kw:
if kw[refresh_kw]:
refresh = True
key = pickle.dumps(args, 1) + pickle.dumps(kw, 1)
cache = func.mcache
if not refresh and key in cache:
return cache[key]
else:
cache[key] = result = func(*args, **kw)
return result
def memoize(f, refresh_keyword='mrefresh'):
"""
Memoize decorator. The refresh keyword is the keyword
used to bypass the cache (in the function call).
"""
f.mcache = {}
f.mrefresh_keyword = refresh_keyword
return decorator.decorator(_memoize, f)
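# Usage sketch (hypothetical function): results are cached per argument set and
# the cache can be bypassed through the refresh keyword.
# @memoize
# def load_prices(ticker, mrefresh=False):
#     ...  # expensive call
# load_prices('spx')                   # computed and cached
# load_prices('spx')                   # served from the cache
# load_prices('spx', mrefresh=True)    # cache bypassed and refreshed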
def parse_arg(arg):
"""
Parses arguments for convenience. Argument can be a
csv list ('a,b,c'), a string, a list, a tuple.
Returns a list.
"""
# handle string input
if type(arg) == str:
arg = arg.strip()
# parse csv as tickers and create children
if ',' in arg:
arg = arg.split(',')
arg = [x.strip() for x in arg]
# assume single string - create single item list
else:
arg = [arg]
return arg
def clean_ticker(ticker):
"""
Cleans a ticker for easier use throughout MoneyTree
Splits by space and only keeps first bit. Also removes
any characters that are not letters. Returns as lowercase.
>>> clean_ticker('^VIX')
'vix'
>>> clean_ticker('SPX Index')
'spx'
"""
pattern = re.compile(r'[\W_]+')
res = pattern.sub('', ticker.split(' ')[0])
return res.lower()
def clean_tickers(tickers):
"""
Maps clean_ticker over tickers.
"""
return [clean_ticker(x) for x in tickers]
def fmtp(number):
"""
Formatting helper - percent
"""
if np.isnan(number):
return '-'
return format(number, '.2%')
def fmtpn(number):
"""
Formatting helper - percent no % sign
"""
if np.isnan(number):
return '-'
return format(number * 100, '.2f')
def fmtn(number):
"""
Formatting helper - float
"""
if np.isnan(number):
return '-'
return format(number, '.2f')
def get_period_name(period):
period = period.upper()
periods = {
'B': 'business day',
'C': 'custom business day',
'D': 'daily',
'W': 'weekly',
'M': 'monthly',
'BM': 'business month end',
'CBM': 'custom business month end',
'MS': 'month start',
'BMS': 'business month start',
'CBMS': 'custom business month start',
'Q': 'quarterly',
'BQ': 'business quarter end',
'QS': 'quarter start',
'BQS': 'business quarter start',
'Y': 'yearly',
'A': 'yearly',
'BA': 'business year end',
'AS': 'year start',
'BAS': 'business year start',
'H': 'hourly',
'T': 'minutely',
'S': 'secondly',
'L': 'milliseconds',
'U': 'microseconds'}
if period in periods:
return periods[period]
else:
return None
def scale(val, src, dst):
"""
Scale value from src range to dst range.
If value outside bounds, it is clipped and set to
the low or high bound of dst.
Ex:
scale(0, (0.0, 99.0), (-1.0, 1.0)) == -1.0
scale(-5, (0.0, 99.0), (-1.0, 1.0)) == -1.0
"""
if val < src[0]:
return dst[0]
if val > src[1]:
return dst[1]
return ((val - src[0]) / (src[1] - src[0])) * (dst[1] - dst[0]) + dst[0]
def as_percent(self, digits=2):
return as_format(self, '.%s%%' % digits)
def as_format(item, format_str='.2f'):
"""
Map a format string over a pandas object.
"""
if isinstance(item, pd.Series):
return item.map(lambda x: format(x, format_str))
elif isinstance(item, pd.DataFrame):
return item.applymap(lambda x: format(x, format_str))
DECIMAL = 2
def prettyfloat(number):
return "{:9.2f}".format(number)
def to_numeric(string):
"""
Return a numeric value if parsing succeeds; otherwise return the string itself.
The return type depends on the input.
:type string: str
"""
if pd.isnull(string) or isinstance(string, float):
return string
else:
try:
return float(string.replace(',', ''))
except (ValueError, AttributeError):
return string
def get_form(date):
"""
Get Form from the date
:param date:
:return: form
"""
if isinstance(date, str):
if len(date) == 8:
form = "%Y%m%d"
elif '-' in date:
form = "%Y-%m-%d"
elif '/' in date:
form = "%Y/%m/%d"
elif '.' in date:
form = "%Y.%m.%d"
else:
raise NotImplementedError
return form
def date_to_numeric(date):
"""
Return Unix Time which is total elapsed nanoseconds from 1970-01-01
:param date: any time format
:return: int total elapsed nanoseconds from 1970-01-01
"""
if isinstance(date, pd.Timestamp):
return date.value
elif isinstance(date, (datetime.datetime, np.datetime64)):
return pd.Timestamp(date).value
elif isinstance(date, str):
return int(time.mktime(str_to_date(date).timetuple()))
def date_to_str(date, form="%Y-%m-%d"):
"""
Return Date String
:param date: date
:param form: format of return
:return: str formatted date time
"""
if isinstance(date, str):
return date
elif isinstance(date, (pd.Timestamp, datetime.datetime)):
return date.strftime(form)
def str_to_date(date, form=None):
"""
Return Date with datetime format
:param form:
:param date: str date
:return: datetime date
"""
if form is None:
form = get_form(date)
return datetime.datetime.strptime(date, form)
def to_list(*args):
"""
Return list
:return: list of listed values
"""
result = []
for arg in args:
if isinstance(arg, (str, int, float)):
result.append([arg])
elif isinstance(arg, (list, pd.Series, np.ndarray)):
result.append(list(arg))
elif arg is None:
result.append(arg)
else:
raise NotImplementedError
return result
|
{"hexsha": "4d59bebb98b332564376f0979960f3268dc99f87", "size": 7069, "ext": "py", "lang": "Python", "max_stars_repo_path": "KSIF/core/utils.py", "max_stars_repo_name": "ksif/KSIF", "max_stars_repo_head_hexsha": "03d4a25b0ae30453ca8254a48641f1b9665f1d2c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-03-29T14:55:36.000Z", "max_stars_repo_stars_event_max_datetime": "2019-03-29T14:55:36.000Z", "max_issues_repo_path": "KSIF/core/utils.py", "max_issues_repo_name": "ksif/KSIF", "max_issues_repo_head_hexsha": "03d4a25b0ae30453ca8254a48641f1b9665f1d2c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "KSIF/core/utils.py", "max_forks_repo_name": "ksif/KSIF", "max_forks_repo_head_hexsha": "03d4a25b0ae30453ca8254a48641f1b9665f1d2c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.5633333333, "max_line_length": 77, "alphanum_fraction": 0.5459046541, "include": true, "reason": "import numpy", "num_tokens": 1744}
|
import pyautogui
import PySimpleGUI as sg
import cv2
import numpy as np
"""
Demo program that displays a webcam using OpenCV
"""
def main():
sg.theme('Black')
# define the window layout
layout = [[sg.Text('OpenCV Demo', size=(40, 1), justification='center', font='Helvetica 20')],
[sg.Image(filename='', key='image')],
[sg.Button('Record', size=(10, 1), font='Arial 14'),
sg.Button('Stop', size=(10, 1), font='Arial 14'),
sg.Button('Exit', size=(10, 1), font='Arial 14'),
sg.Button('Screenshot',size=(10,1),font='Arial 14') ]]
# create the window and show it without the plot
window = sg.Window('Demo Application - OpenCV Integration',
layout, location=(800, 400))
# ---===--- Event LOOP Read and display frames, operate the GUI --- #
cap = cv2.VideoCapture(0)
recording = False
while True:
event, values = window.read(timeout=20)
if event == 'Exit' or event == sg.WIN_CLOSED:
return
elif event == 'Record':
recording = True
        elif event == 'Screenshot':
myScreenshot = pyautogui.screenshot()
myScreenshot.save(r'shot.png')
elif event == 'Stop':
recording = False
            img = np.full((480, 640), 255, dtype=np.uint8)
            # this is faster, shorter and needs fewer imports
            imgbytes = cv2.imencode('.png', img)[1].tobytes()
window['image'].update(data=imgbytes)
if recording:
ret, frame = cap.read()
imgbytes = cv2.imencode('.png', frame)[1].tobytes() # ditto
window['image'].update(data=imgbytes)
main()
|
{"hexsha": "ed7b170507d5585ee1a7e2898a3bf5e74e202750", "size": 1718, "ext": "py", "lang": "Python", "max_stars_repo_path": "Camera.py", "max_stars_repo_name": "akpythonyt/Mystuffs", "max_stars_repo_head_hexsha": "9a7d4198ea0d3907af510e118ea8fa9ecdc5f4db", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 47, "max_stars_repo_stars_event_min_datetime": "2020-11-12T10:52:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-05T22:44:48.000Z", "max_issues_repo_path": "Camera.py", "max_issues_repo_name": "SahityaRoy/AKpythoncodes", "max_issues_repo_head_hexsha": "331428e9b157f3a8e6f35987417fa022956047d5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-02-24T16:24:26.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-24T16:24:26.000Z", "max_forks_repo_path": "Camera.py", "max_forks_repo_name": "SahityaRoy/AKpythoncodes", "max_forks_repo_head_hexsha": "331428e9b157f3a8e6f35987417fa022956047d5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 45, "max_forks_repo_forks_event_min_datetime": "2020-10-26T09:58:12.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T05:42:29.000Z", "avg_line_length": 30.6785714286, "max_line_length": 98, "alphanum_fraction": 0.5564610012, "include": true, "reason": "import numpy", "num_tokens": 418}
|
"""
Library Features:
Name: lib_data_io_binary
Author(s): Francesco Avanzi (francesco.avanzi@cimafoundation.org), Fabio Delogu (fabio.delogu@cimafoundation.org)
Date: '20210603'
Version: '1.0.0'
"""
#######################################################################################
# Library
import logging
import os
import struct
from copy import deepcopy
import numpy as np
import pandas as pd
import xarray as xr
from lib_info_args import logger_name
# Logging
log_stream = logging.getLogger(logger_name)
# Debug
import matplotlib.pylab as plt
#######################################################################################
# --------------------------------------------------------------------------------
# Method to search the geographical reference whose grid size matches the binary file size
def search_geo_reference(file_name, info_dict, tag_geo_reference=None,
tag_cols='ncols', tag_rows='nrows', scale_factor=4):
file_handle = open(file_name, 'rb')
file_stream = file_handle.read(-1)
    stream_n = len(file_stream)
data_tag = None
for info_key, info_fields in info_dict.items():
data_n = int(info_fields[tag_cols]) * int(info_fields[tag_rows]) * scale_factor
        if data_n == stream_n:
data_tag = info_key
break
file_handle.close()
assert data_tag == tag_geo_reference, " ===> Geographical reference set and found are not equal. " \
"Check your settings and datasets"
return data_tag
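# Minimal, hypothetical usage sketch for search_geo_reference: the file name, grid
# sizes and dictionary keys below are illustrative assumptions, not shipped defaults.
# info_dict = {'grid_1km': {'ncols': 1215, 'nrows': 1377},
#              'grid_500m': {'ncols': 2430, 'nrows': 2754}}
# tag = search_geo_reference('variable.bin', info_dict, tag_geo_reference='grid_1km')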
# --------------------------------------------------------------------------------
# --------------------------------------------------------------------------------
# Method to read 2d variable in binary format (saved as 1d integer array)
def read_data_binary(file_name, var_geo_x, var_geo_y, var_geo_attrs=None, var_format='i', var_scale_factor=10,
var_name=None, var_time=None, var_geo_1d=True,
coord_name_geo_x='west_east', coord_name_geo_y='south_north', coord_name_time='time',
dim_name_geo_x='west_east', dim_name_geo_y='south_north', dim_name_time='time',
dims_order=None):
if dims_order is None:
dims_order = [dim_name_geo_y, dim_name_geo_x, dim_name_time]
if os.path.exists(file_name):
# Open file handle
file_handle = open(file_name, 'rb')
rows = var_geo_y.shape[0]
cols = var_geo_x.shape[0]
# Values shape (1d)
var_n = rows * cols
# Values format
data_format = var_format * var_n
# Open and read binary file
data_stream = file_handle.read(-1)
array_data = struct.unpack(data_format, data_stream)
# Close file handle
file_handle.close()
# Reshape binary file in Fortran order and scale Data (float32)
file_values = np.reshape(array_data, (rows, cols), order='F')
file_values = np.float32(file_values / var_scale_factor)
if var_geo_1d:
var_geo_x_2d, var_geo_y_2d = np.meshgrid(var_geo_x, var_geo_y)
else:
var_geo_x_2d = var_geo_x
var_geo_y_2d = var_geo_y
geo_y_upper = var_geo_y_2d[0, 0]
geo_y_lower = var_geo_y_2d[-1, 0]
if geo_y_lower > geo_y_upper:
var_geo_y_2d = np.flipud(var_geo_y_2d)
file_dims = file_values.shape
file_high = file_dims[0]
file_wide = file_dims[1]
var_data = np.zeros(shape=[var_geo_x_2d.shape[0], var_geo_y_2d.shape[1], 1])
var_data[:, :, :] = np.nan
var_data[:, :, 0] = file_values
else:
log_stream.warning(' ===> File ' + file_name + ' not available in loaded datasets!')
var_data = None
if var_data is not None:
if isinstance(var_time, pd.Timestamp):
var_time = pd.DatetimeIndex([var_time])
elif isinstance(var_time, pd.DatetimeIndex):
pass
else:
log_stream.error(' ===> Time format is not allowed. Expected Timestamp or Datetimeindex')
            raise NotImplementedError('Case not implemented yet')
var_da = xr.DataArray(var_data, name=var_name, dims=dims_order,
coords={coord_name_time: ([dim_name_time], var_time),
coord_name_geo_x: ([dim_name_geo_x], var_geo_x_2d[0, :]),
coord_name_geo_y: ([dim_name_geo_y], var_geo_y_2d[:, 0])})
if var_geo_attrs is not None:
var_da.attrs = var_geo_attrs
else:
log_stream.warning(' ===> All filenames in the selected period are not available')
var_da = None
return var_da
# -------------------------------------------------------------------------------------
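# Minimal usage sketch (illustrative only): the file name, grid vectors, variable
# name and timestamp below are hypothetical assumptions, not values shipped with the library.
if __name__ == "__main__":
    geo_x = np.linspace(6.0, 19.0, 1215)   # hypothetical 1-D longitude grid
    geo_y = np.linspace(47.0, 36.0, 1377)  # hypothetical 1-D latitude grid
    var_da = read_data_binary('variable.bin', geo_x, geo_y,
                              var_name='snow_depth', var_time=pd.Timestamp('2021-06-03'))
    if var_da is not None:
        print(var_da.dims, float(var_da.max()))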
|
{"hexsha": "a1455e55c6243a2b48ee8572c73dd0df0b956605", "size": 4827, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/s3m_source2nc_converter/lib_data_io_binary.py", "max_stars_repo_name": "c-hydro/fp-s3m", "max_stars_repo_head_hexsha": "cbb1f347f558fbfaa8d564441931989bc833a02d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tools/s3m_source2nc_converter/lib_data_io_binary.py", "max_issues_repo_name": "c-hydro/fp-s3m", "max_issues_repo_head_hexsha": "cbb1f347f558fbfaa8d564441931989bc833a02d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tools/s3m_source2nc_converter/lib_data_io_binary.py", "max_forks_repo_name": "c-hydro/fp-s3m", "max_forks_repo_head_hexsha": "cbb1f347f558fbfaa8d564441931989bc833a02d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.2335766423, "max_line_length": 117, "alphanum_fraction": 0.5608038119, "include": true, "reason": "import numpy", "num_tokens": 1077}
|
import json
import pickle5 as pickle
import joblib
import numpy as np
__locations = None
__data_columns = None
__model = None
def load_saved_artifacts():
print("Loading server artifact ... Start")
global __data_columns
global __locations
global __model
with open("./artifacts/columns.json", 'r') as f:
__data_columns = json.load(f)['data-columns']
__locations = __data_columns[3:]
__model = joblib.load("artifacts/banglore_home_prices_model.joblib")
print("Loading saved artifact .. Done")
def get_location_names():
return __locations
def get_estimated_price(location, sqft, bhk, bath):
    try:
        loc_index = __data_columns.index(location.lower())
    except ValueError:
        # unknown location: no one-hot location column will be set
        loc_index = -1
x = np.zeros(len(__data_columns))
x[0] = sqft
x[1] = bath
x[2] = bhk
if loc_index >= 0:
x[loc_index] = 1
return round(__model.predict([x])[0], 2)
if __name__ == "__main__":
load_saved_artifacts()
print(get_location_names())
print(get_estimated_price('1st phase jp nagar', 1000, 2, 2))
|
{"hexsha": "becd914f4c8fd3c91ee1da62f92df0d0c337fc75", "size": 1082, "ext": "py", "lang": "Python", "max_stars_repo_path": "util.py", "max_stars_repo_name": "hello5423/template-python-django", "max_stars_repo_head_hexsha": "0e08a9cc1e75ff38a82ee7e5d58c821fb7b2de89", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "util.py", "max_issues_repo_name": "hello5423/template-python-django", "max_issues_repo_head_hexsha": "0e08a9cc1e75ff38a82ee7e5d58c821fb7b2de89", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "util.py", "max_forks_repo_name": "hello5423/template-python-django", "max_forks_repo_head_hexsha": "0e08a9cc1e75ff38a82ee7e5d58c821fb7b2de89", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.0444444444, "max_line_length": 72, "alphanum_fraction": 0.6728280961, "include": true, "reason": "import numpy", "num_tokens": 294}
|
# -*- coding: utf-8 -*-
__all__ = ["Zero", "Constant"]
import theano.tensor as tt
class Zero:
def __call__(self, x):
return tt.zeros_like(x)
class Constant:
def __init__(self, value):
self.value = tt.as_tensor_variable(value)
def __call__(self, x):
return tt.zeros_like(x) + self.value
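# Minimal usage sketch (illustrative only; the vector below is a hypothetical input):
if __name__ == "__main__":
    import numpy as np
    x = tt.vector("x")
    mean_fn = Constant(10.0)
    print(mean_fn(x).eval({x: np.ones(3, dtype=x.dtype)}))  # -> [10. 10. 10.]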
|
{"hexsha": "48895537e8664223d4ab7da5fa633c000cd9c33f", "size": 329, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/exoplanet/gp/means.py", "max_stars_repo_name": "t-brandt/exoplanet", "max_stars_repo_head_hexsha": "68c567de27702190b41434c56d78315358f5c441", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/exoplanet/gp/means.py", "max_issues_repo_name": "t-brandt/exoplanet", "max_issues_repo_head_hexsha": "68c567de27702190b41434c56d78315358f5c441", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/exoplanet/gp/means.py", "max_forks_repo_name": "t-brandt/exoplanet", "max_forks_repo_head_hexsha": "68c567de27702190b41434c56d78315358f5c441", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-04T22:27:14.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-04T22:27:14.000Z", "avg_line_length": 17.3157894737, "max_line_length": 49, "alphanum_fraction": 0.6261398176, "include": true, "reason": "import theano", "num_tokens": 85}
|
# Copyright Shirin Yamani,2021
# Licensed under MIT licensed.
# See LICENSE.txt for more information.
import torch
import torchvision
import torchvision.transforms as transforms
import numpy as np
import time
import matplotlib.pyplot as plt
import argparse
import torch.optim as optim
from random import shuffle
import pickle
from models import *
from utilities import *
PAD_TOKEN = 0
SOS_TOKEN = 1
EOS_TOKEN = 2
class Trainer():
def initialize_weights(self, model):
if hasattr(model, 'weight') and model.weight.dim() > 1:
nn.init.xavier_uniform_(model.weight.data)
def save_dictionary(self, dictionary, input=True):
if input is True:
with open('saved_models/' + self.input_lang_dic.name + '2' + self.output_lang_dic.name + '/input_dic.pkl', 'wb') as f:
pickle.dump(dictionary, f, pickle.HIGHEST_PROTOCOL)
else:
with open('saved_models/' + self.input_lang_dic.name + '2' + self.output_lang_dic.name + '/output_dic.pkl', 'wb') as f:
pickle.dump(dictionary, f, pickle.HIGHEST_PROTOCOL)
def __init__(self, lang1, lang2, data_directory, reverse, MAX_LENGTH, MAX_FILE_SIZE, batch_size, lr=0.0005, hidden_size=256, encoder_layers=3, decoder_layers=3,
encoder_heads=8, decoder_heads=8, encoder_ff_size=512, decoder_ff_size=512, encoder_dropout=0.1, decoder_dropout=0.1, device='cpu'):
self.MAX_LENGTH = MAX_LENGTH
self.MAX_FILE_SIZE = MAX_FILE_SIZE
self.device = device
self.input_lang_dic, self.output_lang_dic, self.input_lang_list, self.output_lang_list = load_files(lang1, lang2, data_directory, reverse, self.MAX_FILE_SIZE, self.MAX_LENGTH)
for sentence in self.input_lang_list:
self.input_lang_dic.add_sentence(sentence)
for sentence in self.output_lang_list:
self.output_lang_dic.add_sentence(sentence)
self.save_dictionary(self.input_lang_dic, input=True)
self.save_dictionary(self.output_lang_dic, input=False)
self.tokenized_input_lang = [tokenize(sentence, self.input_lang_dic, self.MAX_LENGTH) for sentence in self.input_lang_list]
self.tokenized_output_lang = [tokenize(sentence, self.output_lang_dic, self.MAX_LENGTH) for sentence in self.output_lang_list]
self.batch_size = batch_size
self.data_loader = load_batches(self.tokenized_input_lang, self.tokenized_output_lang, self.batch_size, self.device)
input_size = self.input_lang_dic.n_count
output_size = self.output_lang_dic.n_count
#define encoder and decoder parts of transformer
encoder_part = Encoder(input_size, hidden_size, encoder_layers, encoder_heads, encoder_ff_size, encoder_dropout, self.device)
decoder_part = Decoder(output_size, hidden_size, decoder_layers, decoder_heads, decoder_ff_size, decoder_dropout, self.device)
self.transformer = Transformer(encoder_part, decoder_part, self.device, PAD_TOKEN).to(self.device)
self.transformer.apply(self.initialize_weights)
self.loss_func = nn.CrossEntropyLoss(ignore_index=PAD_TOKEN)
self.optimizer = optim.Adam(self.transformer.parameters(), lr=lr)
def train(self, epochs, saved_model_directory):
start_time = time.time()
for epoch in range(epochs):
#shuffle batches to prevent overfitting
shuffle(self.data_loader)
start_time = time.time()
train_loss = 0
for input, target in self.data_loader:
#zero gradient
self.optimizer.zero_grad()
#pass through transformer
output, _ = self.transformer(input, target[:,:-1])
output_dim = output.shape[-1]
#flatten and omit SOS from target
output = output.contiguous().view(-1, output_dim)
target = target[:,1:].contiguous().view(-1)
#loss
loss = self.loss_func(output, target)
#backprop
loss.backward()
nn.utils.clip_grad_norm_(self.transformer.parameters(), 1)
self.optimizer.step()
train_loss += loss.item()
train_loss /= len(self.data_loader)
end_time = int(time.time() - start_time)
torch.save(self.transformer.state_dict(), saved_model_directory + self.input_lang_dic.name +
'2' + self.output_lang_dic.name + '/transformer_model_{}.pt'.format(epoch))
print('Epoch: {}, Time: {}s, Estimated {} seconds remaining.'.format(epoch, end_time, (epochs-epoch)*end_time))
print('\tTraining Loss: {:.4f}\n'.format(train_loss))
print('Training finished!')
def main():
parser = argparse.ArgumentParser(description='Hyperparameters for training Transformer')
#hyperparameter loading
parser.add_argument('--lang1', type=str, default='french', help='first language in language text file')
parser.add_argument('--lang2', type=str, default='english', help='second language in language text file')
parser.add_argument('--data_directory', type=str, default='data', help='data directory')
parser.add_argument('--reverse', type=int, default=1, help='whether to switch roles of lang1 and lang2 as input and output')
    # default hyperparameters (don't need to be passed when calling the script)
parser.add_argument('--MAX_LENGTH', type=int, default=60, help='max number of tokens in input')
parser.add_argument('--MAX_FILE_SIZE', type=int, default=100000, help='max number of lines to read from files')
parser.add_argument('--batch_size', type=int, default=128, help='size of batches passed through networks at each step')
parser.add_argument('--lr', type=float, default=0.0005, help='learning rate of models')
parser.add_argument('--hidden_size', type=int, default=256, help='number of hidden layers in transformer')
parser.add_argument('--encoder_layers', type=int, default=3, help='number of encoder layers')
parser.add_argument('--decoder_layers', type=int, default=3, help='number of decoder layers')
parser.add_argument('--encoder_heads', type=int, default=8, help='number of encoder heads')
parser.add_argument('--decoder_heads', type=int, default=8, help='number of decoder heads')
parser.add_argument('--encoder_ff_size', type=int, default=512, help='fully connected input size for encoder')
parser.add_argument('--decoder_ff_size', type=int, default=512, help='fully connected input size for decoder')
parser.add_argument('--encoder_dropout', type=float, default=0.1, help='dropout for encoder feed forward')
parser.add_argument('--decoder_dropout', type=float, default=0.1, help='dropout for decoder feed forward')
parser.add_argument('--device', type=str, default='cpu', help='cpu or gpu depending on availability and compatability')
parser.add_argument('--epochs', type=int, default=10, help='number of iterations of dataset through network for training')
parser.add_argument('--saved_model_directory', type=str, default='saved_models/', help='data directory')
args = parser.parse_args()
lang1 = args.lang1
lang2 = args.lang2
data_directory = args.data_directory
reverse = args.reverse
MAX_LENGTH = args.MAX_LENGTH
MAX_FILE_SIZE = args.MAX_FILE_SIZE
batch_size = args.batch_size
lr = args.lr
hidden_size = args.hidden_size
encoder_layers = args.encoder_layers
decoder_layers = args.decoder_layers
encoder_heads = args.encoder_heads
decoder_heads = args.decoder_heads
encoder_ff_size = args.encoder_ff_size
decoder_ff_size = args.decoder_ff_size
encoder_dropout = args.encoder_dropout
decoder_dropout = args.decoder_dropout
device = args.device
epochs = args.epochs
saved_model_directory = args.saved_model_directory
transformer = Trainer(lang1, lang2, data_directory, reverse, MAX_LENGTH, MAX_FILE_SIZE, batch_size, lr, hidden_size, encoder_layers, decoder_layers,
encoder_heads, decoder_heads, encoder_ff_size, decoder_ff_size, encoder_dropout, decoder_dropout, device)
transformer.train(epochs, saved_model_directory)
if __name__ == "__main__":
main()
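# Example invocation (illustrative; the paths and sizes are assumptions about your setup):
#   python train.py --lang1 french --lang2 english --data_directory data \
#       --batch_size 64 --epochs 5 --device cpu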
|
{"hexsha": "98d23bf3c7ce3c3ba53bb555960af0f470498efb", "size": 8473, "ext": "py", "lang": "Python", "max_stars_repo_path": "train.py", "max_stars_repo_name": "shirinyamani/Translation_Machine", "max_stars_repo_head_hexsha": "ab4a6a2c73560ba5b281ec3ed6737ef7f208b955", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-19T18:00:28.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-19T18:00:28.000Z", "max_issues_repo_path": "train.py", "max_issues_repo_name": "shirinyamani/Translation_Machine", "max_issues_repo_head_hexsha": "ab4a6a2c73560ba5b281ec3ed6737ef7f208b955", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2021-10-15T19:28:09.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-08T14:07:35.000Z", "max_forks_repo_path": "train.py", "max_forks_repo_name": "shirinyamani/Translation_Machine", "max_forks_repo_head_hexsha": "ab4a6a2c73560ba5b281ec3ed6737ef7f208b955", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-10-29T20:00:41.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-09T18:33:13.000Z", "avg_line_length": 48.4171428571, "max_line_length": 184, "alphanum_fraction": 0.6849994099, "include": true, "reason": "import numpy", "num_tokens": 1802}
|
!---------------------------------------------------!
! Copyright (c) 2017 Shunsuke A. Sato !
! Released under the MIT license !
! https://opensource.org/licenses/mit-license.php !
!---------------------------------------------------!
subroutine mesh
use global_variables
use hpsi
use density_matrix
implicit none
integer :: ix, iy
write(*,'(A)')'===== Making mesh ================================================================'
write(*,'(A)')
write(*,'(A)')
allocate(xn(0:Nx),xyn(0:Nx,0:Nx))
dx = length_x/dble(Nx)
do ix = 0,Nx
xn(ix) = dx*dble(ix) - 0.5d0*length_x
end do
do ix = 0,Nx
do iy = 0,Nx
xyn(ix,iy)= xn(ix)+xn(iy)
end do
end do
call initialize_hpsi
call initialize_density_matrix
write(*,'(A)')'===== Complete Making mesh ========================================================'
return
end subroutine mesh
|
{"hexsha": "d4ca8ca62812d79da7009cd45913b453e703e720", "size": 917, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/mesh.f90", "max_stars_repo_name": "shunsuke-sato/qm1d", "max_stars_repo_head_hexsha": "b09bbe97dc2987188fe635b27dce89ce82e5d1ee", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/mesh.f90", "max_issues_repo_name": "shunsuke-sato/qm1d", "max_issues_repo_head_hexsha": "b09bbe97dc2987188fe635b27dce89ce82e5d1ee", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/mesh.f90", "max_forks_repo_name": "shunsuke-sato/qm1d", "max_forks_repo_head_hexsha": "b09bbe97dc2987188fe635b27dce89ce82e5d1ee", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.4722222222, "max_line_length": 101, "alphanum_fraction": 0.4449291167, "num_tokens": 246}
|
import requests
import json
import pandas as pd
from pandas import json_normalize
import matplotlib.pyplot as plt
import seaborn as sns
import networkx as nx
from bokeh.io import show, output_file
from bokeh.models import Plot, Range1d, MultiLine, Circle, HoverTool, TapTool, BoxSelectTool, WheelZoomTool
from bokeh.models.graphs import from_networkx, NodesAndLinkedEdges, EdgesAndLinkedNodes
from bokeh.palettes import Spectral4
import warnings
warnings.filterwarnings('ignore')
class Issues:
def __init__(self, repos):
self.repos = repos
token = 'mytoken'
self.headers = {'Authorization': f'token {token}'}
self.configure_pandas()
self.df = self.init_df()
def init_df(self):
try:
dfs = []
for repo in self.repos:
url = f'https://api.github.com/repos/filetrust/{repo}/issues'
res = requests.get(url, headers=self.headers, params={'state': 'all'}).json()
data = json_normalize(res, max_level=1)
temp_df = pd.DataFrame(data)
temp_df['repo'] = repo
dfs.append(temp_df)
df = pd.concat(dfs, ignore_index=True)
return df
except requests.exceptions.RequestException as e:
raise SystemExit(e)
def get_df(self):
return self.df
def configure_pandas(self):
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)
pd.set_option('display.expand_frame_repr', False)
def important(self):
# Selecting Important Columns
df = self.df[['created_at', 'state','closed_at','user.login','author_association','title','body','milestone.title','milestone.state','repo']]
# Creating date columns
df['created_at'] = pd.to_datetime(df['created_at']).dt.date
df['closed_at'] = pd.to_datetime(df['closed_at']).dt.date
self.df = df
def show_pie(self, column):
plt.figure(figsize=(15,10))
print(self.df[column].value_counts())
self.df[column].value_counts().plot(kind='pie',colors = ['blue','red'],autopct='%.2f%%', fontsize=20)
plt.title("Pie Chart Showing Open Vs. Closed Issues")
plt.show()
def show_pie_association(self, column):
plt.figure(figsize=(15,10))
print(self.df[column].value_counts())
self.df[column].value_counts().plot(kind='pie',colors = ['purple','green','yellow'],explode = (0.1 ,0.1, 0),startangle = 90,autopct='%.2f%%', fontsize=20)
plt.title("Pie Chart Showing User Association")
plt.show()
def show_bar_chart_by_repo(self):
#Visualize Repos with number of Issues
plt.figure(figsize=(15,10))
self.df["repo"].value_counts().plot.bar(title="Bar Chart Showing Number of Issues By Repo")
plt.ylabel('Number of Issues')
plt.xlabel('Repo')
plt.show()
def show_bar_chart_by_date(self):
#Visualize Dates with number of Issues
plt.figure(figsize=(40,15))
chart = sns.countplot(
data = self.df,
x = 'created_at',
order = self.df['created_at'].value_counts().index
)
chart.set_xticklabels(chart.get_xticklabels(), rotation=45, horizontalalignment='right')
plt.title("A bar Graph showing number of Issues per date")
def show_state_user(self):
plt.figure(figsize=(15,10))
sns.catplot(y="user.login", hue="state", kind="count",palette="pastel", edgecolor=".6", data=self.df, height=10);
plt.title("A graph Showing Open Vs. Closed Issues per user")
def show_grid_chart(self, column):
#Visualize Repos with number of Issues
df = self.df
keys = [pair for pair, x in df.groupby([column])]
plt.figure(figsize=(15,10))
plt.plot(keys, df.groupby([column]).count())
plt.xticks(keys)
plt.grid()
plt.show()
def show_bar_chart_by_user(self):
# Number of Issues per Individual
self.df['user.login'].value_counts().head(30).plot(kind='barh', figsize=(20,10), title="Bar Graph Showing Number of Issues per user")
def table_project_state(self):
table = self.df.groupby('repo')['state'].value_counts().unstack().fillna(0)
print(table)
def table_user_state(self):
table = self.df.groupby(['user.login','created_at']).sum()
print(table)
def show_en_graph(self):
df = self.df
df = df.rename({'user.login':'dusers'}, axis=1)
issues = list(df.title.unique())
users = list(df.dusers.unique())
plt.figure(figsize=(12, 12))
g = nx.from_pandas_edgelist(df, source='dusers', target='title', edge_attr='dusers')
layout = nx.spring_layout(g,iterations=50)
nx.draw_networkx_edges(g, layout, edge_color='#AAAAAA')
users = [node for node in g.nodes() if node in df.dusers.unique()]
size = [g.degree(node) * 80 for node in g.nodes() if node in df.dusers.unique()]
nx.draw_networkx_nodes(g, layout, nodelist=users, node_size=size, node_color='lightblue')
issues = [node for node in g.nodes() if node in df.title.unique()]
nx.draw_networkx_nodes(g, layout, nodelist=issues, node_size=100, node_color='#AAAAAA')
high_degree_issues = [node for node in g.nodes() if node in df.title.unique() and g.degree(node) > 1]
nx.draw_networkx_nodes(g, layout, nodelist=high_degree_issues, node_size=100, node_color='#fc8d62')
user_dict = dict(zip(users, users))
nx.draw_networkx_labels(g, layout, labels=user_dict)
plt.axis('off')
plt.title("Network Graph of Users and the Issues generated")
plt.show()
# Exporting Interactive Graph
TOOLTIPS = [
("name", "@dusers"),
]
plot = Plot(x_range=Range1d(-1.1,1.1), y_range=Range1d(-1.1,1.1))
plot.title.text = "Network Graph of Users and the Issues generated"
plot.add_tools(HoverTool(tooltips=TOOLTIPS), TapTool(), BoxSelectTool(), WheelZoomTool())
graph_renderer = from_networkx(g, nx.spring_layout, scale=1, center=(0,0))
graph_renderer.node_renderer.glyph = Circle(size=15, fill_color=Spectral4[0])
graph_renderer.node_renderer.selection_glyph = Circle(size=15, fill_color=Spectral4[2])
graph_renderer.node_renderer.hover_glyph = Circle(size=15, fill_color=Spectral4[1])
graph_renderer.edge_renderer.glyph = MultiLine(line_color="#CCCCCC", line_alpha=0.8, line_width=5)
graph_renderer.edge_renderer.selection_glyph = MultiLine(line_color=Spectral4[2], line_width=5)
graph_renderer.edge_renderer.hover_glyph = MultiLine(line_color=Spectral4[1], line_width=5)
graph_renderer.selection_policy = NodesAndLinkedEdges()
graph_renderer.inspection_policy = EdgesAndLinkedNodes()
plot.renderers.append(graph_renderer)
output_file("interactive_graph.html")
show(plot)
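# Minimal usage sketch (illustrative only): the repository names are hypothetical and a
# real GitHub token must replace the placeholder in Issues.__init__ for the API calls to work.
# issues = Issues(['repo-one', 'repo-two'])
# issues.important()
# issues.show_pie('state')
# issues.show_bar_chart_by_repo()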
|
{"hexsha": "863964681e9a3ab38be52fa9d4ad2eba7fc632e8", "size": 7608, "ext": "py", "lang": "Python", "max_stars_repo_path": "upwork-devs/lwasampijja-baker/issues.py", "max_stars_repo_name": "GiuseMSD/k8-data-visualization", "max_stars_repo_head_hexsha": "a14b20e843149eda946d764781efd75835ae7158", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "upwork-devs/lwasampijja-baker/issues.py", "max_issues_repo_name": "GiuseMSD/k8-data-visualization", "max_issues_repo_head_hexsha": "a14b20e843149eda946d764781efd75835ae7158", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "upwork-devs/lwasampijja-baker/issues.py", "max_forks_repo_name": "GiuseMSD/k8-data-visualization", "max_forks_repo_head_hexsha": "a14b20e843149eda946d764781efd75835ae7158", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.3902439024, "max_line_length": 179, "alphanum_fraction": 0.6038380652, "include": true, "reason": "import networkx", "num_tokens": 1710}
|
# The MIT License (MIT)
#
# Copyright (c) 2016, Jack Liu
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import time, datetime, os
from collections import OrderedDict
import csv, json, re, sys
import requests
import random
import operator
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.finance import quotes_historical_yahoo_ochl
from matplotlib.dates import HourLocator, MinuteLocator, SecondLocator, DateFormatter
from matplotlib.patches import Ellipse
ctrl = {}
class Quote(object):
def dump(self):
print self.dt, self.o, self.h, self.l, self.c, self.v
def __init__(self, dt, o, h, l, c, v):
assert(h >= l)
self.dt = dt # time stamp
        self.o = o; # open price
self.h = h; # high
self.l = l; # low
self.c = c; # close
self.v = v; # volume
def get_day(self):
return self.dt.day
def get_median(self):
return (self.o + self.c) / 2
    # Return the relative change of this quote's median price wrt ref_score.
def get_ratio(self,ref_score):
return ((self.get_median() - ref_score) / ref_score)
def get_normalized_dt(self):
return datetime.datetime(1971, 1, 1, \
self.dt.hour, self.dt.minute, self.dt.second)
#
# Class Plot
#
class Plot(object):
def __init__(self, chart_type):
self.markers = ['o', 'v', '^', 's', 'p', '*', 'h', 'H', 'D', 'd']
self.ls = ['dashed', 'dashdot', 'dotted']
self.hours = HourLocator() # every hour
self.minutes = MinuteLocator() # every minute
self.seconds = SecondLocator() # every second
self.hoursFmt = DateFormatter('%H')
self.fig, self.ax = plt.subplots(figsize=(20, 10))
self.chart_type = chart_type
return
def plot_scores(self, dates, scores, mfc, marker, quote):
self.ax.plot_date(dates, scores,
ls=random.choice(self.ls), marker=marker,
markersize=5.0, markerfacecolor=mfc,
label=str(quote.dt.month)+'/'+str(quote.dt.day))
self.ax.text(quote.dt, quote.c, str(quote.c), fontsize=12, color='g')
return
def plot_buys(self, buy_quotes, quotes_2_scores):
if not buy_quotes:
return
norm_dates = [q.get_normalized_dt() for q in buy_quotes]
scores = [score for quote, score in quotes_2_scores.iteritems() \
for buy_quote in buy_quotes if quote.dt == buy_quote.dt]
size = [200.0 for _ in buy_quotes]
self.ax.scatter(norm_dates, scores, s=size, color='b', alpha=0.8)
return
def plot_future(self, future_scores):
(dates, scores) = zip(*future_scores)
self.ax.plot_date(dates, scores,
ls=random.choice(self.ls), marker='D',
markersize=5.0, markerfacecolor='g',
label='future')
return
def format_ticks(self):
self.ax.xaxis.set_major_locator(self.hours)
self.ax.xaxis.set_major_formatter(self.hoursFmt)
self.ax.xaxis.set_minor_locator(self.minutes)
self.ax.autoscale_view()
# format the coords message box
def price(x):
return '$%.3f' % x
self.ax.fmt_xdata = DateFormatter('%H-%M-%S')
self.ax.fmt_ydata = price
self.ax.grid(True)
self.fig.autofmt_xdate()
plt.legend(loc='best', shadow=True)
plt.tick_params(axis='y', which='both', labelleft='on', labelright='on')
return
def format_labels(self, interval_seconds):
plt.ylabel(self.chart_type)
plt.xlabel('Interval ' + str(interval_seconds / 60.0) + ' min')
return
def format_title(self, title):
plt.title(title)
return
def show(self):
sys.stdout.flush()
plt.show()
return
def annotate(self, x, y, xytext):
self.ax.annotate('%.3f' % y, xy=(x, y), xycoords='data',
bbox=dict(boxstyle="round4", fc="w", alpha=0.75),
xytext=xytext, textcoords='offset points', size=14,
arrowprops=dict(arrowstyle="fancy",
fc="0.3", ec="none",
patchB=Ellipse((2, -1), 0.5, 0.5),
connectionstyle="angle3,angleA=0,angleB=-90"),
)
return
#
#
#
class Stock(object):
def __repr__(self):
return "Stock()"
def __str__(self):
return self.symbol
def __init__(self, symbol, interval_seconds, buys, sells):
self.symbol = symbol
# The page_num, or key, of self.book is the yr_mo_day, and the
# content of each page is a list of quotes on that day.
self.book = OrderedDict()
self.interval_seconds = interval_seconds
self.knn_candidate_set = set()
# Create buys and sells records, and convert them
# from string to datetime type
self.buys = []
self.sells = []
for buy in buys:
self.buys.append(datetime.datetime.strptime(buy, "%Y-%m-%d %H:%M:%S"))
for sell in sells:
self.sells.append(datetime.datetime.strptime(sell, "%Y-%m-%d %H:%M:%S"))
self.buys.sort()
self.sells.sort()
return
# Given a list of quotes, return a sub-list of it that have buy events
def get_buy_quotes(self, quotes):
assert(quotes)
results = []
for buy in self.buys:
if (buy > quotes[-1].dt) or (buy < quotes[0].dt):
continue
for quote in quotes:
if quote.dt >= buy:
results.append(quote)
break;
return results;
# Given a quote, return the page_num, or key, where this qutoe should
# belong to.
def get_page_num_str(self, quote):
return str(quote.dt.year) + '_' + str(quote.dt.month) + '_' + str(quote.dt.day)
# Append a new quote according to its day.
def append(self, quote):
# page_num is the key of self.book
page_num = self.get_page_num_str(quote)
if not self.book.has_key(page_num):
self.book[page_num] = []
self.book[page_num].append(quote)
return
def dump(self):
print self.symbol, len(self.quotes)
for quote in self.quotes:
quote.dump()
def __repr__(self):
return self.to_csv()
#
# Compute KNN candidates that meet today's criteria.
#
def prepare_knn_candidate_set(self):
if len(self.book) < 2:
return 0
open_price = self.get_today_open_price();
prev_close_price = self.get_yesterday_close_price();
ref_ratio = (open_price - prev_close_price) / prev_close_price
print "KNN ratio: ", prev_close_price, open_price, ref_ratio
prev_close_price = 1
for page_num, quotes in self.book.iteritems():
open_price = quotes[0].o
ratio = (open_price - prev_close_price) / prev_close_price
if (abs(ref_ratio) <= 0.04) and (abs(ratio) <= 0.04):
# in [-0.04, 0.04] range
self.knn_candidate_set.add(quotes[0].dt);
elif (ref_ratio > 0.04) and (ratio > 0.04):
# in [0.04, +] range
self.knn_candidate_set.add(quotes[0].dt);
elif (ref_ratio < 0.04) and (ratio < 0.04):
# in [-, -0.04] range
self.knn_candidate_set.add(quotes[0].dt);
else:
self.knn_candidate_set.add(quotes[0].dt);
prev_close_price = quotes[-1].c
return len(self.knn_candidate_set)
#
# Should we consider these quotes a valid candidate for KNN
#
def is_knn_candidate(self, quotes):
return quotes[0].dt in self.knn_candidate_set;
#
# Given a sequence of quotes, retun a list of
# scores depending on ChartType.
#
def compute_display_scores(self, quotes, chart_type):
# Use close price as score
if chart_type == 'close':
return [q.c for q in quotes]
# Use median price
if chart_type == 'median':
return [q.get_median() for q in quotes]
# K-nearest neighbors
if chart_type == 'knn':
if self.is_knn_candidate(quotes):
return [q.get_ratio(quotes[0].o) for q in quotes]
else:
return []
# Default will use close price as score
return [q.c for q in quotes]
def get_first_page(self):
return self.book.itervalues().next()
def get_latest_quote(self):
page_num = self.book.keys()[-1];
return self.book[page_num][-1]
def get_today_open_price(self):
assert(len(self.book) >= 1)
page_num = self.book.keys()[-1];
return self.book[page_num][0].o
def get_yesterday_close_price(self):
assert(len(self.book) >= 2)
page_num = self.book.keys()[-2];
return self.book[page_num][-1].c
def is_last_page(self, page_num):
return page_num == self.book.keys()[-1];
def get_local_time(self):
now = datetime.datetime.now()
local_hr = now.hour + ctrl['UTC']
local_day = now.day
if local_hr < 0:
local_hr += 24
local_day -= 1
return (local_day, local_hr, now.minute, now.second)
def get_num_days_ago(self, quote):
return (datetime.datetime.now() - quote.dt).days
def is_market_closed(self):
(local_day, local_hr, local_min, local_sec) = self.get_local_time()
last_quote_dt = self.get_latest_quote().dt
if local_day != last_quote_dt.day:
return True
if local_hr < 6 or local_hr >= 13:
return True
return False
def is_market_open(self):
return not self.is_market_closed()
def write2csv(self):
if not self.book:
return
fname = self.symbol + ".csv"
print "Create", fname
with open(fname, 'wb') as f:
first_page = self.get_first_page();
keys = first_page[0].__dict__.keys()
w = csv.DictWriter(f, fieldnames=keys)
w.writeheader()
last_quote_dt = datetime.datetime.fromtimestamp(0);
for page_num, quotes in self.book.iteritems():
# Make sure quotes are listed in order
assert(last_quote_dt < quotes[0].dt)
last_quote_dt = quotes[0].dt
for quote in quotes:
w.writerow(quote.__dict__)
return
#
# Return reference datetime which shows where the quote is at this moment
#
def get_ref_datetime(self) :
(_, local_hr, local_min, local_sec) = self.get_local_time()
if self.is_market_open():
ref_datetime = datetime.datetime(1971, 1, 1, \
local_hr, local_min, local_sec)
else:
ref_datetime = datetime.datetime(1971, 1, 1, 6, 20)
return ref_datetime
#
# Compute today's future scores based on historical scores, volume, and
# today's opening score.
#
def compute_future_scores(self, hist, today_opening_score, rel):
future = []
hist_opening_scores = hist.itervalues().next()
future_score = today_opening_score
def geomean(nums):
return reduce(lambda x, y: x*y, nums)**(1.0/len(nums))
for dt, scores_and_volume in hist.iteritems():
# An effective_record is a tuple of (hist_score, volume, hist_opening_score)
effective_records = [(a[0], float(a[1]), b[0]) for (a, b) in \
zip(scores_and_volume, hist_opening_scores) if rel(a[0], b[0])]
total_volume = sum([rec[1] for rec in effective_records])
if today_opening_score < 1.0:
# No need to compute ratio again if chart type is knn
delta_score = sum(([(rec[0] - rec[2]) * (rec[1] / total_volume) for rec in effective_records]))
future_score = today_opening_score + delta_score
else:
# Use ratio wrt hist_opening
ratio = sum(([(rec[0] / rec[2]) * (rec[1] / total_volume) for rec in effective_records]))
future_score = today_opening_score * ratio
future.append((dt, future_score))
return future
# Plot the history for current symbol
def plot(self, chart_type='close'):
if not self.book:
print "Not enough data to plot"
return
# Prepare data for KNN
if chart_type == 'knn':
if self.prepare_knn_candidate_set() < 2:
print "No candidate for KNN plot"
return
begin_time = time.time()
plot = Plot(chart_type);
num_days = len(self.book)
gradient = 1.0;
ref_datetime = self.get_ref_datetime()
print "Reference time:", ref_datetime
last_quote_dt = datetime.datetime.fromtimestamp(0);
opening_score = 0.0
#
# Walk thru each day
#
# historical[normalized_dt] is a list of scores happened at that time.
# We use historcial later to estimate future scores
historical = OrderedDict()
for page_num, quotes in self.book.iteritems():
scores = self.compute_display_scores(quotes, chart_type)
if not scores:
continue;
# Keep the mapping for later reference.
quotes_2_scores = OrderedDict(zip(quotes, scores))
opening_score = scores[0]
# Update historical
for quote, score in quotes_2_scores.iteritems():
_norm_dt = quote.get_normalized_dt()
if not historical.has_key(_norm_dt):
historical[_norm_dt] = []
historical[_norm_dt].append((score, quote.v))
# Make sure quotes are listed in ascending order
assert(last_quote_dt < quotes[0].dt)
last_quote_dt = quotes[0].dt
# Normalize the year/month/day, since the chart only cares about hr/min/sec
norm_dates = [q.get_normalized_dt() for q in quotes]
# Quotes from last page worths more attention.
if self.is_last_page(page_num):
mfc = "red"
marker = 'D'
else:
mfc = str(gradient)
marker = random.choice(plot.markers)
# Only show details for the past 7 days
if self.get_num_days_ago(quotes[0]) > 7:
continue
plot.plot_scores(norm_dates, scores, mfc, marker, quotes[0])
#
# Highlight the buys using big green dots
#
plot.plot_buys(self.get_buy_quotes(quotes), quotes_2_scores)
# Adjust gradient for next page's quotes
gradient -= (1.0 / float(num_days - 1));
# Compute future scores (upper bounds and lower bounds, based on historical data
future_scores_lb = self.compute_future_scores(historical, opening_score, operator.le)
future_scores_ub = self.compute_future_scores(historical, opening_score, operator.ge)
# Plot future scores
plot.plot_future(future_scores_ub)
plot.plot_future(future_scores_lb)
#
# Print out prediction before showing the chart
#
latest_quote = self.get_latest_quote();
print self.symbol, "now @", latest_quote.c
end_time = time.time()
print 'Process time: %0.3f ms' % ((end_time-begin_time)*1000.0)
# format the ticks and labels
plot.format_ticks()
plot.format_labels(self.interval_seconds)
title = self.symbol + " in last " + str(num_days) + " days" + " @" + str(latest_quote.c)
plot.format_title(title)
#
# Annotate the min/max points
#
(x1, y1) = max(future_scores_ub, key=operator.itemgetter(1))
(x2, y2) = min(future_scores_lb, key=operator.itemgetter(1))
plot.annotate(x1, y1, (-80, 60))
plot.annotate(x2, y2, (-80, -60))
# Flush whatever we have, and draw it!
plot.show()
return
#
# Collect intraday quote for a symbol.
#
def CollectIntradayQuote(record, interval_seconds, num_days):
symbol = record["Symbol"]
stock = Stock(symbol, interval_seconds, buys=record.get("Buy", []),
sells=record.get("Sell", []))
url = ctrl['URL']
url += "q={0}&i={1}&p={2}d&f=d,o,h,l,c,v".format(symbol,interval_seconds,num_days)
print "Query", url
csv = requests.get(url).text.encode('utf-8').split('\n')
_, timezone_offset = csv[6].split('=')
# Adjust timezone wrt UTC
timezone_offset = (0 * float(timezone_offset)) + (ctrl["UTC"] * 60 * 60)
for row in csv[7:]:
fields = row.split(',')
if len(fields) != 6:
continue;
# COLUMNS=DATE,CLOSE,HIGH,LOW,OPEN,VOLUME
offset = fields[0]
if offset.startswith('a'):
day = int(offset[1:])
offset = 0
else:
offset = int(offset)
dt = datetime.datetime.fromtimestamp(day+(interval_seconds*offset)+timezone_offset)
# Create a new quote
quote = Quote(dt, float(fields[4]), float(fields[2]), float(fields[3]), \
float(fields[1]), int(fields[5]))
# Append this new quote to current stock
stock.append(quote)
stock.write2csv()
return stock
#
# main()
#
try:
with open('stock.json') as f:
ctrl = json.load(f);
except IOError as e:
sys.exit( "I/O error({0}): {1}".format(e.errno, e.strerror) + ": stock.json")
for record in ctrl["Records"]:
stock = CollectIntradayQuote(record, ctrl["Interval"], ctrl["Days"])
stock.plot(record['ChartType'].lower())
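# Minimal example of the expected stock.json layout (illustrative values only; the URL
# must point to a Google-Finance-style intraday endpoint, which may no longer be available):
# {
#   "URL": "http://www.google.com/finance/getprices?",
#   "UTC": -7,
#   "Interval": 120,
#   "Days": 10,
#   "Records": [
#     {"Symbol": "AAPL", "ChartType": "close",
#      "Buy": ["2016-05-02 07:31:00"], "Sell": []}
#   ]
# }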
|
{"hexsha": "6e6dfcc962f1a489b0168e34ecdc6599e004f8b8", "size": 19297, "ext": "py", "lang": "Python", "max_stars_repo_path": "stock.py", "max_stars_repo_name": "liu12295/stock", "max_stars_repo_head_hexsha": "4335ac1e85d27913d245f4b8663258f93a05b625", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-07-19T01:48:27.000Z", "max_stars_repo_stars_event_max_datetime": "2017-07-19T01:48:27.000Z", "max_issues_repo_path": "stock.py", "max_issues_repo_name": "liu12295/stock", "max_issues_repo_head_hexsha": "4335ac1e85d27913d245f4b8663258f93a05b625", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "stock.py", "max_forks_repo_name": "liu12295/stock", "max_forks_repo_head_hexsha": "4335ac1e85d27913d245f4b8663258f93a05b625", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.0335097002, "max_line_length": 111, "alphanum_fraction": 0.5790537389, "include": true, "reason": "import numpy", "num_tokens": 4554}
|
!> Parameters to be used during calculations
!! such as conversion factors, imaginary number, \f$ \pi \f$....
MODULE parameters
USE kinds
IMPLICIT NONE
REAL(dp), PARAMETER :: pi=4.0_dp*DATAN(1.0_dp), & !4.0d0*atan(1.d0),
kb=1.3806488e-23_dp, & ! Boltzmann constant J/K
planck=6.626068e-34_dp, & ! Planck's constant in Js
hbar=planck/2.0_dp/pi, & ! hbar in Js
avog = 6.0221415e23_dp, & ! Avogadro's number
light = 2.99792458e10_dp, & ! Speed of light (SI) in cm/s
! *************** ENERGY *************** !
joules2wvnbr = 1.0_dp/planck/light, & ! Joules (SI) to wavenumber
wvnbr2joules = 1.0_dp/joules2wvnbr, & ! wavenumber to Joules
! **************** LENGTH ************** !
m2ang = 1.e10_dp, & ! meter to Angstrom
ang2m = 1.0_dp/m2ang, & ! Angstrom to meter
! ***************** RMASS *************** !
                           amu2kg = 1.66053892e-27_dp, & ! atomic mass unit to kg
! ************** TIME ****************** !
s2ps = 1.0e12_dp, & ! seconds to picoseconds
ps2s = 1.0_dp/s2ps, & ! picoseconds to seconds
s2fs = 1.0e15_dp, & ! seconds to femtoseconds
fs2s = 1.0_dp/s2fs, & ! femtoseconds to seconds
! ************** FREQUENCY ************* !
wvnbr2Hz = light, & ! wavenumbers to Hertz
Hz2wvnbr = 1.0_dp/wvnbr2Hz, & ! Hertz to wavenumbers
! ***************** 2 a.u. ************* !
au2joules = 4.35974417e-18_dp, & ! a.u. energy to Joules
joules2au = 1.0_dp/au2joules, & ! Joules to a.u. energy
ev2au=0.03674932_dp, & ! eV to a.u. energy
au2ev=1.0_dp/ev2au, & ! a.u. energy to eV
autime2s = 2.418884326505e-17_dp, & ! a.u. time to seconds
s2autime = 1.0_dp/autime2s, & ! seconds to a.u. time
autemp2K = 3.1577464e5_dp, & ! a.u. temperature to Kelvin
K2autemp = 1.0_dp/autemp2K, & ! Kelvin to a.u. temperature
au2ang = 0.52917725_dp, & ! atomic length to Angstrom
ang2au = 1.0_dp/au2ang ! Angstrom to atomic length
COMPLEX(dp), PARAMETER :: eye=(0.0_dp, 1.0_dp)
END MODULE parameters
|
{"hexsha": "2ff9d36b15754cbdd0845f87fcf5987006ae142e", "size": 2544, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "mqds/src/general_src/parameters.f90", "max_stars_repo_name": "jprov410/mqds", "max_stars_repo_head_hexsha": "beead5c30aac77a7ae2d07e808d8c587cdd1c3ce", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-02-08T20:58:49.000Z", "max_stars_repo_stars_event_max_datetime": "2018-08-23T02:07:17.000Z", "max_issues_repo_path": "mqds/src/general_src/parameters.f90", "max_issues_repo_name": "jprov410/MQDS", "max_issues_repo_head_hexsha": "beead5c30aac77a7ae2d07e808d8c587cdd1c3ce", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2018-02-21T18:41:21.000Z", "max_issues_repo_issues_event_max_datetime": "2018-03-21T21:27:48.000Z", "max_forks_repo_path": "mqds/src/general_src/parameters.f90", "max_forks_repo_name": "jprov410/MQDS", "max_forks_repo_head_hexsha": "beead5c30aac77a7ae2d07e808d8c587cdd1c3ce", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2018-02-07T20:12:07.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-01T02:33:09.000Z", "avg_line_length": 57.8181818182, "max_line_length": 80, "alphanum_fraction": 0.463836478, "num_tokens": 798}
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Membership Inference
"""
from multiprocessing import cpu_count
import numpy as np
import mindspore as ms
from mindspore.train import Model
from mindspore.dataset.engine import Dataset
from mindspore import Tensor
from mindarmour.utils.logger import LogUtil
from mindarmour.utils._check_param import check_param_type, check_param_multi_types, \
check_model, check_numpy_param
from .attacker import get_attack_model
from ._check_config import verify_config_params
LOGGER = LogUtil.get_instance()
TAG = "MembershipInference"
def _eval_info(pred, truth, option):
"""
Calculate the performance according to pred and truth.
Args:
pred (numpy.ndarray): Predictions for each sample.
truth (numpy.ndarray): Ground truth for each sample.
option (str): Type of evaluation indicators; Possible
values are 'precision', 'accuracy' and 'recall'.
Returns:
float32, calculated evaluation results.
Raises:
ValueError, size of parameter pred or truth is 0.
ValueError, value of parameter option must be in ["precision", "accuracy", "recall"].
"""
check_numpy_param("pred", pred)
check_numpy_param("truth", truth)
if option == "accuracy":
count = np.sum(pred == truth)
return count / len(pred)
if option == "precision":
if np.sum(pred) == 0:
return -1
count = np.sum(pred & truth)
return count / np.sum(pred)
if option == "recall":
if np.sum(truth) == 0:
return -1
count = np.sum(pred & truth)
return count / np.sum(truth)
msg = "The metric value {} is undefined.".format(option)
LOGGER.error(TAG, msg)
raise ValueError(msg)
def _softmax_cross_entropy(logits, labels, epsilon=1e-12):
"""
Calculate the SoftmaxCrossEntropy result between logits and labels.
Args:
logits (numpy.ndarray): Numpy array of shape(N, C).
labels (numpy.ndarray): Numpy array of shape(N, ).
epsilon (float): The calculated value of softmax will be clipped into [epsilon, 1 - epsilon]. Default: 1e-12.
Returns:
numpy.ndarray: numpy array of shape(N, ), containing loss value for each vector in logits.
"""
labels = np.eye(logits.shape[1])[labels].astype(np.int32)
exp_logits = np.exp(logits - np.max(logits, axis=-1, keepdims=True))
predictions = exp_logits / np.sum(exp_logits, axis=-1, keepdims=True)
predictions = np.clip(predictions, epsilon, 1.0 - epsilon)
loss = -1 * np.sum(labels*np.log(predictions), axis=-1)
return loss
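# Worked example: with logits [[2.0, 1.0, 0.1]] and labels [0], the softmax is roughly
# [0.659, 0.242, 0.099], so the returned loss is about -log(0.659) ~= 0.417.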
class MembershipInference:
"""
    Membership inference, proposed by Shokri, Stronati, Song and Shmatikov, is a grey-box attack.
    The attack requires the loss or logits of training samples.
References: `Reza Shokri, Marco Stronati, Congzheng Song, Vitaly Shmatikov.
Membership Inference Attacks against Machine Learning Models. 2017.
<https://arxiv.org/abs/1610.05820v2>`_
Args:
model (Model): Target model.
n_jobs (int): Number of jobs run in parallel. -1 means using all processors,
otherwise the value of n_jobs must be a positive integer.
Examples:
>>> # train_1, train_2 are non-overlapping datasets from training dataset of target model.
>>> # test_1, test_2 are non-overlapping datasets from test dataset of target model.
>>> # We use train_1, test_1 to train attack model, and use train_2, test_2 to evaluate attack model.
>>> model = Model(network=net, loss_fn=loss, optimizer=opt, metrics={'acc', 'loss'})
>>> attack_model = MembershipInference(model, n_jobs=-1)
>>> config = [{"method": "KNN", "params": {"n_neighbors": [3, 5, 7]}}]
>>> attack_model.train(train_1, test_1, config)
>>> metrics = ["precision", "recall", "accuracy"]
>>> result = attack_model.eval(train_2, test_2, metrics)
Raises:
TypeError: If type of model is not mindspore.train.Model.
TypeError: If type of n_jobs is not int.
ValueError: The value of n_jobs is neither -1 nor a positive integer.
"""
def __init__(self, model, n_jobs=-1):
check_param_type("n_jobs", n_jobs, int)
if not (n_jobs == -1 or n_jobs > 0):
msg = "Value of n_jobs must be either -1 or positive integer, but got {}.".format(n_jobs)
LOGGER.error(TAG, msg)
raise ValueError(msg)
self._model = check_model("model", model, Model)
self._n_jobs = min(n_jobs, cpu_count())
self._attack_list = []
def train(self, dataset_train, dataset_test, attack_config):
"""
Depending on the configuration, use the input dataset to train the attack model.
Save the attack model to self._attack_list.
Args:
dataset_train (mindspore.dataset): The training dataset for the target model.
dataset_test (mindspore.dataset): The test set for the target model.
attack_config (Union[list, tuple]): Parameter setting for the attack model. The format is
[{"method": "knn", "params": {"n_neighbors": [3, 5, 7]}},
{"method": "lr", "params": {"C": np.logspace(-4, 2, 10)}}].
The support methods are knn, lr, mlp and rf, and the params of each method
must within the range of changeable parameters. Tips of params implement
can be found below:
`KNN <https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html>`_,
`LR <https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html>`_,
`RF <https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html>`_,
`MLP <https://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPRegressor.html>`_.
Raises:
KeyError: If any config in attack_config doesn't have keys {"method", "params"}.
NameError: If the method(case insensitive) in attack_config is not in ["lr", "knn", "rf", "mlp"].
"""
check_param_type("dataset_train", dataset_train, Dataset)
check_param_type("dataset_test", dataset_test, Dataset)
check_param_multi_types("attack_config", attack_config, (list, tuple))
verify_config_params(attack_config)
features, labels = self._transform(dataset_train, dataset_test)
for config in attack_config:
self._attack_list.append(get_attack_model(features, labels, config, n_jobs=self._n_jobs))
def eval(self, dataset_train, dataset_test, metrics):
"""
Evaluate the different privacy of the target model.
Evaluation indicators shall be specified by metrics.
Args:
dataset_train (mindspore.dataset): The training dataset for the target model.
dataset_test (mindspore.dataset): The test dataset for the target model.
            metrics (Union[list, tuple]): Evaluation indicators. Each element of metrics
                must be one of ["precision", "accuracy", "recall"].
Returns:
list, each element contains an evaluation indicator for the attack model.
"""
check_param_type("dataset_train", dataset_train, Dataset)
check_param_type("dataset_test", dataset_test, Dataset)
check_param_multi_types("metrics", metrics, (list, tuple))
metrics = set(metrics)
metrics_list = {"precision", "accuracy", "recall"}
if not metrics <= metrics_list:
msg = "Element in 'metrics' must be in {}, but got {}.".format(metrics_list, metrics)
LOGGER.error(TAG, msg)
raise ValueError(msg)
result = []
features, labels = self._transform(dataset_train, dataset_test)
for attacker in self._attack_list:
pred = attacker.predict(features)
item = {}
for option in metrics:
item[option] = _eval_info(pred, labels, option)
result.append(item)
return result
def _transform(self, dataset_train, dataset_test):
"""
        Generate the corresponding loss_logits features and membership labels, and return them after shuffling.
Args:
dataset_train (mindspore.dataset): The train set for the target model.
dataset_test (mindspore.dataset): The test set for the target model.
Returns:
            - numpy.ndarray, loss_logits features for each sample. Shape is (N, C).
                N is the number of samples. C = 1 + dim(logits).
            - numpy.ndarray, labels for each sample. Shape is (N,).
"""
features_train, labels_train = self._generate(dataset_train, 1)
features_test, labels_test = self._generate(dataset_test, 0)
features = np.vstack((features_train, features_test))
labels = np.hstack((labels_train, labels_test))
shuffle_index = np.array(range(len(labels)))
np.random.shuffle(shuffle_index)
features = features[shuffle_index]
labels = labels[shuffle_index]
return features, labels
def _generate(self, input_dataset, label):
"""
        Return loss_logits features and labels for training the attack model.
Args:
input_dataset (mindspore.dataset): The dataset to be generated.
            label (int): 1 if input_dataset is the training set of the target model (member), 0 otherwise.
Returns:
            - numpy.ndarray, loss_logits features for each sample. Shape is (N, C).
                N is the number of samples. C = 1 + dim(logits).
            - numpy.ndarray, labels for each sample. Shape is (N,).
"""
loss_logits = np.array([])
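        # Accumulate one row per sample: [cross-entropy loss, logits...], giving shape (N, 1 + dim(logits)).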
for batch in input_dataset.create_tuple_iterator(output_numpy=True):
batch_data = Tensor(batch[0], ms.float32)
batch_labels = batch[1].astype(np.int32)
batch_logits = self._model.predict(batch_data).asnumpy()
batch_loss = _softmax_cross_entropy(batch_logits, batch_labels)
batch_feature = np.hstack((batch_loss.reshape(-1, 1), batch_logits))
if loss_logits.size == 0:
loss_logits = batch_feature
else:
loss_logits = np.vstack((loss_logits, batch_feature))
if label == 1:
labels = np.ones(len(loss_logits), np.int32)
elif label == 0:
labels = np.zeros(len(loss_logits), np.int32)
else:
msg = "The value of label must be 0 or 1, but got {}.".format(label)
LOGGER.error(TAG, msg)
raise ValueError(msg)
return loss_logits, labels
|
{"hexsha": "b48ca58ff475e1820eb0eef839bd84cfc8ff5255", "size": 11310, "ext": "py", "lang": "Python", "max_stars_repo_path": "mindarmour/privacy/evaluation/membership_inference.py", "max_stars_repo_name": "mindspore-ai/mindarmour", "max_stars_repo_head_hexsha": "a5db0825fa06e4da870c0a850a18b374e8cdd086", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 139, "max_stars_repo_stars_event_min_datetime": "2020-03-28T02:37:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T15:35:39.000Z", "max_issues_repo_path": "mindarmour/privacy/evaluation/membership_inference.py", "max_issues_repo_name": "mindspore-ai/mindarmour", "max_issues_repo_head_hexsha": "a5db0825fa06e4da870c0a850a18b374e8cdd086", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-04-02T09:50:21.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-09T06:52:57.000Z", "max_forks_repo_path": "mindarmour/privacy/evaluation/membership_inference.py", "max_forks_repo_name": "mindspore-ai/mindarmour", "max_forks_repo_head_hexsha": "a5db0825fa06e4da870c0a850a18b374e8cdd086", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2020-03-28T02:52:42.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-15T08:05:06.000Z", "avg_line_length": 42.679245283, "max_line_length": 119, "alphanum_fraction": 0.6491600354, "include": true, "reason": "import numpy", "num_tokens": 2519}
|
#include <stan/math/rev/scal.hpp>
#include <gtest/gtest.h>
#include <test/unit/math/rev/scal/fun/nan_util.hpp>
#include <test/unit/math/rev/scal/util.hpp>
#include <boost/math/special_functions/beta.hpp>
TEST(AgradRev,ibeta_vvv) {
using stan::math::var;
using stan::math::ibeta;
using stan::math::ibeta;
using boost::math::ibeta_derivative;
AVAR a = 0.6;
AVAR b = 0.3;
AVAR c = 0.5;
AVAR f = ibeta(a,b,c);
EXPECT_FLOAT_EQ(0.3121373, f.val());
AVEC x = createAVEC(a,b,c);
VEC grad_f;
f.grad(x,grad_f);
EXPECT_FLOAT_EQ(-0.436993,grad_f[0]);
EXPECT_FLOAT_EQ(0.7779751,grad_f[1]);
EXPECT_FLOAT_EQ(ibeta_derivative(a.val(), b.val(), c.val()),grad_f[2]);
a = 3;
b = 2;
c = 0.2;
f = ibeta(a,b,c);
EXPECT_FLOAT_EQ(0.0272, f.val());
x = createAVEC(a,b,c);
f.grad(x,grad_f);
EXPECT_FLOAT_EQ(-0.03737671,grad_f[0]);
EXPECT_FLOAT_EQ(0.02507405,grad_f[1]);
EXPECT_FLOAT_EQ(ibeta_derivative(a.val(), b.val(), c.val()),grad_f[2]);
}
TEST(AgradRev,ibeta_vvd) {
using stan::math::var;
using stan::math::ibeta;
using stan::math::ibeta;
using boost::math::ibeta_derivative;
AVAR a = 0.6;
AVAR b = 0.3;
double c = 0.5;
AVAR f = ibeta(a,b,c);
EXPECT_FLOAT_EQ(0.3121373, f.val());
AVEC x = createAVEC(a,b);
VEC grad_f;
f.grad(x,grad_f);
EXPECT_FLOAT_EQ(-0.436993,grad_f[0]);
EXPECT_FLOAT_EQ(0.7779751,grad_f[1]);
a = 3;
b = 2;
c = 0.2;
f = ibeta(a,b,c);
EXPECT_FLOAT_EQ(0.0272, f.val());
x = createAVEC(a,b);
f.grad(x,grad_f);
EXPECT_FLOAT_EQ(-0.03737671,grad_f[0]);
EXPECT_FLOAT_EQ(0.02507405,grad_f[1]);
}
TEST(AgradRev,ibeta_vdv) {
using stan::math::var;
using stan::math::ibeta;
using stan::math::ibeta;
using boost::math::ibeta_derivative;
AVAR a = 0.6;
double b = 0.3;
AVAR c = 0.5;
AVAR f = ibeta(a,b,c);
EXPECT_FLOAT_EQ(0.3121373, f.val());
AVEC x = createAVEC(a,c);
VEC grad_f;
f.grad(x,grad_f);
EXPECT_FLOAT_EQ(-0.436993,grad_f[0]);
EXPECT_FLOAT_EQ(ibeta_derivative(a.val(), b, c.val()),grad_f[1]);
a = 3;
b = 2;
c = 0.2;
f = ibeta(a,b,c);
EXPECT_FLOAT_EQ(0.0272, f.val());
x = createAVEC(a,c);
f.grad(x,grad_f);
EXPECT_FLOAT_EQ(-0.03737671,grad_f[0]);
EXPECT_FLOAT_EQ(ibeta_derivative(a.val(), b, c.val()),grad_f[1]);
}
TEST(AgradRev,ibeta_vdd) {
using stan::math::var;
using stan::math::ibeta;
using stan::math::ibeta;
using boost::math::ibeta_derivative;
AVAR a = 0.6;
double b = 0.3;
double c = 0.5;
AVAR f = ibeta(a,b,c);
EXPECT_FLOAT_EQ(0.3121373, f.val());
AVEC x = createAVEC(a);
VEC grad_f;
f.grad(x,grad_f);
EXPECT_FLOAT_EQ(-0.436993,grad_f[0]);
a = 3;
b = 2;
c = 0.2;
f = ibeta(a,b,c);
EXPECT_FLOAT_EQ(0.0272, f.val());
x = createAVEC(a);
f.grad(x,grad_f);
EXPECT_FLOAT_EQ(-0.03737671,grad_f[0]);
}
TEST(AgradRev,ibeta_dvv) {
using stan::math::var;
using stan::math::ibeta;
using stan::math::ibeta;
using boost::math::ibeta_derivative;
double a = 0.6;
AVAR b = 0.3;
AVAR c = 0.5;
AVAR f = ibeta(a,b,c);
EXPECT_FLOAT_EQ(0.3121373, f.val());
AVEC x = createAVEC(b,c);
VEC grad_f;
f.grad(x,grad_f);
EXPECT_FLOAT_EQ(0.7779751,grad_f[0]);
EXPECT_FLOAT_EQ(ibeta_derivative(a, b.val(), c.val()),grad_f[1]);
a = 3;
b = 2;
c = 0.2;
f = ibeta(a,b,c);
EXPECT_FLOAT_EQ(0.0272, f.val());
x = createAVEC(b,c);
f.grad(x,grad_f);
EXPECT_FLOAT_EQ(0.02507405,grad_f[0]);
EXPECT_FLOAT_EQ(ibeta_derivative(a, b.val(), c.val()),grad_f[1]);
}
TEST(AgradRev,ibeta_dvd) {
using stan::math::var;
using stan::math::ibeta;
using stan::math::ibeta;
using boost::math::ibeta_derivative;
double a = 0.6;
AVAR b = 0.3;
double c = 0.5;
AVAR f = ibeta(a,b,c);
EXPECT_FLOAT_EQ(0.3121373, f.val());
AVEC x = createAVEC(b);
VEC grad_f;
f.grad(x,grad_f);
EXPECT_FLOAT_EQ(0.7779751,grad_f[0]);
a = 3;
b = 2;
c = 0.2;
f = ibeta(a,b,c);
EXPECT_FLOAT_EQ(0.0272, f.val());
x = createAVEC(b);
f.grad(x,grad_f);
EXPECT_FLOAT_EQ(0.02507405,grad_f[0]);
}
TEST(AgradRev,ibeta_ddv) {
using stan::math::var;
using stan::math::ibeta;
using stan::math::ibeta;
using boost::math::ibeta_derivative;
double a = 0.6;
double b = 0.3;
AVAR c = 0.5;
AVAR f = ibeta(a,b,c);
EXPECT_FLOAT_EQ(0.3121373, f.val());
AVEC x = createAVEC(c);
VEC grad_f;
f.grad(x,grad_f);
EXPECT_FLOAT_EQ(ibeta_derivative(a, b, c.val()),grad_f[0]);
a = 3;
b = 2;
c = 0.2;
f = ibeta(a,b,c);
EXPECT_FLOAT_EQ(0.0272, f.val());
x = createAVEC(c);
f.grad(x,grad_f);
EXPECT_FLOAT_EQ(ibeta_derivative(a, b, c.val()),grad_f[0]);
}
struct ibeta_fun {
template <typename T0, typename T1, typename T2>
inline
typename stan::return_type<T0,T1,T2>::type
operator()(const T0& arg1,
const T1& arg2,
const T2& arg3) const {
return ibeta(arg1,arg2,arg3);
}
};
TEST(AgradRev,ibeta_NaN) {
ibeta_fun ibeta_;
test_nan(ibeta_,0.6,0.3,0.5,true,false);
}
TEST(AgradRev, check_varis_on_stack) {
AVAR a = 0.6;
AVAR b = 0.3;
AVAR c = 0.5;
test::check_varis_on_stack(stan::math::ibeta(a, b, c));
test::check_varis_on_stack(stan::math::ibeta(a, b, 0.5));
test::check_varis_on_stack(stan::math::ibeta(a, 0.3, c));
test::check_varis_on_stack(stan::math::ibeta(a, 0.3, 0.5));
test::check_varis_on_stack(stan::math::ibeta(0.6, b, c));
test::check_varis_on_stack(stan::math::ibeta(0.6, b, 0.5));
test::check_varis_on_stack(stan::math::ibeta(0.6, 0.3, c));
}
|
{"hexsha": "a46bac1f814593ca43c9899f84aaf34b0d816134", "size": 5480, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "cmdstan/stan/lib/stan_math/test/unit/math/rev/scal/fun/ibeta_test.cpp", "max_stars_repo_name": "yizhang-cae/torsten", "max_stars_repo_head_hexsha": "dc82080ca032325040844cbabe81c9a2b5e046f9", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cmdstan/stan/lib/stan_math/test/unit/math/rev/scal/fun/ibeta_test.cpp", "max_issues_repo_name": "yizhang-cae/torsten", "max_issues_repo_head_hexsha": "dc82080ca032325040844cbabe81c9a2b5e046f9", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cmdstan/stan/lib/stan_math/test/unit/math/rev/scal/fun/ibeta_test.cpp", "max_forks_repo_name": "yizhang-cae/torsten", "max_forks_repo_head_hexsha": "dc82080ca032325040844cbabe81c9a2b5e046f9", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.3191489362, "max_line_length": 73, "alphanum_fraction": 0.6326642336, "num_tokens": 2114}
|
// Copyright Rein Halbersma 2010-2020.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <core/board/group.hpp> // axioms::is_realized, make
#include <dctl/core/board/angle.hpp> // _deg, inverse, rotate
#include <boost/test/unit_test.hpp> // BOOST_AUTO_TEST_SUITE, BOOST_AUTO_TEST_SUITE_END, BOOST_AUTO_TEST_CASE, BOOST_CHECK
#include <algorithm> // all_of
#include <type_traits> // common_type
#include <vector> // vector
using namespace dctl::core;
using namespace literals;
BOOST_AUTO_TEST_SUITE(GroupCyclic)
BOOST_AUTO_TEST_CASE(GroupAxiomsAreRealizedOnCyclicGroups)
{
constexpr auto op = [](auto i, auto j) { return rotate(i, j); };
constexpr auto inv = [](auto i) { return inverse(i); };
auto const C1 = make_group(
{ 0_deg },
op, inv
);
auto const C2 = make_group(
{ 0_deg, 180_deg },
op, inv
);
auto const C4 = make_group(
{ 0_deg, 90_deg, 180_deg, 270_deg },
op, inv
);
auto const C8 = make_group(
{ 0_deg, 45_deg, 90_deg, 135_deg,
180_deg, 225_deg, 270_deg, 315_deg },
op, inv
);
using CyclicGroup = std::common_type_t<decltype(C1), decltype(C2), decltype(C4), decltype(C8)>;
auto const C_N = std::vector<CyclicGroup>
{
C1, C2, C4, C8
};
BOOST_CHECK(
std::all_of(C_N.begin(), C_N.end(), [](auto const& g) {
return group::axioms::is_realized(g);
})
);
}
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "a374e5b8dc4a17ba63c62742fca3ad8a045645a4", "size": 1863, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/src/core/board/cyclic.cpp", "max_stars_repo_name": "sagarpant1/dctl", "max_stars_repo_head_hexsha": "b858fa139159eff73e8f3eec32da93ba077e0bd3", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/src/core/board/cyclic.cpp", "max_issues_repo_name": "sagarpant1/dctl", "max_issues_repo_head_hexsha": "b858fa139159eff73e8f3eec32da93ba077e0bd3", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/src/core/board/cyclic.cpp", "max_forks_repo_name": "sagarpant1/dctl", "max_forks_repo_head_hexsha": "b858fa139159eff73e8f3eec32da93ba077e0bd3", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2020-07-27T14:19:28.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-27T14:19:28.000Z", "avg_line_length": 31.5762711864, "max_line_length": 126, "alphanum_fraction": 0.5512614063, "num_tokens": 464}
|
import cv2
import sklearn
import numpy as np
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import ImageDataGenerator
def generateZoom(imgs, labs, samples):
rawGen = []
labelGen = []
n = imgs.shape[0]
for j in range(n):
img = imgs[j]
lab = labs[j]
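        # Use a single shared seed so the raw image and its label receive identical random zoom transforms.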
seed = np.random.randint(1000)
sampleimg = np.expand_dims(img, 0)
samplelab = np.expand_dims(lab, 0)
datagen = ImageDataGenerator(zoom_range = [0.5, 1.0])
itimg = datagen.flow(sampleimg, batch_size = 1, seed = seed)
itlabel = datagen.flow(samplelab, batch_size = 1, seed = seed)
for i in range(samples):
batchraw = itimg.next()
imageraw = batchraw[0].astype('uint8')
batchlabel = itlabel.next()
imagelabel = batchlabel[0].astype('uint8')
rawGen.append(imageraw)
labelGen.append(imagelabel)
rawGen = np.array(rawGen)
labelGen = np.array(labelGen)
return rawGen, labelGen
def shuffle(raw, label):
raw_shuffled, label_shuffled = sklearn.utils.shuffle(raw, label)
return raw_shuffled, label_shuffled
def flipInd(imgOrig):
img = imgOrig.reshape(imgOrig.shape[0], imgOrig.shape[1], 1)
data = img_to_array(img)
samples = np.expand_dims(data, 0)
datagen = ImageDataGenerator(horizontal_flip = True)
it = datagen.flow(samples, batch_size = 1)
while True:
batch = it.next()
image = batch[0].astype('uint8')
if not np.array_equal(data, image):
break
return image.reshape(imgOrig.shape)
def genFlip(img, lab):
imgGen = flipInd(img)
labGen = flipInd(lab)
return imgGen, labGen
def flip(raw, lab, save = False, show = False):
rawGen = np.zeros(raw.shape)
labGen = np.zeros(lab.shape)
for i in range(raw.shape[0]):
rawGen[i], labGen[i] = genFlip(raw[i], lab[i])
nameR = 'flip/flipRaw' + str(i) + '.jpg'
nameL = 'flip/flipLab' + str(i) + '.jpg'
if save:
cv2.imwrite(nameR, rawGen[i])
cv2.imwrite(nameL, labGen[i])
if show:
cv2.imshow(nameR, rawGen[i])
cv2.imshow(nameL, labGen[i])
cv2.waitKey()
return rawGen, labGen
def zoom(raw, lab, samples, save = False, show = False):
raw = raw.reshape(raw.shape[0], raw.shape[1], raw.shape[2], 1)
lab = lab.reshape(lab.shape[0], lab.shape[1], lab.shape[2], 1)
rawGen, labGen = generateZoom(raw, lab, samples)
for i in range(rawGen.shape[0]):
nameR = 'zoom/zoomRaw' + str(i) + '.jpg'
nameL = 'zoom/zoomLab' + str(i) + '.jpg'
if save:
cv2.imwrite(nameR, rawGen[i])
cv2.imwrite(nameL, labGen[i])
if show:
cv2.imshow(nameR, rawGen[i])
cv2.imshow(nameL, labGen[i])
cv2.waitKey()
return rawGen, labGen
def deNormalize(x_in, y_in, u = 128):
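    # Map inputs from [-1, 1] back to [0, 255], then binarize with threshold u (128 by default).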
x = x_in.copy()
y = y_in.copy()
x = (x * (255/2)) + (255/2)
y = (y * (255/2)) + (255/2)
x[x < u] = 0
x[x >= u] = 1
y[y < u] = 0
y[y >= u] = 1
return x, y
|
{"hexsha": "10ea05219446687ca433d6733ac0c8eed4498ec5", "size": 3164, "ext": "py", "lang": "Python", "max_stars_repo_path": "helper.py", "max_stars_repo_name": "oliverquintana/UUr-cGAN", "max_stars_repo_head_hexsha": "769e6e6c72f91f67efe58b6d68a0c302f8db95bb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-12T02:57:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-12T02:57:15.000Z", "max_issues_repo_path": "helper.py", "max_issues_repo_name": "oliverquintana/UUr-cGAN", "max_issues_repo_head_hexsha": "769e6e6c72f91f67efe58b6d68a0c302f8db95bb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "helper.py", "max_forks_repo_name": "oliverquintana/UUr-cGAN", "max_forks_repo_head_hexsha": "769e6e6c72f91f67efe58b6d68a0c302f8db95bb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.1487603306, "max_line_length": 70, "alphanum_fraction": 0.5843868521, "include": true, "reason": "import numpy", "num_tokens": 910}
|
import pandas as pd
import numpy as np
file1 = '../data/STRIDE_PATIENT.xlsx'
x1 = pd.ExcelFile(file1)
stride_patient = x1.parse('Sheet1')
file2 = '../data/SURGERY.xlsx'
x2 = pd.ExcelFile(file2)
surgery = x2.parse('Sheet1')
stride_patient_req = stride_patient
pat_surgery = pd.merge(stride_patient_req, surgery, on='PAT_DEID', how='inner')
pat_surgery['BIRTH_DATE'] = pat_surgery['BIRTH_DATE'].str[0:7] + '19' + pat_surgery['BIRTH_DATE'].str[7:]
pat_surgery['SURGERY_DATE'] = pat_surgery['SURGERY_DATE'].str[0:7] + '20' + pat_surgery['SURGERY_DATE'].str[7:]
pat_surgery['BIRTH_DATE'] = pd.to_datetime(pat_surgery['BIRTH_DATE'])
pat_surgery['SURGERY_DATE'] = pd.to_datetime(pat_surgery['SURGERY_DATE'])
print(pat_surgery.dtypes)
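# Approximate age at surgery: day difference divided by 365 (leap years ignored), truncated to an integer.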
pat_surgery['Difference'] = pat_surgery['SURGERY_DATE'].sub(pat_surgery['BIRTH_DATE'], axis=0)
pat_surgery['AGE AT SURGERY'] = pat_surgery['Difference'] / np.timedelta64(365, 'D')
pat_surgery['AGE AT SURGERY'] = pat_surgery['AGE AT SURGERY'].astype(int)
pat_surgery = pat_surgery.drop(['BIRTH_DATE', 'SURGERY_DATE', 'Difference'], axis=1)
print(pat_surgery.dtypes)
writer = pd.ExcelWriter('../data/PATIENT_FINAL.xlsx')
pat_surgery.to_excel(writer,'Sheet1')
writer.save()
|
{"hexsha": "25ef5f11521f66d9ef3f67033db2f87ab7da8078", "size": 1205, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/feature vector creation/step1_patient_demographics.py", "max_stars_repo_name": "arjun-parthi/SSRI-Project", "max_stars_repo_head_hexsha": "62f610a594e5849ccf0f3c25cd6adcd63888ec2a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-02-12T00:37:37.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-25T05:40:06.000Z", "max_issues_repo_path": "src/feature vector creation/step1_patient_demographics.py", "max_issues_repo_name": "arjun-parthi/SSRI-Project", "max_issues_repo_head_hexsha": "62f610a594e5849ccf0f3c25cd6adcd63888ec2a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/feature vector creation/step1_patient_demographics.py", "max_forks_repo_name": "arjun-parthi/SSRI-Project", "max_forks_repo_head_hexsha": "62f610a594e5849ccf0f3c25cd6adcd63888ec2a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-25T05:40:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-25T05:40:17.000Z", "avg_line_length": 40.1666666667, "max_line_length": 111, "alphanum_fraction": 0.7460580913, "include": true, "reason": "import numpy", "num_tokens": 373}
|
/////////////////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2021 Andreas Milton Maniotis.
//
// Email: andreas.maniotis@gmail.com
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
/////////////////////////////////////////////////////////////////////////////////////////////
#include "aml/find.hpp"
#include "aml/conslist.hpp"
#include <type_traits>
#include <iostream>
#include <boost/core/demangle.hpp>
namespace test::find
{
using l0 = aml::conslist<>;
using l1 = aml::conslist<void, int, char*>;
using l2 = aml::conslist<void, short int, char*, long int>;
template<typename X>
struct pred
{
static constexpr bool eval() { return std::is_integral<X>::value; };
};
using f0 = l0::apply<aml::find<pred>::in>;
using f1 = l1::apply<aml::find<pred>::in>;
using f2 = l2::apply<aml::find<pred>::in>;
using r0 = aml::conslist<>;
using r1 = aml::conslist<int>;
using r2 = aml::conslist<short int>;
void test()
{
static_assert(std::is_same<f0, r0>::value, "");
static_assert(std::is_same<f1, r1>::value, "");
static_assert(std::is_same<f2, r2>::value, "");
}
}
#include <iostream>
#include <string>
int main()
{
void (*test_set[])() = { test::find::test };
for ( auto test : test_set )
test();
std::cout << __FILE__ << ": " << sizeof(test_set)/sizeof(test_set[0]) << " tests passed." << std::endl;
}
|
{"hexsha": "844f8d298291fa4dbf95f0675c03e262011165d1", "size": 1584, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "deprecated-material/test/test_find.cpp", "max_stars_repo_name": "aandriko/libaml", "max_stars_repo_head_hexsha": "9db1a3ac13ef8160a33ed03e861be5d8cc8ea311", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "deprecated-material/test/test_find.cpp", "max_issues_repo_name": "aandriko/libaml", "max_issues_repo_head_hexsha": "9db1a3ac13ef8160a33ed03e861be5d8cc8ea311", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "deprecated-material/test/test_find.cpp", "max_forks_repo_name": "aandriko/libaml", "max_forks_repo_head_hexsha": "9db1a3ac13ef8160a33ed03e861be5d8cc8ea311", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.75, "max_line_length": 108, "alphanum_fraction": 0.5391414141, "num_tokens": 415}
|
<H1 style="text-align: center">ECMM426 - Computer Vision / ECMM441 - Machine Vision (Professional)</H1>
<H1 style="text-align: center"></H1>
<H2 style="text-align: center">Workshop 1</H2>
<H2 style="text-align: center">Image Processing</H2>
Simple examples of image processing concepts using OpenCV.
## Imports
```python
import urllib
import matplotlib, cv2
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (20.0, 10.0)
print('OpenCV version: {}'.format(cv2.__version__))
```
## Download Images
Download some images and prepare for reading
```python
import os
if not os.path.exists('images.zip'):
!wget --no-check-certificate https://empslocal.ex.ac.uk/people/staff/ad735/ECMM426/images.zip
!unzip -q images.zip
```
## Image Data Structures in OpenCV
Color images have three channels: red, green and blue
```python
# read an image
img = cv2.imread('images/lena.png')
# show image format (basically a 3-d array of pixel color info, in BGR format)
print('Image shape: {}'.format(img.shape))
print('Image: {}'.format(img))
```
## Color Conversions
By default OpenCV loads images in BGR format.
```python
# show image with matplotlib
plt.imshow(img)
```
```python
# convert image to RGB color space
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# show image with matplotlib
plt.imshow(img)
```
```python
# convert image to grayscale
gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
print('Image shape: {}'.format(gray_img.shape))
# grayscale image represented as a 2-d array
print(gray_img)
```
Grayscale images have a single channel
```python
# plot the gray image, note the cmap parameter
plt.imshow(gray_img, cmap='gray')
```
## Average color of an image
```python
# find average per row
# np.average() takes in an axis argument which finds the average across that axis.
average_color_per_row = np.average(img, axis=0)
# find average across average per row
average_color = np.average(average_color_per_row, axis=0)
# convert back to uint8
average_color = np.uint8(average_color)
print(average_color)
```
```python
# create 100 x 100 pixel array with average color value
average_color_img = np.array([[average_color]*100]*100, np.uint8)
plt.imshow(average_color_img)
```
## Box Filtering
```python
img = cv2.cvtColor(cv2.imread('images/books.jpg'), cv2.COLOR_BGR2RGB)
plt.imshow(img)
```
```python
gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
blur_img = cv2.blur(gray_img, (10, 10))
plt.subplot(1, 2, 1); plt.imshow(gray_img, cmap='gray')
plt.subplot(1, 2, 2); plt.imshow(blur_img, cmap='gray')
```
### Ringing Artifact
```python
# box filtering with 20x20 kernel
blur = cv2.blur(img, (20, 20))
plt.imshow(blur)
```
```python
gray_img = cv2.imread('images/grass.png')
blur_img = cv2.blur(gray_img, (25, 25))
plt.figure(figsize=(16, 8))
plt.subplot(1, 2, 1); plt.imshow(gray_img, cmap='gray')
plt.subplot(1, 2, 2); plt.imshow(blur_img, cmap='gray')
```
## Gaussian Filtering
```python
img = cv2.cvtColor(cv2.imread('images/oy.jpg'), cv2.COLOR_BGR2RGB)
plt.imshow(img)
```
```python
# preprocess with blurring, with a 5x5 kernel (note kernel size should be odd)
img_blur_small = cv2.GaussianBlur(img, (5, 5), 0)
plt.imshow(img_blur_small)
```
```python
img_blur_small = cv2.GaussianBlur(img, (5,5), 25)
plt.imshow(img_blur_small)
```
```python
img_blur_large = cv2.GaussianBlur(img, (15,15), 0)
plt.imshow(img_blur_large)
```
## Unsharp Masking or Sharpening
\begin{align}
I_\text{sharp} &= I_\text{original} + \alpha I_\text{detail}\\
&= I_\text{original} + \alpha I_\text{original} - \alpha I_\text{blurred}\\
&=(1+\alpha)I_\text{original} - \alpha I_\text{blurred}
\end{align}
```python
alpha = 0.5
img = cv2.cvtColor(cv2.imread('images/oy.jpg'), cv2.COLOR_BGR2RGB)
img_sharp = cv2.addWeighted(img, 1.0 + alpha, img_blur_large, -alpha, 0)
plt.subplot(1, 3, 1); plt.imshow(img); plt.title('Original');
plt.subplot(1, 3, 2); plt.imshow(img_blur_large); plt.title('Gaussian Blurred');
plt.subplot(1, 3, 3); plt.imshow(img_sharp); plt.title('Sharp');
```
## Median Filtering
First create a function for adding salt-and-pepper noise to images
```python
def add_sp_noise(image, amount=0.1):
row, col, ch = image.shape
s_vs_p = 0.5
out = np.copy(image)
# Salt mode
num_salt = np.ceil(amount * image.size * s_vs_p)
coords = [np.random.randint(0, i - 1, int(num_salt))
for i in image.shape]
out[coords[0], coords[1], coords[2]] = 1
# Pepper mode
num_pepper = np.ceil(amount* image.size * (1. - s_vs_p))
coords = [np.random.randint(0, i - 1, int(num_pepper))
for i in image.shape]
out[coords[0], coords[1], coords[2]] = 0
return out
```
Load an image, apply salt-and-pepper noise, and then try to smooth it with Gaussian and median filters
```python
img = cv2.cvtColor(cv2.imread('images/coins.jpg'), cv2.COLOR_BGR2RGB)
noisy_img = add_sp_noise(img, amount=0.1)
img_gaus = cv2.GaussianBlur(noisy_img, (5, 5), 3)
img_med = cv2.medianBlur(noisy_img, 5)
plt.subplot(1, 4, 1); plt.imshow(img); plt.title('Original');
plt.subplot(1, 4, 2); plt.imshow(noisy_img); plt.title('Salt & Pepper Noise');
plt.subplot(1, 4, 3); plt.imshow(img_gaus); plt.title('Gaussian Filtered');
plt.subplot(1, 4, 4); plt.imshow(img_med); plt.title('Median Filtered');
```
## Image Gradient
Experiment with negative values. Note `cv2.CV_8U` cannot contain negative values.
```python
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('images/books.jpg', 0)
laplacian = cv2.Laplacian(img, cv2.CV_8U)
sobelx = cv2.Sobel(img, cv2.CV_8U, 1, 0, ksize=5)
sobely = cv2.Sobel(img, cv2.CV_8U, 0, 1, ksize=5)
plt.subplot(2,2,1),plt.imshow(img, cmap = 'gray')
plt.title('Original'), plt.xticks([]), plt.yticks([])
plt.subplot(2,2,2),plt.imshow(laplacian, cmap = 'gray')
plt.title('Laplacian'), plt.xticks([]), plt.yticks([])
plt.subplot(2,2,3),plt.imshow(sobelx, cmap = 'gray')
plt.title('Sobel X'), plt.xticks([]), plt.yticks([])
plt.subplot(2,2,4),plt.imshow(sobely, cmap = 'gray')
plt.title('Sobel Y'), plt.xticks([]), plt.yticks([])
plt.show()
```
Note `cv2.CV_64F` can contain negative values.
```python
laplacian = cv2.Laplacian(img, cv2.CV_64F)
sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=5)
sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=5)
plt.subplot(2,2,1),plt.imshow(img, cmap = 'gray')
plt.title('Original'), plt.xticks([]), plt.yticks([])
plt.subplot(2,2,2),plt.imshow(laplacian, cmap = 'gray')
plt.title('Laplacian'), plt.xticks([]), plt.yticks([])
plt.subplot(2,2,3),plt.imshow(sobelx, cmap = 'gray')
plt.title('Sobel X'), plt.xticks([]), plt.yticks([])
plt.subplot(2,2,4),plt.imshow(sobely, cmap = 'gray')
plt.title('Sobel Y'), plt.xticks([]), plt.yticks([])
plt.show()
```
## Edge Detection
Canny edge detector in OpenCV. Usage of edge detection versus thresholding to obtain a binary image.
```python
cups = cv2.cvtColor(cv2.imread('images/cups.jpg'), cv2.COLOR_BGR2RGB)
plt.imshow(cups)
```
```python
# preprocess by blurring and grayscale
cups_preprocessed = cv2.cvtColor(cv2.GaussianBlur(cups, (7,7), 0), cv2.COLOR_RGB2GRAY)
```
```python
# find binary image with thresholding
low_thresh = 120
high_thresh = 200
_, cups_thresh = cv2.threshold(cups_preprocessed, low_thresh, 255, cv2.THRESH_BINARY)
plt.imshow(cv2.cvtColor(cups_thresh, cv2.COLOR_GRAY2RGB))
_, cups_thresh_hi = cv2.threshold(cups_preprocessed, high_thresh, 255, cv2.THRESH_BINARY)
```
```python
# find binary image with edges
cups_edges = cv2.Canny(cups_preprocessed, threshold1=90, threshold2=110)
plt.imshow(cv2.cvtColor(cups_edges, cv2.COLOR_GRAY2RGB))
```
## Binary Thresholding
Binarization converts an image to a two-tone (0/255 or 0/1) image. Examples use thresholding on the brightness/darkness of a grayscale image and on color ranges.
### On Grayscale Image
### Global Thresholding
```python
# threshold for grayscale image
img = cv2.imread('images/oy.jpg')
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, threshold_img = cv2.threshold(gray_img, 127, 255, cv2.THRESH_BINARY)
plt.figure(figsize=(20,10))
plt.subplot(1, 2, 1); plt.imshow(gray_img, cmap='gray')
plt.subplot(1, 2, 2); plt.imshow(threshold_img, cmap='gray')
```
```python
#threshold on blurred image
gray_blur_img = cv2.cvtColor(img_blur_small, cv2.COLOR_BGR2GRAY)
_, threshold_img_blur = cv2.threshold(gray_blur_img, 100, 255, cv2.THRESH_BINARY)
plt.imshow(cv2.cvtColor(threshold_img_blur, cv2.COLOR_GRAY2RGB))
```
### Adaptive Thresholding
It is local thresholding, where the threshold is decided within local windows.
```python
# using adaptive threshold instead of global
adaptive_thresh = cv2.adaptiveThreshold(gray_img,255,\
cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
cv2.THRESH_BINARY,11,2)
plt.imshow(adaptive_thresh, cmap='gray')
```
### On Color Image
```python
# open new Mondrian Piet painting photo
piet = cv2.cvtColor(cv2.imread('images/piet.png'), cv2.COLOR_BGR2RGB)
piet_hsv = cv2.cvtColor(piet, cv2.COLOR_RGB2HSV)
plt.imshow(piet)
```
### Range thresholding
The HSV color space is quite similar to the way humans perceive color. Most other models define color in relation to the primary colors, whereas the colors used in HSV map more directly onto human perception.
```python
# threshold for hue channel in blue range
blue_min = np.array([85, 60, 60], np.uint8)
blue_max = np.array([150, 255, 255], np.uint8)
threshold_blue_img = cv2.inRange(piet_hsv, blue_min, blue_max)
# show threshold bits
plt.imshow(threshold_blue_img, cmap='gray')
```
### Binary Thresholding and Image Masking
```python
upstate = cv2.cvtColor(cv2.imread('images/upstate-ny.jpg'), cv2.COLOR_BGR2RGB)
upstate_hsv = cv2.cvtColor(upstate, cv2.COLOR_RGB2HSV)
plt.imshow(upstate)
```
Note the use of `bitwise_not` to filter out the blue sky
```python
mask_inverse = cv2.inRange(upstate_hsv, blue_min, blue_max)
mask = cv2.bitwise_not(mask_inverse)
plt.imshow(mask, cmap='gray')
```
Use the above mask to select the non-sky part
```python
# convert single channel mask back into 3 channels
mask_rgb = cv2.cvtColor(mask, cv2.COLOR_GRAY2RGB)
# perform bitwise and on mask to obtain cut-out image that is not blue
masked_upstate = cv2.bitwise_and(upstate, mask_rgb)
# replace the cut-out parts with white
masked_replace_white = cv2.addWeighted(masked_upstate, 1, \
cv2.cvtColor(mask_inverse, cv2.COLOR_GRAY2RGB), 1, 0)
plt.imshow(masked_replace_white)
```
|
{"hexsha": "7dc6618979c6ac95f7e8993965f9309ae2843b1f", "size": 20160, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "Notebooks/ECMM426_ECMM441_Image_Processing (1).ipynb", "max_stars_repo_name": "Aarif1430/monthly-challenges", "max_stars_repo_head_hexsha": "23990bb3e8930eeb9dc4308c73b43b77a6a3ab49", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-03T19:19:27.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-03T19:19:27.000Z", "max_issues_repo_path": "Notebooks/ECMM426_ECMM441_Image_Processing (1).ipynb", "max_issues_repo_name": "Aarif1430/monthly-challenges", "max_issues_repo_head_hexsha": "23990bb3e8930eeb9dc4308c73b43b77a6a3ab49", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Notebooks/ECMM426_ECMM441_Image_Processing (1).ipynb", "max_forks_repo_name": "Aarif1430/monthly-challenges", "max_forks_repo_head_hexsha": "23990bb3e8930eeb9dc4308c73b43b77a6a3ab49", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.9196538937, "max_line_length": 225, "alphanum_fraction": 0.5534722222, "converted": true, "num_tokens": 3079}
|
(* This Isabelle theory is produced using the TIP tool offered at the following website:
https://github.com/tip-org/tools
This file was originally provided as part of TIP benchmark at the following website:
https://github.com/tip-org/benchmarks
Yutaka Nagashima at CIIRC, CTU changed the TIP output theory file slightly
to make it compatible with Isabelle2017.
Some proofs were added by Yutaka Nagashima.*)
theory TIP_sort_BSortCount
imports "../../Test_Base"
begin
datatype 'a list = nil2 | cons2 "'a" "'a list"
fun x :: "'a list => 'a list => 'a list" where
"x (nil2) z = z"
| "x (cons2 z2 xs) z = cons2 z2 (x xs z)"
fun sort2 :: "int => int => int list" where
"sort2 y z =
(if y <= z then cons2 y (cons2 z (nil2)) else
cons2 z (cons2 y (nil2)))"
fun pairs :: "int list => int list => int list" where
"pairs (nil2) z = z"
| "pairs (cons2 z2 x2) (nil2) = cons2 z2 x2"
| "pairs (cons2 z2 x2) (cons2 x3 x4) =
x (sort2 z2 x3) (pairs x2 x4)"
fun stitch :: "int list => int list => int list" where
"stitch (nil2) z = z"
| "stitch (cons2 z2 xs) z = cons2 z2 (pairs xs z)"
function evens :: "'a list => 'a list"
and odds :: "'a list => 'a list" where
"evens (nil2) = nil2"
| "evens (cons2 z xs) = cons2 z (odds xs)"
| "odds (nil2) = nil2"
| "odds (cons2 z xs) = evens xs"
by pat_completeness auto
fun count :: "'a => 'a list => int" where
"count y (nil2) = 0"
| "count y (cons2 z2 ys) =
(if (y = z2) then 1 + (count y ys) else count y ys)"
(*fun did not finish the proof*)
function bmerge :: "int list => int list => int list" where
"bmerge (nil2) z = nil2"
| "bmerge (cons2 z2 x2) (nil2) = cons2 z2 x2"
| "bmerge (cons2 z2 x2) (cons2 x3 x4) =
(let fail :: int list =
stitch
(bmerge (evens (cons2 z2 x2)) (evens (cons2 x3 x4)))
(bmerge (odds (cons2 z2 x2)) (odds (cons2 x3 x4)))
in (case x2 of
nil2 =>
(case x4 of
nil2 => sort2 z2 x3
| cons2 x5 x6 => fail)
| cons2 x7 x8 => fail))"
by pat_completeness auto
(*fun did not finish the proof*)
function bsort :: "int list => int list" where
"bsort (nil2) = nil2"
| "bsort (cons2 z (nil2)) = cons2 z (nil2)"
| "bsort (cons2 z (cons2 x2 x3)) =
bmerge
(bsort (evens (cons2 z (cons2 x2 x3))))
(bsort (odds (cons2 z (cons2 x2 x3))))"
by pat_completeness auto
theorem property0 :
"((count y (bsort xs)) = (count y xs))"
oops
end
|
{"author": "data61", "repo": "PSL", "sha": "2a71eac0db39ad490fe4921a5ce1e4344dc43b12", "save_path": "github-repos/isabelle/data61-PSL", "path": "github-repos/isabelle/data61-PSL/PSL-2a71eac0db39ad490fe4921a5ce1e4344dc43b12/UR/TIP_with_Proof/TIP15/TIP15/TIP_sort_BSortCount.thy"}
|
! mpiexec -np 4 bin/Debug/WGFM_test2.exe
program matrices_mpi_test
use meshread_mod
use linalg_mod
use WGFM_matrices
use WGFM_matrices_mpi
use data_tools
use mpi
implicit none
complex(8), parameter :: IU = (0d0, 1d0)
! Variables
type(Mesh) :: msh
integer, parameter :: nNS = 6 ! number of quadrature points for EFIE
complex(8), allocatable :: Z1(:, :)
complex(8), allocatable :: Z2(:, :)
complex(8), allocatable :: S1Pot(:, :, :), D1Pot(:, :, :), S2Pot(:, :, :), D2Pot(:, :, :)
complex(8), allocatable :: S1Pot_m(:, :, :), D1Pot_m(:, :, :), S2Pot_m(:, :, :), D2Pot_m(:, :, :)
real(8), allocatable :: EvlMsh(:,:)
! MPI variables
integer :: ierr, N_procs, id
! Parameters
real(8), parameter :: ep0 = 1d0 / (35950207149.4727056d0 * PI) ! vacuum permittivity [F/m]
real(8), parameter :: mu0 = PI * 4d-7 ! vacuum permeability [H/m]
real(8), parameter :: k = 0.1d0 * PI ! free-space wavenumber [1/m]
real(8), parameter :: ep1 = ep0 ! upper half-space permittivity [F/m]
real(8), parameter :: mu1 = mu0 ! upper half-space permeability [H/m]
real(8), parameter :: ep2 = ep0 * 2.5d0 ! lower half-space permittivity [F/m]
    real(8), parameter :: mu2 = mu0 * 5.3d0                ! lower half-space permeability [H/m]
real(8), parameter :: w = k / sqrt(ep0 * mu0) ! angular frequency [rad/s]
real(8), parameter :: k1 = w * sqrt(ep1 * mu1) ! upper half-space wavenumber [1/m]
real(8), parameter :: k2 = w * sqrt(ep2 * mu2) ! lower half-space wavenumber [1/m]
character(len=100) :: file_msh = 'meshes/half_sphere_plane/half_sphere_plane_a5_h04_t20.msh'
character(len=100) :: file_evl_msh = 'meshes/eval_msh/circle_msh.txt'
!character(len=100) :: file_msh = 'meshes/convergence_sphere/simple_mesh.msh'
! Set window (just to test the program)
call set_circ_window(Lr=4d0, c_in=0.7d0)
! MPI init
call mpi_init(ierr)
call mpi_comm_size(MPI_COMM_WORLD, N_procs, ierr)
call mpi_comm_rank(MPI_COMM_WORLD, id, ierr)
print *, "Total processes:", N_procs, "Process id:", id
! Load mesh
call load_gmsh(file_msh, msh)
! Compute WGFM matrices
if (id == 0) print *, "WGFM1"
call genWGFMMat(msh, w, k1, k2, ep1, ep2, mu1, mu2, Z1)
if (id == 0) print *, "WGFM2"
call genWGFMMat_mpi(msh, w, k1, k2, ep1, ep2, mu1, mu2, Z2)
! Retrieve evaluation points
call load_matlab_mesh(file_evl_msh, EvlMsh)
! Compute WGFM potencial
if (id == 0) print *, "WGFM Pot1"
call genWGFMPot(msh, EvlMsh, k1, k2, S1Pot, D1Pot, S2Pot, D2Pot)
if (id == 0) print *, "WGFM Pot2"
call genWGFMPot_mpi(msh, EvlMsh, k1, k2, S1Pot_m, D1Pot_m, S2Pot_m, D2Pot_m)
if (id == 0) then
! Compute matrices norm
print *, "shape", shape(Z1)
print *, "k", k
print *, "w", w
        print *
print *, "Z", sqrt(sum(abs((Z1-Z2)) ** 2))
print *, "S1", sqrt(sum(abs((S1Pot-S1Pot_m)) ** 2))
print *, "D1", sqrt(sum(abs((D1Pot-D1Pot_m)) ** 2))
print *, "S2", sqrt(sum(abs((S2Pot-S2Pot_m)) ** 2))
print *, "D2", sqrt(sum(abs((D2Pot-D2Pot_m)) ** 2))
end if
call MPI_Finalize(ierr)
end program
|
{"hexsha": "143774fb5f74f913edd5af0ad4cb7f20e21b0a44", "size": 3290, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "script/matrices_test/matrices_mpi_test.f90", "max_stars_repo_name": "Riarrieta/WindowedGreenFunctionMethod", "max_stars_repo_head_hexsha": "e6f965cac1acd8d9922e2bf94c33e3577ceb0dc9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "script/matrices_test/matrices_mpi_test.f90", "max_issues_repo_name": "Riarrieta/WindowedGreenFunctionMethod", "max_issues_repo_head_hexsha": "e6f965cac1acd8d9922e2bf94c33e3577ceb0dc9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "script/matrices_test/matrices_mpi_test.f90", "max_forks_repo_name": "Riarrieta/WindowedGreenFunctionMethod", "max_forks_repo_head_hexsha": "e6f965cac1acd8d9922e2bf94c33e3577ceb0dc9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.7058823529, "max_line_length": 101, "alphanum_fraction": 0.5917933131, "num_tokens": 1133}
|
import time
import numpy as np
import pandas as pd
try:
import MySQLdb
except:
import pymysql
try:
db = MySQLdb.Connect(host="10.20.212.172", user="varientdb", passwd="varient2017", db="varientDB_new")
except:
db = pymysql.connect(host="10.20.212.172", user="varientdb", passwd="varient2017", db="varientDB_new")
cursor = db.cursor()
def fix_illegal_char(index_dict):
new_index_dict = {}
new_index_list = []
for index in index_dict:
index_backup = index
index = index.replace(' ','_')
index = index.replace('-','_')
index = index.replace('+','plus')
new_index_dict[index] = index_dict[index_backup]
#new_index_list.append(index)
return new_index_dict
def get_index_cols_list(f_base,split_mark='\t',clean_col = True):
f = open(f_base,'r')
l = f.readline()
new_index_list = []
temp = l[:-1].split(split_mark)
if clean_col:
for index in temp:
index = index.replace(' ','_')
index = index.replace('-','_')
index = index.replace('+','plus')
new_index_list.append(index)
else:
for index in temp:
new_index_list.append(index)
return new_index_list
def write_info_into_database(base_file,index_dict,index_list,dbname):
sql_col = ''
for key in index_list:
l = ' `' + key + '`,'
sql_col += l
#print(sql_col)
f = open(base_file,'r')
l = f.readline()
l = f.readline()
count = 0
while l:
temp = l[:-1].split('\t')
#temp_dict = {}
index_dict_temp = dict(zip(index_list , temp))
#sql = "INSERT INTO EMPLOYEE(FIRST_NAME, LAST_NAME, AGE, SEX, INCOME) VALUES ('Mac', 'Mohan', 20, 'M', 2000)"
#print(index_dict_temp)
sql_head = "INSERT INTO varientDB_new." + dbname + "(" + sql_col[:-1] + ") VALUES ("
new_value = ""
for key in index_list:
new_value += "'"
new_value += str(index_dict_temp[key])
new_value += "',"
sql = sql_head + new_value[:-1] + ');'
#print('sql query: ',sql)
#try:
cursor.execute(sql)
db.commit()
l = f.readline()
count +=1
if count % 1000 == 0:
print(str(count)+ ' varientDB_new.' + dbname +' lines write done')
print(str(int(time.time())))
#break
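# A minimal alternative sketch (not part of the original script): build the INSERT with driver
# placeholders so MySQLdb/pymysql escape every value, which avoids broken queries when a field
# contains quotes. The helper name write_info_into_database_parameterized is hypothetical.
def write_info_into_database_parameterized(base_file, index_list, dbname):
    col_sql = ','.join('`' + key + '`' for key in index_list)
    placeholders = ','.join(['%s'] * len(index_list))
    sql = "INSERT INTO varientDB_new." + dbname + "(" + col_sql + ") VALUES (" + placeholders + ")"
    f = open(base_file, 'r')
    f.readline()  # skip the header line
    l = f.readline()
    while l:
        values = l[:-1].split('\t')
        cursor.execute(sql, values)  # the driver quotes/escapes each value
        l = f.readline()
    db.commit()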
def get_model_print(index_dict,index_list,max_len=255):
#CDS_position = models.CharField(db_column='CDS_position', max_length=255, blank=True, null=True)
print('####################')
print('####################')
print('####################')
ls = []
for i in index_list:
l = i + " = models.CharField(db_column='" + i + "', max_length=" + str(max_len) +", blank=True, null=True)"
ls.append(l)
print(l)
print('####################')
print('####################')
print('####################')
return ls
def check_maxi_length(base_file,split_mark='\t'):
train=pd.read_csv(base_file, sep=split_mark)
index_dict = get_index_for_cols(base_file)
more_than_200 = []
for i in index_dict:
#print(train[i].head())
df = train[i]
len_maxi = df.map(lambda x: len(str(x))).max()
if len_maxi > 200:
more_than_200.append(i+' : '+str(len_maxi))
print(i+' : '+str(len_maxi))
print('####################')
print('####################')
print('####################')
print('more than 200:')
for s in more_than_200:
print(s)
print('####################')
print('####################')
print('####################')
def cut_file(base_file,save_file,lines='all',cols='all',split_mark = '\t'):
if cols == 'all':
if lines != 'all':
f_r = open(base_file,'r')
f_s = open(save_file,'w')
l = f_r.readline()
f_s.write(l)
for i in range(lines):
l = f_r.readline()
f_s.write(l)
else:
f_r = open(base_file,'r')
f_s = open(save_file,'w')
l = f_r.readline()
while l:
f_s.write(l)
l = f_r.readline()
else:
if lines != 'all':
index_list = []
index = 0
f_r = open(base_file,'r')
f_s = open(save_file,'w')
l = f_r.readline()
temp = l[:-1].split(split_mark)
for col_name in temp:
if col_name in cols:
index_list.append(index)
index += 1
n_l = ''
for index in index_list:
n_l += temp[index]
n_l += split_mark
n_l = n_l[:-1]+ '\n'
f_s.write(n_l)
print(n_l)
for i in range(lines):
l = f_r.readline()
temp = l[:-1].split(split_mark)
n_l = ''
for index in index_list:
n_l += temp[index]
n_l += split_mark
n_l = n_l[:-1]+ '\n'
f_s.write(n_l)
else:
index_list = []
index = 0
f_r = open(base_file,'r')
f_s = open(save_file,'w')
l = f_r.readline()
temp = l[:-1].split(split_mark)
for col_name in temp:
if col_name in cols:
index_list.append(index)
index += 1
n_l = ''
for index in index_list:
n_l += temp[index]
n_l += split_mark
n_l = n_l[:-1]+ '\n'
f_s.write(n_l)
print(n_l)
l = f_r.readline()
while l:
temp = l[:-1].split(split_mark)
n_l = ''
for index in index_list:
n_l += temp[index]
n_l += split_mark
n_l = n_l[:-1]+ '\n'
f_s.write(n_l)
l = f_r.readline()
def get_index_for_cols(base_file,cols='all',split_mark = '\t'):
index_list = []
index_dict = {}
index = 0
f_r = open(base_file,'r')
l = f_r.readline()
temp = l[:-1].split(split_mark)
index_range = range(len(temp))
index_dict_temp = dict(zip(temp , index_range))
#print(index_dict_temp)
if cols != 'all':
for col_name in cols:
index_dict[col_name] = index_dict_temp[col_name]
return index_dict
else:
return index_dict_temp
def count_time(func):
def int_time(*args, **kwargs):
start_time = time.time()
        result = func(*args, **kwargs)
        over_time = time.time()
        total_time = over_time - start_time
        print('Updater process running : %s s' % total_time)
        return result
return int_time
def main():
print('welcome')
if __name__ == '__main__':
main()
|
{"hexsha": "ab9399f8bf8239163d969035a1e7b49c09f8c050", "size": 7004, "ext": "py", "lang": "Python", "max_stars_repo_path": "cut_file.py", "max_stars_repo_name": "luyu103713/variantDB_updater", "max_stars_repo_head_hexsha": "53cb390a63e7a392f628c566a87269dafdeeec29", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cut_file.py", "max_issues_repo_name": "luyu103713/variantDB_updater", "max_issues_repo_head_hexsha": "53cb390a63e7a392f628c566a87269dafdeeec29", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cut_file.py", "max_forks_repo_name": "luyu103713/variantDB_updater", "max_forks_repo_head_hexsha": "53cb390a63e7a392f628c566a87269dafdeeec29", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.7049180328, "max_line_length": 117, "alphanum_fraction": 0.4842946887, "include": true, "reason": "import numpy", "num_tokens": 1690}
|
import numpy as np
def vindex(v):
if len(v.shape) > 2:
return len(v.shape) - 2
return 0
def vncomp(v):
return v.shape[vindex(v)]
def vcomplimit(v, n):
"""
Return a stack of vectors with the same shape as the input stack, but only
including the first n vector components.
:param v: input vector. Must have at least n components
:param n: Number of vector components to keep
:return:
"""
if vindex(v) == 0:
return v[:n, ...]
else:
return v[..., :n, :]
def vdot(a, b, array=False):
"""
Dot product of two vectors or stack of vectors
:param a: (nD stack of) Vector(s) for first dot product operand
:param b: (nD stack of) Vector(s) for second dot product operand
:param array: If true and passed a single vector for each operand, return a
numpy 1D array result. Otherwise you will get a scalar result if you pass
in single vector operands. No effect if passed stacks as either operand
:return: Dot product(s) of inputs
This uses numpy broadcasting to calculate the result, so the operands do not
have to be the same size, just broadcast-compatible. In this case, the result
may be larger than either input.
If one input has more components than the other, the result will be equivalent
to the result of the shorter input having the same number of components, all
of which are zero. Equivalently, the result is equivalent to the longer input
being truncated to match the length of the shorter input. Note that this only
applies to the vector component -- all other axes of a stack are subject to
numpy broadcast rules.
"""
n = np.min((vncomp(a), vncomp(b)))
c = vcomplimit(a, n) * vcomplimit(b, n)
result = np.sum(c, axis=vindex(c))
if result.size == 1 and not array:
result = result.ravel()[0]
if np.isscalar(result) and array:
result = np.array([result])
return result
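# Minimal usage sketch (illustrative values, not part of the original module):
#   a = np.array([[1.0], [2.0], [3.0]])   # one 3-vector stored as a (3, 1) stack
#   b = np.array([[4.0], [5.0], [6.0]])
#   vdot(a, b)                            # -> 32.0 (scalar, since each stack holds a single vector)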
def vlength(v, array=False):
"""
Compute the length of a vector as the square root of the vector's dot product with itself
:param v: (Stack of) Vector(s) to compute the length of
:param array: Passed to vdot
    If true and passed a single vector, return a numpy 1D array result. Otherwise you will get a scalar result if you pass a single vector.
    If passed a stack of vectors, the component axis is summed away, so the result has one fewer axis than the input.
    IE if you pass a stack of shape (3072,3,4096), the answer will have shape (3072,4096)
"""
return np.sqrt(vdot(v, v, array))
def vangle(a, b, array=False):
"""
Compute the angle between two vectors
:param a: (stack of) first vector operand(s)
:param b: (stack of) second vector operand(s)
:param array: Passed to vdot and vlength
:return: Angle(s) between two (stacks of) vectors in radians
Note - using true real numbers, it is impossible for the dot product to be
greater than the product of the input vector lengths, and therefore
impossible for the argument to arccos to be outside of [-1,1].
However, it can and does happen with limited-precision floating
point numbers. This has been observed operationally, and must be
accounted for. It is assumed but not checked that if the argument
is out of range, it is only out by a small amount.
"""
arg=vdot(a, b, array) / vlength(a, array) / vlength(b, array)
try:
arg[np.where(arg<-1)]=-1
arg[np.where(arg> 1)]= 1
except TypeError:
if arg<-1:
arg=-1
if arg> 1:
arg= 1
return np.arccos(arg)
def vcomp(comps):
"""
Compose stacks of vector components into a single stack of vectors (inverse of vdecomp())
    :param comps: Iterable of components. All components must be broadcast-compatible; m will be the length
                   of comps.
:return: nD stack of m-element vectors
"""
try:
if len(comps[0].shape) >= 2:
axis = -2
elif len(comps[0].shape)==0:
return np.stack([np.array([x]) for x in comps],axis=0)
else:
axis = 0
except AttributeError:
#This case handles things that aren't already numpy arrays, and is triggered by
#any of the input comps not having a .shape attribute.
comps=[np.array([x]) for x in comps]
axis=0
return np.stack(np.broadcast_arrays(*comps), axis=axis)
def vdecomp(v, m=None, minlen=None, maxlen=None):
"""
Decompose a vector into components. an nD stack of m-element vectors will return a tuple with up to m elements,
each of which will be an nD stack of scalars
:param v: nD stack of m-element vectors, a numpy (n+1)D array with shape
(n_stack0,n_stack1,...,n_stackn-2,m,n_stackn-1)
:param minlen: If passed, this will pad out the returned vector components with zero scalars
such that the returned tuple has minlen components. We do zero scalars rather than zero arrays
of the same size as the other components to save memory, since a scalar is compatible by
broadcasting with an array of any size.
:param maxlen: If passed, this will restrict the returned vector components to the given
size, even if the input vector has more components.
:param m: If passed, treat the input as if it were an nD stack of m-element vectors. If the actual
stack has more components, don't return them. If it has less, return scalar zeros for the
missing components
:return: A tuple. Each element is a vector component. Vector components pulled from the vector will be
an nD stack of scalars, a numpy nD array with shape (n_stack0,n_stack1,...,n_stackn-2,n_stackn-1).
Vector components which are made up will be scalar zeros.
Note: If you pass maxlen<minlen, the result is still well-defined, since the maxlen is used first,
then the minlen. If you pass a vector with m=4, a minlen of 7, and a maxlen of 2, you will get
a result with the first two components of the vector, followed by 5 zeros. I'm not sure if this
is useful, but there it is.
Example:
v=np.zeros((24,3,50)) #Suitable for holding multiple trajectories
#OR
v0=np.zeros((3,50)) #Initial conditions for 50 trajectories
t=np.arange(24) #Time steps
v=rk4(x0=v0,t=t) #Numerically integrate multiple trajectories. Result shape will be (t.size,)+v0.shape,
#IE (24,3,50)
    x,y,z=vdecomp(v) #after this, x, y, and z are each numpy arrays of shape (24,50)
"""
if maxlen is None and m is not None:
maxlen = m
if minlen is None and m is not None:
minlen = m
ndStack = len(v.shape) > 2
efflen = v.shape[-2 if ndStack else 0]
if maxlen is not None and maxlen < efflen:
efflen = maxlen
result = tuple([v[..., i, :] if ndStack else v[i, ...] for i in range(efflen)])
if minlen is not None and minlen > efflen:
result = result + (0,) * (minlen - efflen)
return result
def vcross(a, b):
"""
Compute the three-dimensional cross-product of two vectors or stack of vectors
:param a: (nD stack of) Vector(s) for first cross product operand
:param b: (nD stack of) Vector(s) for second cross product operand
:return: Cross product(s) of inputs
This uses numpy broadcasting to calculate the result, so the operands do not
have to be the same size, just broadcast-compatible. In this case, the result
may be larger than either input.
If either of the input vectors have fewer than three components, the extra components
are made up and assumed to be zero. If either input has more than three components,
the extra components are ignored. The result will always have three components.
"""
(ax, ay, az) = vdecomp(a, m=3)
(bx, by, bz) = vdecomp(b, m=3)
cx = ay * bz - az * by
cy = az * bx - ax * bz
cz = ax * by - ay * bx
return vcomp((cx,cy,cz))
#Generic functions, usable in this or other projects. No access to global state
def vnormalize(a):
"""
Calculate the unit vector in a given direction
:param a: vector to get direction from
:return: unit vector in given direction
"""
return a/vlength(a)
def vncross(a, b):
"""
Normalized cross product
:param a: First cross product factor
:param b: Second cross product factor
:return: Unit vector in same direction as vcross(a,b)
"""
return vnormalize(vcross(a, b))
def vforce_proj(a,b,d):
"""
Force a vector to have a given length projection with another vector.
Calculate a scalar constant k such that dot(k*a,b)=d and return k*a
:param a: Vector to scale
:param b: Vector to use as reference
:param d: Required dot product
:return: A vector k*a with the same direction as a which has the
given dot product with b
"""
# Since the dot product is linear:
#vdot(k*a,b)=d
#k*vdot(a,b)=d
k=d/vdot(a,b)
#Stretch k to be broadcastable with a
try:
return k[...,np.newaxis,:]*a
except:
return k*a
def rv(sv):
"""
Position part of state vector
:param sv: Stack of state vectors, can be one in stack IE column vector
:return: Position part, will be stack matching sv
"""
return sv[:3,:]
def vv(sv):
"""
Velocity part of state vector
:param sv: Stack of state vectors, can be one in stack IE column vector
:return: Position part, will be stack matching sv
"""
return sv[3:,:]
|
{"hexsha": "30d9f6812489bbb143a2a58c1c8e8a8ced226370", "size": 9683, "ext": "py", "lang": "Python", "max_stars_repo_path": "kwanmath/vector.py", "max_stars_repo_name": "kwan3217/kwanmath", "max_stars_repo_head_hexsha": "c43f8209324cdb0c673b969b41b06d49c9d46e71", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "kwanmath/vector.py", "max_issues_repo_name": "kwan3217/kwanmath", "max_issues_repo_head_hexsha": "c43f8209324cdb0c673b969b41b06d49c9d46e71", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kwanmath/vector.py", "max_forks_repo_name": "kwan3217/kwanmath", "max_forks_repo_head_hexsha": "c43f8209324cdb0c673b969b41b06d49c9d46e71", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.577689243, "max_line_length": 143, "alphanum_fraction": 0.6530001033, "include": true, "reason": "import numpy", "num_tokens": 2500}
|
[STATEMENT]
lemma lms_contains_aref: "(list_contains, op_mset_contains) \<in> Id \<rightarrow> list_mset_rel \<rightarrow> bool_rel"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (list_contains, op_mset_contains) \<in> Id \<rightarrow> list_mset_rel \<rightarrow> bool_rel
[PROOF STEP]
unfolding list_mset_rel_def list_contains_def[abs_def]
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. list_ex ((=) x), op_mset_contains) \<in> Id \<rightarrow> br mset (\<lambda>_. True) \<rightarrow> bool_rel
[PROOF STEP]
by (auto simp: in_br_conv list_ex_iff in_multiset_in_set)
|
{"llama_tokens": 231, "file": "Refine_Imperative_HOL_IICF_Impl_IICF_List_Mset", "length": 2}
|
% mnras_template.tex
%
% LaTeX template for creating an MNRAS paper
%
% v3.0 released 14 May 2015
% (version numbers match those of mnras.cls)
%
% Copyright (C) Royal Astronomical Society 2015
% Authors:
% Keith T. Smith (Royal Astronomical Society)
% Change log
%
% v3.0 May 2015
% Renamed to match the new package name
% Version number matches mnras.cls
% A few minor tweaks to wording
% v1.0 September 2013
% Beta testing only - never publicly released
% First version: a simple (ish) template for creating an MNRAS paper
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Basic setup. Most papers should leave these options alone.
\documentclass[fleqn,usenatbib]{mnras}
% MNRAS is set in Times font. If you don't have this installed (most LaTeX
% installations will be fine) or prefer the old Computer Modern fonts, comment
% out the following line
\usepackage{newtxtext,newtxmath}
% Depending on your LaTeX fonts installation, you might get better results with one of these:
%\usepackage{mathptmx}
%\usepackage{txfonts}
% Use vector fonts, so it zooms properly in on-screen viewing software
% Don't change these lines unless you know what you are doing
\usepackage[T1]{fontenc}
% Allow "Thomas van Noord" and "Simon de Laguarde" and alike to be sorted by "N" and "L" etc. in the bibliography.
% Write the name in the bibliography as "\VAN{Noord}{Van}{van} Noord, Thomas"
\DeclareRobustCommand{\VAN}[3]{#2}
\let\VANthebibliography\thebibliography
\def\thebibliography{\DeclareRobustCommand{\VAN}[3]{##3}\VANthebibliography}
%%%%% AUTHORS - PLACE YOUR OWN PACKAGES HERE %%%%%
% Only include extra packages if you really need them. Common packages are:
\usepackage{graphicx} % Including figure files
\usepackage{amsmath} % Advanced maths commands
\usepackage{amssymb} % Extra maths symbols
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%% AUTHORS - PLACE YOUR OWN COMMANDS HERE %%%%%
% Please keep new commands to a minimum, and use \newcommand not \def to avoid
% overwriting existing commands. Example:
%\newcommand{\pcm}{\,cm$^{-2}$} % per cm-squared
\newcommand{\Rwd}{\mbox{$R_{\mathrm{wd}}$}}
\newcommand{\Mwd}{\mbox{$M_{\mathrm{wd}}$}}
\newcommand{\Msun}{\mbox{$\mathrm{M}_{\odot}$}}
\newcommand{\Rsun}{\mbox{$\mathrm{R}_{\odot}$}}
\newcommand{\bz}{\ensuremath{\langle B_z \rangle}}
\newcommand{\bs}{\ensuremath{\langle \vert B \vert \rangle}}
\newcommand{\sz}{\ensuremath{\sigma_{\langle B_z \rangle}}}
\newcommand{\snr}{\ensuremath{S/N}}
\newcommand{\vsini}{\ensuremath{v\,\sin\,i}}
\newcommand{\kms}{km\,s$^{-1}$}
\newcommand{\Ion}[2]{#1{\,\textsc{#2}}}
\newcommand{\Teff}{\mbox{$T_{\mathrm{eff}}$}}
\newcommand{\logg}{\mbox{$\log g$}}
\usepackage[usenames,dvipsnames]{xcolor}
\newcommand{\bgc}[1]{\textcolor{orange}{[#1]}}
\newcommand{\bgs}[1]{\textcolor{orange}{#1}}
\newcommand{\grout}[1]{\textcolor{lightgray}{#1}}
\newcommand{\jlc}[1]{\textcolor{red}{[#1]}}
\newcommand{\ot}[1]{\textcolor{magenta}{[#1]}}
\newcommand{\dwc}[1]{\textcolor{green}{[#1]}}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%% TITLE PAGE %%%%%%%%%%%%%%%%%%%
% Title of the paper, and the short title which is used in the headers.
% Keep the title short and informative.
\title[A pre-intermediate polar]{Discovery of a young pre-intermediate polar}
% The list of authors, and the short list which is used in the headers.
% If you need two or more lines of authors, add an extra line using \newauthor
\author[Wilson et al.]{David J. Wilson$^{1}$\thanks{djwilson394@gmail.com}, Odette Toloza$^{2,3}$, John D. Landstreet$^{4,5}$,
Boris T. G{\"a}nsicke$^{2}$, \newauthor Jeremy J. Drake$^{5}$, J. J. Hermes$^{7}$, Detlev Koester$^{8}$ \medskip\\
$^{1}$ McDonald Observatory, University of Texas at Austin, 2515 Speedway, C1402, Austin, TX 78712, USA \\
$^{2}$ Department of Physics, University of Warwick, Coventry CV4 7AL, UK \\
$^{3}$ Departamento de Física, Universidad Técnica Federico Santa María, Avenida España 1680, Valparaíso, Chile\\
$^{4}$ Armagh Observatory \& Planetarium, Armagh, BT61 9DG, Northern Ireland, and \\
$^{5}$ Department of Physics \& Astronomy, University of Western Ontario, London, ON N6G 1P7 \\
$^{6}$ Center for Astrophysics | Harvard \& Smithsonian, 60 Garden Street, Cambridge, MA 02138, USA\\
$^{7}$ Department of Astronomy, Boston University, 725 Commonwealth Ave., Boston, MA 02215, USA\\
$^{8}$Institut f{\"u}r Theoretische Physik und Astrophysik, University of Kiel, 24098 Kiel, Germany
}
% These dates will be filled out by the publisher
\date{Accepted XXX. Received YYY; in original form ZZZ}
% Enter the current year, for the copyright statements etc.
\pubyear{2021}
% Don't change these lines
\begin{document}
\label{firstpage}
\pagerange{\pageref{firstpage}--\pageref{lastpage}}
\maketitle
% Abstract of the paper
\begin{abstract}
We present the discovery of a magnetic field on the white dwarf component in the detached post common envelope binary (PCEB) CC\,Cet. Magnetic white dwarfs in detached PCEBs are extremely rare, in contrast to the high incidence of magnetism in single white dwarfs and cataclysmic variables. We find Zeeman-split absorption lines in both ultraviolet \textit{Hubble Space Telescope} (\textit{HST}) spectra and archival optical spectra of CC\,Cet. Model fits to the lines return a mean magnetic field strength of \bs\ $\approx$ 600--700\,kG. Differences in the best-fit magnetic field strength between two separate \textit{HST} observations and the high \vsini\ of the lines indicate that the white dwarf is rotating with a period $\sim0.5$\,hours, and that the magnetic field is not axisymmetric about the spin axis. The magnetic field strength and rotation period are consistent with those observed among the intermediate polar class of cataclysmic variable, and we compute stellar evolution models that predict CC\,Cet will evolve into an intermediate polar in 7--17\,Gyr. Among the small number of known PCEBs containing a magnetic white dwarf, CC\,Cet is by far the hottest \bgc{umm... not if the white dwarf in V471\,Tau is magnetic (but see below, no field measurement available for V471\,Tau)} (and thus youngest), and has by far the weakest field, and cannot have formed via the recently proposed crystallisation/spin-up scenario. In addition to the magnetic field measurements, we update the atmospheric parameters of the CC\,Cet white dwarf via model spectra fits to the COS data and provide a refined orbital period and ephemeris from \textit{TESS} photometry.
\end{abstract}
% Select between one and six entries from the list of approved keywords.
% Don't make up new ones.
\begin{keywords}
binaries: close -- stars: magnetic field -- white dwarfs -- stars: individual: CC\,Cet
\end{keywords}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%% BODY OF PAPER %%%%%%%%%%%%%%%%%%
%bgc{there is one odd exception: really wide (=spatially resolved) white dwarf plus white dwarf binaries, e.g. \href{https://ui.adsabs.harvard.edu/abs/2012MNRAS.421..202D}{Dobbie et al. (2012)}. In fact, from some very crude stats that I've seen, it could be 25\% of wide WD-WD binaries have one magnetic component. Maybe we should be more specific, "detached post-common envelope binaries (PCEBs)"? }
\section{Introduction}
Post Common Envelope Binaries (PCEBs) are systems containing at least one evolved star, in which the initial separation was close enough that the secondary was engulfed by the expanding envelope of the primary as it passed through the giant stages of its evolution. After double-degenerates, the second most common type of PCEBs are white dwarfs plus main-sequence companions \citep{toonenetal17-1}. The vast majority of the known systems of this kind contain a white dwarf and an M~dwarf companion \citep{rebassa-mansergasetal10-1}, although this is a selection effect as identification requires the white dwarf to be detectable against its main-sequence companion \citep{inightetal21-1}. For the remainder of this paper, we will use PCEB as synonymous for a close, detached binary consisting of a white dwarf and a main sequence companion that formed through a common envelope. The common envelope dramatically shrinks the binary separation, leaving most PCEBs with orbital periods of $\approx$\,0.1--5\,d \citep{nebotetal11-1}. After emerging from the common envelope, PCEBs lose angular momentum via gravitational radiation \citep{paczynski+sienkiewicz81-1}, and, if the main-sequence component possesses a convective envelope, magnetic wind braking \citep{rappaportetal83-1}. Consequently, the orbital separation decreases, eventually bringing the system into a semi-detached configuration, starting Roche-lobe overflow mass transfer from the companion onto the white dwarf~--~at this point, the PCEB will have evolved into a cataclysmic variable (CV).
%\grout{From their initial post common envelope orbits, such binaries lose angular momentum to gravitational wave radiation, decreasing in orbital period and thus separation until the secondary star overflows its Roche radius, starting mass transfer onto the white dwarf primary and forming a Cataclysmic Variable (CV).}
%\grout{Although many different configurations exist, by far the most common PCEBs consist of a white dwarf primary and an M~dwarf secondary \citep{rebassa-mansergasetal10-1}.}
Because these are stages along an evolutionary path, fundamental physical characteristics that are not expected to be affected by age or the mass transfer process should have the same distributions in both the PCEB and CV populations. This turns out not to be the case. Most prominently, the occurrence rate of white dwarfs with detectable magnetic fields is hugely discrepant between the two populations \citep{liebertetal05-2}.
In a volume-limited sample of 42 CVs, \citet{palaetal20-1} found that $36\pm7$\,per\,cent of the white dwarf primaries had magnetic field strengths $\gtrsim 1$\,MG. Magnetic CVs are divided into polars with $B \gtrsim 10$\,MG, where material from the secondary is accreted directly onto the poles of the white dwarf along the magnetic field lines, and intermediate polars with $0.1 \la B \lesssim 10$\,MG, in which an accretion disc is formed but truncated at the magnetospheric radius. Whereas the white dwarf spin periods of polars are locked to the binary period, the periods of intermediate polars are highly asynchronous, with some exhibiting spin periods of just a few tens of seconds \citep{patterson79-1,lopesdeoliveiraetal20-1}.
% \bgc{Not sure the next sentence is needed, may confuse more then anything else.} More complex structures such as rings and propellers can also be generated for certain combinations of magnetic field strength and white dwarf spin period \citep{nortonetal08-1}.
The lack of an accretion disc around polars allows the white dwarf photosphere to be detected, enabling robust characterisation of the white dwarf parameters via spectral fitting and of the magnetic field strength and structure via detection of Zeeman-split lines and/or cyclotron emission \citep[e.g.][]{schwope90-1, gaensickeetal04-1, ferrarioetal95-1}. Conversely, very few robust measurements of the characteristics and magnetic field strengths of white dwarfs in intermediate polars exist, as the white dwarf is typically outshone by the accretion flow and the fields are too low to be detected via cyclotron emission from the accretion regions on the white dwarf. Variable polarised emission has been detected in a few intermediate polars \citep[e.g. ][]{potter+buckley18-1}, providing loose constraints on their magnetic field strengths consistent with the expected $1 \lesssim B \lesssim 10$\,MG range. Detecting and characterising ``pre-intermediate polars'', that is, white dwarfs with magnetic fields of $B \lesssim 10$\,MG in detached binaries that have yet to begin mass transfer, would therefore provide a useful insight into the unobservable properties of the intermediate polar population, such as the magnetic field and white dwarf mass distributions.
However, in stark contrast to the CVs, magnetic white dwarfs in detached PCEBs are extremely rare. In a spectroscopic survey of over 1200 detached white dwarf plus M~dwarf binaries, \citet{silvestrietal07-1} found just two candidate magnetic white dwarfs (which to our knowledge have not yet been confirmed or refuted), while \citet{liebertetal15-1} found no magnetic white dwarfs in a sample of 1735 binaries. \bgc{Reading this, I wonder if Liebert / Silvestri had some selection effect, i.e. they probably \textit{excluded} systems with cyclotron bumps from their samples... maybe we could make this clear in the next paragraph? I added a corresponding sentence}
%Around 16 pre-polars,
%%% confusing if we call them here pre-polars, then later argue that they may be inactive CVs...
\bgs{There are, however, currently 16} PCEBs
with fields $ > 10$\,MG, known \citep{reimers+hagen00-1, reimersetal99-1, schmidtetal05-1, schmidtetal07-1, schwopeetal09-1, parsonsetal21-1}. \bgs{These systems were all identified because of the detection of either cyclotron or Balmer emission lines.} In the intermediate polar range, SDSS\,J030308.35+005444.1 \citep[$B = 8$\,MG,][]{parsonsetal13-1} is the only unambiguous detection. A second, ambiguous case is the prototype PCEB V471\,Tauri: A low S/N \bgc{define S/N upon first use} feature consistent with Zeeman splitting of the \ion{Si}{iii}\,1207.5\,\AA\ by a $\sim 350$\,kG field was found by \citet{sionetal98-2}, but extensive spectroscopic follow-up failed to detect Zeeman splitting at any other lines \citep{sionetal12-1}. \bgc{I find the structure here a bit confusing, as we talk about the 16 high-field systems, then mix in V471\,Tau, which may be magnetic, however, without a confirmation of the field strength, then J0303, then mention that all 17 systems may be dormant CVs ... which we pick up two paragraphs further down again (Recently, Schreiber). The next paragraph, field origin, is quite long/dense. Maybe this is a bit too much detail / confusing for the introduction? I'd suggest to focus on the observational statistics: \textit{confirmed}, i.e. with measured field strengths, there are 17 detached WD+dM binaries which all have in common that they have high ($\ga7$\,MG) fields and cool ($\la10\,000$\,K) white dwarfs, (maybe) a brief mention that there there has been subject to a lot of discussion about their actual nature ("LARP", low accretion rate polar vs. "PREP", pre-polar, e.g. \citet{vogeletal07-1}), and then highlight the fact that there is no \textit{confirmed} young, hot PCEB with a magnetic white dwarf. Band V471\,Tau to a footnote with the caveat that it may be magnetic, but the field strength could not be measured. Then cut down on detail on the field origin \& Matthias' paper, as we will pick this up later in the discussion. The introduction should just provide enough context that the reader gets excited about the discovery we present, not give a headache.} However, \citet{parsonsetal21-1} find that the main sequence companions of all 17 confirmed systems are close to overfilling their Roche lobes, implying that these systems may be dormant CVs which have already undergone a period of mass transfer, rather than true pre-polars. The same may also be true for V471\,Tauri \citep{obrienetal01-1, hussainetal06-1}.
The dearth of magnetic PCEBs requires a formation pathway, or pathways, that can efficiently produce magnetic white dwarfs in single systems \citep[with an occurrence rate of $\sim10$--20\,per\,cent,][]{kawkaetal07-1, hollandsetal15-1, holbergetal16-1}, wide, resolved binaries \citep[occurrence rate unconstrained but multiple systems known, e.g.][]{dobbieetal12-1}, and CVs, but that rarely generates fields in detached PCEBs. Several mechanisms have been suggested for magnetic field creation in white dwarfs. The fossil field hypothesis \citep{woltjer64-1, angeletal81-1, braithwaite+spruit04-1} suggests that the magnetic Ap and Bp stars represent the progenitors of magnetic white dwarfs, with their $\sim$kG strength fields enhanced to $\sim$MG fields by magnetic flux conservation as their radii shrink, although the space density of Ap/Bp stars is too low to account for all of the magnetic white dwarfs \citep{kawka+vennes04-1}. Alternatively the fields may be induced during the common envelope phase \citep{toutetal04-1, nordhausetal11-1}. \citet{wickramasingheetal14-1} and \citet{briggsetal18-1} propose a model in which interactions between the two stars and the envelope induce strong differential rotation in the primary, generating a dynamo that enhances a pre-existing weak field. The common envelope may end with the merger of the two objects, leaving an isolated magnetic white dwarf \citep{briggsetal15-1}, which would explain the higher average mass of magnetic white dwarfs compared with the population of non-magnetic white dwarfs \citep{ferrarioetal15-1}. However \citet{bellonietal20-1} found multiple issues with this pathway when compared with the observed population of PCEBs and CVs, in particular that the model produced too many magnetic white dwarfs, and that the predicted magnetic fields were too weak. However, the fossil field, common envelope and merger scenarios all imply that a white dwarf in a close binary emerges from the common envelope with its magnetic field in place, and therefore none of them explain the dearth of magnetic white dwarfs in detached binaries. \citet{iserneral17-1} propose instead that the magnetic field is generated during the crystallisation process as the white dwarf cools. This has the advantage of occurring well after the common envelope, but it occurs too late to explain the magnetic CVs.
Recently, \citet{schreiberetal21-1} proposed a scenario that more accurately reproduces the observed magnetic white dwarf population. In their model, non-magnetic PCEBs evolve into CVs, starting mass transfer via Roche Lobe overflow and spinning up the white dwarf. If the white dwarf core has begun to crystallize, the spin-up combines with convection in the core to produce a strong magnetic dynamo. As the white dwarf magnetic field becomes stronger, it couples with the magnetic field of the secondary, braking the white dwarf until it is synchronised with the orbital period. The resulting excess angular momentum is transferred to the secondary, causing it to detach from its Roche lobe. The system will then contain a cool magnetic white dwarf accreting the wind of a detached companion, i.e. matching the characteristics of the observed pre-polar systems. The orbit of the secondary will continue to decay via gravitational wave radiation and magnetic braking until it again fills its Roche lobe, resuming mass transfer and becoming a polar (or intermediate polar). Single magnetic white dwarfs, on the other hand, are produced via the fossil field or common envelope pathways described above, which require a stellar merger either on the main sequence (to form an Ap/Bp star) or during the common envelope. A PCEB containing a young magnetic white dwarf would have had to have started out as a triple system, where two stars merged to form the magnetic white dwarf, and then underwent a common envelope phase with the third star to form the PCEB. The fine-tuned initial configuration required (and triple systems being much rarer than binaries in general) explains the rarity, and hence the hitherto non-detection, of young magnetic white dwarfs in PCEBs. \bgc{see above, this paragraph feels too long for the introduction to "CC\,Cet has a magnetic white dwarf", basically a full summary of Matthias' paper ... which doesn't apply to CC\,Cet}
Here we present the first detection of a young magnetic white dwarf in a PCEB. CC\,Cet (PG 0308+096) was identified as a post common envelope binary by \citet{safferetal93-1} via radial velocity measurements of the H$\alpha$ line, with a $\approx 6.9$\,h orbital period confirmed by \citet{somersetal96-2}. The system consists of a low-mass white dwarf ($\approx 0.4$\,\Msun) with effective temperature \Teff\,$\approx 25\,000$\,K and an M4.5--5 dwarf secondary \citep{tappertetal07-2} at a distance of $121\pm 1$~pc \citep{gaia18-1} \bgc{update to EDR3?}. We have obtained ultraviolet spectroscopy of the white dwarf component, revealing Zeeman-split absorption lines induced by a 600--700\,kG magnetic field, indicating that CC\,Cet is a young, low-mass pre-intermediate polar.
The paper is arranged as follows: Section \ref{sec:obs} describes the observations of CC\,Cet; Section \ref{sec:models} presents our model fitting process to the spectra to measure the white dwarf atmospheric parameters and magnetic field characteristics; in Section \ref{sec:dis} we model the evolution of CC\,Cet and discuss the implications of our results for the study of magnetic white dwarfs in binaries. We conclude in Section \ref{sec:conc}.
\section{Observations}
\label{sec:obs}
\subsection{\textit{Hubble Space Telescope}}
\begin{figure*}
\centering
\includegraphics[width=2\columnwidth]{cc_cet_cos.pdf}
\caption{Full COS G130M spectrum of CC\,Cet obtained on 2018~February~01. The spectrum has been smoothed by a 5-point boxcar, matching the oversampling of the COS detector. The best-fit model atmosphere spectrum is overplotted in orange. Rest wavelengths of the C and Si absorption lines discussed in the text are labeled, and interstellar absorption lines are marked with green dashed lines.}
\label{fig:cos_spec}
\end{figure*}
CC\,Cet was observed with the Cosmic Origins Spectrograph \citep[COS,][]{greenetal12-1} onboard the \textit{Hubble Space Telescope} (\textit{HST}) as part of program ID 15189 (PI Wilson). Observations were obtained on 2018~February~1 and 2018~July~22 with exposure times of 1865\,s each, using the G130M grating with a central wavelength of 1291\,\AA. We abbreviate these as the Feb\,18 and Jul\,18 \bgc{confusing use of the dates: 18 refers to 2018, not the day? Above we have YYYY MM DD. Also, we do later talk about the ``2018 February data'', e.g. Sect. 2.3, or the caption of Fig.\,1. Go through the paper, and make sure we refer to the two HST spectra in a consistent way. If we need a shorthand, I'd suggest 2018~Feb and 2018~Jul} spectra respectively throughout. The spectra were reduced using the standard \textsc{calcos} tools. Figure \ref{fig:cos_spec} shows the Feb\,18 spectrum with the absorption lines discussed below marked. Due to the combination of the radial velocity shifts induced by the binary orbit (see Section \ref{sec:rotation}) and the magnetic splitting discussed below, we did not attempt to coadd the two spectra, instead analysing each one separately.
Of the other targets observed in program 15189, we use LM\,Com as a comparison example of a non-magnetic white dwarf with a similar \Teff\ and surface gravity \logg\ to CC\,Cet in several plots below. LM\,Com was observed on 2017~December~17 with an exposure time of 1815\,s and otherwise the same details as the CC\,Cet observations.
%We produced light curves by using the \texttt{costools splittag} routine to split the time-tag files into 30\,s bins, from which spectra were extracted and integrated to obtain the flux time series.
%Mention the other stars here?
\subsection{\textit{TESS}}
\defcitealias{lightkurve18-1}{Lightkurve Collaboration, 2018}
%\begin{figure*}
% \centering
% \includegraphics[width=2\columnwidth]{cc_cet_tess.pdf}
% \caption{Left panel: \textit{TESS} FFI photometry of CC\,Cet with 30\,m cadence. Right panel: Lomb-Scargle periodogram of the \textit{TESS} light curve, clearly showing the binary orbital period. The precise period and amplitude were measured via a sinusoidal model fit to the light curve.}
% \label{fig:tess}
%\end{figure*}
\begin{figure}
\centering
\includegraphics[width=\columnwidth]{cc_cet_tess_20s.pdf}
\caption{Top panel: \textit{TESS} photometry of CC\,Cet with 20\,s cadence (blue), with a model fit in orange. Middle panel: Lomb-Scargle periodogram of the \textit{TESS} light curve, clearly showing the binary orbital period but with no evidence for any other periodicities. The orange dashed line shows the 0.01\% false alarm probability. Bottom panel: Phase-folded light curve (blue) and sine model fit (orange), repeated twice for clarity. The residuals are shown as an O-C calculation.}
\label{fig:tess}
\end{figure}
CC\,Cet was observed by the \textit{Transiting Exoplanet Survey Satellite} (\textit{TESS}, TIC 337219837) in Sector 4 (2018~October~19--2018~November~14) at 30\,min cadence and again in Sector 31 (2020~October~12--2020~November~22), for which both 2\,min and 20\,s cadence data was returned (GO 3124, PI Hermes). Figure \ref{fig:tess} shows the results of our analysis of the 20\,s data using \textsc{Lightkurve} \citepalias{lightkurve18-1}. The false alarm probability was calculated following the recipe from \citet{belletal19-1}. The light curve shows clear sinusoidal variation with a period of $6.88233\pm0.00045$\,h, in agreement with the orbital period of the binary system measured by \citet{somersetal96-2}. The photometric modulation has a constant amplitude, confirming that it is produced by irradiation of the M\,dwarf, which varies on the orbital period, rather than ellipsoidal modulation, which would induce an asymmetric double-peaked light curve.
The results from the 20\,s data were double-checked against a 30\,min cadence light curve extracted from the Sector 4 data using the \texttt{eleanor} package \citep{feinsteinetal19-1}, with no evidence found for significant changes in either the
period or the amplitude of the modulation. Inspecting the power spectrum of the 20\,s cadence light curve, we find no evidence for additional periods between 40\,s and $\approx14$\,d, adopting the amplitude corresponding to the 0.01\,per\,cent false alarm probability (0.25\,per\,cent) as an upper limit. We therefore detect no evidence for flux modulations induced by the white dwarf rotation (see Section \ref{sec:rotation}), nor any evidence that the M\,dwarf rotation is not tidally locked to the orbital period. Splitting the light curve into 0.5\,d chunks, we found that the amplitude remained stable to within 1\,$\sigma$ over the sector, and hence we do not detect evidence for modulation due to spots on the secondary star and/or differential rotation, as seen in V471\,Tauri by \citet{kovarietal21-1}. A visual inspection of the 20\,s light curve found no significant flare events.
% TIC = 337219837
\subsection{\textit{XMM-Newton}}
We obtained X-ray observations of CC Cet with the {\em XMM-Newton} (\textit{XMM}) space telescope timed to overlap the two \textit{HST}/COS visits: on 2018 February 1 at UT23:05:40 for a nominal exposure time of 5127s; and on 2018 July 22 at UT03:56:09 for a nominal exposure time of 15423s. The EPIC pn data were obtained in Large Window mode using the thin filter, with MOS1 and MOS2 employing Partial Window mode and the Medium filter. Data were processed using standard extraction methods within the \textit{XMM} {\tt Science Analysis System} version 18.0.0 to extract images, light curves, and spectra.
Unfortunately, both observations were severely afflicted with background flares, the first to such an extent that only a few hundred seconds of exposure were useful. By-eye inspection of those data revealed no obvious signs of a strong source, and so in the following we ignore the scant remaining 2018 February data. For the second observation, 6011s and 7828s of useful exposure time were retrieved for pn and MOS detectors, respectively. The pn image in the vicinity of the source position is illustrated in Figure~\ref{f:pn}.
\begin{figure}
\centering
\includegraphics[width=\columnwidth]{ds9.pdf}
\caption{The {\it XMM-Newton} image in the pn detector of the vicinity around CC\,Cet. The red circle is centered on the position of CC\,Cet and has a radius of 15~arcsec. \bgc{mention that no source is detected(?)?}}
\label{f:pn}
\end{figure}
The MOS1 and MOS2 data were combined prior to analysis. Source counts were extracted from both pn and MOS data using a circular region with a radius of 15~arcsec enclosing approximately $70\pm 5$\%\ of the energy. This size of region was chosen because the \textit{XMM} point spread function has very extended wings and enlarging the extraction region to encircle a greater fraction of the energy comes at the expense of significantly increasing the background. An annular region of area greater than ten times the source region and centered on the source position was used for estimating the background rate. The resulting count rates are listed in Table~\ref{t:xmm}. While the source appears at a level greater than the background in both pn and MOS, neither represents a significant detection at greater than the $3\sigma$ level.
\begin{table}
\setlength{\tabcolsep}{4pt} %% reduce space between columns
%\centering
\caption{Summary of {\it XMM-Newton} results}
\begin{tabular}{lcc}
\hline
Parameter & pn & MOS\\
\hline
Net exposure (s) & 6011 & 7828 \\
$15\arcsec$ radius count rate (count~ks$^{-1}$) & $4.99\pm 0.91$ & $3.45\pm 0.66$ \\
Scaled background (count~ks$^{-1}$) & $3.17\pm 0.18$ & $1.87\pm 0.13$\\
Net Source rate (count~ks$^{-1}$) & $1.82\pm 0.93$ & $1.58\pm 0.66$ \\
$L_X$ at $10^7$~K $(10^{27}$ erg s$^{-1}$)$^a$ & $6.1\pm 2.3$& $10.8\pm 4.6$\\
\hline
\end{tabular}
\label{t:xmm} \\
{\footnotesize $^a$X-ray luminosity assuming an optically-thin plasma radiative loss model with solar metallicity and an interstellar absorbing column of $2\times 10^{20}$~cm$^{-2}$ (see text).}
\end{table}
\subsection{VLT/UVES}
CC\,Cet was observed with the Ultraviolet and Visual Echelle Spectrograph \citep[UVES,][]{Dekkeretal00-1} on the Very Large Telescope (VLT) as part of the Supernovae Type Ia Progenitor Survey \citep[SPY,][]{napiwotzkietal20-1, koesteretal09-2}. Two spectra were obtained on 2001~February~7--8 and cover the wavelength range 3281--6686\,\AA\ with $R\approx21\,000$. We retrieved both spectra as fully calibrated data products from the ESO Archive Science Portal\footnote{\url{http://archive.eso.org/scienceportal/home}}.
\section{Modelling}
\label{sec:models}
\subsection{White dwarf characteristics}
\label{sec:wdparams}
Figure \ref{fig:cos_spec} shows the Feb\,18 COS G130M spectrum of CC\,Cet. The spectrum is typical of white dwarfs in this temperature range \citep{koesteretal14-1}: dominated by the broad \ion{H}{i}\,1215.67\,\AA\ Lyman $\alpha$ line, along with a mixture of narrow and deep interstellar lines and broader, shallower photospheric lines. We detect no contribution from emission lines produced by the M\,dwarf, which is unsurprising given that the lack of flares in the \textit{TESS} light curve indicates that the M\,dwarf is relatively inactive. The spectrum shows photospheric absorption lines of \Ion{Si}{iv}, implying that the effective temperature (\Teff) \bgc{see above, we used \Teff\ before it is defined here} of the white dwarf is higher than 25000\,K. \bgc{set numbers as XX\textbackslash{,}XXX ... check throughout the paper}
To estimate the atmospheric parameters of the white dwarf in CC\,Cet, i.e. \Teff\ and the surface gravity (\logg), we fitted the continuum of the COS spectroscopy with synthetic models using the Markov Chain Monte Carlo (MCMC) technique. The Eddington flux ($f_\text{Edd}$) of the models was scaled as
\begin{equation}
F_{\rm obs} = 4\,\pi\,\left(\Rwd \times \Pi\right)^{2}\,\times\,f_{\rm Edd}(\Teff, \logg),
\label{eq:model}
\end{equation}
where $\Pi$ is the parallax and \Rwd\ is the white dwarf radius. \Rwd\ is a function of \logg\ and \Teff\ via the white dwarf mass-radius relation. We used the mass-radius relation for white dwarfs with hydrogen-rich atmospheres by interpolating the cooling models from \citet{fontaineetal01-1} with thick hydrogen layers of $M_{\rm H}/\Mwd=10^{-4}$, which are available from the University of Montreal website\footnote{\href{http://www.astro.umontreal.ca/~bergeron/CoolingModels}{http://www.astro.umontreal.ca/$\sim$bergeron/CoolingModels}, \citet{bergeronetal95-2, holberg+bergeron06-1, tremblayetal11-2, kowalski+saumon06-1}.}.
In addition, the models were corrected for reddening ($E(B-V)$) using the extinction parameterization from \citet{fitzpatrick99-1}. In summary, the parameters to be fitted are: \Teff, \logg, $\Pi$, and $E(B-V)$.
%priors
We set a flat prior on $\Pi$ using the \textit{Gaia} DR2, parallax for CC\,Cet \citep[$\Pi=8.2381\pm0.0758$\,mas, \textit{Gaia} source id~=~15207693216816512][]{gaia18-1}, which corresponds to a distance of $D=121.39\pm1.12$\,pc\footnote{Note this analysis was carried out before the release of \textit{Gaia}\, EDR3, but the improvement in astrometric accuracy from DR2 to DR3 was small so will not noticeably change the parameters given here.}, and forced the fits to find the best parallax value within $1\sigma$. We set a Gaussian prior on the reddening of $E(B-V)=0.012 \pm 0.015$\,mag based on the measurement from the STructuring by Inversion the Local Interstellar Medium (stilism)\footnote{\href{https://stilism.obspm.fr/}{https://stilism.obspm.fr/}, \citet{lallementetal14-1, lallementetal18-1, capitanioetal17-1}.} at a distance of 120\,pc.
%Regarding \Teff\ and \logg, we constrained their values within the limits of the grid of synthetic models for white dwarfs.
\bgs{\Teff\ and \logg\ were constrained to the values covered by our grid of model spectra.}
We used a grid of synthetic white dwarf models computed with
%the latest version
%%% by now probably the second/third-latest ;)
an updated version of the code described in \citet{koester10-1}. The models have a pure hydrogen atmosphere with the mixing-length convection parameter set to 0.8. The grid spans
%effective temperatures of
$\Teff=10\,000$--$35\,000$\,K in steps of 200\,K, and
%surface gravities in the range of
$\logg=7.0$--$9.0$\,dex in steps of 0.1\,dex.
Earth airglow emission lines at Lyman\,$\alpha$, as well as metal absorption lines from the interstellar medium (Table\,\ref{tab:ISlines}) and the white dwarf photosphere (\ion{C}{iii}, \ion{Si}{ii}, \ion{Si}{iii}, \ion{Si}{iv}; see below), were masked out during the fitting process (see Figure \ref{fig:cos_spec}).
We used the python-based \texttt{emcee} MCMC method \citep{foreman-mackeyetal13-1}, with 100 walkers sampling the parameter space over 10\,000 iterations. The likelihood function was defined as $-0.5\chi^{2}$. In general the walkers converged quickly; we therefore removed the first 250 steps from the chain. The samples of \Teff, \logg, and $E(B-V)$ follow normal distributions, except for the parallax samples, which clustered towards the lower tail of the distribution. While the parallax was tightly constrained during the fits to be within $\pm1\sigma$ from the \textit{Gaia} average value, the results hint that larger distances are required to improve the far-ultraviolet spectroscopic fits of CC\,Cet. For the normal distributions, we adopted the median as the best value and the 15.9$^{\rm th}$ and 84.1$^{\rm st}$ percentiles as the $1\sigma$ uncertainties. The intrinsic uncertainties from the MCMC method are very small and are purely statistical. The results are $\Teff=25\,245^{+18}_{-19}$\,K, $\logg=7.606^{+0.005}_{-0.004}$\,dex, $E(B-V) = 0.0183\pm0.0005$\,mag for the Feb\,18 COS spectrum and $\Teff=25\,162^{+19}_{-20}$\,K, $\logg=7.564\pm0.005$\,dex, and $E(B-V)=0.023\pm0.0005$\,mag for the Jul\,18 spectrum. We computed the mean and standard deviation of the two estimates of each parameter from the fits to account for systematic errors. These best-fit values are quoted in Table \ref{tab:characteristics}. The mass and radius of the white dwarf were computed using the measured \Teff\ and \logg\ as inputs to the mass-radius relation described above. We generated samples containing 10\,000 data points \bgc{not 100\% clear, this refers to the last 10\,000 walkers? ``We compute the best-fit values and their uncertainties from the distribution of the last 10\,000 samples of the MCMC chain?''} each for \Teff\ and \logg\ with normal distributions of $\Teff=25\,203\pm42$\,K and $\logg=7.58\pm0.02$\,dex, respectively. We computed the mass-radius relation for these distributions, resulting in two normal distributions for the mass and radius described by $\Mwd=0.441\pm0.008$\,\Msun\ and $\Rwd=0.0179\pm0.0003$\,\Rsun.
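% Illustrative sketch (kept commented out) of the emcee fit described above. It assumes
% hypothetical helpers model_flux(teff, logg), scale_to_earth() (Eq. 1) and redden()
% (Fitzpatrick 1999 extinction); the priors and likelihood follow the description in the text.
%
% import numpy as np
% import emcee
%
% def log_prior(theta):
%     teff, logg, plx, ebv = theta
%     if not (10000 < teff < 35000 and 7.0 < logg < 9.0):
%         return -np.inf                          # stay within the model grid
%     if abs(plx - 8.2381) > 0.0758:              # flat prior within 1 sigma of Gaia DR2
%         return -np.inf
%     return -0.5 * ((ebv - 0.012) / 0.015)**2    # Gaussian prior on E(B-V)
%
% def log_likelihood(theta, wave, flux, err):
%     teff, logg, plx, ebv = theta
%     model = scale_to_earth(model_flux(teff, logg), plx)   # hypothetical helpers
%     model = redden(wave, model, ebv)
%     return -0.5 * np.sum(((flux - model) / err)**2)
%
% def log_prob(theta, wave, flux, err):
%     lp = log_prior(theta)
%     return lp + log_likelihood(theta, wave, flux, err) if np.isfinite(lp) else -np.inf
%
% ndim, nwalkers = 4, 100
% p0 = np.array([25000.0, 7.6, 8.24, 0.01]) + 1e-3 * np.random.randn(nwalkers, ndim)
% sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob, args=(wave, flux, err))
% sampler.run_mcmc(p0, 10000, progress=True)
% chain = sampler.get_chain(discard=250, flat=True)   # drop burn-in, as in the text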
% MCMC Results: ldlc01010_x1dsum.dat
%parallax = 0.0081629936426 1.1386859242300956e-06 5.126433676987813e-07
%teff = 25244.9227415 17.9079195000013 18.750862128195877
%logg = 7.6059399006 0.004672911648049727 0.004471602860889767
%ebv = 0.01835252453 0.00045118159385819714 0.0004869671508149036
% MCMC Results: ldlc51010_x1dsum.dat
%parallax = 0.00816302408005 1.1321386424404528e-06 5.461089025091126e-07
%teff = 25161.823765 18.635903999998845 20.029930000004242
%logg = 7.5639907557 0.0047884727672906635 0.004805631100000873
%ebv = 0.022963178083 0.0004979210313211004 0.0005040809228204005
\begin{table}
\setlength{\tabcolsep}{4pt} %% reduce space between columns
\centering
\caption{Characteristics of the CC\,Cet system.
References: 1. This work; 2. \citet{gaia18-1}; 3. \citet{safferetal93-1}; 4. \citet{tappertetal07-2}; 5. \citet{somersetal96-2}.}
\begin{tabular}{llr}
\hline
Parameter & Value & Reference \\
\hline
$T_{\mathrm{eff}}$\,(K) & $25203\pm42$ & 1\\
$\log g$\,(cm\,s$^{-2}$) & $7.58\pm0.02$ & 1\\
$E(B-V)$\,(mag) & $0.021\pm0.002$ & 1\\
White dwarf Mass (\Msun) & $0.441\pm0.008$ & 1\\
White dwarf Radius (\Rsun) & $0.0179\pm0.0003$ & 1\\
Parallax (mas) & $8.2381\pm0.0758$ & 2 \\
Distance (pc) & $121.4\pm1.1$ & 1\\
Magnetic field strength (kG) & 600--700 & 1 \\
$v \sin i$ (km\,s$^{-1}$) & $\sim 40$ & 1\\
Secondary Mass (\Msun) & $0.18\pm0.05$ & 3\\
Secondary Spectral Type & M4.5--5 & 3, 4 \\
Orbital period (\textit{TESS}, h) & $6.88233\pm0.00045$ & 1\\
Ephemeris (\textit{TESS}, TJD) & $2459157.16489\pm0.00052$ & 1 \\
Binary inclination ($^{\circ}$) & $35\pm5.5$ & 5 \\
\hline
\end{tabular}
\label{tab:characteristics}
\end{table}
\subsection{Spectral lines in the COS spectra of CC\,Cet}
The COS spectra of CC\,Cet contain multiple absorption lines of both interstellar and photospheric origin. The interstellar lines are invariably due to resonance lines of neutral or singly-ionised abundant elements such as C\,{\sc ii}, N\,{\sc i}, O\,{\sc i}, Si\,{\sc ii}, and S\,{\sc ii}. Because the observed interstellar lines arise only from the lowest ground state level, it can sometimes happen that in a multiplet of resonance lines which is also present in the photospheric spectrum, only some of the lines of the multiplet are contaminated by interstellar scattering. This allows the uncontaminated photospheric lines of such a multiplet to be reliably modelled without interference from the interstellar lines.
The strongest photospheric lines present in the COS spectrum of CC\,Cet are primarily due to C\,{\sc iii} (six lines at 1175\,\AA), Si\,{\sc iii} (six lines between 1294 and 1303\,\AA), and Si\,{\sc iv} (two lines at 1393 and 1402\,\AA). There are also two blended photospheric lines of Si\,{\sc ii} at 1265\,\AA.
\subsection{Magnetic field}
\begin{figure}
\centering
\includegraphics[width=8 cm]{siii_lines.pdf}
\caption{The \ion{Si}{ii} 1264.738\,\AA\ line in the Feb\,18 spectrum (blue) compared with a model fit (orange). The line is Zeeman-split into a central $\pi$ and outer $\sigma$ components.}
\label{fig:siii_lines}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=8 cm]{siiv_lines.pdf}
\caption{Silicon \ion{Si}{iv} lines in the two \textit{HST}/COS spectra of CC\,Cet, and one spectrum of LM Com, another PCEB with a similar $T_{\mathrm{eff}}$ and $\log g$. The difference between the Zeeman split lines in CC\,Cet and the non-magnetic LM\,Com is readily apparent. The orange line shows the best-fit magnetic model to the lines (in the case of LM\,Com corresponding to an upper limit on the field strength), and the dashed vertical lines show the rest wavelengths. The date and best-fit mean field modulus are given under each spectrum.}
\label{fig:siiv_lines}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=8 cm]{siiii_lines.pdf}
\caption{As Figure \ref{fig:siiv_lines} but for \ion{Si}{iii} lines around 1300\,\AA. The deep unmarked absorption features in the LM\,Com spectrum are interstellar \ion{O}{i} lines that have been removed from the spectrum of CC\,Cet. Each line in LM\,Com is replaced in the spectrum of CC\,Cet by a Zeeman triplet. Note the slight mis-match between the centroids of some of the lines in the model and the spectrum induced by the partial Paschen-Back effect.}
\label{fig:siiii_lines}
\end{figure}
\subsubsection{Discovery of field}
Closer examination of the photospheric lines reveals that they differ significantly from those in similar COS spectra. In particular, the Si\,{\sc ii}\,1264\,\AA\ resonance line (which is slightly blended with a much weaker line in the same multiplet, but not blended with any interstellar lines because it arises from a state a few hundred cm$^{-1}$ above the ground level) shows a clear triplet structure composed of three very similar components separated by about 0.5\,\AA\ (Figure \ref{fig:siii_lines}). The structure is strongly reminiscent of the appearance of the normal Zeeman triplet produced in many spectral lines by a magnetic field of tens or hundreds of kiloGauss (kG). The appearance of this feature strongly suggests that CC\,Cet has a magnetic field.
Furthermore, although the other strong photospheric lines do not show such obvious Zeeman splitting, they all look rather peculiar. The two \ion{Si}{iv} lines around 1400\,\AA, which normally show very similar Lorentzian profiles, instead are quite broad and different from one another (Figure \ref{fig:siiv_lines}). The six strong lines of \ion{Si}{iii} around 1300\,\AA\ appear as about 15 slightly weaker lines in the same wavelength interval (Figure \ref{fig:siiii_lines}). The \ion{C}{iii} lines at 1175\,\AA\ show only three strong lines instead of the usual five (Figure \ref{fig:ciii_ppe_obs}). As we shall show below, these effects can be produced by a magnetic field of hundreds of kG.
\subsubsection{Qualitative analysis of field}
To obtain further information about the magnetic field which appears to be present in CC\,Cet, we start by estimating the mean field modulus \bs, the value of the local field strength $|{\bf B}|$ averaged over the hemisphere visible at the time of observation. The observed splitting of the \ion{Si}{ii}\,1264\,\AA\ line is about 0.5\,\AA\ between each of the $\sigma$ components and the central $\pi$ component. This value may be used with the standard equation for the $\pi$ -- $\sigma$ separation $\Delta \lambda_{\rm Z}$ due to the normal Zeeman effect for an initial field strength estimate \citep[e.g.][]{Land92}:
\begin{equation}
\label{zeeman-splitting.eqn}
\Delta \lambda_{\mathrm{Z}}(\mbox{\AA}) = 4.67\times10^{-13} z \lambda_0^2 B,
\end{equation}
where $\lambda_0$ is the unperturbed wavelength of the line, wavelengths are measured in \AA\ units, $z = 1.43$ is the mean Land{\'e} factor of this line, and $B$ is the magnetic field strength in gauss ($10^4$\,G = 1\,T). Applying this equation to the Si\,{\sc ii} line, we deduce the presence of a field of $B \sim 500$\,kG.
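Explicitly, for the measured $\pi$--$\sigma$ separation of $\Delta \lambda_{\rm Z} \approx 0.5$\,\AA\ at $\lambda_0 \approx 1264.7$\,\AA\ (a sketch of the arithmetic using only the quantities quoted above):
\begin{equation*}
B \approx \frac{\Delta \lambda_{\rm Z}}{4.67\times10^{-13}\,z\,\lambda_0^2}
\approx \frac{0.5}{4.67\times10^{-13}\times1.43\times(1264.7)^2}\,\mathrm{G}
\approx 4.7\times10^{5}\,\mathrm{G} \approx 500\,\mathrm{kG}.
\end{equation*}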
A field of this strength is well within the regime of normal Zeeman splitting for transitions between two isolated atomic levels. However, for some of the multiplets of the light elements observed in CC\,Cet, the spacing between fine-structure levels of single spin-orbit (LS) terms is small enough that the level splitting by the Zeeman effect in a field of hundreds of kG is comparable to the unperturbed term level separation. Alternatively, one can say that the Zeeman splitting is so large that some of the outer Zeeman components approach or cross neighbouring lines of the multiplet. In this case, it is no longer correct to use the usual Zeeman splitting theory appropriate for lines formed between isolated levels. Instead, the magnetic field effect must be considered together with the fine structure splitting of the term. This case is known as the partial Paschen-Back effect, and we will discuss it below when we come to this situation.
%Our next step was to experiment with modelling spectral regions such as those identified above. These were modelled with a line synthesis code designed for modelling spectral lines in the presence of a magnetic field. We concentrated on the region of the Si\,{\sc ii} line at 1264.738\,\AA\ (weakly blended with a weaker line of Si\,{\sc ii}); the series of six low-excitation lines of Si\,{\sc iii} between 1294.545 and 1303.343\,\AA; and the two resonance lines of Si\,{\sc iv} at 1393.755\,\AA\ and at 1402.770\,\AA. All three regions appear to show magnetic alterations with component separation of the same order as seen in the Si\,{\sc ii} line. We also experimented with modelling the prominent C\,{\sc iii} multiplet at 1173--1178\,\AA, which arises from excited levels and therefore is completely photospheric.
\subsubsection{Simple dipolar magnetic field models}
Ideally, we would aim to obtain a low resolution map or model of the global structure of the magnetic field over the whole surface of CC\,Cet. In principle this is possible if a series of polarised spectra are obtained through the rotation period of the star, and these are modelled collectively. However, we have only two spectra, both of mediocre S/N, without polarisation information. The stellar rotation period is fast (see below) but unknown, so we are unable to determine the net viewing angles of the observations.
%In this situation we can only hope to extract rather limited information about the magnetic field from the spectra
%Thus the objective of our modelling was simply to try to identify generic magnetic field configurations that are consistent with the observed line profiles. Our goal was to extract as much information about the magnetic field of CC\,Cet as possible. At a minimum we wanted to show that a single model and field strength could account approximately for the various observed magnetically split line profiles.
The magnetic spectrum synthesis code used for this modelling was the code {\sc zeeman.f} \citep{landstreet88-1}, which performs a forward line profile computation starting from a specified stellar atmosphere model, a specified magnetic configuration over the entire stellar surface, relevant geometric parameters such as the inclination $i$ of the rotation axis and a range of projected rotation velocities, an abundance table for relevant elements, and a specified wavelength window and line list. The code solves the four coupled equations of polarised radiative transfer, and computes the expected emergent Stokes parameters $I$ (essentially the flux, normalised to the continuum for convenience, including emergent spectral line profiles), and the polarisation Stokes parameters $Q$ and $U$ (linear polarisation), and $V$ (circular polarisation). The output flux and polarisation profiles can then be compared to the observed spectra. One or more parameters (such as the abundance of a specific chemical element) can be iterated by the code to improve the agreement between computed and observed spectra.
Because of the limited data available for CC\,Cet we cannot fully constrain even a simple magnetic model such as a dipole field. Instead, we aimed to find a set of parameters that yield satisfactory fits to the lines in the modelled regions, and extract some general field properties from such models, such as a reasonably well-defined value of \bs, information about the extent to which different spectral lines provide concordant estimates, and possibly an estimate of \vsini. For this modelling we restricted the field structure to a dipole, or a dipole plus a weak parallel but opposing linear octupole that has the effect of reducing the range of local $B$ variations over the surface.
%Our first experiments led to rather limited success, because we failed to realise initially how much the partial Paschen-Back effect changes some line profiles of C and Si in fields of hundreds of kG. This effect is not implemented in our magnetic line synthesis code because it has normally been used either for considerably smaller fields, or for H lines which are well described by the normal Zeeman effect up to fields of the order of 1\,MG.
\subsubsection{\ion{Si}{iv}\,1400\,\AA\ multiplet}
\label{sec: siiv}
Figure \ref{fig:siiv_lines} shows the \ion{Si}{iv} lines near 1400\,\AA, which can be fit reasonably well with a variety of simple dipole models. The two lines of this multiplet share a common lower level, and the two upper states are separated by about 460\,cm$^{-1}$, so that the lines are separated by about 9\,\AA, much more than the typical Zeeman splitting at 500\,kG. Their splitting is thus accurately described by the normal Zeeman theory, and correctly computed by {\sc zeeman.f}. A typical fit of the lines in the spectral window around 1400\,\AA\ is shown in Figure\,\ref{fig:siiv_lines}. The strange spectral line profiles of the two Si\,{\sc iv} lines in this window are easily understood as the effect of rather different Zeeman splitting patterns of the two lines. The line at 1402\,\AA\ comes from a $^2$S$_{1/2}$ to $^2$P$_{1/2}$ transition, whose Zeeman splitting pattern has the two $\pi$ components almost as far apart as the two $\sigma$ components, and thus forms effectively a Zeeman doublet, with almost no absorption at the unperturbed line centre because there are no regions of field near 0\,kG. In contrast, the six Zeeman components of the 1393\,\AA\ $^2$S$_{1/2}$ to $^2$P$_{3/2}$ transition are pretty uniformly spaced through the profile, with the most displaced and weakest $\sigma$ components at the outer edges of the line profile. This splitting pattern leads to a roughly U-shaped overall profile. (For orientation, many simple LS-coupling Zeeman splitting patterns are shown schematically by \citealt{condon+shortley35-1}, Fig\,2$^{16}$ \bgc{not clear what the ${}^{16}$ refers to?}.) This region can be fit well with a wide variety of magnetic model parameters, provided that the mean field modulus is about 630\,kG (for the Feb\,18 spectrum) or 710\,kG (Jul 18 spectrum). The abundance of Si relative to H, $\epsilon_{\rm Si} = \log(n_{\rm Si}/n_{\rm H})$, is found from the combined best fit to the two lines to be about $-5.6 \pm 0.1$ for the Feb\,18 spectrum, and about $-5.7 \pm 0.1$ for the Jul\,18 spectrum.
\subsubsection{\ion{Si}{iii}\,1300\,\AA\ multiplet}
A second window that can be modelled fairly well in the Zeeman approximation is the set of six low excitation lines of \ion{Si}{iii} between 1294 and 1303\,\AA. These lines arise from transitions between two $^3$P terms, and a peculiarity of LS coupling leads to magnetically perturbed levels of non-zero $J$ having identical level separation with an anomalous Land{\'e} $g$-factor 1.5 (i.e. the level separation is 1.5 times larger than in the normal Zeeman separation). This splitting pattern replaces each of the single lines of the sextuplet by three lines, and the strength of the field just happens to be a value for which the added $\sigma$ components of the lines fall in between the central $\pi$ components (which lie approximately at the wavelengths of the magnetically unperturbed sextet), and replace the original lines with a forest of about 15 distinct lines, approximately equally spaced (two lines of the original sextet almost coincide in wavelength). This effect is shown in Figure\,\ref{fig:siiii_lines}, where the original lines of the multiplet (now $\pi$ components), have been marked.
In this set of lines, the separation between $\sigma$ and $\pi$ Zeeman components is comparable to the separation between lines of the sextet. Correspondingly, the magnetic splitting of some of the individual multiplet levels is comparable to the separations between both the lower and the upper term levels. In this situation, the simple weak-field Zeeman splitting is no longer an accurate description; instead the splitting is making the transition to Paschen-Back splitting, and both magnetic and fine structure splitting should be computed together. Partial Paschen-Back splitting is not built into {\sc zeeman.f}, so our computations assume that the usual expressions for the anomalous Zeeman effect apply. This approximation meant that the simple magnetic models that fit the Si\,{\sc iv} lines at 1400\,\AA\ did not produce good fits to the other strong lines.
%This led us to spend a lot of time exploring a variety of basically dipolar magnetic field structures. The main conclusion of this exploration was that the fit depended essentially on the value of the mean field modulus \bs\ of the model, and that a number of models having $\bs \approx 650$\,kG (for the first COS spectrum) or 720\,kG (for the second spectrum) all fit the 1300\,\AA\ window moderately well.
%Eventually we realised that many of our fitting problems throughout the stellar spectrum arise mainly because of the partial Paschen-Back effect rather than inadequate magnetic models. In fact, even the multiplet of Si\,{\sc iii} at 1300\,\AA, which is only mildly affected by this effect, nevertheless does exhibit clear symptoms of it.
\subsubsection{Partial Paschen-Back splitting}
The partial Paschen-Back effect, and how to compute its effects on the lines of a multiplet, is discussed extensively by \citet[][Sec. 3.4]{landideglinnocenti+landolfi04-1}. We obtained a {\sc fortran} program, {\sc gidi.f}, from Dr Stefano Bagnulo which was originally written by Prof. Landi Degl'Innocenti. This program solves the problem of the combined effect of magnetic and fine structure splitting of a single multiplet that is described by LS coupling. This approximation is appropriate for most light elements, including C and Si. A calculation of the splitting of the 3s3p $^3$P$^{\rm o}$ -- 3p$^2$ $^3$P \bgc{is the notation for the energy levels consistent with that used in 3.3.4.?} transition that leads to the sextet of lines at 1300\,\AA\ indicates that the most significant effect of the partial Paschen-Back effect at 600\,kG is to shift the $\pi$ components of the 1296.72\,\AA\ and 1301.15\,\AA\ lines by about 0.12\,\AA\ bluewards and redwards respectively. Exactly this effect is clearly observed in the discrepancy between our fit using only conventional Zeeman splitting, and the observations shown in Fig.\,\ref{fig:siiii_lines}. Otherwise, the simple Zeeman effect computation of {\sc zeeman.f} provides a reasonable fit, and allows us to refine the strength of the field observed.
%%When we assume a simple magnetic dipole but vary the geometrical parameters such as the inclination of the rotation axis to the line of sight, the angle between the magnetic axis and the rotation axis, and the rotational phase of observation, it is found that there are minor differences in the quality of the best fit. Again, dipoles of various inclinations and polar field strengths can be found to yield satisfactory fits to the spectral windows in the two available spectra provided that the values of \bs\ are close to the optimum values mentioned above, and in fact these optimum values of \bs\ can be made more precise by fitting the $\pi - \sigma$ spacings in this window. Varying the model with this constraint primarily alters the shapes of the sigma components, especially their asymmetry. We have finally selected a magnetic model for all computations that minimises the asymmetry of the sigma components, but the details of this model do not provide significant further useful information about the magnetic field structure.
%%It is visible in the figure that the Zeeman split wavelengths used in the computation do not fit all the original lines of the multiplet well. This particularly affects the positions of the $\pi$ components of the lines at 1296.73\,\AA\ and at 1301.15\,\AA. From the multiplet components shifted and the directions of wavelength shift relative to the $B = 0$ positions, it appears that the zero field positions of the two $^3$P$_0$ states involved in the sextet may be shifted slightly to lower energies relative to their two respective neighbouring higher energy $^3$P$_1$ and $^3$P$_2$ states by the field. However, as we have not tried to calculate the expected changes to the line wavelengths in detail, we have left these discrepancies in our computations.
The value of the Si abundance can also be determined from the model for the \ion{Si}{iii} window. We find values of $\epsilon_{\rm Si} \approx -6.1$ to $-6.3$. Note that this abundance is about a factor of three lower than the abundance level deduced from the \ion{Si}{iv}\,1400\,\AA\ lines. The origin of this difference is not known, but may represent an overabundance of the \ion{Si}{iv}/\ion{Si}{iii} ratio produced by non-LTE effects that cannot be predicted by our LTE code, an effect of vertical stratification \citep{koesteretal14-1}, or perhaps an effect of non-uniform distribution of Si over the stellar surface, as is found on upper main sequence magnetic stars.
\subsubsection{Other UV spectral lines}
\begin{figure}
\centering
\includegraphics[width=9 cm]{ciii_ppe_obs.pdf}
\caption{"Spaghetti diagram" showing wavelengths of individual magnetic subcomponents of the C\,{\sc iii} sextet at 1175\,\AA\ as a function of magnetic field strength, computed as a partial Paschen-Back case. Subcomponents are colour-coded as blue $\sigma$ (blue), $\pi$ (black), and red $\sigma$ (red). The COS flux spectrum (green) is plotted with the continuum set at about 600\,kG to show how main line features correspond with clustering of magnetic subcomponents.}
\label{fig:ciii_ppe_obs}
\end{figure}
The \ion{Si}{ii} line at 1265\,\AA\ (Figure \ref{fig:siii_lines}) is one of three resonance lines formed by the 3p $^2$P$^{\rm o}$ to 3d $^2$D transition. Two of the three lines nearly coincide at 1264.730 (strong) and 1265.023\,\AA\ (weak). The effect of the partial Paschen-Back effect is to shift all the components of the weaker line towards the blue, and those of the stronger line towards the red, so that radial velocities measured with the $\pi$ or $\sigma$ components of the stronger line in a field near 600\,kG will be systematically red shifted from radial velocities measured with lines unaffected by the partial Paschen-Back effect, such as those from the Si\,{\sc iv} doublet at 1400\,\AA. However, the normal Zeeman effect is not a bad approximation at 600\,kG, and models of this feature with {\sc zeeman.f} fit reasonably well assuming a field structure with $\bs \approx 550$\,kG (see Fig.\,\ref{fig:siii_lines}).
As discussed above, because the partial Paschen-Back splitting of lines is not implemented in our line synthesis code, we cannot produce an accurate model of the \ion{C}{iii} \,1175\,\AA\ multiplet.
%However,
\bgs{Therefore}, using {\sc gidi.f}, we have computed the variation of the line splitting as a function of magnetic field strength up to 800\,kG. Below about 200\,kG, the splitting of the individual lines of the multiplet follows closely the prediction of the Zeeman theory. However, by about 600\,kG, the splitting is converging on the Paschen-Back limit. In fact, the computed splitting at 600\,kG quite closely resembles the observed multiplet, with five groups of line components whose centroids coincide almost exactly with the positions of the observed components, and that agree qualitatively with the relative strengths of those components (Fig.~\ref{fig:ciii_ppe_obs}).
\subsubsection{H in the optical spectrum}
\label{sec:hlines}
\begin{figure}
\centering
\includegraphics[width=9 cm]{hbeta_obs_fit.pdf}
\caption{H$\beta$ from UVES spectrum of CC\,Cet (black) with overplotted fit to line core assuming approximately dipolar magnetic field with $\bs = 620$\,kG. The Zeeman triplet is clearly detected in spite of strong emission from the M~dwarf secondary star.}
\label{fig:hbeta_obs_fit}
\end{figure}
The archival UVES spectra cover the entire Balmer spectrum in the visible with resolving power of about 20\,000 and S/N of roughly 15--20. Balmer lines H$\alpha$ to H$\epsilon$ are clearly visible in the spectra. We have examined the cores of these lines for evidence of the magnetic field of CC\,Cet. Because of the low S/N and the presence of emission lines from the M~dwarf companion, especially strong in H$\alpha$ and H$\beta$, the magnetic splitting of the Balmer lines by the 700\,kG field is not immediately obvious, and was missed in searches for magnetic fields in the analysis of the SPY survey \citep{napiwotzkietal20-1, koesteretal09-2}. However, if we smooth the UVES spectra slightly and shift the two spectra to the same stellar radial velocity framework, the superposed H$\beta$ lines reveal fairly clear Zeeman triplet structure, with splitting that agrees closely with that expected from the ultraviolet lines. One spectrum with a model fit is shown in Figure \ref{fig:hbeta_obs_fit}.
\subsubsection{Rotation velocity}
\label{sec:rotation}
A remarkable feature of both the \ion{Si}{iii}\,1300\,\AA\ and \ion{Si}{iv}\,1400\,\AA\ lines is that, if the rotational velocity of the white dwarf is taken to be close to 0\,\kms, the computed patterns have deep, narrow $\pi$ components, especially in the 1393\,\AA\ line, which are not present in the observed profiles. The $\pi$ components of each line are not broadened by a variation of values of the local magnetic field strength $B$ over the visible hemisphere, so the broadening may instead be due to a rapid rotation of the white dwarf. By fitting the cleanest $\pi$ components we find \vsini$\approx 40 \pm 10$\,\kms.
CC\,Cet is part of a short-period binary system with an M4.5 main sequence star. The velocity semi-amplitude of the M\,dwarf is about 120\,\kms \citep{safferetal93-1}. Because the white dwarf has a mass very close to twice that of the M\,dwarf, its velocity semi-amplitude is about 60\,\kms, with the full range being covered in about 3.5\,h. During a single 0.5\,h COS exposure, the radial velocity of the white dwarf could therefore change by as much as 25\,\kms. This effect is not included in the explicit velocity broadening of the computed spectrum, but together with the resolving power and the small wavelength spread due to the partial Paschen-Back effect could account for a maximum line broadening of about 35\,\kms\ FWHM. The observed $\pi$ components of this multiplet are about 60\,\kms\ in FWHM in both of the COS spectra. Thus although the deduced value of \vsini\ may be slightly overestimated, it is very unlikely that \vsini\ is actually much smaller than 40\,\kms. With this lower limit to $v_{\rm eq} \sin i$ and the radius of CC\,Cet, we can obtain an upper limit to the spin period $P = 2 \pi R/v_{\rm eq} \la 2 \pi R/(v_{\rm eq} \sin i) \approx 2000$\,s.
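% Illustrative arithmetic for the spin period limit (not part of the formal analysis): adopting
% a representative radius of $R \approx 1.3\times10^{9}$\,cm for a $\approx0.44$\,\Msun\ white dwarf
% (an assumed value; the measured radius from Sect.~\ref{sec:wdparams} should be used for the actual
% estimate), $P \la 2\pi R/(v_{\rm eq}\sin i) \approx 2\pi(1.3\times10^{9}\,{\rm cm})/(4.0\times10^{6}\,{\rm cm\,s^{-1}}) \approx 2\times10^{3}$\,s,
% consistent with the limit quoted above.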
The spin period/orbital period ratio of CC\,Cet is therefore $\approx0.05$, consistent with the bulk of the intermediate polars with spin and orbital period measurements \citep{bernardinietal17-1}. The difference in the line profiles between the two spectra indicates that the magnetic field axis is inclined to the rotation axis and/or the metals are not evenly distributed across the white dwarf surface, so it may be possible to measure the rotation period from high cadence, high signal photometry or spectroscopy. No rotation signal is detected in the \textit{TESS} light curve, but the red \textit{TESS} bandpass is dominated by the M\,dwarf and may not be sensitive to subtle flux variations from the white dwarf. We attempted to produce time-series spectroscopy from the COS data using the \texttt{costools splittag}\footnote{\url{https://costools.readthedocs.io/en/latest/index.html}} routine to split the time-tag files into 30\,s bins, but the S/N became too low to reliably measure any periodic flux or absorption line variation.
\subsubsection{Results of the magnetic analysis}
The following conclusions can be drawn from our magnetic modelling efforts. (1) CC\,Cet has $v \sin i \approx 40$\,\kms, and therefore has a rotation period of about 2000\,s or less. The rotation of the white dwarf is not synchronised with the orbital motion. (2) The typical value of \bs\ on CC\,Cet is about 600--700\,kG. This value may be modestly different during the two COS observations. The deduced value of the field appears to be generally consistent with the splitting of all photospheric lines in the spectrum, although some observed lines (such as the sextet of lines of C\,{\sc iii} at 1175\,\AA) are split by the partial Paschen-Back effect in ways that our modelling code cannot reproduce accurately. (3) It appears, from the fact that the $\sigma$ components of the Si\,{\sc iii} sextet are hardly broader than the $\pi$ components, that the field is fairly homogeneous in local strength $|B|$ over the visible hemisphere; the local field strength probably varies mostly by less than $\pm 100$\,kG in magnitude. (4) Correspondingly, the fact that the centre of the 1402\,\AA\ line reaches almost to the continuum shows that there are no important regions of field strength close to zero on the visible surface. These facts are consistent with, but do not strongly require, a roughly dipole-like field, similar to those found in some other low field magnetic white dwarfs that have been modelled in detail \citep[e.g. WD\,2047+372: see][]{landstreetetal17-1}.
\subsection{X-ray Analysis}
\label{sec:x-rays}
Since there were too few X-ray events to perform any spectral analysis, we used the {\tt PIMMS} software\footnote{\url{https://heasarc.gsfc.nasa.gov/docs/software/tools/pimms.html}} version 4.11 to convert the pn and MOS count rates into incident X-ray flux, in order to estimate the source X-ray luminosity that might have given rise to the weak signal. This was done for the APEC optically-thin plasma radiative loss model \citep{fosteretal12-1} for the solar abundances of \citet{asplundetal09-1} and an intervening hydrogen column density of $2\times 10^{20}$~cm$^{-2}$. The latter was estimated from the distance of 121\,pc \citep{gaia18-1} and interpolation within the neutral hydrogen column density compilations of \citet{Linsky.etal:19} and \citet{Gudennavar.etal:12}.
The resulting X-ray luminosities in the 0.3--10\,keV band corresponding to the observed pn and MOS count rates are illustrated as a function of isothermal plasma temperature in Figure~\ref{f:xmmlx}. The luminosities for a typical active stellar coronal temperature of $10^7$\,K are also listed in Table~\ref{t:xmm}. Sensitivity of the results to the adopted absorbing column was examined by computing the analogous luminosities for values of $N_\mathrm{H}$ lower and higher by a factor of two, while sensitivity to metallicity was checked by computing luminosities for metallicity reduced by a factor of two. These cases are also illustrated in Figure~\ref{f:xmmlx}. By far the largest uncertainty is in the estimate of the X-ray count rates. Luminosities derived from the MOS data appear larger than those from pn, but again the uncertainties overlap for most of the temperature range expected for active stellar coronae.
\begin{figure}
\centering
\includegraphics[width=0.47\textwidth]{cccet.png}
\caption{The X-ray luminosities obtained from {\it XMM-Newton} pn and MOS count rates assuming the X-ray signal originates in a hot isothermal collision-dominated optically-thin plasma. The derived luminosities are shown as a function of isothermal plasma temperature. Uncertainty ranges due to Poisson statistics in the observed count rates, and to uncertainties in the intervening interstellar medium absorbing column are indicated by shaded regions. The sensitivity to plasma metallicity is very low and indicated for the pn case only. The saturated coronal X-ray emission level for a star of the spectral type of the CC\,Cet secondary is also illustrated.}
\label{f:xmmlx}
\end{figure}
Also shown in Figure~\ref{f:xmmlx} is the canonical X-ray saturation luminosity, $L_\mathrm{X}/L_\mathrm{bol}=10^{-3}$, corresponding to an M4.5~V star with a luminosity of $0.0040\mathrm{L}_\odot$ \citep{pecaut+mamajek13-1}. Our very tentative detection of CC\,Cet then lies essentially at the saturation limit. This is expected since the orbital period of the binary, and presumably the rotation period of the secondary M~dwarf assuming tidal synchronization, is well into the saturated regime that sets in at rotation periods shorter than approximately 20 days for a mid-M~dwarf \citep[e.g.][]{Wright.etal:11}.
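% For reference, the saturation level shown in Fig.~\ref{f:xmmlx} corresponds to
% $L_{\rm X,sat} \approx 10^{-3}\times0.0040\,\mathrm{L}_\odot \approx 10^{-3}\times0.0040\times3.8\times10^{33}\,{\rm erg\,s^{-1}} \approx 1.5\times10^{28}$\,erg\,s$^{-1}$
% (taking $\mathrm{L}_\odot = 3.8\times10^{33}$\,erg\,s$^{-1}$), i.e. of the same order as the
% luminosities inferred from the tentative detection.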
\section{Discussion}
\label{sec:dis}
\begin{figure}
\centering
\includegraphics[width=8 cm]{teffvlogg.pdf}
\caption{Atmospheric parameters of the white dwarfs in PCEBs observed with \textit{HST}/COS, demonstrating that CC\,Cet (orange) is not an outlier in either $T_{\mathrm{eff}}$ or $\log g$.}
\label{fig:tefflogg}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=8 cm]{teffvB.pdf}
\caption{Temperatures and magnetic field strengths of known magnetic white dwarfs in PCEBs compiled by \citet{ferrarioetal15-1} and \citet{parsonsetal21-1} (blue) compared with CC\,Cet (orange). An approximate cooling time scale is given on the top axis. CC\,Cet is clearly an extreme outlier in both \Teff\ and $B$ \bgc{see my comment in the introduction, we do (should) not include V471\,Tau in the sample of known magnetic PCEBs.}.}
\label{fig:teffB}
\end{figure}
\subsection{CC\,Cet in context}
%two formation pathways exist, one that can induce weak magnetic fields during or soon after the common envelope phase, as in CC\,Cet, and another that induces stronger fields as the systems age to produce the pre-polar population. \bgc{I agree... The difference in \Teff\ and $B$ is huge, and Matthias has a paper in press where he explains the cool magnetic systems as being linked to a field generated when they crystallise under rapid rotation. I can send you a copy, and we can simply refer to that as one possible mechanism explaining those systems. But I have no idea what we can say about the origin of the field in CC\, Cet itself...}
%\subsection{Occurrence Rate}
Along with CC\,Cet, program 15189 observed 9 other PCEBs containing a white dwarf and an M~dwarf with the same COS setup. Archival COS\bgs{/G130M} spectra of three additional PCEBs exhibiting metal absorption lines are also available (\bgs{Program \#XXXXX}). Although a full analysis of these observations will be left for a future publication, we can use them to estimate an occurrence rate for magnetic PCEBs. All the other 12 white dwarf spectra show metal absorption lines, with no detectable magnetic field (\bs\ $\lesssim100$\,kG). Although the PCEBs were selected for practicality of observation, rather than to provide an unbiased sample, a comparison of the white dwarf atmospheric parameters (Figure \ref{fig:tefflogg}) demonstrates that CC\,Cet is typical of the sample. Using the \texttt{astropy.stats.binned\_binom\_proportion} function we calculate an occurrence rate for magnetic white dwarfs of $7.7^{+14}_{-2.5}$\,per\,cent, where the uncertainties are the $1\sigma$ confidence boundaries. This stands in stark contrast to the 0/1735 ($<0.11$\,per\,cent) and 2/1200 ($0.17^{+0.22}_{-0.05}$\,per\,cent) rates found by \citet{liebertetal15-1} and \citet{silvestrietal07-1} respectively. The reason for the discrepancy is probably the different waveband used here compared with the two previous studies: the ultraviolet wavelength range is riddled with sharp metal absorption lines that are sensitive tracers of even relatively low fields, which will go unrecognised in low-resolution optical spectra such as those obtained by the Sloan Digital Sky Survey, and are difficult to detect even in high-resolution spectra of the Balmer lines (see Section\,\ref{sec:hlines}, Figure \ref{fig:hbeta_obs_fit}). It is interesting to note that the fraction of intermediate polars among the CVs in the nearly complete 150\,pc sample of \citet{palaetal20-1} is $3/42$ or $7.1^{+6.6}_{-2.2}$\,per\,cent, i.e. consistent with the incidence of magnetic white dwarfs in the COS PCEB sample derived above.
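% A minimal sketch (illustrative only) of the binomial statistics behind the quoted occurrence
% rate, i.e. 1 magnetic white dwarf out of 13 PCEBs with suitable COS spectra. It uses
% astropy.stats.binom_conf_interval rather than the binned_binom_proportion call used in the
% analysis, so the exact bounds depend on the chosen interval method:
%
%   from astropy.stats import binom_conf_interval
%   lower, upper = binom_conf_interval(1, 13)   # default ~1-sigma (Wilson) interval
%   rate = 1 / 13                               # ~0.077, i.e. ~7.7 per cent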
Figure \ref{fig:teffB} compares the effective temperature and magnetic field strength of CC\,Cet with those of the magnetic PCEBs compiled by \citet{ferrarioetal15-1} and the latest discoveries by \citet{parsonsetal21-1}. CC\,Cet is clearly an extreme outlier, being both much hotter (and therefore younger) than the rest of the sample and having a much weaker magnetic field strength, even in comparison to the other confirmed pre-intermediate polar SDSS\,J030308.35+005444.1. CC\,Cet is an outlier by $\approx6\sigma$ in both $\log (B)$ and \Teff, where the stars that only have upper limits on \Teff\ were treated as being at the upper limit.
%\bgc{ah... yes: so there is a point to be made, maybe: besides the absence of magnetic pre-CVs, their average WD mass is way lower than that of CVs (again Zorotovic et al. 2011), which raises the question if what we (used to) call pre-CVs are really pre-CVs. Some theories are that those systems may not live long as CVs (\href{https://ui.adsabs.harvard.edu/abs/2016MNRAS.455L..16S}{Schreiber et al. 2016}, \href{https://ui.adsabs.harvard.edu/abs/2016ApJ...817...69N/abstract}{Nelemans et al. 2016})... not sure into how much detail to go on this.}
%\dwc{Need to compare this with previous work- seems like a high occurrence rate?}
%V727 Car not on plot as no teff/logg yet. Also not a COS spectrum.
\subsection{Formation and evolution}
\subsubsection{Formation}
Invoking a common origin for the magnetic white dwarfs in Figure \ref{fig:teffB} would imply a large and thus-far undetected population filling the parameter space in between CC\,Cet and the rest of the sample. We speculate that it is instead more likely that the CC\,Cet magnetic field was formed via a different pathway to \bgs{that of} the pre-polars. Such a pathway must consistently explain the low but non-zero occurrence rate of magnetic white dwarfs in PCEBs, as well as the high rotation rate and low mass of the CC\,Cet white dwarf.
Here we discuss the various proposed formation \bgs{scenarios}
%pathways
%%% 3 x pathway in a row
for magnetic white dwarfs and their applicability to CC\,Cet:
\textit{Spin up from accretion:} The crystallization/spin up model \citep{iserneral17-1, schreiberetal21-1} successfully reproduces the previously observed examples of pre-polars \bgc{see my comment in the introduction, this is confusing, and \citet{schreiberetal21-1} concluded that these systems have already undergone mass transfer. I'm not sure how to best phrase this / call them (technically, they are post-non-magnetic CVs, and pre-polars at the same time)}. However, the CC\,Cet secondary will not fill its Roche lobe and start mass transfer for many Gyr (Section \ref{sec:future}), so it is highly unlikely that the system has undergone the required previous period of mass transfer. Furthermore, the system is too young for the white dwarf core to have adequately crystallised. Our measured mass for the CC\,Cet white dwarf, $0.441\pm0.008$\,\Msun, indicates that it may be a He-core white dwarf \citep{driebeetal98-1, althausetal13-1}, and thus may lack \bgs{altogether} the core chemical stratification necessary to generate a dynamo via convection.
% \textit{Core crystallisation:} The young age and He core of the white dwarf also rules out the crystallisation only model of \citep{iserneral17-1}.
\textit{Fossil Field:} In the fossil field model, the progenitor of the white dwarf is an Ap/Bp star with the magnetic field in place before the common envelope. Whilst we cannot conclusively rule this pathway out, there are several issues. Firstly, the formation of the Ap/Bp star is thought to be due to a main sequence stellar merger \citep{braithwaite+spruit04-1}, so the average masses of the progenitor stars, and by extension their white dwarf remnants, should be higher than non-magnetic objects \citep{ferrarioetal20-1}. CC\,Cet's mass is instead lower than the average masses of both PCEBs and CVs \citep{zorotovicetal11-1}. The fossil field pathway also provides no explanation for the high rotation rate of the CC\,Cet white dwarf.
\textit{Common envelope dynamo:} \citet{briggsetal18-1} proposed a model whereby a weak initial field is wound up by differential rotation between the two stellar components in a common envelope. Whilst \citet{bellonietal20-1} found that this model was unable to predict the observed magnetic white dwarf population, it may still be relevant for rare objects such as CC\,Cet. The fact that the CC\,Cet white dwarf is already rotating with a period similar to those seen in intermediate polars may also hint at unusual dynamical interactions in the common envelope. Using Equation 1 of \citet{briggsetal18-1} and assuming that the current orbital period of CC\,Cet is close to the initial post-common envelope orbital period, we find a predicted field strength of $\approx 20$\,MG, roughly 30 times the measured value. The common envelope dynamo model as it stands cannot therefore fully explain the CC\,Cet system, but \bgs{could still, at least conceptually, provide a formation scenario for the magnetic white dwarfs in}
%is perhaps a useful starting point for a more detailed description of the formation of
CC\,Cet and the intermediate polars. \bgs{However, any future extension of this model will also have to explain why \textit{most} systems emerging from a common envelope are not magnetic (Fig.\,\ref{fig:tefflogg}).}
%In summary, the core crystalisation/spin up model \citep{iserneral17-1, schreiberetal21-1} cannot explain the formation of CC\,Cet. The he fossil field and common envelope models can be conclusively ruled out, although neither offers a consistent explanation for all of the properties of the CC\,Cet system.
% \subsubsection{Mass}
%\bgc{The mass of the white dwarf in CC\,Cet is very low compared to the measured average mass of CV white dwarf \citep{zorotovicetal11-1}, in particular, only a handful of CV white dwarfs with likely helium core compositions are known [2019MNRAS.489.1023Y, 2020A\&A...634A..91B]. CV population models based on the standard assumptions predict large numbers of low-mass white dwarfs, and the reasons for the lack of those systems in the real world are still debated [2016MNRAS.455L..16S]. }
%consistent, we note that the white dwarf mass of CC\,Cet, $0.44$\,\Msun\ is much lower than the average mass of CVs, $0.83$\,\Msun\ \citep{zorotovicetal11-1}. However, we also caution the reader that white dwarf masses of intermediate polars are rather uncertain, and thus, it is not straight-forward to establish if CC\,Cet is a ``typical'' pre-intermediate polar.
%Compilations of observed magnetic white dwarfs have consistently shown that they have a higher average mass than non-magnetic white dwarfs \citep{liebertetal88-1, ferrarioetal05-1, ferrarioetal20-1}, $\approx0.8$\,\Msun compared with $\approx0.6$\,\Msun. The known CV population also has an average mass around $\approx0.8$\,\Msun\ \citep{zorotovicetal11-1}, and only a handful of low-mass CV white dwarfs with likely helium core compositions are known \citep{yuetal19-1, beuermannetal20-1}. CV population models predict large numbers of low-mass white dwarfs, and the reasons for the non-detection of those systems in the real world are still debated \citep{schreiberetal16-1}. Complicating this picture, especially in relation to CC\,Cet, is the fact that the white dwarf masses in intermediate polars are uncertain due to obscuration by the disc.
%We find a mass of $0.441\pm0.008$\,\Msun\ for CC\,Cet, low in comparison to both the magnetic white dwarf and CV populations and indicating that it is a He-core white dwarf \citep{driebeetal98-1, althausetal13-1}). Magnetic white dwarfs with low mass are not unheard of: \citet{bagnulo+landstreet19-1} identify four magnetic, H atmosphere white dwarfs with masses $\lesssim 0.5$\,\Msun, 25\,per\,cent of the total within 20\,pc. Nevertheless, it may be that the non-detection of similar objects to CC\,Cet is due to inefficient formation of magnetic fields in low-mass white dwarfs, irrespective of their binarity.
%urrrgh something about mergers
\subsubsection{Future of CC\,Cet}
\label{sec:future}
The future of CC\,Cet was first modelled by \citet{schreiber+gaensicke03-1}. They found that the companion will fill its Roche-lobe and start mass transfer onto the white dwarf, becoming a cataclysmic variable once the system has evolved down to an orbital period of $P_\mathrm{orb}\simeq2$\,h in $\simeq18$\,Gyr.
%Because of the low mass of the companion, angular momentum losses from the system are driven only by gravitational wave radiation, and hence it will take CC\,Cet a long time, $\simeq23$\,Gyr, to turn into a cataclysmic variable.
%We performed evolutionary simulations to evaluate the future of CC-Cet as a cataclysmic variable. Cataclysmic variables are systems composed by a white dwarf accreting from non-degenerate companions at rates of $10^{-10}-10^{-13}$\,\Msun/yr.
Here we improve on those calculations using the state-of-the-art stellar evolution code Modules for Experiments in Stellar Astrophysics \citep[{\tt MESA} v.12778,][]{paxtonetal11-1, paxtonetal13-1, paxtonetal15-1, paxtonetal18-1, paxtonetal19-1}. The standard model of evolution for CVs states that the orbital period is reduced via angular momentum losses driven by gravitational wave radiation \citep{paczynski67-1} and magnetic braking generated by the secondary (donor) star \citep{verbunt+zwaan81-1, rappaportetal83-1, mestel+spruit87-1, kawaler88-1, andronovetal03-1}. The low mass of the donor star in CC\,Cet indicates that it is fully convective, so magnetic braking is not active and the reduction of CC\,Cet's orbit is driven only by the angular momentum loss via emission of gravitational waves.
%However, if the system contains a magnetic white dwarf the magnetic flux in open field lines responsible for magnetic braking is reduced, significantly reducing the magnetic braking of the secondary \citep{lietal94-2, li+wickramasinghe98-1, webbink+wickramasinghe02-1}.
%The magnetic braking is also thought to become very inefficient once the systems reach orbital periods around $\simeq3$\,h, which could be the result of the donor becoming fully convective \bgs{\citep{rappaportetal83-1}}. The abrupt decrease of donor's magnetic braking causes the mass transfer to halt (and thus the detached systems sit in the commonly denominated period gap; $\sim 2-3$\,h). The subsequent evolution is driven mainly by angular momentum loss via gravitational radiation. With decreasing period the Roche lobe of the donor reduces, and at a period roughly of 2\,h the donor once again overflows its Roche lobe, resuming mass transfer and exiting the period gap. \bgc{I'm not sure the above details are relevant, as we star the evolution from ``here and now'', where the low mass of the donor means that it is fully convective, and should not have magnetic breaking? }
% The low mass of the main-sequence star indicates that it is fully convective, therefore magnetic braking in CC\,Cet is not active \bgc{see the comment above... shorten the text above?} and the reduction of CC\,Cet's orbit is driven only by the angular momentum loss via emission of gravitational waves.
For our simulations we adopt an initial orbital period of 0.287\,d and treat the white dwarf as a point source with a mass of 0.44\,\Msun (Table \ref{tab:characteristics}). We assumed that the white dwarf retains none of the accreted mass (\textsc{mass\_transfer\_beta} = 1.0, where the typical assumption is that the accreted mass is ejected in classical nova eruptions) and adopted the $M_{\rm donor}=0.18\pm0.05$\,\Msun\ secondary mass from \citet{safferetal93-1}. We ran the simulations with two different prescriptions for the gravitational wave radiation: (1) the classical prescription dictated by Einstein's quadrupole formula \citep{paczynski67-1} and (2) the calibrated version, which reproduces the observed masses and radii of the donors in cataclysmic variables \citep{kniggeetal11-1}. The simulations were ended when the mass of the donor reached the brown dwarf mass limit ($\mbox{\textsc{star\_mass\_min\_limit}}=0.08\,\Msun$).
\begin{figure}
\centering
\includegraphics[width=8cm]{Porb_age_v2.eps}
\caption{MESA simulations adopting angular momentum loss via gravitational wave radiation using the classical prescription \citep[][blue]{paczynski67-1} and the calibrated version \citep[][orange]{kniggeetal11-1}. Our simulations show that CC\,Cet will start mass transfer from the secondary onto the white dwarf (i.e. become a cataclysmic variable) in the next 17.7\,Gyr and 7.2\,Gyr, for the classical and the calibrated versions of gravitational radiation, respectively. The vertical dashed line shows the calculations performed by \citet{schreiber+gaensicke03-1} using the classical prescription, which is slightly longer than in our simulations. The system will have an orbital period shorter than 2\,hr when the accretion starts (dashed lines).}
\label{fig:MESA}
\end{figure}
The results of our simulations confirm that the final fate of CC\,Cet is to become an intermediate polar. The system will start to transfer mass at a low rate of $\simeq 3.5\times10^{-11}$\,\Msun/yr ($\simeq 10^{-10}$\,\Msun/yr) in roughly 17.7\,Gyr (7.2\,Gyr) for the classical (calibrated) version of gravitational radiation. The system will have an orbital period of $\approx 1.89$ ($\approx 1.83$)\,h when the accretion starts (Figure \ref{fig:MESA}). The accretion rate will slightly decrease while the system continues shrinking, until the donor reaches a mass of 0.08\,\Msun\ at an orbital period of $\simeq1.14$\,h ($\simeq1.24$\,h).
\citet{nortonetal04-1} modelled the magnetic moment of intermediate polars given their measured spin period to orbital period ratios. Given that we know the magnetic moment of CC\,Cet ($\mu_1 = B\,R^3 \approx1.35\times10^{33}$\,G\,cm$^{3}$), we can use their Figure~2 to estimate the white dwarf spin period when it reaches the orbital period minima calculated above. We find equilibrium spin periods of $\simeq1.7$\,min ($\simeq1.9$\,min) for orbital periods of $\simeq1.14$\,h ($\simeq1.24$\,h), comparable to the spin period of V455\,And, an intermediate polar near the orbital period minimum \citep{araujo-betancoretal05-1, bloemenetal13-1}.
\subsection{M~dwarf wind}
\label{s:wind}
As mass-transfer by Roche lobe overflow has not yet begun in the CC\,Cet system, the origin of the metals in the white dwarf atmosphere is most likely the stellar wind of the M~dwarf companion. \citet{debes06-1} has demonstrated that such wind accreting systems can be used to quantify the wind mass-loss rates of M~dwarfs, which are otherwise extremely difficult to measure (see \citealt{woodetal21-1} for a summary of the current state of the art). \citet{debes06-1} assumed that the non-magnetic white dwarfs in their sample accreted via the Bondi-Hoyle process \citep{bondi+hoyle44-1}, where the white dwarf gathers a fraction of the stellar wind in proportion to the white dwarf's gravity. \citet{webbink+wickramasinghe05-1} demonstrated that for binaries containing white dwarfs with high (10s of MG, i.e. pre-polars) magnetic fields, the energy density of the magnetic field is much greater than that of the wind down to the surface of the secondary, and thus the white dwarf accretes all of the wind emitted. CC\,Cet is an intermediate case, where the magnetic field is not strong enough to accrete all of the wind but nevertheless gathers wind from a wider radius than the Bondi-Hoyle process. Following the model from Section 6 of \citet{webbink+wickramasinghe05-1}, the wind will be accreted inside of a critical radius $r_{\mathrm{crit}}$ from the white dwarf where the energy density of the magnetic field exceeds that of the wind:
\newcommand{\D}{\displaystyle}
\begin{equation}
\label{eqn: rcrit}
\frac{\left(B_0 \left(\frac{\D r_{\mathrm{wd}}}{\D r_{\mathrm{crit}}}\right)^3\right)^2}{8\pi} = u_0\left(\frac{r_{\mathrm{md}}}{a-r_{\mathrm{crit}}}\right)^2
\end{equation}
where $B_0$ is the strength of the magnetic field at the white dwarf surface, $u_0$ the energy density of the wind at the secondary surface, $a$ the orbital separation, and $r_{\mathrm{wd}}$ and $r_{\mathrm{md}}$ the radii of the primary and secondary respectively. Taking the average of our two magnetic field measurements and adopting a secondary radius of $r_{\mathrm{md}}=0.243$\,\Rsun\ from the latest version of the tables from \citet{pecaut+mamajek13-1}\footnote{\url{http://www.pas.rochester.edu/~emamajek/EEM_dwarf_UBVIJHK_colors_Teff.txt}}, we numerically solve equation \ref{eqn: rcrit} to find a critical radius of $\approx$ 1.2\,\Rsun, 75\,per\,cent of the orbital radius and roughly twice the Bondi radius. Assuming (incorrectly, but close enough for our purposes) that the M\,dwarf emits wind uniformly in all directions, the accretion rate of wind siphoned onto the white dwarf $\dot{M}_{\mathrm{wd}}$ is:
\begin{equation}
\label{eqn: wdfrac}
\dot{M}_{\mathrm{wd}} = \dot{M}_{\mathrm{wind}} \left(\frac{r_{\mathrm{crit}}}{a}\right)^2
\end{equation}
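% A sketch of the numerical solution of Eq.~\ref{eqn: rcrit} described above (illustrative only):
% r_md and B0 follow the text, a is taken from the quoted separation (1.2 Rsun being 75 per cent
% of it), r_wd is an assumed representative white dwarf radius, and u0 is a placeholder that must
% be set from the actual wind ram pressure at the secondary surface.
%
%   import numpy as np
%   from scipy.optimize import brentq
%   Rsun = 6.957e10                       # cm
%   B0, r_wd = 6.5e5, 1.3e9               # surface field (G) and WD radius (cm)
%   r_md, a = 0.243 * Rsun, 1.6 * Rsun    # secondary radius and orbital separation (cm)
%   u0 = 1.0                              # wind energy density at the secondary surface (erg cm^-3); placeholder
%   def excess(r):
%       # magnetic energy density minus wind energy density at distance r from the WD
%       return (B0 * (r_wd / r) ** 3) ** 2 / (8 * np.pi) - u0 * (r_md / (a - r)) ** 2
%   r_crit = brentq(excess, r_wd, a - r_md)   # root where the two energy densities balance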
To calculate $\dot{M}_{\mathrm{wd}}$, we take the average of our measured Si abundance and calculate an accretion rate assuming that the atmosphere is in accretion diffusion equilibrium \citep{koester10-1}, taking the diffusion timescale from the Montreal White Dwarf Database\footnote{\url{http://www.montrealwhitedwarfdatabase.org/evolution.html}}. We then assume that the wind has solar abundances, and scale the Si accretion rate to the total accretion rate via the Si mass fraction measured by \citet{asplundetal09-1}, arriving at an accretion rate onto the white dwarf of $\approx7.3\times10^9$\,g\,s$^{-1}$. Via equation \ref{eqn: wdfrac}, we therefore infer a wind mass loss rate $\dot{M}_{\mathrm{wind}}$ of $\approx1.3\times10^{10}$\,g\,s$^{-1}$ or $\approx2\times10^{-16}$\,\Msun\,yr$^{-1}$. Repeating the calculation with only Bondi-Hoyle accretion in effect results in an inferred wind rate approximately an order of magnitude higher. These values are comparable to the wind mass loss rates found by \citet{debes06-1}.
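% Worked numbers for Eq.~\ref{eqn: wdfrac} (for checking only): with r_crit/a ~ 0.75,
% Mdot_wind ~ Mdot_wd / (r_crit/a)^2 ~ 7.3e9 / 0.56 ~ 1.3e10 g/s, and
% 1.3e10 g/s x 3.2e7 s/yr / 2.0e33 g/Msun ~ 2e-16 Msun/yr, matching the values quoted above.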
X-ray observations may also be used to place constraints on the wind mass loss rate. Although the weak X-ray detection at CC\,Cet can be satisfactorily accounted for by coronal emission (Section \ref{sec:x-rays}), it is still possible that some fraction of the signal could be due to accretion onto the white dwarf. The maximum X-ray luminosity that can be extracted from accretion by conversion of gravitational potential energy to X-rays is $L_\mathrm{X}\leq GM_\mathrm{wd}\dot{M}/R_\mathrm{wd}$. For an X-ray luminosity of $L_\mathrm{X}\sim 10^{28}$\,erg\,s$^{-1}$, and the white dwarf mass and radius derived in Sect.~\ref{sec:wdparams}, the corresponding maximum mass accretion rate is $\sim 3\times 10^{-15} \Msun$\,yr$^{-1}$. This is of the same order of magnitude as estimates of the mass loss rate for the moderately active mid-M dwarf Proxima Centauri \citep[e.g.][]{wood18-1,Wargelin.Drake:02}. Since the white dwarf is unlikely to accrete a majority of the stellar wind, a more reasonable accretion efficiency of $\sim 10$\% would imply a mass loss rate of $3\times 10^{-14} M_\odot$~yr$^{-1}$, which is similar to that of the Sun. As this is two orders of magnitude higher than
%secure measurement
the mass loss rate estimated from the
%ultraviolet
\bgs{phtospheric metal} absorption lines, we retain the former value as the final wind mass loss rate.
\section{Conclusions}
\label{sec:conc}
We have detected a 600--700\,kG magnetic field on the white dwarf component of the detached PCEB CC\,Cet, classifying it as a pre-intermediate polar. Analysis of \textit{HST} COS spectra demonstrates that the white dwarf is accreting the stellar wind of its M4.5--5 companion inhomogeneously over its surface, and that the axis of the magnetic field is likely offset from the spin axis of the white dwarf. The white dwarf has a relatively low mass of $0.441\pm0.008$\,\Msun, and is rotating with a period of $\la 2000$\,s, which is much faster than the $6.88233\pm0.00045$\,h binary orbital period and consistent with the spin period to orbital period ratios of intermediate polars.
CC\,Cet is by far the youngest and has the weakest field of all known pre-polars \bgc{sigh... how do we call those!?}, being a 6\,$\sigma$ outlier in both \Teff\ and $B$. Using MESA stellar evolution models, we show that the secondary star will not start mass transfer for at least 7\,Gyr, and is so far from filling its Roche lobe that it has not undergone a period of mass transfer in the past, unlike the rest of the known pre-polars. The CC\,Cet magnetic field therefore cannot have formed via mass transfer, ruling out the formation pathway proposed by \citet{schreiberetal21-1} for the pre-polars. The CC\,Cet magnetic field must instead have formed either before or during the common envelope phase, although neither the fossil field nor the common envelope interaction model provides a complete explanation of the observed white dwarf properties.
% -- can't have formed by mass transfer
% -- low mass so merger iffy???
% -- already spinning at IP speeds
%Via measurement of spectral line broadening, we find that the CC\,Cet white dwarf is \textit{already} rotating with a period of just a few tens of minutes, with a similar spin period to orbital period ratio as the intermediate polars. It is unc
%and is so distinct from the rest of the sample that the field likely has a different origin. CC\,Cet will not start mass transfer for at least 7\,Gyr and is thus unambiguously a pre-intermediate polar, rather than a dormant CV. The white dwarf is rapidly rotating and demonstrates subtle variations in the absorption lines between spectra.
With only one known example in a relatively small sample of PCEBs with ultraviolet spectra, we can place only limited constraints on both the true occurrence rate and formation pathway of CC\,Cet\bgs{-like}
%-type
systems. \bgc{Yes. But. Should we re-iterate here that with the currently available data, the fraction of magnetic (young) PCEBs is consistent with that of IPs in the 150pc sample?} The online catalogue of white dwarf plus main sequence binaries compiled by \citet{rebassa-mansergasetal12-1}\footnote{\url{https://www.sdss-wdms.org/}} contains 66 objects with \textit{GALEX} FUV magnitudes $<16$, roughly the limit where ultraviolet spectroscopy of similar precision to CC\,Cet ($FUV=14.03$\,mag) can be obtained in a single \textit{HST} orbit \bgc{ah... I see another HST proposal being written!}. The periods of most of these targets are currently unconstrained, so in many cases the binary separation may be too large for wind accretion to form the strong absorption lines required to search for magnetic fields. If periods for these systems can be measured then a factor 2--3 increase in the number of PCEBs with ultraviolet spectra may be achievable with only a moderate investment of \textit{HST} time, further constraining the occurrence rate and/or discovering new examples of magnetic systems.
%Future high-cadence, high signal spectroscopy in the ultraviolet or blue optical may be able to map out the magnetic field structure and the surface distribution of metals accreted from the M\,dwarf companion.
\section*{Acknowledgements}
BTG was supported by a Leverhulme Research Fellowship and the UK STFC grant ST/T000406/1. JDL acknowledges support from the Natural Sciences and Engineering Research Council of Canada (NSERC), funding reference number 6377--2016. OT was supported by a Leverhulme Trust Research Project Grant and FONDECYT project 321038. Support for this work was in part provided by NASA {\em TESS} Cycle 2 Grant 80NSSC20K0592.
We acknowledge the contribution to this work of the late Professor Egidio Landi Degl'Innocenti, whose computer program {\sc gidi.f} was supplied to us by Dr. S. Bagnulo.
Based on observations made with the NASA/ESA Hubble Space Telescope, obtained from the Data Archive at the Space Telescope Science Institute, which is operated by the Association of Universities for Research in Astronomy, Inc., under NASA contract NAS 5-26555, and observations obtained with \textit{XMM-Newton}, an ESA science mission with instruments and contributions directly funded by ESA Member States and NASA. These observations are associated with program \# 15189. We thank the {\em HST} and {\em XMM} support teams for their work arranging two simultaneous observations of CC\,Cet.
This paper includes data collected by the TESS mission. Funding for the TESS mission is provided by NASA's Science Mission Directorate.
Based on data obtained from the ESO Science Archive Facility.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%% REFERENCES %%%%%%%%%%%%%%%%%%
% The best way to enter references is to use BibTeX:
\bibliographystyle{mnras}
%\bibliography{example} % if your bibtex file is called example.bib
\bibliography{aamnem99,aamorebib,aabib,boris}
% Alternatively you could enter them by hand, like this:
% This method is tedious and prone to error if you have lots of references
%\begin{thebibliography}{99}
%\bibitem[\protect\citeauthoryear{Author}{2012}]{Author2012}
%Author A.~N., 2013, Journal of Improbable Astronomy, 1, 1
%\bibitem[\protect\citeauthoryear{Others}{2013}]{Others2013}
%Others S., 2012, Journal of Interesting Stuff, 17, 198
%\end{thebibliography}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%% APPENDICES %%%%%%%%%%%%%%%%%%%%%
\appendix
\section{Supplementary Material}
\begin{table*}
\centering
\caption{Summary of \textit{HST} and \textit{XMM} observations. Dataset numbers are given for retrieval from MAST (\url{https://archive.stsci.edu/hst/}) or the XMM-Newton Science Archive (\url{http://nxsa.esac.esa.int/nxsa-web/\#home}) for \textit{HST} and \textit{XMM} respectively.}
\begin{tabular}{lcccccc}\\
\hline
Date & Instrument & Grating & Central Wavelength (\AA) & Start Time (UT) & Total Exposure Time (s) & Dataset \\
\hline
\textit{HST} & & & & & & \\
2018-02-02 & COS & G130M & 1291.000 & 23:50:53 & 1865.056 & LDLC01010 \\
2018-07-22 & COS & G130M & 1291.000 & 05:40:51 & 1865.088 & LDLC51010\\
\textit{XMM} & & & & & & \\
2018-02-01 & -- & -- & -- & 22:48:07 & 9500 & 0810230101\\
2018-02-01 & -- & -- & -- & 22:48:07 & 16800 & 0810231301\\
\hline
\hline
\end{tabular}
\label{tab:hst_obs}
\end{table*}
\begin{table}
\centering
\caption{Interstellar absorption lines identified in the COS spectrum of CC\,Cet. These lines are narrower than the white dwarf's photospheric absorption lines because they are not affected by the rotational broadening of the white dwarf.}
\label{tab:ISlines}
\begin{tabular}{c l}
\hline \hline
ion & wavelength (\AA; vacuum) \\
\hline
\Ion{C}{ii} & 1334.532, 1335.703\\
\Ion{N}{i} & 1134.165, 1134.415, 1134.980, 1199.549, 1200.224, 1200.711\\
\Ion{O}{i} & 1302.168\\
\Ion{Si}{ii} & 1190.416, 1193.289, 1260.421, 1304.372\\
%\Ion{Si}{iii} & 1206.510\\
%\Ion{Si}{iv} & 1393.755, 1402.770\\
\Ion{S}{ii} & 1250.586, 1253.812, 1259.520\\
\hline
\end{tabular}
\end{table}
\begin{figure}
\centering
\includegraphics[width=\columnwidth]{cccet_corner.pdf}
\caption{Results of the MCMC fit to the white dwarf atmospheric parameters plotted with {\tt corner.py} \citep{corner}.}
\label{fig:mcmc}
\end{figure}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Don't change these lines
\bsp % typesetting comment
\label{lastpage}
\end{document}
% End of mnras_template.tex
|
{"hexsha": "38da16ba35b8211078b101e9f55389f420f40287", "size": 100657, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "cc_cet/paper/submission_1/oldintro.tex", "max_stars_repo_name": "davidjwilson/pceb", "max_stars_repo_head_hexsha": "259cf4b18b51b7163d6ce84ab150c5f65f8cfdec", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cc_cet/paper/submission_1/oldintro.tex", "max_issues_repo_name": "davidjwilson/pceb", "max_issues_repo_head_hexsha": "259cf4b18b51b7163d6ce84ab150c5f65f8cfdec", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cc_cet/paper/submission_1/oldintro.tex", "max_forks_repo_name": "davidjwilson/pceb", "max_forks_repo_head_hexsha": "259cf4b18b51b7163d6ce84ab150c5f65f8cfdec", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 135.1100671141, "max_line_length": 2481, "alphanum_fraction": 0.7657887678, "num_tokens": 26586}
|
"""
ast_is(ast::VPtr, what::Symbol)::Bool
Helper for quickly recognizing kinds of ASTs
"""
ast_is(ast::VPtr, what::Symbol)::Bool = ccall(sbml(what), Cint, (VPtr,), ast) != 0
"""
parse_math_children(ast::VPtr)::Vector{Math}
Recursively parse all children of an AST node.
"""
parse_math_children(ast::VPtr)::Vector{Math} = [
parse_math(ccall(sbml(:ASTNode_getChild), VPtr, (VPtr, Cuint), ast, i - 1)) for
i = 1:ccall(sbml(:ASTNode_getNumChildren), Cuint, (VPtr,), ast)
]
"""
parse_math(ast::VPtr)::Math
This attempts to parse out a decent Julia-esque ([`Math`](@ref) AST from a
pointer to `ASTNode_t`.
"""
function parse_math(ast::VPtr)::Math
if ast_is(ast, :ASTNode_isName)
if ccall(sbml(:ASTNode_getType), Cint, (VPtr,), ast) == 262
# This is a special case checking for the value of "simulation
# time" as defined by SBML. The constant `262` is the value of the
# enum AST_NAME_TIME in `libsbml/src/sbml/math/ASTNodeType.h`,
# needs to be kept up to date with the library (otherwise this
# breaks).
return MathTime(get_string(ast, :ASTNode_getName))
else
return MathIdent(get_string(ast, :ASTNode_getName))
end
elseif ast_is(ast, :ASTNode_isConstant)
return MathConst(get_string(ast, :ASTNode_getName))
elseif ast_is(ast, :ASTNode_isInteger)
return MathVal(ccall(sbml(:ASTNode_getInteger), Cint, (VPtr,), ast))
elseif ast_is(ast, :ASTNode_isReal)
return MathVal(ccall(sbml(:ASTNode_getReal), Cdouble, (VPtr,), ast))
elseif ast_is(ast, :ASTNode_isFunction)
return MathApply(get_string(ast, :ASTNode_getName), parse_math_children(ast))
elseif ast_is(ast, :ASTNode_isOperator) || ast_is(ast, :ASTNode_isRelational)
return MathApply(
string(Char(ccall(sbml(:ASTNode_getCharacter), Cchar, (VPtr,), ast))),
parse_math_children(ast),
)
elseif ast_is(ast, :ASTNode_isLambda)
children = parse_math_children(ast)
if !isempty(children)
body = pop!(children)
return MathLambda(broadcast((x::MathIdent) -> x.id, children), body)
else
@warn "invalid function definition found"
return MathIdent("?invalid?")
end
else
@warn "unsupported math element found"
return MathIdent("?unsupported?")
end
end
|
{"hexsha": "e7b750ec32d39684aa23b6317ccc57796530a215", "size": 2422, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/math.jl", "max_stars_repo_name": "anandijain/SBML.jl-1", "max_stars_repo_head_hexsha": "be679eabfeec4f0d79e5c98448387c188b7e4aa2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/math.jl", "max_issues_repo_name": "anandijain/SBML.jl-1", "max_issues_repo_head_hexsha": "be679eabfeec4f0d79e5c98448387c188b7e4aa2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/math.jl", "max_forks_repo_name": "anandijain/SBML.jl-1", "max_forks_repo_head_hexsha": "be679eabfeec4f0d79e5c98448387c188b7e4aa2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-05-11T19:18:23.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-15T19:26:59.000Z", "avg_line_length": 37.84375, "max_line_length": 85, "alphanum_fraction": 0.646985962, "num_tokens": 639}
|
import math
import base64
import numpy as np
import tensorflow as tf
from .tflite_schema import Model as tflite_schema_model
from .tflite_schema import BuiltinOperator as tflite_schema_builtin_operator
from .tflite_schema import TensorType as tflite_schema_tensor_type
builtin_operator_code_lookup = {
code: name
for name, code
in vars(tflite_schema_builtin_operator.BuiltinOperator).items()
if not name.startswith('_')
}
tensor_type_code_lookup = {
code: name
for name, code
in vars(tflite_schema_tensor_type.TensorType).items()
if not name.startswith('_')
}
tensor_type_bits = {
"FLOAT32": 32,
"FLOAT16": 16,
"INT32": 32,
"UINT8": 8,
"INT64": 64,
"STRING": np.nan,
"BOOL": 8,
"INT16": 16,
"COMPLEX64": 64,
"INT8": 8
}
builtin_operator_version_support = {
"DEPTHWISE_CONV_2D": [1],
"FULLY_CONNECTED": [1,2,3,4],
"MAX_POOL_2D": [1, 2],
"SOFTMAX": [1],
"LOGISTIC": [1],
"SVDF": [1],
"CONV_2D": [1, 2, 3],
"SPACE_TO_BATCH_ND" : [1, 2, 3],
"AVERAGE_POOL_2D": [1],
"ABS": [1],
"SIN": [1],
"COS": [1],
"LOG": [1],
"SQRT": [1],
"RSQRT": [1],
"SQUARE": [1],
"PRELU": [1],
"FLOOR": [1],
"MAXIMUM": [1],
"MINIMUM": [1],
"ARG_MAX": [1],
"ARG_MIN": [1],
"LOGICAL_OR": [1],
"LOGICAL_AND": [1],
"LOGICAL_NOT": [1],
"RESHAPE": [1],
"EQUAL": [1],
"NOT_EQUAL": [1],
"GREATER": [1],
"GREATER_EQUAL": [1],
"LESS": [1],
"LESS_EQUAL": [1],
"CEIL": [1],
"ROUND": [1],
"STRIDED_SLICE": [1],
"PACK": [1],
"SPLIT": [1,2,3],
"UNPACK": [1],
"NEG": [1],
"ADD": [1],
"QUANTIZE": [1,2,3,4],
"DEQUANTIZE": [1,2,3,4]
}
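# Convert a fitted Keras model to a TFLite flatbuffer; when `quantize` is True the converter
# applies post-training quantization calibrated on the dataset's validation split.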
def _export_model(model, dataset, quantize):
converter = tf.lite.TFLiteConverter.from_keras_model(model)
if quantize:
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = lambda: (
[obs] for obs in dataset.validation.x[
np.random.permutation(dataset.validation.x.shape[0]),
np.newaxis,
...]
)
return converter.convert()
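# Walk the flatbuffer via the generated tflite schema bindings and raise ValueError for anything
# TFLite Micro cannot handle: custom operators, unsupported builtin operators or operator
# versions, multiple subgraphs, or tensors without an explicit shape.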
def _validate_flatbuffer_for_tflite_micro(model_bytes):
buffer = bytearray(model_bytes)
model = tflite_schema_model.Model.GetRootAsModel(buffer, 0)
if model.Version() < 3:
raise ValueError('only version 3 of the TFLite format is supported, use TensorFlow 2')
for operator_code_index in range(model.OperatorCodesLength()):
operator_code = model.OperatorCodes(operator_code_index)
if operator_code.CustomCode() is not None:
raise ValueError('Custom operators are not supported')
operator_name = builtin_operator_code_lookup[operator_code.BuiltinCode()]
if operator_name not in builtin_operator_version_support:
raise ValueError(f'the operator {operator_name} is not supported by TFLite Micro')
operator_version = operator_code.Version()
if operator_version not in builtin_operator_version_support[operator_name]:
raise ValueError(f'the operator {operator_name} does not support version {operator_version}')
if model.SubgraphsLength() != 1:
raise ValueError('expected only 1 subgraph')
graph = model.Subgraphs(0)
for tensor_index in range(graph.TensorsLength()):
tensor = graph.Tensors(tensor_index)
tensor_shape = tensor.ShapeAsNumpy()
tensor_name = tensor.Name().decode()
if tensor_shape.size == 0:
raise ValueError(f'shape not specified for tensor {tensor_index} ({tensor_name})')
class ExportModel:
"""Export a keras model in TFLite format and assert for TFMicro.
This exports a Keras model in the TFLite format using
`tf.lite.TFLiteConverter.from_keras_model(model)`. The exported model
is by default quantized to (u)int8, although this can be disabled by
setting `quantize=False`.
For measuring the neuron activations needed for quantization,
the `dataset.validation` split is used.
Finally the model is asserted, first by using `tf.lite.Interpreter`
for TFLite compatibility. Then the flatbuffer is parsed and checked
for TFMicro compatibility. TFMicro supports fewer kernels than TFLite
and requires all shapes to be set. There may be additional restrictions
that are not asserted.
Finally the exported model can be saved with `exporter.save()`. You can also
get an approximate size estimate with `exporter.size_report()`.
Arguments:
model: fitted Keras model.
dataset: Dataset, should have an `validation` attribute with a named tuple
containing `x` and `y` attributes.
quantize: bool, should the exported model be quantized.
assert_export: bool, should the exported model be tested for compatibility.
"""
def __init__(self, model, dataset, quantize=True, assert_export=True):
self._quantize = quantize
self._model_bytes = _export_model(model, dataset, quantize)
if assert_export:
self.evaluate_zeros_input()
_validate_flatbuffer_for_tflite_micro(self._model_bytes)
def modelsize(self):
return len(self._model_bytes)
def areasize(self):
total_areasize = 0
model = tflite_schema_model.Model.GetRootAsModel(bytearray(self._model_bytes), 0)
graph = model.Subgraphs(0)
for tensor_index in range(graph.TensorsLength()):
tensor = graph.Tensors(tensor_index)
tensor_shape = tensor.ShapeAsNumpy()
tensor_type = tensor_type_code_lookup[tensor.Type()]
tensor_size_bytes = np.prod(tensor_shape) * tensor_type_bits[tensor_type] // 8
tensor_size_bytes_aligned = tensor_size_bytes + (16 - (tensor_size_bytes % 16))
total_areasize += tensor_size_bytes_aligned
return total_areasize
def size_report(self):
"""Returns a size-report of the model as a printable string.
The size-report consists of:
modelsize: the exact size of the model
areasize: the sum of the size of all tensors.
total: modelsize and areasize added together.
Note that the areasize is an estimate.
"""
modelsize = self.modelsize()
areasize = self.areasize()
total = modelsize + areasize
return (
f'{"Quantized" if self._quantize else "Not-quantized"} model\n'
f' modelsize {modelsize / 1024}KB\n'
f' areasize: ~{areasize / 1024}KB\n'
f' total: ~{total / 1024}KB'
)
def save(self, filepath):
"""Saves the model at `filepath`."""
with open(filepath, "wb") as fp:
fp.write(self._model_bytes)
def base64(self):
return base64.b64encode(self._model_bytes).decode('utf-8')
def evaluate_zeros_input(self):
interpreter = tf.lite.Interpreter(model_content=self._model_bytes)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()[0]
output_details = interpreter.get_output_details()[0]
interpreter.set_tensor(
input_details["index"],
np.zeros(input_details["shape"], dtype=input_details["dtype"]))
interpreter.invoke()
output = interpreter.get_tensor(output_details["index"])[0]
return output
def predict(self, input_tensor):
"""Similar to model.predict(input_tensor) but uses the exported model."""
interpreter = tf.lite.Interpreter(model_content=self._model_bytes)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()[0]
output_details = interpreter.get_output_details()[0]
predictions = []
for observation in input_tensor:
interpreter.set_tensor(input_details["index"], observation[np.newaxis, ...])
interpreter.invoke()
output = interpreter.get_tensor(output_details["index"])[0]
predictions.append(output)
return np.stack(predictions)
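# A minimal usage sketch (illustrative only; `keras_model` and `dataset` are placeholders for a
# fitted Keras model and a dataset object exposing `validation.x`, as described in the class
# docstring):
#
#   exporter = ExportModel(keras_model, dataset, quantize=True)
#   print(exporter.size_report())
#   exporter.save("model.tflite")
#   tflite_predictions = exporter.predict(dataset.validation.x)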
|
{"hexsha": "9cbdd92ec01e6fd7ec9d3da2f696d0213b157185", "size": 8021, "ext": "py", "lang": "Python", "max_stars_repo_path": "nodeconfeu_watch/convert/export_tflite.py", "max_stars_repo_name": "coreml-models/nodeconfeu-gesture-models", "max_stars_repo_head_hexsha": "59545c091ed42d904c0ed41cc0a01d744959c48a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nodeconfeu_watch/convert/export_tflite.py", "max_issues_repo_name": "coreml-models/nodeconfeu-gesture-models", "max_issues_repo_head_hexsha": "59545c091ed42d904c0ed41cc0a01d744959c48a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nodeconfeu_watch/convert/export_tflite.py", "max_forks_repo_name": "coreml-models/nodeconfeu-gesture-models", "max_forks_repo_head_hexsha": "59545c091ed42d904c0ed41cc0a01d744959c48a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.8729508197, "max_line_length": 105, "alphanum_fraction": 0.6570253086, "include": true, "reason": "import numpy", "num_tokens": 1994}
|
import random
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
"""
Pearson correlation coefficient (PCC)
"""
def PCC(l1, l2):
return stats.pearsonr(l1, l2)
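# Note: stats.pearsonr returns a (correlation, p_value) tuple; only the correlation
# coefficient (res[0]) is used in the experiment below.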
if __name__ == '__main__':
lis = []
height_score = {}
for i in range(1000):
l1 = [random.randint(0, 9) for _ in range(5)]
l2 = [random.randint(0, 9) for _ in range(5)]
res = PCC(
# [1, 2, 9, 4, 5, 6, 8],
# [1, 2, 3, 4, 5, 6, 8],
l1,
l2
)
lis.append(res[0])
if res[0] >= 0.95:
height_score[f"{res[0]}"] = [l1, l2]
lis.sort()
for x in lis:
# pearsonr returns nan when either input list is constant; treat that as zero correlation
if np.isnan(x):
x = 0
s = "#" if x > 0 else "-"
print(round(x, 5), s * abs(round(x*100)))
for sc, num_liss in height_score.items():
print("score: ", sc)
yd1 = num_liss[0]
yd2 = num_liss[1]
xd = [x for x in range(len(yd1))]
print("l1: ", yd1)
print("l2: ", yd2)
plt.plot(xd, yd1, color='red', linewidth=2.0, linestyle='--')
plt.plot(xd, yd2, color='blue', linewidth=2.0, linestyle='-')
plt.show()
print()
print(f"height_score rate: {round(len(height_score)/1000 * 100, 2)}")
|
{"hexsha": "adeddd9f929f8762cbb11859000855a4b8a5c161", "size": 1248, "ext": "py", "lang": "Python", "max_stars_repo_path": "NetRecorder/math_works/Pearson_CorrelationCoefficient.py", "max_stars_repo_name": "ga1008/net_tracfic_recorder", "max_stars_repo_head_hexsha": "adc837a62d772ca0a3142cf4efef17c34b908ab5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-11-02T13:42:42.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-02T13:42:42.000Z", "max_issues_repo_path": "NetRecorder/math_works/Pearson_CorrelationCoefficient.py", "max_issues_repo_name": "ga1008/net_tracfic_recorder", "max_issues_repo_head_hexsha": "adc837a62d772ca0a3142cf4efef17c34b908ab5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "NetRecorder/math_works/Pearson_CorrelationCoefficient.py", "max_forks_repo_name": "ga1008/net_tracfic_recorder", "max_forks_repo_head_hexsha": "adc837a62d772ca0a3142cf4efef17c34b908ab5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.4705882353, "max_line_length": 73, "alphanum_fraction": 0.4903846154, "include": true, "reason": "import numpy,from scipy", "num_tokens": 428}
|
import numpy as np
class Variable:
def __init__(self, data):
if data is not None:
if not isinstance(data, np.ndarray):
raise TypeError('{} is not supported'.format(type(data)))
self.data = data
self.grad = None
self.creator = None
def set_creator(self, func):
self.creator = func
def backward(self):
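        # walk back along the chain of creator functions, propagating gradients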
if self.grad is None:
self.grad = np.ones_like(self.data)
funcs = [self.creator]
while funcs:
f = funcs.pop()
x, y = f.input, f.output
x.grad = f.backward(y.grad)
if x.creator is not None:
funcs.append(x.creator)
def as_array(x):
if np.isscalar(x):
return np.array(x)
return x
class Function:
def __call__(self, input):
x = input.data
y = self.forward(x)
output = Variable(as_array(y))
output.set_creator(self)
self.input = input
self.output = output
return output
def forward(self, x):
raise NotImplementedError()
def backward(self, gy):
raise NotImplementedError()
class Square(Function):
def forward(self, x):
return x ** 2
def backward(self, gy):
x = self.input.data
return 2 * x * gy
class Exp(Function):
def forward(self, x):
return np.exp(x)
def backward(self, gy):
x = self.input.data
return np.exp(x) * gy
def square(x):
return Square()(x)
def exp(x):
return Exp()(x)
x = Variable(np.array(0.5))
y = square(exp(square(x)))
y.backward()
print(x.grad) # 3.297442541400256
x = Variable(1.0)  # NG: raises TypeError because only numpy.ndarray (or None) is accepted
|
{"hexsha": "8a0ffe7606640fcc95f81840735b195d440dfad5", "size": 1681, "ext": "py", "lang": "Python", "max_stars_repo_path": "steps/step09.py", "max_stars_repo_name": "timwuu/deep-learning-from-scratch-3", "max_stars_repo_head_hexsha": "6f18dee8c1d764e16275ed68f90966bc85f0ae66", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-11-30T03:52:33.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-30T03:52:33.000Z", "max_issues_repo_path": "steps/step09.py", "max_issues_repo_name": "timwuu/deep-learning-from-scratch-3", "max_issues_repo_head_hexsha": "6f18dee8c1d764e16275ed68f90966bc85f0ae66", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "steps/step09.py", "max_forks_repo_name": "timwuu/deep-learning-from-scratch-3", "max_forks_repo_head_hexsha": "6f18dee8c1d764e16275ed68f90966bc85f0ae66", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.7764705882, "max_line_length": 73, "alphanum_fraction": 0.5562165378, "include": true, "reason": "import numpy", "num_tokens": 397}
|
'''
summary
Classes and functions for summarising Trips
Tightly coupled to CleanTrip
'''
import numpy as np
import pandas as pd
class TripSummaryStatistics(object):
'''
Summary statistics for a cleaned trip
'''
def __init__(self, clean_trip):
'''
Create an instance of TripSummaryStatistics for an individual
trip.
Parameters:
-------
clean_trip, cats_analysis.io.CleanTrip. Cleaned Trip Data
'''
self._clean_trip = clean_trip
self._summary = None
self._duration = -1.0
    def _get_duration(self):
        '''
        Return trip duration as a pandas Timedelta
        '''
        return self._duration
def _get_summary_table(self):
'''
Summary statistics for the trip
'''
return self._summary
def calculate(self, resample='30s', smooth=False, interp_missing=False):
'''
Calculate basic summary statistics for trip.
1. Trip Duration
2. Completely empty fields
3. For every field:
3.1 Mean,
3.2 Stdev,
3.3 Histogram.... think about that one. (numpy.hist?)
3.4
Parameters:
----------
        resample -- str, interval to aggregate values over (default='30s')
        smooth -- bool, passed through to CleanTrip.resample (default=False)
        interp_missing -- bool, linear interpolation between missing values
            (default=False)
'''
df = self._clean_trip.resample(resample, smooth, interp_missing)
        self._duration = df.index.max() - df.index.min()
results = {}
results['per_missing'] = (1 - df.count()/df.shape[0])*100
results['mean'] = df.mean()
results['std'] = df.std()
results['min'] = df.min()
results['max'] = df.max()
results['median'] = df.quantile(q=0.5)
results['iqr'] = df.quantile(q=0.75) - df.quantile(q=0.25)
results['skew'] = df.skew()
results['kurtosis'] = df.kurtosis()
self._summary = pd.DataFrame(results)
summary_table = property(_get_summary_table)
trip_duration = property(_get_duration)
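# ---------------------------------------------------------------------------
# Minimal usage sketch (illustration only, not part of the package).
# `_FakeCleanTrip` is a hypothetical stand-in exposing the resample()
# interface that TripSummaryStatistics expects from a CleanTrip object.
if __name__ == '__main__':
    class _FakeCleanTrip(object):
        def resample(self, rule, smooth, interp_missing):
            idx = pd.date_range('2022-01-01', periods=120, freq='30s')
            data = {'speed_kmh': [20.0 + (i % 7) for i in range(120)],
                    'heart_rate': [120.0 + (i % 11) for i in range(120)]}
            return pd.DataFrame(data, index=idx)
    stats = TripSummaryStatistics(_FakeCleanTrip())
    stats.calculate(resample='30s')
    print(stats.trip_duration)
    print(stats.summary_table)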
|
{"hexsha": "b82fc5d8b28eb50bdd23682c17c982da617dbedb", "size": 2143, "ext": "py", "lang": "Python", "max_stars_repo_path": "cats_analysis/summary.py", "max_stars_repo_name": "TomMonks/cats-time-series", "max_stars_repo_head_hexsha": "fb8ec51bc5805b070a7616e10b6aadd594915642", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-12-17T14:05:18.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-17T14:05:18.000Z", "max_issues_repo_path": "cats_analysis/summary.py", "max_issues_repo_name": "TomMonks/cats-time-series", "max_issues_repo_head_hexsha": "fb8ec51bc5805b070a7616e10b6aadd594915642", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cats_analysis/summary.py", "max_forks_repo_name": "TomMonks/cats-time-series", "max_forks_repo_head_hexsha": "fb8ec51bc5805b070a7616e10b6aadd594915642", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-25T11:55:10.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-25T11:55:10.000Z", "avg_line_length": 27.4743589744, "max_line_length": 76, "alphanum_fraction": 0.5739617359, "include": true, "reason": "import numpy", "num_tokens": 494}
|
import numpy as np
import torch
import math
class SVGD:
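    """
    Stein Variational Gradient Descent: moves a set of particles towards the
    target `distribution` by applying the kernelised Stein update computed in
    phi() through the supplied optimizer.
    """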
def __init__(self, distribution, kernel, optimizer):
self.P = distribution
self.K = kernel
self.optim = optimizer
def phi(self, X, *data):
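        # SVGD update direction: kernel-weighted score (driving term) plus the
        # kernel gradient (repulsive term), averaged over the particles.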
X = X.detach().requires_grad_(True)
log_prob = self.P.log_prob(X, *data)
score_func = torch.autograd.grad(log_prob.sum(), X)[0]
K_XX = self.K(X, X.detach())
grad_K = - torch.autograd.grad(K_XX.sum(), X)[0]
phi = (K_XX.detach().matmul(score_func) + grad_K) / X.size(0)
return phi
def step(self, particles, *data):
self.optim.zero_grad()
particles.grad = -self.phi(particles, *data)
self.optim.step()
class RBF_Kernel(torch.nn.Module):
r"""
RBF kernel
:math:`K(x, y) = exp(||x-v||^2 / (2h))
"""
def __init__(self, bandwidth=None):
super().__init__()
self.bandwidth = bandwidth
def _bandwidth(self, norm_sq):
# Apply the median heuristic (PyTorch does not give true median)
if self.bandwidth is None:
np_dnorm2 = norm_sq.detach().cpu().numpy()
h = np.median(np_dnorm2) / (2 * np.log(np_dnorm2.shape[0] + 1))
return np.sqrt(h).item()
else:
return self.bandwidth
def forward(self, X, Y):
dnorm2 = norm_sq(X, Y)
bandwidth = self._bandwidth(dnorm2)
gamma = 1.0 / (1e-8 + 2 * bandwidth ** 2)
K_XY = (-gamma * dnorm2).exp()
return K_XY
class IMQSteinKernel(torch.nn.Module):
r"""
IMQ (inverse multi-quadratic) kernel
:math:`K(x, y) = (\alpha + ||x-y||^2/h)^{\beta}`
"""
def __init__(self, alpha=0.5, beta=-0.5, bandwidth=None):
super(IMQSteinKernel, self).__init__()
assert alpha > 0.0, "alpha must be positive."
assert beta < 0.0, "beta must be negative."
self.alpha = alpha
self.beta = beta
self.bandwidth = bandwidth
def _bandwidth(self, norm_sq):
"""
Compute the bandwidth along each dimension using the median pairwise squared distance between particles.
"""
if self.bandwidth is None:
num_particles = norm_sq.size(0)
index = torch.arange(num_particles)
norm_sq = norm_sq[index > index.unsqueeze(-1), ...]
median = norm_sq.median(dim=0)[0]
assert median.shape == norm_sq.shape[-1:]
return median / math.log(num_particles + 1)
else:
return self.bandwidth
def forward(self, X, Y):
norm_sq = (X.unsqueeze(0) - Y.unsqueeze(1))**2 # N N D
assert norm_sq.dim() == 3
bandwidth = self._bandwidth(norm_sq) # D
base_term = self.alpha + torch.sum(norm_sq / bandwidth, dim=-1)
log_kernel = self.beta * torch.log(base_term) # N N D
return log_kernel.exp()
""" Helpers """
def norm_sq(X, Y):
XX = X.matmul(X.t())
XY = X.matmul(Y.t())
YY = Y.matmul(Y.t())
return -2 * XY + XX.diag().unsqueeze(1) + YY.diag().unsqueeze(0)
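# ---------------------------------------------------------------------------
# Minimal usage sketch (illustration only, not part of the original module).
# `ToyGaussian` is a hypothetical target exposing the log_prob(X, *data)
# interface that SVGD expects; particle and step counts are arbitrary.
if __name__ == '__main__':
    class ToyGaussian:
        """Isotropic standard normal target (normalising constant dropped)."""
        def log_prob(self, X, *data):
            return -0.5 * (X ** 2).sum(dim=-1)
    particles = torch.randn(50, 2) * 3.0  # start far from the target
    particles.requires_grad_(True)
    optimizer = torch.optim.Adam([particles], lr=1e-1)
    svgd = SVGD(ToyGaussian(), RBF_Kernel(), optimizer)
    for _ in range(200):
        svgd.step(particles)
    # the particle cloud should now roughly match the target (mean ~0, std ~1)
    print(particles.mean(dim=0), particles.std(dim=0))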
|
{"hexsha": "2427a00fec91b27edfc04282c9e20c540c622589", "size": 3011, "ext": "py", "lang": "Python", "max_stars_repo_path": "meta_learn/svgd.py", "max_stars_repo_name": "shlu2019/meta_learning_pacoh", "max_stars_repo_head_hexsha": "376349e66bdd782e3d06b4bac2ecb56a2a10bcf6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 23, "max_stars_repo_stars_event_min_datetime": "2020-02-13T12:45:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-07T20:37:14.000Z", "max_issues_repo_path": "meta_learn/svgd.py", "max_issues_repo_name": "JeremyAlain/meta_learning_pacoh", "max_issues_repo_head_hexsha": "b4c2c37d9715e74542bab556ac1f5d778cc3409c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-09-01T15:24:04.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-03T10:39:16.000Z", "max_forks_repo_path": "meta_learn/svgd.py", "max_forks_repo_name": "JeremyAlain/meta_learning_pacoh", "max_forks_repo_head_hexsha": "b4c2c37d9715e74542bab556ac1f5d778cc3409c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2020-04-15T09:43:22.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-18T13:37:38.000Z", "avg_line_length": 27.8796296296, "max_line_length": 112, "alphanum_fraction": 0.5808701428, "include": true, "reason": "import numpy", "num_tokens": 821}
|
/*
* test_output.cpp
*
* Created: 11/2/2017
* Author: Michael E. Tryby
* US EPA - ORD/NRMRL
*
* Unit testing for SWMM outputapi using Boost Test.
*/
#define BOOST_TEST_MODULE "output"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <boost/test/included/unit_test.hpp>
#include "swmm_output.h"
// NOTE: Reference data for the unit tests is currently tied to SWMM 5.1.7
#define DATA_PATH "./test_example1.out"
using namespace std;
// Custom test to check the minimum number of correct decimal digits between
// the test and the ref vectors.
boost::test_tools::predicate_result check_cdd_float(std::vector<float>& test,
std::vector<float>& ref, long cdd_tol)
{
float tmp, min_cdd = 10.0f;
    // TODO: What if the vectors aren't the same length?
std::vector<float>::iterator test_it;
std::vector<float>::iterator ref_it;
for (test_it = test.begin(), ref_it = ref.begin();
(test_it < test.end()) && (ref_it < ref.end());
++test_it, ++ref_it)
{
if (*test_it != *ref_it)
{
// Compute log absolute error
tmp = abs(*test_it - *ref_it);
if (tmp < 1.0e-7)
tmp = 1.0e-7f;
else if (tmp > 2.0)
tmp = 1.0f;
tmp = - log10f(tmp);
if (tmp < 0.0)
tmp = 0.0f;
if (tmp < min_cdd)
min_cdd = tmp;
}
}
return floor(min_cdd) >= cdd_tol;
}
boost::test_tools::predicate_result check_string(std::string test, std::string ref)
{
if (ref.compare(test) == 0)
return true;
else
return false;
}
BOOST_AUTO_TEST_SUITE (test_output_auto)
BOOST_AUTO_TEST_CASE(InitTest) {
SMO_Handle p_handle = NULL;
int error = SMO_init(&p_handle);
BOOST_REQUIRE(error == 0);
BOOST_CHECK(p_handle != NULL);
SMO_close(&p_handle);
}
BOOST_AUTO_TEST_CASE(CloseTest) {
SMO_Handle p_handle = NULL;
SMO_init(&p_handle);
int error = SMO_close(&p_handle);
BOOST_REQUIRE(error == 0);
BOOST_CHECK(p_handle == NULL);
}
BOOST_AUTO_TEST_CASE(InitOpenCloseTest) {
std::string path = std::string(DATA_PATH);
SMO_Handle p_handle = NULL;
SMO_init(&p_handle);
int error = SMO_open(p_handle, path.c_str());
BOOST_REQUIRE(error == 0);
SMO_close(&p_handle);
}
BOOST_AUTO_TEST_SUITE_END()
struct Fixture{
Fixture() {
std::string path = std::string(DATA_PATH);
error = SMO_init(&p_handle);
SMO_clearError(p_handle);
error = SMO_open(p_handle, path.c_str());
array = NULL;
array_dim = 0;
}
~Fixture() {
SMO_free((void**)&array);
error = SMO_close(&p_handle);
}
std::string path;
int error;
SMO_Handle p_handle;
float* array;
int array_dim;
};
BOOST_AUTO_TEST_SUITE(test_output_fixture)
BOOST_FIXTURE_TEST_CASE(test_getVersion, Fixture) {
int version;
error = SMO_getVersion(p_handle, &version);
BOOST_REQUIRE(error == 0);
BOOST_CHECK_EQUAL(51000, version);
}
BOOST_FIXTURE_TEST_CASE(test_getProjectSize, Fixture) {
int* i_array = NULL;
error = SMO_getProjectSize(p_handle, &i_array, &array_dim);
BOOST_REQUIRE(error == 0);
std::vector<int> test;
test.assign(i_array, i_array + array_dim);
// subcatchs, nodes, links, pollutants
const int ref_dim = 4;
int ref_array[ref_dim] = {8,14,13,2};
std::vector<int> ref;
ref.assign(ref_array, ref_array + ref_dim);
BOOST_CHECK_EQUAL_COLLECTIONS(ref.begin(), ref.end(), test.begin(), test.end());
SMO_free((void**)&i_array);
}
BOOST_FIXTURE_TEST_CASE(test_getFlowUnits, Fixture) {
int units = -1;
error = SMO_getFlowUnits(p_handle, &units);
BOOST_REQUIRE(error == 0);
BOOST_CHECK_EQUAL(0, units);
}
BOOST_FIXTURE_TEST_CASE(test_getPollutantUnits, Fixture) {
int* i_array = NULL;
error = SMO_getPollutantUnits(p_handle, &i_array, &array_dim);
BOOST_REQUIRE(error == 0);
std::vector<int> test;
test.assign(i_array, i_array + array_dim);
const int ref_dim = 2;
int ref_array[ref_dim] = {0, 1};
std::vector<int> ref;
ref.assign(ref_array, ref_array + ref_dim);
BOOST_CHECK_EQUAL_COLLECTIONS(ref.begin(), ref.end(), test.begin(), test.end());
SMO_free((void**)&i_array);
BOOST_CHECK(i_array == NULL);
}
BOOST_FIXTURE_TEST_CASE(test_getStartDate, Fixture) {
double date = -1;
error = SMO_getStartDate(p_handle, &date);
BOOST_REQUIRE(error == 0);
BOOST_CHECK_EQUAL(35796., date);
}
BOOST_FIXTURE_TEST_CASE(test_getTimes, Fixture) {
int time = -1;
error = SMO_getTimes(p_handle, SMO_reportStep, &time);
BOOST_REQUIRE(error == 0);
BOOST_CHECK_EQUAL(3600, time);
error = SMO_getTimes(p_handle, SMO_numPeriods, &time);
BOOST_REQUIRE(error == 0);
BOOST_CHECK_EQUAL(36, time);
}
BOOST_FIXTURE_TEST_CASE(test_getElementName, Fixture) {
char* c_array = NULL;
int index = 1;
error = SMO_getElementName(p_handle, SMO_node, index, &c_array, &array_dim);
BOOST_REQUIRE(error == 0);
std::string test(c_array);
std::string ref("10");
BOOST_CHECK(check_string(test, ref));
SMO_free((void**)&c_array);
}
BOOST_FIXTURE_TEST_CASE(test_getSubcatchSeries, Fixture) {
error = SMO_getSubcatchSeries(p_handle, 1, SMO_runoff_rate, 0, 10, &array, &array_dim);
BOOST_REQUIRE(error == 0);
const int ref_dim = 10;
float ref_array[ref_dim] = {0.0f,
1.2438242f,
2.5639679f,
4.524055f,
2.5115132f,
0.69808137f,
0.040894926f,
0.011605669f,
0.00509294f,
0.0027438672f};
std::vector<float> ref_vec;
ref_vec.assign(ref_array, ref_array + 10);
std::vector<float> test_vec;
test_vec.assign(array, array + array_dim);
BOOST_CHECK(check_cdd_float(test_vec, ref_vec, 2));
}
BOOST_FIXTURE_TEST_CASE(test_getSubcatchResult, Fixture) {
error = SMO_getSubcatchResult(p_handle, 1, 1, &array, &array_dim);
BOOST_REQUIRE(error == 0);
const int ref_dim = 10;
float ref_array[ref_dim] = {0.5f,
0.0f,
0.0f,
0.125f,
1.2438242f,
0.0f,
0.0f,
0.0f,
33.481991f,
6.6963983f};
std::vector<float> ref_vec;
ref_vec.assign(ref_array, ref_array + ref_dim);
std::vector<float> test_vec;
test_vec.assign(array, array + array_dim);
BOOST_CHECK(check_cdd_float(test_vec, ref_vec, 3));
}
BOOST_FIXTURE_TEST_CASE(test_getNodeResult, Fixture) {
error = SMO_getNodeResult(p_handle, 2, 2, &array, &array_dim);
BOOST_REQUIRE(error == 0);
const int ref_dim = 8;
float ref_array[ref_dim] = {0.296234f,
995.296204f,
0.0f,
1.302650f,
1.302650f,
0.0f,
15.361463f,
3.072293f};
std::vector<float> ref_vec;
ref_vec.assign(ref_array, ref_array + ref_dim);
std::vector<float> test_vec;
test_vec.assign(array, array + array_dim);
BOOST_CHECK(check_cdd_float(test_vec, ref_vec, 3));
}
BOOST_FIXTURE_TEST_CASE(test_getLinkResult, Fixture) {
error = SMO_getLinkResult(p_handle, 3, 3, &array, &array_dim);
BOOST_REQUIRE(error == 0);
const int ref_dim = 7;
float ref_array[ref_dim] = {4.631762f,
1.0f,
5.8973422f,
314.15927f,
1.0f,
19.070757f,
3.8141515f};
std::vector<float> ref_vec;
ref_vec.assign(ref_array, ref_array + ref_dim);
std::vector<float> test_vec;
test_vec.assign(array, array + array_dim);
BOOST_CHECK(check_cdd_float(test_vec, ref_vec, 3));
}
BOOST_FIXTURE_TEST_CASE(test_getSystemResult, Fixture) {
error = SMO_getSystemResult(p_handle, 4, 4, &array, &array_dim);
BOOST_REQUIRE(error == 0);
const int ref_dim = 14;
float ref_array[ref_dim] = {70.0f,
0.1f,
0.0f,
0.19042271f,
14.172027f,
0.0f,
0.0f,
0.0f,
0.0f,
14.172027f,
0.55517411f,
13.622702f,
2913.0793f,
0.0f};
std::vector<float> ref_vec;
ref_vec.assign(ref_array, ref_array + ref_dim);
std::vector<float> test_vec;
test_vec.assign(array, array + array_dim);
BOOST_CHECK(check_cdd_float(test_vec, ref_vec, 3));
}
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "f1cc5bab749bde6f77cbee59897216b4247014b8", "size": 9432, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tests/outfile/test_output.cpp", "max_stars_repo_name": "20-S2-2-C-Flood-Modelling/Stormwater-Management-Model", "max_stars_repo_head_hexsha": "c351f07713ae130f765b4e4a63def42dbc05998f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/outfile/test_output.cpp", "max_issues_repo_name": "20-S2-2-C-Flood-Modelling/Stormwater-Management-Model", "max_issues_repo_head_hexsha": "c351f07713ae130f765b4e4a63def42dbc05998f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/outfile/test_output.cpp", "max_forks_repo_name": "20-S2-2-C-Flood-Modelling/Stormwater-Management-Model", "max_forks_repo_head_hexsha": "c351f07713ae130f765b4e4a63def42dbc05998f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2020-09-23T01:53:10.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-23T01:53:10.000Z", "avg_line_length": 26.3463687151, "max_line_length": 91, "alphanum_fraction": 0.5619168787, "num_tokens": 2439}
|
# -*- coding: utf-8 -*-
"""test_energycondensation.py
py Test:
Energy condensation on multi-group maco- and micro- parameters.
The condensation procedure can be applied on vectors, such as absorption xs,
as well as on scattering matrices.
Created on Mon Apr 11 11:00:00 2022 @author: Dan Kotlyar
Last updated on Mon Apr 11 11:30:00 2022 @author: Dan Kotlyar
email: dan.kotlyar@me.gatech.edu
"""
import pytest
import numpy as np
from xsInterface.functions.energycondensation import EnergyCondensation
from xsInterface.examples.xs_data_condensation import MICRO_E, NG, ABS, NSF,\
flx, SP0
prdAbs, condE =\
EnergyCondensation(ndim=1, ng=NG, boundsE=MICRO_E, attr=ABS, flux=flx,
cutoffE=[0.625e-06])
# Expected values (pre-generated in advance)
refAbs = np.array([1.00889E-02, 1.15010E-01])
# Percent difference
diffAbs = 100*(1-prdAbs/refAbs)
def test_absorption():
"""Errors in input parameters"""
prdAbs, condE =\
EnergyCondensation(ndim=1, ng=NG, boundsE=MICRO_E, attr=ABS, flux=flx,
cutoffE=[0.625e-06])
# Expected values (pre-generated in advance)
refAbs = np.array([1.00889E-02, 1.15010E-01])
assert prdAbs == pytest.approx(refAbs, rel=0.01)
def test_nufission():
"""Errors in input parameters"""
prdNsf, condE =\
EnergyCondensation(ndim=1, ng=NG, boundsE=MICRO_E, attr=NSF, flux=flx,
cutoffE=[0.625e-06])
# Expected values (pre-generated in advance)
refNsf = np.array([7.22598E-03, 1.41791E-01])
assert prdNsf == pytest.approx(refNsf, rel=0.01)
def test_scattering():
"""Errors in input parameters"""
# matrix condensation
prdSct, condE =\
EnergyCondensation(ndim=2, ng=NG, boundsE=MICRO_E, attr=SP0, flux=flx,
cutoffE=[0.625e-06])
# Expected values (pre-generated in advance)
refSct = np.array([[4.80685E-01, 1.61547E-02], # 1-->1, 1-->2
[2.10527E-03, 1.18859E+00]]) # 2-->1, 2-->2
assert prdSct == pytest.approx(refSct, rel=0.01)
def test_badParameters():
"""Errors in input parameters"""
with pytest.raises(TypeError, match="Dimensions*"):
EnergyCondensation(ndim="BAD_DIM", ng=NG, boundsE=MICRO_E,
attr=ABS, flux=flx, cutoffE=[0.625e-06])
with pytest.raises(TypeError, match="Number*"):
EnergyCondensation(ndim=1, ng="BAD_NG", boundsE=MICRO_E,
attr=ABS, flux=flx, cutoffE=[0.625e-06])
with pytest.raises(TypeError, match="Energy boundaries*"):
EnergyCondensation(ndim=1, ng=NG, boundsE="NOT_ARRAY",
attr=ABS, flux=flx, cutoffE=[0.625e-06])
with pytest.raises(TypeError, match="Attribute*"):
EnergyCondensation(ndim=1, ng=NG, boundsE=MICRO_E,
attr="NOT_ARRAY", flux=flx, cutoffE=[0.625e-06])
with pytest.raises(TypeError, match="Flux*"):
EnergyCondensation(ndim=1, ng=NG, boundsE=MICRO_E,
attr=ABS, flux="NOT_ARRAY", cutoffE=[0.625e-06])
with pytest.raises(TypeError, match="Energy cutoffs*"):
EnergyCondensation(ndim=1, ng=NG, boundsE=MICRO_E,
attr=ABS, flux=flx, cutoffE="NOT_ARRAY")
with pytest.raises(TypeError, match="Attribute*"):
EnergyCondensation(ndim=1, ng=NG, boundsE=MICRO_E,
attr=[[1]], flux=flx, cutoffE=[0.625e-06])
with pytest.raises(TypeError, match="Flux*"):
EnergyCondensation(ndim=1, ng=NG, boundsE=MICRO_E,
attr=ABS, flux=[[1]], cutoffE=[0.625e-06])
with pytest.raises(TypeError, match="Energy cutoffs*"):
EnergyCondensation(ndim=1, ng=NG, boundsE=MICRO_E,
attr=ABS, flux=flx, cutoffE=[[1]])
with pytest.raises(TypeError, match="Attribute*"):
EnergyCondensation(ndim=2, ng=NG, boundsE=MICRO_E,
attr=[1], flux=flx, cutoffE=[0.625e-06])
with pytest.raises(ValueError, match="Dimensions*"):
EnergyCondensation(ndim=3, ng=NG, boundsE=MICRO_E,
attr=ABS, flux=flx, cutoffE=[0.625e-06])
with pytest.raises(ValueError, match="Number of energy groups*"):
EnergyCondensation(ndim=1, ng=0, boundsE=MICRO_E,
attr=ABS, flux=flx, cutoffE=[0.625e-06])
with pytest.raises(ValueError, match="Energy boundaries*"):
EnergyCondensation(ndim=1, ng=NG, boundsE=MICRO_E[1::],
attr=ABS, flux=flx, cutoffE=[0.625e-06])
with pytest.raises(ValueError, match="Attribute*"):
EnergyCondensation(ndim=1, ng=NG, boundsE=MICRO_E,
attr=ABS[1::], flux=flx, cutoffE=[0.625e-06])
with pytest.raises(ValueError, match="Flux*"):
EnergyCondensation(ndim=1, ng=NG, boundsE=MICRO_E,
attr=ABS, flux=flx[1::], cutoffE=[0.625e-06])
with pytest.raises(ValueError, match="Energy boundaries*"):
EnergyCondensation(ndim=1, ng=NG, boundsE=-1*MICRO_E,
attr=ABS, flux=flx, cutoffE=[0.625e-06])
with pytest.raises(ValueError, match="Energy cutoffs*"):
EnergyCondensation(ndim=1, ng=NG, boundsE=MICRO_E,
attr=ABS, flux=flx, cutoffE=[-0.625e-06])
with pytest.raises(ValueError, match="Energy boundaries*"):
        MICRO_E0 = MICRO_E.copy()  # copy so the shared boundaries array is not mutated
        MICRO_E0[10] = 10
EnergyCondensation(ndim=1, ng=NG, boundsE=MICRO_E0,
attr=ABS, flux=flx, cutoffE=[0.625e-06])
|
{"hexsha": "bce154900b7ad4cbbe5ede4c5ae2a9679876bf02", "size": 5594, "ext": "py", "lang": "Python", "max_stars_repo_path": "xsInterface/tests/test_energycondensation.py", "max_stars_repo_name": "CORE-GATECH-GROUP/xs-interface", "max_stars_repo_head_hexsha": "b391c0b3f5690a602f2f69a4cce137460d345413", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "xsInterface/tests/test_energycondensation.py", "max_issues_repo_name": "CORE-GATECH-GROUP/xs-interface", "max_issues_repo_head_hexsha": "b391c0b3f5690a602f2f69a4cce137460d345413", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2022-02-01T13:49:37.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-02T17:31:24.000Z", "max_forks_repo_path": "xsInterface/tests/test_energycondensation.py", "max_forks_repo_name": "CORE-GATECH-GROUP/xs-interface", "max_forks_repo_head_hexsha": "b391c0b3f5690a602f2f69a4cce137460d345413", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.0463576159, "max_line_length": 78, "alphanum_fraction": 0.6135144798, "include": true, "reason": "import numpy", "num_tokens": 1626}
|
from json import loads
import json
import numpy as np
import math
from statistics import mean
#import re
elem_type = ['화속성', '명속성', '수속성', '암속성']
class SkillTree():
skill_db = None
passive_db = None
buffer_db = None
@classmethod
def initstatic(cls):
with open("./skill_db.json","r") as f:
cls.skill_db = json.load(f)
with open("./passive.json", "r") as f:
cls.passive_db = json.load(f)
with open('./buffer_db.json', "r") as f:
cls.buffer_db = json.load(f)
with open('./factor_dict.json',"r") as f:
cls.factor_db = json.load(f)
@classmethod
def get_neo_jobid(cls, classid, jobid):
class_skills = cls.skill_db[classid]
for k, v in class_skills.items():
if k == 'name':
continue
if v['pjid'] == jobid:
return k
return None
def build_skill_enchant(self):
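        """
        Register the character's learned skills into self.skill_enchant by
        required level: matched passives go into 'skills' (special passives
        such as 크리티컬 히트 or 히트엔드 get hand-built value tables), actives
        are initialised through init_factor, and TP reinforcement passives
        ('~강화') multiply the damage of their matching actives.
        """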
myskill_dict = self.owner.sk_dict['skill']['style']
myskill_list = myskill_dict['active'] + myskill_dict['passive']
if self.owner.jobname == '眞 메카닉':
if self.has_skill(myskill_list, '솔라 모듈 시스템'):
self.inner_data['skillpassive'] = (True, '스패로우 팩토리', '솔라 모듈 시스템')
else:
self.inner_data['skillpassive'] = (False, '스패로우 팩토리', '솔라 모듈 시스템')
elif self.owner.jobname == '眞 엘레멘탈마스터':
if self.has_skill(myskill_list, '썬더 스트라이크'):
self.inner_data['skillpassive'] = (True, '썬더 콜링', '썬더 스트라이크')
else:
self.inner_data['skillpassive'] = (False, '썬더 콜링', '썬더 스트라이크')
elif self.owner.jobname == '眞 배틀메이지':
if self.has_skill(myskill_list, '황룡난무'):
self.inner_data['skillpassive'] = (True, '황룡천공', '황룡난무')
else:
self.inner_data['skillpassive'] = (False, '황룡천공', '황룡난무')
for skill in self.skill_list:
skid = skill['skillId']
for myskill in myskill_list:
if skid == myskill['skillId']:
req = myskill['requiredLevel']
level = myskill['level']
                    # non-true-awakening (비진각) characters: +1 level for the level-50 skill
if req == 50:
level += 1
self.skill_enchant[str(req)]['skills'].append([skill, level, 0, level])
for myskill in myskill_list:
skid = myskill['skillId']
name = myskill['name']
req = myskill['requiredLevel']
level = myskill['level']
self.init_factor(name, skid, req, level)
for myskill in myskill_list:
skid = myskill['skillId']
name = myskill['name']
req = myskill['requiredLevel']
lvl = myskill['level']
if name == '크리티컬 히트':
rlist = list(range(0, 21))
data = {
'name':name, 'skillId':skid,
'skillType':'passive', 'type':'크리티컬히트',
'values':rlist, 'req':20
}
self.skill_enchant[str(req)]['skills'].append([data, lvl, 0, lvl])
elif name == '무기의 극의':
rlist = [0] + list(range(2, 40))
data = {
'name':name, 'skillId':skid,
'skillType':'passive', 'type':'없음',
'values' : rlist, 'req':15
}
self.skill_enchant[str(req)]['skills'].append([data, lvl, 0, lvl])
elif name == '히트엔드':
rlist = [0] + list(range(81, 141, 2))
data = {
'name':name, 'skillId':skid,
'skillType':'passive', 'type':'없음',
'values' : rlist, 'req':15
}
self.skill_enchant[str(req)]['skills'].append([data, lvl, 0, lvl])
elif name == '인법 : 잔영 남기기':
rlist = [0] + list(range(100, 122, 2))
data = {
'name':name, 'skillId':skid,
'skillType':'passive', 'type':'없음',
'values' : rlist, 'req':35
}
self.skill_enchant[str(req)]['skills'].append([data, lvl, 0, lvl])
elif name == '디바인 퍼니쉬먼트':
rlist = [0]
for i in range(1,41):
rlist.append(math.floor(2.5*i+10)*24)
data = {'name':name, 'skillId':skid,
'type':'패시브스탯', 'values':rlist, 'req':req, 'subtype':'오라'}
self.skill_enchant[str(req)]['skills'].append([data, lvl, 0, lvl])
elif name in ['매거진 드럼', '니트로 모터']:
rlist = [0] + list(range(2, 42, 2))
data = {
'name':name, 'skillId':skid,
'skillType':'passive', 'type':'없음',
'values' : rlist, 'req':15
}
self.skill_enchant[str(req)]['skills'].append([data, lvl, 0, lvl])
elif name == '유탄 마스터리':
rlist = [0] + list(range(10, 201, 10))
data = {
'name':name, 'skillId':skid,
'skillType':'passive', 'type':'없음',
'values' : rlist, 'req':20
}
#print(name)
self.skill_enchant[str(req)]['skills'].append([data, lvl, 0, lvl])
elif req >= 50 and name[-2:] == '강화':
skname = name[:-3]
if skname == '류탄':
#print(name, skname)
si = self.get_skill_instance('actives', 'G-35L 섬광류탄')
if si is not None:
si['dam'] *= (1 + 0.1*lvl)
si['invest'].append('TP:'+str(lvl))
si = self.get_skill_instance('actives', 'G-18C 빙결류탄')
if si is not None:
si['dam'] *= (1 + 0.1*lvl)
si['invest'].append('TP:'+str(lvl))
si = self.get_skill_instance('actives', 'G-14 파열류탄')
if si is not None:
si['dam'] *= (1 + 0.1*lvl)
si['invest'].append('TP:'+str(lvl))
if skname == '류심':
si = self.get_skill_instance('actives', '류심 쾌')
if si is not None:
si['dam'] *= (1 + 0.1*lvl)
si['invest'].append('TP:'+str(lvl))
si = self.get_skill_instance('actives', '류심 강')
if si is not None:
si['dam'] *= (1 + 0.1*lvl)
si['invest'].append('TP:'+str(lvl))
si = self.get_skill_instance('actives', '류심 승')
if si is not None:
si['dam'] *= (1 + 0.1*lvl)
si['invest'].append('TP:'+str(lvl))
sil = self._get_skill_instance_all('actives', skname, None)
if len(sil) > 0:
for si in sil:
si = si[0]
if skname == '흑염의 칼라':
si = self.get_skill_instance('actives', '흑염검')
if si is not None:
si['dam'] *= (1 + 0.1*lvl)
elif self.owner.classname == '격투가(여)' and skname == '크레이지 발칸':
hit = 8 + lvl
si['dam'] *= (hit/13)
elif self.owner.classname == '거너(남)' and skname == '더블 건호크':
si['dam'] *= (1 + 0.1462*lvl)
else:
si['dam'] *= (1 + 0.1*lvl)
si['invest'].append('TP:'+str(lvl))
for fitem in self.factor_data:
if fitem['skillName'] == '평타':
penchant = fitem['penchant']
poison = fitem['poison']
data = {'name':'평타', 'skillId':None, 'req':1, 'dam':1, 'cool':1, 'cooltime': fitem['cool'], 'factor': fitem['factor'], 'longterm':fitem['longterm'], 'oriname':None, 'invest':[]}
data['penchant'] = penchant if penchant != '' else None
data['poison'] = poison if poison != '' else None
self.skill_enchant['1']['actives'].append([data, 100, 0, 100, 100])
def init_factor(self, name, skid, req, lvl):
blevel = 0
base_cool = 1
base_inc = 0
jobname = self.owner.jobname
"""
        Job-specific special handling
"""
        # handling for the 무기의 극의 (ultimate weapon mastery) passive
if jobname == '眞 웨펀마스터' and name.find('마스터리') >= 0:
blevel = 1
        # cooldown reduction for 프렌지 (Frenzy) and 갈증 (Thirst)
elif jobname == '眞 버서커' and name in ['블러드 러스트', '레이징 퓨리', '고어 크로스', '블러드 소드', '블러디 레이브', '아웃레이지 브레이크']:
base_cool = 0.8
        # 중화기개조 (heavy firearm overhaul)
elif jobname == '眞 런처' and req not in [50, 85, 100]:
base_inc = 1
skillpassive = self.inner_data.get('skillpassive')
for fitem in self.factor_data:
if name == fitem['skillName']:
if skillpassive is not None:
skp_on, skp_name, skp_passive = skillpassive
if name == skp_name:
if skp_on is True and fitem['skillOriName'] != skp_passive:
continue
if skp_on is False and fitem['skillOriName'] == skp_passive:
continue
if blevel == 0:
blevel = fitem['baselevel']
if lvl < int(0.1 * blevel):
break
mlevel = fitem['maxlevel']
factor = fitem['factor']
longterm = fitem['longterm']
poison = fitem['poison']
penchant = fitem['penchant']
talisman = fitem['talisman']
weptypeinfo = fitem['weptypeinfo']
oriname = fitem['skillOriName']
cool = fitem['cool']
if name in ['썬더 스트라이크', '썬더 콜링'] or self.owner.jobname == '眞 소환사':
coolfixed = False
elif oriname in ['환영폭쇄', '금기 : 독사굴']:
coolfixed = False
else:
coolfixed = fitem['coolFixed']
data = {'name':name, 'skillId':skid, 'req':req, 'dam':1, 'cool':base_cool, 'oriname':oriname, 'invest':[]}
data['factor'] = factor if factor != '' else None
data['cooltime'] = cool if cool != '' else None
data['longterm'] = longterm if longterm != '' else None
data['poison'] = poison if poison != '' else None
data['penchant'] = penchant if penchant != '' else None
data['coolFixed'] = coolfixed
if talisman != '':
(tname, tfactor, dammode) = talisman[1:-1].split(',')
tal_list = self.owner.t_dict['talismans']
if tal_list is not None:
isFound = False
for tal in tal_list:
mytname = tal['talisman']['itemName']
if mytname.find(tname) >= 0:
isFound = True
break
if isFound == True:
if dammode == 'f':
if data.get('longterm') != None:
del data['longterm']
data['factor'] = int(tfactor)
else:
if data.get('factor') != None:
del data['factor']
data['longterm'] = int(tfactor)
if weptypeinfo != '':
(wtype, wfactor,dammode) = weptypeinfo[1:-1].split(',')
if wtype == self.owner.char_stat['inventory'].weapon_info['type']:
if dammode == 'f':
if data.get('longterm') != None:
del data['longterm']
data['factor'] = int(wfactor)
else:
if data.get('factor') != None:
del data['factor']
data['longterm'] = int(wfactor)
self.skill_enchant[str(req)]['actives'].append([data, lvl, base_inc, blevel, mlevel])
def __init__(self, char, test_mode = False):
self.test_mode = test_mode
self.owner = char
self.inner_data = {}
classid = char.classid
jobid = char.jobid
self.skill_enchant = {
'1':{'level':0, 'cool':1, 'dam':1, 'skills':[], 'actives':[]},
'5':{'level':0, 'cool':1, 'dam':1, 'skills':[], 'actives':[]},
'10':{'level':0, 'cool':1, 'dam':1, 'skills':[], 'actives':[]},
'15':{'level':0, 'cool':1, 'dam':1, 'skills':[], 'actives':[]},
'20':{'level':0, 'cool':1, 'dam':1, 'skills':[], 'actives':[]},
'25':{'level':0, 'cool':1, 'dam':1, 'skills':[], 'actives':[]},
'30':{'level':0, 'cool':1, 'dam':1, 'skills':[], 'actives':[]},
'35':{'level':0, 'cool':1, 'dam':1, 'skills':[], 'actives':[]},
'40':{'level':0, 'cool':1, 'dam':1, 'skills':[], 'actives':[]},
'45':{'level':0, 'cool':1, 'dam':1, 'skills':[], 'actives':[]},
'48':{'level':0, 'cool':1, 'dam':1, 'skills':[], 'actives':[]},
'50':{'level':0, 'cool':1, 'dam':1, 'skills':[], 'actives':[]},
'60':{'level':0, 'cool':1, 'dam':1, 'skills':[], 'actives':[]},
'70':{'level':0, 'cool':1, 'dam':1, 'skills':[], 'actives':[]},
'75':{'level':0, 'cool':1, 'dam':1, 'skills':[], 'actives':[]},
'80':{'level':0, 'cool':1, 'dam':1, 'skills':[], 'actives':[]},
'85':{'level':0, 'cool':1, 'dam':1, 'skills':[], 'actives':[]},
'95':{'level':0, 'cool':1, 'dam':1, 'skills':[], 'actives':[]},
'100':{'level':0, 'cool':1, 'dam':1, 'skills':[], 'actives':[]},
}
try:
self.skill_data = self.skill_db[classid][jobid]['skills']
self.factor_data = self.factor_db[classid][jobid]
if char.buffer is not None:
self.skill_list = self.passive_db[classid][jobid] + self.buffer_db[classid][jobid]
else:
self.skill_list = self.passive_db[classid][jobid]
except:
raise
self.build_skill_enchant()
if self.test_mode is True:
with open('skill_data.json', 'w') as f:
json.dump(self.skill_data, f)
def get_skills_by_level(self, typ, req):
return self.skill_enchant[str(req)][typ]
def _get_skill_instance(self, typ, name, req):
if req is None:
for lv, item in self.skill_enchant.items():
skills = item[typ]
for skill in skills:
data = skill[0]
if data['name'].replace(' ','') == name.replace(' ',''):
return skill
if typ == 'actives' and isinstance(data['oriname'], str) and data['oriname'].replace(' ','') == name.replace(' ',''):
return skill
else:
skills = self.skill_enchant[str(req)][typ]
for skill in skills:
data = skill[0]
if data['name'].replace(' ','') == name.replace(' ',''):
return skill
if typ == 'actives' and isinstance(data['oriname'], str) and data['oriname'].replace(' ','') == name.replace(' ',''):
return skill
return None
def _get_skill_instance_all(self, typ, name, req):
skill_instances = []
if req is None:
for lv, item in self.skill_enchant.items():
skills = item[typ]
for skill in skills:
data = skill[0]
if data['name'].replace(' ','') == name.replace(' ',''):
skill_instances.append(skill)
if typ == 'actives' and isinstance(data['oriname'], str) and data['oriname'].replace(' ','') == name.replace(' ',''):
skill_instances.append(skill)
else:
skills = self.skill_enchant[str(req)][typ]
for skill in skills:
data = skill[0]
if data['name'].replace(' ','') == name.replace(' ',''):
skill_instances.append(skill)
if typ == 'actives' and isinstance(data['oriname'], str) and data['oriname'].replace(' ','') == name.replace(' ',''):
skill_instances.append(skill)
return skill_instances
def handle_special_case(self):
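        """
        Pre-compute job-specific modifiers (히트엔드, 잔영, 체인러쉬, 사격강화,
        류탄강화, ...) from the current skill levels and cache them in
        self.inner_data for later damage calculations.
        """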
if self.owner.jobname == '眞 로그':
si = self.get_skill('skills', '히트엔드', req = 15)
data = si[0]
lv = min(si[1] + si[2], len(data['values']) - 1)
hitend_add = 0 if '히트엔드추가' not in self.inner_data.keys() else self.inner_data['히트엔드추가']
hitend = data['values'][lv]
mywtype = self.inventory.weapon_info['type']
if mywtype == '단검':
hitend *= (4.25 + hitend_add)
else:
hitend *= (4.74 + hitend_add)
self.inner_data['히트엔드'] = hitend
elif self.owner.jobname == '眞 쿠노이치':
si = self.get_skill('skills', '인법 : 잔영 남기기', req = 35)
data = si[0]
lv = min(si[1] + si[2], len(data['values']) - 1)
mirage = data['values'][lv] - 100
self.inner_data['잔영'] = mirage
"""
si = self.get_skill('actives', '인법 : 육도윤회', req = 40)
data = si[0]
print(data)
"""
elif self.owner.jobname == '眞 엘븐나이트':
si = self.get_skill('actives', '체인러쉬', req = 30)
data = si[0]
lv = min(si[1] + si[2], 16)
chainrush = (lv * 0.5 + 12.5)
si = self.get_skill('skills', '대자연의 가호', req = 48)
data = si[0]
lv = min(si[1] + si[2], 40)
nature = (lv * 2)
self.inner_data['체인러쉬'] = (chainrush * 6, nature)
elif self.owner.jobname == '眞 스핏파이어' and self.owner.classname == '거너(남)':
si = self.get_skill('skills', '매거진 드럼', req = 15)
lv = min(si[1] + si[2], 20)
self.inner_data['사격강화'] = lv*2
si = self.get_skill('skills', '유탄 마스터리', req = 20)
if si is not None:
data = si[0]
lv = min(si[1] + si[2], 20)
fragup = data['values'][lv]
self.inner_data['류탄강화'] = fragup
else:
self.inner_data['류탄강화'] = 0
elif self.owner.jobname == '眞 스핏파이어' and self.owner.classname == '거너(여)':
si = self.get_skill('skills', '니트로 모터', req = 15)
lv = min(si[1] + si[2], 20)
self.inner_data['사격강화'] = lv*2
si = self.get_skill('skills', '유탄 마스터리', req = 20)
if si is not None:
data = si[0]
lv = min(si[1] + si[2], 20)
fragup = data['values'][lv]
self.inner_data['류탄강화'] = fragup
else:
self.inner_data['류탄강화'] = 0
elif self.owner.jobname == '眞 데몬슬레이어':
si = self.get_skill('skills', '탐욕의 번제', req = 48)
if si is not None:
data = si[0]
lv = min(si[1] + si[2], 40)
self.inner_data['탐욕의 번제'] = data['values'][lv]
else:
self.inner_data['탐욕의 번제'] = 0
elif self.owner.jobname == '眞 빙결사':
si = self.get_skill('skills', '아이스크래프트', req = 75)
if si is not None:
data = si[0]
lv = min(si[1] + si[2], 40)
self.inner_data['얼음창'] = (1 + 1.5 * data['values'][lv]/100) / (1 + data['values'][lv]/100)
else:
self.inner_data['얼음창'] = 1
elif self.owner.jobname == '眞 레인저' and self.owner.classname == '거너(여)':
si = self.get_skill('skills', '체인 인라이튼', req = 95)
if si is not None:
data = si[0]
lv = min(si[1] + si[2], 40)
self.inner_data['체인 인라이튼'] = (1 + data['values'][lv]/100)
else:
self.inner_data['체인 인라이튼'] = 1
elif self.owner.jobname == '眞 검귀':
si = self.get_skill('skills', '악귀현신', req = 95)
if si is not None:
self.inner_data['악귀현신'] = 1
else:
self.inner_data['악귀현신'] = 0
elif self.owner.jobname == '眞 다크템플러':
si = self.get_skill('skills', '베스퍼스', req = 75)
if si is not None:
data = si[0]
lv = min(si[1] + si[2], 40)
self.inner_data['베스퍼스'] = (1 + data['values'][lv]/100)
else:
self.inner_data['베스퍼스'] = 1
"""
elif self.owner.jobname == '眞 스트리트파이터' and self.owner.classname == '격투가(여)':
si = self.get_skill('actives', '독문무공', req = 85)
if si is not None:
lv = si[1] + si[2]
self.inner_data['독문스증'] = (1 + (2 * lv / 100))
elif self.owner.jobname == '眞 스트라이커' and self.owner.classname == '격투가(남)':
si = self.get_skill('actives', '화염의 각', req = 50)
if si is not None:
lv = si[1] + si[2]
self.inner_data['화각스증'] = (1 + ((4 * lv - 2) / 100))
"""
def get_skill_instance(self, typ, name, req = None):
skill = self._get_skill_instance(typ, name, req)
if skill is not None:
return skill[0]
else:
return None
def get_skill(self, typ, name, req = None):
skill = self._get_skill_instance(typ, name, req)
return skill
"""
def increase_skill_level(self, typ, name, inc, req = None):
skill = self._get_skill_instance(typ, name, req)
if skill is not None:
skill[2] += inc
else:
raise Exception
"""
def increase_skill_level(self, typ, name, inc, req = None, base = False):
skill_list = self._get_skill_instance_all(typ, name, req)
if len(skill_list) == 0:
return False
for skill in skill_list:
if base is True:
skill[1] += inc
else:
skill[2] += inc
return True
def increase_skill_level_range(self, typ, lvrange, inc, base = False):
ret = []
minlv, maxlv = lvrange
minlv = int(minlv)
maxlv = int(maxlv)
for lv, val in self.skill_enchant.items():
lvl = int(lv)
if minlv <= lvl <= maxlv:
skills = val[typ]
for skill in skills:
ret.append(skill[0]['name'])
if base is True:
skill[1] += int(inc)
else:
skill[2] += int(inc)
return ret
def finalize_levelup(self):
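        """
        Apply the per-bracket level bonuses accumulated in skill_enchant to
        every registered skill and active, and record them in
        char_stat['레벨링구간'].
        """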
self.owner.char_stat['레벨링구간'] = {}
for lv, val in self.skill_enchant.items():
lvup = val['level']
#print("최종", lv, lvup)
for skill in val['skills']:
skill[2] += lvup
for skill in val['actives']:
skill[2] += lvup
self.owner.char_stat['레벨링구간'][lv] = lvup
def has_skill(self, target, skname):
for skill in target:
if skill['name'] == skname:
return True
return False
    # Exorcist (퇴마사): prayer-bead damage for 진격의 황룡
    # 스패로우 팩토리 / 솔라 모듈 시스템 (drop Sparrow damage when 솔라 모듈 시스템 is taken)
    # female Spitfire: poison damage calculation
    # full damage-model change when the talisman is equipped (템페스터, 게일포스): sustained -> burst
"""
def prepare_buffer_passive(self):
if self.buffer is None or self.buffer.get('main_stat') is None
return
for se in self.skill_enchant.values():
for sk in se['skills']:
data = sk[0]
lv = sk[1]
v = data['values'][lv]
typ = data['type']
if typ == '패시브스탯':
if data.get('subtype') != '오라':
char_stat['아포'][data['name']]=[v]
char_stat['아포']['스탯'] += v
#print ('- 패시브기본(아포)', data['name'], v)
else:
char_stat['아포'][data['name']] = 0
"""
def finalize_passive(self):
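        """
        Apply every passive's leveled values to char_stat (speed, critical,
        elemental enhance/resist), propagate cooldown-reduction passives to the
        registered actives, accumulate the passive damage multipliers, and
        store the leveling gains in self.inner_data.
        """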
char_stat = self.owner.char_stat
mywep_type = char_stat['inventory'].weapon_info['type']
mywep_name = char_stat['inventory'].weapon_info['name']
passive_inc = 1
passive_orig = 1
elem_orig = max([char_stat['화속성강화'], char_stat['명속성강화'], char_stat['수속성강화'], char_stat['암속성강화']])
self.inner_data['쿨패시브'] = 1
for req, se in self.skill_enchant.items():
for pas in se['skills']:
data = pas[0]
lv = pas[1]
base = pas[3]
sname = data['name']
typ = data['type']
stype = data.get('skillType')
wep_type = data.get('wep_type')
mastery = data.get('mastery')
if wep_type is not None and wep_type != mywep_type:
continue
try:
v3 = data['values'][base-1]
except:
print(base)
v3 = 0
lv = min(len(data['values']) - 1, lv)
if mastery is True and '무기의 극의' in self.inner_data.keys():
v1 = data['values'][lv + self.inner_data['무기의 극의']['기본레벨']]
nlevel = min(len(data['values']) - 1, lv + self.inner_data['무기의 극의']['마스터리레벨'])
else:
v1 = data['values'][lv]
nlevel = min(len(data['values']) - 1, lv + pas[2])
v2 = data['values'][nlevel]
v1 = 0 if isinstance(v1, str) else v1
v2 = 0 if isinstance(v2, str) else v2
v3 = 0 if isinstance(v3, str) else v3
if '초월의 룬' in self.inner_data.keys():
if sname == '쇼타임' and typ == '쿨감':
#print (sname, v2, self.inner_data['초월의 룬']['쿨감'])
v2 *= self.inner_data['초월의 룬']['쿨감']
elif sname == '속성 마스터리' and typ == '데미지':
v2 *= self.inner_data['초월의 룬']['데미지']
elif sname == '마력 증폭' and typ == '크리티컬히트':
v2 += self.inner_data['초월의 룬']['크리티컬']
v = v2 - v1
#print (pdata['name'], typ, v1, v2, curlvl, curlvl + pas[2] + lvup, pas[2], lvup)
if typ == '공이속':
char_stat['공격속도'] += v
char_stat['이동속도'] += v
elif typ == '이속':
char_stat['이동속도'] += v
elif typ == '공속':
char_stat['공격속도'] += v
elif typ == '캐속':
char_stat['캐스팅속도'] += v
elif typ == '크리티컬히트':
char_stat['물리크리티컬'] += v
char_stat['마법크리티컬'] += v
elif typ.find('저항') >= 0:
elem = typ.split('속성')[0][-1]
if elem == '든':
for et in elem_type:
char_stat[et + '저항'] += v
else:
char_stat[elem + '속성저항'] += v
elif typ.find('강화') >= 0:
if (data.get('skillType') == 'passive' and sname != '원소 융합') or sname == '아이스 로드':
inc = v
else:
inc = v2
elem = typ.split('속성')[0][-1]
if elem == '든' or elem == '사':
for et in elem_type:
char_stat[et + '강화'] += inc
t = '모든'
else:
t = elem
char_stat[elem + '속성강화'] += inc
if char_stat['패시브'].get(sname) is not None:
char_stat['패시브'][sname].update({'레벨':lv + pas[2], t+'속성강화':v2, 'base':v3, 'req':int(req)})
else:
char_stat['패시브'][sname] = {'레벨':lv + pas[2], t+'속성강화':v2, 'base':v3, 'req':int(req)}
elif typ == '쿨감':
if sname == '윌 드라이버' and mywep_type != '토템':
continue
                    # TODO: apply the cooldown reduction per individual skill instead of per level
self.inner_data['쿨패시브'] *= (1 - v2/100)
for clv, cinfo in self.skill_enchant.items():
if sname in ['강인한 신념', '병기 숙련', '원소 폭격', '윌 드라이버', '세라픽 페더', '블러드', 'HS-1 친구들', 'G-오퍼레이터', '쇼타임', '휘몰아치는 질풍의 봉 마스터리', '거병 마스터리', '장창 숙련', '화염의 각'] and clv in ['50', '85', '100']:
continue
askill_list = cinfo['actives']
for askill in askill_list:
data = askill[0]
asname = data['name']
if asname in ['블랙 망토', '변이 파리채', '대왕 파리채', '롤리팝 크러쉬']:
continue
if sname == '인술' and asname not in ['화염구', '환영 다중 수리검', '화염선풍', '나선 대차륜 쿠나이', '열화천도', '두꺼비유염탄', '비기 : 염무개화', '마환 풍마 수리검', '야마타오로치', '아마테라스']:
continue
if data.get('cool') is not None:
data['cool'] *= (1 - v2/100)
#cinfo['cool'] *= (1 - v2/100)
#print('쿨감', sname, v2, lv, nlevel)
elif typ == '데미지':
if sname in ['자각의 실마리', '각성의 실마리']:
self.skill_enchant['85']['dam'] *= (1 + v2/100)
continue
elif sname == '대자연의 가호':
self.skill_enchant['85']['dam'] *= (1 + v2/100)
self.skill_enchant['50']['dam'] *= (1 + v2/100)
elif sname == '천수천안':
self.inner_data['천수천안'] = 14.5 + v2
elif sname == '현자의 돌':
self.inner_data[sname] = v2
char_stat['패시브'][sname] = {'레벨':lv + pas[2], '데미지증가':v2 , 'base':v3, 'req':int(req)}
#char_stat['패시브']['호문쿨루스'] = {'레벨':lv + pas[2], '데미지증가':v2-5}
continue
if sname == '코어 블레이드 마스터리' and mywep_name == '프로젝트 : 오버코어':
v2 *= 2
passive_orig *= (1 + v1/100)
passive_inc *= (1 + v2/100)
if char_stat['패시브'].get(sname) is not None:
char_stat['패시브'][sname].update({'레벨':lv + pas[2], '데미지증가':v2, 'base':v3, 'req':int(req)})
else:
char_stat['패시브'][sname] = {'레벨':lv + pas[2], '데미지증가':v2, 'base':v3, 'req':int(req)}
elif typ == '패시브스탯' and self.owner.isBuffer is True:
main_stat_type = self.inventory.main_stat['type']
buffopt = self.buffoption
if data.get('subtype') != '오라':
inc = v2
self.inventory.main_stat['value'] += inc
#if buffopt['type'] == '아포':
#print ('지능증가', '패시브:'+sname, inc, v)
#print(sname, pas[1], pas[2])
char_stat['버프패시브'][sname] = v2
if buffopt['type'] == '아포':
char_stat[main_stat_type] += v
else:
aura_name = buffopt['오라']['name']
aura_val = buffopt['오라']['value']
if sname == aura_name:
inc = v2 + aura_val
buffopt['오라']['value'] = inc
else:
inc = v2
char_stat['버프패시브'][sname] = v2
self.inventory.main_stat['value'] += inc
if buffopt['type'] == '아포':
char_stat[main_stat_type] += inc
elif typ.find('추가') >= 0:
buff_type = self.buffoption['type']
if buff_type == '아포':
self.buffoption['보조스킬'][sname] = v2
elem_inc = max([char_stat['화속성강화'], char_stat['명속성강화'], char_stat['수속성강화'], char_stat['암속성강화']])
self.inner_data['레벨링패시브증가량'] = ((passive_inc/passive_orig) - 1) * 100
self.inner_data['레벨링속강증가량'] = elem_inc - elem_orig
self.owner.char_stat['패시브레벨링증가량'] = self.inner_data['레벨링패시브증가량']
self.owner.char_stat['속강패시브레벨링증가량'] = self.inner_data['레벨링속강증가량']
self.inner_data['패시브총합'] = passive_inc
def finalize_active(self):
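        """
        Resolve each active's final level, scale its damage by the required-
        level bracket's per-level growth and the bracket multiplier, and
        compute the final cooldown (respecting coolFixed skills).
        """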
for lv, items in self.skill_enchant.items():
dam = items['dam']
cool = items['cool']
for skill_items in items['actives']:
act = skill_items[0]
ori_level = skill_items[1]
lvup = skill_items[2]
mlevel = skill_items[3]
maxlevel = skill_items[4]
if ori_level + lvup > maxlevel:
lvup = (ori_level + lvup) - maxlevel
elif ori_level + lvup < mlevel:
lvup = (ori_level + lvup) - mlevel
if 1 <= int(lv) <= 20:
inc = 1 + (0.01 * lvup)
elif 25 <= int(lv) <= 45:
inc = 1 + (0.02 * lvup)
elif int(lv) == 50:
inc = 1 + (0.06 * lvup)
elif 60 <= int(lv) <= 70:
inc = 1 + (0.03 * lvup)
elif 75 <= int(lv) <= 80:
inc = 1 + (0.04 * lvup)
elif int(lv) == 85:
inc = 1 + (0.12 * lvup)
elif int(lv) == 95:
inc = 1 + (0.06 * lvup)
elif int(lv) == 100:
inc = 1 + (0.19 * lvup)
else:
inc = 1
act['final_level'] = ori_level + skill_items[2]
act['plus_level'] = skill_items[2]
act['dam'] *= dam * inc
acool = act.get('cooltime')
if acool is not None and acool != '':
if 'coolFixed' in act.keys() and act['coolFixed']:
act['cool'] = 1
act['cooltime'] = float(act['cooltime'])
else:
act['cool'] *= cool
if isinstance(act['cooltime'], list):
try:
if act['final_level'] <= len(act['cooltime']):
act['cooltime'] = float(act['cooltime'][act['final_level']]) * act['cool']
else:
act['cooltime'] = float(act['cooltime'][0]) * act['cool']
except:
act['cooltime'] = float(act['cooltime'][0]) * act['cool']
pass
else:
act['cooltime'] = float(act['cooltime']) * act['cool']
def apply_baselevel(self):
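        """
        Fold pending level-up bonuses into each passive's stored base level,
        offset the town-applied passive levels, and divide out stat/mastery
        multipliers that are already baked into char_stat.
        """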
char_stat = self.owner.char_stat
mywep_name = char_stat['inventory'].weapon_info['name']
town_inc = self.inventory.inner_data.get('마을적용패시브')
if town_inc is None:
town_inc = 0
for req, se in self.skill_enchant.items():
lvup = se['level']
if req != '95':
add_inc = town_inc
else:
add_inc = 0
for pas in se['skills']:
data = pas[0]
sname = data['name']
typ = data['type']
wep_type = data.get('wep_type')
if sname == '원소 융합':
self.owner.char_stat['암속성저항'] += 10
if typ == '패시브스탯' and self.owner.isBuffer is True:
buff_type = self.buffoption['type']
if buff_type == '아포' and data.get('subtype') != '오라':
pas[1] += pas[2] + lvup
pas[2] = -1 * (lvup + add_inc)
continue
if (data.get('skillType') == 'passive' and sname != '용독술') or sname == '아이스 로드':
if wep_type is not None and wep_type != self.inventory.weapon_info['type']:
continue
if typ == '데미지' and data['mastery'] is True:
char_dam_type = self.owner.inner_data.get('dam_type')
if self.owner.jobname == '眞 웨펀마스터':
si = self.get_skill('skills', '무기의 극의', req = 15)
lv = si[1] + si[2] + add_inc + lvup
lv = si[0]['values'][lv] + 1
else:
lv = pas[1] + pas[2] + lvup + add_inc
lv = min(len(data['values']) - 1, lv)
v = data['values'][lv]
if sname == '코어 블레이드 마스터리' and mywep_name == '프로젝트 : 오버코어':
v *= 2
self.owner.char_stat[char_dam_type] /= (1 + v/100)
self.owner.char_stat[char_dam_type] = int(self.owner.char_stat[char_dam_type])
if typ not in ['데미지', '쿨감', '없음']:
pas[1] += pas[2] + lvup
pas[2] = -1 * (lvup + add_inc)
elif data['name'] == '차원일치':
lv = pas[1] + pas[2] + (lvup + add_inc)
v = data['values'][lv]
self.owner.char_stat['힘'] /= (1 + v/100)
self.owner.char_stat['지능'] /= (1 + v/100)
self.owner.char_stat['힘'] = int(self.owner.char_stat['힘'])
self.owner.char_stat['지능'] = int(self.owner.char_stat['지능'])
def calculate_skills(self, deal_time, cooldown, pf, f, d, def_dual, exclude = None, active_rate = 1):
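        """
        Estimate every active skill's contribution over a deal_time window:
        derive the cast count from its (reduced) cooldown, apply job-specific
        count/time corrections, and combine the skill factor with the
        sustained-damage term.
        """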
skill_list = []
ori_deal_time = deal_time
ori_active_rate = active_rate
count_add = 0
for lv, items in self.skill_enchant.items():
#if self.owner.classTypeNeo is True and lv == '50':
# continue
if self.owner.classTypeNeo is False and lv in ['95', '100']:
continue
if exclude is not None and int(lv) in exclude:
continue
for skill_items in items['actives']:
act = skill_items[0]
name = act['name']
dam = act['dam']
cooltime = act.get('cooltime')
factor = act.get('factor')
longterm = act.get('longterm')
oriname = act.get('oriname')
if oriname is None or oriname == '':
skname = name
imgname = None
else:
if oriname.find('$') >= 0:
_oriname = oriname.split('$')
skname = _oriname[0]
imgname = _oriname[1]
else:
skname = oriname
imgname = None
if factor is not None and factor != '':
factor = int(factor)
else:
factor = 0
cooltime = 0
if longterm is not None and longterm != '':
longterm = int(longterm)
else:
longterm = 0
if self.owner.jobname == '眞 스트리트파이터' and self.owner.classname == '격투가(여)':
penchant = int(act['penchant'])
penchant *= (self.owner.char_stat['버프'][2]/100)
poison = int(act['poison']) + penchant
if longterm == 0:
factor += poison
else:
longterm += poison
fdeal = dam * factor
ldeal = dam * longterm
coolFixed = act.get('coolFixed')
if coolFixed == False and cooldown is not None:
cooltime *= (1 - cooldown/100)
if skname == '트윙클 스매쉬' and self.inner_data.get('홈런 퀸') is not None:
count_add = math.ceil(ori_deal_time / cooltime) + 1
elif skname == '악귀참수' and self.inner_data.get('천쇄참수') is not None:
count_add = math.ceil(ori_deal_time / cooltime) + 1
elif (skname == '엘레멘탈 필드' or skname == '폭풍의 숨결') and self.inner_data.get('고정쿨감') is not None:
cooltime -= self.inner_data['고정쿨감']
elif skname == 'FSC-7' and self.inner_data.get('라이트웨이트') is not None:
count_add = 1
elif skname in ['플레게의 정수', '성화']:
cooltime *= 0.85
try:
skill_data = {'이름':skname,
'skimage': imgname,
'레벨':act['final_level'],
'레벨링':act['plus_level'],
'데미지증가':round(dam, 2),
'쿨타임':round(cooltime, 1),
'원계수':round(factor),
'스킬계수':round(fdeal),
'지속계수':round(ldeal),
'requiredLevel': int(lv),
'invest': act['invest'],
'coolfixed':coolFixed
}
if act.get('cool') is not None:
skill_data['쿨타임 증감'] = round(act['cool'], 4)
except:
print(skname)
raise
skill_list.append(skill_data)
skill_list.sort(key=(lambda x: len(x['invest'])), reverse=True)
act_cool_mean = self.inner_data['평균쿨감']
point_skill = 3
for act in skill_list:
deal_time = ori_deal_time
active_rate = ori_active_rate
name = act['이름']
cooltime = act['쿨타임']
factor = act['스킬계수']
longterm = act['지속계수']
lv = act['requiredLevel']
if cooltime != 0:
if self.owner.classname == '크리에이터':
if name in ['빙하시대', '링크', '아이스 스톤', '플레임 허리케인', '윈드 스톰', '윈드 프레스', '운석 낙하', '파이어 월']:
cooltime /= 2
if name != '플레임 허리케인':
factor /= 2
if deal_time <= 30:
if self.owner.jobname in ['眞 메카닉', '眞 소울브링어', '眞 스핏파이어', '眞 카오스'] or self.owner.classname in ['크리에이터']:
time = deal_time
elif self.owner.jobname not in ['眞 엘븐나이트', '眞 그래플러', '眞 아수라', '眞 버서커', '眞 마도학자', '眞 인파이터', '眞 퇴마사', '眞 어벤저', '眞 빙결사', '眞 팔라딘', '眞 드래곤나이트', '眞 뱅가드', '眞 드래고니안 랜서', '眞 듀얼리스트', '眞 스트라이커', '眞 엘레멘탈마스터', '眞 요원']:
if lv >= 80:
time = deal_time
elif 30 < lv <= 75:
time = deal_time * 0.95
else:
time = deal_time * 0.85
else:
if lv <= 35:
time = deal_time * 0.95
else:
time = deal_time
else:
if self.owner.jobname in ['眞 메카닉', '眞 소울브링어', '眞 스핏파이어', '眞 카오스'] or self.owner.classname in ['크리에이터']:
time = deal_time
elif self.owner.jobname not in ['眞 엘븐나이트', '眞 그래플러', '眞 아수라', '眞 버서커', '眞 마도학자', '眞 인파이터', '眞 퇴마사', '眞 어벤저', '眞 빙결사', '眞 팔라딘', '眞 드래곤나이트', '眞 뱅가드', '眞 드래고니안 랜서', '眞 듀얼리스트', '眞 스트라이커', '眞 엘레멘탈마스터', '眞 요원']:
if lv >= 80:
time = deal_time
elif 30 < lv <= 75:
time = deal_time * 0.9
else:
time = deal_time * 0.8
else:
if lv <= 35:
time = deal_time * 0.9
else:
time = deal_time
if act_cool_mean < 0.8 and lv <= 75:
if 30 <= lv <= 45:
coolf = 4
elif lv < 30:
coolf = 3
else:
coolf = 4.5
diff = 1 - act_cool_mean
diff /= coolf
coolcut = 1 - diff
time = time * coolcut
if lv not in [50, 85, 100] and point_skill != 0:
if point_skill == 3:
tmargin = 0.1
else:
tmargin = 0.5
#if deal_time == 25 and cooldown != 20:
# print(deal_time, time, math.ceil(deal_time / cooltime), math.ceil(time / cooltime),cooltime, name)
if math.ceil((deal_time - tmargin) / cooltime) != math.ceil(time / cooltime):
#print(deal_time, time, math.ceil((deal_time - tmargin) / cooltime), math.ceil(time / cooltime),cooltime, name)
time = deal_time - tmargin
point_skill -= 1
if self.owner.jobname == '眞 검귀' and name == '귀신보':
cooltime -= self.inner_data['악귀현신']
deal_count = math.ceil(time / cooltime)
if active_rate != 1 and deal_count > 2:
_ar = 1 - active_rate
_ar *= (math.ceil((deal_count - 1)/5))
active_rate = 1 - _ar
time *= active_rate
deal_count = math.ceil(time / cooltime)
#print(name, deal_count)
else:
deal_count = 0
time = deal_time * 0.95 * active_rate
time = math.ceil(time*10) / 10
#print(name, time)
if self.owner.jobname == '眞 런처' and self.owner.classname == '거너(여)':
if name == 'FSC-7':
deal_count += count_add
elif self.owner.jobname == '眞 검귀':
if name == '귀신보':
deal_count += count_add
elif self.owner.jobname == '眞 배틀메이지':
if name == '쇄패':
deal_count += count_add
elif self.owner.jobname == '眞 스트리트파이터' and self.owner.classname == '격투가(남)':
if name == '바늘 투척':
deal_count += (3 * math.ceil(deal_time/35))
elif self.owner.jobname == '眞 스핏파이어': #and self.owner.classname == '거너(남)':
if name.find('류탄') >= 0:
deal_count = min(5 * math.ceil(deal_time/35), deal_count)
elif self.owner.jobname == '眞 웨펀마스터':
plimit = self.owner.char_stat['패시브']['극한의 경지']['데미지증가']
if name.find('류심') >= 0:
deal_count = min(4 * math.ceil(deal_time/35), deal_count)
#factor /= (1 + plimit/100)
factor *= 1.1111
elif name == '발도':
deal_count = min(3 * math.ceil(deal_time/26), deal_count)
#elif name == '환영검무':
# deal_count = min(1 * math.ceil(deal_time/26), deal_count)
if name in ['환영검무', '극 귀검술 : 유성락', '극 귀검술 : 심검', '맹룡단공참', '차지 크래시']:
factor /= (1 + plimit/100)
elif self.owner.classname == '다크나이트':
s_tal_list = self.owner.char_stat['탈리스만스킬']
talSel = self.owner.char_stat.get('탈선착용')
if name == '일루젼 슬래쉬':
deal_count = math.floor(deal_time/11)
factor *= 2
elif name in ['다크 웨이브 폴', '차지 익스플로전', '다크 버스트', '다크 플레임 소드']:
if talSel is not None:
if name in s_tal_list[:-1]:
if (s_tal_list.index(name)) == 0:
_cooltime = cooltime * 0.3
else:
_cooltime = cooltime * 0.4
deal_count = math.ceil(time/_cooltime)
else:
if name in s_tal_list:
deal_count = math.ceil(deal_time/11)
factor *= 2
elif name in ['웨이브 스핀', '다크 소드', '다크 레이브']:
factor *= 2
elif name in ['다크 브레이크', '디아볼릭 슬래쉬']:
deal_count = math.floor(deal_time/22)
factor *= 2
elif lv in [85, 100]:
factor *= 2
"""
elif self.owner.jobname == '眞 쿠노이치':
if name in ['야마타오로치', '마환 풍마수리검', '아마테라스', '비기 : 염무개화', '두꺼비유염탄' '열화천도']:
deal_count = min(2 * math.ceil(deal_time/35) , deal_count)
"""
elif self.owner.jobname == '眞 메카닉' and self.owner.classname == '거너(여)':
if name == 'G-S.P. 팔콘':
deal_count += 2
elif self.owner.jobname == '眞 뱅가드':
if name == '임팩트 스매쉬':
pass
#deal_count *= 2
elif self.owner.jobname == '眞 소환사':
if name == '채찍질':
deal_count = 3
if def_dual is not None:
ef_max, ef_min = def_dual
if deal_count > 0:
max_count = math.ceil(deal_count / 2)
min_count = math.floor(deal_count / 2)
if max_count == min_count:
max_count += 0.5
min_count -= 0.5
e_avg = (ef_max * max_count + ef_min * min_count)/deal_count
#print (name, ef_max, ef_min, e_avg, max_count, min_count, deal_count)
countf = (1 + e_avg/100)
timef = (1 + ((ef_max * 3 + ef_min * 2) / 5) / 100)
else:
countf = timef = 1
if self.owner.jobname == '眞 소환사':
"""
if name in ['썬더콜링(글레어린)', '화염폭검술(플레임 헐크)', '식선(라모스)', '헵타 아이스(아퀘리스)', '블랙 스웜(데드 멀커)', '식락', '필살검 천귀살']:
countf *= 1.15
elif name in ['전설소환 : 월영', '포식의 델라리온']:
countf *= 0.957
elif name == '초고밀도 레이저(에체베리아)':
countf *= 1.3
elif name == '채찍질':
countf *= 0.68
elif name == '지고의 정령왕':
countf *= 1.3
if name in ['전설소환 : 갈애의 라모스', '계약소환 : 정복자 카시야스']:
timef *= 1.15
elif name in ['정령소환 : 글레어린', '정령소환 : 데드 멀커', '정령소환 : 아퀘리스', '정령소환 : 플레임 헐크']:
timef *= 1.725
elif name == '정령소환 : 정령왕 에체베리아':
timef *= 1.95
elif name in ['계약소환 : 기갑 호도르', '계약소환 : 프리트', '계약소환 : 검은기사 산도르', '계약소환 : 루이즈 언니!', '계약소환 : 타우킹 쿠루타']:
timef *= 1.5
"""
if name not in ['포식의 델라리온', '채찍질', '정령 희생']:
countf *= 1.045
timef *= 1.045
elif self.owner.jobname == '眞 메카닉' and self.owner.classname == '거너(남)':
if name in ['메카 드롭', 'Ez-10 카운터어택', 'TX-45 A-Team', 'RX-60 트랩러너', 'Ez-8 카운트다운', 'RX-78 랜드러너', '공중 전투 메카 : 템페스터']:
countf *= 1.4
elif name == '스패로우 팩토리' and self.has_skill(self.owner.sk_dict['skill']['style']['passive'], '솔라 모듈 시스템') is False:
countf *= 1.4
elif self.owner.jobname == '眞 메카닉' and self.owner.classname == '거너(여)':
if name in ['Ez-8 카운트다운', 'RX-78 랜드러너', 'Ez-10 카운터 어택']:
countf *= 1.4
elif name in ['트랜스폼 : G-2 롤링썬더', 'G 마그네틱']:
countf *= 0.76923
deal = deal_count * factor * countf + time * longterm * timef
if self.owner.jobname == '眞 다크템플러':
if name.find('리버레이트') >= 0:
ves = self.inner_data['베스퍼스']
deal /= ves
elif self.owner.jobname == '眞 로그':
hitend = self.inner_data['히트엔드']
if name == '데스 허리케인':
deal *= (0.92 + (0.08 * hitend/100))
else:
deal *= (0.8 + (0.2 * hitend/100))
elif self.owner.jobname == '眞 사령술사':
if name != '니콜라스 강령':
deal *= 1.2
elif self.owner.jobname == '眞 쿠노이치':
mirage = self.inner_data['잔영']
if name != '쿠사나기의 검':
deal *= (1 + mirage/100)
if name in ['야마타오로치', '마환 풍마수리검', '아마테라스', '비기 : 염무개화', '두꺼비유염탄', '열화천도']:
record = min(deal_count, math.ceil((ori_deal_time - 10) / 25))
"""
if (ori_deal_time == 25 and cooldown == None):
print(name, record, deal_count, (ori_deal_time - 10) / 25)
"""
deal += (deal/deal_count * 0.8) * record
elif self.owner.jobname == '眞 넨마스터' and self.owner.classname == '격투가(남)':
if name in ['사자후', '기공장', '기호지세', '금뇌호 : 심판의 넨수', '넨 스피어', '광호천지파', '기공환', '제황나선격']:
deal *= 1.6
elif self.owner.jobname == '眞 엘븐나이트':
chainrush, nature = self.inner_data['체인러쉬']
if name == '체인 스트라이크':
deal *= (1 + (chainrush+30)/100)
elif lv not in [50, 85, 100]:
deal *= (1 + chainrush/100)
elif self.owner.jobname == '眞 스트리트파이터' and self.owner.classname == '격투가(남)':
if name in ['니들 스핀', '천붕지괴', '더티 배럴', '광폭혈쇄']:
tpas = self.inner_data['천수천안']
ori = tpas - 14.5
deal /= (1 + ori/100)
deal *= (1 + tpas/100)
"""
elif self.owner.jobname == '眞 스트라이커' and self.owner.classname == '격투가(남)':
deal *= self.inner_data['화각스증']
"""
elif self.owner.jobname == '眞 그래플러' and self.owner.classname == '격투가(여)':
if name in ['자이언트 스윙', '싸이클론 어택', '와일드 캐넌 스파이크']:
deal *= 1.6
elif self.owner.jobname == '眞 스핏파이어':
if name in ['버스터 샷', '네이팜 탄', '피스톨 카빈', '교차 사격']:
md = self.inner_data['사격강화']
deal *= (1 + md/100)
elif name in ['G-18C 빙결류탄', 'G-35L 섬광류탄', 'G-14 파열류탄']:
fd = self.inner_data['류탄강화']
deal *= (1 + fd/100)
elif self.owner.jobname == '眞 마도학자':
if name in ['플로레 컬라이더', '반중력 기동장치']:
deal *= (1 + (self.inner_data['현자의 돌'] - 5)/100)
elif name in ['메가 드릴', '성난 불길 가열로', '용암지대 생성물약']:
deal *= (1 + (self.inner_data['현자의 돌'] + 68)/100)
else:
deal *= (1 + self.inner_data['현자의 돌']/100)
elif self.owner.jobname == '眞 데몬슬레이어':
if name in ['검마격살', '혈화난무']:
deal /= (1 + self.inner_data['탐욕의 번제']/100)
elif self.owner.jobname == '眞 빙결사':
if name in ['샤드 매그넘', '핀포인트 러시', '아이스 빅 해머', '아이스 크래시', '회전투창', '크리스탈 블레이드', '설화연창']:
deal *= self.inner_data['얼음창']
elif self.owner.jobname == '眞 레인저' and self.owner.classname == '거너(여)':
if name == '소닉 스파이크':
deal /= self.inner_data['체인 인라이튼']
elif self.owner.jobname == '眞 소드마스터':
if name == '반월':
deal *= 1.23
elif name == '섬광':
deal *= 1.073
elif self.owner.jobname == '眞 퇴마사':
if name not in ['무쌍격', '거선풍', '단층지격', '공참타']:
deal *= 1.18
#print(deal, f, round(deal * f))
act['최종계수'] = round(deal)
act['적용패시브'] = pf
act['점수기준'] = round(deal * pf * f)
act['데미지'] = round(deal * pf * f * 1.5 * (1 - d/100))
act['사용횟수'] = deal_count
act['적용딜타임'] = time
"""
if deal_time == 25 and cooldown == 20:
for skill in skill_list:
if skill['이름'] in ['낙뢰부', '오망성진','오행(五行):벽천지격']:
print(skill)
"""
return skill_list
def calculate_deal(self, d_info, synergy):
char_stat = self.owner.char_stat
add_stat = 0
s_inc = 1
if synergy is not None:
for k, v in synergy.items():
if k in ['증추', '크증추', '모공', '추댐', '힘지', '물마독공']:
char_stat[k] += v
elif k == '스탯':
add_stat = v
elif k == '시너지':
s_inc *= 1.34
elif k == '시너지스공' and char_stat['char_type'] == '시너지':
s_inc *= v
else:
s_inc = 1.34
        # TODO: do the '증추'/'크증추' calculation only once, somewhere else
if char_stat.get('증추') is not None and char_stat.get('크증추') is not None:
char_stat['증댐'] += char_stat['증추']
char_stat['크증댐'] += char_stat['크증추']
del char_stat['증추']
del char_stat['크증추']
char_stat['스공'] = round((char_stat['스공'] - 1),2) * 100
main_stat = max([char_stat['힘'], char_stat['지능']])
base_stat = self.owner.base_stat
if self.inner_data.get('수문장모드') == 'dual':
elem_list = [char_stat['화속성강화'], char_stat['명속성강화'], char_stat['수속성강화'], char_stat['암속성강화']]
elem_list.sort()
elem_max = sum(elem_list[2:]) / 2
elem_min = sum(elem_list[:2]) / 2
elem_max_inc = (1 + (elem_max + 11 - (d_info['몹속저'] - d_info['버퍼속저깍'])) * 0.0045)
elem_min_inc = (1 + (elem_min + 11 - (d_info['몹속저'] - d_info['버퍼속저깍'])) * 0.0045)
#print (elem_max, elem_min)
ef_max = (elem_max_inc - 1) * 100
ef_min = (elem_min_inc - 1) * 100
ef = 0
char_stat['듀얼수문장'] = (ef_max, ef_min)
            char_stat['속강딜증가'] = (ef_max + ef_min) / 2
char_stat['적용속강'] = (elem_max_inc + elem_min_inc) / 2
else:
elem_cur = max([char_stat['화속성강화'], char_stat['명속성강화'], char_stat['수속성강화'], char_stat['암속성강화']])
elem_inc = (1 + (elem_cur + 11 - (d_info['몹속저'] - d_info['버퍼속저깍'])) * 0.0045)
ef = char_stat['속강딜증가'] = (elem_inc - 1) * 100
char_stat['적용속강'] = elem_cur
if self.owner.classTypeNeo is False:
if self.owner.bonus_disable is False:
type_inc = d_info['비진각보정']
else:
type_inc = 1
else:
type_inc = 1
#print('type_inc', type_inc)
char_dam_type = self.owner.inner_data.get('dam_type')
char_dam = char_stat[char_dam_type]
if char_dam > 5000:
            # Guard against API errors
char_dam = 0
if self.owner.buffer is None:
buff_dam = d_info['축공격력']
else:
buff_dam = 0
self.inner_data['적용공격력'] = char_dam + buff_dam
sf = self.inner_data['적용공격력'] * type_inc * (1 + char_stat['스공']/100)
sf *= (1 + char_stat['증댐']/100) * (1 + char_stat['크증댐']/100)
sf *= (1 + char_stat['추댐']/100) * (1 + char_stat['모공']/100)
sf *= (1 + char_stat['물마독공']/100) * (1 + char_stat['지속댐']/100)
#sf *= (1 + char_stat['패시브딜증가']/100)
sf *= (1 + ef/100)
if d_info.get('지역버프') is not None:
self.inner_data['지역버프'] = int((main_stat - base_stat) * d_info['지역버프']['factor'] + d_info['지역버프']['inc'])
d_result = char_stat[d_info['name']+'분석결과'] = {}
for key, val in d_info['공격유형'].items():
f = sf
d_result[key] = {}
dstat = 0
for typ in val['반영스탯']:
dstat += d_info[typ]
d_result[key]['캐릭터공격력'] = char_dam
d_result[key]['버프공격력'] = buff_dam
d_result[key]['적용공격력'] = char_dam + buff_dam
d_result[key]['지역버프스탯'] = self.inner_data['지역버프']
d_result[key]['버퍼버프스탯'] = dstat
if self.owner.buffer is None:
d_result[key]['적용스탯'] = dstat + self.inner_data['지역버프'] + main_stat + add_stat
else:
d_result[key]['적용스탯'] = main_stat + self.inner_data['지역버프']
f *= (1 + ((1 + char_stat['힘지']/100) * d_result[key]['적용스탯']/250)) * s_inc
if self.inventory.inner_data.get('정령왕') is True:
pinc = val['정령왕']
else:
pinc = 1
damp = val.get('투함포')
if damp is not None:
pinc += (damp - 1)
f *= pinc
addf = val.get('추가딜증')
if addf is not None:
f *= addf
if self.owner.jobname == '眞 스트리트파이터' and self.owner.classname == '격투가(여)':
pf = self.inner_data['패시브총합']
else:
f *= (1 + char_stat['버프'][2]/100)
pf = self.inner_data['패시브총합']
cooldown = val.get('쿨감')
d_result[key]['스킬'] = self.calculate_skills(val['시간'], cooldown, pf, f, val['방어력'], char_stat.get('듀얼수문장'), exclude = val['제외스킬'], active_rate = val['가동률'])
d_result[key]['스킬'].sort(key=(lambda x: x['최종계수']), reverse=True)
total_deal = sum(sd['데미지'] for sd in d_result[key]['스킬'])
total_factor = sum(sd['점수기준'] for sd in d_result[key]['스킬'])
awkd = 0
if self.owner.classTypeNeo is True:
if self.owner.classname == '다크나이트' or self.owner.classname == '크리에이터':
for sd in d_result[key]['스킬']:
if sd['requiredLevel'] == 85 and sd['사용횟수'] > 0:
total_deal -= sd['데미지']
total_factor -= sd['점수기준']
elif sd['requiredLevel'] == 100 and sd['사용횟수'] > 0:
awkd += sd['데미지']
else:
for sd in d_result[key]['스킬']:
if sd['requiredLevel'] == 50 and sd['사용횟수'] > 0:
total_deal -= sd['데미지']
total_factor -= sd['점수기준']
elif sd['requiredLevel'] in [85, 100] and sd['사용횟수'] > 0 and sd['coolfixed'] is False:
awkd += sd['데미지']
#print(total_factor)
d_result[key]['기준점수'] = round(total_factor / 100000000000)
d_result[key]['기준딜'] = round(total_deal)
for key, val in d_info['공격유형'].items():
_formula = val['계산식']
formula = _formula.split('+')
score = 0
deal = 0
for _f in formula:
f = _f.split('*')
if f[0] == _f:
score += d_result[f[0]]['기준점수']
else:
score += d_result[f[0]]['기준점수'] * float(f[1])
deal += d_result[f[0]]['기준딜']
d_result[key]['점수'] = score
d_result[key]['총합딜'] = deal
score_list = {}
total_score = 0
for key, val in d_info['공격유형'].items():
ratio = val['반영률']
total_score += (d_result[key]['총합딜'] * ratio)
score_list[key] = round(d_result[key]['총합딜'])
#score_list['총점'] = round(total_score)
d_result['점수표'] = {'scores': score_list}
if self.owner.isBuffer is False and d_info['name'] == '시로코':
vsum = 0
d = {}
d2 = {}
for k, v in score_list.items():
if k == '지속딜':
vsum += (v / 10000000000000 / 40)
#d[k] = v / 4000000000000
#d2[k] = (v - awkd) / 4000000000000
std = d[k] = v / 4000000000000
std2 = d2[k] = (v - awkd) / 4000000000000
elif k == '지속정자극딜':
vsum += (v / 10000000000000 / 48)
elif k == '그로기딜':
vsum += (v / 10000000000000 / 25)
d[k] = v / 2500000000000
d2[k] = (v - awkd) / 2500000000000
elif k == '정자극딜':
vsum += (v / 10000000000000 / 30)
elif k == '80초딜':
d[k] = v / 8000000000000
d2[k] = (v - awkd) / 8000000000000
elif k == '60초딜':
d[k] = v / 6000000000000
d2[k] = (v - awkd) / 6000000000000
if char_stat['char_type'] == '시너지':
vsum *= 1.0816
namep = char_stat.get('모험가명성')
if namep is not None:
char_stat['points'] = (namep-6068)/15
char_stat['points'] += vsum * 500
d3 = {}
for k, v in d.items():
d3[k] = d[k]/d2[k]
for k, v in d.items():
d[k] /= std
d2[k] /= std2
#print(d)
#print(d2)
#print(d3)
m1 = mean(d2.values())
m2 = mean(d3.values())
#print(m1)
#print(m2)
m3 = m2 - m1
if m3 < 0:
m3 = 0
elif m3 > 1:
m3 = 1
#print(m3)
char_stat['딜밸런스'] = m3
del d_result['80초딜']
del d_result['60초딜']
def calculate_buff(self):
buff_type = self.buffoption['type']
buff_name = self.buffoption['name']
buff_base = self.buffoption['base']
main_stat_type = self.inventory.main_stat['type']
#print (buff_type, buff_name, 'buffoption', self.buffoption)
d_result = self.owner.char_stat['버프분석결과']
if buff_type == '아포':
si = self.get_skill('skills', buff_name, req = 50)
apod = data = si[0]
lv = si[1] + si[2]
alv = lv
v = data['values'][lv]
v += self.buffoption['계수']
v *= self.buffoption['힘지']
s = self.owner.char_stat[main_stat_type]
#print ('마을힘', s, '추정치', self.inventory.main_stat['value'], '아포레벨', lv, si[2])
s = self.inventory.main_stat['value']
res = v * (1 + s/buff_base)
#d_result[buff_type].append((50, buff_name, math.ceil(v), s, res))
sl = self.get_skills_by_level('skills', 100)
si = sl[0]
data = si[0]
lv = si[1] + si[2]
name = data['name']
res1 = round(res * (1 + (lv + 23)/100))
res2 = round(res * (1 + (lv + 9)/100))
d_result[buff_type] = {'name':name, '계수':math.ceil(v), '적용스탯':{'type':main_stat_type, 'value':s}, 'value':res1, 'level':alv, 'level100':lv, 'value2':res2, 'factors': apod['values'], 'buffopts':self.buffoption}
else:
d_result[buff_type] = {}
sl = self._get_skill_instance_all('skills', buff_name, req = 30)
for si in sl:
data = si[0]
lv = si[1] + si[2]
if data['type'] == '축공격력':
v = data['values'][lv]
v *= self.buffoption['물공']
s = self.inventory.main_stat['value']
res = v * (1 + s/buff_base)
if self.owner.classname == '프리스트(여)':
res *= 1.15
elif self.owner.classname == '마법사(여)':
res *= (1.15 * 1.25)
res = round(res)
d_result[buff_type]['공'] = {'name':buff_name, '계수':math.ceil(v), '적용스탯':{'type':main_stat_type, 'value':s}, 'value':res, 'level':lv, 'factors': data['values'], 'inc':self.buffoption['물공']}
else:
v = data['values'][lv]
v *= self.buffoption['힘지']
s = self.inventory.main_stat['value']
res = v * (1 + s/buff_base)
if self.owner.classname == '프리스트(여)':
res *= 1.15
elif self.owner.classname == '마법사(여)':
res *= (1.15 * 1.25)
res = round(res)
d_result[buff_type]['힘지'] = {'name':buff_name, '계수':math.ceil(v), '적용스탯':{'type':main_stat_type, 'value':s}, 'value':res, 'level':lv, 'factors': data['values'], 'inc':self.buffoption['힘지'], 'buffopts':self.buffoption}
if '보조스킬' in self.buffoption.keys():
d_result['보조스킬'] = self.buffoption['보조스킬']
|
{"hexsha": "d952835cf78e6d8b97084d2a0d9fa4bec1481a20", "size": 70537, "ext": "py", "lang": "Python", "max_stars_repo_path": "skilltree.py", "max_stars_repo_name": "dwlee08/dnfp-analyzer", "max_stars_repo_head_hexsha": "4ae4ec4d32c08288b997c83655a0c97c7d347216", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "skilltree.py", "max_issues_repo_name": "dwlee08/dnfp-analyzer", "max_issues_repo_head_hexsha": "4ae4ec4d32c08288b997c83655a0c97c7d347216", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "skilltree.py", "max_forks_repo_name": "dwlee08/dnfp-analyzer", "max_forks_repo_head_hexsha": "4ae4ec4d32c08288b997c83655a0c97c7d347216", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-12-10T06:24:34.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-10T06:24:34.000Z", "avg_line_length": 39.1220188575, "max_line_length": 237, "alphanum_fraction": 0.4218069949, "include": true, "reason": "import numpy", "num_tokens": 21378}
|
"""
This file contains code for a 2 layer fully connected neural network for classification; written from scratch.
"""
import torch
import numpy as np
from core.activations import activations
class SimpleFullyConnected(object):
"""
Simple 2 layer (single hidden) fully connected network for classification.
This model uses a Cross-Entropy loss with L2 weight decay, relu activations for the hidden layer, and
softmax activation for the output layer.
"""
def __init__(self, in_features, hidden_size, n_classes, weight_init, dtype=torch.float,
device=torch.device('cpu')):
"""
Args:
in_features: int,
number of input features into the model.
hidden_size: int,
number of neurons in the single hidden layer.
n_classes: int,
number of output classes to choose from.
weight_init: str,
weight initialization strategy.
One of ('Kaiming')
dtype: torch.dtype,
data type for model parameters.
device: torch.device,
device to use for this model.
"""
self.in_features = in_features
self.hidden_size = hidden_size
self.n_classes = n_classes
self.dtype = dtype
self.device = device
# Model parameters
self.W_1 = torch.empty(self.hidden_size, self.in_features, dtype=dtype, device=self.device)
self.b_1 = torch.empty(self.hidden_size, dtype=dtype, device=self.device)
self.W_2 = torch.empty(self.n_classes, self.hidden_size, dtype=dtype, device=self.device)
self.b_2 = torch.empty(self.n_classes, dtype=dtype, device=self.device)
self.initialize_params(weight_init=weight_init)
# Gradients for model parameters
self.gradient_b_2 = torch.zeros(self.b_2.size(), device=self.device)
self.gradient_W_2 = torch.zeros(self.W_2.size(), device=self.device)
self.gradient_b_1 = torch.zeros(self.b_1.size(), device=self.device)
self.gradient_W_1 = torch.zeros(self.W_1.size(), device=self.device)
# Computation graph
# - forward
self.batch_probabilities = None
self.hidden = None
self.hidden_pre_activation = None
self.x = None
# - backward
self.gradient_unnormalized_log_probs = None
self.gradient_hidden = None
self.gradient_hidden_pre_activation = None
def forward(self, x, return_type='probs'):
"""
        Forward propagation function.
Args:
x: Tensor of shape=(batch_size, n_features),
                data to forward propagate through the network.
return_type: str,
type of output desired. One of ('unnormalized_log_probs', 'log_probs', 'probs').
Returns:
depending on return_type, one of:
probabilities: Tensor, shape=(batch_size, n_classes),
Predicted probability distribution over the classes; one for each sample in the batch.
            unnormalized_log_probabilities: Tensor, shape=(batch_size, n_classes),
                Predicted unnormalized log probabilities over the classes; one for each sample in the
                batch.
            log_probabilities: Predicted log probabilities over the classes; one for each sample in the batch.
"""
# Validate inputs
implemented_return_types = ('unnormalized_log_probs', 'log_probs', 'probs')
if return_type not in implemented_return_types:
raise ValueError('valid return types are %s, but %s was passed.' % (implemented_return_types,
return_type))
self.x = x
# Input to hidden
self.hidden_pre_activation = torch.matmul(x, torch.transpose(self.W_1, dim0=0, dim1=1)) + self.b_1
self.hidden = activations.relu(self.hidden_pre_activation)
# Hidden to out
unnormalized_log_probs = torch.matmul(self.hidden, torch.transpose(self.W_2, dim0=0, dim1=1)) + self.b_2
log_probs = activations.log_softmax(unnormalized_log_probs)
self.batch_probabilities = torch.exp(log_probs)
if return_type == 'unnormalized_log_probs':
return unnormalized_log_probs
elif return_type == 'log_probs':
return log_probs
elif return_type == 'probs':
return self.batch_probabilities
def backward(self, ground_truth):
"""
Backward pass using cross-entropy loss. This function sets the gradients for the model parameters.
Args:
ground_truth: tensor of int, shape=(batch_size)
ground truth labels.
Returns:
self.gradient_W_1,
self.gradient_b_1,
self.gradient_W_2,
self.gradient_b_2
"""
        # Make sure the forward propagation has been run
if not self._check_forward_ran():
            raise ValueError('Forward propagation must be run first.')
batch_size = len(ground_truth)
# Convert ground truth labels to one-hot-encoding
y_one_hot = self._one_hot(ground_truth)
# Gradient of the loss w.r.t unnormalized_log_probs for each sample in the batch - size(batch_size, n_classes)
self.gradient_unnormalized_log_probs = self.batch_probabilities - y_one_hot
# Gradient of the loss w.r.t b2 - shape(n_class) - this is the average over all samples in the batch
self.gradient_b_2.add_(torch.mean(self.gradient_unnormalized_log_probs, dim=0))
# Gradient of the loss w.r.t W2 - size(n_classes, n_hidden) - this is the average over all samples in the batch
self.gradient_W_2.add_(torch.matmul(torch.transpose(self.gradient_unnormalized_log_probs, dim0=0, dim1=1),
self.hidden) / batch_size)
# Gradient of the loss w.r.t the hidden layer - size(batch_size, n_hidden)
self.gradient_hidden = torch.matmul(self.gradient_unnormalized_log_probs, self.W_2)
        # Gradient of the loss w.r.t the hidden layer pre-relu - size(batch_size, n_hidden)
self.gradient_hidden_pre_activation = self.gradient_hidden * activations.relu_grad(self.hidden_pre_activation)
# Gradient of the loss w.r.t b1 - size(n_hidden) - this is the average over all samples in the batch
self.gradient_b_1.add_(torch.mean(self.gradient_hidden_pre_activation, dim=0))
# Gradient of the loss w.r.t W1 - size(n_hidden, n_features) - this is the average over all samples in the batch
self.gradient_W_1.add_(torch.matmul(torch.transpose(self.gradient_hidden_pre_activation, dim0=0, dim1=1),
self.x) / batch_size)
# Reset the cached variables in the computation graph
self._reset_comp_graph_cache()
return self.gradient_W_1, self.gradient_b_1, self.gradient_W_2, self.gradient_b_2
def get_params(self):
"""
        Returns a tuple of all parameter sets in the model.
Returns: tuple of tensor,
tuple of all parameter sets in the model.
"""
return self.W_1, self.b_1, self.W_2, self.b_2
def get_gradients(self):
"""
Returns a tuple of gradients for all parameter sets in the model.
Returns: tuple of tensor,
tuple of gradients for all parameter sets in the model.
"""
return self.gradient_W_1, self.gradient_b_1, self.gradient_W_2, self.gradient_b_2
def _check_forward_ran(self):
"""
        Checks if the forward propagation has been run
Returns: True or False
"""
return self.batch_probabilities is not None and \
self.hidden is not None and \
self.hidden_pre_activation is not None and \
self.x is not None
def _reset_comp_graph_cache(self):
"""
Resets the cache of computed variables in the computation graph from the forward pass
Returns: None
"""
self.batch_probabilities = None
self.hidden = None
self.hidden_pre_activation = None
self.x = None
def reset_gradients(self):
self.gradient_unnormalized_log_probs = None
self.gradient_b_2.copy_(torch.tensor(0))
self.gradient_W_2.copy_(torch.tensor(0))
self.gradient_hidden = None
self.gradient_hidden_pre_activation = None
self.gradient_b_1.copy_(torch.tensor(0))
self.gradient_W_1.copy_(torch.tensor(0))
def _one_hot(self, labels):
"""
Converts a 1-D array of ints to one-hot-encoding. Labels are expected to be integers in the
range [0, self.n_classes - 1]
Args:
labels: tensor of int, shape=(batch_size)
array to convert.
Returns:
one_hot: 2-D tensor of ints, shape=(batch_size, self.n_classes)
"""
# Make sure all labels are in expected range
if torch.max(labels) > (self.n_classes - 1) or torch.min(labels) < 0:
raise ValueError('labels must be integers in the range [0, %d]' % (self.n_classes - 1))
batch_size = len(labels)
one_hot = torch.zeros(batch_size, self.n_classes, device=self.device)
# Fill one-hot encoded matrix
for i in range(batch_size):
one_hot[i, labels[i]] = 1
return one_hot
def initialize_params(self, weight_init):
"""
Initialize the parameters of the model in the specified way.
Args:
weight_init: str,
weight initialization strategy.
One of ('Kaiming')
Returns: None
"""
        if weight_init not in ('Kaiming',):
raise ValueError('Weight init must be \'Kaiming\'. You passed %s.' % weight_init)
if weight_init == 'Kaiming':
self.W_1 = torch.tensor(
data=np.random.randn(self.W_1.shape[0], self.W_1.shape[1]) * np.sqrt(2 / self.in_features),
requires_grad=False,
dtype=self.dtype, device=self.device
)
self.W_2 = torch.tensor(
data=np.random.randn(self.W_2.shape[0], self.W_2.shape[1]) * np.sqrt(2 / self.hidden_size),
requires_grad=False,
dtype=self.dtype, device=self.device
)
# Initialize bias to be small constant
self.b_1 = torch.full(size=self.b_1.size(), fill_value=0.01, dtype=self.dtype, device=self.device)
self.b_2 = torch.full(size=self.b_2.size(), fill_value=0.01, dtype=self.dtype, device=self.device)
def to(self, device):
"""
Moves this model to the specified device.
Args:
device: Device, Pytorch device to move the model to
Returns: None
"""
# Move params
self.W_1 = self.W_1.to(device)
self.W_2 = self.W_2.to(device)
self.b_1 = self.b_1.to(device)
self.b_2 = self.b_2.to(device)
# Move gradients
self.gradient_W_1 = self.gradient_W_1.to(device)
self.gradient_W_2 = self.gradient_W_2.to(device)
self.gradient_b_1 = self.gradient_b_1.to(device)
self.gradient_b_2 = self.gradient_b_2.to(device)
# Set model device
self.device = device
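# --- Usage sketch (added illustration; not part of the original module) ---
# A minimal end-to-end step showing how forward(), backward(), get_params() and
# get_gradients() are meant to be combined with a plain SGD update. The batch
# size, layer sizes, learning rate and random data below are illustrative
# assumptions, not values taken from the surrounding project.
if __name__ == '__main__':
    torch.manual_seed(0)
    model = SimpleFullyConnected(in_features=20, hidden_size=64, n_classes=5,
                                 weight_init='Kaiming')
    x = torch.randn(8, 20)                           # batch of 8 samples
    y = torch.randint(low=0, high=5, size=(8,))      # integer class labels
    log_probs = model.forward(x, return_type='log_probs')
    print('loss before step:', float(-log_probs[torch.arange(8), y].mean()))
    model.backward(y)                                # populates parameter gradients
    for param, grad in zip(model.get_params(), model.get_gradients()):
        param.add_(-0.1 * grad)                      # in-place SGD step, lr = 0.1 assumed
    model.reset_gradients()
    log_probs = model.forward(x, return_type='log_probs')
    print('loss after step:', float(-log_probs[torch.arange(8), y].mean()))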
|
{"hexsha": "34b1533af7cfc7accceafb5a61f30d089bb3eac3", "size": 11593, "ext": "py", "lang": "Python", "max_stars_repo_path": "core/models/three_layer_fc.py", "max_stars_repo_name": "kalvare/machine_learning", "max_stars_repo_head_hexsha": "9710177515bcb22901eea5738364221fe4bf52b5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "core/models/three_layer_fc.py", "max_issues_repo_name": "kalvare/machine_learning", "max_issues_repo_head_hexsha": "9710177515bcb22901eea5738364221fe4bf52b5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "core/models/three_layer_fc.py", "max_forks_repo_name": "kalvare/machine_learning", "max_forks_repo_head_hexsha": "9710177515bcb22901eea5738364221fe4bf52b5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.9026845638, "max_line_length": 120, "alphanum_fraction": 0.618390408, "include": true, "reason": "import numpy", "num_tokens": 2489}
|
\chapter{Vita}
This is another example of an appendix. Perhaps for listing your CV. Or giving
examples of having multiple appendices.
|
{"hexsha": "3403a0c56f9607bd49d5d63e90563a403831c070", "size": 135, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "docs/Thesis tools/appendixB.tex", "max_stars_repo_name": "Malificiece/Leap-Motion-Thesis", "max_stars_repo_head_hexsha": "a87684c7e9c1d7250922d00f37f31ae242dcc363", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "docs/Thesis tools/appendixB.tex", "max_issues_repo_name": "Malificiece/Leap-Motion-Thesis", "max_issues_repo_head_hexsha": "a87684c7e9c1d7250922d00f37f31ae242dcc363", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/Thesis tools/appendixB.tex", "max_forks_repo_name": "Malificiece/Leap-Motion-Thesis", "max_forks_repo_head_hexsha": "a87684c7e9c1d7250922d00f37f31ae242dcc363", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.0, "max_line_length": 78, "alphanum_fraction": 0.8, "num_tokens": 30}
|
#include "async_web_server_cpp/http_request_handler.hpp"
#include "async_web_server_cpp/http_connection.hpp"
#include "async_web_server_cpp/http_reply.hpp"
#include <boost/bind.hpp>
#include <boost/enable_shared_from_this.hpp>
#include <boost/noncopyable.hpp>
#include <boost/regex.hpp>
#include <boost/shared_ptr.hpp>
#if defined(_MSC_VER)
#include <BaseTsd.h>
typedef SSIZE_T ssize_t;
#endif
namespace async_web_server_cpp
{
HttpRequestHandlerGroup::HttpRequestHandlerGroup(
HttpServerRequestHandler default_handler)
: default_handler_(default_handler)
{
}
class PathMatcher
{
public:
explicit PathMatcher(const std::string& path_regex_string)
: path_regex_(boost::regex(path_regex_string))
{
}
bool operator()(const HttpRequest& request)
{
return regex_match(request.path, path_regex_);
}
private:
const boost::regex path_regex_;
};
void HttpRequestHandlerGroup::addHandlerForPath(
const std::string& path_regex, HttpServerRequestHandler handler)
{
addHandler(PathMatcher(path_regex), handler);
}
void HttpRequestHandlerGroup::addHandler(HandlerPredicate predicate,
HttpServerRequestHandler handler)
{
handlers_.push_back(std::make_pair(predicate, handler));
}
bool HttpRequestHandlerGroup::operator()(
const HttpRequest& request, boost::shared_ptr<HttpConnection> connection,
const char* begin, const char* end)
{
for (auto& handler : handlers_)
{
if (handler.first(request))
{
if (handler.second(request, connection, begin, end))
return true;
}
}
return default_handler_(request, connection, begin, end);
}
class BodyCollectingConnection;
typedef boost::shared_ptr<BodyCollectingConnection> BodyCollectingConnectionPtr;
typedef boost::weak_ptr<BodyCollectingConnection>
BodyCollectingConnectionWeakPtr;
class BodyCollectingConnection
: public boost::enable_shared_from_this<BodyCollectingConnection>,
private boost::noncopyable
{
public:
BodyCollectingConnection(HttpRequestBodyCollector::Handler handler,
const HttpRequest& request,
boost::shared_ptr<HttpConnection> connection)
: handler_(handler), request_(request), connection_(connection),
received_length_(0)
{
std::string length_str =
request_.get_header_value_or_default("Content-Length", "");
try
{
length_ = boost::lexical_cast<ssize_t>(length_str);
}
catch (const boost::bad_lexical_cast&)
{
length_ = -1; // indicate error
}
}
static void static_handle_read(BodyCollectingConnectionPtr _this,
const char* begin, const char* end)
{
_this->handle_read(begin, end);
}
void handle_read(const char* begin, const char* end)
{
if (length_ < 0)
{
HttpReply::builder(HttpReply::bad_request).write(connection_);
connection_->write("No Content-Length header");
return;
}
std::string chunk(begin, end - begin);
body_stream_ << chunk;
received_length_ += chunk.length();
if (received_length_ >= static_cast<size_t>(length_))
{
handler_(request_, connection_,
body_stream_.str().substr(0, length_));
}
else
{
connection_->async_read(
boost::bind(&BodyCollectingConnection::static_handle_read,
shared_from_this(), _1, _2));
}
}
private:
HttpRequestBodyCollector::Handler handler_;
const HttpRequest request_;
boost::shared_ptr<HttpConnection> connection_;
std::stringstream body_stream_;
ssize_t length_;
size_t received_length_;
};
HttpRequestBodyCollector::HttpRequestBodyCollector(Handler handler)
: handler_(handler)
{
}
bool HttpRequestBodyCollector::operator()(
const HttpRequest& request, boost::shared_ptr<HttpConnection> connection,
const char* begin, const char* end)
{
BodyCollectingConnectionPtr collecting_connection(
new BodyCollectingConnection(handler_, request, connection));
collecting_connection->handle_read(begin, end);
return true;
}
} // namespace async_web_server_cpp
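// --- Usage sketch (added illustration; commented out on purpose) ---
// How the pieces above are meant to compose: a HttpRequestHandlerGroup that
// dispatches on a path regex, with HttpRequestBodyCollector wrapping a handler
// that needs the complete request body. The default handler `send_not_found`
// and the lambda body are hypothetical; the exact Handler typedef comes from
// the library headers rather than from this file.
//
//   using namespace async_web_server_cpp;
//   HttpRequestHandlerGroup group(send_not_found /* default handler */);
//   group.addHandlerForPath("/upload.*",
//       HttpRequestBodyCollector(
//           [](const HttpRequest& request,
//              boost::shared_ptr<HttpConnection> connection,
//              const std::string& body) {
//             // `body` holds exactly Content-Length bytes, collected above
//           }));
//   // `group` is itself a request handler and can be handed to the HTTP server.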
|
{"hexsha": "b3b08a4f29f4611d35b09acd1255941319feecd0", "size": 4380, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/http_request_handler.cpp", "max_stars_repo_name": "PranavDhulipala/async_web_server_cpp", "max_stars_repo_head_hexsha": "7d810f35b684021c3c5e6d9349c22b51b6e6e9cb", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/http_request_handler.cpp", "max_issues_repo_name": "PranavDhulipala/async_web_server_cpp", "max_issues_repo_head_hexsha": "7d810f35b684021c3c5e6d9349c22b51b6e6e9cb", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/http_request_handler.cpp", "max_forks_repo_name": "PranavDhulipala/async_web_server_cpp", "max_forks_repo_head_hexsha": "7d810f35b684021c3c5e6d9349c22b51b6e6e9cb", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.3959731544, "max_line_length": 80, "alphanum_fraction": 0.6723744292, "num_tokens": 897}
|
import sys
#Imports
import numpy as np
import math
import matplotlib.pyplot as plt
question = sys.argv[1]
def berkan_ozdamar_21602353_hw1(question):
if question == '1' :
A = np.array([[1, 0, -1, 2], [2, 1, -1, 5], [3, 3, 0, 9]])
b = np.array([1, 4, 9])
# Part a
print('Part a')
print('\n')
# Since x_3 and x_4 are free variables, we will assign them random numbers with numpy.random
x_3 = np.random.random()
x_4 = np.random.random()
x_n = np.array([x_3 - 2 * x_4, -x_3 - x_4, x_3, x_4])
print('Solution for Ax = 0')
print(x_n)
print('\n')
result = np.around(A.dot(x_n), 6)
print('Confirming that Ax = 0')
print(result)
# Part b
print('Part b')
print('\n')
        # Since x_3 and x_4 are free variables, we will write the solution set in terms of them. And since we are looking for
        # a particular solution, setting x_3 and x_4 to 0 will make x_p = [1 2 0 0]^T. Then check whether A.x_p = b where
        # b is [1 4 9]^T
x_3 = 0
x_4 = 0
print('Solution for Ax = b')
x_p = np.array([1 + x_3 - 2 * x_4, 2 - x_3 - x_4, x_3, x_4])
print(x_p)
print('\n')
print('Confirming that Ax = b')
result = A.dot(x_p)
print(result)
# Part c
print('Part c')
print('\n')
x_3 = np.random.random()
x_4 = np.random.random()
x_c = np.array([1 + x_3 - 2 * x_4, 2 - x_3 - x_4, x_3, x_4])
print('Confirming that Ax = b')
result = A.dot(x_c)
print(result)
# Part d
print('Part d')
u, s, v = np.linalg.svd(A)
        # Build sigma with the correct shape, which is a 3x4 matrix
sigma = np.zeros((3, 4))
for i in range(len(sigma[:, 0])):
for j in range(len(sigma[0])):
if (i == j):
                    # Instead of zero, the SVD assigns the third sigma value as roughly 2*10^-16, which causes a problem when taking the reciprocal.
                    # To resolve that issue, I have just assigned 0 instead of 2*10^-16
if (s[i] > 10 ** -15):
sigma[i, j] = s[i]
else:
sigma[i, j] = 0
# To find sigma_plus, take reciprocals of the non-zero values.
sigma_plus = np.zeros((3, 4))
for i in range(len(sigma[:, 0])):
for j in range(len(sigma[0])):
if (i == j and sigma[i, j] != 0):
sigma_plus[i, j] = 1 / sigma[i, j]
print('U is:')
print(u)
print('V is:')
print(v)
print('sigma is:')
print(sigma)
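        # Moore-Penrose pseudo-inverse from the SVD: A+ = V @ Sigma+ @ U.T, where
        # Sigma+ transposes Sigma and takes reciprocals of its non-zero singular values
        # (numpy's svd returns V^T as `v`, so `v.T` recovers V below).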
A_pseudo = (v.T).dot(sigma_plus.T).dot(u.T)
print('Pseudo inverse of A by SVD decomposition :')
print(A_pseudo)
print('\n')
        print('To see how accurate our pseudo inverse of A is, we check A.A_pseudo.A = A')
print(A.dot(A_pseudo).dot(A))
# Finally, find pseudo inverse of A with numpy.linalg.pinv(A)
A_pseudo_2 = np.linalg.pinv(A)
print('\n')
print('Pseudo inverse of A with numpy.linalg.pinv(A)')
print(A_pseudo_2)
# Part e
print('Part e')
print('\n')
# 1. Set free variables to zero
x_3 = 0
x_4 = 0
x_sparsest = np.array([1 + x_3 - 2 * x_4, 2 - x_3 - x_4, x_3, x_4])
print('Sparsest solution example for x is')
print(x_sparsest)
        # 2. Set one of the free variables to zero, and set the other free variable such that one of the pivots becomes 0.
x_3 = 0
x_4 = 1 / 2
x_sparsest = np.array([1 + x_3 - 2 * x_4, 2 - x_3 - x_4, x_3, x_4])
print('Sparsest solution example for x is')
print(x_sparsest)
        # 3. Set the free variables such that both pivots are zero.
x_3 = 1
x_4 = 1
x_sparsest = np.array([1 + x_3 - 2 * x_4, 2 - x_3 - x_4, x_3, x_4])
print('Sparsest solution example for x is')
print(x_sparsest)
# Part f
print('Part f')
print('\n')
L2 = A_pseudo.dot(b)
print('Least norm solution is:')
print(L2)
elif question == '2' :
# Part a
print('Part A')
print('\n')
def nChooseK(n, k):
return math.factorial(n) / (math.factorial(k) * math.factorial(n - k))
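        # Binomial likelihood of observing k successes in n trials with success
        # probability p: C(n, k) * p^k * (1 - p)^(n - k)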
def bernoulliDist(n, k, p):
return nChooseK(n, k) * (p ** k) * ((1 - p) ** (n - k))
x = np.arange(0, 1.001, 0.001)
likelihood_lang = bernoulliDist(869, 103, x)
likelihood_notlang = bernoulliDist(2353, 199, x)
index = np.arange(0, 1001)
plt.xlim(0, 200)
plt.bar(index, likelihood_lang)
plt.xlabel('Probability * 1000')
plt.ylabel('Likelihood')
plt.title('Likelihood distribution of language involved tasks')
plt.show(block=False)
plt.xlim(0, 200)
plt.bar(index, likelihood_notlang)
plt.xlabel('Probability * 1000')
plt.ylabel('Likelihood')
plt.title('Likelihood distribution of tasks that do not involve language')
plt.show(block=False)
# Part b
print('Part b')
print('\n')
max_l = np.amax(likelihood_lang)
max_nl = np.amax(likelihood_notlang)
max_l_prob = 0
max_nl_prob = 0
for i in range(len(likelihood_lang)):
if (likelihood_lang[i] == max_l):
# Dividing the i by 1000 since the probability is scaled by 1000.
max_l_prob = i / 1000
if (likelihood_notlang[i] == max_nl):
max_nl_prob = i / 1000
print(
'The probability that maximizes the likelihood of language involving tasks: (probability, likelihood of that probability)')
print(max_l_prob, max_l)
print(
'The probability that maximizes the likelihood of not language involving tasks: (probability, likelihood of that probability)')
print(max_nl_prob, max_nl)
# Part c
print('Part c')
print('\n')
        # x has 1001 values, and since it is given to be uniformly distributed, the prior is P(X) = 1/1001
prior = 1 / 1001
normalizer_l = 0
posterior_l = np.zeros(len(likelihood_lang))
normalizer_nl = 0
posterior_nl = np.zeros(len(likelihood_notlang))
for i in range(len(likelihood_lang)):
normalizer_l += likelihood_lang[i] * prior
normalizer_nl += likelihood_notlang[i] * prior
posterior_l[:] = likelihood_lang[:] * prior / normalizer_l
posterior_nl[:] = likelihood_notlang[:] * prior / normalizer_nl
plt.xlim(0, 200)
plt.bar(index, posterior_l)
plt.xlabel('Probability * 1000')
plt.ylabel('Posterior')
plt.title('Posterior distribution of language involved tasks')
plt.show(block=False)
plt.xlim(0, 200)
plt.bar(index, posterior_nl)
plt.xlabel('Probability * 1000')
plt.ylabel('Posterior')
plt.title('Posterior distribution of tasks that do not involve language')
plt.show(block=False)
def pdf_to_cdf(posterior):
lowerbound = 0
temp_min = 0
upperbound = np.inf
temp_max = np.inf
cdf = np.zeros(len(posterior) + 1)
for i in range(1, len(posterior) + 1):
for j in range(i):
cdf[i] += posterior[j]
                # Since I have offset i by 1, I subtract 1 from i when finding the lowerbound and upperbound.
if (cdf[i] >= 0.025 and lowerbound <= temp_min):
temp_min = cdf[i]
lowerbound = (i - 1) / 1000
if (cdf[i] >= 0.975 and upperbound >= temp_max):
temp_max = cdf[i]
upperbound = (i - 1) / 1000
return cdf, lowerbound, upperbound
cdf_l, lower_l, upper_l = pdf_to_cdf(posterior_l)
cdf_nl, lower_nl, upper_nl = pdf_to_cdf(posterior_nl)
index2 = np.arange(0, 1002)
plt.bar(index2, cdf_l)
plt.xlabel('Probability * 1000')
plt.ylabel('P(X < x|data)')
plt.title('CDF of posterior distribution of tasks that involve language')
plt.show(block=False)
plt.bar(index2, cdf_nl)
plt.xlabel('Probability * 1000')
plt.ylabel('P(X < x|data)')
plt.title('CDF of posterior distribution of tasks that do not involve language')
plt.show(block=False)
print('Confidence interval of x_l is (', lower_l, ',', upper_l, ')')
print('Confidence interval of x_nl is (', lower_nl, ',', upper_nl, ')')
# Part d
print('Part d')
print('\n')
matrixPosterior_l = np.matrix(posterior_l)
matrixPosterior_nl = np.matrix(posterior_nl)
joint_dist = (matrixPosterior_l.T).dot(matrixPosterior_nl)
plt.imshow(joint_dist)
plt.xlabel('x_nl * 1000')
plt.ylabel('x_l * 1000')
plt.colorbar()
plt.show(block=False)
x_l_greater = 0
x_l_notgreater = 0
for i in range(len(joint_dist)):
for j in range(len(joint_dist)):
if (i > j):
x_l_greater += joint_dist[i, j]
else:
x_l_notgreater += joint_dist[i, j]
print('Sum of posteriors such that x_l > x_nl')
print(x_l_greater)
print('Sum of posteriors such that x_l <= x_nl')
print(x_l_notgreater)
# Part e
print('Part e')
print('\n')
PROB_LANG = 0.5
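        # Bayes' rule: P(language | activation) =
        #   P(activation | language) * P(language) /
        #   [P(activation | language) * P(language) + P(activation | not language) * P(not language)],
        # using the maximum-likelihood probabilities from Part b as the two likelihood terms.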
prob_lang_active = (max_l_prob * PROB_LANG) / ((max_l_prob * PROB_LANG) + max_nl_prob * (1 - PROB_LANG))
print('P(language | activation) is :')
print(prob_lang_active)
berkan_ozdamar_21602353_hw1(question)
|
{"hexsha": "77a2d0185b80f2ff20d611bd6cae27b52d758b7b", "size": 9945, "ext": "py", "lang": "Python", "max_stars_repo_path": "Homework1/berkan_ozdamar_21602353_hw1.py", "max_stars_repo_name": "ozdamarberkan/Computational_Neuroscience", "max_stars_repo_head_hexsha": "8a8c02842968037fcbdcab15f203827642b6d816", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Homework1/berkan_ozdamar_21602353_hw1.py", "max_issues_repo_name": "ozdamarberkan/Computational_Neuroscience", "max_issues_repo_head_hexsha": "8a8c02842968037fcbdcab15f203827642b6d816", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Homework1/berkan_ozdamar_21602353_hw1.py", "max_forks_repo_name": "ozdamarberkan/Computational_Neuroscience", "max_forks_repo_head_hexsha": "8a8c02842968037fcbdcab15f203827642b6d816", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.8217821782, "max_line_length": 139, "alphanum_fraction": 0.5434891905, "include": true, "reason": "import numpy", "num_tokens": 2725}
|
{-# OPTIONS --without-K #-}
module hw1 where
open import Level using (_⊔_)
open import Function using (id)
open import Data.Nat using (ℕ; suc; _+_; _*_)
open import Data.Empty using (⊥)
open import Data.Sum using (_⊎_; inj₁; inj₂)
import Level
infix 4 _≡_
recℕ : ∀ {ℓ} → (C : Set ℓ) → C → (ℕ → C → C) → ℕ → C
recℕ C z f 0 = z
recℕ C z f (suc n) = f n (recℕ C z f n)
indℕ : ∀ {ℓ}
→ (C : ℕ → Set ℓ)
→ C 0
→ ((n : ℕ) → C n → C (suc n))
→ (n : ℕ)
→ C n
indℕ C z f 0 = z
indℕ C z f (suc n) = f n (indℕ C z f n)
------------------------------------------------------------------------------
data _≡_ {ℓ} {A : Set ℓ} : (x y : A) → Set ℓ where
refl : (x : A) → x ≡ x
rec≡ : {A : Set} →
(R : A → A → Set) {reflexiveR : {a : A} → R a a} →
({x y : A} (p : x ≡ y) → R x y)
rec≡ R {reflR} (refl y) = reflR {y}
subst : ∀ {ℓ} {A : Set ℓ} {C : A → Set ℓ} →
({x y : A} (p : x ≡ y) → C x → C y)
subst (refl x) = id
------------------------------------------------------------------------------
-- Exercise 1.1
-- show h ∘ (g ∘ f) = (h ∘ g) ∘ f
------------------------------------------------------------------------------
_∘_ : {A B C : Set} → (f : B → C) → (g : A → B) → A → C
f ∘ g = (λ a → f (g a))
compose≡ : {A B C D : Set}
→ (f : A → B)
→ (g : B → C)
→ (h : C → D)
→ h ∘ (g ∘ f) ≡ (h ∘ g) ∘ f
compose≡ = λ f g h → refl (λ a → (h (g (f a))))
------------------------------------------------------------------------------
-- Exercise 1.2
-- Derive the recursion principle for products recA×B
-- using only the projections, and verify that the definitional
-- equalities are valid. Do the same for Σ-types.
------------------------------------------------------------------------------
---------------------------------------------------
-- Product Types
data _×_ {ℓ₁ ℓ₂} (A : Set ℓ₁) (B : Set ℓ₂) : Set (ℓ₁ ⊔ ℓ₂) where
pair : A → B → A × B
fst : ∀ {ℓ₁ ℓ₂} {A : Set ℓ₁} {B : Set ℓ₂} → A × B → A
fst (pair a _) = a
snd : ∀ {ℓ₁ ℓ₂} {A : Set ℓ₁} {B : Set ℓ₂} -> A × B -> B
snd (pair _ b) = b
rec× : ∀ {ℓ} {A B : Set ℓ}
→ (C : Set ℓ)
→ (A → B → C)
→ A × B → C
rec× c f = λ p -> (f (fst p) (snd p))
fstofab≡a : ∀ {A B : Set}
(a : A)
→ (b : B)
→ fst (pair a b) ≡ a
fstofab≡a {A} {B} = λ a b → refl a
sndofab≡b : ∀ {A B : Set}
(a : A)
→ (b : B)
→ snd (pair a b) ≡ b
sndofab≡b {A} {B} = λ a b → refl b
uniq× : ∀ {ℓ₁ ℓ₂}
{A : Set ℓ₁}
{B : Set ℓ₂}
(p : A × B)
→ (pair (fst p) (snd p)) ≡ p
uniq× (pair a b) = refl (pair a b)
rec×g≡g : ∀ {A B C : Set}
(g : A → B → C)
(a : A)
→ (b : B)
→ rec× C g (pair a b) ≡ g a b
rec×g≡g {A} {B} {C} = λ g a b → refl (g a b)
recfst : ∀ (A B : Set)
→ fst {B = B} ≡ rec× A (λ a b → a)
recfst A B = refl fst
---------------------------------------------------
-- Sigma Types
data Σ {ℓ₁ ℓ₂} (A : Set ℓ₁)
(B : A → Set ℓ₂) : Set (ℓ₁ ⊔ ℓ₂)
where dpair : (a : A) → (B a) → Σ A B
dfst : ∀ {A : Set} {B : A → Set} → Σ A B → A
dfst (dpair a _) = a
dsnd : ∀ {A : Set} {B : A → Set} → (p : Σ A B) → (B (dfst p))
dsnd (dpair _ b) = b
dfstofab≡a : ∀ {A : Set}
{B : A → Set}
(a : A)
(b : B a) →
dfst {B = B} (dpair a b) ≡ a
dfstofab≡a {A} {B} = λ a b → refl a
dsndofab≡a : ∀ {A : Set}
{B : A → Set}
(a : A)
(b : B a) →
dsnd {B = B} (dpair a b) ≡ b
dsndofab≡a {A} {B} = λ a b → refl b
uniqΣ : ∀ {A : Set}
{B : A → Set}
(p : Σ A B)
→ (dpair (dfst p) (dsnd p)) ≡ p
uniqΣ (dpair a b) = refl (dpair a b)
------------------------------------------------------------------------------
-- Exercise 1.3
-- Derive the induction principle for products indA×B,
-- using only the projections and the propositional uniqueness
-- principle uniqA×B. Verify that the definitional equalities are
-- valid. Generalize uniqA×B to Σ-types, and do the same for Σ-types.
------------------------------------------------------------------------------
ind× : ∀ {ℓ} {A : Set ℓ} {B : Set ℓ}
→ (C : (A × B) → Set ℓ)
→ ((a : A) (b : B) → (C (pair a b)))
→ (p : (A × B)) → (C p)
ind× = λ C f → λ p → subst {C = C}
(uniq× p)
(f (fst p) (snd p))
indΣ' : ∀ {A : Set} {B : A → Set} → (C : Σ A B → Set) →
((a : A) → (b : B a) → C (dpair a b)) → (p : Σ A B) → C p
indΣ' C g s = subst {C = C}
(uniqΣ s)
(g (dfst s) (dsnd s))
------------------------------------------------------------------------------
--- Exercise 1.4 Given the function iter, derive a function having the
--- type of the recursor recN. Show that the defining equations of the
--- recursor hold propositionally for this function, using the
--- induction principle for Nats.
------------------------------------------------------------------------------
iter : ∀ {ℓ} {C : Set ℓ} → C → (C → C) → ℕ → C
iter c₀ c₊ 0 = c₀
iter c₀ c₊ (suc n) = c₊ (iter c₀ c₊ n)
recℕ' : ∀ {ℓ} → (C : Set ℓ) → C → (ℕ → C → C) → ℕ → C
recℕ' C c₀ f n =
snd (iter (pair 0 c₀)
(λ nc →
(pair (suc (fst nc))
(f (fst nc) (snd nc))))
n)
-- quick def and sanity check of fact via recℕ
fact = recℕ ℕ 1 (λ n nfact → (suc n) * nfact)
fact1 : fact 0 ≡ 1
fact1 = refl 1
fact5 : fact 5 ≡ 120
fact5 = refl 120
-- quick def and sanity check of fact via recℕ'
fact' = recℕ' ℕ 1 (λ n nfact → (suc n) * nfact)
fact'1 : fact' 0 ≡ 1
fact'1 = refl 1
fact'5 : fact' 5 ≡ 120
fact'5 = refl 120
cong : ∀ {a b} {A : Set a} {B : Set b}
(f : A → B) {x y} → x ≡ y → f x ≡ f y
cong f (refl y) = refl (f y)
-- this _is_ valid but I haven't done enough Agda
-- to see how to prove this. I proved it in the Coq HoTT library...
-- https://github.com/andmkent/HoTT/blob/5f9faf5ef4ea21db249d6ad45bcee0adf97f8f9d/contrib/HoTTBookExercises.v#L124
postulate
punt1 : ∀ {ℓ} (C : Set ℓ) →
(c₀ : C) →
(f : (ℕ → C → C)) →
(n : ℕ) →
recℕ C c₀ f (suc n) ≡ recℕ' C c₀ f (suc n)
recℕ≡recℕ' : ∀ {ℓ} (C : Set ℓ) →
(c₀ : C) →
(f : (ℕ → C → C)) →
((n : ℕ) → recℕ C c₀ f n ≡ recℕ' C c₀ f n)
recℕ≡recℕ' {ℓ} C c₀ f n =
indℕ {ℓ = ℓ}
(λ n → (((recℕ {ℓ = ℓ} C c₀ f) n)
≡
((recℕ' {ℓ = ℓ} C c₀ f) n)))
(refl c₀)
(λ n IH → (punt1 C c₀ f n))
n
------------------------------------------------------------------------------
--- Exercise 1.5 Show that if we define A + B :≡ Σ (x : 𝟚) rec𝟚(U, A, B, x),
--- then we can give a definition of indA+B for which the
--- definitional equalities stated in §1.7 hold.
------------------------------------------------------------------------------
data 𝟚 : Set where
true : 𝟚
false : 𝟚
rec𝟚 : ∀ {ℓ} → {C : Set ℓ} → C → C → 𝟚 → C
rec𝟚 th el false = el
rec𝟚 th el true = th
if_then_else_ : ∀ {ℓ} {C : Set ℓ} → 𝟚 → C → C → C
if b then X else Y = rec𝟚 X Y b
ind𝟚 : ∀ {ℓ} → {C : 𝟚 → Set ℓ} → C true → C false → (b : 𝟚) → C b
ind𝟚 th el false = el
ind𝟚 th el true = th
bsum : ∀ (A : Set) → (B : Set) → Set
bsum A B = Σ 𝟚 (rec𝟚 A B)
injbs1 : ∀ (A : Set) → (B : Set) → A → bsum A B
injbs1 A B a = dpair true a
injbs2 : ∀ (A : Set) → (B : Set) → B → bsum A B
injbs2 A B b = dpair false b
recΣ : ∀ {ℓ₁ ℓ₂ ℓ₃} → {A : Set ℓ₁} {B : A → Set ℓ₂} → (C : Set ℓ₃) →
((a : A) → B a → C) → Σ A B → C
recΣ C g (dpair a b) = g a b
indΣ : ∀ {ℓ₁ ℓ₂ ℓ₃} → {A : Set ℓ₁} {B : A → Set ℓ₂} → (C : Σ A B → Set ℓ₃) →
((a : A) → (b : B a) → C (dpair a b)) → (p : Σ A B) → C p
indΣ C g (dpair a b) = g a b
indbsum : (A : Set) (B : Set) (C : (bsum A B → Set))
→ ((a : A) → (C (injbs1 A B a)))
→ ((b : B) → (C (injbs2 A B b)))
→ (a+b : bsum A B) → (C a+b)
indbsum A B C ca cb = indΣ C (ind𝟚 ca cb)
-- where ind𝟚's C = (λ b → (t : rec𝟚 A B b) → C (dpair b t))
indbs1 : ∀ {A B : Set} (P : (bsum A B) → Set)
→ (fa : (a : A) → P (injbs1 A B a))
→ (fb : (b : B) → P (injbs2 A B b))
→ (a : A)
→ indbsum A B P fa fb (injbs1 A B a) ≡ fa a
indbs1 P fa fb x = refl (fa x)
indbs2 : ∀ {A B : Set} (P : (bsum A B) → Set)
→ (fa : (a : A) → P (injbs1 A B a))
→ (fb : (b : B) → P (injbs2 A B b))
→ (b : B)
→ indbsum A B P fa fb (injbs2 A B b) ≡ fb b
indbs2 P fa fb x = refl (fb x)
rec⊎ : ∀ {ℓ₁ ℓ₂ ℓ₃} → {A : Set ℓ₁} {B : Set ℓ₂} →
(C : Set ℓ₃) → (A → C) → (B → C) → (A ⊎ B → C)
rec⊎ C f g (inj₁ a) = f a
rec⊎ C f g (inj₂ b) = g b
------------------------------------------------------------------------------
--- Exercise 1.10
-- Show that the Ackermann function ack : ℕ → ℕ → ℕ is definable using
-- only recℕ satisfying the following equations:
-- ack(0, m) = succ(m) -> ack(0) = suc
-- ack(succ(n), 0) = ack(n, 1) -> ack (suc n) =
-- ack(succ(n), succ(m)) = ack(n, ack(succ(n), m)).
ack : ℕ → ℕ → ℕ
ack = recℕ (ℕ → ℕ)
suc
(λ n ackn → recℕ ℕ (ackn 1) (λ m res → (ackn res)))
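-- How the equations map onto the definition above: the outer recℕ recurses on
-- the first argument, so ack 0 = suc, and ack (suc n) is the inner recℕ, which
-- sends 0 to ack n 1 and (suc m) to ack n (ack (suc n) m).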
acktest00 : ack 0 0 ≡ 1
acktest00 = refl 1
acktest01 : ack 0 1 ≡ 2
acktest01 = refl 2
acktest10 : ack 1 0 ≡ 2
acktest10 = refl 2
acktest11 : ack 1 1 ≡ 3
acktest11 = refl 3
acktest22 : ack 2 2 ≡ 7
acktest22 = refl 7
------------------------------------------------------------------------------
--- Exercise 1.11
-- Show that for any type A, we have ¬¬¬A → ¬A.
¬ : Set → Set
¬ P = P → ⊥
ex11 : ∀ (P : Set) → ¬ (¬ (¬ P)) → ¬ P
ex11 P = λ nnnP → λ P → nnnP (λ nP → nP P)
------------------------------------------------------------------------------
-- Exercise 1.12
-- Using the propositions as types interpretation, derive the following tautologies.
-- (i) If A, then (if B then A).
-- (ii) If A, then not (not A).
-- (iii) If (not A or not B), then not (A and B).
ex12i : ∀ (A : Set) → A → (Set → A)
ex12i = λ A a _ → a
ex12ii : ∀ (A : Set) → A → (¬ (¬ A))
ex12ii = λ A a → λ nA → nA a
ex12iii : ∀ (A B : Set) → (¬ (A ⊎ B)) → (¬ (A × B))
ex12iii = λ A B → λ nAorB → λ AandB → nAorB (inj₁ (fst AandB))
------------------------------------------------------------------------------
-- Exercise 1.13
-- Using propositions-as-types, derive the double negation of the principle of ex-
-- cluded middle, i.e. prove not (not (P or not P)).
ex13 : ∀ (P : Set) → (¬ (¬ (P ⊎ (¬ P))))
ex13 = λ P nPorPnot → nPorPnot (inj₂ (λ P → nPorPnot (inj₁ P)))
------------------------------------------------------------------------------
-- Exercise 1.16
-- Show that addition of natural numbers is commutative.
ex16 : ∀ (a b c : ℕ) → a + (b + c) ≡ (a + b) + c
ex16 = indℕ (λ a → (b c : ℕ) → a + (b + c) ≡ a + b + c)
(λ b c → refl (b + c))
(λ n IHn b c → cong suc (IHn b c))
|
{"hexsha": "e1b640d7174c2df0588dae3ed5260719015d37b2", "size": 11112, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "hw1.agda", "max_stars_repo_name": "andmkent/misc-HoTT", "max_stars_repo_head_hexsha": "b05c58ffdaed99932ca2acc632deca8d14742b04", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2016-01-26T18:17:16.000Z", "max_stars_repo_stars_event_max_datetime": "2016-01-26T18:17:16.000Z", "max_issues_repo_path": "hw1.agda", "max_issues_repo_name": "andmkent/misc-HoTT", "max_issues_repo_head_hexsha": "b05c58ffdaed99932ca2acc632deca8d14742b04", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hw1.agda", "max_forks_repo_name": "andmkent/misc-HoTT", "max_forks_repo_head_hexsha": "b05c58ffdaed99932ca2acc632deca8d14742b04", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.474801061, "max_line_length": 114, "alphanum_fraction": 0.3959683225, "num_tokens": 4308}
|
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/basics/safe_cast.h>
#include <ripple/beast/core/LexicalCast.h>
#include <ripple/beast/rfc2616.h>
#include <ripple/server/Port.h>
#include <boost/algorithm/string/predicate.hpp>
#include <boost/algorithm/string/trim.hpp>
#include <sstream>
namespace ripple {
bool
Port::secure() const
{
return protocol.count("peer") > 0 || protocol.count("https") > 0 ||
protocol.count("wss") > 0 || protocol.count("wss2") > 0;
}
std::string
Port::protocols() const
{
std::string s;
for (auto iter = protocol.cbegin(); iter != protocol.cend(); ++iter)
s += (iter != protocol.cbegin() ? "," : "") + *iter;
return s;
}
std::ostream&
operator<<(std::ostream& os, Port const& p)
{
os << "'" << p.name << "' (ip=" << p.ip << ":" << p.port << ", ";
if (p.admin_nets_v4.size() || p.admin_nets_v6.size())
{
os << "admin nets:";
for (auto const& net : p.admin_nets_v4)
{
os << net.to_string();
os << ", ";
}
for (auto const& net : p.admin_nets_v6)
{
os << net.to_string();
os << ", ";
}
}
if (p.secure_gateway_nets_v4.size() || p.secure_gateway_nets_v6.size())
{
os << "secure_gateway nets:";
for (auto const& net : p.secure_gateway_nets_v4)
{
os << net.to_string();
os << ", ";
}
for (auto const& net : p.secure_gateway_nets_v6)
{
os << net.to_string();
os << ", ";
}
}
os << p.protocols() << ")";
return os;
}
//------------------------------------------------------------------------------
static void
populate(
Section const& section,
std::string const& field,
std::ostream& log,
std::vector<boost::asio::ip::network_v4>& nets4,
std::vector<boost::asio::ip::network_v6>& nets6)
{
auto const optResult = section.get(field);
if (!optResult)
return;
std::stringstream ss(*optResult);
std::string ip;
while (std::getline(ss, ip, ','))
{
boost::algorithm::trim(ip);
bool v4;
boost::asio::ip::network_v4 v4Net;
boost::asio::ip::network_v6 v6Net;
try
{
// First, check to see if 0.0.0.0 or ipv6 equivalent was configured,
// which means all IP addresses.
auto const addr = beast::IP::Endpoint::from_string_checked(ip);
if (addr)
{
if (is_unspecified(*addr))
{
nets4.push_back(
boost::asio::ip::make_network_v4("0.0.0.0/0"));
nets6.push_back(boost::asio::ip::make_network_v6("::/0"));
// No reason to allow more IPs--it would be redundant.
break;
}
// The configured address is a single IP (or else addr would
// be unset). We need this to be a subnet, so append
// the number of network bits to make a subnet of 1,
// depending on type.
v4 = addr->is_v4();
std::string addressString = addr->to_string();
if (v4)
{
addressString += "/32";
v4Net = boost::asio::ip::make_network_v4(addressString);
}
else
{
addressString += "/128";
v6Net = boost::asio::ip::make_network_v6(addressString);
}
}
else
{
// Since addr is empty, assume that the entry is
// for a subnet which includes trailing /0-32 or /0-128
// depending on ip type.
// First, see if it's an ipv4 subnet. If not, try ipv6.
// If that throws, then there's nothing we can do with
// the entry.
try
{
v4Net = boost::asio::ip::make_network_v4(ip);
v4 = true;
}
catch (boost::system::system_error const&)
{
v6Net = boost::asio::ip::make_network_v6(ip);
v4 = false;
}
}
// Confirm that the address entry is the same as the subnet's
// underlying network address.
// 10.1.2.3/24 makes no sense. The underlying network address
// is 10.1.2.0/24.
if (v4)
{
if (v4Net != v4Net.canonical())
{
log << "The configured subnet " << v4Net.to_string()
<< " is not the same as the network address, which is "
<< v4Net.canonical().to_string();
Throw<std::exception>();
}
nets4.push_back(v4Net);
}
else
{
if (v6Net != v6Net.canonical())
{
log << "The configured subnet " << v6Net.to_string()
<< " is not the same as the network address, which is "
<< v6Net.canonical().to_string();
Throw<std::exception>();
}
nets6.push_back(v6Net);
}
}
catch (boost::system::system_error const& e)
{
log << "Invalid value '" << ip << "' for key '" << field << "' in ["
<< section.name() << "]: " << e.what();
Throw<std::exception>();
}
}
}
void
parse_Port(ParsedPort& port, Section const& section, std::ostream& log)
{
{
auto const optResult = section.get("ip");
if (optResult)
{
try
{
port.ip = boost::asio::ip::address::from_string(*optResult);
}
catch (std::exception const&)
{
log << "Invalid value '" << *optResult << "' for key 'ip' in ["
<< section.name() << "]";
Rethrow();
}
}
}
{
auto const optResult = section.get("port");
if (optResult)
{
try
{
port.port = beast::lexicalCastThrow<std::uint16_t>(*optResult);
// Port 0 is not supported
if (*port.port == 0)
Throw<std::exception>();
}
catch (std::exception const&)
{
log << "Invalid value '" << *optResult << "' for key "
<< "'port' in [" << section.name() << "]";
Rethrow();
}
}
}
{
auto const optResult = section.get("protocol");
if (optResult)
{
for (auto const& s : beast::rfc2616::split_commas(
optResult->begin(), optResult->end()))
port.protocol.insert(s);
}
}
{
auto const lim = get(section, "limit", "unlimited");
if (!boost::iequals(lim, "unlimited"))
{
try
{
port.limit =
safe_cast<int>(beast::lexicalCastThrow<std::uint16_t>(lim));
}
catch (std::exception const&)
{
log << "Invalid value '" << lim << "' for key "
<< "'limit' in [" << section.name() << "]";
Rethrow();
}
}
}
{
auto const optResult = section.get("send_queue_limit");
if (optResult)
{
try
{
port.ws_queue_limit =
beast::lexicalCastThrow<std::uint16_t>(*optResult);
// Queue must be greater than 0
if (port.ws_queue_limit == 0)
Throw<std::exception>();
}
catch (std::exception const&)
{
log << "Invalid value '" << *optResult << "' for key "
<< "'send_queue_limit' in [" << section.name() << "]";
Rethrow();
}
}
else
{
// Default Websocket send queue size limit
port.ws_queue_limit = 100;
}
}
populate(section, "admin", log, port.admin_nets_v4, port.admin_nets_v6);
populate(
section,
"secure_gateway",
log,
port.secure_gateway_nets_v4,
port.secure_gateway_nets_v6);
set(port.user, "user", section);
set(port.password, "password", section);
set(port.admin_user, "admin_user", section);
set(port.admin_password, "admin_password", section);
set(port.ssl_key, "ssl_key", section);
set(port.ssl_cert, "ssl_cert", section);
set(port.ssl_chain, "ssl_chain", section);
set(port.ssl_ciphers, "ssl_ciphers", section);
port.pmd_options.server_enable =
section.value_or("permessage_deflate", true);
port.pmd_options.client_max_window_bits =
section.value_or("client_max_window_bits", 15);
port.pmd_options.server_max_window_bits =
section.value_or("server_max_window_bits", 15);
port.pmd_options.client_no_context_takeover =
section.value_or("client_no_context_takeover", false);
port.pmd_options.server_no_context_takeover =
section.value_or("server_no_context_takeover", false);
port.pmd_options.compLevel = section.value_or("compress_level", 8);
port.pmd_options.memLevel = section.value_or("memory_level", 4);
}
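// A hypothetical [port_*] stanza exercising the keys parsed above (the
// section name and values are made up for illustration):
//     [port_rpc_local]
//     ip = 127.0.0.1
//     port = 5005
//     protocol = http
//     admin = 127.0.0.1
//     limit = unlimited
//     send_queue_limit = 200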
} // namespace ripple
|
{"hexsha": "1b869f6a5dabcd80089a77ecd8a532309008619a", "size": 10676, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/ripple/server/impl/Port.cpp", "max_stars_repo_name": "shichengripple001/rippled", "max_stars_repo_head_hexsha": "7c66747d27869f9f3c96617bd4227038f1fa92b8", "max_stars_repo_licenses": ["ISC"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/ripple/server/impl/Port.cpp", "max_issues_repo_name": "shichengripple001/rippled", "max_issues_repo_head_hexsha": "7c66747d27869f9f3c96617bd4227038f1fa92b8", "max_issues_repo_licenses": ["ISC"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/ripple/server/impl/Port.cpp", "max_forks_repo_name": "shichengripple001/rippled", "max_forks_repo_head_hexsha": "7c66747d27869f9f3c96617bd4227038f1fa92b8", "max_forks_repo_licenses": ["ISC"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.6483180428, "max_line_length": 80, "alphanum_fraction": 0.488478831, "num_tokens": 2377}
|
/*
* Copyright (c) 2013-2015, Michael Grey and Markus Theil
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "SQLiteAreaPopulationReader.hpp"
#include <boost/log/trivial.hpp>
#include <cassert>
#include <iostream>
#include <sstream>
SQLiteAreaPopulationReader::SQLiteAreaPopulationReader(std::string dbPath, double lat, double lon, double length)
: _lat(lat), _lon(lon), _length(length) {
int retval = sqlite3_open(dbPath.c_str(), &_sqliteDB);
    // sqlite3_open returns a non-zero result code if the connection could not be opened
if (retval) {
BOOST_LOG_TRIVIAL(error) << "Database connection failed in SQLiteAreaPopulationReader";
}
assert(retval == SQLITE_OK);
BOOST_LOG_TRIVIAL(info) << "SQLite connection to " << dbPath
<< " successfully established in SQLiteAreaPopulationReader!";
std::string queryString(
" SELECT geo.Latitude AS Latitude,"
" geo.Longitude AS Longitude,"
" geo.population AS Population,"
" ci.country AS Country"
" FROM geoname as geo, countryinfo as ci"
" WHERE geo.Latitude >= ?1"
" AND geo.Latitude <= ?2"
" AND geo.Longitude >= ?3"
" AND geo.Longitude <= ?4"
" AND geo.country_code = ci.iso"
" ORDER BY Population DESC");
sqlite3_prepare_v2(_sqliteDB, queryString.c_str(), queryString.length(), &_stmt, NULL);
sqlite3_bind_double(_stmt, 1, _lat - (length / 2));
sqlite3_bind_double(_stmt, 2, _lat + (length / 2));
sqlite3_bind_double(_stmt, 3, _lon - (length / 2));
sqlite3_bind_double(_stmt, 4, _lon + (length / 2));
retval = sqlite3_step(_stmt);
if (retval == SQLITE_ROW) {
_rowAvailable = true;
} else {
_rowAvailable = false;
}
}
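// The four parameters bound above select every entry inside a square of side
// `length` degrees centred on (lat, lon); because the query orders by
// population descending, successive getNext() calls return places from most
// to least populous.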
PopulatedPosition SQLiteAreaPopulationReader::getNext() {
assert(_rowAvailable);
double latitude(sqlite3_column_double(_stmt, 0));
double longitude(sqlite3_column_double(_stmt, 1));
double population(sqlite3_column_double(_stmt, 2));
std::string country(reinterpret_cast<const char*>(sqlite3_column_text(_stmt, 3)));
PopulatedPosition pp(population, latitude, longitude, country);
if (sqlite3_step(_stmt) == SQLITE_ROW)
_rowAvailable = true;
else {
_rowAvailable = false;
}
return pp;
}
|
{"hexsha": "6921bf9c2b56799ae07017e32d95185efe080e89", "size": 3812, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/db/SQLiteAreaPopulationReader.cpp", "max_stars_repo_name": "thillux/topoGen", "max_stars_repo_head_hexsha": "219901bda2df2594393dd5f52a6b6e961c59225e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2016-12-27T05:05:56.000Z", "max_stars_repo_stars_event_max_datetime": "2017-04-28T16:46:01.000Z", "max_issues_repo_path": "src/db/SQLiteAreaPopulationReader.cpp", "max_issues_repo_name": "thillux/topoGen", "max_issues_repo_head_hexsha": "219901bda2df2594393dd5f52a6b6e961c59225e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/db/SQLiteAreaPopulationReader.cpp", "max_forks_repo_name": "thillux/topoGen", "max_forks_repo_head_hexsha": "219901bda2df2594393dd5f52a6b6e961c59225e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.9892473118, "max_line_length": 113, "alphanum_fraction": 0.6998950682, "num_tokens": 870}
|
subroutine svdfit(x,y,z,sig,ndata,a,ma,u,v,w,mp,np,chisq)
implicit real*8 (a-h,o-z)
parameter(nmax=327680,mmax=10,tol=1.e-12)
dimension x(ndata),y(ndata),z(ndata),sig(ndata),a(ma),v(np,np),
* u(mp,np),w(np),b(nmax),afunc(mmax)
c type *,'evaluating basis functions...'
do 12 i=1,ndata
call poly_funcs(x(i),y(i),afunc,ma)
tmp=1./sig(i)
do 11 j=1,ma
u(i,j)=afunc(j)*tmp
11 continue
b(i)=z(i)*tmp
12 continue
c type *,'SVD...'
call svdcmp(u,ndata,ma,mp,np,w,v)
wmax=0.
do 13 j=1,ma
if(w(j).gt.wmax)wmax=w(j)
13 continue
thresh=tol*wmax
c type *,'eigen value threshold',thresh
do 14 j=1,ma
c type *,j,w(j)
if(w(j).lt.thresh)w(j)=0.
14 continue
c type *,'calculating coefficients...'
call svbksb(u,w,v,ndata,ma,mp,np,b,a)
chisq=0.
c type *,'evaluating chi square...'
do 16 i=1,ndata
call poly_funcs(x(i),y(i),afunc,ma)
sum=0.
do 15 j=1,ma
sum=sum+a(j)*afunc(j)
15 continue
chisq=chisq+((z(i)-sum)/sig(i))**2
16 continue
return
end
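c     svdfit solves the weighted least-squares problem u*a = b by singular
c     value decomposition, zeroing singular values below tol*wmax so that
c     nearly degenerate combinations of the poly_funcs basis do not corrupt
c     the fitted coefficients a(1..ma); chisq is the weighted residual.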
subroutine doppler(n_ra,l1,l2,image1,f_d,dbuf)
use fortranUtils
implicit none
integer n_ra
complex*8 image1(N_RA,*)
integer*4 ia,ir,i,j,jj,l1,l2
real*4 wgth
real*4 f_est
real*4 f_d(N_RA)
complex*8 dbuf(N_RA)
integer*4 rinc
real*8 pi
write(6,*) ' '
write(6,*) ' doppler estimation as a function of range :'
pi = getPi()
rinc = nint(float(n_ra)/n_ra)
cc Doppler estimation
do i = 1,n_ra
dbuf(i) = (0.0,0.0)
enddo
do ia=l1+1,l2-1
c wgth = abs(sin(pi*ia/float(2*(l2-l1))))
wgth = 1.0
do ir = rinc+2,n_ra-2,rinc
jj = ir/rinc
do j = ir-rinc+1-2,ir-rinc+1+2
dbuf(jj) = dbuf(jj)
2 + wgth*image1(j,ia)*conjg(image1(j,ia-1))
enddo ! j-loop
enddo ! ir-loop
enddo ! ia-loop
c Doppler ambiguity resolution
do jj = rinc+2,n_ra-2
f_est = atan2(aimag(dbuf(jj)),real(dbuf(jj)))/(2.d0*pi)
if(jj .ne. rinc+2)then
if(abs(f_est-f_d(jj-1)) .gt. .5)then
f_est = f_est + sign(1.0,f_d(jj-1)-f_est)
endif
endif
f_d(jj)= f_est
end do
f_d(1) = f_d(3)
f_d(2) = f_d(3)
f_d(n_ra-1) = f_d(n_ra-2)
f_d(n_ra) = f_d(n_ra-2)
return
end
subroutine covsrt(covar,ncvm,ma,lista,mfit)
implicit real*8 (a-h,o-z)
dimension covar(ncvm,ncvm),lista(mfit)
do 12 j=1,ma-1
do 11 i=j+1,ma
covar(i,j)=0.
11 continue
12 continue
do 14 i=1,mfit-1
do 13 j=i+1,mfit
if(lista(j).gt.lista(i)) then
covar(lista(j),lista(i))=covar(i,j)
else
covar(lista(i),lista(j))=covar(i,j)
endif
13 continue
14 continue
swap=covar(1,1)
do 15 j=1,ma
covar(1,j)=covar(j,j)
covar(j,j)=0.
15 continue
covar(lista(1),lista(1))=swap
do 16 j=2,mfit
covar(lista(j),lista(j))=covar(1,j)
16 continue
do 18 j=2,ma
do 17 i=1,j-1
covar(i,j)=covar(j,i)
17 continue
18 continue
return
end
|
{"hexsha": "1bc6493f933151312cb4f4d09c38cf48e21f1352", "size": 3502, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "components/stdproc/stdproc/resampLib/src/svd.f", "max_stars_repo_name": "vincentschut/isce2", "max_stars_repo_head_hexsha": "1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c", "max_stars_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_stars_count": 1133, "max_stars_repo_stars_event_min_datetime": "2022-01-07T21:24:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-07T21:33:08.000Z", "max_issues_repo_path": "components/stdproc/stdproc/resampLib/src/svd.f", "max_issues_repo_name": "vincentschut/isce2", "max_issues_repo_head_hexsha": "1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c", "max_issues_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_issues_count": 276, "max_issues_repo_issues_event_min_datetime": "2019-02-10T07:18:28.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T21:45:55.000Z", "max_forks_repo_path": "components/stdproc/stdproc/resampLib/src/svd.f", "max_forks_repo_name": "vincentschut/isce2", "max_forks_repo_head_hexsha": "1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c", "max_forks_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_forks_count": 235, "max_forks_repo_forks_event_min_datetime": "2019-02-10T05:00:53.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T07:37:24.000Z", "avg_line_length": 26.1343283582, "max_line_length": 69, "alphanum_fraction": 0.4934323244, "num_tokens": 1222}
|
import tensorflow as tf
import numpy as np
from tqdm import tqdm
def make_iterator(dataset,BATCH_SIZE):
with tf.name_scope('data'):
data = tf.data.Dataset.from_tensor_slices(dataset)
data = data.batch(BATCH_SIZE)
iterator = tf.data.Iterator.from_structure(data.output_types,data.output_shapes)
img,label = iterator.get_next()
return img,label,iterator
def make_data_initializer(data,iterator,BATCH_SIZE=64):
with tf.name_scope('data'):
data = tf.data.Dataset.from_tensor_slices(data)
#data = data.shuffle(10000)
data = data.batch(BATCH_SIZE)
init = iterator.make_initializer(data)
return init
def initialize_model(net,trainset,BATCH_SIZE=64):
print('Initialization ... ')
num_batches = len(trainset[0][0]) // BATCH_SIZE
net.num_batches = num_batches
X_holder,y_holder,iterator = make_iterator(trainset[0],BATCH_SIZE)
net.set_fisher_graph(X_holder,y_holder)
net.set_uncertain_prediction()
out, log_probs, nll, kl_diver= net(X_holder, targets=y_holder, sample=True, n_samples=1,
loss_function=lambda y, y_target:
tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_target, logits=y))
net.set_vanilla_loss(log_probs,nll,num_batches)
'''
_, kl_log_probs, kl_nll, _= net(X_holder, targets=y_holder, sample=True, n_samples=1,
loss_function=lambda y, y_target:
tf.nn.softmax_cross_entropy_with_logits(labels=y_target, logits=y))
net.set_kl_loss(kl_log_probs,kl_nll,num_batches)
'''
#_, mode_kl_log_probs, mode_kl_nll, _= net(X_holder, targets=y_holder, sample=True, n_samples=1, drop_out=True,
# loss_function=lambda y, y_target:
# tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_target, logits=y))
#net.set_drop_loss(mode_kl_log_probs,mode_kl_nll,num_batches)
net.summary()
return iterator
def get_data_init(trainset,testsets,iterator):
train_init = []
test_init = []
for t in range(len(trainset)):
train_init.append(make_data_initializer(trainset[t],iterator))
for t in range(len(testsets)):
test_init.append(make_data_initializer(testsets[t],iterator))
return train_init,test_init
def load_iterator(net,trainset,testsets):
iterator = initialize_model(net,trainset)
train_init,test_init = get_data_init(trainset,testsets,iterator)
return train_init,test_init
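# Hypothetical usage sketch (`net`, `trainset`, and `testsets` are assumed to be
# provided by the surrounding training code; they are not defined in this module):
#   train_init, test_init = load_iterator(net, trainset, testsets)
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       sess.run(train_init[0])  # point the shared iterator at task 0's training data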
|
{"hexsha": "00abff70c6dd2f5493ce2a29a8e99bdf88f3f073", "size": 2582, "ext": "py", "lang": "Python", "max_stars_repo_path": "CBLN/bnn/model_utils.py", "max_stars_repo_name": "Honglin20/Thesis", "max_stars_repo_head_hexsha": "ec344f82d8200ce006082ad32d75a905314be77e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "CBLN/bnn/model_utils.py", "max_issues_repo_name": "Honglin20/Thesis", "max_issues_repo_head_hexsha": "ec344f82d8200ce006082ad32d75a905314be77e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "CBLN/bnn/model_utils.py", "max_forks_repo_name": "Honglin20/Thesis", "max_forks_repo_head_hexsha": "ec344f82d8200ce006082ad32d75a905314be77e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.5373134328, "max_line_length": 115, "alphanum_fraction": 0.6789310612, "include": true, "reason": "import numpy", "num_tokens": 583}
|
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: nadavs@google.com <Nadav Samet>
#include <iostream>
#include <boost/thread/thread.hpp>
#include <glog/logging.h>
#include <gtest/gtest.h>
#include <zmq.hpp>
#include "rpcz/callback.hpp"
#include "rpcz/connection_manager.hpp"
#include "rpcz/rpc_channel.hpp"
#include "rpcz/rpc.hpp"
#include "rpcz/server.hpp"
#include "rpcz/sync_event.hpp"
#include "proto/search.pb.h"
#include "proto/search.rpcz.h"
using namespace std;
namespace rpcz {
void super_done(SearchResponse *response,
rpc* newrpc, reply<SearchResponse> reply) {
delete newrpc;
reply.send(*response);
delete response;
}
class SearchServiceImpl : public SearchService {
public:
SearchServiceImpl(SearchService_Stub* backend, connection_manager* cm)
: backend_(backend), delayed_reply_(NULL), cm_(cm) {};
~SearchServiceImpl() {
}
virtual void Search(
const SearchRequest& request,
reply<SearchResponse> reply) {
if (request.query() == "foo") {
reply.Error(-4, "I don't like foo.");
} else if (request.query() == "bar") {
reply.Error(17, "I don't like bar.");
} else if (request.query() == "delegate") {
rpc* newrpc = new rpc;
SearchResponse* response = new SearchResponse;
backend_->Search(request, response, newrpc, new_callback(super_done,
response,
newrpc,
reply));
return;
} else if (request.query() == "timeout") {
// We "lose" the request. We are going to reply only when we get a request
// for the query "delayed".
boost::unique_lock<boost::mutex> lock(mu_);
delayed_reply_ = reply;
timeout_request_received.signal();
return;
} else if (request.query() == "delayed") {
boost::unique_lock<boost::mutex> lock(mu_);
delayed_reply_.send(SearchResponse());
reply.send(SearchResponse());
} else if (request.query() == "terminate") {
reply.send(SearchResponse());
cm_->terminate();
} else {
SearchResponse response;
response.add_results("The search for " + request.query());
response.add_results("is great");
reply.send(response);
}
}
sync_event timeout_request_received;
private:
scoped_ptr<SearchService_Stub> backend_;
boost::mutex mu_;
reply<SearchResponse> delayed_reply_;
connection_manager* cm_;
};
// For handling complex delegated queries.
class BackendSearchServiceImpl : public SearchService {
public:
virtual void Search(
const SearchRequest&,
reply<SearchResponse> reply) {
SearchResponse response;
response.add_results("42!");
reply.send(response);
}
};
class server_test : public ::testing::Test {
public:
server_test() :
context_(new zmq::context_t(1)),
cm_(new connection_manager(context_.get(), 10)),
frontend_server_(*cm_.get()),
backend_server_(*cm_.get()) {
start_server();
}
~server_test() {
// terminate the context, which will cause the thread to quit.
cm_.reset(NULL);
context_.reset(NULL);
}
void start_server() {
backend_server_.register_service(
new BackendSearchServiceImpl);
backend_server_.bind("inproc://myserver.backend");
backend_connection_ = cm_->connect("inproc://myserver.backend");
frontend_server_.register_service(
frontend_service = new SearchServiceImpl(
new SearchService_Stub(
rpc_channel::create(backend_connection_), true), cm_.get()));
frontend_server_.bind("inproc://myserver.frontend");
frontend_connection_ = cm_->connect("inproc://myserver.frontend");
}
SearchResponse send_blocking_request(connection connection,
const std::string& query) {
SearchService_Stub stub(rpc_channel::create(connection), true);
SearchRequest request;
SearchResponse response;
request.set_query(query);
rpc rpc;
stub.Search(request, &response, &rpc, NULL);
rpc.wait();
EXPECT_TRUE(rpc.ok());
return response;
}
protected:
scoped_ptr<zmq::context_t> context_;
scoped_ptr<connection_manager> cm_;
connection frontend_connection_;
connection backend_connection_;
server frontend_server_;
server backend_server_;
SearchServiceImpl* frontend_service;
};
TEST_F(server_test, SimpleRequest) {
SearchResponse response =
send_blocking_request(frontend_connection_, "happiness");
ASSERT_EQ(2, response.results_size());
ASSERT_EQ("The search for happiness", response.results(0));
}
TEST_F(server_test, SimpleRequestAsync) {
SearchService_Stub stub(rpc_channel::create(frontend_connection_), true);
SearchRequest request;
SearchResponse response;
rpc rpc;
request.set_query("happiness");
sync_event sync;
stub.Search(request, &response, &rpc, new_callback(
&sync, &sync_event::signal));
sync.wait();
ASSERT_TRUE(rpc.ok());
ASSERT_EQ(2, response.results_size());
ASSERT_EQ("The search for happiness", response.results(0));
}
TEST_F(server_test, SimpleRequestWithError) {
SearchService_Stub stub(rpc_channel::create(frontend_connection_), true);
SearchRequest request;
request.set_query("foo");
SearchResponse response;
rpc rpc;
stub.Search(request, &response, &rpc, NULL);
rpc.wait();
ASSERT_EQ(rpc_response_header::APPLICATION_ERROR, rpc.get_status());
ASSERT_EQ("I don't like foo.", rpc.get_error_message());
}
TEST_F(server_test, SimpleRequestWithTimeout) {
SearchService_Stub stub(rpc_channel::create(frontend_connection_), true);
SearchRequest request;
SearchResponse response;
rpc rpc;
request.set_query("timeout");
rpc.set_deadline_ms(1);
stub.Search(request, &response, &rpc, NULL);
rpc.wait();
ASSERT_EQ(rpc_response_header::DEADLINE_EXCEEDED, rpc.get_status());
}
TEST_F(server_test, SimpleRequestWithTimeoutAsync) {
SearchService_Stub stub(rpc_channel::create(frontend_connection_), true);
SearchRequest request;
SearchResponse response;
{
rpc rpc;
request.set_query("timeout");
rpc.set_deadline_ms(1);
sync_event event;
stub.Search(request, &response, &rpc,
new_callback(&event, &sync_event::signal));
event.wait();
ASSERT_EQ(rpc_response_header::DEADLINE_EXCEEDED, rpc.get_status());
}
}
TEST_F(server_test, DelegatedRequest) {
SearchService_Stub stub(rpc_channel::create(frontend_connection_), true);
SearchRequest request;
SearchResponse response;
rpc rpc;
request.set_query("delegate");
stub.Search(request, &response, &rpc, NULL);
rpc.wait();
ASSERT_EQ(rpc_response_header::OK, rpc.get_status());
ASSERT_EQ("42!", response.results(0));
}
TEST_F(server_test, EasyBlockingRequestUsingDelegate) {
SearchService_Stub stub(rpc_channel::create(frontend_connection_), true);
SearchRequest request;
SearchResponse response;
request.set_query("delegate");
stub.Search(request, &response);
ASSERT_EQ("42!", response.results(0));
}
TEST_F(server_test, EasyBlockingRequestRaisesExceptions) {
SearchService_Stub stub(rpc_channel::create(frontend_connection_), true);
SearchRequest request;
SearchResponse response;
request.set_query("foo");
try {
stub.Search(request, &response);
ASSERT_TRUE(false);
} catch (rpc_error &error) {
ASSERT_EQ(status::APPLICATION_ERROR, error.get_status());
ASSERT_EQ(-4, error.get_application_error_code());
}
}
TEST_F(server_test, EasyBlockingRequestWithTimeout) {
SearchService_Stub stub(rpc_channel::create(frontend_connection_), true);
SearchRequest request;
SearchResponse response;
request.set_query("timeout");
try {
stub.Search(request, &response, 1);
ASSERT_TRUE(false);
} catch (rpc_error &error) {
ASSERT_EQ(status::DEADLINE_EXCEEDED, error.get_status());
}
  // We may get here before the timed-out request was processed, and if we
  // send the "delayed" request right away, the server may be unable to reply.
frontend_service->timeout_request_received.wait();
request.set_query("delayed");
stub.Search(request, &response);
}
TEST_F(server_test, ConnectionManagerTermination) {
SearchService_Stub stub(rpc_channel::create(frontend_connection_), true);
SearchRequest request;
request.set_query("terminate");
SearchResponse response;
try {
stub.Search(request, &response, 1);
} catch (rpc_error &error) {
ASSERT_EQ(status::DEADLINE_EXCEEDED, error.get_status());
}
LOG(INFO)<<"I'm here";
cm_->run();
LOG(INFO)<<"I'm there";
}
} // namespace
|
{"hexsha": "f75f2bb5056dde55c75d023434bbe774c769f4f6", "size": 9209, "ext": "cc", "lang": "C++", "max_stars_repo_path": "3rdparty/rpcz/test/client_server_test.cc", "max_stars_repo_name": "marinadudarenko/bigartm", "max_stars_repo_head_hexsha": "c7072663581c59e970ef165a577dc4969810a19d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 97.0, "max_stars_repo_stars_event_min_datetime": "2015-01-08T17:10:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-05T02:29:44.000Z", "max_issues_repo_path": "3rdparty/rpcz/test/client_server_test.cc", "max_issues_repo_name": "marinadudarenko/bigartm", "max_issues_repo_head_hexsha": "c7072663581c59e970ef165a577dc4969810a19d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 18.0, "max_issues_repo_issues_event_min_datetime": "2015-01-17T12:14:14.000Z", "max_issues_repo_issues_event_max_datetime": "2017-03-26T04:19:09.000Z", "max_forks_repo_path": "3rdparty/rpcz/test/client_server_test.cc", "max_forks_repo_name": "marinadudarenko/bigartm", "max_forks_repo_head_hexsha": "c7072663581c59e970ef165a577dc4969810a19d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 41.0, "max_forks_repo_forks_event_min_datetime": "2015-01-10T17:37:42.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-25T07:58:48.000Z", "avg_line_length": 31.4300341297, "max_line_length": 80, "alphanum_fraction": 0.6963839722, "num_tokens": 2087}
|
import tempfile
import unittest
from pathlib import Path
import numpy as np
from dacbench.agents import StaticAgent
from dacbench.benchmarks import LubyBenchmark
from dacbench.logger import Logger, load_logs, log2dataframe
from dacbench.runner import run_benchmark
from dacbench.wrappers import EpisodeTimeWrapper
class TestTimeTrackingWrapper(unittest.TestCase):
def test_logging(self):
temp_dir = tempfile.TemporaryDirectory()
episodes = 5
logger = Logger(
output_path=Path(temp_dir.name),
experiment_name="test_logging",
)
bench = LubyBenchmark()
env = bench.get_environment()
time_logger = logger.add_module(EpisodeTimeWrapper)
wrapped = EpisodeTimeWrapper(env, logger=time_logger)
agent = StaticAgent(env=env, action=1)
run_benchmark(wrapped, agent, episodes, logger)
logger.close()
logs = load_logs(time_logger.get_logfile())
dataframe = log2dataframe(logs, wide=True)
# all steps must have logged time
self.assertTrue((~dataframe.step_duration.isna()).all())
        # each episode has a recorded time
episodes = dataframe.groupby("episode")
last_steps_per_episode = dataframe.iloc[episodes.step.idxmax()]
self.assertTrue((~last_steps_per_episode.episode_duration.isna()).all())
# episode time equals the sum of the steps in episode
calculated_episode_times = episodes.step_duration.sum()
recorded_episode_times = last_steps_per_episode.episode_duration
self.assertListEqual(
calculated_episode_times.tolist(), recorded_episode_times.tolist()
)
temp_dir.cleanup()
def test_init(self):
bench = LubyBenchmark()
env = bench.get_environment()
wrapped = EpisodeTimeWrapper(env)
self.assertTrue(len(wrapped.overall_times) == 0)
self.assertTrue(wrapped.time_interval is None)
wrapped.instance = [0]
self.assertTrue(wrapped.instance[0] == 0)
wrapped2 = EpisodeTimeWrapper(env, 10)
self.assertTrue(len(wrapped2.overall_times) == 0)
self.assertTrue(wrapped2.time_interval == 10)
self.assertTrue(len(wrapped2.time_intervals) == 0)
self.assertTrue(len(wrapped2.current_times) == 0)
def test_step(self):
bench = LubyBenchmark()
env = bench.get_environment()
wrapped = EpisodeTimeWrapper(env, 10)
state = wrapped.reset()
self.assertTrue(len(state) > 1)
state, reward, done, _ = wrapped.step(1)
self.assertTrue(len(state) > 1)
self.assertTrue(reward <= 0)
self.assertFalse(done)
self.assertTrue(len(wrapped.all_steps) == 1)
self.assertTrue(len(wrapped.current_step_interval) == 1)
self.assertTrue(len(wrapped.step_intervals) == 0)
for _ in range(20):
wrapped.step(1)
self.assertTrue(len(wrapped.overall_times) > 2)
self.assertTrue(len(wrapped.time_intervals) == 1)
def test_get_times(self):
bench = LubyBenchmark()
env = bench.get_environment()
wrapped = EpisodeTimeWrapper(env)
wrapped.reset()
for i in range(5):
wrapped.step(i)
wrapped2 = EpisodeTimeWrapper(env, 2)
wrapped2.reset()
for i in range(5):
wrapped2.step(i)
overall_times_only, steps_only = wrapped.get_times()
overall_times, steps, intervals, step_intervals = wrapped2.get_times()
self.assertTrue(
np.array_equal(
np.round(overall_times, decimals=2),
np.round(overall_times_only, decimals=2),
)
)
self.assertTrue(len(step_intervals) == 3)
self.assertTrue(len(step_intervals[0]) == 2)
self.assertTrue(len(step_intervals[1]) == 2)
self.assertTrue(len(step_intervals[2]) == 1)
def test_rendering(self):
bench = LubyBenchmark()
env = bench.get_environment()
wrapped = EpisodeTimeWrapper(env, 10)
wrapped.reset()
for _ in range(30):
wrapped.step(1)
img = wrapped.render_step_time()
self.assertTrue(img.shape[-1] == 3)
img = wrapped.render_episode_time()
self.assertTrue(img.shape[-1] == 3)
|
{"hexsha": "c63d5c2e74da656a6946706210cc6fecaa89e5ac", "size": 4330, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/wrappers/test_time_tracking_wrapper.py", "max_stars_repo_name": "ndangtt/LeadingOnesDAC", "max_stars_repo_head_hexsha": "953747d8702f179851d7973c65779a1f830e03a1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2020-11-09T10:50:31.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-19T09:23:44.000Z", "max_issues_repo_path": "tests/wrappers/test_time_tracking_wrapper.py", "max_issues_repo_name": "ndangtt/LeadingOnesDAC", "max_issues_repo_head_hexsha": "953747d8702f179851d7973c65779a1f830e03a1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 95, "max_issues_repo_issues_event_min_datetime": "2020-11-18T09:37:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-17T10:05:33.000Z", "max_forks_repo_path": "tests/wrappers/test_time_tracking_wrapper.py", "max_forks_repo_name": "ndangtt/LeadingOnesDAC", "max_forks_repo_head_hexsha": "953747d8702f179851d7973c65779a1f830e03a1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2020-11-15T15:24:27.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-14T14:51:43.000Z", "avg_line_length": 34.3650793651, "max_line_length": 80, "alphanum_fraction": 0.6438799076, "include": true, "reason": "import numpy", "num_tokens": 934}
|
PROGRAM hello
print*, 'Hello, World!'
END
|
{"hexsha": "4b34eeda6556cfcc74fd2536d09cde99732fff7a", "size": 62, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "source/pkgsrc/lang/gcc44/files/hello.f", "max_stars_repo_name": "Scottx86-64/dotfiles-1", "max_stars_repo_head_hexsha": "51004b1e2b032664cce6b553d2052757c286087d", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-20T22:46:39.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-20T22:46:39.000Z", "max_issues_repo_path": "source/pkgsrc/lang/gcc44/files/hello.f", "max_issues_repo_name": "Scottx86-64/dotfiles-1", "max_issues_repo_head_hexsha": "51004b1e2b032664cce6b553d2052757c286087d", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "source/pkgsrc/lang/gcc44/files/hello.f", "max_forks_repo_name": "Scottx86-64/dotfiles-1", "max_forks_repo_head_hexsha": "51004b1e2b032664cce6b553d2052757c286087d", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 10.3333333333, "max_line_length": 29, "alphanum_fraction": 0.4838709677, "num_tokens": 16}
|
#include "util/atomic_queue.hpp"
#include <boost/python/errors.hpp>
#include <iostream>
#include "util/errors.hpp"
namespace cvisual {
void
atomic_queue_impl::push_notify()
{
empty = false;
}
} // !namespace cvisual
|
{"hexsha": "afd21987c62911f9800d90737a466243306a8143", "size": 236, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/core/util/atomic_queue.cpp", "max_stars_repo_name": "lebarsfa/vpython-wx", "max_stars_repo_head_hexsha": "38df062e5532b79f632f4f2a1abae86754c264a9", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 68.0, "max_stars_repo_stars_event_min_datetime": "2015-01-17T05:41:58.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-24T08:35:24.000Z", "max_issues_repo_path": "src/core/util/atomic_queue.cpp", "max_issues_repo_name": "lebarsfa/vpython-wx", "max_issues_repo_head_hexsha": "38df062e5532b79f632f4f2a1abae86754c264a9", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 16.0, "max_issues_repo_issues_event_min_datetime": "2015-01-02T19:36:06.000Z", "max_issues_repo_issues_event_max_datetime": "2018-09-09T21:01:25.000Z", "max_forks_repo_path": "src/core/util/atomic_queue.cpp", "max_forks_repo_name": "lebarsfa/vpython-wx", "max_forks_repo_head_hexsha": "38df062e5532b79f632f4f2a1abae86754c264a9", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 37.0, "max_forks_repo_forks_event_min_datetime": "2015-02-04T04:23:00.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-07T03:24:41.000Z", "avg_line_length": 14.75, "max_line_length": 35, "alphanum_fraction": 0.686440678, "num_tokens": 52}
|
"""
** deeplean-ai.com **
created by :: GauravBh1010tt
contact :: gauravbhatt.deeplearn@gmail.com
"""
import time
import math
import pandas as pd
import numpy as np
import torch
import os
def asMinutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def timeSince(since, percent):
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return '%s (- %s)' % (asMinutes(s), asMinutes(rs))
def to_big_strokes(stroke, max_len=100):
result = np.zeros((max_len, 5), dtype=float)
l = len(stroke)
assert l <= max_len
result[0:l, 0:2] = stroke[:, 0:2]
result[0:l, 3] = stroke[:, 2]
result[0:l, 2] = 1 - result[0:l, 3]
result[l:, 4] = 1
return result
def to_normal_strokes(big_stroke):
l = 0
for i in range(len(big_stroke)):
if big_stroke[i, 4] > 0:
l = i
break
if l == 0:
l = len(big_stroke)
result = np.zeros((l, 3))
result[:, 0:2] = big_stroke[0:l, 0:2]
result[:, 2] = big_stroke[0:l, 3]
return result
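# The two conversions above follow the sketch-rnn stroke encodings: a "normal"
# stroke row is (dx, dy, pen_lifted), while a "big" stroke row is the
# five-element (dx, dy, pen_down, pen_lifted, end_of_sketch), zero-padded to
# max_len with the end_of_sketch flag set on the padding rows.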
def purify(strokes, max_seq=200):
data = []
for seq in strokes:
if seq.shape[0] <= max_seq and seq.shape[0] > 10:
seq = np.minimum(seq, 1000)
seq = np.maximum(seq, -1000)
seq = np.array(seq, dtype=np.float32)
data.append(seq)
return data
def calculate_normalizing_scale_factor(strokes):
data = []
for i in range(len(strokes)):
for j in range(len(strokes[i])):
data.append(strokes[i][j, 0])
data.append(strokes[i][j, 1])
data = np.array(data)
return np.std(data)
def normalize(strokes):
data = []
scale_factor = calculate_normalizing_scale_factor(strokes)
for seq in strokes:
seq[:, 0:2] /= scale_factor
data.append(seq)
return data
def get_batch_validation(data_enc, data_dec, batch_size):
batch_idx = np.random.choice(len(data_enc),batch_size)
data_e, data_d =[], []
for i in batch_idx:
data_e.append(data_enc[i])
data_d.append(data_dec[i])
return data_e, data_d
def get_data(data_type='kanji', max_len=200, part="train"):
if data_type == 'kanji':
raw_data = pd.read_pickle('sketch-rnn-datasets/kanji/kanji.cpkl')
else:
# If the dataset is a mixture of several datasets, they should be separated with underscores
datasets = data_type.split("_")
if(len(datasets)==1):
raw_data = np.load('sketch-rnn-datasets/'+data_type+'/sketchrnn_'+data_type+'.npz', encoding='latin1',allow_pickle=True)[part]
else:
n = len(datasets)
raw_data = np.load('sketch-rnn-datasets/'+datasets[0]+'/sketchrnn_'+datasets[0]+'.npz', encoding='latin1',allow_pickle=True)[part]
np.random.shuffle(raw_data)
raw_data = raw_data[:int(len(raw_data)/n)]
for i in range(1,len(datasets)):
rd = np.load('sketch-rnn-datasets/'+datasets[i]+'/sketchrnn_'+datasets[i]+'.npz', encoding='latin1',allow_pickle=True)[part]
np.random.shuffle(rd)
rd = rd[:int(len(rd)/n)]
raw_data = np.concatenate((raw_data,rd),axis=0)
    all_len = [len(i) for i in raw_data]
max_len = max(all_len)
raw_data = purify(raw_data)
data_enc = np.zeros((len(raw_data), max_len, 5))
data_dec = np.zeros((len(raw_data), max_len+1, 5))
data_enc[:,:,-1] = 1
data_dec[:,:,-1] = 1
data_dec[:,0,:] = [0,0,1,0,0]
for i,j in enumerate(raw_data):
big_strokes = to_big_strokes(j, max_len)
data_enc[i] = big_strokes
data_dec[i,1:,] = big_strokes
data_enc = normalize(data_enc)
data_dec = normalize(data_dec)
return data_enc, data_dec, max_len
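# Example call (dataset names are hypothetical): get_data('cat_dog', part='train')
# would mix equal shares of the cat and dog .npz archives, returning the stroke-5
# encoder array, the decoder array (shifted right by one start token
# [0, 0, 1, 0, 0]), and the maximum sequence length found in the raw data.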
def save_checkpoint(epoch, model, optimizer, directory, \
filename='best.pt'):
checkpoint=({'epoch': epoch+1,
'model': model.state_dict(),
'optimizer' : optimizer.state_dict()
})
try:
torch.save(checkpoint, os.path.join(directory, filename))
except:
os.mkdir(directory)
torch.save(checkpoint, os.path.join(directory, filename))
|
{"hexsha": "36fc9bd99aa200539601b907aa8502efc8165ffc", "size": 4172, "ext": "py", "lang": "Python", "max_stars_repo_path": "sketch_generation/data_load.py", "max_stars_repo_name": "louis-gautier/Project-DL-Seq2Seq", "max_stars_repo_head_hexsha": "25fca13803b1a70dc9c4f5be2cf5c85a75496feb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sketch_generation/data_load.py", "max_issues_repo_name": "louis-gautier/Project-DL-Seq2Seq", "max_issues_repo_head_hexsha": "25fca13803b1a70dc9c4f5be2cf5c85a75496feb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sketch_generation/data_load.py", "max_forks_repo_name": "louis-gautier/Project-DL-Seq2Seq", "max_forks_repo_head_hexsha": "25fca13803b1a70dc9c4f5be2cf5c85a75496feb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.1748251748, "max_line_length": 140, "alphanum_fraction": 0.5977948226, "include": true, "reason": "import numpy", "num_tokens": 1201}
|
program flotante
use, intrinsic :: iso_fortran_env, only: sp=>real32, dp=>real64
implicit none
real(sp) :: flotante32
real(dp) :: flotante64
flotante32 = 1.0_sp
    ! Explicit suffix for literal constants
flotante64 = 1.0_dp
print *,'flotante32 = ',flotante32
print *,'flotante64 = ',flotante64
!flotante32 = 1.00000000
!flotante64 = 1.0000000000000000
end program flotante
|
{"hexsha": "85d2870207beb14ae3219de7422995ce9e745479", "size": 487, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "Programas/flotante.f90", "max_stars_repo_name": "Marc-xyz/InicioRapidoEnFortran", "max_stars_repo_head_hexsha": "8cfe017877061bbfbb2bef66e16c2afc9375243d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-25T22:00:53.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-25T22:00:53.000Z", "max_issues_repo_path": "Programas/flotante.f90", "max_issues_repo_name": "Marc-xyz/InicioRapidoEnFortran", "max_issues_repo_head_hexsha": "8cfe017877061bbfbb2bef66e16c2afc9375243d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Programas/flotante.f90", "max_forks_repo_name": "Marc-xyz/InicioRapidoEnFortran", "max_forks_repo_head_hexsha": "8cfe017877061bbfbb2bef66e16c2afc9375243d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.4375, "max_line_length": 71, "alphanum_fraction": 0.59137577, "num_tokens": 160}
|
# coding:utf-8
import logging
import time
import pyaudio
import numpy
import platform
import itertools
from threading import Timer, Event
from queue import Queue, Empty
from .base import BaseOperation
class EnterGate(BaseOperation):
def __init__(self, stoped, tasksQueue):
"""
        Hold the dungeon (instance) gate.
"""
isinstance(tasksQueue, Queue)
BaseOperation.__init__(self, stoped, tasksQueue)
self.log = logging.getLogger()
self.log.setLevel(logging.INFO)
self.x_dim, self.y_dim = self.mouse.screen_size()
self.x_atom, self.y_atom = self.x_dim / 1000, self.y_dim / 1000
def start(self):
self.queue.put(self.do)
# self.queue.put(self.move)
def run(self):
"""
        Timing loop: pull queued actions and execute them.
:param pos:
:return:
"""
while not self.stoped.wait(0):
try:
func = self.queue.get(timeout=1)
if func:
with self.lock:
func()
except Empty:
pass
def do(self):
"""
:return:
"""
        # Move forward (hold 'w' for 2 seconds)
self.keyboard.press_key('w')
time.sleep(2)
self.keyboard.release_key('w')
        # Move backward for 1 second
self.keyboard.press_key('s')
time.sleep(1)
self.keyboard.release_key('s')
self.put(1, self.do)
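        # Note: self.put, self.keyboard and self.mouse are assumed to be
        # provided by BaseOperation (not shown here); put(1, self.do) is
        # assumed to re-queue this action so the forward/backward nudge
        # repeats indefinitely.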
|
{"hexsha": "ea87076c784ca8acaa79c18275b7c3268e6580ac", "size": 1368, "ext": "py", "lang": "Python", "max_stars_repo_path": "slavewg/entergate.py", "max_stars_repo_name": "lamter/slavewg", "max_stars_repo_head_hexsha": "c3a4098b6c2cbfd232f8ed2290141b7f61d7db6f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-08-13T15:04:33.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-12T16:12:39.000Z", "max_issues_repo_path": "slavewg/entergate.py", "max_issues_repo_name": "lamter/slavewg", "max_issues_repo_head_hexsha": "c3a4098b6c2cbfd232f8ed2290141b7f61d7db6f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "slavewg/entergate.py", "max_forks_repo_name": "lamter/slavewg", "max_forks_repo_head_hexsha": "c3a4098b6c2cbfd232f8ed2290141b7f61d7db6f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.064516129, "max_line_length": 71, "alphanum_fraction": 0.5423976608, "include": true, "reason": "import numpy", "num_tokens": 323}
|
// Copyright (c) 2016 Alexander Gallego. All rights reserved.
//
#include "codegen.h"
#include <iostream>
#include <boost/filesystem.hpp>
#include <glog/logging.h>
#include "cpp_generator.h"
#include "go_generator.h"
#include "python_generator.h"
namespace smf_gen {
codegen::codegen(std::string ifname, std::string out_dir,
std::vector<std::string> includes, std::vector<language> langs)
: input_filename(std::move(ifname)), output_dir(std::move(out_dir)),
languages(std::move(langs)), include_dirs_(std::move(includes)) {
  LOG_IF(FATAL, languages.empty()) << "No programming languages";
LOG_IF(FATAL, output_dir.empty()) << "No place to put generated code";
parser_ = std::make_unique<flatbuffers::Parser>(opts_);
// make sure that the current file's dir is added to the include dirs
//
if (include_dirs_.end() ==
std::find_if(include_dirs_.begin(), include_dirs_.end(),
[this](auto &s) { return s == input_filename; })) {
include_dirs_.push_back(flatbuffers::StripFileName(input_filename));
}
}
std::size_t
codegen::service_count() const {
if (!parsed_) {
LOG(ERROR) << "Generator not parsed, please call ::parse() first";
return 0;
}
return parser_->services_.vec.size();
}
smf::compat::optional<std::string>
codegen::parse() {
if (parsed_) {
if (!parser_->error_.empty()) {
return "Parser in error state: " + parser_->error_;
}
return smf::compat::nullopt;
}
parsed_ = true;
// UGH!!! Flatbuffers C-looking API .... is...
//
std::vector<const char *> cincludes;
cincludes.reserve(include_dirs_.size() + 1);
for (auto &s : include_dirs_) {
cincludes.push_back(s.c_str());
}
// always end in null :'(
cincludes.push_back(nullptr);
std::string contents;
if (!flatbuffers::LoadFile(input_filename.c_str(), true, &contents)) {
return "Could not load file: " + input_filename;
}
if (!parser_->Parse(contents.c_str(), cincludes.data(),
input_filename.c_str())) {
return "Could not PARSE file: " + parser_->error_;
}
return smf::compat::nullopt;
}
smf::compat::optional<std::string>
codegen::gen() {
auto x = parse();
if (x) { return x; }
for (const auto &l : languages) {
switch (l) {
case language::cpp: {
VLOG(1) << "Adding cpp generator";
auto g = std::make_unique<cpp_generator>(*parser_.get(), input_filename,
output_dir);
x = g->gen();
if (x) { return x; }
VLOG(1) << "Generated: " << g->output_filename();
break;
}
case language::go: {
VLOG(1) << "Adding golang generator";
auto g = std::make_unique<go_generator>(*parser_.get(), input_filename,
output_dir);
x = g->gen();
if (x) { return x; }
VLOG(1) << "Generated: " << g->output_filename();
break;
}
case language::python: {
VLOG(1) << "Adding python generator";
auto g = std::make_unique<python_generator>(*parser_.get(),
input_filename, output_dir);
x = g->gen();
if (x) { return x; }
VLOG(1) << "Generated: " << g->output_filename();
break;
}
default:
      LOG(ERROR) << "Unknown code generator";
}
}
return smf::compat::nullopt;
}
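// Hypothetical invocation, assuming codegen.h declares the constructor used
// above (paths and language list are made up for illustration):
//     codegen cg("service.fbs", "gen/", {"idl/include"},
//                {language::cpp, language::python});
//     if (auto err = cg.gen()) { LOG(ERROR) << *err; }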
} // namespace smf_gen
|
{"hexsha": "fd6f865f091d5208ecdc1a291c95642419567e6d", "size": 3393, "ext": "cc", "lang": "C++", "max_stars_repo_path": "src/smfc/codegen.cc", "max_stars_repo_name": "mmaslankaprv/smf", "max_stars_repo_head_hexsha": "1ff81eeb69a73faa42f7b178521748e1747d55f9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 262.0, "max_stars_repo_stars_event_min_datetime": "2016-11-28T04:20:31.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-27T03:20:55.000Z", "max_issues_repo_path": "src/smfc/codegen.cc", "max_issues_repo_name": "mmaslankaprv/smf", "max_issues_repo_head_hexsha": "1ff81eeb69a73faa42f7b178521748e1747d55f9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 200.0, "max_issues_repo_issues_event_min_datetime": "2016-11-17T22:28:47.000Z", "max_issues_repo_issues_event_max_datetime": "2018-11-03T05:03:29.000Z", "max_forks_repo_path": "src/smfc/codegen.cc", "max_forks_repo_name": "mmaslankaprv/smf", "max_forks_repo_head_hexsha": "1ff81eeb69a73faa42f7b178521748e1747d55f9", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 28.0, "max_forks_repo_forks_event_min_datetime": "2016-11-16T19:07:56.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-10T03:38:15.000Z", "avg_line_length": 28.7542372881, "max_line_length": 80, "alphanum_fraction": 0.5968169761, "num_tokens": 875}
|
"""
This module tests the predictive posterior for missing data in the Bayesian GP-LVM, MRD, and DP-GP-LVM models. This
module uses PoseTrack data to perform these tests.
"""
from distributions.normal import mvn_log_pdf
from models.dp_gp_lvm import dp_gp_lvm
from models.gaussian_process import bayesian_gp_lvm, manifold_relevance_determination
from utils.types import get_training_variables, get_prediction_variables
import numpy as np
from os.path import isfile
from sys import path
import tensorflow as tf
from time import time
def prepare_data(data_file_path, seed_val=1, mask_size=1):
"""
    Load the saved data array, randomly permute its rows and mask-grouped columns, and return the normalised,
    permuted, and original arrays.
:return:
"""
original_data = np.load(data_file_path)
num_samples, num_dimensions = original_data.shape
# Randomly permute rows and columns.
# Permute columns by grouping by mask size, i.e., keep 2D coordinates together if mask_size=2.
np.random.seed(seed=seed_val)
row_indices = np.random.permutation(num_samples)
cols = np.arange(num_dimensions).reshape((np.int(num_dimensions / mask_size), mask_size))
col_indices = cols[np.random.permutation(np.int(num_dimensions / mask_size)), :].flatten()
permuted_data = original_data[row_indices, :][:, col_indices]
# Data has already been normalised.
normalised_data = permuted_data
return normalised_data, permuted_data, original_data
def prepare_missing_data(data, percent_samples_observe=0.75, percent_dimensions_observe=0.85):
"""
    Split the data into a training block plus test samples whose dimensions are divided into observed and
    unobserved (ground-truth) parts.
:return:
"""
num_samples, num_dimensions = data.shape
# Separate into training and test data.
num_training_samples = int(np.ceil(percent_samples_observe * num_samples))
num_observed_dimensions = int(np.ceil(percent_dimensions_observe * num_dimensions))
training_data = data[:num_training_samples, :]
test_data = data[num_training_samples:, :]
test_data_observed = test_data[:, :num_observed_dimensions]
test_data_unobserved_ground_truth = test_data[:, num_observed_dimensions:]
return training_data, test_data_observed, test_data_unobserved_ground_truth
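# For example (hypothetical shapes): data of shape (1000, 60) with the default split
# fractions yields training_data of shape (750, 60), test_data_observed of shape
# (250, 51), and test_data_unobserved_ground_truth of shape (250, 9).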
def run_bgplvm(y_train, y_test_observed, y_test_unobserved, num_latent_dimensions, num_inducing_points,
train_iter, predict_iter, learning_rate, save_file, seed_val=1):
"""
    Train a Bayesian GP-LVM on y_train, then optimise the predictive posterior for the unobserved test dimensions.
:param y_train:
:param y_test_observed:
:param y_test_unobserved:
:param num_latent_dimensions:
:param num_inducing_points:
:param train_iter:
:param predict_iter:
:param learning_rate:
:param save_file:
:param seed_val:
:return:
"""
# Set seed.
np.random.seed(seed=seed_val)
# Define instance of Bayesian GP-LVM.
bgplvm = bayesian_gp_lvm(y_train=y_train,
num_latent_dims=num_latent_dimensions,
num_inducing_points=num_inducing_points)
num_unobserved_dimensions = np.shape(y_test_unobserved)[1]
# Define objectives.
training_objective = bgplvm.objective
predict_lower_bound, x_mean_test, x_covar_test, \
predicted_mean, predicted_covar = bgplvm.predict_missing_data(y_test=y_test_observed)
predict_objective = tf.negative(predict_lower_bound)
# Optimisation.
training_var_list = get_training_variables()
predict_var_list = get_prediction_variables()
opt_train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss=training_objective,
var_list=training_var_list)
opt_predict = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss=predict_objective,
var_list=predict_var_list)
with tf.Session() as s:
# Initialise variables.
s.run(tf.variables_initializer(var_list=training_var_list)) # Initialise training variables first.
s.run(tf.variables_initializer(var_list=predict_var_list)) # Then initialise prediction variables.
s.run(tf.global_variables_initializer()) # Finally initialise any remaining global variables such as opt ones.
# Training optimisation loop.
start_time = time()
print('\nTraining BGPLVM..')
for c in range(train_iter):
s.run(opt_train)
if (c % 100) == 0:
print(' BGPLVM opt iter {:5}: {}'.format(c, s.run(training_objective)))
end_time = time()
train_opt_time = end_time - start_time
print('Final iter {:5}:'.format(c))
print(' BGPLVM: {}'.format(s.run(training_objective)))
print('Time to optimise: {} s'.format(train_opt_time))
# Get converged values as numpy arrays.
ard_weights, noise_precision, signal_variance, inducing_input = s.run((bgplvm.ard_weights,
bgplvm.noise_precision,
bgplvm.signal_variance,
bgplvm.inducing_input))
x_mean, x_covar = s.run(bgplvm.q_x)
# Initialise prediction variables.
s.run(tf.variables_initializer(var_list=predict_var_list))
# Prediction optimisation loop.
start_time = time()
print('\nOptimising Predictions..')
for c in range(predict_iter):
s.run(opt_predict)
if (c % 100) == 0:
print(' BGPLVM opt iter {:5}: {}'.format(c, s.run(predict_objective)))
end_time = time()
predict_opt_time = end_time - start_time
print('Final iter {:5}:'.format(c))
print(' BGPLVM: {}'.format(s.run(predict_objective)))
print('Time to optimise: {} s'.format(predict_opt_time))
# Get converged values as numpy arrays.
x_mean_test_np, x_covar_test_np, predicted_mean_np, predicted_covar_np = s.run((x_mean_test,
x_covar_test,
predicted_mean,
predicted_covar))
# Calculate log-likelihood of ground truth with predicted posteriors.
gt_log_likelihoods = [
mvn_log_pdf(x=tf.transpose(tf.slice(y_test_unobserved, begin=[0, du], size=[-1, 1])),
mean=tf.transpose(tf.slice(predicted_mean, begin=[0, du], size=[-1, 1])),
covariance=tf.squeeze(tf.slice(predicted_covar, begin=[du, 0, 0], size=[1, -1, -1]),
axis=0))
for du in range(num_unobserved_dimensions)]
gt_log_likelihoods_np = np.array(s.run(gt_log_likelihoods))
gt_log_likelihood = np.sum(gt_log_likelihoods_np)
# Save results.
np.savez(save_file, y_train=y_train, y_test_observed=y_test_observed, y_test_unobserved=y_test_unobserved,
ard_weights=ard_weights, noise_precision=noise_precision, signal_variance=signal_variance,
x_u=inducing_input, x_mean=x_mean, x_covar=x_covar, train_opt_time=train_opt_time,
x_mean_test=x_mean_test_np, x_covar_test=x_covar_test_np, predicted_mean=predicted_mean_np,
predicted_covar=predicted_covar_np, predict_opt_time=predict_opt_time,
gt_log_likelihoods=gt_log_likelihoods_np, gt_log_likelihood=gt_log_likelihood)
# Print results.
print('\nBGPLVM:')
print(' Ground Truth Predicted Posterior Log-Likelihood: {}'.format(gt_log_likelihood))
print(' Noise Precision: {}'.format(np.squeeze(noise_precision)))
def run_mrd(y_train, y_test_observed, y_test_unobserved, num_latent_dimensions, num_inducing_points, view_mask,
train_iter, predict_iter, learning_rate, save_file, seed_val=1):
"""
    Train an MRD model on views of y_train, then optimise the predictive posterior for the unobserved test views.
:param y_train:
:param y_test_observed:
:param y_test_unobserved:
:param num_latent_dimensions:
:param num_inducing_points:
:param view_mask:
:param train_iter:
:param predict_iter:
:param learning_rate:
:param save_file:
:param seed_val:
:return:
"""
# Set seed.
np.random.seed(seed=seed_val)
# Segment training data into views of size view_mask.
num_output_dimensions = np.shape(y_train)[1]
views_train = [y_train[:, i:i+view_mask] for i in range(0, num_output_dimensions, view_mask)]
# Define instance of MRD.
mrd = manifold_relevance_determination(views_train=views_train,
num_latent_dims=num_latent_dimensions,
num_inducing_points=num_inducing_points)
# Segment observed and unobserved data into views of size view_mask.
num_observed_dimensions = np.shape(y_test_observed)[1]
num_unobserved_dimensions = np.shape(y_test_unobserved)[1]
# Need to make sure observed dimensions is multiple of view_mask, otherwise iterate until it is.
if num_observed_dimensions % view_mask == 0:
views_test_observed = [y_test_observed[:, i:i+view_mask] for i in range(0,
num_observed_dimensions,
view_mask)]
views_test_unobserved = [y_test_unobserved[:, i:i+view_mask] for i in range(0,
num_unobserved_dimensions,
view_mask)]
else:
y_test = np.hstack((y_test_observed, y_test_unobserved))
# Correct number of observed and unobserved dimensions.
num_observed_dimensions = num_observed_dimensions + (view_mask - 1)
num_unobserved_dimensions = num_output_dimensions - num_observed_dimensions
views_test_observed = [y_test[:, i:i+view_mask] for i in range(0,
num_observed_dimensions,
view_mask)]
views_test_unobserved = [y_test[:, i:i+view_mask] for i in range(num_observed_dimensions,
num_output_dimensions,
view_mask)]
# Define ground truth depending on how test set was broken up.
ground_truth = np.hstack(views_test_unobserved)
# Define objectives.
training_objective = mrd.objective
predict_lower_bound, x_mean_test, x_covar_test, \
predicted_means, predicted_covars = mrd.predict_missing_data(views_test=views_test_observed)
predict_objective = tf.negative(predict_lower_bound)
# Optimisation.
training_var_list = get_training_variables()
predict_var_list = get_prediction_variables()
opt_train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss=training_objective,
var_list=training_var_list)
opt_predict = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss=predict_objective,
var_list=predict_var_list)
with tf.Session() as s:
# Initialise variables.
s.run(tf.variables_initializer(var_list=training_var_list)) # Initialise training variables first.
s.run(tf.variables_initializer(var_list=predict_var_list)) # Then initialise prediction variables.
s.run(tf.global_variables_initializer()) # Finally initialise any remaining global variables such as opt ones.
# Training optimisation loop.
start_time = time()
print('\nTraining MRD..')
for c in range(train_iter):
s.run(opt_train)
if (c % 100) == 0:
print(' MRD opt iter {:5}: {}'.format(c, s.run(training_objective)))
end_time = time()
train_opt_time = end_time - start_time
print('Final iter {:5}:'.format(c))
print(' MRD: {}'.format(s.run(training_objective)))
print('Time to optimise: {} s'.format(train_opt_time))
# Get converged values as numpy arrays.
ard_weights, noise_precisions, signal_variances, inducing_inputs = s.run((mrd.ard_weights,
mrd.noise_precisions,
mrd.signal_variances,
mrd.inducing_inputs))
x_mean, x_covar = s.run(mrd.q_x)
# Initialise prediction variables.
s.run(tf.variables_initializer(var_list=predict_var_list))
# Prediction optimisation loop.
start_time = time()
print('\nOptimising Predictions..')
for c in range(predict_iter):
s.run(opt_predict)
if (c % 100) == 0:
print(' MRD opt iter {:5}: {}'.format(c, s.run(predict_objective)))
end_time = time()
predict_opt_time = end_time - start_time
print('Final iter {:5}:'.format(c))
print(' MRD: {}'.format(s.run(predict_objective)))
print('Time to optimise: {} s'.format(predict_opt_time))
# Get converged values as numpy arrays.
x_mean_test_np, x_covar_test_np, list_predicted_means, list_predicted_covars = s.run((x_mean_test,
x_covar_test,
predicted_means,
predicted_covars))
# Convert lists to numpy arrays.
predicted_means_np = np.hstack(list_predicted_means)
predicted_covars_np = np.concatenate(list_predicted_covars, axis=0)
# Calculate log-likelihood of ground truth with predicted posteriors.
gt_log_likelihoods = [
mvn_log_pdf(x=tf.transpose(tf.slice(ground_truth, begin=[0, du], size=[-1, 1])),
mean=tf.transpose(tf.slice(predicted_means_np, begin=[0, du], size=[-1, 1])),
covariance=tf.squeeze(tf.slice(predicted_covars_np, begin=[du, 0, 0], size=[1, -1, -1]),
axis=0))
for du in range(num_unobserved_dimensions)]
gt_log_likelihoods_np = np.array(s.run(gt_log_likelihoods))
gt_log_likelihood = np.sum(gt_log_likelihoods_np)
# Save results. Converting lists to numpy arrays.
np.savez(save_file, y_train=y_train, y_test_observed=y_test_observed, y_test_unobserved=y_test_unobserved,
views_train=views_train, views_test_observed=views_test_observed,
views_test_unobserved=views_test_unobserved, ard_weights=np.array(ard_weights),
noise_precision=np.array(noise_precisions), signal_variance=np.array(signal_variances),
x_u=np.array(inducing_inputs), x_mean=x_mean, x_covar=x_covar, train_opt_time=train_opt_time,
x_mean_test=x_mean_test_np, x_covar_test=x_covar_test_np, predicted_mean=predicted_means_np,
predicted_covar=predicted_covars_np, predict_opt_time=predict_opt_time,
gt_log_likelihoods=gt_log_likelihoods_np, gt_log_likelihood=gt_log_likelihood)
# Print results.
print('\nMRD:')
print(' Ground Truth Predicted Posterior Log-Likelihood: {}'.format(gt_log_likelihood))
print(' Noise Precisions: {}'.format(np.squeeze(np.array(noise_precisions))))
def run_gpdp(y_train, y_test_observed, y_test_unobserved, num_latent_dimensions, num_inducing_points, truncation_level,
dp_mask_size, train_iter, predict_iter, learning_rate, save_file, seed_val=1):
"""
TODO
:param y_train:
:param y_test_observed:
:param y_test_unobserved:
:param num_latent_dimensions:
:param num_inducing_points:
:param truncation_level:
:param dp_mask_size:
:param train_iter:
:param predict_iter:
:param learning_rate:
:param save_file:
:param seed_val:
:return:
"""
# Set seed.
np.random.seed(seed=seed_val)
    # Define instance of DP-GP-LVM.
gpdp = dp_gp_lvm(y_train=y_train,
num_latent_dims=num_latent_dimensions,
num_inducing_points=num_inducing_points,
truncation_level=truncation_level,
mask_size=dp_mask_size)
num_unobserved_dimensions = np.shape(y_test_unobserved)[1]
# Define objectives.
training_objective = gpdp.objective
predict_lower_bound, x_mean_test, x_covar_test, \
predicted_mean, predicted_covar = gpdp.predict_missing_data(y_test=y_test_observed)
predict_objective = tf.negative(predict_lower_bound)
# Optimisation.
training_var_list = get_training_variables()
predict_var_list = get_prediction_variables()
opt_train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss=training_objective,
var_list=training_var_list)
opt_predict = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss=predict_objective,
var_list=predict_var_list)
with tf.Session() as s:
# Initialise variables.
s.run(tf.variables_initializer(var_list=training_var_list)) # Initialise training variables first.
s.run(tf.variables_initializer(var_list=predict_var_list)) # Then initialise prediction variables.
s.run(tf.global_variables_initializer()) # Finally initialise any remaining global variables such as opt ones.
# Training optimisation loop.
start_time = time()
print('\nTraining GP-DP..')
for c in range(train_iter):
s.run(opt_train)
if (c % 100) == 0:
print(' GP-DP opt iter {:5}: {}'.format(c, s.run(training_objective)))
end_time = time()
train_opt_time = end_time - start_time
print('Final iter {:5}:'.format(c))
print(' GP-DP: {}'.format(s.run(training_objective)))
print('Time to optimise: {} s'.format(train_opt_time))
# Get converged values as numpy arrays.
ard_weights, noise_precision, signal_variance, inducing_input, assignments = \
s.run((gpdp.ard_weights, gpdp.noise_precision, gpdp.signal_variance, gpdp.inducing_input, gpdp.assignments))
x_mean, x_covar = s.run(gpdp.q_x)
gamma_atoms, alpha_atoms, beta_atoms = s.run(gpdp.dp_atoms)
# Initialise prediction variables.
s.run(tf.variables_initializer(var_list=predict_var_list))
# Prediction optimisation loop.
start_time = time()
print('\nOptimising Predictions..')
for c in range(predict_iter):
s.run(opt_predict)
if (c % 100) == 0:
print(' GP-DP opt iter {:5}: {}'.format(c, s.run(predict_objective)))
end_time = time()
predict_opt_time = end_time - start_time
print('Final iter {:5}:'.format(c))
print(' GP-DP: {}'.format(s.run(predict_objective)))
print('Time to optimise: {} s'.format(predict_opt_time))
# Get converged values as numpy arrays.
x_mean_test_np, x_covar_test_np, predicted_mean_np, predicted_covar_np = s.run((x_mean_test,
x_covar_test,
predicted_mean,
predicted_covar))
# Calculate log-likelihood of ground truth with predicted posteriors.
gt_log_likelihoods = [
mvn_log_pdf(x=tf.transpose(tf.slice(y_test_unobserved, begin=[0, du], size=[-1, 1])),
mean=tf.transpose(tf.slice(predicted_mean, begin=[0, du], size=[-1, 1])),
covariance=tf.squeeze(tf.slice(predicted_covar, begin=[du, 0, 0], size=[1, -1, -1]),
axis=0))
for du in range(num_unobserved_dimensions)]
gt_log_likelihoods_np = np.array(s.run(gt_log_likelihoods))
gt_log_likelihood = np.sum(gt_log_likelihoods_np)
# Save results.
np.savez(save_file, y_train=y_train, y_test_observed=y_test_observed, y_test_unobserved=y_test_unobserved,
ard_weights=ard_weights, noise_precision=noise_precision, signal_variance=signal_variance,
x_u=inducing_input, x_mean=x_mean, x_covar=x_covar, gamma_atoms=gamma_atoms, alpha_atoms=alpha_atoms,
                 beta_atoms=beta_atoms, train_opt_time=train_opt_time, x_mean_test=x_mean_test_np,
x_covar_test=x_covar_test_np, predicted_mean=predicted_mean_np, predicted_covar=predicted_covar_np,
predict_opt_time=predict_opt_time, gt_log_likelihoods=gt_log_likelihoods_np,
gt_log_likelihood=gt_log_likelihood)
# Print results.
print('\nGP-DP:')
print(' Ground Truth Predicted Posterior Log-Likelihood: {}'.format(gt_log_likelihood))
print(' Noise Precisions: {}'.format(np.squeeze(noise_precision)))
if __name__ == '__main__':
# Script booleans.
show_plots = False
print_results = False
save_results = True
# Define paths.
absolute_path = [ap for ap in path if 'aistats_2019' in ap]
# data_path = absolute_path[-1] + '/test/data/posetrack_8820_normalised_2_people.npy'
data_path = absolute_path[-1] + '/test/data/posetrack_8820_normalised_4_people.npy'
results_path = absolute_path[-1] + '/test/results/'
# Optimisation variables.
learning_rate = 0.05
num_iter_train_bgplvm = 1500
num_iter_predict_bgplvm = 1000
num_iter_train_mrd = 750 # 2000 - Can reduce a lot for 4 people scenario.
num_iter_predict_mrd = 500 # 1000 - Can reduce a lot for 4 people scenario.
num_iter_train_gpdp = 2500
num_iter_predict_gpdp = 1500
# Different configurations
seeds = np.arange(1, 11, dtype=int) # [1 - 10].
# seeds = np.arange(1, 6, dtype=int) # [1 - 5].
# dp_masks = np.arange(1, 3, dtype=int) # [1, 2].
dp_masks = np.array([2], dtype=int)
# percent_samples_observed = np.linspace(0.5, 1.0, 5, endpoint=False) # [0.5, 0.6, 0.7, 0.8, 0.9]
# percent_dimensions_observed = np.linspace(0.5, 1.0, 5, endpoint=False) # [0.5, 0.6, 0.7, 0.8, 0.9]
# As results are poor for large missing N and D:
# As N is so small for 8820 2 people already, reduce number of missing observations.
percent_samples_observed = np.array([0.8, 0.9])
# Also reduce number of tested D missing scenarios to speed up script.
percent_dimensions_observed = np.array([0.7, 0.8, 0.9])
# Maybe try [0.75, 0.8, 0.85, 0.9, 0.95].
# GP-LVM parameters
percent_inducing = 0.75
percent_latent = 0.25
# Additional DP-GP-LVM parameters.
truncation_level = 10
# Total number of test scenarios.
counter = 0.0
total_num_tests = np.size(dp_masks) * np.size(seeds) * \
np.size(percent_samples_observed) * np.size(percent_dimensions_observed)
for mask in dp_masks:
for seed in seeds:
# Get randomly permuted, normalised data.
# config = '8820_2_people_seed_{0}_mask_{1}'.format(seed, mask)
config = '8820_4_people_seed_{0}_mask_{1}'.format(seed, mask)
np_file = results_path + 'posetrack_missing_data_{0}.npz'.format(config)
if isfile(np_file):
temp = np.load(np_file)
normalised_data = temp['normalised_data']
permuted_data = temp['permuted_data']
original_data = temp['original_data']
else:
normalised_data, permuted_data, original_data = prepare_data(data_file_path=data_path,
seed_val=seed,
mask_size=mask)
np.savez(np_file, normalised_data=normalised_data,
permuted_data=permuted_data,
original_data=original_data)
for n in percent_samples_observed:
for d in percent_dimensions_observed:
# Print current test scenario.
print('\n--------------------------------------------------------------------------------')
print('\nCurrent Test Scenario:')
print(' DP Mask: {}'.format(mask))
print(' Random seed: {}'.format(seed))
print(' Percent samples observed (n%): {}'.format(n))
print(' Percent dimensions observed (d%): {}'.format(d))
# Separate into training and test data.
config_ext = 'n_percent_{0}_d_percent_{1}'.format(n, d)
np_file = results_path + 'posetrack_missing_data_{0}_{1}.npz'.format(config, config_ext)
if isfile(np_file):
temp = np.load(np_file)
training_data = temp['training_data']
test_data_observed = temp['test_data_observed']
test_data_unobserved_ground_truth = temp['test_data_unobserved_ground_truth']
else:
training_data, test_data_observed, test_data_unobserved_ground_truth = \
prepare_missing_data(data=normalised_data,
percent_samples_observe=n,
percent_dimensions_observe=d)
np.savez(np_file, training_data=training_data,
test_data_observed=test_data_observed,
test_data_unobserved_ground_truth=test_data_unobserved_ground_truth)
# Determine parameters for models. This ensures we do not use more latent dims or inducing points
# than possible for the model.
num_training_samples, num_output_dimensions = np.shape(training_data)
num_test_samples, num_observed_dimensions = np.shape(test_data_observed)
num_unobserved_dimensions = np.shape(test_data_unobserved_ground_truth)[1]
assert num_output_dimensions == num_observed_dimensions + num_unobserved_dimensions, \
'Missing data dimensions do not match.'
assert num_test_samples == np.shape(test_data_unobserved_ground_truth)[0], \
'Number of observations in test data and missing data must be the same.'
num_inducing_points = int(np.ceil(percent_inducing * num_training_samples))
num_latent_dimensions = int(np.ceil(percent_latent * num_observed_dimensions))
while num_latent_dimensions > np.min((num_training_samples, num_output_dimensions)):
num_latent_dimensions -= 1
# Print info.
print('\nPoseTrack Data:')
print('Total number of observations: {}'.format(num_training_samples + num_test_samples))
print('Total number of output dimensions: {}'.format(num_output_dimensions))
print('\nTraining Data:')
print('Number of training samples: {}'.format(num_training_samples))
print('Number of training dimensions: {}'.format(num_output_dimensions))
print('\nMissing Data:')
print('Number of test samples: {}'.format(num_test_samples))
print('Number of provided/observed dimensions: {}'.format(num_observed_dimensions))
print('Number of missing/unobserved dimensions: {}'.format(num_unobserved_dimensions))
# Run each model.
bgplvm_file = results_path + 'bgplvm_posetrack_missing_data_{0}_{1}.npz'.format(config, config_ext)
if isfile(bgplvm_file):
print('\nAlready ran this configuration for BGPLVM.')
else:
# Reset default graph before building new model graph. This speeds up script.
tf.reset_default_graph()
# Build BGP-LVM graph and run it for current configuration.
run_bgplvm(y_train=training_data,
y_test_observed=test_data_observed,
y_test_unobserved=test_data_unobserved_ground_truth,
num_latent_dimensions=num_latent_dimensions,
num_inducing_points=num_inducing_points,
train_iter=num_iter_train_bgplvm,
predict_iter=num_iter_predict_bgplvm,
learning_rate=learning_rate,
save_file=bgplvm_file,
seed_val=seed)
mrd_file = results_path + 'mrd_posetrack_missing_data_{0}_{1}.npz'.format(config, config_ext)
if isfile(mrd_file):
print('\nAlready ran this configuration for MRD.')
else:
# Reset default graph before building new model graph. This speeds up script.
tf.reset_default_graph()
# Build MRD model graph and run it for current configuration.
run_mrd(y_train=training_data,
y_test_observed=test_data_observed,
y_test_unobserved=test_data_unobserved_ground_truth,
num_latent_dimensions=num_latent_dimensions,
num_inducing_points=num_inducing_points,
view_mask=mask,
train_iter=num_iter_train_mrd,
predict_iter=num_iter_predict_mrd,
learning_rate=learning_rate,
save_file=mrd_file,
seed_val=seed)
gpdp_file = results_path + 'gpdp_posetrack_missing_data_{0}_{1}.npz'.format(config, config_ext)
if isfile(gpdp_file):
print('\nAlready ran this configuration for GP-DP.')
else:
# Reset default graph before building new model graph. This speeds up script.
tf.reset_default_graph()
# Build GP-DP model graph and run it for current configuration.
run_gpdp(y_train=training_data,
y_test_observed=test_data_observed,
y_test_unobserved=test_data_unobserved_ground_truth,
num_latent_dimensions=num_latent_dimensions,
num_inducing_points=num_inducing_points,
truncation_level=truncation_level,
dp_mask_size=mask,
train_iter=num_iter_train_gpdp,
predict_iter=num_iter_predict_gpdp,
learning_rate=learning_rate,
save_file=gpdp_file,
seed_val=seed)
# Update number of tests run.
counter += 1.0
print('\n--------------------------------------------------------------------------------')
print('\nPercent tests completed: {}'.format(100.0 * counter / total_num_tests))
# if print_results:
# # Print results.
# print('\nBGPLVM:')
# print(' Ground Truth Predicted Posterior Log-Likelihood: {}'.format(np.sum(final_bgplvm_gt_log_likelihoods)))
# print(' Noise Precision: {}'.format(np.squeeze(bgplvm_noise_precision)))
# print(' Ground Truth Predicted Posterior Log-Likelihoods: {}'.format(final_bgplvm_gt_log_likelihoods))
#
# print('\nGP-DP:')
# print(' Ground Truth Predicted Posterior Log-Likelihood: {}'.format(np.sum(final_gpdp_gt_log_likelihoods)))
# print(' Noise Precisions: {}'.format(np.squeeze(gpdp_noise_precision)))
# print(' Ground Truth Predicted Posterior Log-Likelihoods: {}'.format(final_gpdp_gt_log_likelihoods))
#
# if show_plots:
# # Plot results.
# plot.figure()
# plot.imshow(bgplvm_ard_weights.T)
# plot.title('BGPLVM ARD Weights')
#
# plot.figure()
# plot.imshow(gpdp_ard_weights.T)
# plot.title('GP-DP ARD Weights')
#
# plot.figure()
# plot.imshow(gpdp_assignments.T)
# plot.title('GP-DP Assignments')
#
# plot.show()
|
{"hexsha": "0677dd24751a1821f54fdb49473d39f451738b50", "size": 33673, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/posetrack_missing_data_test.py", "max_stars_repo_name": "AndrewRLawrence/dp_gp_lvm", "max_stars_repo_head_hexsha": "b0d4c776714f22e83de31127fbfbbd511f017dcd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-17T11:44:36.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-17T11:44:36.000Z", "max_issues_repo_path": "test/posetrack_missing_data_test.py", "max_issues_repo_name": "AndrewRLawrence/dp_gp_lvm", "max_issues_repo_head_hexsha": "b0d4c776714f22e83de31127fbfbbd511f017dcd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-07-19T20:47:02.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-19T20:47:02.000Z", "max_forks_repo_path": "test/posetrack_missing_data_test.py", "max_forks_repo_name": "AndrewRLawrence/dp_gp_lvm", "max_forks_repo_head_hexsha": "b0d4c776714f22e83de31127fbfbbd511f017dcd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-07-21T07:13:13.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-21T07:13:13.000Z", "avg_line_length": 51.0971168437, "max_line_length": 120, "alphanum_fraction": 0.5917203694, "include": true, "reason": "import numpy", "num_tokens": 6961}
|
"""Template to turn the .csv files in data/ into work-precision plots."""
import matplotlib.pyplot as plt
import numpy as np
from _styles import LINESTYLES, MARKERS
from probnumeval.timeseries import chi2_confidence_intervals
plt.style.use(
[
"./visualization/stylesheets/fontsize/7pt.mplstyle",
"./visualization/stylesheets/figsize/neurips/one_of_12_tile.mplstyle",
"./visualization/stylesheets/misc/thin_lines.mplstyle",
"./visualization/stylesheets/misc/bottomleftaxes.mplstyle",
"./visualization/stylesheets/color/probnum_colors.mplstyle",
]
)
orders = [3]
q = 3
num_samples = 10
colors = ["C1", "C0"]
styles = ["-", "-"]
PATH = "./data/prior_samples/samples_"
grid = np.load(PATH + "grid.npy")
fig, axes = plt.subplots(ncols=2, dpi=200, constrained_layout=True, sharey=True)
# for ax in axes:
# ax.spines["left"].set_position(("outward", 2))
# ax.spines["bottom"].set_position(("outward", 2))
for idx in range(num_samples):
samples_bridge = np.load(PATH + str(q) + str(idx) + ".npy")
samples = np.load(PATH + str(q) + str(idx) + "2.npy")
axes[1].plot(
grid,
samples_bridge[:, 0],
color="C1",
alpha=0.75,
linewidth=1,
)
axes[0].plot(
grid,
samples[:, 0],
color="C0",
alpha=0.75,
linewidth=1,
)
for ax in axes:
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
# ax.set_xlabel(r"Time")
# axes.set_title(f"Order $\\nu = {q}$")
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((0.0, 1.0))
ax.set_ylim((0.8, 1.8))
# axes[0].set_title(r"$\bf B$", loc="left", fontweight="bold", pad=5)
# axes[1].set_title(r"$\bf C$", loc="left", fontweight="bold", pad=5)
# axes.set_title(r"$\bf B$" + " ", loc="left", fontweight="bold", ha="right")
# axes[0].set_ylabel(r"Samples")
# plt.legend(fancybox=False, edgecolor="black").get_frame().set_linewidth(0.5)
plt.savefig("./figures/prior_samples.pdf")
plt.show()
|
{"hexsha": "28c93f02450197d7551eedc739facb157080c669", "size": 2026, "ext": "py", "lang": "Python", "max_stars_repo_path": "visualization/prior_samples.py", "max_stars_repo_name": "feimeng93/probabilistic-bvp-solver", "max_stars_repo_head_hexsha": "d6b38d4ff7b3ab6cf3003de30eb2f6eeb42c0beb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-11-26T21:05:40.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-28T00:29:10.000Z", "max_issues_repo_path": "visualization/prior_samples.py", "max_issues_repo_name": "feimeng93/probabilistic-bvp-solver", "max_issues_repo_head_hexsha": "d6b38d4ff7b3ab6cf3003de30eb2f6eeb42c0beb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "visualization/prior_samples.py", "max_forks_repo_name": "feimeng93/probabilistic-bvp-solver", "max_forks_repo_head_hexsha": "d6b38d4ff7b3ab6cf3003de30eb2f6eeb42c0beb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-23T10:02:36.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-23T10:02:36.000Z", "avg_line_length": 28.5352112676, "max_line_length": 80, "alphanum_fraction": 0.6298124383, "include": true, "reason": "import numpy", "num_tokens": 589}
|
import faiss
import numpy as np
from itertools import chain
from sklearn import preprocessing
from sklearn.metrics import silhouette_score
from utils import load_image, get_image_paths
class Analyst:
def __init__(self, V_dict):
self.classes = list(V_dict)
self.V_dict = V_dict
        self._V_dict = V_dict  # keep a handle on the original dict (a reference, not an actual copy)
@property
def V_norm_dict(self):
d = {}
for class_, V in self.V_dict.items():
V_norm = preprocessing.normalize(V, norm='l2')
d[class_] = V_norm
return d
@property
def V_norm_mean_dict(self):
d = {}
for class_, V_norm in self.V_norm_dict.items():
V_norm_mean = np.mean(V_norm, axis=0)
d[class_] = V_norm_mean
return d
@property
def V_mean_norm_dict(self):
d = {}
for class_, V in self.V_dict.items():
V_mean = np.mean(V, axis=0)
V_mean = np.expand_dims(V_mean, axis=0)
V_mean_norm = preprocessing.normalize(V_mean, norm='l2')
V_mean_norm = V_mean_norm.flatten()
d[class_] = V_mean_norm
return d
@property
def V_norm_mean_res_dict(self):
V_norm_mean_avg = np.mean(np.stack(list(self.V_norm_mean_dict.values())), axis=0)
d = {class_: mean - V_norm_mean_avg for class_, mean in self.V_norm_mean_dict.items()}
return d
@property
def V_mean_norm_res_dict(self):
V_mean_norm_avg = np.mean(np.stack(list(self.V_mean_norm_dict.values())), axis=0)
d = {class_: mean - V_mean_norm_avg for class_, mean in self.V_mean_norm_dict.items()}
return d
@property
def silhouette_score(self):
V_list = []
labels = []
for class_, class_V in self.V_dict.items():
labels += len(class_V) * [class_]
V_list.append(class_V)
V = np.vstack(V_list)
score = silhouette_score(V, labels, metric='cosine')
return score
class DenseAnalyst(Analyst):
@classmethod
def build(cls, img_by_class, extractor):
V_dict = cls._build_V_dict(img_by_class, extractor)
inst = cls(V_dict)
return inst
@staticmethod
def _build_V_dict(img_by_class, extractor):
V_dict = dict()
for class_, paths in img_by_class.items():
x_list = [load_image(path, extractor.input_shape[1:3])[1] for path in paths]
X = np.vstack(x_list)
V = extractor.predict(X)
V_dict[class_] = V
return V_dict
class AnalystExtras(Analyst):
@property
def avg_intraclass_dist_dict(self):
d = {}
for class_, V_norm in self.V_norm_dict.items():
dist_arr = np.array([])
dim = V_norm.shape[1]
index = faiss.IndexFlatIP(dim)
index.add(np.ascontiguousarray(V_norm))
for i, v in enumerate(V_norm[:-1]):
ref_idx = i + 1
V_ref = V_norm[ref_idx:]
V_ref_indices = list(range(ref_idx, ref_idx + len(V_ref)))
v = np.expand_dims(v, axis=0)
labels_iter_range = list(range(1, len(V_ref) + 1))
labels = np.array([V_ref_indices, labels_iter_range])
distances = np.empty((1, len(V_ref)), dtype='float32')
index.compute_distance_subset(
1, faiss.swig_ptr(v), len(V_ref),
faiss.swig_ptr(distances), faiss.swig_ptr(labels))
distances = distances.flatten()
dist_arr = np.append(dist_arr, distances)
print(f'intraclass distances: {dist_arr}')
avg_dist = np.mean(dist_arr)
d[class_] = avg_dist
return d
@property
def new_avg_interclass_dist_dict(self):
d = {}
class_indices = {}
i = 0
V_norm = np.vstack(self.V_norm_dict.values())
dim = V_norm.shape[1]
index = faiss.IndexFlatIP(dim)
index.add(np.ascontiguousarray(V_norm))
for class_, V_norm_class in self.V_norm_dict.items():
class_indices[class_] = list(range(i, len(V_norm_class)))
distances, closest_indices = index.search(V_norm_class, len(V_norm))
dist_avg = np.mean(distances)
d[class_] = dist_avg
return d
@property
def mean_avg_intraclass_dist(self):
return np.mean(self.avg_intraclass_dist_dict.values())
@property
def avg_interclass_centroid_dist_dict(self):
# Should I use norm_mean or mean_norm for centroid?
d = {}
class_list = []
v_list = []
# TODO: TEMPORARILY CHANGED TO V_mean_norm
for class_, v in self.V_mean_norm_dict.items():
class_list.append(class_)
v_list.append(v)
V_norm_mean = np.stack(v_list)
dim = V_norm_mean.shape[1]
index = faiss.IndexFlatIP(dim)
index.add(np.ascontiguousarray(V_norm_mean))
for i, v in enumerate(V_norm_mean):
V_ref_indices = list(chain(range(0, i), range(i + 1, len(V_norm_mean))))
v = np.expand_dims(v, axis=0)
labels_iter_range = list(range(1, len(V_norm_mean)))
labels = np.array([list(V_ref_indices), labels_iter_range])
distances = np.empty((1, len(V_norm_mean) - 1), dtype='float32')
index.compute_distance_subset(
1, faiss.swig_ptr(v), len(V_norm_mean),
faiss.swig_ptr(distances), faiss.swig_ptr(labels))
distances = distances.flatten()
print(f'centroid distances: {distances}')
avg_dist = np.mean(distances)
d[class_list[i]] = avg_dist
return d
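# Minimal usage sketch (synthetic data, illustrative class names): an Analyst can be built
# directly from a dict mapping each class label to an [n_images, embedding_dim] feature
# matrix; DenseAnalyst.build() would normally produce such a dict from image paths and a
# feature extractor.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    V_dict = {'impressionism': rng.randn(10, 128), 'cubism': rng.randn(12, 128)}
    analyst = Analyst(V_dict)
    for class_, centroid in analyst.V_norm_mean_dict.items():
        print(class_, centroid.shape)
    print('silhouette score:', analyst.silhouette_score)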
|
{"hexsha": "a2a744ed03ad8975ff805aa12867a13db01807a4", "size": 5659, "ext": "py", "lang": "Python", "max_stars_repo_path": "style_stack/analysts.py", "max_stars_repo_name": "TheAustinator/style_similarity_search", "max_stars_repo_head_hexsha": "3f7b35ca0255eb93f5e109f05b31032576ee9c1b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2019-02-13T18:09:22.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-27T23:09:26.000Z", "max_issues_repo_path": "style_stack/analysts.py", "max_issues_repo_name": "TheAustinator/style_similarity_search", "max_issues_repo_head_hexsha": "3f7b35ca0255eb93f5e109f05b31032576ee9c1b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "style_stack/analysts.py", "max_forks_repo_name": "TheAustinator/style_similarity_search", "max_forks_repo_head_hexsha": "3f7b35ca0255eb93f5e109f05b31032576ee9c1b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-01-15T23:41:38.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-13T03:02:51.000Z", "avg_line_length": 35.149068323, "max_line_length": 94, "alphanum_fraction": 0.5905637038, "include": true, "reason": "import numpy", "num_tokens": 1339}
|
InstallMethod( JupyterRender, [ IsRecord ],
r -> Objectify( JupyterRenderableType
, rec( data := rec( text\/plain := String(r) )
, metadata := rec() ) ) );
# This is still an ugly hack, but it's already much better than before!
BindGlobal("JupyterSplashDot",
function(dot)
local fn, fd, r;
fn := TmpName();
fd := IO_File(fn, "w");
IO_Write(fd, dot);
IO_Close(fd);
fd := IO_Popen(IO_FindExecutable("dot"), ["-Tsvg", fn], "r");
r := IO_ReadUntilEOF(fd);
IO_close(fd);
IO_unlink(fn);
return JupyterRenderable( rec( ("image/svg+xml") := r )
, rec( ("image/svg+xml") := rec( width := 500, height := 500 ) ) );
end);
# Splash the subgroup lattice of a group
BindGlobal("JupyterSplashSubgroupLattice",
function(group)
local fn, fd, r, L, dot;
fn := TmpName();
L := LatticeSubgroups(group);
DotFileLatticeSubgroups(L, fn);
fd := IO_Popen(IO_FindExecutable("dot"), ["-Tsvg", fn], "r");
r := IO_ReadUntilEOF(fd);
IO_close(fd);
IO_unlink(fn);
return JupyterRenderable( rec( ("image/svg+xml") := r )
, rec( ("image/svg+xml") := rec( width := 500, height := 500 ) ) ) ;
end);
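# Example usage (illustrative, typed in a notebook cell); both helpers shell out to the
# graphviz `dot` executable, so it must be available on the system path:
# gap> JupyterSplashDot("digraph G { a -> b; b -> c; }");
# gap> JupyterSplashSubgroupLattice(SymmetricGroup(4));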
# To show TikZ in a GAP jupyter notebook
BindGlobal("JupyterSplashTikZ",
function(tikz)
local tmpdir, fn, header, ltx, svgfile, stream, svgdata, tojupyter;
header:=Concatenation( "\\documentclass[crop,tikz]{standalone}\n",
"\\usepackage{pgfplots}",
"\\makeatletter\n",
"\\batchmode\n",
"\\nonstopmode\n",
"\\begin{document}",
"\\begin{tikzpicture}");
header:=Concatenation(header, tikz);
header:=Concatenation(header,"\\end{tikzpicture}\n\\end{document}");
tmpdir := DirectoryTemporary();
fn := Filename( tmpdir, "svg_get" );
PrintTo( Concatenation( fn, ".tex" ), header );
ltx := Concatenation( "pdflatex -shell-escape --output-directory ",
Filename( tmpdir, "" ), " ",
Concatenation( fn, ".tex" ), " > ", Concatenation( fn, ".log2" ) );
Exec( ltx );
if not( IsExistingFile( Concatenation(fn, ".pdf") ) ) then
tojupyter := rec( json := true, name := "stdout",
data := "No pdf was created; pdflatex is installed in your system?" );
else
svgfile := Concatenation( fn, ".svg" );
ltx := Concatenation( "pdf2svg ", Concatenation( fn, ".pdf" ), " ",
svgfile, " >> ", Concatenation( fn, ".log2" ) );
Exec( ltx );
if not( IsExistingFile( svgfile ) ) then
tojupyter := rec( json := true, name := "stdout",
data := "No svg was created; pdf2svg is installed in your system?" );
else
stream := InputTextFile( svgfile );
if stream <> fail then
svgdata := ReadAll( stream );
tojupyter := rec( json := true, source := "gap",
data := rec( ( "image/svg+xml" ) := svgdata ),
metadata := rec( ( "image/svg+xml" ) := rec( width := 500, height := 500 ) ) );
CloseStream( stream );
else
tojupyter := rec( json := true, name := "stdout",
data := Concatenation( "Unable to render ", tikz ), metadata := rec() );
fi;
fi;
fi;
return JupyterRenderable(tojupyter.data, tojupyter.metadata);
end);
# This is really not what I should be doing here...
InstallGlobalFunction(ISO8601Stamp,
function()
local tz, gm, pad;
tz := IO_gettimeofday();
pad := function(i, l, c)
local s;
s := String(i);
if Length(s) < l then
return Concatenation(RepeatedString(c, l - Length(s)), s);
else
return s;
fi;
end;
gm := IO_gmtime(tz.tv_sec);
return STRINGIFY( 1900 + gm.tm_year, "-"
, pad(gm.tm_mon + 1, 2, '0'), "-"
, pad(gm.tm_mday, 2, '0'), "T"
, pad(gm.tm_hour, 2, '0'), ":"
, pad(gm.tm_min, 2, '0'), ":"
, pad(gm.tm_sec, 2, '0'), "."
, pad(tz.tv_usec, 6, '0') );
end);
|
{"hexsha": "09889f62a487f049fcc48b2d1e6378789eb1c06f", "size": 4412, "ext": "gi", "lang": "GAP", "max_stars_repo_path": "gap/JupyterUtil.gi", "max_stars_repo_name": "ZachNewbery/JupyterKernel", "max_stars_repo_head_hexsha": "5bf0e17031271bc641c4e604c9562eb48dd33633", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2017-10-06T06:11:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-11T18:28:56.000Z", "max_issues_repo_path": "gap/JupyterUtil.gi", "max_issues_repo_name": "ZachNewbery/JupyterKernel", "max_issues_repo_head_hexsha": "5bf0e17031271bc641c4e604c9562eb48dd33633", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 111, "max_issues_repo_issues_event_min_datetime": "2017-10-03T15:30:56.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-28T12:55:13.000Z", "max_forks_repo_path": "gap/JupyterUtil.gi", "max_forks_repo_name": "ZachNewbery/JupyterKernel", "max_forks_repo_head_hexsha": "5bf0e17031271bc641c4e604c9562eb48dd33633", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 16, "max_forks_repo_forks_event_min_datetime": "2017-10-18T14:48:56.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-30T09:46:37.000Z", "avg_line_length": 35.296, "max_line_length": 113, "alphanum_fraction": 0.5027198549, "num_tokens": 1146}
|
# -*- coding: utf-8 -*-
"""TASK#1.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1u-kmG1USY74r39hqZBePjvvD3GZOJkcJ
# **LetsGrowMore**
---
---
# ***Data Science Internship***
---
---
## `Author: UMER FAROOQ`
## `Task Level: Beginner Level`
## `Task Number: 1`
## `Task Title: Iris Flower Classification`
## `Language: Python`
## `IDE: Google Colab`
# **Steps**:
# **Step:1**
***Importing Libraries***
"""
# Commented out IPython magic to ensure Python compatibility.
import numpy as np
import pandas as pd
import seaborn as sns
sns.set_palette('husl')
import matplotlib.pyplot as plt
# %matplotlib inline
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
"""# **Step:2**
***Loading the Dataset:*** I have picked the dataset from the following link. You can either download it or load it directly from the URL.
"""
url = 'https://raw.githubusercontent.com/jbrownlee/Datasets/master/iris.csv'
# Creating the list of column names:
column_name = ['sepal-length','sepal-width','petal-length','petal-width','class']
# Pandas read_csv() is used for reading the csv file:
dataset = pd.read_csv(url, names = column_name)
"""# **Step:3**
***Dataset Summarizing:*** Check the structure/shape of the data we have to work with.
"""
dataset.shape
"""This shows that we have:
1. 150 rows,
2. 5 columns.
That's enough for our beginner project.
*Displaying the First 5 records:*
"""
dataset.head()
"""Pandas info() method prints information about a DataFrame such as datatypes, cols, NAN values and usage of memory:"""
dataset.info()
dataset.isnull()
# Returns no. of missing records/values
dataset.isnull().sum()
""" Pandas describe() is used to view some basic statistical details like percentile, mean, std etc. of a data frame or a series of numeric values: """
dataset.describe()
"""**Now let’s check the number of rows that belongs to each class:**"""
dataset['class'].value_counts() # No of records/samples in each class
"""The above outputs shows that each class of flowers has 50 rows.
# **Step: 4 Data Visualization**
---
---
Data visualization is the process of translating large data sets and metrics into charts, graphs and other visuals.
---
---
**Violin Plot:** Plotting the violin plot to check the comparison of a variable distribution:
"""
sns.violinplot(y='class', x='sepal-length', data=dataset, inner='quartile')
plt.show()
print('\n')
sns.violinplot(y='class', x='sepal-width', data=dataset, inner='quartile')
plt.show()
print('\n')
sns.violinplot(y='class', x='petal-length', data=dataset, inner='quartile')
plt.show()
print('\n')
sns.violinplot(y='class', x='petal-width', data=dataset, inner='quartile')
plt.show()
print('\n')
"""Above-plotted violin plot says that Iris-Setosa class is having a smaller petal length and petal width as compared to other class.
**Pair Plot:** Plotting multiple pairwise bivariate distributions in a dataset using pairplot:
"""
sns.pairplot(dataset, hue='class', markers='+')
plt.show()
"""From the above, we can see that Iris-Setosa is separated from both other species in all the features.
**Heatmap:** Plotting the heatmap to check the correlation.
**dataset.corr()** is used to find the pairwise correlation of all columns in the dataframe.
"""
plt.figure(figsize=(8,5))
sns.heatmap(dataset.corr(), annot=True, cmap= 'PuOr')
plt.show()
"""# **Step: 5 Model Construction (Splitting, Training and Model Creation)**
---
---
**SPLITTING THE DATASET:**
X holds the independent variables (the four measurement features).
Y holds the dependent variable (the target class).
"""
x = dataset.drop(['class'], axis=1)  # feature columns (independent variables)
y = dataset['class']  # 'class' is the dependent (target) variable
print('X shape: {}\nY Shape: {}'.format(x.shape, y.shape))
"""The output shows that X has 150 records/rows and 4 cols, whereas, Y has 150 records and only 1 col.
**TRAINING THE TEST SPLIT:**
Splitting our dataset into train and test using train_test_split(), what we are doing here is taking 80% of data to train our model, and 20% that we will hold back as a validation dataset:
"""
x_train, x_test, y_train, y_test = train_test_split (x, y, test_size=0.20, random_state=1)
"""**MODEL CONSTRUCTION PART:1:**
We have no idea which algorithms might work best in this situation.
Let's run each algorithm in a loop and print its accuracy so we can choose the best one. Following are the algorithms:
1. Logistic Regression (LR)
2. Linear Discriminant Analysis (LDA)
3. K-Nearest Neighbors (KNN)
4. Classification and Regression Trees (CART)
5. Gaussian Naive Bayes (NB)
6. Support Vector Machines (SVM)
"""
models = []
models.append(('LR', LogisticRegression()))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('CART', DecisionTreeClassifier()))
models.append(('NB', GaussianNB()))
models.append(('SVC', SVC(gamma='auto')))
# evaluate each model in turn
results = []
model_names = []
for name, model in models:
kfold = StratifiedKFold(n_splits=10, random_state=1, shuffle=True)
cv_results = cross_val_score(model, x_train, y_train, cv=kfold, scoring='accuracy')
results.append(cv_results)
model_names.append(name)
print('%s: %f (%f)' % (name, cv_results.mean(), cv_results.std()))
"""***Support Vector Classifier (SVC) is performing better than other algorithms.
Let’s train SVC model on our training set and predict on test set in the next step.***
**MODEL CONSTRUCTION PART:2**
We are defining our SVC model and passing gamma as auto.
After that fitting/training the model on X_train and Y_train using .fit() method.
Then we are predicting on X_test using .predict() method.
"""
model = SVC(gamma='auto')
model.fit(x_train, y_train)
prediction = model.predict(x_test)
"""**Now checking the accuracy of the model using accuracy_score(y_test, prediction).**
y_test holds the true class labels for x_test.
prediction holds the labels predicted for x_test (*obtained earlier with the .predict() method*).
**Printing out the classification report using:** classification_report(y_test, prediction)
"""
print(f"Test Accuracy: {accuracy_score(y_test, prediction)} \n")
print(f'Classification Report:\n \n {classification_report(y_test, prediction)}')
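"""**Predicting a new sample:** as a final check, the trained SVC can classify a single new flower. The measurements below are illustrative values ordered as sepal-length, sepal-width, petal-length, petal-width."""
sample = [[5.1, 3.5, 1.4, 0.2]]
print(f'Predicted class for {sample[0]}: {model.predict(sample)[0]}')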
|
{"hexsha": "4d030e9615684503b23c3033d4dcf31a0c1eff62", "size": 6767, "ext": "py", "lang": "Python", "max_stars_repo_path": "task_1.py", "max_stars_repo_name": "Umer86/IRIS_FLOWER_Classfication_ML_Project", "max_stars_repo_head_hexsha": "18b0ae5385cb23dff296b7b0fb1f3db6756deaac", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "task_1.py", "max_issues_repo_name": "Umer86/IRIS_FLOWER_Classfication_ML_Project", "max_issues_repo_head_hexsha": "18b0ae5385cb23dff296b7b0fb1f3db6756deaac", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "task_1.py", "max_forks_repo_name": "Umer86/IRIS_FLOWER_Classfication_ML_Project", "max_forks_repo_head_hexsha": "18b0ae5385cb23dff296b7b0fb1f3db6756deaac", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.537254902, "max_line_length": 188, "alphanum_fraction": 0.7320821634, "include": true, "reason": "import numpy", "num_tokens": 1650}
|
"""
Script for converting the mask images to the mmsegmentation format.
"""
import glob
from PIL import Image
import os
import numpy as np
# src_class="pldu"
src_dir = "C:/Users/Xyedj/Desktop/datasets/wires/PLDU/gt/aug_gt/90.0_0" #image dir with default mask images
dst_dir = "C:/Users/Xyedj/Desktop/datasets/wires/PLDU/gt/pmode/" #image dir to store the converted mask images in mmsegmentation format
def convert_mode(): #convert data to 'P' mode
    for imgfile in glob.iglob(os.path.join(src_dir, "*.png")):
        filename = imgfile.split("\\")[1]  # extract filename with .png extension
        final_file_name = dst_dir + filename
        pimg = Image.open(imgfile).convert("RGB")
        # Map white (255, 255, 255) wire pixels to class index 1; everything else becomes 0.
        label = np.all(np.array(pimg) == 255, axis=-1).astype(np.uint8)
        # Store as a 'P' mode image with a two-entry palette (index 0 = background, 1 = wire).
        palette = [[120, 120, 120], [6, 230, 230]]
        pimg = Image.fromarray(label).convert("P")
        pimg.putpalette(np.array(palette, dtype=np.uint8))
        pimg.save(final_file_name)
convert_mode()
|
{"hexsha": "87191240d4f9d31fda32d068bf8bcbd810660e6b", "size": 1137, "ext": "py", "lang": "Python", "max_stars_repo_path": "mmseg/utils/convertmode.py", "max_stars_repo_name": "rubeea/pl_mmseg_point_rend", "max_stars_repo_head_hexsha": "2b92b03a4d0356871f2bd18b79ed34d4594a6470", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mmseg/utils/convertmode.py", "max_issues_repo_name": "rubeea/pl_mmseg_point_rend", "max_issues_repo_head_hexsha": "2b92b03a4d0356871f2bd18b79ed34d4594a6470", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-04-01T01:47:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-18T05:10:53.000Z", "max_forks_repo_path": "mmseg/utils/convertmode.py", "max_forks_repo_name": "rubeea/pl_mmseg_point_rend", "max_forks_repo_head_hexsha": "2b92b03a4d0356871f2bd18b79ed34d4594a6470", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.53125, "max_line_length": 135, "alphanum_fraction": 0.6455584872, "include": true, "reason": "import numpy", "num_tokens": 324}
|
import h5py
import numpy as np
from pyVHR.datasets.dataset import Dataset
from pyVHR.signals.bvp import BVPsignal
class COHFACE(Dataset):
"""
Cohface dataset structure:
-----------------
datasetDIR/
|
|-- subjDIR_1/
| |-- vidDIR1/
| |-- videoFile1.avi
| |-- ...
| |-- videoFileN.avi
|...
| |-- vidDIRM/
| |-- videoFile1.avi
| |-- ...
| |-- videoFileM.avi
|...
|-- subjDIR_n/
|...
"""
name = 'COHFACE'
signalGT = 'BVP' # GT signal type
numLevels = 2 # depth of the filesystem collecting video and BVP files
numSubjects = 40 # number of subjects
video_EXT = 'avi' # extension of the video files
    frameRate = 20           # video frame rate
VIDEO_SUBSTRING = 'data' # substring contained in the filename
SIG_EXT = 'hdf5' # extension of the BVP files
SIG_SUBSTRING = 'data' # substring contained in the filename
SIG_SampleRate = 256 # sample rate of the BVP files
skinThresh = [40,60] # thresholds for skin detection
def readSigfile(self, filename):
""" Load BVP signal.
Must return a 1-dim (row array) signal
"""
f = h5py.File(filename, 'r')
data = np.array(f['pulse']) # load the signal
        data = data.reshape(1,len(data))    # one-dimensional (row) signal
return BVPsignal(data, self.SIG_SampleRate)
|
{"hexsha": "a84bb310c7fcb9a1956bdbbcb02b5cb359b61630", "size": 1534, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyVHR/pyVHR/datasets/cohface.py", "max_stars_repo_name": "chiccheung/PRD_rPPG_method_3DCNN", "max_stars_repo_head_hexsha": "c2da5c523dc960644b444c14b1417a8ec86eba25", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pyVHR/pyVHR/datasets/cohface.py", "max_issues_repo_name": "chiccheung/PRD_rPPG_method_3DCNN", "max_issues_repo_head_hexsha": "c2da5c523dc960644b444c14b1417a8ec86eba25", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyVHR/pyVHR/datasets/cohface.py", "max_forks_repo_name": "chiccheung/PRD_rPPG_method_3DCNN", "max_forks_repo_head_hexsha": "c2da5c523dc960644b444c14b1417a8ec86eba25", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.9583333333, "max_line_length": 85, "alphanum_fraction": 0.5338983051, "include": true, "reason": "import numpy", "num_tokens": 397}
|
[STATEMENT]
lemma floor_eq_iff: "\<lfloor>x\<rfloor> = a \<longleftrightarrow> of_int a \<le> x \<and> x < of_int a + 1"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lfloor>x\<rfloor> = a) = (of_int a \<le> x \<and> x < of_int a + (1::'a))
[PROOF STEP]
using floor_correct floor_unique
[PROOF STATE]
proof (prove)
using this:
of_int \<lfloor>?x\<rfloor> \<le> ?x \<and> ?x < of_int (\<lfloor>?x\<rfloor> + 1)
\<lbrakk>of_int ?z \<le> ?x; ?x < of_int ?z + (1::?'a)\<rbrakk> \<Longrightarrow> \<lfloor>?x\<rfloor> = ?z
goal (1 subgoal):
1. (\<lfloor>x\<rfloor> = a) = (of_int a \<le> x \<and> x < of_int a + (1::'a))
[PROOF STEP]
by auto
|
{"llama_tokens": 300, "file": null, "length": 2}
|
import numpy as np
import matplotlib.pyplot as plt
import os
import getopt
import sys
HISTORY_FILE = 'model_history.npy'
def main(argv):
global HISTORY_FILE
if(argv):
inputFile = argv[0]
else:
print("History file or model directory needed")
sys.exit(0)
if(os.path.isdir(inputFile)):
model_name = os.path.basename(inputFile)
HISTORY_FILE = os.path.join(inputFile, HISTORY_FILE)
else:
model_name = 'model'
HISTORY_FILE = inputFile
print("Model: ", model_name)
print("Using History File: ", HISTORY_FILE)
    history = np.load(HISTORY_FILE, allow_pickle=True).item()  # history dict saved with np.save
plot_history(history, model_name)
print("Saved output to ", os.path.abspath(model_name + '.png'))
def plot_history(history, model_name):
# summarize history for accuracy
plt.figure(1)
plt.subplot(211)
plt.plot(history['acc'])
plt.plot(history['val_acc'])
plt.title('{model_name} accuracy'.format(model_name=model_name))
plt.ylabel('accuracy')
plt.legend(['train', 'test'], loc='upper left')
# summarize history for loss
plt.subplot(212)
plt.plot(history['loss'])
plt.plot(history['val_loss'])
plt.title('{model_name} loss'.format(model_name=model_name))
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.tight_layout()
plt.savefig('{model_name}.png'.format(model_name=model_name))
if __name__ == "__main__":
main(sys.argv[1:])
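# Example invocations (paths are illustrative): the argument may be either a model
# directory containing model_history.npy or a direct path to a history file.
#   python graph_training.py runs/char_rnn_model
#   python graph_training.py runs/char_rnn_model/model_history.npy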
|
{"hexsha": "d93c471033fa42b4e9f36ae74c292d75be88a90f", "size": 1491, "ext": "py", "lang": "Python", "max_stars_repo_path": "graph_training.py", "max_stars_repo_name": "MichaelSeaman/char-rnn", "max_stars_repo_head_hexsha": "68e2763b3f904873bc580af6f465300c415fa575", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "graph_training.py", "max_issues_repo_name": "MichaelSeaman/char-rnn", "max_issues_repo_head_hexsha": "68e2763b3f904873bc580af6f465300c415fa575", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "graph_training.py", "max_forks_repo_name": "MichaelSeaman/char-rnn", "max_forks_repo_head_hexsha": "68e2763b3f904873bc580af6f465300c415fa575", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.625, "max_line_length": 68, "alphanum_fraction": 0.6606304494, "include": true, "reason": "import numpy", "num_tokens": 351}
|
import numpy as np
import pandas as pd
import json
# rand_post = pd.read_excel(
# 'D:\Epay\Epay\Dashboard\Python code\Proxy Payday Loan Data Corrected.xlsx', sheet_name='Rand Post Data')
def gini_total(loc,sheet_name):
data = pd.read_excel(loc, sheet_name, na_values=0)
data.sort_values(by=['PD'], ascending=False, inplace=True)
N = len(data)
D = data['Default'].sum()
default = np.array(data.Default)
proxy_data_arr = np.cumsum(default)
num_arr = np.arange(N)+1
best_arr = np.where(num_arr >=D, D, num_arr)
worst_arr = num_arr*D/N
return [proxy_data_arr.tolist(), best_arr.tolist(), worst_arr.tolist()]
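# Optional helper (a sketch, not part of the original script): the three curves returned by
# gini_total() are cumulative-default (CAP) curves, so an accuracy ratio / Gini coefficient
# can be approximated as the area between the model curve and the random line divided by the
# area between the best-case curve and the random line.
def gini_coefficient(model_curve, best_curve, random_curve):
    model_area = np.trapz(np.array(model_curve) - np.array(random_curve))
    best_area = np.trapz(np.array(best_curve) - np.array(random_curve))
    return model_area / best_area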
if __name__ == '__main__':
# params
loc = 'D:\Epay\Epay\Data\Payday Data.xlsx'
sheet_name = 'DatawPD'
# write file
fileLocation = 'D:/Epay/Epay/Dashboard/dashboard_prototype/data/'
json_total = gini_total(loc, sheet_name)
with open(fileLocation + 'payday_gini.json', 'w') as fp:
json.dump(json_total, fp)
|
{"hexsha": "83257a19809e7c6cb89cff23db954fee8a04b1dd", "size": 996, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/payday/gini_data.py", "max_stars_repo_name": "LiLyBabe/dashboard_prototype", "max_stars_repo_head_hexsha": "4857844f24a4cb87fda5e9b0386cfe84faf7028a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/payday/gini_data.py", "max_issues_repo_name": "LiLyBabe/dashboard_prototype", "max_issues_repo_head_hexsha": "4857844f24a4cb87fda5e9b0386cfe84faf7028a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-01-05T10:16:22.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-05T10:16:22.000Z", "max_forks_repo_path": "python/payday/gini_data.py", "max_forks_repo_name": "LiLyBabe/dashboard_prototype", "max_forks_repo_head_hexsha": "4857844f24a4cb87fda5e9b0386cfe84faf7028a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.1818181818, "max_line_length": 110, "alphanum_fraction": 0.6797188755, "include": true, "reason": "import numpy", "num_tokens": 276}
|
#include "ros/ros.h"
#include <iostream>
#include <stdio.h> /* printf, scanf, puts, NULL */
#include <stdlib.h> /* srand, rand */
#include <time.h>
#include <opencv2/core/core.hpp>
#include <opencv2/contrib/contrib.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <image_transport/image_transport.h>
#include <opencv/cv.h>
#include <opencv/highgui.h>
#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/image_encodings.h>
#include <sensor_msgs/CameraInfo.h>
#include <tf/transform_listener.h>
#include <sensor_msgs/PointCloud.h>
#include <boost/math/distributions/normal.hpp>
#include <boost/random/mersenne_twister.hpp>
#include <boost/random/normal_distribution.hpp>
#include <boost/random/variate_generator.hpp>
#include <boost/filesystem/operations.hpp>
#include <boost/filesystem/path.hpp>
namespace fs = boost::filesystem;
#include <pcl/point_types.h>
#include <pcl/io/pcd_io.h>
#include <pcl/kdtree/kdtree_flann.h>
#include <pcl/features/normal_3d.h>
#include <pcl/surface/gp3.h>
#include <pcl/io/vtk_io.h>
#include <pcl_conversions/pcl_conversions.h>
#include <pcl/PCLPointCloud2.h>
//#include <pcl/PCLPointCloud.h>
#include <pcl/conversions.h>
#include <pcl_ros/transforms.h>
#include <sensor_msgs/point_cloud_conversion.h>
#include <tinyxml.h>
#include "ColorMaps.h"
using std::cout;
using std::endl;
sensor_msgs::PointCloud cloud_in_;
sensor_msgs::PointCloud cloud_;
image_transport::Publisher img_pub_;
ros::Publisher camera_info_pub_;
sensor_msgs::CameraInfo camera_info_;
//cv::VideoWriter record_;
int frame_count = 0;
std::string output_dir = "/home/syllogismrxs/temp/sonar";
#define PI 3.14159265359
// Normal Distribution Setup
typedef boost::mt19937 ENG; // Mersenne Twister
typedef boost::normal_distribution<double> DIST; // Normal Distribution
typedef boost::variate_generator<ENG,DIST> GEN; // Variate generator
ENG eng;
DIST dist(0,0.01); // mean, std
GEN gauss_sample(eng,dist);
void add_salt_and_pepper(cv::Mat& I, int amt)
{
// accept only char type matrices
CV_Assert(I.depth() != sizeof(uchar));
//struct color_t c;
const int channels = I.channels();
switch(channels) {
case 1: {
cv::MatIterator_<uchar> it, end;
for( it = I.begin<uchar>(), end = I.end<uchar>(); it != end; ++it) {
*it += rand() % amt;
}
break;
}
case 3: {
cv::MatIterator_<cv::Vec3b> it, end;
for( it = I.begin<cv::Vec3b>(), end = I.end<cv::Vec3b>(); it != end; ++it) {
//(*it)[0] += scale(c.b); // B
//(*it)[1] += scale(c.g); // G
//(*it)[2] += scale(c.r); // R
}
}}
}
double min_angle_;
double max_angle_;
double beam_width_;
std::string sonar_link_name;
void cloudCallback(const sensor_msgs::PointCloudConstPtr& msg)
{
cloud_in_ = *msg;
tf::TransformListener listener;
tf::StampedTransform transform;
try{
listener.transformPointCloud(sonar_link_name, cloud_in_, cloud_);
}
catch (tf::TransformException ex){
ROS_ERROR("%s",ex.what());
return;
}
if (cloud_.points.size () == 0) {
cout << "Point Cloud of size 0" << endl;
return;
}
/////////////////////////////////////////////
// Add Gaussian Distribution to PointCloud (in range direction)
/////////////////////////////////////////////
for (unsigned int i = 0; i < cloud_.points.size(); i++) {
cloud_.points[i].x += gauss_sample();
}
double x_min = 0.1; // R_min
double x_max = 10.0; // R_max
double y_min = 0; //
//double y_max = sin(0.392699082)*x_max*2;
double y_max = sin(max_angle_)*x_max*2;
int img_width = 600;
//int img_width = 1500;
int img_height = 600;
camera_info_.height = img_height;
camera_info_.width = img_width;
cv::Mat img = cv::Mat::zeros(img_height, img_width, CV_8UC1);
//add_salt_and_pepper(img, 10);
#if 0
sensor_msgs::PointCloud2 sm_pcl2;
sensor_msgs::convertPointCloudToPointCloud2(cloud_, sm_pcl2);
pcl::PCLPointCloud2 pcl_pc2;
pcl_conversions::toPCL(sm_pcl2, pcl_pc2);
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
pcl::fromPCLPointCloud2(pcl_pc2, *cloud);
// Normal estimation*
pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> n;
pcl::PointCloud<pcl::Normal>::Ptr normals (new pcl::PointCloud<pcl::Normal>);
pcl::search::KdTree<pcl::PointXYZ>::Ptr tree (new pcl::search::KdTree<pcl::PointXYZ>);
tree->setInputCloud (cloud);
n.setInputCloud (cloud);
n.setSearchMethod (tree);
n.setKSearch (20);
n.compute (*normals);
// Concatenate the XYZ and normal fields*
pcl::PointCloud<pcl::PointNormal>::Ptr cloud_with_normals (new pcl::PointCloud<pcl::PointNormal>);
pcl::concatenateFields (*cloud, *normals, *cloud_with_normals);
//* cloud_with_normals = cloud + normals
// Create search tree*
pcl::search::KdTree<pcl::PointNormal>::Ptr tree2 (new pcl::search::KdTree<pcl::PointNormal>);
tree2->setInputCloud (cloud_with_normals);
// Initialize objects
pcl::GreedyProjectionTriangulation<pcl::PointNormal> gp3;
pcl::PolygonMesh triangles;
// Set the maximum distance between connected points (maximum edge length)
//gp3.setSearchRadius (0.025);
gp3.setSearchRadius (15);
// Set typical values for the parameters
gp3.setMu (2.5);
gp3.setMaximumNearestNeighbors (100);
gp3.setMaximumSurfaceAngle(M_PI/4); // 45 degrees
gp3.setMinimumAngle(M_PI/18); // 10 degrees
gp3.setMaximumAngle(2*M_PI/3); // 120 degrees
gp3.setNormalConsistency(false);
// Get result
gp3.setInputCloud (cloud_with_normals);
gp3.setSearchMethod (tree2);
gp3.reconstruct (triangles);
// Additional vertex information
//std::vector<int> parts = gp3.getPartIDs();
//std::vector<int> states = gp3.getPointStates();
std::vector<pcl::Vertices>::iterator it = triangles.polygons.begin();
for(; it != triangles.polygons.end(); it++) {
pcl::Vertices verts = *it;
std::vector<uint32_t>::iterator it2 = verts.vertices.begin();
int count = 0;
double retro_sum = 0;
cv::Point points[0][3];
for (; it2 != verts.vertices.end(); it2++) {
//int x_pos = img_width/2 + -cloud->points[*it2].y / (y_max/2) * img_height*sin(0.392699082);
int x_pos = img_width/2 + -cloud->points[*it2].y / (y_max/2) * img_height*sin(max_angle_);
int y_pos = cloud->points[*it2].x / x_max * img_height;
points[0][count++] = cv::Point( x_pos, y_pos );
retro_sum += cloud_.channels[0].values[*it2];
cv::circle(img,cv::Point(x_pos, y_pos),1, cloud_.channels[0].values[*it2], 1,8,0);
}
const cv::Point* ppt[1] = { points[0] };
int npt[] = { 3 };
double retro = retro_sum / 3.0;
if (retro > 255) {
retro = 255;
} else if (retro < 0) {
retro = 0;
}
cv::fillPoly( img,
ppt,
npt,
1,
retro,
8,
0,
cv::Point(0,0));
}
#else
// Add the returned values
for (unsigned int i = 0; i < cloud_.points.size(); i++) {
if (cloud_.points[i].x > x_max) {
cout << "Over x_max: " << cloud_.points[i].x << endl;
}
if (cloud_.points[i].y > y_max) {
cout << "Over y_max: " << cloud_.points[i].y << endl;
}
//int x_pos = img_width/2 + -cloud_.points[i].y / (y_max/2) * img_height*sin(0.392699082);
int x_pos = img_width/2 + -cloud_.points[i].y / (y_max/2) * img_height*sin(max_angle_);
int y_pos = cloud_.points[i].x / x_max * img_height;
if (x_pos < 0 || x_pos > img.cols || y_pos < 0 || y_pos > img.rows) {
// error, skip
cout << "bounds" << endl;
continue;
}
double retro = cloud_.channels[0].values[i];
if (retro > 255) {
retro = 255;
} else if (retro < 0) {
retro = 0;
}
cv::circle(img,cv::Point(x_pos, y_pos),1,retro,1,8,0);
}
#endif
cv::medianBlur(img,img,3);
//add_salt_and_pepper(img,20);
cv::Mat img_color;
//cv::applyColorMap(img,img_color,cv::COLORMAP_JET);
/// COLORMAP_AUTUMN
/// COLORMAP_BONE
/// COLORMAP_COOL
/// COLORMAP_HOT
/// COLORMAP_HSV
/// COLORMAP_JET
/// COLORMAP_OCEAN
/// COLORMAP_PINK
/// COLORMAP_RAINBOW
/// COLORMAP_SPRING
/// COLORMAP_SUMMER
/// COLORMAP_WINTER
//cv::applyColorMap(img,img_color,cv::COLORMAP_JET);
Gray2Jet_matlab(img, img_color);
///////////////////////////////
//double start_angle = 1.178097245;
double start_angle = (PI - beam_width_) / 2 ;
double end_angle = 90.0 * PI / 180.0;//1.963495408;
cv::Point start(img_width/2,0);
cv::Point end_1(start.x+img_height*cos(start_angle), start.y+img_height*sin(start_angle));
cv::Point end_2(start.x+img_height*cos(end_angle), start.y+img_height*sin(end_angle));
std::vector<cv::Point> contour;
contour.push_back(start);
contour.push_back(end_1);
double num_pts = 25;
double step = (end_angle - start_angle) / num_pts;
for (double ang = start_angle; ang <= end_angle; ang += step) {
contour.push_back(cv::Point(start.x+img_height*cos(ang), start.y+img_height*sin(ang)));
}
contour.push_back(end_2);
contour.push_back(cv::Point(img_width-1,img_height-1));
contour.push_back(cv::Point(img_width-1,0));
// create a pointer to the data as an array of points (via a conversion to
// a Mat() object)
const cv::Point *pts = (const cv::Point*) cv::Mat(contour).data;
int npts = cv::Mat(contour).rows;
// Fill the outside of the sonar image with black
cv::fillPoly(img_color, &pts, &npts, 1, cv::Scalar(0,0,0), 8, 0, cv::Point(0,0));
///////////////////
//start_angle = 112.5 * PI / 180.0;
start_angle = ((PI - beam_width_) / 2) + beam_width_;
end_angle = 90 * PI / 180.0;//1.963495408;
end_1 = cv::Point(start.x+img_height*cos(start_angle), start.y+img_height*sin(start_angle));
end_2 = cv::Point(start.x+img_height*cos(end_angle), start.y+img_height*sin(end_angle));
contour.clear();
contour.push_back(start);
contour.push_back(end_1);
num_pts = 25;
step = (start_angle - end_angle) / num_pts;
for (double ang = start_angle; ang >= end_angle; ang -= step) {
contour.push_back(cv::Point(start.x+img_height*cos(ang), start.y+img_height*sin(ang)));
}
contour.push_back(end_2);
contour.push_back(cv::Point(0,img_height-1));
contour.push_back(cv::Point(0,0));
// create a pointer to the data as an array of points (via a conversion to
// a Mat() object)
pts = (const cv::Point*) cv::Mat(contour).data;
npts = cv::Mat(contour).rows;
// Fill the outside of the sonar image with black
cv::fillPoly(img_color, &pts, &npts, 1, 0, 8, 0, cv::Point(0,0));
////////////////////
// Rotate the sonar image to pointing "up"
cv::Point center = cv::Point( img_color.cols/2, img_color.rows/2 );
double angle = 180.0;
double rot_scale = 1.0;
     cv::Mat rot_mat = cv::getRotationMatrix2D( center, angle, rot_scale );
cv::warpAffine(img_color, img_color, rot_mat, img_color.size());
//cv::imshow("sonar_image",img);
//cv::waitKey(1);
std_msgs::Header img_header;
img_header.stamp = ros::Time::now();
sensor_msgs::ImagePtr img_msg = cv_bridge::CvImage(img_header,
"bgr8",
img_color).toImageMsg();
camera_info_.header.stamp = img_header.stamp;
camera_info_pub_.publish(camera_info_);
img_pub_.publish(img_msg);
std::ostringstream convert;
convert << frame_count;
std::string out_str = output_dir + "/sonar" + convert.str() + ".png";
cv::imwrite(out_str, img_color);
frame_count++;
//if (!record_.isOpened()) {
// record_.open("/home/syllogismrxs/output.avi", CV_FOURCC('M','J','P','G'), 10, img_color.size(), true);
//}
//
//if (record_.isOpened()) {
// record_ << img_color;
//}
}
int main(int argc, char * argv[])
{
srand (time(NULL));
ros::init(argc, argv, "imaging_sonar_sim");
ros::NodeHandle n_;
ros::param::get("~sonar_link_name", sonar_link_name);
std::string key;
std::string robot_description;
if (n_.searchParam("robot_description", key)) {
n_.getParam(key, robot_description);
} else {
cout << "===========================" << endl;
cout << "Warning: unable to find robot_description." << endl;
}
// Create output directory for sonar images if it doesn't exist
fs::path dir(output_dir);
if (!fs::is_directory(dir)) {
fs::create_directories(output_dir);
}
// Need to extract the min and max angles for sonar point cloud
TiXmlDocument doc;
     doc.Parse(robot_description.c_str(), 0, TIXML_ENCODING_UTF8);
     if (doc.Error()) {
          cout << "Error parsing robot_description: " << doc.ErrorDesc() << endl;
     }
TiXmlElement * robot_element = doc.FirstChildElement("robot");
// Search for gazebo with reference="sonar_link"
TiXmlElement *elem = robot_element->FirstChildElement();
     TiXmlElement *sonar_link_elem = NULL;
while (elem) {
if (std::string(elem->Value()) == "gazebo") {
if (elem->Attribute("reference") != NULL) {
if (std::string(elem->Attribute("reference")) == sonar_link_name) {
sonar_link_elem = elem;
break;
}
}
}
elem = elem->NextSiblingElement();
}
     if (sonar_link_elem == NULL) {
          cout << "Error: could not find gazebo element with reference=\"" << sonar_link_name << "\" in robot_description." << endl;
          return -1;
     }
     TiXmlElement *min_angle_elem = sonar_link_elem->FirstChildElement("sensor")->FirstChildElement("ray")->FirstChildElement("scan")->FirstChildElement("horizontal")->FirstChildElement("min_angle");
TiXmlElement *max_angle_elem = sonar_link_elem->FirstChildElement("sensor")->FirstChildElement("ray")->FirstChildElement("scan")->FirstChildElement("horizontal")->FirstChildElement("max_angle");
if ( ! (std::istringstream(min_angle_elem->GetText()) >> min_angle_) ) min_angle_ = -1;
if ( ! (std::istringstream(max_angle_elem->GetText()) >> max_angle_) ) max_angle_ = 1;
cout << "Min Angle: " << min_angle_ << endl;
cout << "Max Angle: " << max_angle_ << endl;
beam_width_ = max_angle_ * 2;
std::string cloud_topic_name;
ros::param::get("~cloud_topic_name", cloud_topic_name);
// Setup sonar cloud subscription
ros::Subscriber cloud_sub = n_.subscribe<sensor_msgs::PointCloud>(cloud_topic_name, 0, cloudCallback);
// Setup sonar_image publication
std::string image_topic_name;
ros::param::get("~image_topic_name", image_topic_name);
image_transport::ImageTransport it_(n_);
img_pub_ = it_.advertise(image_topic_name, 1);
cout << "=======" << endl;
cout << "cloud name: " << cloud_topic_name << endl;
cout << "image name: " << image_topic_name << endl;
cout << "sonar link name: " << sonar_link_name << endl;
camera_info_pub_ = n_.advertise<sensor_msgs::CameraInfo>("camera_info", 1);
ros::Rate loop_rate(10);
while (ros::ok()) {
ros::spinOnce();
loop_rate.sleep();
}
return 0;
}
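// Minimal usage sketch (not part of the original source): the private parameter names below match
// the ros::param::get() calls in main(); the package/node names are illustrative assumptions.
//   <node pkg="imaging_sonar_sim" type="imaging_sonar_sim" name="imaging_sonar_sim" output="screen">
//     <param name="sonar_link_name"  value="sonar_link"/>
//     <param name="cloud_topic_name" value="sonar_cloud"/>
//     <param name="image_topic_name" value="sonar_image"/>
//   </node>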
|
{"hexsha": "8621eb727c7ce75c26f1d910805c2a81bdcade9d", "size": 16704, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "catkin_ws/src/imaging_sonar_sim/src/imaging_sonar_sim.cpp", "max_stars_repo_name": "SyllogismRXS/syllo-gazebo", "max_stars_repo_head_hexsha": "81fe461c97ae42104a3d4591084c5b8616841bf5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2015-10-27T14:31:22.000Z", "max_stars_repo_stars_event_max_datetime": "2015-10-27T14:31:22.000Z", "max_issues_repo_path": "catkin_ws/src/imaging_sonar_sim/src/imaging_sonar_sim.cpp", "max_issues_repo_name": "SyllogismRXS/syllo-gazebo", "max_issues_repo_head_hexsha": "81fe461c97ae42104a3d4591084c5b8616841bf5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "catkin_ws/src/imaging_sonar_sim/src/imaging_sonar_sim.cpp", "max_forks_repo_name": "SyllogismRXS/syllo-gazebo", "max_forks_repo_head_hexsha": "81fe461c97ae42104a3d4591084c5b8616841bf5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2020-02-26T22:00:14.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-25T04:58:05.000Z", "avg_line_length": 34.7276507277, "max_line_length": 204, "alphanum_fraction": 0.5772868774, "num_tokens": 4329}
|
from jax.config import config
config.update("jax_enable_x64", True)
import jax.numpy as jnp
from jax import jit, grad, vmap
from jax import random
from jax.ops import index, index_update
from jax.flatten_util import ravel_pytree
from jax.scipy.special import logsumexp
from slicereparam.functional import setup_slice_sampler
from slicereparam.slicesampler import slicesampler
def test_grad_diagonal_gaussian_KL():
# set up randomness
key = random.PRNGKey(131313)
# Set up params
D = 5 # number of dimensions
scale = 0.1
key, *subkeys = random.split(key, 3)
_params = [scale * random.normal(subkeys[0], (D, )), scale * random.normal(subkeys[1], (D, ))]
def _log_pdf(x, params):
mu = params[0]
sigma_diag = jnp.exp(params[1])
return jnp.sum(-0.5 * (x - mu) **2 / sigma_diag)
params, unflatten = ravel_pytree(_params)
log_pdf = jit(lambda x, params : _log_pdf(x, unflatten(params)))
vmapped_log_pdf = jit(vmap(log_pdf, (0,None)))
xstar = jnp.zeros(D)
Sigma = jnp.eye(D)
def gaussian_log_pdf(x, mu, Sigma):
out = -0.5 * (x - mu).T @ jnp.linalg.inv(Sigma) @ (x - mu)
out = out - 0.5 * jnp.log(jnp.linalg.det(Sigma))
out = out - D / 2.0 * jnp.log(2.0 * jnp.pi)
return out
vmap_gaussian_log_pdf = vmap(gaussian_log_pdf, (0, None, None))
def loss(xs, params):
loss = -1.0 * jnp.sum(vmap_gaussian_log_pdf(xs, xstar, Sigma))
loss = loss + jnp.sum(vmapped_log_pdf(xs, params))
return loss
total_loss = jit(lambda x, params : loss(x, params))
num_chains = 10000
Sc = 100
model = slicesampler(
params, log_pdf, D, total_loss=total_loss, Sc=Sc, num_chains=num_chains)
dL_dtheta, loss, key = model.estimate_gradient(params, key)
def true_loss(params):
mu, log_sigsqr = params
return 0.5 * jnp.sum(jnp.exp(log_sigsqr) + mu**2 + 1.0 - log_sigsqr)
true_grad = grad(lambda params : true_loss(unflatten(params)))
assert jnp.linalg.norm(dL_dtheta - true_grad(params)) < 1e-2
def test_sampler_cdf():
var1, var2, var3 = 2.0, 1.0, 1.5
w1, w2, w3 = 1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0
def _log_pdf(x, params):
mu1, mu2, mu3 = params[:3]
log1 = -0.5 * (x - mu1)**2 / var1 - 0.5 * jnp.sqrt(2.0 * jnp.pi * var1) + jnp.log(w1)
log2 = -0.5 * (x - mu2)**2 / var2 - 0.5 * jnp.sqrt(2.0 * jnp.pi * var2) + jnp.log(w2)
log3 = -0.5 * (x - mu3)**2 / var3 - 0.5 * jnp.sqrt(2.0 * jnp.pi * var3) + jnp.log(w3)
return jnp.sum(logsumexp(jnp.array([log1,log2,log3]),axis=0))
_params = [-4.0, 0.0, 4.0]
params, unflatten = ravel_pytree(_params)
log_pdf = jit(lambda x, params : _log_pdf(x, unflatten(params)))
D = 1
Sc = 5000
num_chains = 100
key = random.PRNGKey(5)
model = slicesampler(params, log_pdf, D, Sc=Sc, num_chains=num_chains)
out = model.forwards_sample(params, key)
xs0 = out[0]
xs = xs0[:,1:,:].reshape(num_chains * Sc, D)
dx = 0.01
x_range = jnp.arange(-12,12,dx)
pdf = jnp.array([jnp.exp(log_pdf(x, params)) for x in x_range])
numerical_cdf = jnp.cumsum(pdf / jnp.sum(pdf))
empirical_cdf = jnp.array([jnp.sum(xs < x) for x in x_range]) / (Sc * num_chains)
assert jnp.linalg.norm(numerical_cdf - empirical_cdf) < 0.1
def test_finite_difference():
    # estimate the gradient via slice sampling and via finite differences,
    # and check that the two estimates are close
# set up randomness
key = random.PRNGKey(1234)
# Set up params
D = 5 # number of dimensions
scale = 0.1
key, *subkeys = random.split(key, 3)
_params = [scale * random.normal(subkeys[0], (D, )), scale * random.normal(subkeys[1], (D, ))]
def _log_pdf(x, params):
mu = params[0]
sigma_diag = jnp.exp(params[1])
return jnp.sum(-0.5 * (x - mu) **2 / sigma_diag)
params, unflatten = ravel_pytree(_params)
log_pdf = jit(lambda x, params : _log_pdf(x, unflatten(params)))
vmapped_log_pdf = jit(vmap(log_pdf, (0,None)))
def _total_loss(xs, params):
loss = jnp.sum(xs**2)
return loss
total_loss = jit(lambda x, params : _total_loss(x, params))
loss_grad_xs = jit(grad(total_loss))
    # run the test with (num_chains, Sc) = (1, 1) and (2, 5)
num_chains_vals = [1, 2]
Sc_vals = [1, 5]
for num_chains, Sc in zip(num_chains_vals, Sc_vals):
model = slicesampler(params, log_pdf, D, Sc=Sc, num_chains=num_chains)
forwards_out = model.forwards_sample(params, key)
xs = forwards_out[0][:, 1:, :]
dL_dxs = loss_grad_xs(xs, params)
dL_dtheta = model.compute_gradient(params, dL_dxs, forwards_out)
# compute gradient via finite differences
dx = 1e-3
M = params.shape[0]
dthetas = [jnp.zeros_like(params) for nc in range(num_chains)]
for m, v in enumerate(jnp.eye(M)):
params1 = params - dx * v
params2 = params + dx * v
forwards_out1 = model.forwards_sample(params1, key)
model.params = params2
forwards_out2 = model.forwards_sample(params2, key)
# xs1 = forwards_out1[0][1:].reshape((num_chains, Sc, D), order='F')
# xs2 = forwards_out2[0][1:].reshape((num_chains, Sc, D), order='F')
xs1 = forwards_out1[0][:, 1:, :]
xs2 = forwards_out2[0][:, 1:, :]
for nc in range(num_chains):
loss1 = total_loss(xs1[nc], params1)
loss2 = total_loss(xs2[nc], params2)
dthetas[nc] = dthetas[nc] + (loss2 - loss1) / (2.0 * dx) * v
dthetas = jnp.mean(jnp.asarray(dthetas), axis=0)
assert jnp.linalg.norm(dL_dtheta - dthetas) < 1e-2
key = forwards_out[-1]
# def test_root_finder():
# return
def test_custom_vjp_finite_difference():
    # estimate the gradient via slice sampling and via finite differences,
    # and check that the two estimates are close
# set up randomness
key = random.PRNGKey(123)
# Set up params
D = 5 # number of dimensions
scale = 0.1
key, *subkeys = random.split(key, 3)
_params = [scale * random.normal(subkeys[0], (D, )), scale * random.normal(subkeys[1], (D, ))]
def _log_pdf(x, params):
mu = params[0]
sigma_diag = jnp.exp(params[1])
return jnp.sum(-0.5 * (x - mu) **2 / sigma_diag)
params, unflatten = ravel_pytree(_params)
log_pdf = jit(lambda x, params : _log_pdf(x, unflatten(params)))
# run test over 1, >1 number of MCMC chains and 1, >1 number of samples
num_chains_vals = [1, 1, 2, 2]
S_vals = [1, 5, 1, 5]
for S, num_chains in zip(S_vals, num_chains_vals):
# slice_sample = setup_slice_sampler(log_pdf, params, D, S, num_chains=num_chains)
slice_sample = setup_slice_sampler(log_pdf, D, S, num_chains=num_chains)
key, *subkeys = random.split(key, 3)
x0 = random.normal(subkeys[0], (num_chains, D))
# out = slice_sample(subkeys[1], params, x0)
out = slice_sample(params, x0, subkeys[1])
def loss(xs):
return jnp.mean(xs**2)
def compute_loss(params, x0, key):
xs = slice_sample(params, x0, key)
return loss(xs)
grad_loss = jit(grad(compute_loss))
key, *subkeys = random.split(key, 3)
x0 = random.normal(subkeys[0], (num_chains, D))
grad_params_ad = grad_loss(params, x0, subkeys[1])
# compute gradient via finite differences
dx = 1e-3
M = params.shape[0]
dthetas = [jnp.zeros_like(params) for nc in range(num_chains)]
for m, v in enumerate(jnp.eye(M)):
params1 = params - dx * v
params2 = params + dx * v
xs1 = slice_sample(params1, x0, subkeys[1])
xs2 = slice_sample(params2, x0, subkeys[1])
for nc in range(num_chains):
loss1 = loss(xs1[nc])
loss2 = loss(xs2[nc])
dthetas[nc] = dthetas[nc] + (loss2 - loss1) / (2.0 * dx) * v
grad_params_fd = jnp.mean(jnp.asarray(dthetas), axis=0)
grad_x0 = jit(grad(compute_loss, argnums=1))
grad_x0_ad = grad_x0(params, x0, subkeys[1])
dx = 1e-3
dxs = [jnp.zeros(D) for nc in range(num_chains)]
for nc in range(num_chains):
for m, v in enumerate(jnp.eye(D)):
x01 = x0[nc] - dx * v
x02 = x0[nc] + dx * v
x01 = index_update(x0, index[nc, :], x01)
x02 = index_update(x0, index[nc, :], x02)
xs1 = slice_sample(params, x01, subkeys[1])
xs2 = slice_sample(params, x02, subkeys[1])
loss1 = loss(xs1)
loss2 = loss(xs2)
dxs[nc] = dxs[nc] + (loss2 - loss1) / (2.0 * dx) * v
grad_x0_fd = jnp.asarray(dxs)
assert jnp.linalg.norm(grad_params_ad - grad_params_fd) < 1e-3
assert jnp.linalg.norm(grad_x0_ad - grad_x0_fd) < 1e-3
if __name__ == "__main__":
test_grad_diagonal_gaussian_KL()
test_sampler_cdf()
test_finite_difference()
test_custom_vjp_finite_difference()
|
{"hexsha": "7c4e8eaaa7a4dc7e9bb61bfa71b15f3d7af336e7", "size": 9167, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/tests.py", "max_stars_repo_name": "PrincetonLIPS/slicereparam", "max_stars_repo_head_hexsha": "d393a4e0f052b8c420dcb890db10e62731d29f57", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/tests.py", "max_issues_repo_name": "PrincetonLIPS/slicereparam", "max_issues_repo_head_hexsha": "d393a4e0f052b8c420dcb890db10e62731d29f57", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/tests.py", "max_forks_repo_name": "PrincetonLIPS/slicereparam", "max_forks_repo_head_hexsha": "d393a4e0f052b8c420dcb890db10e62731d29f57", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.6692607004, "max_line_length": 98, "alphanum_fraction": 0.5980146177, "include": true, "reason": "import jax,from jax", "num_tokens": 2877}
|
function read_map(map::String)
map_dir = joinpath(dirname(@__DIR__), "maps")
datac = RGB24.(PNGFiles.load(joinpath(map_dir,map)*".png"))
datah = Float32.(reinterpret(UInt8,PNGFiles.load(joinpath(map_dir,MAP_LIST[map])*".png")))
if size(datac,1) == 2*size(datah,1) # some height maps need to be upsampled
datah = upsample_map_2x(datah)
end
return datac, datah
end
function upsample_map_2x(data::Matrix)
# simple linear interpolation
w,h = size(data)
data_2x = similar(data,2w,2h)
@inbounds for j = 1:h, i = 1:w
ii,jj = 2*(i-1)+1, 2*(j-1)+1
data_2x[ii, jj] = data[i,j]
data_2x[ii+1,jj] = (data[i,j] + data[mod1(i+1,w),j])/2
data_2x[ii, jj+1] = (data[i,j] + data[i,mod1(j+1,h)])/2
data_2x[ii+1,jj+1] = (data[i,j] + data[mod1(i+1,w),mod1(j+1,h)])/2
end
return data_2x
end
const MAP_LIST = Dict{String,String}(
"C1W" => "D1",
"C2W" => "D2",
"C3" => "D3",
"C4" => "D4",
"C5W" => "D5",
"C6W" => "D6",
"C7W" => "D7",
"C8" => "D6",
"C9W" => "D9",
"C10W" => "D10",
"C11W" => "D11",
"C12W" => "D11",
"C13" => "D13",
"C14" => "D14",
"C14W" => "D14",
"C15" => "D15",
"C16W" => "D16",
"C17W" => "D17",
"C18W" => "D18",
"C19W" => "D19",
"C20W" => "D20",
"C21" => "D21",
"C22W" => "D22",
"C23W" => "D21",
"C24W" => "D24",
"C25W" => "D25",
"C26W" => "D18",
"C27W" => "D15",
"C28W" => "D25",
"C29W" => "D16",
)
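# Usage sketch (assumes the corresponding color/height PNG assets exist under `maps/`; "C1W" is one
# of the MAP_LIST keys above):
#   datac, datah = read_map("C1W")   # datac: RGB24 color map, datah: Float32 height map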
|
{"hexsha": "01f6748aaa488963e07925c62c216bcee1a0e1ea", "size": 1521, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/map_utils.jl", "max_stars_repo_name": "jebej/VoxelSpace", "max_stars_repo_head_hexsha": "7fa2e92cacf00a010970e5198d5efb197d053eab", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/map_utils.jl", "max_issues_repo_name": "jebej/VoxelSpace", "max_issues_repo_head_hexsha": "7fa2e92cacf00a010970e5198d5efb197d053eab", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/map_utils.jl", "max_forks_repo_name": "jebej/VoxelSpace", "max_forks_repo_head_hexsha": "7fa2e92cacf00a010970e5198d5efb197d053eab", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-17T20:44:25.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-17T20:44:25.000Z", "avg_line_length": 26.6842105263, "max_line_length": 94, "alphanum_fraction": 0.486522025, "num_tokens": 666}
|
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn import svm
from sklearn import metrics
def classification(latent_code, random_seed=42, ten_fold=False):
tumour_type = pd.read_csv('data/PANCAN/GDC-PANCAN_both_samples_tumour_type.tsv', sep='\t', index_col=0)
latent_code_label = pd.merge(latent_code, tumour_type, left_index=True, right_index=True)
# separate latent variables and targets
label = latent_code_label[['tumour_type']]
data = latent_code_label.iloc[:, :-1]
X = data.values
y = label.values.ravel()
if ten_fold:
# 10-fold cross-validation
skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=random_seed)
accuracy_array = np.zeros(10)
precision_array = np.zeros(10)
recall_array = np.zeros(10)
f1_array = np.zeros(10)
i = 0
for train_index, test_index in skf.split(X, y):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
# Use SVM as classifier
clf = svm.SVC(gamma='scale', random_state=random_seed)
clf.fit(X_train, y_train)
# Test the classifier using the testing set
y_pred = clf.predict(X_test)
accuracy = metrics.accuracy_score(y_test, y_pred)
precision = metrics.precision_score(y_test, y_pred, average='weighted')
recall = metrics.recall_score(y_test, y_pred, average='weighted')
f1 = metrics.f1_score(y_test, y_pred, average='weighted')
# Store the metrics
accuracy_array[i] = accuracy
precision_array[i] = precision
recall_array[i] = recall
f1_array[i] = f1
i = i + 1
accuracy_average = np.mean(accuracy_array)
precision_average = np.mean(precision_array)
recall_average = np.mean(recall_array)
f1_average = np.mean(f1_array)
accuracy_std = accuracy_array.std()
precision_std = precision_array.std()
recall_std = recall_array.std()
f1_std = f1_array.std()
print('{:.2f}±{:.2f}%'.format(accuracy_average * 100, accuracy_std * 100))
print('{:.3f}±{:.3f}'.format(precision_average, precision_std))
print('{:.3f}±{:.3f}'.format(recall_average, recall_std))
print('{:.3f}±{:.3f}'.format(f1_average, f1_std))
else:
testset_ratio = 0.2
valset_ratio = 0.5
        # Single train/validation/test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=testset_ratio, random_state=random_seed,
stratify=y)
X_val, X_test, y_val, y_test = train_test_split(X_test, y_test, test_size=valset_ratio,
random_state=random_seed, stratify=y_test)
# Use SVM as classifier
clf = svm.SVC(gamma='scale', random_state=random_seed)
clf.fit(X_train, y_train)
# Test the classifier using the testing set
y_pred = clf.predict(X_test)
accuracy = metrics.accuracy_score(y_test, y_pred)
precision = metrics.precision_score(y_test, y_pred, average='weighted')
recall = metrics.recall_score(y_test, y_pred, average='weighted')
f1 = metrics.f1_score(y_test, y_pred, average='weighted')
print('{:.2f}'.format(accuracy * 100))
print('{:.2f}'.format(precision * 100))
print('{:.2f}'.format(recall * 100))
print('{:.2f}'.format(f1 * 100))
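# Minimal usage sketch: the latent-code TSV path below is an assumption for illustration; the
# tumour-type labels are read from the hard-coded PANCAN path inside classification().
if __name__ == '__main__':
    latent_code = pd.read_csv('data/latent_code.tsv', sep='\t', index_col=0)
    classification(latent_code, random_seed=42, ten_fold=True)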
|
{"hexsha": "4b485d13de04e9ab78e5a24bfbc9c508de3e7e56", "size": 3618, "ext": "py", "lang": "Python", "max_stars_repo_path": "classification.py", "max_stars_repo_name": "biprodip/multi_omic_vae", "max_stars_repo_head_hexsha": "e174bfd97f9a47b2b916390fcf46830fccae063b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2019-08-11T12:29:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T15:47:50.000Z", "max_issues_repo_path": "classification.py", "max_issues_repo_name": "biprodip/multi_omic_vae", "max_issues_repo_head_hexsha": "e174bfd97f9a47b2b916390fcf46830fccae063b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-12-23T08:38:26.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-24T02:40:35.000Z", "max_forks_repo_path": "classification.py", "max_forks_repo_name": "biprodip/multi_omic_vae", "max_forks_repo_head_hexsha": "e174bfd97f9a47b2b916390fcf46830fccae063b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 17, "max_forks_repo_forks_event_min_datetime": "2019-08-15T09:03:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-17T14:23:04.000Z", "avg_line_length": 41.1136363636, "max_line_length": 116, "alphanum_fraction": 0.6241017137, "include": true, "reason": "import numpy", "num_tokens": 876}
|
module neutraldata2Daxisymmobj
use, intrinsic :: iso_fortran_env, only: stderr=>error_unit
use phys_consts, only: wp,debug,pi,Re
use meshobj, only: curvmesh
use config, only: gemini_cfg
use inputdataobj, only: inputdata
use neutraldataobj, only: neutraldata
use neutraldata2Dobj, only: neutraldata2D
use reader, only: get_simsize3
implicit none (type, external)
public :: neutraldata2Daxisymm
!> type extension for neutral 2D axisymmetric input data
type, extends(neutraldata2D) :: neutraldata2Daxisymm
integer, pointer :: lrhon
real(wp), dimension(:), pointer :: rhon
real(wp), dimension(:), pointer :: rhoi
contains
procedure :: init=>init_neu2Daxisymm
procedure :: load_sizeandgrid_neu2D=>load_sizeandgrid_neu2Daxisymm
procedure :: set_coordsi=>set_coordsi_neu2Daxisymm
final :: destructor
end type neutraldata2Daxisymm
contains
subroutine init_neu2Daxisymm(self,cfg,sourcedir,x,dtmodel,dtdata,ymd,UTsec)
class(neutraldata2Daxisymm), intent(inout) :: self
type(gemini_cfg), intent(in) :: cfg
character(*), intent(in) :: sourcedir
class(curvmesh), intent(in) :: x
real(wp), intent(in) :: dtmodel,dtdata
integer, dimension(3), intent(in) :: ymd ! target date of initiation
real(wp), intent(in) :: UTsec ! target time of initiation
character(:), allocatable :: strname
! need to allow interpolation from 2D to 3D
self%flagallow2D3D=.true.
! basic init for any 2D neutral input
call self%init_neu2D_simple(cfg,sourcedir,x,dtmodel,dtdata,ymd,UTsec)
! append type of interp. to dataname
strname=self%dataname//' axisymmetric' ! append type of 2D interpolation to name
call self%set_name(strname) ! overwrite generic neutral 2D data name
print*, '...update to dataset name: ',self%dataname
! bind axisymmetric specific pointers for convenience, in case they are needed elsewhere
self%lrhon=>self%lhorzn
self%rhon=>self%horzn
self%rhoi=>self%horzi
end subroutine init_neu2Daxisymm
!> set coordinates for target interpolation points; for neutral inputs we are forced to do some of the property array allocations here
subroutine set_coordsi_neu2Daxisymm(self,cfg,x)
class(neutraldata2Daxisymm), intent(inout) :: self
type(gemini_cfg), intent(in) :: cfg
class(curvmesh), intent(in) :: x
real(wp) :: theta1,phi1,theta2,phi2,gammarads,theta3,phi3,gamma1,gamma2,phip
real(wp) :: xp,yp
real(wp), dimension(3) :: ezp,erhop,tmpvec,exprm
real(wp) :: tmpsca
integer :: ix1,ix2,ix3,iyn,izn,ixn,iid,ierr
! Space for coordinate sites and projections in neutraldata2D object
allocate(self%coord1i(x%lx1*x%lx2*x%lx3),self%coord2i(x%lx1*x%lx2*x%lx3))
allocate(self%coord3i(0)) ! destructor expects this allocated
self%zi=>self%coord1i; self%horzi=>self%coord2i; ! coordinates of interpolation sites
allocate(self%horzimat(x%lx1,x%lx2,x%lx3),self%zimat(x%lx1,x%lx2,x%lx3))
allocate(self%proj_ezp_e1(x%lx1,x%lx2,x%lx3),self%proj_ezp_e2(x%lx1,x%lx2,x%lx3),self%proj_ezp_e3(x%lx1,x%lx2,x%lx3))
allocate(self%proj_ehorzp_e1(x%lx1,x%lx2,x%lx3),self%proj_ehorzp_e2(x%lx1,x%lx2,x%lx3),self%proj_ehorzp_e3(x%lx1,x%lx2,x%lx3))
!Neutral source locations specified in input file, here referenced by spherical magnetic coordinates.
phi1=cfg%sourcemlon*pi/180
theta1=pi/2-cfg%sourcemlat*pi/180
    !Convert plasma simulation grid locations to z,rho values to be used in interpolation. altitude ~ zi; lat/lon --> rhoi. Also compute unit vectors and projections
print *, 'Computing alt,radial distance values for plasma grid and completing rotations'
self%zimat=x%alt !vertical coordinate
do ix3=1,x%lx3
do ix2=1,x%lx2
do ix1=1,x%lx1
!INTERPOLATION BASED ON GEOMAGNETIC COORDINATES
theta2=x%theta(ix1,ix2,ix3) !field point zenith angle
if (x%lx2/=1 .and. x%lx3/=1) then
phi2=x%phi(ix1,ix2,ix3) !field point azimuth, full 3D calculation
else
          phi2=phi1     !assume the longitude is the same as the source in 2D, i.e. assume the source epicenter is in the meridian of the grid
end if
!COMPUTE DISTANCES
gammarads=cos(theta1)*cos(theta2)+sin(theta1)*sin(theta2)*cos(phi1-phi2) !this is actually cos(gamma)
if (gammarads > 1) then !handles weird precision issues in 2D
gammarads = 1
else if (gammarads < -1) then
gammarads= -1
end if
          gammarads=acos(gammarads)     !angle between source location and field point (in radians)
self%horzimat(ix1,ix2,ix3)=Re*gammarads !rho here interpreted as the arc-length defined by angle between epicenter and ``field point''
          !we need a phi location (not spherical phi, but azimuth angle from epicenter), as well, but not for interpolation - just for doing vector rotations
theta3=theta2
phi3=phi1
gamma1=cos(theta2)*cos(theta3)+sin(theta2)*sin(theta3)*cos(phi2-phi3)
if (gamma1 > 1) then !handles weird precision issues in 2D
gamma1 = 1
else if (gamma1 < -1) then
gamma1 = -1
end if
gamma1=acos(gamma1)
gamma2=cos(theta1)*cos(theta3)+sin(theta1)*sin(theta3)*cos(phi1-phi3)
if (gamma2 > 1) then !handles weird precision issues in 2D
gamma2 = 1
else if (gamma2< -1) then
gamma2= -1
end if
gamma2=acos(gamma2)
xp=Re*gamma1
yp=Re*gamma2 !this will likely always be positive, since we are using center of earth as our origin, so this should be interpreted as distance as opposed to displacement
!COMPUTE COORDINATES FROM DISTANCES
          if (theta3>theta1) then       !place distances in correct quadrant; here the field point (theta3=theta2) is SOUTHward of the source point (theta1), whereas yp is distance northward, so throw in a negative sign
yp = -yp !do we want an abs here to be safe
end if
if (phi2<phi3) then !assume we aren't doing a global grid otherwise need to check for wrapping, here field point (phi2) less than source point (phi3=phi1)
xp = -xp
end if
phip=atan2(yp,xp)
          !PROJECTIONS FROM NEUTRAL GRID VECTORS TO PLASMA GRID VECTORS
          !projection factors for mapping from axisymmetric to dipole (go ahead and compute projections so we don't have to do it repeatedly as the sim runs)
ezp=x%er(ix1,ix2,ix3,:)
tmpvec=ezp*x%e2(ix1,ix2,ix3,:)
tmpsca=sum(tmpvec)
self%proj_ezp_e2(ix1,ix2,ix3)=tmpsca
tmpvec=ezp*x%e1(ix1,ix2,ix3,:)
tmpsca=sum(tmpvec)
self%proj_ezp_e1(ix1,ix2,ix3)=tmpsca
tmpvec=ezp*x%e3(ix1,ix2,ix3,:)
tmpsca=sum(tmpvec) !should be zero, but leave it general for now
self%proj_ezp_e3(ix1,ix2,ix3)=tmpsca
erhop=cos(phip)*x%e3(ix1,ix2,ix3,:) - sin(phip)*x%etheta(ix1,ix2,ix3,:) !unit vector for azimuth (referenced from epicenter - not geocenter!!!) in cartesian geocentric-geomagnetic coords.
tmpvec=erhop*x%e1(ix1,ix2,ix3,:)
tmpsca=sum(tmpvec)
self%proj_ehorzp_e1(ix1,ix2,ix3)=tmpsca
tmpvec=erhop*x%e2(ix1,ix2,ix3,:)
tmpsca=sum(tmpvec)
self%proj_ehorzp_e2(ix1,ix2,ix3)=tmpsca
tmpvec=erhop*x%e3(ix1,ix2,ix3,:)
tmpsca=sum(tmpvec)
self%proj_ehorzp_e3(ix1,ix2,ix3)=tmpsca
!end if
end do
end do
end do
print*, ' Done computing interpolation sites and rotations...'
!Assign values for flat lists of grid points
    self%zi=pack(self%zimat,.true.)     !create a flat list of grid points to be used by interpolation functions
self%horzi=pack(self%horzimat,.true.)
print*, ' Done packing arrays...'
!call clear_unitvecs(x)
!PRINT OUT SOME BASIC INFO ABOUT THE GRID THAT WE'VE LOADED
print *, 'Min/max rhoi,zi values',minval(self%horzi),maxval(self%horzi),minval(self%zi),maxval(self%zi)
print *, 'Source lat/long: ',cfg%sourcemlat,cfg%sourcemlon
print *, 'Plasma grid lat range: ',minval(x%glat(:,:,:)),maxval(x%glat(:,:,:))
print *, 'Plasma grid lon range: ',minval(x%glon(:,:,:)),maxval(x%glon(:,:,:))
self%flagcoordsi=.true.
end subroutine set_coordsi_neu2Daxisymm
  !> load source data size and grid information -- all workers will separately read these data.
  !    Note that this routine will allocate sizes for source coordinate grids, in contrast
! with other inputdata type extensions which have separate load_size, allocate, and
! load_grid procedures.
subroutine load_sizeandgrid_neu2Daxisymm(self,cfg)
class(neutraldata2Daxisymm), intent(inout) :: self
type(gemini_cfg), intent(in) :: cfg
real(wp), dimension(:), allocatable :: xn,yn ! for root to break off pieces of the entire grid array
integer :: ix1,ix2,ix3,ihorzn,izn,iid,ierr
integer :: lxntmp,lyntmp ! local copies for root, eventually these need to be stored in object
real(wp) :: maxzn
real(wp), dimension(2) :: xnrange,ynrange ! these eventually get stored in extents
integer, dimension(6) :: indices ! these eventually get stored in indx
integer :: ixn,iyn
integer :: lxn,lyn
real(wp) :: meanxn,meanyn
real(wp) :: dhorzn
!horizontal grid spacing
dhorzn=cfg%drhon
self%lxn=1 ! treat as a 3D dataset with singleton dimension along x
!Establish the size of the grid based on input file and distribute to workers
print '(A,/,A)', 'Inputting neutral size from: ',self%sourcedir
! bit of a tricky issue here; for neutral input, according to makedneuframes.m, the first integer in the size file is
! the horizontal grid point count for the input - which get_simsize3 interprets as lx1...
call get_simsize3(cfg%sourcedir, lx1=self%lhorzn, lx2all=self%lzn)
print *, 'Neutral data has lhorzn,lz size: ',self%lhorzn,self%lzn,' with spacing dhorzn,dz',dhorzn,cfg%dzn
if (self%lhorzn < 1 .or. self%lzn < 1) then
write(stderr,*) 'ERROR: reading ' // self%sourcedir
error stop 'neutral:gridproj_dneu2D: grid size must be strictly positive'
endif
self%lrhon=>self%lhorzn
!Everyone must allocate space for the grid of input data
allocate(self%coord1(self%lzn)) !these are module-scope variables
allocate(self%coord2(self%lhorzn)) ! FIXME: default to axisymmetric?
allocate(self%coord3(0)) ! must be allocated
self%zn=>self%coord1; self%horzn=>self%coord2;
self%rhon=>self%coord2
self%horzn=[ ((real(ihorzn, wp)-1)*dhorzn, ihorzn=1,self%lhorzn) ]
self%zn=[ ((real(izn, wp)-1)*cfg%dzn, izn=1,self%lzn) ]
print *, 'Creating neutral grid with rho,z extent: ',minval(self%horzn),maxval(self%horzn),minval(self%zn),maxval(self%zn)
self%flagdatasize=.true.
end subroutine load_sizeandgrid_neu2Daxisymm
subroutine destructor(self)
type(neutraldata2Daxisymm), intent(inout) :: self
! de facto neutral2D destructor takes care of most everything
call self%dissociate_neutral2D_pointers()
! now extension-specific quantities
nullify(self%lrhon,self%rhon,self%rhoi)
end subroutine destructor
end module neutraldata2Daxisymmobj
|
{"hexsha": "d0b8bbd01227a600a92b4ceeaf9d43ab29560d8f", "size": 11553, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/inputdata/neutraldata2Daxisymmobj.f90", "max_stars_repo_name": "gemini3d/GEMINI", "max_stars_repo_head_hexsha": "4655db755101a127bf1bfeddefd6c021f39b1bdb", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2019-06-17T20:51:31.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-12T17:46:00.000Z", "max_issues_repo_path": "src/inputdata/neutraldata2Daxisymmobj.f90", "max_issues_repo_name": "gemini3d/gemini", "max_issues_repo_head_hexsha": "4655db755101a127bf1bfeddefd6c021f39b1bdb", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2020-04-08T22:24:40.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-15T14:06:41.000Z", "max_forks_repo_path": "src/inputdata/neutraldata2Daxisymmobj.f90", "max_forks_repo_name": "mattzett/GEMINI", "max_forks_repo_head_hexsha": "4655db755101a127bf1bfeddefd6c021f39b1bdb", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-10-10T16:01:08.000Z", "max_forks_repo_forks_event_max_datetime": "2018-12-17T16:08:50.000Z", "avg_line_length": 46.5846774194, "max_line_length": 212, "alphanum_fraction": 0.6692633948, "num_tokens": 3480}
|
module ReturnTypes
using BinaryTraits: has_proper_return_type
using Test
import Base: +
struct MyInt <: Integer
value::Int
end
+(x::MyInt, y::Integer) = x.value + y
function test()
# concrete return type, zero ambiguity
f1(x::Int) = x + 1
@test has_proper_return_type(f1, Tuple{Int}, Int)
@test has_proper_return_type(f1, Tuple{Int}, Float64) == false
# abstract type argument, multiple possible return types
f2(x::Integer) = x + 1
@test has_proper_return_type(f2, Tuple{Int}, Int)
@test has_proper_return_type(f2, Tuple{Int8}, Int) # due to promotion rule
@test has_proper_return_type(f2, Tuple{UInt8}, Int) # due to promotion rule
@test has_proper_return_type(f2, Tuple{UInt64}, UInt64)
@test has_proper_return_type(f2, Tuple{Integer}, Int) == false
    # One more level up: ints promote to Float64. Complex remains complex.
f3(x::Number) = x + 1.0
@test has_proper_return_type(f3, Tuple{Int}, Float64) # promoted to Float64
@test has_proper_return_type(f3, Tuple{Complex}, Complex) # remains complex as input
@test has_proper_return_type(f3, Tuple{Int}, Int) == false # negative: no longer int
# Using custom type
# We know this has to be an Int
@test has_proper_return_type(+, Tuple{MyInt,Int}, Int)
# In this case, the returned type may not be Int because we don't have the concrete
# type for the 2nd argument. The result could be anything.
@test has_proper_return_type(+, Tuple{MyInt,Integer}, Int) == false
# Same as before, we cannot say the result would be an Integer either.
@test has_proper_return_type(+, Tuple{MyInt,Integer}, Integer) == false
end
end # module
using Test
@testset "Return Types" begin
import .ReturnTypes
ReturnTypes.test()
end
|
{"hexsha": "6b458719de936052cd5eaae2bc72691e0d642eb5", "size": 1795, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/test_return_types.jl", "max_stars_repo_name": "tk3369/BinaryTraits.jl", "max_stars_repo_head_hexsha": "cdac46ec724112b45ef66200fc4e9fd471d31700", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 42, "max_stars_repo_stars_event_min_datetime": "2020-03-09T17:22:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-12T05:30:44.000Z", "max_issues_repo_path": "test/test_return_types.jl", "max_issues_repo_name": "tk3369/BinaryTraits.jl", "max_issues_repo_head_hexsha": "cdac46ec724112b45ef66200fc4e9fd471d31700", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 50, "max_issues_repo_issues_event_min_datetime": "2020-03-14T23:59:41.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-26T19:48:18.000Z", "max_forks_repo_path": "test/test_return_types.jl", "max_forks_repo_name": "tk3369/BinaryTraits.jl", "max_forks_repo_head_hexsha": "cdac46ec724112b45ef66200fc4e9fd471d31700", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-04-13T02:04:34.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-18T02:46:28.000Z", "avg_line_length": 34.5192307692, "max_line_length": 90, "alphanum_fraction": 0.7008356546, "num_tokens": 516}
|
#include <Rcpp.h>
#include <RcppEigen.h>
#include <Eigen/Dense>
#include <queue>
// #include<Eigen/SparseCore>
using namespace Rcpp;
using namespace Eigen;
using namespace std;
// [[Rcpp::depends(RcppEigen)]]
//
using Eigen::Map; // 'maps' rather than copies
using Eigen::Matrix; // matrix generic
using Eigen::MatrixXd; // variable size matrix, double precision
using Eigen::VectorXd; // variable size vector, double precision
using Eigen::Transpositions;
using Eigen::HouseholderQR; // Fast scalable QR solver
using Eigen::ColPivHouseholderQR; // Fast scalable QR solver
using Eigen::FullPivHouseholderQR; // slow full (colsand rows pivoting)
using Eigen::JacobiSVD;
using Eigen::GeneralizedSelfAdjointEigenSolver; // one of the eigenvalue solvers
using Eigen::SelfAdjointEigenSolver; // one of the eigenvalue solvers
using Eigen::LLT;
using Eigen::LDLT;
using Rcpp::List;
using Rcpp::wrap;
// ########## OK version Sept 04 works
// copied to fspca_sept.cpp
// =========================================================================
// creates a sub-mat of S with indices in e
Eigen::MatrixXd makeSubS(Eigen::MatrixXd S, Eigen::VectorXi e){
int n = S.cols();
int r = S.rows();
int d = e.size();
if (d >= n) {
Rf_error("Too many indices to eliminate.\n");
}
  if (e.maxCoeff() >= n){
    Rf_error("largest index out of range (indices are 0-based).\n");
  }
Eigen::MatrixXd M(r, d );
for (int i = 0; i < d; ++i){
M.col(i) = S.col(e(i));
}
for (int i = 0; i < d; ++i){
M.row(i) = M.row(e(i));
}
return M.topLeftCorner(d, d);
}
// returns the rows in e and keeps the first c columns
Eigen::MatrixXd selectRowsC(Eigen::MatrixXd A, Eigen::VectorXi e, int c){
  // NOTE: indices are 0-based
  // NOTE: e must be sorted, e(0) < e(1)
int n = A.cols();
int r = A.rows();
int d = e.size();
if (d >= n) {
Rf_error("Too many indices to eliminate.\n");
}
  if (e.maxCoeff() >= n){
    Rf_error("largest index out of range (indices are 0-based).\n");
  }
Eigen::MatrixXd M(A.topLeftCorner(r,c));
for (int i = 0; i < d; ++i){
M.row(i) = M.row(e(i));
}
return M.topLeftCorner(d, c);
}
// Deflates S and D (pass the already-deflated matrices and the current loadings vector)
// returns the explained variance vexp by reference
void deflSandDC(Eigen::VectorXd a, Eigen::MatrixXd& K,
Eigen::MatrixXd& D, Eigen::VectorXi ind, double& vexp){
  // a holds only the nonzero loadings, indexed into the full matrices by ind
  // K = current (already deflated) S;  D = corresponding product matrix (D = S*S)
  // K <-- S - S a a' S / (a' S a)                                              (deflated S)
  // D <-- D - D a a' S/(a'Sa) - S a a' D/(a'Sa) + S a a' D a a' S/(a'Sa)^2     (deflated D)
const int n = ind.size();
const int p = K.cols();
// t = Sa
Eigen::VectorXd t = Eigen::VectorXd::Zero(p);
for (int i = 0; i < p; i++)
for(int k = 0; k < n; k++)
t(i) += K(i, ind(k)) * a(k ); // only elements in ind
// tt = a'Sa = t'a
double tt = 0.0;
for(int k = 0; k < n; k++)
tt += a(k) * t(ind(k));
if (tt > 0)
tt = 1/tt;
else
Rf_error("defSandD: tt is not > 0");
// O = Sa/(tt)
const Eigen::VectorXd O = (t.array()*tt).matrix();
const double cvk = K.trace();
// K = S - Saa'S/(a'Sa) deflated S
Eigen::MatrixXd L = t * O.transpose();
K = K - t * O.transpose(); //deflated S
vexp = cvk - K.trace() ;
// deflate D
// N = aa'S/(tt) = a*t'/(tt) = a*O' (n x p)
Eigen::MatrixXd N = Eigen::MatrixXd::Zero(n, p);
for (int i = 0; i < n; i++)
for (int j = 0; j < p; j++)
N(i, j) += a(i) * O(j);
//M = Daa'S/(a'Sa) = D.transpose() * N; // (p, p)
Eigen::MatrixXd M = Eigen::MatrixXd::Zero(p,p);
for (int i = 0; i < p; i++)
for (int j = 0; j < p; j++)
for(int k = 0; k < n; k++)
M(i, j) += D(i, ind(k)) * N(k, j); // (p, p)
// H = N.transpose() * M (p x p)
Eigen::MatrixXd H = Eigen::MatrixXd::Zero(p,p); //
for (int i = 0; i < p; i++)
for (int j = 0; j < p; j++)
for(int k = 0; k < n; k++)
H(i, j) += N(k, i) * M(ind(k), j);
D = (D.array() - M.array() - M.transpose().array() + H.array()).matrix();
return;
}
//
// finds the index of the max value in vt among entries still available (indnot == -2); marks it in indnot and returns the index
int findmax(Eigen::VectorXi& indnot, Eigen::VectorXd vt){
double p = indnot.size();
double m = 0.0;
int ind = 0;
for (int i = 0; i < p; i++){
if (indnot(i) == -2){
if(vt(i) > m){
m = vt(i);
ind = i;
}
}
}
indnot(ind) = ind;
return ind;
}
// Greedy forward selection: starting from the score vector si, repeatedly picks the variable with
// the largest partial contribution, deflates S by the chosen column, and stops once the cumulative
// explained variance reaches pvexp * totvexp. Selected indices are returned in ind and the number
// selected in card (both by reference).
void fwd_selectC(Eigen::MatrixXd S, Eigen::VectorXi& ind, int& card,
Eigen::VectorXd si, double totvexp, double pvexp,
double fullrank = 0.0){
Eigen::VectorXd sik = si;
int p = S.cols();
// int induno;
double tmp;
Eigen::VectorXd vexpt(p);
Eigen::VectorXd cvexpt(p);
Eigen::VectorXd vt(p);
Eigen::VectorXi indnot = Eigen::VectorXi::Constant(p, -2);
Eigen::VectorXd ba(p);
for (int i=0; i < p; i++)
vt(i) = sik(i) * sik(i) / S(i,i);
ind(0) = findmax(indnot, vt);
vexpt(0) = vt(ind(0));
cvexpt(0) = vt(ind(0));
int i = 1;
bool stopSelect = false;
// start looping ============================================
while (stopSelect == false){
tmp = sik(ind(i - 1))/S(ind(i - 1), ind(i - 1));
for (int j = 0; j < p; j++){
if ( indnot(j) == -2){
sik(j) = sik(j) - (tmp * S(ind(i-1), j));
}
else{
sik(j) = 0;
}
}
ba = (S.col(ind(i-1)).array()/sqrt(S(ind(i-1), ind(i-1)))).matrix();
S = S - ba * ba.transpose();
for (int j = 0; j < p; j++){
if ( indnot(j) == -2){
if (S(j,j)> fullrank)
vt(j) = sik(j) * sik(j)/S(j,j);
else{
indnot(j) = -1;
vt(j) = 0;
}
}
else{
vt(j) = 0;
}
}
ind(i) = findmax(indnot, vt);
indnot(ind(i)) = 0;
vexpt(i) = vt(ind(i));
cvexpt(i) = cvexpt(i-1) + vexpt(i);
if (cvexpt(i) >= pvexp*totvexp){
card = i + 1;
stopSelect = true;
}
else{
i = i + 1;
}
// Rcpp::checkUserInterrupt();
}
}
/* not needed
// power method computes only the first eigvec, about 82 times faster than eigen!
Eigen::VectorXd eigvecPMC(Eigen::MatrixXd& X, double& val, double eps = 10E-5){
const int p = X.cols();
double sqp = sqrt(double(p));
Eigen::VectorXd v0 = VectorXd::Constant(p, 1.0/sqp);
Eigen::VectorXd v = VectorXd::Constant(p, 0.0);
double stp = 1.0;
int k = 0;
while (stp > eps){
v = X * v0;
val = v.norm();
v = v.array()/val;
stp = (v0.array() - v.array()).matrix().norm();
v0 = v;
k++;
if (k > 100){
Rf_warning("Powermethod: not converged in 100 iterations. Error is", k);
break;//here should use try-catch
}
}
// Rcout << "k = " << k << "; stp = " << stp << endl;
return (v.array() * val);
}
*/
// This is the main function exported to R.
// S: correlation matrix
// D: precomputed product matrix (D = S*S), passed in
// pvexpfs: proportion of the current PC's variance to explain within each block
// pvexp: proportion of the total variance of S to explain before the component loop terminates
// ncomps: maximum number of components; takes priority over pvexp (0 means use all p)
// fullrank: small threshold used to discard variables from selection
// newpc: if false, uses the PCs of S instead of computing a new PC for each block (for large matrices)
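// Hypothetical R usage after compiling with Rcpp::sourceCpp (the data matrix X is illustrative;
// D is the product matrix S*S assumed by deflSandDC above):
//   S <- cor(X); D <- S %*% S
//   res <- fspcaCD(S, D, pvexpfs = 0.95, pvexp = 0.95)
//   res$loadings; res$ind; res$card; res$vexp; res$cvexp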
// [[Rcpp::export]]
List fspcaCD(Eigen::MatrixXd S, Eigen::MatrixXd D, double pvexpfs = 0.95, double pvexp = 0.95,
int ncomps = 0, double fullrank = 0, bool newpc = true, double eps = 10E-8){
int p = S.cols();
if (ncomps == 0)
ncomps = p;
Eigen::MatrixXd K(S);
Eigen::MatrixXd M = D;
SelfAdjointEigenSolver<Eigen::MatrixXd> es(S);
// here could compute D as vec * diag(val^2) * vec.transpose
Eigen::MatrixXd vec = es.eigenvectors().rowwise().reverse();
Eigen::VectorXd vexppc = es.eigenvalues().reverse();
double totvexp = vexppc.sum();// total variance S
double maxvexp = vexppc(0);// this is vexp by first PC for fow_select
Eigen::VectorXd si = vec.col(0) * vexppc(0);
Eigen::VectorXd a(p);
Eigen::MatrixXd A = Eigen::MatrixXd::Zero(p, ncomps);
// List load(p);
List indout(p);
Eigen::VectorXd vexp = Eigen::VectorXd::Zero(ncomps);
Eigen::VectorXd cvexp = vexp;
double cvt;
Eigen::VectorXi indj(p);//this to pass to fwd_select
Eigen::MatrixXd Sd(p, p);// this takes S[onlyind, onlyind]
Eigen::MatrixXd Dd(p, p);// this takes D[onlyind,onlyind] deflated
int cardt = 0;
int totcard = 0;
Eigen::VectorXi card(p);
int nc = 0;
bool stopComp = false;
int j = 0;
while (stopComp == false){
fwd_selectC(S, indj, cardt, si, maxvexp, pvexpfs, fullrank);
card(j) = cardt;
std::sort(indj.data(),indj.data() + cardt);
// if ( j == 2)
// Rf_error("done 1");
    totcard = totcard + cardt; // what is this for?
    // create submatrices for computing loadings
Sd.topLeftCorner(cardt, cardt) = makeSubS(S, indj.head(cardt));
Dd.topLeftCorner(cardt, cardt) = makeSubS(M, indj.head(cardt));
// compute loadings
GeneralizedSelfAdjointEigenSolver<Eigen::MatrixXd> es(Dd.topLeftCorner(cardt, cardt),
Sd.topLeftCorner(cardt, cardt));
// save loadings
a.head(cardt) = es.eigenvectors().col(cardt - 1);
// save loadings in column j
for (int i = 0; i < cardt; i++){
A(indj(i), j) = es.eigenvectors()(i, cardt - 1);
}
// save loadings in list
indout[j] = indj.head(cardt).array() + 1;
nc = nc + 1;
    // this function deflates S (K) and M using only the last vector of loadings
    // and returns the deflated matrices by reference along with vexp (not cumulative vexp)
deflSandDC(a.head(cardt), K, M, indj.head(cardt), cvt);
vexp(j) = cvt;
if (j > 0)
cvexp(j) = cvt + cvexp(j-1);
else
cvexp(j) = cvt;
// checks if stopComp met
if ((cvexp(j) > pvexp * totvexp) || ((j + 1) == ncomps)){
stopComp = true;
ncomps = nc;
}
else{
if (newpc == true){
SelfAdjointEigenSolver<Eigen::MatrixXd> es(K);
// // this is ok because X'K = K'K, so X'Kv = K'Kv = v*lambda_1
maxvexp = es.eigenvalues()(p-1);
si = es.eigenvectors().col(p-1).array() * maxvexp;
// this power method, returns si and passes maxvexp byref
//si = eigvecPMC(K, maxvexp, eps);
}
else{// this takes the jth pc
maxvexp = vexppc(j);
si = vec.col(j).array() * maxvexp;
}
j = j + 1;
}
}//end compute comps
IntegerVector idx = Rcpp::seq(0, nc - 1);
return List::create(Named("loadings") = A.topLeftCorner(p,nc), Named("ncomps") = nc,
Named("ind") = indout[idx], Named("card") = card.head(nc),
Named("vexp") = vexp.head(nc), Named("cvexp") = cvexp.head(nc));
}
|
{"hexsha": "9cc12e26adf6ec47edc26d1b60dd394212928456", "size": 11309, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "fspcaC_sept_passD.cpp", "max_stars_repo_name": "denis-rinfret/gioden", "max_stars_repo_head_hexsha": "39f5fab1311420e4b6f9b74e67eb24e9b6a0ab77", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "fspcaC_sept_passD.cpp", "max_issues_repo_name": "denis-rinfret/gioden", "max_issues_repo_head_hexsha": "39f5fab1311420e4b6f9b74e67eb24e9b6a0ab77", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "fspcaC_sept_passD.cpp", "max_forks_repo_name": "denis-rinfret/gioden", "max_forks_repo_head_hexsha": "39f5fab1311420e4b6f9b74e67eb24e9b6a0ab77", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.6047120419, "max_line_length": 96, "alphanum_fraction": 0.5278981342, "num_tokens": 3603}
|
/**
* Project: The Stock Libraries
*
* File: utilities.cpp
* Created: Jun 25, 2012
*
* Author: Abhinav Sarje <abhinav.sarje@gmail.com>
*
* Copyright (c) 2012-2017 Abhinav Sarje
* Distributed under the Boost Software License.
* See accompanying LICENSE file.
*/
#include <iostream>
#include <cmath>
#include <boost/math/special_functions/fpclassify.hpp>
#include "utilities.hpp"
namespace stock {
/**
* specialization for vector3_t
*/
template <>
vector3_t min <vector3_t> (vector3_t a, vector3_t b) {
return vector3_t((a[0] < b[0] ? a[0] : b[0]), (a[1] < b[1] ? a[1] : b[1]),
(a[2] < b[2] ? a[2] : b[2]));
} // min <vector3_t>
/**
* specialization for vector3_t
*/
template <>
vector3_t max <vector3_t> (vector3_t a, vector3_t b) {
return vector3_t((a[0] > b[0] ? a[0] : b[0]), (a[1] > b[1] ? a[1] : b[1]),
(a[2] > b[2] ? a[2] : b[2]));
} // max <vector3_t>
/**
* apply log10 to all elements of the 2D matrix
*/
bool mat_log10_2d(unsigned int x_size, unsigned int y_size, real_t* &data) {
if(data == NULL) {
std::cerr << "error: data is null while calculating log10" << std::endl;
return false;
} // if
for(unsigned int i = 0; i < x_size * y_size; ++ i) {
if(data[i] <= 0) {
if(data[i] == 0) {
data[i] = 0.0;
continue;
} else {
std::cerr << "error: matrix has a negative value. cannot calculate logarithm"
<< std::endl;
return false;
} // if-else
} else
data[i] = log10(data[i]);
} // for
return true;
} // mat_log10()
/**
* compute the transpose of a matrix
* use boost libs ...
*/
bool transpose(unsigned int x_size, unsigned int y_size, const real_t *matrix, real_t* &transp) {
if(matrix == NULL) {
std::cerr << "error: matrix is NULL while tranposing" << std::endl;
return false;
} // if
transp = new (std::nothrow) real_t[x_size * y_size];
for(unsigned int y = 0; y < y_size; ++ y) {
for(unsigned int x = 0; x < x_size; ++ x) {
transp[y_size * x + y] = matrix[x_size * y + x];
} // for x
} // for y
return true;
} // transpose()
/**
* matrix multiplication for two 3x3 matrices
* operation is:
* x1 x2 x3 a1 a2 a3 d1 d2 d3
* y1 y2 y3 = b1 b2 b3 x e1 e2 e3
* z1 z2 z3 c1 c2 c3 f1 f2 f3
* use boost libs ... and make it general ...
*/
bool mat_mul_3x3(vector3_t a, vector3_t b, vector3_t c,
vector3_t d, vector3_t e, vector3_t f,
vector3_t& x, vector3_t& y, vector3_t& z) {
real_t *A = new (std::nothrow) real_t[9];
real_t *B = new (std::nothrow) real_t[9];
real_t *C = new (std::nothrow) real_t[9];
A[0] = a[0]; A[1] = a[1]; A[2] = a[2];
A[3] = b[0]; A[4] = b[1]; A[5] = b[2];
A[6] = c[0]; A[7] = c[1]; A[8] = c[2];
B[0] = d[0]; B[1] = d[1]; B[2] = d[2];
B[3] = e[0]; B[4] = e[1]; B[5] = e[2];
B[6] = f[0]; B[7] = f[1]; B[8] = f[2];
for(int i = 0; i < 3; i ++) {
for(int j = 0; j < 3; j ++) {
C[3 * i + j] = 0.0;
for(int k = 0; k < 3; k ++) {
C[3 * i + j] += A[3 * i + k] * B[3 * k + j];
} // for k
} // for j
} // for i
x[0] = C[0]; x[1] = C[1]; x[2] = C[2];
y[0] = C[3]; y[1] = C[4]; y[2] = C[5];
z[0] = C[6]; z[1] = C[7]; z[2] = C[8];
delete[] C;
delete[] B;
delete[] A;
return true;
} // mat_mul_3x3()
/**
* matrix vector product for matrix of size 3x3 and vector of size 1x3
* operation is:
* x1 a1 a2 a3 d1
* x2 = b1 b2 b3 x d2
* x3 c1 c2 c3 d3
* note: transpose of d is used
* use boost libs ...
*/
bool mat_mul_3x1(vector3_t a, vector3_t b, vector3_t c, vector3_t d, vector3_t& x) {
x[0] = a[0] * d[0] + a[1] * d[1] + a[2] * d[2];
x[1] = b[0] * d[0] + b[1] * d[1] + b[2] * d[2];
x[2] = c[0] * d[0] + c[1] * d[1] + c[2] * d[2];
return true;
} // mat_mul_3x1()
/**
* specialized floor function
*/
vector3_t floor(vector3_t a) {
return vector3_t(std::floor(a[0]), std::floor(a[1]), std::floor(a[2]));
} // floor()
/**
* comparison of complex numbers
*/
bool operator<(complex_t a, complex_t b) {
if(std::real(a) < std::real(b)) return true;
if(std::real(a) == std::real(b) && std::imag(a) < std::imag(b)) return true;
//if(a.x < b.x) return true;
//if(a.x == b.x && a.y < b.y) return true;
return false;
} // operator<()
/**
* arithmetic operators for complex types
*/
complex_t operator*(complex_t c, complex_t s) {
return complex_t(c.real() * s.real() - c.imag() * s.imag(),
c.real() * s.imag() + c.imag() * s.real());
} // operator*()
complex_t operator*(complex_t c, real_t s) {
return complex_t(c.real() * s, c.imag() * s);
} // operator*()
complex_t operator*(real_t s, complex_t c) {
return complex_t(c.real() * s, c.imag() * s);
} // operator*()
std::complex<long double> operator*(std::complex<long double> c, long double s) {
return std::complex<long double>(c.real() * s, c.imag() * s);
} // operator*()
std::complex<long double> operator/(std::complex<long double> c, long double s) {
return std::complex<long double>(c.real() / s, c.imag() / s);
  } // operator/()
/**
* constructs element-by-element sum of two matrices into result
*/
bool mat_add(unsigned int x1_size, unsigned int y1_size, unsigned int z1_size,
const std::vector<complex_t>& matrix1,
unsigned int x2_size, unsigned int y2_size, unsigned int z2_size,
const std::vector<complex_t>& matrix2,
std::vector<complex_t>& result) {
if(x1_size != x2_size || y1_size != y2_size || z1_size != z2_size
|| matrix1.size() != matrix2.size()) {
std::cerr << "error: matrix sizes are not the same for addition operation" << std::endl;
return false;
} // if
result.clear();
std::vector<complex_t>::const_iterator i1 = matrix1.begin();
std::vector<complex_t>::const_iterator i2 = matrix2.begin();
for(; i1 != matrix1.end(); ++ i1, ++ i2) {
result.push_back((*i1) + (*i2));
} // for
return true;
} // mat_add()
/**
* performs in-place element-by-element sum of two matrices into first matrix
*/
bool mat_add_in(unsigned int x1_size, unsigned int y1_size, unsigned int z1_size,
std::vector<complex_t>& matrix1,
unsigned int x2_size, unsigned int y2_size, unsigned int z2_size,
std::vector<complex_t>& matrix2) {
if(x1_size != x2_size || y1_size != y2_size || z1_size != z2_size
|| matrix1.size() != matrix2.size()) {
std::cerr << "error: matrix sizes are not the same for addition operation" << std::endl;
return false;
} // if
std::vector<complex_t>::iterator i1 = matrix1.begin();
std::vector<complex_t>::iterator i2 = matrix2.begin();
for(; i1 != matrix1.end(); ++ i1, ++ i2) {
*i1 = (*i1) + (*i2);
} // for
return true;
} // mat_add()
/**
* scalar-matrix multiplication into result
*/
bool mat_mul(real_t scalar, const complex_vec_t& matrix, complex_vec_t& result) {
result.clear();
for(std::vector<complex_t>::const_iterator i = matrix.begin(); i != matrix.end(); ++ i) {
result.push_back((*i) * scalar);
} // for
return true;
} // mat_mul()
bool mat_mul(complex_t scalar, const complex_vec_t& matrix, complex_vec_t& result) {
result.clear();
for(complex_vec_t::const_iterator i = matrix.begin(); i != matrix.end(); ++ i) {
result.push_back((*i) * scalar);
} // for
return true;
} // mat_mul()
bool mat_mul(const complex_vec_t& matrix, real_t scalar, complex_vec_t& result) {
return mat_mul(scalar, matrix, result);
} // mat_mul()
bool mat_mul(const complex_vec_t& matrix, complex_t scalar, complex_vec_t& result) {
return mat_mul(scalar, matrix, result);
} // mat_mul()
/**
* in-place scalar-matrix multiplication
*/
bool mat_mul_in(real_t scalar, complex_vec_t& matrix) {
for(complex_vec_t::iterator i = matrix.begin(); i != matrix.end(); ++ i) {
*i = (*i) * scalar;
} // for
return true;
} // mat_mul()
bool mat_mul_in(complex_t scalar, complex_vec_t& matrix) {
for(complex_vec_t::iterator i = matrix.begin(); i != matrix.end(); ++ i) {
*i = (*i) * scalar;
} // for
return true;
} // mat_mul()
bool mat_mul_in(complex_vec_t& matrix, real_t scalar) {
return mat_mul_in(scalar, matrix);
} // mat_mul()
bool mat_mul_in(complex_vec_t& matrix, complex_t scalar) {
return mat_mul_in(scalar, matrix);
} // mat_mul()
/**
* computes element-by-element product of two matrices into result
*/
bool mat_dot_prod(unsigned int x1_size, unsigned int y1_size, unsigned int z1_size,
const complex_vec_t& matrix1,
unsigned int x2_size, unsigned int y2_size, unsigned int z2_size,
const complex_vec_t& matrix2,
complex_vec_t& result) {
if(x1_size != x2_size || y1_size != y2_size || z1_size != z2_size
|| matrix1.size() != matrix2.size()) {
std::cerr << "error: matrix sizes are not the same for dot product operation" << std::endl;
return false;
} // if
result.clear();
complex_vec_t::const_iterator i1 = matrix1.begin();
complex_vec_t::const_iterator i2 = matrix2.begin();
for(; i1 != matrix1.end(); ++ i1, ++ i2) {
result.push_back((*i1) * (*i2));
} // for
return true;
} // mat_dot_prod()
/**
* performs in-place element-by-element product of two matrices
*/
bool mat_dot_prod_in(unsigned int x1_size, unsigned int y1_size, unsigned int z1_size,
std::vector<complex_t>& matrix1,
unsigned int x2_size, unsigned int y2_size, unsigned int z2_size,
std::vector<complex_t>& matrix2) {
if(x1_size != x2_size || y1_size != y2_size || z1_size != z2_size
|| matrix1.size() != matrix2.size()) {
std::cerr << "error: matrix sizes are not the same for dot product operation" << std::endl;
return false;
} // if
std::vector<complex_t>::iterator i1 = matrix1.begin();
std::vector<complex_t>::iterator i2 = matrix2.begin();
for(; i1 != matrix1.end(); ++ i1, ++ i2) {
*i1 = (*i1) * (*i2);
} // for
return true;
} // mat_dot_prod()
/**
* computes element-by-element division of two matrices (matrix1 / matrix2) into result
*/
bool mat_dot_div(unsigned int nx1, unsigned int ny1, unsigned int nz1,
const complex_vec_t& matrix1,
unsigned int nx2, unsigned int ny2, unsigned int nz2,
const complex_vec_t& matrix2,
complex_vec_t& result) {
if(nx1 != nx2 || ny1 != ny2 || nz1 != nz2 || matrix1.size() != matrix2.size()) {
std::cerr << "error: matrix sizes are not the same for dot division operation"
<< std::endl;
return false;
} // if
result.clear();
complex_vec_t::const_iterator i1 = matrix1.begin();
complex_vec_t::const_iterator i2 = matrix2.begin();
for(; i1 != matrix1.end(); ++ i1, ++ i2) {
result.push_back((*i1) / (*i2));
} // for
return true;
} // mat_dot_div()
/**
* performs in-place element-by-element division of two matrices (matrix1/matrix2) into matrix1
*/
bool mat_dot_div_in(unsigned int nx1, unsigned int ny1, unsigned int nz1,
std::vector<complex_t>& matrix1,
unsigned int nx2, unsigned int ny2, unsigned int nz2,
std::vector<complex_t>& matrix2) {
if(nx1 != nx2 || ny1 != ny2 || nz1 != nz2 || matrix1.size() != matrix2.size()) {
std::cerr << "error: matrix sizes are not the same for dot division operation"
<< std::endl;
return false;
} // if
std::vector<complex_t>::iterator i1 = matrix1.begin();
std::vector<complex_t>::iterator i2 = matrix2.begin();
for(; i1 != matrix1.end(); ++ i1, ++ i2) {
*i1 = (*i1) / (*i2);
} // for
return true;
} // mat_dot_div_in()
bool mat_sqr(const complex_vec_t& matrix, complex_vec_t& result) {
result.clear();
for(complex_vec_t::const_iterator i = matrix.begin(); i != matrix.end(); ++ i) {
result.push_back((*i) * (*i));
} // for
return true;
} // mat_sqr()
bool mat_sqr_in(//unsigned int nx, unsigned int ny, unsigned int nz,
std::vector<complex_t>& matrix) {
for(std::vector<complex_t>::iterator i = matrix.begin(); i != matrix.end(); ++ i) {
*i = (*i) * (*i);
} // for
return true;
} // mat_sqr_in()
bool mat_sqrt(const complex_vec_t& matrix, complex_vec_t& result) {
result.clear();
for(complex_vec_t::const_iterator i = matrix.begin(); i != matrix.end(); ++ i) {
result.push_back(sqrt(*i));
} // for
return true;
} // mat_sqrt()
bool mat_sqrt_in(//unsigned int nx, unsigned int ny, unsigned int nz,
std::vector<complex_t>& matrix) {
for(std::vector<complex_t>::iterator i = matrix.begin(); i != matrix.end(); ++ i) {
*i = sqrt(*i);
} // for
return true;
} // mat_sqrt_in()
bool mat_exp(complex_vec_t& matrix, complex_vec_t& result) {
  result.clear();
  for(complex_vec_t::iterator i = matrix.begin(); i != matrix.end(); ++ i)
    result.push_back(exp(*i));
  return true;
} // mat_exp()
bool mat_exp_in(complex_vec_t& matrix) {
  for(complex_vec_t::iterator i = matrix.begin(); i != matrix.end(); ++ i) *i = exp(*i);
  return true;
} // mat_exp_in()
// compute integral of e^(ikx) between x1 and x2
complex_t integral_e(real_t x1, real_t x2, complex_t k) {
if(boost::math::fpclassify(k.real()) == FP_ZERO &&
boost::math::fpclassify(k.imag()) == FP_ZERO) {
return complex_t(x2 - x1, 0);
} else {
complex_t ik = complex_t(0.0, 1.0) * k;
return (((real_t) 1.0 / ik) * (exp(ik * x2) - exp(ik * x1)));
} // if-else
} // integral_e
// compute integral of (ax + b) e^(ikx) between x1 and x2
complex_t integral_xe(real_t x1, real_t x2, real_t a, real_t b, complex_t k) {
if(boost::math::fpclassify(k.real()) == FP_ZERO &&
boost::math::fpclassify(k.imag()) == FP_ZERO) {
return complex_t(a * (x2 * x2 - x1 * x1) / 2 + b * (x2 - x1), 0.0);
} else {
complex_t ik = complex_t(0.0, 1.0) * k;
return (((real_t) 1.0 / ik) * ((a * x2 + b - a / ik) * exp(ik * x2) -
(a * x1 + b - a / ik) * exp(ik * x1)));
} // if-else
} // integral_xe()
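// Closed forms implemented above (the k == 0 branches are the limits of these):
//   integral_e:  \int_{x1}^{x2} e^{ikx} dx = (e^{ik x2} - e^{ik x1}) / (ik)
//   integral_xe: \int_{x1}^{x2} (ax + b) e^{ikx} dx
//                = [ (ax + b - a/(ik)) e^{ikx} / (ik) ] evaluated from x1 to x2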
} // namespace stock
|
{"hexsha": "699698811de216e8f559f7bbc3f93d6897e9ddba", "size": 13746, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "image/utilities.cpp", "max_stars_repo_name": "mywoodstock/woo", "max_stars_repo_head_hexsha": "7a6e39b2914ec8ff5bf52c3aa5217214532390e4", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2017-05-09T14:25:18.000Z", "max_stars_repo_stars_event_max_datetime": "2017-05-09T14:25:18.000Z", "max_issues_repo_path": "image/utilities.cpp", "max_issues_repo_name": "mywoodstock/woo", "max_issues_repo_head_hexsha": "7a6e39b2914ec8ff5bf52c3aa5217214532390e4", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "image/utilities.cpp", "max_forks_repo_name": "mywoodstock/woo", "max_forks_repo_head_hexsha": "7a6e39b2914ec8ff5bf52c3aa5217214532390e4", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.8206278027, "max_line_length": 98, "alphanum_fraction": 0.6049032446, "num_tokens": 4614}
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import gdspy
import uuid
import picwriter.toolkit as tk
class Taper(gdspy.Cell):
""" Taper Cell class (subclass of gdspy.Cell).
Args:
* **wgt** (WaveguideTemplate): WaveguideTemplate object
* **length** (float): Length of the taper
           * **end_width** (float): Final width of the taper (initial width received from WaveguideTemplate)
Keyword Args:
* **port** (tuple): Cartesian coordinate of the input port. Defaults to (0,0).
* **direction** (string): Direction that the component will point *towards*, can be of type `'NORTH'`, `'WEST'`, `'SOUTH'`, `'EAST'`, OR an angle (float, in radians)
* **end_clad_width** (float): Clad width at the end of the taper. Defaults to the regular clad width.
* **extra_clad_length** (float): Extra cladding beyond the end of the taper. Defaults to 2*end_clad_width.
Members:
* **portlist** (dict): Dictionary with the relevant port information
Portlist format:
* portlist['input'] = {'port': (x1,y1), 'direction': 'dir1'}
* portlist['output'] = {'port': (x2, y2), 'direction': 'dir2'}
Where in the above (x1,y1) is the same as the 'port' input, (x2, y2) is the end of the taper, and 'dir1', 'dir2' are of type `'NORTH'`, `'WEST'`, `'SOUTH'`, `'EAST'`, *or* an angle in *radians*.
'Direction' points *towards* the waveguide that will connect to it.
"""
def __init__(self, wgt, length, end_width, end_clad_width=None, extra_clad_length=None, port=(0,0), direction='EAST'):
gdspy.Cell.__init__(self, "Taper--"+str(uuid.uuid4()))
self.portlist = {}
self.port = port
self.trace=[port, tk.translate_point(port, length, direction)]
self.direction = direction
self.start_width = wgt.wg_width
self.end_width = end_width
        self.end_clad_width = wgt.clad_width if end_clad_width is None else end_clad_width
        self.extra_clad_length = 2*self.end_clad_width if extra_clad_length is None else extra_clad_length
self.resist = wgt.resist
self.wgt = wgt
self.wg_spec = {'layer': wgt.wg_layer, 'datatype': wgt.wg_datatype}
self.clad_spec = {'layer': wgt.clad_layer, 'datatype': wgt.clad_datatype}
self.type_check_trace()
self.build_cell()
self.build_ports()
def type_check_trace(self):
trace = []
""" Round each trace value to the nearest 1e-6 -- prevents
some typechecking errors
"""
for t in self.trace:
            trace.append((round(t[0], 6), round(t[1], 6)))
self.trace = trace
""" Make sure all waypoints specify 90degree angles. This might be
updated in the future to allow for 45deg, or arbitrary bends. For now,
though, rotations are supported via gdspy library
"""
dx = abs(self.trace[1][0]-self.trace[0][0])
dy = abs(self.trace[1][1]-self.trace[0][1])
if dx>=1e-6 and dy>=1e-6:
raise ValueError("Warning! Both waypoints *must* specify horizontal "
"or vertical tapers.")
def build_cell(self):
# Sequentially build all the geometric shapes using gdspy path functions
# for waveguide, then add it to the Cell
angle = tk.get_angle(self.trace[0], self.trace[1])
# Add waveguide taper
path = gdspy.Path(self.wgt.wg_width, self.trace[0])
path.segment(tk.dist(self.trace[0], self.trace[1]),
direction=angle, final_width=self.end_width, **self.wg_spec)
# Cladding for waveguide taper
path2 = gdspy.Path(2*self.wgt.clad_width+self.wgt.wg_width, self.trace[0])
path2.segment(tk.dist(self.trace[0], self.trace[1]), direction=angle,
final_width=2*self.end_clad_width+self.end_width, **self.clad_spec)
path2.segment(self.extra_clad_length, **self.clad_spec)
self.add(path)
self.add(path2)
def build_ports(self):
# Portlist format:
        # example: {'port': (x_position, y_position), 'direction': 'NORTH'}
self.portlist["input"] = {'port':self.trace[0], 'direction':tk.flip_direction(self.direction)}
self.portlist["output"] = {'port':self.trace[1], 'direction':self.direction}
if __name__ == "__main__":
from . import *
top = gdspy.Cell("top")
wgt = WaveguideTemplate(bend_radius=50, resist='+')
wg1=Waveguide([(0,0), (100,0)], wgt)
tk.add(top, wg1)
tp1 = Taper(wgt, 100.0, 0.3, end_clad_width=50, **wg1.portlist["input"])
tp2 = Taper(wgt, 100.0, 0.5, end_clad_width=15, **wg1.portlist["output"])
tk.add(top, tp1)
tk.add(top, tp2)
gdspy.LayoutViewer()
# gdspy.write_gds('taper.gds', unit=1.0e-6, precision=1.0e-9)
|
{"hexsha": "a6f3e068a923a0a155484bce9dd28fdef4c1b2d8", "size": 4941, "ext": "py", "lang": "Python", "max_stars_repo_path": "picwriter/components/taper.py", "max_stars_repo_name": "crioso/PICwriter", "max_stars_repo_head_hexsha": "24b4ca37361899cba9d23c057b14429055a3da0f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "picwriter/components/taper.py", "max_issues_repo_name": "crioso/PICwriter", "max_issues_repo_head_hexsha": "24b4ca37361899cba9d23c057b14429055a3da0f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "picwriter/components/taper.py", "max_forks_repo_name": "crioso/PICwriter", "max_forks_repo_head_hexsha": "24b4ca37361899cba9d23c057b14429055a3da0f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.5135135135, "max_line_length": 202, "alphanum_fraction": 0.6235579842, "include": true, "reason": "import numpy", "num_tokens": 1346}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import time
import numpy as np
import pandas as pd
from hurry.filesize import size
from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_rand_score, calinski_harabasz_score
from tqdm import tqdm
from dpu_kmeans import KMeans as DPU_KMeans
from dpu_kmeans import _dimm
n_clusters = 2
n_init = 10
max_iter = 500
verbose = False
tol = 1e-4
random_state = 42
n_dpu_set = [256, 512, 1024, 2048, 2523]
DPU_times = []
DPU_kernel_runtimes = []
DPU_preprocessing_times = []
DPU_init_times = []
DPU_main_loop_timers = []
DPU_iterations = []
DPU_scores = []
DPU_time_per_iter = []
DPU_inter_pim_core_times = []
DPU_cpu_pim_times = []
DPU_pim_cpu_times = []
DPU_inertia_times = []
DPU_reallocate_times = []
DPU_train_times = []
CPU_times = []
CPU_main_loop_timers = []
CPU_iterations = []
CPU_scores = []
CPU_time_per_iter = []
CPU_preprocessing_times = []
CPU_train_times = []
cross_scores = []
##################################################
# DATA READ #
##################################################
if len(sys.argv) >= 2:
higgs_file = sys.argv[1]
else:
higgs_file = "data/higgs.pq"
df = pd.read_parquet(higgs_file)
data, tags = np.require(
df.iloc[:, 1:].to_numpy(dtype=np.float32), requirements=["C", "A", "O"]
), np.require(df.iloc[:, 0].to_numpy(dtype=int), requirements=["O"])
n_points, n_dim = data.shape
del df
print(f"raw data size : {size(sys.getsizeof(data))}")
##################################################
# CPU PERF #
##################################################
# perform clustering on CPU
tic = time.perf_counter()
CPU_kmeans = KMeans(
n_clusters,
init="random",
n_init=n_init,
max_iter=max_iter,
tol=tol,
verbose=verbose,
copy_x=False,
random_state=random_state,
algorithm="full",
)
CPU_kmeans.fit(data)
toc = time.perf_counter()
# read timers
CPU_centroids = CPU_kmeans.cluster_centers_
CPU_iter_counter = CPU_kmeans.n_iter_
CPU_main_loop_timer = CPU_kmeans.main_loop_timer_
CPU_preprocessing_timer = CPU_kmeans.preprocessing_timer_
CPU_train_timer = CPU_kmeans.train_time_
CPU_timer = toc - tic
for i_n_dpu, n_dpu in enumerate(pbar := tqdm(n_dpu_set, file=sys.stdout)):
pbar.set_description(f"{n_dpu} dpus, raw data size : {size(sys.getsizeof(data))}")
##################################################
# DPU PERF #
##################################################
# load the DPUS
_dimm.free_dpus()
tic = time.perf_counter()
_dimm.set_n_dpu(n_dpu)
_dimm.load_kernel("kmeans", verbose)
toc = time.perf_counter()
DPU_init_time = toc - tic
# perform clustering on DPU
tic = time.perf_counter()
DPU_kmeans = DPU_KMeans(
n_clusters,
reload_data=True,
init="random",
n_init=n_init,
max_iter=max_iter,
tol=tol,
verbose=verbose,
copy_x=False,
random_state=random_state,
)
DPU_kmeans.fit(data)
toc = time.perf_counter()
# read timers
DPU_centroids = DPU_kmeans.cluster_centers_
DPU_iter_counter = DPU_kmeans.n_iter_
DPU_kernel_runtime = DPU_kmeans.dpu_run_time_
DPU_main_loop_timer = DPU_kmeans.main_loop_timer_
DPU_preprocessing_timer = DPU_kmeans.preprocessing_timer_
DPU_cpu_pim_timer = DPU_kmeans.cpu_pim_time_
DPU_pim_cpu_timer = DPU_kmeans.pim_cpu_time_
DPU_inertia_timer = DPU_kmeans.inertia_timer_
DPU_reallocate_timer = DPU_kmeans.reallocate_timer_
DPU_train_timer = DPU_kmeans.train_time_
DPU_timer = toc - tic
pbar.set_description(f"{n_dpu} dpus, quantized size : {size(_dimm._data_size)}")
##################################################
# LOGGING #
##################################################
DPU_times.append(DPU_timer)
DPU_kernel_runtimes.append(DPU_kernel_runtime)
DPU_preprocessing_times.append(DPU_preprocessing_timer)
DPU_iterations.append(DPU_iter_counter)
DPU_time_per_iter.append(DPU_main_loop_timer / DPU_iter_counter)
DPU_init_times.append(DPU_init_time)
DPU_main_loop_timers.append(DPU_main_loop_timer)
DPU_inter_pim_core_times.append(DPU_main_loop_timer - DPU_kernel_runtime)
DPU_cpu_pim_times.append(DPU_cpu_pim_timer)
DPU_pim_cpu_times.append(DPU_pim_cpu_timer)
DPU_inertia_times.append(DPU_inertia_timer)
DPU_reallocate_times.append(DPU_reallocate_timer)
DPU_train_times.append(DPU_train_timer)
CPU_times.append(CPU_timer)
CPU_iterations.append(CPU_iter_counter)
CPU_time_per_iter.append(CPU_main_loop_timer / CPU_iter_counter)
CPU_main_loop_timers.append(CPU_main_loop_timer)
CPU_preprocessing_times.append(CPU_preprocessing_timer)
CPU_train_times.append(CPU_train_timer)
    # clustering scores: Calinski-Harabasz for the DPU and CPU labelings, plus the
    # adjusted Rand index between the two labelings (cross score)
DPU_scores.append(calinski_harabasz_score(data, DPU_kmeans.labels_))
CPU_scores.append(calinski_harabasz_score(data, CPU_kmeans.labels_))
cross_scores.append(adjusted_rand_score(DPU_kmeans.labels_, CPU_kmeans.labels_))
# creating and exporting the dataframe at each iteration in case we crash early
df = pd.DataFrame(
{
"DPU_times": DPU_times,
"DPU_train_times": DPU_train_times,
"DPU_init_times": DPU_init_times,
"DPU_preprocessing_times": DPU_preprocessing_times,
"DPU_cpu_pim_times": DPU_cpu_pim_times,
"DPU_pim_cpu_times": DPU_pim_cpu_times,
"DPU_inertia_times": DPU_inertia_times,
"DPU_reallocate_times": DPU_reallocate_times,
"DPU_single_kmeans_times": DPU_main_loop_timers,
"DPU_kernel_runtimes": DPU_kernel_runtimes,
"DPU_inter_pim_core_times": DPU_inter_pim_core_times,
"DPU_iterations": DPU_iterations,
"DPU_times_one_iter": DPU_time_per_iter,
"CPU_times": CPU_times,
"CPU_train_times": CPU_train_times,
"CPU_preprocessing_times": CPU_preprocessing_times,
"CPU_single_kmeans_times": CPU_main_loop_timers,
"CPU_iterations": CPU_iterations,
"CPU_times_one_iter": CPU_time_per_iter,
"DPU_scores": DPU_scores,
"CPU_scores": CPU_scores,
"cross_scores": cross_scores,
},
index=n_dpu_set[: i_n_dpu + 1],
)
    df.index = df.index.rename("DPUs")
df.to_pickle("higgs.pkl")
df.to_csv("higgs.csv")
|
{"hexsha": "460c9937bef3be455d60ede3b57c50018ff67082", "size": 6652, "ext": "py", "lang": "Python", "max_stars_repo_path": "benchmarks/higgs/CPU+DPU.py", "max_stars_repo_name": "upmem/dpu_kmeans", "max_stars_repo_head_hexsha": "cd84ca1cd9b49116aeb22c500f7d1c78600b3269", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-10T11:20:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-10T11:20:26.000Z", "max_issues_repo_path": "benchmarks/higgs/CPU+DPU.py", "max_issues_repo_name": "upmem/dpu_kmeans", "max_issues_repo_head_hexsha": "cd84ca1cd9b49116aeb22c500f7d1c78600b3269", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-12-14T23:29:36.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-18T09:30:39.000Z", "max_forks_repo_path": "benchmarks/higgs/CPU+DPU.py", "max_forks_repo_name": "upmem/dpu_kmeans", "max_forks_repo_head_hexsha": "cd84ca1cd9b49116aeb22c500f7d1c78600b3269", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.8277511962, "max_line_length": 98, "alphanum_fraction": 0.6579975947, "include": true, "reason": "import numpy", "num_tokens": 1665}
|
import numpy
class CharsLengthExtractor:
metrics = {
'mean': numpy.mean,
'min': numpy.min,
'max': numpy.max
}
def __init__(self, metric):
self.metric = metric
def dfw(self, nodes, lengths):
for node in nodes:
lengths.append(len(node['chars']))
if 'children' in node:
lengths = self.dfw(node['children'], lengths)
return lengths
def extract(self, ast, params):
lengths = self.dfw(ast, [])
return self.metrics[self.metric](lengths)
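# Usage sketch (the node shape is an assumption inferred from dfw(): each node is
# a dict with a 'chars' string and an optional 'children' list):
#
#   ast = [{'chars': 'abcd', 'children': [{'chars': 'ef'}]}, {'chars': 'g'}]
#   CharsLengthExtractor('mean').extract(ast, params=None)   # numpy.mean([4, 2, 1])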
|
{"hexsha": "4649e2175a08029af59fa7c5d92b6529ac31bc7d", "size": 561, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/FeatureExtraction/Features/CharsLengthExtractor.py", "max_stars_repo_name": "PetukhovVictor/ast2vec", "max_stars_repo_head_hexsha": "f6420d11583caa3e92e0df25fee1d2149da5e7ca", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2018-04-20T07:36:13.000Z", "max_stars_repo_stars_event_max_datetime": "2019-02-03T10:33:39.000Z", "max_issues_repo_path": "lib/FeatureExtraction/Features/CharsLengthExtractor.py", "max_issues_repo_name": "PetukhovVictor/ast2vec", "max_issues_repo_head_hexsha": "f6420d11583caa3e92e0df25fee1d2149da5e7ca", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/FeatureExtraction/Features/CharsLengthExtractor.py", "max_forks_repo_name": "PetukhovVictor/ast2vec", "max_forks_repo_head_hexsha": "f6420d11583caa3e92e0df25fee1d2149da5e7ca", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.5769230769, "max_line_length": 61, "alphanum_fraction": 0.5561497326, "include": true, "reason": "import numpy", "num_tokens": 127}
|
function [R, t] = read4PCSResults(filename)
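% Assumed file layout (not verified against a particular 4PCS build): dlmread
% should yield a numeric matrix whose first three rows contain [R | t], i.e. a
% 3x3 rotation followed by a 3x1 translation in the fourth column.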
M = dlmread(filename);
R = M(1:3, 1:3);
t = M(1:3, 4);
end
|
{"author": "intellhave", "repo": "SDRSAC", "sha": "b081721e9dfd7843d75aa12f30025b2bd7c8f024", "save_path": "github-repos/MATLAB/intellhave-SDRSAC", "path": "github-repos/MATLAB/intellhave-SDRSAC/SDRSAC-b081721e9dfd7843d75aa12f30025b2bd7c8f024/utils/read4PCSResults.m"}
|
program rand_test
use, intrinsic :: iso_fortran_env, only : sp => REAL32, dp => REAL64
use random_mod, only : random_normal_number
implicit none
integer, parameter :: N = 10000
real(kind=dp) :: r
integer :: i
do i = 1, N
call random_normal_number(r)
print *, r
end do
end program rand_test
|
{"hexsha": "f46834121ada0f6c3c6ba261ca4d62e39df40e83", "size": 340, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "Fortran/Types/rand_test.f90", "max_stars_repo_name": "Gjacquenot/training-material", "max_stars_repo_head_hexsha": "16b29962bf5683f97a1072d961dd9f31e7468b8d", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 115, "max_stars_repo_stars_event_min_datetime": "2015-03-23T13:34:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T00:27:21.000Z", "max_issues_repo_path": "Fortran/Types/rand_test.f90", "max_issues_repo_name": "Gjacquenot/training-material", "max_issues_repo_head_hexsha": "16b29962bf5683f97a1072d961dd9f31e7468b8d", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": 56, "max_issues_repo_issues_event_min_datetime": "2015-02-25T15:04:26.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-03T07:42:48.000Z", "max_forks_repo_path": "Fortran/Types/rand_test.f90", "max_forks_repo_name": "Gjacquenot/training-material", "max_forks_repo_head_hexsha": "16b29962bf5683f97a1072d961dd9f31e7468b8d", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": 59, "max_forks_repo_forks_event_min_datetime": "2015-11-26T11:44:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T00:27:22.000Z", "avg_line_length": 22.6666666667, "max_line_length": 72, "alphanum_fraction": 0.6264705882, "num_tokens": 97}
|
C
C $Id: pcsetr.f,v 1.16 2008-07-27 00:17:20 haley Exp $
C
C Copyright (C) 2000
C University Corporation for Atmospheric Research
C All Rights Reserved
C
C The use of this Software is governed by a License Agreement.
C
SUBROUTINE PCSETR (WHCH,RVAL)
C
CHARACTER*(*) WHCH
C
C The subroutine PCSETR may be used to set PLCHHQ parameters which have
C values of type REAL.
C
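C For example (illustrative only):
C       CALL PCSETR ('CS',1.25)
C sets the constant-spacing parameter.  The dispatch below keys on the
C first two characters of WHCH (upper or lower case).
C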
C COMMON block declarations.
C
COMMON /PCPRMS/ ADDS,CONS,DSTB,DSTL,DSTR,DSTT,HPIC(3),IBNU,
+ IBXC(3),IBXF,ICEN,IORD,IOUC,IOUF,IPCC,IQUF,
+ ISHC,ISHF,ITEF,JCOD,LSCI(16),NFCC,NODF,RBXL,
+ RBXM,RBXX,RBXY,ROLW,RPLW,RSLW,SHDX,SHDY,SIZA,
+ SSIC,SSPR,SUBS,VPIC(3),WPIC(3),XBEG,XCEN,XEND,
+ XMUL(3),YBEG,YCEN,YEND,YMUL(3),ZINX,ZINY,ZINZ
SAVE /PCPRMS/
C
COMMON /PCPFLQ/ IMAP,OORV,RHTW
SAVE /PCPFLQ/
C
C Do a call forcing a BLOCKDATA to be loaded from a binary library.
C
CALL PCBLDA
C
C Check for an uncleared prior error.
C
IF (ICFELL('PCSETR - UNCLEARED PRIOR ERROR',1).NE.0) RETURN
C
C Set the selected parameter.
C
IF (WHCH(1:2).EQ.'AS'.OR.WHCH(1:2).EQ.'as') THEN
ADDS=RVAL
ELSE IF (WHCH(1:2).EQ.'BC'.OR.WHCH(1:2).EQ.'bc') THEN
CALL PCGPAI (WHCH,3,IPAI)
IF (IPAI.EQ.0) THEN
IBXC(1)=INT(RVAL)
ELSE IF (IPAI.GE.1.AND.IPAI.LE.3) THEN
IBXC(IPAI)=INT(RVAL)
ELSE
CALL SETER ('PCSETR - BOX COLOR ARRAY INDEX IS OUT OF RANGE',
+ 2,1)
RETURN
END IF
ELSE IF (WHCH(1:2).EQ.'BF'.OR.WHCH(1:2).EQ.'bf') THEN
IBXF=INT(RVAL)
ELSE IF (WHCH(1:2).EQ.'BL'.OR.WHCH(1:2).EQ.'bl') THEN
RBXL=RVAL
ELSE IF (WHCH(1:2).EQ.'BM'.OR.WHCH(1:2).EQ.'bm') THEN
RBXM=RVAL
ELSE IF (WHCH(1:2).EQ.'BX'.OR.WHCH(1:2).EQ.'bx') THEN
RBXX=RVAL
ELSE IF (WHCH(1:2).EQ.'BY'.OR.WHCH(1:2).EQ.'by') THEN
RBXY=RVAL
ELSE IF (WHCH(1:2).EQ.'CC'.OR.WHCH(1:2).EQ.'cc') THEN
CALL PCGPAI (WHCH,3,IPAI)
IF (IPAI.EQ.0) THEN
IPCC=INT(RVAL)
ELSE IF (IPAI.GE.1.AND.IPAI.LE.16) THEN
LSCI(IPAI)=INT(RVAL)
ELSE
CALL SETER ('PCSETR - COLOR ARRAY INDEX IS OUT OF RANGE',3,1)
RETURN
END IF
ELSE IF (WHCH(1:2).EQ.'CD'.OR.WHCH(1:2).EQ.'cd') THEN
JCOD=MAX(0,MIN(1,INT(RVAL)))
ELSE IF (WHCH(1:2).EQ.'CE'.OR.WHCH(1:2).EQ.'ce') THEN
ICEN=MAX(0,MIN(1,INT(RVAL)))
ELSE IF (WHCH(1:2).EQ.'CH'.OR.WHCH(1:2).EQ.'ch') THEN
HPIC(3)=MAX(0.,RVAL)
YMUL(3)=HPIC(3)/9.
ELSE IF (WHCH(1:2).EQ.'CL'.OR.WHCH(1:2).EQ.'cl') THEN
RPLW=MAX(0.,RVAL)
ELSE IF (WHCH(1:2).EQ.'CS'.OR.WHCH(1:2).EQ.'cs') THEN
CONS=RVAL/2.
ELSE IF (WHCH(1:2).EQ.'CV'.OR.WHCH(1:2).EQ.'cv') THEN
VPIC(3)=MAX(0.,RVAL)
ELSE IF (WHCH(1:2).EQ.'CW'.OR.WHCH(1:2).EQ.'cw') THEN
WPIC(3)=MAX(0.,RVAL)
XMUL(3)=WPIC(3)/8.
ELSE IF (WHCH(1:2).EQ.'DO'.OR.WHCH(1:2).EQ.'do') THEN
IORD=MAX(-2,MIN(+2,INT(RVAL)))
IF (IORD.EQ.0) IORD=1
ELSE IF (WHCH(1:2).EQ.'FB'.OR.WHCH(1:2).EQ.'fb') THEN
CALL BCSETR ('FTL',RVAL)
IF (ICFELL('PCSETR',4).NE.0) RETURN
ELSE IF (WHCH(1:2).EQ.'FN'.OR.WHCH(1:2).EQ.'fn') THEN
NODF=ABS(INT(RVAL))
IF ((NODF.GE. 23.AND.NODF.LE. 24).OR.
+ (NODF.GE. 27.AND.NODF.LE. 28).OR.
+ (NODF.GE. 31.AND.NODF.LE. 32).OR.
+ (NODF.GE. 38.AND.NODF.LE.120).OR.
+ (NODF.GE.123.AND.NODF.LE.124).OR.
+ (NODF.GE.127.AND.NODF.LE.128).OR.
+ (NODF.GE.131.AND.NODF.LE.132).OR.NODF.GE.138) NODF=1
ELSE IF (WHCH(1:2).EQ.'HW'.OR.WHCH(1:2).EQ.'hw') THEN
RHTW=RVAL
ELSE IF (WHCH(1:2).EQ.'IH'.OR.WHCH(1:2).EQ.'ih') THEN
HPIC(2)=MAX(0.,RVAL)
YMUL(2)=HPIC(2)/13.
ELSE IF (WHCH(1:2).EQ.'IS'.OR.WHCH(1:2).EQ.'is') THEN
SSIC=MAX(0.,RVAL)
ELSE IF (WHCH(1:2).EQ.'IV'.OR.WHCH(1:2).EQ.'iv') THEN
VPIC(2)=MAX(0.,RVAL)
ELSE IF (WHCH(1:2).EQ.'IW'.OR.WHCH(1:2).EQ.'iw') THEN
WPIC(2)=MAX(0.,RVAL)
XMUL(2)=WPIC(2)/12.
ELSE IF (WHCH(1:2).EQ.'MA'.OR.WHCH(1:2).EQ.'ma') THEN
IMAP=MAX(0,INT(RVAL))
ELSE IF (WHCH(1:2).EQ.'OC'.OR.WHCH(1:2).EQ.'oc') THEN
IOUC=INT(RVAL)
ELSE IF (WHCH(1:2).EQ.'OF'.OR.WHCH(1:2).EQ.'of') THEN
IOUF=MAX(0,MIN(1,INT(RVAL)))
ELSE IF (WHCH(1:2).EQ.'OL'.OR.WHCH(1:2).EQ.'ol') THEN
ROLW=MAX(0.,RVAL)
ELSE IF (WHCH(1:2).EQ.'OR'.OR.WHCH(1:2).EQ.'or') THEN
OORV=RVAL
ELSE IF (WHCH(1:2).EQ.'PH'.OR.WHCH(1:2).EQ.'ph') THEN
HPIC(1)=MAX(0.,RVAL)
YMUL(1)=HPIC(1)/21.
ELSE IF (WHCH(1:2).EQ.'PS'.OR.WHCH(1:2).EQ.'ps') THEN
SSPR=MAX(0.,RVAL)
ELSE IF (WHCH(1:2).EQ.'PV'.OR.WHCH(1:2).EQ.'pv') THEN
VPIC(1)=MAX(0.,RVAL)
ELSE IF (WHCH(1:2).EQ.'PW'.OR.WHCH(1:2).EQ.'pw') THEN
WPIC(1)=MAX(0.,RVAL)
XMUL(1)=WPIC(1)/16.
ELSE IF (WHCH(1:2).EQ.'QU'.OR.WHCH(1:2).EQ.'qu') THEN
IQUF=MAX(0,MIN(2,INT(RVAL)))
ELSE IF (WHCH(1:2).EQ.'SA'.OR.WHCH(1:2).EQ.'sa') THEN
SIZA=MAX(0.,RVAL)
ELSE IF (WHCH(1:2).EQ.'SC'.OR.WHCH(1:2).EQ.'sc') THEN
ISHC=INT(RVAL)
ELSE IF (WHCH(1:2).EQ.'SF'.OR.WHCH(1:2).EQ.'sf') THEN
ISHF=MAX(0,MIN(1,INT(RVAL)))
ELSE IF (WHCH(1:2).EQ.'SL'.OR.WHCH(1:2).EQ.'sl') THEN
RSLW=MAX(0.,RVAL)
ELSE IF (WHCH(1:2).EQ.'SS'.OR.WHCH(1:2).EQ.'ss') THEN
SUBS=RVAL
ELSE IF (WHCH(1:2).EQ.'SX'.OR.WHCH(1:2).EQ.'sx') THEN
SHDX=RVAL
ELSE IF (WHCH(1:2).EQ.'SY'.OR.WHCH(1:2).EQ.'sy') THEN
SHDY=RVAL
ELSE IF (WHCH(1:2).EQ.'TE'.OR.WHCH(1:2).EQ.'te') THEN
ITEF=MAX(0,MIN(1,INT(RVAL)))
ELSE IF (WHCH(1:2).EQ.'UN'.OR.WHCH(1:2).EQ.'un') THEN
IBNU=MAX(1,INT(RVAL))
ELSE IF (WHCH(1:2).EQ.'ZX'.OR.WHCH(1:2).EQ.'zx') THEN
ZINX=MAX(0.,RVAL)
ELSE IF (WHCH(1:2).EQ.'ZY'.OR.WHCH(1:2).EQ.'zy') THEN
ZINY=MAX(0.,RVAL)
ELSE IF (WHCH(1:2).EQ.'ZZ'.OR.WHCH(1:2).EQ.'zz') THEN
ZINZ=MAX(0.,RVAL)
ELSE
CALL SETER ('PCSETR - UNRECOGNIZED PARAMETER NAME',5,1)
RETURN
END IF
C
C Done.
C
RETURN
C
END
|
{"hexsha": "9f7bb4f550f581a4f03bdce746f6208b0f48e91e", "size": 6447, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "ncarg2d/src/libncarg/plotchar/pcsetr.f", "max_stars_repo_name": "tenomoto/ncl", "max_stars_repo_head_hexsha": "a87114a689a1566e9aa03d85bcf6dc7325b47633", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 210, "max_stars_repo_stars_event_min_datetime": "2016-11-24T09:05:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T19:15:32.000Z", "max_issues_repo_path": "ncarg2d/src/libncarg/plotchar/pcsetr.f", "max_issues_repo_name": "tenomoto/ncl", "max_issues_repo_head_hexsha": "a87114a689a1566e9aa03d85bcf6dc7325b47633", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 156, "max_issues_repo_issues_event_min_datetime": "2017-09-22T09:56:48.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T07:02:21.000Z", "max_forks_repo_path": "ncarg2d/src/libncarg/plotchar/pcsetr.f", "max_forks_repo_name": "tenomoto/ncl", "max_forks_repo_head_hexsha": "a87114a689a1566e9aa03d85bcf6dc7325b47633", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 58, "max_forks_repo_forks_event_min_datetime": "2016-12-14T00:15:22.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-15T09:13:00.000Z", "avg_line_length": 37.4825581395, "max_line_length": 71, "alphanum_fraction": 0.5207073057, "num_tokens": 2778}
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.5.2
# kernelspec:
# display_name: Julia 1.5.0
# language: julia
# name: julia-1.5
# ---
using LinearAlgebra
include("SOneTo.jl")
"""
    abstract type StaticArray{S, T, N} <: AbstractArray{T, N} end
    StaticScalar{T} = StaticArray{Tuple{}, T, 0}
    StaticVector{N,T} = StaticArray{Tuple{N}, T, 1}
    StaticMatrix{N,M,T} = StaticArray{Tuple{N, M}, T, 2}
`StaticArray`s are Julia arrays with fixed, known size.
"""
abstract type StaticArray{S <: Tuple, T, N} <: AbstractArray{T, N} end
const StaticScalar{T} = StaticArray{Tuple{}, T, 0}
const StaticVector{N, T} = StaticArray{Tuple{N}, T, 1}
const StaticMatrix{N, M, T} = StaticArray{Tuple{N, M}, T, 2}
const StaticVecOrMat{T} = Union{StaticVector{<:Any, T}, StaticMatrix{<:Any, <:Any, T}}
# +
# Being a member of StaticMatrixLike, StaticVecOrMatLike, or StaticArrayLike implies that Size(A)
# returns a static Size instance (none of the dimensions are Dynamic). The converse may not be true.
# These are akin to aliases like StridedArray and in similarly bad taste, but the current approach
# in Base necessitates their existence.
const StaticMatrixLike{T} = Union{
StaticMatrix{<:Any, <:Any, T},
Transpose{T, <:StaticVecOrMat{T}},
Adjoint{T, <:StaticVecOrMat{T}},
Symmetric{T, <:StaticMatrix{<:Any, <:Any, T}},
Hermitian{T, <:StaticMatrix{<:Any, <:Any, T}},
Diagonal{T, <:StaticVector{<:Any, T}},
# We specifically list *Triangular here rather than using
# AbstractTriangular to avoid ambiguities in size() etc.
UpperTriangular{T, <:StaticMatrix{<:Any, <:Any, T}},
LowerTriangular{T, <:StaticMatrix{<:Any, <:Any, T}},
UnitUpperTriangular{T, <:StaticMatrix{<:Any, <:Any, T}},
UnitLowerTriangular{T, <:StaticMatrix{<:Any, <:Any, T}}
}
const StaticVecOrMatLike{T} = Union{StaticVector{<:Any, T}, StaticMatrixLike{T}}
const StaticArrayLike{T} = Union{StaticVecOrMatLike{T}, StaticArray{<:Tuple, T}}
const AbstractScalar{T} = AbstractArray{T, 0} # not exported, but useful none-the-less
const StaticArrayNoEltype{S, N, T} = StaticArray{S, T, N}
# +
# utils.jl
# -
# For convenience
TupleN{T,N} = NTuple{N,T}
@inline convert_ntuple(::Type{T}, d::T) where {T} = T # For zero-dimensional arrays
@inline convert_ntuple(::Type{T}, d::NTuple{N,T}) where {N,T} = d
@generated function convert_ntuple(::Type{T}, d::NTuple{N,Any}) where {N,T}
exprs = ntuple(i->:(convert(T, d[$i])), Val(N))
return quote
@_inline_meta
$(Expr(:tuple, exprs...))
end
end
# Base gives up on tuples for promote_eltype... (TODO can we improve Base?)
@generated function promote_tuple_eltype(::Union{T,Type{T}}) where T <: Tuple
t = Union{}
for i = 1:length(T.parameters)
tmp = T.parameters[i]
if tmp <: Vararg
tmp = tmp.parameters[1]
end
t = :(promote_type($t, $tmp))
end
return quote
@_inline_meta
$t
end
end
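# For illustration: promote_tuple_eltype(Tuple{Int, Float64}) === Float64
# (the promoted element type across all, possibly Vararg, tuple parameters).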
# The ::Tuple variants exist to make sure anything that calls with a tuple
# instead of a Tuple gets through to the constructor, so the user gets a nice error message
Base.@pure tuple_length(T::Type{<:Tuple}) = length(T.parameters)
Base.@pure tuple_length(T::Tuple) = length(T)
Base.@pure tuple_prod(T::Type{<:Tuple}) = length(T.parameters) == 0 ? 1 : *(T.parameters...)
Base.@pure tuple_prod(T::Tuple) = prod(T)
Base.@pure tuple_minimum(T::Type{<:Tuple}) = length(T.parameters) == 0 ? 0 : minimum(tuple(T.parameters...))
Base.@pure tuple_minimum(T::Tuple) = minimum(T)
"""
size_to_tuple(::Type{S}) where S<:Tuple
Convert a size given by `Tuple{N,M,...}` into a tuple `(N, M, ...)`
"""
Base.@pure function size_to_tuple(::Type{S}) where S<:Tuple
return tuple(S.parameters...)
end
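# For illustration: size_to_tuple(Tuple{3, 4, 5}) === (3, 4, 5)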
# +
# Something doesn't match up type wise
function check_array_parameters(Size, T, N, L)
(!isa(Size, DataType) || (Size.name !== Tuple.name)) && throw(ArgumentError("Static Array parameter Size must be a Tuple type, got $Size"))
!isa(T, Type) && throw(ArgumentError("Static Array parameter T must be a type, got $T"))
    !isa(N.parameters[1], Int) && throw(ArgumentError("Static Array parameter N must be an integer, got $(N.parameters[1])"))
!isa(L.parameters[1], Int) && throw(ArgumentError("Static Array parameter L must be an integer, got $(L.parameters[1])"))
# shouldn't reach here. Anything else should have made it to the function below
error("Internal error. Please file a bug")
end
@generated function check_array_parameters(::Type{Size}, ::Type{T}, ::Type{Val{N}}, ::Type{Val{L}}) where {Size,T,N,L}
if !all(x->isa(x, Int), Size.parameters)
return :(throw(ArgumentError("Static Array parameter Size must be a tuple of Ints (e.g. `SArray{Tuple{3,3}}` or `SMatrix{3,3}`).")))
end
if L != tuple_prod(Size) || L < 0 || tuple_minimum(Size) < 0 || tuple_length(Size) != N
return :(throw(ArgumentError("Size mismatch in Static Array parameters. Got size $Size, dimension $N and length $L.")))
end
return nothing
end
# +
"""
TrivialView
Use to drop static dimensions to override dispatch
"""
struct TrivialView{A, T, N} <: AbstractArray{T, N}
a::A
end
Base.size(a::TrivialView) = size(a.a)
Base.getindex(a::TrivialView, inds...) = getindex(a.a, inds...)
Base.setindex!(a::TrivialView, inds...) = setindex!(a.a, inds...)
Base.IndexStyle(::Type{<:TrivialView{A}}) where {A} = IndexStyle(A)
# +
@inline drop_sdims(a::StaticArrayLike) = TrivialView(a)
@inline drop_sdims(a) = a
Base.@propagate_inbounds function invperm(p::StaticVector)
# in difference to base, this does not check if p is a permutation (every value unique)
ip = similar(p)
ip[p] = 1:length(p)
similar_type(p)(ip)
end
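# Usage sketch (assumes a concrete StaticVector such as SVector from the full
# StaticArrays.jl package; `similar_type` is likewise defined there, not in this file):
#   p = SVector(2, 3, 1)
#   invperm(p)    # SVector(3, 1, 2): entry i gives the position of value i in p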
|
{"hexsha": "a8858d891a3dd032238b4e62957c436287832acd", "size": 5806, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/StaticArray.jl/util.jl", "max_stars_repo_name": "terasakisatoshi/Gomagorithm.jl", "max_stars_repo_head_hexsha": "2a120a8298e011ffd016c88d6626eecd4cd86510", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/StaticArray.jl/util.jl", "max_issues_repo_name": "terasakisatoshi/Gomagorithm.jl", "max_issues_repo_head_hexsha": "2a120a8298e011ffd016c88d6626eecd4cd86510", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/StaticArray.jl/util.jl", "max_forks_repo_name": "terasakisatoshi/Gomagorithm.jl", "max_forks_repo_head_hexsha": "2a120a8298e011ffd016c88d6626eecd4cd86510", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.2179487179, "max_line_length": 143, "alphanum_fraction": 0.6655184292, "num_tokens": 1712}
|
from __future__ import division
from builtins import range
import numpy as np
from numpy import newaxis as na
from functools import wraps
import itertools
from nose.plugins.attrib import attr
from pyhsmm import models as m, distributions as d
##########
# util #
##########
def likelihood_check(obs_distns,trans_matrix,init_distn,data,target_val):
for cls in [m.HMMPython, m.HMM]:
hmm = cls(alpha=6.,init_state_concentration=1, # placeholders
obs_distns=obs_distns)
hmm.trans_distn.trans_matrix = trans_matrix
hmm.init_state_distn.weights = init_distn
hmm.add_data(data)
# test default log_likelihood method
assert np.isclose(target_val, hmm.log_likelihood())
# manual tests of the several message passing methods
states = hmm.states_list[-1]
states.clear_caches()
states.messages_forwards_normalized()
assert np.isclose(target_val,states._normalizer)
states.clear_caches()
states.messages_forwards_log()
assert np.isinf(target_val) or np.isclose(target_val,states._normalizer)
states.clear_caches()
states.messages_backwards_log()
assert np.isinf(target_val) or np.isclose(target_val,states._normalizer)
# test held-out vs in-model
assert np.isclose(target_val, hmm.log_likelihood(data))
def compute_likelihood_enumeration(obs_distns,trans_matrix,init_distn,data):
    """Brute-force reference: log-sum-exp of the joint likelihood over all N**T hidden state sequences."""
N = len(obs_distns)
T = len(data)
Al = np.log(trans_matrix)
aBl = np.hstack([o.log_likelihood(data)[:,na] for o in obs_distns])
tot = -np.inf
for stateseq in itertools.product(range(N),repeat=T):
loglike = 0.
loglike += np.log(init_distn[stateseq[0]])
for a,b in zip(stateseq[:-1],stateseq[1:]):
loglike += Al[a,b]
for t,a in enumerate(stateseq):
loglike += aBl[t,a]
tot = np.logaddexp(tot,loglike)
return tot
def random_model(nstates):
init_distn = np.random.dirichlet(np.ones(nstates))
trans_matrix = np.vstack([np.random.dirichlet(np.ones(nstates)) for i in range(nstates)])
return dict(init_distn=init_distn,trans_matrix=trans_matrix)
def runmultiple(n):
def dec(fn):
@wraps(fn)
def wrapper():
for i in range(n):
yield fn
return wrapper
return dec
###########
# tests #
###########
@attr('hmm','likelihood','messages','basic')
def like_hand_test_1():
likelihood_check(
obs_distns=[d.Categorical(weights=row) for row in np.eye(2)],
trans_matrix=np.eye(2),
init_distn=np.array([1.,0.]),
data=np.zeros(10,dtype=int),
target_val=0.)
@attr('hmm','likelihood','messages','basic','robust')
def like_hand_test_2():
likelihood_check(
obs_distns=[d.Categorical(weights=row) for row in np.eye(2)],
trans_matrix=np.eye(2),
init_distn=np.array([0.,1.]),
data=np.zeros(10,dtype=int),
target_val=-np.inf)
@attr('hmm','likelihood','messages','basic')
def like_hand_test_3():
likelihood_check(
obs_distns=[d.Categorical(weights=row) for row in np.eye(2)],
trans_matrix=np.array([[0.,1.],[1.,0.]]),
init_distn=np.array([1.,0.]),
data=np.tile([0,1],5).astype(int),
target_val=0.)
@attr('hmm','likelihood','messages','basic')
def like_hand_test_4():
likelihood_check(
obs_distns=[d.Categorical(weights=row) for row in np.eye(2)],
trans_matrix=np.array([[0.,1.],[1.,0.]]),
init_distn=np.array([1.,0.]),
data=np.tile([0,1],5).astype(int),
target_val=0.)
@attr('hmm','likelihood','messages','basic')
def like_hand_test_5():
likelihood_check(
obs_distns=[d.Categorical(weights=row) for row in np.eye(2)],
trans_matrix=np.array([[0.9,0.1],[0.2,0.8]]),
init_distn=np.array([1.,0.]),
data=np.tile((0,1),5),
target_val=5*np.log(0.1) + 4*np.log(0.2))
@attr('hmm','likelihood','messages')
@runmultiple(3)
def discrete_exhaustive_test():
model = random_model(2)
obs_distns = [d.Categorical(K=3,alpha_0=1.),d.Categorical(K=3,alpha_0=1.)]
stateseq = np.random.randint(2,size=10)
data = np.array([obs_distns[a].rvs() for a in stateseq])
target_val = compute_likelihood_enumeration(obs_distns=obs_distns,data=data,**model)
likelihood_check(target_val=target_val,data=data,obs_distns=obs_distns,**model)
@attr('hmm','likelihood','messages')
@runmultiple(3)
def gaussian_exhaustive_test():
model = random_model(3)
obs_distns = [
d.Gaussian(mu=np.random.randn(2),sigma=np.eye(2)),
d.Gaussian(mu=np.random.randn(2),sigma=np.eye(2)),
d.Gaussian(mu=np.random.randn(2),sigma=np.eye(2))]
stateseq = np.random.randint(3,size=10)
data = np.vstack([obs_distns[a].rvs() for a in stateseq])
target_val = compute_likelihood_enumeration(obs_distns=obs_distns,data=data,**model)
likelihood_check(target_val=target_val,data=data,obs_distns=obs_distns,**model)
|
{"hexsha": "58af94b5fd8da7161675cfa3db440247328a1eee", "size": 5078, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_hmm_likelihood.py", "max_stars_repo_name": "edfincham/pyhsmm", "max_stars_repo_head_hexsha": "e6cfde5acb98401c2e727ca59a49ee0bfe86cf9d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 436, "max_stars_repo_stars_event_min_datetime": "2015-01-13T01:08:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-12T01:55:19.000Z", "max_issues_repo_path": "tests/test_hmm_likelihood.py", "max_issues_repo_name": "edfincham/pyhsmm", "max_issues_repo_head_hexsha": "e6cfde5acb98401c2e727ca59a49ee0bfe86cf9d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 81, "max_issues_repo_issues_event_min_datetime": "2015-01-11T22:58:34.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-04T23:16:15.000Z", "max_forks_repo_path": "tests/test_hmm_likelihood.py", "max_forks_repo_name": "edfincham/pyhsmm", "max_forks_repo_head_hexsha": "e6cfde5acb98401c2e727ca59a49ee0bfe86cf9d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 181, "max_forks_repo_forks_event_min_datetime": "2015-01-13T01:09:06.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-09T03:45:41.000Z", "avg_line_length": 33.6291390728, "max_line_length": 93, "alphanum_fraction": 0.6522252855, "include": true, "reason": "import numpy,from numpy", "num_tokens": 1337}
|
from __future__ import absolute_import, division, print_function
import math
import itertools
import operator
import pytest
from datetime import datetime, date
from cytoolz import pluck
import datashape
import blaze
from blaze.compute.python import (nunique, mean, rrowfunc, rowfunc,
reduce_by_funcs)
from blaze import dshape, discover
from blaze.compute.core import compute, compute_up
from blaze.expr import (Symbol, by, union, merge, join, count, Distinct,
                        Apply, sum, min, max, any, summary,
                        std, head)
import numpy as np
from blaze import cos, sin
from blaze.compatibility import builtins
from blaze.utils import raises
t = Symbol('t', 'var * {name: string, amount: int, id: int}')
data = [['Alice', 100, 1],
['Bob', 200, 2],
['Alice', 50, 3]]
tbig = Symbol('tbig', 'var * {name: string, sex: string[1], amount: int, id: int}')
databig = [['Alice', 'F', 100, 1],
['Alice', 'F', 100, 3],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'M', 200, 5]]
def test_dispatched_rowfunc():
cw = t['amount'] + 100
assert rowfunc(t)(t) == t
assert rowfunc(cw)(('Alice', 100, 1)) == 200
def test_reduce_by_funcs():
e = summary(number=t.id.max(), sum=t.amount.sum())
b = by(t, e)
assert reduce_by_funcs(b)[2]([1,2,3], [4,5,6]) == (1, 7)
def test_symbol():
assert compute(t, data) == data
def test_projection():
assert list(compute(t['name'], data)) == [x[0] for x in data]
def test_eq():
assert list(compute(t['amount'] == 100, data)) == [x[1] == 100 for x in data]
def test_selection():
assert list(compute(t[t['amount'] == 0], data)) == \
[x for x in data if x[1] == 0]
assert list(compute(t[t['amount'] > 150], data)) == \
[x for x in data if x[1] > 150]
def test_arithmetic():
assert list(compute(t['amount'] + t['id'], data)) == \
[b + c for a, b, c, in data]
assert list(compute(t['amount'] * t['id'], data)) == \
[b * c for a, b, c, in data]
assert list(compute(t['amount'] % t['id'], data)) == \
[b % c for a, b, c, in data]
def test_unary_ops():
for op in ('cos', 'sin', 'exp', 'ceil', 'floor', 'trunc', 'isnan'):
f = getattr(blaze, op)
pyf = getattr(math, op)
result = list(compute(f(t['amount']), data))
assert result == [pyf(x[1]) for x in data]
def test_neg():
assert list(compute(-t['amount'], data)) == [-x[1] for x in data]
def test_reductions():
assert compute(sum(t['amount']), data) == 100 + 200 + 50
assert compute(min(t['amount']), data) == 50
assert compute(max(t['amount']), data) == 200
assert compute(nunique(t['amount']), data) == 3
assert compute(nunique(t['name']), data) == 2
assert compute(count(t['amount']), data) == 3
assert compute(any(t['amount'] > 150), data) is True
assert compute(any(t['amount'] > 250), data) is False
def test_1d_reductions_keepdims():
for r in [sum, min, max, nunique, count]:
assert compute(r(t.amount, keepdims=True), data) == \
(compute(r(t.amount), data),)
def test_count():
t = Symbol('t', '3 * int')
assert compute(t.count(), [1, None, 2]) == 2
def reduction_runner(funcs):
from blaze.compatibility import builtins as bts
exprs = sum, min, max
for blaze_expr, py_func in itertools.product(exprs, funcs):
f = getattr(operator, py_func)
reduc_f = getattr(bts, blaze_expr.__name__)
ground_truth = f(reduc_f([100, 200, 50]), 5)
assert compute(f(blaze_expr(t['amount']), 5), data) == ground_truth
def test_reduction_arithmetic():
funcs = 'add', 'mul'
reduction_runner(funcs)
def test_reduction_compare():
funcs = 'eq', 'ne', 'lt', 'gt', 'le', 'ge'
reduction_runner(funcs)
def test_mean():
assert compute(mean(t['amount']), data) == float(100 + 200 + 50) / 3
assert 50 < compute(std(t['amount']), data) < 100
def test_std():
amt = [row[1] for row in data]
assert np.allclose(compute(t.amount.std(), data), np.std(amt))
assert np.allclose(compute(t.amount.std(unbiased=True), data),
np.std(amt, ddof=1))
assert np.allclose(compute(t.amount.var(), data), np.var(amt))
assert np.allclose(compute(t.amount.var(unbiased=True), data),
np.var(amt, ddof=1))
def test_by_no_grouper():
names = t['name']
assert set(compute(by(names, names.count()), data)) == \
set([('Alice', 2), ('Bob', 1)])
def test_by_one():
print(compute(by(t['name'], t['amount'].sum()), data))
assert set(compute(by(t['name'], t['amount'].sum()), data)) == \
set([('Alice', 150), ('Bob', 200)])
def test_by_compound_apply():
print(compute(by(t['name'], (t['amount'] + 1).sum()), data))
assert set(compute(by(t['name'], (t['amount'] + 1).sum()), data)) == \
set([('Alice', 152), ('Bob', 201)])
def test_by_two():
result = compute(by(tbig[['name', 'sex']], tbig['amount'].sum()),
databig)
expected = [('Alice', 'F', 200),
('Drew', 'F', 100),
('Drew', 'M', 300)]
print(set(result))
assert set(result) == set(expected)
def test_by_three():
result = compute(by(tbig[['name', 'sex']],
(tbig['id'] + tbig['amount']).sum()),
databig)
expected = [('Alice', 'F', 204),
('Drew', 'F', 104),
('Drew', 'M', 310)]
print(result)
assert set(result) == set(expected)
def test_works_on_generators():
assert list(compute(t['amount'], iter(data))) == \
[x[1] for x in data]
assert list(compute(t['amount'], (i for i in data))) == \
[x[1] for x in data]
def test_join():
left = [['Alice', 100], ['Bob', 200]]
right = [['Alice', 1], ['Bob', 2]]
L = Symbol('L', 'var * {name: string, amount: int}')
R = Symbol('R', 'var * {name: string, id: int}')
joined = join(L, R, 'name')
assert dshape(joined.schema) == \
dshape('{name: string, amount: int, id: int}')
result = list(compute(joined, {L: left, R: right}))
expected = [('Alice', 100, 1), ('Bob', 200, 2)]
assert result == expected
def test_outer_join():
left = [(1, 'Alice', 100),
(2, 'Bob', 200),
(4, 'Dennis', 400)]
right = [('NYC', 1),
('Boston', 1),
('LA', 3),
('Moscow', 4)]
L = Symbol('L', 'var * {id: int, name: string, amount: real}')
R = Symbol('R', 'var * {city: string, id: int}')
assert set(compute(join(L, R), {L: left, R: right})) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(4, 'Dennis', 400, 'Moscow')])
assert set(compute(join(L, R, how='left'), {L: left, R: right})) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, None),
(4, 'Dennis', 400, 'Moscow')])
assert set(compute(join(L, R, how='right'), {L: left, R: right})) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(3, None, None, 'LA'),
(4, 'Dennis', 400, 'Moscow')])
assert set(compute(join(L, R, how='outer'), {L: left, R: right})) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, None),
(3, None, None, 'LA'),
(4, 'Dennis', 400, 'Moscow')])
def test_multi_column_join():
left = [(1, 2, 3),
(2, 3, 4),
(1, 3, 5)]
right = [(1, 2, 30),
(1, 3, 50),
(1, 3, 150)]
L = Symbol('L', 'var * {x: int, y: int, z: int}')
R = Symbol('R', 'var * {x: int, y: int, w: int}')
j = join(L, R, ['x', 'y'])
print(list(compute(j, {L: left, R: right})))
assert list(compute(j, {L: left, R: right})) == [(1, 2, 3, 30),
(1, 3, 5, 50),
(1, 3, 5, 150)]
@pytest.mark.xfail(reason="This doesn't necessarily make sense")
def test_column_of_column():
assert list(compute(t['name']['name'], data)) == \
list(compute(t['name'], data))
def test_Distinct():
assert set(compute(Distinct(t['name']), data)) == set(['Alice', 'Bob'])
assert set(compute(Distinct(t), data)) == set(map(tuple, data))
e = Distinct(t)
assert compute(e, []) == ()
def test_Distinct_count():
t2 = t['name'].distinct()
gby = by(t2, t2.count())
result = set(compute(gby, data))
assert result == set([('Alice', 1), ('Bob', 1)])
def test_sort():
assert list(compute(t.sort('amount'), data)) == \
sorted(data, key=lambda x: x[1], reverse=False)
assert list(compute(t.sort('amount', ascending=True), data)) == \
sorted(data, key=lambda x: x[1], reverse=False)
assert list(compute(t.sort(['amount', 'id']), data)) == \
sorted(data, key=lambda x: (x[1], x[2]), reverse=False)
def test_fancy_sort():
assert list(compute(t.sort(t['amount']), data)) ==\
list(compute(t.sort('amount'), data))
assert list(compute(t.sort(t[['amount', 'id']]), data)) ==\
list(compute(t.sort(['amount', 'id']), data))
assert list(compute(t.sort(0-t['amount']), data)) ==\
list(compute(t.sort('amount'), data))[::-1]
def test_head():
assert list(compute(t.head(1), data)) == [data[0]]
e = head(t, 101)
p = list(range(1000))
assert len(list(compute(e, p))) == 101
def test_graph_double_join():
idx = [['A', 1],
['B', 2],
['C', 3],
['D', 4],
['E', 5],
['F', 6]]
arc = [[1, 3],
[2, 3],
[4, 3],
[5, 3],
[3, 1],
[2, 1],
[5, 1],
[1, 6],
[2, 6],
[4, 6]]
wanted = [['A'],
['F']]
t_idx = Symbol('t_idx', 'var * {name: string, b: int32}')
t_arc = Symbol('t_arc', 'var * {a: int32, b: int32}')
t_wanted = Symbol('t_wanted', 'var * {name: string}')
j = join(join(t_idx, t_arc, 'b'), t_wanted, 'name')[['name', 'b', 'a']]
result = compute(j, {t_idx: idx, t_arc: arc, t_wanted: wanted})
result = sorted(map(tuple, result))
expected = sorted([('A', 3, 1),
('A', 2, 1),
('A', 5, 1),
('F', 1, 6),
('F', 2, 6),
('F', 4, 6)])
assert result == expected
def test_label():
assert list(compute((t['amount'] * 1).label('foo'), data)) == \
list(compute((t['amount'] * 1), data))
def test_relabel_join():
names = Symbol('names', 'var * {first: string, last: string}')
siblings = join(names.relabel({'first': 'left'}),
names.relabel({'first': 'right'}),
'last')[['left', 'right']]
data = [('Alice', 'Smith'),
('Bob', 'Jones'),
('Charlie', 'Smith')]
print(set(compute(siblings, {names: data})))
assert ('Alice', 'Charlie') in set(compute(siblings, {names: data}))
assert ('Alice', 'Bob') not in set(compute(siblings, {names: data}))
def test_map_column():
inc = lambda x: x + 1
assert list(compute(t['amount'].map(inc, 'int'), data)) == [x[1] + 1 for x in data]
def test_map():
assert (list(compute(t.map(lambda _, amt, id: amt + id, 'int'), data)) ==
[x[1] + x[2] for x in data])
def test_apply_column():
result = compute(Apply(t['amount'], builtins.sum), data)
expected = compute(t['amount'].sum(), data)
assert result == expected
def test_apply():
data2 = tuple(map(tuple, data))
assert compute(Apply(t, hash), data2) == hash(data2)
def test_map_datetime():
from datetime import datetime
data = [['A', 0], ['B', 1]]
t = Symbol('t', 'var * {foo: string, datetime: int64}')
result = list(compute(t['datetime'].map(datetime.utcfromtimestamp,
'datetime'), data))
expected = [datetime(1970, 1, 1, 0, 0, 0), datetime(1970, 1, 1, 0, 0, 1)]
assert result == expected
def test_by_multi_column_grouper():
t = Symbol('t', 'var * {x: int, y: int, z: int}')
expr = by(t[['x', 'y']], t['z'].count())
data = [(1, 2, 0), (1, 2, 0), (1, 1, 0)]
print(set(compute(expr, data)))
assert set(compute(expr, data)) == set([(1, 2, 2), (1, 1, 1)])
def test_merge():
col = (t['amount'] * 2).label('new')
expr = merge(t['name'], col)
assert list(compute(expr, data)) == [(row[0], row[1] * 2) for row in data]
def test_map_columnwise():
colwise = t['amount'] * t['id']
expr = colwise.map(lambda x: x / 10, 'int64', name='mod')
assert list(compute(expr, data)) == [((row[1]*row[2]) / 10) for row in data]
def test_map_columnwise_of_selection():
tsel = t[t['name'] == 'Alice']
colwise = tsel['amount'] * tsel['id']
expr = colwise.map(lambda x: x / 10, 'int64', name='mod')
assert list(compute(expr, data)) == [((row[1]*row[2]) / 10) for row in data[::2]]
def test_selection_out_of_order():
expr = t['name'][t['amount'] < 100]
assert list(compute(expr, data)) == ['Alice']
def test_recursive_rowfunc():
f = rrowfunc(t['name'], t)
assert [f(row) for row in data] == [row[0] for row in data]
f = rrowfunc(t['amount'] + t['id'], t)
assert [f(row) for row in data] == [row[1] + row[2] for row in data]
assert raises(Exception, lambda: rrowfunc(t[t['amount'] < 0]['name'], t))
def test_recursive_rowfunc_is_used():
expr = by(t['name'], (2 * (t['amount'] + t['id'])).sum())
expected = [('Alice', 2*(101 + 53)),
('Bob', 2*(202))]
assert set(compute(expr, data)) == set(expected)
class TestFunctionExpressions(object):
def test_compound(self):
s = t.amount.mean()
r = compute(s, data)
assert isinstance(r, float)
expr = cos(s) ** 2 + sin(s) ** 2
result = compute(expr, data)
expected = math.cos(r) ** 2 + math.sin(r) ** 2
assert result == expected
def test_user_defined_function(self):
s = t.amount.count()
r = compute(s, data)
assert isinstance(r, int)
def myfunc(x):
return (cos(x) + sin(x)) ** 2 / math.pi
result = compute(myfunc(s), data)
expected = (math.cos(r) + math.sin(r)) ** 2 / math.pi
assert result == expected
def test_user_defined_calls(self):
s = t.amount.count()
r = compute(s, data)
def myother(y):
return 2 + y ** 10
def myfunc(x):
return myother((cos(x) + sin(x)) ** 2 / math.pi)
result = compute(myfunc(s), data)
expected = myother((math.cos(r) + math.sin(r)) ** 2 / math.pi)
assert result == expected
def test_union():
L1 = [['Alice', 100, 1],
['Bob', 200, 2],
['Alice', 50, 3]]
L2 = [['Alice', 100, 4],
['Bob', 200, 5],
['Alice', 50, 6]]
L3 = [['Alice', 100, 7],
['Bob', 200, 8],
['Alice', 50, 9]]
t1 = Symbol('t1', 'var * {name: string, amount: int, id: int}')
t2 = Symbol('t2', 'var * {name: string, amount: int, id: int}')
t3 = Symbol('t3', 'var * {name: string, amount: int, id: int}')
expr = union(t1, t2, t3)
result = list(compute(expr, {t1: L1, t2: L2, t3: L3}))
assert result == L1 + L2 + L3
def test_by_groupby_deep():
data = [(1, 2, 'Alice'),
(1, 3, 'Bob'),
(2, 4, 'Alice'),
(2, 4, '')]
schema = '{x: int, y: int, name: string}'
t = Symbol('t', datashape.var * schema)
t2 = t[t['name'] != '']
t3 = merge(t2.x, t2.name)
expr = by(t3.name, t3.x.mean())
result = set(compute(expr, data))
assert result == set([('Alice', 1.5), ('Bob', 1.0)])
def test_by_then_sort_dict_items_sequence():
expr = by(tbig.name, tbig.amount.sum()).sort('name')
assert compute(expr, databig)
def test_summary():
expr = summary(count=t.id.count(), sum=t.amount.sum())
assert compute(expr, data) == (3, 350)
assert compute(expr, iter(data)) == (3, 350)
def test_summary_keepdims():
assert compute(summary(count=t.id.count(), sum=t.amount.sum(),
keepdims=True), data) == \
(compute(summary(count=t.id.count(), sum=t.amount.sum(),
keepdims=False), data),)
def test_summary_by():
expr = by(t.name, summary(count=t.id.count(), sum=t.amount.sum()))
assert set(compute(expr, data)) == set([('Alice', 2, 150),
('Bob', 1, 200)])
expr = by(t.name, summary(count=t.id.count(), sum=(t.amount + 1).sum()))
assert set(compute(expr, data)) == set([('Alice', 2, 152),
('Bob', 1, 201)])
expr = by(t.name, summary(count=t.id.count(), sum=t.amount.sum() + 1))
assert set(compute(expr, data)) == set([('Alice', 2, 151),
('Bob', 1, 201)])
def test_reduction_then_arithmetic():
expr = t.amount.sum() + 1
assert compute(expr, data) == 351
def test_scalar_arithmetic():
x = Symbol('x', 'real')
y = Symbol('y', 'real')
assert compute(x + y, {x: 2, y: 3}) == 5
assert compute_up(x + y, 2, 3) == 5
assert compute_up(x * y, 2, 3) == 6
assert compute_up(x / y, 6, 3) == 2
assert compute_up(x % y, 4, 3) == 1
assert compute_up(x ** y, 4, 3) == 64
assert compute(x + 1, {x: 2}) == 3
assert compute(x * 2, {x: 2}) == 4
assert compute(1 + x, {x: 2}) == 3
assert compute(2 * x, {x: 2}) == 4
assert compute_up(-x, 1) == -1
assert compute_up(blaze.sin(x), 1) == math.sin(1)
def test_like():
t = Symbol('t', 'var * {name: string, city: string}')
data = [('Alice Smith', 'New York'),
('Bob Smith', 'Chicago'),
('Alice Walker', 'LA')]
assert list(compute(t.like(name='Alice*'), data)) == [data[0], data[2]]
assert list(compute(t.like(name='lice*'), data)) == []
assert list(compute(t.like(name='*Smith*'), data)) == [data[0], data[1]]
assert list(compute(t.like(name='*Smith*', city='New York'), data)) == [data[0]]
def test_datetime_comparison():
data = [['Alice', date(2000, 1, 1)],
['Bob', date(2000, 2, 2)],
['Alice', date(2000, 3, 3)]]
t = Symbol('t', 'var * {name: string, when: date}')
assert list(compute(t[t.when > '2000-01-01'], data)) == data[1:]
def test_datetime_access():
data = [['Alice', 100, 1, datetime(2000, 1, 1, 1, 1, 1)],
['Bob', 200, 2, datetime(2000, 1, 1, 1, 1, 1)],
['Alice', 50, 3, datetime(2000, 1, 1, 1, 1, 1)]]
t = Symbol('t',
'var * {amount: float64, id: int64, name: string, when: datetime}')
assert list(compute(t.when.year, data)) == [2000, 2000, 2000]
assert list(compute(t.when.second, data)) == [1, 1, 1]
assert list(compute(t.when.date, data)) == [date(2000, 1, 1)] * 3
payments = [{'name': 'Alice', 'payments': [
{'amount': 100, 'when': datetime(2000, 1, 1, 1, 1 ,1)},
{'amount': 200, 'when': datetime(2000, 2, 2, 2, 2, 2)}
]},
{'name': 'Bob', 'payments': [
{'amount': 300, 'when': datetime(2000, 3, 3, 3, 3 ,3)},
{'amount': -400, 'when': datetime(2000, 4, 4, 4, 4, 4)},
{'amount': 500, 'when': datetime(2000, 5, 5, 5, 5, 5)}
]},
]
payments_ordered = [('Alice', [( 100, datetime(2000, 1, 1, 1, 1 ,1)),
( 200, datetime(2000, 2, 2, 2, 2, 2))]),
('Bob', [( 300, datetime(2000, 3, 3, 3, 3 ,3)),
(-400, datetime(2000, 4, 4, 4, 4, 4)),
( 500, datetime(2000, 5, 5, 5, 5, 5))])]
payment_dshape = 'var * {name: string, payments: var * {amount: int32, when: datetime}}'
def test_nested():
t = Symbol('t', payment_dshape)
assert list(compute(t.name, payments_ordered)) == ['Alice', 'Bob']
assert list(compute(t.payments, payments_ordered)) == \
[p[1] for p in payments_ordered]
assert list(compute(t.payments.amount, payments_ordered)) == \
[(100, 200), (300, -400, 500)]
assert list(compute(t.payments.amount + 1, payments_ordered)) ==\
[(101, 201), (301, -399, 501)]
def test_scalar():
s = Symbol('s', '{name: string, id: int32, payments: var * {amount: int32, when: datetime}}')
data = ('Alice', 1, ((100, datetime(2000, 1, 1, 1, 1 ,1)),
(200, datetime(2000, 2, 2, 2, 2, 2)),
(300, datetime(2000, 3, 3, 3, 3, 3))))
assert compute(s.name, data) == 'Alice'
assert compute(s.id + 1, data) == 2
assert tuple(compute(s.payments.amount, data)) == (100, 200, 300)
assert tuple(compute(s.payments.amount + 1, data)) == (101, 201, 301)
def test_slice():
assert compute(t[0], data) == data[0]
assert list(compute(t[:2], data)) == list(data[:2])
assert list(compute(t.name[:2], data)) == [data[0][0], data[1][0]]
|
{"hexsha": "b7714909cad5ccdaf4556dd21b1f0b30b019b556", "size": 21384, "ext": "py", "lang": "Python", "max_stars_repo_path": "blaze/compute/tests/test_python_compute.py", "max_stars_repo_name": "vitan/blaze", "max_stars_repo_head_hexsha": "0cddb630ad1cf6be3967943337529adafa006ef5", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2015-11-06T00:46:56.000Z", "max_stars_repo_stars_event_max_datetime": "2015-11-06T00:46:56.000Z", "max_issues_repo_path": "blaze/compute/tests/test_python_compute.py", "max_issues_repo_name": "vitan/blaze", "max_issues_repo_head_hexsha": "0cddb630ad1cf6be3967943337529adafa006ef5", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "blaze/compute/tests/test_python_compute.py", "max_forks_repo_name": "vitan/blaze", "max_forks_repo_head_hexsha": "0cddb630ad1cf6be3967943337529adafa006ef5", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.724137931, "max_line_length": 97, "alphanum_fraction": 0.5153385709, "include": true, "reason": "import numpy", "num_tokens": 6618}
|
using Metrics
using Test
using Random
Random.seed!(0)
@testset "Metrics.jl" begin
include("regression.jl")
include("classification.jl")
include("rank.jl")
include("nlp.jl")
include("cv.jl")
end
|
{"hexsha": "33d45f41b14b52876900033ee0ed0ae4577fd1d6", "size": 216, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "yuehhua/Metrics.jl", "max_stars_repo_head_hexsha": "6dc6fd6155afe551dd6424debdf7f034e68acb29", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2020-06-02T14:09:40.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-23T00:08:26.000Z", "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "yuehhua/Metrics.jl", "max_issues_repo_head_hexsha": "6dc6fd6155afe551dd6424debdf7f034e68acb29", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-12-22T06:28:21.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-24T02:47:49.000Z", "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "yuehhua/Metrics.jl", "max_forks_repo_head_hexsha": "6dc6fd6155afe551dd6424debdf7f034e68acb29", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-08-13T11:32:34.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-22T06:04:19.000Z", "avg_line_length": 15.4285714286, "max_line_length": 32, "alphanum_fraction": 0.6712962963, "num_tokens": 62}
|
""" Code to train diagnosis classification models. """
from dermosxai import datasets
from dermosxai import transforms
from dermosxai import utils
from dermosxai import models
from dermosxai import train_abl
import numpy as np
import h5py
from os import path
# Set directory to save results
DDSM_dir = '/src/dermosxai/data/DDSM/classifiers'
HAM10000_dir = '/src/dermosxai/data/HAM10000/classifiers'
def train_linear(X, y, wds=np.logspace(-4, 4, 17)):
""" Train linear models (sklearn.linear_model).
    Binary classification uses a sigmoid activation function to produce the final probability
    of the positive class, while multi-class classification (>2 classes) uses a softmax. Thus, for
    binary classification the model will only learn weights for one class.
    Arguments:
        X (np.array): Dataset (num_examples x num_features).
        y (np.array): Targets (num_examples) as categorical variables (0 to num_classes - 1).
        wds (list of floats): Regularization strengths to try.
    Returns:
        weights (np.array): A num_regs x num_classes x num_features array with the weights
            for each regularization strength.
        biases (np.array): A num_regs x num_classes array with the biases.
"""
from sklearn import linear_model
from sklearn import exceptions
import warnings
weights = []
biases = []
for reg in wds:
model = linear_model.LogisticRegression(C=1/reg, max_iter=500)
with warnings.catch_warnings():
warnings.simplefilter("ignore", exceptions.ConvergenceWarning)
model.fit(X, y)
# Save weights and biases
weights.append(model.coef_)
biases.append(model.intercept_)
weights = np.stack(weights)
biases = np.stack(biases)
return weights, biases
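# Illustrative sketch (not called anywhere in this module): fits the regularization sweep
# above on synthetic data. Shapes and values below are made up for demonstration only.
def _train_linear_example():
    rng = np.random.default_rng(0)
    X = rng.normal(size=(100, 8))             # 100 examples, 8 features
    y = (X[:, 0] > 0).astype(int)             # binary targets derived from the first feature
    weights, biases = train_linear(X, y, wds=np.logspace(-2, 2, 5))
    # weights: (5 regs x 1 class x 8 features), biases: (5 regs x 1 class) in the binary case
    return weights.shape, biases.shape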
def extract_features(feature_extractor, dset, avgpool=True, batch_size=256):
""" Pass images in a dataset through a feature extractor.
Arguments:
feature_extractor (nn.Module): Receives an image and maps it to a feature vector.
        dset (torch.Dataset): Dataset with (image, label) pairs.
        avgpool (bool): Whether to average any extra dimensions output by the feature
            extractor, usually the spatial x, y dimensions.
        batch_size (int): Batch size to use when extracting features.
    Returns:
        features (np.array): Extracted features (num_examples x num_features).
"""
import torch
from torch.utils import data
# Create dloader
dloader = data.DataLoader(dset, batch_size=batch_size, num_workers=4)
# Extract features
features = []
with torch.no_grad():
for im, _ in dloader:
feats = feature_extractor(im)
if feats.ndim > 2 and avgpool:
feats = feats.mean(dim=tuple(range(2, feats.ndim)))
features.append(feats)
features = torch.concat(features).cpu().numpy()
return features
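# Illustrative sketch (not called anywhere in this module): runs extract_features on a tiny
# in-memory dataset with an arbitrary convolution standing in for the feature extractor.
# All shapes and modules below are hypothetical.
def _extract_features_example():
    import torch
    from torch import nn
    from torch.utils import data
    images = torch.randn(10, 3, 8, 8)                   # 10 fake RGB images
    labels = torch.zeros(10, dtype=torch.long)
    dset = data.TensorDataset(images, labels)
    extractor = nn.Conv2d(3, 4, kernel_size=3)          # any module mapping image -> features
    feats = extract_features(extractor, dset, avgpool=True, batch_size=4)
    return feats.shape                                  # (10, 4) after spatial average-pooling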
def compute_metrics_linear(weights, biases, X, y):
""" Compute metrics for a linear model.
    If the weight matrix has a single row (one class), binary classification is assumed
    (a sigmoid predicts the probability of the positive class); in the multi-class setting,
    a softmax over classes is used.
Arguments:
weights (np.array): Coefficients of the linear model (num_classes x num_features).
biases (np.array): Bias (num_classes)
X (np.array): Dataset (num_examples x num_features)
y (np.array): Targets (num_examples)
    Returns:
        metrics (np.array): Array with metrics (as returned by utils.compute_metrics).
"""
from scipy import special
linear_output = np.dot(X, weights.T) + biases # num_examples x num_classes
if linear_output.shape[-1] == 1: # binary case
pos_probs = special.expit(linear_output) # sigmoid
probs = np.concatenate([1-pos_probs, pos_probs], axis=-1)
else:
probs = special.softmax(linear_output, axis=-1)
# Compute metrics
metrics = np.array(utils.compute_metrics(probs, y))
return metrics
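# Illustrative sketch (not called anywhere in this module): mirrors the probability
# construction above on toy numbers so the binary-vs-multiclass branch is easy to follow.
def _probability_construction_example():
    from scipy import special
    z_binary = np.array([[0.0], [2.0]])                        # 2 examples x 1 class
    pos = special.expit(z_binary)                               # sigmoid -> P(class 1)
    probs_binary = np.concatenate([1 - pos, pos], axis=-1)      # 2 examples x 2 classes
    z_multi = np.array([[1.0, 0.0, -1.0]])                      # 1 example x 3 classes
    probs_multi = special.softmax(z_multi, axis=-1)             # each row sums to 1
    return probs_binary, probs_multi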
def train_linear_on_resnet(train_dset, val_dset, save_dir):
""" Train linear models on top of ResNet features.
    Images are sent through the ResNet and the resulting features are normalized. Trains
    models across 4 ResNet depths/blocks and a number of regularization strengths, evaluates
    them on the training and validation sets and saves the results in an h5 file.
Arguments:
        train_dset, val_dset (torch.Dataset): Datasets with (image, label) pairs. They will
            be sent to the ResNet to extract features.
save_dir (string): Path to the folder where the trained models (and training/val
metrics) will be saved.
"""
resnet_blocks = [1, 2, 3, 4]
reg_values = np.logspace(-4, 4, 17)
results = []
for resnet_block in resnet_blocks:
utils.tprint(f'Creating ResNet features for resnet_block {resnet_block}')
# Get ResNet
resnet = models.ResNetBase(num_blocks=resnet_block, pretrained=True)
resnet.eval()
#resnet.cuda()
# Get resnet features
train_features = extract_features(resnet, train_dset)
val_features = extract_features(resnet, val_dset)
        # Normalize features
train_mean, train_std = train_features.mean(0), train_features.std(0)
train_features = (train_features - train_mean) / train_std
val_features = (val_features - train_mean) / train_std
# Get labels
train_labels = train_dset.labels
val_labels = val_dset.labels
# Train models
utils.tprint('Training linear models...')
weights, biases = train_linear(train_features, train_labels, reg_values)
# Evaluate
utils.tprint('Evaluating...')
train_metrics = np.stack([
compute_metrics_linear(w, b, train_features, train_labels)
for w, b in zip(weights, biases)])
val_metrics = np.stack([
compute_metrics_linear(w, b, val_features, val_labels)
for w, b in zip(weights, biases)])
# Save results
results.append({
'weights': weights, 'biases': biases, 'train_metrics': train_metrics,
'val_metrics': val_metrics})
train_metrics = np.stack([r['train_metrics'] for r in results]) # num_resnets x num_regs x num_metrics
val_metrics = np.stack([r['val_metrics'] for r in results]) # num_resnets x num_regs x num_metrics
# Save
utils.tprint('Saving models...')
with h5py.File(path.join(save_dir, 'linear_on_resnet.h5'), 'w') as f:
        f.create_dataset('resnet_blocks', data=resnet_blocks, dtype=np.int64)
f.create_dataset('reg_values', data=reg_values)
f.create_dataset('train_metrics', data=train_metrics)
f.create_dataset('val_metrics', data=val_metrics)
for i, res in enumerate(results):
f.create_dataset(f'{i}/weights', data=res['weights'])
f.create_dataset(f'{i}/biases', data=res['biases'])
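# Illustrative sketch (not called anywhere in this module): how the h5 file written above
# could be read back. The save_dir default below is a hypothetical placeholder path.
def _read_linear_on_resnet_example(save_dir='/tmp/classifiers'):
    with h5py.File(path.join(save_dir, 'linear_on_resnet.h5'), 'r') as f:
        val_metrics = f['val_metrics'][()]       # num_resnets x num_regs x num_metrics
        weights_block0 = f['0/weights'][()]      # weight sweep for the first ResNet depth
    return val_metrics.shape, weights_block0.shape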
def train_linear_on_human(train_dset, val_dset, abl_model, save_dir):
""" Train a linear model on top of predicted human attributes.
    The AbL model is used to obtain the probabilities per attribute; these are concatenated
    into a single probability vector. Trains models across a number of regularization
    strengths, evaluates them on the training and validation sets and saves the results in an
    h5 file.
Arguments:
        train_dset, val_dset (torch.Dataset): Datasets with (image, label) pairs. They will
            be sent to the attribute predictor to extract features.
abl_model (nn.Module): Attribute predictor model. Receives an image, predicts a
list with the pre-softmax logits per attribute.
save_dir (string): Path to the folder where the trained models (and training/val
metrics) will be saved.
"""
from torch import nn
utils.tprint('Predicting human attributes...')
abl_model = nn.Sequential(abl_model, models.SoftmaxPlusConcat())
abl_model.eval()
#abl_model.cuda()
# Get features
train_features = extract_features(abl_model, train_dset)
val_features = extract_features(abl_model, val_dset)
# Get labels
train_labels = train_dset.labels
val_labels = val_dset.labels
# Train models
utils.tprint('Training linear models...')
reg_values = np.logspace(-4, 4, 17)
weights, biases = train_linear(train_features, train_labels, reg_values)
# Evaluate
utils.tprint('Evaluating...')
train_metrics = np.stack([compute_metrics_linear(w, b, train_features, train_labels)
for w, b in zip(weights, biases)])
val_metrics = np.stack([compute_metrics_linear(w, b, val_features, val_labels)
for w, b in zip(weights, biases)])
# Save
utils.tprint('Saving models...')
with h5py.File(path.join(save_dir, 'linear_on_human.h5'), 'w') as f:
f.create_dataset('reg_values', data=reg_values)
f.create_dataset('train_metrics', data=train_metrics)
f.create_dataset('val_metrics', data=val_metrics)
f.create_dataset('weights', data=weights)
f.create_dataset('biases', data=biases)
def train_linear_on_joint(train_dset, val_dset, abl_model, save_dir):
""" Train a linear model on top of resnet_features AND predicted human attributes.
Concatenates resnet features (after normalization) with the predicted human attribute
    probabilities. Trains models across different ResNet depths/blocks and a number of
    regularization strengths, evaluates them on the training and validation sets and saves the
    results in an h5 file.
Arguments:
        train_dset, val_dset (torch.Dataset): Datasets with (image, label) pairs. They will
            be sent to the ResNet and the attribute predictor to extract features.
abl_model (nn.Module): Attribute predictor model. Receives an image, predicts a
list with the pre-softmax logits per attribute.
save_dir (string): Path to the folder where the trained models (and training/val
metrics) will be saved.
"""
from torch import nn
utils.tprint('Predicting human attributes...')
abl_model = nn.Sequential(abl_model, models.SoftmaxPlusConcat())
abl_model.eval()
#abl_model.cuda()
# Get features
train_human_features = extract_features(abl_model, train_dset)
val_human_features = extract_features(abl_model, val_dset)
# Get resnet features and train models on the concatenation of features
resnet_blocks = [1, 2, 3, 4]
reg_values = np.logspace(-4, 4, 17)
results = []
for resnet_block in resnet_blocks:
utils.tprint(f'Creating ResNet features for resnet_block {resnet_block}')
# Get ResNet
resnet = models.ResNetBase(num_blocks=resnet_block, pretrained=True)
resnet.eval()
#resnet.cuda()
# Get resnet features
train_resnet_features = extract_features(resnet, train_dset)
val_resnet_features = extract_features(resnet, val_dset)
        # Normalize features
train_mean = train_resnet_features.mean(0)
train_std = train_resnet_features.std(0)
train_resnet_features = (train_resnet_features - train_mean) / train_std
val_resnet_features = (val_resnet_features - train_mean) / train_std
# Create joint features
train_features = np.concatenate([train_human_features, train_resnet_features], -1)
val_features = np.concatenate([val_human_features, val_resnet_features], -1)
# Get labels
train_labels = train_dset.labels
val_labels = val_dset.labels
# Train models
utils.tprint('Training linear models...')
weights, biases = train_linear(train_features, train_labels, reg_values)
# Evaluate
utils.tprint('Evaluating...')
train_metrics = np.stack([
compute_metrics_linear(w, b, train_features, train_labels)
for w, b in zip(weights, biases)])
val_metrics = np.stack([
compute_metrics_linear(w, b, val_features, val_labels)
for w, b in zip(weights, biases)])
# Save results
results.append({
'weights': weights, 'biases': biases, 'train_metrics': train_metrics,
'val_metrics': val_metrics})
train_metrics = np.stack([r['train_metrics']
for r in results]) # num_resnets x num_regs x num_metrics
val_metrics = np.stack([r['val_metrics']
for r in results]) # num_resnets x num_regs x num_metrics
# Save
utils.tprint('Saving models...')
with h5py.File(path.join(save_dir, 'linear_on_joint.h5'), 'w') as f:
        f.create_dataset('resnet_blocks', data=resnet_blocks, dtype=np.int64)
f.create_dataset('reg_values', data=reg_values)
f.create_dataset('train_metrics', data=train_metrics)
f.create_dataset('val_metrics', data=val_metrics)
for i, res in enumerate(results):
f.create_dataset(f'{i}/weights', data=res['weights'])
f.create_dataset(f'{i}/biases', data=res['biases'])
# Set directory to save results
DDSM_dir = '/src/dermosxai/data/DDSM/classifiers'
HAM10000_dir = '/src/dermosxai/data/HAM10000/classifiers'
def train_DDSM_linear_on_resnet():
""" Train linear models on the resnet features for the DDSM dataset."""
# Get dsets
utils.tprint('Getting datasets')
train_dset = datasets.DDSM('train')
val_dset = datasets.DDSM('val')
# Add transforms
_, val_transform = transforms.get_DDSM_transforms(train_dset.img_mean,
train_dset.img_std, make_rgb=True)
train_dset.transform = val_transform
val_dset.transform = val_transform
train_linear_on_resnet(train_dset, val_dset, DDSM_dir)
def train_HAM10000_linear_on_resnet():
""" Train linear models on the resnet features for the HAM10000 dataset."""
# Get dsets
utils.tprint('Getting datasets')
train_dset = datasets.HAM10000('train')
val_dset = datasets.HAM10000('val')
# Add transforms
_, val_transform = transforms.get_HAM10000_transforms(train_dset.img_mean,
train_dset.img_std)
train_dset.transform = val_transform
val_dset.transform = val_transform
train_linear_on_resnet(train_dset, val_dset, HAM10000_dir)
def train_DDSM_linear_on_human():
""" Train linear models on top of predicted human attributes. """
# Get dsets
utils.tprint('Getting datasets')
train_dset = datasets.DDSM('train')
val_dset = datasets.DDSM('val')
# Add transforms
_, val_transform = transforms.get_DDSM_transforms(train_dset.img_mean,
train_dset.img_std, make_rgb=True)
train_dset.transform = val_transform
val_dset.transform = val_transform
# Get attribute predictor
abl_model = train_abl.get_DDSM_AbL()
train_linear_on_human(train_dset, val_dset, abl_model, DDSM_dir)
def train_HAM10000_linear_on_human():
""" Train linear models on top of predicted human attributes. """
# Get dsets
utils.tprint('Getting datasets')
train_dset = datasets.HAM10000('train')
val_dset = datasets.HAM10000('val')
# Add transforms
_, val_transform = transforms.get_HAM10000_transforms(train_dset.img_mean,
train_dset.img_std)
train_dset.transform = val_transform
val_dset.transform = val_transform
# Get attribute predictor
abl_model = train_abl.get_HAM10000_AbL()
train_linear_on_human(train_dset, val_dset, abl_model, HAM10000_dir)
def train_DDSM_linear_on_joint():
""" Train linear models on top of resnet features + predicted human attributes. """
# Get dsets
utils.tprint('Getting datasets')
train_dset = datasets.DDSM('train')
val_dset = datasets.DDSM('val')
# Add transforms
_, val_transform = transforms.get_DDSM_transforms(train_dset.img_mean,
train_dset.img_std, make_rgb=True)
train_dset.transform = val_transform
val_dset.transform = val_transform
# Get attribute predictor
abl_model = train_abl.get_DDSM_AbL()
train_linear_on_joint(train_dset, val_dset, abl_model, DDSM_dir)
def train_HAM10000_linear_on_joint():
""" Train linear models on top of resnet fatures + predicted human attributes. """
# Get dsets
utils.tprint('Getting datasets')
train_dset = datasets.HAM10000('train')
val_dset = datasets.HAM10000('val')
# Add transforms
_, val_transform = transforms.get_HAM10000_transforms(train_dset.img_mean,
train_dset.img_std)
train_dset.transform = val_transform
val_dset.transform = val_transform
# Get attribute predictor
abl_model = train_abl.get_HAM10000_AbL()
train_linear_on_joint(train_dset, val_dset, abl_model, HAM10000_dir)
################ Evaluation code
def _find_best_resnet_model(h5_path):
with h5py.File(h5_path, 'r') as f:
val_mcc = f['val_metrics'][..., 2]
i, j = np.unravel_index(val_mcc.argmax(), val_mcc.shape)
best_weight = f[f'{i}/weights'][j]
best_bias = f[f'{i}/biases'][j]
best_resnet_block = f['resnet_blocks'][i]
best_reg_value = f['reg_values'][j]
return best_weight, best_bias, (best_resnet_block, best_reg_value)
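# Illustrative sketch (not called anywhere in this module): how the best
# (resnet_block, reg_value) pair is picked from the 2-d grid of validation MCCs above.
# The numbers are toy values, not real results.
def _best_model_selection_example():
    val_mcc = np.array([[0.10, 0.30],
                        [0.25, 0.40]])   # num_resnets x num_regs
    i, j = np.unravel_index(val_mcc.argmax(), val_mcc.shape)  # -> (1, 1): last block, last reg
    return i, j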
def eval_DDSM_linear_on_resnet():
""" Test metrics for the best linear_on_resnet model."""
# Load test set
train_dset = datasets.DDSM('train') # need this for normalization
test_dset = datasets.DDSM('test')
# Add transforms
_, val_transform = transforms.get_DDSM_transforms(train_dset.img_mean,
train_dset.img_std, make_rgb=True)
train_dset.transform = val_transform
test_dset.transform = val_transform
# Find the best model
h5_path = path.join(DDSM_dir, 'linear_on_resnet.h5')
weight, bias, (resnet_block, _) = _find_best_resnet_model(h5_path)
# Get resnet
resnet = models.ResNetBase(num_blocks=resnet_block, pretrained=True)
resnet.eval()
# Get resnet features
train_features = extract_features(resnet, train_dset)
test_features = extract_features(resnet, test_dset)
# Normalize
train_mean, train_std = train_features.mean(0), train_features.std(0)
test_features = (test_features - train_mean) / train_std
# Evaluate
metrics = compute_metrics_linear(weight, bias, test_features, test_dset.labels)
print('Test metrics (resnet, DDSM):', metrics)
def eval_HAM10000_linear_on_resnet():
""" Test metrics for the best linear_on_resnet model."""
# Load test set
train_dset = datasets.HAM10000('train') # need this for normalization
test_dset = datasets.HAM10000('test')
# Add transforms
_, val_transform = transforms.get_HAM10000_transforms(train_dset.img_mean,
train_dset.img_std)
train_dset.transform = val_transform
test_dset.transform = val_transform
# Find the best model
h5_path = path.join(HAM10000_dir, 'linear_on_resnet.h5')
weight, bias, (resnet_block, _) = _find_best_resnet_model(h5_path)
# Get resnet
resnet = models.ResNetBase(num_blocks=resnet_block, pretrained=True)
resnet.eval()
# Get resnet features
train_features = extract_features(resnet, train_dset)
test_features = extract_features(resnet, test_dset)
# Normalize
train_mean, train_std = train_features.mean(0), train_features.std(0)
test_features = (test_features - train_mean) / train_std
# Evaluate
metrics = compute_metrics_linear(weight, bias, test_features, test_dset.labels)
print('Test metrics (resnet, HAM10000):', metrics)
def eval_DDSM_linear_on_human():
""" Test metrics for the best linear_on_human model."""
from torch import nn
# Load test set
train_dset = datasets.DDSM('train') # need this for normalization
test_dset = datasets.DDSM('test')
# Add transforms
_, val_transform = transforms.get_DDSM_transforms(train_dset.img_mean,
train_dset.img_std, make_rgb=True)
train_dset.transform = val_transform
test_dset.transform = val_transform
# Get attribute learner
abl_model = train_abl.get_DDSM_AbL()
abl_model = nn.Sequential(abl_model, models.SoftmaxPlusConcat())
abl_model.eval()
# Get features
test_features = extract_features(abl_model, test_dset)
# Find the best model
h5_path = path.join(DDSM_dir, 'linear_on_human.h5')
with h5py.File(h5_path, 'r') as f:
val_mcc = f['val_metrics'][..., 2]
weight = f['weights'][val_mcc.argmax()]
bias = f['biases'][val_mcc.argmax()]
# Evaluate
metrics = compute_metrics_linear(weight, bias, test_features, test_dset.labels)
print('Test metrics (human, DDSM):', metrics)
def eval_HAM10000_linear_on_human():
""" Test metrics for the best linear_on_human model."""
from torch import nn
# Load test set
train_dset = datasets.HAM10000('train') # need this for normalization
test_dset = datasets.HAM10000('test')
# Add transforms
_, val_transform = transforms.get_HAM10000_transforms(train_dset.img_mean,
train_dset.img_std)
train_dset.transform = val_transform
test_dset.transform = val_transform
# Get attribute learner
abl_model = train_abl.get_HAM10000_AbL()
abl_model = nn.Sequential(abl_model, models.SoftmaxPlusConcat())
abl_model.eval()
# Get features
test_features = extract_features(abl_model, test_dset)
# Find the best model
h5_path = path.join(HAM10000_dir, 'linear_on_human.h5')
with h5py.File(h5_path, 'r') as f:
val_mcc = f['val_metrics'][..., 2]
weight = f['weights'][val_mcc.argmax()]
bias = f['biases'][val_mcc.argmax()]
# Evaluate
metrics = compute_metrics_linear(weight, bias, test_features, test_dset.labels)
print('Test metrics (human, HAM10000):', metrics)
def eval_DDSM_linear_on_joint():
""" Test metrics for the best linear_on_joint model."""
from torch import nn
# Load test set
train_dset = datasets.DDSM('train') # need this for normalization
test_dset = datasets.DDSM('test')
# Add transforms
_, val_transform = transforms.get_DDSM_transforms(train_dset.img_mean,
train_dset.img_std, make_rgb=True)
train_dset.transform = val_transform
test_dset.transform = val_transform
# Get attribute learner
abl_model = train_abl.get_DDSM_AbL()
abl_model = nn.Sequential(abl_model, models.SoftmaxPlusConcat())
abl_model.eval()
# Get features
test_human_features = extract_features(abl_model, test_dset)
# Find the best model
h5_path = path.join(DDSM_dir, 'linear_on_joint.h5')
weight, bias, (resnet_block, _) = _find_best_resnet_model(h5_path)
# Get resnet
resnet = models.ResNetBase(num_blocks=resnet_block, pretrained=True)
resnet.eval()
# Get resnet features
train_resnet_features = extract_features(resnet, train_dset)
test_resnet_features = extract_features(resnet, test_dset)
# Normalize
train_mean = train_resnet_features.mean(0)
train_std = train_resnet_features.std(0)
test_resnet_features = (test_resnet_features - train_mean) / train_std
# Concatenate features
test_features = np.concatenate([test_human_features, test_resnet_features], -1)
# Evaluate
metrics = compute_metrics_linear(weight, bias, test_features, test_dset.labels)
print('Test metrics (joint, DDSM):', metrics)
def eval_HAM10000_linear_on_joint():
""" Test metrics for the best linear_on_joint model."""
from torch import nn
# Load test set
train_dset = datasets.HAM10000('train') # need this for normalization
test_dset = datasets.HAM10000('test')
# Add transforms
_, val_transform = transforms.get_HAM10000_transforms(train_dset.img_mean,
train_dset.img_std)
train_dset.transform = val_transform
test_dset.transform = val_transform
# Get attribute learner
abl_model = train_abl.get_HAM10000_AbL()
abl_model = nn.Sequential(abl_model, models.SoftmaxPlusConcat())
abl_model.eval()
# Get features
test_human_features = extract_features(abl_model, test_dset)
# Find the best model
h5_path = path.join(HAM10000_dir, 'linear_on_joint.h5')
weight, bias, (resnet_block, _) = _find_best_resnet_model(h5_path)
# Get resnet
resnet = models.ResNetBase(num_blocks=resnet_block, pretrained=True)
resnet.eval()
# Get resnet features
train_resnet_features = extract_features(resnet, train_dset)
test_resnet_features = extract_features(resnet, test_dset)
# Normalize
train_mean = train_resnet_features.mean(0)
train_std = train_resnet_features.std(0)
test_resnet_features = (test_resnet_features - train_mean) / train_std
# Concatenate features
test_features = np.concatenate([test_human_features, test_resnet_features], -1)
# Evaluate
metrics = compute_metrics_linear(weight, bias, test_features, test_dset.labels)
print('Test metrics (joint, HAM10000):', metrics)
if __name__ == '__main__':
""" For reference. Takes around 40 mins"""
# Train all models
train_DDSM_linear_on_resnet()
train_DDSM_linear_on_human()
train_DDSM_linear_on_joint()
train_HAM10000_linear_on_resnet()
train_HAM10000_linear_on_human()
train_HAM10000_linear_on_joint()
    # Test all models (this just prints stuff)
eval_DDSM_linear_on_resnet()
eval_DDSM_linear_on_human()
eval_DDSM_linear_on_joint()
eval_HAM10000_linear_on_resnet()
eval_HAM10000_linear_on_human()
eval_HAM10000_linear_on_joint()
"""
Test metrics (resnet, DDSM): [0.72727273 0.44052187 0.44359055 0.67206478 0.78299092 0.72841511]
Test metrics (human, DDSM): [0.74074074 0.45628967 0.45639209 0.6695279 0.80807453 0.73024843]
Test metrics (joint, DDSM): [0.68686869 0.34539637 0.34561048 0.60425532 0.73884376 0.66322116]
Test metrics (resnet, HAM10000): [0.87411598 0.67191378 0.67273709 0.7323828 0.94910305 0.77070733]
Test metrics (human, HAM10000): [0.78925035 0.3352286 0.3610032 0.4917989 0.8627192 0.55125827]
Test metrics (joint, HAM10000): [0.85148515 0.62600131 0.62618153 0.70036548 0.9366731 0.74395133]
"""
def compute_DDSM_joint_MI():
"""Computes mutual information between resnet features and predicted DDSM attributes."""
import torch
from torch import nn
# Load test set
train_dset = datasets.DDSM('train')
val_dset = datasets.DDSM('val')
test_dset = datasets.DDSM('test')
# Add transforms
_, val_transform = transforms.get_DDSM_transforms(train_dset.img_mean,
train_dset.img_std, make_rgb=True)
train_dset.transform = val_transform
val_dset.transform = val_transform
test_dset.transform = val_transform
# Get attribute learner
abl_model = train_abl.get_DDSM_AbL()
abl_model = nn.Sequential(abl_model, models.SoftmaxPlusConcat())
abl_model.eval()
# Get features
train_human_features = extract_features(abl_model, train_dset)
val_human_features = extract_features(abl_model, val_dset)
test_human_features = extract_features(abl_model, test_dset)
# Find the best resnet_layer
h5_path = path.join(DDSM_dir, 'linear_on_joint.h5')
_, _, (resnet_block, _) = _find_best_resnet_model(h5_path)
# Get resnet
resnet = models.ResNetBase(num_blocks=resnet_block, pretrained=True)
resnet.eval()
# Get resnet features
train_resnet_features = extract_features(resnet, train_dset)
val_resnet_features = extract_features(resnet, val_dset)
test_resnet_features = extract_features(resnet, test_dset)
# Normalize
train_mean = train_resnet_features.mean(0)
train_std = train_resnet_features.std(0)
train_resnet_features = (train_resnet_features - train_mean) / train_std
val_resnet_features = (val_resnet_features - train_mean) / train_std
test_resnet_features = (test_resnet_features - train_mean) / train_std
# Make them tensors
train_human_features = torch.tensor(train_human_features)
val_human_features = torch.tensor(val_human_features)
test_human_features = torch.tensor(test_human_features)
train_resnet_features = torch.tensor(train_resnet_features)
val_resnet_features = torch.tensor(val_resnet_features)
test_resnet_features = torch.tensor(test_resnet_features)
# Train MI estimator
from dermosxai import mi
mi_estimator = mi.train_mi(train_human_features, train_resnet_features,
val_human_features, val_resnet_features)[0]
# Compute MI
mi_estimator.eval()
with torch.no_grad():
dv, _, jsd, infonce = mi_estimator(test_human_features, test_resnet_features)
print('Test MI (DDSM): ', dv.item(), jsd.item(), infonce.item())
def compute_HAM10000_joint_MI():
"""Computes mutual information between resnet features and predicted DDSM attributes."""
import torch
from torch import nn
# Load test set
train_dset = datasets.HAM10000('train')
val_dset = datasets.HAM10000('val')
test_dset = datasets.HAM10000('test')
# Add transforms
_, val_transform = transforms.get_HAM10000_transforms(train_dset.img_mean,
train_dset.img_std)
train_dset.transform = val_transform
val_dset.transform = val_transform
test_dset.transform = val_transform
# Get attribute learner
abl_model = train_abl.get_HAM10000_AbL()
abl_model = nn.Sequential(abl_model, models.SoftmaxPlusConcat())
abl_model.eval()
# Get features
train_human_features = extract_features(abl_model, train_dset)
val_human_features = extract_features(abl_model, val_dset)
test_human_features = extract_features(abl_model, test_dset)
# Find the best resnet_layer
h5_path = path.join(HAM10000_dir, 'linear_on_joint.h5')
_, _, (resnet_block, _) = _find_best_resnet_model(h5_path)
# Get resnet
resnet = models.ResNetBase(num_blocks=resnet_block, pretrained=True)
resnet.eval()
# Get resnet features
train_resnet_features = extract_features(resnet, train_dset)
val_resnet_features = extract_features(resnet, val_dset)
test_resnet_features = extract_features(resnet, test_dset)
# Normalize
train_mean = train_resnet_features.mean(0)
train_std = train_resnet_features.std(0)
train_resnet_features = (train_resnet_features - train_mean) / train_std
val_resnet_features = (val_resnet_features - train_mean) / train_std
test_resnet_features = (test_resnet_features - train_mean) / train_std
# Make them tensors
train_human_features = torch.tensor(train_human_features)
val_human_features = torch.tensor(val_human_features)
test_human_features = torch.tensor(test_human_features)
train_resnet_features = torch.tensor(train_resnet_features)
val_resnet_features = torch.tensor(val_resnet_features)
test_resnet_features = torch.tensor(test_resnet_features)
# Train MI estimator
from dermosxai import mi
mi_estimator = mi.train_mi(train_human_features, train_resnet_features,
val_human_features, val_resnet_features)[0]
# Compute MI
mi_estimator.eval()
with torch.no_grad():
dv, _, jsd, infonce = mi_estimator(test_human_features, test_resnet_features)
print('Test MI (HAM10000): ', dv.item(), jsd.item(), infonce.item())
"""
Test MI (DDSM): 0.31409886479377747 -1.252334713935852 -5.379660129547119
Test MI (HAM10000): 3.1195478439331055 -0.5205932855606079 -3.4473540782928467
"""
|
{"hexsha": "7d62ccccc5bf76ca0836983d8108154ba1ba010c", "size": 32118, "ext": "py", "lang": "Python", "max_stars_repo_path": "dermosxai/train_classifier.py", "max_stars_repo_name": "ecobost/dermosxai", "max_stars_repo_head_hexsha": "c851edaf6edaae063c3fd52749994a0225d4a7d1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dermosxai/train_classifier.py", "max_issues_repo_name": "ecobost/dermosxai", "max_issues_repo_head_hexsha": "c851edaf6edaae063c3fd52749994a0225d4a7d1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dermosxai/train_classifier.py", "max_forks_repo_name": "ecobost/dermosxai", "max_forks_repo_head_hexsha": "c851edaf6edaae063c3fd52749994a0225d4a7d1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-10T14:17:50.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-10T14:17:50.000Z", "avg_line_length": 37.5649122807, "max_line_length": 107, "alphanum_fraction": 0.6908275733, "include": true, "reason": "import numpy,from scipy", "num_tokens": 7832}
|
# Importing dataset
dataset = read.csv('Experience.csv')
# dataset = dataset[, 2:3]
#Splitting the Dataset into Training set and Test set
#install.packages('caTools')
library(caTools)
set.seed(123)
split = sample.split(dataset$Salary, SplitRatio = 2/3)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
# #Feature Scaling
# training_set[,2:3] = scale(training_set[,2:3])
# test_set[,2:3] = scale(test_set[,2:3])
# Fitting SLR to the Training set
regressor = lm(formula = Salary ~ YearsExperience, data = training_set)
# Predicting the Test set result
y_pred = predict(regressor, newdata = test_set)
#Visualizing the Training set results
#install.packages('ggplot2')
library(ggplot2)
ggplot() +
geom_point(aes(x = training_set$YearsExperience, y = training_set$Salary),
colour = 'red') +
geom_line(aes(x = training_set$YearsExperience, y = predict(regressor, newdata = training_set)),
colour = 'blue') +
ggtitle('Salary vs Experience (Training set)') +
xlab('Years of Experience') +
ylab('Salary')
# Visualizing the test set
ggplot() +
geom_point(aes(x = test_set$YearsExperience, y = test_set$Salary),
colour = 'red') +
geom_line(aes(x = training_set$YearsExperience, y = predict(regressor, newdata = training_set)),
colour = 'blue') +
ggtitle('Salary vs Experience (Test set)') +
xlab('Years of Experience') +
ylab('Salary')
|
{"hexsha": "e694350b53e378a77c60f2aa7ace31af8381312c", "size": 1492, "ext": "r", "lang": "R", "max_stars_repo_path": "R/Regression/Simple_Linear_Regression/Simple_Linear_Regression.r", "max_stars_repo_name": "luther001/ML", "max_stars_repo_head_hexsha": "d83696e9f9bdc3d2f58de1754f69dab658e029be", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "R/Regression/Simple_Linear_Regression/Simple_Linear_Regression.r", "max_issues_repo_name": "luther001/ML", "max_issues_repo_head_hexsha": "d83696e9f9bdc3d2f58de1754f69dab658e029be", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "R/Regression/Simple_Linear_Regression/Simple_Linear_Regression.r", "max_forks_repo_name": "luther001/ML", "max_forks_repo_head_hexsha": "d83696e9f9bdc3d2f58de1754f69dab658e029be", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.6976744186, "max_line_length": 99, "alphanum_fraction": 0.672922252, "num_tokens": 401}
|
/*
* Copyright (C) 2014 Martin Preisler <martin@preisler.me>
*
* This file is part of oclcrypto.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <oclcrypto/BLOWFISH_ECB.h>
#include <oclcrypto/System.h>
#include <oclcrypto/DataBuffer.h>
#include <boost/test/unit_test.hpp>
struct BLOWFISH_ECB_Fixture
{
BLOWFISH_ECB_Fixture():
system(true)
{}
oclcrypto::System system;
};
BOOST_FIXTURE_TEST_SUITE(BLOWFISH_ECB, BLOWFISH_ECB_Fixture)
/*
BOOST_AUTO_TEST_CASE(EncryptInvalid)
{
BOOST_REQUIRE_GT(system.getDeviceCount(), 0);
for (size_t i = 0; i < system.getDeviceCount(); ++i)
{
oclcrypto::Device& device = system.getDevice(i);
oclcrypto::AES_ECB_Encrypt encrypt(system, device);
BOOST_CHECK_THROW(encrypt.setKey("abcd", 4), std::invalid_argument);
BOOST_CHECK_THROW(encrypt.setKey(static_cast<const unsigned char*>(nullptr), 16), std::invalid_argument);
}
}*/
BOOST_AUTO_TEST_CASE(Encrypt)
{
BOOST_REQUIRE_GT(system.getDeviceCount(), 0);
{
const unsigned char plaintext[] =
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
const unsigned char key[] =
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
const unsigned char expected_ciphertext[] =
{
0x4e, 0xf9, 0x97, 0x45, 0x61, 0x98, 0xdd, 0x78
};
for (size_t i = 0; i < system.getDeviceCount(); ++i)
{
oclcrypto::Device& device = system.getDevice(i);
oclcrypto::BLOWFISH_ECB_Encrypt encrypt(system, device);
encrypt.setKey(key, 8);
encrypt.setPlainText(plaintext, 8);
encrypt.execute(1);
{
auto data = encrypt.getCipherText()->lockRead<unsigned char>();
for (size_t j = 0; j < data.size(); ++j)
BOOST_CHECK_EQUAL(data[j], expected_ciphertext[j]);
}
}
}
{
const unsigned char plaintext[] =
{
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
const unsigned char key[] =
{
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
const unsigned char expected_ciphertext[] =
{
0x51, 0x86, 0x6f, 0xd5, 0xb8, 0x5e, 0xcb, 0x8a
};
for (size_t i = 0; i < system.getDeviceCount(); ++i)
{
oclcrypto::Device& device = system.getDevice(i);
oclcrypto::BLOWFISH_ECB_Encrypt encrypt(system, device);
encrypt.setKey(key, 8);
encrypt.setPlainText(plaintext, 8);
encrypt.execute(1);
{
auto data = encrypt.getCipherText()->lockRead<unsigned char>();
for (size_t j = 0; j < data.size(); ++j)
BOOST_CHECK_EQUAL(data[j], expected_ciphertext[j]);
}
}
}
{
const unsigned char plaintext[] =
{
0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF
};
const unsigned char key[] =
{
0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11
};
const unsigned char expected_ciphertext[] =
{
0x7d, 0x0c, 0xc6, 0x30, 0xaf, 0xda, 0x1e, 0xc7 // WRONG
};
for (size_t i = 0; i < system.getDeviceCount(); ++i)
{
oclcrypto::Device& device = system.getDevice(i);
oclcrypto::BLOWFISH_ECB_Encrypt encrypt(system, device);
encrypt.setKey(key, 8);
encrypt.setPlainText(plaintext, 8);
encrypt.execute(1);
{
auto data = encrypt.getCipherText()->lockRead<unsigned char>();
for (size_t j = 0; j < data.size(); ++j)
BOOST_CHECK_EQUAL(data[j], expected_ciphertext[j]);
}
}
}
}
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "ff092b026fa67ae52b00462d54255a25bea3d721", "size": 4941, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tests/BLOWFISH_ECB.cpp", "max_stars_repo_name": "wdv4758h/oclcrypto", "max_stars_repo_head_hexsha": "fe66b6f0aa523bd5ea5f24039c1e70def3460ad8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4.0, "max_stars_repo_stars_event_min_datetime": "2017-03-15T03:07:04.000Z", "max_stars_repo_stars_event_max_datetime": "2018-11-08T10:05:04.000Z", "max_issues_repo_path": "tests/BLOWFISH_ECB.cpp", "max_issues_repo_name": "wdv4758h/oclcrypto", "max_issues_repo_head_hexsha": "fe66b6f0aa523bd5ea5f24039c1e70def3460ad8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/BLOWFISH_ECB.cpp", "max_forks_repo_name": "wdv4758h/oclcrypto", "max_forks_repo_head_hexsha": "fe66b6f0aa523bd5ea5f24039c1e70def3460ad8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.5868263473, "max_line_length": 113, "alphanum_fraction": 0.5948188626, "num_tokens": 1350}
|
! @@name: threadprivate.5f
! @@type: F-fixed
! @@compilable: yes
! @@linkable: yes
! @@expect: success
PROGRAM INC_GOOD2
INTEGER, ALLOCATABLE, SAVE :: A(:)
INTEGER, POINTER, SAVE :: PTR
INTEGER, SAVE :: I
INTEGER, TARGET :: TARG
LOGICAL :: FIRSTIN = .TRUE.
!$OMP THREADPRIVATE(A, I, PTR)
ALLOCATE (A(3))
A = (/1,2,3/)
PTR => TARG
I = 5
!$OMP PARALLEL COPYIN(I, PTR)
!$OMP CRITICAL
IF (FIRSTIN) THEN
TARG = 4 ! Update target of ptr
I = I + 10
IF (ALLOCATED(A)) A = A + 10
FIRSTIN = .FALSE.
END IF
IF (ALLOCATED(A)) THEN
PRINT *, 'a = ', A
ELSE
PRINT *, 'A is not allocated'
END IF
PRINT *, 'ptr = ', PTR
PRINT *, 'i = ', I
PRINT *
!$OMP END CRITICAL
!$OMP END PARALLEL
END PROGRAM INC_GOOD2
|
{"hexsha": "d0086c3a05e732b736679b5ee01e53b5311afcff", "size": 983, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "test/openmp_examples/sources/Example_threadprivate.5f.f", "max_stars_repo_name": "kammerdienerb/flang", "max_stars_repo_head_hexsha": "8cc4a02b94713750f09fe6b756d33daced0b4a74", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 716, "max_stars_repo_stars_event_min_datetime": "2017-05-17T17:58:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T11:20:58.000Z", "max_issues_repo_path": "test/openmp_examples/sources/Example_threadprivate.5f.f", "max_issues_repo_name": "kammerdienerb/flang", "max_issues_repo_head_hexsha": "8cc4a02b94713750f09fe6b756d33daced0b4a74", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 794, "max_issues_repo_issues_event_min_datetime": "2017-05-18T19:27:40.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T23:22:11.000Z", "max_forks_repo_path": "test/openmp_examples/sources/Example_threadprivate.5f.f", "max_forks_repo_name": "kammerdienerb/flang", "max_forks_repo_head_hexsha": "8cc4a02b94713750f09fe6b756d33daced0b4a74", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 157, "max_forks_repo_forks_event_min_datetime": "2017-05-17T18:50:33.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T07:06:45.000Z", "avg_line_length": 23.9756097561, "max_line_length": 55, "alphanum_fraction": 0.4506612411, "num_tokens": 287}
|
/*==============================================================================
Copyright (c) 2009 Peter Dimov
Copyright (c) 2005-2010 Joel de Guzman
Copyright (c) 2010 Thomas Heller
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
==============================================================================*/
#include <boost/phoenix/core.hpp>
#include <boost/phoenix/bind.hpp>
#include <boost/ref.hpp>
#include <boost/detail/lightweight_test.hpp>
struct X
{
int f( int x )
{
return x;
}
int g( int x ) const
{
return -x;
}
};
int main()
{
using boost::phoenix::bind;
using boost::phoenix::placeholders::_1;
X x;
BOOST_TEST( bind( &X::f, _1, 1 )( boost::ref( x ) ) == 1 );
BOOST_TEST( bind( &X::g, _1, 2 )( boost::cref( x ) ) == -2 );
return boost::report_errors();
}
|
{"hexsha": "5843b8ee98077b34794e6b26bbf847eefa16c38b", "size": 999, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "REDSI_1160929_1161573/boost_1_67_0/libs/phoenix/test/boost_bind_compatibility/bind_ref_test.cpp", "max_stars_repo_name": "Wultyc/ISEP_1718_2A2S_REDSI_TrabalhoGrupo", "max_stars_repo_head_hexsha": "eb0f7ef64e188fe871f47c2ef9cdef36d8a66bc8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 198.0, "max_stars_repo_stars_event_min_datetime": "2015-01-13T05:47:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T04:46:46.000Z", "max_issues_repo_path": "libs/boost/libs/phoenix/test/boost_bind_compatibility/bind_ref_test.cpp", "max_issues_repo_name": "flingone/frameworks_base_cmds_remoted", "max_issues_repo_head_hexsha": "4509d9f0468137ed7fd8d100179160d167e7d943", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 9.0, "max_issues_repo_issues_event_min_datetime": "2015-01-28T16:33:19.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-12T23:03:28.000Z", "max_forks_repo_path": "libs/boost/libs/phoenix/test/boost_bind_compatibility/bind_ref_test.cpp", "max_forks_repo_name": "flingone/frameworks_base_cmds_remoted", "max_forks_repo_head_hexsha": "4509d9f0468137ed7fd8d100179160d167e7d943", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 139.0, "max_forks_repo_forks_event_min_datetime": "2015-01-15T20:09:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-31T15:21:16.000Z", "avg_line_length": 25.6153846154, "max_line_length": 81, "alphanum_fraction": 0.4914914915, "num_tokens": 248}
|
"""obstacle_avoid_test controller."""
from robot_manager import RobotManager
from args_manager import ArgsManager
import numpy as np
np.random.seed(13482737)
# np.random.seed(13482736)
argsManager = ArgsManager()
robotManager = RobotManager(argsManager.process_args())
robotManager.execute()
|
{"hexsha": "3b0cc784687590c4010b5abd5d97bc5f03ba9277", "size": 296, "ext": "py", "lang": "Python", "max_stars_repo_path": "webots-project/controllers/pos-prediction/pos-prediction.py", "max_stars_repo_name": "joangerard/webots-thesis", "max_stars_repo_head_hexsha": "c18c53b281af6c68431b9b3abde07d1934c37dd9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2020-04-24T01:22:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-05T12:50:41.000Z", "max_issues_repo_path": "webots-project/controllers/pos-prediction/pos-prediction.py", "max_issues_repo_name": "joangerard/webots-thesis", "max_issues_repo_head_hexsha": "c18c53b281af6c68431b9b3abde07d1934c37dd9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2020-03-25T18:38:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T00:25:06.000Z", "max_forks_repo_path": "webots-project/controllers/pos-prediction/pos-prediction.py", "max_forks_repo_name": "joangerard/webots-thesis", "max_forks_repo_head_hexsha": "c18c53b281af6c68431b9b3abde07d1934c37dd9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2020-04-28T05:07:34.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-23T01:02:39.000Z", "avg_line_length": 21.1428571429, "max_line_length": 55, "alphanum_fraction": 0.8108108108, "include": true, "reason": "import numpy", "num_tokens": 64}
|
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import pandas_profiling
from sklearn.preprocessing import LabelEncoder
from tqdm import tqdm
from joblib import Parallel,delayed
import numpy as np
import json
import re
import time
from sklearn.utils import shuffle
tqdm.pandas()  # register progress_apply on pandas objects (used throughout below)
def time_transform(t):
    # First parse the string into a time struct
    timeArray = time.strptime(t, "%Y-%m-%d %H:%M:%S")
    # Then convert it to a Unix timestamp
timeStamp = int(time.mktime(timeArray))
return timeStamp
def Z_score(mean_, std_,x):
return (x-mean_)/std_
def max_mean_std(data):
return np.max(data), np.mean(data), np.std(data)
def calculate_acc(predictions, truth):
hit = 0
for i in range(len(predictions)):
if predictions[i] == truth[i]:
hit = hit +1
return hit/len(predictions)
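# Illustrative sketch (not called anywhere in this script): toy sanity checks for the helper
# functions above. All values are made up and unrelated to the MOOC data.
def _helper_examples():
    assert Z_score(10.0, 2.0, 14.0) == 2.0                  # (14 - 10) / 2
    assert calculate_acc([1, 0, 1], [1, 1, 1]) == 2 / 3     # two of three predictions match
    _max, _mean, _std = max_mean_std([1, 2, 3])             # -> (3, 2.0, ~0.816)
    _ts = time_transform('2020-01-01 00:00:00')             # seconds since the epoch (local time)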
train_set = pd.read_csv('train_set.csv',converters={'label_list': eval})
test_set = pd.read_csv('test_set.csv',converters={'label_list': eval})
course_info = pd.read_json('course_info.json',lines=True)
video_info = pd.read_json('video_info.json',lines=True)
videoID = video_info['id'].values.tolist()
courseID = course_info['course_id'].values.tolist()
videoID_encoder = LabelEncoder()
courseID_encoder = LabelEncoder()
videoID_encoder.fit(videoID)
courseID_encoder.fit(courseID)
course_info['courseID'] = course_info['course_id'].progress_apply(lambda x : courseID_encoder.transform([x]))
course_info['videoIDs'] = course_info['item'].progress_apply(lambda x : videoID_encoder.transform(x))
video_info['videoID'] = video_info['id'].progress_apply(lambda x : videoID_encoder.transform([x]))
course_video_num = {}
def count_videos(courseId, videoIds):
number_of_video = len(videoIds)
course_video_num[courseId[0]] = number_of_video
course_info.progress_apply(lambda row: count_videos(row['courseID'],row['videoIDs']), axis=1)
video_lists = {}
def init_video_list(videoID):
video_lists[videoID[0]] = []
# video_info.progress_apply(lambda row: init_video_list(row['courseID'],row['videoIDs']), axis=1)
video_info['videoID'].progress_apply(lambda x : init_video_list(x))
course_frequence= {}
frequence_list = []
course_ids = course_info['courseID'].values.tolist()
course_ids =[ item for elem in course_ids for item in elem] #706 courses
for i in range(len(course_ids)):
course_frequence[course_ids[i]]=0
# course_frequence[470]
def course_frequence_calculate(courseListIDs):
    courseListIDs = re.sub(r'\s+', ' ', courseListIDs[1:-1].strip(' '))
courses_ = [int(i) for i in courseListIDs.split(' ')]
for i in range(len(courses_)):
num = course_frequence[courses_[i]] +1
course_frequence[courses_[i]] = num
train_set['courseListIDs'].progress_apply(lambda x : course_frequence_calculate(x))
for k,v in course_frequence.items():
frequence_list.append(v)
max_course_frequence, mean_course_frequence, std_course_frequence = max_mean_std(frequence_list)  # max_mean_std returns (max, mean, std)
video_start_times_info = []
video_end_times_info = []
local_start_times_info = []
local_end_times_info = []
video_durations_info = []
local_watching_times_info = []
video_progress_times_info = []
watching_counts_info = []
local_interval_info = []
#2020-10-24 new feature
video_watching_speed_info = []
per_time_watching_percentage_info = []  # average fraction of the video completed per viewing
video_finish_percentage_info = []  # fraction of the video watched overall
num_of_cos_info = []
num_of_vdo_info = []
local_max_watching_counts_info = []
local_mean_watching_counts_info = []
local_std_watching_counts_info = []
local_max_video_durations_info = []
local_mean_video_durations_info = []
local_std_video_durations_info = []
local_max_local_watching_times_info = []
local_mean_local_watching_times_info = []
local_std_local_watching_times_info = []
local_max_video_progress_times_info = []
local_mean_video_progress_times_info = []
local_std_video_progress_times_info = []
local_max_video_watching_speed_info = []
local_mean_video_watching_speed_info = []
local_std_video_watching_speed_info = []
local_max_video_start_times_info = []
local_mean_video_start_times_info = []
local_std_video_start_times_info = []
local_max_video_end_times_info = []
local_mean_video_end_times_info = []
local_std_video_end_times_info = []
local_max_z_score_interval_info = []
local_mean_z_score_interval_info = []
local_std_z_score_interval_info = []
local_total_video_num_info = []
local_percentage_viewed_info = []
local_max_z_score_local_start_time_info = []
local_mean_z_score_local_start_time_info = []
local_std_z_score_local_start_time_info = []
local_max_z_score_local_end_time_info = []
local_mean_z_score_local_end_time_info = []
local_std_z_score_local_end_time_info = []
local_course_frequence_z_score_info = []
local_max_time_per_watching_info = []
local_mean_time_per_watching_info = []
local_std_time_per_watching_info = []
local_total_watching_counts_info = []
local_total_video_durations_info = []
local_total_local_watching_times_info = []
local_total_video_progress_times_info = []
video_duration ={}
def collect_info(course_ids,video_ids,watching_counts,video_durations,local_watching_times,video_progress_times,
video_start_times, video_end_times, local_start_times,local_end_times,courseListIDs):
course_ids = eval(course_ids)
video_ids = eval(video_ids)
watching_counts = eval(watching_counts)
video_durations = eval(video_durations)
local_watching_times = eval(local_watching_times)
video_progress_times = eval(video_progress_times)
video_start_times = eval(video_start_times)
video_end_times = eval(video_end_times)
local_start_times = eval(local_start_times)
local_end_times = eval(local_end_times)
    courseListIDs = re.sub(r'\s+', ' ', courseListIDs[1:-1].strip(' '))
courses_ = [int(i) for i in courseListIDs.split(' ')]
courses = {}
for i in range(len(courses_)):
courses[courses_[i]] = []
for i in range(len(course_ids)):
course = course_ids[i]
local_start_times_ = time_transform(local_start_times[i])
local_end_times_ = time_transform(local_end_times[i])
interval = local_end_times_ - local_start_times_
video_start_times_info.append(video_start_times[i])
video_end_times_info.append(video_end_times[i])
local_start_times_info.append(local_start_times_)
local_end_times_info.append(local_end_times_)
video_durations_info.append(video_durations[i])
local_watching_times_info.append(local_watching_times[i])
video_progress_times_info.append(video_progress_times[i])
watching_counts_info.append(watching_counts[i])
local_interval_info.append(interval)
#2020-10-24 new feature
video_watching_speed_info.append(local_watching_times[i]/video_progress_times[i])
per_time_watching_percentage_info.append((local_watching_times[i]/watching_counts[i])/video_durations[i])
video_finish_percentage_info.append(local_watching_times[i]/video_durations[i])
#2020-10-26 new feature
video_lists[video_ids[i]].append(local_watching_times[i])
info_vec = [course_ids[i],watching_counts[i],video_durations[i],local_watching_times[i],video_progress_times[i],
video_start_times[i], video_end_times[i]]
courses[course].append(info_vec)
for i in range(len(courses_)):
info_vecs = courses[courses_[i]]
total_video_num = course_video_num[courses_[i]]
num_of_vdo = len(info_vecs)
percentage_viewed = num_of_vdo/total_video_num
num_of_cos = len(courses_)
course_watching_counts = [info_vecs[j][1] for j in range(len(info_vecs))]
course_time_per_watching = [info_vecs[j][3]/info_vecs[j][1] for j in range(len(info_vecs))]
course_video_durations = [info_vecs[j][2] for j in range(len(info_vecs))]
course_local_watching_times = [info_vecs[j][3] for j in range(len(info_vecs))]
course_video_progress_times = [info_vecs[j][4] for j in range(len(info_vecs))]
course_video_watching_speed = [info_vecs[j][3]/info_vecs[j][4] for j in range(len(info_vecs))]
course_video_start_times = [info_vecs[j][5] for j in range(len(info_vecs))]
course_video_end_times = [info_vecs[j][6] for j in range(len(info_vecs))]
# course_z_score_interval = [info_vecs[j][7] for j in range(len(info_vecs))]
# course_z_score_local_start_time =[info_vecs[j][8] for j in range(len(info_vecs))]
# course_z_score_local_end_time =[info_vecs[j][9] for j in range(len(info_vecs))]
max_watching_counts, mean_watching_counts, std_watching_counts = max_mean_std(course_watching_counts)
max_video_durations, mean_video_durations, std_video_durations = max_mean_std(course_video_durations)
max_local_watching_times, mean_local_watching_times, std_local_watching_times = max_mean_std(course_local_watching_times)
max_video_progress_times, mean_video_progress_times, std_video_progress_times = max_mean_std(course_video_progress_times)
max_video_watching_speed, mean_video_watching_speed, std_video_watching_speed = max_mean_std(course_video_watching_speed)
max_video_start_times, mean_video_start_times, std_video_start_times = max_mean_std(course_video_start_times)
max_video_end_times, mean_video_end_times, std_video_end_times = max_mean_std(course_video_end_times)
# max_z_score_interval, mean_z_score_interval, std_z_score_interval = max_mean_std(course_z_score_interval)
# max_z_score_local_start_time, mean_z_score_local_start_time, std_z_score_local_start_time = max_mean_std(course_z_score_local_start_time)
# max_z_score_local_end_time, mean_z_score_local_end_time, std_z_score_local_end_time = max_mean_std(course_z_score_local_end_time)
max_time_per_watching, mean_time_per_watching, std_time_per_watching = max_mean_std(course_time_per_watching)
# course_frequence_z_score = Z_score(mean_course_frequence,std_course_frequence,course_frequence[courses_[i]])
total_watching_counts = np.sum(course_watching_counts)
total_video_durations = np.sum(course_video_durations)
total_local_watching_times = np.sum(course_local_watching_times)
total_video_progress_times = np.sum(course_video_progress_times)
num_of_cos_info.append(num_of_cos)
num_of_vdo_info.append(num_of_vdo)
local_max_watching_counts_info.append(max_watching_counts)
local_mean_watching_counts_info.append(mean_watching_counts)
local_std_watching_counts_info.append(std_watching_counts)
local_max_video_durations_info.append(max_video_durations)
local_mean_video_durations_info.append(mean_video_durations)
local_std_video_durations_info.append(std_video_durations)
local_max_local_watching_times_info.append(max_local_watching_times)
local_mean_local_watching_times_info.append(mean_local_watching_times)
local_std_local_watching_times_info.append(std_local_watching_times)
local_max_video_progress_times_info.append(max_video_progress_times)
local_mean_video_progress_times_info.append(mean_video_progress_times)
local_std_video_progress_times_info.append(std_video_progress_times)
local_max_video_watching_speed_info.append(max_video_watching_speed)
local_mean_video_watching_speed_info.append(mean_video_watching_speed)
local_std_video_watching_speed_info.append(std_video_watching_speed)
local_max_video_start_times_info.append(max_video_start_times)
local_mean_video_start_times_info.append(mean_video_start_times)
local_std_video_start_times_info.append(std_video_start_times)
local_max_video_end_times_info.append(max_video_end_times)
local_mean_video_end_times_info.append(mean_video_end_times)
local_std_video_end_times_info.append(std_video_end_times)
# max_z_score_interval_info = []
# mean_z_score_interval_info = []
# std_z_score_interval_info = []
local_total_video_num_info.append(total_video_num)
local_percentage_viewed_info.append(percentage_viewed)
# max_z_score_local_start_time_info = []
# mean_z_score_local_start_time_info = []
# std_z_score_local_start_time_info = []
# max_z_score_local_end_time_info = []
# mean_z_score_local_end_time_info = []
# std_z_score_local_end_time_info = []
# course_frequence_z_score_info = []
local_max_time_per_watching_info.append(max_time_per_watching)
local_mean_time_per_watching_info.append(mean_time_per_watching)
local_std_time_per_watching_info.append(std_time_per_watching)
local_total_watching_counts_info.append(total_watching_counts)
local_total_video_durations_info.append(total_video_durations)
local_total_local_watching_times_info.append(total_local_watching_times)
local_total_video_progress_times_info.append(total_video_progress_times)
train_set.progress_apply(lambda row: collect_info(row['course_ids'],row['video_ids'],row['watching_counts'],row['video_durations'],row['local_watching_times'],row['video_progress_times'],
row['video_start_times'], row['video_end_times'], row['local_start_times'],row['local_end_times'],row['courseListIDs']), axis=1)
#global statistics used to z-score the per-video features (for the CNN branch)
std_local_start_times_info = np.std(local_start_times_info, ddof=1)
mean_local_start_times_info = np.mean(local_start_times_info)
std_video_start_times_info = np.std(video_start_times_info, ddof=1)
mean_video_start_times_info = np.mean(video_start_times_info)
std_video_end_times_info = np.std(video_end_times_info, ddof=1)
mean_video_end_times_info = np.mean(video_end_times_info)
std_local_end_times_info = np.std(local_end_times_info, ddof=1)
mean_local_end_times_info = np.mean(local_end_times_info)
std_video_durations_info = np.std(video_durations_info, ddof=1)
mean_video_durations_info = np.mean(video_durations_info)
std_local_watching_times_info = np.std(local_watching_times_info, ddof=1)
mean_local_watching_times_info = np.mean(local_watching_times_info)
std_video_progress_times_info = np.std(video_progress_times_info, ddof=1)
mean_video_progress_times_info = np.mean(video_progress_times_info)
std_watching_counts_info = np.std(watching_counts_info, ddof=1)
mean_watching_counts_info = np.mean(watching_counts_info)
std_local_interval_info = np.std(local_interval_info, ddof=1)
mean_local_interval_info = np.mean(local_interval_info)
#2020-10-24 new feature
std_video_watching_speed_info = np.std(video_watching_speed_info, ddof=1)
mean_video_watching_speed_info = np.mean(video_watching_speed_info)
std_per_time_watching_percentage_info = np.std(per_time_watching_percentage_info, ddof=1)
mean_per_time_watching_percentage_info = np.mean(per_time_watching_percentage_info)
std_video_finish_percentage_info = np.std(video_finish_percentage_info, ddof=1)
mean_video_finish_percentage_info = np.mean(video_finish_percentage_info)
#2020-10-26 new feature
std_local_watching_times_info2 = {} #for a specific video locally
mean_local_watching_times_info2 = {}
for k,v in video_lists.items():
vid = k
watching_time_list = v
if len(watching_time_list)> 1:
mean = np.mean(watching_time_list)
std = np.std(watching_time_list, ddof=1)
std_local_watching_times_info2[vid] = std
mean_local_watching_times_info2[vid] = mean
# elif len(watching_time_list) = 1:
# std_local_watching_times_info2[vid] = -1
# mean_local_watching_times_info2[vid] = watching_time_list[0]
else:
std_local_watching_times_info2[vid] = -1
mean_local_watching_times_info2[vid] = -1
#global statistics used to z-score the per-course features (for the LR branch)
std_num_of_cos_info = np.std(num_of_cos_info, ddof =1)
mean_num_of_cos_info = np.mean(num_of_cos_info)
std_num_of_vdo_info = np.std(num_of_vdo_info, ddof=1)
mean_num_of_vdo_info = np.mean(num_of_vdo_info)
std_max_watching_counts_info = np.std(local_max_watching_counts_info, ddof = 1)
mean_max_watching_counts_info = np.mean(local_max_watching_counts_info)
std_mean_watching_counts_info = np.std(local_mean_watching_counts_info, ddof =1)
mean_mean_watching_counts_info = np.mean(local_mean_watching_counts_info)
std_std_watching_counts_info = np.std(local_std_watching_counts_info, ddof =1)
mean_std_watching_counts_info = np.mean(local_std_watching_counts_info)
std_max_video_durations_info = np.std(local_max_video_durations_info, ddof =1)
mean_max_video_durations_info = np.mean(local_max_video_durations_info)
std_mean_video_durations_info = np.std(local_mean_video_durations_info, ddof=1)
mean_mean_video_durations_info = np.mean(local_mean_video_durations_info)
std_std_video_durations_info = np.std(local_std_video_durations_info, ddof = 1)
mean_std_video_durations_info = np.mean(local_std_video_durations_info)
std_max_local_watching_times_info = np.std(local_max_local_watching_times_info, ddof =1)
mean_max_local_watching_times_info = np.mean(local_max_local_watching_times_info)
std_mean_local_watching_times_info = np.std(local_mean_local_watching_times_info,ddof=1)
mean_mean_local_watching_times_info = np.mean(local_mean_local_watching_times_info)
std_std_local_watching_times_info = np.std(local_std_local_watching_times_info,ddof = 1)
mean_std_local_watching_times_info = np.mean(local_std_local_watching_times_info)
std_max_video_progress_times_info = np.std(local_max_video_progress_times_info, ddof=1)
mean_max_video_progress_times_info = np.mean(local_max_video_progress_times_info)
std_mean_video_progress_times_info = np.std(local_mean_video_progress_times_info, ddof=1)
mean_mean_video_progress_times_info = np.mean(local_mean_video_progress_times_info)
std_std_video_progress_times_info = np.std(local_std_video_progress_times_info,ddof=1)
mean_std_video_progress_times_info = np.mean(local_std_video_progress_times_info)
std_max_video_watching_speed_info = np.std(local_max_video_watching_speed_info, ddof=1)
mean_max_video_watching_speed_info = np.mean(local_max_video_watching_speed_info)
std_mean_video_watching_speed_info = np.std(local_mean_video_watching_speed_info,ddof=1)
mean_mean_video_watching_speed_info = np.mean(local_mean_video_watching_speed_info)
std_std_video_watching_speed_info = np.std(local_std_video_watching_speed_info,ddof=1)
mean_std_video_watching_speed_info = np.mean(local_std_video_watching_speed_info)
std_max_video_start_times_info = np.std(local_max_video_start_times_info,ddof=1)
mean_max_video_start_times_info = np.mean(local_max_video_start_times_info)
std_mean_video_start_times_info = np.std(local_mean_video_start_times_info, ddof=1)
mean_mean_video_start_times_info = np.mean(local_mean_video_start_times_info)
std_std_video_start_times_info = np.std(local_std_video_start_times_info, ddof=1)
mean_std_video_start_times_info = np.mean(local_std_video_start_times_info)
std_max_video_end_times_info = np.std(local_max_video_end_times_info, ddof=1)
mean_max_video_end_times_info = np.mean(local_max_video_end_times_info)
std_mean_video_end_times_info = np.std(local_mean_video_end_times_info, ddof=1)
mean_mean_video_end_times_info = np.mean(local_mean_video_end_times_info)
std_std_video_end_times_info = np.std(local_std_video_end_times_info, ddof=1)
mean_std_video_end_times_info = np.mean(local_std_video_end_times_info)
std_total_video_num_info = np.std(local_total_video_num_info, ddof=1)
mean_total_video_num_info = np.mean(local_total_video_num_info)
std_percentage_viewed_info = np.std(local_percentage_viewed_info, ddof=1)
mean_percentage_viewed_info = np.mean(local_percentage_viewed_info)
std_max_time_per_watching_info = np.std(local_max_time_per_watching_info, ddof=1)
mean_max_time_per_watching_info = np.mean(local_max_time_per_watching_info)
std_mean_time_per_watching_info = np.std(local_mean_time_per_watching_info, ddof=1)
mean_mean_time_per_watching_info = np.mean(local_mean_time_per_watching_info)
std_std_time_per_watching_info = np.std(local_std_time_per_watching_info, ddof =1)
mean_std_time_per_watching_info = np.mean(local_std_time_per_watching_info)
std_total_watching_counts_info = np.std(local_total_watching_counts_info, ddof =1)
mean_total_watching_counts_info = np.mean(local_total_watching_counts_info)
std_total_video_durations_info = np.std(local_total_video_durations_info, ddof=1)
mean_total_video_durations_info = np.mean(local_total_video_durations_info)
std_total_local_watching_times_info = np.std(local_total_local_watching_times_info, ddof=1)
mean_total_local_watching_times_info = np.mean(local_total_local_watching_times_info)
std_total_video_progress_times_info = np.std(local_total_video_progress_times_info, ddof=1)
mean_total_video_progress_times_info = np.mean(local_total_video_progress_times_info)
def feature_genration_LR(course_ids,video_ids,watching_counts,video_durations,local_watching_times,video_progress_times,
video_start_times, video_end_times, local_start_times,local_end_times,courseListIDs):
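    """Build one feature vector per enrolled course for the LR branch: per-course
    max/mean/std and totals of watching counts, durations, watching times, speeds
    and start/end times, each z-scored against the global statistics collected
    above. Returns a list with one vector per course in courseListIDs."""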
course_ids = eval(course_ids)
video_ids = eval(video_ids)
watching_counts = eval(watching_counts)
video_durations = eval(video_durations)
local_watching_times = eval(local_watching_times)
video_progress_times = eval(video_progress_times)
video_start_times = eval(video_start_times)
video_end_times = eval(video_end_times)
local_start_times = eval(local_start_times)
local_end_times = eval(local_end_times)
unix_start_time = [time_transform(i) for i in local_start_times]
unix_end_time = [time_transform(i) for i in local_end_times]
unix_interval = [unix_end_time[i] - unix_start_time[i] for i in range(len(unix_start_time))]
z_score_local_start_time = [Z_score(mean_local_start_times_info,std_local_start_times_info,i) for i in unix_start_time]
z_score_local_end_time = [Z_score(mean_local_end_times_info,std_local_end_times_info,i) for i in unix_end_time]
z_score_interval = [Z_score(mean_local_interval_info,std_local_interval_info,i) for i in unix_interval]
    courseListIDs = re.sub(r'\s+', ' ', courseListIDs[1:-1].strip(' '))
courses_ = [int(i) for i in courseListIDs.split(' ')]
courses = {}
for i in range(len(courses_)):
courses[courses_[i]] = []
for i in range(len(course_ids)):
course = course_ids[i]
info_vec = [course_ids[i],watching_counts[i],video_durations[i],local_watching_times[i],video_progress_times[i],
video_start_times[i], video_end_times[i],z_score_interval[i],z_score_local_start_time[i],z_score_local_end_time[i]]
courses[course].append(info_vec)
course_vec = []
for i in range(len(courses_)):
info_vecs = courses[courses_[i]]
total_video_num = course_video_num[courses_[i]]
num_of_vdo = len(info_vecs)
percentage_viewed = num_of_vdo/total_video_num
num_of_cos = len(courses_)
course_watching_counts = [info_vecs[j][1] for j in range(len(info_vecs))]
course_time_per_watching = [info_vecs[j][3]/info_vecs[j][1] for j in range(len(info_vecs))]
course_video_durations = [info_vecs[j][2] for j in range(len(info_vecs))]
course_local_watching_times = [info_vecs[j][3] for j in range(len(info_vecs))]
course_video_progress_times = [info_vecs[j][4] for j in range(len(info_vecs))]
course_video_watching_speed = [info_vecs[j][3]/info_vecs[j][4] for j in range(len(info_vecs))]
course_video_start_times = [info_vecs[j][5] for j in range(len(info_vecs))]
course_video_end_times = [info_vecs[j][6] for j in range(len(info_vecs))]
course_z_score_interval = [info_vecs[j][7] for j in range(len(info_vecs))]
course_z_score_local_start_time =[info_vecs[j][8] for j in range(len(info_vecs))]
course_z_score_local_end_time =[info_vecs[j][9] for j in range(len(info_vecs))]
max_watching_counts, mean_watching_counts, std_watching_counts = max_mean_std(course_watching_counts)
max_video_durations, mean_video_durations, std_video_durations = max_mean_std(course_video_durations)
max_local_watching_times, mean_local_watching_times, std_local_watching_times = max_mean_std(course_local_watching_times)
max_video_progress_times, mean_video_progress_times, std_video_progress_times = max_mean_std(course_video_progress_times)
max_video_watching_speed, mean_video_watching_speed, std_video_watching_speed = max_mean_std(course_video_watching_speed)
max_video_start_times, mean_video_start_times, std_video_start_times = max_mean_std(course_video_start_times)
max_video_end_times, mean_video_end_times, std_video_end_times = max_mean_std(course_video_end_times)
max_z_score_interval, mean_z_score_interval, std_z_score_interval = max_mean_std(course_z_score_interval)
max_z_score_local_start_time, mean_z_score_local_start_time, std_z_score_local_start_time = max_mean_std(course_z_score_local_start_time)
max_z_score_local_end_time, mean_z_score_local_end_time, std_z_score_local_end_time = max_mean_std(course_z_score_local_end_time)
max_time_per_watching, mean_time_per_watching, std_time_per_watching = max_mean_std(course_time_per_watching)
        #compared to the global statistics
course_frequence_z_score = Z_score(mean_course_frequence,std_course_frequence,course_frequence[courses_[i]])
total_watching_counts = np.sum(course_watching_counts)
total_video_durations = np.sum(course_video_durations)
total_local_watching_times = np.sum(course_local_watching_times)
total_video_progress_times = np.sum(course_video_progress_times)
vec = [courses_[i],Z_score(mean_num_of_cos_info,std_num_of_cos_info,num_of_cos),
Z_score(mean_num_of_vdo_info,std_num_of_vdo_info,num_of_vdo),
Z_score(mean_max_watching_counts_info,std_max_watching_counts_info,max_watching_counts),
Z_score(mean_mean_watching_counts_info,std_mean_watching_counts_info,mean_watching_counts),
Z_score(mean_std_watching_counts_info,std_std_watching_counts_info,std_watching_counts),
Z_score(mean_max_video_durations_info,std_max_video_durations_info,max_video_durations),
Z_score(mean_mean_video_durations_info,std_mean_video_durations_info,mean_video_durations),
Z_score(mean_std_video_durations_info,std_std_video_durations_info,std_video_durations),
Z_score(mean_max_local_watching_times_info,std_max_local_watching_times_info,max_local_watching_times),
Z_score(mean_mean_local_watching_times_info,std_mean_local_watching_times_info,mean_local_watching_times),
Z_score(mean_std_local_watching_times_info,std_std_local_watching_times_info,std_local_watching_times),
Z_score(mean_max_video_progress_times_info,std_max_video_progress_times_info,max_video_progress_times),
Z_score(mean_mean_video_progress_times_info,std_mean_video_progress_times_info,mean_video_progress_times),
Z_score(mean_std_video_progress_times_info,std_std_video_progress_times_info,std_video_progress_times),
Z_score(mean_max_video_watching_speed_info,std_max_video_watching_speed_info,max_video_watching_speed),
Z_score(mean_mean_video_watching_speed_info,std_mean_video_watching_speed_info,mean_video_watching_speed),
Z_score(mean_std_video_watching_speed_info,std_std_video_watching_speed_info,std_video_watching_speed),
Z_score(mean_max_video_start_times_info,std_max_video_start_times_info,max_video_start_times),
Z_score(mean_mean_video_start_times_info,std_mean_video_start_times_info,mean_video_start_times),
Z_score(mean_std_video_start_times_info,std_std_video_start_times_info,std_video_start_times),
Z_score(mean_max_video_end_times_info,std_max_video_end_times_info,max_video_end_times),
Z_score(mean_mean_video_end_times_info,std_mean_video_end_times_info,mean_video_end_times),
Z_score(mean_std_video_end_times_info,std_std_video_end_times_info,std_video_end_times),
max_z_score_interval, mean_z_score_interval, std_z_score_interval,
Z_score(mean_total_video_num_info,std_total_video_num_info,total_video_num),
Z_score(mean_percentage_viewed_info,std_percentage_viewed_info,percentage_viewed),
max_z_score_local_start_time, mean_z_score_local_start_time, std_z_score_local_start_time,
max_z_score_local_end_time, mean_z_score_local_end_time, std_z_score_local_end_time,
course_frequence_z_score,
Z_score(mean_max_time_per_watching_info,std_max_time_per_watching_info,max_time_per_watching),
Z_score(mean_mean_time_per_watching_info,std_mean_time_per_watching_info,mean_time_per_watching),
Z_score(mean_std_time_per_watching_info,std_std_time_per_watching_info,std_time_per_watching),
Z_score(mean_total_watching_counts_info,std_total_watching_counts_info,total_watching_counts),
Z_score(mean_total_video_durations_info,std_total_video_durations_info,total_video_durations),
Z_score(mean_total_local_watching_times_info,std_total_local_watching_times_info,total_local_watching_times),
Z_score(mean_total_video_progress_times_info,std_total_video_progress_times_info,total_video_progress_times)
]
course_vec.append(vec)
return course_vec
def feature_generate_CNN(course_ids,video_ids,watching_counts,video_durations,local_watching_times,video_progress_times,
video_start_times, video_end_times, local_start_times,local_end_times,courseListIDs):
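    """Build one sequence of per-video feature vectors per course for the CNN
    branch: each video's statistics are z-scored against the global statistics
    collected above, videos are sorted by local start time, and at most 70 videos
    are kept per course. Returns a list with one sequence per course."""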
course_ids = eval(course_ids)
video_ids = eval(video_ids)
watching_counts = eval(watching_counts)
video_durations = eval(video_durations)
local_watching_times = eval(local_watching_times)
video_progress_times = eval(video_progress_times)
video_start_times = eval(video_start_times)
video_end_times = eval(video_end_times)
local_start_times = eval(local_start_times)
local_end_times = eval(local_end_times)
    courseListIDs = re.sub(r'\s+', ' ', courseListIDs[1:-1].strip(' '))
courses_ = [int(i) for i in courseListIDs.split(' ')]
courses = {}
courses_textual_content = {}
courses_textual_name = {}
for i in range(len(courses_)):
courses[courses_[i]] = []
# courses_textual_name[courses_[i]] = []
# courses_textual_content[courses_[i]] = []
for i in range(len(course_ids)):
course = course_ids[i]
if len(courses[course])< 70:
local_start_times_ = time_transform(local_start_times[i])
local_end_times_ = time_transform(local_end_times[i])
interval = local_end_times_ - local_start_times_
video_watching_speed = local_watching_times[i]/video_progress_times[i]
per_time_watching_percentage = (local_watching_times[i]/watching_counts[i])/video_durations[i]
video_finish_percentage = local_watching_times[i]/video_durations[i]
std_local_watching_times2 = std_local_watching_times_info2[video_ids[i]]
mean_local_watching_times2 = mean_local_watching_times_info2[video_ids[i]]
if std_local_watching_times2 <= 0 or mean_local_watching_times2 <= 0: # cold start
z_score_local_watching_times2 = 0
# elif std_local_watching_times2 < 0 and mean_local_watching_times2 >=0: #only one historical record
# z_score_local_watching_times2 = local_watching_times[i]/(mean_local_watching_times2+1)
else:
z_score_local_watching_times2 = Z_score(mean_local_watching_times2,std_local_watching_times2,local_watching_times[i])
video_vec = [course_ids[i],video_ids[i],
Z_score(mean_watching_counts_info,std_watching_counts_info,watching_counts[i]),
Z_score(mean_video_durations_info,std_video_durations_info,video_durations[i]),
Z_score(mean_local_watching_times_info,std_local_watching_times_info,local_watching_times[i]),
Z_score(mean_video_progress_times_info,std_video_progress_times_info,video_progress_times[i]),
Z_score(mean_video_start_times_info,std_video_start_times_info,video_start_times[i]),
Z_score(mean_video_end_times_info,std_video_end_times_info,video_end_times[i]),
Z_score(mean_local_start_times_info,std_local_start_times_info,local_start_times_),
Z_score(mean_local_end_times_info,std_local_end_times_info,local_end_times_),
Z_score(mean_local_interval_info,std_local_interval_info,interval),
Z_score(mean_video_watching_speed_info,std_video_watching_speed_info,video_watching_speed),
Z_score(mean_per_time_watching_percentage_info,std_per_time_watching_percentage_info,per_time_watching_percentage),
Z_score(mean_video_finish_percentage_info,std_video_finish_percentage_info,video_finish_percentage),
z_score_local_watching_times2]
courses[course].append(video_vec)
course_vec = []
# for i in range(len(courses_)):
# course_vec.append(courses[courses_[i]])
for i in range(len(courses_)):
videos = courses[courses_[i]]
temp = pd.DataFrame(videos, columns=['courseid','videoid','1','2','3','4','5','6','local_start_time','7','8','9','10','11','12'])
temp=temp.sort_values(by=['local_start_time'],ignore_index=True)
c = temp.values.tolist()
course_vec.append(c)
return course_vec#,content_vec,name_vec
train_set['course_vecs_LR'] = train_set.progress_apply(lambda row: feature_genration_LR(row['course_ids'],row['video_ids'],row['watching_counts'],row['video_durations'],row['local_watching_times'],row['video_progress_times'],
row['video_start_times'], row['video_end_times'], row['local_start_times'],row['local_end_times'],row['courseListIDs']), axis=1)
train_set['course_vecs_CNN'] = train_set.progress_apply(lambda row: feature_generate_CNN(row['course_ids'],row['video_ids'],row['watching_counts'],row['video_durations'],row['local_watching_times'],row['video_progress_times'],
row['video_start_times'], row['video_end_times'], row['local_start_times'],row['local_end_times'],row['courseListIDs']), axis=1)
train_set.to_csv("train_set_course_vec.csv",index=0)
test_set['course_vecs_LR'] = test_set.progress_apply(lambda row: feature_genration_LR(row['course_ids'],row['video_ids'],row['watching_counts'],row['video_durations'],row['local_watching_times'],row['video_progress_times'],
row['video_start_times'], row['video_end_times'], row['local_start_times'],row['local_end_times'],row['courseListIDs']), axis=1)
test_set['course_vecs_CNN'] = test_set.progress_apply(lambda row: feature_generate_CNN(row['course_ids'],row['video_ids'],row['watching_counts'],row['video_durations'],row['local_watching_times'],row['video_progress_times'],
row['video_start_times'], row['video_end_times'], row['local_start_times'],row['local_end_times'],row['courseListIDs']), axis=1)
test_set.to_csv("test_set_course_vec.csv",index=0)
|
{"hexsha": "b39a80e2cb962bfa185f007e44cac7f7c04688ed", "size": 35914, "ext": "py", "lang": "Python", "max_stars_repo_path": "DataLoader.py", "max_stars_repo_name": "JiayinL/Dropout-Prediction", "max_stars_repo_head_hexsha": "fd1de819579b641ff8c7aa416c1fb5cb6c6a7114", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "DataLoader.py", "max_issues_repo_name": "JiayinL/Dropout-Prediction", "max_issues_repo_head_hexsha": "fd1de819579b641ff8c7aa416c1fb5cb6c6a7114", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "DataLoader.py", "max_forks_repo_name": "JiayinL/Dropout-Prediction", "max_forks_repo_head_hexsha": "fd1de819579b641ff8c7aa416c1fb5cb6c6a7114", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 52.3527696793, "max_line_length": 226, "alphanum_fraction": 0.7731246868, "include": true, "reason": "import numpy", "num_tokens": 8193}
|
import unittest
from prime.input import Field, Parametrization, Intertwiner, InverseIntertwiner
from prime.utils import phis, to_tensor
from prime.output import ConstantOutputCoefficient
import sympy as sp
import numpy as np
class GenerateConstantOutputCoefficient(unittest.TestCase):
# Setup
phis = phis(6)
g = Field([[-1 + phis[0], phis[1] / sp.sqrt(2), phis[2] / sp.sqrt(2)],
[phis[1]/sp.sqrt(2), -1 + phis[3], phis[4] / sp.sqrt(2)],
[phis[2]/sp.sqrt(2), phis[4] / sp.sqrt(2), -1 + phis[5]]], [+1,+1])
# Setup the parametrization
param = Parametrization(fields=[g])
# Setup the intertwiners
I = Intertwiner(param)
J = InverseIntertwiner(I, order=1)
def test_others(self):
        local_phis = phis(4)
        g = Field(1 + local_phis[0], [])
        gg = Field([local_phis[1], local_phis[2], local_phis[3]])
        param = Parametrization(fields=[g])
I = Intertwiner(param)
J = InverseIntertwiner(I, order=1)
coeff = ConstantOutputCoefficient(param, J, 0, [2,2])
cJ = J.components[0]
gamma = np.eye(3)
def test_eights(self):
coeff = ConstantOutputCoefficient(GenerateConstantOutputCoefficient.param, GenerateConstantOutputCoefficient.J, 0, [2,2])
J = self.J.components[0]
gamma = np.eye(3)
def contracted(indices):
x = [chr(ord('a') + i) for i in indices]
s = "gamma[{},{}] * gamma[{},{}] * gamma[{},{}] * gamma[{},{}]".format(*x)
@to_tensor(shape=(6,3,3,6,3,3))
def fn(A,c,d,B,g,h):
return sum([
J[A,a,b] * J[B,e,f] * eval(s, { "J": J, "gamma": gamma, "a": a, "b": b, "c": c, "d": d, "e": e, "f": f, "g": g, "h": h })
for a in range(3) for b in range(3) for e in range(3) for f in range(3)])
return fn
basisTensors = [[0, 1, 2, 3, 4, 5, 6, 7],
[0, 1, 2, 3, 4, 6, 5, 7],
[0, 1, 2, 4, 3, 5, 6, 7],
[0, 1, 2, 4, 3, 6, 5, 7],
[0, 1, 2, 6, 3, 7, 4, 5],
[0, 2, 1, 3, 4, 5, 6, 7],
[0, 2, 1, 3, 4, 6, 5, 7],
[0, 2, 1, 4, 3, 5, 6, 7],
[0, 2, 1, 4, 3, 6, 5, 7],
[0, 2, 1, 6, 3, 4, 5, 7],
[0, 2, 1, 6, 3, 7, 4, 5],
[0, 4, 1, 5, 2, 3, 6, 7],
[0, 4, 1, 5, 2, 6, 3, 7],
[0, 4, 1, 6, 2, 3, 5, 7],
[0, 4, 1, 6, 2, 5, 3, 7],
[0, 6, 1, 7, 2, 3, 4, 5],
[0, 6, 1, 7, 2, 4, 3, 5]]
# Generate the correct result
#results = [contracted(t) for t in basisTensors]
#reshaped = [t.reshape(-1) for t in results]
# Generate the contraction
contraction = coeff.generateAllContractions()[0]
# For each contraction generate the tensor shape
tensorShape = coeff.generateTensorShape(contraction)
# Generate the basis tensors
basisTensor = coeff.generateBasisTensor(contraction, tensorShape)
calculatedResults = [coeff.generateContractedBasisTensor(contraction, tensorShape, t) for t in basisTensor.indices]
calculatedReshaped = [t.reshape(-1) for t in calculatedResults]
#self.assertEqual(len(results), len(calculatedResults))
# Check if all the results are the same
#for a,b in zip(reshaped, calculatedReshaped):
# self.assertTrue(np.array_equal(a,b))
# Symmetrize the tensors in (c d) and (g h)
results = [(t.transpose((0,1,2,3,4,5)) + t.transpose((0,2,1,3,4,5)))/2 for t in calculatedResults]
results = [(t + t.transpose((0,1,2,3,5,4)))/2 for t in results]
results = [(t + t.transpose((3,4,5,0,1,2)))/2 for t in results]
reshaped = [t.reshape(-1) for t in results]
_, ids = sp.Matrix(reshaped).T.rref(simplify=True, iszerofunc=lambda x:abs(x)<1e-13)
print(np.linalg.matrix_rank(reshaped))
print(len(ids))
print(ids)
return
|
{"hexsha": "c34e17ddcbfccd45312beeb59cb6642dd06f8d3f", "size": 4013, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/output/test_constant_coefficient.py", "max_stars_repo_name": "florianwolz/prime", "max_stars_repo_head_hexsha": "5ac29c523c5730e1d50c5d4703658427fdddfac3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/output/test_constant_coefficient.py", "max_issues_repo_name": "florianwolz/prime", "max_issues_repo_head_hexsha": "5ac29c523c5730e1d50c5d4703658427fdddfac3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-03-04T07:19:36.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-04T07:19:36.000Z", "max_forks_repo_path": "tests/output/test_constant_coefficient.py", "max_forks_repo_name": "florianwolz/prime", "max_forks_repo_head_hexsha": "5ac29c523c5730e1d50c5d4703658427fdddfac3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2991452991, "max_line_length": 141, "alphanum_fraction": 0.5220533267, "include": true, "reason": "import numpy,import sympy", "num_tokens": 1395}
|
from __future__ import division
from collections import defaultdict
from skimage import io
import numpy as np
import os, glob, sys
"""This script requests all of the tiles required to build up a
given jp2 image, and after fetching each of those tiles, rebuilds
the retrieved images into one large jp2 file"""
def request_image_tiles(query_root, image_id, offset=0, dry_run=0):
"""Read in an image id (e.g. 15691352) and request each of
the image tiles for the current image"""
# the query root contains the base url for queries. One must add
# x,y to the query for it to resolve, where x = zoom depth {1:5}
# and y is the tile index
query_root += str(image_id) + "/" + str(image_id) + ".jp2&CNT=1&SDS=0,90&JTL="
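  # e.g. with image_id 15691352, zoom depth 6 and tile index 0, the final request URL is
  #   <base query_root>15691352/15691352.jp2&CNT=1&SDS=0,90&JTL=6,0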
if dry_run:
    print(query_root)
sys.exit()
if not os.path.exists(out_dir):
os.mkdir(out_dir)
  for i in range(tiles_to_request):
try:
query = 'wget "' + query_root + str(zoom_depth) + ',' + str(i+offset)
query += '" -O ' + out_dir + "/" + str(i+offset) + ".jp2"
      print("request image from:", query)
os.popen(query)
except Exception as exc:
      print(exc)
def combine_image_tiles():
"""Read in all of the image tiles just produced and create an
output image that combines those image tiles"""
# create a dictionary to map image index to image array
d = defaultdict(lambda: defaultdict())
# iterate over all images
for f in glob.glob(out_dir + "/*.jp2"):
    # if a request did not fetch an image, the file will be very small (< 700 bytes)
size = os.path.getsize(f)
if size < 700:
continue
else:
# fetch the image index position
index_position = int( os.path.basename(f).split(".jp2")[0] )
# use the index position to identify row and column values
row_index = int(index_position / images_per_row)
      # then use the row index and the index position to get the column (0-based)
column_index = (index_position - (row_index * images_per_row))
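      # e.g. with images_per_row = 34, tile index 70 maps to row 2, column 2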
# read the jp2 data into memory as a numpy array
jp2_array = io.imread(f, plugin='freeimage')
d[row_index][column_index] = jp2_array
# with all the arrays in memory, build the master image
master_image = []
  for row in sorted(d.keys()):  # assemble rows in ascending order
combined_row = []
    for column in sorted(d[row].keys()):  # assemble columns in ascending order
combined_row.append(d[row][column])
# combine the arrays horizontally
combined_array = np.hstack(combined_row)
master_image.append(combined_array)
#io.imsave(str(row) + ".png", combined_array)
# combine all the rows into a single image
master_array = np.vstack(master_image)
# write the result
io.imsave(str(image_id) + ".png", master_array)
if __name__ == "__main__":
# specify the query root for the url to fetch
query_root = "http://brbl-zoom.library.yale.edu/fcgi-bin/iipsrv.fcgi?FIF=SML_MAPS_TRANSFER/7/"
# specify the zoom depth to fetch
zoom_depth = 6
# specify the number of tiles to request
tiles_to_request = 2000
# specify the output directory
out_dir = "yale_map_image_tiles"
# specify the image id to fetch
image_id = "15691437"
request_image_tiles(query_root, image_id, offset=0)
# once the individual images are retrieved, manually examine
# them to see how many images are placed on each row
# (i.e. the first image with black border on its left)
images_per_row = 34
#combine_image_tiles()
|
{"hexsha": "a433ff394ea73e7c0356153aef798ac29ff1afc7", "size": 3429, "ext": "py", "lang": "Python", "max_stars_repo_path": "app/utils/request_yale_map_image_tiles.py", "max_stars_repo_name": "YaleDHLab/gathering-a-building", "max_stars_repo_head_hexsha": "f8ad3f2be0e93090ab4e50755ef81f3b412901c7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-07-14T15:49:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-07T16:50:46.000Z", "max_issues_repo_path": "app/utils/request_yale_map_image_tiles.py", "max_issues_repo_name": "YaleDHLab/gathering-a-building", "max_issues_repo_head_hexsha": "f8ad3f2be0e93090ab4e50755ef81f3b412901c7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 46, "max_issues_repo_issues_event_min_datetime": "2016-07-25T13:59:39.000Z", "max_issues_repo_issues_event_max_datetime": "2018-01-16T20:32:06.000Z", "max_forks_repo_path": "app/utils/request_yale_map_image_tiles.py", "max_forks_repo_name": "YaleDHLab/gathering-a-building", "max_forks_repo_head_hexsha": "f8ad3f2be0e93090ab4e50755ef81f3b412901c7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2016-07-27T17:40:33.000Z", "max_forks_repo_forks_event_max_datetime": "2016-08-05T21:44:32.000Z", "avg_line_length": 28.575, "max_line_length": 96, "alphanum_fraction": 0.6859142607, "include": true, "reason": "import numpy", "num_tokens": 899}
|
import pandas as pd
import numpy as np
import scipy.stats
import scipy.special
import pysam
import h5py
import multiprocessing as mp
import itertools as it
import math
import os
from DIGDriver.sequence_model import sequence_tools
from DIGDriver.sequence_model import nb_model
DNA53 = 'NTCGA'
DNA35 = 'NAGCT'
trans = DNA53.maketrans(DNA53, DNA35)
DNA = 'ACGT'
NUC = 'CT'
prod_items = [DNA] + [NUC] + [DNA]
keys = [''.join(tup) for tup in it.product(*prod_items)]
def reverse_complement(seq):
return seq[::-1].translate(trans)
##called by genic_model_parallel
# def genic_model(df_obs, genes_lst, f_pretrained_str, f_genic_str, f_genome_counts, mapp):
def genic_model(genes_lst, f_pretrained_str, f_genic_str, counts_key, indels_direct):
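    """Pre-compute per-gene model parameters for one chunk of genes.
    For each autosomal gene the function sums the per-context substitution
    probabilities over its coding positions (P_MIS, P_NONS, P_SILENT, P_SPLICE,
    P_TRUNC), aggregates the pretrained regional parameters (MU, SIGMA) of the
    windows overlapping the CDS, and records region sizes, observed counts and
    indel counterparts. Returns a DataFrame with one row per gene."""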
df_out = pd.DataFrame()
f_genic = h5py.File(f_genic_str, 'r')
subst_idx = f_genic['substitution_idx'][:].astype(str)
cds_counts = pd.read_hdf(f_genic_str, counts_key)
all_windows_df = pd.read_hdf(f_pretrained_str, 'region_params')
window = all_windows_df.iloc[0][2]-all_windows_df.iloc[0][1]
df_mut = pd.read_hdf(f_pretrained_str, key='sequence_model_192')
mut_probs_idx = [r[1] + '>' + r[1][0] + r[0][2] + r[1][2] for r in zip(df_mut.MUT_TYPE, df_mut.CONTEXT)]
d_pr = pd.DataFrame(df_mut.FREQ.values, mut_probs_idx)
if indels_direct:
all_indels_df = pd.read_hdf(f_pretrained_str, 'region_params_indels')
chrom_lst = []
genes_used = []
# mis_obs_lst = []
# nons_obs_lst = []
# stop_loss_obs_lst = []
# silent_obs_lst = []
# splice_obs_lst = []
# trunc_obs_lst = []
p_silent_lst = []
p_mis_lst = []
p_nons_lst = []
p_splice_lst = []
p_trunc_lst = []
p_ind_lst = []
mu_lst = []
s_lst = []
mu_ind_lst = []
s_ind_lst = []
# pvals_mis = []
# pvals_nons = []
# pvals_silent = []
# pvals_splice = []
# pvals_trunc = []
R_size_lst = []
R_obs_lst = []
R_ind_lst = []
flag_lst = []
gene_len_lst = []
# exp_mis_lst = []
# exp_nons_lst = []
# exp_silent_lst = []
# exp_splice_lst = []
# exp_trunc_lst = []
for gene in genes_lst:
chrom = f_genic['chr'][gene][:][0].decode("utf-8")
if chrom == 'X' or chrom =='Y':
continue
# obs_counts = df_obs.loc[gene]
# obs_mis = obs_counts['Missense']
# obs_nons = obs_counts['Nonsense']
# obs_stop_loss = obs_counts['Stop_loss']
# obs_silent = obs_counts['Synonymous']
# obs_splice = obs_counts['Essential_Splice']
# obs_trunc = obs_nons + obs_splice
# mis_obs_lst.append(obs_mis)
# nons_obs_lst.append(obs_nons)
# stop_loss_obs_lst.append(obs_stop_loss)
# silent_obs_lst.append(obs_silent)
# splice_obs_lst.append(obs_splice)
# trunc_obs_lst.append(obs_trunc)
genes_used.append(gene)
chrom_lst.append(chrom)
intervals = f_genic['cds_intervals'][gene][:]
L = pd.DataFrame(f_genic['L_data'][gene][:].T, index = subst_idx, columns=[0,1,2,3])
context_counts = pd.DataFrame(cds_counts.loc[gene].T.values, index=cds_counts.columns)
#replacing genic_seq_model with precounted cds
prob_sum = context_counts * d_pr
t_pi = d_pr / prob_sum[0].sum()
t_pi = pd.concat([t_pi] * (4), axis=1, ignore_index=True)
pi_sums = t_pi * L
p_silent = pi_sums[0].sum()
p_mis = pi_sums[1].sum()
p_nons = pi_sums[2].sum()
p_splice = pi_sums[3].sum()
p_trunc = p_nons + p_splice
p_mis_lst.append(p_mis)
p_nons_lst.append(p_nons)
p_silent_lst.append(p_silent)
p_splice_lst.append(p_splice)
p_trunc_lst.append(p_trunc)
mu,sigma,R_obs,flag = get_region_params(all_windows_df, chrom, intervals, window)
mu_lst.append(mu)
s_lst.append(sigma)
flag_lst.append(flag)
# pval_mis = calc_pvalue(mu, sigma, p_mis, obs_mis)
# pvals_mis.append(pval_mis)
# pval_nons = calc_pvalue(mu, sigma, p_nons, obs_nons)
# pvals_nons.append(pval_nons)
# pval_silent = calc_pvalue(mu, sigma, p_silent, obs_silent)
# pvals_silent.append(pval_silent)
# pval_splice = calc_pvalue(mu, sigma, p_splice, obs_splice)
# pvals_splice.append(pval_splice)
# pval_trunc = calc_pvalue(mu, sigma, p_trunc, obs_trunc)
# pvals_trunc.append(pval_trunc)
R_size_lst.append(int(cds_counts.loc[gene, :].sum() / 3)) ## length of region containing gene
R_obs_lst.append(R_obs)
# alpha, theta = nb_model.normal_params_to_gamma(mu,sigma)
# exp_mis_lst.append(alpha*theta*p_mis)
# exp_nons_lst.append(alpha*theta*p_nons)
# exp_silent_lst.append(alpha*theta*p_silent)
# exp_splice_lst.append(alpha*theta*p_splice)
# exp_trunc_lst.append(alpha*theta*p_trunc)
## Deal with indel model
# gene_len_lst.append(int(np.sum(L.values) / 3))
gene_len_lst.append(np.sum(intervals[1, :] - intervals[0, :] + 1))
p_ind_lst.append(gene_len_lst[-1] / R_size_lst[-1])
if indels_direct:
mu_ind,sigma_ind,R_ind, _ = get_region_params(all_indels_df, chrom, intervals, window)
else:
mu_ind, sigma_ind, R_ind = mu, sigma, R_obs
mu_ind_lst.append(mu_ind)
s_ind_lst.append(sigma_ind)
R_ind_lst.append(R_ind)
df_out['CHROM'] = chrom_lst
df_out['GENE'] = genes_used
# df_out['OBS_MIS'] = mis_obs_lst
# df_out['OBS_NONS'] = nons_obs_lst
# df_out['OBS_SILENT']= silent_obs_lst
# df_out['OBS_SPLICE']= splice_obs_lst
# df_out['OBS_TRUNC']= trunc_obs_lst
# df_out['EXP_MIS'] = exp_mis_lst
# df_out['EXP_NONS'] = exp_nons_lst
# df_out['EXP_SILENT'] = exp_silent_lst
# df_out['EXP_SPLICE']= exp_splice_lst
# df_out['EXP_TRUNC']= exp_trunc_lst
df_out['GENE_LENGTH'] = gene_len_lst
df_out['R_SIZE'] = R_size_lst
df_out['R_OBS'] = R_obs_lst
df_out['R_INDEL'] = R_ind_lst
df_out['MU'] = mu_lst
df_out['SIGMA'] = s_lst
df_out['MU_INDEL'] = mu_ind_lst
df_out['SIGMA_INDEL'] = s_ind_lst
df_out['FLAG'] = flag_lst
# df_out['PVAL_MIS'] = pvals_mis
# df_out['PVAL_NONS'] = pvals_nons
# df_out['PVAL_SILENT'] = pvals_silent
# df_out['PVAL_SPLICE'] = pvals_splice
# df_out['PVAL_TRUNC'] = pvals_trunc
df_out['P_MIS'] = p_mis_lst
df_out['P_NONS'] = p_nons_lst
df_out['P_SILENT'] = p_silent_lst
df_out['P_SPLICE'] = p_splice_lst
df_out['P_TRUNC'] = p_trunc_lst
df_out['P_INDEL'] = p_ind_lst
f_genic.close()
return df_out
# def genic_model_parallel(mut_obs_df, f_pretrained_str, f_genic_str, f_genome_counts, mapp, N_procs):
def genic_model_parallel(f_pretrained_str, f_genic_str, N_procs, counts_key="window_10kb/counts", indels_direct=False):
## Parallel chunk parameters:
f_genic = h5py.File(f_genic_str, 'r')
all_genes = list(f_genic['cds_intervals'].keys())
f_genic.close()
chunksize = int(np.ceil(len(all_genes) / N_procs))
res = []
pool = mp.Pool(N_procs)
for i in np.arange(0, len(all_genes), chunksize):
gene_chunk = all_genes[i:i+chunksize]
r = pool.apply_async(genic_model, (gene_chunk, f_pretrained_str, f_genic_str, counts_key, indels_direct))
# r = pool.apply_async(genic_model, (mut_obs_df, gene_chunk, f_pretrained_str, f_genic_str, f_genome_counts, mapp))
res.append(r)
pool.close()
pool.join()
res_lst = [r.get() for r in res]
complete = pd.concat(res_lst)
return complete
# finds estimated region parameters (mu, sigma) for a given gene
# inputs : df - gp results df with chr locs and region parameters
#          chrom - gene chrom
#          intervals - 2d numpy array of start, end positions of cds regions
#          window - size of the pretrained genome windows
# output : summed mu, combined sigma (sqrt of the summed variances), summed
#          observed counts and OR-ed FLAG over the non-duplicated overlapping regions
def get_region_params(df, chrom, intervals, window):
ideal = get_ideal_overlaps(chrom, intervals, window)
mu_sum = 0
var_sum = 0
R_obs_sum = 0
FLAG = False
ideal = [trip_to_str(r) for r in ideal]
for r in ideal:
mu_sum += df.loc[r, 'Y_PRED']
var_sum += df.loc[r, 'STD']**2
R_obs_sum += df.loc[r, 'Y_TRUE']
FLAG += df.loc[r, 'FLAG']
mu = mu_sum
sigma = np.sqrt(var_sum)
return mu, sigma, R_obs_sum, FLAG
# finds estimated region parameters (mu, sigma) for a given element,
# using a precomputed list of overlapping windows directly
# inputs : df - gp results df with chr locs and region parameters
#          overlaps - list of (chrom, start, end) windows covering the element
# output : summed mu, combined sigma (sqrt of the summed variances), summed
#          observed counts and OR-ed FLAG over the non-duplicated overlapping regions
def get_region_params_direct(df, overlaps, window):
mu_sum = 0
var_sum = 0
R_obs_sum = 0
FLAG = False
ideal = [trip_to_str(r) for r in overlaps]
for r in ideal:
mu_sum += df.loc[r, 'Y_PRED']
var_sum += df.loc[r, 'STD']**2
R_obs_sum += df.loc[r, 'Y_TRUE']
FLAG += df.loc[r, 'FLAG']
mu = mu_sum
sigma = np.sqrt(var_sum)
return mu, sigma, R_obs_sum, FLAG
def get_ideal_overlaps(chrom, intervals, window):
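    """Return the unique (chrom, start, end) fixed-size windows overlapping the
    given intervals; window boundaries are rounded down/up to multiples of
    `window`."""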
region_lst = []
for i in intervals.T:
low = math.floor(i[0].min() / window) * window
high = math.ceil(i[1].max() / window) * window
borders = np.arange(low, high +window, window)
for i in range(len(borders)-1):
region_lst.append((chrom,int(borders[i]), int(borders[i+1])))
return list(set(region_lst))
def trip_to_str(trip):
return 'chr{}:{}-{}'.format(trip[0], trip[1], trip[2])
def get_elt_ideal_overlaps(chrom, start, end, window):
region_lst = []
low = math.floor(start / window) * window
high = math.ceil(end / window) * window
borders = np.arange(low, high + window, window)
for i in range(len(borders)-1):
region_lst.append((int(chrom),borders[i], borders[i+1]))
return list(set(region_lst))
# def nonc_model(df_nonc_obs, f_pretrained, f_nonc_data, save_key, f_sites = False):
def nonc_model(elt_lst, f_pretrained, f_nonc_data, save_key, indels_direct):
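    """Noncoding analogue of genic_model: for each element in elt_lst, sum the
    per-context substitution probabilities over its positions (P_SUM), aggregate
    the pretrained regional parameters (MU, SIGMA) of the windows it overlaps,
    and record element/region sizes, observed counts and indel counterparts."""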
# if f_sites:
# df_nonc_obs = df_nonc_obs.copy().astype({'ELT':str, 'OBS_SAMPLES':int, 'OBS_MUT':int})
# else:
# df_nonc_obs = df_nonc_obs.copy().astype({'CHROM':int, 'ELT':str, 'STRAND':str,
# 'BLOCK_STARTS':object, 'BLOCK_ENDS':object, 'OBS_SAMPLES':int,'OBS_MUT':int})
all_windows_df = pd.read_hdf(f_pretrained, 'region_params')
window = all_windows_df.iloc[0][2]-all_windows_df.iloc[0][1]
window_key = 'window_{}'.format(window)
# if window < 1000:
# print("Warning: Model is not intended for use with windows < 1kb")
# window_key = '{}bp'.format(window)
# else:
# window_key = '{}kb'.format(int(window/1000))
if indels_direct:
all_indels_df = pd.read_hdf(f_pretrained, 'region_params_indels')
nonc_data = h5py.File(f_nonc_data, 'r')
df_mut = pd.read_hdf(f_pretrained, key='sequence_model_192')
mut_model_idx = [r[1] + '>' + r[1][0] + r[0][2] + r[1][2] for r in zip(df_mut.MUT_TYPE, df_mut.CONTEXT)]
d_pr = pd.DataFrame(df_mut.FREQ.values, mut_model_idx)
d_pr = d_pr.sort_index()[0].values
p_mut_lst = []
p_ind_lst = []
mu_lst = []
s_lst = []
mu_ind_lst = []
s_ind_lst = []
# pvals_lst = []
R_obs_lst = []
R_size_lst = []
R_ind_lst = []
flag_lst = []
elt_len_lst = []
# exp_lst = []
# exp_samples_lst = []
# pval_samples_lst = []
# for _, row in df_nonc_obs.iterrows():
for elt in elt_lst:
# obs_samp = row.OBS_SAMPLES
# obs_mut = row.OBS_MUT
# elt = row.ELT
# if f_sites:
# region_counts = nonc_data['{}/sites_data/{}/{}/region_counts'.format(window_key, save_key, elt)][:]
# L = nonc_data['{}/sites_data/{}/{}/L_counts'.format(window_key, save_key, elt)][:]
# overlaps = nonc_data['{}/sites_data/{}/{}'.format(window_key, save_key, elt)].attrs['overlaps']
# else:
region_counts = nonc_data['{}/{}/{}/region_counts'.format(window_key, save_key, elt)][:]
L = nonc_data['{}/{}/{}/L_counts'.format(window_key, save_key, elt)][:]
overlaps = nonc_data['{}/{}/{}'.format(window_key, save_key, elt)].attrs['overlaps']
prob_sum = region_counts * d_pr
if prob_sum.sum() == 0:
print(elt, overlaps)
t_pi = d_pr / prob_sum.sum()
p_mut = (t_pi * L).sum()
p_mut_lst.append(p_mut)
mu, sigma, R_obs, FLAG = get_region_params_direct(all_windows_df, overlaps, window)
mu_lst.append(mu)
s_lst.append(sigma)
flag_lst.append(FLAG)
# pval_mut = calc_pvalue(mu, sigma, p_mut, obs_mut)
# pvals_lst.append(pval_mut)
R_size_lst.append(int(region_counts.sum() / 3)) ## length of region containing gene
R_obs_lst.append(R_obs)
# alpha, theta = nb_model.normal_params_to_gamma(mu,sigma)
# exp_lst.append(alpha*theta*p_mut)
elt_len_lst.append(int(np.sum(L) / 3))
p_ind_lst.append(elt_len_lst[-1] / R_size_lst[-1])
if indels_direct:
mu_ind,sigma_ind,R_ind, _ = get_region_params_direct(all_indels_df, overlaps, window)
else:
mu_ind, sigma_ind, R_ind = mu, sigma, R_obs
mu_ind_lst.append(mu_ind)
s_ind_lst.append(sigma_ind)
R_ind_lst.append(R_ind)
# if obs_mut == 0:
# samples_scaling = 1
# else:
# samples_scaling = obs_samp / obs_mut
# pval_sample = calc_pvalue(mu, sigma, samples_scaling * p_mut, obs_samp)
# pval_samples_lst.append(pval_sample)
# exp_samples_lst.append(alpha*theta*p_mut*samples_scaling)
nonc_data.close()
# q_lst = nb_model.get_q_vals(np.array(pvals_lst))
df_nonc = pd.DataFrame({
'ELT': elt_lst,
'ELT_SIZE': elt_len_lst,
'FLAG': flag_lst,
'R_SIZE': R_size_lst,
'R_OBS': R_obs_lst,
'R_INDEL': R_ind_lst,
'MU': mu_lst,
'SIGMA': s_lst,
'MU_INDEL': mu_ind_lst,
'SIGMA_INDEL': s_ind_lst,
'P_SUM': p_mut_lst,
'P_INDEL': p_ind_lst
})
# df_nonc_obs['EXP_MUTS'] = exp_lst
# df_nonc_obs['EXP_SAMPLES'] = exp_samples_lst
# df_nonc_obs['R_OBS'] = R_obs_lst
# df_nonc_obs['MU'] = mu_lst
# df_nonc_obs['SIGMA'] = s_lst
# df_nonc_obs['PVAL'] = pvals_lst
# df_nonc_obs['PVAL_SAMPLES'] = pval_samples_lst
# df_nonc_obs['P_SUM'] = p_mut_lst #sum of pi values
# df_nonc_obs['QVAL'] = q_lst #fdr corrected q value
# if not f_sites:
# df_nonc_obs = df_nonc_obs.drop(['BLOCK_STARTS', 'BLOCK_ENDS'], axis=1)
# return df_nonc_obs
return df_nonc
# def nonc_model_parallel(df_nonc_obs, f_pretrained, f_nonc_data, nonc_L_key, N_procs, f_sites = False):
def nonc_model_parallel(f_pretrained, f_nonc_data, nonc_L_key, N_procs, indels_direct=False):
with h5py.File(f_pretrained, 'r') as h5_pre:
window = h5_pre['idx'][0, 2] - h5_pre['idx'][0, 1]
window_key = 'window_{}'.format(window)
with h5py.File(f_nonc_data, 'r') as h5_nc:
elt_lst = list(h5_nc['{}/{}'.format(window_key, nonc_L_key)].keys())
## Parallel chunk parameters:
chunksize = int(np.ceil(len(elt_lst) / N_procs))
# chunksize = int(np.ceil(len(df_nonc_obs) / N_procs))
res = []
pool = mp.Pool(N_procs)
for i in np.arange(0, len(elt_lst), chunksize):
# df = df_nonc_obs.iloc[i:i+chunksize]
elt_chunk = elt_lst[i:i+chunksize]
r = pool.apply_async(nonc_model,(elt_chunk, f_pretrained, f_nonc_data, nonc_L_key, indels_direct))
# r = pool.apply_async(nonc_model,(df, f_pretrained, f_nonc_data, nonc_L_key, f_sites))
res.append(r)
pool.close()
pool.join()
res_lst = [r.get() for r in res]
complete = pd.concat(res_lst)
return complete
def nonc_model_region_parallel(f_bed, f_pretrained, f_nonc_data, nonc_L_key, N_procs, f_sites = None):
""" Pretrain a noncoding model based on regions defined in a bed file
Context counts for each (sub)element should have been precounted and
stored in f_nonc_data under key nonc_L_key
"""
### Read in regions from bed file
print('Parsing regions bed file')
df_nonc = pd.read_table(f_bed, names=['CHROM', 'START', 'END', "ELT", "SCORE", "STRAND", 'thickStart', 'thickEnd', 'rgb', 'blockCount', 'blockSizes', 'blockStarts'], low_memory=False)
df_nonc.CHROM = df_nonc.CHROM.astype(str)
df_nonc = df_nonc[df_nonc.CHROM.isin([str(c) for c in range(1, 23)])]
df_nonc.CHROM = df_nonc.CHROM.astype(int)
def _get_starts(row):
str_starts = row.blockStarts
if str_starts.endswith(','):
str_starts = str_starts[:-1]
return [int(x)+row.START for x in str_starts.split(",")]
def _get_ends(row):
str_sizes = row.blockSizes
if str_sizes.endswith(','):
str_sizes = str_sizes[:-1]
sizes = [int(x) for x in str_sizes.split(',')]
return [START + SIZE for START, SIZE in zip(row.BLOCK_STARTS, sizes)]
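    # e.g. a row with START=100, blockStarts="0,50" and blockSizes="10,20"
    # yields BLOCK_STARTS = [100, 150] and BLOCK_ENDS = [110, 170]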
df_nonc['BLOCK_STARTS'] = df_nonc.apply(_get_starts, axis=1)
df_nonc['BLOCK_ENDS'] = df_nonc.apply(_get_ends, axis=1)
df_nonc = df_nonc[['CHROM', 'ELT', 'STRAND', 'BLOCK_STARTS', 'BLOCK_ENDS']]
# print(df_nonc[df_nonc.ELT == 'TMEM240'])
# return
## Parallel chunk parameters:
print('Pretraining model')
chunksize = int(np.ceil(len(df_nonc) / N_procs))
res = []
pool = mp.Pool(N_procs)
for i in np.arange(0, len(df_nonc), chunksize):
df = df_nonc.iloc[i:i+chunksize]
r = pool.apply_async(nonc_model_region,(df, f_pretrained, f_nonc_data, nonc_L_key, f_sites))
res.append(r)
pool.close()
pool.join()
res_lst = [r.get() for r in res]
complete = pd.concat(res_lst)
return complete
def nonc_model_region(df_nonc, f_pretrained, f_nonc_data, nonc_L_key, f_sites = None, return_intermediates=False):
df_nonc = df_nonc.copy().astype({'CHROM':int, 'ELT':str, 'STRAND':str, 'BLOCK_STARTS':object, 'BLOCK_ENDS':object,})
L_counts = pd.read_hdf(f_nonc_data, nonc_L_key)
all_windows_df = pd.read_hdf(f_pretrained, 'region_params')
window = all_windows_df.iloc[0][2]-all_windows_df.iloc[0][1]
nonc_data = h5py.File(f_nonc_data, 'r')
idx = nonc_data['full_window_si_index'][:]
idx_dict = dict(zip(map(tuple, idx), range(len(idx))))
df_mut = pd.read_hdf(f_pretrained, key='sequence_model_192')
mut_model_idx = [r[1] + '>' + r[1][0] + r[0][2] + r[1][2] for r in zip(df_mut.MUT_TYPE, df_mut.CONTEXT)]
subst_idx = sorted(mut_model_idx)
revc_subst_idx = [sequence_tools.reverse_complement(sub.split('>')[0]) + '>' + sequence_tools.reverse_complement(sub.split('>')[-1]) for sub in subst_idx]
revc_dic = dict(zip(subst_idx, revc_subst_idx))
d_pr = pd.DataFrame(df_mut.FREQ.values, mut_model_idx)
d_pr = d_pr.sort_index()[0].values
keys = set(list(subst_idx))
d = {key: 0 for key in sorted(keys)}
p_mut_lst = []
mu_lst = []
s_lst = []
pvals_lst = []
R_obs_lst = []
exp_lst = []
exp_samples_lst = []
pval_samples_lst = []
L_lst = []
t_pi_lst = []
for _, row in df_nonc.iterrows():
chrom = row.CHROM
strand = row.STRAND
block_starts = row[3]
block_ends = row[4]
elts_as_intervals = np.vstack((block_starts, block_ends))
overlaps = get_ideal_overlaps(chrom, elts_as_intervals, window)
region_counts = np.array([np.repeat(nonc_data['full_window_si_values'][idx_dict[region], :], 3) for region in overlaps]).sum(axis=0)
# if negative strand, take the reverse complement of the region counts
if strand == '-1' or strand == '-':
region_counts = [r[1] for r in sorted(enumerate(region_counts), key=lambda k: revc_dic[subst_idx[k[0]]])]
L = np.zeros((192))
for start, end in zip(block_starts, block_ends):
L += L_counts.loc['chr{}:{}-{}'.format(chrom, start,end)].values
prob_sum = region_counts * d_pr
t_pi = d_pr / prob_sum.sum()
p_mut = (t_pi * L).sum()
p_mut_lst.append(p_mut)
        mu, sigma, R_obs, _ = get_region_params(all_windows_df, chrom, elts_as_intervals, window)
mu_lst.append(mu)
s_lst.append(sigma)
R_obs_lst.append(R_obs)
if return_intermediates:
L_lst.append(L)
t_pi_lst.append(t_pi)
df_nonc['R_OBS'] = R_obs_lst
df_nonc['MU'] = mu_lst
df_nonc['SIGMA'] = s_lst
df_nonc['P_SUM'] = p_mut_lst #sum of pi values
if return_intermediates:
idx = sorted(mut_model_idx)
df_L = pd.DataFrame(L_lst, columns=idx, index=df_nonc.ELT)
df_pi = pd.DataFrame(t_pi_lst, columns=idx, index=df_nonc.ELT)
return df_nonc.drop(['BLOCK_STARTS', 'BLOCK_ENDS'], axis=1), df_L, df_pi
else:
return df_nonc.drop(['BLOCK_STARTS', 'BLOCK_ENDS'], axis=1)
def tiled_nonc_model(elt_lst, f_pretrained, f_nonc_data, save_key):
all_windows_df = pd.read_hdf(f_pretrained, 'region_params')
window = all_windows_df.iloc[0][2]-all_windows_df.iloc[0][1]
window_key = 'window_{}'.format(window)
nonc_data = h5py.File(f_nonc_data, 'r')
L_table = pd.read_hdf(f_nonc_data, "{}/L_counts".format(save_key))
idx = nonc_data['{}/full_window_si_index'.format(window_key)][:]
idx_dict = dict(zip(map(tuple, idx), range(len(idx))))
df_mut = pd.read_hdf(f_pretrained, key='sequence_model_192')
mut_model_idx = [r[1] + '>' + r[1][0] + r[0][2] + r[1][2] for r in zip(df_mut.MUT_TYPE, df_mut.CONTEXT)]
d_pr = pd.DataFrame(df_mut.FREQ.values, mut_model_idx)
d_pr = d_pr.sort_index()[0].values
p_mut_lst = []
mu_lst = []
s_lst = []
R_obs_lst = []
elt_len_lst = []
flag_lst = []
R_size_lst = []
R_ind_lst = []
mu_ind_lst = []
s_ind_lst = []
p_ind_lst = []
for elt in elt_lst:
pos = elt.split(":")[1]
chrom = int(elt.split(":")[0].lstrip("chr"))
start = int(pos.split("-")[0])
region_start = int(np.floor(start / 10000) * 10000)
L = L_table.loc[elt]
#L = nonc_data['{}/{}/{}/L_counts'.format(window_key, save_key, elt)][:]
#overlaps = nonc_data['{}/{}/{}'.format(window_key, save_key, elt)].attrs['overlaps']
region = (chrom, region_start, region_start+window)
region_counts = np.repeat(nonc_data['{}/full_window_si_values'.format(
window_key)][idx_dict[region], :], 3)
overlaps = [region]
prob_sum = region_counts * d_pr
t_pi = d_pr / prob_sum.sum()
p_mut = (t_pi * L).sum()
p_mut_lst.append(p_mut)
mu,sigma,R_obs,FLAG = get_region_params_direct(all_windows_df, overlaps, window)
flag_lst.append(FLAG)
R_size_lst.append(int(region_counts.sum() / 3))
elt_len_lst.append(int(np.sum(L) / 3))
p_ind_lst.append(elt_len_lst[-1] / R_size_lst[-1])
mu_ind, sigma_ind, R_ind = mu, sigma, R_obs
mu_ind_lst.append(mu_ind)
s_ind_lst.append(sigma_ind)
R_ind_lst.append(R_ind)
mu_lst.append(mu)
s_lst.append(sigma)
R_obs_lst.append(R_obs)
nonc_data.close()
#for compatibility with region files
elt_lst = [_index_transform(i) for i in elt_lst]
df_nonc = pd.DataFrame({
'ELT': elt_lst,
'ELT_SIZE': elt_len_lst,
'FLAG': flag_lst,
'R_SIZE': R_size_lst,
'R_OBS': R_obs_lst,
'R_INDEL': R_ind_lst,
'MU': mu_lst,
'SIGMA': s_lst,
'MU_INDEL': mu_ind_lst,
'SIGMA_INDEL': s_ind_lst,
'P_SUM': p_mut_lst,
'P_INDEL': p_ind_lst
})
return df_nonc
def tiled_model_parallel(f_pretrained, f_nonc_data, save_key, N_procs):
with h5py.File(f_pretrained, 'r') as h5_pre:
window = h5_pre['idx'][0, 2] - h5_pre['idx'][0, 1]
window_key = 'window_{}'.format(window)
elt_table = pd.read_hdf(f_nonc_data, "{}/L_counts".format(save_key))
elt_lst = elt_table.index
## Parallel chunk parameters:
chunksize = int(np.ceil(len(elt_lst) / N_procs))
# chunksize = int(np.ceil(len(df_nonc_obs) / N_procs))
res = []
pool = mp.Pool(N_procs)
for i in np.arange(0, len(elt_lst), chunksize):
# df = df_nonc_obs.iloc[i:i+chunksize]
elt_chunk = elt_lst[i:i+chunksize]
r = pool.apply_async(tiled_nonc_model,(elt_chunk, f_pretrained, f_nonc_data, save_key))
# r = pool.apply_async(nonc_model,(df, f_pretrained, f_nonc_data, nonc_L_key, f_sites))
res.append(r)
pool.close()
pool.join()
res_lst = [r.get() for r in res]
complete = pd.concat(res_lst)
return complete
def _index_transform(s):
chrom = int(s.split(":")[0].lstrip("chr"))
start = int(s.split(":")[-1].split('-')[0])
end = int(s.split(":")[-1].split('-')[1])
return "region_{}_{}_{}".format(chrom,start, end)
|
{"hexsha": "8ecd78a5fe4a78c8ad8452d1c4047f168402cc5e", "size": 25159, "ext": "py", "lang": "Python", "max_stars_repo_path": "DIGDriver/sequence_model/genic_driver_tools.py", "max_stars_repo_name": "maxwellsh/DIGDriver", "max_stars_repo_head_hexsha": "1f8503c8c22861d6f9b601fd8c5a131e3dc31fc1", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-08-07T00:05:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-07T00:05:48.000Z", "max_issues_repo_path": "DIGDriver/sequence_model/genic_driver_tools.py", "max_issues_repo_name": "maxwellsh/DIGDriver", "max_issues_repo_head_hexsha": "1f8503c8c22861d6f9b601fd8c5a131e3dc31fc1", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "DIGDriver/sequence_model/genic_driver_tools.py", "max_forks_repo_name": "maxwellsh/DIGDriver", "max_forks_repo_head_hexsha": "1f8503c8c22861d6f9b601fd8c5a131e3dc31fc1", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.6542699725, "max_line_length": 187, "alphanum_fraction": 0.634007711, "include": true, "reason": "import numpy,import scipy", "num_tokens": 7390}
|
[STATEMENT]
lemma the_pw_cat_lKe_colimit_components:
shows "the_pw_cat_lKe_colimit \<alpha> \<KK> \<TT> \<FF> c\<lparr>UObj\<rparr> = \<FF>\<lparr>ObjMap\<rparr>\<lparr>c\<rparr>"
and "the_pw_cat_lKe_colimit \<alpha> \<KK> \<TT> \<FF> c\<lparr>UArr\<rparr> = op_ntcf
(
the_pw_cat_rKe_limit \<alpha> (op_cf \<KK>) (op_cf \<TT>) (op_cf \<FF>) c\<lparr>UArr\<rparr> \<circ>\<^sub>N\<^sub>T\<^sub>C\<^sub>F\<^sub>-\<^sub>C\<^sub>F
op_cf_obj_comma \<KK> c
)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. the_pw_cat_lKe_colimit \<alpha> \<KK> \<TT> \<FF> c\<lparr>UObj\<rparr> = \<FF>\<lparr>ObjMap\<rparr>\<lparr>c\<rparr> &&& the_pw_cat_lKe_colimit \<alpha> \<KK> \<TT> \<FF> c\<lparr>UArr\<rparr> = op_ntcf (the_pw_cat_rKe_limit \<alpha> (op_cf \<KK>) (op_cf \<TT>) (op_cf \<FF>) c\<lparr>UArr\<rparr> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M op_cf_obj_comma \<KK> c)
[PROOF STEP]
unfolding the_pw_cat_lKe_colimit_def ua_field_simps
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. [\<FF>\<lparr>ObjMap\<rparr>\<lparr>c\<rparr>, op_ntcf (the_pw_cat_rKe_limit \<alpha> (op_cf \<KK>) (op_cf \<TT>) (op_cf \<FF>) c\<lparr>1\<^sub>\<nat>\<rparr> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M op_cf_obj_comma \<KK> c)]\<^sub>\<circ>\<lparr>[]\<^sub>\<circ>\<rparr> = \<FF>\<lparr>ObjMap\<rparr>\<lparr>c\<rparr> &&& [\<FF>\<lparr>ObjMap\<rparr>\<lparr>c\<rparr>, op_ntcf (the_pw_cat_rKe_limit \<alpha> (op_cf \<KK>) (op_cf \<TT>) (op_cf \<FF>) c\<lparr>1\<^sub>\<nat>\<rparr> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M op_cf_obj_comma \<KK> c)]\<^sub>\<circ>\<lparr>1\<^sub>\<nat>\<rparr> = op_ntcf (the_pw_cat_rKe_limit \<alpha> (op_cf \<KK>) (op_cf \<TT>) (op_cf \<FF>) c\<lparr>1\<^sub>\<nat>\<rparr> \<circ>\<^sub>T\<^sub>D\<^sub>G\<^sub>H\<^sub>M\<^sub>-\<^sub>D\<^sub>G\<^sub>H\<^sub>M op_cf_obj_comma \<KK> c)
[PROOF STEP]
by (simp_all add: nat_omega_simps)
|
{"llama_tokens": 1016, "file": "CZH_Universal_Constructions_czh_ucategories_CZH_UCAT_PWKan", "length": 2}
|
"""
WeightedBinaryLoss{L,W} <: SupervisedLoss
Can be used to represent a re-weighted version of some type of
binary loss `L`. The weight-factor `W`, which must be in `[0, 1]`,
denotes the relative weight of the positive class, while the
relative weight of the negative class will be `1 - W`.
For example: To create a typealias for a 1:4 weighted version of
`L2HingeLoss`, type:
```julia
const WeightedL2HingeLoss = LossFunctions.WeightedBinaryLoss{L2HingeLoss,0.2}
```
This new loss-type can then be instantiated in the same manner and
with the same parameters as the original unscaled loss-type.
In contrast, in order to only create a re-weighted instance of some
specific loss you can use `weightedloss(L2HingeLoss(), Val(0.2))`.
See `?weightedloss` for more information.
"""
struct WeightedBinaryLoss{L<:MarginLoss,W} <: SupervisedLoss
loss::L
WeightedBinaryLoss{L,W}(loss::L) where {L<:MarginLoss, W} = new{L,W}(loss)
end
@generated function (::Type{WeightedBinaryLoss{L,W}})(args...) where {L<:MarginLoss, W}
typeof(W) <: Number && 0 <= W <= 1 || _werror()
:(WeightedBinaryLoss{L,W}(L(args...)))
end
_werror() = throw(ArgumentError("The given \"weight\" has to be a number in the interval [0, 1]"))
@generated function WeightedBinaryLoss(loss::L, ::Val{W}) where {L<:MarginLoss,W}
typeof(W) <: Number && 0 <= W <= 1 || _werror()
:(WeightedBinaryLoss{L,W}(loss))
end
function WeightedBinaryLoss(loss::SupervisedLoss, w::Number)
WeightedBinaryLoss(loss, Val(w))
end
@generated function WeightedBinaryLoss(s::WeightedBinaryLoss{T,W1}, ::Val{W2}) where {T,W1,W2}
:(WeightedBinaryLoss(s.loss, Val($(W1*W2))))
end
for fun in (:value, :deriv, :deriv2)
@eval function ($fun)(l::WeightedBinaryLoss{L,W}, target::Number, output::Number) where {L,W}
# We interpret the W to be the weight of the positive class
if target > 0
W * ($fun)(l.loss, target, output)
else
(1-W) * ($fun)(l.loss, target, output)
end
end
end
"""
weightedloss(loss, weight)
Returns a weighted version of `loss` in which the value for the
positive class is scaled to `weight` times its original value, and the
value for the negative class to `1 - weight` times its original value.
Note: If `typeof(weight) <: Number`, then this method will poison the
type-inference of the calling scope. This is because `weight` will be
promoted to a type parameter. For a typestable version use the
following signature: `weightedloss(loss, Val(weight))`
"""
weightedloss(loss::Loss, weight::Number) = WeightedBinaryLoss(loss, weight)
weightedloss(loss::Loss, ::Val{W}) where {W} = WeightedBinaryLoss(loss, Val(W))
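# --- Hedged usage sketch (not part of the original source) ---
# Illustrates the two construction paths described in the docstring above and the
# effect of the weight on each class. `L2HingeLoss` is the loss named in the
# docstring; the weight 0.2 and the target/output values are arbitrary examples.
#
#   wloss  = weightedloss(L2HingeLoss(), Val(0.2))   # type-stable variant
#   wloss2 = weightedloss(L2HingeLoss(), 0.2)        # convenient, but not type-stable (see note above)
#
#   # positive targets are scaled by W = 0.2, negative targets by 1 - W = 0.8:
#   value(wloss,  1.0, 0.5) ≈ 0.2 * value(L2HingeLoss(),  1.0, 0.5)
#   value(wloss, -1.0, 0.5) ≈ 0.8 * value(L2HingeLoss(), -1.0, 0.5)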
# An α-weighted version of a classification calibrated margin loss is
# itself classification calibrated if and only if α == 1/2
isclasscalibrated(l::WeightedBinaryLoss{T,W}) where {T,W} = W == 0.5 && isclasscalibrated(l.loss)
# TODO: Think about this semantic
issymmetric(::WeightedBinaryLoss) = false
for prop in [:isminimizable, :isdifferentiable,
:istwicedifferentiable,
:isconvex, :isstrictlyconvex,
:isstronglyconvex, :isnemitski,
:isunivfishercons, :isfishercons,
:islipschitzcont, :islocallylipschitzcont,
:isclipable, :ismarginbased,
:isdistancebased]
@eval ($prop)(l::WeightedBinaryLoss) = ($prop)(l.loss)
end
for prop_param in (:isdifferentiable, :istwicedifferentiable)
@eval ($prop_param)(l::WeightedBinaryLoss, at) = ($prop_param)(l.loss, at)
end
|
{"hexsha": "da0f655b00571f28f36cc0bea99d87937fee415d", "size": 3538, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/supervised/weightedbinary.jl", "max_stars_repo_name": "johnnychen94/LossFunctions.jl", "max_stars_repo_head_hexsha": "ad125ad0749e1f99e4a82164d0eb2e631e91aa53", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/supervised/weightedbinary.jl", "max_issues_repo_name": "johnnychen94/LossFunctions.jl", "max_issues_repo_head_hexsha": "ad125ad0749e1f99e4a82164d0eb2e631e91aa53", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-04-15T06:37:50.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-15T06:37:50.000Z", "max_forks_repo_path": "src/supervised/weightedbinary.jl", "max_forks_repo_name": "johnnychen94/LossFunctions.jl", "max_forks_repo_head_hexsha": "ad125ad0749e1f99e4a82164d0eb2e631e91aa53", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.6382978723, "max_line_length": 98, "alphanum_fraction": 0.697003957, "num_tokens": 1004}
|
# This should probably be its own package
_cufunc(f,x) = f
_cufunc(f,x,xs...) = _cufunc(f, xs...)
using MacroTools
isbcastop(x) = isexpr(x, :call) && x.args[1] in :[.*,./,.+,.-].args
broadcast_inputs(ex) =
ex isa Symbol ? [ex] :
@capture(ex, f_.(args__)) ? vcat(broadcast_inputs.(args)...) :
isbcastop(ex) ? vcat(broadcast_inputs.(ex.args[2:end])...) :
[]
cudata(x) = x
macro fix(ex)
fs = []
ex = MacroTools.prewalk(ex) do ex
@capture(ex, f_.(args__)) || return ex
# May not work in cases like `x .+ log(1.0)` but w/e
xs = broadcast_inputs(ex)
f_ = gensym()
push!(fs, :($f_ = $_cufunc($f, $cudata.(($(xs...),))...)))
:($f_.($(args...)))
end
:($(fs...); $ex) |> esc
end
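# --- Hedged usage sketch (not part of the original source) ---
# `@fix` rewrites dot-broadcast calls so that every broadcast function is first
# routed through `_cufunc`, giving a hook to swap in GPU-friendly replacements
# when any of the broadcast inputs is GPU data. A hypothetical use:
#
#   y = @fix exp.(xs .+ 1)
#
# which expands (roughly) to:
#
#   exp′ = _cufunc(exp, cudata.((xs,))...)
#   exp′.(xs .+ 1)
#
# The names `y` and `xs` are illustrative only; `_cufunc` and `cudata` are the
# hooks defined above.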
|
{"hexsha": "a431e0d6e819a6b925a938283922860e8e32981e", "size": 715, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/cubroadcast.jl", "max_stars_repo_name": "gustafsson/NNlib.jl", "max_stars_repo_head_hexsha": "6b58df43a4de71bda3246e6cac2e073af10da899", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/cubroadcast.jl", "max_issues_repo_name": "gustafsson/NNlib.jl", "max_issues_repo_head_hexsha": "6b58df43a4de71bda3246e6cac2e073af10da899", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/cubroadcast.jl", "max_forks_repo_name": "gustafsson/NNlib.jl", "max_forks_repo_head_hexsha": "6b58df43a4de71bda3246e6cac2e073af10da899", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.8333333333, "max_line_length": 67, "alphanum_fraction": 0.5566433566, "num_tokens": 263}
|
module m_prms_streamflow
use variableKind
use prms_constants
! use, intrinsic :: iso_c_binding, only: c_sizeof
use, intrinsic :: iso_fortran_env, only: output_unit
use Control_class, only: Control
use Simulation_class, only: Simulation
use PRMS_BASIN, only: Basin
use PRMS_MUSKINGUM, only: Muskingum
! use ieee_arithmetic
! use ieee_features
implicit none
type :: prms_streamflow_model
!! Name of the control file
type(Control) :: control_data
!! Class of control file related parameters
type(Basin) :: parameter_data
!! Class of input parameters
type(Simulation) :: model_simulation
!! PRMS model simulation class
integer(i64) :: start_rtc
!! Starting system clock value
integer(i64) :: end_rtc
!! Ending system clock value
integer(i64) :: max_rtc
!! Maximum system clock ticks per second
integer(i64) :: rate_rtc
!! System clock ticks per second
real(r64) :: delta_rtc_sec
!! Elapsed system clock in seconds
real(r64) :: start_ct
!! Starting cpu time value
real(r64) :: end_ct
!! Ending cpu time value
real(r64) :: dummy_r64
real(r32) :: dummy_r32
end type prms_streamflow_model
contains
subroutine initialize_from_file(model, config_file)
use variableKind
use prms_constants
use Control_class, only: Control
!use Parameters_class, only: Parameters
use Simulation_class, only: Simulation
use, intrinsic :: iso_fortran_env, only: output_unit
implicit none
character (len=*), intent(in) :: config_file
type(prms_streamflow_model), target, intent(out) :: model
real(r64) :: dummy_r64
real(r32) :: dummy_r32
associate(start_rtc => model%start_rtc, end_rtc => model%end_rtc, max_rtc => model%max_rtc, &
rate_rtc => model%rate_rtc, delta_rtc_sec => model%delta_rtc_sec, &
start_ct => model%start_ct, end_ct => model%end_ct, &
control_data => model%control_data, &
parameter_data => model%parameter_data, &
model_simulation => model%model_simulation)
call system_clock(count=start_rtc, count_rate=rate_rtc, count_max=max_rtc)
call cpu_time(time=start_ct)
print *, 'CLOSEZERO, NEARZERO, DNEARZERO'
print *, CLOSEZERO, NEARZERO, DNEARZERO
print *, 'Ranges'
print *, 'r32: ', range(dummy_r32)
print *, 'smallest r32 value: ', tiny(dummy_r32)
print *, 'minexponent of r32: ', minexponent(dummy_r32)
print *, 'r64: ', range(dummy_r64)
! print *, 'r32 array memory footprint (109951 x 13505):', c_sizeof(arr_dummy_r32)
write(output_unit, fmt='(a)') repeat('=', 72)
!control_data = Control(config_file)
call Control_data%init(config_file)
! TODO: Other stuff to consider
! - variable: kkiter; Current iteration in GSFLOW simulation (when model_mode=GSFLOW)
! - code behavior when init_vars_from_file==1
! * how to handle having different combinations of physics modules
! TODO: How to handle allocation and reading of parameter variables depending
! on which physics modules are selected?
!parameter_data = Parameters(Control_data)
! TODO: Need routines for setting up output variables
! Initialize the simulation object
!model_simulation = Simulation(Control_data, Parameter_data)
call model_simulation%init(Control_data)
! 2019-08-08 PAN: This is rather kludgy...
! Close the parameter file
call Control_data%param_file_hdl%close()
write(output_unit, fmt='(a)') repeat('=', 72)
end associate
end subroutine
subroutine cleanup(model)
use, intrinsic :: iso_fortran_env, only: output_unit
implicit none
type(prms_streamflow_model), intent(inout) :: model
associate(start_rtc => model%start_rtc, end_rtc => model%end_rtc, max_rtc => model%max_rtc, &
rate_rtc => model%rate_rtc, delta_rtc_sec => model%delta_rtc_sec, &
start_ct => model%start_ct, end_ct => model%end_ct, &
model_simulation => model%model_simulation, &
control_data => model%control_data)
! Cleanup everything
write(output_unit, fmt='(a)') repeat('-', 72)
write(output_unit, fmt='(a)') 'Cleaning up...'
write(output_unit, fmt='(a)') repeat('-', 72)
call model%model_simulation%cleanup(Control_data)
call cpu_time(time=end_ct)
call system_clock(count=end_rtc)
if (Control_data%print_debug%value > -1) then
delta_rtc_sec = real(end_rtc - start_rtc, r64) / real(rate_rtc, r64)
write(output_unit, fmt='(a)') repeat('-', 72)
write(output_unit, fmt='(a, 1x, f16.4, 1x, a)') 'Elapsed system clock:', delta_rtc_sec, 'seconds.'
write(output_unit, fmt='(a, 1x, f16.4, 1x, a)') 'Elapsed cpu time:', end_ct - start_ct, 'seconds.'
endif
end associate
end subroutine cleanup
subroutine advance_in_time(model)
implicit none
type(prms_streamflow_model), intent(inout) :: model
type(Control) :: ctl_data
type(Simulation) :: this
logical :: res
associate(this=>model%model_simulation, &
ctl_data => model%control_data)
!if (.not. this%model_time%next(ctl_data, this%model_basin)) then
! call cleanup(model)
!else
! call solve_prms(model)
!endif
if(this%model_time%next()) then
call solve_prms(model)
endif
end associate
end subroutine advance_in_time
subroutine solve_prms(model)
use iso_fortran_env, only: output_unit
implicit none
type(prms_streamflow_model), intent(inout) :: model
!! Name of the control file
type(Control) :: ctl_data
!! Class of control file related parameters
!type(Parameters) :: param_data
!! Class of input parameters
type(Simulation) :: this
!! PRMS model simulation class
associate( this =>model%model_simulation, &
ctl_data => model%control_data)
!if (.not. this%model_time%next(ctl_data, this%model_basin)) exit
! print *, this%model_time%Nowyear, this%model_time%Nowmonth, this%model_time%Nowday
call this%model_basin%run(ctl_data, this%model_time)
call this%model_temp%run(ctl_data, this%model_basin, this%model_time, this%model_summary)
!! print *, '1'
!call this%model_precip%run(ctl_data, this%model_basin, this%model_temp, this%model_time, &
! this%model_summary)
!! call this%climate_by_hru%run(ctl_data, param_data, this%model_time, &
!! this%model_basin, this%potet, this%model_temp, &
!! this%climate)
!! print *, '2'
!call this%solrad%run(ctl_data, this%model_time, &
! this%model_precip, this%model_basin, this%model_temp)
!
!! print *, '3'
!call this%transpiration%run(ctl_data, this%model_time, &
! this%model_basin, this%model_temp)
!
!! print *, '4'
!call this%potet%run(ctl_data, this%model_basin, this%model_time, &
! this%solrad, this%model_temp)
!
!! print *, '5'
!call this%intcp%run(ctl_data, this%model_basin, this%potet, &
! this%model_precip, this%transpiration, this%climate, this%model_time)
!
!! print *, '6'
!call this%snow%run(ctl_data, this%model_basin, this%model_time, this%climate, &
! this%model_precip, this%model_temp, &
! this%intcp, this%solrad, this%potet, this%transpiration)
!
!! print *, '7'
!call this%runoff%run(ctl_data, this%model_basin, this%climate, &
! this%potet, this%intcp, this%snow, this%model_time)
!
!! print *, '8'
!call this%soil%run(ctl_data, this%model_basin, this%model_time, &
! this%potet, this%model_precip, this%climate, this%intcp, &
! this%snow, this%transpiration, this%runoff)
! print *, '9'
!call this%groundwater%run(ctl_data, this%model_basin, &
! this%climate, this%intcp, this%soil, this%runoff, &
! this%model_time)
!
! call this%model_route%run(ctl_data, param_data, this%model_basin, this%climate, this%groundwater, this%soil, this%runoff, this%model_time, this%solrad)
!! print *, '10'
call this%model_streamflow%run(ctl_data, this%model_basin, &
this%potet, this%groundwater, this%soil, &
this%runoff, this%model_time, this%solrad, &
this%model_obs)
if (ctl_data%outVarON_OFF%value == 1) then
call this%model_summary%run(ctl_data, this%model_time, this%model_basin)
end if
if (ctl_data%print_debug%value == 1) then
call this%model_waterbal%run(ctl_data, this%model_basin, &
this%climate, this%groundwater, this%intcp, &
this%model_precip, this%snow, this%soil, &
this%runoff, this%model_time)
endif
end associate
end subroutine solve_prms
end module m_prms_streamflow
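! --- Hedged usage sketch (not part of the original source) ---
! A minimal driver showing the intended call sequence of this module:
! initialize from a control file, advance the simulation in time, then clean up.
! The control-file name and the fixed step count are hypothetical; a real driver
! would obtain the number of timesteps from the control data.
!
!   program run_prms_streamflow
!     use m_prms_streamflow
!     implicit none
!     type(prms_streamflow_model) :: model
!     integer :: n
!     call initialize_from_file(model, 'control.default')  ! hypothetical control file
!     do n = 1, 365                                         ! hypothetical daily timestep count
!       call advance_in_time(model)
!     end do
!     call cleanup(model)
!   end program run_prms_streamflow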
|
{"hexsha": "874f55b88b23d1af079c5f781ce4a11ca5dda685", "size": 9662, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/m_prms_streamflow.f90", "max_stars_repo_name": "nhm-usgs/bmi-prms6-streamflow", "max_stars_repo_head_hexsha": "f670f20e3da156e3b1935cf23245d4c13fd0fbc9", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/m_prms_streamflow.f90", "max_issues_repo_name": "nhm-usgs/bmi-prms6-streamflow", "max_issues_repo_head_hexsha": "f670f20e3da156e3b1935cf23245d4c13fd0fbc9", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-05-14T22:18:31.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-14T22:18:31.000Z", "max_forks_repo_path": "src/m_prms_streamflow.f90", "max_forks_repo_name": "nhm-usgs/bmi-prms6-streamflow", "max_forks_repo_head_hexsha": "f670f20e3da156e3b1935cf23245d4c13fd0fbc9", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.4678111588, "max_line_length": 161, "alphanum_fraction": 0.6095011385, "num_tokens": 2491}
|
[STATEMENT]
lemma fMin_eqI: "(\<And>y. y |\<in>| A \<Longrightarrow> x \<le> y) \<Longrightarrow> x |\<in>| A \<Longrightarrow> fMin A = x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<And>y. y |\<in>| A \<Longrightarrow> x \<le> y; x |\<in>| A\<rbrakk> \<Longrightarrow> fMin A = x
[PROOF STEP]
by transfer (rule Min_eqI)
|
{"llama_tokens": 141, "file": null, "length": 1}
|
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "../TestUtils.hpp"
#include <Optimizer.hpp>
#include <boost/test/unit_test.hpp>
using namespace armnn;
BOOST_AUTO_TEST_SUITE(Optimizer)
using namespace armnn::optimizations;
BOOST_AUTO_TEST_CASE(PermuteAsReshapeTest)
{
armnn::Graph graph;
std::string permuteLayerName = "permute";
const armnn::TensorInfo infoIn({ 1, 2, 3, 1 }, armnn::DataType::Float32);
const armnn::TensorInfo infoOut({ 1, 1, 2, 3 }, armnn::DataType::Float32);
auto output = graph.AddLayer<armnn::OutputLayer>(0, "output");
graph.InsertNewLayer<armnn::InputLayer>(output->GetInputSlot(0), 0, "input")
->GetOutputHandler()
.SetTensorInfo(infoIn);
// Inserts permute.
graph
.InsertNewLayer<armnn::PermuteLayer>(output->GetInputSlot(0), armnn::PermuteDescriptor({ 0, 2, 3, 1 }),
permuteLayerName.c_str())
->GetOutputHandler()
.SetTensorInfo(infoOut);
BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::OutputLayer>));
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(PermuteAsReshape()));
// The permute is replaced by an equivalent reshape.
auto checkReshape = [&infoOut](const armnn::Layer* const layer) -> bool {
const auto reshapeLayer = static_cast<const armnn::ReshapeLayer*>(layer);
return IsLayerOfType<armnn::ReshapeLayer>(layer) &&
(reshapeLayer->GetParameters().m_TargetShape == infoOut.GetShape()) &&
(reshapeLayer->GetOutputHandler().GetTensorInfo().GetShape() == infoOut.GetShape());
};
BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>, checkReshape,
&IsLayerOfType<armnn::OutputLayer>));
std::list<std::string> testRelatedLayers = { permuteLayerName };
BOOST_TEST(CheckRelatedLayers<armnn::ReshapeLayer>(graph, testRelatedLayers));
}
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "b44331c9fcdc1ba207749813b366c16451514042", "size": 2155, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/armnn/test/optimizations/PermuteAsReshapeTests.cpp", "max_stars_repo_name": "tom-gall/armnn", "max_stars_repo_head_hexsha": "a21620d32a8a0a8d527c061e2a22d51009d75877", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/armnn/test/optimizations/PermuteAsReshapeTests.cpp", "max_issues_repo_name": "tom-gall/armnn", "max_issues_repo_head_hexsha": "a21620d32a8a0a8d527c061e2a22d51009d75877", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/armnn/test/optimizations/PermuteAsReshapeTests.cpp", "max_forks_repo_name": "tom-gall/armnn", "max_forks_repo_head_hexsha": "a21620d32a8a0a8d527c061e2a22d51009d75877", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.9166666667, "max_line_length": 111, "alphanum_fraction": 0.6686774942, "num_tokens": 557}
|