text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
[STATEMENT]
theorem \<theta>_asymptotics: "\<theta> \<sim>[at_top] (\<lambda>x. x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<theta> \<sim>[at_top] (\<lambda>x. x)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<theta> \<sim>[at_top] (\<lambda>x. x)
[PROOF STEP]
from \<MM>_minus_ln_limit
[PROOF STATE]
proof (chain)
picking this:
(\<And>c. ((\<lambda>x. \<MM> x - ln x) \<longlongrightarrow> c) at_top \<Longrightarrow> ?thesis) \<Longrightarrow> ?thesis
[PROOF STEP]
obtain c where c: "((\<lambda>x. \<MM> x - ln x) \<longlongrightarrow> c) at_top"
[PROOF STATE]
proof (prove)
using this:
(\<And>c. ((\<lambda>x. \<MM> x - ln x) \<longlongrightarrow> c) at_top \<Longrightarrow> ?thesis) \<Longrightarrow> ?thesis
goal (1 subgoal):
1. (\<And>c. ((\<lambda>x. \<MM> x - ln x) \<longlongrightarrow> c) at_top \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
((\<lambda>x. \<MM> x - ln x) \<longlongrightarrow> c) at_top
goal (1 subgoal):
1. \<theta> \<sim>[at_top] (\<lambda>x. x)
[PROOF STEP]
define r where "r = (\<lambda>x. \<MM> x - ln x - c)"
[PROOF STATE]
proof (state)
this:
r = (\<lambda>x. \<MM> x - ln x - c)
goal (1 subgoal):
1. \<theta> \<sim>[at_top] (\<lambda>x. x)
[PROOF STEP]
have \<MM>_expand: "\<MM> = (\<lambda>x. ln x + c + r x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<MM> = (\<lambda>x. ln x + c + r x)
[PROOF STEP]
by (simp add: r_def)
[PROOF STATE]
proof (state)
this:
\<MM> = (\<lambda>x. ln x + c + r x)
goal (1 subgoal):
1. \<theta> \<sim>[at_top] (\<lambda>x. x)
[PROOF STEP]
have r: "r \<in> o(\<lambda>_. 1)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. r \<in> o(\<lambda>_. 1)
[PROOF STEP]
unfolding r_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. \<MM> x - ln x - c) \<in> o(\<lambda>_. 1)
[PROOF STEP]
using tendsto_add[OF c tendsto_const[of "-c"]]
[PROOF STATE]
proof (prove)
using this:
((\<lambda>x. \<MM> x - ln x + - c) \<longlongrightarrow> c + - c) at_top
goal (1 subgoal):
1. (\<lambda>x. \<MM> x - ln x - c) \<in> o(\<lambda>_. 1)
[PROOF STEP]
by (intro smalloI_tendsto) auto
[PROOF STATE]
proof (state)
this:
r \<in> o(\<lambda>_. 1)
goal (1 subgoal):
1. \<theta> \<sim>[at_top] (\<lambda>x. x)
[PROOF STEP]
define r' where "r' = (\<lambda>x. integral {2..x} r)"
[PROOF STATE]
proof (state)
this:
r' = (\<lambda>x. integral {2..x} r)
goal (1 subgoal):
1. \<theta> \<sim>[at_top] (\<lambda>x. x)
[PROOF STEP]
have integrable_r: "r integrable_on {x..y}"
if "2 \<le> x" for x y :: real
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. r integrable_on {x..y}
[PROOF STEP]
using that
[PROOF STATE]
proof (prove)
using this:
2 \<le> x
goal (1 subgoal):
1. r integrable_on {x..y}
[PROOF STEP]
unfolding r_def
[PROOF STATE]
proof (prove)
using this:
2 \<le> x
goal (1 subgoal):
1. (\<lambda>x. \<MM> x - ln x - c) integrable_on {x..y}
[PROOF STEP]
by (intro integrable_diff integrable_primes_M)
(auto intro!: integrable_continuous_real continuous_intros)
[PROOF STATE]
proof (state)
this:
2 \<le> ?x \<Longrightarrow> r integrable_on {?x..?y}
goal (1 subgoal):
1. \<theta> \<sim>[at_top] (\<lambda>x. x)
[PROOF STEP]
hence integral: "(r has_integral r' x) {2..x}" if "x \<ge> 2" for x
[PROOF STATE]
proof (prove)
using this:
2 \<le> ?x \<Longrightarrow> r integrable_on {?x..?y}
goal (1 subgoal):
1. (r has_integral r' x) {2..x}
[PROOF STEP]
by (auto simp: has_integral_iff r'_def)
[PROOF STATE]
proof (state)
this:
2 \<le> ?x \<Longrightarrow> (r has_integral r' ?x) {2..?x}
goal (1 subgoal):
1. \<theta> \<sim>[at_top] (\<lambda>x. x)
[PROOF STEP]
have r': "r' \<in> o(\<lambda>x. x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. r' \<in> o(\<lambda>x. x)
[PROOF STEP]
using integrable_r
[PROOF STATE]
proof (prove)
using this:
2 \<le> ?x \<Longrightarrow> r integrable_on {?x..?y}
goal (1 subgoal):
1. r' \<in> o(\<lambda>x. x)
[PROOF STEP]
unfolding r'_def
[PROOF STATE]
proof (prove)
using this:
2 \<le> ?x \<Longrightarrow> r integrable_on {?x..?y}
goal (1 subgoal):
1. (\<lambda>x. integral {2..x} r) \<in> o(\<lambda>x. x)
[PROOF STEP]
by (intro integral_smallo[OF r]) (auto simp: filterlim_ident)
[PROOF STATE]
proof (state)
this:
r' \<in> o(\<lambda>x. x)
goal (1 subgoal):
1. \<theta> \<sim>[at_top] (\<lambda>x. x)
[PROOF STEP]
define C where "C = 2 * (c + ln 2 - 1)"
[PROOF STATE]
proof (state)
this:
C = 2 * (c + ln 2 - 1)
goal (1 subgoal):
1. \<theta> \<sim>[at_top] (\<lambda>x. x)
[PROOF STEP]
have "\<theta> \<sim>[at_top] (\<lambda>x. x + (r x * x + C - r' x))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<theta> \<sim>[at_top] (\<lambda>x. x + (r x * x + C - r' x))
[PROOF STEP]
proof (intro asymp_equiv_refl_ev eventually_mono[OF eventually_gt_at_top])
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. ?c2 < x \<Longrightarrow> \<theta> x = x + (r x * x + C - r' x)
[PROOF STEP]
fix x :: real
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. ?c2 < x \<Longrightarrow> \<theta> x = x + (r x * x + C - r' x)
[PROOF STEP]
assume x: "x > 2"
[PROOF STATE]
proof (state)
this:
2 < x
goal (1 subgoal):
1. \<And>x. ?c2 < x \<Longrightarrow> \<theta> x = x + (r x * x + C - r' x)
[PROOF STEP]
have "(\<MM> has_integral ((x * ln x - x + c * x) - (2 * ln 2 - 2 + c * 2) + r' x)) {2..x}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<MM> has_integral x * ln x - x + c * x - (2 * ln 2 - 2 + c * 2) + r' x) {2..x}
[PROOF STEP]
unfolding \<MM>_expand
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ((\<lambda>x. ln x + c + r x) has_integral x * ln x - x + c * x - (2 * ln 2 - 2 + c * 2) + r' x) {2..x}
[PROOF STEP]
using x
[PROOF STATE]
proof (prove)
using this:
2 < x
goal (1 subgoal):
1. ((\<lambda>x. ln x + c + r x) has_integral x * ln x - x + c * x - (2 * ln 2 - 2 + c * 2) + r' x) {2..x}
[PROOF STEP]
by (intro has_integral_add[OF fundamental_theorem_of_calculus integral])
(auto simp flip: has_real_derivative_iff_has_vector_derivative
intro!: derivative_eq_intros continuous_intros)
[PROOF STATE]
proof (state)
this:
(\<MM> has_integral x * ln x - x + c * x - (2 * ln 2 - 2 + c * 2) + r' x) {2..x}
goal (1 subgoal):
1. \<And>x. ?c2 < x \<Longrightarrow> \<theta> x = x + (r x * x + C - r' x)
[PROOF STEP]
from has_integral_unique[OF \<theta>_conv_\<MM>_integral this]
[PROOF STATE]
proof (chain)
picking this:
2 \<le> x \<Longrightarrow> \<MM> x * x - \<theta> x = x * ln x - x + c * x - (2 * ln 2 - 2 + c * 2) + r' x
[PROOF STEP]
show "\<theta> x = x + (r x * x + C - r' x)"
[PROOF STATE]
proof (prove)
using this:
2 \<le> x \<Longrightarrow> \<MM> x * x - \<theta> x = x * ln x - x + c * x - (2 * ln 2 - 2 + c * 2) + r' x
goal (1 subgoal):
1. \<theta> x = x + (r x * x + C - r' x)
[PROOF STEP]
using x
[PROOF STATE]
proof (prove)
using this:
2 \<le> x \<Longrightarrow> \<MM> x * x - \<theta> x = x * ln x - x + c * x - (2 * ln 2 - 2 + c * 2) + r' x
2 < x
goal (1 subgoal):
1. \<theta> x = x + (r x * x + C - r' x)
[PROOF STEP]
by (simp add: field_simps \<MM>_expand C_def)
[PROOF STATE]
proof (state)
this:
\<theta> x = x + (r x * x + C - r' x)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<theta> \<sim>[at_top] (\<lambda>x. x + (r x * x + C - r' x))
goal (1 subgoal):
1. \<theta> \<sim>[at_top] (\<lambda>x. x)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
\<theta> \<sim>[at_top] (\<lambda>x. x + (r x * x + C - r' x))
goal (1 subgoal):
1. \<theta> \<sim>[at_top] (\<lambda>x. x)
[PROOF STEP]
have "(\<lambda>x. r x * x + C - r' x) \<in> o(\<lambda>x. x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. r x * x + C - r' x) \<in> o(\<lambda>x. x)
[PROOF STEP]
proof (intro sum_in_smallo r)
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. (\<lambda>x. r x * x) \<in> o(\<lambda>x. x)
2. (\<lambda>x. C) \<in> o(\<lambda>x. x)
3. r' \<in> o(\<lambda>x. x)
[PROOF STEP]
show "(\<lambda>_. C) \<in> o(\<lambda>x. x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>_. C) \<in> o(\<lambda>x. x)
[PROOF STEP]
by real_asymp
[PROOF STATE]
proof (state)
this:
(\<lambda>_. C) \<in> o(\<lambda>x. x)
goal (2 subgoals):
1. (\<lambda>x. r x * x) \<in> o(\<lambda>x. x)
2. r' \<in> o(\<lambda>x. x)
[PROOF STEP]
qed (insert landau_o.small_big_mult[OF r, of "\<lambda>x. x"] r', simp_all)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. r x * x + C - r' x) \<in> o(\<lambda>x. x)
goal (1 subgoal):
1. \<theta> \<sim>[at_top] (\<lambda>x. x)
[PROOF STEP]
hence "(\<lambda>x. x + (r x * x + C - r' x)) \<sim>[at_top] (\<lambda>x. x)"
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. r x * x + C - r' x) \<in> o(\<lambda>x. x)
goal (1 subgoal):
1. (\<lambda>x. x + (r x * x + C - r' x)) \<sim>[at_top] (\<lambda>x. x)
[PROOF STEP]
by (subst asymp_equiv_add_right) auto
[PROOF STATE]
proof (state)
this:
(\<lambda>x. x + (r x * x + C - r' x)) \<sim>[at_top] (\<lambda>x. x)
goal (1 subgoal):
1. \<theta> \<sim>[at_top] (\<lambda>x. x)
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
(\<And>c d. c \<sim>[at_top] d \<Longrightarrow> c \<sim>[at_top] d) \<Longrightarrow> \<theta> \<sim>[at_top] (\<lambda>a. a)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
(\<And>c d. c \<sim>[at_top] d \<Longrightarrow> c \<sim>[at_top] d) \<Longrightarrow> \<theta> \<sim>[at_top] (\<lambda>a. a)
goal (1 subgoal):
1. \<theta> \<sim>[at_top] (\<lambda>x. x)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<theta> \<sim>[at_top] (\<lambda>x. x)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 4441, "file": "Prime_Number_Theorem_Prime_Number_Theorem", "length": 48}
|
# Demo script: build an n x m "identity" matrix with numpy.eye and print it.
import numpy
# Earlier attempt kept (as a string literal) for reference; it printed the
# matrix without the extra leading spaces added below.
'''a=list(map(int,input().split()))
n=a[0]
m=a[1]
print( numpy.eye(n, m))'''
import numpy
# Read "n m" from stdin, build numpy.eye(n, m), then prefix every '1' and '0'
# digit with a space so the printed matrix matches the expected " 1." / " 0."
# layout (presumably a HackerRank-style output format — TODO confirm).
print(str(numpy.eye(*map(int,input().split()))).replace('1',' 1').replace('0',' 0'))
#helps make identity matrices
|
{"hexsha": "6e163b891a054dc976b32d4e88858f948a101dd7", "size": 220, "ext": "py", "lang": "Python", "max_stars_repo_path": "Numpy/eyeandidentity.py", "max_stars_repo_name": "TheG0dfath3r/Python", "max_stars_repo_head_hexsha": "73f40e9828b953c3e614a21a8980eaa81b5c066e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Numpy/eyeandidentity.py", "max_issues_repo_name": "TheG0dfath3r/Python", "max_issues_repo_head_hexsha": "73f40e9828b953c3e614a21a8980eaa81b5c066e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Numpy/eyeandidentity.py", "max_forks_repo_name": "TheG0dfath3r/Python", "max_forks_repo_head_hexsha": "73f40e9828b953c3e614a21a8980eaa81b5c066e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-09-30T21:17:57.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-01T16:23:33.000Z", "avg_line_length": 20.0, "max_line_length": 84, "alphanum_fraction": 0.6318181818, "include": true, "reason": "import numpy", "num_tokens": 67}
|
import cv2
import turicreate as tc
from tqdm import tqdm
import numpy as np
from tools.utils.draw import *
from tools.utils.segment import *
def predict_on_video(video_path, model_path, confidence_threshold=0.75, iou_threshold=0.25, target_label=None, num_objs=-1, draw_masks=False, draw_frame_num=True):
    """Run an object detector over every frame of a video and render results.

    :param video_path: path of the input video (read via ``read_video``).
    :param model_path: path of the saved turicreate model.
    :param confidence_threshold: minimum confidence passed to ``model.predict``.
    :param iou_threshold: IoU threshold passed to ``model.predict``.
    :param target_label: if set, keep only detections with this label.
    :param num_objs: if > 0, keep only the most confident detections.
    :param draw_masks: also draw segmentation masks on each frame.
    :param draw_frame_num: stamp the frame index onto each frame.
    :returns: list of annotated frames (pixel arrays).
    """
    model = tc.load_model(model_path)
    source_frames = read_video(video_path)
    annotated = []
    for idx in tqdm(range(len(source_frames)), desc='Predicting'):
        img = source_frames[idx]
        # Detect, then prune to the requested label / count.
        detections = model.predict(get_tc_img(img),
                                   confidence_threshold=confidence_threshold,
                                   iou_threshold=iou_threshold,
                                   verbose=False)
        detections = clean_predictions(detections, target_label=target_label, num_objs=num_objs)
        if draw_masks:
            # Segment the detected crops and overlay the masks.
            img = draw(img, detections, segment(get_crops(img, detections)))
        if draw_frame_num:
            img = draw_text(img, str(idx))
        img = tc.object_detector.util.draw_bounding_boxes(get_tc_img(img), detections).pixel_data
        annotated.append(img)
    return annotated
def get_prediction_sframe(video_path, model_path, target_label=None, num_objs=-1):
    """Predict on every frame of a video and collect results as an SFrame.

    :param video_path: path of the input video (read via ``read_video``).
    :param model_path: path of the saved turicreate model.
    :param target_label: if set, keep only detections with this label.
    :param num_objs: if > 0, keep only the most confident detections.
    :returns: ``tc.SFrame`` with columns ``image`` and ``annotations``.
    """
    frames = read_video(video_path)
    model = tc.load_model(model_path)
    images, annotations = [], []
    for idx in tqdm(range(len(frames)), desc='Predicting'):
        frame = frames[idx]
        # Detect, then prune to the requested label / count.
        dets = model.predict(get_tc_img(frame), verbose=False)
        dets = clean_predictions(dets, target_label=target_label, num_objs=num_objs)
        images.append(frame)
        annotations.append(dets)
    return tc.SFrame({'image': images, 'annotations': annotations})
def clean_predictions(pred, target_label=None, num_objs=-1):
    """Filter a list of detection dicts by label and/or confidence rank.

    :param pred: list of dicts with at least ``label`` and ``confidence`` keys.
    :param target_label: if truthy, drop detections whose label differs.
    :param num_objs: if > 0, keep only the ``num_objs`` most confident ones.
    :returns: the filtered list (original order kept unless rank-limited).
    """
    # Optionally restrict to a single class label.
    if target_label:
        pred = [det for det in pred if det['label'] == target_label]
    # Optionally keep only the highest-confidence detections.
    if num_objs > 0:
        by_confidence = sorted(pred, key=lambda det: det['confidence'], reverse=True)
        pred = by_confidence[:num_objs]
    return pred
def read_video(video_path, downsize=0.3):
    """Read all frames of a video into memory, optionally downscaled.

    :param video_path: path of the video file to open with OpenCV.
    :param downsize: uniform scale factor applied to each frame; ``1``
        disables resizing.
    :returns: list of frames (numpy arrays as produced by ``video.read()``).

    NOTE(review): the progress bar total comes from CAP_PROP_FRAME_COUNT,
    which is a container estimate and may differ from the frames actually
    decoded.
    """
    video = cv2.VideoCapture(video_path)
    frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    frames = []
    pbar = tqdm(total=frame_count)
    try:
        ret, frame = video.read()
        while ret:
            if downsize != 1:
                frame = cv2.resize(frame, None, fx=downsize, fy=downsize)
            frames.append(frame)
            ret, frame = video.read()
            pbar.update(1)
    finally:
        # FIX: close the progress bar and release the capture handle even on
        # error; the original leaked the VideoCapture (release() was never
        # called).
        pbar.close()
        video.release()
    return frames
def compile_video(frames, fps=30, target='./output.mp4'):
    """Encode a list of frames into an mp4 file.

    :param frames: non-empty list of frames; all are assumed to share the
        shape of ``frames[0]`` (height, width, channels) — TODO confirm.
    :param fps: frame rate of the output video.
    :param target: output file path.
    :raises AssertionError: if ``frames`` is empty.
    """
    assert len(frames) > 0, 'Frames list is empty.'
    height, width, layers = frames[0].shape
    codec = cv2.VideoWriter_fourcc(*'mp4v')
    video = cv2.VideoWriter(target, codec, fps, (width,height))
    try:
        for frame in tqdm(frames, desc='Writing'):
            video.write(frame)
    finally:
        # FIX: always release the writer so the file is flushed and closed
        # even if a write fails; the original skipped release() on error.
        cv2.destroyAllWindows()
        video.release()
def get_tc_img(img):
    """Wrap a raw pixel array in a ``tc.Image`` without re-encoding.

    :param img: ``numpy.ndarray`` with shape (height, width, channels).
    :returns: ``tc.Image`` built directly from the array's bytes.
    :raises AssertionError: if ``img`` is not a numpy array.
    """
    assert (isinstance(img, np.ndarray)), 'Image is not of type numpy.ndarray.'
    RAW_FORMAT = 2  # turicreate format enum for raw (undecoded) pixel data
    height, width, channels = img.shape[0], img.shape[1], img.shape[2]
    return tc.Image(_image_data=img.tobytes(),
                    _width=width,
                    _height=height,
                    _channels=channels,
                    _format_enum=RAW_FORMAT,
                    _image_data_size=img.size)
def extract_imgs_from_sframe(sframe, target_label='mainPlate', buffer=64, draw_center=False, draw_center_line=False, draw_boundings=False, draw_masks=False, draw_frame_num=True, annotations_col='annotations', image_col='image', masks_col='stateMasks'):
    """Render the images stored in an SFrame with optional overlays.

    :param sframe: path/handle accepted by ``tc.load_sframe``.
    :param target_label: label whose centers are accumulated via
        ``append_centers``.
    :param buffer: pixel buffer forwarded to center accumulation/drawing.
    :param draw_center: overlay nearest-center markers.
    :param draw_center_line: overlay center lines.
    :param draw_boundings: overlay bounding boxes from ``annotations_col``.
    :param draw_masks: overlay mask data from ``masks_col``.
    :param draw_frame_num: stamp the row index onto each image.
    :param annotations_col: column holding per-image detections.
    :param image_col: column holding the images.
    :param masks_col: column holding mask data.
    :returns: list of rendered pixel arrays, one per SFrame row.
    """
    rows = list(tc.load_sframe(sframe))
    centers = {}  # accumulated across rows by append_centers
    rendered = []
    for idx, row in enumerate(tqdm(rows, desc='Parsing')):
        img = row[image_col].pixel_data
        append_centers(row[annotations_col], centers, buffer=buffer, target_label=target_label)
        if draw_boundings:
            img = tc.object_detector.util.draw_bounding_boxes(get_tc_img(img), row[annotations_col]).pixel_data
        if draw_masks:
            img = draw_mask_data(img, row[masks_col])
        if draw_center:
            img = draw_nearest_centers(img, centers)
        if draw_center_line:
            img = draw_center_lines(img, centers, buffer=buffer)
        if draw_frame_num:
            img = draw_text(img, str(idx))
        rendered.append(img)
    return rendered
|
{"hexsha": "d9c39ec336e5f4dbde09b4738d4add5c7c8e4d67", "size": 3795, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/utils/parse.py", "max_stars_repo_name": "vitae-gravitas/model-tester", "max_stars_repo_head_hexsha": "c6de6f7e26043047fd30c9ed66f4dfb75a68a29b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tools/utils/parse.py", "max_issues_repo_name": "vitae-gravitas/model-tester", "max_issues_repo_head_hexsha": "c6de6f7e26043047fd30c9ed66f4dfb75a68a29b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tools/utils/parse.py", "max_forks_repo_name": "vitae-gravitas/model-tester", "max_forks_repo_head_hexsha": "c6de6f7e26043047fd30c9ed66f4dfb75a68a29b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.1071428571, "max_line_length": 252, "alphanum_fraction": 0.7354413702, "include": true, "reason": "import numpy", "num_tokens": 994}
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Precompute Fraunhofer fixed representations (logSTFT, logMel)
"""
import os
from pathlib import Path
#
from omegaconf import OmegaConf
import numpy as np
#
from d2021umaps.utils import IncrementalHDF5
from d2021umaps.logging import ColorLogger, make_timestamp
from d2021umaps.features import wavpath_to_mel, wavpath_to_stft
# ##############################################################################
# # GLOBALS
# ##############################################################################
# Default configuration; any value can be overridden from the CLI via
# OmegaConf (e.g. `python script.py ROOT_PATH=/data WAV_SR=22050`).
CONF = OmegaConf.create()
CONF.ROOT_PATH = None # must be given by user!
#
CONF.WAV_NORM = "none"
CONF.WAV_SR = 16000 # WAVs will be resampled to this when loaded
CONF.STFT_WINSIZE = 1024 # powers of 2 ideally
CONF.STFT_HOPSIZE = 512
CONF.NUM_MELS = 128
CONF.OUT_DIR = "precomputed_features"
# Timestamped log file name so repeated runs don't overwrite each other.
log_ts = make_timestamp(timezone="Europe/London", with_tz_output=False)
CONF.LOG_OUTPATH = os.path.join("logs", "{}_[{}].log".format(log_ts, __file__))
# Merge CLI overrides on top of the defaults (CLI wins).
cli_conf = OmegaConf.from_cli()
CONF = OmegaConf.merge(CONF, cli_conf)
assert CONF.ROOT_PATH is not None, \
    "Please provide a ROOT_PATH=... containing train_cut and test_cut!"
CONF.ROOT_PATH = str(Path(CONF.ROOT_PATH).resolve()) # in case of softlinks
# these variables may depend on CLI input so we set them at the end
TRAIN_PATH = os.path.join(CONF.ROOT_PATH, "train_cut")
TEST_PATH = os.path.join(CONF.ROOT_PATH, "test_cut")
# Number of frequency bins of a one-sided STFT: winsize/2 + 1.
STFT_FREQBINS = int(CONF.STFT_WINSIZE / 2 + 1)
# Output HDF5 paths encode the relevant parameters in the file name.
STFT_OUTPATH = os.path.join(
    CONF.OUT_DIR,
    f"fraunhofer_wavnorm={CONF.WAV_NORM}_stft_win{CONF.STFT_WINSIZE}_" +
    f"hop{CONF.STFT_HOPSIZE}.h5")
MEL_OUTPATH = os.path.join(
    CONF.OUT_DIR,
    f"fraunhofer_wavnorm={CONF.WAV_NORM}_mel_win{CONF.STFT_WINSIZE}_" +
    f"hop{CONF.STFT_HOPSIZE}_m{CONF.NUM_MELS}.h5")
# ##############################################################################
# # MAIN ROUTINE
# ##############################################################################
# Logger is created after the config merge so LOG_OUTPATH reflects overrides.
LOGGER = ColorLogger(__file__, CONF.LOG_OUTPATH, filemode="w")
LOGGER.info(f"\n\n\nSTARTED SCRIPT: {__file__}")
LOGGER.info(OmegaConf.to_yaml(CONF))
def save_stft_dataset(out_path, *paths, in_db=True, root_path=None):
    """Compute a STFT for every wav path and append it to one HDF5 file.

    :param out_path: destination ``.h5`` file (rows = STFT_FREQBINS).
    :param paths: ``Path`` objects pointing at wav files.
    :param in_db: forwarded to ``wavpath_to_stft`` as ``in_decibels``.
    :param root_path: if given, the stored metadata string is the wav path
        relative to it; otherwise the absolute path is stored.
    """
    total = len(paths)
    with IncrementalHDF5(out_path, STFT_FREQBINS, np.float32) as ihdf5:
        LOGGER.info(f"Writing to {out_path}")
        for idx, abspath in enumerate(paths, 1):
            meta = str(abspath if root_path is None
                       else abspath.relative_to(root_path))
            if idx % 100 == 0:
                LOGGER.info(f"[{idx}/{total}] save_stft_dataset: {meta}")
            arr = wavpath_to_stft(
                str(abspath), CONF.WAV_SR, wav_norm=CONF.WAV_NORM,
                n_fft=CONF.STFT_WINSIZE, hop_length=CONF.STFT_HOPSIZE,
                pad_mode="constant", in_decibels=in_db, logger=LOGGER)
            # A None result means the wav had zero samples; skip it.
            if arr is None:
                continue
            ihdf5.append(arr, meta)
            # Sanity check: the last columns of the dataset must equal what
            # was just appended.
            assert (arr == ihdf5.data_ds[:, -arr.shape[1]:]).all(), \
                "Should never happen"
    LOGGER.info(f"Finished writing to {out_path}")
def save_mel_dataset(out_path, *paths, in_db=True, root_path=None):
    """Compute a mel spectrogram for every wav path and append it to HDF5.

    :param out_path: destination ``.h5`` file (rows = CONF.NUM_MELS).
    :param paths: ``Path`` objects pointing at wav files.
    :param in_db: forwarded to ``wavpath_to_mel`` as ``in_decibels``.
    :param root_path: if given, the stored metadata string is the wav path
        relative to it; otherwise the absolute path is stored.
    """
    total = len(paths)
    with IncrementalHDF5(out_path, CONF.NUM_MELS, np.float32) as ihdf5:
        LOGGER.info(f"Writing to {out_path}")
        for idx, abspath in enumerate(paths, 1):
            meta = str(abspath if root_path is None
                       else abspath.relative_to(root_path))
            if idx % 100 == 0:
                LOGGER.info(f"[{idx}/{total}] save_mel_dataset: {meta}")
            arr = wavpath_to_mel(
                str(abspath), CONF.WAV_SR, wav_norm=CONF.WAV_NORM,
                n_mels=CONF.NUM_MELS, hop_length=CONF.STFT_HOPSIZE,
                pad_mode="constant", in_decibels=in_db, logger=LOGGER)
            # A None result means the wav had zero samples; skip it.
            if arr is None:
                continue
            ihdf5.append(arr, meta)
            # Sanity check: the last columns of the dataset must equal what
            # was just appended.
            assert (arr == ihdf5.data_ds[:, -arr.shape[1]:]).all(), \
                "Should never happen"
    LOGGER.info(f"Finished writing to {out_path}")
# Gather every wav under train_cut and test_cut (recursively) and precompute
# both fixed representations (mel, then log-STFT) over the combined list.
train_paths = list(Path(TRAIN_PATH).glob("**/*.wav"))
test_paths = list(Path(TEST_PATH).glob("**/*.wav"))
paths = train_paths + test_paths
save_mel_dataset(MEL_OUTPATH, *paths, root_path=CONF.ROOT_PATH)
save_stft_dataset(STFT_OUTPATH, *paths, root_path=CONF.ROOT_PATH)
|
{"hexsha": "e8bc0e0cfb812d7c32521e012f61a1e17efddd71", "size": 4763, "ext": "py", "lang": "Python", "max_stars_repo_path": "00c_precompute_fraunhofer_fixed.py", "max_stars_repo_name": "andres-fr/dcase2021_umaps", "max_stars_repo_head_hexsha": "0418b256d484a66958763061170bb2346cb6030a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-11-17T14:12:11.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-30T09:28:21.000Z", "max_issues_repo_path": "00c_precompute_fraunhofer_fixed.py", "max_issues_repo_name": "andres-fr/dcase2021_umaps", "max_issues_repo_head_hexsha": "0418b256d484a66958763061170bb2346cb6030a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "00c_precompute_fraunhofer_fixed.py", "max_forks_repo_name": "andres-fr/dcase2021_umaps", "max_forks_repo_head_hexsha": "0418b256d484a66958763061170bb2346cb6030a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-22T14:54:59.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-22T14:54:59.000Z", "avg_line_length": 37.8015873016, "max_line_length": 80, "alphanum_fraction": 0.6115893345, "include": true, "reason": "import numpy", "num_tokens": 1227}
|
\subsection{Content Analyzer}
\label{sec:content-analyzer}
\index{Content Analyzer}
For this project the data describing the products offered by an online shop were semi-structured.
It was a text file where each line described a product.
An example is given in listing~\ref{lst:product-data}.
\begin{lstlisting}[caption={Example product data},label={lst:product-data}
,keywordstyle=\color{black}
,commentstyle=\itshape\color{black}
,identifierstyle=\color{black}
,stringstyle=\color{black}
]
ImgURL Brand Product Price Shoulder_Width Model_Length Collar_Type Material
http://i1.ztat.net/large/4E/M2/1E/00/0K/11/4EM21E000-K11@4.jpg Emoi en Plus Bluse - dazzling blue 24,95 °(\euro{})° 50 cm 70 cm bei Gr°(\"{o}\ss{})°e 44 Rundhals 100% Polyester
http://i2.ztat.net/large/NA/52/1D/03/NA/11/NA521D03N-A11@3.jpg NAF NAF WENT - Bluse - ecru/noir 38,95 °(\euro{})° 55 cm bei Gr°(\"{o}\ss{})°e S Rundhals 64% Viskose, 22% Baumwolle, 10% Modal, 4% Polyamid
\end{lstlisting}
%\noindent
%Since the structure of the input was known, it was possible to filter out all relevant product information without using too fancy IR methods.
%With regular expressions all relevant informations such as the product\_image-url, brand, product, price, collar type and material have been extracted and stored in a database.
From each line describing a product information such as the image URL and materials have been extracted.
Except from the image URL every word separated by whitespace has been interpreted as term.
Since an RS could theoretically handle any kind of item apart from products, a distinction has been made between documents in general and products.
The relation between a product and a document has been illustrated in figure~\ref{fig:ertermdocumentassignment}.
\begin{figure}[h]
\center
\includegraphics[scale=0.5]{inc/implementation/contentanalyzer/er_term_document_assignment}
\caption{ER diagram of documents, products and associated terms}
\label{fig:ertermdocumentassignment}
\end{figure}
Because there is a N-to-N relation between \textit{Product} and \textit{Term}, an intermediate table is necessary when transforming the ER-diagram into a relational model.
Therefore the table \textit{TermDocumentAssigner} has been introduced.
The relational model looks as follows:
\begin{quote}
\textbf{Document}{(\underline{document\_id})}\\
\textbf{Product}{(\underline{document\_id[Document]}, image\_name)}\\
\textbf{Term}{(\underline{term\_id}, name)}\\
\textbf{TermDocumentAssigner}{(\underline{document\_id[Document], term\_id[Term]}, count)}\\
\end{quote}
Because \textit{TermDocumentAssigner} will be very often used to query all terms of a document, it might be useful to create an index on \textit{document\_id}.
Implicitly there will also be one on the combined primary key \textit{document\_id, term\_id}.
The tables, implemented by the RS built for this thesis, filled with example data may look as in table~\ref{tab:tablestermdocumentproduct}.
Table~\ref{tab:tablestermdocumentproduct} will serve as resource for terms and documents to illustrate subsequent examples.
\begin{table}
\center
% Document
\rowcolors{1}{\dustRowFirst}{\dustRowSecond}
\begin{tabular}{ l }
\rowcolor{\dustRowHead}
\textbf{Document}\\\hline
document\_id\\\hline
1\\
2\\
3\\
\end{tabular}
\quad
% Product
\rowcolors{1}{\dustRowFirst}{\dustRowSecond}
\begin{tabular}{ l | l }
\rowcolor{\dustRowHead}
\multicolumn{2}{ c }{\textbf{Product}}\\\hline
document\_id & image\_name\\\hline
1 & image\_1.png\\
2 & image\_2.png\\
3 & image\_3.png\\
\end{tabular}
%~\\
\hfill\\
%TermDocumentAssigner
\rowcolors{1}{\dustRowFirst}{\dustRowSecond}
\begin{tabular}{ l | l | l }
\rowcolor{\dustRowHead}
\multicolumn{3}{c}{\textbf{TermDocumentAssigner}}\\\hline
document\_id & term\_id & count\\\hline
1 & 1 & 1\\
1 & 2 & 1\\
1 & 4 & 1\\
2 & 1 & 1\\
2 & 3 & 1\\
2 & 7 & 1\\
3 & 4 & 1\\
3 & 5 & 1\\
3 & 6 & 1\\
\end{tabular}
\quad
% Term
\rowcolors{1}{\dustRowFirst}{\dustRowSecond}
\begin{tabular}{ l | l }
\rowcolor{\dustRowHead}
\multicolumn{2}{ c }{\textbf{Term}}\\\hline
term\_id & name\\\hline
1 & blouse\\
2 & blue\\
3 & polyester\\
4 & cotton\\
5 & green\\
6 & trouser\\
7 & white\\
\end{tabular}
\caption{Table layout defined by figure~\ref{fig:ertermdocumentassignment}}
\label{tab:tablestermdocumentproduct}
\end{table}
%\noindent
As already mentioned before, Rocchio's algorithm works best with tf-idf vectors.
With the theory described in section~\ref{sec:tfidf} the next big step is to show how vectors for this project have been built.
As a short reminder: all products are described through their terms.
In order to build tf-idf vectors, one also has to build term frequency-, as well as inverse document frequency vectors - these are the preconditions.
Since these tasks are fairly similar, some design patterns help realizing them.
The \gls{abstract factory} pattern proved to be very handy for this task.
For each necessary vector (tf, idf, tf-idf) one can build a vector creator which shares the design of the other vector creators.
Therefore the abstract class \textit{VectorCreator} has been introduced.
The \textit{VectorCreator} offers the abstract method \textit{\_get\_vector(document\_id:int):DocumentVector} which will be responsible for creating all vectors.
All inherited classes will implement the abstract method with a procedure to create an instance of \textit{DocumentVector}.
A UML-diagram of \textit{DocumentVector} and some derived classes is shown in figure~\ref{fig:uml-document-vectors}.
\begin{figure}[h]
\center
\includegraphics[scale=0.4]{inc/implementation/contentanalyzer/uml_document_vectors}
\caption{Document vectors}
\label{fig:uml-document-vectors}
\end{figure}
\FloatBarrier
\paragraph{Term frequency vector}
\index{Term Frequency}
A term frequency vector representing a document consists of the count of a term's occurrences.
The current implementation uses the SQL query displayed in listing~\ref{lst:tf-query} for generating the tf vector.
The result of a query may look as in table~\ref{tab:tf-query-result} (based on the tables shown in figure~\ref{fig:ertermdocumentassignment}).
The result of the query shown in listing~\ref{lst:tf-query} will finally be stored in the \textit{TermFrequencyVector} class derived from \textit{DocumentVector}.
\begin{table}
\center
\rowcolors{1}{\dustRowFirst}{\dustRowSecond}
\begin{tabular}{ l|l|l }
\rowcolor{\dustRowHead}
\multicolumn{3}{c}{\textbf{$\text{tf}_\text{document\_1}$}}\\\hline
term\_id & name & value \\\hline
1 & blouse & 1\\
2 & blue & 1\\
3 & polyester & 0\\
4 & cotton & 1\\
5 & green & 0\\
6 & trouser & 0\\
7 & white & 0\\
\end{tabular}
\quad
\rowcolors{1}{\dustRowFirst}{\dustRowSecond}
\begin{tabular}{ l|l|l }
\rowcolor{\dustRowHead}
\multicolumn{3}{c}{\textbf{$\text{tf}_\text{document\_2}$}}\\\hline
term\_id & name & value\\\hline
1 & blouse & 1\\
2 & blue & 0\\
3 & polyester & 1\\
4 & cotton & 0\\
5 & green & 0\\
6 & trouser & 0\\
7 & white & 1\\
\end{tabular}
\hfill\\
\rowcolors{1}{\dustRowFirst}{\dustRowSecond}
\begin{tabular}{ l|l|l }
\rowcolor{\dustRowHead}
\multicolumn{3}{ c }{\textbf{$\text{tf}_\text{document\_3}$}}\\\hline
term\_id & name & value\\\hline
1 & blouse & 0\\
2 & blue & 0\\
3 & polyester & 0\\
4 & cotton & 1\\
5 & green & 1\\
6 & trouser & 1\\
7 & white & 0\\
\end{tabular}
\caption{Possible result of the query in listing~\ref{lst:tf-query}}
\label{tab:tf-query-result}
\end{table}
\begin{lstlisting}[language=SQL,caption={SQL query for generating tf-vectors},label={lst:tf-query},float=h]
-- :document_id is a parameter given to the method
SELECT
[t].[term_id]
, [t].[name]
, CASE WHEN [a].[document_id] IS NULL
THEN 0
ELSE [a].[count]
END AS [value]
FROM
[Term] AS [t]
LEFT OUTER JOIN [TermDocumentAssigner] AS [a]
ON [t].[term_id] = [a].[term_id]
AND [document_id] = :document_id
ORDER BY
[t].[term_id]
;
\end{lstlisting}
\FloatBarrier
\paragraph{Document frequency vector}
\index{Document Frequency}
In contrast to \textit{TermFrequencyVector} the \textit{DocumentFrequencyVector} represents the whole collection of documents, rather than a single one.
Therefore the parameter \textit{document\_id} can be omitted.
But in order to sustain uniformity between all classes inheriting from \textit{VectorCreator} it will be carried along but set to a null value.
To make the source code more readable, the SQL code for querying the document-frequency values has been outsourced to a SQL-View as shown in listing~\ref{lst:df-view}.
This little tweak (which has no influence on execution-speed) left the query for df-vectors as simple as shown in listing~\ref{lst:df-query}.
The result for the example is given in table~\ref{tab:df-query-result}.
\begin{lstlisting}[language=SQL,caption={SQL statement to create the \textit{DocumentFrequency}-view},label={lst:df-view},float=h]
CREATE VIEW IF NOT EXISTS [DocumentFrequency] AS
SELECT
[t].[term_id]
, [t].[name]
, CASE WHEN [a].[count] IS NULL
THEN 0
ELSE [a].[count]
END AS [value]
FROM
[Term] as [t]
LEFT OUTER JOIN
(
SELECT
[term_id]
      , COUNT([document_id]) AS [count]
FROM
[TermDocumentAssigner]
GROUP BY
[term_id]
) AS [a]
ON [t].[term_id] = [a].[term_id]
ORDER BY
[t].[term_id]
;
\end{lstlisting}
\begin{lstlisting}[language=SQL,caption={SQL query for generating df-vectors},label={lst:df-query},float=h]
SELECT
[term_id]
, [name]
, [value]
FROM
[DocumentFrequency]
;
\end{lstlisting}
\begin{table}
\center
\rowcolors{1}{\dustRowFirst}{\dustRowSecond}
\begin{tabular}{ l | l | l }
\rowcolor{\dustRowHead}
\multicolumn{3}{ c }{\textbf{df}}\\\hline
term\_id & name & value\\\hline
1 & blouse & 2\\
2 & blue & 1\\
3 & polyester & 1\\
4 & cotton & 2\\
5 & green & 1\\
6 & trouser & 1\\
7 & white & 1\\
\end{tabular}
\caption{Possible results of the query in figure~\ref{lst:df-query}}
\label{tab:df-query-result}
\end{table}
\FloatBarrier
\paragraph{Inverse document frequency vector}
\index{Inverse Document Frequency}
For building idf-vectors one can use df-vectors (and their source code) as basis.
The inverse document frequency is the logarithm of the total count of documents in a collection divided by a term's document frequency.
Another SQL-View called N-view will provide the number of documents, while the code for creating idf-vectors get outsourced into its own view once more.
Since the SQL implementation of \gls{sqlite3} does not offer a logarithm function, one has to define their own.
Fortunately the standard python library for connecting to sqlite3-databases supports the creation of functions as shown in listing~\ref{lst:idf-log-function}.
\begin{lstlisting}[language=Python,caption={Preparing the log-function for SQL-statement in listing~\ref{lst:idf-view}},label={lst:idf-log-function},float=h]
def _create_log_function(self, conn):
conn.create_function('log10', 1, InverseDocumentFrequencyVectorCreator.log_10)
pass
def log_10(x):
base = 10
return math.log(x, base)
\end{lstlisting}
\begin{lstlisting}[language=SQL,caption={SQL-statement to create the InverseDocumentFrequency-view},label={lst:idf-view},float=h]
CREATE VIEW IF NOT EXISTS [InverseDocumentFrequency] AS
SELECT
[term_id]
, [name]
, log10
(
CAST ((SELECT [document_count] from [N]) AS REAL) / [df].[value]
)
AS [value]
FROM
[DocumentFrequency] AS [df]
ORDER BY
[term_id]
;
\end{lstlisting}
\begin{lstlisting}[language=SQL,caption={SQL-statement to create the N-view},label={lst:n-view},float=h]
CREATE VIEW IF NOT EXISTS [N] AS
SELECT
(SELECT COUNT(*) FROM [Document]) AS [document_count]
, (SELECT COUNT(*) FROM [Term]) AS [term_count]
;
\end{lstlisting}
\begin{lstlisting}[language=SQL,caption={SQL-query for generating idf-vectors},label={fig:idf-query},float=h]
SELECT
[term_id]
, [name]
, [value]
FROM
[InverseDocumentFrequency]
;
\end{lstlisting}
\begin{table}
\center
\rowcolors{1}{\dustRowFirst}{\dustRowSecond}
\begin{tabular}{ l | l | l }
\rowcolor{\dustRowHead}
\multicolumn{3}{ c }{\textbf{idf}}\\\hline
term\_id & name & value\\\hline
1 & blouse & $\log_{10}(3/2) \approx 0.18$\\
2 & blue & $\log_{10}(3/1) \approx 0.48$\\
3 & polyester & $\log_{10}(3/1) \approx 0.48$\\
4 & cotton & $\log_{10}(3/2) \approx 0.18$\\
5 & green & $\log_{10}(3/1) \approx 0.48$\\
6 & trouser & $\log_{10}(3/1) \approx 0.48$\\
7 & white & $\log_{10}(3/1) \approx 0.48$\\
\end{tabular}
\caption{Possible results of the query in listing~\ref{fig:idf-query}}
\label{tab:idf-query-result}
\end{table}
\FloatBarrier
\paragraph{Tf-idf vector}
\index{TfIdf}
Finally one can create tf-idf vectors which are the combination of tf- and idf-vectors (as explained in section~\ref{sec:tfidf}).
Since tf-idf is merely the multiplication of term frequency with the corresponding inverse document frequency, it is rather simple to create.
Listing~\ref{lst:tfidf-code} shows the creation of a \textit{TfIdfVector} in the method \textit{\_create\_vector()}, whereas the multiplication is in method \textit{\_get\_values()}.
\begin{lstlisting}[language=Python,caption={Python code for calculating tfidf-vectors on the basis of tf- and idf-vectors},label={lst:tfidf-code},float=h]
class TfIdfVectorCreator(VectorCreator):
def __init__(self, db_connection_str):
super(TfIdfVectorCreator, self).__init__(db_connection_str)
self._tf_creator = TermFrequencyVectorCreator(db_connection_str)
self._idf_creator = InverseDocumentFrequencyVectorCreator(db_connection_str)
pass
def _create_vector(self, document_id):
tf_vector = self._tf_creator.get_vector(document_id)
idf_vector = self._idf_creator.get_vector(document_id)
tfidf_vector = TfIdfVector()
for triple in self._get_values(tf_vector, idf_vector):
tfidf_vector.add_to_vector(triple)
return tfidf_vector
def _get_values(self, tfv, idfv):
ingredients = zip(tfv.term_id, tfv.description, tfv.values, idfv.values)
for (tf_tid, tf_desc, tf_val, idf_val) in ingredients:
yield (tf_tid, tf_desc, tf_val * idf_val)
pass
pass
\end{lstlisting}
\begin{table}
\center
\rowcolors{1}{\dustRowFirst}{\dustRowSecond}
\begin{tabular}{ l|l|l }
\rowcolor{\dustRowHead}
\multicolumn{3}{ c }{\textbf{$\text{tf-idf}_\text{document\_1}$}}\\\hline
term\_id & name & value\\\hline
1 & blouse & $\approx 0.18$\\
2 & blue & $\approx 0.48$\\
3 & polyester & 0\\
4 & cotton & $\approx 0.18$\\
5 & green & 0\\
6 & trouser & 0\\
7 & white & 0\\
\end{tabular}
\quad
\rowcolors{1}{\dustRowFirst}{\dustRowSecond}
\begin{tabular}{ l|l|l }
\rowcolor{\dustRowHead}
\multicolumn{3}{ c }{\textbf{$\text{tf-idf}_\text{document\_2}$}}\\\hline
term\_id & name & value\\\hline
1 & blouse & $\approx 0.18$\\
2 & blue & 0\\
3 & polyester& $\approx 0.48$\\
4 & cotton & 0\\
5 & green & 0\\
6 & trouser & 0\\
7 & white & $\approx 0.48$\\
\end{tabular}
\hfill\\
\rowcolors{1}{\dustRowFirst}{\dustRowSecond}
\begin{tabular}{ l|l|l }
\rowcolor{\dustRowHead}
\multicolumn{3}{ c }{\textbf{$\text{tf-idf}_\text{document\_3}$}}\\\hline
term\_id & name & value\\\hline
1 & blouse & 0\\
2 & blue & 0\\
3 & polyester & 0\\
4 & cotton & $\approx 0.18$\\
5 & green & $\approx 0.48$\\
6 & trouser & $\approx 0.48$\\
7 & white & 0\\
\end{tabular}
\caption{Possible result of the function in listing~\ref{lst:tfidf-code}}
\label{tab:tfidf-query-result}
\end{table}
\FloatBarrier
With the vectors built, the main task of the content analyzer is done.
However there is still one more enhancement one can implement to make the repetitive creation of vectors faster.
Through \gls{dynamic programming} one can easily ``re-create'' vectors which have already been used once.
Since it proves useful, if all inheriting classes of \textit{VectorCreator} can use dynamic programming without explicitly implementing it, one can implement the \textit{VectorCreator} as \gls{proxy}.
As a result the \textit{VectorCreator} gets another method called \textit{get\_vector(document\_id:int)} to which the same rules apply as to \textit{\_create\_vector(document\_id:int)}.
The buffering of \textit{DocumentVectors} can now be implemented in \textit{get\_vector(document\_id:int)} and all inheriting classes will also possess caching capabilities.
The code for dynamic programming is shown in listing~\ref{lst:dynamic-programming}.\\
In order to get a picture of all important vectors and their creation, a UML-diagram has been included (figure~\ref{fig:uml-vectorssimple}).
\begin{lstlisting}[language=Python,caption={Dynamic programming},label={lst:dynamic-programming},float=h]
class VectorCreator(object):
# ... omitted unnecessary code
def get_vector(self, document_id=None):
if document_id is not None and not isinstance(document_id, int):
raise TypeError('document_id must either be an integer or None')
if not document_id in self._cache:
vector = self._create_vector(document_id)
if vector.document_id is None:
vector.document_id = document_id
self._cache[document_id] = vector
        return self._cache[document_id]
def _create_vector(self, document_id):
# ... creating vector
\end{lstlisting}
\begin{figure}[h]
\center
\includegraphics[scale=0.33,angle=90]{inc/implementation/contentanalyzer/uml_vectors_simple}
\caption{Abstract and concrete vector fabric}
\label{fig:uml-vectorssimple}
\end{figure}
|
{"hexsha": "6164b915564edd4963cac9c9b6c599aa85dba70f", "size": 19833, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "thesis/inc/implementation/contentanalyzer/contentanalyzer.tex", "max_stars_repo_name": "dustywind/bachelor-thesis", "max_stars_repo_head_hexsha": "be06aaeb1b4d73f727a19029a3416a9b8043194d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "thesis/inc/implementation/contentanalyzer/contentanalyzer.tex", "max_issues_repo_name": "dustywind/bachelor-thesis", "max_issues_repo_head_hexsha": "be06aaeb1b4d73f727a19029a3416a9b8043194d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "thesis/inc/implementation/contentanalyzer/contentanalyzer.tex", "max_forks_repo_name": "dustywind/bachelor-thesis", "max_forks_repo_head_hexsha": "be06aaeb1b4d73f727a19029a3416a9b8043194d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.6413934426, "max_line_length": 203, "alphanum_fraction": 0.6394897393, "num_tokens": 5643}
|
-- An Agda example file
-- (demonstrates data types, proofs, coinduction, records and pragmas)
module test where

open import Coinduction
open import Data.Bool
open import {- pointless comment between import and module name -} Data.Char
open import Data.Nat
open import Data.Nat.Properties
open import Data.String
open import Data.List hiding ([_])
open import Data.Vec hiding ([_])
open import Relation.Nullary.Core
open import Relation.Binary.PropositionalEquality using (_≡_; refl; cong; trans; inspect; [_])
open SemiringSolver

{- this is a {- nested -} comment -}

-- Factorial
_! : ℕ → ℕ
0 ! = 1
(suc n) ! = (suc n) * n !

-- The binomial coefficient
_choose_ : ℕ → ℕ → ℕ
_ choose 0 = 1
0 choose _ = 0
(suc n) choose (suc m) = (n choose m) + (n choose (suc m)) -- Pascal's rule

-- Choosing more elements than are available yields 0
choose-too-many : ∀ n m → n ≤ m → n choose (suc m) ≡ 0
choose-too-many .0 m z≤n = refl
choose-too-many (suc n) (suc m) (s≤s le) with n choose (suc m) | choose-too-many n m le | n choose (suc (suc m)) | choose-too-many n (suc m) (≤-step le)
... | .0 | refl | .0 | refl = refl

-- Vector append whose result index is m + n (commuted); the semiring
-- solver discharges the arithmetic rewrite n + m ≡ m + n.
_++'_ : ∀ {a n m} {A : Set a} → Vec A n → Vec A m → Vec A (m + n)
_++'_ {_} {n} {m} v₁ v₂ rewrite solve 2 (λ a b → b :+ a := a :+ b) refl n m = v₁ Data.Vec.++ v₂

++'-test : (1 ∷ 2 ∷ 3 ∷ []) ++' (4 ∷ 5 ∷ []) ≡ (1 ∷ 2 ∷ 3 ∷ 4 ∷ 5 ∷ [])
++'-test = refl

-- Conatural numbers: the successor's argument is delayed (∞), so
-- infinite values are representable.
data Coℕ : Set where
  co0 : Coℕ
  cosuc : ∞ Coℕ → Coℕ

-- A small conatural built with explicitly delayed (♯) successors
nanana : Coℕ
nanana = let two = ♯ cosuc (♯ (cosuc (♯ co0))) in cosuc two

-- 'abstract': the constructor is invisible outside this block
abstract
  data VacuumCleaner : Set where
    Roomba : VacuumCleaner

-- Any Bool endofunction satisfies f (f (f true)) ≡ f true,
-- proved by case analysis on f true and f false via 'inspect'.
pointlessLemmaAboutBoolFunctions : (f : Bool → Bool) → f (f (f true)) ≡ f true
pointlessLemmaAboutBoolFunctions f with f true | inspect f true
... | true | [ eq₁ ] = trans (cong f eq₁) eq₁
... | false | [ eq₁ ] with f false | inspect f false
... | true | _ = eq₁
... | false | [ eq₂ ] = eq₂

-- Mutually recursive parity checks
mutual
  isEven : ℕ → Bool
  isEven 0 = true
  isEven (suc n) = not (isOdd n)

  isOdd : ℕ → Bool
  isOdd 0 = false
  isOdd (suc n) = not (isEven n)

foo : String
foo = "Hello World!"

nl : Char
nl = '\n'

private
  -- join a list of strings with the given separator character
  intersperseString : Char → List String → String
  intersperseString c [] = ""
  intersperseString c (x ∷ xs) = Data.List.foldl (λ a b → a Data.String.++ Data.String.fromList (c ∷ []) Data.String.++ b) x xs

baz : String
baz = intersperseString nl (Data.List.replicate 5 foo)

postulate
  Float : Set

{-# BUILTIN FLOAT Float #-}

pi : Float
pi = 3.141593

-- Astronomical unit
au : Float
au = 1.496e11 -- m

-- unfinished definition: {! !} is an interactive hole
plusFloat : Float → Float → Float
plusFloat a b = {! !}

-- An element of A together with an irrelevant (dotted) proof of P
record Subset (A : Set) (P : A → Set) : Set where
  constructor _#_
  field
    elem : A
    .proof : P elem
|
{"hexsha": "d930a77b6abbd22305d4945b4169cee9ab1a80f0", "size": 2558, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "vendor/bundle/ruby/2.0.0/gems/pygments.rb-0.6.1/vendor/pygments-main/tests/examplefiles/test.agda", "max_stars_repo_name": "agent010101/agent010101.github.io", "max_stars_repo_head_hexsha": "b8bf8ef1bc30999223deb95506b0685b4ec8449a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 23, "max_stars_repo_stars_event_min_datetime": "2015-02-17T18:34:15.000Z", "max_stars_repo_stars_event_max_datetime": "2017-08-25T17:11:52.000Z", "max_issues_repo_path": "vendor/bundle/ruby/2.0.0/gems/pygments.rb-0.6.1/vendor/pygments-main/tests/examplefiles/test.agda", "max_issues_repo_name": "agent010101/agent010101.github.io", "max_issues_repo_head_hexsha": "b8bf8ef1bc30999223deb95506b0685b4ec8449a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 34, "max_issues_repo_issues_event_min_datetime": "2015-05-12T17:54:27.000Z", "max_issues_repo_issues_event_max_datetime": "2019-01-28T14:30:46.000Z", "max_forks_repo_path": "vendor/bundle/ruby/2.0.0/gems/pygments.rb-0.6.1/vendor/pygments-main/tests/examplefiles/test.agda", "max_forks_repo_name": "agent010101/agent010101.github.io", "max_forks_repo_head_hexsha": "b8bf8ef1bc30999223deb95506b0685b4ec8449a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 16, "max_forks_repo_forks_event_min_datetime": "2015-02-06T20:43:39.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-14T10:05:12.000Z", "avg_line_length": 24.8349514563, "max_line_length": 152, "alphanum_fraction": 0.6082877248, "num_tokens": 922}
|
/****************************************************************************
*
* fkie_potree_rviz_plugin
* Copyright © 2018 Fraunhofer FKIE
* Author: Timo Röhling
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
****************************************************************************/
#include "cloud_loader.h"
#include "potree_node.h"

#include <OgreManualObject.h>
#include <boost/filesystem.hpp>
#include <ros/console.h>

#include <cstring>
#include <fstream>
#include <queue>
#include <set>
#include <vector>
namespace fkie_potree_rviz_plugin
{
/* Construct a loader for the Potree dataset rooted at @p path.
 * Validates the folder layout first and then parses the cloud.js
 * metadata; throws std::runtime_error with a diagnostic message if
 * the folder is not usable.
 */
CloudLoader::CloudLoader(const fs::path& path)
{
    std::string why;
    if (!isValid(path, why))
        throw std::runtime_error(why);
    meta_data_ = std::make_shared<CloudMetaData>();
    meta_data_->readFromJson(path / "cloud.js");
}
/* Probe whether @p path contains a loadable Potree 1.x point cloud.
 * On failure, @p error_msg receives a short human-readable reason;
 * on success it is left empty.
 */
bool CloudLoader::isValid(const fs::path& path, std::string& error_msg)
{
    error_msg.clear();
    if (!fs::is_directory(path))
    {
        error_msg = "not an existing folder";
    }
    else if (fs::is_regular(path / "metadata.json"))
    {
        error_msg = "unsupported Potree 2.0 format";
    }
    else if (!fs::is_regular(path / "cloud.js"))
    {
        error_msg = "not a Potree folder";
    }
    else
    {
        // folder layout looks right; make sure the metadata parses
        try
        {
            CloudMetaData probe;
            probe.readFromJson(path / "cloud.js");
            return true;
        }
        catch (std::exception& e)
        {
            error_msg = e.what();
        }
    }
    return false;
}
/* Create the octree root (empty name, full bounding box) and populate
 * its node hierarchy from the on-disk index files.
 */
std::shared_ptr<PotreeNode> CloudLoader::loadHierarchy() const
{
    auto root = std::make_shared<PotreeNode>("", meta_data_,
                                             meta_data_->bounding_box_);
    loadNodeHierarchy(root);
    return root;
}
/* Populate the child pointers of @p root_node from its .hrc index file.
 *
 * The .hrc file is a flat binary stream of 5-byte records consumed in
 * BFS order: cfg[0] is the child bitmask, the remaining 4 bytes (a point
 * count) are not needed here. Nodes left in the queue after EOF belong
 * to deeper .hrc files, which are loaded via the trailing recursion.
 */
void CloudLoader::loadNodeHierarchy(
    const std::shared_ptr<PotreeNode>& root_node) const
{
    std::queue<std::shared_ptr<PotreeNode>> next_nodes;
    next_nodes.push(root_node);
    char cfg[5];
    fs::path hrc_file = fileName(meta_data_, root_node->name(), ".hrc");
    // .hrc holds binary data: without std::ios::binary, text-mode
    // newline/EOF translation would corrupt the records on Windows
    std::ifstream f{hrc_file.c_str(), std::ios::in | std::ios::binary};
    if (!f.good())
    {
        ROS_ERROR_STREAM("failed to read file: " << hrc_file);
        // bail out: falling through to the parent-reload loop below with
        // an unread queue could re-trigger loading of this very file
        return;
    }
    f.read(cfg, 5);
    while (f.good())
    {
        std::shared_ptr<PotreeNode> node = next_nodes.front();
        next_nodes.pop();
        for (int j = 0; j < 8; ++j)
        {
            if (cfg[0] & (1 << j))
            {
                if (!node->children_[j])
                {
                    std::shared_ptr<PotreeNode> child =
                        std::make_shared<PotreeNode>(
                            node->name() + std::to_string(j), meta_data_,
                            childBB(node->boundingBox(), j), node);
                    node->children_[j] = child;
                }
                next_nodes.push(node->children_[j]);
            }
        }
        f.read(cfg, 5);
    }
    std::set<PotreeNode*> seen; // save the shared_ptr copy overhead and just
                                // track seen nodes by their address
    while (!next_nodes.empty())
    {
        std::shared_ptr<PotreeNode> node = next_nodes.front()->parent().lock();
        next_nodes.pop();
        if (node && seen.insert(node.get()).second)
            loadNodeHierarchy(node);
    }
}
void CloudLoader::loadPoints(const std::shared_ptr<PotreeNode>& node,
bool recursive) const
{
fs::path bin_file = fileName(meta_data_, node->name(), ".bin");
if (!fs::is_regular_file(bin_file))
{
ROS_ERROR_STREAM("file not found: " << bin_file);
return;
}
std::size_t size = fs::file_size(bin_file);
std::ifstream f{bin_file.c_str()};
if (!f.good())
{
ROS_ERROR_STREAM("failed to open file: " << bin_file);
return;
}
std::vector<char> data;
data.resize(size);
if (!f.read(data.data(), size))
{
ROS_ERROR_STREAM("failed to read file: " << bin_file);
return;
}
std::size_t point_count = data.size() / meta_data_->point_byte_size_;
if (point_count == 0)
{
ROS_WARN_STREAM("empty node: " << node->name());
std::lock_guard<std::mutex> lock{node->mutex_};
node->vertex_data_.reset();
node->points_.clear();
node->colors_.clear();
node->point_count_ = 0;
node->loaded_ = true;
return;
}
std::vector<Ogre::Vector3> points;
std::vector<Ogre::ColourValue> colors;
points.reserve(point_count);
colors.reserve(point_count);
std::size_t offset = 0;
Ogre::Vector3 translate = node->bounding_box_.getMinimum();
for (const std::string& attr : meta_data_->point_attributes_)
{
if (attr == "POSITION_CARTESIAN")
{
for (std::size_t i = 0; i < point_count; ++i)
{
std::size_t index = offset + i * meta_data_->point_byte_size_;
float x = *reinterpret_cast<std::uint32_t*>(&data[index + 0])
* meta_data_->scale_
+ translate.x;
float y = *reinterpret_cast<std::uint32_t*>(&data[index + 4])
* meta_data_->scale_
+ translate.y;
float z = *reinterpret_cast<std::uint32_t*>(&data[index + 8])
* meta_data_->scale_
+ translate.z;
points.push_back(Ogre::Vector3(x, y, z));
}
}
else if (attr == "COLOR_PACKED")
{
for (std::size_t i = 0; i < point_count; ++i)
{
std::size_t index = offset + i * meta_data_->point_byte_size_;
float r = 1.f * data[index + 0] / 255.f;
float g = 1.f * data[index + 1] / 255.f;
float b = 1.f * data[index + 2] / 255.f;
float a = 1.f * data[index + 3] / 255.f;
colors.push_back(Ogre::ColourValue(r, g, b, a));
}
}
offset += CloudMetaData::sizeOf(attr);
}
if (points.empty())
{
ROS_WARN_STREAM("no POSITION_CARTESIAN data: " << node->name());
std::lock_guard<std::mutex> lock{node->mutex_};
node->vertex_data_.reset();
node->points_.clear();
node->colors_.clear();
node->point_count_ = 0;
node->loaded_ = true;
return;
}
{
std::lock_guard<std::mutex> lock{node->mutex_};
node->points_ = std::move(points);
node->colors_ = std::move(colors);
node->point_count_ = point_count;
node->unique_id_ = "potree:" + bin_file.string();
node->loaded_ = true;
}
if (recursive)
{
for (const std::shared_ptr<PotreeNode>& child : node->children_)
{
if (child)
loadPoints(child, true);
}
}
}
/* Compute the on-disk location of a node's data file.
 *
 * The node name is split into one subdirectory per full hierarchy step,
 * the basename is "r" + name + extension. If an upgraded ("u") variant
 * of the file exists it takes precedence over the regular ("r") one.
 */
fs::path CloudLoader::fileName(const std::shared_ptr<CloudMetaData>& meta_data,
                               const std::string& name,
                               const std::string& extension)
{
    const fs::path octree_dir = meta_data->cloud_path_ / meta_data->octree_dir_;
    const std::size_t step = meta_data->hierarchy_step_size_;
    const std::size_t levels = name.length() / step;
    fs::path rel;
    for (std::size_t level = 0; level < levels; ++level)
        rel /= name.substr(level * step, step);
    rel /= "r" + name + extension;
    const fs::path upgraded = octree_dir / "u" / rel;
    if (fs::is_regular_file(upgraded))
        return upgraded;
    return octree_dir / "r" / rel;
}
/* Return the axis-aligned bounding box of the child octant @p index
 * (0..7) of @p parent: bit 0 selects the upper z half, bit 1 the upper
 * y half, bit 2 the upper x half.
 */
Ogre::AxisAlignedBox CloudLoader::childBB(const Ogre::AxisAlignedBox& parent,
                                          int index)
{
    assert(!parent.isInfinite());
    const Ogre::Vector3 half = parent.getHalfSize();
    Ogre::Vector3 lo = parent.getMinimum();
    Ogre::Vector3 hi = parent.getMaximum();
    if (index & 1)
        lo.z += half.z;
    else
        hi.z -= half.z;
    if (index & 2)
        lo.y += half.y;
    else
        hi.y -= half.y;
    if (index & 4)
        lo.x += half.x;
    else
        hi.x -= half.x;
    return Ogre::AxisAlignedBox(lo, hi);
}
} // namespace fkie_potree_rviz_plugin
|
{"hexsha": "955dbed0a04680a7d3ecd4df79107c1e4a83b0c2", "size": 8716, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "fkie_potree_rviz_plugin/src/cloud_loader.cpp", "max_stars_repo_name": "fkie/potree_rviz_plugin", "max_stars_repo_head_hexsha": "883c305dd924b8c8ae35c192c087f2bb25899f8d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 13.0, "max_stars_repo_stars_event_min_datetime": "2018-12-03T08:46:07.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-09T19:32:33.000Z", "max_issues_repo_path": "fkie_potree_rviz_plugin/src/cloud_loader.cpp", "max_issues_repo_name": "fkie/potree_rviz_plugin", "max_issues_repo_head_hexsha": "883c305dd924b8c8ae35c192c087f2bb25899f8d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2021-06-13T22:18:55.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-14T11:01:54.000Z", "max_forks_repo_path": "fkie_potree_rviz_plugin/src/cloud_loader.cpp", "max_forks_repo_name": "fkie/potree_rviz_plugin", "max_forks_repo_head_hexsha": "883c305dd924b8c8ae35c192c087f2bb25899f8d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.4014869888, "max_line_length": 80, "alphanum_fraction": 0.5491050941, "num_tokens": 2111}
|
// Copyright (c) 2007-2013 Hartmut Kaiser
// Copyright (c) 2011 Bryce Lelbach
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#if !defined(HPX_UTIL_FULLEMPTYSTORE_JUN_16_2008_0128APM)
#define HPX_UTIL_FULLEMPTYSTORE_JUN_16_2008_0128APM
#include <memory>
#include <hpx/hpx_fwd.hpp>
#include <hpx/util/move.hpp>
#include <hpx/lcos/local/spinlock.hpp>
#include <hpx/util/scoped_unlock.hpp>
#include <hpx/util/stringstream.hpp>
#include <hpx/runtime/threads/thread_data.hpp>
#include <hpx/runtime/threads/thread_helpers.hpp>
#include <boost/aligned_storage.hpp>
#include <boost/type_traits/alignment_of.hpp>
#include <boost/type_traits/add_pointer.hpp>
#include <boost/intrusive/slist.hpp>
///////////////////////////////////////////////////////////////////////////////
namespace hpx { namespace lcos { namespace detail
{
///////////////////////////////////////////////////////////////////////////
// The two states a full/empty entry can be in; the values double as
// booleans (an entry is 'full' exactly when its state converts to true).
enum full_empty_state
{
    empty = false,
    full = true
};
///////////////////////////////////////////////////////////////////////////
// data structure holding all counters for full_empty entries
struct full_empty_counter_data
{
    // all counters start at zero
    full_empty_counter_data()
      : constructed_(0), destructed_(0),
        read_enqueued_(0), read_dequeued_(0), set_full_(0)
    {}

    boost::atomic_int64_t constructed_;    // entries created
    boost::atomic_int64_t destructed_;     // entries destroyed
    boost::atomic_int64_t read_enqueued_;  // threads queued in a full/full read
    boost::atomic_int64_t read_dequeued_;  // threads resumed from a full/full read
    boost::atomic_int64_t set_full_;       // readers woken when an entry became full
};
HPX_EXPORT extern full_empty_counter_data full_empty_counter_data_;
// call this to register all counter types for full_empty entries
void register_counter_types();
///////////////////////////////////////////////////////////////////////////
template <typename Data>
class full_empty_entry
{
public:
typedef lcos::local::spinlock mutex_type;
private:
typedef threads::thread_id_type thread_id_type;
// define data structures needed for intrusive slist container used for
// the queues
struct queue_entry
{
typedef boost::intrusive::slist_member_hook<
boost::intrusive::link_mode<boost::intrusive::normal_link>
> hook_type;
queue_entry(thread_id_type id)
: id_(id)
{}
thread_id_type id_;
hook_type list_hook_;
};
typedef boost::intrusive::member_hook<
queue_entry, typename queue_entry::hook_type,
&queue_entry::list_hook_
> list_option_type;
typedef boost::intrusive::slist<
queue_entry, list_option_type,
boost::intrusive::cache_last<true>,
boost::intrusive::constant_time_size<false>
> queue_type;
struct reset_queue_entry
{
reset_queue_entry(queue_entry& e, queue_type& q)
: e_(e), q_(q), last_(q.last())
{}
~reset_queue_entry()
{
if (e_.id_)
q_.erase(last_); // remove entry from queue
}
queue_entry& e_;
queue_type& q_;
typename queue_type::const_iterator last_;
};
void log_non_empty_queue(char const* const desc, queue_type& queue)
{
mutex_type::scoped_lock l(mtx_);
while (!queue.empty()) {
threads::thread_id_type id = queue.front().id_;
queue.front().id_ = 0;
queue.pop_front();
// we know that the id is actually the pointer to the thread
threads::thread_data_base* thrd =
reinterpret_cast<threads::thread_data_base*>(id);
LERR_(info) << "~full_empty_entry: aborting pending thread in "
<< desc << ": "
<< get_thread_state_name(thrd->get_state())
<< "(" << id << "): " << thrd->get_description();
// forcefully abort thread, do not throw
error_code ec(lightweight);
threads::set_thread_state(id, threads::pending,
threads::wait_abort, threads::thread_priority_default, ec);
if (ec) {
LERR_(error) << "~full_empty_entry: could not abort thread"
<< get_thread_state_name(thrd->get_state())
<< "(" << id << "): " << thrd->get_description();
}
}
}
public:
full_empty_entry()
: data_(), state_(empty)
{
++full_empty_counter_data_.constructed_;
}
template <typename T0>
explicit full_empty_entry(BOOST_FWD_REF(T0) t0)
: data_(boost::forward<T0>(t0)), state_(empty)
{
++full_empty_counter_data_.constructed_;
}
~full_empty_entry()
{
if (is_used()) {
LERR_(info) << "~full_empty_entry: one of the queues is not empty";
log_non_empty_queue("write_queue", write_queue_);
log_non_empty_queue("read_and_empty_queue", read_and_empty_queue_);
log_non_empty_queue("read_queue", read_queue_);
}
++full_empty_counter_data_.destructed_;
}
// returns whether this entry is currently empty
bool is_empty() const
{
mutex_type::scoped_lock l(mtx_);
return state_ == empty;
}
// sets this entry to empty
bool set_empty(error_code& ec = throws)
{
mutex_type::scoped_lock l(mtx_);
return set_empty_locked(ec);
}
// sets this entry to full
bool set_full(error_code& ec = throws)
{
mutex_type::scoped_lock l(mtx_);
return set_full_locked(ec);
}
template <typename F>
bool peek(F f) const
{
mutex_type::scoped_lock l(mtx_);
if (state_ == empty)
return false;
return f(data_); // pass the data to the provided function
}
///////////////////////////////////////////////////////////////////////
// enqueue a get operation if full/full queue if entry is empty
template <typename T>
void enqueue_full_full(T& dest, error_code& ec = throws)
{
mutex_type::scoped_lock l(mtx_);
// block if this entry is empty
if (state_ == empty) {
threads::thread_self* self = threads::get_self_ptr_checked(ec);
if (ec) return;
threads::thread_id_type id = self->get_thread_id();
// enqueue the request and block this thread
queue_entry f(id);
read_queue_.push_back(f);
++full_empty_counter_data_.read_enqueued_;
reset_queue_entry r(f, read_queue_);
{
// yield this thread
util::scoped_unlock<mutex_type::scoped_lock> ul(l);
this_thread::suspend(threads::suspended,
"full_empty_entry::enqueue_full_full", ec);
if (ec) return;
}
++full_empty_counter_data_.read_dequeued_;
}
// copy the data to the destination
dest = data_;
if (&ec != &throws)
ec = make_success_code();
}
// same as above, but for entries without associated data
void enqueue_full_full(error_code& ec = throws)
{
mutex_type::scoped_lock l(mtx_);
// block if this entry is empty
if (state_ == empty) {
threads::thread_self* self = threads::get_self_ptr_checked(ec);
if (ec) return;
threads::thread_id_type id = self->get_thread_id();
// enqueue the request and block this thread
queue_entry f(id);
read_queue_.push_back(f);
++full_empty_counter_data_.read_enqueued_;
reset_queue_entry r(f, read_queue_);
{
// yield this thread
util::scoped_unlock<mutex_type::scoped_lock> ul(l);
this_thread::suspend(threads::suspended,
"full_empty_entry::enqueue_full_full", ec);
if (ec) return;
}
++full_empty_counter_data_.read_dequeued_;
}
if (&ec != &throws)
ec = make_success_code();
}
///////////////////////////////////////////////////////////////////////
// enqueue a get operation in full/empty queue if entry is empty
template <typename T>
void enqueue_full_empty(T& dest, error_code& ec = throws)
{
mutex_type::scoped_lock l(mtx_);
// block if this entry is empty
if (state_ == empty) {
threads::thread_self* self = threads::get_self_ptr_checked(ec);
if (ec) return;
threads::thread_id_type id = self->get_thread_id();
// enqueue the request and block this thread
queue_entry f(id);
read_and_empty_queue_.push_back(f);
reset_queue_entry r(f, read_and_empty_queue_);
{
// yield this thread
util::scoped_unlock<mutex_type::scoped_lock> ul(l);
this_thread::suspend(threads::suspended,
"full_empty_entry::enqueue_full_empty", ec);
if (ec) return;
}
// move the data to the destination
dest = boost::move(data_);
}
else {
// move the data to the destination
dest = boost::move(data_);
set_empty_locked(ec); // state_ = empty;
if (ec) return;
}
if (&ec != &throws)
ec = make_success_code();
}
// same as above, but for entries without associated data
void enqueue_full_empty(error_code& ec = throws)
{
mutex_type::scoped_lock l(mtx_);
// block if this entry is empty
if (state_ == empty) {
threads::thread_self* self = threads::get_self_ptr_checked(ec);
if (ec) return;
threads::thread_id_type id = self->get_thread_id();
// enqueue the request and block this thread
queue_entry f(id);
read_and_empty_queue_.push_back(f);
reset_queue_entry r(f, read_and_empty_queue_);
{
// yield this thread
util::scoped_unlock<mutex_type::scoped_lock> ul(l);
this_thread::suspend(threads::suspended,
"full_empty_entry::enqueue_full_empty", ec);
if (ec) return;
}
}
else {
set_empty_locked(ec); // state_ = empty
if (ec) return;
}
if (&ec != &throws)
ec = make_success_code();
}
///////////////////////////////////////////////////////////////////////
// enqueue if entry is full, otherwise fill it
template <typename T>
void enqueue_if_full(BOOST_FWD_REF(T) src, error_code& ec = throws)
{
mutex_type::scoped_lock l(mtx_);
// block if this entry is already full
if (state_ == full) {
threads::thread_self* self = threads::get_self_ptr_checked(ec);
if (ec) return;
threads::thread_id_type id = self->get_thread_id();
// enqueue the request and block this thread
queue_entry f(id);
write_queue_.push_back(f);
reset_queue_entry r(f, write_queue_);
{
// yield this thread
util::scoped_unlock<mutex_type::scoped_lock> ul(l);
this_thread::suspend(threads::suspended,
"full_empty_entry::enqueue_if_full", ec);
if (ec) return;
}
}
// set the data
data_ = boost::forward<T>(src);
// make sure the entry is full
set_full_locked(ec); // state_ = full
if (ec) return;
if (&ec != &throws)
ec = make_success_code();
}
// same as above, but for entries without associated data
void enqueue_if_full(error_code& ec = throws)
{
mutex_type::scoped_lock l(mtx_);
// block if this entry is already full
if (state_ == full) {
threads::thread_self* self = threads::get_self_ptr_checked(ec);
if (ec) return;
threads::thread_id_type id = self->get_thread_id();
// enqueue the request and block this thread
queue_entry f(id);
write_queue_.push_back(f);
reset_queue_entry r(f, write_queue_);
{
// yield this thread
util::scoped_unlock<mutex_type::scoped_lock> ul(l);
this_thread::suspend(threads::suspended,
"full_empty_entry::enqueue_if_full", ec);
if (ec) return;
}
}
// make sure the entry is full
set_full_locked(ec); // state_ = full
if (ec) return;
if (&ec != &throws)
ec = make_success_code();
}
///////////////////////////////////////////////////////////////////////
// unconditionally set the data and set the entry to full
template <typename T>
void set_and_fill(BOOST_FWD_REF(T) src, error_code& ec = throws)
{
mutex_type::scoped_lock l(mtx_);
// set the data
data_ = boost::forward<T>(src);
// make sure the entry is full
set_full_locked(ec); // state_ = full
}
// same as above, but for entries without associated data
void set_and_fill(error_code& ec = throws)
{
mutex_type::scoped_lock l(mtx_);
// make sure the entry is full
set_full_locked(ec); // state_ = full
}
// returns whether this entry is still in use
bool is_used() const
{
mutex_type::scoped_lock l(mtx_);
return is_used_locked();
}
protected:
bool set_empty_locked(error_code& ec)
{
state_ = empty;
if (!write_queue_.empty()) {
threads::thread_id_type id = write_queue_.front().id_;
write_queue_.front().id_ = 0;
write_queue_.pop_front();
threads::set_thread_state(id, threads::pending,
threads::wait_timeout, threads::thread_priority_default, ec);
set_full_locked(ec); // state_ = full
if (ec) return false;
}
// return whether this block needs to be removed
return state_ == full && !is_used_locked();
}
// Marks the entry full while the caller holds mtx_.  First wakes every
// plain reader, then (if present) one reader that also empties the
// entry, which flips the state back to empty via set_empty_locked.
// Returns true when the entry is full with no remaining waiters (it may
// be removed); false otherwise or on scheduling error (ec set).
bool set_full_locked(error_code& ec)
{
state_ = full;
// handle all threads waiting for the block to become full
while (!read_queue_.empty()) {
threads::thread_id_type id = read_queue_.front().id_;
read_queue_.front().id_ = 0;    // detach the id before popping the node
read_queue_.pop_front();
threads::set_thread_state(id, threads::pending,
threads::wait_timeout, threads::thread_priority_default, ec);
if (ec) return false;
// NOTE(review): full_empty_counter_data_ is not declared in the visible
// part of this class -- presumably defined earlier in the file; confirm.
++full_empty_counter_data_.set_full_;
}
// since we got full now we need to re-activate one thread waiting
// for the block to become full
if (!read_and_empty_queue_.empty()) {
threads::thread_id_type id = read_and_empty_queue_.front().id_;
read_and_empty_queue_.front().id_ = 0;
read_and_empty_queue_.pop_front();
threads::set_thread_state(id, threads::pending,
threads::wait_timeout, threads::thread_priority_default, ec);
if (ec) return false;
set_empty_locked(ec); // state_ = empty
if (ec) return false;
}
// return whether this block needs to be removed
return state_ == full && !is_used_locked();
}
// True while at least one thread is parked on any of the three wait
// queues; the caller must already hold mtx_.
bool is_used_locked() const
{
    return !write_queue_.empty() || !read_and_empty_queue_.empty() || !read_queue_.empty();
}
private:
typedef Data value_type;
// guards every access to the queues, the data and the state below
mutable mutex_type mtx_;
queue_type write_queue_; // threads waiting in write
queue_type read_and_empty_queue_; // threads waiting in read_and_empty
queue_type read_queue_; // threads waiting in read
value_type data_; // protected data
full_empty_state state_; // current full/empty state
};
}}}
#endif
|
{"hexsha": "02eb15eb5953f8c048f2011af7dd6337c2cf8797", "size": 17787, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "hpx/lcos/detail/full_empty_entry.hpp", "max_stars_repo_name": "andreasbuhr/hpx", "max_stars_repo_head_hexsha": "4366a90aacbd3e95428a94ab24a1646a67459cc2", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "hpx/lcos/detail/full_empty_entry.hpp", "max_issues_repo_name": "andreasbuhr/hpx", "max_issues_repo_head_hexsha": "4366a90aacbd3e95428a94ab24a1646a67459cc2", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hpx/lcos/detail/full_empty_entry.hpp", "max_forks_repo_name": "andreasbuhr/hpx", "max_forks_repo_head_hexsha": "4366a90aacbd3e95428a94ab24a1646a67459cc2", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.8155893536, "max_line_length": 99, "alphanum_fraction": 0.5138022151, "num_tokens": 3533}
|
import cv2
import numpy as np
import time

# Preview the default camera, downscaled to 64x64, roughly one frame per
# second.  Press any key inside the preview window to stop.
WINDOW_NAME = 'Mywindow'

cv2.namedWindow(WINDOW_NAME)
cameraCapture = cv2.VideoCapture(0)
#cameraCapture.set(3,64)
#cameraCapture.set(4,64)

success, image = cameraCapture.read()
while success and cv2.waitKey(1) == -1:
    # INTER_AREA is the recommended interpolation when shrinking an image
    image = cv2.resize(image, (64, 64), interpolation=cv2.INTER_AREA)
    cv2.imshow(WINDOW_NAME, image)
    print(image.shape)
    time.sleep(1)  # throttle to ~1 fps
    success, image = cameraCapture.read()

# Bug fix: the original also called cv2.destroyWindow('ColorWindow'), but no
# window by that name was ever created, which raises cv2.error.  Only the
# window actually opened above is destroyed here.
cv2.destroyWindow(WINDOW_NAME)
cameraCapture.release()
|
{"hexsha": "d221e0837c81dcc2e1726953735f56afdab09122", "size": 559, "ext": "py", "lang": "Python", "max_stars_repo_path": "camera.py", "max_stars_repo_name": "Thisislegit/RTCS_hand-_counting_project", "max_stars_repo_head_hexsha": "1c6bbc48cab9dd579809e0919ec00e2be3721dac", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "camera.py", "max_issues_repo_name": "Thisislegit/RTCS_hand-_counting_project", "max_issues_repo_head_hexsha": "1c6bbc48cab9dd579809e0919ec00e2be3721dac", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "camera.py", "max_forks_repo_name": "Thisislegit/RTCS_hand-_counting_project", "max_forks_repo_head_hexsha": "1c6bbc48cab9dd579809e0919ec00e2be3721dac", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.36, "max_line_length": 65, "alphanum_fraction": 0.7495527728, "include": true, "reason": "import numpy", "num_tokens": 155}
|
from __future__ import absolute_import
from tensorflow.keras import activations, constraints, initializers, regularizers
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Layer, Dropout, LeakyReLU, Dense, Concatenate,Reshape
import tensorflow as tf
import numpy as np
import pdb
# Custom Layer to extract offense and defense nodes for home and away teams
class Game_Vec(Layer):
    """Gathers the node-feature rows belonging to one game's two teams.

    ``inputs[0]`` holds per-game index pairs; ``inputs[1]`` is the stacked
    node-feature tensor, gathered along axis 1.
    """

    def __init__(self, attention_feat_size, N):
        super(Game_Vec, self).__init__()
        self.feat_dim = attention_feat_size  # stored for callers; unused in call()
        self.N = N

    def call(self, inputs):
        # Offset the second index by N + 1 -- presumably skipping the N
        # home-side node slots plus one; confirm against the graph layout.
        shift = tf.constant([0, self.N + 1], dtype=tf.int64)
        picked = tf.gather(inputs[1], tf.math.add(inputs[0], shift), axis=1)
        return picked
class To_Sparse(Layer):
    """Converts a dense adjacency matrix into a ``tf.SparseTensor``."""

    def __init__(self,):
        super(To_Sparse, self).__init__()

    def call(self, inputs):
        # inputs[0] is the dense adjacency matrix
        return tf.sparse.from_dense(inputs[0], name='adj_mat')
|
{"hexsha": "b7cf183fbaa8cd02b46ffb7ee70f2157d3200937", "size": 964, "ext": "py", "lang": "Python", "max_stars_repo_path": "ncaabGNNs/src/extract_team_GAT.py", "max_stars_repo_name": "joewilaj/sportsGNNs", "max_stars_repo_head_hexsha": "78beb1a7908afa0ff2c6b2d425f4e81fd7dee3c4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2021-02-21T19:06:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-14T11:16:56.000Z", "max_issues_repo_path": "ncaabGNNs/src/extract_team_GAT.py", "max_issues_repo_name": "joewilaj/sportsGNNs", "max_issues_repo_head_hexsha": "78beb1a7908afa0ff2c6b2d425f4e81fd7dee3c4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ncaabGNNs/src/extract_team_GAT.py", "max_forks_repo_name": "joewilaj/sportsGNNs", "max_forks_repo_head_hexsha": "78beb1a7908afa0ff2c6b2d425f4e81fd7dee3c4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-28T13:37:47.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-28T13:37:47.000Z", "avg_line_length": 23.512195122, "max_line_length": 89, "alphanum_fraction": 0.7043568465, "include": true, "reason": "import numpy", "num_tokens": 232}
|
# Minimal EdgeModifier carrying a single payload field `s`; the testsets
# below attach it to nodes/edges and check that `unmeta` strips it again.
struct Start <: EdgeModifier
    s
end
# `unmeta` must leave a plain Node untouched and strip a Start modifier.
@testset "Unmeta Node" begin
    @test unmeta(Node(:a)) == Node(:a)
    @test unmeta(Node(:a) * Start(4)) == Node(:a)
end
# Same contract for edges: `unmeta` is identity on plain Edges and strips
# an attached Start modifier.
@testset "Unmeta Edge" begin
    @test unmeta(Edge(Node(:a), Node(:b))) == Edge(Node(:a), Node(:b))
    @test unmeta(Edge(Node(:a), Node(:b)) * Start(4)) == Edge(Node(:a), Node(:b))
end
|
{"hexsha": "2df7f8c2b1ab787c01d53904adcc55ec8ed2c840", "size": 350, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/keep.jl", "max_stars_repo_name": "aaronpeikert/Semi.jl", "max_stars_repo_head_hexsha": "7fbb585ccd8068c3fc48077693d9fff71a024561", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2022-02-04T21:50:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T07:01:17.000Z", "max_issues_repo_path": "test/keep.jl", "max_issues_repo_name": "aaronpeikert/Semi.jl", "max_issues_repo_head_hexsha": "7fbb585ccd8068c3fc48077693d9fff71a024561", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 22, "max_issues_repo_issues_event_min_datetime": "2022-02-04T11:39:57.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T07:00:14.000Z", "max_forks_repo_path": "test/keep.jl", "max_forks_repo_name": "aaronpeikert/StenoGraphs.jl", "max_forks_repo_head_hexsha": "6901c0d17ed91e7b34eb4ea0896bf9e04687cc03", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.3333333333, "max_line_length": 81, "alphanum_fraction": 0.5857142857, "num_tokens": 121}
|
"""
Collection of tests for unified device functions
"""
# global
import math
import pytest
import numpy as np
from numbers import Number
# local
import ivy
import ivy.functional.backends.numpy
import ivy_tests.test_ivy.helpers as helpers
# Tests #
# ------#
# Device Queries #
# dev
@pytest.mark.parametrize("x", [1, [], [1], [[0.0, 1.0], [2.0, 3.0]]])
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_dev(x, dtype, tensor_fn, device, call):
    """ivy.dev(as_str=True) must return the device string the array lives on."""
    # mxnet cannot create 0-dimensional variables, so skip that combination
    zero_dim = isinstance(x, Number) or len(x) == 0
    if zero_dim and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        pytest.skip()
    arr = tensor_fn(x, dtype, device)
    dev_str = ivy.dev(arr, as_str=True)
    # type test
    assert isinstance(dev_str, str)
    # value test
    assert dev_str == device
# dev_to_str
@pytest.mark.parametrize("x", [1, [], [1], [[0.0, 1.0], [2.0, 3.0]]])
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_dev_to_str(x, dtype, tensor_fn, device, call):
    """ivy.dev_to_str must turn a native device handle into a string."""
    # mxnet cannot create 0-dimensional variables, so skip that combination
    zero_dim = isinstance(x, Number) or len(x) == 0
    if zero_dim and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        pytest.skip()
    arr = tensor_fn(x, dtype, device)
    native_dev = ivy.dev(arr)
    # type test
    assert isinstance(ivy.dev_to_str(native_dev), str)
# dev_from_str
@pytest.mark.parametrize("x", [1, [], [1], [[0.0, 1.0], [2.0, 3.0]]])
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_dev_from_str(x, dtype, tensor_fn, device, call):
    """ivy.dev_from_str must produce a native device matching the array's device."""
    # smoke test
    if (
        (isinstance(x, Number) or len(x) == 0)
        and tensor_fn == helpers.var_fn
        and call is helpers.mx_call
    ):
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    x = tensor_fn(x, dtype, device)
    # shadow the string fixture with the native device handle
    device = ivy.dev_from_str(device)
    ret = ivy.dev_from_str(ivy.dev(x, as_str=True))
    # value test
    if call in [helpers.tf_call, helpers.tf_graph_call]:
        # tensorflow device strings carry prefixes; compare only the trailing
        # "type:index" part of each
        assert "/" + ":".join(ret[1:].split(":")[-2:]) == "/" + ":".join(
            device[1:].split(":")[-2:]
        )
    elif call is helpers.torch_call:
        # torch devices are compared by their type field only
        assert ret.type == device.type
    else:
        assert ret == device
    # compilation test
    if call is helpers.torch_call:
        # pytorch scripting does not handle converting string to device
        return
# memory_on_dev
@pytest.mark.parametrize("dev_to_check", ["cpu", "gpu:0"])
def test_memory_on_dev(dev_to_check, device, call):
    """ivy.total_mem_on_dev must report a plausible memory size for the device."""
    if "gpu" in dev_to_check and ivy.num_gpus() == 0:
        # no GPU present -> nothing to measure
        pytest.skip()
    total = ivy.total_mem_on_dev(dev_to_check)
    # type test
    assert isinstance(total, float)
    # value test -- NOTE(review): upper bound assumes machines with < 64 GB
    assert 0 < total < 64
    # compilation test
    if call is helpers.torch_call:
        # global variables aren't supported for pytorch scripting
        pytest.skip()
# Device Allocation #
# default_device
def test_default_device(device, call):
    """set/unset and the DefaultDevice context manager must balance the stack."""
    base = len(ivy.default_device_stack)
    # explicit push/pop
    ivy.set_default_device("cpu")
    assert len(ivy.default_device_stack) == base + 1
    ivy.set_default_device("cpu")
    assert len(ivy.default_device_stack) == base + 2
    ivy.unset_default_device()
    assert len(ivy.default_device_stack) == base + 1
    ivy.unset_default_device()
    assert len(ivy.default_device_stack) == base
    # context-manager form must pop automatically on exit
    assert len(ivy.default_device_stack) == base
    with ivy.DefaultDevice("cpu"):
        assert len(ivy.default_device_stack) == base + 1
        with ivy.DefaultDevice("cpu"):
            assert len(ivy.default_device_stack) == base + 2
        assert len(ivy.default_device_stack) == base + 1
    assert len(ivy.default_device_stack) == base
# to_dev
@pytest.mark.parametrize("x", [1, [], [1], [[0.0, 1.0], [2.0, 3.0]]])
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
@pytest.mark.parametrize("with_out", [False, True])
def test_to_dev(x, dtype, tensor_fn, with_out, device, call):
    """ivy.to_dev must move an array to the requested device, optionally into out."""
    # smoke test
    if (
        (isinstance(x, Number) or len(x) == 0)
        and tensor_fn == helpers.var_fn
        and call is helpers.mx_call
    ):
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    x = tensor_fn(x, dtype, device)
    # create a dummy array for out that is broadcastable to x
    out = ivy.zeros(ivy.shape(x)) if with_out else None
    device = ivy.dev(x)
    x_on_dev = ivy.to_dev(x, device, out=out)
    dev_from_new_x = ivy.dev(x_on_dev)
    if with_out:
        # should be the same array test
        assert np.allclose(ivy.to_numpy(x_on_dev), ivy.to_numpy(out))
        # should be the same device
        assert ivy.dev(x_on_dev) == ivy.dev(out)
        # check if native arrays are the same
        if ivy.current_framework_str() in ["tensorflow", "jax"]:
            # these frameworks do not support native inplace updates
            return
        assert x_on_dev.data is out.data
    # value test
    if call in [helpers.tf_call, helpers.tf_graph_call]:
        # tensorflow device strings carry prefixes; compare the trailing parts
        assert "/" + ":".join(dev_from_new_x[1:].split(":")[-2:]) == "/" + ":".join(
            device[1:].split(":")[-2:]
        )
    elif call is helpers.torch_call:
        # torch devices are compared by their type field only
        assert dev_from_new_x.type == device.type
    else:
        assert dev_from_new_x == device
# Function Splitting #
@pytest.mark.parametrize(
    "x0", [[[0, 1, 2], [3, 4, 5], [6, 7, 8]], [[9, 8, 7], [6, 5, 4], [3, 2, 1]]]
)
@pytest.mark.parametrize(
    "x1",
    [[[2, 4, 6], [8, 10, 12], [14, 16, 18]], [[18, 16, 14], [12, 10, 8], [6, 4, 2]]],
)
@pytest.mark.parametrize("chunk_size", [1, 3])
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_split_func_call(x0, x1, chunk_size, axis, tensor_fn, device, call):
    """Chunked execution via ivy.split_func_call must match direct execution."""
    t0 = tensor_fn(x0, "float32", device)
    t1 = tensor_fn(x1, "float32", device)

    # three outputs so concatenation of multiple results is exercised
    def func(a, b):
        return a * b, a - b, b - a

    chunked = ivy.split_func_call(
        func, [t0, t1], "concat", chunk_size=chunk_size, input_axes=axis
    )
    direct = func(t0, t1)
    # value test: every chunked output must equal the direct one
    for got, want in zip(chunked, direct):
        assert np.allclose(ivy.to_numpy(got), ivy.to_numpy(want))
@pytest.mark.parametrize(
    "x0", [[[0, 1, 2], [3, 4, 5], [6, 7, 8]], [[9, 8, 7], [6, 5, 4], [3, 2, 1]]]
)
@pytest.mark.parametrize(
    "x1",
    [[[2, 4, 6], [8, 10, 12], [14, 16, 18]], [[18, 16, 14], [12, 10, 8], [6, 4, 2]]],
)
@pytest.mark.parametrize("chunk_size", [1, 3])
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_split_func_call_with_cont_input(
    x0, x1, chunk_size, axis, tensor_fn, device, call
):
    """Same as test_split_func_call, but with ivy.Container inputs."""
    c0 = ivy.Container(cont_key=tensor_fn(x0, "float32", device))
    c1 = ivy.Container(cont_key=tensor_fn(x1, "float32", device))

    def func(a, b):
        return a * b, a - b, b - a

    chunked = ivy.split_func_call(
        func, [c0, c1], "concat", chunk_size=chunk_size, input_axes=axis
    )
    direct = func(c0, c1)
    # value test on the single container leaf
    for got, want in zip(chunked, direct):
        assert np.allclose(ivy.to_numpy(got.cont_key), ivy.to_numpy(want.cont_key))
@pytest.mark.parametrize("x", [[0, 1, 2, 3, 4, 5]])
@pytest.mark.parametrize("axis", [0])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
@pytest.mark.parametrize("devs_as_dict", [True, False])
def test_distribute_array(x, axis, tensor_fn, devs_as_dict, device, call):
    """ivy.dev_dist_array must split an array evenly across the target devices."""
    # inputs
    x = tensor_fn(x, "float32", device)
    # devices: the fixture device, plus the last GPU when more than one exists
    devices = list()
    dev0 = device
    devices.append(dev0)
    if "gpu" in device and ivy.num_gpus() > 1:
        idx = ivy.num_gpus() - 1
        dev1 = device[:-1] + str(idx)
        devices.append(dev1)
    if devs_as_dict:
        # dict form maps each device to its per-device split size
        devices = dict(
            zip(devices, [int((1 / len(devices)) * x.shape[axis])] * len(devices))
        )
    # return
    x_split = ivy.dev_dist_array(x, devices, axis)
    # shape test: each split holds an equal share along the split axis
    assert x_split[dev0].shape[axis] == math.floor(x.shape[axis] / len(devices))
    # value test: every sub-array actually lives on its assigned device
    assert min([ivy.dev(x_sub, as_str=True) == ds for ds, x_sub in x_split.items()])
@pytest.mark.parametrize("x", [[0, 1, 2, 3, 4]])
@pytest.mark.parametrize("axis", [0])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_clone_array(x, axis, tensor_fn, device, call):
    """ivy.dev_clone_array must place a copy of x on each target device."""
    # inputs
    x = tensor_fn(x, "float32", device)
    # devices: the fixture device, plus the last GPU when more than one exists
    devices = list()
    dev0 = device
    devices.append(dev0)
    if "gpu" in device and ivy.num_gpus() > 1:
        idx = ivy.num_gpus() - 1
        dev1 = device[:-1] + str(idx)
        devices.append(dev1)
    # return
    x_split = ivy.dev_clone_array(x, devices)
    # shape test
    # NOTE(review): a clone should keep the *full* shape on every device, yet
    # this divides by len(devices) (looks copied from test_distribute_array).
    # It only holds when len(devices) == 1 -- confirm intent.
    assert x_split[dev0].shape[0] == math.floor(x.shape[axis] / len(devices))
    # value test: each copy lives on its device
    assert min([ivy.dev(x_sub, as_str=True) == ds for ds, x_sub in x_split.items()])
@pytest.mark.parametrize("xs", [([0, 1, 2], [3, 4])])
@pytest.mark.parametrize("axis", [0])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_unify_array(xs, axis, tensor_fn, device, call):
    """ivy.dev_unify_array must concatenate per-device shards onto one device."""
    # devices and inputs: one shard on the fixture device, optionally a second
    # shard on the last GPU when more than one GPU is available
    devices = list()
    dev0 = device
    x = {dev0: tensor_fn(xs[0], "float32", dev0)}
    devices.append(dev0)
    if "gpu" in device and ivy.num_gpus() > 1:
        idx = ivy.num_gpus() - 1
        dev1 = device[:-1] + str(idx)
        x[dev1] = tensor_fn(xs[1], "float32", dev1)
        devices.append(dev1)
    # output
    x_unified = ivy.dev_unify_array(ivy.DevDistItem(x), dev0, "concat", axis)
    # shape test: unified length equals the sum of the shard lengths
    expected_size = 0
    for ds in devices:
        expected_size += x[ds].shape[axis]
    assert x_unified.shape[axis] == expected_size
    # value test: result lives on the target device
    assert ivy.dev(x_unified, as_str=True) == dev0
@pytest.mark.parametrize("args", [[[0, 1, 2, 3, 4], "some_str", ([1, 2])]])
@pytest.mark.parametrize("kwargs", [{"a": [0, 1, 2, 3, 4], "b": "another_str"}])
@pytest.mark.parametrize("axis", [0])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_distribute_args(args, kwargs, axis, tensor_fn, device, call):
    """ivy.dev_dist_nest must shard the array leaves of args/kwargs per device."""
    # inputs: only args[0] and kwargs['a'] are arrays; the other entries are
    # non-array leaves expected to pass through unchanged
    args = [tensor_fn(args[0], "float32", device)] + args[1:]
    kwargs = {"a": tensor_fn(kwargs["a"], "float32", device), "b": kwargs["b"]}
    # devices: the fixture device, plus the last GPU when more than one exists
    devices = list()
    dev0 = device
    devices.append(dev0)
    if "gpu" in device and ivy.num_gpus() > 1:
        idx = ivy.num_gpus() - 1
        dev1 = device[:-1] + str(idx)
        devices.append(dev1)
    # returns
    dist_args, dist_kwargs = ivy.dev_dist_nest(args, kwargs, devices, axis=axis)
    # device specific args: every device gets its own view of the nest
    for ds in devices:
        assert dist_args.at_dev(ds)
        assert dist_kwargs.at_dev(ds)
    # value test: each device's array leaves actually live on that device
    assert min(
        [
            ivy.dev(dist_args_ds[0], as_str=True) == ds
            for ds, dist_args_ds in dist_args.at_devs().items()
        ]
    )
    assert min(
        [
            ivy.dev(dist_kwargs_ds["a"], as_str=True) == ds
            for ds, dist_kwargs_ds in dist_kwargs.at_devs().items()
        ]
    )
@pytest.mark.parametrize("args", [[[0, 1, 2, 3, 4], "some_str", ([1, 2])]])
@pytest.mark.parametrize("kwargs", [{"a": [0, 1, 2, 3, 4], "b": "another_str"}])
@pytest.mark.parametrize("axis", [0])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_clone_args(args, kwargs, axis, tensor_fn, device, call):
    """ivy.dev_clone_nest must replicate the array leaves of args/kwargs per device."""
    # inputs: only args[0] and kwargs['a'] are arrays
    args = [tensor_fn(args[0], "float32", device)] + args[1:]
    kwargs = {"a": tensor_fn(kwargs["a"], "float32", device), "b": kwargs["b"]}
    # devices: the fixture device, plus the last GPU when more than one exists
    devices = list()
    dev0 = device
    devices.append(dev0)
    if "gpu" in device and ivy.num_gpus() > 1:
        idx = ivy.num_gpus() - 1
        dev1 = device[:-1] + str(idx)
        devices.append(dev1)
    # returns
    cloned_args, cloned_kwargs = ivy.dev_clone_nest(args, kwargs, devices)
    # device specific args: every device gets its own copy of the nest
    for ds in devices:
        assert cloned_args.at_dev(ds)
        assert cloned_kwargs.at_dev(ds)
    # value test (the "dist_" loop names below are leftovers from the
    # distribute test; they hold cloned values here)
    assert min(
        [
            ivy.dev(dist_args_ds[0], as_str=True) == ds
            for ds, dist_args_ds in cloned_args.at_devs().items()
        ]
    )
    assert min(
        [
            ivy.dev(dist_kwargs_ds["a"], as_str=True) == ds
            for ds, dist_kwargs_ds in cloned_kwargs.at_devs().items()
        ]
    )
@pytest.mark.parametrize("args", [[[[0, 1, 2], [3, 4]], "some_str", ([1, 2])]])
@pytest.mark.parametrize("kwargs", [{"a": [[0, 1, 2], [3, 4]], "b": "another_str"}])
@pytest.mark.parametrize("axis", [0])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_unify_args(args, kwargs, axis, tensor_fn, device, call):
    """ivy.dev_unify_nest must concatenate per-device nests back onto one device."""
    # devices: the fixture device, plus the last GPU when more than one exists
    devices = list()
    dev0 = device
    devices.append(dev0)
    # build the per-device shards for the array leaves
    args_dict = dict()
    args_dict[dev0] = tensor_fn(args[0][0], "float32", dev0)
    kwargs_dict = dict()
    kwargs_dict[dev0] = tensor_fn(kwargs["a"][0], "float32", dev0)
    if "gpu" in device and ivy.num_gpus() > 1:
        idx = ivy.num_gpus() - 1
        dev1 = device[:-1] + str(idx)
        devices.append(dev1)
        args_dict[dev1] = tensor_fn(args[0][1], "float32", dev1)
        kwargs_dict[dev1] = tensor_fn(kwargs["a"][1], "float32", dev1)
    # inputs: wrap the shards in DevDistItem / DevDistNest containers
    args = ivy.DevDistNest([ivy.DevDistItem(args_dict)] + args[1:], devices)
    kwargs = ivy.DevDistNest(
        {"a": ivy.DevDistItem(kwargs_dict), "b": kwargs["b"]}, devices
    )
    # outputs
    args_uni, kwargs_uni = ivy.dev_unify_nest(args, kwargs, dev0, "concat", axis=axis)
    # shape test: unified leaves equal the summed shard sizes
    expected_size_arg = 0
    expected_size_kwarg = 0
    for ds in devices:
        expected_size_arg += args._data[0][ds].shape[axis]
        expected_size_kwarg += kwargs._data["a"][ds].shape[axis]
    assert args_uni[0].shape[axis] == expected_size_arg
    assert kwargs_uni["a"].shape[axis] == expected_size_kwarg
    # value test: unified leaves live on the target device
    assert ivy.dev(args_uni[0], as_str=True) == dev0
    assert ivy.dev(kwargs_uni["a"], as_str=True) == dev0
|
{"hexsha": "6b200402418eb2e91b33f481f753b719c2bfd9fd", "size": 14729, "ext": "py", "lang": "Python", "max_stars_repo_path": "ivy_tests/test_ivy/test_functional/test_core/test_device.py", "max_stars_repo_name": "mattbarrett98/ivy", "max_stars_repo_head_hexsha": "a706e59b907c0f78edb819959cc2035ebf48946f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ivy_tests/test_ivy/test_functional/test_core/test_device.py", "max_issues_repo_name": "mattbarrett98/ivy", "max_issues_repo_head_hexsha": "a706e59b907c0f78edb819959cc2035ebf48946f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ivy_tests/test_ivy/test_functional/test_core/test_device.py", "max_forks_repo_name": "mattbarrett98/ivy", "max_forks_repo_head_hexsha": "a706e59b907c0f78edb819959cc2035ebf48946f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.1395348837, "max_line_length": 86, "alphanum_fraction": 0.6177608799, "include": true, "reason": "import numpy", "num_tokens": 4344}
|
from __future__ import absolute_import
import numpy as np
from sklearn.metrics import pairwise_distances
from graphs import Graph
__all__ = ['incremental_neighbor_graph']
def incremental_neighbor_graph(X, precomputed=False, k=None, epsilon=None,
                               weighting='none'):
  '''See neighbor_graph.

  Generator variant: yields one Graph per step of the varying parameter.
  Exactly one of `k` / `epsilon` must be a sequence, and the yielded
  graphs accumulate edges as that sequence is consumed.

  X           : data matrix, or pairwise distances when `precomputed`
  precomputed : when True, X is already a distance matrix
  k           : neighbor count, or a sequence of increasing counts
  epsilon     : distance threshold, or a sequence of increasing thresholds
  weighting   : 'none' -> edges carry distances, 'binary' -> edges carry 1
  '''
  assert ((k is not None) or (epsilon is not None)
          ), "Must provide `k` or `epsilon`"
  assert (_issequence(k) ^ _issequence(epsilon)
          ), "Exactly one of `k` or `epsilon` must be a sequence."
  assert weighting in ('binary','none'), "Invalid weighting param: " + weighting
  is_weighted = weighting == 'none'
  if precomputed:
    D = X
  else:
    D = pairwise_distances(X, metric='euclidean')

  # pre-sort for efficiency; drop column 0 (each point's self-distance)
  order = np.argsort(D)[:,1:]
  if k is None:
    k = D.shape[0]

  # generate the sequence of graphs
  # TODO: convert the core of these loops to Cython for speed
  W = np.zeros_like(D)
  I = np.arange(D.shape[0])
  if _issequence(k):
    # varied k, fixed epsilon
    if epsilon is not None:
      # zero out distances beyond the threshold up front
      # NOTE(review): with weighting='binary', pairs zeroed here still get
      # weight 1 below when they fall inside the first k columns -- confirm
      # that is intended.
      D[D > epsilon] = 0
    old_k = 0
    for new_k in k:
      # add only the neighbor columns [old_k, new_k) new at this step
      idx = order[:, old_k:new_k]
      dist = D[I, idx.T]
      W[I, idx.T] = dist if is_weighted else 1
      yield Graph.from_adj_matrix(W)
      old_k = new_k
  else:
    # varied epsilon, fixed k
    idx = order[:,:k]
    dist = D[I, idx.T].T
    # old_i[i]: how many of row i's k nearest neighbors are already in W
    old_i = np.zeros(D.shape[0], dtype=int)
    for eps in epsilon:
      for i, row in enumerate(dist):
        oi = old_i[i]
        # rows are sorted, so searchsorted finds the first neighbor beyond eps
        ni = oi + np.searchsorted(row[oi:], eps)
        rr = row[oi:ni]
        W[i, idx[i,oi:ni]] = rr if is_weighted else 1
        old_i[i] = ni
      yield Graph.from_adj_matrix(W)
def _issequence(x):
# Note: isinstance(x, collections.Sequence) fails for numpy arrays
return hasattr(x, '__len__')
|
{"hexsha": "a3349b2b40d0e93044d2c461f3bf11ac984eb63e", "size": 1809, "ext": "py", "lang": "Python", "max_stars_repo_path": "graphs/construction/incremental.py", "max_stars_repo_name": "vishalbelsare/graphs", "max_stars_repo_head_hexsha": "4fbeb025dfe33340335f34300f58dd3809228822", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2015-12-31T21:48:56.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-09T13:34:41.000Z", "max_issues_repo_path": "graphs/construction/incremental.py", "max_issues_repo_name": "perimosocordiae/graphs", "max_issues_repo_head_hexsha": "4fbeb025dfe33340335f34300f58dd3809228822", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "graphs/construction/incremental.py", "max_forks_repo_name": "perimosocordiae/graphs", "max_forks_repo_head_hexsha": "4fbeb025dfe33340335f34300f58dd3809228822", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2015-09-18T14:26:00.000Z", "max_forks_repo_forks_event_max_datetime": "2018-10-21T11:46:11.000Z", "avg_line_length": 28.265625, "max_line_length": 80, "alphanum_fraction": 0.6246545053, "include": true, "reason": "import numpy", "num_tokens": 500}
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 25 08:30:01 2021
@author: Ngoc Anh
"""
from .MovieLens import MovieLens
from surprise import KNNBasic
from surprise import NormalPredictor
from .Evaluator import Evaluator
import random
import numpy as np
def LoadMovieLensData():
    """Load the small MovieLens ratings set and popularity ranks for novelty metrics."""
    ml = MovieLens()
    print("Loading movie ratings...")
    ratings = ml.loadMovieLensLatestSmall()
    print("\nComputing movie popularity ranks so we can measure novelty later...")
    popularity = ml.getPopularityRanks()
    return (ml, ratings, popularity)
def RecommendMovie(user_id):
    """Return sampled top-N movie recommendations for ``user_id``.

    Uses a user-based cosine KNN over the MovieLens ratings.  Fixes a
    NameError in the original: the final call referenced ``user_ID``
    while the parameter is named ``user_id``.
    """
    # fixed seeds so evaluation and sampling are reproducible
    np.random.seed(0)
    random.seed(0)
    # Load up common data set for the recommender algorithms
    (ml, evaluationData, rankings) = LoadMovieLensData()
    # Construct an Evaluator to, you know, evaluate them
    evaluator = Evaluator(evaluationData, rankings)
    # User-based KNN
    UserKNN = KNNBasic(sim_options={'name': 'cosine', 'user_based': True})
    evaluator.AddAlgorithm(UserKNN, "User KNN")
    # Item-based KNN
    #ItemKNN = KNNBasic(sim_options = {'name': 'cosine', 'user_based': False})
    #evaluator.AddAlgorithm(ItemKNN, "Item KNN")
    # Just make random recommendations
    #Random = NormalPredictor()
    #evaluator.AddAlgorithm(Random, "Random")
    # Evaluate without full cross-validation metrics, then sample the top-N
    evaluator.Evaluate(False)
    res = evaluator.SampleTopNRecs(ml, testSubject=user_id)
    return res
|
{"hexsha": "61e26f1c06755c02e5b5f9ea9a809852751c6048", "size": 1410, "ext": "py", "lang": "Python", "max_stars_repo_path": "my_code/Process.py", "max_stars_repo_name": "anhtpn/app_flask", "max_stars_repo_head_hexsha": "ba1509a9bffdec8c4e6c5c98d211d75e3d87541f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "my_code/Process.py", "max_issues_repo_name": "anhtpn/app_flask", "max_issues_repo_head_hexsha": "ba1509a9bffdec8c4e6c5c98d211d75e3d87541f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "my_code/Process.py", "max_forks_repo_name": "anhtpn/app_flask", "max_forks_repo_head_hexsha": "ba1509a9bffdec8c4e6c5c98d211d75e3d87541f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.6363636364, "max_line_length": 82, "alphanum_fraction": 0.695035461, "include": true, "reason": "import numpy", "num_tokens": 361}
|
import matplotlib.pyplot as plt
import numpy as np
import os
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.gridspec as gridspec
sample_num = 5
def connect_2D_line(inputs_use, sample_num):
    """Scatter-plot the 16 2D joints of one sample, with the y-axis inverted
    so image coordinates read top-to-bottom."""
    joints = np.reshape(np.asarray(inputs_use[sample_num]), (16, 2))
    plt.figure()
    for x, y in joints:
        plt.scatter(x, y)
    plt.gca().invert_yaxis()
def simple_3d_line(tars):
    """Draw the first pose in ``tars`` as a connected 16-joint 3D skeleton."""
    joints = np.reshape(np.asarray(tars[0]), (16, 3))
    # bone connectivity: segment i runs from joint starts[i] to joint ends[i]
    starts = np.array([6, 5, 4, 0, 1, 2, 0, 7, 8, 10, 11, 8, 13, 14, 8])
    ends = np.array([5, 4, 0, 1, 2, 3, 7, 8, 10, 11, 12, 13, 14, 15, 9])
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    xs, ys, zs = joints[:, 0], joints[:, 1], joints[:, 2]
    for a, b in zip(starts, ends):
        ax.plot([xs[a], xs[b]], [ys[a], ys[b]], [zs[a], zs[b]])
def connect_3D_line(outputs_use, targets_use, sample_num):
    """Plot predicted vs ground-truth 17-joint skeletons side by side in 3D."""
    # bone connectivity shared by both skeletons
    starts = np.array([6, 5, 4, 0, 1, 2, 0, 7, 8, 11, 12, 8, 14, 15, 8, 9])
    ends = np.array([5, 4, 0, 1, 2, 3, 7, 8, 11, 12, 13, 14, 15, 16, 9, 10])
    pred = np.reshape(outputs_use[sample_num], (17, 3))
    lb = np.reshape(targets_use[sample_num], (17, 3))
    fig = plt.figure()
    ax_pred = fig.add_subplot(121, projection='3d')
    ax_pred.title.set_text('Predictions')
    ax_lb = fig.add_subplot(122, projection='3d')
    ax_lb.title.set_text('Labels')
    # draw matching bone segments on each axes, one segment per iteration
    for a, b in zip(starts, ends):
        ax_pred.plot([pred[a, 0], pred[b, 0]],
                     [pred[a, 1], pred[b, 1]],
                     [pred[a, 2], pred[b, 2]])
        ax_lb.plot([lb[a, 0], lb[b, 0]],
                   [lb[a, 1], lb[b, 1]],
                   [lb[a, 2], lb[b, 2]])
# connect_2D_line(inputs_use, sample_num )
# connect_3D_line(outputs_use, targets_use, sample_num)
########################################################################
# start_points = np.array([6, 5, 4, 0, 1, 2, 0, 7, 8, 11, 12, 8, 14, 15, 8, 9]) # start points
# end_points = np.array([5, 4, 0, 1, 2, 3, 7, 8, 11, 12, 13, 14, 15, 16, 9, 10]) # end points
#
# # 2d inputs list
# x_coord, y_coord = [], []
# x_coord_sub, y_coord_sub = [], []
#
# # prediction list
# x_coord_p, y_coord_p, z_coord_p = [], [], []
# x_coord_sub_p, y_coord_sub_p, z_coord_sub_p = [], [], []
#
# # label list
# x_coord_l, y_coord_l, z_coord_l = [], [], []
# x_coord_sub_l, y_coord_sub_l, z_coord_sub_l = [], [], []
#
# pred = np.reshape(outputs_use[0], (17, 3))
# lb = np.reshape(targets_use[0], (17, 3))
#
# inp = np.reshape(inputs_use[0], (16, 2))
# inp = np.vstack(([0, 0], inp))
#
# fig2 = plt.figure()
# ax_inp = fig2.add_subplot(131)
# ax_inp.title.set_text('2D inputs')
#
# fig = plt.figure()
#
# ax_pred = fig.add_subplot(132, projection='3d')
# ax_pred.title.set_text('Predictions')
#
# ax_lb = fig.add_subplot(133, projection='3d')
# ax_lb.title.set_text('Labels')
#
# for idx in range(len(pred)):
# x_coor, y_coor = inp[idx]
# x_coord.append(x_coor)
# y_coord.append(y_coor)
#
# x_coor_p, y_coor_p, z_coor_p = pred[idx]
# x_coord_p.append(x_coor_p)
# y_coord_p.append(y_coor_p)
# z_coord_p.append(z_coor_p)
#
# x_coor_l, y_coor_l, z_coor_l = lb[idx]
# x_coord_l.append(x_coor_l)
# y_coord_l.append(y_coor_l)
# z_coord_l.append(z_coor_l)
#
# for i in range(len(start_points)):
# x_coord_sub.append([x_coord[start_points[i]], x_coord[end_points[i]]])
# y_coord_sub.append([y_coord[start_points[i]], y_coord[end_points[i]]])
#
# x_coord_sub_p.append([x_coord_p[start_points[i]], x_coord_p[end_points[i]]])
# y_coord_sub_p.append([y_coord_p[start_points[i]], y_coord_p[end_points[i]]])
# z_coord_sub_p.append([z_coord_p[start_points[i]], z_coord_p[end_points[i]]])
#
# x_coord_sub_l.append([x_coord_l[start_points[i]], x_coord_l[end_points[i]]])
# y_coord_sub_l.append([y_coord_l[start_points[i]], y_coord_l[end_points[i]]])
# z_coord_sub_l.append([z_coord_l[start_points[i]], z_coord_l[end_points[i]]])
#
# for j in range(len(start_points)):
# ax_inp.plot(x_coord_sub[j], y_coord_sub[j])
# ax_pred.plot(x_coord_sub_p[j], y_coord_sub_p[j], z_coord_sub_p[j])
# ax_lb.plot(x_coord_sub_l[j], y_coord_sub_l[j], z_coord_sub_l[j])
|
{"hexsha": "022fa02e2cbb4106ed3651e5702b31c1cf874551", "size": 6177, "ext": "py", "lang": "Python", "max_stars_repo_path": "joint_visualization.py", "max_stars_repo_name": "damonchang23/3d_pose_baseline_pytorch", "max_stars_repo_head_hexsha": "5fedd6b2026be43155829a87d5c7ba6d5db64af6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "joint_visualization.py", "max_issues_repo_name": "damonchang23/3d_pose_baseline_pytorch", "max_issues_repo_head_hexsha": "5fedd6b2026be43155829a87d5c7ba6d5db64af6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-08-28T08:41:36.000Z", "max_issues_repo_issues_event_max_datetime": "2019-08-28T08:41:36.000Z", "max_forks_repo_path": "joint_visualization.py", "max_forks_repo_name": "damonchang23/3d_pose_baseline_pytorch", "max_forks_repo_head_hexsha": "5fedd6b2026be43155829a87d5c7ba6d5db64af6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.7540983607, "max_line_length": 99, "alphanum_fraction": 0.654848632, "include": true, "reason": "import numpy", "num_tokens": 2125}
|
import argparse
import os
import torch
import json
import numpy as np
import src.utils.interface_train_tool as train_tool
import src.utils.interface_audio_io as audio_io
import matplotlib.pyplot as plt
import src.trainers.trainer as trainer
import src.trainers.tester as tester
import src.utils.interface_tensorboard as tensorboard
import src.data.dataset as dataset
import src.models.model as model
import src.optimizers.optimizer as optimizer
import src.optimizers.loss as loss
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
def main(sample_audio):
    """Visualize latent representations of a pretrained WaveBYOL pretext model.

    For every file in ``sample_audio`` this plots:
      1. the first channel of the (late) representation, and
      2. all channels of the early representation.

    Args:
        sample_audio: iterable of audio file paths to visualize.
    """
    parser = argparse.ArgumentParser(description='waverdeep - WaveBYOL - Feature extractor')
    parser.add_argument("--configuration", required=False,
                        default='./config/T10-urbansound-WaveBYOL-ResNet50-Adam-15200.json')
    args = parser.parse_args()
    now = train_tool.setup_timestamp()  # NOTE(review): result unused; call kept for parity
    with open(args.configuration, 'r') as configuration:
        config = json.load(configuration)

    print(">> load pretext model ...")
    pretext_model = model.load_model(config=config, model_name=config["pretext_model_name"],
                                     checkpoint_path=config['pretext_checkpoint'])
    if config['use_cuda']:
        pretext_model = pretext_model.cuda()

    # Late representation: plot only channel 0 of the first batch item.
    _show_representations(sample_audio, config, pretext_model.get_representation,
                          first_channel_only=True)
    # Early representation: plot every channel, labelled on the y axis.
    _show_representations(sample_audio, config, pretext_model.get_early_representation,
                          first_channel_only=False, ylabel='channel')


def _show_representations(sample_audio, config, extract_fn, first_channel_only, ylabel=None):
    """Run ``extract_fn`` over each audio file and display a matshow plot.

    Args:
        sample_audio: iterable of audio file paths.
        config: loaded configuration dict (reads 'use_cuda').
        extract_fn: callable mapping a waveform batch to a feature tensor.
        first_channel_only: if True, plot only channel 0 of batch item 0;
            otherwise plot all channels of batch item 0.
        ylabel: optional y-axis label for the plot.
    """
    for audio in sample_audio:
        waveform, sr = audio_io.audio_loader(audio)
        waveform = waveform.unsqueeze(0)  # add batch dimension
        if config['use_cuda']:
            waveform = waveform.cuda()
        with torch.no_grad():
            out_representation = extract_fn(waveform)
        out_representation = out_representation.detach()
        print(out_representation.size())
        out_representation = out_representation.cpu().numpy()
        if first_channel_only:
            data = np.vstack([out_representation[0][0]])
        else:
            data = np.vstack([out_representation[0]])
        plt.matshow(data)
        plt.xlabel('time')
        if ylabel is not None:
            plt.ylabel(ylabel)
        plt.colorbar()
        plt.show()
# Script entry point: visualize latent spaces for a few bundled test clips.
if __name__ == '__main__':
    audio_set = [
        './dataset_test/41_0514_301_0_07111_00.wav',
        './dataset_test/41_0514_301_0_07111_01.wav',
        './dataset_test/41_0514_301_0_07111_02.wav',
    ]
    main(audio_set)
|
{"hexsha": "d7b583004a825117f2f701077401c48cd90fff02", "size": 2853, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/utils/extract_latent_space.py", "max_stars_repo_name": "waverDeep/WaveBYOL", "max_stars_repo_head_hexsha": "ab062c26598e0fa6ab8426498f9920048988b5c1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-15T00:00:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T00:00:57.000Z", "max_issues_repo_path": "src/utils/extract_latent_space.py", "max_issues_repo_name": "waverDeep/WaveBYOL", "max_issues_repo_head_hexsha": "ab062c26598e0fa6ab8426498f9920048988b5c1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/utils/extract_latent_space.py", "max_forks_repo_name": "waverDeep/WaveBYOL", "max_forks_repo_head_hexsha": "ab062c26598e0fa6ab8426498f9920048988b5c1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.1744186047, "max_line_length": 92, "alphanum_fraction": 0.6691202243, "include": true, "reason": "import numpy", "num_tokens": 594}
|
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from src.embed import L2Embedding as Embedding
from src.module import Encoder, Decoder, Postnet, CBHG, Linear
#from src.util import get_audio_feat_mask
class Tacotron2(nn.Module):
    """Tacotron2 text-to-speech model (w/o stop prediction).

    Args:
        n_mels: number of mel bins produced by the decoder.
        linear_dim: linear spectrogram dimension; when None, no mel-to-linear
            CBHG postnet is created.
        in_embed_dim: dimension of the input text embedding.
        spkr_embed_dim: dimension of the speaker embedding fed to the decoder.
        paras: dict of sub-module settings ('encoder', 'decoder', and
            optionally 'separate_postnet').
    """
    def __init__(self, n_mels, linear_dim, in_embed_dim, spkr_embed_dim, paras):
        super(Tacotron2, self).__init__()
        self.n_mels = n_mels
        self.linear_dim = linear_dim
        if 'separate_postnet' in paras:
            self.separate_postnet = paras['separate_postnet']
        else:
            self.separate_postnet = False
        self.encoder = Encoder(in_embed_dim, **paras['encoder'])
        self.decoder = Decoder(
            n_mels, enc_embed_dim=self.encoder.enc_embed_dim, spkr_embed_dim=spkr_embed_dim, **paras['decoder'])
        # Mirror frequently-inspected decoder settings on the model itself.
        self.prenet_dim = self.decoder.prenet_dim
        self.prenet_dropout = self.decoder.prenet_dropout
        self.loc_aware = self.decoder.loc_aware
        self.use_summed_weights = self.decoder.use_summed_weights
        self.n_frames_per_step = self.decoder.n_frames_per_step
        # Whether to use CBHG to convert mel to linear or not
        self.postnet = None
        if linear_dim is not None:
            self.postnet = nn.Sequential(
                CBHG(n_mels, K=8),
                # CBHG output size is 2 * input size
                nn.Linear(n_mels * 2, linear_dim))

    def forward(self, txt_embed, txt_lengths, teacher, spkr_embed, tf_rate=0.0, unpair_max_frame=None):
        """
        Arg:
            txt_embed: the output of TextEmbedding of shape (B, L, enc_embed_dim)
            txt_lengths: text lengths before padding (B)
            teacher: max_dec_step for inference. (B, T, n_mels) for training
            spkr_embed: speaker embedding passed through to the decoder.
            tf_rate: teacher forcing rate, `1.0` for pure teacher forcing.
            unpair_max_frame: optional frame cap forwarded to the decoder.
        Returns:
            (mel_pred, linear_pred, alignment, stop); linear_pred is None
            when no postnet was configured.
        """
        enc_output = self.encoder(txt_embed, txt_lengths)
        mel_pred, alignment, stop = self.decoder(enc_output, txt_lengths, teacher, spkr_embed,
                                                 tf_rate=tf_rate, unpair_max_frame=unpair_max_frame)
        if self.separate_postnet:
            # detach() blocks postnet gradients from reaching the decoder.
            linear_pred = self.postnet(mel_pred.detach()) if self.postnet is not None else None  # For demo
        else:
            linear_pred = self.postnet(mel_pred) if self.postnet is not None else None
        return mel_pred, linear_pred, alignment, stop

    def create_msg(self):
        """Build human-readable lines describing the model configuration."""
        msg = []
        msg.append('Model spec.| Model = `TACO-2`\t| Prenet dim = {}\t| Prenet dropout = {}\t'.format(
            self.prenet_dim, self.prenet_dropout))
        # BUG FIX: original passed `self.loc_aware. self.n_frames_per_step`
        # (attribute access via '.'), giving 3 args for 4 placeholders and an
        # AttributeError at runtime; the separator must be a comma.
        msg.append('            | Loc. aware = {}\t| frames/step = {}\t| mel2linear = {}\t| sep_post = {}\t'.format(
            self.loc_aware, self.n_frames_per_step, self.postnet is not None, self.separate_postnet))
        return msg
class Tacotron2withCodebook(nn.Module):
    # Tacotron2 variant whose text embedding is looked up from a codebook
    # (L2Embedding) instead of being supplied by the caller.
    def __init__(self, n_mels, linear_dim, vocab_size, paras_tts, paras_codebook):
        super(Tacotron2withCodebook, self).__init__()
        # Remember to pop 'bone'
        paras_codebook.pop('bone')
        self.codebook = Embedding(vocab_size, False, **paras_codebook)
        # NOTE(review): Tacotron2.__init__ takes (n_mels, linear_dim,
        # in_embed_dim, spkr_embed_dim, paras); only four positionals are
        # passed here, so `paras_tts` would land in `spkr_embed_dim` and
        # `paras` would be missing — confirm against the Tacotron2 signature.
        self.tts = Tacotron2(n_mels, linear_dim, self.codebook.out_dim, paras_tts)
        self.n_frames_per_step = self.tts.decoder.n_frames_per_step
        # Delegate configuration reporting to the wrapped model.
        self.create_msg = self.tts.create_msg
    def forward(self, txt, txt_lengths, teacher, tf_rate=0.0):
        # Look up embeddings for the token ids via the codebook.
        txt_embed = self.codebook.inference(txt)
        # NOTE(review): Tacotron2.forward expects `spkr_embed` before
        # `tf_rate` and returns four values (mel, linear, alignment, stop);
        # this positional `tf_rate` and 3-way unpack look inconsistent with
        # that signature — verify before relying on this class.
        mel_pred, alignment, stop = self.tts(txt_embed, txt_lengths, teacher, tf_rate)
        return mel_pred, alignment, stop
|
{"hexsha": "f479e58ffc4c6a7806f98758c5b8cb79f85a048c", "size": 3683, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/tts.py", "max_stars_repo_name": "ttaoREtw/semi-tts", "max_stars_repo_head_hexsha": "46750fc68d1547e82bda9341f5029595ded984c8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 40, "max_stars_repo_stars_event_min_datetime": "2020-07-22T04:49:43.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-19T15:11:30.000Z", "max_issues_repo_path": "src/tts.py", "max_issues_repo_name": "ttaoREtw/semi-tts", "max_issues_repo_head_hexsha": "46750fc68d1547e82bda9341f5029595ded984c8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-12-13T02:39:29.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-13T02:39:29.000Z", "max_forks_repo_path": "src/tts.py", "max_forks_repo_name": "ttaoREtw/semi-tts", "max_forks_repo_head_hexsha": "46750fc68d1547e82bda9341f5029595ded984c8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-08-16T08:43:40.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-19T09:53:26.000Z", "avg_line_length": 48.4605263158, "max_line_length": 115, "alphanum_fraction": 0.6535433071, "include": true, "reason": "import numpy", "num_tokens": 916}
|
////////////////////////////////////////////////////////////////////////
//
// This file is part of gmic-8bf, a filter plug-in module that
// interfaces with G'MIC-Qt.
//
// Copyright (c) 2020, 2021 Nicholas Hayes
//
// This file is licensed under the MIT License.
// See LICENSE.txt for complete licensing and attribution information.
//
////////////////////////////////////////////////////////////////////////
#include "ClipboardUtilWin.h"
#include "FileUtil.h"
#include "ImageConversionWin.h"
#include <vector>
#include <boost/algorithm/string.hpp>
namespace
{
std::vector<UINT> GetAvailableClipboardFormats()
{
std::vector<UINT> formats;
UINT format = EnumClipboardFormats(0);
while (format != 0)
{
formats.push_back(format);
format = EnumClipboardFormats(format);
}
return formats;
}
// Retrieves the first file path from a CF_HDROP clipboard entry.
// The clipboard must already be open. Returns noErr on success,
// memFullErr if the path allocation fails, and ioErr for any other
// failure (no CF_HDROP data, lock failure, or an empty drop list).
OSErr GetFileDropPath(std::wstring& path)
{
    OSErr err = noErr;

    HANDLE hGlobal = GetClipboardData(CF_HDROP);

    if (hGlobal != nullptr)
    {
        HDROP hDrop = static_cast<HDROP>(GlobalLock(hGlobal));

        if (hDrop != nullptr)
        {
            try
            {
                // Query the buffer size needed for the first file (index 0);
                // DragQueryFileW returns the length excluding the terminator,
                // so add one for it.
                UINT requiredLength = DragQueryFileW(hDrop, 0, nullptr, 0) + 1;

                if (requiredLength > 1)
                {
                    path = std::wstring(static_cast<size_t>(requiredLength), '\0');

                    if (DragQueryFileW(hDrop, 0, &path[0], requiredLength) > 0)
                    {
                        // Remove the NUL-terminator from the end of the string.
                        path.resize(static_cast<size_t>(requiredLength) - 1);
                    }
                    else
                    {
                        err = ioErr;
                    }
                }
                else
                {
                    err = ioErr;
                }
            }
            catch (const std::bad_alloc&)
            {
                err = memFullErr;
            }
            catch (...)
            {
                err = ioErr;
            }

            // Unlock happens on every path, including after an exception.
            GlobalUnlock(hGlobal);
        }
        else
        {
            err = ioErr;
        }
    }
    else
    {
        err = ioErr;
    }

    return err;
}
// Returns true when the dropped file's name ends with one of the raster
// image extensions this plug-in can convert (case-insensitive match).
bool FileDropIsImage(const std::wstring& path)
{
    static const std::vector<std::wstring> knownImageExtensions
    {
        L".bmp",
        L".png",
        L".jpg",
        L".jpe",
        L".jpeg",
        L".jfif",
        L".gif",
        L".tif",
        L".tiff"
    };

    for (const auto& extension : knownImageExtensions)
    {
        // iends_with performs a case-insensitive suffix comparison.
        if (boost::algorithm::iends_with(path, extension))
        {
            return true;
        }
    }

    return false;
}
// Converts a dropped image file (path taken from CF_HDROP) into the
// G'MIC input format. Thin wrapper so the dispatch code reads uniformly
// alongside ProcessDib/ProcessPng.
OSErr ProcessFileDrop(
    const FilterRecordPtr filterRecord,
    const std::wstring& path,
    const boost::filesystem::path& gmicInputPath)
{
    return ConvertImageToGmicInputFormatNative(filterRecord, path, gmicInputPath);
}
// Converts CF_DIB / CF_DIBV5 clipboard data into the G'MIC input format.
// The clipboard must already be open. The raw DIB is prefixed with a
// BITMAPFILEHEADER so the converter can treat it as an in-memory BMP file.
OSErr ProcessDib(
    const FilterRecordPtr filterRecord,
    UINT format,
    const boost::filesystem::path& gmicInputPath)
{
    HANDLE hGlobal = GetClipboardData(format);

    if (hGlobal == nullptr)
    {
        return ioErr;
    }

    const SIZE_T handleSize = GlobalSize(hGlobal);

    // A DIB must contain at least its info header.
    if (handleSize < sizeof(BITMAPINFOHEADER))
    {
        return ioErr;
    }

    OSErr err = noErr;

    PVOID data = GlobalLock(hGlobal);

    if (data != nullptr)
    {
        try
        {
            PBITMAPINFOHEADER pbih = static_cast<PBITMAPINFOHEADER>(data);

            uint64_t imageDataSize = static_cast<uint64_t>(pbih->biSizeImage);

            if (imageDataSize == 0)
            {
                // biSizeImage may legally be zero only for uncompressed
                // bitmaps; compute the pixel-data size from the header fields.
                if (pbih->biCompression != BI_RGB)
                {
                    err = ioErr;
                }
                else
                {
                    // Rows are padded to a multiple of 4 bytes (32 bits).
                    uint64_t stride = ((static_cast<uint64_t>(pbih->biWidth) * pbih->biBitCount + 31) & ~31) / 8;
                    // Negative height denotes a top-down DIB; size is the same.
                    uint64_t height = pbih->biHeight < 0 ? -pbih->biHeight : pbih->biHeight;

                    imageDataSize = stride * height;
                }
            }

            if (err == noErr)
            {
                // Total DIB size: info header + color table + pixel data.
                const uint64_t dibSize = static_cast<uint64_t>(pbih->biSize) +
                    static_cast<uint64_t>(pbih->biClrUsed) * sizeof(RGBQUAD) +
                    imageDataSize;

                // Reject headers that claim more data than the handle holds.
                if (dibSize > handleSize)
                {
                    err = ioErr;
                }
                else
                {
                    // Compute the size of the entire file.
                    const uint64_t fileSize = sizeof(BITMAPFILEHEADER) + dibSize;

                    // bfSize is a DWORD, so the file must fit in 32 bits.
                    if (fileSize > std::numeric_limits<DWORD>::max())
                    {
                        err = ioErr;
                    }
                    else
                    {
                        std::vector<BYTE> memoryBmp(static_cast<size_t>(fileSize));

                        BITMAPFILEHEADER* bfh = reinterpret_cast<BITMAPFILEHEADER*>(memoryBmp.data());

                        bfh->bfType = 0x4d42; // 0x42 = "B" 0x4d = "M"
                        bfh->bfSize = static_cast<DWORD>(fileSize);
                        bfh->bfReserved1 = 0;
                        bfh->bfReserved2 = 0;
                        // NOTE(review): for BI_BITFIELDS DIBs, three DWORD
                        // color masks follow the BITMAPINFOHEADER; this offset
                        // does not account for them — confirm whether such
                        // DIBs can reach this path.
                        bfh->bfOffBits = sizeof(BITMAPFILEHEADER) + pbih->biSize + pbih->biClrUsed * sizeof(RGBQUAD);

                        BYTE* dst = memoryBmp.data() + sizeof(BITMAPFILEHEADER);

                        std::memcpy(dst, data, static_cast<size_t>(dibSize));

                        err = ConvertImageToGmicInputFormatNative(
                            filterRecord,
                            memoryBmp.data(),
                            memoryBmp.size(),
                            gmicInputPath);
                    }
                }
            }
        }
        catch (const std::bad_alloc&)
        {
            err = memFullErr;
        }
        catch (...)
        {
            err = ioErr;
        }

        GlobalUnlock(hGlobal);
    }
    else
    {
        err = ioErr;
    }

    return err;
}
// Converts PNG-format clipboard data ("PNG" or "image/png" registered
// formats) into the G'MIC input format. The clipboard must already be open.
OSErr ProcessPng(
    const FilterRecordPtr filterRecord,
    UINT format,
    const boost::filesystem::path& gmicInputPath)
{
    HANDLE hGlobal = GetClipboardData(format);

    if (hGlobal == nullptr)
    {
        return ioErr;
    }

    const SIZE_T handleSize = GlobalSize(hGlobal);

    OSErr err = noErr;

    PVOID data = GlobalLock(hGlobal);

    if (data != nullptr)
    {
        try
        {
            // The locked memory already holds a complete PNG stream, so it
            // can be handed to the converter as-is.
            err = ConvertImageToGmicInputFormatNative(
                filterRecord,
                data,
                handleSize,
                gmicInputPath);
        }
        catch (const std::bad_alloc&)
        {
            err = memFullErr;
        }
        catch (...)
        {
            err = ioErr;
        }

        GlobalUnlock(hGlobal);
    }
    else
    {
        err = ioErr;
    }

    return err;
}
// Walks the available clipboard formats in enumeration order and converts
// the first supported image format that processes successfully.
// Note: returns noErr both when a format was converted and when no
// supported format was present at all.
OSErr TryProcessClipboardImage(
    const FilterRecordPtr filterRecord,
    const std::vector<UINT>& availableFormats,
    const boost::filesystem::path& gmicInputPath)
{
    // Registered (non-predefined) PNG format ids; RegisterClipboardFormatW
    // returns the same id for an already-registered name.
    static const UINT pngFormatId = RegisterClipboardFormatW(L"PNG");
    static const UINT pngMimeFormatId = RegisterClipboardFormatW(L"image/png"); // Used by Qt-based applications

    OSErr err = noErr;

    for (const auto& format : availableFormats)
    {
        // Pick the first format in the list that we support.
        if (format == CF_HDROP)
        {
            // Web Browsers often download the image and place a link on the clipboard.
            std::wstring path;

            if (GetFileDropPath(path) == noErr && FileDropIsImage(path))
            {
                err = ProcessFileDrop(filterRecord, path, gmicInputPath);

                if (err == noErr)
                {
                    break;
                }
            }
        }
        else if (format == CF_DIB || format == CF_DIBV5)
        {
            err = ProcessDib(filterRecord, format, gmicInputPath);

            if (err == noErr)
            {
                break;
            }
        }
        else if (format == pngFormatId || format == pngMimeFormatId)
        {
            err = ProcessPng(filterRecord, format, gmicInputPath);

            if (err == noErr)
            {
                break;
            }
        }
    }

    return err;
}
#if DEBUG_BUILD
#include <map>

// Debug-only helper: logs every format currently on the clipboard using
// the predefined CF_* name, the registered format name, or a numeric
// fallback when neither is available.
void DumpClipboardFormats(const std::vector<UINT>& availableFormats)
{
    // Human-readable names for the formats predefined by Windows.
    static std::map<UINT, std::string> predefinedFormats
    {
        { CF_TEXT, "CF_TEXT" },
        { CF_BITMAP, "CF_BITMAP" },
        { CF_METAFILEPICT, "CF_METAFILEPICT" },
        { CF_SYLK, "CF_SYLK" },
        { CF_DIF, "CF_DIF" },
        { CF_TIFF, "CF_TIFF" },
        { CF_OEMTEXT, "CF_OEMTEXT" },
        { CF_DIB, "CF_DIB" },
        { CF_PALETTE, "CF_PALETTE" },
        { CF_PENDATA, "CF_PENDATA" },
        { CF_RIFF, "CF_RIFF" },
        { CF_WAVE, "CF_WAVE" },
        { CF_UNICODETEXT, "CF_UNICODETEXT" },
        { CF_ENHMETAFILE, "CF_ENHMETAFILE" },
        { CF_HDROP, "CF_HDROP" },
        { CF_LOCALE, "CF_LOCALE" },
        { CF_DIBV5, "CF_DIBV5" }
    };

    DebugOut("The clipboard contains %zd formats.", availableFormats.size());

    constexpr int formatNameBufferLength = 256;
    char formatNameBuffer[formatNameBufferLength]{};

    for (auto& format : availableFormats)
    {
        const auto& predefinedItem = predefinedFormats.find(format);

        if (predefinedItem != predefinedFormats.end())
        {
            DebugOut("Predefined format %u (%s)", format, predefinedItem->second.c_str());
        }
        else
        {
            // Not predefined: try to resolve a registered format name.
            if (GetClipboardFormatNameA(format, formatNameBuffer, formatNameBufferLength) > 0)
            {
                DebugOut("Registered format %u (%s)", format, formatNameBuffer);
            }
            else
            {
                if (format >= CF_PRIVATEFIRST && format <= CF_PRIVATELAST)
                {
                    DebugOut("Private format %u", format);
                }
                else
                {
                    DebugOut("Unknown format %u", format);
                }
            }
        }
    }
}
#endif // DEBUG_BUILD
}
// Public entry point: if the clipboard holds a supported image, converts
// it to the G'MIC input format at gmicInputPath. Clipboard problems are
// deliberately non-fatal (noErr is returned when the clipboard cannot be
// opened); only conversion failures produce a non-noErr result.
OSErr ConvertClipboardImageToGmicInputNative(
    const FilterRecordPtr filterRecord,
    const boost::filesystem::path& gmicInputPath)
{
    OSErr err = noErr;

    // Failure to open the clipboard is not a fatal error, if it cannot be opened
    // G'MIC will only get one input image.
    // The same thing that would happen if the clipboard does not contain an image.
    if (OpenClipboard(nullptr))
    {
        try
        {
            const std::vector<UINT>& availableFormats = GetAvailableClipboardFormats();

#if DEBUG_BUILD
            DumpClipboardFormats(availableFormats);
#endif // DEBUG_BUILD

            err = TryProcessClipboardImage(filterRecord, availableFormats, gmicInputPath);
        }
        catch (const std::bad_alloc&)
        {
            err = memFullErr;
        }
        catch (...)
        {
            err = ioErr;
        }

        // Always release the clipboard, even after an exception.
        CloseClipboard();
    }

    return err;
}
|
{"hexsha": "345cbe6d2abc8c53194f77660b3a16bf3c1c49d4", "size": 12331, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/win/ClipboardUtilWin.cpp", "max_stars_repo_name": "ganego/gmic-8bf", "max_stars_repo_head_hexsha": "ee49ae507da60d648df582772163e059faa9f4f1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/win/ClipboardUtilWin.cpp", "max_issues_repo_name": "ganego/gmic-8bf", "max_issues_repo_head_hexsha": "ee49ae507da60d648df582772163e059faa9f4f1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/win/ClipboardUtilWin.cpp", "max_forks_repo_name": "ganego/gmic-8bf", "max_forks_repo_head_hexsha": "ee49ae507da60d648df582772163e059faa9f4f1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.4124423963, "max_line_length": 121, "alphanum_fraction": 0.4461925229, "num_tokens": 2558}
|
```python
from IPython.display import Image
Image('../../../python_for_probability_statistics_and_machine_learning.jpg')
```
# Support Vector Machines
Support Vector Machines (SVM) originated from the statistical learning theory
developed by Vapnik-Chervonenkis. As such, it represents a deep application of
statistical theory that incorporates the VC dimension concepts we
discussed in the first section. Let's start by looking at some pictures.
Consider the two-dimensional classification problem shown in
[Figure](#fig:svm_001). [Figure](#fig:svm_001) shows two classes (gray and
white
circles) that can be separated by any of the lines shown. Specifically, any
such separating line can be written as the locus of points ($\mathbf{x}$) in
the two-dimensional plane that satisfy the following,
<!-- dom:FIGURE: [fig-machine_learning/svm_001.png, width=500 frac=0.45] In the
two-dimensional plane, the two classes (gray and white circles) are easily
separated by any one of the lines shown. <div id="fig:svm_001"></div> -->
<!-- begin figure -->
<div id="fig:svm_001"></div>
<p>In the two-dimensional plane, the two classes (gray and white circles) are
easily separated by any one of the lines shown.</p>
<!-- end figure -->
$$
\beta_0 + \boldsymbol{\beta}^T \mathbf{x} = 0
$$
To classify an arbitrary $\mathbf{x}$ using this line, we just
compute the sign of $\beta_0+\boldsymbol{\beta}^T \mathbf{x}$ and assign one
class to the positive sign and the other class to the negative sign. To
uniquely specify such a separating line (or, hyperplane in a higher-dimensional
space) we need additional criteria.
[Figure](#fig:svm_002) shows the data with two bordering parallel lines that
form a margin around the central separating line. The *maximal margin
algorithm* finds the widest margin and the unique separating line. As a
consequence, the algorithm uncovers the elements in the data that touch the
margins. These are the *support* elements. The other elements
away from the border are not relevant to the solution. This reduces
model variance because the solution is insensitive to the removal of
elements other than these supporting elements (usually a small minority).
<!-- dom:FIGURE: [fig-machine_learning/svm_002.png, width=500 frac=0.55] The
maximal margin algorithm finds the separating line that maximizes the margin
shown. The elements that touch the margins are the support elements. The dotted
elements are not relevant to the solution. <div id="fig:svm_002"></div> -->
<!-- begin figure -->
<div id="fig:svm_002"></div>
<p>The maximal margin algorithm finds the separating line that maximizes the
margin shown. The elements that touch the margins are the support elements. The
dotted elements are not relevant to the solution.</p>
<!-- end figure -->
To see how this works for linearly separable classes, consider a
training set consisting of $\lbrace (\mathbf{x},y) \rbrace$ where
$y\in \lbrace -1,1 \rbrace$. For any point $\mathbf{x}_i$, we
compute the functional margin as $\hat{ \gamma_i }=y_i (\beta_0 +
\boldsymbol{\beta}^T \mathbf{x}_i)$. Thus, $\hat{\gamma}_i >0$ when
$\mathbf{x}_i$ is correctly classified. The geometrical margin is
$\gamma = \hat{\gamma}/\lVert\boldsymbol{\beta}\rVert$. When
$\mathbf{x}_i$ is correctly classified, the geometrical margin is
equal to the perpendicular distance from $\mathbf{x}_i$ to the line.
Let's see how the maximal margin algorithm works.
Let $M$ be the width of the margin. The maximal margin algorithm can be
formulated as a quadratic programming problem. We want to simultaneously
maximize the margin $M$ while ensuring that all of the data points are
correctly classified.
$$
\begin{aligned}
& \underset{\beta_0,\boldsymbol{\beta},\lVert\boldsymbol{\beta}\rVert=1}{\text{m
aximize}}
& & M \\\
& \text{subject to:}
& & y_i(\beta_0+\boldsymbol{\beta}^T \mathbf{x}_i) \geq M, \; i = 1, \ldots, N.
\end{aligned}
$$
The first line says we want to generate a maximum value for $M$ by
adjusting $\beta_0$ and $\boldsymbol{\beta}$ while keeping
$\lVert\boldsymbol{\beta}\rVert=1$. The functional margins for each $i^{th}$
data element are the constraints to the problem and must be satisfied for every
proposed solution. In words, the constraints enforce that the elements have to
be correctly classified and outside of the margin around the separating line.
With some reformulation, it turns out that
$M=1/\lVert\boldsymbol{\beta}\rVert$ and this can be put into the following
standard format,
$$
\begin{aligned}
& \underset{\beta_0,\boldsymbol{\beta}}{\text{minimize}}
& & \lVert\boldsymbol{\beta}\rVert \\\
& \text{subject to:}
& & y_i(\beta_0+\boldsymbol{\beta}^T \mathbf{x}_i) \geq 1, \; i = 1, \ldots, N.
\end{aligned}
$$
This is a convex optimization problem and can be solved using
powerful
methods in that area.
The situation becomes more complex when the two classes are not separable and
we have to allow some unavoidable mixing between the two classes in the
solution. This means that the constraints have to be modified as in the following,
$$
y_i(\beta_0+\boldsymbol{\beta}^T \mathbf{x}_i) \geq M(1-\xi_i)
$$
where the $\xi_i$ are the slack variables and represent the
proportional amount that the prediction is on the wrong side of the margin. Thus,
elements are misclassified when $\xi_i>1$. With these additional variables,
we have a more general formulation of the convex optimization problem,
$$
\begin{aligned}
& \underset{\beta_0,\boldsymbol{\beta}}{\text{minimize}}
& & \lVert\boldsymbol{\beta}\rVert \\\
& \text{subject to:}
& & y_i(\beta_0+\boldsymbol{\beta}^T \mathbf{x}_i) \geq 1-\xi_i, \\\
& & & \xi_i \geq 0, \sum \xi_i \leq \texttt{constant}, \; i = 1, \ldots, N.
\end{aligned}
$$
which can be rewritten in the following equivalent form,
<!-- Equation labels as ordinary links -->
<div id="eq:svm"></div>
$$
\begin{equation}
\begin{aligned}
& \underset{\beta_0,\boldsymbol{\beta}}{\text{minimize}}
& & \frac{1}{2}\lVert\boldsymbol{\beta}\rVert + C \sum \xi_i \\\
& \text{subject to:}
& & y_i(\beta_0+\boldsymbol{\beta}^T \mathbf{x}_i) \geq 1-\xi_i, \xi_i \geq 0 \;
i = 1, \ldots, N.
\end{aligned}
\end{equation}
\label{eq:svm} \tag{1}
$$
Because the $\xi_i$ terms are all positive, the objective
is to maximize the margin (i.e., minimize $\lVert\boldsymbol{\beta}\rVert$)
while minimizing the proportional drift of the predictions to the wrong side
of the margin (i.e., $C \sum \xi_i$). Thus, large values of $C$ shunt
algorithmic focus towards the correctly classified points near the
decision boundary and small values focus on further data. The value $C$ is
a hyperparameter for the SVM.
The good news is that all of these complicated pieces are handled neatly inside
of Scikit-learn. The following sets up the linear *kernel* for the SVM (more on
kernels soon),
```python
from sklearn.datasets import make_blobs
from sklearn.svm import SVC
sv = SVC(kernel='linear')
```
We can create some synthetic data using `make_blobs` and then
fit it to the SVM,
```python
X,y=make_blobs(n_samples=200, centers=2, n_features=2,
random_state=0,cluster_std=.5)
sv.fit(X,y)
```
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape='ovr', degree=3, gamma='auto', kernel='linear',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
After fitting, the SVM now has the estimated support vectors and the
coefficients of the $\boldsymbol{\beta}$ in the `sv.support_vectors_` and
`sv.coef_` attributes, respectively. [Figure](#fig:svm_003) shows the two
sample classes (white and gray circles) and the line separating them that was
found by the maximal margin algorithm. The two parallel dotted lines show the
margin. The large circles enclose the support vectors, which are the data
elements that are relevant to the solution. Notice that only these elements
can touch the edges of the margins.
```python
%matplotlib inline
from matplotlib.pylab import subplots
import numpy as np
xi = np.linspace(X[:,0].min(),X[:,0].max(),100)
fig,ax=subplots()
_=ax.scatter(X[:,0],X[:,1],c=y,s=50,cmap='gray',marker='o',alpha=.3)
_=ax.plot(sv.support_vectors_[:,0],sv.support_vectors_[:,1],'ko',markersize=20,alpha=.2)
_=ax.plot(xi,-sv.coef_[0,0]/sv.coef_[0,1]*xi- sv.intercept_/sv.coef_[0,1],'k',lw=3.)
margin = np.linalg.norm(sv.coef_)
_=ax.plot(xi,-sv.coef_[0,0]/sv.coef_[0,1]*xi-(sv.intercept_+margin/2.)/sv.coef_[0,1],'--k',lw=3.)
_=ax.plot(xi,-sv.coef_[0,0]/sv.coef_[0,1]*xi-(sv.intercept_-margin/2.)/sv.coef_[0,1],'--k',lw=3.)
```
<!-- dom:FIGURE: [fig-machine_learning/svm_003.png, width=500 frac=0.75] The
two class shown (white and gray circles) are linearly separable. The maximal
margin solution is shown by the dark black line in the middle. The dotted lines
show the extent of the margin. The large circles indicate the support vectors
for the maximal margin solution. <div id="fig:svm_003"></div> -->
<!-- begin figure -->
<div id="fig:svm_003"></div>
<p>The two class shown (white and gray circles) are linearly separable. The
maximal margin solution is shown by the dark black line in the middle. The
dotted lines show the extent of the margin. The large circles indicate the
support vectors for the maximal margin solution.</p>
<!-- end figure -->
```python
def draw_margins(sv,X,y,ax=None):
sv.fit(X,y)
xi = np.linspace(X[:,0].min(),X[:,0].max(),100)
if ax is None: fig,ax=subplots()
_=ax.scatter(X[:,0],X[:,1],c=y,s=50,cmap='gray',marker='o',alpha=.3)
_=ax.plot(sv.support_vectors_[:,0],sv.support_vectors_[:,1],'ko',markersize=20,alpha=.2)
_=ax.plot(xi,-sv.coef_[0,0]/sv.coef_[0,1]*xi- sv.intercept_/sv.coef_[0,1],'k',lw=3.)
margin = np.linalg.norm(sv.coef_)
_=ax.plot(xi,-sv.coef_[0,0]/sv.coef_[0,1]*xi- (sv.intercept_+margin/2.)/sv.coef_[0,1],'--k',lw=3.)
_=ax.plot(xi,-sv.coef_[0,0]/sv.coef_[0,1]*xi- (sv.intercept_-margin/2.)/sv.coef_[0,1],'--k',lw=3.)
```
```python
X, y = make_blobs(n_samples=50, centers=2, n_features=2,
cluster_std=1,random_state=0)
fig,axs = subplots(2,2,sharex=True,sharey=True)
#fig.set_size_inches((12,6))
sv = SVC(kernel='linear',C=.0100)
draw_margins(sv,X,y,ax=axs[0,0])
_=axs[0,0].set_title('C=0.01')
sv = SVC(kernel='linear',C=1)
draw_margins(sv,X,y,ax=axs[0,1])
_=axs[0,1].set_title('C=1')
sv = SVC(kernel='linear',C=100)
draw_margins(sv,X,y,ax=axs[1,0])
_=axs[1,0].set_title('C=100')
sv = SVC(kernel='linear',C=10000)
draw_margins(sv,X,y,ax=axs[1,1])
_=axs[1,1].set_title('C=10000')
```
[Figure](#fig:svm_004) shows what happens when the value of $C$ changes.
Increasing this value emphasizes the $\xi$ part of the objective function in
Equation [eq:svm](#eq:svm). As shown in the top left panel, a small value for
$C$ means that
the algorithm is willing to accept many support vectors at the expense of
maximizing the margin. That is, the proportional amount that predictions are on
the wrong side of the margin is more acceptable with smaller $C$. As the value
of $C$ increases, there are fewer support vectors because the optimization
process prefers to eliminate support vectors that are far away from the margins
and accept fewer of these that encroach into the margin. Note that as the value
of $C$ progresses through this figure, the separating line tilts slightly.
<!-- dom:FIGURE: [fig-machine_learning/svm_004.png, width=500 frac=0.95] The
maximal margin algorithm finds the separating line that maximizes the margin
shown. The elements that touch the margins are the support elements. The dotted
elements are not relevant to the solution. <div id="fig:svm_004"></div> -->
<!-- begin figure -->
<div id="fig:svm_004"></div>
<p>The maximal margin algorithm finds the separating line that maximizes the
margin shown. The elements that touch the margins are the support elements. The
dotted elements are not relevant to the solution.</p>
<!-- end figure -->
## Kernel Tricks
Support Vector Machines provide a powerful method to deal with linear
separations, but they can also apply to non-linear boundaries by
exploiting the so-called *kernel trick*. The convex optimization
formulation of the SVM includes a *dual* formulation that leads to a
solution that requires only the inner-products of the features. The
kernel trick is to substitute inner-products by nonlinear kernel
functions. This can be thought of as mapping the original features
onto a possibly infinite dimensional space of new features. That is,
if the data are not linearly separable in two-dimensional space (for
example) maybe they are separable in three-dimensional space (or
higher)?
To make this concrete, suppose the original input space is
$\mathbb{R}^n$ and we want to use a non-linear mapping
$\psi:\mathbf{x} \mapsto \mathcal{F}$ where $\mathcal{F}$ is an
inner-product space of higher dimension. The kernel trick is to
calculate the inner-product in $\mathcal{F}$ using a kernel
function, $K(\mathbf{x}_i,\mathbf{x}_j) = \langle
\psi(\mathbf{x}_i),\psi(\mathbf{x}_j)\rangle$. The long way to
compute this is to first compute $\psi(\mathbf{x})$ and then do the
inner-product. The kernel-trick way to do it is to use the kernel
function and avoid computing $\psi$. In other words, the kernel
function returns what the inner-product in $\mathcal{F}$ would have
returned if $\psi$ had been applied. For example, to achieve an
$n^{th}$ polynomial mapping of the input space, we can use
$\kappa(\mathbf{x}_i,\mathbf{x}_j)=(\mathbf{x}_i^T\mathbf{x}_j+\theta)^n$.
For example, suppose the input space is $\mathbb{R}^2$ and
$\mathcal{F}=\mathbb{R}^4$ and we have the following mapping,
$$
\psi(\mathbf{x}) : (x_0,x_1) \mapsto (x_0^2,x_1^2,x_0 x_1, x_1 x_0)
$$
The inner product in $\mathcal{F}$ is then,
$$
\langle \psi(\mathbf{x}),\psi(\mathbf{y}) \rangle = \langle
\mathbf{x},\mathbf{y} \rangle^2
$$
In other words, the kernel is the square of the inner
product in input space. The advantage of using the kernel instead of
simply enlarging the feature space is computational because you only
need to compute the kernel on all distinct pairs of the input space.
The following example should help make this concrete. First we create
some Sympy variables,
```python
import sympy as S
x0,x1=S.symbols('x:2',real=True)
y0,y1=S.symbols('y:2',real=True)
```
Next, we create the $\psi$ function that maps into $\mathbb{R}^4$
and the corresponding kernel function,
```python
psi = lambda x,y: (x**2,y**2,x*y,x*y)
kern = lambda x,y: S.Matrix(x).dot(y)**2
```
Notice that the inner product in $\mathbb{R}^4$ is
equal to the kernel function, which only uses the $\mathbb{R}^2$
variables.
```python
print(S.Matrix(psi(x0,x1)).dot(psi(y0,y1)))
print(S.expand(kern((x0,x1),(y0,y1)))) # same as above
```
x0**2*y0**2 + 2*x0*x1*y0*y1 + x1**2*y1**2
x0**2*y0**2 + 2*x0*x1*y0*y1 + x1**2*y1**2
**Polynomial Regression Using Kernels.** Recall our favorite
linear regression problem from the regularization chapter,
$$
\min_{\boldsymbol{\beta}} \Vert y - \mathbf{X}\boldsymbol{\beta}\Vert^2
$$
where $\mathbf{X}$ is a $n\times m$ matrix with $m>n$. As
we discussed, there are multiple solutions to this problem. The
least-squares solution is the following:
$$
\boldsymbol{\beta}_{LS}=\mathbf{X}^T(\mathbf{X}\mathbf{X}^T)^{\text{-1}}\mathbf{
y}
$$
Given a new feature vector $\mathbf{x}$, the corresponding estimator
for $\mathbf{y}$ is the following,
$$
\hat{\mathbf{y}} = \mathbf{x}^T\boldsymbol{\beta}_{LS}=\mathbf{x}^T\mathbf{X}^T(
\mathbf{X}\mathbf{X}^T)^{\text{-1}}\mathbf{y}
$$
Using the kernel trick, the solution can be written more generally as
the following,
$$
\hat{\mathbf{y}}=\mathbf{k}(\mathbf{x})^T\mathbf{K}^{\text{-1}}\mathbf{y}
$$
where the $n\times n$ kernel matrix $\mathbf{K}$ replaces
$\mathbf{X}\mathbf{X}^T$ and where $\mathbf{k}(\mathbf{x})$ is a $n$-vector of
components $\mathbf{k}(\mathbf{x})=[\kappa(\mathbf{x}_i,\mathbf{x})]$ and where
$\mathbf{K}_{i,j}=\kappa(\mathbf{x}_i,\mathbf{x}_j)$ for the kernel function
$\kappa$. With this more general setup, we can substitute
$\kappa(\mathbf{x}_i,\mathbf{x}_j)=(\mathbf{x}_i^T\mathbf{x}_j+\theta)^n$ for
$n^{th}$-order polynomial regression [[bauckhagenumpy]](#bauckhagenumpy). Note
that ridge
regression can also be incorporated by inverting $(\mathbf{K}+\alpha
\mathbf{I})$, which can help stabilize poorly conditioned $\mathbf{K}$ matrices
with a tunable $\alpha$ hyper-parameter [[bauckhagenumpy]](#bauckhagenumpy).
For some kernels, the enlarged $\mathcal{F}$ space is infinite-dimensional.
Mercer's conditions provide technical restrictions on the kernel functions.
Powerful, well-studied kernels have been implemented in Scikit-learn. The
advantage of kernel functions may evaporate when $n\rightarrow m$, in which
case using the $\psi$ functions instead can be more practicable.
<!-- !bt -->
<!-- \begin{pyconsole} -->
<!-- sv = SVC(kernel='rbf',C=1000) -->
<!-- sv.fit(X,y) -->
<!-- \end{pyconsole} -->
<!-- !et -->
<!-- FIGURE: [fig-machine_learning/svm_005.png, width=500 frac=0.85] Using a
radial basis function kernel, the SVM can generate a curved separating surface
that can classify the two classes shown. <div id="fig:svm_005"></div> -->
<!-- As shown in [Figure](#fig:svm_002), the maximal margin algorithm finds the
-->
<!-- separating line that maximizes the margin shown. As a result, the data
shown by -->
<!-- the dotted circles are no longer relevant to the *support* of the line.
That -->
<!-- is, the dotted circles could be removed without changing the final result. -->
<!-- Kernel trick -->
<!-- objective function includes VC dimension -->
<!-- *Modern Multivariate Statistical Techniques Izenman, p. 371* -->
<!-- *Learning and Soft computing by Kecman, p.154, 171, 186* -->
<!-- *Mastering machine learning with Scikit-learn, p.174* -->
<!-- *Gaussian Processes for Machine Learning, p. 163* -->
<!-- *Elements of statistical learning p.418* -->
<!-- *Kernel methods pattern Taylor p.43* -->
<!-- *Learning with Kernels, p.43* -->
<!-- *An Intro to Machine Learning by james, p.362* -->
```python
from matplotlib.pylab import cm
# Build a 100x100 evaluation grid spanning the observed data range on each axis.
xi = np.linspace(X[:,0].min(),X[:,0].max(),100)
yi = np.linspace(X[:,1].min(),X[:,1].max(),100)
fig,ax=subplots()
# Scatter the training points, colored by their class label y.
_=ax.scatter(X[:,0],X[:,1],c=y,s=50,cmap='gray',marker='o',alpha=.3)
Xi,Yi = np.meshgrid(xi,yi)
# Predict a class for every grid point and reshape back to the 2-D grid.
# NOTE(review): assumes `sv` is an already-fitted classifier (SVC) defined
# in an earlier cell -- confirm against the enclosing notebook.
Zi=sv.predict(np.c_[Xi.ravel(),Yi.ravel()]).reshape(Xi.shape)
# Shade the predicted decision regions underneath the scatter plot.
_=ax.contourf(Xi,Yi,Zi,cmap=cm.Paired,alpha=0.2);
```
|
{"hexsha": "8bf326becb141dc929f1ec4ead3eac5c14561ade", "size": 236626, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "chapters/machine_learning/notebooks/svm.ipynb", "max_stars_repo_name": "nsydn/Python-for-Probability-Statistics-and-Machine-Learning", "max_stars_repo_head_hexsha": "d3e0f8ea475525a694a975dbfd2bf80bc2967cc6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 570, "max_stars_repo_stars_event_min_datetime": "2016-05-05T19:08:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T05:09:19.000Z", "max_issues_repo_path": "chapters/machine_learning/notebooks/svm.ipynb", "max_issues_repo_name": "crlsmcl/https-github.com-unpingco-Python-for-Probability-Statistics-and-Machine-Learning", "max_issues_repo_head_hexsha": "6fd69459a28c0b76b37fad79b7e8e430d09a86a5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2016-05-12T22:18:58.000Z", "max_issues_repo_issues_event_max_datetime": "2019-11-06T14:37:06.000Z", "max_forks_repo_path": "chapters/machine_learning/notebooks/svm.ipynb", "max_forks_repo_name": "crlsmcl/https-github.com-unpingco-Python-for-Probability-Statistics-and-Machine-Learning", "max_forks_repo_head_hexsha": "6fd69459a28c0b76b37fad79b7e8e430d09a86a5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 276, "max_forks_repo_forks_event_min_datetime": "2016-05-27T01:42:05.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-27T11:20:27.000Z", "avg_line_length": 341.9450867052, "max_line_length": 114721, "alphanum_fraction": 0.9165856668, "converted": true, "num_tokens": 5402}
|
{-
True
0
False
False
4
42
42
True
False
[33, 42, 42, 42, 42]
[42, 42, 33, 42, 42]
[42, 42, 42, 42, 33]
[33, 42, 42, 42]
[42, 42, 33, 42]
[42, 42, 42, 33]
False
True
-}
import Data.Vector
main : IO ()
main = do
  -- An empty Int vector: prints True (null), 0 (length), False (elem).
  let e = the (Vector Int) empty
  printLn (null e)
  printLn (length e)
  printLn (elem 42 e)
  -- A vector of four copies of 42; exercises length, indexing, membership.
  let a = replicate 4 (the Int 42)
  printLn (null a)
  printLn (length a)
  printLn (a !! 0)
  printLn (a !! 3)
  printLn (elem 42 a)
  printLn (elem 33 a)
  -- Insertion of 33 at the front, the middle, and one past the last index.
  printLn (unsafeInsertAt 0 33 a)
  printLn (unsafeInsertAt 2 33 a)
  printLn (unsafeInsertAt (length a) 33 a)
  -- Replacement of an element at the front, the middle, and the last index
  -- (lastIndex returns a Maybe, hence the `maybe` eliminator).
  printLn (unsafeReplaceAt 0 33 a)
  printLn (unsafeReplaceAt 2 33 a)
  printLn (maybe a (\index => unsafeReplaceAt index 33 a) (lastIndex a))
  -- `a` itself was never mutated by the operations above, so 33 is still
  -- absent (expected-output block at the top of this file confirms: False).
  printLn (elem 33 a)
  printLn (elem 33 (singleton 33))
-- Local Variables:
-- idris-load-packages: ("cil")
-- End:
|
{"hexsha": "2a9896936aea44753a6b0e1cd06d05375a37a551", "size": 837, "ext": "idr", "lang": "Idris", "max_stars_repo_path": "Tests/Vector.idr", "max_stars_repo_name": "timjs/iris-clean", "max_stars_repo_head_hexsha": "b2ed1f982beec936cb6fe32e8fa6b97a1da4a4f6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 146, "max_stars_repo_stars_event_min_datetime": "2015-07-27T12:33:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-25T22:26:35.000Z", "max_issues_repo_path": "Tests/Vector.idr", "max_issues_repo_name": "timjs/iris-clean", "max_issues_repo_head_hexsha": "b2ed1f982beec936cb6fe32e8fa6b97a1da4a4f6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 27, "max_issues_repo_issues_event_min_datetime": "2015-09-21T15:08:05.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-20T13:55:12.000Z", "max_forks_repo_path": "Tests/Vector.idr", "max_forks_repo_name": "timjs/iris-clean", "max_forks_repo_head_hexsha": "b2ed1f982beec936cb6fe32e8fa6b97a1da4a4f6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2015-09-21T12:24:16.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-16T21:10:06.000Z", "avg_line_length": 17.4375, "max_line_length": 72, "alphanum_fraction": 0.6379928315, "num_tokens": 342}
|
C Copyright(C) 1999-2020 National Technology & Engineering Solutions
C of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with
C NTESS, the U.S. Government retains certain rights in this software.
C
C See packages/seacas/LICENSE for details
C=======================================================================
C LINE3 -- locate the SELECTed points that lie inside a tolerance band
C          around the 3-D line through P1 and P2, optionally sort the
C          hits, and print a report of coordinates and distances.
C
C   COORD  - IN     : point coordinates, COORD(NUMNP,3)
C   NUMNP  - IN     : number of points
C   DIST   - SCRATCH: squared normal distance of each point to the line
C   T      - SCRATCH: parametric position of each point's projection
C                     onto the line (T=0 at P1, T=1 at P2)
C   NDIM   - IN     : spatial dimension (forwarded to LOCOUT only)
C   P1,P2  - IN     : the two points defining the line
C   TOLER  - IN/OUT : on entry (center, half-width) of the band;
C                     OVERWRITTEN below with (lower, upper) bounds --
C                     note this side effect is visible to the caller
C   NODEL  - IN     : 'NODES'/'ELEMENTS' label used in the report
C   BOUND  - IN     : 'BOU...' restricts hits to the segment P1..P2
C   SORTYP - IN     : sort key: 'X', 'Y', 'Z', 'T'/'PARAMETR', 'DISTANCE'
C   MAP    - OUT    : indices of the points found, MAP(1..INUM)
C   SORUP  - IN     : .TRUE. = report in ascending sort order
C   INUM   - OUT    : number of points found
C   OPT    - IN     : print the report when '*' or OPT contains 'P'
C   SELECT - IN     : mask of points to consider
C=======================================================================
      SUBROUTINE LINE3 (COORD, NUMNP, DIST, T, NDIM, P1, P2, TOLER,
     * NODEL, BOUND, SORTYP, MAP, SORUP, INUM, OPT, SELECT)
      DIMENSION COORD (NUMNP,*), DIST(*), T(*), P1(*), P2(*),
     * TOLER(2), MAP(*)
      CHARACTER*(*) NODEL, BOUND, SORTYP, OPT
      LOGICAL SORUP, SELECT(*), ISABRT
      include 'nu_io.blk'
C ... echo the search parameters to every active output unit
      CALL LOCOUT ('LINE', NDIM, NODEL, TOLER, SORTYP, P1, P2, BOUND)
C ... BMULT=1 activates the bounded (segment) test 0 <= T <= 1 below;
C     BMULT=0 makes that test vacuous (infinite line)
      IF (BOUND(:3) .EQ. 'BOU') THEN
         BMULT = 1.0
      ELSE
         BMULT = 0.0
      END IF
C ... convert TOLER from (center, half-width) to clamped (lower, upper)
C     distance bounds; TEMP keeps the original center for reuse below
      TEMP = TOLER(1)
      TOLER(1) = MAX(0.0, TEMP - TOLER(2))
      TOLER(2) = MAX(0.0, TEMP + TOLER(2))
C ... direction vector (A,B,C) of the line and its base point (X1,Y1,Z1)
      A = P2(1) - P1(1)
      B = P2(2) - P1(2)
      C = P2(3) - P1(3)
      X1 = P1(1)
      Y1 = P1(2)
      Z1 = P1(3)
      DLINE = A**2 + B**2 + C**2
      IF (DLINE .EQ. 0.0) THEN
         CALL PRTERR ('CMDERR', 'Zero length line input')
         RETURN
      END IF
C ... project every selected point onto the line: T(I) is the parametric
C     foot of the perpendicular, DIST(I) the SQUARED normal distance
      DO 10 I=1, NUMNP
         IF (SELECT(I)) THEN
            X0 = COORD(I,1)
            Y0 = COORD(I,2)
            Z0 = COORD(I,3)
            T(I) = -1. * (A * (X1 - X0) + B * (Y1 - Y0) + C * (Z1 - Z0))
     *         / (A**2 + B**2 + C**2)
            X = X1 + A * T(I)
            Y = Y1 + B * T(I)
            Z = Z1 + C * T(I)
            DIST(I) = (X - X0)**2 + (Y - Y0)**2 + (Z - Z0)**2
         END IF
   10 CONTINUE
C ... collect into MAP the points whose distance lies in the band and
C     (in bounded mode) whose projection falls between P1 and P2
      INUM = 0
      DISMIN = 1.0E38
      DO 20 I=1, NUMNP
         IF (SELECT(I)) THEN
C ... NOTE(review): MIN(DIST(I), ABS(DISMIN-TEMP)) looks suspicious --
C     one would expect MIN(DISMIN, DIST(I)) to track the minimum
C     distance reported at label 80; confirm intent before changing
            DISMIN = MIN(DIST(I), ABS(DISMIN-TEMP))
            IF (DIST(I) .GE. TOLER(1)**2 .AND. DIST(I) .LE. TOLER(2)**2
     *         .AND. BMULT * T(I) .GE. 0.0 .AND. BMULT * T(I) .LE. 1.0)
     *         THEN
               INUM = INUM + 1
               MAP(INUM) = I
            END IF
         END IF
   20 CONTINUE
C ... reorder MAP in place according to the requested sort key
      IF (SORTYP .EQ. 'X') THEN
         CALL INDEXX (COORD(1,1), MAP, INUM, .FALSE.)
      ELSE IF (SORTYP .EQ. 'Y') THEN
         CALL INDEXX (COORD(1,2), MAP, INUM, .FALSE.)
      ELSE IF (SORTYP .EQ. 'Z') THEN
         CALL INDEXX (COORD(1,3), MAP, INUM, .FALSE.)
      ELSE IF (SORTYP .EQ. 'T' .OR. SORTYP .EQ. 'PARAMETR') THEN
         CALL INDEXX (T, MAP, INUM, .FALSE.)
      ELSE IF (SORTYP .EQ. 'DISTANCE') THEN
         CALL INDEXX (DIST, MAP, INUM, .FALSE.)
      END IF
C ... SORUP selects ascending or descending traversal of MAP
      IF (SORUP) THEN
         IBEG = 1
         IEND = INUM
         IINC = 1
      ELSE
         IBEG = INUM
         IEND = 1
         IINC = -1
      END IF
C ... print the report (header, one line per hit, or a "none found"
C     message); ISABRT lets the user interrupt long listings
      IF (OPT .EQ. '*' .OR. INDEX(OPT, 'P') .GT. 0) THEN
         DO 30 IO=IOMIN, IOMAX
            WRITE (IO, 40) NODEL
   30    CONTINUE
   40    FORMAT (/,T50,'DISTANCE',/2X,A8,T16,'X',T26,'Y',T36,'Z',
     *      T45,'NORMAL',T55,'PARAMETRIC',/)
         DO 60 IN = IBEG, IEND, IINC
            IF (ISABRT()) RETURN
            I = MAP(IN)
            DO 50 IO=IOMIN, IOMAX
               WRITE (IO, 90) I, (COORD(I,J),J=1,3), SQRT(DIST(I)),
     *            T(I)
   50       CONTINUE
   60    CONTINUE
         IF (INUM .EQ. 0) THEN
            DO 70 IO=IOMIN, IOMAX
               WRITE (IO, 80) SQRT(DISMIN)
   70       CONTINUE
         END IF
      END IF
   80 FORMAT (/' None found within range, minimum distance = ',
     *   1PE12.3,/)
   90 FORMAT (I10, 3(F10.4), 2(1PE12.3))
      RETURN
      END
|
{"hexsha": "0b57d85ddca2d18e8aa274d5420295cabb30d141", "size": 3495, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "packages/seacas/applications/numbers/nu_line3.f", "max_stars_repo_name": "jschueller/seacas", "max_stars_repo_head_hexsha": "14c34ae08b757cba43a3a03ec0f129c8a168a9d3", "max_stars_repo_licenses": ["Python-2.0", "Zlib", "BSD-2-Clause", "MIT", "NetCDF", "BSL-1.0", "X11", "BSD-3-Clause"], "max_stars_count": 82, "max_stars_repo_stars_event_min_datetime": "2016-02-04T18:38:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T03:01:49.000Z", "max_issues_repo_path": "packages/seacas/applications/numbers/nu_line3.f", "max_issues_repo_name": "jschueller/seacas", "max_issues_repo_head_hexsha": "14c34ae08b757cba43a3a03ec0f129c8a168a9d3", "max_issues_repo_licenses": ["Python-2.0", "Zlib", "BSD-2-Clause", "MIT", "NetCDF", "BSL-1.0", "X11", "BSD-3-Clause"], "max_issues_count": 206, "max_issues_repo_issues_event_min_datetime": "2015-11-20T01:57:47.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T21:12:04.000Z", "max_forks_repo_path": "packages/seacas/applications/numbers/nu_line3.f", "max_forks_repo_name": "jschueller/seacas", "max_forks_repo_head_hexsha": "14c34ae08b757cba43a3a03ec0f129c8a168a9d3", "max_forks_repo_licenses": ["Python-2.0", "Zlib", "BSD-2-Clause", "MIT", "NetCDF", "BSL-1.0", "X11", "BSD-3-Clause"], "max_forks_count": 68, "max_forks_repo_forks_event_min_datetime": "2016-01-13T22:46:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T06:25:05.000Z", "avg_line_length": 30.1293103448, "max_line_length": 75, "alphanum_fraction": 0.4615164521, "num_tokens": 1321}
|
/**
* Copyright (C) 2012 ciere consulting, ciere.com
* Copyright (C) 2011, 2012 Object Modeling Designs
*
* Distributed under the Boost Software License, Version 1.0. (See accompanying
* file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
*
*
*/
#ifndef CIERE_JSON_IO_IMPL_HPP
#define CIERE_JSON_IO_IMPL_HPP
#include <string>
#include <fstream>
#include <istream>
#include <ios>
#include <boost/foreach.hpp>
#include <boost/spirit/include/qi.hpp>
#include <boost/spirit/include/support_istream_iterator.hpp>
#include "../io.hpp"
#include "../parser/grammar.hpp"
namespace ciere { namespace json
{
namespace spirit = boost::spirit;
namespace detail
{
      // Output visitor: serializes each alternative held by a json::value
      // to the wrapped std::ostream as JSON text.  Dispatch happens through
      // boost::apply_visitor (see operator<< below in this header).
      struct printer : public boost::static_visitor<>
      {
         printer(std::ostream& s) : stream(s) {}

         // Strings: walk the UTF-8 input code point by code point
         // (u8_to_u32_iterator) so control and special characters can be
         // escaped; everything else is re-encoded back to UTF-8.
         void operator()(string_t const & utf) const
         {
            stream << '"';
            typedef ::boost::uint32_t ucs4_char;
            typedef boost::u8_to_u32_iterator<std::string::const_iterator> iter_t;
            iter_t f = utf.begin();
            iter_t l = utf.end();
            for (iter_t i = f; i != l; ++i)
            {
               ucs4_char c = *i;
               // NOTE(review): \0 \a \v \e \_ \N \L \P are YAML-style
               // escapes, not strict JSON -- presumably intentional for
               // this (JSON-superset) grammar; confirm against the parser.
               switch (c)
               {
                  case 0: stream << "\\0"; break;
                  case 0x7: stream << "\\a"; break;
                  case 0x8: stream << "\\b"; break;
                  case 0x9: stream << "\\t"; break;
                  case 0xA: stream << "\\n"; break;
                  case 0xB: stream << "\\v"; break;
                  case 0xC: stream << "\\f"; break;
                  case 0xD: stream << "\\r"; break;
                  case 0x1B: stream << "\\e"; break;
                  case '"': stream << "\\\""; break;
                  case '\\': stream << "\\\\"; break;
                  case 0xA0: stream << "\\_"; break;
                  case 0x85: stream << "\\N"; break;
                  case 0x2028: stream << "\\L"; break;
                  case 0x2029: stream << "\\P"; break;
                  default: stream << boost::spirit::to_utf8(c);
               }
            }
            stream << '"';
         }

         // Fallback for any alternative without a dedicated overload:
         // rely on the type's own stream insertion.
         template< typename T >
         void operator()(T const & value) const
         {
            stream << value;
         }

         // Doubles need special-casing for NaN and +/-Infinity, which
         // plain stream insertion would not render in this spelling.
         void operator()(double d) const
         {
            // javascript's handling of NaN and +/-Infinity
            // isn't so great. JSON simply follows the javascript
            // standard. We can output nan and infinity; however,
            // we cannot actually parse it back in afaict because
            // the javascript side is generating a null?
            //
            // TODO: clear this up with something definitive
            if(boost::math::isnan(d))
            {
               stream << "NaN";
               return;
            }
            if(boost::math::isinf(d))
            {
               if(d < 0.0) { stream << '-'; }
               stream << "Infinity";
               return;
            }
            stream << d;
         }

         // Booleans: JSON keywords rather than the stream's default 1/0.
         void operator()(bool_t value) const
         {
            stream << (value?"true":"false");
         }

         // Null: the literal JSON keyword (the value itself carries no data).
         void operator()(null_t value) const
         {
            stream << "null";
         }

         // Objects: comma-separated "key":value pairs inside braces;
         // members recurse through apply_visitor.
         // NOTE(review): keys are emitted verbatim, without the escaping
         // applied to string VALUES above -- confirm keys are pre-sanitized.
         void operator()(object_t const & obj) const
         {
            stream << "{";
            bool first = true;
            BOOST_FOREACH( object_t::value_type const & v, obj )
            {
               if( first ) { first = false; }
               else { stream << ", "; }
               stream << '"' << v.first << "\":";
               boost::apply_visitor( *this,v.second);
            }
            stream << "}";
         }

         // Arrays: comma-separated elements inside brackets, recursing
         // through apply_visitor for each element.
         void operator()(array_t const & arr) const
         {
            stream << "[";
            bool first = true;
            BOOST_FOREACH( value const & v, arr )
            {
               if( first ) { first = false; }
               else { stream << ", "; }
               boost::apply_visitor(*this,v);
            }
            stream << "]";
         }

         // Destination for all output; bound at construction.
         std::ostream& stream;
      };
}
inline std::ostream& operator<<(std::ostream& stream, value const & v)
{
boost::apply_visitor(detail::printer(stream),v);
return stream;
}
inline std::istream& operator>>( std::istream& stream, value& object )
{
if( !json::read( stream, object ) )
{
stream.setstate( std::ios_base::failbit );
}
return stream;
}
inline bool read( std::istream& stream, value& object)
{
typedef parser::grammar< spirit::istream_iterator > grammar_t;
stream.unsetf( std::ios::skipws );
spirit::istream_iterator iter( stream );
spirit::istream_iterator end_iter;
grammar_t grammar;
return( spirit::qi::phrase_parse( iter, end_iter,
grammar,
spirit::ascii::space_type(),
object ) );
}
inline bool read( std::string const & filename, value& object)
{
std::ifstream stream( filename.c_str() );
if( !stream.is_open() )
{
return false;
}
return read( stream, object );
}
inline value construct( std::string const & input )
{
typedef std::string::const_iterator iter_t;
typedef parser::grammar<iter_t> grammar_t;
grammar_t grammar;
json::value value;
iter_t iter = input.begin();
iter_t end = input.end();
spirit::qi::phrase_parse( iter, end,
grammar,
spirit::ascii::space_type(),
value );
return value;
}
}}
#endif // CIERE_JSON_IO_IMPL_HPP
|
{"hexsha": "0b310382052277597cc06ec1ea1d648ef775a45d", "size": 6120, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "libs/spirit/example/qi/json/json/detail/io_impl.hpp", "max_stars_repo_name": "Abce/boost", "max_stars_repo_head_hexsha": "2d7491a27211aa5defab113f8e2d657c3d85ca93", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 85.0, "max_stars_repo_stars_event_min_datetime": "2015-02-08T20:36:17.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-14T20:38:31.000Z", "max_issues_repo_path": "libs/boost/libs/spirit/example/qi/json/json/detail/io_impl.hpp", "max_issues_repo_name": "flingone/frameworks_base_cmds_remoted", "max_issues_repo_head_hexsha": "4509d9f0468137ed7fd8d100179160d167e7d943", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 9.0, "max_issues_repo_issues_event_min_datetime": "2015-01-28T16:33:19.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-12T23:03:28.000Z", "max_forks_repo_path": "libs/boost/libs/spirit/example/qi/json/json/detail/io_impl.hpp", "max_forks_repo_name": "flingone/frameworks_base_cmds_remoted", "max_forks_repo_head_hexsha": "4509d9f0468137ed7fd8d100179160d167e7d943", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 27.0, "max_forks_repo_forks_event_min_datetime": "2015-01-28T16:33:30.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-12T05:04:39.000Z", "avg_line_length": 29.2822966507, "max_line_length": 83, "alphanum_fraction": 0.4514705882, "num_tokens": 1328}
|
from keras.models import Sequential
from keras.layers import Dense, Activation
import numpy as np

# Three equivalent ways of defining the same 784 -> 32(relu) -> 10(softmax)
# architecture; only the last style is enabled.

if False:  # style 1: pass the full layer list to the Sequential constructor
    model1 = Sequential([
        # was Dense(32, units=784): `units` was passed twice (the positional
        # 32 already binds to `units`); the input width goes in `input_dim`
        Dense(32, input_dim=784),
        Activation('relu'),
        Dense(10),
        Activation('softmax'),
    ])

if False:  # style 2: add the layers one at a time
    model2 = Sequential()
    # was input_shape=(784, None): the shape of ONE sample is just (784,)
    model2.add(Dense(32, input_shape=(784,)))
    model2.add(Activation('relu'))
    model2.add(Dense(10))
    model2.add(Activation('softmax'))

if True:  # style 3 (preferred): inline activations keep the layering clearest
    model = Sequential()
    # was activation='rule' (typo) and input_shape=(784, None)
    model.add(Dense(32, activation='relu', input_shape=(784,)))
    model.add(Dense(10, activation='softmax'))

# Compile: choose loss function, optimizer, and the metrics to track.
model.compile(
    optimizer='rmsprop',
    loss='categorical_crossentropy',
    metrics=['accuracy']
)

# Train on random data.  The feature width must match input_shape (784;
# it was 100), and categorical_crossentropy expects one-hot targets, so
# the integer class labels are one-hot encoded before fitting.
data = np.random.random((1000, 784))
labels = np.random.randint(10, size=(1000, 1))
one_hot_labels = np.eye(10)[labels.ravel()]  # shape (1000, 10)
model.fit(data, one_hot_labels, epochs=10, batch_size=32)
|
{"hexsha": "5ec818a97cacf311726f4c886fb22bc7452e2854", "size": 942, "ext": "py", "lang": "Python", "max_stars_repo_path": "kkeras/sequential_first_try.py", "max_stars_repo_name": "daigouwei/TensorFlow", "max_stars_repo_head_hexsha": "3716b1cdf79f9203adfc2bc77eb3a367a153cc22", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "kkeras/sequential_first_try.py", "max_issues_repo_name": "daigouwei/TensorFlow", "max_issues_repo_head_hexsha": "3716b1cdf79f9203adfc2bc77eb3a367a153cc22", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kkeras/sequential_first_try.py", "max_forks_repo_name": "daigouwei/TensorFlow", "max_forks_repo_head_hexsha": "3716b1cdf79f9203adfc2bc77eb3a367a153cc22", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.4594594595, "max_line_length": 68, "alphanum_fraction": 0.6719745223, "include": true, "reason": "import numpy", "num_tokens": 303}
|
# -*- coding: utf-8 -*-
"""
Copyright 2020 Andrea López Incera.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
Please acknowledge the authors when re-using this code and maintain this notice intact.
Code written by Andrea López Incera, used and analysed in,
'Honeybee communication during collective defence is shaped by predation.'
Andrea López-Incera, Morgane Nouvian, Katja Ried, Thomas Müller and Hans J. Briegel.
"""
import numpy as np
import beesting_predator_pheromone
import ps_agent_bee

# --- configuration -----------------------------------------------------
#parameters we set for environment:
group_size=100
num_bins=10
min_units_perbin=3
full_resolution=False
range_predators=[16,40]
perc_false_alarms=0
rew_scaling='linear'
#predator's parameters:
kill_rate=1
time_attack=0
visual_time_delay=10
#parameters we set for the PS agent:
gamma_damping=0.003#float between 0 and 1. Forgetting.
eta_glow_damping=0#float between 0 and 1. Setting it to 1 effectively deactivates glow. 0 means that there is no damping (remembers everything with equal intensity).
policy_type='standard'#usual computation of prob distribution according to h matrix.
beta_softmax=1#irrelevant if policy_type is standard.
num_reflections=0 #effectively deactivates reflections.
init_mode='standard'#standard means that the initial probabilities are 0.5 for stinging and 0.5 for chilling.
init_psting=0.2
#simulation parameters:
num_trials=80000 #number of trials (defensive events).
num_pop=50 #number of populations (trained independently).

#initialization of the "environment".
env=beesting_predator_pheromone.Beesting_predator(group_size, num_bins, min_units_perbin, full_resolution,range_predators,perc_false_alarms,rew_scaling)

#record of performance for all the populations.
learning_curve_allpop=np.zeros([num_pop,num_trials])
prob_stinging_allpop=np.zeros([num_pop,env.num_percepts_list[0]])
number_stung_allpop=np.zeros([num_pop,num_trials])
which_sting=np.zeros([num_pop,group_size])
predator_sth_allpop=np.zeros([num_pop,num_trials])
predator_kills_allpop=np.zeros([num_pop,num_trials])

# --- main experiment: independently train num_pop populations ----------
for pop in range(num_pop):
    #initialize ensemble of PS agents
    agent_list=[]
    for i in range(group_size):
        agent_list.append(ps_agent_bee.BasicPSAgent(env.num_actions,env.num_percepts_list,\
        gamma_damping, eta_glow_damping, policy_type, beta_softmax, num_reflections,init_mode,init_psting))
    #initialize a record of performance for this population.
    learning_curve=np.zeros(num_trials)
    number_stung=np.zeros(num_trials)
    # rolling buffer over the most recent 1000 trials (index i_trial%1000
    # below overwrites older entries)
    which_stingevol=np.zeros([1000,group_size])
    sth=np.zeros(num_trials)
    kills=np.zeros(num_trials)
    ps_evolution=np.zeros([num_trials,env.num_percepts_list[0]])
    #interaction of this population
    for i_trial in range(num_trials):
        for i in range(group_size):#reset g matrix to not mix the actions of current trial with past trials.
            agent_list[i].g_matrix=np.zeros((agent_list[i].num_actions, agent_list[i].num_percepts), dtype=np.float64)
        #define global g matrix, where the collective performance will be stored.
        global_g_matrix=np.zeros((agent_list[i].num_actions, agent_list[i].num_percepts), dtype=np.float64)
        #initialize defensive event
        test_sting=np.zeros(group_size) #array with the boolean value of each agent's action (0 hasn't stung yet, 1 has stung).
        test_killed=0 #no kills yet.
        test_predator=1 #predator is there.
        predator_leaving=0 #predator is not leaving.
        counter_pher=0 #no pheromone in the air.
        counter_rounds_nopredator=0
        ps=np.zeros(env.num_percepts_list[0]) #initialize record of p_s for each percept at the current trial.
        predator_resistance=env.rchoose() #random choice of predator (s_th)
        #sequential decision process of the group.
        for i in range(group_size):
            # each bee perceives (pheromone level, predator-leaving flag)
            # and decides whether to sting (action=1) or not (action=0)
            action=agent_list[i].deliberate_and_learn(env.get_percept(counter_pher,predator_leaving),0)
            counter_pher+=np.copy(action)
            test_sting[i]=action
            test_predator=env.scare_predator(np.sum(test_sting),predator_resistance) #check if predator is scared away.
            if test_predator and i>=time_attack:#predator's attack, only if it is not already scared away.
                test_killed+=kill_rate
            if (test_predator+1)%2: #if predator is scared away...
                counter_rounds_nopredator+=1
                if counter_rounds_nopredator>=visual_time_delay:
                    predator_leaving=1 #...bee perceives the visual stimulus of "predator is leaving" after some time delay.
        # reward depends on how many bees survive (neither stung nor killed)
        reward=env.get_reward(group_size-min(group_size,np.sum(test_sting)+np.sum(test_killed)))
        #save performance for this trial
        learning_curve[i_trial]=reward
        number_stung[i_trial]=np.sum(test_sting)
        which_stingevol[i_trial%1000]=test_sting
        sth[i_trial]=predator_resistance
        kills[i_trial]=np.sum(test_killed)
        for i in range(env.num_percepts_list[0]): #save current probability of stinging for each percept.
            ps[i]=agent_list[0].h_matrix[1,i]/np.sum(agent_list[0].h_matrix[:,i])
        ps_evolution[i_trial]=ps
        #update h matrix
        for i in range(group_size):#sum up all g matrices into one.
            global_g_matrix += agent_list[i].g_matrix
        for i in range(group_size):#update the h matrix.
            # standard PS update: forgetting (gamma) pulls h back toward 1,
            # the shared (collective) g matrix is reinforced by the reward
            agent_list[i].h_matrix = agent_list[i].h_matrix - agent_list[i].gamma_damping * (agent_list[i].h_matrix - 1.) + global_g_matrix * reward
    #save data for this population
    learning_curve_allpop[pop]=learning_curve
    for i in range(env.num_percepts_list[0]):
        prob_stinging_allpop[pop,i]=agent_list[0].h_matrix[1,i]/np.sum(agent_list[0].h_matrix[:,i])#all agents have same h matrix.
    number_stung_allpop[pop]=number_stung
    which_sting[pop]=np.sum(which_stingevol,axis=0)/1000
    predator_sth_allpop[pop]=sth
    predator_kills_allpop[pop]=kills
|
{"hexsha": "2f08f9a7e5f29f666a7083e8369d18f210e5dca2", "size": 6353, "ext": "py", "lang": "Python", "max_stars_repo_path": "learning.py", "max_stars_repo_name": "qic-ibk/CollectiveStinging", "max_stars_repo_head_hexsha": "da59a19e9e4cab6dbfd91bfb63d982bfd4ee0f70", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "learning.py", "max_issues_repo_name": "qic-ibk/CollectiveStinging", "max_issues_repo_head_hexsha": "da59a19e9e4cab6dbfd91bfb63d982bfd4ee0f70", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "learning.py", "max_forks_repo_name": "qic-ibk/CollectiveStinging", "max_forks_repo_head_hexsha": "da59a19e9e4cab6dbfd91bfb63d982bfd4ee0f70", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.8137931034, "max_line_length": 166, "alphanum_fraction": 0.7023453487, "include": true, "reason": "import numpy", "num_tokens": 1532}
|
#include <stdlib.h>
#include <math.h>
#include <gsl/gsl_matrix.h>
#include <gsl/gsl_permutation.h>
#include <gsl/gsl_randist.h>
#include <gsl/gsl_rng.h>
#include "design.h"
/*This function computes the p-distance between 2 points in D dimensions:
the exponent p is tunable*/
double distance(double *x,double *y,int D,double p){
double dist = 0.0;
int i;
for(i=0;i<D;i++){
dist += pow(fabs(x[i]-y[i]),p);
}
return pow(dist,1.0/p);
}
/* Cost function of the design problem: the average over all distinct point
   pairs of ( D^(1/p) / pairdistance )^lambda, with lambda a tunable
   exponent.  Note the final 1/lambda exponentiation is NOT applied here. */
double cost(gsl_matrix *data,int Npoints,int D,double p,double lambda){
	double total = 0.0;
	int m,k;
	for(m=0;m<Npoints;m++){
		for(k=m+1;k<Npoints;k++){
			/* contribution of the (m,k) pair */
			double d_mk = distance(gsl_matrix_ptr(data,m,0),gsl_matrix_ptr(data,k,0),D,p);
			total += pow(pow(D,1.0/p)/d_mk,lambda);
		}
	}
	return (2.0/(Npoints*(Npoints-1)))*total;
}
/*This computes the cost function in the particular case in which all the points are
equally spaced on the diagonal of the hypercube*/
double diagonalCost(int Npoints,double lambda){
double sum = 0.0;
int i,j;
for(i=0;i<Npoints;i++){
for(j=i+1;j<Npoints;j++){
//Add the contribution of pair (i,j) to the cost function
sum += pow((Npoints-1)*1.0/(j-i),lambda);
}
}
return (2.0/(Npoints*(Npoints-1)))*sum;
}
/*This function computes the variation of the cost when a pair of coordinates is exchanged;
this function also performs an in-place swap of the coordinates. More specifically:
this function swaps coordinate d of points i1 and i2 and returns the variation of the cost
function due to this swap*/
/* Only the pairs involving i1 or i2 can change, so the delta is computed
   from those pairs alone.  The (i1,i2) pair itself is excluded: swapping
   one coordinate between the two points merely flips the sign of that
   coordinate difference, and distance() uses fabs, so their mutual
   distance is unchanged.  NOTE: data is left in the SWAPPED state; the
   caller must use swapBack() to undo a rejected move. */
double swap(gsl_matrix *data,int Npoints,int D,double p,double lambda,int i1,int i2, int d){
	double costBefore,costAfter,temp;
	int i;
	//initialize to 0
	costBefore = costAfter = 0.0;
	/*compute the contribution of points i1 and i2 to the cost function, before swapping;
	sum over all the particles except i1 and i2*/
	for(i=0;i<Npoints;i++){
		if(i!=i1 && i!=i2){
			costBefore += pow(pow(D,1.0/p)/distance(gsl_matrix_ptr(data,i,0),gsl_matrix_ptr(data,i1,0),D,p),lambda) + pow(pow(D,1.0/p)/distance(gsl_matrix_ptr(data,i,0),gsl_matrix_ptr(data,i2,0),D,p),lambda);
		}
	}
	//perform the coordinate swap
	temp = gsl_matrix_get(data,i1,d);
	gsl_matrix_set(data,i1,d,gsl_matrix_get(data,i2,d));
	gsl_matrix_set(data,i2,d,temp);
	/*compute the contribution of points i1 and i2 to the cost function, after swapping;
	sum over all the particles except i1 and i2*/
	for(i=0;i<Npoints;i++){
		if(i!=i1 && i!=i2){
			costAfter += pow(pow(D,1.0/p)/distance(gsl_matrix_ptr(data,i,0),gsl_matrix_ptr(data,i1,0),D,p),lambda) + pow(pow(D,1.0/p)/distance(gsl_matrix_ptr(data,i,0),gsl_matrix_ptr(data,i2,0),D,p),lambda);
		}
	}
	//return the cost difference
	/* normalized the same way as cost(), so deltas are directly comparable */
	return (2.0/(Npoints*(Npoints-1)))*(costAfter - costBefore);
}
/* Undo a coordinate exchange performed by swap(): exchanging coordinate d
   of points i1 and i2 again restores the original configuration (the
   operation is its own inverse).  Needed when a trial swap did not
   improve the cost function. */
void swapBack(gsl_matrix *data,int i1,int i2,int d){
	double held;
	//perform the coordinate swap
	held = gsl_matrix_get(data,i1,d);
	gsl_matrix_set(data,i1,d,gsl_matrix_get(data,i2,d));
	gsl_matrix_set(data,i2,d,held);
}
/*Main design sampler*/
/* Generates a latin-hypercube design of Npoints points in [0,1]^D and then
   greedily improves it: repeatedly swap one random coordinate between a
   random pair of points, keeping the swap only if it lowers the cost.
   On exit `data` holds the final design and `costValues` the cost after
   each of the maxIterations iterations.  Returns the relative cost change
   (deltaCost/currentCost) of the LAST ACCEPTED swap, or 0.0 if no swap
   was ever accepted. */
double sample(int Npoints,int D,double p,double lambda,int seed,int maxIterations,gsl_matrix *data,double *costValues){
	int i,d,i1,i2,iterCount;
	const gsl_rng_type *T;
	gsl_rng *r;
	gsl_permutation *perm = gsl_permutation_alloc(Npoints);
	double currentCost,deltaCost,deltaPerc;
	//Initialize random number generator
	gsl_rng_env_setup();
	T = gsl_rng_default;
	r = gsl_rng_alloc(T);
	//Initialize permutation
	gsl_permutation_init(perm);
	//Initialize random number generator with provided seed
	gsl_rng_set(r,seed);
	//Initialize the point coordinates in data with random permutations of (1..Npoints) to enforce latin hypercube structure
	/* each coordinate axis holds a different random permutation of the
	   grid values k/(Npoints-1), k = 0..Npoints-1, so every axis bin
	   contains exactly one point */
	for(d=0;d<D;d++){
		//Shuffle the numbers
		gsl_ran_shuffle(r,perm->data,Npoints,sizeof(size_t));
		//Permute coordinates
		for(i=0;i<Npoints;i++){
			gsl_matrix_set(data,i,d,(double)perm->data[i]/(Npoints-1));
		}
	}
	/*The loop does the following: it swaps a random coordinate of a random pair,
	checks if the cost is lower. If so, it keeps the configuration, otherwise it
	reverses it and tries a new one.*/
	iterCount = 0;
	currentCost = cost(data,Npoints,D,p,lambda);
	deltaPerc = 0.0;
	while(1){
		//Decide which coordinate to swap of which pair
		/* rejection loop guarantees i2 != i1 */
		i1 = gsl_rng_uniform_int(r,Npoints);
		while((i2=gsl_rng_uniform_int(r,Npoints))==i1);
		d = gsl_rng_uniform_int(r,D);
		//Compute the change in the cost function
		/* swap() mutates data in place and returns the cost delta */
		deltaCost = swap(data,Npoints,D,p,lambda,i1,i2,d);
		/*Check if gain in cost is positive or negative: if positive, revert the swap, if negative keep it;
		anyway, log the result*/
		if(deltaCost>=0){
			swapBack(data,i1,i2,d);
		} else{
			currentCost += deltaCost;
			deltaPerc = deltaCost/currentCost;
		}
		//Save the current cost to array
		/* costValues must therefore hold at least maxIterations entries */
		costValues[iterCount] = currentCost;
		//Criterion to break the loop
		if(++iterCount == maxIterations) break;
	}
	//Release resources for random number generator and permutations
	gsl_rng_free(r);
	gsl_permutation_free(perm);
	//Return the relative cost change due to the last iteration
	return deltaPerc;
}
|
{"hexsha": "f99fec28cf49bce2c454c7f2beae670453f83130", "size": 5389, "ext": "c", "lang": "C", "max_stars_repo_path": "lenstools/extern/design.c", "max_stars_repo_name": "asabyr/LensTools", "max_stars_repo_head_hexsha": "e155d6d39361e550906cec00dbbc57686a4bca5c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2021-04-27T02:03:11.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-27T02:03:11.000Z", "max_issues_repo_path": "lenstools/extern/design.c", "max_issues_repo_name": "asabyr/LensTools", "max_issues_repo_head_hexsha": "e155d6d39361e550906cec00dbbc57686a4bca5c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lenstools/extern/design.c", "max_forks_repo_name": "asabyr/LensTools", "max_forks_repo_head_hexsha": "e155d6d39361e550906cec00dbbc57686a4bca5c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.9086538462, "max_line_length": 199, "alphanum_fraction": 0.7133048803, "num_tokens": 1542}
|
'''This script will perform a classification task on the data generated in sim_data.py.
Each positive sample has approximately half of its variants carry a specific sequence.
It is a simple task so should quickly achieve perfect accuracy unless you start with bad weights.
Note: the loss will be much higher than the cross entropy loss due to regularization.'''
##perform imports and set the GPU you want to use
import numpy as np
import pickle
import tensorflow as tf
from tensorflow.python.framework.ops import disable_eager_execution
from sklearn.model_selection import StratifiedShuffleSplit, StratifiedKFold
disable_eager_execution()
# restrict TF to the last listed GPU, with on-demand memory growth
physical_devices = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[-1], True)
tf.config.experimental.set_visible_devices(physical_devices[-1], 'GPU')
import pathlib
path = pathlib.Path.cwd()
# locate the repository root (the 'ATGC' directory) so project modules can be imported
if path.stem == 'ATGC':
    cwd = path
else:
    cwd = list(path.parents)[::-1][path.parts.index('ATGC')]
import sys
sys.path.append(str(cwd))
from model.CustomKerasModels import InputFeatures, ATGC
from model.CustomKerasTools import BatchGenerator, Losses
##load the instance and sample data
D, samples = pickle.load(open(cwd / 'figures' / 'controls' / 'samples' / 'sim_data.pkl', 'rb'))
##perform embeddings with a zero vector for index 0 (index 0 acts as padding)
strand_emb_mat = np.concatenate([np.zeros(2)[np.newaxis, :], np.diag(np.ones(2))], axis=0)
D['strand_emb'] = strand_emb_mat[D['strand']]
chr_emb_mat = np.concatenate([np.zeros(24)[np.newaxis, :], np.diag(np.ones(24))], axis=0)
D['chr_emb'] = chr_emb_mat[D['chr']]
frame_emb_mat = np.concatenate([np.zeros(3)[np.newaxis, :], np.diag(np.ones(3))], axis=0)
D['cds_emb'] = frame_emb_mat[D['cds']]
##choose your instance concepts, here a sequence concept of length 6, embedding dim 4, strand 2, and 4 kernels per 5p, 3p, ref, alt.
features = [InputFeatures.VariantSequence(6, 4, 2, [4, 4, 4, 4],
                                          {'5p': D['seq_5p'], '3p': D['seq_3p'], 'ref': D['seq_ref'], 'alt': D['seq_alt'], 'strand': D['strand_emb'], 'cds': D['cds_emb']},
                                          use_frame=False, fusion_dimension=64)
            ]
##choose your sample concepts
sample_features = ()
# set y label (one-hot) and inverse-class-frequency sample weights
y_label = np.stack([[0, 1] if i==1 else [1, 0] for i in samples['classes']])
y_strat = np.argmax(y_label, axis=-1)
class_counts = dict(zip(*np.unique(y_strat, return_counts=True)))
y_weights = np.array([1 / class_counts[_] for _ in y_strat])
y_weights /= np.sum(y_weights)
##build the model
atgc = ATGC(features, sample_features=sample_features, aggregation_dimension=64, fusion_dimension=32)
atgc.build_instance_encoder_model(return_latent=False)
atgc.build_sample_encoder_model()
atgc.build_mil_model(output_dim=y_label.shape[1], output_extra=1, output_type='anlulogits', aggregation='recursion', mil_hidden=(16, 8))
metrics = [Losses.Weighted.CrossEntropyfromlogits.cross_entropy_weighted, Losses.Weighted.Accuracy.accuracy]
atgc.mil_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001, clipvalue=10000), loss=Losses.Weighted.CrossEntropyfromlogits.cross_entropy_weighted, metrics=metrics)
callbacks = [tf.keras.callbacks.EarlyStopping(monitor='val_cross_entropy_weighted', min_delta=0.001, patience=5, mode='min', restore_best_weights=True)]
# remember the freshly-initialized weights so every fold starts from the same point
initial_weights = atgc.mil_model.get_weights()
##perform 8 fold stratification
weights = []
test_idxs = []
for idx_train, idx_test in StratifiedKFold(n_splits=8, random_state=0, shuffle=True).split(y_strat, y_strat):
    # carve a 50-sample stratified validation set out of the training fold
    idx_train, idx_valid = [idx_train[idx] for idx in list(StratifiedShuffleSplit(n_splits=1, test_size=50, random_state=0).split(np.zeros_like(y_strat)[idx_train], y_strat[idx_train]))[0]]
    batch_gen_train = BatchGenerator(x_instance_sample_idx=D['sample_idx'], x_instance_features=features, x_sample=sample_features,
                                     y_label=y_label, y_stratification=y_strat, y_weights=y_weights, sampling_approach='minibatch', batch_size=32, idx_sample=idx_train)
    data_valid = next(BatchGenerator(x_instance_sample_idx=D['sample_idx'], x_instance_features=features, x_sample=sample_features,
                                     y_label=y_label, y_stratification=y_strat, y_weights=y_weights, sampling_approach=None, idx_sample=idx_valid).data_generator())
    atgc.mil_model.set_weights(initial_weights)
    # epochs is effectively unbounded; EarlyStopping on validation loss ends training
    atgc.mil_model.fit(batch_gen_train.data_generator(),
                       steps_per_epoch=batch_gen_train.n_splits,
                       epochs=10000,
                       validation_data=data_valid,
                       shuffle=False,
                       callbacks=callbacks)
    weights.append(atgc.mil_model.get_weights())
    test_idxs.append(idx_test)
##check evaluations on the test set for each Kfold
for weight, idx in zip(weights, test_idxs):
    atgc.mil_model.set_weights(weight)
    data_test = next(BatchGenerator(x_instance_sample_idx=D['sample_idx'], x_instance_features=features, x_sample=sample_features,
                                    y_label=y_label, y_stratification=y_strat, y_weights=y_weights, sampling_approach=None, idx_sample=idx).data_generator())
    print(atgc.mil_model.evaluate(data_test[0], data_test[1]))
|
{"hexsha": "4064535a4ff1ca8023bbd401ae9c122e3644c0ef", "size": 5256, "ext": "py", "lang": "Python", "max_stars_repo_path": "figures/controls/samples/sim_run.py", "max_stars_repo_name": "OmnesRes/ATGC", "max_stars_repo_head_hexsha": "c4fc4d6a0ac99bf083232686dcd0b634ff597f8a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "figures/controls/samples/sim_run.py", "max_issues_repo_name": "OmnesRes/ATGC", "max_issues_repo_head_hexsha": "c4fc4d6a0ac99bf083232686dcd0b634ff597f8a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "figures/controls/samples/sim_run.py", "max_forks_repo_name": "OmnesRes/ATGC", "max_forks_repo_head_hexsha": "c4fc4d6a0ac99bf083232686dcd0b634ff597f8a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.5849056604, "max_line_length": 189, "alphanum_fraction": 0.7300228311, "include": true, "reason": "import numpy", "num_tokens": 1289}
|
import torch
import numpy as np
import torch.nn as nn
import warnings
warnings.filterwarnings('ignore')
# Select the compute device: prefer CUDA when available, else fall back to CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def entropy_threshold(teacher, confidence_loader, n_class):
    """Estimate a per-class entropy threshold from a teacher's predictions.

    For every sample in ``confidence_loader`` the entropy of the teacher's
    softmax output is computed and bucketed under the teacher's top-1
    predicted class.  For each class, the threshold is the mean entropy of
    its (up to) 100 lowest-entropy (most confident) samples.

    Args:
        teacher: model producing class logits of shape (batch, n_class).
        confidence_loader: iterable yielding (image, label) batches.
        n_class: number of classes; must match the teacher's output width.

    Returns:
        dict mapping class index -> mean entropy of that class's most
        confident samples (0.0 for classes that received no samples).
    """
    # nn.Softmax with an explicit dim: the dim-less form is deprecated and
    # only worked here because the warning filter silenced it.
    soft = nn.Softmax(dim=-1)
    sample_entropy = {i: [] for i in range(n_class)}
    for image, label in confidence_loader:
        image = image.to(device)
        with torch.no_grad():
            batch_pred = teacher(image)
        for logits in batch_pred:
            final_pred = soft(logits)
            # class with the highest probability for this sample
            top_class = final_pred.topk(2)[1][0].item()
            final_pred = final_pred.cpu().numpy()
            entropy = 0.0
            for k in range(n_class):
                entropy += (-1 * final_pred[k] * np.log(final_pred[k]))
            sample_entropy[top_class].append(entropy)
    new_entropy = {}
    for i in range(n_class):
        # keep the (up to) 100 most confident samples of this class
        confident = sorted(sample_entropy[i])[:100]
        # BUG FIX: the original divided by a hard-coded 100, which understates
        # the mean whenever a class has fewer than 100 samples; divide by the
        # actual count instead (0.0 for empty classes, as before).
        new_entropy[i] = sum(confident) / len(confident) if confident else 0.0
    return new_entropy
|
{"hexsha": "b9368fdceb17df772463c7191e5b3358766f1f29", "size": 1401, "ext": "py", "lang": "Python", "max_stars_repo_path": "entropy.py", "max_stars_repo_name": "shauryat97/SampleSelectionBasedKnowledgeDistillation", "max_stars_repo_head_hexsha": "13cb7e201378230cada0cc7476d1b517da75129e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "entropy.py", "max_issues_repo_name": "shauryat97/SampleSelectionBasedKnowledgeDistillation", "max_issues_repo_head_hexsha": "13cb7e201378230cada0cc7476d1b517da75129e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "entropy.py", "max_forks_repo_name": "shauryat97/SampleSelectionBasedKnowledgeDistillation", "max_forks_repo_head_hexsha": "13cb7e201378230cada0cc7476d1b517da75129e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.5813953488, "max_line_length": 71, "alphanum_fraction": 0.5881513205, "include": true, "reason": "import numpy", "num_tokens": 320}
|
/*
Copyright (c) 2016, 2020, Arvid Norberg
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TORRENT_STORE_BUFFER
#define TORRENT_STORE_BUFFER
#include <unordered_map>
#include <mutex>
#include "libtorrent/storage_defs.hpp"
#include "libtorrent/aux_/disable_warnings_push.hpp"
#include <boost/functional/hash.hpp>
#include "libtorrent/aux_/disable_warnings_pop.hpp"
namespace libtorrent {
namespace aux {
// uniquely identifies a torrent and offset. It is used as the key in the
// dictionary mapping locations to write jobs
// Uniquely identifies a (torrent, piece, byte-offset) location. It is used as
// the key in the dictionary mapping locations to write jobs.
struct torrent_location
{
	torrent_location(storage_index_t const t, piece_index_t const p, int o)
		: torrent(t), piece(p), offset(o) {}

	storage_index_t torrent;
	piece_index_t piece;
	int offset;

	// two locations are equal when all three components match
	bool operator==(torrent_location const& rhs) const
	{
		return torrent == rhs.torrent
			&& piece == rhs.piece
			&& offset == rhs.offset;
	}
};
}
}
namespace std {

	// std::hash specialisation so torrent_location can be used as the key of a
	// std::unordered_map (see store_buffer below)
	template <>
	struct hash<libtorrent::aux::torrent_location>
	{
		using argument_type = libtorrent::aux::torrent_location;
		using result_type = std::size_t;
		std::size_t operator()(argument_type const& l) const
		{
			using namespace libtorrent;
			std::size_t ret = 0;
			// fold the hash of each field into the running seed
			boost::hash_combine(ret, std::hash<storage_index_t>{}(l.torrent));
			boost::hash_combine(ret, std::hash<piece_index_t>{}(l.piece));
			boost::hash_combine(ret, std::hash<int>{}(l.offset));
			return ret;
		}
	};
}
namespace libtorrent {
namespace aux {
// Thread-safe map of pending write buffers, keyed by torrent location.
// All operations take the internal mutex.
struct store_buffer
{
	// If a buffer is stored for ``loc``, invoke ``f`` on it and return true;
	// otherwise return false. NOTE(review): ``f`` is called while the mutex is
	// held, so it must not re-enter this store_buffer or block for long.
	template <typename Fun>
	bool get(torrent_location const loc, Fun f)
	{
		std::unique_lock<std::mutex> l(m_mutex);
		auto it = m_store_buffer.find(loc);
		if (it != m_store_buffer.end())
		{
			f(it->second);
			return true;
		}
		return false;
	}

	// Record ``buf`` as the pending write for ``loc``.
	void insert(torrent_location const loc, char* buf)
	{
		std::lock_guard<std::mutex> l(m_mutex);
		m_store_buffer.insert({loc, buf});
	}

	// Remove the entry for ``loc``; asserts that it exists.
	void erase(torrent_location const loc)
	{
		std::lock_guard<std::mutex> l(m_mutex);
		auto it = m_store_buffer.find(loc);
		TORRENT_ASSERT(it != m_store_buffer.end());
		m_store_buffer.erase(it);
	}

private:
	std::mutex m_mutex;
	// maps a write location to the buffer pending to be written there
	std::unordered_map<torrent_location, char*> m_store_buffer;
};
}
}
#endif
|
{"hexsha": "0b5c5351084b83272316284995bc5fee7a99732f", "size": 3585, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/libtorrent/aux_/store_buffer.hpp", "max_stars_repo_name": "bitwiseworks/libtorrent-os2", "max_stars_repo_head_hexsha": "6bb656e0938ee517b87ecdc3f9309890691a0d11", "max_stars_repo_licenses": ["BSL-1.0", "BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/libtorrent/aux_/store_buffer.hpp", "max_issues_repo_name": "bitwiseworks/libtorrent-os2", "max_issues_repo_head_hexsha": "6bb656e0938ee517b87ecdc3f9309890691a0d11", "max_issues_repo_licenses": ["BSL-1.0", "BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/libtorrent/aux_/store_buffer.hpp", "max_forks_repo_name": "bitwiseworks/libtorrent-os2", "max_forks_repo_head_hexsha": "6bb656e0938ee517b87ecdc3f9309890691a0d11", "max_forks_repo_licenses": ["BSL-1.0", "BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.3664122137, "max_line_length": 78, "alphanum_fraction": 0.7520223152, "num_tokens": 840}
|
function convective_adjust!(x)
    # Remove negative gradients from a temperature profile in place, sweeping
    # from index length(x)-3 down to 2 (the top three entries and the two
    # boundary entries are left untouched).
    for i in reverse(2:length(x)-3)
        x[i] <= x[i+1] && continue      # gradient already non-negative
        if x[i-1] > x[i]
            x[i] = x[i+1]               # clamp to the neighbour above
        else
            x[i] = (x[i-1] + x[i+1]) / 2  # replace with the neighbours' mean
        end
    end
end
|
{"hexsha": "26ddac46de15d47d64fd46c5f7fbe62e23049f75", "size": 270, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/data/convective_adjust.jl", "max_stars_repo_name": "adelinehillier/LearnConvection", "max_stars_repo_head_hexsha": "2a5b0cebe1a31777578293d59bff60b0808343b0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-09-14T19:53:44.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-15T08:51:59.000Z", "max_issues_repo_path": "src/data/convective_adjust.jl", "max_issues_repo_name": "adelinehillier/LearnConvection", "max_issues_repo_head_hexsha": "2a5b0cebe1a31777578293d59bff60b0808343b0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-09-06T00:19:00.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-06T00:19:00.000Z", "max_forks_repo_path": "src/data/convective_adjust.jl", "max_forks_repo_name": "adelinehillier/LearnConvection", "max_forks_repo_head_hexsha": "2a5b0cebe1a31777578293d59bff60b0808343b0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.5454545455, "max_line_length": 56, "alphanum_fraction": 0.4851851852, "num_tokens": 91}
|
function p = vonMises_prob( x, m, k, use_log )
%VONMISES_PROB Probability (or log-probability) of angles x under a
%Von Mises distribution with mean direction m and concentration k.
%   p = vonMises_prob( x, m, k, use_log )
%   x is d-by-N; set use_log=1 to obtain log densities.

if nargin < 4, use_log = 0; end
N = size(x, 2);
mu = m(:) * ones(1, N);              % replicate the mean across all N columns
logKernel = k * cos(x - mu);         % unnormalized log density
normConst = (2*pi) * besseli(0, k);  % Von Mises normalizing constant
if use_log
  p = logKernel - log(normConst);
else
  p = exp(logKernel) / normConst;
end
|
{"author": "bayesnet", "repo": "bnt", "sha": "bebba5f437b4e1e29169f0f3669df59fb5392e62", "save_path": "github-repos/MATLAB/bayesnet-bnt", "path": "github-repos/MATLAB/bayesnet-bnt/bnt-bebba5f437b4e1e29169f0f3669df59fb5392e62/KPMstats/vonMises_prob.m"}
|
function extract_params!(pm)
    # Pop the "user_defined_params" entry out of `pm` (mutating it) and return
    # both the stripped dictionary and the extracted value.
    #
    # BUG FIX: the original returned the misspelled, never-defined name
    # `user_defined_param`, raising UndefVarError on every call, and left the
    # return value undefined when the key was absent.  A missing key now
    # yields `nothing`.
    user_defined_params = nothing
    if haskey(pm, "user_defined_params")
        user_defined_params = pm["user_defined_params"]
        delete!(pm, "user_defined_params")
    end
    return pm, user_defined_params
end
|
{"hexsha": "4bf1f6fb194bef239890b34535036939fb658242", "size": 266, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/input/tools.jl", "max_stars_repo_name": "lvzhibai/PandaModels.jl", "max_stars_repo_head_hexsha": "4f69c5d4bac95904039413478d0dbc8e734b01cd", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/input/tools.jl", "max_issues_repo_name": "lvzhibai/PandaModels.jl", "max_issues_repo_head_hexsha": "4f69c5d4bac95904039413478d0dbc8e734b01cd", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/input/tools.jl", "max_forks_repo_name": "lvzhibai/PandaModels.jl", "max_forks_repo_head_hexsha": "4f69c5d4bac95904039413478d0dbc8e734b01cd", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.6, "max_line_length": 55, "alphanum_fraction": 0.6729323308, "num_tokens": 66}
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'maxim'
import ast
import collections
import numbers
import os
import random
import string
import sys
from six import iteritems
from six.moves import urllib
import numpy as np
def smart_str(val):
    """Pretty-print ``val`` for logging: non-zero floats use fixed-point (or
    scientific notation for tiny magnitudes), dicts render with sorted keys,
    lists/tuples recurse, everything else falls back to ``repr``."""
    if type(val) in [float, np.float32, np.float64] and val:
        if abs(val) > 1e-6:
            return '%.6f' % val
        return '%e' % val
    if type(val) == dict:
        rendered = ['%s: %s' % (repr(key), smart_str(val[key])) for key in sorted(val.keys())]
        return '{%s}' % ', '.join(rendered)
    if type(val) in [list, tuple]:
        return '[%s]' % ', '.join(smart_str(item) for item in val)
    return repr(val)
def str_to_dict(s):
    """Parse a Python-literal string (e.g. "{'a': 1}") into the object it denotes."""
    parsed = ast.literal_eval(s)
    return parsed
def zip_longest(list1, list2):
    """Pair elements of the two lists, cycling the shorter one, so the output
    has max(len(list1), len(list2)) pairs.  Unlike itertools.zip_longest no
    fill value is used; both lists must be non-empty."""
    n1, n2 = len(list1), len(list2)
    for idx in range(max(n1, n2)):
        yield list1[idx % n1], list2[idx % n2]
def deep_update(dict_, upd):
    """Recursively merge ``upd`` into ``dict_`` (in place) and return ``dict_``.

    Nested mappings are merged key-by-key; any non-mapping value in ``upd``
    overwrites the corresponding entry of ``dict_``.
    """
    # BUG FIX: collections.Mapping was removed in Python 3.10; use
    # collections.abc.Mapping (with a fallback for very old interpreters).
    try:
        from collections.abc import Mapping
    except ImportError:  # Python < 3.3 fallback
        Mapping = collections.Mapping
    # upd.items() iterates the same pairs six.iteritems did on Python 3.
    for key, value in upd.items():
        if isinstance(value, Mapping):
            dict_[key] = deep_update(dict_.get(key, {}), value)
        else:
            dict_[key] = value
    return dict_
def mini_batch(total, size):
    """Return (start, end) index pairs tiling ``total`` items in chunks of
    ``size``; the final pair's end may exceed ``total``."""
    starts = range(0, total, size)
    ends = range(size, total + size, size)
    return zip(starts, ends)
def random_id(size=6, chars=string.ascii_uppercase + string.digits):
    """Generate a random identifier of ``size`` characters drawn (with
    replacement) from ``chars``."""
    picks = [random.choice(chars) for _ in range(size)]
    return ''.join(picks)
def safe_concat(list_):
    """Drop ``None`` entries and concatenate the rest: numpy arrays are joined
    with np.concatenate, anything else is returned as the filtered list.
    Returns None when nothing remains."""
    kept = [item for item in list_ if item is not None]
    if not kept:
        return None
    if type(kept[0]) == np.ndarray:
        return np.concatenate(kept)
    return kept
def call(obj, *args):
    """Invoke ``obj`` with ``args``: directly if it is callable, otherwise via
    its ``apply`` method; returns None when neither is possible."""
    if callable(obj):
        return obj(*args)
    apply_method = getattr(obj, 'apply', None)
    if callable(apply_method):
        return apply_method(*args)
def slice_dict(d, key_prefix):
    """Select the entries of ``d`` whose keys start with ``key_prefix``,
    stripping that prefix from the keys of the returned dict."""
    offset = len(key_prefix)
    return {key[offset:]: value for key, value in iteritems(d) if key.startswith(key_prefix)}
def as_function(val, presets, default=None):
    """Resolve ``val`` to a callable: callables pass through; otherwise ``val``
    is looked up in ``presets`` (falling back to ``default``).  Raises
    ValueError when nothing matches."""
    if callable(val):
        return val
    resolved = presets.get(val, default)
    if resolved is None:
        raise ValueError('Value is not recognized: ', val)
    return resolved
def as_numeric_function(val, presets, default=None):
    """Like ``as_function``, except that a bare number becomes a constant
    function returning that number (ignoring any call arguments)."""
    if not isinstance(val, numbers.Number):
        return as_function(val, presets, default)

    def constant(*_):
        return val

    return constant
def download_if_needed(url, path, filename=None):
    """Download ``url`` into directory ``path`` unless the file already exists.

    Parameters:
      url: source URL; its basename is used when ``filename`` is not given.
      path: destination directory (created if missing).
      filename: optional explicit file name inside ``path``.
    Returns the full path of the (possibly pre-existing) file.
    """
    from .logging import info, debug
    if not os.path.exists(path):
        os.makedirs(path)
    filename = filename or os.path.basename(url)
    full_path = os.path.join(path, filename)
    if not os.path.exists(full_path):
        info('Downloading %s, please wait...' % filename)
        # _report_hook prints a progress line while urlretrieve streams the file
        result_path, _ = urllib.request.urlretrieve(url, full_path, _report_hook)
        stat = os.stat(result_path)
        info('Successfully downloaded "%s" (%d Kb)' % (filename, stat.st_size / 1024))
        return result_path
    else:
        debug('Already downloaded:', full_path)
        return full_path
def _report_hook(block_num, block_size, total_size):
read_so_far = block_num * block_size
if total_size > 0:
percent = read_so_far * 1e2 / total_size
s = '\r%5.1f%% %*d / %d' % (percent, len(str(total_size)), read_so_far, total_size)
sys.stdout.write(s)
if read_so_far >= total_size: # near the end
sys.stdout.write('\n')
else: # total size is unknown
sys.stdout.write('read %d\n' % (read_so_far,))
|
{"hexsha": "bb7cdc8c38bf6aa850a48f04de72443577c3a532", "size": 3243, "ext": "py", "lang": "Python", "max_stars_repo_path": "hyperengine/base/util.py", "max_stars_repo_name": "KOLANICH/hyper-engine", "max_stars_repo_head_hexsha": "60ba73438fdbef9320a849ee65f36da977f68eca", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "hyperengine/base/util.py", "max_issues_repo_name": "KOLANICH/hyper-engine", "max_issues_repo_head_hexsha": "60ba73438fdbef9320a849ee65f36da977f68eca", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hyperengine/base/util.py", "max_forks_repo_name": "KOLANICH/hyper-engine", "max_forks_repo_head_hexsha": "60ba73438fdbef9320a849ee65f36da977f68eca", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.025, "max_line_length": 100, "alphanum_fraction": 0.6688251619, "include": true, "reason": "import numpy", "num_tokens": 896}
|
[STATEMENT]
lemma subgraph_no_last_branch_chain:
assumes "subgraph C T"
and "finite (verts T)"
and "verts C \<subseteq> verts T - {x. \<exists>y\<in>last_branching_points. x \<rightarrow>\<^sup>*\<^bsub>T\<^esub> y}"
shows "wf_digraph.is_chain C"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. wf_digraph.is_chain C
[PROOF STEP]
using assms finite_branch_impl_last_branch subgraph_no_branch_chain last_branch_is_branch
[PROOF STATE]
proof (prove)
using this:
Shortest_Path_Tree.subgraph C T
finite (verts T)
verts C \<subseteq> verts T - {x. \<exists>y\<in>last_branching_points. x \<rightarrow>\<^sup>*\<^bsub>T\<^esub> y}
\<lbrakk>finite (verts T); \<exists>y\<in>branching_points. ?x \<rightarrow>\<^sup>*\<^bsub>T\<^esub> y; directed_tree T ?r\<rbrakk> \<Longrightarrow> \<exists>z\<in>last_branching_points. ?x \<rightarrow>\<^sup>*\<^bsub>T\<^esub> z
\<lbrakk>Shortest_Path_Tree.subgraph ?C T; verts ?C \<subseteq> verts T - {x. \<exists>y\<in>branching_points. x \<rightarrow>\<^sup>*\<^bsub>T\<^esub> y}\<rbrakk> \<Longrightarrow> wf_digraph.is_chain ?C
?y \<in> last_branching_points \<Longrightarrow> ?y \<in> branching_points
goal (1 subgoal):
1. wf_digraph.is_chain C
[PROOF STEP]
by (smt (verit, ccfv_SIG) Collect_cong directed_tree_axioms)
|
{"llama_tokens": 503, "file": "Query_Optimization_Directed_Tree_Additions", "length": 2}
|
! nicked and adapted from IFEFFIT, the Interactive XAFS Analysis Library
!
! No-op stand-ins for the PGPLOT plotting API, used to satisfy the linker
! when PGPLOT itself is not installed.  Each routine takes the same
! arguments as the real library call and does nothing.
      SUBROUTINE PGVPORT (XLEFT, XRIGHT, YBOT, YTOP)
      REAL XLEFT, XRIGHT, YBOT, YTOP
      END
      SUBROUTINE PGWNAD (X1, X2, Y1, Y2)
      REAL X1, X2, Y1, Y2
      END
      SUBROUTINE PGCONS (A, IDIM, JDIM, I1, I2, J1, J2, C, NC, TR)
      INTEGER IDIM, JDIM, I1, I2, J1, J2, NC
      REAL A(IDIM,JDIM), C(*), TR(6)
      END
! stub returns 1 ("success") so callers proceed without a plotting device
      INTEGER FUNCTION PGBEGIN (UNIT, FILE, NXSUB, NYSUB)
      INTEGER UNIT
      CHARACTER*(*) FILE
      INTEGER NXSUB, NYSUB
      PGBEGIN=1
      END
      SUBROUTINE PGSVP (XLEFT, XRIGHT, YBOT, YTOP)
      REAL XLEFT, XRIGHT, YBOT, YTOP
      END
      SUBROUTINE PGQWIN (X1, X2, Y1, Y2)
      REAL X1, X2, Y1, Y2
      END
      SUBROUTINE PGQVP (UNITS, X1, X2, Y1, Y2)
      INTEGER UNITS
      REAL X1, X2, Y1, Y2
      END
      SUBROUTINE PGLAB (XLBL, YLBL, TOPLBL)
      CHARACTER*(*) XLBL, YLBL, TOPLBL
      END
      SUBROUTINE PGLABEL (XLBL, YLBL, TOPLBL)
      CHARACTER*(*) XLBL, YLBL, TOPLBL
      END
      SUBROUTINE PGENV (XMIN, XMAX, YMIN, YMAX, JUST, AXIS)
      REAL XMIN, XMAX, YMIN, YMAX
      INTEGER JUST, AXIS
      END
c     no-op PGPLOT stubs continued; query routines return benign dummy
c     values (device id 0, cursor position unchanged, character 'a').
      subroutine pgend
      return
      end
      subroutine pgclos
      return
      end
c     stub returns 0 (failure) so callers know no device was opened
      integer function pgopen(i)
      integer i
      pgopen = 0
      return
      end
      subroutine pgqid(i)
      integer i
      i = 0
      return
      end
c
      subroutine pgslct(i)
      integer i
      return
      end
c
c     the one stub with observable behaviour: warns that pgplot is missing
      subroutine pgbeg(i, file, j, k)
      integer i,j, k
      character*(*) file
      print*, ' pgplot not installed'
      return
      end
c
      subroutine pgqinf(type, arg, i)
      integer i
      character*(*) type, arg
      i = 0
      return
      end
c
      subroutine pgpage
      return
      end
c
      subroutine pgbbuf
      return
      end
c
      subroutine pgask(flag)
      logical flag
      return
      end
c
      subroutine pgeras
      return
      end
c
      subroutine pgsls(i)
      integer i
      return
      end
c
      subroutine pgsch(x)
      real x
      return
      end
c
      subroutine pgscf(i)
      integer i
      return
      end
c
      subroutine pgslw(i)
      integer i
      return
      end
c
      subroutine pgvstd
      return
      end
c
      subroutine pgpt1(x1,x2,i)
      real x1,x2
      integer i
      return
      end
c
      subroutine pgrnge(x1,x2,x3,x4)
      real x1,x2,x3,x4
      return
      end
c
      subroutine pgsci(i)
      integer i
      return
      end
c
      subroutine pgswin(x1,x2,x3,x4)
      real x1,x2,x3,x4
      return
      end
c
      subroutine pgbox(s1,x1,i1,s2,x2,i2)
      integer i1, i2
      character*(*) s1, s2
      real x1, x2
      return
      end
c
      subroutine pgmtxt(s1, x1, x2, x3, s2)
      character*(*) s1, s2
      real x1, x2, x3
      return
      end
c
      subroutine pgline(i1,x1,x2)
      integer i1
      real x1(*), x2(*)
      return
      end
c
      subroutine pgpt(i1,x1,x2,i2)
      integer i1, i2
      real x1(*), x2(*)
      return
      end
c
      subroutine pgtext(x1, x2, s1)
      character*(*) s1
      real x1, x2
      return
      end
c
      subroutine pgebuf
      return
      end
c
      subroutine pgscrn(i1,s1,i2)
      character*(*) s1
      integer i1, i2
      return
      end
c
      subroutine pgscr(i,r,g,b)
      integer i
      real r, g, b
      return
      end
c
c     cursor stub: reports position (0,0) and key 'a'
      integer function pgcurs(x,y,c)
      real x,y
      character*(*) c
      pgcurs = 1
      x = 0
      y = 0
      c = 'a'
      return
      end
c
      subroutine pgsah(i,x,y)
      real x,y
      integer i
      return
      end
      subroutine pgarro(x,y,u,v)
      real x,y,u,v
      return
      end
c     band cursor stub: echoes the anchor point and key 'a'
      integer function pgband(m,n,x1,y1,x,y,c)
      real x1,y1,x,y
      character*(*) c
      integer m,n
      pgband = 1
      x = x1
      y = y1
      c = 'a'
      m = 0
      return
      end
      subroutine pgqndt(i)
      integer i
      return
      end
      subroutine pgqdt(i,s1,i2,s2,i3,i4)
      integer i, i2, i3, i4
      character s1*(*), s2*(*)
      return
      end
c     stub for PGERRY (vertical error bars); intentionally does nothing.
c     BUG FIX: the original declared an unused variable i while the dummy
c     argument n was left implicitly typed; declare n explicitly instead.
      subroutine pgerry(n,x,y1,y2,t)
      integer n
      real x(*), y1(*), y2(*), t
      return
      end
c     stub for PGERRX (horizontal error bars); intentionally does nothing.
c     BUG FIX: the original declared an unused variable i while the dummy
c     argument n was left implicitly typed; declare n explicitly instead.
      subroutine pgerrx(n,x,y1,y2,t)
      integer n
      real x(*), y1(*), y2(*), t
      return
      end
|
{"hexsha": "476174d2b9e29cded3aee245bd8cfb913f6f39f9", "size": 4642, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/amuse/community/galactics/gas_src/src/pgstub.f", "max_stars_repo_name": "rknop/amuse", "max_stars_repo_head_hexsha": "85d5bdcc29cfc87dc69d91c264101fafd6658aec", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 131, "max_stars_repo_stars_event_min_datetime": "2015-06-04T09:06:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-01T12:11:29.000Z", "max_issues_repo_path": "src/amuse/community/galactics/gas_src/src/pgstub.f", "max_issues_repo_name": "rknop/amuse", "max_issues_repo_head_hexsha": "85d5bdcc29cfc87dc69d91c264101fafd6658aec", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 690, "max_issues_repo_issues_event_min_datetime": "2015-10-17T12:18:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T16:15:58.000Z", "max_forks_repo_path": "src/amuse/community/galactics/gas_src/src/pgstub.f", "max_forks_repo_name": "rieder/amuse", "max_forks_repo_head_hexsha": "3ac3b6b8f922643657279ddee5c8ab3fc0440d5e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 102, "max_forks_repo_forks_event_min_datetime": "2015-01-22T10:00:29.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-09T13:29:43.000Z", "avg_line_length": 18.6425702811, "max_line_length": 73, "alphanum_fraction": 0.4862128393, "num_tokens": 1502}
|
# ------------------------------------------------------------------------
# Copyright (c) 2021 megvii-model. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR)
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
"""
DETR model and criterion classes.
"""
import copy
import math
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn, Tensor
from typing import List
from util import box_ops
from util.misc import (NestedTensor, nested_tensor_from_tensor_list,
accuracy, get_world_size, interpolate, get_rank,
is_dist_avail_and_initialized, inverse_sigmoid)
from models.structures import Instances, Boxes, pairwise_iou, matched_boxlist_iou
from .backbone import build_backbone
from .matcher import build_matcher
from .deformable_transformer_plus import build_deforamble_transformer
from .qim import build as build_query_interaction_layer
from .memory_bank import build_memory_bank
from .deformable_detr import SetCriterion, MLP
from .segmentation import sigmoid_focal_loss
class ClipMatcher(SetCriterion):
    def __init__(self, num_classes,
                 matcher,
                 weight_dict,
                 losses):
        """ Create the criterion.
        Parameters:
            num_classes: number of object categories, omitting the special no-object category
            matcher: module able to compute a matching between targets and proposals
            weight_dict: dict containing as key the names of the losses and as values their relative weight.
            losses: list of all the losses to be applied. See get_loss for list of available losses.
        """
        super().__init__(num_classes, matcher, weight_dict, losses)
        self.num_classes = num_classes
        self.matcher = matcher
        self.weight_dict = weight_dict
        self.losses = losses
        self.focal_loss = True  # classification uses the focal-loss variant
        self.losses_dict = {}   # per-frame loss terms accumulated over a clip
        self._current_frame_idx = 0  # index of the frame currently being processed
    def initialize_for_single_clip(self, gt_instances: List[Instances]):
        # Reset the per-clip state: one ground-truth Instances per frame,
        # sample counters, and the accumulated loss dictionary.
        self.gt_instances = gt_instances
        self.num_samples = 0
        self.sample_device = None
        self._current_frame_idx = 0
        self.losses_dict = {}
    def _step(self):
        # Advance to the next frame of the clip.
        self._current_frame_idx += 1
    def calc_loss_for_track_scores(self, track_instances: Instances):
        # Classification loss on the track scores of the PREVIOUS frame
        # (the frame counter has already been advanced by _step).
        frame_id = self._current_frame_idx - 1
        gt_instances = self.gt_instances[frame_id]
        outputs = {
            'pred_logits': track_instances.track_scores[None],
        }
        device = track_instances.track_scores.device
        num_tracks = len(track_instances)
        src_idx = torch.arange(num_tracks, dtype=torch.long, device=device)
        tgt_idx = track_instances.matched_gt_idxes  # -1 for FP tracks and disappeared tracks
        track_losses = self.get_loss('labels',
                                     outputs=outputs,
                                     gt_instances=[gt_instances],
                                     indices=[(src_idx, tgt_idx)],
                                     num_boxes=1)
        # store under a frame/term-qualified key so clip losses stay separable
        self.losses_dict.update(
            {'frame_{}_track_{}'.format(frame_id, key): value for key, value in
             track_losses.items()})
    def get_num_boxes(self, num_samples):
        # Loss-normalization constant: average the sample count across
        # distributed workers, clamped to at least 1.
        num_boxes = torch.as_tensor(num_samples, dtype=torch.float, device=self.sample_device)
        if is_dist_avail_and_initialized():
            torch.distributed.all_reduce(num_boxes)
        num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()
        return num_boxes
def get_loss(self, loss, outputs, gt_instances, indices, num_boxes, **kwargs):
loss_map = {
'labels': self.loss_labels,
'cardinality': self.loss_cardinality,
'boxes': self.loss_boxes,
}
assert loss in loss_map, f'do you really want to compute {loss} loss?'
return loss_map[loss](outputs, gt_instances, indices, num_boxes, **kwargs)
def loss_boxes(self, outputs, gt_instances: List[Instances], indices: List[tuple], num_boxes):
"""Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss
targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]
The target boxes are expected in format (center_x, center_y, h, w), normalized by the image size.
"""
# We ignore the regression loss of the track-disappear slots.
#TODO: Make this filter process more elegant.
filtered_idx = []
for src_per_img, tgt_per_img in indices:
keep = tgt_per_img != -1
filtered_idx.append((src_per_img[keep], tgt_per_img[keep]))
indices = filtered_idx
idx = self._get_src_permutation_idx(indices)
src_boxes = outputs['pred_boxes'][idx]
target_boxes = torch.cat([gt_per_img.boxes[i] for gt_per_img, (_, i) in zip(gt_instances, indices)], dim=0)
# for pad target, don't calculate regression loss, judged by whether obj_id=-1
target_obj_ids = torch.cat([gt_per_img.obj_ids[i] for gt_per_img, (_, i) in zip(gt_instances, indices)], dim=0) # size(16)
mask = (target_obj_ids != -1)
loss_bbox = F.l1_loss(src_boxes[mask], target_boxes[mask], reduction='none')
loss_giou = 1 - torch.diag(box_ops.generalized_box_iou(
box_ops.box_cxcywh_to_xyxy(src_boxes[mask]),
box_ops.box_cxcywh_to_xyxy(target_boxes[mask])))
losses = {}
losses['loss_bbox'] = loss_bbox.sum() / num_boxes
losses['loss_giou'] = loss_giou.sum() / num_boxes
return losses
def loss_labels(self, outputs, gt_instances: List[Instances], indices, num_boxes, log=False):
"""Classification loss (NLL)
targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
"""
src_logits = outputs['pred_logits']
idx = self._get_src_permutation_idx(indices)
target_classes = torch.full(src_logits.shape[:2], self.num_classes,
dtype=torch.int64, device=src_logits.device)
# The matched gt for disappear track query is set -1.
labels = []
for gt_per_img, (_, J) in zip(gt_instances, indices):
labels_per_img = torch.ones_like(J)
# set labels of track-appear slots to 0.
if len(gt_per_img) > 0:
labels_per_img[J != -1] = gt_per_img.labels[J[J != -1]]
labels.append(labels_per_img)
target_classes_o = torch.cat(labels)
target_classes[idx] = target_classes_o
if self.focal_loss:
gt_labels_target = F.one_hot(target_classes, num_classes=self.num_classes + 1)[:, :, :-1] # no loss for the last (background) class
gt_labels_target = gt_labels_target.to(src_logits)
loss_ce = sigmoid_focal_loss(src_logits.flatten(1),
gt_labels_target.flatten(1),
alpha=0.25,
gamma=2,
num_boxes=num_boxes, mean_in_dim1=False)
loss_ce = loss_ce.sum()
else:
loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)
losses = {'loss_ce': loss_ce}
if log:
# TODO this should probably be a separate loss, not hacked in this one here
losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0]
return losses
def match_for_single_frame(self, outputs: dict):
outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'}
gt_instances_i = self.gt_instances[self._current_frame_idx] # gt instances of i-th image.
track_instances: Instances = outputs_without_aux['track_instances']
pred_logits_i = track_instances.pred_logits # predicted logits of i-th image.
pred_boxes_i = track_instances.pred_boxes # predicted boxes of i-th image.
obj_idxes = gt_instances_i.obj_ids
obj_idxes_list = obj_idxes.detach().cpu().numpy().tolist()
obj_idx_to_gt_idx = {obj_idx: gt_idx for gt_idx, obj_idx in enumerate(obj_idxes_list)}
outputs_i = {
'pred_logits': pred_logits_i.unsqueeze(0),
'pred_boxes': pred_boxes_i.unsqueeze(0),
}
# step1. inherit and update the previous tracks.
num_disappear_track = 0
for j in range(len(track_instances)):
obj_id = track_instances.obj_idxes[j].item()
# set new target idx.
if obj_id >= 0:
if obj_id in obj_idx_to_gt_idx:
track_instances.matched_gt_idxes[j] = obj_idx_to_gt_idx[obj_id]
else:
num_disappear_track += 1
track_instances.matched_gt_idxes[j] = -1 # track-disappear case.
else:
track_instances.matched_gt_idxes[j] = -1
full_track_idxes = torch.arange(len(track_instances), dtype=torch.long).to(pred_logits_i.device)
matched_track_idxes = (track_instances.obj_idxes >= 0) # occu
prev_matched_indices = torch.stack(
[full_track_idxes[matched_track_idxes], track_instances.matched_gt_idxes[matched_track_idxes]], dim=1).to(
pred_logits_i.device)
# step2. select the unmatched slots.
# note that the FP tracks whose obj_idxes are -2 will not be selected here.
unmatched_track_idxes = full_track_idxes[track_instances.obj_idxes == -1]
# step3. select the untracked gt instances (new tracks).
tgt_indexes = track_instances.matched_gt_idxes
tgt_indexes = tgt_indexes[tgt_indexes != -1]
tgt_state = torch.zeros(len(gt_instances_i)).to(pred_logits_i.device)
tgt_state[tgt_indexes] = 1
untracked_tgt_indexes = torch.arange(len(gt_instances_i)).to(pred_logits_i.device)[tgt_state == 0]
# untracked_tgt_indexes = select_unmatched_indexes(tgt_indexes, len(gt_instances_i))
untracked_gt_instances = gt_instances_i[untracked_tgt_indexes]
def match_for_single_decoder_layer(unmatched_outputs, matcher):
new_track_indices = matcher(unmatched_outputs,
[untracked_gt_instances]) # list[tuple(src_idx, tgt_idx)]
src_idx = new_track_indices[0][0]
tgt_idx = new_track_indices[0][1]
# concat src and tgt.
new_matched_indices = torch.stack([unmatched_track_idxes[src_idx], untracked_tgt_indexes[tgt_idx]],
dim=1).to(pred_logits_i.device)
return new_matched_indices
# step4. do matching between the unmatched slots and GTs.
unmatched_outputs = {
'pred_logits': track_instances.pred_logits[unmatched_track_idxes].unsqueeze(0),
'pred_boxes': track_instances.pred_boxes[unmatched_track_idxes].unsqueeze(0),
}
new_matched_indices = match_for_single_decoder_layer(unmatched_outputs, self.matcher)
# step5. update obj_idxes according to the new matching result.
track_instances.obj_idxes[new_matched_indices[:, 0]] = gt_instances_i.obj_ids[new_matched_indices[:, 1]].long()
track_instances.matched_gt_idxes[new_matched_indices[:, 0]] = new_matched_indices[:, 1]
# step6. calculate iou.
active_idxes = (track_instances.obj_idxes >= 0) & (track_instances.matched_gt_idxes >= 0)
active_track_boxes = track_instances.pred_boxes[active_idxes]
if len(active_track_boxes) > 0:
gt_boxes = gt_instances_i.boxes[track_instances.matched_gt_idxes[active_idxes]]
active_track_boxes = box_ops.box_cxcywh_to_xyxy(active_track_boxes)
gt_boxes = box_ops.box_cxcywh_to_xyxy(gt_boxes)
track_instances.iou[active_idxes] = matched_boxlist_iou(Boxes(active_track_boxes), Boxes(gt_boxes))
# step7. merge the unmatched pairs and the matched pairs.
matched_indices = torch.cat([new_matched_indices, prev_matched_indices], dim=0)
# step8. calculate losses.
self.num_samples += len(gt_instances_i) + num_disappear_track
self.sample_device = pred_logits_i.device
for loss in self.losses:
new_track_loss = self.get_loss(loss,
outputs=outputs_i,
gt_instances=[gt_instances_i],
indices=[(matched_indices[:, 0], matched_indices[:, 1])],
num_boxes=1)
self.losses_dict.update(
{'frame_{}_{}'.format(self._current_frame_idx, key): value for key, value in new_track_loss.items()})
if 'aux_outputs' in outputs:
for i, aux_outputs in enumerate(outputs['aux_outputs']):
unmatched_outputs_layer = {
'pred_logits': aux_outputs['pred_logits'][0, unmatched_track_idxes].unsqueeze(0),
'pred_boxes': aux_outputs['pred_boxes'][0, unmatched_track_idxes].unsqueeze(0),
}
new_matched_indices_layer = match_for_single_decoder_layer(unmatched_outputs_layer, self.matcher)
matched_indices_layer = torch.cat([new_matched_indices_layer, prev_matched_indices], dim=0)
for loss in self.losses:
if loss == 'masks':
# Intermediate masks losses are too costly to compute, we ignore them.
continue
l_dict = self.get_loss(loss,
aux_outputs,
gt_instances=[gt_instances_i],
indices=[(matched_indices_layer[:, 0], matched_indices_layer[:, 1])],
num_boxes=1, )
self.losses_dict.update(
{'frame_{}_aux{}_{}'.format(self._current_frame_idx, i, key): value for key, value in
l_dict.items()})
self._step()
return track_instances
def forward(self, outputs, input_data: dict):
# losses of each frame are calculated during the model's forwarding and are outputted by the model as outputs['losses_dict].
losses = outputs.pop("losses_dict")
num_samples = self.get_num_boxes(self.num_samples)
for loss_name, loss in losses.items():
losses[loss_name] /= num_samples
return losses
class RuntimeTrackerBase(object):
    """Inference-time id assigner: gives new tracks a fresh global object id
    and retires tracks whose score stays low for too many frames."""

    def __init__(self, score_thresh=0.8, filter_score_thresh=0.6, miss_tolerance=5):
        # score_thresh: minimum score for a slot to be born as a new track.
        # filter_score_thresh: below this, an existing track counts as missed.
        # miss_tolerance: consecutive misses before a track is dropped.
        self.score_thresh = score_thresh
        self.filter_score_thresh = filter_score_thresh
        self.miss_tolerance = miss_tolerance
        self.max_obj_id = 0

    def clear(self):
        # Restart global id numbering (e.g. when a new video begins).
        self.max_obj_id = 0

    def update(self, track_instances: Instances):
        # Any confidently detected slot is considered "seen" this frame.
        confident = track_instances.scores >= self.score_thresh
        track_instances.disappear_time[confident] = 0

        for slot in range(len(track_instances)):
            obj_idx = track_instances.obj_idxes[slot]
            score = track_instances.scores[slot]
            if obj_idx == -1 and score >= self.score_thresh:
                # Unassigned slot with a high score: birth of a new track.
                track_instances.obj_idxes[slot] = self.max_obj_id
                self.max_obj_id += 1
            elif obj_idx >= 0 and score < self.filter_score_thresh:
                # Existing track missed this frame; retire it once it has
                # been missing for miss_tolerance consecutive frames.
                track_instances.disappear_time[slot] += 1
                if track_instances.disappear_time[slot] >= self.miss_tolerance:
                    # Setting obj_id to -1 lets the TrackEmbeddingLayer
                    # remove this track.
                    track_instances.obj_idxes[slot] = -1
class TrackerPostProcess(nn.Module):
    """ This module converts the model's output into the format expected by the coco api"""

    def __init__(self):
        super().__init__()

    @torch.no_grad()
    def forward(self, track_instances: Instances, target_size) -> Instances:
        """Attach `boxes` (xyxy, absolute pixels), `scores` and `labels`
        to track_instances, derived from its pred_logits / pred_boxes.

        Parameters:
            track_instances: tracks carrying pred_logits and pred_boxes
                (cxcywh, normalized to [0, 1]).
            target_size: (height, width) to scale boxes to. For evaluation,
                this must be the original image size (before any data
                augmentation); for visualization, the size after augment but
                before padding.
        """
        logits = track_instances.pred_logits
        raw_boxes = track_instances.pred_boxes

        # Per-slot confidence = max class probability under a sigmoid.
        probabilities = logits.sigmoid()
        scores, labels = probabilities.max(-1)

        # cxcywh -> [x0, y0, x1, y1], then relative [0, 1] -> absolute pixels.
        abs_boxes = box_ops.box_cxcywh_to_xyxy(raw_boxes)
        img_h, img_w = target_size
        scale = torch.Tensor([img_w, img_h, img_w, img_h]).to(abs_boxes)
        abs_boxes = abs_boxes * scale[None, :]

        track_instances.boxes = abs_boxes
        track_instances.scores = scores
        track_instances.labels = labels
        return track_instances
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
class MOTR(nn.Module):
    def __init__(self, backbone, transformer, num_classes, num_queries, num_feature_levels, criterion, track_embed,
                 aux_loss=True, with_box_refine=False, two_stage=False, memory_bank=None):
        """ Initializes the model.
        Parameters:
            backbone: torch module of the backbone to be used. See backbone.py
            transformer: torch module of the transformer architecture. See transformer.py
            num_classes: number of object classes
            num_queries: number of object queries, ie detection slot. This is the maximal number of objects
                         DETR can detect in a single image. For COCO, we recommend 100 queries.
            num_feature_levels: number of multi-scale feature levels fed to the transformer
            criterion: the ClipMatcher computing per-frame losses during training
            track_embed: query interaction layer producing next-frame track queries
            aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.
            with_box_refine: iterative bounding box refinement
            two_stage: two-stage Deformable DETR
            memory_bank: optional per-track temporal memory module
        """
        super().__init__()
        self.num_queries = num_queries
        self.track_embed = track_embed
        self.transformer = transformer
        hidden_dim = transformer.d_model
        self.num_classes = num_classes
        self.class_embed = nn.Linear(hidden_dim, num_classes)
        self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
        self.num_feature_levels = num_feature_levels
        if not two_stage:
            self.query_embed = nn.Embedding(num_queries, hidden_dim * 2)
        if num_feature_levels > 1:
            # One 1x1-conv projection per backbone level, plus strided 3x3
            # convs to synthesize any extra levels beyond what the backbone
            # provides.
            num_backbone_outs = len(backbone.strides)
            input_proj_list = []
            for _ in range(num_backbone_outs):
                in_channels = backbone.num_channels[_]
                input_proj_list.append(nn.Sequential(
                    nn.Conv2d(in_channels, hidden_dim, kernel_size=1),
                    nn.GroupNorm(32, hidden_dim),
                ))
            for _ in range(num_feature_levels - num_backbone_outs):
                input_proj_list.append(nn.Sequential(
                    nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1),
                    nn.GroupNorm(32, hidden_dim),
                ))
                in_channels = hidden_dim
            self.input_proj = nn.ModuleList(input_proj_list)
        else:
            self.input_proj = nn.ModuleList([
                nn.Sequential(
                    nn.Conv2d(backbone.num_channels[0], hidden_dim, kernel_size=1),
                    nn.GroupNorm(32, hidden_dim),
                )])
        self.backbone = backbone
        self.aux_loss = aux_loss
        self.with_box_refine = with_box_refine
        self.two_stage = two_stage

        # Focal-loss style classification bias initialization (prior
        # probability 0.01 for the positive class).
        prior_prob = 0.01
        bias_value = -math.log((1 - prior_prob) / prior_prob)
        self.class_embed.bias.data = torch.ones(num_classes) * bias_value
        nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0)
        nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0)
        for proj in self.input_proj:
            nn.init.xavier_uniform_(proj[0].weight, gain=1)
            nn.init.constant_(proj[0].bias, 0)
        # if two-stage, the last class_embed and bbox_embed is for region proposal generation
        num_pred = (transformer.decoder.num_layers + 1) if two_stage else transformer.decoder.num_layers
        if with_box_refine:
            # Each decoder layer gets its own (independent) heads.
            self.class_embed = _get_clones(self.class_embed, num_pred)
            self.bbox_embed = _get_clones(self.bbox_embed, num_pred)
            nn.init.constant_(self.bbox_embed[0].layers[-1].bias.data[2:], -2.0)
            # hack implementation for iterative bounding box refinement
            self.transformer.decoder.bbox_embed = self.bbox_embed
        else:
            # Heads are shared across decoder layers (same module repeated).
            nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0)
            self.class_embed = nn.ModuleList([self.class_embed for _ in range(num_pred)])
            self.bbox_embed = nn.ModuleList([self.bbox_embed for _ in range(num_pred)])
            self.transformer.decoder.bbox_embed = None
        if two_stage:
            # hack implementation for two-stage
            self.transformer.decoder.class_embed = self.class_embed
            for box_embed in self.bbox_embed:
                nn.init.constant_(box_embed.layers[-1].bias.data[2:], 0.0)
        self.post_process = TrackerPostProcess()
        self.track_base = RuntimeTrackerBase()
        self.criterion = criterion
        self.memory_bank = memory_bank
        self.mem_bank_len = 0 if memory_bank is None else memory_bank.max_his_length

    def _generate_empty_tracks(self):
        # Build fresh detection slots (one per learned query) with all
        # tracking state initialized to "no object" (obj_idxes == -1).
        track_instances = Instances((1, 1))
        num_queries, dim = self.query_embed.weight.shape  # (300, 512)
        device = self.query_embed.weight.device
        track_instances.ref_pts = self.transformer.reference_points(self.query_embed.weight[:, :dim // 2])
        track_instances.query_pos = self.query_embed.weight
        track_instances.output_embedding = torch.zeros((num_queries, dim >> 1), device=device)
        track_instances.obj_idxes = torch.full((len(track_instances),), -1, dtype=torch.long, device=device)
        track_instances.matched_gt_idxes = torch.full((len(track_instances),), -1, dtype=torch.long, device=device)
        track_instances.disappear_time = torch.zeros((len(track_instances), ), dtype=torch.long, device=device)
        track_instances.iou = torch.zeros((len(track_instances),), dtype=torch.float, device=device)
        track_instances.scores = torch.zeros((len(track_instances),), dtype=torch.float, device=device)
        track_instances.track_scores = torch.zeros((len(track_instances),), dtype=torch.float, device=device)
        track_instances.pred_boxes = torch.zeros((len(track_instances), 4), dtype=torch.float, device=device)
        track_instances.pred_logits = torch.zeros((len(track_instances), self.num_classes), dtype=torch.float, device=device)

        mem_bank_len = self.mem_bank_len
        track_instances.mem_bank = torch.zeros((len(track_instances), mem_bank_len, dim // 2), dtype=torch.float32, device=device)
        # All memory slots start masked-out (True == padding).
        track_instances.mem_padding_mask = torch.ones((len(track_instances), mem_bank_len), dtype=torch.bool, device=device)
        track_instances.save_period = torch.zeros((len(track_instances), ), dtype=torch.float32, device=device)

        return track_instances.to(self.query_embed.weight.device)

    def clear(self):
        # Reset the runtime id assigner (e.g. between videos at inference).
        self.track_base.clear()

    @torch.jit.unused
    def _set_aux_loss(self, outputs_class, outputs_coord):
        # this is a workaround to make torchscript happy, as torchscript
        # doesn't support dictionary with non-homogeneous values, such
        # as a dict having both a Tensor and a list.
        return [{'pred_logits': a, 'pred_boxes': b, }
                for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]

    def _forward_single_image(self, samples, track_instances: Instances):
        # Run backbone + transformer on one frame, fill per-slot predictions
        # into track_instances, and (during training) match to GT and compute
        # losses; finally produce the next frame's track queries.
        features, pos = self.backbone(samples)
        src, mask = features[-1].decompose()
        assert mask is not None

        srcs = []
        masks = []
        for l, feat in enumerate(features):
            src, mask = feat.decompose()
            srcs.append(self.input_proj[l](src))
            masks.append(mask)
            assert mask is not None
        if self.num_feature_levels > len(srcs):
            # Synthesize the missing feature levels from the last one.
            _len_srcs = len(srcs)
            for l in range(_len_srcs, self.num_feature_levels):
                if l == _len_srcs:
                    src = self.input_proj[l](features[-1].tensors)
                else:
                    src = self.input_proj[l](srcs[-1])
                m = samples.mask
                mask = F.interpolate(m[None].float(), size=src.shape[-2:]).to(torch.bool)[0]
                pos_l = self.backbone[1](NestedTensor(src, mask)).to(src.dtype)
                srcs.append(src)
                masks.append(mask)
                pos.append(pos_l)

        hs, init_reference, inter_references, enc_outputs_class, enc_outputs_coord_unact = self.transformer(srcs, masks, pos, track_instances.query_pos, ref_pts=track_instances.ref_pts)

        # Decode class logits and boxes at every decoder layer; boxes are
        # predicted as deltas on the (inverse-sigmoid) reference points.
        outputs_classes = []
        outputs_coords = []
        for lvl in range(hs.shape[0]):
            if lvl == 0:
                reference = init_reference
            else:
                reference = inter_references[lvl - 1]
            reference = inverse_sigmoid(reference)
            outputs_class = self.class_embed[lvl](hs[lvl])
            tmp = self.bbox_embed[lvl](hs[lvl])
            if reference.shape[-1] == 4:
                tmp += reference
            else:
                assert reference.shape[-1] == 2
                tmp[..., :2] += reference
            outputs_coord = tmp.sigmoid()
            outputs_classes.append(outputs_class)
            outputs_coords.append(outputs_coord)
        outputs_class = torch.stack(outputs_classes)
        outputs_coord = torch.stack(outputs_coords)

        ref_pts_all = torch.cat([init_reference[None], inter_references[:, :, :, :2]], dim=0)
        # NOTE(review): the hard-coded index 5 assumes a 6-layer decoder —
        # confirm against the transformer configuration.
        out = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord[-1], 'ref_pts': ref_pts_all[5]}
        if self.aux_loss:
            out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord)

        with torch.no_grad():
            if self.training:
                track_scores = outputs_class[-1, 0, :].sigmoid().max(dim=-1).values
            else:
                track_scores = outputs_class[-1, 0, :, 0].sigmoid()

        track_instances.scores = track_scores
        track_instances.pred_logits = outputs_class[-1, 0]
        track_instances.pred_boxes = outputs_coord[-1, 0]
        track_instances.output_embedding = hs[-1, 0]
        if self.training:
            # the track id will be assigned by the matcher.
            out['track_instances'] = track_instances
            track_instances = self.criterion.match_for_single_frame(out)
        else:
            # each track will be assigned an unique global id by the track base.
            self.track_base.update(track_instances)
        if self.memory_bank is not None:
            track_instances = self.memory_bank(track_instances)
            # track_instances.track_scores = track_instances.track_scores[..., 0]
            # track_instances.scores = track_instances.track_scores.sigmoid()
            if self.training:
                self.criterion.calc_loss_for_track_scores(track_instances)
        # Produce the track queries for the next frame via the query
        # interaction layer (track_embed).
        tmp = {}
        tmp['init_track_instances'] = self._generate_empty_tracks()
        tmp['track_instances'] = track_instances
        out_track_instances = self.track_embed(tmp)
        out['track_instances'] = out_track_instances
        return out

    @torch.no_grad()
    def inference_single_image(self, img, ori_img_size, track_instances=None):
        # Single-frame inference entry point; returns post-processed tracks
        # (and reference points scaled to the original image size).
        if not isinstance(img, NestedTensor):
            img = nested_tensor_from_tensor_list(img)
        # if track_instances is None:
        #     track_instances = self._generate_empty_tracks()
        # NOTE(review): the track_instances argument is currently ignored —
        # fresh empty tracks are always generated (see commented lines above);
        # confirm whether continuation across frames was intended here.
        track_instances = self._generate_empty_tracks()
        res = self._forward_single_image(img, track_instances=track_instances)

        track_instances = res['track_instances']
        track_instances = self.post_process(track_instances, ori_img_size)
        ret = {'track_instances': track_instances}
        if 'ref_pts' in res:
            ref_pts = res['ref_pts']
            img_h, img_w = ori_img_size
            scale_fct = torch.Tensor([img_w, img_h]).to(ref_pts)
            ref_pts = ref_pts * scale_fct[None]
            ret['ref_pts'] = ref_pts
        return ret

    def forward(self, data: dict):
        # Clip-level forward: run each frame sequentially, threading the
        # track queries from frame to frame.
        if self.training:
            self.criterion.initialize_for_single_clip(data['gt_instances'])
        frames = data['imgs']  # list of Tensor.
        outputs = {
            'pred_logits': [],
            'pred_boxes': [],
        }
        track_instances = self._generate_empty_tracks()
        for frame in frames:
            if not isinstance(frame, NestedTensor):
                frame = nested_tensor_from_tensor_list([frame])
            frame_res = self._forward_single_image(frame, track_instances)
            track_instances = frame_res['track_instances']
            outputs['pred_logits'].append(frame_res['pred_logits'])
            outputs['pred_boxes'].append(frame_res['pred_boxes'])

        if not self.training:
            outputs['track_instances'] = track_instances
        else:
            # Losses were accumulated per-frame by the criterion during
            # _forward_single_image.
            outputs['losses_dict'] = self.criterion.losses_dict
        return outputs
def build(args):
    """Assemble the MOTR model, its ClipMatcher criterion, and (empty)
    postprocessors dict from the parsed command-line args.

    Returns:
        (model, criterion, postprocessors) tuple.
    """
    dataset_to_num_classes = {
        'coco': 91,
        'coco_panoptic': 250,
        'e2e_mot': 1,
        'e2e_joint': 1,
        'e2e_static_mot': 1,
    }
    assert args.dataset_file in dataset_to_num_classes
    num_classes = dataset_to_num_classes[args.dataset_file]
    device = torch.device(args.device)

    backbone = build_backbone(args)
    transformer = build_deforamble_transformer(args)
    d_model = transformer.d_model
    hidden_dim = args.dim_feedforward
    query_interaction_layer = build_query_interaction_layer(
        args, args.query_interaction_layer, d_model, hidden_dim, d_model * 2)
    img_matcher = build_matcher(args)

    # Loss weights are keyed per frame (and per auxiliary decoder layer).
    num_frames_per_batch = max(args.sampler_lengths)
    loss_coefs = {
        'loss_ce': args.cls_loss_coef,
        'loss_bbox': args.bbox_loss_coef,
        'loss_giou': args.giou_loss_coef,
    }
    weight_dict = {}
    for frame in range(num_frames_per_batch):
        for loss_name, coef in loss_coefs.items():
            weight_dict['frame_{}_{}'.format(frame, loss_name)] = coef
    # TODO this is a hack
    if args.aux_loss:
        for frame in range(num_frames_per_batch):
            for layer in range(args.dec_layers - 1):
                for loss_name, coef in loss_coefs.items():
                    weight_dict['frame_{}_aux{}_{}'.format(frame, layer, loss_name)] = coef

    if args.memory_bank_type is not None and len(args.memory_bank_type) > 0:
        memory_bank = build_memory_bank(args, d_model, hidden_dim, d_model * 2)
        for frame in range(num_frames_per_batch):
            weight_dict['frame_{}_track_loss_ce'.format(frame)] = args.cls_loss_coef
    else:
        memory_bank = None

    losses = ['labels', 'boxes']
    criterion = ClipMatcher(num_classes, matcher=img_matcher, weight_dict=weight_dict, losses=losses)
    criterion.to(device)
    postprocessors = {}
    model = MOTR(
        backbone,
        transformer,
        track_embed=query_interaction_layer,
        num_feature_levels=args.num_feature_levels,
        num_classes=num_classes,
        num_queries=args.num_queries,
        aux_loss=args.aux_loss,
        criterion=criterion,
        with_box_refine=args.with_box_refine,
        two_stage=args.two_stage,
        memory_bank=memory_bank,
    )
    return model, criterion, postprocessors
|
{"hexsha": "b9f74fdf8520385a79653a557631fa4a9ac1b9fc", "size": 33011, "ext": "py", "lang": "Python", "max_stars_repo_path": "tutorials/motr/motr_det.py", "max_stars_repo_name": "hyperfraise/ByteTrack", "max_stars_repo_head_hexsha": "d742a3321c14a7412f024f2218142c7441c1b699", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1039, "max_stars_repo_stars_event_min_datetime": "2021-10-14T01:15:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T12:51:17.000Z", "max_issues_repo_path": "tutorials/motr/motr_det.py", "max_issues_repo_name": "messedad/ByteTrack", "max_issues_repo_head_hexsha": "9e5efb7bc237ea2c33ecbd1e29ba5cc99f09c1e7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 133, "max_issues_repo_issues_event_min_datetime": "2021-10-14T10:53:16.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T10:26:29.000Z", "max_forks_repo_path": "tutorials/motr/motr_det.py", "max_forks_repo_name": "messedad/ByteTrack", "max_forks_repo_head_hexsha": "9e5efb7bc237ea2c33ecbd1e29ba5cc99f09c1e7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 241, "max_forks_repo_forks_event_min_datetime": "2021-10-14T01:33:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T08:13:35.000Z", "avg_line_length": 48.6887905605, "max_line_length": 185, "alphanum_fraction": 0.6293659689, "include": true, "reason": "import numpy", "num_tokens": 7064}
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import sys, os
sys.path.append(os.pardir) # parent directory
#import tensorflow as tf
import numpy as np
#import matplotlib.pyplot as plt
#import matplotlib.animation as animation
#import matplotlib.gridspec as gridspec
from sklearn.feature_extraction import image
# from PIL import Image
import cv2
import glob
import random
import struct
def genImgListWithFilename(folderpath, imgType, start, end):
    """Load images named <start>.<imgType> .. <end>.<imgType> (inclusive)
    from folderpath as grayscale, in numeric order.

    Returns:
        list of grayscale images as returned by cv2.imread.
    """
    loaded = []
    for number in range(start, end + 1):
        filepath = folderpath + '/' + str(number) + '.' + imgType
        # cv2.IMREAD_GRAYSCALE loads a single-channel image.
        loaded.append(cv2.imread(filepath, cv2.IMREAD_GRAYSCALE))
    return loaded
def cvRotateImg(img, angle):
    """Rotate img by `angle` degrees around its center; output keeps the
    original width/height (corners may be clipped)."""
    height, width = img.shape[0], img.shape[1]
    center = ((width - 1) / 2.0, (height - 1) / 2.0)
    rotation = cv2.getRotationMatrix2D(center, angle, 1.0)
    return cv2.warpAffine(img, rotation, (width, height))
def getImages(path, format='png'):
    """Load every color image matching path/*.<format>.

    Parameters:
        path: directory to scan (non-recursive).
        format: file extension without the dot (default 'png').
    Returns:
        list of BGR images (as returned by cv2.imread) in glob order;
        unreadable files are skipped with a warning.
    """
    imgList = []
    for filepath in glob.glob(path + "/*." + format):
        img = cv2.imread(filepath, cv2.IMREAD_COLOR)  # B, G, R channel order
        if img is None:
            # cv2.imread returns None (no exception) for missing/corrupt
            # files; skipping avoids an AttributeError on img.shape below.
            print("warning: could not read image, skipping:", filepath)
            continue
        print(filepath, img.shape)
        imgList.append(img)
    return imgList
class dotdict(dict):
    """Dictionary with attribute-style read access: d.key == d['key']."""
    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            # __getattr__ must raise AttributeError (not KeyError) so that
            # hasattr(), getattr(obj, name, default), copy and pickle
            # protocols behave correctly on missing keys.
            raise AttributeError(name)
if __name__ == '__main__':
    settings = dotdict({
        # 'dataPath' : '../../../JKcloud/DB_JK/DAGM2007_dataset',
        'dataPath' : '../../DB_JK/PaintCode_dataset/',
        'image_shape' : (104, 113, 1),
        'generated_image_folder' : 'Generated_Images2/',
        'feature_shape' : (32,32,1),
    })

    # Build the output directory tree: PaintCode/{Train,Test}/{0..9}/
    temp_folder = settings.dataPath + "PaintCode/"
    if not os.path.exists( temp_folder ):
        os.mkdir( temp_folder )
    train_dir = temp_folder + "Train/"
    if not os.path.exists(train_dir):
        os.mkdir( train_dir )
    test_dir = temp_folder + "Test/"
    if not os.path.exists(test_dir):
        os.mkdir(test_dir)
    for i in range(10):
        no_dir = train_dir + str(i)
        if not os.path.exists(no_dir):
            os.mkdir(no_dir)
        no_dir = test_dir + str(i)
        if not os.path.exists(no_dir):
            os.mkdir(no_dir)

    image_li = getImages(settings.dataPath, format='jpg')
    height = image_li[0].shape[0]
    width = image_li[0].shape[1]
    print("height :", height, "width :", width)

    temp_folder = "Cropped_original_images/"
    if not os.path.exists(temp_folder):
        os.mkdir(temp_folder)

    # Split each source image into 4 equal-width vertical strips, trim fixed
    # margins, resize to the feature size, and save each crop.
    cropedImg_li = []
    dh = int(width/4)
    for i, img in enumerate(image_li):
        for k in range(4):
            cropedImg = img[:, k*dh:(k+1)*dh]
            cropedImg = cropedImg[4:-10, 11:-12]  # trim top/bottom/left/right borders
            # NOTE(review): both resize dims use feature_shape[0]; if a
            # non-square feature_shape is ever used, confirm [1] vs [0].
            cropedImg = cv2.resize(cropedImg, (settings.feature_shape[0], settings.feature_shape[0]), interpolation=cv2.INTER_AREA)
            # cropedImg = cv2.resize(cropedImg, (settings.feature_shape[0], settings.feature_shape[1]), interpolation=cv2.INTER_CUBIC+cv2.INTER_LINEAR)
            # cv2.imshow('ddd', cropedImg)
            # cv2.waitKey(0)  # blocks until any key is pressed
            cropedImg_li.append(cropedImg)
            cv2.imwrite(temp_folder + str(i) + "-" + str(4+k) + ".jpg", cropedImg)

    if not os.path.exists(settings.generated_image_folder):
        os.mkdir(settings.generated_image_folder)

    # Augmentation: one crop -> eight images
    # (4 rotations of 0/90/180/270 degrees, each for original and flipped).
    for i, img in enumerate(cropedImg_li):
        vertical_flip_img = cv2.flip(img,1)
        for k in range(0,4):
            augmentedImg1 = cvRotateImg(img, 90*k)
            augmentedImg2 = cvRotateImg(vertical_flip_img, 90*k)
            cv2.imwrite(settings.generated_image_folder + str(i) + "-" + str(90*k) + ".jpg", augmentedImg1)
            cv2.imwrite(settings.generated_image_folder + str(i) + "-flip-" + str(90*k) + ".jpg", augmentedImg2)
|
{"hexsha": "c289d5e3350925e05d7996f6907e5262870a0e73", "size": 4494, "ext": "py", "lang": "Python", "max_stars_repo_path": "PaintCode_classification/gen_data.py", "max_stars_repo_name": "sjk0709/PaintCode_classification", "max_stars_repo_head_hexsha": "0663f68592b7685dc1c1008f6433ae1d60f21dc4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "PaintCode_classification/gen_data.py", "max_issues_repo_name": "sjk0709/PaintCode_classification", "max_issues_repo_head_hexsha": "0663f68592b7685dc1c1008f6433ae1d60f21dc4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PaintCode_classification/gen_data.py", "max_forks_repo_name": "sjk0709/PaintCode_classification", "max_forks_repo_head_hexsha": "0663f68592b7685dc1c1008f6433ae1d60f21dc4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.3053435115, "max_line_length": 151, "alphanum_fraction": 0.5763239875, "include": true, "reason": "import numpy", "num_tokens": 1202}
|
"""The orbit solution class."""
import numpy as np
import scipy.optimize as sp_optimize
import opihiexarata.library as library
import opihiexarata.library.error as error
import opihiexarata.library.hint as hint
import opihiexarata.orbit as orbit
class OrbitSolution(hint.ExarataSolution):
"""This is the class which solves a record of observations to derive the
orbital parameters of asteroids or objects in general. A record of
observations must be provided.
Attributes
----------
semimajor_axis : float
The semimajor axis of the orbit solved, in AU.
semimajor_axis_error : float
The error on the semimajor axis of the orbit solved, in AU.
eccentricity : float
The eccentricity of the orbit solved.
eccentricity_error : float
The error on the eccentricity of the orbit solved.
inclination : float
The angle of inclination of the orbit solved, in degrees.
inclination_error : float
The error on the angle of inclination of the orbit solved, in degrees.
longitude_ascending_node : float
The longitude of the ascending node of the orbit solved, in degrees.
longitude_ascending_node_error : float
The error on the longitude of the ascending node of the orbit solved, in degrees.
argument_perihelion : float
The argument of perihelion of the orbit solved, in degrees.
argument_perihelion_error : float
The error on the argument of perihelion of the orbit solved, in degrees.
mean_anomaly : float
The mean anomaly of the orbit solved, in degrees.
mean_anomaly_error : float
The error on the mean anomaly of the orbit solved, in degrees.
true_anomaly : float
The true anomaly of the orbit solved, in degrees. This value is
calculated from the mean anomaly.
true_anomaly_error : float
The error on the true anomaly of the orbit solved, in degrees. This
value is calculated from the error on the mean anomaly.
modified_julian_date : float
The modified Julian date used by the engine to calculate the osculating
orbital elements.
"""
def __init__(
    self, observation_record: list[str], solver_engine: hint.OrbitEngine
) -> None:
    """The initialization function. Provided the list of observations,
    solves the orbit for the Keplerian orbital elements.

    Additional representations of the orbits in different coordinate
    frames are provided via computation. TODO

    Parameters
    ----------
    observation_record : list
        A list of the standard MPC 80-column observation records. Each
        element of the list should be a string representing the observation.
    solver_engine : OrbitEngine
        The engine which will be used to compute the orbital elements
        from the observation record. This must be the engine class
        itself, not an instance of it.

    Returns
    -------
    None
    """
    # Check that the solver engine is a valid submission, that is, an
    # expected engine class. The isinstance check must come first: an
    # instance is not a class, so issubclass() would raise TypeError on it.
    if isinstance(solver_engine, library.engine.OrbitEngine):
        raise error.EngineError(
            "The orbit solver engine provided should be the engine class"
            " itself, not an instance thereof."
        )
    elif issubclass(solver_engine, library.engine.OrbitEngine):
        # It is fine, the user submitted a valid orbit engine.
        pass
    else:
        raise error.EngineError(
            "The provided orbit engine is not a valid engine which can be"
            " used for orbit solutions."
        )
    # Derive the orbital elements using the proper vehicle function for
    # the engine that is to be used.
    if issubclass(solver_engine, orbit.OrbfitOrbitDeterminerEngine):
        # Solve using Orbfit.
        raw_orbit_results = _vehicle_orbfit_orbit_determiner(
            observation_record=observation_record
        )
    else:
        # There is no vehicle function, the engine is not supported.
        raise error.EngineError(
            "The provided orbit engine `{eng}` is not supported, there is no"
            " associated vehicle function for it.".format(eng=str(solver_engine))
        )
    # Sanity check on the dictionary-like return of the vehicle function.
    if not isinstance(raw_orbit_results, dict):
        raise error.DevelopmentError(
            "The results of the orbit engines should be a dictionary. The orbit"
            " engine used, and the subsequent vehicle function: {engtype}".format(
                engtype=solver_engine
            )
        )
    else:
        # Quick type checking; everything should be float or at the least
        # float-convertable. This may be unneeded but it does not hurt.
        orbit_results = {
            keydex: float(valuedex)
            for keydex, valuedex in raw_orbit_results.items()
        }
    # Extract the needed values from the results of the engine. A missing
    # key means the engine/vehicle pair cannot supply a full solution.
    try:
        # Semimajor
        self.semimajor_axis = orbit_results["semimajor_axis"]
        self.semimajor_axis_error = orbit_results["semimajor_axis_error"]
        # Eccentricity
        self.eccentricity = orbit_results["eccentricity"]
        self.eccentricity_error = orbit_results["eccentricity_error"]
        # Inclination
        self.inclination = orbit_results["inclination"]
        self.inclination_error = orbit_results["inclination_error"]
        # Longitude
        self.longitude_ascending_node = orbit_results["longitude_ascending_node"]
        self.longitude_ascending_node_error = orbit_results[
            "longitude_ascending_node_error"
        ]
        # Perihelion
        self.argument_perihelion = orbit_results["argument_perihelion"]
        self.argument_perihelion_error = orbit_results["argument_perihelion_error"]
        # Mean anomaly
        self.mean_anomaly = orbit_results["mean_anomaly"]
        self.mean_anomaly_error = orbit_results["mean_anomaly_error"]
        # MJD of the epoch the elements are osculating at.
        self.modified_julian_date = orbit_results["modified_julian_date"]
    except KeyError:
        raise error.EngineError(
            "The engine results provided are insufficient for this orbit"
            " solver. Either the engine cannot be used because it cannot provide"
            " the needed results, or the vehicle function does not pull the"
            " required results from the engine."
        )
    # Calculating the eccentric anomaly and error from the provided values.
    (
        eccentric_anomaly,
        eccentric_anomaly_error,
    ) = self.__calculate_eccentric_anomaly()
    self.eccentric_anomaly = eccentric_anomaly
    self.eccentric_anomaly_error = eccentric_anomaly_error
    # Calculating the true anomaly and error from the provided values.
    # (Depends on the eccentric anomaly computed just above.)
    true_anomaly, true_anomaly_error = self.__calculate_true_anomaly()
    self.true_anomaly = true_anomaly
    self.true_anomaly_error = true_anomaly_error
    # All done.
    return None
def __calculate_eccentric_anomaly(self) -> tuple[float, float]:
    """Derive the eccentric anomaly, and its error, from the mean anomaly.

    The error is estimated by converting the lower and upper bounds of
    the mean anomaly and averaging the two absolute deviations from the
    central value.

    Returns
    -------
    eccentric_anomaly : float
        The eccentric anomaly as derived from the mean anomaly.
    eccentric_anomaly_error : float
        The eccentric anomaly error derived as an average from the upper
        and lower ranges of the mean anomaly.
    """
    # Orbital values this conversion depends on.
    eccen = self.eccentricity
    mean_anom = self.mean_anomaly
    mean_anom_err = self.mean_anomaly_error
    # Central value of the eccentric anomaly.
    eccentric_anomaly = _calculate_eccentric_anomaly(
        mean_anomaly=mean_anom, eccentricity=eccen
    )
    # Push both error bounds of the mean anomaly through the same
    # conversion, then average the absolute deviations from the center.
    bound_values = [
        _calculate_eccentric_anomaly(
            mean_anomaly=mean_anom + offset, eccentricity=eccen
        )
        for offset in (-mean_anom_err, +mean_anom_err)
    ]
    deviations = np.abs(np.array(bound_values, dtype=float) - eccentric_anomaly)
    eccentric_anomaly_error = np.mean(deviations)
    return eccentric_anomaly, eccentric_anomaly_error
def __calculate_true_anomaly(self) -> tuple[float, float]:
    """Derive the true anomaly, and its error, from the eccentric anomaly.

    The error is estimated by converting the lower and upper bounds of
    the eccentric anomaly and averaging the two absolute deviations from
    the central value.

    Returns
    -------
    true_anomaly : float
        The true anomaly as derived from the eccentric anomaly.
    true_anomaly_error : float
        The true anomaly error derived as an average from the upper
        and lower ranges of the eccentric anomaly.
    """
    # Orbital values this conversion depends on.
    eccen = self.eccentricity
    ecc_anom = self.eccentric_anomaly
    ecc_anom_err = self.eccentric_anomaly_error
    # Central value of the true anomaly.
    true_anomaly = _calculate_true_anomaly(
        eccentric_anomaly=ecc_anom, eccentricity=eccen
    )
    # Push both error bounds of the eccentric anomaly through the same
    # conversion, then average the absolute deviations from the center.
    bound_values = [
        _calculate_true_anomaly(
            eccentric_anomaly=ecc_anom + offset, eccentricity=eccen
        )
        for offset in (-ecc_anom_err, +ecc_anom_err)
    ]
    deviations = np.abs(np.array(bound_values, dtype=float) - true_anomaly)
    true_anomaly_error = np.mean(deviations)
    return true_anomaly, true_anomaly_error
def _calculate_eccentric_anomaly(mean_anomaly: float, eccentricity: float) -> float:
"""Calculate the eccentric anomaly from the mean anomaly and eccentricity
of an orbit. This is found iteratively using Newton's method.
Parameters
----------
mean_anomaly : float
The mean anomaly of the orbit, in degrees.
Returns
-------
eccentric_anomaly : float
The eccentric anomaly as derived from the mean anomaly, in degrees.
"""
# Converting first to radians.
radian_mean_anomaly = mean_anomaly * (np.pi / 180)
# The main equation to solve using the root finding algorithm; as derived
# from Kepler's equation.
def root_kepler_equation(ecc_ano=0, mean_ano=0, eccen=0):
return ecc_ano - eccen * np.sin(ecc_ano) - mean_ano
def root_kepler_equation_prime(ecc_ano=0, mean_ano=0, eccen=0):
return 1 - eccen * np.cos(ecc_ano)
# Initial guess. High eccentricities are better served by a different
# initial guess than the native one.
if eccentricity <= 0.7:
initial_guess = radian_mean_anomaly
else:
initial_guess = np.pi
# Using the root finding algorithm to find the eccentric anomaly.
root_results = sp_optimize.root_scalar(
f=lambda ec_an: root_kepler_equation(
ecc_ano=ec_an, mean_ano=radian_mean_anomaly, eccen=eccentricity
),
fprime=lambda ec_an: root_kepler_equation_prime(
ecc_ano=ec_an, mean_ano=radian_mean_anomaly, eccen=eccentricity
),
method="newton",
x0=initial_guess,
)
# Scipy gives a class back rather than just a tuple of values. Who knows
# why.
radian_eccentric_anomaly = root_results.root
# Converting back to degrees.
eccentric_anomaly = radian_eccentric_anomaly * (180 / np.pi)
return eccentric_anomaly
def _calculate_true_anomaly(eccentric_anomaly: float, eccentricity: float) -> float:
"""Calculate the true anomaly from the mean anomaly and eccentricity
of an orbit.
Parameters
----------
eccentric_anomaly : float
The eccentric anomaly of the orbit, in degrees.
Returns
-------
true_anomaly : float
The true anomaly as derived from the eccentric anomaly, in degrees.
"""
# Converting first to radians.
radian_eccentric_anomaly = eccentric_anomaly * (np.pi / 180)
# Using just the tangent version. There is no expectation that the
# eccentricity will be close to 1.
e_ratio = (1 + eccentricity) / (1 - eccentricity)
radian_true_anomaly = 2 * np.arctan(
np.sqrt(e_ratio) * np.tan(radian_eccentric_anomaly / 2)
)
# Converting back to degrees.
true_anomaly = radian_true_anomaly * (180 / np.pi)
return true_anomaly
def _vehicle_orbfit_orbit_determiner(observation_record: list[str]) -> dict:
    """This uses the Orbfit engine to calculate orbital elements from the
    observation record. The results are then returned to be managed by
    the main class.

    Parameters
    ----------
    observation_record : list
        The MPC standard 80-column record for observations of the asteroid
        by which the orbit shall be computed from.

    Returns
    -------
    orbit_results : dict
        The results of the orbit computation using the Orbfit engine. Namely,
        this returns the 6 classical Kepler elements (using mean anomaly),
        their errors, and the modified Julian date of the solution epoch.
    """
    # Creating the Orbfit class. It does a correct-installation check. If
    # the installation is wrong, it is mentioned. Catching it should it fail
    # to add context as well as the stack trace should give the error
    # information.
    try:
        orbfit = orbit.OrbfitOrbitDeterminerEngine()
    except error.InstallError:
        raise error.InstallError(
            "The Orbfit engine is not properly installed; thus it cannot be used to"
            " compute the orbital elements for this solution class."
        )
    # Solving for the orbit. This engine has a record-based solution function
    # so just using it.
    kepler_elements, kepler_error, mjd = orbfit.solve_orbit_via_record(
        observation_record=observation_record
    )
    # Converting the results from this engine into the standard flat
    # dictionary expected by the vehicle functions for orbit solving.
    orbit_results = {
        "semimajor_axis": kepler_elements["semimajor_axis"],
        "semimajor_axis_error": kepler_error["semimajor_axis_error"],
        "eccentricity": kepler_elements["eccentricity"],
        "eccentricity_error": kepler_error["eccentricity_error"],
        "inclination": kepler_elements["inclination"],
        "inclination_error": kepler_error["inclination_error"],
        "longitude_ascending_node": kepler_elements["longitude_ascending_node"],
        "longitude_ascending_node_error": kepler_error[
            "longitude_ascending_node_error"
        ],
        "argument_perihelion": kepler_elements["argument_perihelion"],
        "argument_perihelion_error": kepler_error["argument_perihelion_error"],
        "mean_anomaly": kepler_elements["mean_anomaly"],
        "mean_anomaly_error": kepler_error["mean_anomaly_error"],
        "modified_julian_date": mjd,
    }
    # All done.
    return orbit_results
|
{"hexsha": "26df490e6a1fa040c74f9251d2d9fe5adbbd4ece", "size": 15598, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/opihiexarata/orbit/solution.py", "max_stars_repo_name": "psmd-iberutaru/OpihiExarata", "max_stars_repo_head_hexsha": "f0b595d7712ec68c972a7261e6bacc66410ba8b3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/opihiexarata/orbit/solution.py", "max_issues_repo_name": "psmd-iberutaru/OpihiExarata", "max_issues_repo_head_hexsha": "f0b595d7712ec68c972a7261e6bacc66410ba8b3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2022-03-02T03:37:58.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-15T11:05:32.000Z", "max_forks_repo_path": "src/opihiexarata/orbit/solution.py", "max_forks_repo_name": "psmd-iberutaru/OpihiExarata", "max_forks_repo_head_hexsha": "f0b595d7712ec68c972a7261e6bacc66410ba8b3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.832460733, "max_line_length": 89, "alphanum_fraction": 0.6666239261, "include": true, "reason": "import numpy,import scipy", "num_tokens": 3382}
|
[STATEMENT]
lemma lran_empty[simp]:
"lran a l l = []"
"lran a l h = [] \<longleftrightarrow> h\<le>l"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. lran a l l = [] &&& (lran a l h = []) = (h \<le> l)
[PROOF STEP]
by (subst lran.simps; auto)+
|
{"llama_tokens": 116, "file": "IMP2_lib_IMP2_Aux_Lemmas", "length": 1}
|
"""
abstract type AbstractMetricParams{T} end
Abstract type used to dispatch different geodesic problems.
"""
abstract type AbstractMetricParams{T} end
# contains the full metric components (this type needed for DiffGeoSymbolics)
abstract type AbstractMetric{T} <: AbstractMatrix{T} end
metric_params(m::AbstractMetric{T}) where {T} =
error("Not implemented for metric $(typeof(m))")
"""
geodesic_eq(m::AbstractMetricParams{T}, u, v)
geodesic_eq!(m::AbstractMetricParams{T}, u, v)
Calculate the acceleration components of the geodesic equation given a position `u`, a velocity `v`, and a metric `m`.
"""
geodesic_eq(m::AbstractMetricParams{T}, u, v) where {T} =
error("Not implemented for metric parameters $(typeof(m))")
geodesic_eq!(m::AbstractMetricParams{T}, u, v) where {T} =
error("Not implemented for metric parameters $(typeof(m))")
"""
constrain(m::AbstractMetricParams{T}, u, v; μ::T=0.0f0)
Give time component which would constrain a velocity vector `v` at position `x` to be a
geodesic with mass `μ`.
"""
constrain(m::AbstractMetricParams{T}, u, v; μ::T = 0.0) where {T} =
error("Not implemented for metric parameters $(typeof(m))")
"""
on_chart(m::AbstractMetricParams{T}, u)
Check if point `u` is a valid point for the metric described by `m`.
Returns false is `u` is a singularity.
"""
on_chart(m::AbstractMetricParams{T}, u) where {T} = !(sum(u) ≈ 0)
"""
inner_radius(m::AbstractMetricParams{T})
Return the innermost valid coordinate relative to the origin, for use in geodesic tracing.
This usually represents some property of the metric, e.g. event horizon radius in Kerr/Schwarzschild metrics, or
throat diameter in worm hole metrics.
"""
inner_radius(m::AbstractMetricParams{T}) where {T} = convert(T, 0.0)
"""
metric_type(m::AbstractMetricParams{T})
Return the [`AbstractMetric`](@ref) type associated with the metric parameters `m`.
"""
metric_type(m::AbstractMetricParams{T}) where {T} =
error("Not implemented for metric parameters $(typeof(m))")
"""
metric(m::AbstractMetricParams{T}, u)
Numerically evaluate the corresponding metric for [`AbstractMetricParams`](@ref), given parameter values `m`
and some point `u`.
"""
metric(m::AbstractMetricParams{T}, u) where {T} =
error("Not implemented for metric $(typeof(m))")
# do we actually want to support this?
# since if it's a symbolic matrix, you can subs other ways better?
#"""
# metric(m::AbstractMetric{T}, u)
#
#Evaluate the metric at a point `u`.
#"""
#metric(m::AbstractMetric{T}, u) where {T} = error("Not implemented for metric $(typeof(m))")
export AbstractMetricParams,
geodesic_eq, geodesic_eq!, constrain, on_chart, inner_radius, metric_type
|
{"hexsha": "83db162a9298d212973cc037331f9f8ed77cd345", "size": 2719, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/metric-params.jl", "max_stars_repo_name": "astro-group-bristol/GeodesicBase.jl", "max_stars_repo_head_hexsha": "0cb60aeba81a2fe21e363824ae3fe86dbb52a15d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/metric-params.jl", "max_issues_repo_name": "astro-group-bristol/GeodesicBase.jl", "max_issues_repo_head_hexsha": "0cb60aeba81a2fe21e363824ae3fe86dbb52a15d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-01-12T14:17:44.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-12T14:17:44.000Z", "max_forks_repo_path": "src/metric-params.jl", "max_forks_repo_name": "astro-group-bristol/GeodesicBase.jl", "max_forks_repo_head_hexsha": "0cb60aeba81a2fe21e363824ae3fe86dbb52a15d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.369047619, "max_line_length": 118, "alphanum_fraction": 0.714233174, "num_tokens": 695}
|
"""
Kindly install these libraries before executing this code:
1. numpy
2. matplotlib
3. scipy
"""
import numpy as np
import matplotlib.pyplot as plt
import cmath
import math
from numpy import random
from scipy.special import beta
from scipy import stats
import time
# if using a Jupyter notebook, kindly uncomment following line:
# %matplotlib inline
def boxMuller(sample=None, iterations=50):
    """Generate standard normal variates with the Box-Muller transform,
    timing the generation and printing the average time per run.

    Parameters
    ----------
    sample : list of int, optional
        Sizes of the uniform input samples; each size ``i`` produces
        ``2 * i`` normal variates per run. Defaults to [50, 5000],
        matching the original hard-coded behavior.
    iterations : int, optional
        Number of timed repetitions per sample size. Defaults to 50.

    Returns
    -------
    random_numbers : list of list of float
        Every generated batch of normal variates (``iterations`` batches
        per sample size, in order).
    """
    if sample is None:
        sample = [50, 5000]
    random_numbers = []
    for i in sample:
        total_time = 0
        # `iterations` replaces the original loop variable `iter`, which
        # shadowed the builtin; the manual `count += 1` was redundant.
        for _ in range(iterations):
            time1 = time.time()
            U1 = np.random.uniform(size=i)
            U2 = np.random.uniform(size=i)
            # Polar form: R holds squared radii (-2 ln U1), V uniform angles.
            R = [-2 * math.log(ui) for ui in U1]
            V = [2 * math.pi * ui for ui in U2]
            Z = []
            for idx in range(0, i):
                # Each (R, V) pair yields two independent normals.
                Z.append(math.sqrt(R[idx]) * math.cos(V[idx]))
                Z.append(math.sqrt(R[idx]) * math.sin(V[idx]))
            time2 = time.time()
            random_numbers.append(Z)
            total_time += time2 - time1
        print("\n\n****************** Box-Muller Method ******************")
        print("Size of sample\t= {}".format(len(Z)))
        print("Time difference\t= {} sec".format(total_time / iterations))
    return random_numbers
def marsagliaAndBray(sample=None, iterations=50):
    """Generate standard normal variates with the Marsaglia-Bray polar
    (rejection) method, timing the generation and printing the average
    time per run.

    Parameters
    ----------
    sample : list of int, optional
        Number of accepted unit-disk points per run; each size ``i``
        produces ``2 * i`` normal variates. Defaults to [50, 5000],
        matching the original hard-coded behavior.
    iterations : int, optional
        Number of timed repetitions per sample size. Defaults to 50.

    Returns
    -------
    random_numbers : list of list of float
        Every generated batch of normal variates (``iterations`` batches
        per sample size, in order).
    """
    if sample is None:
        sample = [50, 5000]
    random_numbers = []
    for i in sample:
        total_time = 0
        # `iterations` replaces the original loop variable `iter`, which
        # shadowed the builtin; the manual `count += 1` and the unused
        # `counter` accumulator were removed.
        for _ in range(iterations):
            X = []
            U1 = []
            U2 = []
            time1 = time.time()
            # Rejection sampling: draw points in [-1, 1]^2 and keep only
            # those strictly inside the unit circle (excluding the origin,
            # which would divide by zero in the transform below).
            while len(X) != i:
                u1 = 2 * np.random.random() - 1
                u2 = 2 * np.random.random() - 1
                x = u1 * u1 + u2 * u2
                if x > 1 or x == 0:
                    continue
                X.append(x)
                U1.append(u1)
                U2.append(u2)
            # Polar transform factor sqrt(-2 ln x / x) per accepted point.
            Y = [math.sqrt(-2 * math.log(x) / x) for x in X]
            Z = []
            for idx in range(0, i):
                # Each accepted point yields two independent normals.
                Z.append(U1[idx] * Y[idx])
                Z.append(U2[idx] * Y[idx])
            time2 = time.time()
            random_numbers.append(Z)
            total_time += time2 - time1
        print("\n\n****************** Marsaglia and Bray Method ******************")
        print("Size of sample\t= {}".format(len(Z)))
        print("Time difference\t= {} sec".format(total_time / iterations))
    return random_numbers
def main():
    """Driver: generate N(0, 1) samples with both methods and let each
    print its own timing summary."""
    print(" ---------------------- Solutions for sample from N(0, 1) ----------------------")
    normals_box_muller = boxMuller()
    normals_marsaglia = marsagliaAndBray()
if __name__ == "__main__":
main()
|
{"hexsha": "f956182779aad5caf5a3c003695ada5b370d4272", "size": 2536, "ext": "py", "lang": "Python", "max_stars_repo_path": "Lab5/Submission Files/180123053_VishishtPriyadarshi_q2.py", "max_stars_repo_name": "vishishtpriyadarshi/Monte-Carlo-Simulation", "max_stars_repo_head_hexsha": "0e162bdecf774e06ec209914ff16bc31b0f8fc74", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Lab5/Submission Files/180123053_VishishtPriyadarshi_q2.py", "max_issues_repo_name": "vishishtpriyadarshi/Monte-Carlo-Simulation", "max_issues_repo_head_hexsha": "0e162bdecf774e06ec209914ff16bc31b0f8fc74", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Lab5/Submission Files/180123053_VishishtPriyadarshi_q2.py", "max_forks_repo_name": "vishishtpriyadarshi/Monte-Carlo-Simulation", "max_forks_repo_head_hexsha": "0e162bdecf774e06ec209914ff16bc31b0f8fc74", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.2456140351, "max_line_length": 92, "alphanum_fraction": 0.5106466877, "include": true, "reason": "import numpy,from numpy,from scipy", "num_tokens": 685}
|
"""
rendering.py
--------------
Functions to convert trimesh objects to pyglet/opengl objects.
"""
import numpy as np
try:
import pyglet
pyglet.options['shadow_window'] = False
from pyglet import gl
# bring in mode enum
GL_LINES, GL_POINTS, GL_TRIANGLES = (
gl.GL_LINES,
gl.GL_POINTS,
gl.GL_TRIANGLES)
except BaseException:
# otherwise provide mode flags
# this is so we can unit test without pyglet
GL_POINTS, GL_LINES, GL_TRIANGLES = (0, 1, 4)
from . import util
def convert_to_vertexlist(geometry, **kwargs):
    """
    Convert a supported geometry object into constructor
    arguments for a pyglet indexed vertex list.

    Parameters
    ------------
    geometry : Trimesh, Path2D, Path3D, PointCloud, (n,2) float, (n,3) float
      Object to render

    Returns
    ------------
    args : tuple
      Args to be passed to pyglet indexed vertex list
      constructor.
    """
    # dispatch on the duck-typed class name of the geometry
    if util.is_instance_named(geometry, 'Trimesh'):
        return mesh_to_vertexlist(geometry, **kwargs)
    if util.is_instance_named(geometry, 'Path'):
        # Path3D and Path2D both inherit from Path
        return path_to_vertexlist(geometry, **kwargs)
    if util.is_instance_named(geometry, 'PointCloud'):
        # point clouds carry their own per-point colors
        return points_to_vertexlist(
            geometry.vertices, colors=geometry.colors, **kwargs)
    if util.is_instance_named(geometry, 'ndarray'):
        # raw (n, 2) or (n, 3) points
        return points_to_vertexlist(geometry, **kwargs)
    raise ValueError('Geometry passed is not a viewable type!')
def mesh_to_vertexlist(mesh,
                       group=None,
                       smooth=True,
                       smooth_threshold=60000):
    """
    Convert a Trimesh object to arguments for an
    indexed vertex list constructor.

    Chooses one of three vertex layouts: shared vertices with UV
    texture coordinates, shared vertices with smooth-shaded vertex
    colors, or a disconnected triangle soup with flat face colors.

    Parameters
    -------------
    mesh : trimesh.Trimesh
      Mesh to be rendered
    group : str
      Rendering group for the vertex list
    smooth : bool
      Should we try to smooth shade the mesh
    smooth_threshold : int
      Maximum number of faces to smooth shade

    Returns
    --------------
    args : (7,) tuple
      Args for vertex list constructor
    """
    if hasattr(mesh.visual, 'uv') and mesh.visual.uv is not None:
        # if the mesh has texture defined pass it to pyglet
        vertex_count = len(mesh.vertices)
        normals = mesh.vertex_normals.reshape(-1).tolist()
        faces = mesh.faces.reshape(-1).tolist()
        vertices = mesh.vertices.reshape(-1).tolist()
        # get the per- vertex UV coordinates
        uv = mesh.visual.uv
        # if someone passed (n, 3) UVR cut it off here
        if uv.shape[1] > 2:
            uv = uv[:, :2]
        # texcoord as (2,) float, replacing the color channel entirely
        color_gl = ('t2f/static',
                    uv.astype(np.float64).reshape(-1).tolist())
    elif smooth and len(mesh.faces) < smooth_threshold:
        # if we have a small number of faces and colors defined
        # smooth the mesh by merging vertices of faces below
        # the threshold angle
        mesh = mesh.smoothed()
        vertex_count = len(mesh.vertices)
        normals = mesh.vertex_normals.reshape(-1).tolist()
        faces = mesh.faces.reshape(-1).tolist()
        vertices = mesh.vertices.reshape(-1).tolist()
        color_gl = colors_to_gl(mesh.visual.vertex_colors,
                                vertex_count)
    else:
        # we don't have textures or want to smooth so
        # send a polygon soup of disconnected triangles to opengl:
        # three unshared vertices per face, each carrying its face's
        # normal and color
        vertex_count = len(mesh.triangles) * 3
        normals = np.tile(mesh.face_normals,
                          (1, 3)).reshape(-1).tolist()
        vertices = mesh.triangles.reshape(-1).tolist()
        faces = np.arange(vertex_count).tolist()
        colors = np.tile(mesh.visual.face_colors,
                         (1, 3)).reshape((-1, 4))
        color_gl = colors_to_gl(colors, vertex_count)
    # create the ordered tuple for pyglet, use like:
    # `batch.add_indexed(*args)`
    args = (vertex_count,  # number of vertices
            GL_TRIANGLES,  # mode
            group,  # group
            faces,  # indices
            ('v3f/static', vertices),
            ('n3f/static', normals),
            color_gl)
    return args
def path_to_vertexlist(path, group=None, colors=None, **kwargs):
    """
    Convert a Path3D or Path2D object to arguments for an
    indexed vertex list constructor.

    Parameters
    -------------
    path : trimesh.path.Path3D or Path2D
      Path to be rendered
    group : str
      Rendering group for the vertex list
    colors : (n, 3) or (n, 4) float, or None
      Colors for the line segments; a default color is used
      when None or mis-shaped

    Returns
    --------------
    args : (6,) tuple
      Args for vertex list constructor
    """
    # avoid cache check inside tight loop
    vertices = path.vertices
    # get (n, 2, (2|3)) lines: each entity is discretized and the
    # resulting polyline turned into segment endpoint pairs
    lines = np.vstack([util.stack_lines(e.discrete(vertices))
                       for e in path.entities])
    count = len(lines)
    # stack zeros for 2D lines so everything is sent as 3D
    if util.is_shape(vertices, (-1, 2)):
        lines = lines.reshape((-1, 2))
        lines = np.column_stack((lines, np.zeros(len(lines))))
    # index for GL is one per point
    index = np.arange(count).tolist()
    args = (count,  # number of lines
            GL_LINES,  # mode
            group,  # group
            index,  # indices
            ('v3f/static', lines.reshape(-1)),
            colors_to_gl(colors, count=count))  # default colors
    return args
def points_to_vertexlist(points,
                         colors=None,
                         group=None,
                         **kwargs):
    """
    Convert an array of 2D or 3D points into arguments for a
    pyglet indexed vertex list constructor.

    Parameters
    -------------
    points : (n, 2) or (n, 3) float
      Points to be rendered
    colors : (n, 3) or (n, 4) float
      Colors for each point
    group : str
      Rendering group for the vertex list

    Returns
    --------------
    args : tuple
      Args for vertex list constructor
    """
    points = np.asanyarray(points, dtype=np.float64)
    # pad 2D points out to 3D with a zero Z- coordinate
    if util.is_shape(points, (-1, 2)):
        points = np.column_stack((points, np.zeros(len(points))))
    elif not util.is_shape(points, (-1, 3)):
        raise ValueError('Pointcloud must be (n,3)!')
    count = len(points)
    # GL wants one index per point
    indices = np.arange(count).tolist()
    return (count,  # number of vertices
            GL_POINTS,  # mode
            group,  # group
            indices,  # indices
            ('v3f/static', points.reshape(-1)),
            colors_to_gl(colors, count))
def colors_to_gl(colors, count):
    """
    Given an array of colors (or None) return a GL- acceptable
    format string and flat color list.

    Parameters
    ------------
    colors : (count, 3) or (count, 4) float or int, or anything else
      Input colors; anything not matching the expected shape
      falls back to a default color.
    count : int
      Number of vertices the colors must cover.

    Returns
    ---------
    colors_type : str
      Color format string for pyglet, e.g. 'c3f/static'
    colors_gl : (count,) list
      Flat color components to pass to pyglet
    """
    count = int(count)
    colors = np.asanyarray(colors)
    if not util.is_shape(colors, (count, (3, 4))):
        # colors are missing or the wrong shape: use a default color
        fallback = np.tile([.5, .10, .20], (count, 1)).reshape(-1).tolist()
        return 'c3f/static', fallback
    # map the numpy dtype kind onto an opengl type code
    gl_dtype = {'f': 'f',
                'i': 'B',
                'u': 'B'}[colors.dtype.kind]
    # data type description string pyglet expects:
    # channel count + component type + usage hint
    colors_type = 'c' + str(colors.shape[1]) + gl_dtype + '/static'
    # flatten the 2D array and convert it to a python list
    return colors_type, colors.reshape(-1).tolist()
def material_to_texture(material):
    """
    Convert a trimesh.visual.texture.Material object into
    a pyglet- compatible texture object.

    Parameters
    --------------
    material : trimesh.visual.texture.Material
      Material to be converted

    Returns
    ---------------
    texture : pyglet.image.Texture or None
      Texture loaded into pyglet form, or None when the
      material carries no image.
    """
    # try to extract a PIL image from material; the attribute name
    # differs between material implementations
    if hasattr(material, 'image'):
        img = material.image
    else:
        img = material.baseColorTexture
    if img is None:
        return None
    # use a PNG export to exchange into pyglet
    # probably a way to do this with a PIL converter
    with util.BytesIO() as f:
        # export PIL image as PNG into the in-memory buffer
        img.save(f, format='png')
        # rewind so pyglet reads from the start
        f.seek(0)
        # filename used for format guess only; data comes from `file`
        gl_image = pyglet.image.load(filename='.png', file=f)
    # turn image into pyglet texture
    texture = gl_image.get_texture()
    return texture
def matrix_to_gl(matrix):
    """
    Convert a numpy row- major homogenous transformation matrix
    to a flat column- major GLfloat transformation.

    Parameters
    -------------
    matrix : (4,4) float
      Row- major homogenous transform

    Returns
    -------------
    glmatrix : (16,) gl.GLfloat
      Transform in pyglet format
    """
    matrix = np.asanyarray(matrix, dtype=np.float64)
    if matrix.shape != (4, 4):
        raise ValueError('matrix must be (4,4)!')
    # transpose to column- major, then flatten to 16 values
    flat = matrix.T.flatten()
    # pack into a ctypes GLfloat array
    return (gl.GLfloat * 16)(*flat)
def vector_to_gl(array, *args):
    """
    Convert an array, plus any optional trailing scalars,
    into a flat vector of gl.GLfloat.
    """
    values = np.array(array)
    # append any extra scalar arguments onto the vector
    if len(args) > 0:
        values = np.append(values, args)
    return (gl.GLfloat * len(values))(*values)
def light_to_gl(light, transform, lightN):
    """
    Convert trimesh.scene.lighting.Light objects into
    args for gl.glLightFv calls

    Parameters
    --------------
    light : trimesh.scene.lighting.Light
      Light object to be converted to GL
    transform : (4, 4) float
      Transformation matrix of light
    lightN : int
      Result of gl.GL_LIGHT0, gl.GL_LIGHT1, etc

    Returns
    --------------
    multiarg : [tuple]
      List of args to pass to gl.glLightFv eg:
      [gl.glLightfb(*a) for a in multiarg]
    """
    # convert color to opengl: 0-255 channels scaled to 0.0-1.0
    gl_color = vector_to_gl(light.color.astype(np.float64) / 255.0)
    # light colors are expected to be RGBA
    assert len(gl_color) == 4
    # cartesian translation from matrix
    gl_position = vector_to_gl(transform[:3, 3])
    # create the different position and color arguments;
    # the same color is used for all three light components
    args = [(lightN, gl.GL_POSITION, gl_position),
            (lightN, gl.GL_SPECULAR, gl_color),
            (lightN, gl.GL_DIFFUSE, gl_color),
            (lightN, gl.GL_AMBIENT, gl_color)]
    return args
|
{"hexsha": "5a5aba2baaf9f7bbc4d2dbdf3245cf7e1c3daf21", "size": 10778, "ext": "py", "lang": "Python", "max_stars_repo_path": "trimesh/rendering.py", "max_stars_repo_name": "LinJiarui/trimesh", "max_stars_repo_head_hexsha": "5f925bbab447e733d6f1ebf0956b202d18271ee1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-05-22T13:56:05.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-22T13:56:05.000Z", "max_issues_repo_path": "trimesh/rendering.py", "max_issues_repo_name": "LinJiarui/trimesh", "max_issues_repo_head_hexsha": "5f925bbab447e733d6f1ebf0956b202d18271ee1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "trimesh/rendering.py", "max_forks_repo_name": "LinJiarui/trimesh", "max_forks_repo_head_hexsha": "5f925bbab447e733d6f1ebf0956b202d18271ee1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.2086720867, "max_line_length": 78, "alphanum_fraction": 0.5883280757, "include": true, "reason": "import numpy", "num_tokens": 2592}
|
#Imports
import os, sys
import base64
import glob
import time, sched
import datetime
from datetime import timezone
from datetime import timedelta
from collections import OrderedDict
import numpy as np
import pandas as pd
import socket
import psycopg2
import subprocess
import pytz
import json
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from bokeh.io import curdoc, save, export_png # , output_file, save
from bokeh.models import (TextInput, ColumnDataSource, DateFormatter, RadioGroup,CheckboxButtonGroup,Paragraph, Button, TextAreaInput, Select,CheckboxGroup, RadioButtonGroup, DateFormatter,CheckboxGroup)
from bokeh.models.widgets.markups import Div
from bokeh.layouts import layout, column, row
from bokeh.models.widgets import Panel, Tabs, FileInput
from bokeh.models.widgets.tables import DataTable, TableColumn
from bokeh.plotting import figure
import logging
from astropy.time import TimezoneInfo
import astropy.units.si as u
from util import sky_calendar
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
sys.path.append(os.getcwd())
sys.path.append('./ECLAPI-8.0.12/lib')
import nightlog as nl
from layout import Layout
class Report(Layout):
def __init__(self):
    """Set up shared state for the night log report app: timezone and
    date formatters, the database connection for the detected site,
    night log directories from the environment, and the bokeh widgets
    used across report pages.
    """
    Layout.__init__(self)
    self.test = False
    # report_type is set later by subclasses/pages; None until then
    self.report_type = None
    # Kitt Peak local time is UTC-7
    self.kp_zone = TimezoneInfo(utc_offset=-7*u.hour)
    self.datefmt = DateFormatter(format="%m/%d/%Y %H:%M:%S")
    self.timefmt = DateFormatter(format="%m/%d %H:%M")
    # Figure out where the App is being run: KPNO or NERSC,
    # based on the hostname, and connect to the matching replica DB.
    hostname = socket.gethostname()
    ip_address = socket.gethostbyname(hostname)
    if 'desi' in hostname:
        self.location = 'kpno'
        self.conn = psycopg2.connect(host="desi-db", port="5442", database="desi_dev", user="desi_reader", password="reader")
    elif 'app' in hostname: #this is not true. Needs to change.
        self.location = 'nersc'
        self.conn = psycopg2.connect(host="db.replicator.dev-cattle.stable.spin.nersc.org", port="60042", database="desi_dev", user="desi_reader", password="reader")
    else:
        # NOTE(review): fallback assumes NERSC but opens no DB
        # connection — confirm downstream code tolerates that.
        self.location = 'nersc'
    # debug print left in; nightwatch and night log directories
    # come from the environment
    print(os.environ['NL_DIR'])
    self.nw_dir = os.environ['NW_DIR']
    self.nl_dir = os.environ['NL_DIR']
    # static instructional text shown on the report pages
    self.intro_subtitle = Div(text="Connect to Night Log", css_classes=['subt-style'])
    self.time_note = Div(text="<b> Note: </b> Enter all times as HH:MM (18:18 = 1818 = 6:18pm) in Kitt Peak local time. Either enter the time or hit the <b> Now </b> button if it just occured.", css_classes=['inst-style'])
    self.exp_info = Div(text="Mandatory fields have an asterisk*.", css_classes=['inst-style'],width=500)
    self.img_upinst = Div(text="Include images in the Night Log by uploading a png image from your local computer. Select file, write a comment and click Add", css_classes=['inst-style'], width=1000)
    self.img_upinst2 = Div(text=" Choose image to include with comment: ", css_classes=['inst-style'])
    # PNG upload widgets, each wired to its own change handler
    self.img_upload = FileInput(accept=".png")
    self.img_upload.on_change('value', self.upload_image)
    self.img_upload_comments_os = FileInput(accept=".png")
    self.img_upload_comments_os.on_change('value', self.upload_image_comments_os)
    self.img_upload_comments_dqs = FileInput(accept=".png")
    self.img_upload_comments_dqs.on_change('value', self.upload_image_comments_dqs)
    self.img_upload_problems = FileInput(accept=".png")
    self.img_upload_problems.on_change('value', self.upload_image_problems)
    # per-night state, populated once a night log is connected
    self.nl_file = None
    self.milestone_time = None
    self.plan_time = None
    self.full_time = None
    self.DESI_Log = None
    self.save_telem_plots = False
    self.buffer = Div(text=' ')
    self.my_name = 'None'
    self.logger = logging.getLogger(__name__)
    self.logger.setLevel(logging.INFO)
def clear_input(self, items):
    """ After submitting something to the log, this will clear the form.
    Accepts either a single widget or a list of widgets; each widget's
    `value` is reset to a single space.
    """
    if not isinstance(items, list):
        items = [items]
    for widget in items:
        widget.value = ' '
def get_exposure_list(self):
    """Refresh the exposure dropdown from the night's raw-data directory.

    Walks NW_DIR/<night> for per-exposure subdirectories, sorts the ids
    descending, and keeps the current selection when one exists.  Any
    failure (e.g. missing directory) empties the dropdown instead.
    """
    try:
        current_exp = self.exp_select.value
        dir_ = os.path.join(self.nw_dir,self.night)
        exposures = []
        for path, subdirs, files in os.walk(dir_):
            for s in subdirs:
                exposures.append(s)
        # int() strips leading zeros from the directory names.
        # NOTE(review): np.sort on strings is lexicographic, so ids of
        # different lengths may not sort numerically — confirm intended.
        x = list([str(int(e)) for e in list(exposures)])
        x = np.sort(x)[::-1]
        self.exp_select.options = list(x)
        if current_exp in ['',' ',np.nan,None]:
            self.exp_select.value = x[0]
        else:
            self.exp_select.value = current_exp
    except:
        # No exposures yet (or directory missing): leave dropdown empty.
        self.exp_select.options = []
def update_nl_list(self):
    """Populate the date dropdown with the 10 most recent night dirs.

    Only directory names under `self.nl_dir` that parse as integers
    (YYYYMMDD-style) are kept; the newest becomes the default value.
    """
    days = [f for f in os.listdir(self.nl_dir) if os.path.isdir(os.path.join(self.nl_dir,f))]
    days_ = []
    for day in days:
        try:
            int(day)
            days_.append(day)
        except:
            # Non-numeric directory names are not obsdays; skip them.
            pass
    init_nl_list = np.sort([day for day in days_])[::-1][0:10]
    self.date_init.options = list(init_nl_list)
    self.date_init.value = init_nl_list[0]
def select_exp(self, attr, old, new):
    # Bokeh on_change callback: mirror the dropdown selection into the
    # free-entry exposure field so both widgets agree.
    self.exp_enter.value = self.exp_select.value
def add_all_to_bad_list(self):
    """Mark the entire selected exposure as bad, then reset the form."""
    self.bad_all = True
    # Restore the plain submit button in place of the per-camera UI.
    self.exp_layout_1.children[11] = self.exp_btn
    self.bad_exp_add()
    self.exp_alert.text = 'The whole exposure {} has been added to the bad exposure list'.format(self.bad_exp_val)
    self.clear_input([self.exp_time, self.exp_enter, self.exp_select, self.exp_comment])
def add_some_to_bad_list(self):
    """Flag only part of an exposure: swap in the per-camera selection UI."""
    self.bad_all = False
    self.exp_layout_1.children[11] = self.bad_layout_2
def get_nightsum(self):
    """Display the NightSummary HTML for the date entered by the user.

    Scans the night-log tree for files whose name contains
    'NightSummary', keyed by the date encoded in their directory name.
    """
    ns_date = self.ns_date_input.value
    ns = {}
    ns_html = ''
    for dir_, sdir, f in os.walk(self.nl_dir):
        for x in f:
            if 'NightSummary' in x:
                # The parent directory name encodes the obsday (YYYYMMDD).
                date = dir_.split('/')[-1]
                ns[date] = os.path.join(dir_,x)
    try:
        filen = ns[ns_date]
        ns_html += open(filen).read()
        self.ns_html.text = ns_html
    except:
        # Missing key or unreadable file: show a friendly message.
        self.ns_html.text = 'Cannot find NightSummary for this date'
def ns_next_date(self):
    """Advance the night-summary date by one day and re-render."""
    current_date = datetime.datetime.strptime(self.ns_date_input.value,'%Y%m%d')
    next_night = current_date + timedelta(days=1)
    self.ns_date_input.value = next_night.strftime('%Y%m%d')
    self.get_nightsum()
def ns_last_date(self):
    """Step the night-summary date back by one day and re-render."""
    current_date = datetime.datetime.strptime(self.ns_date_input.value,'%Y%m%d')
    last_night = current_date - timedelta(days=1)
    self.ns_date_input.value = last_night.strftime('%Y%m%d')
    self.get_nightsum()
def get_time(self, time):
    """Convert a user-entered time string to 'YYYYmmddTHH:MM' on the obsday.

    Accepts %H:%M, %H%M or %I%M%p formats (Kitt Peak local time).
    Times before noon are assumed to belong to the morning after
    ``self.night``; later times to the evening of ``self.night``.
    Returns the input unchanged when it cannot be parsed (the original
    left ``b`` unbound in that case and raised NameError).
    """
    date = datetime.datetime.strptime(self.night,'%Y%m%d')
    b = None
    for fmt in ('%H:%M', '%H%M', '%I%M%p'):
        try:
            b = datetime.datetime.strptime(time, fmt)
            break
        except (ValueError, TypeError):
            continue
    if b is None:
        print(time)
        print('need format %H%M, %H:%M, %H:%M%p')
        return time
    t = datetime.time(hour=b.hour, minute=b.minute)
    if t < datetime.time(hour=12,minute=0):
        # Early-morning times roll over to the calendar day after obsday.
        d = date + datetime.timedelta(days=1)
    else:
        d = date
    tt = datetime.datetime.combine(d, t)
    return tt.strftime("%Y%m%dT%H:%M")
def get_strftime(self, time):
    """Combine ``time`` (a datetime.time) with the current obsday.

    Returns the combined timestamp as 'YYYYmmddTHH:MM'.
    """
    obsday = datetime.datetime.strptime(self.night, "%Y%m%d")
    combined = datetime.datetime.combine(obsday, time)
    return combined.strftime("%Y%m%dT%H:%M")
def get_night(self):
    """Set self.night from the date widget and open its NightLog.

    Falls back to today's date when the widget is empty or invalid.
    """
    try:
        date = datetime.datetime.strptime(self.date_init.value, '%Y%m%d')
    except:
        # No/invalid selection yet: default to today.
        date = datetime.datetime.now().date()
    self.night = date.strftime("%Y%m%d")
    self.DESI_Log = nl.NightLog(self.night, self.location, self.logger)
    self.logger.info('Obsday is {}'.format(self.night))
def _dec_to_hm(self,hours):
#dec in seconds
seconds = hours*3600
hour = seconds // 3600
minutes = (seconds % 3600) // 60
sec = seconds % 60
str_ = '{}:{}'.format(int(hours), int(minutes))
return str_
def _hm_to_dec(self,hm):
#hm is a str H:M
tt = datetime.datetime.strptime(hm,'%H:%M')
dt = tt - datetime.datetime.strptime('00:00','%H:%M')
seconds = dt.total_seconds()
dec = seconds/3600
return dec
def connect_log(self):
    """Connect to Existing Night Log with Input Date.

    Resolves the obsday, creates the log directories on first use,
    installs the tab layout matching the observer role (LO/SO/NObs),
    and pre-fills the forms from any previously saved metadata,
    contributor and time-use files.
    """
    self.get_night()
    if not os.path.exists(self.DESI_Log.obs_dir):
        for dir_ in [self.DESI_Log.obs_dir, self.DESI_Log.nobs_dir, self.DESI_Log.image_dir]:
            os.makedirs(dir_)
    self.connect_txt.text = 'Connected to Night Log for {}'.format(self.night)
    #Load appropriate layout for each observer
    self.observer = self.obs_type.active #0=LO; 1=SO
    if self.observer == 0:
        self.title.text = 'DESI Nightly Intake - Lead Observer'
        self.layout.tabs = [self.intro_tab, self.plan_tab, self.milestone_tab_0, self.exp_tab_0, self.prob_tab, self.weather_tab_0, self.check_tab, self.nl_tab_0, self.ns_tab]
        # time_tabs maps tab index -> its time input widget (None = no time field)
        self.time_tabs = [None, None, None, self.exp_time, self.prob_time, None, None, None]
        self.connect_txt.text = 'Connected to Night Log for {}'.format(self.night)
        self.report_type = 'LO'
    elif self.observer == 1:
        self.title.text = 'DESI Nightly Intake - Support Observer'
        self.layout.tabs = [self.intro_tab, self.milestone_tab_1, self.exp_tab_1, self.prob_tab, self.weather_tab_1, self.nl_tab_1, self.ns_tab]
        self.time_tabs = [None, None, self.exp_time, self.prob_time, None, None, None]
        self.connect_txt.text = 'Connected to Night Log for {}'.format(self.night)
        self.report_type = 'SO'
    elif self.observer == 2:
        self.title.text = 'DESI Nightly Intake - Non-Observer'
        self.layout.tabs = [self.intro_tab, self.exp_tab_2, self.prob_tab_1, self.weather_tab_1, self.nl_tab_1, self.ns_tab]
        self.time_tabs = [None, self.exp_time, self.prob_time, None, None, None]
        self.connect_txt.text = 'Connected to Night Log for {}'.format(self.night)
        self.report_type = 'NObs'
    else:
        self.connect_txt.text = 'Please identify if you are an observer'
        self.report_type = 'NObs'
    #Connec to NightLog
    self.nl_file = self.DESI_Log.nightlog_html
    self.nl_subtitle.text = "Current DESI Night Log: {}".format(self.nl_file)
    # Pre-fill observer metadata (names, twilight times) if already saved.
    meta_dict_file = self.DESI_Log._open_kpno_file_first(self.DESI_Log.meta_json)
    if os.path.exists(meta_dict_file):
        try:
            meta_dict = json.load(open(meta_dict_file,'r'))
            plan_txt_text="https://desi.lbl.gov/trac/wiki/DESIOperations/ObservingPlans/OpsPlan{}".format(self.night)
            self.plan_txt.text = '<a href={}>Tonights Plan Here</a>'.format(plan_txt_text)
            self.so_name_1.value = meta_dict['so_1_firstname']+' '+meta_dict['so_1_lastname']
            self.so_name_2.value = meta_dict['so_2_firstname']+' '+meta_dict['so_2_lastname']
            self.LO_1.value = meta_dict['LO_firstname_1']+' '+meta_dict['LO_lastname_1']
            self.LO_2.value = meta_dict['LO_firstname_2']+' '+meta_dict['LO_lastname_2']
            self.OA.value = meta_dict['OA_firstname']+' '+meta_dict['OA_lastname']
            self.plots_start = meta_dict['dusk_10_deg']
            self.plots_end = meta_dict['dawn_10_deg']
            self.display_current_header()
        except Exception as e:
            self.connect_txt.text = 'Error with Meta Data File: {}'.format(e)
    else:
        # No metadata yet: show the update form instead of the init button.
        self.init_layout.children[10] = self.update_layout
        self.update_log_status = True
    contributer_file = self.DESI_Log._open_kpno_file_first(self.DESI_Log.contributer_file)
    if os.path.exists(contributer_file):
        try:
            cont_txt = ''
            f = open(contributer_file, "r")
            for line in f:
                cont_txt += line
            self.contributer_list.value = cont_txt
        except Exception as e:
            self.connect_txt.text = 'Error with Contributer File: {}'.format(e)
    time_use_file = self.DESI_Log._open_kpno_file_first(self.DESI_Log.time_use)
    if os.path.exists(time_use_file):
        try:
            df = pd.read_csv(time_use_file)
            data = df.iloc[0]
            self.obs_time.value = self._dec_to_hm(data['obs_time'])
            self.test_time.value = self._dec_to_hm(data['test_time'])
            self.inst_loss_time.value = self._dec_to_hm(data['inst_loss'])
            self.weather_loss_time.value = self._dec_to_hm(data['weather_loss'])
            self.tel_loss_time.value = self._dec_to_hm(data['tel_loss'])
            self.total_time.text = 'Time Documented (hrs): {}'.format(self._dec_to_hm(data['total']))
            # NOTE(review): meta_dict is only bound when the metadata file
            # existed above — this raises NameError otherwise; confirm.
            self.full_time = (datetime.datetime.strptime(meta_dict['dawn_18_deg'], '%Y%m%dT%H:%M') - datetime.datetime.strptime(meta_dict['dusk_18_deg'], '%Y%m%dT%H:%M')).seconds/3600
            self.full_time_text.text = 'Total time between 18 deg. twilights (hrs): {}'.format(self._dec_to_hm(self.full_time))
        except Exception as e:
            self.milestone_alert.text = 'Issue with Time Use Data: {}'.format(e)
    self.current_nl()
    self.get_exposure_list()
def nonobs_entry_exp(self):
    """Record the non-observer's name and swap in the full exposure tab."""
    self.my_name = str(self.nonobs_input_exp.value)
    self.layout.tabs[1] = self.exp_tab_0
def nonobs_entry_prob(self):
    """Record the non-observer's name and swap in the full problem tab."""
    self.my_name = str(self.nonobs_input_prob.value)
    self.layout.tabs[2] = self.prob_tab
def add_observer_info(self):
    """ Initialize Night Log with Input Date.

    When updating, collects observer names from the form, looks up the
    night's ephemerides (twilight/moon times), writes them to the
    NightLog, and re-renders the header.  Acts as a toggle between the
    init button and the update form via `self.update_log_status`.
    """
    if self.update_log_status:
        meta = OrderedDict()
        # Split each "First Last..." entry at the first space.
        meta['LO_firstname_1'], meta['LO_lastname_1'] = self.LO_1.value.split(' ')[0], ' '.join(self.LO_1.value.split(' ')[1:])
        meta['LO_firstname_2'], meta['LO_lastname_2'] = self.LO_2.value.split(' ')[0], ' '.join(self.LO_2.value.split(' ')[1:])
        meta['so_1_firstname'], meta['so_1_lastname'] = self.so_name_1.value.split(' ')[0], ' '.join(self.so_name_1.value.split(' ')[1:])
        meta['so_2_firstname'], meta['so_2_lastname'] = self.so_name_2.value.split(' ')[0], ' '.join(self.so_name_2.value.split(' ')[1:])
        meta['OA_firstname'], meta['OA_lastname'] = self.OA.value.split(' ')[0], ' '.join(self.OA.value.split(' ')[1:])
        # Ephemerides for the obsday (sun/moon rise-set, twilights).
        eph = sky_calendar(self.night)
        meta['time_sunset'] = eph['sunset']
        meta['time_sunrise'] = eph['sunrise']
        meta['time_moonrise'] = eph['moonrise']
        meta['time_moonset'] = eph['moonset']
        meta['illumination'] = eph['illumination']
        meta['dusk_10_deg'] = eph['dusk_ten']
        meta['dusk_12_deg'] = eph['dusk_nautical']
        meta['dusk_18_deg'] = eph['dusk_astronomical']
        meta['dawn_18_deg'] = eph['dawn_astronomical']
        meta['dawn_12_deg'] = eph['dawn_nautical']
        meta['dawn_10_deg'] = eph['dawn_ten']
        # Hours between astronomical twilights = total observable time.
        self.full_time = (datetime.datetime.strptime(meta['dawn_18_deg'], '%Y%m%dT%H:%M') - datetime.datetime.strptime(meta['dusk_18_deg'], '%Y%m%dT%H:%M')).seconds/3600
        self.full_time_text.text = 'Total time between 18 deg. twilights (hrs): {}'.format(self._dec_to_hm(self.full_time))
        self.plots_start = meta['dusk_10_deg']
        self.plots_end = meta['dawn_10_deg']
        self.DESI_Log.get_started_os(meta)
        self.connect_txt.text = 'Night Log Observer Data is Updated'
        self.DESI_Log.write_intro()
        self.display_current_header()
        self.update_log_status = False
        self.intro_layout.children[9] = self.init_btn
    else:
        # Toggle back to edit mode.
        self.intro_layout.children[9] = self.update_layout
        self.update_log_status = True
def display_current_header(self):
    """Render the night's header HTML into the intro text widget.

    Reads the header file resolved by the NightLog helper and prefixes
    it with a 'NightLog Info' heading for the current obsday.
    """
    path = self.DESI_Log._open_kpno_file_first(self.DESI_Log.header_html)
    intro = '<h2> NightLog Info: {}</h2>'.format(self.night)
    # 'with' guarantees the handle is closed; the original accessed the
    # `.closed` attribute without calling close(), leaking the handle.
    with open(path, 'r') as nl_file:
        for line in nl_file:
            intro = intro + line + '\n'
    self.intro_txt.text = intro
def current_nl(self):
    """Regenerate and display the current Night Log HTML.

    Finalizes the night via the NightLog helper, loads the HTML into
    the display widget, refreshes the exposure and weather tables, and
    attempts the telemetry plots (best-effort).  Always returns True;
    the commented-out except path below suggests False was once
    returned when no log was connected — TODO confirm intended contract.
    """
    now = datetime.datetime.now()
    self.DESI_Log.finish_the_night()
    path = self.DESI_Log.nightlog_html
    nl_txt = ''
    # Context manager closes the file; the original accessed `.closed`
    # without calling close(), leaking the handle on every refresh.
    with open(path, 'r') as nl_file:
        for line in nl_file:
            nl_txt += line
    nl_txt += '<h3> All Exposures </h3>'
    self.nl_text.text = nl_txt
    self.nl_alert.text = 'Last Updated on this page: {}'.format(now)
    self.nl_subtitle.text = "Current DESI Night Log: {}".format(path)
    self.get_exp_list()
    self.get_weather()
    try:
        self.make_telem_plots()
        return True
    except:
        # Telemetry plotting failures are not fatal for the page refresh.
        return True
def get_exp_list(self):
    """Query tonight's exposures from the DB into the table source.

    Times are converted to Kitt Peak local (US/Arizona); the list is
    shown newest-first and also saved ascending to the NightLog CSV.
    """
    try:
        exp_df = pd.read_sql_query(f"SELECT * FROM exposure WHERE night = '{self.night}'", self.conn)
        if len(exp_df.date_obs) > 0:
            time = exp_df.date_obs.dt.tz_convert('US/Arizona')
            exp_df['date_obs'] = time
            self.explist_source.data = exp_df[['date_obs','id','tileid','program','sequence','flavor','exptime','airmass','seeing']].sort_values(by='id',ascending=False)
            exp_df = exp_df.sort_values(by='id')
            exp_df.to_csv(self.DESI_Log.explist_file, index=False)
        else:
            self.exptable_alert.text = f'No exposures available for night {self.night}'
    except Exception as e:
        self.exptable_alert.text = 'Cannot connect to Exposure Data Base. {}'.format(e)
def get_weather(self):
    """Load the saved weather CSV (if any) into the weather data source.

    Parses the 'Time' column ('YYYYmmddTHH:MM') into datetimes and
    sorts chronologically.  Does nothing when no file exists yet.
    """
    weather_file = self.DESI_Log.weather
    if not os.path.exists(weather_file):
        return
    obs_df = pd.read_csv(weather_file)
    obs_df['Time'] = [datetime.datetime.strptime(stamp, "%Y%m%dT%H:%M") for stamp in obs_df['Time']]
    self.weather_source.data = obs_df.sort_values(by='Time')
def get_telem_list(self, df, l, item):
    """Extract ``item`` from each record in column ``l`` of ``df``.

    Records that cannot be indexed (e.g. None rows or missing keys)
    contribute None instead.
    """
    def pick(record):
        try:
            return record[item]
        except:
            return None
    return [pick(record) for record in list(df[l])]
def make_telem_plots(self):
    """Build the telemetry data source (and optionally the PNG plots).

    Queries all exposures between the 10-degree twilights, collects
    per-exposure telescope/tower telemetry into `self.telem_source`,
    and — when `self.save_telem_plots` is set — renders an 8-panel
    matplotlib summary to the NightLog's telemetry plot file.
    """
    start = datetime.datetime.strptime(self.plots_start, "%Y%m%dT%H:%M")
    start_utc = start.astimezone(tz=timezone.utc).strftime('%Y-%m-%d %H:%M:%S')
    end = datetime.datetime.strptime(self.plots_end, "%Y%m%dT%H:%M")
    end_utc = end.astimezone(tz=timezone.utc).strftime('%Y-%m-%d %H:%M:%S')
    exp_df = pd.read_sql_query(f"SELECT * FROM exposure WHERE date_obs > '{start_utc}' AND date_obs < '{end_utc}'", self.conn) #night = '{self.night}'", self.conn)
    telem_data = pd.DataFrame(columns =
        ['time','exp','mirror_temp','truss_temp','air_temp','temp','humidity','wind_speed','airmass','exptime','seeing','tput','skylevel'])
    if len(exp_df) > 0:
        exp_df.sort_values('date_obs',inplace=True)
        # Display times in Kitt Peak local (US/Arizona).
        telem_data.time = exp_df.date_obs.dt.tz_convert('US/Arizona')
        telem_data.exp = exp_df.id
        telem_data.mirror_temp = self.get_telem_list(exp_df, 'telescope','mirror_temp') #[r['mirror_temp'] for r in list(exp_df['telescope'])] #['mirror_temp']
        telem_data.truss_temp = self.get_telem_list(exp_df, 'telescope','truss_temp') #[r['truss_temp'] for r in list(exp_df['telescope'])] #exp_df['telescope']['truss_temp']
        telem_data.air_temp = self.get_telem_list(exp_df, 'telescope','air_temp')#[r['air_temp'] for r in list(exp_df['telescope'])] #['air_temp']
        telem_data.temp = self.get_telem_list(exp_df, 'tower','temperature') #[r['temperature'] for r in list(exp_df['tower'])] #['temperature']
        telem_data.humidity = self.get_telem_list(exp_df, 'tower','humidity') #[r['humidity'] for r in list(exp_df['tower'])] #['humidity']
        telem_data.wind_speed = self.get_telem_list(exp_df, 'tower','wind_speed') #[r['wind_speed'] for r in list(exp_df['tower'])] #['wind_speed']
        telem_data.airmass = exp_df.airmass
        telem_data.exptime = exp_df.exptime
        telem_data.seeing = exp_df.seeing
        # Transparency lives inside the per-exposure 'etc' record.
        tput = []
        for x in exp_df['etc']:
            if x is not None:
                tput.append(x['transp'])
            else:
                tput.append(None)
        telem_data.tput = tput #exp_df['etc']['transp']
        telem_data.skylevel = exp_df.skylevel
        self.telem_source.data = telem_data
        #export_png(self.bk_plots)
        if self.save_telem_plots:
            plt.style.use('ggplot')
            plt.rcParams.update({'axes.labelsize': 'small'})
            from matplotlib.pyplot import cm
            color=iter(cm.tab10(np.linspace(0,1,8)))
            fig = plt.figure(figsize=(10,15))
            # One row per telemetry quantity, sharing the time axis.
            ax1 = fig.add_subplot(8,1,1)
            ax1.scatter(exp_df.date_obs.dt.tz_convert('US/Arizona'), self.get_telem_list(exp_df,'telescope','mirror_temp'), s=10, label='mirror temp')
            ax1.scatter(exp_df.date_obs.dt.tz_convert('US/Arizona'), self.get_telem_list(exp_df,'telescope','truss_temp'), s=10, label='truss temp')
            ax1.scatter(exp_df.date_obs.dt.tz_convert('US/Arizona'), self.get_telem_list(exp_df,'telescope','air_temp'), s=10, label='air temp')
            ax1.set_ylabel("Telescope Temperature (C)")
            ax1.legend()
            ax1.grid(True)
            ax1.tick_params(labelbottom=False)
            ax2 = fig.add_subplot(8,1,2, sharex = ax1)
            c=next(color)
            ax2.scatter(exp_df.date_obs.dt.tz_convert('US/Arizona'), self.get_telem_list(exp_df,'tower','humidity'), s=10, color=c, label='humidity')
            ax2.set_ylabel("Humidity %")
            ax2.grid(True)
            ax2.tick_params(labelbottom=False)
            ax3 = fig.add_subplot(8,1,3, sharex=ax1)
            c=next(color)
            ax3.scatter(exp_df.date_obs.dt.tz_convert('US/Arizona'), self.get_telem_list(exp_df,'tower','wind_speed'), s=10, color=c, label='wind speed')
            ax3.set_ylabel("Wind Speed (mph)")
            ax3.grid(True)
            ax3.tick_params(labelbottom=False)
            ax4 = fig.add_subplot(8,1,4, sharex=ax1)
            c=next(color)
            ax4.scatter(exp_df.date_obs.dt.tz_convert('US/Arizona'), exp_df.airmass, s=10, color=c, label='airmass')
            ax4.set_ylabel("Airmass")
            ax4.grid(True)
            ax4.tick_params(labelbottom=False)
            ax5 = fig.add_subplot(8,1,5, sharex=ax1)
            c=next(color)
            ax5.scatter(exp_df.date_obs.dt.tz_convert('US/Arizona'), exp_df.exptime, s=10, color=c, label='exptime')
            ax5.set_ylabel("Exposure time (s)")
            ax5.grid(True)
            ax5.tick_params(labelbottom=False)
            ax6 = fig.add_subplot(8,1,6,sharex=ax1)
            c=next(color)
            ax6.scatter(exp_df.date_obs.dt.tz_convert('US/Arizona'), exp_df.seeing, s=10, color=c, label='seeing')
            ax6.set_ylabel("Seeing")
            ax6.grid(True)
            ax6.tick_params(labelbottom=False)
            ax7 = fig.add_subplot(8,1,7,sharex=ax1)
            c=next(color)
            ax7.scatter(exp_df.date_obs.dt.tz_convert('US/Arizona'), tput, s=10, color=c, label='transparency')
            ax7.set_ylabel("Transparency (%)")
            ax7.grid(True)
            ax7.tick_params(labelbottom=False)
            ax8 = fig.add_subplot(8,1,8,sharex=ax1)
            c=next(color)
            ax8.scatter(exp_df.date_obs.dt.tz_convert('US/Arizona'), exp_df.skylevel, s=10, color=c, label='Sky Level')
            ax8.set_ylabel("Sky level (AB/arcsec^2)")
            ax8.grid(True)
            ax8.set_xlabel("Local Time (MST)")
            ax8.xaxis.set_major_formatter(mdates.DateFormatter('%m/%d %H:%M', tz=pytz.timezone("US/Arizona")))
            ax8.tick_params(labelrotation=45)
            fig.suptitle("Telemetry for obsday {}".format(self.night),fontsize=14)
            plt.subplots_adjust(top=0.85)
            fig.tight_layout()
            plt.savefig(self.DESI_Log.telem_plots_file)
            # One-shot flag: only save plots when explicitly requested.
            self.save_telem_plots = False
def exp_to_html(self):
    """Render the night's saved exposure-list CSV as an HTML table.

    Columns are renamed to display headers; rows sorted newest-first.
    """
    exp_df = pd.read_csv(self.DESI_Log.explist_file)
    exp_df = exp_df[['date_obs','id','tileid','program','sequence','flavor','exptime','airmass','seeing']].sort_values(by='id',ascending=False)
    exp_df = exp_df.rename(columns={"date_obs": "Time", "id":
        "Exp","tileid":'Tile','program':'Program','sequence':'Sequence','flavor':'Flavor','exptime':'Exptime','airmass':'Airmass','seeing':'Seeing'})
    exp_html = exp_df.to_html()
    return exp_html
def bad_exp_add(self):
    """Record the current exposure on the bad-exposure list.

    When `self.bad_all` is set the whole exposure is flagged; otherwise
    the checked camera/petal toggles are encoded as a string like
    'a0b3' (camera letter + petal index).  Resets the form afterwards.
    """
    exp = self.bad_exp_val
    # Toggle-button index -> spectrograph camera letter.
    cams_dict = {0:'a',1:'b',2:'r',3:'z'}
    if self.bad_all:
        bad = True
        cameras = None
    elif self.bad_all == False:
        bad = False
        cameras = ''
        # One checkbox group per petal (0-9); build 'a0b3...' string.
        for i, cams in enumerate([self.bad_cams_0, self.bad_cams_1, self.bad_cams_2, self.bad_cams_3, self.bad_cams_4, self.bad_cams_5, self.bad_cams_6, self.bad_cams_7, self.bad_cams_8, self.bad_cams_9]):
            if len(cams.active) == 0:
                pass
            else:
                for c in cams.active:
                    cameras += '{}{}'.format(cams_dict[int(c)],i)
        self.exp_layout_1.children[11] = self.exp_btn
        self.exp_alert.text = 'Part of the exposure {} has been added to the bad exposure list'.format(exp)
    comment = self.bad_comment
    data = {}
    data['NIGHT'] = [self.night]
    data['EXPID'] = [exp]
    data['BAD'] = [bad]
    data['BADCAMS'] = [cameras]
    data['COMMENT'] = [comment]
    #self.bad_alert.text = 'Submitted Bad Exposure {} @ {}'.format(exp, datetime.datetime.now().strftime('%H:%M.%S'))
    # Reset all per-petal toggles for the next entry.
    self.bad_cams_0.active = []
    self.bad_cams_1.active = []
    self.bad_cams_2.active = []
    self.bad_cams_3.active = []
    self.bad_cams_4.active = []
    self.bad_cams_5.active = []
    self.bad_cams_6.active = []
    self.bad_cams_7.active = []
    self.bad_cams_8.active = []
    self.bad_cams_9.active = []
    self.clear_input([self.exp_time, self.exp_enter, self.exp_select, self.exp_comment])
    self.DESI_Log.add_bad_exp(data)
def plan_add_new(self):
    """Submit a brand-new plan item (no prior timestamp to update)."""
    self.plan_time = None
    self.plan_add()
def milestone_add_new(self):
    """Submit a brand-new milestone (no prior timestamp to update)."""
    self.milestone_time = None
    self.milestone_add()
def plan_add(self):
    """Write the plan form to the NightLog and clear the form.

    Reuses `self.plan_time` when editing an existing item; otherwise
    stamps the entry with the current time.
    """
    if self.plan_time is None:
        ts = datetime.datetime.now().strftime("%Y%m%dT%H:%M:%S")
    else:
        ts = self.plan_time
    data = [ts, self.plan_input.value]
    self.DESI_Log.add_input(data, 'plan')
    self.plan_alert.text = 'Last item input: {}'.format(self.plan_input.value)
    self.clear_input([self.plan_order, self.plan_input])
    self.plan_time = None
def milestone_add(self):
    """Write the milestone form to the NightLog and clear the form.

    Reuses `self.milestone_time` when editing an existing milestone;
    otherwise stamps the entry with the current time.
    """
    if self.milestone_time is None:
        ts = datetime.datetime.now().strftime("%Y%m%dT%H:%M:%S")
    else:
        ts = self.milestone_time
    data = [ts, self.milestone_input.value, self.milestone_exp_start.value, self.milestone_exp_end.value, self.milestone_exp_excl.value]
    self.DESI_Log.add_input(data,'milestone')
    self.milestone_alert.text = 'Last Milestone Entered: {}'.format(self.milestone_input.value)
    self.clear_input([self.milestone_input, self.milestone_exp_start, self.milestone_exp_end, self.milestone_exp_excl])
    self.milestone_time = None
def prob_add(self):
    """Adds problem to nightlog.

    Requires a time; attaches any pending uploaded image and credits
    the entry to the non-observer's name or the observer role.
    """
    # NOTE(review): `note` is only used by the commented-out except
    # handler below — confirm whether that path should be restored.
    note = ' '
    #try:
    if self.prob_time.value in [None, 'None'," ",""]:
        note = 'Enter a time'
    else:
        img_name, img_data, preview = self.image_uploaded('problem')
        if self.report_type == 'NObs':
            my_name = self.my_name
        else:
            my_name = self.report_type
        data = [my_name, self.get_time(self.prob_time.value.strip()), self.prob_input.value.strip(), self.prob_alarm.value.strip(),
            self.prob_action.value.strip()]
        self.DESI_Log.add_input(data, 'problem',img_name=img_name, img_data=img_data)
        self.prob_alert.text = "Last Problem Input: '{}' at {}".format(self.prob_input.value.strip(), self.prob_time.value.strip())
        self.clear_input([self.prob_time, self.prob_input, self.prob_alarm, self.prob_action])
    #except Exception as e:
    #    self.prob_alert.text = "Problem with your Input: {} - {}".format(note, e)
def exp_add(self):
    """Submit an exposure comment, keyed by time or exposure number.

    SO reports also carry a quality rating; a 'Bad' rating switches the
    layout to the bad-exposure flow instead of clearing the form.
    """
    quality = None
    if self.os_exp_option.active == 0: #Time
        if self.exp_time.value not in [None, 'None'," ", ""]:
            try:
                time = self.get_time(self.exp_time.value.strip())
                comment = self.exp_comment.value.strip()
                exp = None
            except Exception as e:
                self.exp_alert.text = 'There is something wrong with your input @ {}: {}'.format(datetime.datetime.now().strftime('%H:%M'),e)
        else:
            # NOTE(review): on this path `exp` stays unbound, so the
            # `data = [...]` line below raises NameError — confirm.
            self.exp_alert.text = 'Fill in the time'
    elif self.os_exp_option.active == 1: #Exposure
        if self.exp_option.active == 0:
            try:
                exp = int(float(self.exp_select.value))
            except Exception as e:
                self.exp_alert.text = "Problem with the Exposure you Selected @ {}: {}".format(datetime.datetime.now().strftime('%H:%M'), e)
        elif self.exp_option.active ==1:
            try:
                exp = int(float(self.exp_enter.value.strip()))
            except Exception as e:
                self.exp_alert.text = "Problem with the Exposure you Entered @ {}: {}".format(datetime.datetime.now().strftime('%H:%M'), e)
        comment = self.exp_comment.value.strip()
        time = self.get_time(datetime.datetime.now().strftime("%H:%M"))
    if self.report_type == 'SO':
        quality = self.quality_list[self.quality_btns.active]
    if self.report_type == 'NObs':
        your_name = self.my_name
    elif self.report_type in ['LO','SO']:
        your_name = self.report_type
    #try:
    img_name, img_data, preview = self.image_uploaded('comment')
    now = datetime.datetime.now().astimezone(tz=self.kp_zone).strftime("%H:%M")
    data = [self.get_time(now), exp, quality, self.exp_comment.value.strip(), your_name]
    self.DESI_Log.add_input(data, 'exp', img_name=img_name, img_data=img_data)
    self.exp_alert.text = 'Last Input was made @ {}: {}'.format(datetime.datetime.now().strftime("%H:%M"),self.exp_comment.value)
    #except Exception as e:
    #    self.exp_alert.text = 'Error with your Input @ {}: {}'.format(datetime.datetime.now().strftime('%H:%M'), e)
    if quality == 'Bad':
        # Keep the comment around so the bad-exposure flow can reuse it.
        self.exp_layout_1.children[11] = self.bad_layout_1
        self.bad_exp_val = exp
        self.bad_comment = self.exp_comment.value.strip()
    else:
        self.clear_input([self.exp_time, self.exp_enter, self.exp_comment])
def check_add(self):
    """add checklist time to Night Log.

    Only submits when every checklist item is ticked; otherwise shows
    a reminder.  The form and toggles are reset either way.
    """
    complete = self.checklist.active
    check_time = datetime.datetime.now().strftime("%Y%m%dT%H:%M")
    if len(complete) == len(self.checklist.labels):
        data = [self.report_type, check_time, self.check_comment.value]
        self.DESI_Log.add_input(data, 'checklist')
        # [-5:] shows just the HH:MM portion of the timestamp.
        self.check_alert.text = "Checklist last submitted at {}".format(check_time[-5:])
    else:
        self.check_alert.text = "Must complete all tasks before submitting checklist"
    self.clear_input(self.check_comment)
    self.checklist.active = []
def weather_add(self):
    """Adds table to Night Log.

    Records the weather description plus the latest telemetry readings;
    if the telemetry DB is unreachable, only the description is saved.
    """
    now = datetime.datetime.now().astimezone(tz=self.kp_zone).strftime("%Y%m%dT%H:%M")
    try:
        self.make_telem_plots()
        telem_df = pd.DataFrame(self.telem_source.data)
        this_data = telem_df.iloc[-1]
        desc = self.weather_desc.value
        # get_latest_val returns the last non-NaN reading of each series.
        temp = self.get_latest_val(telem_df.temp) #.dropna())[-1] #list(telem_df)[np.isfinite(list(telem_df.temp))][-1] #this_data.temp
        wind = self.get_latest_val(telem_df.wind_speed) #list(telem_df.wind_speed.dropna())[-1]
        humidity = self.get_latest_val(telem_df.humidity) #list(telem_df.humidity.dropna())[-1] #this_data.humidity
        seeing = self.get_latest_val(telem_df.seeing) #list(telem_df.seeing.dropna())[-1] #this_data.seeing
        tput = self.get_latest_val(telem_df.tput) #list(telem_df.tput.dropna())[-1]
        skylevel = self.get_latest_val(telem_df.skylevel) #list(telem_df.skylevel.dropna())[-1]
        data = [now, desc, temp, wind, humidity, seeing, tput, skylevel]
    except:
        # Telemetry unavailable: degrade to description-only entry.
        data = [now, self.weather_desc.value, None, None, None, None, None, None]
        self.weather_alert.text = 'Not connected to the telemetry DB. Only weather description will be recorded.'
    df = self.DESI_Log.add_input(data,'weather')
    self.clear_input([self.weather_desc])
    self.get_weather()
def get_latest_val(self, l):
    """Return the last entry of ``l`` after dropping NaNs.

    Falls back to NaN when the series is empty (or not filterable).
    """
    try:
        return l.dropna().iloc[-1]
    except:
        return np.nan
def image_uploaded(self, mode='comment'):
    """Collect a pending uploaded image (if any) for a comment/problem.

    Returns (img_name, img_data, preview): the server-side file name
    (tagged with the site location), the utf-8 encoded payload, and an
    HTML <img> preview snippet.  img_name/img_data are None when no
    image is pending for ``mode``.  Consumes the widget's filename so
    the same image is not attached twice.
    """
    img_data = None
    img_name = None
    if mode == 'comment':
        if self.exp_comment.value not in [None, ''] and hasattr(self, 'img_upload_comments_os') and self.img_upload_comments_os.filename not in [None,'','nan',np.nan]:
            img_data = self.img_upload_comments_os.value.encode('utf-8')
            input_name = os.path.splitext(str(self.img_upload_comments_os.filename))
            # splitext keeps the dot in the extension, so no literal '.'
            # here (the original produced names like 'pic_kpno..png').
            img_name = input_name[0] + '_{}'.format(self.location) + input_name[1]
            self.img_upload_comments_os.filename = None
    elif mode == 'problem':
        if hasattr(self, 'img_upload_problems') and self.img_upload_problems.filename not in [None, '',np.nan, 'nan']:
            img_data = self.img_upload_problems.value.encode('utf-8')
            input_name = os.path.splitext(str(self.img_upload_problems.filename))
            img_name = input_name[0] + '_{}'.format(self.location) + input_name[1]
            self.img_upload_problems.filename = None
    self.image_location_on_server = f'http://desi-www.kpno.noao.edu:8090/{self.night}/images/{img_name}'
    width=400
    height=400 #http://desi-www.kpno.noao.edu:8090/nightlogs
    preview = '<img src="%s" width=%s height=%s alt="Uploaded image %s">\n' % (self.image_location_on_server,str(width),str(height),img_name)
    return img_name, img_data, preview
def plan_delete(self):
    """Delete the currently loaded plan item (by its stored timestamp)."""
    time = self.plan_time
    self.DESI_Log.delete_item(time, 'plan')
    self.plan_alert.text = 'Deleted item @ {}: {}'.format(datetime.datetime.now().strftime('%H:%M'),self.plan_input.value)
    self.clear_input([self.plan_input, self.plan_order])
    self.plan_time = None
def milestone_delete(self):
    """Delete the currently loaded milestone (by its stored timestamp)."""
    time = self.milestone_time
    self.DESI_Log.delete_item(time, 'milestone')
    self.milestone_alert.text = 'Deleted item @ {}: {}'.format(datetime.datetime.now().strftime('%H:%M'), self.milestone_input.value)
    self.clear_input([self.milestone_input, self.milestone_load_num])
    self.milestone_time = None
def progress_delete(self):
    """Delete the progress/exposure entry at the time in the form."""
    time = self.get_time(self.exp_time.value.strip())
    self.DESI_Log.delete_item(time, 'progress', self.report_type)
    self.exp_alert.text = 'Deleted item @ {}: {}'.format(datetime.datetime.now().strftime('%H:%M'), self.exp_comment.value)
    self.clear_input([self.exp_time, self.exp_comment, self.exp_exposure_start])
def problem_delete(self):
    """Delete the problem entry at the time in the form."""
    time = self.get_time(self.prob_time.value.strip())
    self.DESI_Log.delete_item(time, 'problem',self.report_type)
    self.prob_alert.text = 'Deleted item @ {}: {}'.format(datetime.datetime.now().strftime('%H:%M'), self.prob_input.value)
    self.clear_input([self.prob_time, self.prob_input, self.prob_alarm, self.prob_action])
def plan_load(self):
    """Load a plan item by its order index into the form for editing.

    On success, remembers its timestamp in `self.plan_time` so a
    subsequent add updates rather than appends.
    """
    try:
        b, item = self.DESI_Log.load_index(self.plan_order.value, 'plan')
        if b:
            self.plan_input.value = str(item['Objective'])
            self.plan_time = item['Time']
        else:
            self.plan_alert.text = "That plan item doesn't exist yet. {}".format(item)
    except Exception as e:
        self.plan_alert.text = "Issue with loading that plan item: {}".format(e)
def milestone_load(self):
    """Load a milestone by index into the form for editing.

    On success, remembers its timestamp in `self.milestone_time` so a
    subsequent add updates rather than appends.
    """
    try:
        b, item = self.DESI_Log.load_index(int(self.milestone_load_num.value), 'milestone')
        if b:
            self.milestone_input.value = str(item['Desc'])
            self.milestone_exp_start.value = str(item['Exp_Start'])
            self.milestone_exp_end.value = str(item['Exp_Stop'])
            self.milestone_exp_excl.value = str(item['Exp_Excl'])
            self.milestone_time = item['Time']
        else:
            self.milestone_alert.text = "That milestone index doesn't exist yet. {}".format(item)
    except Exception as e:
        self.milestone_alert.text = "Issue with loading that milestone: {}".format(e)
def exposure_load(self):
    #Check if progress has been input with a given timestamp
    """Load an existing exposure entry (by its timestamp) into the form."""
    try:
        _exists, item = self.DESI_Log.load_timestamp(self.get_time(self.exp_time.value.strip()), self.report_type, 'exposure')
        if not _exists:
            self.exp_alert.text = 'This timestamp does not yet have an input from this user. {}'.format(item)
        else:
            self.exp_comment.value = str(item['Comment'])
            if str(item['Exp_Start']) not in ['', ' ','nan']:
                self.exp_enter.value = str(item['Exp_Start'])
                #self.loaded_exposure = True
                # Switch the widgets to "entered exposure number" mode.
                self.exp_option.active = 1
                self.os_exp_option.active = 1
            #self.exp_exposure_finish.value = str(item['Exp_End'])
    except Exception as e:
        self.exp_alert.text = "Issue with loading that exposure: {}".format(e)
def problem_load(self):
    #Check if progress has been input with a given timestamp
    """Load an existing problem entry (by its timestamp) into the form."""
    try:
        _exists, item = self.DESI_Log.load_timestamp(self.get_time(self.prob_time.value.strip()), self.report_type, 'problem')
        if not _exists:
            self.prob_alert.text = 'This timestamp does not yet have an input from this user. {}'.format(item)
        else:
            self.prob_input.value = str(item['Problem'])
            self.prob_alarm.value = str(item['alarm_id'])
            self.prob_action.value = str(item['action'])
    except Exception as e:
        self.prob_alert.text = "Issue with loading that problem: {}".format(e)
def add_contributer_list(self):
    """Save the contributor textarea to the NightLog's contributer file."""
    cont_list = self.contributer_list.value
    self.DESI_Log.add_contributer_list(cont_list)
def add_time(self):
    """Tally the time-use form and write it to the NightLog CSV.

    Each field may be entered as decimal hours or as 'H:M'; anything
    unparseable counts as 0.  The running total is shown in hours.
    """
    data = OrderedDict()
    time_items = OrderedDict({'obs_time':self.obs_time,'test_time':self.test_time,'inst_loss':self.inst_loss_time,
        'weather_loss':self.weather_loss_time,'tel_loss':self.tel_loss_time})
    total = 0
    for name, item in time_items.items():
        try:
            # First try plain decimal hours, e.g. '1.5'.
            data[name] = float(item.value)
            total += float(item.value)
        except:
            try:
                # Fall back to 'H:M' notation, e.g. '1:30'.
                dec = self._hm_to_dec(str(item.value))
                data[name] = dec
                total += float(dec)
            except:
                data[name] = 0
                total += 0
    data['18deg'] = self.full_time
    data['total'] = total
    self.total_time.text = 'Time Documented (hrs): {}'.format(str(self._dec_to_hm(total)))
    df = pd.DataFrame(data, index=[0])
    df.to_csv(self.DESI_Log.time_use, index=False)
def summary_add(self):
    """Save the summary text for the selected half of the night."""
    now = datetime.datetime.now().strftime("%H:%M")
    # half: toggle index selecting first/second half of the night.
    half = self.summary_option.active
    data = OrderedDict()
    data['SUMMARY_{}'.format(half)] = self.summary_input.value
    self.DESI_Log.add_summary(data)
    self.milestone_alert.text = 'Summary Information Entered at {}: {}'.format(now, self.summary_input.value)
    self.clear_input([self.summary_input])
def summary_load(self):
    """Load the saved summary for the selected half into the form."""
    half = self.summary_option.active
    f = self.DESI_Log.summary_file
    if os.path.exists(f):
        try:
            df = pd.read_csv(f)
            d = df.iloc[0]
            self.summary_input.value = d['SUMMARY_{}'.format(half)]
        except Exception as e:
            print('Issue loading summary: {}'.format(e))
    else:
        self.milestone_alert.text = 'That summary does not yet exist'
def upload_image(self, attr, old, new):
    """Bokeh on_change callback: log the uploaded image file name."""
    self.logger.info(f'Local image file upload: {self.img_upload.filename}')
def upload_image_comments_os(self, attr, old, new):
    """Bokeh on_change callback: log the OS-comment image file name."""
    self.logger.info(f'Local image file upload (OS comments): {self.img_upload_comments_os.filename}')
def upload_image_comments_other(self, attr, old, new):
    """Bokeh on_change callback: log the Other-comment image file name."""
    self.logger.info(f'Local image file upload (Other comments): {self.img_upload_comments_other.filename}')
def upload_image_comments_dqs(self, attr, old, new):
    """Bokeh on_change callback: log the DQS-comment image file name."""
    # Log label corrected: the original said "(Other comments)", a
    # copy-paste from the sibling callback, which made logs misleading.
    self.logger.info(f'Local image file upload (DQS comments): {self.img_upload_comments_dqs.filename}')
def upload_image_problems(self, attr, old, new):
    """Bokeh on_change callback: log the problem-report image file name."""
    # Log label corrected: the original said "(Other comments)", a
    # copy-paste from the sibling callback, which made logs misleading.
    self.logger.info(f'Local image file upload (problems): {self.img_upload_problems.filename}')
def time_is_now(self):
    """Fill the active tab's time field with the current KP local time.

    `self.time_tabs` maps tab index -> time widget; tabs with no time
    field hold None, in which case the assignment fails and the method
    returns the (None) widget harmlessly.
    """
    now = datetime.datetime.now().astimezone(tz=self.kp_zone).strftime("%H:%M")
    tab = self.layout.active
    time_input = self.time_tabs[tab]
    try:
        time_input.value = now
    except:
        # Active tab has no time widget.
        return time_input
def nl_submit(self):
    """Post tonight's Night Log to the eLog, merge bad exposures into the
    svn-tracked survey list, and email the night summary to the collaboration."""
    if not self.current_nl():
        self.nl_text.text = 'You cannot submit a Night Log to the eLog until you have connected to an existing Night Log or initialized tonights Night Log'
    else:
        self.logger.info("Starting Nightlog Submission Process")
        # ECLAPI is only installed on mountain machines; degrade with a UI message elsewhere.
        try:
            from ECLAPI import ECLConnection, ECLEntry
        except ImportError:
            ECLConnection = None
            self.nl_text.text = "Can't connect to eLog"
        # Read the rendered night-log HTML into a single string.
        f = self.DESI_Log._open_kpno_file_first(self.DESI_Log.nightlog_html)
        nl_file=open(f,'r')
        lines = nl_file.readlines()
        nl_html = ' '
        for line in lines:
            nl_html += line
        e = ECLEntry('Synopsis_Night', text=nl_html, textile=True)
        subject = 'Night Summary {}'.format(self.night)
        e.addSubject(subject)
        url = 'http://desi-www.kpno.noao.edu:8090/ECL/desi'
        user = 'dos'
        pw = 'dosuser'
        #make Paul's plot
        try:
            os.system("{}/bin/plotnightobs -n {}".format(os.environ['SURVEYOPSDIR'],self.night))
        except Exception as e:
            self.logger.info('Issues with Pauls plot: {}'.format(e))
        if self.test:
            pass
        else:
            # Post the entry; any non-200 response is treated as a failure.
            elconn = ECLConnection(url, user, pw)
            response = elconn.post(e)
            elconn.close()
            if response[0] != 200:
                raise Exception(response)
                # NOTE(review): unreachable after raise — confirm intended placement.
                self.submit_text.text = "You cannot post to the eLog on this machine"
        #Add bad exposures
        try:
            survey_dir = os.path.join(os.environ['NL_DIR'],'ops')
            bad_filen = 'bad_exp_list.csv'
            bad_path = os.path.join(survey_dir, bad_filen)
            bad_df = pd.read_csv(bad_path)
            # Merge tonight's bad exposures into the survey-wide list,
            # keeping the newest entry per EXPID, then commit to svn.
            new_bad = self.DESI_Log._combine_compare_csv_files(self.DESI_Log.bad_exp_list, bad=True)
            bad_df = pd.concat([bad_df, new_bad])
            bad_df = bad_df.drop_duplicates(subset=['EXPID'], keep='last')
            bad_df = bad_df.astype({"NIGHT":int, "EXPID": int,"BAD":bool,"BADCAMS":str,"COMMENT":str})
            bad_df.to_csv(bad_path,index=False)
            err1 = os.system('svn update --non-interactive {}'.format(bad_path))
            self.logger.info('SVN added bad exp list {}'.format(err1))
            err2 = os.system('svn commit --non-interactive -m "autocommit from night summary submission" {}'.format(bad_path))
            self.logger.info('SVN commited bad exp list {}'.format(err2))
        except Exception as e:
            self.logger.info('Cant post to the bad exp list: {}'.format(e))
        # Regenerate the night log with telemetry plots, then email it out.
        self.save_telem_plots = True
        self.current_nl()
        if self.test:
            self.email_nightsum(user_email = ["parfa30@gmail.com","parkerf@berkeley.edu"])
        else:
            self.email_nightsum(user_email = ["parfa30@gmail.com","satya.gontcho@gmail.com","desi-nightlog@desi.lbl.gov"])
        self.submit_text.text = "Night Log posted to eLog and emailed to collaboration at {}".format(datetime.datetime.now().strftime("%Y%m%d%H:%M")) + '</br>'
def email_nightsum(self,user_email = None):
    """Email the rendered night summary (exposure table and plots inlined)
    to the given address list, and save an HTML copy under root_dir."""
    try:
        self.make_telem_plots()
    except:
        self.logger.info("Something wrong with telem plots")
    sender = "noreply-ecl@noao.edu"
    # Create message container - the correct MIME type is multipart/alternative.
    msg = MIMEMultipart('html')
    msg['Subject'] = "Night Summary %s" % self.date_init.value #mjd2iso(mjd)
    msg['From'] = sender
    if len(user_email) == 1:
        msg['To'] = user_email[0]
    else:
        msg['To'] = ', '.join(user_email)
    # Create the body of the message (a plain-text and an HTML version).
    f = self.DESI_Log._open_kpno_file_first(self.DESI_Log.nightlog_html)
    nl_file=open(f,'r')
    lines = nl_file.readlines()
    nl_html = ""
    img_names = []
    for line in lines:
        nl_html += line
    # Add exposures
    if os.path.exists(self.DESI_Log.explist_file):
        exp_list = self.exp_to_html()
        nl_html += ("<h3 id='exposures'>Exposures</h3>")
        for line in exp_list:
            nl_html += line
    nl_text = MIMEText(nl_html, 'html')
    msg.attach(nl_text)
    # Keep an on-disk HTML copy alongside the emailed version.
    Html_file = open(os.path.join(self.DESI_Log.root_dir,'NightSummary{}.html'.format(self.night)),"w")
    Html_file.write(nl_html)
    # Add Paul's plot
    try:
        nightops = open(os.path.join(os.environ['DESINIGHTSTATS'],'nightstats{}.png'.format(self.night)),'rb').read()
        msgImage = MIMEImage(nightops)
        # Attach the image to the email and inline it (base64) in the saved HTML.
        data_uri = base64.b64encode(nightops).decode('utf-8')
        img_tag = '<img src="data:image/png;base64,%s" \>' % data_uri
        msgImage.add_header('Content-Disposition', 'attachment; filename=nightstats{}.png'.format(self.night))
        msg.attach(msgImage)
        Html_file.write(img_tag)
    except Exception as e:
        self.logger.info('Problem attachign pauls plot: {}'.format(e))
    # Add images
    if os.path.exists(self.DESI_Log.telem_plots_file):
        telemplot = open(self.DESI_Log.telem_plots_file, 'rb').read()
        msgImage = MIMEImage(telemplot)
        data_uri = base64.b64encode(telemplot).decode('utf-8')
        img_tag = '<img src="data:image/png;base64,%s" \>' % data_uri
        msgImage.add_header('Content-Disposition', 'attachment; filename=telem_plots_{}.png'.format(self.night))
        msg.attach(msgImage)
        Html_file.write(img_tag)
    Html_file.close()
    # NOTE(review): nl_file is never closed — relies on GC; consider a with-block.
    text = msg.as_string()
    # Send the message via local SMTP server.
    #yag = yagmail.SMTP(sender)
    #yag.send("parfa30@gmail.com",nl_html,self.DESI_Log.telem_plots_file)
    s = smtplib.SMTP('localhost')
    s.sendmail(sender, user_email, text)
    s.quit()
    self.logger.info("Email sent")
|
{"hexsha": "fcd5770a3afb534ad08b737f2011c576f9e7470a", "size": 50147, "ext": "py", "lang": "Python", "max_stars_repo_path": "dni/report.py", "max_stars_repo_name": "ClairePpt/desilo", "max_stars_repo_head_hexsha": "a3f64012d4aa899ed43cebfca06460172d20487d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dni/report.py", "max_issues_repo_name": "ClairePpt/desilo", "max_issues_repo_head_hexsha": "a3f64012d4aa899ed43cebfca06460172d20487d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 39, "max_issues_repo_issues_event_min_datetime": "2020-06-29T18:59:57.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-09T21:46:47.000Z", "max_forks_repo_path": "dni/report.py", "max_forks_repo_name": "ClairePpt/desilo", "max_forks_repo_head_hexsha": "a3f64012d4aa899ed43cebfca06460172d20487d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-11-14T19:29:58.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-14T19:29:58.000Z", "avg_line_length": 46.0064220183, "max_line_length": 226, "alphanum_fraction": 0.5993977706, "include": true, "reason": "import numpy,import astropy,from astropy", "num_tokens": 11916}
|
[STATEMENT]
lemma Disj_commute: "H \<turnstile> B OR A \<Longrightarrow> H \<turnstile> A OR B"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. H \<turnstile> B OR A \<Longrightarrow> H \<turnstile> A OR B
[PROOF STEP]
using DisjConj [of B A B] Ident [of B]
[PROOF STATE]
proof (prove)
using this:
B OR A IMP (B IMP B) IMP A OR B \<in> boolean_axioms
B IMP B \<in> boolean_axioms
goal (1 subgoal):
1. H \<turnstile> B OR A \<Longrightarrow> H \<turnstile> A OR B
[PROOF STEP]
by (metis Bool MP_same)
|
{"llama_tokens": 209, "file": "Goedel_HFSet_Semanticless_SyntaxN", "length": 2}
|
# Append `value` to SQLite table `name`, creating the table (and the requested
# single-column indices) on first use. With new=TRUE any existing table is
# dropped and rebuilt first.
write_to_table <- function(conn, name, value, indices=c(), new=F) {
  if (new && RSQLite::dbExistsTable(conn, name)) RSQLite::dbRemoveTable(conn, name)
  if (!RSQLite::dbExistsTable(conn, name)) {
    RSQLite::dbCreateTable(conn, name, value)
    for (idx in indices) {
      RSQLite::dbExecute(conn, sprintf('DROP INDEX IF EXISTS %s_%s', name, idx))
      RSQLite::dbExecute(conn, sprintf('CREATE INDEX %s_%s ON %s (%s);', name, idx, name, idx))
    }
  }
  RSQLite::dbAppendTable(conn, name, value)
}
# Serialise a tCorpus to one JSON row per document and append it to the
# 'tc' table, indexed by doc_id.
tc_to_db <- function(conn, tc) {
  message("\nSaving tokens to DB")
  json_rows = tc_to_json(tc)
  suppressWarnings(
    write_to_table(conn, 'tc', json_rows, indices='doc_id')
  )
}
# Return a logical vector that is TRUE for each of doc_ids NOT yet present in
# the 'tc' table (i.e. documents that still need processing). Despite the
# name, TRUE means "missing"; callers rely on this inverted meaning.
doc_exists <- function(conn, doc_ids) {
  if (RSQLite::dbExistsTable(conn, 'tc'))
    # NOTE(review): ids are pasted unquoted into the SQL — assumes numeric ids.
    in_db = RSQLite::dbGetQuery(conn, sprintf('select doc_id from tc where doc_id in (%s);', paste(doc_ids, collapse=',')))$doc_id
  else
    in_db = c()
  !as.character(doc_ids) %in% as.character(in_db)
}
# Read token/meta JSON rows back from the SQLite db (optionally restricted to
# doc_ids) and rebuild a corpustools tCorpus from them.
get_tc <- function(db_file, doc_ids=NULL) {
  conn <- RSQLite::dbConnect(RSQLite::SQLite(), db_file)
  if (is.null(doc_ids)) {
    tc = RSQLite::dbReadTable(conn, 'tc')
  } else {
    # NOTE(review): doc_ids are spliced into the SQL unquoted — assumes numeric ids.
    tc = RSQLite::dbGetQuery(conn, sprintf('select * from tc where doc_id in (%s);', paste(doc_ids, collapse=',')))
  }
  tokens = lapply(tc$tokens_json, jsonlite::fromJSON)
  # Fix: seq_along() instead of 1:length() — the latter evaluates to c(1, 0)
  # when the query returns no rows, causing an out-of-bounds error.
  for (i in seq_along(tokens)) tokens[[i]]$doc_id = tc$doc_id[i]
  meta = lapply(tc$meta_json, jsonlite::fromJSON)
  for (i in seq_along(meta)) meta[[i]]$doc_id = tc$doc_id[i]
  RSQLite::dbDisconnect(conn)
  corpustools::tokens_to_tcorpus(tokens = data.table::rbindlist(tokens, use.names=T, fill=T),
                                 meta = data.table::rbindlist(meta, use.names=T, fill=T))
}
# Parse any documents in `d` that are not yet in the db with udpipe and store
# them. Returns NULL invisibly; the db file is the product.
tc_db <- function(d, db_file='shinyBZpers.db', udpipe_cores=NULL) {
  conn <- RSQLite::dbConnect(RSQLite::SQLite(), db_file)
  # doc_exists() returns TRUE for ids *missing* from the db (see its comment).
  to_do = doc_exists(conn, unique(d$id))
  if (any(to_do)) {
    message(sprintf('\nNeed to parse %s documents', sum(to_do)))
    if (!is.null(udpipe_cores))
      tc = corpustools::create_tcorpus(d[to_do,], udpipe_cores=udpipe_cores, doc_col='id', text_columns = c('title','text'), remember_spaces=T, udpipe_model='dutch-alpino')
    else
      tc = corpustools::create_tcorpus(d[to_do,], doc_col='id', text_columns = c('title','text'), remember_spaces=T, udpipe_model='dutch-alpino')
    # Normalise dates to character so SQLite stores them consistently.
    tc$meta$date = as.POSIXct(tc$meta$date)
    tc$meta$date = as.character(tc$meta$date)
    # Drop columns not needed downstream to keep the db small.
    tc$delete_columns(c('xpos','feats'))
    tc_to_db(conn, tc)
  } else {
    message('All articles have already been parsed')
  }
  RSQLite::dbDisconnect(conn)
  return(NULL)
}
# Collapse each document's tokens and meta into one JSON string per doc_id.
# Returns a data frame with columns doc_id, tokens_json and meta_json.
tc_to_json <- function(tc) {
  # Dummy binding so the data.table special symbol .SD is defined in scope.
  .SD = NULL
  ## for some reason, the original code in the (identical) database preparation
  ## in shinyBZpers does not work here... Hurray R
  tokens = data.table::as.data.table(tc$tokens)
  tokens = tokens[,list(tokens_json=jsonlite::toJSON(as.data.frame(.SD))), by='doc_id']
  meta = data.table::as.data.table(tc$meta)
  meta = meta[,list(meta_json=jsonlite::toJSON(as.data.frame(.SD))), by='doc_id']
  merge(tokens,meta,by='doc_id')
}
|
{"hexsha": "ad4cf5b636b13b73fe0ba4e73c92fa75f592348a", "size": 3088, "ext": "r", "lang": "R", "max_stars_repo_path": "R/lib_db.r", "max_stars_repo_name": "vanatteveldt/shinyBZtopics", "max_stars_repo_head_hexsha": "524cad31395d20c9d33ad92660a38522d70342a4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "R/lib_db.r", "max_issues_repo_name": "vanatteveldt/shinyBZtopics", "max_issues_repo_head_hexsha": "524cad31395d20c9d33ad92660a38522d70342a4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-03-07T08:48:51.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-07T10:16:26.000Z", "max_forks_repo_path": "R/lib_db.r", "max_forks_repo_name": "vanatteveldt/shinyBZtopics", "max_forks_repo_head_hexsha": "524cad31395d20c9d33ad92660a38522d70342a4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-04T12:54:56.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-04T12:54:56.000Z", "avg_line_length": 36.3294117647, "max_line_length": 172, "alphanum_fraction": 0.6606217617, "num_tokens": 920}
|
import argparse
import errno
import logging
import socket

import numpy as np
BUFF_SIZE = 1024
class Lakeshore240_Simulator:
    """TCP server emulating a Lakeshore 240 input module.

    Accepts semicolon-separated, space-delimited commands (e.g. ``KRDG? 1``)
    and answers queries the way the real hardware would.
    """

    def __init__(self, port, num_channels=8, sn="LSSIM"):
        self.log = logging.getLogger()
        self.port = port
        self.sn = sn
        self.modname = "SIM_MODULE"
        self.num_channels = num_channels
        self.channels = [ChannelSim(i+1, "Channel {}".format(i+1))
                         for i in range(self.num_channels)]
        # Command verb -> handler. Fix: the reading lambdas previously took a
        # single argument and unpacked it with *x, which string-unpacked
        # multi-digit channel numbers ('10' -> '1', '0'); *a forwards the
        # argument list unchanged.
        self.cmds = {
            "*IDN?": self.get_idn,
            "CRDG?": lambda *a: self.get_reading(*a, unit='C'),
            "FRDG?": lambda *a: self.get_reading(*a, unit='F'),
            "KRDG?": lambda *a: self.get_reading(*a, unit='K'),
            "SRDG?": lambda *a: self.get_reading(*a, unit='S'),
            "MODNAME": self.set_modname,
            "MODNAME?": self.get_modname,
            "INNAME": self.set_channel_name,
            "INNAME?": self.get_channel_name,
            "INTYPE": self.set_channel_intype,
            "INTYPE?": self.get_channel_intype,
            "SET_VALUE": self.set_channel_value
        }

    def set_modname(self, name):
        self.modname = name

    def get_modname(self):
        return self.modname

    def _chan_index(self, chan):
        # Convert a 1-based channel argument to a list index; None if invalid.
        idx = int(chan) - 1
        if not 0 <= idx < self.num_channels:
            self.log.warning(f"chan num must be between 1 and {self.num_channels}")
            return None
        return idx

    def set_channel_value(self, chan, value):
        idx = self._chan_index(chan)
        if idx is None:
            return
        self.channels[idx].set_value(value)

    def get_channel_intype(self, chan):
        idx = self._chan_index(chan)
        if idx is None:
            return
        return self.channels[idx].get_intype()

    def set_channel_intype(self, chan, *args):
        idx = self._chan_index(chan)
        if idx is None:
            return
        self.channels[idx].set_intype(*map(int, args))

    def get_channel_name(self, chan):
        idx = self._chan_index(chan)
        if idx is None:
            return
        return self.channels[idx].name

    def set_channel_name(self, chan, name):
        idx = self._chan_index(chan)
        if idx is None:
            return
        self.channels[idx].name = name

    def run(self):
        """Bind to the first free port in [port, port+10) and serve clients forever."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        for p in range(self.port, self.port + 10):
            try:
                self.log.info(f"Trying to listen on port {p}")
                sock.bind(('', p))
                break
            except OSError as e:
                # Fix: errno 48 is EADDRINUSE only on macOS (98 on Linux);
                # use the portable constant.
                if e.errno == errno.EADDRINUSE:
                    self.log.warning(f"Address {p} is already in use")
                else:
                    raise
        else:
            # Fix: previously only printed (with a wrong '+5' range) and then
            # listened on an unbound socket; fail loudly instead.
            raise OSError(f"Could not bind to any port in {range(self.port, self.port+10)}")
        sock.listen(1)
        while True:
            self.log.info('waiting for a connection....')
            conn, client_address = sock.accept()
            self.log.info(f"Made connection with {client_address}")
            with conn:
                # Main data loop
                while True:
                    data = conn.recv(BUFF_SIZE)
                    if not data:
                        self.log.info("Connection closed by client")
                        break
                    self.log.debug("Command: {}".format(data))
                    # Multiple commands may arrive in one packet, separated by ';'.
                    for c in data.decode().split(';'):
                        if c.strip() == '':
                            continue
                        cmd_list = c.strip().split(' ')
                        if len(cmd_list) == 1:
                            cmd, args = cmd_list[0], []
                        else:
                            cmd, args = cmd_list[0], cmd_list[1].split(',')
                        self.log.debug(f"{cmd} {args}")
                        try:
                            cmd_fn = self.cmds.get(cmd)
                            if cmd_fn is None:
                                self.log.warning(f"Command {cmd} is not registered")
                                continue
                            resp = cmd_fn(*args)
                        except TypeError as e:
                            # Wrong argument count for the handler.
                            self.log.error(f"Command error: {e}")
                            continue
                        if resp is not None:
                            conn.send(resp.encode())

    def get_idn(self):
        """Return the *IDN? identification string."""
        return ','.join([
            "Lakeshore",
            "LSSIM_{}P".format(self.num_channels),
            self.sn,
            'v0.0.0'
        ])

    def get_reading(self, channel_num, unit='S'):
        """Return the named channel's reading as a string; None if channel invalid."""
        idx = self._chan_index(channel_num)
        if idx is None:
            return
        return self.channels[idx].get_reading(unit=unit)
class ChannelSim:
    """State container for one simulated Lakeshore 240 input channel."""

    def __init__(self, channel_num, name):
        self.log = logging.getLogger()
        self.channel_num = channel_num
        self.name = name
        # INTYPE defaults: diode sensor, no autorange, range 0, no current
        # reversal, sensor units, enabled.
        self.sensor_type = 1
        self.autorange = 0
        self.range = 0
        self.current_reversal = 0
        self.units = 3
        self.enabled = 1
        self.value = 100

    def get_intype(self):
        """Return the INTYPE settings as a comma-separated string."""
        fields = (self.sensor_type, self.autorange, self.range,
                  self.current_reversal, self.units, self.enabled)
        return ','.join(str(v) for v in fields)

    def set_intype(self, sensor_type, autorange, rng, current_reversal, units, enabled):
        """Update INTYPE fields, silently ignoring out-of-range values."""
        if sensor_type in (1, 2, 3):
            self.log.debug(f"Setting sensor type to {sensor_type}")
            self.sensor_type = sensor_type
        if autorange in (0, 1):
            self.autorange = autorange
        # Range 0 is the only valid value for sensor types 1 and 2;
        # type 3 accepts 0-8.
        range_ok = (self.sensor_type in (1, 2) and rng == 0) or \
                   (self.sensor_type == 3 and rng in range(9))
        if range_ok:
            self.range = rng
        if self.sensor_type in (2, 3) and current_reversal in (0, 1):
            self.log.debug(f"Setting chan {self.channel_num} "
                           f"current_reversal to {current_reversal}")
            self.current_reversal = current_reversal
        if units in (1, 2, 3, 4):
            self.log.debug(f"Setting chan {self.channel_num} units to {units}")
            self.units = units
        if enabled in (0, 1):
            self.log.debug(f"Setting chan {self.channel_num} enabled to {enabled}")
            self.enabled = enabled

    def set_value(self, value):
        """Set the nominal reading for this channel."""
        self.log.debug(f"Setting value to {value}")
        self.value = float(value)

    def get_reading(self, unit='S'):
        """Return a reading as a string: 0 when disabled, otherwise the
        nominal value plus unit-variance Gaussian noise."""
        if not self.enabled:
            return str(0)
        return str(np.random.normal(self.value))
def make_parser(parser=None):
    """Return an argparse parser carrying the LS240 simulator's CLI options.

    When ``parser`` is given, options are added to it; otherwise a fresh
    ArgumentParser is created.
    """
    p = parser if parser is not None else argparse.ArgumentParser()
    p.add_argument('-p', '--port', type=int, default=1094,
                   help="Port which simulator will wait for a connection."
                        "If taken, it will test several consecutive ports"
                        "until it finds one that is free.")
    p.add_argument('--num-channels', type=int, default=8,
                   help="Number of channels which the simulator will have.")
    p.add_argument('--sn', type=str, default='LS_SIM',
                   help="Serial number for the device")
    p.add_argument('--log-file', type=str, default=None,
                   help="File where logs are written")
    p.add_argument('--log-level',
                   choices=['debug', 'info', 'warning', 'error'],
                   default='info',
                   help="Minimum log level to be displayed")
    p.add_argument('-o', '--log-stdout', action="store_true",
                   help="Log to stdout")
    return p
if __name__ == '__main__':
    # Parse CLI options and map the log-level name to a logging constant.
    parser = make_parser()
    args = parser.parse_args()
    log_level = {
        'debug': logging.DEBUG,
        'info': logging.INFO,
        'warning': logging.WARNING,
        'error': logging.ERROR
    }[args.log_level]
    format_string = '%(asctime)-15s [%(levelname)s]: %(message)s'
    # logging.basicConfig(level=log_level, format=format_string)
    formatter = logging.Formatter(format_string)
    log = logging.getLogger()
    log.setLevel(log_level)
    # Console logging is on when explicitly requested or when no file is given.
    if args.log_file is None or args.log_stdout:
        consoleHandler = logging.StreamHandler()
        consoleHandler.setFormatter(formatter)
        log.addHandler(consoleHandler)
    if args.log_file is not None:
        fileHandler = logging.FileHandler(args.log_file)
        fileHandler.setFormatter(formatter)
        log.addHandler(fileHandler)
    # Start the simulator; run() blocks, serving connections forever.
    ls = Lakeshore240_Simulator(args.port,
                                num_channels=args.num_channels,
                                sn=args.sn)
    ls.run()
|
{"hexsha": "f6fdd31a041cebd798d1c562b76dc9771df69594", "size": 9451, "ext": "py", "lang": "Python", "max_stars_repo_path": "simulators/lakeshore240/ls240_simulator.py", "max_stars_repo_name": "gdevenyi/socs", "max_stars_repo_head_hexsha": "2f94cbee0246d23a200afdf1dec8208f2c561c71", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2019-09-02T14:16:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-19T20:49:35.000Z", "max_issues_repo_path": "simulators/lakeshore240/ls240_simulator.py", "max_issues_repo_name": "gdevenyi/socs", "max_issues_repo_head_hexsha": "2f94cbee0246d23a200afdf1dec8208f2c561c71", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 183, "max_issues_repo_issues_event_min_datetime": "2019-06-04T20:38:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T18:45:17.000Z", "max_forks_repo_path": "simulators/lakeshore240/ls240_simulator.py", "max_forks_repo_name": "gdevenyi/socs", "max_forks_repo_head_hexsha": "2f94cbee0246d23a200afdf1dec8208f2c561c71", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2019-06-28T15:55:16.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-02T16:27:44.000Z", "avg_line_length": 33.3957597173, "max_line_length": 88, "alphanum_fraction": 0.5361337425, "include": true, "reason": "import numpy", "num_tokens": 2048}
|
import warnings
import numpy as np
import empca
from apogee.tools.path import change_dr
from apogee.tools import bitmask as bm
from sklearn.decomposition import PCA
from delfiSpec import util, specproc, specsim
from fpca import FPCA
# Ignore warnings
warnings.filterwarnings("ignore")
# Read APOGEE DR14 catalogue
change_dr('14')
apCat = util.ApogeeCat()
# Read M67 cluster APOGEE spectra
M67_DM_apogee, M67_GM_apogee = apCat.read_OCCAM_cluster()
# Perform APOGEE cuts: Giant members with Iron abundance within M67 limits
print(r'M67 GM FE/H limits: [{:.3f}, {:.3f}]'.format(np.min(M67_GM_apogee['FE_H']),
                                                     np.max(M67_GM_apogee['FE_H'])))
print(r'Our GM FE/H limits: [{:.3f}, {:.3f}]'.format(-0.15, 0.15))
apogee_cat_cut = apCat.apogee_cat[(apCat.apogee_cat['FE_H'] > -0.15) &
                                  (apCat.apogee_cat['FE_H'] < 0.15) &
                                  (apCat.apogee_cat['LOGG'] < 4) & (apCat.apogee_cat['LOGG'] > -1)]
# High Signal-to-Noise Ratio cut
indx = apogee_cat_cut['SNR'] > 200
apogee_cat_cut = apogee_cat_cut[indx]
print('APOGEE giants spectra after FE_H cut: {}'.format(len(apogee_cat_cut)))
# Make sure all abundances have "physical values"
abundances = ['FE_H', 'C_FE', 'N_FE', 'O_FE', 'NA_FE', 'MG_FE', 'AL_FE', 'SI_FE', 'S_FE',
              'K_FE', 'CA_FE', 'TI_FE', 'V_FE', 'MN_FE', 'NI_FE']
# NOTE(review): loop starts at 1, so FE_H is skipped here (already cut above)
# -- confirm this is intentional.
for i in range(1, len(abundances)):
    apogee_cat_cut = apogee_cat_cut[(apogee_cat_cut[abundances[i]] > -1) &
                                    (apogee_cat_cut[abundances[i]] < 1)]
# Read APOGEE spectra for the above APOGEE cut
data_set = apCat.read_allStar_spectra(apogee_cat_cut)
# Mask bad pixels using APOGEE bitmask
badcombpixmask = bm.badpixmask()
pix_err = np.array([bm.apogee_pixmask_int("SIG_SKYLINE"), bm.apogee_pixmask_int("SIG_TELLURIC"),
                    bm.apogee_pixmask_int("PERSIST_HIGH"), bm.apogee_pixmask_int("PERSIST_MED"),
                    bm.apogee_pixmask_int("PERSIST_LOW")])
badcombpixmask += np.sum(2**pix_err)
data_set_specproc, data_set_specerr, data_set_specweight = specproc.process_spectra(spectra_info=data_set,
                                                                                    badcombpixmask=badcombpixmask)
# Mask the spectra based on APOGEE bitmask
data_set_specmasked = np.ma.masked_array(data_set_specproc, mask=(data_set_specweight==0))
# Remove spectra with more than 50% masked pixels
# (7214 appears to be the pixel count per processed spectrum -- confirm.)
data_set_maskpixels = np.sum(data_set_specmasked.mask, axis=1)
data_set_specmasked = data_set_specmasked[data_set_maskpixels < 50/100*7214]
data_set_specerr = data_set_specerr[data_set_maskpixels < 50/100*7214]
# Generate an APOGEE data cut that corresponds exactly to the data
apogee_cat_cut = apogee_cat_cut[data_set_maskpixels < 50/100*7214]
# Simulate theoretical spectra analogous to the training set using its APOGEE parameters
data_set_sim = specsim.sim_spectra(apogee_cat_cut)
# --------------- FPCA on APOGEE spectra data ---------------
# Choose 50 random simulated spectra as basis functions.
# Fix: np.random.random_integers was deprecated long ago and removed from
# modern NumPy; randint's exclusive upper bound of len(data_set_sim) gives
# the same inclusive index range [0, len-1].
rand_ind = np.random.randint(0, len(data_set_sim), 50)
basis = data_set_sim[rand_ind]
# FPCA of masked APOGEE spectra
fpca_train_dat = FPCA(data_set_specmasked, 50, phi=basis, xerr=data_set_specerr)
fpca_train_dat.alpha_regression()
fpca_train_dat.solve_eigenproblem()
# Persist eigenvectors/eigenvalues (reversed so components come largest-first)
# and the sample mean spectrum.
np.savetxt('data/FPCA_apogee/fpca_dat_eigenvectors_psi_t.dat', fpca_train_dat.psi_cap_t.real[:, ::-1])
np.savetxt('data/FPCA_apogee/fpca_dat_eigenvalues_k_cap.dat', fpca_train_dat.perc_var[::-1])
np.savetxt('data/FPCA_apogee/fpca_dat_spec_mean.dat', fpca_train_dat.sample_mu)
# --------------- EMPCA on APOGEE spectral data ---------------
# EMPCA of masked APOGEE spectra after mean subtraction
data_set_data_mean = data_set_specmasked.mean(axis=0, keepdims=True)
data_set_spec_meansub = data_set_specproc - data_set_data_mean
# Fix: 'data_set_weight' was never defined (NameError at runtime); the pixel
# weights computed earlier are named 'data_set_specweight'.
# NOTE(review): data_set_specproc/data_set_specweight were not row-filtered by
# the 50%-masked cut applied to data_set_specmasked -- confirm shapes agree.
model_empca = empca.empca(data_set_spec_meansub, data_set_specweight, deltR2=2e-07, niter=10, nvec=50)
# Cumulative explained variance R^2 for 1..50 components.
var_r2 = np.zeros(50)
for nvec in np.arange(1, 51):
    var_r2[nvec-1] = model_empca.R2(nvec=nvec)
np.savetxt('data/EMPCA_apogee/empca_dat_eigenvectors_PCs.dat', model_empca.eigvec)
np.savetxt('data/EMPCA_apogee/empca_dat_eigenvalues_R2.dat', var_r2)
# ---------------- PCA on PSM simulated spectra ----------------
# PCA of simulated spectra
data_set_sim_mean = np.mean(data_set_sim, axis=0)
data_set_sim = data_set_sim - data_set_sim_mean
pca_sim = PCA(n_components=50)
pca_sim.fit(data_set_sim)
np.savetxt('data/PCA_sim/pca_sim_eigenvectors_PCs.dat', pca_sim.components_)
np.savetxt('data/PCA_sim/pca_sim_eigenvalues_percvar.dat', pca_sim.explained_variance_ratio_*100)
# Correlations
# Compare data-driven components (FPCA, EMPCA) against the simulated-spectra
# PCA via absolute Pearson correlation, expressed in percent.
eigenfun_dat = fpca_train_dat.psi_cap_t.real[:, ::-1]
eigenvec_dat = model_empca.eigvec
eigenvec_sim = pca_sim.components_
# First 10 PCs
fpca_corr = np.zeros(10)
empca_corr = np.zeros(10)
for i in range(10):
    fpca_corr[i] = 100*np.abs(np.corrcoef(eigenfun_dat[:, i], eigenvec_sim[i])[0][1])
    empca_corr[i] = 100*np.abs(np.corrcoef(eigenvec_dat[i], eigenvec_sim[i])[0][1])
np.savetxt('data/fpca_correlations.dat', fpca_corr)
np.savetxt('data/empca_correlations.dat', empca_corr)
|
{"hexsha": "18d061e76abd1af7309cf629a3a4cee482535d8d", "size": 5127, "ext": "py", "lang": "Python", "max_stars_repo_path": "apogee_fpca.py", "max_stars_repo_name": "aaryapatil/specdims", "max_stars_repo_head_hexsha": "acfc644aa06b13c8b34cde984e207b42e948af41", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-09-23T12:08:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T05:20:58.000Z", "max_issues_repo_path": "apogee_fpca.py", "max_issues_repo_name": "aaryapatil/specdims", "max_issues_repo_head_hexsha": "acfc644aa06b13c8b34cde984e207b42e948af41", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "apogee_fpca.py", "max_forks_repo_name": "aaryapatil/specdims", "max_forks_repo_head_hexsha": "acfc644aa06b13c8b34cde984e207b42e948af41", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.3719008264, "max_line_length": 107, "alphanum_fraction": 0.7193290423, "include": true, "reason": "import numpy", "num_tokens": 1525}
|
"""
Segmentation Viewer
This class allows you to view examples from the Fusion Gallery segmentation dataset.
Additionally you can generate an html view for all the files.
"""
import argparse
from pathlib import Path
import numpy as np
import igl
import meshplot as mp
import math
class SegmentationViewer:
    """View and export face-segmentation results for Fusion Gallery meshes."""

    def __init__(self, meshes_folder):
        self.meshes_folder = Path(meshes_folder)
        assert self.meshes_folder.exists(), "The meshes folder does not exist"
        # One 8-bit RGB color per segment label, in label-index order.
        bit8_colors = np.array([
            [235, 85, 79],   # ExtrudeSide
            [220, 198, 73],  # ExtrudeEnd
            [113, 227, 76],  # CutSide
            [0, 226, 124],   # CutEnd
            [23, 213, 221],  # Fillet
            [92, 99, 222],   # Chamfer
            [176, 57, 223],  # RevolveSide
            [238, 61, 178]   # RevolveEnd
        ])
        # Normalize to [0, 1] floats for meshplot.
        self.color_map = bit8_colors / 255.0

    def obj_pathname(self, file_stem):
        """Full path of the .obj mesh for the given file stem."""
        return self.meshes_folder / (file_stem + ".obj")

    def seg_pathname(self, file_stem):
        """Full path of the .seg per-triangle label file for the given file stem."""
        return self.meshes_folder / (file_stem + ".seg")

    def load_mesh(self, obj_file):
        """Load a triangle mesh; returns (vertices, faces)."""
        v, f = igl.read_triangle_mesh(str(obj_file))
        return v, f

    def load_data(self, file_stem):
        """Load mesh and per-facet colors.

        Returns (vertices, faces, facet_colors), or (None, None, None) when
        either the .obj or the .seg file is missing.
        """
        obj_pathname = self.obj_pathname(file_stem)
        if not obj_pathname.exists():
            # Fix: message previously read "Waring!"
            print(f"Warning! -- The file {obj_pathname} does not exist")
            return None, None, None
        v, f = self.load_mesh(obj_pathname)
        seg_pathname = self.seg_pathname(file_stem)
        if not seg_pathname.exists():
            print(f"Warning! -- The file {seg_pathname} does not exist")
            return None, None, None
        # One segment label per triangle; map labels to colors.
        tris_to_segments = np.loadtxt(seg_pathname, dtype=np.uint64)
        assert f.shape[0] == tris_to_segments.size, "Expect a segment index for every facet"
        facet_colors = self.color_map[tris_to_segments]
        return v, f, facet_colors

    def view_segmentation(self, file_stem):
        """Show an interactive meshplot view of one segmented mesh."""
        v, f, facet_colors = self.load_data(file_stem)
        if v is None:
            print(f"The data for {file_stem} could not be loaded")
            return
        mp.plot(v, f, c=facet_colors)

    def save_html(self, file_stem, output_folder):
        """Render one segmented mesh to <output_folder>/<file_stem>.html."""
        v, f, facet_colors = self.load_data(file_stem)
        if v is None:
            print(f"The data for {file_stem} could not be loaded. Skipping")
            return
        output_pathname = output_folder / (file_stem + ".html")
        mp.website()
        p = mp.plot(v, f, c=facet_colors)
        p.save(str(output_pathname))
def create_html(meshes_folder, output_folder):
    """Generate an html view for every .obj file found under meshes_folder."""
    viewer = SegmentationViewer(meshes_folder)
    for obj_file in meshes_folder.glob("**/*.obj"):
        viewer.save_html(obj_file.stem, output_folder)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--meshes_folder", type=str, required=True, help="Path segmentation meshes folder")
    parser.add_argument("--output_folder", type=str, required=True, help="The folder where you would like to create images")
    args = parser.parse_args()

    meshes_folder = Path(args.meshes_folder)
    if not meshes_folder.exists():
        # Fix: previously only printed and carried on, then crashed later in
        # SegmentationViewer's assert; abort with a clear message instead.
        raise SystemExit(f"The folder {meshes_folder} was not found")
    output_folder = Path(args.output_folder)
    if not output_folder.exists():
        # parents=True also handles nested output paths.
        output_folder.mkdir(parents=True, exist_ok=True)
    if not output_folder.exists():
        raise SystemExit(f"Failed to create the output folder {output_folder}")

    # Now create the images for all the files
    create_html(meshes_folder, output_folder)
    print("Completed segmentation_viewer.py")
|
{"hexsha": "1865e0f373d7052422cec106adc7278856efb65f", "size": 3868, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/segmentation_viewer/segmentation_viewer.py", "max_stars_repo_name": "AutodeskAILab/Fusion360GalleryDataset", "max_stars_repo_head_hexsha": "b6424f4c06535c426b59839a9355d49bd1d8a364", "max_stars_repo_licenses": ["RSA-MD"], "max_stars_count": 193, "max_stars_repo_stars_event_min_datetime": "2020-10-16T12:48:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T22:01:16.000Z", "max_issues_repo_path": "tools/segmentation_viewer/segmentation_viewer.py", "max_issues_repo_name": "AutodeskAILab/Fusion360GalleryDataset", "max_issues_repo_head_hexsha": "b6424f4c06535c426b59839a9355d49bd1d8a364", "max_issues_repo_licenses": ["RSA-MD"], "max_issues_count": 22, "max_issues_repo_issues_event_min_datetime": "2020-11-04T15:24:21.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T15:35:59.000Z", "max_forks_repo_path": "tools/segmentation_viewer/segmentation_viewer.py", "max_forks_repo_name": "AutodeskAILab/Fusion360GalleryDataset", "max_forks_repo_head_hexsha": "b6424f4c06535c426b59839a9355d49bd1d8a364", "max_forks_repo_licenses": ["RSA-MD"], "max_forks_count": 23, "max_forks_repo_forks_event_min_datetime": "2020-10-17T23:55:57.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-12T01:56:52.000Z", "avg_line_length": 36.4905660377, "max_line_length": 125, "alphanum_fraction": 0.6181489142, "include": true, "reason": "import numpy", "num_tokens": 914}
|
#include <iostream>
#include <cmath>
#include <Eigen/Dense>
#include "traji.hpp"
using namespace std;
using namespace std::placeholders;
namespace traji
{
// Convert this path position to an absolute timestamp on the trajectory by
// linearly interpolating between the segment's two timestamps.
TFloat PathPosition::to_t(const Trajectory &traj) const
{
    return traj._timestamps[segment] + (traj._timestamps[segment+1] - traj._timestamps[segment]) * fraction;
}
// Locate the segment containing time t and the fractional position inside it.
PathPosition PathPosition::from_t(const Trajectory &traj, TFloat t)
{
    // Binary search for the first timestamp greater than t; the segment
    // index is the entry just before it.
    auto segment_iter = upper_bound(traj._timestamps.begin(), traj._timestamps.end(), t);
    auto segment_idx = distance(traj._timestamps.begin(), segment_iter) - 1;
    // NOTE(review): clipped to size()-1; whether size() counts points or
    // segments decides if segment_idx+1 below can read past the end -- confirm.
    segment_idx = min((long)traj.size() - 1L, max(0L, segment_idx)); // clip the segment index
    auto t0 = traj._timestamps[segment_idx], t1 = traj._timestamps[segment_idx+1];
    return PathPosition(segment_idx, (t - t0) / (t1 - t0));
}
// Batch version of PathPosition::from_t: converts a (mostly sorted) list of
// timestamps to path positions in one forward sweep; this method is analog
// to PathPosition::from_s(path, s_list).
vector<PathPosition> PathPosition::from_t(const Trajectory &path, const std::vector<TFloat> &t_list)
{
    vector<PathPosition> result;
    result.reserve(t_list.size());

    TFloat cur_t = 0;
    size_t cur_idx = 1; // index of first timestamp that is larger than cur_t
    for (auto t : t_list)
        if (t < cur_t)
            // Out-of-order timestamp: fall back to the binary-search overload.
            result.push_back(PathPosition::from_t(path, t));
        else
        {
            // Fix: the bounds check must come first — the original evaluated
            // path._timestamps[cur_idx] before testing cur_idx < path.size(),
            // risking an out-of-range read once cur_idx reached the end.
            while (cur_idx < path.size() && t > path._timestamps[cur_idx])
                cur_idx++;

            auto t0 = path._timestamps[cur_idx-1], t1 = path._timestamps[cur_idx];
            result.push_back(PathPosition(cur_idx - 1, (t - t0) / (t1 - t0)));
            cur_t = t;
        }
    return result;
}
// Finite-difference velocity of one segment (velocity is constant within a
// segment under linear interpolation).
Vector2 Trajectory::solve_velocity(size_t segment_idx) const
{
    // NOTE(review): segment_idx >= 0 is always true for an unsigned type;
    // only the upper bound is effective here.
    assert (segment_idx >= 0 && segment_idx <= _line.size() - 2);

    TFloat dt = _timestamps[segment_idx+1] - _timestamps[segment_idx];
    TFloat vx = (_line[segment_idx+1].get<0>() - _line[segment_idx].get<0>()) / dt;
    TFloat vy = (_line[segment_idx+1].get<1>() - _line[segment_idx].get<1>()) / dt;
    return Vector2(vx, vy);
}
// Velocity at a path position. Without interpolation the segment's constant
// velocity is returned; with interpolation, the velocities of adjacent
// segments are blended linearly by position for a smoother profile.
Vector2 Trajectory::velocity_at(const PathPosition &pos, bool interpolate) const
{
    if (!interpolate ||
        (pos.segment == 0 && pos.fraction < 0.5) || // assume constant speed at both ends
        (pos.segment == _line.size() - 2 && pos.fraction >= 0.5))
        return solve_velocity(pos.segment);
    else
    {
        // linear interpolate based on fraction (position)
        Vector2 vel1, vel2;
        TFloat w1, w2;
        if (pos.fraction < 0.5)
        {
            // First half of the segment: blend with the previous segment.
            vel1 = solve_velocity(pos.segment - 1);
            w1 = 0.5 - pos.fraction;
            vel2 = solve_velocity(pos.segment);
            w2 = 0.5 + pos.fraction;
        }
        else
        {
            // Second half: blend with the next segment.
            vel1 = solve_velocity(pos.segment);
            w1 = 1.5 - pos.fraction;
            vel2 = solve_velocity(pos.segment + 1);
            w2 = pos.fraction - 0.5;
        }
        return vel1.array() * w1 + vel2.array() * w2;
    }
}
Vector2 Trajectory::solve_acceleration(size_t point_idx) const
{
    // Central-difference acceleration at an interior vertex: difference of
    // the two adjacent segment velocities over half the spanned time.
    assert (point_idx >= 1 && point_idx <= _line.size() - 2);
    Vector2 v_prev = solve_velocity(point_idx - 1);
    Vector2 v_next = solve_velocity(point_idx);
    TFloat dt = (_timestamps[point_idx + 1] - _timestamps[point_idx - 1]) / 2;
    return (v_next.array() - v_prev.array()) / dt;
}
// Acceleration query at a path position, based on the central differences
// computed by solve_acceleration (valid for interior vertices 1..n-2 only;
// boundary segments are clamped to the nearest interior vertex).
Vector2 Trajectory::acceleration_at(const PathPosition &pos, bool interpolate) const
{
    if (_line.size() <= 2) return Vector2(0, 0); // no acceleration for one segment
    if (pos.segment == 0)
        return solve_acceleration(1); // clamp to the first interior vertex
    else if (pos.segment == _line.size() - 2)
        return solve_acceleration(pos.segment); // clamp to the last interior vertex
    else if (interpolate)
    {
        // blend the accelerations at the segment's two end vertices
        return solve_acceleration(pos.segment).array() * (1 - pos.fraction) +
            solve_acceleration(pos.segment + 1).array() * pos.fraction;
    }
    else
    {
        // no interpolation: pick the vertex nearest to the query position
        if (pos.fraction < 0.5)
            return solve_acceleration(pos.segment);
        else
            return solve_acceleration(pos.segment+1);
    }
}
Trajectory Trajectory::resample_at(const std::vector<TFloat> &t_list) const
{
    // Convert the query times to path positions, then sample the path there.
    vector<PathPosition> pos_list = PathPosition::from_t(*this, t_list);
    vector<Point> plist;
    plist.reserve(t_list.size());
    for (const auto &pos : pos_list)
        plist.push_back(point_at(pos));
    return Trajectory(Path(move(plist)), t_list);
}
// Solve the coefficients of two quintic polynomials (x(t), y(t)) from the
// boundary conditions at t = 0 and t = T. x0/xT and y0/yT pack
// (position, velocity, acceleration) per axis -- inferred from the algebra
// below; TODO confirm against the header's documentation.
QuinticPolyTrajectory::QuinticPolyTrajectory(
    TFloat T, const Vector3 &x0, const Vector3 &xT,
    const Vector3 &y0, const Vector3 &yT, bool relax_sx
) : _x_coeffs(), _y_coeffs(), _T(T)
{
    assert (T > 0);
    TFloat T3 = T * T * T;
    // c0..c2 follow directly from the conditions at t = 0
    Vector3 c012x; c012x << x0(0), x0(1), x0(2) / 2;
    Vector3 c012y; c012y << y0(0), y0(1), y0(2) / 2;
    Matrix3 M1, M2;
    // M1 maps (c0,c1,c2), M2 maps (c3,c4,c5) to the conditions at t = T
    M1 << 1, T, T*T,
        0, 1, 2*T,
        0, 0, 2;
    M2 << T3, T3*T, T3*T*T,
        3*T*T, 4*T3, 5*T3*T,
        6*T, 12*T*T, 20*T3;
    Matrix3 M2inv = M2.inverse();
    auto c345y = M2inv * (yT - M1 * c012y);
    // coefficients are stored from highest (t^5) to lowest degree
    _y_coeffs << c345y(2), c345y(1), c345y(0), c012y(2), c012y(1), c012y(0);
    if (relax_sx)
    {
        // relax the end condition on x(T): drop the t^5 term and match only
        // the last two rows (a 2x2 subsystem)
        auto c34x = M2.block(1,0,2,2).inverse() * (xT.tail(2) - M1.block(1,1,2,2) * c012x.tail(2));
        _x_coeffs << 0, c34x(1), c34x(0), c012x(2), c012x(1), c012x(0);
    }
    else
    {
        auto c345x = M2inv * (xT - M1 * c012x);
        _x_coeffs << c345x(2), c345x(1), c345x(0), c012x(2), c012x(1), c012x(0);
    }
}
Point QuinticPolyTrajectory::point_at(TFloat t) const
{
    // Evaluate both quintics with Horner's scheme
    // (coefficients stored from highest to lowest degree).
    TFloat px = _x_coeffs(0), py = _y_coeffs(0);
    for (size_t k = 1; k < 6; k++)
    {
        px = px * t + _x_coeffs(k);
        py = py * t + _y_coeffs(k);
    }
    return Point(px, py);
}
// Evaluate the first derivative of the quintic with Horner's scheme:
// the coefficient of t^(5-i) contributes (5-i) * c_i to the derivative.
Vector2 QuinticPolyTrajectory::velocity_at(TFloat t) const
{
    TFloat x = 5 * _x_coeffs(0), y = 5 * _y_coeffs(0);
    for (size_t i = 1; i < 5; i++)
    {
        x = x * t + (5-i) * _x_coeffs(i);
        y = y * t + (5-i) * _y_coeffs(i);
    }
    return Vector2(x, y);
}
// Evaluate the second derivative of the quintic with Horner's scheme:
// the coefficient of t^(5-i) contributes (5-i)(4-i) * c_i, giving
// 20 c0 t^3 + 12 c1 t^2 + 6 c2 t + 2 c3.
Vector2 QuinticPolyTrajectory::acceleration_at(TFloat t) const
{
    TFloat x = 20 * _x_coeffs(0), y = 20 * _y_coeffs(0);
    for (size_t i = 1; i < 4; i++)
    {
        x = x * t + (5-i) * (4-i) * _x_coeffs(i);
        y = y * t + (5-i) * (4-i) * _y_coeffs(i);
    }
    return Vector2(x, y);
}
TFloat QuinticPolyTrajectory::tangent_at(TFloat t) const
{
    // Heading is the direction of the instantaneous velocity vector.
    Vector2 v = velocity_at(t);
    return atan2(v(1), v(0));
}
// Discretize the analytic trajectory into a timestamped polyline sampled
// (approximately) every `interval` seconds over [0, T].
Trajectory QuinticPolyTrajectory::periodize(TFloat interval) const
{
    auto t = VectorX::LinSpaced((size_t)ceil(_T / interval) + 1, 0, _T).array();
    // vectorized Horner evaluation of both polynomials over all samples
    ArrayX x(t.rows()), y(t.rows());
    x.setConstant(_x_coeffs(0));
    y.setConstant(_y_coeffs(0));
    for (size_t i = 1; i < 6; i++)
    {
        x = x * t + _x_coeffs(i);
        y = y * t + _y_coeffs(i);
    }
    // copy samples into a Trajectory and recompute its arc lengths
    Trajectory result;
    result._line.reserve(t.rows());
    result._timestamps.reserve(t.rows());
    for (size_t i = 0; i < t.rows(); i++)
    {
        result._line.emplace_back(x(i), y(i));
        result._timestamps.emplace_back(t(i));
    }
    result.update_distance();
    return result;
}
// Closed-form position of the constant-turn-rate-and-acceleration (CTRA)
// motion model at time t. State layout appears to be
// (x, y, heading, speed, acceleration, yaw rate) -- TODO confirm.
Point CTRATrajectory::point_at(TFloat t) const
{
    TFloat x = _init_state(0), y = _init_state(1), th = _init_state(2);
    TFloat v = _init_state(3), a = _init_state(4), w = _init_state(5);
    TFloat nth = th + w * t; // heading after turning for time t
    TFloat nv = v + a * t;   // speed after accelerating for time t
    TFloat nx, ny;
    if (w == 0)
    {
        // degenerate case: straight-line motion at the average speed
        nx = x + (nv + v)/2 * cos(th) * t;
        ny = y + (nv + v)/2 * sin(th) * t;
    }
    else
    {
        // analytic integral of (v(t) cos(th(t)), v(t) sin(th(t)))
        nx = x + ( nv*w*sin(nth) + a*cos(nth) - v*w*sin(th) - a*cos(th)) / (w*w);
        ny = y + (-nv*w*cos(nth) + a*sin(nth) + v*w*cos(th) - a*sin(th)) / (w*w);
    }
    return Point(nx, ny);
}
Vector2 CTRATrajectory::velocity_at(TFloat t) const
{
    // Speed grows linearly with the constant acceleration; the direction
    // comes from the model's tangent at time t.
    TFloat heading = tangent_at(t);
    TFloat speed = _init_state(3) + _init_state(4) * t;
    return Vector2(speed * cos(heading), speed * sin(heading));
}
// Discretize the CTRA model into a timestamped polyline sampled
// (approximately) every `interval` seconds over [0, T]. Vectorized version
// of point_at applied to every sample time.
Trajectory CTRATrajectory::periodize(TFloat interval) const
{
    auto t = VectorX::LinSpaced((size_t)ceil(_T / interval) + 1, 0, _T).array();
    TFloat x = _init_state(0), y = _init_state(1), th = _init_state(2);
    TFloat v = _init_state(3), a = _init_state(4), w = _init_state(5);
    auto nth = th + w * t; // headings at all samples
    auto nv = v + a * t;   // speeds at all samples
    VectorX nx, ny;
    if (w == 0)
    {
        // degenerate case: straight-line motion at the average speed
        nx = x + (nv + v)/2 * cos(th) * t;
        ny = y + (nv + v)/2 * sin(th) * t;
    }
    else
    {
        // analytic integral of the CTRA velocity, evaluated element-wise
        nx = x + ( nv*w*sin(nth) + a*cos(nth) - v*w*sin(th) - a*cos(th)) / (w*w);
        ny = y + (-nv*w*cos(nth) + a*sin(nth) + v*w*cos(th) - a*sin(th)) / (w*w);
    }
    Trajectory result;
    result._line.reserve(t.rows());
    result._timestamps.reserve(t.rows());
    for (size_t i = 0; i < t.rows(); i++)
    {
        result._line.emplace_back(nx(i), ny(i));
        result._timestamps.emplace_back(t(i));
    }
    result.update_distance();
    return result;
}
// Symmetric point-sampled distance between two trajectories: evaluate each
// trajectory at the other's timestamps and take the minimum point distance.
TFloat tdistance(const Trajectory &lhs, const Trajectory &rhs)
{
    // TODO: this implementation is not correct, we need 3D segment distance
    TFloat min_dist = numeric_limits<TFloat>::max();
    // lhs sampled at rhs' timestamps vs rhs' own vertices
    // (size_t indices avoid the signed/unsigned comparison of the old `int i`)
    vector<PathPosition> rhs_pos = PathPosition::from_t(lhs, rhs.timestamps());
    for (size_t i = 0; i < rhs_pos.size(); i++)
    {
        TFloat dist = distance(lhs.point_at(rhs_pos[i]), rhs.vertices()[i]);
        min_dist = min(min_dist, dist);
    }
    // rhs sampled at lhs' timestamps vs lhs' own vertices
    vector<PathPosition> lhs_pos = PathPosition::from_t(rhs, lhs.timestamps());
    for (size_t i = 0; i < lhs_pos.size(); i++)
    {
        TFloat dist = distance(rhs.point_at(lhs_pos[i]), lhs.vertices()[i]);
        min_dist = min(min_dist, dist);
    }
    return min_dist;
}
}
namespace std
{
string to_string(const traji::Trajectory &value)
{
    // Render as "[point @ time, point @ time, ...]".
    if (value.size() == 0)
        return string("[]");
    stringstream ss;
    ss << '[';
    for (size_t i = 0; i < value.size(); i++)
    {
        if (i > 0)
            ss << ", ";
        ss << to_string(value.vertices()[i]) << " @ " << value.timestamps()[i];
    }
    ss << ']';
    return ss.str();
}
string to_string(const traji::QuinticPolyTrajectory &value)
{
    // Render as "(T=..., x_coeffs [...], y_coeffs [...])".
    stringstream ss;
    ss << "(T=" << value.T() << ", x_coeffs [";
    for (size_t i = 0; i < 6; i++)
    {
        if (i > 0)
            ss << ", ";
        ss << value.x_coeffs()(i);
    }
    ss << "], y_coeffs [";
    for (size_t i = 0; i < 6; i++)
    {
        if (i > 0)
            ss << ", ";
        ss << value.y_coeffs()(i);
    }
    ss << "])";
    return ss.str();
}
} // namespace std
|
{"hexsha": "ab792c4674c6397f13b45f6363d730c661a00bb9", "size": 11440, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/Trajectory.cpp", "max_stars_repo_name": "cmpute/traji", "max_stars_repo_head_hexsha": "192141dfdea26012a17cb0b5ddb99c0d085de0dc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2022-01-17T12:03:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-18T06:01:18.000Z", "max_issues_repo_path": "src/Trajectory.cpp", "max_issues_repo_name": "cmpute/traji", "max_issues_repo_head_hexsha": "192141dfdea26012a17cb0b5ddb99c0d085de0dc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Trajectory.cpp", "max_forks_repo_name": "cmpute/traji", "max_forks_repo_head_hexsha": "192141dfdea26012a17cb0b5ddb99c0d085de0dc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2021-12-22T05:02:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-22T05:02:17.000Z", "avg_line_length": 33.5483870968, "max_line_length": 112, "alphanum_fraction": 0.5196678322, "num_tokens": 3320}
|
"""
ReducedSpaceEvaluator{T} <: AbstractNLPEvaluator
Evaluator working in the reduced space corresponding to the
control variable `u`. Once a new point `u` is passed to the evaluator,
the user needs to call the method `update!` to find the corresponding
state `x(u)` satisfying the equilibrium equation `g(x(u), u) = 0`.
Taking as input a given `AbstractFormulation`, the reduced evaluator
builds the bounds corresponding to the control `u` and the state `x`,
and initiate an `AutoDiffFactory` tailored to the problem. The reduced evaluator
could be instantiated on the main memory, or on a specific device (currently,
only CUDA is supported).
"""
mutable struct ReducedSpaceEvaluator{T} <: AbstractNLPEvaluator
model::AbstractFormulation
x::AbstractVector{T}
p::AbstractVector{T}
λ::AbstractVector{T}
x_min::AbstractVector{T}
x_max::AbstractVector{T}
u_min::AbstractVector{T}
u_max::AbstractVector{T}
constraints::Array{Function, 1}
g_min::AbstractVector{T}
g_max::AbstractVector{T}
buffer::AbstractNetworkBuffer
autodiff::AutoDiffFactory
∇gᵗ::AbstractMatrix
linear_solver::LinearSolvers.AbstractLinearSolver
ε_tol::Float64
end
# Main constructor: assemble all workspaces and bounds from the formulation.
# NOTE(review): `u` and `verbose_level` are accepted but not stored or used
# below — only the control bounds queried from `model` are kept; confirm
# this is intended.
function ReducedSpaceEvaluator(
    model, x, u, p;
    constraints=Function[state_constraint, power_constraints],
    linear_solver=DirectSolver(),
    ε_tol=1e-12,
    verbose_level=VERBOSE_LEVEL_NONE,
)
    # First, build up a network buffer
    buffer = get(model, PhysicalState())
    # Initiate adjoint
    λ = similar(x)
    # Build up AutoDiff factory
    jx, ju, adjoint_f = init_autodiff_factory(model, buffer)
    ad = AutoDiffFactory(jx, ju, adjoint_f)
    # Bounds of the control and state variables
    u_min, u_max = bounds(model, Control())
    x_min, x_max = bounds(model, State())
    MT = model.AT
    # Concatenate the bounds of every constraint block, in order
    g_min = MT{eltype(x), 1}()
    g_max = MT{eltype(x), 1}()
    for cons in constraints
        cb, cu = bounds(model, cons)
        append!(g_min, cb)
        append!(g_max, cu)
    end
    return ReducedSpaceEvaluator(model, x, p, λ, x_min, x_max, u_min, u_max,
                                 constraints, g_min, g_max,
                                 buffer,
                                 ad, jx.J, linear_solver, ε_tol)
end
function ReducedSpaceEvaluator(
    datafile;
    device=CPU(),
    options...
)
    # Build the polar formulation from the network description file, then
    # delegate to the main constructor with the model's initial values.
    network = PS.PowerNetwork(datafile)
    form = PolarForm(network, device)
    x0 = initial(form, State())
    p0 = initial(form, Parameters())
    u0 = initial(form, Control())
    return ReducedSpaceEvaluator(form, x0, u0, p0; options...)
end
# Number of optimization (control) variables.
n_variables(nlp::ReducedSpaceEvaluator) = length(nlp.u_min)
# Total number of constraints across all constraint blocks.
n_constraints(nlp::ReducedSpaceEvaluator) = length(nlp.g_min)
# Default starting control, as provided by the model.
initial(nlp::ReducedSpaceEvaluator) = initial(nlp.model, Control())
# Bounds on the control variables.
bounds(nlp::ReducedSpaceEvaluator, ::Variables) = nlp.u_min, nlp.u_max
# Bounds on the constraint values.
bounds(nlp::ReducedSpaceEvaluator, ::Constraints) = nlp.g_min, nlp.g_max
# Solve the power flow equations g(x, u) = 0 for the state matching the new
# control `u`, and refresh every cached quantity the other callbacks rely on.
# Returns the Newton-Raphson convergence status.
function update!(nlp::ReducedSpaceEvaluator, u; verbose_level=0)
    x₀ = nlp.x
    jac_x = nlp.autodiff.Jgₓ
    # Transfer x, u, p into the network cache
    transfer!(nlp.model, nlp.buffer, nlp.x, u, nlp.p)
    # Get corresponding point on the manifold
    conv = powerflow(nlp.model, jac_x, nlp.buffer, tol=nlp.ε_tol;
                     solver=nlp.linear_solver, verbose_level=verbose_level)
    if !conv.has_converged
        @warn("Newton-Raphson algorithm failed to converge ($(conv.norm_residuals))")
        return conv
    end
    # Cache the transpose of the state Jacobian for later adjoint solves
    ∇gₓ = nlp.autodiff.Jgₓ.J
    nlp.∇gᵗ = LinearSolvers.get_transpose(nlp.linear_solver, ∇gₓ)
    # Switch preconditioner to transpose mode
    if isa(nlp.linear_solver, LinearSolvers.AbstractIterativeLinearSolver)
        LinearSolvers.update!(nlp.linear_solver, nlp.∇gᵗ)
    end
    # Update value of nlp.x with new network state
    get!(nlp.model, State(), nlp.x, nlp.buffer)
    # Refresh value of the active power of the generators
    refresh!(nlp.model, PS.Generator(), PS.ActivePower(), nlp.buffer)
    return conv
end
function objective(nlp::ReducedSpaceEvaluator, u)
    # Take as input the current cache, updated previously in `update!`.
    # TODO: determine if we should include λ' * g(x, u), even if ≈ 0
    return cost_production(nlp.model, nlp.buffer.pg)
end
# compute inplace reduced gradient (g = ∇fᵤ + (∇gᵤ')*λₖ)
# equivalent to: g = ∇fᵤ - (∇gᵤ')*λₖ_neg
# (take λₖ_neg to avoid computing an intermediate array)
function _reduced_gradient!(g, ∇fᵤ, ∇gᵤ, λₖ_neg)
    copyto!(g, ∇fᵤ)
    # g ← g - ∇gᵤᵀ λₖ_neg, computed without allocating a temporary
    mul!(g, transpose(∇gᵤ), λₖ_neg, -1.0, 1.0)
end
# In-place reduced gradient g = ∇fᵤ - ∇gᵤᵀ λ, with the adjoint λ solving
# ∇gₓᵀ λ = ∇fₓ. Relies on the cache and factorizations set up by `update!`.
function gradient!(nlp::ReducedSpaceEvaluator, g, u)
    buffer = nlp.buffer
    xₖ = nlp.x
    ∇gₓ = nlp.autodiff.Jgₓ.J
    # Evaluate Jacobian of power flow equation on current u
    ∇gᵤ = jacobian(nlp.model, nlp.autodiff.Jgᵤ, buffer)
    # Evaluate adjoint of cost function and update inplace AdjointStackObjective
    ∂cost(nlp.model, nlp.autodiff.∇f, buffer)
    ∇fₓ, ∇fᵤ = nlp.autodiff.∇f.∇fₓ, nlp.autodiff.∇f.∇fᵤ
    # Update (negative) adjoint, stored in nlp.λ to avoid an allocation
    λₖ_neg = nlp.λ
    LinearSolvers.ldiv!(nlp.linear_solver, λₖ_neg, nlp.∇gᵗ, ∇fₓ)
    _reduced_gradient!(g, ∇fᵤ, ∇gᵤ, λₖ_neg)
    return nothing
end
function constraint!(nlp::ReducedSpaceEvaluator, g, u)
    # Evaluate every constraint block into its consecutive slice of `g`,
    # in the same order used to build g_min/g_max.
    buffer = nlp.buffer
    offset = 0
    for cons in nlp.constraints
        m = size_constraint(nlp.model, cons)
        cons(nlp.model, @view(g[offset+1:offset+m]), buffer)
        offset += m
    end
end
function jacobian_structure!(nlp::ReducedSpaceEvaluator, rows, cols)
    # Dense Jacobian: enumerate every (constraint, variable) pair row by row.
    m, n = n_constraints(nlp), n_variables(nlp)
    for c in 1:m, i in 1:n
        k = (c - 1) * n + i
        rows[k] = c
        cols[k] = i
    end
end
# Dense reduced Jacobian of the constraints w.r.t. the control, assembled
# row by row: jac[i, :] = ∂cᵢ/∂u - ∇gᵤᵀ μᵢ, with the adjoint μᵢ solving
# ∇gₓᵀ μᵢ = ∂cᵢ/∂x. One linear solve per constraint row.
function jacobian!(nlp::ReducedSpaceEvaluator, jac, u)
    model = nlp.model
    xₖ = nlp.x
    ∇gₓ = nlp.autodiff.Jgₓ.J
    ∇gᵤ = nlp.autodiff.Jgᵤ.J
    nₓ = length(xₖ)
    MT = nlp.model.AT
    μ = similar(nlp.λ)
    ∂obj = nlp.autodiff.∇f
    cnt = 1
    for cons in nlp.constraints
        mc_ = size_constraint(nlp.model, cons)
        for i_cons in 1:mc_
            # Partials of the i-th row of the current constraint block,
            # written into the shared adjoint stack ∂obj
            jacobian(model, cons, i_cons, ∂obj, nlp.buffer)
            jx, ju = ∂obj.∇fₓ, ∂obj.∇fᵤ
            # Get adjoint
            LinearSolvers.ldiv!(nlp.linear_solver, μ, nlp.∇gᵗ, jx)
            jac[cnt, :] .= (ju .- ∇gᵤ' * μ)
            cnt += 1
        end
    end
end
# Accumulate into `jv` the reduced Jacobian-transpose product of ONE
# constraint block: jv += Jᵤᵀ v - ∇gᵤᵀ μ, with the adjoint μ solving
# ∇gₓᵀ μ = Jₓᵀ v.
# NOTE(review): the keyword `start` is not used in the body — confirm.
function jtprod!(nlp::ReducedSpaceEvaluator, cons, jv, u, v; start=1)
    model = nlp.model
    xₖ = nlp.x
    ∇gₓ = nlp.autodiff.Jgₓ.J
    ∇gᵤ = nlp.autodiff.Jgᵤ.J
    nₓ = length(xₖ)
    μ = nlp.λ
    ∂obj = nlp.autodiff.∇f
    # Get adjoint
    jtprod(model, cons, ∂obj, nlp.buffer, v)
    jvx, jvu = ∂obj.∇fₓ, ∂obj.∇fᵤ
    # jv .+= (ju .- ∇gᵤ' * μ)
    LinearSolvers.ldiv!(nlp.linear_solver, μ, nlp.∇gᵗ, jvx)
    jv .+= jvu
    mul!(jv, transpose(∇gᵤ), μ, -1.0, 1.0)
end
# Reduced Jacobian-transpose product over ALL constraint blocks:
# jv += Σᵢ Jᵤᵢᵀ vᵢ - ∇gᵤᵀ μ, with μ solving ∇gₓᵀ μ = Σᵢ Jₓᵢᵀ vᵢ.
# The slices of `v` follow the same ordering as `constraint!`; only a
# single adjoint solve is needed for the accumulated products.
function jtprod!(nlp::ReducedSpaceEvaluator, jv, u, v)
    μ = nlp.λ
    ∇gᵤ = nlp.autodiff.Jgᵤ.J
    ∂obj = nlp.autodiff.∇f
    # accumulators for the state and control products
    jvx = ∂obj.jvₓ
    jvu = ∂obj.jvᵤ
    fill!(jvx, 0)
    fill!(jvu, 0)
    fr_ = 0
    for cons in nlp.constraints
        n = size_constraint(nlp.model, cons)
        mask = fr_+1:fr_+n
        vv = @view v[mask]
        # Compute jtprod of current constraint
        jtprod(nlp.model, cons, ∂obj, nlp.buffer, vv)
        jvx .+= ∂obj.∇fₓ
        jvu .+= ∂obj.∇fᵤ
        fr_ += n
    end
    # one adjoint solve for the accumulated state products
    LinearSolvers.ldiv!(nlp.linear_solver, μ, nlp.∇gᵗ, jvx)
    jv .+= jvu
    mul!(jv, transpose(∇gᵤ), μ, -1.0, 1.0)
    return
end
# Utils function
function primal_infeasibility!(nlp::ReducedSpaceEvaluator, cons, u)
    # Evaluate the constraints in-place, then report the worst violation
    # of either bound (0 when feasible).
    constraint!(nlp, cons, u)
    (_, err_inf, _, err_sup) = _check(cons, nlp.g_min, nlp.g_max)
    return max(err_inf, err_sup)
end
function primal_infeasibility(nlp::ReducedSpaceEvaluator, u)
    # Allocate a zeroed constraint vector (same array type as the bounds)
    # and delegate to the in-place variant.
    cons = similar(nlp.g_min)
    fill!(cons, 0)
    return primal_infeasibility!(nlp, cons, u)
end
# Printing
# Print a summary of bound violations for the controls, states and
# constraints: worst upper/lower violations and how many entries violate
# each bound (as returned by `_check`).
function sanity_check(nlp::ReducedSpaceEvaluator, u, cons)
    println("Check violation of constraints")
    print("Control \t")
    (n_inf, err_inf, n_sup, err_sup) = _check(u, nlp.u_min, nlp.u_max)
    @printf("UB: %.4e (%d) LB: %.4e (%d)\n",
            err_sup, n_sup, err_inf, n_inf)
    print("State \t")
    (n_inf, err_inf, n_sup, err_sup) = _check(nlp.x, nlp.x_min, nlp.x_max)
    @printf("UB: %.4e (%d) LB: %.4e (%d)\n",
            err_sup, n_sup, err_inf, n_inf)
    print("Constraints\t")
    (n_inf, err_inf, n_sup, err_sup) = _check(cons, nlp.g_min, nlp.g_max)
    @printf("UB: %.4e (%d) LB: %.4e (%d)\n",
            err_sup, n_sup, err_inf, n_inf)
end
function Base.show(io::IO, nlp::ReducedSpaceEvaluator)
    # Human-readable summary of the evaluator's dimensions and settings.
    println(io, "A ReducedSpaceEvaluator object")
    println(io, " * device: ", nlp.model.device)
    println(io, " * #vars: ", n_variables(nlp))
    println(io, " * #cons: ", n_constraints(nlp))
    println(io, " * constraints:")
    for cons in nlp.constraints
        println(io, " - ", cons)
    end
    print(io, " * linear solver: ", nlp.linear_solver)
end
function reset!(nlp::ReducedSpaceEvaluator)
    # Zero the adjoint, restore the model's initial state, and rebuild
    # the network buffer from scratch.
    fill!(nlp.λ, 0)
    copy!(nlp.x, initial(nlp.model, State()))
    nlp.buffer = get(nlp.model, PhysicalState())
end
|
{"hexsha": "570427146d398506578901a0558fca64734a2113", "size": 9328, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Evaluators/reduced_evaluator.jl", "max_stars_repo_name": "lcw/ExaPF.jl", "max_stars_repo_head_hexsha": "9435f8a24ac44d08047169378bdd745269af3ef1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Evaluators/reduced_evaluator.jl", "max_issues_repo_name": "lcw/ExaPF.jl", "max_issues_repo_head_hexsha": "9435f8a24ac44d08047169378bdd745269af3ef1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Evaluators/reduced_evaluator.jl", "max_forks_repo_name": "lcw/ExaPF.jl", "max_forks_repo_head_hexsha": "9435f8a24ac44d08047169378bdd745269af3ef1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.1973244147, "max_line_length": 85, "alphanum_fraction": 0.6366852487, "num_tokens": 3103}
|
/*
[auto_generated]
boost/numeric/odeint/external/thrust/thrust_algebra_dispatcher.hpp
[begin_description]
algebra_dispatcher specialization for thrust
[end_description]
Copyright 2013 Karsten Ahnert
Copyright 2013 Mario Mulansky
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or
copy at http://www.boost.org/LICENSE_1_0.txt)
*/
#ifndef BOOST_NUMERIC_ODEINT_EXTERNAL_THRUST_THRUST_ALGEBRA_DISPATCHER_HPP_DEFINED
#define BOOST_NUMERIC_ODEINT_EXTERNAL_THRUST_THRUST_ALGEBRA_DISPATCHER_HPP_DEFINED
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <boost/numeric/odeint/external/thrust/thrust_algebra.hpp>
#include <boost/numeric/odeint/algebra/algebra_dispatcher.hpp>
namespace boost {
namespace numeric {
namespace odeint {
// specialization for thrust host_vector
// Selects thrust_algebra as the algebra for odeint state types stored in a
// thrust::host_vector, so steppers pick it up via algebra_dispatcher.
template< class T , class A >
struct algebra_dispatcher< thrust::host_vector< T , A > >
{
    typedef thrust_algebra algebra_type;
};
// specialization for thrust device_vector
// Same dispatch for states living in device memory (thrust::device_vector).
template< class T , class A >
struct algebra_dispatcher< thrust::device_vector< T , A > >
{
    typedef thrust_algebra algebra_type;
};
} // namespace odeint
} // namespace numeric
} // namespace boost
#endif // BOOST_NUMERIC_ODEINT_EXTERNAL_THRUST_THRUST_ALGEBRA_DISPATCHER_HPP_DEFINED
|
{"hexsha": "5deba2cb570a3919fc432705522db32fa10d9801", "size": 1339, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "ReactAndroid/build/third-party-ndk/boost/boost_1_57_0/boost/numeric/odeint/external/thrust/thrust_algebra_dispatcher.hpp", "max_stars_repo_name": "kimwoongkyu/react-native-0-36-1-woogie", "max_stars_repo_head_hexsha": "4fb2d44945a6305ae3ca87be3872f9432d16f1fb", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 133.0, "max_stars_repo_stars_event_min_datetime": "2018-04-20T14:09:40.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-15T11:51:25.000Z", "max_issues_repo_path": "ReactAndroid/build/third-party-ndk/boost/boost_1_57_0/boost/numeric/odeint/external/thrust/thrust_algebra_dispatcher.hpp", "max_issues_repo_name": "kimwoongkyu/react-native-0-36-1-woogie", "max_issues_repo_head_hexsha": "4fb2d44945a6305ae3ca87be3872f9432d16f1fb", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 61.0, "max_issues_repo_issues_event_min_datetime": "2015-05-27T11:20:11.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-20T15:06:21.000Z", "max_forks_repo_path": "ReactAndroid/build/third-party-ndk/boost/boost_1_57_0/boost/numeric/odeint/external/thrust/thrust_algebra_dispatcher.hpp", "max_forks_repo_name": "kimwoongkyu/react-native-0-36-1-woogie", "max_forks_repo_head_hexsha": "4fb2d44945a6305ae3ca87be3872f9432d16f1fb", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 83.0, "max_forks_repo_forks_event_min_datetime": "2018-04-27T03:58:02.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-11T09:23:40.000Z", "avg_line_length": 25.2641509434, "max_line_length": 84, "alphanum_fraction": 0.7998506348, "num_tokens": 325}
|
#
# author: Jungtaek Kim (jtkim@postech.ac.kr)
# last updated: March 22, 2021
#
"""It defines Gaussian process regression."""
import time
import numpy as np
import scipy.stats
from bayeso import covariance
from bayeso import constants
from bayeso.gp import gp_kernel
from bayeso.utils import utils_gp
from bayeso.utils import utils_covariance
from bayeso.utils import utils_common
from bayeso.utils import utils_logger
logger = utils_logger.get_logger('gp')
@utils_common.validate_types
def sample_functions(mu: np.ndarray, Sigma: np.ndarray,
    num_samples: int=1
) -> np.ndarray:
    """
    It draws `num_samples` realizations from the multivariate Gaussian
    distribution defined by mean vector `mu` and covariance `Sigma`.

    :param mu: mean vector. Shape: (n, ).
    :type mu: numpy.ndarray
    :param Sigma: covariance matrix. Shape: (n, n).
    :type Sigma: numpy.ndarray
    :param num_samples: the number of sampled functions
    :type num_samples: int., optional

    :returns: sampled functions. Shape: (num_samples, n).
    :rtype: numpy.ndarray

    :raises: AssertionError

    """
    assert isinstance(mu, np.ndarray)
    assert isinstance(Sigma, np.ndarray)
    assert isinstance(num_samples, int)
    assert len(mu.shape) == 1
    assert len(Sigma.shape) == 2
    assert mu.shape[0] == Sigma.shape[0] == Sigma.shape[1]

    # Draw each sample independently and stack them row-wise.
    rv = scipy.stats.multivariate_normal(mean=mu, cov=Sigma)
    samples = [rv.rvs() for _ in range(num_samples)]
    return np.array(samples)
@utils_common.validate_types
def predict_with_cov(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarray,
    cov_X_X: np.ndarray, inv_cov_X_X: np.ndarray, hyps: dict,
    str_cov: str=constants.STR_COV,
    prior_mu: constants.TYPING_UNION_CALLABLE_NONE=None,
    debug: bool=False
) -> constants.TYPING_TUPLE_THREE_ARRAYS:
    """
    This function returns posterior mean and posterior standard deviation
    functions over `X_test`, computed by Gaussian process regression with
    `X_train`, `Y_train`, `cov_X_X`, `inv_cov_X_X`, and `hyps`.

    :param X_train: inputs. Shape: (n, d) or (n, m, d).
    :type X_train: numpy.ndarray
    :param Y_train: outputs. Shape: (n, 1).
    :type Y_train: numpy.ndarray
    :param X_test: inputs. Shape: (l, d) or (l, m, d).
    :type X_test: numpy.ndarray
    :param cov_X_X: kernel matrix over `X_train`. Shape: (n, n).
    :type cov_X_X: numpy.ndarray
    :param inv_cov_X_X: kernel matrix inverse over `X_train`. Shape: (n, n).
    :type inv_cov_X_X: numpy.ndarray
    :param hyps: dictionary of hyperparameters for Gaussian process.
    :type hyps: dict.
    :param str_cov: the name of covariance function.
    :type str_cov: str., optional
    :param prior_mu: None, or prior mean function.
    :type prior_mu: NoneType, or callable, optional
    :param debug: flag for printing log messages.
    :type debug: bool., optional

    :returns: a tuple of posterior mean function over `X_test`, posterior
        standard deviation function over `X_test`, and posterior covariance
        matrix over `X_test`. Shape: ((l, 1), (l, 1), (l, l)).
    :rtype: tuple of (numpy.ndarray, numpy.ndarray, numpy.ndarray)

    :raises: AssertionError

    """
    utils_gp.validate_common_args(X_train, Y_train, str_cov, prior_mu, debug, X_test)
    assert isinstance(cov_X_X, np.ndarray)
    assert isinstance(inv_cov_X_X, np.ndarray)
    assert isinstance(hyps, dict)
    assert len(cov_X_X.shape) == 2
    assert len(inv_cov_X_X.shape) == 2
    assert (np.array(cov_X_X.shape) == np.array(inv_cov_X_X.shape)).all()
    utils_covariance.check_str_cov('predict_with_cov', str_cov,
        X_train.shape, shape_X2=X_test.shape)

    # Prior mean evaluated on the training and test inputs.
    prior_mu_train = utils_gp.get_prior_mu(prior_mu, X_train)
    prior_mu_test = utils_gp.get_prior_mu(prior_mu, X_test)
    # Cross-covariance k(X_train, X_test) and test covariance k(X_test, X_test).
    cov_X_Xs = covariance.cov_main(str_cov, X_train, X_test, hyps, False)
    cov_Xs_Xs = covariance.cov_main(str_cov, X_test, X_test, hyps, True)
    # Symmetrize to remove floating-point asymmetry.
    cov_Xs_Xs = (cov_Xs_Xs + cov_Xs_Xs.T) / 2.0

    # Standard GP posterior equations:
    #   mu = K_*^T K^{-1} (y - m) + m_*;  Sigma = K_** - K_*^T K^{-1} K_*
    mu_Xs = np.dot(np.dot(cov_X_Xs.T, inv_cov_X_X), Y_train - prior_mu_train) + prior_mu_test
    Sigma_Xs = cov_Xs_Xs - np.dot(np.dot(cov_X_Xs.T, inv_cov_X_X), cov_X_Xs)
    # Clip tiny negative diagonal variances to zero before the square root.
    return mu_Xs, np.expand_dims(np.sqrt(np.maximum(np.diag(Sigma_Xs), 0.0)), axis=1), Sigma_Xs
@utils_common.validate_types
def predict_with_hyps(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarray, hyps: dict,
    str_cov: str=constants.STR_COV,
    prior_mu: constants.TYPING_UNION_CALLABLE_NONE=None,
    debug: bool=False
) -> constants.TYPING_TUPLE_THREE_ARRAYS:
    """
    It computes the Gaussian process posterior over `X_test` for the given
    hyperparameters `hyps`, by first building the kernel matrix (and its
    inverse) over `X_train` and then delegating to :func:`predict_with_cov`.

    :param X_train: inputs. Shape: (n, d) or (n, m, d).
    :type X_train: numpy.ndarray
    :param Y_train: outputs. Shape: (n, 1).
    :type Y_train: numpy.ndarray
    :param X_test: inputs. Shape: (l, d) or (l, m, d).
    :type X_test: numpy.ndarray
    :param hyps: dictionary of hyperparameters for Gaussian process.
    :type hyps: dict.
    :param str_cov: the name of covariance function.
    :type str_cov: str., optional
    :param prior_mu: None, or prior mean function.
    :type prior_mu: NoneType, or callable, optional
    :param debug: flag for printing log messages.
    :type debug: bool., optional

    :returns: a tuple of posterior mean function over `X_test`, posterior
        standard deviation function over `X_test`, and posterior covariance
        matrix over `X_test`. Shape: ((l, 1), (l, 1), (l, l)).
    :rtype: tuple of (numpy.ndarray, numpy.ndarray, numpy.ndarray)

    :raises: AssertionError

    """
    utils_gp.validate_common_args(X_train, Y_train, str_cov, prior_mu, debug, X_test)
    assert isinstance(hyps, dict)
    utils_covariance.check_str_cov('predict_with_hyps', str_cov,
        X_train.shape, shape_X2=X_test.shape)

    # Kernel matrix over the training inputs and its inverse.
    cov_X_X, inv_cov_X_X, _ = covariance.get_kernel_inverse(
        X_train, hyps, str_cov, debug=debug)
    # Delegate the posterior computation.
    return predict_with_cov(
        X_train, Y_train, X_test, cov_X_X, inv_cov_X_X, hyps,
        str_cov=str_cov, prior_mu=prior_mu, debug=debug)
@utils_common.validate_types
def predict_with_optimized_hyps(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarray,
    str_cov: str=constants.STR_COV,
    str_optimizer_method: str=constants.STR_OPTIMIZER_METHOD_GP,
    prior_mu: constants.TYPING_UNION_CALLABLE_NONE=None,
    fix_noise: bool=constants.FIX_GP_NOISE,
    debug: bool=False
) -> constants.TYPING_TUPLE_THREE_ARRAYS:
    """
    This function returns posterior mean and posterior standard deviation
    functions over `X_test`, computed by the Gaussian process regression
    optimized with `X_train` and `Y_train`.

    :param X_train: inputs. Shape: (n, d) or (n, m, d).
    :type X_train: numpy.ndarray
    :param Y_train: outputs. Shape: (n, 1).
    :type Y_train: numpy.ndarray
    :param X_test: inputs. Shape: (l, d) or (l, m, d).
    :type X_test: numpy.ndarray
    :param str_cov: the name of covariance function.
    :type str_cov: str., optional
    :param str_optimizer_method: the name of optimization method.
    :type str_optimizer_method: str., optional
    :param prior_mu: None, or prior mean function.
    :type prior_mu: NoneType, or callable, optional
    :param fix_noise: flag for fixing a noise.
    :type fix_noise: bool., optional
    :param debug: flag for printing log messages.
    :type debug: bool., optional

    :returns: a tuple of posterior mean function over `X_test`, posterior
        standard deviation function over `X_test`, and posterior covariance
        matrix over `X_test`. Shape: ((l, 1), (l, 1), (l, l)).
    :rtype: tuple of (numpy.ndarray, numpy.ndarray, numpy.ndarray)

    :raises: AssertionError

    """
    utils_gp.validate_common_args(X_train, Y_train, str_cov, prior_mu, debug, X_test)
    assert isinstance(str_optimizer_method, str)
    assert isinstance(fix_noise, bool)
    # Fixed: the label passed to check_str_cov previously said
    # 'predict_with_optimized_kernel', which is not this function's name,
    # so validation errors pointed at the wrong function.
    utils_covariance.check_str_cov('predict_with_optimized_hyps', str_cov,
        X_train.shape, shape_X2=X_test.shape)
    assert str_optimizer_method in constants.ALLOWED_OPTIMIZER_METHOD_GP

    time_start = time.time()

    # Fit the kernel hyperparameters on the training data, then compute
    # the posterior with the optimized kernel.
    cov_X_X, inv_cov_X_X, hyps = gp_kernel.get_optimized_kernel(X_train, Y_train,
        prior_mu, str_cov, str_optimizer_method=str_optimizer_method,
        fix_noise=fix_noise, debug=debug)
    mu_Xs, sigma_Xs, Sigma_Xs = predict_with_cov(X_train, Y_train, X_test,
        cov_X_X, inv_cov_X_X, hyps, str_cov=str_cov, prior_mu=prior_mu,
        debug=debug)

    time_end = time.time()
    if debug:
        logger.debug('time consumed to construct gpr: %.4f sec.', time_end - time_start)
    return mu_Xs, sigma_Xs, Sigma_Xs
|
{"hexsha": "b6db27deaeae8f1971599a29771cdd4314445fff", "size": 8729, "ext": "py", "lang": "Python", "max_stars_repo_path": "bayeso/gp/gp.py", "max_stars_repo_name": "jungtaekkim/bayeso", "max_stars_repo_head_hexsha": "d11c9ff8037cf7fd3f9b41362eaab120f1224c71", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 76, "max_stars_repo_stars_event_min_datetime": "2018-01-18T03:03:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-07T06:41:41.000Z", "max_issues_repo_path": "bayeso/gp/gp.py", "max_issues_repo_name": "POSTECH-CVLab/bayeso", "max_issues_repo_head_hexsha": "d11c9ff8037cf7fd3f9b41362eaab120f1224c71", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 20, "max_issues_repo_issues_event_min_datetime": "2018-06-29T16:48:03.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-19T00:30:57.000Z", "max_forks_repo_path": "bayeso/gp/gp.py", "max_forks_repo_name": "POSTECH-CVLab/bayeso", "max_forks_repo_head_hexsha": "d11c9ff8037cf7fd3f9b41362eaab120f1224c71", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-01-07T06:24:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-11T06:21:42.000Z", "avg_line_length": 39.4977375566, "max_line_length": 95, "alphanum_fraction": 0.7117653798, "include": true, "reason": "import numpy,import scipy", "num_tokens": 2390}
|
using LinearAlgebra
using Test
using CompScienceMeshes
using SauterSchwabQuadrature
using StaticArrays
# Two triangles (Sourcechart, Testchart) sharing the edge (pI, pII); used to
# exercise the common-edge Sauter-Schwab quadrature rule.
pI = point(1,5,3)
pII = point(2,5,3)
pIII = point(7,1,0)
pIV = point(5,1,-3)
Sourcechart = simplex(pI,pIII,pII)
Testchart = simplex(pI,pIV,pII)
# order passed to the Legendre rule backing the quadrature
Accuracy = 12
ce = CommonEdge(SauterSchwabQuadrature._legendre(Accuracy,0.0,1.0))
function integrand(x,y)
    # Oscillatory Green's-function kernel (wavenumber 1) weighted by the
    # dot product of offsets from the shared edge's endpoints.
    R = norm(x - y)
    return ((x - pI)' * (y - pII)) * exp(-im * 1 * R) / (4pi * R)
end
function INTEGRAND(û,v̂)
    # Pull the integrand back onto the reference triangles, including both
    # surface Jacobians of the parameterizations.
    p = neighborhood(Testchart, û)
    q = neighborhood(Sourcechart, v̂)
    return integrand(cartesian(p), cartesian(q)) * jacobian(p) * jacobian(q)
end
# Compare the common-edge Sauter-Schwab result against an independent
# reference integration; the difference should be small.
result = sauterschwab_parameterized(INTEGRAND, ce)-
verifintegral2(Sourcechart, Testchart, integrand, Accuracy)
@test norm(result) < 1.e-3
# Static 1/R kernel for Raviart-Thomas basis interactions
kernel(x,y) = 1/norm(cartesian(x)-cartesian(y))
t1 = simplex(
@SVector[0.180878, -0.941848, -0.283207],
@SVector[0.0, -0.92388, -0.382683],
@SVector[0.0, -0.980785, -0.19509])
t2 = simplex(
@SVector[0.180878, -0.941848, -0.283207],
@SVector[0.0, -0.92388, -0.382683],
@SVector[0.158174, -0.881178, -0.44554])
# Sanity check: the triangles share exactly two vertices (one common edge)
@test indexin(t1.vertices, t2.vertices) == [1, 2, nothing]
rt = RTRefSpace{Float64}()
igd = generate_integrand_uv(kernel, rt, rt, t1, t2)
# Common-edge quadrature at increasing orders to observe convergence
i5 = sauterschwab_parameterized(igd, CommonEdge(SauterSchwabQuadrature._legendre(5,0.0,1.0)))
i10 = sauterschwab_parameterized(igd, CommonEdge(SauterSchwabQuadrature._legendre(10,0.0,1.0)))
i15 = sauterschwab_parameterized(igd, CommonEdge(SauterSchwabQuadrature._legendre(15,0.0,1.0)))
# brute numerical approach
q1 = quadpoints(t1, 10)
q2 = quadpoints(t2, 10)
M = N = numfunctions(rt)
iref = zero(i5)
for (x,w1) in q1
    f = rt(x)
    for (y,w2) in q2
        g = rt(y)
        G = kernel(x,y)
        ds = w1*w2
        # accumulate the M x N interaction matrix for this quadrature pair
        global iref += SMatrix{M,N}([dot(f[i][1], G*g[j][1])*ds for i=1:M, j=1:N])
    end
end
include(joinpath(dirname(@__FILE__,),"numquad.jl"))
ibf = numquad(kernel, rt, rt, t1, t2, zero(i5))
# All approaches should agree; higher orders agree more tightly.
@test i5 ≈ iref atol=1e-3
@test i10 ≈ iref atol=1e-3
@test i10 ≈ ibf atol=1e-3
@test i10 ≈ i15 atol=1e-5
# Test the more (or less) singular case of the second kind kernel
# Second-kind (double-layer) kernel: the cross-product matrix of the
# gradient of the static Green function, grad(1/(4pi R)) up to constants.
function kernel2nd(x, y)
    d = cartesian(x) - cartesian(y)
    gg = -d / norm(d)^3
    @SMatrix [
         0      -gg[3]   gg[2]
         gg[3]   0      -gg[1]
        -gg[2]   gg[1]   0     ]
end
igd = generate_integrand_uv(kernel2nd, rt, rt, t1, t2)
i10 = sauterschwab_parameterized(igd, CommonEdge(SauterSchwabQuadrature._legendre(10,0.0,1.0)))
i15 = sauterschwab_parameterized(igd, CommonEdge(SauterSchwabQuadrature._legendre(15,0.0,1.0)))
i20 = sauterschwab_parameterized(igd, CommonEdge(SauterSchwabQuadrature._legendre(20,0.0,1.0)))
iref = numquad(kernel2nd, rt, rt, t1, t2, zero(i15))
# NOTE(review): i10/i15/i20/iref are computed here but never compared via
# @test -- confirm whether assertions were intended for this kernel as well.
# # Compare to BEAST:
# tqd = CompScienceMeshes.quadpoints(rt, [t1], (12,))
# bqd = CompScienceMeshes.quadpoints(rt, [t2], (13,))
#
# SE_strategy = BEAST.WiltonSEStrategy(
#   tqd[1,1],
#   BEAST.DoubleQuadStrategy(
#     tqd[1,1],
#     bqd[1,1]))
#
# op = BEAST.MWDoubleLayer3D(0.0)
# z2 = zeros(3,3)
# BEAST.momintegrals!(op, rt, rt, t1, t2, z2, SE_strategy)
|
{"hexsha": "86fc04fa7978f9f6fde04ad7b39484174ea10d12", "size": 3131, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/test_ce_p_verification.jl", "max_stars_repo_name": "UnofficialJuliaMirror/SauterSchwabQuadrature.jl-535c7bfe-2023-5c1d-b712-654ef9d93a38", "max_stars_repo_head_hexsha": "419fb564912814b5e033df52e255db707349b600", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-12-23T16:54:33.000Z", "max_stars_repo_stars_event_max_datetime": "2017-12-23T16:54:33.000Z", "max_issues_repo_path": "test/test_ce_p_verification.jl", "max_issues_repo_name": "UnofficialJuliaMirror/SauterSchwabQuadrature.jl-535c7bfe-2023-5c1d-b712-654ef9d93a38", "max_issues_repo_head_hexsha": "419fb564912814b5e033df52e255db707349b600", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-07-23T12:35:34.000Z", "max_issues_repo_issues_event_max_datetime": "2018-07-23T12:44:53.000Z", "max_forks_repo_path": "test/test_ce_p_verification.jl", "max_forks_repo_name": "UnofficialJuliaMirror/SauterSchwabQuadrature.jl-535c7bfe-2023-5c1d-b712-654ef9d93a38", "max_forks_repo_head_hexsha": "419fb564912814b5e033df52e255db707349b600", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2017-10-31T14:02:31.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-25T06:47:26.000Z", "avg_line_length": 26.5338983051, "max_line_length": 95, "alphanum_fraction": 0.6793356755, "num_tokens": 1230}
|
# coding: UTF8
from sklearn.pipeline import FeatureUnion
from sklearn import preprocessing
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
import sklearn.linear_model
import img_to_pickle as i_p
import features as f
import classify
import pickle
import numpy as np
import pandas as pd
import datetime
import os
# Project-relative paths: this file lives in <repo>/script/, submissions go to
# <repo>/tmp/submission (derived by string replacement -- assumes the script
# directory is literally named "script").
ROOT = os.path.abspath(os.path.dirname(__file__))
SUBMISSION_DIR = ROOT.replace("script", "tmp/submission")
def zero_one(x):
    """Clamp *x* to the closed interval [0.0, 1.0]."""
    if x < 0.:
        return 0.
    if x > 1.:
        return 1.
    return x
def convert_testdata(test_gray_data, feature_rule=f.feature_transformer_rule):
    """Build the model-ready feature matrix for a dict of grayscale test images.

    Args:
        test_gray_data: mapping of image name -> grayscale image data.
        feature_rule: list of (name, transformer) pairs for FeatureUnion.

    Returns:
        Feature matrix X_test produced by the FeatureUnion.
    """
    data_df = f.make_test_df(test_gray_data)
    fu = FeatureUnion(transformer_list=feature_rule)
    X_test = fu.fit_transform(data_df)
    # Standardization is deliberately disabled (removed the unused
    # StandardScaler instance); re-enable only if the fitted model was
    # trained on scaled features:
    # X_test = preprocessing.StandardScaler().fit_transform(X_test)
    return X_test
def reprediction():
    """Re-run prediction with a previously fitted regressor and write a
    submission CSV.

    Loads the pickled model ``GB22015_10_04_07_30_36.pickle``, predicts a
    denoised value per pixel for every test image, and writes
    ``submission_repredict<timestamp>.csv`` under SUBMISSION_DIR.
    """
    _, _, _, _, test_gray_data, _, _ = i_p.load_data()
    test_keys = test_gray_data.keys()
    test_df = f.make_test_df(test_gray_data)
    test_df = test_df.reset_index()
    test_df.columns = ["pngname", "input"]

    clf_dir = os.path.abspath(os.path.dirname(__file__)) +\
        "/../tmp/fit_instance/"
    savefile = clf_dir + "GB22015_10_04_07_30_36.pickle"
    # Bug fix: pickle files must be opened in *binary* mode; mode "r" breaks
    # on Windows and under Python 3.  The context manager also guarantees the
    # handle is closed even if unpickling raises.
    with open(savefile, "rb") as fi:
        clf = pickle.load(fi)

    for i in xrange(len(test_keys)):
        test_img = test_df[(test_df["pngname"] == test_keys[i])]["input"].as_matrix()[0]
        imgname = test_keys[i]
        shape = test_img.shape
        test_img = {test_keys[i]: test_img}

        # Images with enough mid-tone content go through the fitted model;
        # the rest are passed through the gray transformer unchanged.
        X_middle = convert_testdata(test_img, f.transformer_middle)
        middle_ratio = X_middle.mean()
        if middle_ratio >= 0.2:
            X_test = convert_testdata(test_img)
            output = clf.predict(X_test)
        else:
            output = convert_testdata(test_img, f.transformer_gray)
        # Clamp every prediction into [0, 1] and restore the image shape.
        output = np.vectorize(zero_one)(np.asarray(output)).reshape(shape)

        # One submission row per pixel: "<img>_<row>_<col>", 1-based indices.
        tmp = []
        for row in xrange(len(output)):
            for column in xrange(len(output[row])):
                id_ = imgname + "_" + str(row + 1) + "_" + str(column + 1)
                tmp.append([id_, output[row][column]])
        if i == 0:
            predict_df = pd.DataFrame(tmp)
        else:
            predict_df = pd.concat([predict_df, pd.DataFrame(tmp)])

    predict_df.columns = ["id", "value"]
    now = datetime.datetime.now()
    submission_path = SUBMISSION_DIR + "/submission_repredict" + now.strftime("%Y_%m_%d_%H_%M_%S") + ".csv"
    predict_df.to_csv(submission_path, header=True, index=False)
# Script entry point: regenerate the submission file from the saved model.
if __name__ == '__main__':
    reprediction()
|
{"hexsha": "7da1535713aa8d8d73902b0f4f3ea56bbb02855a", "size": 2854, "ext": "py", "lang": "Python", "max_stars_repo_path": "script/repredict.py", "max_stars_repo_name": "haisland0909/Denoising-Dirty-Documents", "max_stars_repo_head_hexsha": "dcf4be659d045633f7b369db5fa9ad89793669f0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "script/repredict.py", "max_issues_repo_name": "haisland0909/Denoising-Dirty-Documents", "max_issues_repo_head_hexsha": "dcf4be659d045633f7b369db5fa9ad89793669f0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 21, "max_issues_repo_issues_event_min_datetime": "2015-08-27T12:36:19.000Z", "max_issues_repo_issues_event_max_datetime": "2015-09-25T13:19:02.000Z", "max_forks_repo_path": "script/repredict.py", "max_forks_repo_name": "haisland0909/Denoising-Dirty-Documents", "max_forks_repo_head_hexsha": "dcf4be659d045633f7b369db5fa9ad89793669f0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.9803921569, "max_line_length": 107, "alphanum_fraction": 0.6391030133, "include": true, "reason": "import numpy", "num_tokens": 689}
|
#include <config.h>
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <boost/asio.hpp>
#include <libtorrent/session.hpp>
#include "command_server.hpp"
using namespace boost::asio::ip;
namespace lt = libtorrent;
const auto UNIX_SOCKET_PATH = "/tmp/arr-torrent-cmd-srv.sock";
// Print the invocation synopsis and the package version to stdout.
void print_usage(const char *prog_name)
{
	std::cout << "usage: " << prog_name << "\n";
	std::cout << "version: " << PACKAGE_VERSION << "\n";
}
int main(int argc, char *argv[])
{
	// The daemon accepts no command-line arguments; anything else is a
	// usage error.
	if (argc > 1) {
		print_usage(argv[0]);
		return EXIT_FAILURE;
	}

	{
		// libtorrent session listening on all interfaces, port 6881; kept
		// alive for the whole scope of this block.
		lt::settings_pack settings;
		settings.set_str(lt::settings_pack::listen_interfaces, "0.0.0.0:6881");
		lt::session session(settings);

		// run command server
		boost::asio::io_service io_service;
		// Remove a stale socket file from a previous run; failure (no such
		// file) is harmless, so the return value is deliberately ignored.
		std::remove(UNIX_SOCKET_PATH);
		arr::protocol::endpoint endpoint(UNIX_SOCKET_PATH);
		arr::server server(io_service, endpoint);
		// Blocks until the server is told to quit.
		server.wait_until_quit();
	}
	return EXIT_SUCCESS;
}
|
{"hexsha": "cb92c235b703e3e0446cf9ed88470f7b7af47574", "size": 937, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "daemon/main.cpp", "max_stars_repo_name": "IT-Syndikat/arr-torrent", "max_stars_repo_head_hexsha": "d14e7959294d2bb8a3b7332a269be70838e8109a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2018-04-24T19:08:10.000Z", "max_stars_repo_stars_event_max_datetime": "2018-04-24T19:08:10.000Z", "max_issues_repo_path": "daemon/main.cpp", "max_issues_repo_name": "IT-Syndikat/arr-torrent", "max_issues_repo_head_hexsha": "d14e7959294d2bb8a3b7332a269be70838e8109a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "daemon/main.cpp", "max_forks_repo_name": "IT-Syndikat/arr-torrent", "max_forks_repo_head_hexsha": "d14e7959294d2bb8a3b7332a269be70838e8109a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.1224489796, "max_line_length": 73, "alphanum_fraction": 0.6915688367, "num_tokens": 244}
|
! Prompt the user for the name of a saved list and delete its .dat file.
! The currently open list (argument `lista`) is protected from deletion;
! entering 'sair' aborts. (Removed a large set of unused declarations and
! fixed the typo 'apagrar' -> 'apagar' in the error message.)
subroutine apaga_lista(lista)
    integer ierr
    character (50) lista , listaux
    write(*,"(A41)")'Digite o nome da lista que deseja apagar.'
    ! Re-read until the name is read successfully.
8   read(*,"(A)",iostat=ierr)listaux
    if(ierr/=0) then
        write(*,"(A38)")'Erro na leitura! Digite o nome denovo.'
        go to 8
    end if
    ! 'sair' cancels the operation.
    if(trim(adjustl(listaux)) == 'sair') return
    ! Refuse to delete the list that is currently in use.
    if(trim(adjustl(listaux)) == trim(adjustl(lista))) then
        write(*,"(A77)")'Esta lista esta em uso. Procure outra lista para que possa apagar esta lista,'
        write(*,"(A)")'ou apague outra lista.'
        return
    end if
    ! Open the target file; on failure ask for another name.
    open(unit = 2 , file = trim(adjustl(listaux)) // '.dat' , status = 'old' , iostat = ierr)
    if(ierr /= 0) then
        write(*,"(A)")'Erro ao tentar apagar esta lista. Tente novamente.'
        go to 8
    end if
    ! Closing with status='delete' removes the file from disk.
    close(unit = 2 , status = 'delete' , iostat = ierr)
    return
end subroutine
|
{"hexsha": "654855179774f34228648fe22b360b08416c7089", "size": 980, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "codes/Projeto designe/designe final/apaga_lista.f90", "max_stars_repo_name": "danielsanfr/fortran-study", "max_stars_repo_head_hexsha": "101ff0aa552f40542b5bc3e90ee0265f9a74eb48", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "codes/Projeto designe/designe final/apaga_lista.f90", "max_issues_repo_name": "danielsanfr/fortran-study", "max_issues_repo_head_hexsha": "101ff0aa552f40542b5bc3e90ee0265f9a74eb48", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "codes/Projeto designe/designe final/apaga_lista.f90", "max_forks_repo_name": "danielsanfr/fortran-study", "max_forks_repo_head_hexsha": "101ff0aa552f40542b5bc3e90ee0265f9a74eb48", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.0, "max_line_length": 97, "alphanum_fraction": 0.6326530612, "num_tokens": 324}
|
using Dates
using TimeZones
using CBinding
# Locate the package root and its bundled C header directory.
packagedir = joinpath(dirname(pathof(DWDataReader)), "..")
includedir = joinpath(packagedir, "src", "include")

# CBinding.jl: Set up compiler context
# (links against the DWDataReaderLib64 shared library shipped in the package
# root -- 64-bit only as written)
c`-std=c99 -Wall -I$(includedir) -lDWDataReaderLib64 -L$(packagedir)`

# Map the C fixed-width integer type onto the matching Julia type.
const c"int64_t" = Int64

# CBinding.jl: Create Julia types and bindings for DLL functions from header
c"""
#include <DWDataReaderLib.h>
"""j;
# Summary metadata for an opened DEWESoft data file.
struct FileInfo
    sample_rate::Float64              # acquisition rate in Hz
    start_store_time::ZonedDateTime   # wall-clock start of storage (UTC-based)
    duration::Float64                 # recording length in seconds
end
# One-line human-readable summary: "<start> <rate> Hz <duration> s".
function Base.show(io::IO, fi::FileInfo)
    line = string(fi.start_store_time, " ", fi.sample_rate, " Hz ", fi.duration, " s")
    println(io, line)
end
"""
DWDataReader.File(source; kwargs...) => DWDataReader.File
Read DEWESoft input data files (.d7d extension) and return a `DWDataReader.File` object.
"""
struct File
name::String
info::FileInfo
nchannels::Int64
channels::Cptr{DWChannel}
closed::Bool
delete::Bool
readerid::Int8
lookup::Dict{Symbol,DWChannel}
end
# Raw field accessors that bypass the channel-lookup `getproperty` override.
function getname(f::File)
    return getfield(f, :name)
end
function getnchannels(f::File)
    return getfield(f, :nchannels)
end
# One-line summary: quoted source path plus channel count.
function Base.show(io::IO, f::File)
    line = string("DWDataReader.File(\"", getname(f), "\"): ", getnchannels(f), " channels")
    println(io, line)
end
# Channel access by name: `f.somechannel` resolves via the internal lookup
# Dict first, then falls back to the struct's own fields.
function Base.getproperty(f::File, ch::Symbol)
    lut = getfield(f, :lookup)
    haskey(lut, ch) && return lut[ch]
    return getfield(f, ch)
end
# Enables `f[:channel]` with the same lookup-then-field semantics as
# property access.
function Base.getindex(f::File, ch::Symbol)
    lut = getfield(f, :lookup)
    haskey(lut, ch) && return lut[ch]
    return getfield(f, ch)
end
# Global count of readers registered with the DLL; every new File bumps it.
global readers = 0
function setreaders(r)
    global readers
    readers += r
end
"""Deprecated in favor of direct DWChannel.name via `getproperty` mechanism"""
function getname(c::DWChannel)
replace(String(c.name), r"\0+$" => s"")
end
# Property access for wrapped DWChannel with null-terminated string truncation
function Base.getproperty(c::DWChannel, v::Symbol)
    # Delegate to the parent (CBinding-generated) getproperty, then strip the
    # trailing NUL padding from the fixed-size C `name` buffer.
    x = invoke(getproperty, Tuple{supertype(DWChannel),Symbol}, c, v)
    return v == :name ? replace(String(x), r"\0+$" => s"") : x
end
function File(
    source;
    # options
    # NOTE(review): `debug` and `kw...` are accepted but never used below --
    # confirm whether they were meant to be forwarded somewhere.
    debug::Bool = false,
    kw...,
)
    isempty(source) && throw(ArgumentError("unable to read DW data from empty source"))
    name = source
    closed = true
    delete = false
    DWInit()
    # file Reader management
    readerid = DWDataReader.readers # DWInit creates the first readerid 0
    if readerid > 0 # Add reader only if this is not the first
        status = DWAddReader()
        status != 0 && throw(status)
    end
    DWDataReader.setreaders(1)
    # Check for matching number of readers: our counter must agree with the DLL.
    num_readers = Ref{Cint}(0)
    status = DWGetNumReaders(num_readers)
    status != 0 && throw(status)
    num_readers[] != readers && throw("DWGetNumReaders=$(num_readers[]) != $(readers)")
    # Opening the file; the DLL fills in rate / start time / duration.
    dwfileinfo = Ref(DWFileInfo())
    status = DWOpenDataFile(source, dwfileinfo)
    status != 0 && throw(status)
    closed = false
    fileinfo = FileInfo(
        dwfileinfo[].sample_rate,
        startstoretime(dwfileinfo[].start_store_time),
        dwfileinfo[].duration,
    )
    # How to make this the File AbstractVector{DWChannel} type?
    nchannels = DWGetChannelListCount()
    # NOTE(review): this C buffer is never freed -- confirm whether the DLL or
    # a finalizer is expected to reclaim it.
    channels = Libc.malloc(DWChannel(), nchannels)
    DWGetChannelList(channels)
    # Map channel names to descriptors so f.<name> / f[:<name>] resolve.
    lookup = Dict(Symbol(getname(channels[i])) => channels[i] for i = 1:nchannels)
    File(name, fileinfo, nchannels, channels, closed, delete, readerid, lookup)
end
"""Set this DWFile instance as the active reader"""
function activate(f::File, verifyopen::Bool = true)
verifyopen && f.closed && error("I/O operation on closed file")
status = DWSetActiveReader(f.readerid)
status != 0 && throw(status)
end
"""Number of scaled samples on channel `ch`; errors on a negative DLL reply."""
function numberofsamples(ch::DWChannel)
    n = DWGetScaledSamplesCount(ch.index)
    if n < 0
        throw("DWGetScaledSamplesCount($(ch.index))=$(n) should be non-negative")
    end
    n
end
"""Load and return full speed data as vector"""
function scaled(ch::DWChannel, arrayindex = 0)
!(0 <= arrayindex < ch.array_size) && throw("arrayIndex is out of range")
count = numberofsamples(ch)
data = zeros(count * ch.array_size)
time = zeros(count)
status = DWGetScaledSamples(ch.index, Cint(0), count, data, time)
status != 0 && throw(status)
return [time data]
end
# Convert a DEWESoft timestamp (fractional days since the COM/Excel epoch
# 1899-12-30) into a UTC ZonedDateTime with microsecond resolution.
function startstoretime(time::Float64)
    origin = ZonedDateTime(DateTime(1899, 12, 30), tz"UTC")
    days_to_us = time * 24 * 60 * 60 * 1000 * 1000
    return origin + Dates.Microsecond(round(days_to_us))
end
|
{"hexsha": "5a98e1404acea6ff395b945af4c0e8120cd02e55", "size": 4617, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/file.jl", "max_stars_repo_name": "fleimgruber/DWDataReader.jl", "max_stars_repo_head_hexsha": "4a8a280ed9a34a6af63a295d976e578aeaca1e97", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/file.jl", "max_issues_repo_name": "fleimgruber/DWDataReader.jl", "max_issues_repo_head_hexsha": "4a8a280ed9a34a6af63a295d976e578aeaca1e97", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-12-13T14:43:21.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-13T14:43:30.000Z", "max_forks_repo_path": "src/file.jl", "max_forks_repo_name": "fleimgruber/DWDataReader.jl", "max_forks_repo_head_hexsha": "4a8a280ed9a34a6af63a295d976e578aeaca1e97", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.2215189873, "max_line_length": 88, "alphanum_fraction": 0.6844271172, "num_tokens": 1255}
|
# coding=utf-8
import numpy as np
import torch
from PIL import Image
def _is_numpy(input):
"""
Check if input is a numpy object.
Args:
input (:obj:): input.
Returns:
(bool): True if input is a numpy object.
"""
return isinstance(input, np.ndarray)
def _is_pil_image(input):
    """Return True when *input* is a ``PIL.Image.Image`` instance.

    Args:
        input: object to inspect.

    Returns:
        bool: whether *input* is a PIL image.
    """
    is_image = isinstance(input, Image.Image)
    return is_image
def pil_to_tensor(input):
    """
    Convert a ``PIL Image`` to a tensor of the same type.
    This function does not support torchscript.

    Args:
        input (`PIL.Image.Image`): input PIL image to be converted to tensor.

    Returns:
        (torch.Tensor): CHW tensor; byte images are rescaled to [0, 1] floats.
    """
    if not _is_pil_image(input):
        raise TypeError("input should be PIL Image. Got {}".format(type(input)))

    default_float_dtype = torch.get_default_dtype()

    # handle PIL Image: pick the numpy dtype matching the PIL mode
    if input.mode == "I":  # 32-bit signed integer pixels
        output = torch.from_numpy(np.array(input, np.int32, copy=False))
    elif input.mode == "I;16":
        # NOTE(review): "I;16" is unsigned in PIL but mapped to int16 here, so
        # values above 32767 wrap -- confirm this is intended.
        output = torch.from_numpy(np.array(input, np.int16, copy=False))
    elif input.mode == "F":  # 32-bit float pixels
        output = torch.from_numpy(np.array(input, np.float32, copy=False))
    elif input.mode == "1":  # 1-bit pixels, scaled up to 0/255 bytes
        output = 255 * torch.from_numpy(np.array(input, np.uint8, copy=False))
    else:
        # generic path: raw bytes into a flat uint8 tensor
        output = torch.ByteTensor(torch.ByteStorage.from_buffer(input.tobytes()))

    # Bug fix: the (H, W, C) view must be applied on EVERY branch.  The numpy
    # branches above produce 2-D (H, W) tensors, so the 3-axis permute below
    # would fail for "I"/"I;16"/"F"/"1" images without this reshape (single-band
    # modes have len(getbands()) == 1, so the element count matches).
    output = output.view(input.size[1], input.size[0], len(input.getbands()))

    # put it from HWC to CHW format
    output = output.permute((2, 0, 1)).contiguous()
    if isinstance(output, torch.ByteTensor):
        return output.to(dtype=default_float_dtype).div(255)
    else:
        return output
|
{"hexsha": "8d906e0edf9e2f0cd2f90955b8cd8b179df83686", "size": 1816, "ext": "py", "lang": "Python", "max_stars_repo_path": "cheblienet/datas/functionals.py", "max_stars_repo_name": "haguettaz/ChebLieNet", "max_stars_repo_head_hexsha": "8545122c85513a4b4e8cc34c9f01bacca9140110", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2021-11-25T11:51:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-06T18:35:32.000Z", "max_issues_repo_path": "cheblienet/datas/functionals.py", "max_issues_repo_name": "haguettaz/ChebLieNet", "max_issues_repo_head_hexsha": "8545122c85513a4b4e8cc34c9f01bacca9140110", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cheblienet/datas/functionals.py", "max_forks_repo_name": "haguettaz/ChebLieNet", "max_forks_repo_head_hexsha": "8545122c85513a4b4e8cc34c9f01bacca9140110", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-06T18:35:42.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-06T18:35:42.000Z", "avg_line_length": 26.3188405797, "max_line_length": 81, "alphanum_fraction": 0.6272026432, "include": true, "reason": "import numpy", "num_tokens": 445}
|
####
# install.packages("devtools")
# devtools::install_github("iobis/obistools")
# devtools::install_github("EMODnet/skosxml")
# devtools::install_github("EMODnet/EMODnetBiocheck")
####
library(obistools)
library(EMODnetBiocheck)
# Run the EMODnetBiocheck quality checks on one SHARK Darwin Core Archive and
# write every summary / error / issue table next to the input archive.
check_shark_dwca <- function(data_dir_path, dwca_file_name) {

    # Paths: the DwC-A zip, and a helper for per-report output file names.
    zip_path <- paste0(data_dir_path, "\\", dwca_file_name, ".zip")
    out_path <- function(suffix) {
        paste0(data_dir_path, "\\", dwca_file_name, "_", suffix, ".txt")
    }

    # Read the three DwC-A tables straight out of the archive.
    event      <- read.csv(unz(zip_path, "event.txt"), header = TRUE, sep = "\t")
    occurrence <- read.csv(unz(zip_path, "occurrence.txt"), header = TRUE, sep = "\t")
    emof       <- read.csv(unz(zip_path, "extendedmeasurementorfact.txt"), header = TRUE, sep = "\t")

    # EMODnet/OBIS dataset check.
    IPTreport <- checkdataset(Event = event, Occurrence = occurrence, eMoF = emof)

    # Persist the report tables.
    write.table(IPTreport$datasummary,               out_path("SUMMARY_DATA"),      na = "NA", sep = "\t")
    write.table(IPTreport$mofsummary,                out_path("SUMMARY_MOF"),       na = "NA", sep = "\t")
    write.table(IPTreport$dtb$eventerror_table,      out_path("ERRORS_EVENT"),      na = "NA", sep = "\t")
    write.table(IPTreport$dtb$occurrenceerror_table, out_path("ERRORS_OCCURRENCE"), na = "NA", sep = "\t")
    write.table(IPTreport$dtb$emoferror_table,       out_path("ERRORS_EMOF"),       na = "NA", sep = "\t")
    write.table(IPTreport$dtb$general_issues,        out_path("ISSUES_GENERAL"),    na = "NA", sep = "\t")
    write.table(IPTreport$dtb$mof_issues,            out_path("ISSUES_MOF"),        na = "NA", sep = "\t")
}
data_dir_path = "C:\\darwincore\\data"
check_shark_dwca(data_dir_path, "dwca-smhi-bacterioplankton-nat_TEST")
check_shark_dwca(data_dir_path, "dwca-smhi-zoobenthos-nat_TEST")
check_shark_dwca(data_dir_path, "dwca-smhi-phytoplankton-nat_TEST")
check_shark_dwca(data_dir_path, "dwca-smhi-zooplankton-nat_TEST")
|
{"hexsha": "89fa5b20eb7b2ac31501f189a754f950aa965524", "size": 2431, "ext": "r", "lang": "R", "max_stars_repo_path": "R_scripts/iobistools_validation.r", "max_stars_repo_name": "sharkdata/darwincore", "max_stars_repo_head_hexsha": "28937763353ce75b8897c5d8ab1fadb188b302b2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "R_scripts/iobistools_validation.r", "max_issues_repo_name": "sharkdata/darwincore", "max_issues_repo_head_hexsha": "28937763353ce75b8897c5d8ab1fadb188b302b2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "R_scripts/iobistools_validation.r", "max_forks_repo_name": "sharkdata/darwincore", "max_forks_repo_head_hexsha": "28937763353ce75b8897c5d8ab1fadb188b302b2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-16T07:57:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-16T07:57:09.000Z", "avg_line_length": 45.8679245283, "max_line_length": 140, "alphanum_fraction": 0.7239819005, "num_tokens": 743}
|
# coding: utf-8
# ## This notebook will help you train a vanilla Point-Cloud AE with the basic architecture we used in our paper.
# (it assumes latent_3d_points is in the PYTHONPATH and the structural losses have been compiled)
import os
import sys
sys.path.insert(0, "/home/gy46/")
import tqdm
import numpy as np
import os.path as osp
from latent_3d_points.src.ae_templates import mlp_architecture_ala_iclr_18, default_train_params
from latent_3d_points.src.autoencoder import Configuration as Conf
from latent_3d_points.src.point_net_ae import PointNetAutoEncoder
from latent_3d_points.src.in_out import snc_category_to_synth_id, create_dir
from latent_3d_points.src.in_out import PointCloudDataSet,load_all_point_clouds_under_folder
from latent_3d_points.src.tf_utils import reset_tf_graph
from latent_3d_points.src.general_utils import plot_3d_point_cloud
# Arguments
import argparse
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('train_dir', type=str, default=None,
                    help='Training directory (where we stored the model)')
parser.add_argument('--dataset_dir', type=str, default='../data/ModelNet40.PC15k',
                    help='Directory of the dataset.')
parser.add_argument('--normalize_shape', action='store_true',
                    help="Whether normalizing shape.")
parser.add_argument('--epochs', type=int, default=1000,
                    help="Training epochs.")
parser.add_argument('--bneck_size', type=int, default=128,
                    help="Bottleneck size.")
args = parser.parse_args()
print(args)

train_dir = args.train_dir
top_in_dir = args.dataset_dir
n_pc_points = 2048                 # Number of points per model.
bneck_size = args.bneck_size                # Bottleneck-AE size
restore_epoch = args.epochs

# Restore the trained point-cloud autoencoder from its saved configuration.
# NOTE(review): bneck_size is parsed but never used below (the bottleneck
# comes from the restored configuration) -- confirm the flag is still needed.
print("Build model")
print("Train dir:%s"%train_dir)
print("Load model configuration:")
conf_path = os.path.join(train_dir, 'configuration')
conf = Conf.load(conf_path)
print(conf)
reset_tf_graph()
print("Build tensorflow graph")
ae = PointNetAutoEncoder(conf.experiment_name, conf)
ae.restore_model(args.train_dir, epoch=restore_epoch)
#####################
# Load Training Set
#####################
# Each subdirectory of the dataset root is one class with train/ and test/
# splits of .npy point clouds; the directory index becomes the class label.
print("Load data (train set)")
tr_shape_lst = []
te_shape_lst = []
tr_lbl = []
te_lbl = []
class_lst = []
for i, f in enumerate(os.listdir(top_in_dir)):
    # Train
    tr_class_dir = os.path.join(top_in_dir, f, 'train')
    if not os.path.isdir(tr_class_dir):
        continue
    class_lst.append(f)
    all_tr_pc_data = load_all_point_clouds_under_folder(
        tr_class_dir, n_threads=8, file_ending='.npy', max_num_points=n_pc_points,
        verbose=True, normalize=args.normalize_shape, rotation_axis=None
    )
    tr_pc, _, _ = all_tr_pc_data.full_epoch_data()
    N = tr_pc.shape[0]
    tr_shape_lst.append(tr_pc)
    for _ in range(N):
        tr_lbl.append(i)
    # Test
    te_class_dir = os.path.join(top_in_dir, f, 'test')
    all_te_pc_data = load_all_point_clouds_under_folder(
        te_class_dir, n_threads=8, file_ending='.npy', max_num_points=n_pc_points,
        verbose=True, normalize=args.normalize_shape, rotation_axis=None
    )
    te_pc, _, _ = all_te_pc_data.full_epoch_data()
    M = te_pc.shape[0]
    te_shape_lst.append(te_pc)
    for _ in range(M):
        te_lbl.append(i)
tr_pc = np.concatenate(tr_shape_lst)
tr_lbl = np.array(tr_lbl)
te_pc = np.concatenate(te_shape_lst)
te_lbl = np.array(te_lbl)
assert tr_pc.shape[0] == tr_lbl.shape[0]
assert te_pc.shape[0] == te_lbl.shape[0]

# Encode every shape with the restored AE and save the latent codes + labels.
print("Gather latent vectors (train set)")
tr_latent = ae.get_latent_codes(tr_pc, batch_size=100)
print(tr_latent.shape)
tr_latent_save_path = os.path.join(conf.train_dir, 'MN_train_all_latent.npy')
tr_label_save_path = os.path.join(conf.train_dir, 'MN_train_all_label.npy')
np.save(tr_latent_save_path, tr_latent)
np.save(tr_label_save_path, tr_lbl)
print("Train latent vectors and labels save path:%s %s"\
        %(tr_latent_save_path, tr_label_save_path))

print("Gather latent vectors (test set)")
te_latent = ae.get_latent_codes(te_pc, batch_size=100)
print(te_latent.shape)
te_latent_save_path = os.path.join(conf.train_dir, 'MN_test_all_latent.npy')
te_label_save_path = os.path.join(conf.train_dir, 'MN_test_all_label.npy')
np.save(te_latent_save_path, te_latent)
np.save(te_label_save_path, te_lbl)
print("Test latent vectors and labels save path:%s %s"\
        %(te_latent_save_path, te_label_save_path))

# Classification
# Linear SVM on the latent codes; report test accuracy in percent.
print("Classification...")
from sklearn.svm import LinearSVC
clf = LinearSVC(random_state=0)
clf.fit(tr_latent, tr_lbl)
test_pred = clf.predict(te_latent)
test_gt = te_lbl.flatten()
acc = np.mean((test_pred==test_gt).astype(float)) * 100.
print("Acc:%s"%acc)
|
{"hexsha": "78f9656e50e0b59e54cd6898e3b28d1b95b817c1", "size": 4721, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/MN_clf.py", "max_stars_repo_name": "stevenygd/latent_3d_points", "max_stars_repo_head_hexsha": "cf8c0888f4489690fa5b692cbd44638f8db2d0ba", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/MN_clf.py", "max_issues_repo_name": "stevenygd/latent_3d_points", "max_issues_repo_head_hexsha": "cf8c0888f4489690fa5b692cbd44638f8db2d0ba", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/MN_clf.py", "max_forks_repo_name": "stevenygd/latent_3d_points", "max_forks_repo_head_hexsha": "cf8c0888f4489690fa5b692cbd44638f8db2d0ba", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-10-12T04:48:43.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-12T04:48:43.000Z", "avg_line_length": 35.4962406015, "max_line_length": 113, "alphanum_fraction": 0.7396737979, "include": true, "reason": "import numpy", "num_tokens": 1182}
|
import sympy as sp
def calc_taylor_series(equation, xInit, a, n: int):
    """
    Estimate f(xInit) with an n-th order Taylor expansion about x = a.

    Parameters:
        equation: the sympy expression f(x)
        xInit: the point at which to estimate f
        a: the expansion point
        n: number of derivatives to use
    """
    x = sp.Symbol('x')

    # f, f', f'', ..., f^(n): each entry differentiates the previous one.
    derivs = [equation]
    for _ in range(n):
        derivs.append(sp.diff(derivs[-1]))

    # Evaluate every derivative numerically at the expansion point.
    coeffs = [d.evalf(subs={x: a}) for d in derivs]

    # Sum the series: f^(k)(a) * (xInit - a)^k / k!
    total = 0
    for k, c in enumerate(coeffs):
        total += (c * (xInit - a) ** k) / sp.factorial(k)
    return total
def calc_maclaurin_series(equation, xInit, n: int):
    """
    Estimate f(xInit) with a Maclaurin series (a Taylor series about a = 0).

    Parameters:
        equation: the sympy expression f(x)
        xInit: the point at which to estimate f
        n: number of derivatives to use
    """
    return calc_taylor_series(equation, xInit, a=0, n=n)
def get_derivatives(equation, n: int):
    """Return the first *n* successive derivatives [f', f'', ..., f^(n)].

    Bug fix: the previous version appended ``sp.diff(equation)`` on every
    iteration, producing n copies of the FIRST derivative instead of
    successive higher-order derivatives.
    """
    derivatives = []
    current = equation
    for _ in range(n):
        current = sp.diff(current)
        derivatives.append(current)
    return derivatives
def get_n_derivative(equation, n:int):
    """Return the n-th derivative of ``equation`` (last of get_derivatives)."""
    return get_derivatives(equation, n)[-1]
def example():
    """Demo: 2nd-order Taylor expansion of sin(x)**3 about a = 1.5 at x = 0.1."""
    x = sp.Symbol('x')
    f = sp.sin(x) ** 3
    print(calc_taylor_series(f, 0.1, 1.5, 2))
|
{"hexsha": "4f563e106e1c1af8c9e6273a37d5948bd35301c7", "size": 1512, "ext": "py", "lang": "Python", "max_stars_repo_path": "mid_exam/taylor_series_calculator.py", "max_stars_repo_name": "GiantSweetroll/Computational-Mathematics", "max_stars_repo_head_hexsha": "f94457d1943a7d17379296cac284da88aefa862c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mid_exam/taylor_series_calculator.py", "max_issues_repo_name": "GiantSweetroll/Computational-Mathematics", "max_issues_repo_head_hexsha": "f94457d1943a7d17379296cac284da88aefa862c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mid_exam/taylor_series_calculator.py", "max_forks_repo_name": "GiantSweetroll/Computational-Mathematics", "max_forks_repo_head_hexsha": "f94457d1943a7d17379296cac284da88aefa862c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.2, "max_line_length": 66, "alphanum_fraction": 0.6236772487, "include": true, "reason": "import sympy", "num_tokens": 414}
|
!========================================================================
!
! S P E C F E M 2 D Version 7 . 0
! --------------------------------
!
! Main historical authors: Dimitri Komatitsch and Jeroen Tromp
! Princeton University, USA
! and CNRS / University of Marseille, France
! (there are currently many more authors!)
! (c) Princeton University and CNRS / University of Marseille, April 2014
!
! This software is a computer program whose purpose is to solve
! the two-dimensional viscoelastic anisotropic or poroelastic wave equation
! using a spectral-element method (SEM).
!
! This software is governed by the CeCILL license under French law and
! abiding by the rules of distribution of free software. You can use,
! modify and/or redistribute the software under the terms of the CeCILL
! license as circulated by CEA, CNRS and Inria at the following URL
! "http://www.cecill.info".
!
! As a counterpart to the access to the source code and rights to copy,
! modify and redistribute granted by the license, users are provided only
! with a limited warranty and the software's author, the holder of the
! economic rights, and the successive licensors have only limited
! liability.
!
! In this respect, the user's attention is drawn to the risks associated
! with loading, using, modifying and/or developing or reproducing the
! software by the user in light of its specific status of free software,
! that may mean that it is complicated to manipulate, and that also
! therefore means that it is reserved for developers and experienced
! professionals having in-depth computer knowledge. Users are therefore
! encouraged to load and test the software's suitability as regards their
! requirements in conditions enabling the security of their systems and/or
! data to be ensured and, more generally, to use and operate it in the
! same conditions as regards security.
!
! The full text of the license is available in file "LICENSE".
!
!========================================================================
subroutine compute_forces_acoustic_backward(b_potential_dot_dot_acoustic,b_potential_acoustic)
! compute forces in the acoustic elements in forward simulation and in adjoint simulation in adjoint inversion
!
! For the reconstructed (backward) acoustic potential wavefield this routine:
!   1. accumulates the stiffness term of the weak form, i.e. the integral of
!      (1/rho) grad(chi) . grad(w) over every acoustic spectral element, into
!      b_potential_dot_dot_acoustic
!   2. re-injects the Stacey absorbing-boundary contributions that were saved
!      to disk/memory during the forward run, read at the reversed time index
!      NSTEP-it+1
! Axisymmetric runs (AXISYM) use GLJ quadrature and the extra r_xiplus1
! geometric factor on elements that touch the symmetry axis.
use specfem_par, only: nglob,nspec,nelemabs,it,NSTEP, &
assign_external_model,ibool,kmato,numabs,acoustic, &
codeabs,codeabs_corner, &
density,poroelastcoef,xix,xiz,gammax,gammaz,jacobian, &
vpext,rhoext, &
hprime_xx,hprimewgll_xx, &
hprime_zz,hprimewgll_zz,wxgll,wzgll, &
AXISYM,coord, is_on_the_axis,hprimeBar_xx,hprimeBarwglj_xx,xiglj,wxglj, &
ibegin_edge1,iend_edge1,ibegin_edge3,iend_edge3, &
ibegin_edge4,iend_edge4,ibegin_edge2,iend_edge2, &
ib_left,ib_right,ib_bottom,ib_top, &
b_absorb_acoustic_left,b_absorb_acoustic_right, &
b_absorb_acoustic_bottom,b_absorb_acoustic_top, &
STACEY_BOUNDARY_CONDITIONS
implicit none
include "constants.h"
! in/out: backward acceleration-like potential (accumulated into) and the
! backward potential itself (read only)
real(kind=CUSTOM_REAL), dimension(nglob) :: b_potential_dot_dot_acoustic, b_potential_acoustic
! local parameters
integer :: ispec,i,j,k,iglob,ispecabs,ibegin,iend,jbegin,jend
integer :: ifirstelem,ilastelem
! spatial derivatives
real(kind=CUSTOM_REAL) :: dux_dxi,dux_dgamma,dux_dxl,dux_dzl
real(kind=CUSTOM_REAL) :: xxi
real(kind=CUSTOM_REAL), dimension(NGLLX,NGLLZ) :: tempx1,tempx2
real(kind=CUSTOM_REAL), dimension(NGLJ,NGLLZ) :: r_xiplus1
! Jacobian matrix and determinant
real(kind=CUSTOM_REAL) :: xixl,xizl,gammaxl,gammazl,jacobianl
! material properties of the acoustic medium
real(kind=CUSTOM_REAL) :: mul_relaxed,lambdal_relaxed,kappal,cpl,rhol
ifirstelem = 1
ilastelem = nspec
! loop over spectral elements
do ispec = ifirstelem,ilastelem
! acoustic spectral element
if( acoustic(ispec) ) then
! default (homogeneous) density for this element; overridden per GLL point
! below when an external density model is assigned
rhol = density(1,kmato(ispec))
! first double loop over GLL points to compute and store gradients
do j = 1,NGLLZ
do i = 1,NGLLX
! derivative along x and along z
dux_dxi = 0._CUSTOM_REAL; dux_dgamma = 0._CUSTOM_REAL
! first double loop over GLL points to compute and store gradients
! we can merge the two loops because NGLLX == NGLLZ
do k = 1,NGLLX
if( AXISYM ) then
if( is_on_the_axis(ispec) ) then
! GLJ derivative matrix on axial elements
dux_dxi = dux_dxi + b_potential_acoustic(ibool(k,j,ispec)) * hprimeBar_xx(i,k)
else
dux_dxi = dux_dxi + b_potential_acoustic(ibool(k,j,ispec)) * hprime_xx(i,k)
endif
else
dux_dxi = dux_dxi + b_potential_acoustic(ibool(k,j,ispec)) * hprime_xx(i,k)
endif
dux_dgamma = dux_dgamma + b_potential_acoustic(ibool(i,k,ispec)) * hprime_zz(j,k)
enddo
xixl = xix(i,j,ispec)
xizl = xiz(i,j,ispec)
gammaxl = gammax(i,j,ispec)
gammazl = gammaz(i,j,ispec)
! derivatives of potential: chain rule from (xi,gamma) to (x,z)
dux_dxl = dux_dxi * xixl + dux_dgamma * gammaxl
dux_dzl = dux_dxi * xizl + dux_dgamma * gammazl
if( AXISYM .and. is_on_the_axis(ispec) .and. i == 1 ) then ! dchi/dr=rho * u_r=0 on the axis
dux_dxl = ZERO
endif
jacobianl = jacobian(i,j,ispec)
! if external density model
if( assign_external_model ) then
rhol = rhoext(i,j,ispec)
endif
if( AXISYM ) then
if( is_on_the_axis(ispec) .and. i == 1 ) then
! on the axis r/(xi+1) -> its analytic limit, evaluated from the geometry
xxi = + gammaz(i,j,ispec) * jacobian(i,j,ispec)
r_xiplus1(i,j) = xxi
else if( is_on_the_axis(ispec) ) then
r_xiplus1(i,j) = coord(1,ibool(i,j,ispec))/(xiglj(i)+ONE)
endif
endif
! for acoustic medium also add integration weights
if( AXISYM ) then
if( is_on_the_axis(ispec) ) then
tempx1(i,j) = wzgll(j) * r_xiplus1(i,j) * jacobianl * (xixl * dux_dxl + xizl * dux_dzl) / rhol
tempx2(i,j) = wxglj(i) * r_xiplus1(i,j) * jacobianl * (gammaxl * dux_dxl + gammazl * dux_dzl) / rhol
else
! off-axis axisymmetric elements carry the cylindrical radius r = coord(1,...)
tempx1(i,j) = wzgll(j) * coord(1,ibool(i,j,ispec)) * jacobianl * (xixl * dux_dxl + xizl * dux_dzl) / rhol
tempx2(i,j) = wxgll(i) * coord(1,ibool(i,j,ispec)) * jacobianl * (gammaxl * dux_dxl + gammazl * dux_dzl) / rhol
endif
else
tempx1(i,j) = wzgll(j) * jacobianl * (xixl * dux_dxl + xizl * dux_dzl) / rhol
tempx2(i,j) = wxgll(i) * jacobianl * (gammaxl * dux_dxl + gammazl * dux_dzl) / rhol
endif
enddo
enddo
! NOTE(review): the following loop computes rhol/cpl/kappal (and iglob) into
! locals that are never used afterwards in this subroutine; it looks like
! dead code carried over from the forward routine - confirm before removing.
! first double loop over GLL points to compute and store gradients
do j = 1,NGLLZ
do i = 1,NGLLX
iglob = ibool(i,j,ispec)
if( assign_external_model ) then
rhol = rhoext(i,j,ispec)
cpl = vpext(i,j,ispec)
!assuming that in fluid(acoustic) part input cpl is defined by sqrt(kappal/rhol), &
!which is not the same as in cpl input in elastic part
kappal = rhol * cpl * cpl
else
lambdal_relaxed = poroelastcoef(1,1,kmato(ispec))
mul_relaxed = poroelastcoef(2,1,kmato(ispec))
kappal = lambdal_relaxed + TWO * mul_relaxed/3._CUSTOM_REAL
rhol = density(1,kmato(ispec))
endif
enddo
enddo
!
! second double-loop over GLL to compute all the terms
!
do j = 1,NGLLZ
do i = 1,NGLLX
iglob = ibool(i,j,ispec)
! along x direction and z direction
! and assemble the contributions
do k = 1,NGLLX
if( AXISYM ) then
if( is_on_the_axis(ispec) ) then
b_potential_dot_dot_acoustic(iglob) = b_potential_dot_dot_acoustic(iglob) - &
(tempx1(k,j) * hprimeBarwglj_xx(k,i) + tempx2(i,k) * hprimewgll_zz(k,j))
else
b_potential_dot_dot_acoustic(iglob) = b_potential_dot_dot_acoustic(iglob) - &
(tempx1(k,j) * hprimewgll_xx(k,i) + tempx2(i,k) * hprimewgll_zz(k,j))
endif
else
b_potential_dot_dot_acoustic(iglob) = b_potential_dot_dot_acoustic(iglob) - &
(tempx1(k,j) * hprimewgll_xx(k,i) + tempx2(i,k) * hprimewgll_zz(k,j))
endif
enddo
enddo ! second loop over the GLL points
enddo
endif ! end of test if acoustic element
enddo ! end of loop over all spectral elements
!
!--- absorbing boundaries
!
! The outer boundary condition to use for PML elements in fluid layers is Neumann for the potential
! because we need Dirichlet conditions for the displacement vector, which means Neumann for the potential.
! Thus, there is nothing to enforce explicitly here.
! There is something to enforce explicitly only in the case of elastic elements, for which a Dirichlet
! condition is needed for the displacement vector, which is the vectorial unknown for these elements.
! for Stacey paraxial absorbing conditions (more precisely: Sommerfeld in the case of a fluid) we implement them here
! The boundary terms stored during the forward simulation are re-injected at
! the reversed time index NSTEP-it+1 to reconstruct the backward wavefield.
if( STACEY_BOUNDARY_CONDITIONS ) then
do ispecabs=1,nelemabs
ispec = numabs(ispecabs)
! Sommerfeld condition if acoustic
if( acoustic(ispec) ) then
!--- left absorbing boundary
if( codeabs(IEDGE4,ispecabs) ) then
i = 1
jbegin = ibegin_edge4(ispecabs)
jend = iend_edge4(ispecabs)
do j = jbegin,jend
iglob = ibool(i,j,ispec)
b_potential_dot_dot_acoustic(iglob) = b_potential_dot_dot_acoustic(iglob) - &
b_absorb_acoustic_left(j,ib_left(ispecabs),NSTEP-it+1)
enddo
endif ! end of left absorbing boundary
!--- right absorbing boundary
if( codeabs(IEDGE2,ispecabs) ) then
i = NGLLX
jbegin = ibegin_edge2(ispecabs)
jend = iend_edge2(ispecabs)
do j = jbegin,jend
iglob = ibool(i,j,ispec)
! adds (previously) stored contribution
b_potential_dot_dot_acoustic(iglob) = b_potential_dot_dot_acoustic(iglob) - &
b_absorb_acoustic_right(j,ib_right(ispecabs),NSTEP-it+1)
enddo
endif ! end of right absorbing boundary
!--- bottom absorbing boundary
if( codeabs(IEDGE1,ispecabs) ) then
j = 1
ibegin = ibegin_edge1(ispecabs)
iend = iend_edge1(ispecabs)
! exclude corners to make sure there is no contradiction on the normal
if( codeabs_corner(1,ispecabs)) ibegin = 2
if( codeabs_corner(2,ispecabs)) iend = NGLLX-1
do i = ibegin,iend
iglob = ibool(i,j,ispec)
! adds (previously) stored contribution
b_potential_dot_dot_acoustic(iglob) = b_potential_dot_dot_acoustic(iglob) - &
b_absorb_acoustic_bottom(i,ib_bottom(ispecabs),NSTEP-it+1)
enddo
endif ! end of bottom absorbing boundary
!--- top absorbing boundary
if( codeabs(IEDGE3,ispecabs) ) then
j = NGLLZ
ibegin = ibegin_edge3(ispecabs)
iend = iend_edge3(ispecabs)
! exclude corners to make sure there is no contradiction on the normal
if( codeabs_corner(3,ispecabs)) ibegin = 2
if( codeabs_corner(4,ispecabs)) iend = NGLLX-1
do i = ibegin,iend
iglob = ibool(i,j,ispec)
b_potential_dot_dot_acoustic(iglob) = b_potential_dot_dot_acoustic(iglob) - &
b_absorb_acoustic_top(i,ib_top(ispecabs),NSTEP-it+1)
enddo
endif ! end of top absorbing boundary
endif ! acoustic ispec
enddo
endif ! end of absorbing boundaries
end subroutine compute_forces_acoustic_backward
|
{"hexsha": "f6dda70703ba1c02a5a85ac72c9aba83464af763", "size": 12304, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "specfem2d/src/specfem2D/compute_forces_acoustic_backward.f90", "max_stars_repo_name": "PanIGGCAS/SeisElastic2D_1.1", "max_stars_repo_head_hexsha": "2872dc514b638237771f4071195f7b8f90e0ce3d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2020-10-04T01:55:41.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-14T05:20:50.000Z", "max_issues_repo_path": "specfem2d/src/specfem2D/compute_forces_acoustic_backward.f90", "max_issues_repo_name": "PanIGGCAS/SeisElastic2D_1.1", "max_issues_repo_head_hexsha": "2872dc514b638237771f4071195f7b8f90e0ce3d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-10-31T03:36:34.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-27T09:36:13.000Z", "max_forks_repo_path": "specfem2d/src/specfem2D/compute_forces_acoustic_backward.f90", "max_forks_repo_name": "PanIGGCAS/SeisElastic2D_1.1", "max_forks_repo_head_hexsha": "2872dc514b638237771f4071195f7b8f90e0ce3d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2020-12-15T02:04:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-15T21:48:35.000Z", "avg_line_length": 43.020979021, "max_line_length": 125, "alphanum_fraction": 0.6137028609, "num_tokens": 3365}
|
"""Utility functions"""
from math import log, log2, pow

import numpy as np

from ..exceptions import WaveletException
def getExponent(value):
    """Return the base-2 exponent of ``value``.

    Example: 8 -> 3 because 2 ** 3 == 8.

    Uses ``math.log2`` instead of ``log(value) / log(2.)``: the division form
    can round just below the true integer for large exact powers of two
    (e.g. 2 ** 49), making ``int()`` truncate to the wrong exponent.
    """
    return int(log2(value))
def scalb(f, scaleFactor):
    """Scale ``f`` by a power of two, i.e. return ``f * 2 ** scaleFactor``."""
    factor = 2.0 ** scaleFactor
    return f * factor
def isPowerOf2(number):
    """Check whether ``number`` is an exact power of two.

    Reconstructs 2 ** floor(log2(number)) and compares it with the input;
    only exact powers of two survive the round trip.
    """
    exponent = int(log(number) / log(2.))
    reconstructed = 1. * pow(2., exponent)
    return reconstructed == number
def decomposeArbitraryLength(number):
    """
    Decompose ``number`` into a sum of decreasing powers of two and return
    the list of exponents.

    Examples
    --------
    number 42 : 32 + 8 + 2
    powers : 5, 3, 1

    Raises
    ------
    WaveletException
        If ``number`` is smaller than 1 (no power of two fits).

    Notes
    -----
    Fixes two small defects of the previous version: the error message now
    matches the guard (values >= 1 are accepted), and the dead ``position``
    counter with its redundant list slice is gone. ``math.log2`` is used
    for an exact exponent on large powers of two.
    """
    if number < 1:
        raise WaveletException("Number should be greater than or equal to 1")
    powers = []
    remainder = number
    while remainder >= 1.:
        # Largest power of two not exceeding the current remainder.
        power = int(log2(remainder))
        powers.append(power)
        remainder = remainder - 2.0 ** power
    return powers
def threshold(data, value, substitute=0):
    """Soft thresholding: shrink ``data`` toward zero by ``value``.

    Entries whose magnitude is below ``value`` become 0 (or ``substitute``
    when a non-zero substitute is given); the remaining entries keep their
    sign and lose ``value`` of magnitude.
    """
    magnitude = np.absolute(data)
    with np.errstate(divide='ignore'):
        # A zero magnitude yields -inf here, which the clip below maps to 0,
        # so the divide-by-zero warning is safely suppressed.
        shrink = (1 - value / magnitude)
    shrink.clip(min=0, max=None, out=shrink)
    shrink *= data
    if substitute == 0:
        return shrink
    below = np.less(magnitude, value)
    return np.where(below, substitute, shrink)
def mad(data):
    """
    Median Absolute Deviation: a robust measure of the sample's spread.

    https://en.wikipedia.org/wiki/Median_absolute_deviation
    """
    # Drop masked entries (no-op for plain arrays/lists).
    values = np.ma.array(data).compressed()
    center = np.median(values)
    deviations = np.abs(values - center)
    return np.median(deviations)
def snr(data, axis=0, ddof=0):
    """
    Signal-to-noise ratio along ``axis``: mean divided by standard deviation.

    Positions where the standard deviation is zero are reported as 0.
    """
    arr = np.asanyarray(data)
    mean = arr.mean(axis)
    std = arr.std(axis=axis, ddof=ddof)
    return np.where(std == 0, 0, mean / std)
def rmse(original, noise):
    """Return the root mean squared error between the two signals."""
    diff = np.subtract(original, noise)
    return np.sqrt(np.mean(np.power(diff, 2)))
def snr2(original, noise):
    """Signal-to-noise ratio in decibels between a signal and a noisy copy.

    Computed as 10 * log10(sum(original^2) / sum((original - noise)^2)).
    """
    with np.errstate(divide='ignore'):
        signal_power = np.sum(np.power(original, 2))
        residual_power = np.sum(np.power(np.subtract(original, noise), 2))
        return 10 * np.log10(np.divide(signal_power, residual_power))
def amp_to_db(S, ref=1.0, min_value=1e-5, top_db=80.0):
    """
    Convert an amplitude spectrogram to a dB-scaled spectrogram.

    Equivalent to ``power_to_db(S ** 2)`` with ``ref`` and ``min_value``
    squared accordingly.

    Parameters
    ----------
    S : array_like
        input amplitude
    ref : scalar
        The amplitude ``abs(S)`` is scaled relative to ``ref``:
        ``20 * log10(S / ref)``. Zeros in the output correspond to
        positions where ``S == ref``.
    min_value : float > 0 [scalar]
        minimum threshold for ``S`` and ``ref``
    top_db : float >= 0 [scalar]
        threshold the output at ``top_db`` below the peak:
        ``max(20 * log10(S)) - top_db``

    Returns
    -------
    S_db : np.ndarray
        ``S`` measured in dB
    """
    amplitude = np.abs(np.asarray(S))
    # Square in place; the amplitude array is a fresh np.abs copy, so this
    # never mutates the caller's data.
    power = np.square(amplitude, out=amplitude)
    return power_to_db(power, ref=np.abs(ref) ** 2,
                       min_value=min_value ** 2, top_db=top_db)
def power_to_db(S, ref=1.0, min_value=1e-10, top_db=80.0):
    """
    Convert a power spectrogram (amplitude squared) to decibel (dB) units.

    This computes the scaling ``10 * log10(S / ref)`` in a numerically
    stable way (both ``S`` and ``ref`` are floored at ``min_value`` before
    taking logarithms).

    Parameters
    ----------
    S : np.ndarray
        input power
    ref : scalar
        If scalar, the amplitude `abs(S)` is scaled relative to `ref`:
        `10 * log10(S / ref)`.
        Zeros in the output correspond to positions where `S == ref`.
    min_value : float > 0 [scalar]
        minimum threshold for `abs(S)` and `ref`
    top_db : float >= 0 [scalar]
        threshold the output at `top_db` below the peak:
        ``max(10 * log10(S)) - top_db``; pass None to disable

    Returns
    -------
    S_db : np.ndarray
        ``S_db ~= 10 * log10(S) - 10 * log10(ref)``

    Raises
    ------
    ValueError
        If ``min_value <= 0`` or ``top_db < 0``. (ValueError subclasses
        Exception, so existing ``except Exception`` callers still work.)
    """
    S = np.asarray(S)
    if min_value <= 0:
        # ValueError is the idiomatic type for a bad argument value.
        raise ValueError('min must be strictly positive')
    if np.issubdtype(S.dtype, np.complexfloating):
        magnitude = np.abs(S)
    else:
        magnitude = S
    ref_value = np.abs(ref)
    log_spec = 10.0 * np.log10(np.maximum(min_value, magnitude))
    log_spec -= 10.0 * np.log10(np.maximum(min_value, ref_value))
    # Clip everything more than top_db below the peak.
    if top_db is not None:
        if top_db < 0:
            raise ValueError('top_db must be non-negative')
        log_spec = np.maximum(log_spec, log_spec.max() - top_db)
    return log_spec
|
{"hexsha": "fa345d02ad160b20613de5d8c9bc313a8e9e4cd8", "size": 4948, "ext": "py", "lang": "Python", "max_stars_repo_path": "wavelet/util/utility.py", "max_stars_repo_name": "AP-Atul/wavelets-ext", "max_stars_repo_head_hexsha": "00ced22462c369584ebd32f9b5f357f092de0142", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-02-01T07:43:10.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-27T06:58:54.000Z", "max_issues_repo_path": "wavelet/util/utility.py", "max_issues_repo_name": "AP-Atul/wavelets-ext", "max_issues_repo_head_hexsha": "00ced22462c369584ebd32f9b5f357f092de0142", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "wavelet/util/utility.py", "max_forks_repo_name": "AP-Atul/wavelets-ext", "max_forks_repo_head_hexsha": "00ced22462c369584ebd32f9b5f357f092de0142", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.9898989899, "max_line_length": 78, "alphanum_fraction": 0.6073160873, "include": true, "reason": "import numpy", "num_tokens": 1308}
|
# -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, 2017, 2018, 2019, 2020, 2021
Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
This module contains implementations of most cubic equations of state for
mixtures. This includes Peng-Robinson, SRK, Van der Waals, PRSV, TWU and
many other variants.
For reporting bugs, adding feature requests, or submitting pull requests,
please use the `GitHub issue tracker <https://github.com/CalebBell/thermo/>`_.
.. contents:: :local:
Base Class
==========
.. autoclass:: thermo.eos_mix.GCEOSMIX
:members:
:undoc-members:
:show-inheritance:
:exclude-members: a_alpha_and_derivatives_numpy, a_alpha_and_derivatives_py, main_derivatives_and_departures, derivatives_and_departures,
sequential_substitution_3P, sequential_substitution_VL, stability_Michelsen, stability_iteration_Michelsen, newton_VL, broyden2_VL,
d2A_dep_dninjs, d2A_dep_dninjs_Vt, d2A_dninjs_Vt, d2A_dninjs_Vt_another, d2P_dninjs_Vt, d2nA_dninjs_Vt, d3P_dninjnks_Vt,
dScomp_dns, d2Scomp_dninjs, dA_dep_dns_Vt, dP_dns_Vt
Peng-Robinson Family EOSs
=========================
Standard Peng Robinson
----------------------
.. autoclass:: thermo.eos_mix.PRMIX
:show-inheritance:
:members: eos_pure, a_alphas_vectorized, a_alpha_and_derivatives_vectorized,
d3a_alpha_dT3, d3a_alpha_dT3_vectorized, fugacity_coefficients,
dlnphis_dT, dlnphis_dP, dlnphis_dzs, ddelta_dzs, ddelta_dns,
d2delta_dzizjs, d2delta_dninjs, d3delta_dninjnks, depsilon_dzs,
depsilon_dns, d2epsilon_dzizjs, d2epsilon_dninjs,
d3epsilon_dninjnks
Peng Robinson (1978)
--------------------
.. autoclass:: thermo.eos_mix.PR78MIX
:show-inheritance:
:members: eos_pure
Peng Robinson Stryjek-Vera
--------------------------
.. autoclass:: thermo.eos_mix.PRSVMIX
:show-inheritance:
:members: eos_pure, a_alphas_vectorized, a_alpha_and_derivatives_vectorized
Peng Robinson Stryjek-Vera 2
----------------------------
.. autoclass:: thermo.eos_mix.PRSV2MIX
:show-inheritance:
:members: eos_pure, a_alphas_vectorized, a_alpha_and_derivatives_vectorized
Peng Robinson Twu (1995)
------------------------
.. autoclass:: thermo.eos_mix.TWUPRMIX
:show-inheritance:
:members: eos_pure
Peng Robinson Translated
------------------------
.. autoclass:: thermo.eos_mix.PRMIXTranslated
:show-inheritance:
:members: eos_pure, ddelta_dzs, d2delta_dzizjs, d3delta_dzizjzks, ddelta_dns,
d2delta_dninjs, d3delta_dninjnks, depsilon_dzs, depsilon_dns,
d2epsilon_dzizjs, d3epsilon_dzizjzks, d2epsilon_dninjs,
d3epsilon_dninjnks
Peng Robinson Translated-Consistent
-----------------------------------
.. autoclass:: thermo.eos_mix.PRMIXTranslatedConsistent
:show-inheritance:
:members: eos_pure
Peng Robinson Translated (Pina-Martinez, Privat, and Jaubert Variant)
---------------------------------------------------------------------
.. autoclass:: thermo.eos_mix.PRMIXTranslatedPPJP
:show-inheritance:
:members: eos_pure
SRK Family EOSs
===============
Standard SRK
------------
.. autoclass:: thermo.eos_mix.SRKMIX
:show-inheritance:
:members: eos_pure, dlnphis_dT, dlnphis_dP, a_alphas_vectorized,
a_alpha_and_derivatives_vectorized, fugacity_coefficients
:exclude-members:
Twu SRK (1995)
--------------
.. autoclass:: thermo.eos_mix.TWUSRKMIX
:show-inheritance:
:members: eos_pure
API SRK
-------
.. autoclass:: thermo.eos_mix.APISRKMIX
:show-inheritance:
:members: eos_pure
SRK Translated
--------------
.. autoclass:: thermo.eos_mix.SRKMIXTranslated
:show-inheritance:
:members: eos_pure, ddelta_dzs, d2delta_dzizjs, d3delta_dzizjzks, ddelta_dns,
d2delta_dninjs, d3delta_dninjnks, depsilon_dzs, depsilon_dns,
d2epsilon_dzizjs, d3epsilon_dzizjzks, d2epsilon_dninjs,
d3epsilon_dninjnks
SRK Translated-Consistent
-------------------------
.. autoclass:: thermo.eos_mix.SRKMIXTranslatedConsistent
:show-inheritance:
:members: eos_pure
MSRK Translated
---------------
.. autoclass:: thermo.eos_mix.MSRKMIXTranslated
:show-inheritance:
:members: eos_pure
Cubic Equation of State with Activity Coefficients
==================================================
.. autoclass:: thermo.eos_mix.PSRK
:show-inheritance:
:members: eos_pure
Van der Waals Equation of State
===============================
.. autoclass:: thermo.eos_mix.VDWMIX
:show-inheritance:
:members: eos_pure, dlnphis_dT, dlnphis_dP, a_alphas_vectorized,
a_alpha_and_derivatives_vectorized, fugacity_coefficients,
ddelta_dzs, ddelta_dns, d2delta_dzizjs, d2delta_dninjs,
d3delta_dninjnks
Redlich-Kwong Equation of State
===============================
.. autoclass:: thermo.eos_mix.RKMIX
:show-inheritance:
:members: eos_pure, a_alphas_vectorized, a_alpha_and_derivatives_vectorized,
ddelta_dzs, ddelta_dns, d2delta_dzizjs, d2delta_dninjs,
d3delta_dninjnks
Ideal Gas Equation of State
===========================
.. autoclass:: thermo.eos_mix.IGMIX
:show-inheritance:
:members: eos_pure, a_alphas_vectorized, a_alpha_and_derivatives_vectorized
Different Mixing Rules
======================
.. autoclass:: thermo.eos_mix.EpsilonZeroMixingRules
.. autoclass:: thermo.eos_mix.PSRKMixingRules
:members: u, A, a_alpha_and_derivatives
:undoc-members:
:show-inheritance:
Lists of Equations of State
===========================
.. autodata:: thermo.eos_mix.eos_mix_list
.. autodata:: thermo.eos_mix.eos_mix_no_coeffs_list
'''
from __future__ import division
__all__ = ['GCEOSMIX', 'PRMIX', 'SRKMIX', 'PR78MIX', 'VDWMIX', 'PRSVMIX',
'PRSV2MIX', 'TWUPRMIX', 'TWUSRKMIX', 'APISRKMIX', 'IGMIX', 'RKMIX',
'PRMIXTranslatedConsistent', 'PRMIXTranslatedPPJP', 'PRMIXTranslated',
'SRKMIXTranslatedConsistent', 'PSRK', 'MSRKMIXTranslated',
'eos_mix_list', 'eos_mix_no_coeffs_list', 'SRKMIXTranslated']
import sys
from cmath import log as clog
from fluids.numerics import numpy as np, IS_PYPY, newton_system, broyden2, UnconvergedError, trunc_exp, solve_2_direct, catanh
from fluids.numerics.arrays import det, subset_matrix
from fluids.constants import R
from chemicals.utils import normalize, dxs_to_dn_partials, dxs_to_dns, dns_to_dn_partials, d2xs_to_dxdn_partials, d2ns_to_dn2_partials
from chemicals.utils import log, exp, sqrt
from chemicals.rachford_rice import flash_inner_loop, Rachford_Rice_flash_error, Rachford_Rice_solution2
from chemicals.flash_basic import K_value, Wilson_K_value
from thermo import serialize
from thermo.eos_mix_methods import (a_alpha_aijs_composition_independent,
a_alpha_aijs_composition_independent_support_zeros, a_alpha_and_derivatives, a_alpha_and_derivatives_full,
a_alpha_quadratic_terms, a_alpha_and_derivatives_quadratic_terms,
G_dep_lnphi_d_helper, eos_mix_dV_dzs, VDW_lnphis, SRK_lnphis, eos_mix_db_dns, PR_translated_ddelta_dns,
PR_translated_depsilon_dns, PR_depsilon_dns, PR_translated_d2epsilon_dzizjs,
PR_d2epsilon_dninjs, PR_d3epsilon_dninjnks, PR_d2delta_dninjs, PR_d3delta_dninjnks,
PR_ddelta_dzs, PR_ddelta_dns, PR_d2epsilon_dzizjs, PR_depsilon_dzs,
RK_d3delta_dninjnks, SRK_translated_d2epsilon_dzizjs, SRK_translated_depsilon_dzs,
PR_translated_ddelta_dzs, PR_translated_depsilon_dzs, PR_translated_d2epsilon_dninjs,
PR_translated_d2delta_dninjs, PR_translated_d3delta_dninjnks, PR_translated_d3epsilon_dninjnks,
SRK_translated_ddelta_dns, SRK_translated_depsilon_dns, SRK_translated_d2delta_dninjs,
SRK_translated_d2epsilon_dninjs, SRK_translated_d3epsilon_dninjnks,
SRK_translated_d3delta_dninjnks)
from thermo.eos_alpha_functions import (TwuPR95_a_alpha, TwuSRK95_a_alpha, Twu91_a_alpha, Mathias_Copeman_poly_a_alpha,
Soave_1979_a_alpha, PR_a_alpha_and_derivatives_vectorized, PR_a_alphas_vectorized,
RK_a_alpha_and_derivatives_vectorized, RK_a_alphas_vectorized,
SRK_a_alpha_and_derivatives_vectorized, SRK_a_alphas_vectorized,
PRSV_a_alphas_vectorized, PRSV_a_alpha_and_derivatives_vectorized,
PRSV2_a_alphas_vectorized, PRSV2_a_alpha_and_derivatives_vectorized,
APISRK_a_alphas_vectorized, APISRK_a_alpha_and_derivatives_vectorized)
from thermo.eos import *
try:
    # Bind frequently used numpy callables to module-level names, presumably
    # to save repeated attribute lookups in hot loops; the bare except keeps
    # the module importable if the numpy shim lacks any of these attributes.
    (zeros, array, npexp, npsqrt, empty, full, npwhere, npmin, npmax) = (
        np.zeros, np.array, np.exp, np.sqrt, np.empty, np.full, np.where, np.min, np.max)
except:
    pass
# Precomputed powers of the gas constant used throughout the EOS algebra.
R2 = R*R
R_inv = 1.0/R
R2_inv = R_inv*R_inv
# sqrt(2)-based constants appearing in Peng-Robinson departure expressions.
two_root_two = 2*2**0.5
root_two = sqrt(2.)
root_two_m1 = root_two - 1.0
root_two_p1 = root_two + 1.0
# Pure-component Peng-Robinson coefficient products, cached from the PR class.
c1R2_PR = PR.c1R2
c2R_PR = PR.c2R
class GCEOSMIX(GCEOS):
    r'''Class for solving a generic pressure-explicit three-parameter cubic
    equation of state for a mixture. Does not implement any parameters itself;
    must be subclassed by a mixture equation of state class which subclasses it.
    .. math::
        P=\frac{RT}{V-b}-\frac{a\alpha(T)}{V^2 + \delta V + \epsilon}
    '''
    # Attributes independent of the (T, P/V, zs) state; these are copied
    # verbatim between instances (see to_TP_zs_fast).
    nonstate_constants = ('N', 'cmps', 'Tcs', 'Pcs', 'omegas', 'kijs', 'kwargs', 'ais', 'bs')
    # Mapping from mixture keyword names to pure-component equivalents; empty
    # here and overridden by subclasses.
    # NOTE(review): its consumers are not visible in this chunk - confirm.
    mix_kwargs_to_pure = {}
    kwargs_square = ('kijs',)
    '''Tuple of 2D arguments used by the specific EOS.
    '''
    kwargs_linear = tuple()
    '''Tuple of 1D arguments used by the specific EOS in addition to the conventional ones.
    '''
    multicomponent = True
    '''All inherited classes of GCEOSMIX are multicomponent.
    '''
    scalar = True
    '''Whether the model is implemented using pure-Python lists of floats,
    or numpy arrays of float64.
    '''
    translated = False
    '''Whether or not the model implements volume translation.
    '''
def subset(self, idxs, **state_specs):
    r'''Method to construct a new :obj:`GCEOSMIX` that removes all components
    not specified in the `idxs` argument.

    Parameters
    ----------
    idxs : list[int] or Slice
        Indexes of components that should be included, [-]
    state_specs : float
        Keyword arguments which can be any of `T`, `P`, `V`, `zs`; `zs`
        is optional, as are (`T`, `P`, `V`), but if any of (`T`, `P`, `V`)
        are specified, a second one is required as well, [various]

    Returns
    -------
    subset_eos : :obj:`GCEOSMIX`
        Multicomponent :obj:`GCEOSMIX` at the same specified specs but with a
        composition normalized to 1 and with fewer components, [-]

    Notes
    -----
    Subclassing equations of state require their :obj:`kwargs_linear <GCEOSMIX.kwargs_linear>` and
    :obj:`kwargs_square <GCEOSMIX.kwargs_square>` attributes to be correct for this to work.
    `Tcs`, `Pcs`, and `omegas` are always assumed to be used.

    Examples
    --------
    >>> kijs = [[0.0, 0.00076, 0.00171], [0.00076, 0.0, 0.00061], [0.00171, 0.00061, 0.0]]
    >>> PR3 = PRMIX(Tcs=[469.7, 507.4, 540.3], zs=[0.8168, 0.1501, 0.0331], omegas=[0.249, 0.305, 0.349], Pcs=[3.369E6, 3.012E6, 2.736E6], T=322.29, P=101325.0, kijs=kijs)
    >>> PR3.subset([1,2])
    PRMIX(Tcs=[507.4, 540.3], Pcs=[3012000.0, 2736000.0], omegas=[0.305, 0.349], kijs=[[0.0, 0.00061], [0.00061, 0.0]], zs=[0.8193231441048036, 0.1806768558951965], T=322.29, P=101325.0)
    >>> PR3.subset([1,2], T=500.0, P=1e5, zs=[.2, .8])
    PRMIX(Tcs=[507.4, 540.3], Pcs=[3012000.0, 2736000.0], omegas=[0.305, 0.349], kijs=[[0.0, 0.00061], [0.00061, 0.0]], zs=[0.2, 0.8], T=500.0, P=100000.0)
    >>> PR3.subset([1,2], zs=[.2, .8])
    PRMIX(Tcs=[507.4, 540.3], Pcs=[3012000.0, 2736000.0], omegas=[0.305, 0.349], kijs=[[0.0, 0.00061], [0.00061, 0.0]], zs=[0.2, 0.8], T=322.29, P=101325.0)
    '''
    # Selection helper: a slice is applied directly; an index list is mapped.
    # NOTE(review): for non-scalar (numpy) instances a slice returns a VIEW
    # of the underlying array, so the in-place zs normalization below could
    # mutate self.zs - confirm against the numpy code path.
    is_slice = isinstance(idxs, slice)
    if is_slice:
        def atindexes(values):
            return values[idxs]
    else:
        def atindexes(values):
            return [values[i] for i in idxs]
    if state_specs:
        # Caller supplied new state specs; when only `zs` was given, fill in
        # the current (T, P, V) specs. Note this mutates the caller's dict.
        kwargs = state_specs
        if len(kwargs) == 1 and 'zs' in kwargs:
            kwargs.update(self.state_specs)
    else:
        kwargs = self.state_specs
    if 'zs' not in kwargs:
        # No composition given: take the current one restricted to `idxs`
        # and renormalize it to sum to 1.
        zs = atindexes(self.zs)
        if not zs:
            raise ValueError("Cannot create an EOS without any components selected")
        zs_tot_inv = 1.0/sum(zs)
        for i in range(len(zs)):
            zs[i] *= zs_tot_inv
        kwargs['zs'] = zs
    kwargs['Tcs'] = atindexes(self.Tcs)
    kwargs['Pcs'] = atindexes(self.Pcs)
    kwargs['omegas'] = atindexes(self.omegas)
    # 1D EOS-specific arguments are index-selected; 2D ones (e.g. kijs) need
    # both rows and columns restricted, handled by subset_matrix.
    local_kwargs = self.kwargs
    for k in self.kwargs_linear:
        kwargs[k] = atindexes(local_kwargs[k])
    for k in self.kwargs_square:
        kwargs[k] = subset_matrix(local_kwargs[k], idxs)
    return self.__class__(**kwargs)
def __repr__(self):
    '''Build a constructor-style representation of the EOS: class name,
    critical constants, EOS-specific kwargs, composition, and whichever
    pair of (T, P, V) specifies the state.'''
    pieces = ['%s(Tcs=%s, Pcs=%s, omegas=%s, ' % (
        self.__class__.__name__, repr(self.Tcs), repr(self.Pcs),
        repr(self.omegas))]
    for key, val in self.kwargs.items():
        pieces.append('%s=%s, ' % (key, repr(val)))
    pieces.append('zs=%s, ' % (repr(self.zs),))
    # Show the state with the same pair of variables it was specified with.
    if getattr(self, 'no_T_spec', False):
        pieces.append('P=%s, V=%s' % (repr(self.P), repr(self.V)))
    elif self.V is not None:
        pieces.append('T=%s, V=%s' % (repr(self.T), repr(self.V)))
    else:
        pieces.append('T=%s, P=%s' % (repr(self.T), repr(self.P)))
    pieces.append(')')
    return ''.join(pieces)
@classmethod
def from_json(cls, json_repr):
    r'''Method to create a mixture cubic equation of state from a JSON
    friendly serialization of another mixture cubic equation of state.

    Parameters
    ----------
    json_repr : dict
        Json representation, [-]

    Returns
    -------
    eos_mix : :obj:`GCEOSMIX`
        Newly created object from the json serialization, [-]

    Notes
    -----
    It is important that the input string be in the same format as that
    created by :obj:`GCEOS.as_json`. The input dictionary is mutated in
    place during reconstruction.

    Examples
    --------
    >>> import pickle
    >>> eos = PRSV2MIX(Tcs=[507.6], Pcs=[3025000], omegas=[0.2975], zs=[1], T=299., P=1E6, kappa1s=[0.05104], kappa2s=[0.8634], kappa3s=[0.460])
    >>> json_stuff = pickle.dumps(eos.as_json())
    >>> new_eos = GCEOSMIX.from_json(pickle.loads(json_stuff))
    >>> assert new_eos == eos
    '''
    d = json_repr
    # The target class is encoded under 'py/object'; the bookkeeping keys
    # are stripped so `d` can become the instance __dict__ directly.
    eos_name = d['py/object']
    del d['py/object']
    del d['json_version']
    if not d['scalar']:
        # Numpy-backed instances were serialized as nested lists; restore
        # them to arrays.
        d = serialize.naive_lists_to_arrays(d)
    # Restore tuple-typed fields; the bare excepts silently skip EOSs that
    # do not carry these keys.
    try:
        d['raw_volumes'] = tuple(d['raw_volumes'])
    except:
        pass
    try:
        alpha_coeffs = [tuple(v) for v in d['alpha_coeffs']]
        d['alpha_coeffs'] = alpha_coeffs
    except:
        pass
    eos = eos_mix_full_path_dict[eos_name]
    if eos.kwargs_keys:
        d['kwargs'] = {k: d[k] for k in eos.kwargs_keys}
        # If the first alpha_coeffs try above failed, `alpha_coeffs` is
        # unbound here and this except swallows the resulting NameError -
        # the two try blocks are deliberately coupled that way.
        try:
            d['kwargs']['alpha_coeffs'] = alpha_coeffs
        except:
            pass
    # Bypass __init__ (and any solving it would trigger); the deserialized
    # dict already contains the fully solved state.
    new = eos.__new__(eos)
    new.__dict__ = d
    return new
def to_TP_zs_fast(self, T, P, zs, only_l=False, only_g=False, full_alphas=True):
    r'''Method to construct a new :obj:`GCEOSMIX` instance with the same
    parameters as the existing object. If both instances are at the same
    temperature, `a_alphas` and `da_alpha_dTs` and `d2a_alpha_dT2s` are
    shared between the instances. It is always assumed the new object has
    a differet composition. Optionally, only one set of phase properties
    can be solved for, increasing speed. Additionally, if `full_alphas`
    is set to False no temperature derivatives of `a_alpha` will be
    computed. Those derivatives are not needed in the context of a
    PT or PVF flash.

    Parameters
    ----------
    T : float
        Temperature, [K]
    P : float
        Pressure, [Pa]
    zs : list[float]
        Mole fractions of each component, [-]
    only_l : bool
        When true, if there is a liquid and a vapor root, only the liquid
        root (and properties) will be set.
    only_g : bool
        When true, if there is a liquid and a vapor root, only the vapor
        root (and properties) will be set.
    full_alphas : bool
        When False, skip the temperature derivatives of `a_alpha`, [-]

    Returns
    -------
    eos : :obj:`GCEOSMIX`
        Multicomponent :obj:`GCEOSMIX` at the specified conditions [-]

    Notes
    -----

    Examples
    --------
    >>> base = RKMIX(T=500.0, P=1E6, Tcs=[126.1, 190.6], Pcs=[33.94E5, 46.04E5], omegas=[0.04, 0.011], zs=[0.6, 0.4])
    >>> base.to_TP_zs_fast(T=300, P=1e5, zs=base.zs)
    RKMIX(Tcs=[126.1, 190.6], Pcs=[3394000.0, 4604000.0], omegas=[0.04, 0.011], kijs=[[0.0, 0.0], [0.0, 0.0]], zs=[0.6, 0.4], T=300, P=100000.0)
    '''
    # Pure-component alpha terms depend only on T, so at the same T they can
    # be shared with the source object instead of being recomputed.
    copy_alphas = T == self.T
    # Bypass __init__; copy the state-independent constants directly.
    new = self.__class__.__new__(self.__class__)
    new.N = self.N
    new.Tcs = self.Tcs
    new.Pcs = self.Pcs
    new.omegas = self.omegas
    new.kijs = self.kijs
    new.kwargs = self.kwargs
    new.ais = self.ais
    new.bs = self.bs
    new.scalar = self.scalar
    if copy_alphas:
        new.a_alphas = self.a_alphas
        # The derivative arrays may be absent if the source was solved with
        # full_alphas=False; share them only when they exist.
        try:
            new.da_alpha_dTs = self.da_alpha_dTs
            new.d2a_alpha_dT2s = self.d2a_alpha_dT2s
        except:
            pass
    new.zs = zs
    new.T = T
    new.P = P
    # V=None forces a TP solve.
    new.V = None
    new._fast_init_specific(self)
    # pure_a_alphas is only needed when the alphas were not shared above.
    new.solve(pure_a_alphas=(not copy_alphas), only_l=only_l,
              only_g=only_g, full_alphas=full_alphas)
    return new
def to_TP_zs(self, T, P, zs, fugacities=True, only_l=False, only_g=False):
    r'''Construct a new :obj:`GCEOSMIX` instance at `T`, `P`, and `zs` with
    the same parameters as this object. If the requested state equals the
    current one, the existing instance is returned unchanged. Optionally,
    only one set of phase properties can be solved for, and the fugacities
    calculation can be skipped by setting `fugacities` to False.

    Parameters
    ----------
    T : float
        Temperature, [K]
    P : float
        Pressure, [Pa]
    zs : list[float]
        Mole fractions of each component, [-]
    fugacities : bool
        Whether or not to calculate and set the fugacities of each
        component, [-]
    only_l : bool
        When true, if there is a liquid and a vapor root, only the liquid
        root (and properties) will be set.
    only_g : bool
        When true, if there is a liquid and a vapor root, only the vapor
        root (and properties) will be set.

    Returns
    -------
    eos : :obj:`GCEOSMIX`
        Multicomponent :obj:`GCEOSMIX` at the specified conditions [-]

    Examples
    --------
    >>> base = RKMIX(T=500.0, P=1E6, Tcs=[126.1, 190.6], Pcs=[33.94E5, 46.04E5], omegas=[0.04, 0.011], zs=[0.6, 0.4])
    >>> base.to_TP_zs(T=300, P=1e5, zs=[.1, 0.9])
    RKMIX(Tcs=[126.1, 190.6], Pcs=[3394000.0, 4604000.0], omegas=[0.04, 0.011], kijs=[[0.0, 0.0], [0.0, 0.0]], zs=[0.1, 0.9], T=300, P=100000.0)
    '''
    # Short-circuit: identical state means the current object can be reused.
    if T == self.T and P == self.P and zs == self.zs:
        return self
    return self.__class__(T=T, P=P, zs=zs, Tcs=self.Tcs, Pcs=self.Pcs,
                          omegas=self.omegas, only_l=only_l, only_g=only_g,
                          fugacities=fugacities, **self.kwargs)
def to_PV_zs(self, P, V, zs, fugacities=True, only_l=False, only_g=False):
    r'''Construct a new :obj:`GCEOSMIX` instance at `P`, `V`, and `zs`,
    keeping every other parameter of this object. Optionally, only one
    set of phase properties can be solved for, increasing speed. The
    fugacities calculation can be skipped by setting `fugacities` to
    False.

    Parameters
    ----------
    P : float
        Pressure, [Pa]
    V : float
        Molar volume, [m^3/mol]
    zs : list[float]
        Mole fractions of each component, [-]
    fugacities : bool
        Whether or not to calculate and set the fugacities of each
        component, [-]
    only_l : bool
        When true, if there is a liquid and a vapor root, only the liquid
        root (and properties) will be set.
    only_g : bool
        When true, if there is a liquid and a vapor root, only the vapor
        root (and properties) will be set.

    Returns
    -------
    eos : :obj:`GCEOSMIX`
        Multicomponent :obj:`GCEOSMIX` at the specified conditions [-]

    Notes
    -----
    When `P`, `V`, and `zs` all match the current state, the existing
    object is returned unchanged rather than constructing a new one.

    Examples
    --------
    >>> base = RKMIX(T=500.0, P=1E6, Tcs=[126.1, 190.6], Pcs=[33.94E5, 46.04E5], omegas=[0.04, 0.011], zs=[0.6, 0.4])
    >>> base.to_PV_zs(V=0.004162, P=1e5, zs=[.1, 0.9])
    RKMIX(Tcs=[126.1, 190.6], Pcs=[3394000.0, 4604000.0], omegas=[0.04, 0.011], kijs=[[0.0, 0.0], [0.0, 0.0]], zs=[0.1, 0.9], P=100000.0, V=0.004162)
    '''
    # Any difference in the state specification forces a new instance.
    changed = P != self.P or V != self.V or zs != self.zs
    if changed:
        return self.__class__(P=P, V=V, zs=zs, Tcs=self.Tcs, Pcs=self.Pcs,
                              omegas=self.omegas, only_l=only_l,
                              only_g=only_g, fugacities=fugacities,
                              **self.kwargs)
    return self
def to(self, zs=None, T=None, P=None, V=None, fugacities=True):
    r'''Method to construct a new :obj:`GCEOSMIX` object at two of `T`, `P` or `V`
    with the specified composition.

    In the event the specs match those of the current object, it will be
    returned unchanged.

    Parameters
    ----------
    zs : list[float], optional
        Mole fractions of EOS; defaults to the current composition, [-]
    T : float or None, optional
        Temperature, [K]
    P : float or None, optional
        Pressure, [Pa]
    V : float or None, optional
        Molar volume, [m^3/mol]
    fugacities : bool
        Whether or not to calculate fugacities, [-]

    Returns
    -------
    obj : :obj:`GCEOSMIX`
        Pure component :obj:`GCEOSMIX` at the two specified specs, [-]

    Notes
    -----
    Constructs the object with parameters `Tcs`, `Pcs`, `omegas`, and
    `kwargs`.

    Examples
    --------
    >>> base = PRMIX(T=500.0, P=1E6, Tcs=[126.1, 190.6], Pcs=[33.94E5, 46.04E5], omegas=[0.04, 0.011], zs=[0.6, 0.4])
    >>> base.to(T=300.0, P=1e9).state_specs
    {'T': 300.0, 'P': 1000000000.0}
    >>> base.to(T=300.0, V=1.0).state_specs
    {'T': 300.0, 'V': 1.0}
    >>> base.to(P=1e5, V=1.0).state_specs
    {'P': 100000.0, 'V': 1.0}
    '''
    if zs is None:
        zs = self.zs
    if T is not None and P is not None:
        try:
            # Fast path: reuse cached pure-component alpha terms.
            sln = self.to_TP_zs_fast(T, P, zs)
            if fugacities:
                sln.fugacities()
            return sln
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit propagate; any solver failure falls back to a
            # full construction.
            return self.to_TP_zs(T, P, zs, fugacities)
    elif T is not None and V is not None:
        if T == self.T and V == self.V and zs == self.zs:
            return self
        return self.__class__(T=T, V=V, zs=zs, Tcs=self.Tcs, Pcs=self.Pcs, omegas=self.omegas, fugacities=fugacities, **self.kwargs)
    elif P is not None and V is not None:
        return self.to_PV_zs(P, V, zs, fugacities)
    else:
        # Fewer than two specs provided; let the constructor raise its
        # own error for the under-specified state.
        return self.__class__(T=T, P=P, V=V, zs=zs, Tcs=self.Tcs, Pcs=self.Pcs, omegas=self.omegas, fugacities=fugacities, **self.kwargs)
def to_TP(self, T, P):
    r'''Construct a new :obj:`GCEOSMIX` object at the specified `T` and `P`
    with the current composition. In the event the `T` and `P` match the
    current object's `T` and `P`, it will be returned unchanged.

    Parameters
    ----------
    T : float
        Temperature, [K]
    P : float
        Pressure, [Pa]

    Returns
    -------
    obj : :obj:`GCEOSMIX`
        Pure component :obj:`GCEOSMIX` at specified `T` and `P`, [-]

    Notes
    -----
    Constructs the object with parameters `Tcs`, `Pcs`, `omegas`, and
    `kwargs`.

    Examples
    --------
    >>> base = RKMIX(T=500.0, P=1E6, Tcs=[126.1, 190.6], Pcs=[33.94E5, 46.04E5], omegas=[0.04, 0.011], zs=[0.6, 0.4])
    >>> new = base.to_TP(T=10.0, P=2000.0)
    >>> base.state_specs, new.state_specs
    ({'T': 500.0, 'P': 1000000.0}, {'T': 10.0, 'P': 2000.0})
    '''
    # Thin convenience wrapper: delegate with the current composition.
    return self.to_TP_zs(T=T, P=P, zs=self.zs)
def to_TV(self, T, V):
    r'''Construct a new :obj:`GCEOSMIX` object at the specified `T` and `V`
    with the current composition. In the event the `T` and `V` match the
    current object's `T` and `V`, it will be returned unchanged.

    Parameters
    ----------
    T : float
        Temperature, [K]
    V : float
        Molar volume, [m^3/mol]

    Returns
    -------
    obj : :obj:`GCEOSMIX`
        Pure component :obj:`GCEOSMIX` at specified `T` and `V`, [-]

    Notes
    -----
    Constructs the object with parameters `Tcs`, `Pcs`, `omegas`, and
    `kwargs`.

    Examples
    --------
    >>> base = RKMIX(T=500.0, P=1E6, Tcs=[126.1, 190.6], Pcs=[33.94E5, 46.04E5], omegas=[0.04, 0.011], zs=[0.6, 0.4])
    >>> new = base.to_TV(T=1000000.0, V=1.0)
    >>> base.state_specs, new.state_specs
    ({'T': 500.0, 'P': 1000000.0}, {'T': 1000000.0, 'V': 1.0})
    '''
    # A changed spec forces a fresh construction; fugacities are always
    # computed for the new object.
    if not (T == self.T and V == self.V):
        return self.__class__(T=T, V=V, zs=self.zs, Tcs=self.Tcs,
                              Pcs=self.Pcs, omegas=self.omegas,
                              fugacities=True, **self.kwargs)
    return self
def to_PV(self, P, V):
    r'''Construct a new :obj:`GCEOSMIX` object at the specified `P` and `V`
    with the current composition. In the event the `P` and `V` match the
    current object's `P` and `V`, it will be returned unchanged.

    Parameters
    ----------
    P : float
        Pressure, [Pa]
    V : float
        Molar volume, [m^3/mol]

    Returns
    -------
    obj : :obj:`GCEOSMIX`
        Pure component :obj:`GCEOSMIX` at specified `P` and `V`, [-]

    Notes
    -----
    Constructs the object with parameters `Tcs`, `Pcs`, `omegas`, and
    `kwargs`.

    Examples
    --------
    >>> base = RKMIX(T=500.0, P=1E6, Tcs=[126.1, 190.6], Pcs=[33.94E5, 46.04E5], omegas=[0.04, 0.011], zs=[0.6, 0.4])
    >>> new = base.to_PV(P=1000000.0, V=1.0)
    >>> base.state_specs, new.state_specs
    ({'T': 500.0, 'P': 1000000.0}, {'P': 1000000.0, 'V': 1.0})
    '''
    # A changed spec forces a fresh construction; fugacities are always
    # computed for the new object.
    if not (V == self.V and P == self.P):
        return self.__class__(V=V, P=P, zs=self.zs, Tcs=self.Tcs,
                              Pcs=self.Pcs, omegas=self.omegas,
                              fugacities=True, **self.kwargs)
    return self
def to_mechanical_critical_point(self):
    r'''Construct a new :obj:`GCEOSMIX` object with the current
    composition, but at the mixture's mechanical critical point.

    Returns
    -------
    obj : :obj:`GCEOSMIX`
        Pure component :obj:`GCEOSMIX` at mechanical critical point [-]

    Examples
    --------
    >>> base = RKMIX(T=500.0, P=1E6, Tcs=[126.1, 190.6], Pcs=[33.94E5, 46.04E5], omegas=[0.04, 0.011], zs=[0.6, 0.4])
    >>> base.to_mechanical_critical_point()
    RKMIX(Tcs=[126.1, 190.6], Pcs=[3394000.0, 4604000.0], omegas=[0.04, 0.011], kijs=[[0.0, 0.0], [0.0, 0.0]], zs=[0.6, 0.4], T=151.861, P=3908737.9)
    '''
    # Solve for the mechanical critical (T, P), then rebuild there.
    Tmc, Pmc = self.mechanical_critical_point()
    return self.to_TP_zs(T=Tmc, P=Pmc, zs=self.zs)
def to_TPV_pure(self, i, T=None, P=None, V=None):
    r'''Helper method which returns a pure `EOSs` at the specs (two of `T`,
    `P` and `V`) and base EOS as the mixture for a particular index.

    Parameters
    ----------
    i : int
        Index of specified compound, [-]
    T : float or None, optional
        Specified temperature, [K]
    P : float or None, optional
        Specified pressure, [Pa]
    V : float or None, optional
        Specified volume, [m^3/mol]

    Returns
    -------
    eos_pure : eos
        A pure-species EOSs at the two specified `T`, `P`, and `V` for
        component `i`, [-]

    Notes
    -----
    Mixture keyword arguments listed in ``mix_kwargs_to_pure`` are
    renamed to their pure-component equivalents, taking element `i` of
    each per-component list.
    '''
    translate = self.mix_kwargs_to_pure
    # Build the pure-component kwargs from the mixture kwargs in one pass.
    pure_kwargs = {translate[key]: vals[i]
                   for key, vals in self.kwargs.items() if key in translate}
    return self.eos_pure(T=T, P=P, V=V, Tc=self.Tcs[i], Pc=self.Pcs[i],
                         omega=self.omegas[i], **pure_kwargs)
def pures(self):
    r'''Helper method which returns a list of pure `EOSs` at the same `T`
    and `P` and base EOS as the mixture.

    Returns
    -------
    eos_pures : list[eos]
        A list of pure-species EOSs at the same `T` and `P` as the system,
        [-]

    Notes
    -----
    This is useful for i.e. comparing mixture fugacities with the
    Lewis-Randall rule or when using an activity coefficient model which
    require pure component fugacities.
    '''
    T, P = self.T, self.P
    # One pure EOS per component, all at the mixture's (T, P).
    return [self.to_TPV_pure(T=T, P=P, V=None, i=idx)
            for idx in range(self.N)]
@property
def pseudo_Tc(self):
    '''Apply a linear mole-fraction mixing rule to compute the average
    critical temperature, [K].

    Examples
    --------
    >>> base = RKMIX(T=150.0, P=4e6, Tcs=[126.1, 190.6], Pcs=[33.94E5, 46.04E5], omegas=[0.04, 0.011], zs=[0.6, 0.4])
    >>> base.pseudo_Tc
    151.9
    '''
    zs, Tcs = self.zs, self.Tcs
    # sum(z_i * Tc_i) over all components
    return sum(zs[idx]*Tcs[idx] for idx in range(self.N))
@property
def pseudo_Pc(self):
    '''Apply a linear mole-fraction mixing rule to compute the average
    critical pressure, [Pa].

    Examples
    --------
    >>> base = RKMIX(T=150.0, P=4e6, Tcs=[126.1, 190.6], Pcs=[33.94E5, 46.04E5], omegas=[0.04, 0.011], zs=[0.6, 0.4])
    >>> base.pseudo_Pc
    3878000.0
    '''
    zs, Pcs = self.zs, self.Pcs
    # sum(z_i * Pc_i) over all components
    return sum(zs[idx]*Pcs[idx] for idx in range(self.N))
@property
def pseudo_omega(self):
    '''Apply a linear mole-fraction mixing rule to compute the average
    `omega`, [-].

    Examples
    --------
    >>> base = RKMIX(T=150.0, P=4e6, Tcs=[126.1, 190.6], Pcs=[33.94E5, 46.04E5], omegas=[0.04, 0.011], zs=[0.6, 0.4])
    >>> base.pseudo_omega
    0.0284
    '''
    zs, omegas = self.zs, self.omegas
    # sum(z_i * omega_i) over all components
    return sum(zs[idx]*omegas[idx] for idx in range(self.N))
@property
def pseudo_a(self):
    '''Apply a linear mole-fraction mixing rule to compute the average
    `a` coefficient, [-].

    Examples
    --------
    >>> base = RKMIX(T=150.0, P=4e6, Tcs=[126.1, 190.6], Pcs=[33.94E5, 46.04E5], omegas=[0.04, 0.011], zs=[0.6, 0.4])
    >>> base.pseudo_a
    0.17634464184
    '''
    zs, ais = self.zs, self.ais
    # sum(z_i * a_i) over all components
    return sum(zs[idx]*ais[idx] for idx in range(self.N))
def Psat(self, T, polish=False):
    r'''Generic method to calculate vapor pressure of a pure-component
    equation of state for a specified `T`. An explicit solution is used
    unless `polish` is True.

    The result of this function has no physical meaning for multicomponent
    mixtures, and does not represent either a dew point or a bubble point!

    Parameters
    ----------
    T : float
        Temperature, [K]
    polish : bool, optional
        Whether to attempt to use a numerical solver to make the solution
        more precise or not

    Returns
    -------
    Psat : float
        Vapor pressure using the pure-component approach, [Pa]

    Notes
    -----
    For multicomponent mixtures this may serve as a useful guess
    for the dew and the bubble pressure.

    Mole-fraction-weighted pseudo-critical properties are temporarily
    set on the object so the pure-component routine can run, and are
    removed again before returning.
    '''
    if self.N == 1:
        Tc, Pc, omega, a = self.Tcs[0], self.Pcs[0], self.omegas[0], self.ais[0]
    else:
        # Linear mole-fraction mixing rules for the pseudo properties.
        zs = self.zs
        Tcs, Pcs, omegas, ais = self.Tcs, self.Pcs, self.omegas, self.ais
        Tc, Pc, omega, a = 0.0, 0.0, 0.0, 0.0
        for i in range(self.N):
            Tc += Tcs[i]*zs[i]
            Pc += Pcs[i]*zs[i]
            omega += omegas[i]*zs[i]
            a += ais[i]*zs[i]
    self.Tc, self.Pc, self.omega = Tc, Pc, omega
    self.a = a
    # Bug fix: `polish` was previously hard-coded to False here, so the
    # documented polish behavior could never be requested by callers.
    Psat = GCEOS.Psat(self, T, polish=polish)
    del self.Tc, self.Pc, self.omega
    # NOTE(review): self.a is set above but intentionally left in place
    # (original behavior); confirm whether it should also be deleted.
    return Psat
def a_alpha_and_derivatives(self, T, full=True, quick=True,
                            pure_a_alphas=True):
    r'''Method to calculate `a_alpha` and its first and second
    derivatives for an EOS with the Van der Waals mixing rules. Uses the
    parent class's interface to compute pure component values. Returns
    `a_alpha`, `da_alpha_dT`, and `d2a_alpha_dT2`.

    For use in :obj:`solve_T <GCEOSMIX.solve_T>` this returns only
    `a_alpha` if `full` is False.

    .. math::
        a \alpha = \sum_i \sum_j z_i z_j {(a\alpha)}_{ij}

    .. math::
        (a\alpha)_{ij} = (1-k_{ij})\sqrt{(a\alpha)_{i}(a\alpha)_{j}}

    Parameters
    ----------
    T : float
        Temperature, [K]
    full : bool, optional
        If False, calculates and returns only `a_alpha`
    quick : bool, optional
        Only the quick variant is implemented; it is little faster anyhow
    pure_a_alphas : bool, optional
        Whether or not to recalculate the a_alpha terms of pure components
        (for the case of mixtures only) which stay the same as the
        composition changes (i.e in a PT flash), [-]

    Returns
    -------
    a_alpha : float
        Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
    da_alpha_dT : float
        Temperature derivative of coefficient calculated by EOS-specific
        method, [J^2/mol^2/Pa/K]
    d2a_alpha_dT2 : float
        Second temperature derivative of coefficient calculated by
        EOS-specific method, [J^2/mol^2/Pa/K**2]
    '''
    if pure_a_alphas:
        # Recompute the pure-component terms at T and cache them.
        if full:
            a_alphas, da_alpha_dTs, d2a_alpha_dT2s = self.a_alpha_and_derivatives_vectorized(T)
            self.a_alphas, self.da_alpha_dTs, self.d2a_alpha_dT2s = a_alphas, da_alpha_dTs, d2a_alpha_dT2s
        else:
            self.a_alphas = a_alphas = self.a_alphas_vectorized(T)
            da_alpha_dTs = d2a_alpha_dT2s = None
    else:
        try:
            # Reuse cached pure-component values (composition-only change).
            a_alphas, da_alpha_dTs, d2a_alpha_dT2s = self.a_alphas, self.da_alpha_dTs, self.d2a_alpha_dT2s
        except AttributeError:
            # Narrowed from a bare `except:`; only a missing cache should
            # trigger the recompute fallback.
            if full:
                a_alphas, da_alpha_dTs, d2a_alpha_dT2s = self.a_alpha_and_derivatives_vectorized(T)
                self.a_alphas, self.da_alpha_dTs, self.d2a_alpha_dT2s = a_alphas, da_alpha_dTs, d2a_alpha_dT2s
            else:
                self.a_alphas = a_alphas = self.a_alphas_vectorized(T)
                da_alpha_dTs = d2a_alpha_dT2s = None
    # The numpy path only pays off for very large systems, and not on PyPy.
    if not IS_PYPY and self.N > 2000:
        return self.a_alpha_and_derivatives_numpy(a_alphas, da_alpha_dTs, d2a_alpha_dT2s, T, full=full, quick=quick)
    return self.a_alpha_and_derivatives_py(a_alphas, da_alpha_dTs, d2a_alpha_dT2s, T, full=full, quick=quick)
def a_alpha_and_derivatives_py(self, a_alphas, da_alpha_dTs, d2a_alpha_dT2s, T, full=True, quick=True):
    # NOTE(review): DEAD CODE - this definition is shadowed by a later
    # `def a_alpha_and_derivatives_py` in the same class body, so it is
    # never bound at runtime. Kept byte-identical; consider deleting.
    # For 44 components, takes 150 us in PyPy.; 95 in pythran. Much of that is type conversions.
    # 4 ms pypy for 44*4, 1.3 ms for pythran, 10 ms python with numpy
    # 2 components 1.89 pypy, pythran 1.75 us, regular python 12.7 us.
    # 10 components - regular python 148 us, 9.81 us PyPy, 8.37 pythran in PyPy (flags have no effect; 14.3 us in regular python)
    zs, kijs, N = self.zs, self.kijs, self.N
    # Cached (a_alpha)_ij matrices are only valid at the stored temperature.
    same_T = T == self.T
    if quick:
        try:
            assert same_T
            a_alpha_ijs, a_alpha_roots, a_alpha_ij_roots_inv = self.a_alpha_ijs, self.a_alpha_roots, self.a_alpha_ij_roots_inv
        except (AttributeError, AssertionError):
            # Cache missing or stale: rebuild the composition-independent terms.
            try:
                a_alpha_ijs, a_alpha_roots, a_alpha_ij_roots_inv = a_alpha_aijs_composition_independent(a_alphas, kijs)
            except ZeroDivisionError:
                # Some pure a_alpha terms are exactly zero; use the
                # zero-tolerant variant.
                a_alpha_ijs, a_alpha_roots, a_alpha_ij_roots_inv = a_alpha_aijs_composition_independent_support_zeros(a_alphas, kijs)
            self.a_alpha_ijs, self.a_alpha_roots, self.a_alpha_ij_roots_inv = a_alpha_ijs, a_alpha_roots, a_alpha_ij_roots_inv
    else:
        try:
            a_alpha_ijs, a_alpha_roots, a_alpha_ij_roots_inv = a_alpha_aijs_composition_independent(a_alphas, kijs)
        except:
            # NOTE(review): bare except - presumably targeting
            # ZeroDivisionError as above; confirm before narrowing.
            a_alpha_ijs, a_alpha_roots, a_alpha_ij_roots_inv = a_alpha_aijs_composition_independent_support_zeros(a_alphas, kijs)
        if same_T:
            self.a_alpha_ijs, self.a_alpha_roots, self.a_alpha_ij_roots_inv = a_alpha_ijs, a_alpha_roots, a_alpha_ij_roots_inv
    if full:
        try:
            a_alpha, da_alpha_dT, d2a_alpha_dT2, a_alpha_ijs, da_alpha_dT_ijs, d2a_alpha_dT2_ijs = a_alpha_and_derivatives_full(a_alphas, da_alpha_dTs, d2a_alpha_dT2s, T, zs, kijs,
                                                                                                                                a_alpha_ijs, a_alpha_roots, a_alpha_ij_roots_inv)
        except:
            # NOTE(review): bare except; only the single-component case
            # is recovered here - with N > 1 the names below stay unbound
            # and a NameError would follow.
            if self.N == 1:
                a_alpha, da_alpha_dT, d2a_alpha_dT2 = a_alphas[0], da_alpha_dTs[0], d2a_alpha_dT2s[0]
                d2a_alpha_dT2_ijs, da_alpha_dT_ijs, a_alpha_ijs = [[d2a_alpha_dT2s[0]]], [[da_alpha_dTs[0]]], [[a_alphas[0]]]
        # Cache the pairwise matrices for fugacity temperature derivatives.
        self.d2a_alpha_dT2_ijs = d2a_alpha_dT2_ijs
        self.da_alpha_dT_ijs = da_alpha_dT_ijs
        self.a_alpha_ijs = a_alpha_ijs
        return a_alpha, da_alpha_dT, d2a_alpha_dT2
    else:
        # Priority - test, fix, and validate
        a_alpha, _, a_alpha_ijs = a_alpha_and_derivatives(a_alphas, T, zs, kijs, a_alpha_ijs, a_alpha_roots, a_alpha_ij_roots_inv)
        self.da_alpha_dT_ijs = []
        self.a_alpha_ijs = a_alpha_ijs
        return a_alpha
# # DO NOT REMOVE THIS CODE! IT MAKES THINGS SLOWER IN PYPY, even though it never runs
# cmps = self.cmps
# da_alpha_dT, d2a_alpha_dT2 = 0.0, 0.0
#
# a_alpha_ijs = [[None]*N for _ in cmps]
# a_alpha_roots = [a_alpha_i**0.5 for a_alpha_i in a_alphas]
#
# if full:
# a_alpha_ij_roots = [[None]*N for _ in cmps]
# for i in cmps:
# kijs_i = kijs[i]
# a_alpha_i = a_alphas[i]
# a_alpha_ijs_is = a_alpha_ijs[i]
# a_alpha_ij_roots_i = a_alpha_ij_roots[i]
# for j in cmps:
# if j < i:
# continue
# a_alpha_ij_roots_i[j] = a_alpha_roots[i]*a_alpha_roots[j]#(a_alpha_i*a_alphas[j])**0.5
# a_alpha_ijs_is[j] = a_alpha_ijs[j][i] = (1. - kijs_i[j])*a_alpha_ij_roots_i[j]
# else:
# for i in cmps:
# kijs_i = kijs[i]
# a_alpha_i = a_alphas[i]
# a_alpha_ijs_is = a_alpha_ijs[i]
# for j in cmps:
# if j < i:
# continue
# a_alpha_ijs_is[j] = a_alpha_ijs[j][i] = (1. - kijs_i[j])*a_alpha_roots[i]*a_alpha_roots[j]
#
# # Faster than an optimized loop in pypy even
# z_products = [[zs[i]*zs[j] for j in cmps] for i in cmps]
#
# a_alpha = 0.0
# for i in cmps:
# a_alpha_ijs_i = a_alpha_ijs[i]
# z_products_i = z_products[i]
# for j in cmps:
# if j < i:
# continue
# elif i != j:
# a_alpha += 2.0*a_alpha_ijs_i[j]*z_products_i[j]
# else:
# a_alpha += a_alpha_ijs_i[j]*z_products_i[j]
#
# # List comprehension tested to be faster in CPython not pypy
## a_alpha = sum([a_alpha_ijs[i][j]*z_products[i][j]
## for j in self.cmps for i in self.cmps])
# self.a_alpha_ijs = a_alpha_ijs
#
# da_alpha_dT_ijs = self.da_alpha_dT_ijs = [[None]*N for _ in cmps]
#
# if full:
# for i in cmps:
# kijs_i = kijs[i]
# a_alphai = a_alphas[i]
# z_products_i = z_products[i]
# da_alpha_dT_i = da_alpha_dTs[i]
# d2a_alpha_dT2_i = d2a_alpha_dT2s[i]
# a_alpha_ij_roots_i = a_alpha_ij_roots[i]
# for j in cmps:
# if j < i:
# # skip the duplicates
# continue
# a_alphaj = a_alphas[j]
# x0 = a_alphai*a_alphaj
# x0_05 = a_alpha_ij_roots_i[j]
# zi_zj = z_products_i[j]
#
# x1 = a_alphai*da_alpha_dTs[j]
# x2 = a_alphaj*da_alpha_dT_i
# x1_x2 = x1 + x2
# x3 = 2.0*x1_x2
#
# kij_m1 = kijs_i[j] - 1.0
#
# da_alpha_dT_ij = -0.5*kij_m1*x1_x2/x0_05
#
# # For temperature derivatives of fugacities
# da_alpha_dT_ijs[i][j] = da_alpha_dT_ijs[j][i] = da_alpha_dT_ij
#
# da_alpha_dT_ij *= zi_zj
#
# d2a_alpha_dT2_ij = zi_zj*kij_m1*(-0.25*x0_05*(x0*(
# 2.0*(a_alphai*d2a_alpha_dT2s[j] + a_alphaj*d2a_alpha_dT2_i)
# + 4.*da_alpha_dT_i*da_alpha_dTs[j]) - x1*x3 - x2*x3 + x1_x2*x1_x2)/(x0*x0))
#
# if i != j:
# da_alpha_dT += da_alpha_dT_ij + da_alpha_dT_ij
# d2a_alpha_dT2 += d2a_alpha_dT2_ij + d2a_alpha_dT2_ij
# else:
# da_alpha_dT += da_alpha_dT_ij
# d2a_alpha_dT2 += d2a_alpha_dT2_ij
#
# return a_alpha, da_alpha_dT, d2a_alpha_dT2
# else:
# return a_alpha
def a_alpha_and_derivatives_py(self, a_alphas, da_alpha_dTs, d2a_alpha_dT2s, T, full=True, quick=True):
    '''Compute the mixture `a_alpha` (and, when `full`, its first and
    second temperature derivatives) from the pure-component values using
    the quadratic Van der Waals mixing rule, in pure Python.

    Also caches ``a_alpha_roots``, ``a_alpha_j_rows`` and (when `full`)
    ``da_alpha_dT_j_rows`` on the object for reuse by other methods.
    '''
    zs, kijs, scalar, N = self.zs, self.kijs, self.scalar, self.N
    # sqrt of each pure a_alpha; list for scalar mode, ndarray otherwise.
    if scalar:
        a_alpha_roots = [sqrt(v) for v in a_alphas]
    else:
        a_alpha_roots = npsqrt(a_alphas)
    self.a_alpha_roots = a_alpha_roots
    # Converting inputs to numpy arrays here was measured to be ~5x
    # slower than the plain-Python helpers, hence the scalar branch.
    if not full:
        row = [0.0]*N if scalar else zeros(N)
        a_alpha, self.a_alpha_j_rows = a_alpha_quadratic_terms(
            a_alphas, a_alpha_roots, T, zs, kijs, a_alpha_j_rows=row)
        return a_alpha
    if scalar:
        a_row, da_row = [0.0]*N, [0.0]*N
    else:
        a_row, da_row = zeros(N), zeros(N)
    (a_alpha, da_alpha_dT, d2a_alpha_dT2,
     self.a_alpha_j_rows, self.da_alpha_dT_j_rows) = a_alpha_and_derivatives_quadratic_terms(
        a_alphas, a_alpha_roots, da_alpha_dTs, d2a_alpha_dT2s, T, zs, kijs,
        a_alpha_j_rows=a_row, da_alpha_dT_j_rows=da_row)
    return a_alpha, da_alpha_dT, d2a_alpha_dT2
def a_alpha_and_derivatives_numpy(self, a_alphas, da_alpha_dTs, d2a_alpha_dT2s, T, full=True, quick=True):
    # Vectorized quadratic (Van der Waals) mixing rule:
    # a_alpha = sum_ij z_i z_j (1 - k_ij) sqrt(a_alpha_i a_alpha_j).
    # Used instead of the pure-Python loop for very large N.
    zs, kijs = self.zs, np.array(self.kijs)
    a_alphas = np.array(a_alphas)
    da_alpha_dTs = np.array(da_alpha_dTs)
    one_minus_kijs = 1.0 - kijs
    # Outer product a_alpha_i * a_alpha_j and its elementwise sqrt.
    x0 = np.einsum('i,j', a_alphas, a_alphas)
    x0_05 = npsqrt(x0)
    a_alpha_ijs = (one_minus_kijs)*x0_05
    z_products = np.einsum('i,j', zs, zs)
    a_alpha = np.einsum('ij,ji', a_alpha_ijs, z_products)
    if self.scalar:
        self.a_alpha_ijs = a_alpha_ijs.tolist()
    else:
        self.a_alpha_ijs = a_alpha_ijs
    if full:
        # NOTE(review): the expressions below are an algebraic expansion
        # of d/dT and d2/dT2 of the mixing rule; the pairwise symmetry
        # factors appear folded into the term combinations. Verify any
        # change against a_alpha_and_derivatives_py before editing.
        term0 = np.einsum('j,i', a_alphas, da_alpha_dTs)
        term7 = (one_minus_kijs)/(x0_05)
        da_alpha_dT = (z_products*term7*(term0)).sum()
        term1 = -x0_05/x0*(one_minus_kijs)
        term2 = np.einsum('i, j', a_alphas, da_alpha_dTs)
        main3 = da_alpha_dTs/(2.0*a_alphas)*term2
        main4 = -np.einsum('i, j', a_alphas, d2a_alpha_dT2s)
        main6 = -0.5*np.einsum('i, j', da_alpha_dTs, da_alpha_dTs)
        # Needed for fugacity temperature derivative
        self.da_alpha_dT_ijs = (0.5*(term7)*(term2 + term0)).tolist()
        d2a_alpha_dT2 = (z_products*(term1*(main3 + main4 + main6))).sum()
        return float(a_alpha), float(da_alpha_dT), float(d2a_alpha_dT2)
    else:
        return float(a_alpha)
def _spinodal_f(self, TPV):
    # TODO - use `self`, do not create new instance
    # Work to do - ethane', 'heptane
    # Specify V, solve P; increase V and keep going
    # After Effective utilization of equations of state for thermodynamic properties in process simulation
    # Usage sketch (originally a string literal, behaviorally inert):
    #   eos = PRMIX(P=6e6, T=500, Tcs=[305.32, 540.2], Pcs=[4872000.0, 2740000.0], omegas=[0.098, 0.3457], zs=[.5, .5])
    #   def to_solve(T):
    #       return eos.to(T=T, P=eos.P, zs=eos.zs)._spinodal_f([T, eos.P])
    #   eos.to(T=secant(to_solve, eos.T), P=eos.P, zs=eos.zs).rho_l  # 3004.715984610371
    T, P, V = TPV
    state = self.to(T=T, P=P, V=V, zs=self.zs)
    # Pick which root's Helmholtz-energy Hessian to use: with both roots
    # present, take the one with the lower departure Gibbs energy.
    phase = state.phase
    if phase == 'l/g':
        target = 'l' if state.G_dep_l < state.G_dep_g else 'g'
    elif phase == 'g':
        target = 'g'
    else:
        target = 'l'
    hessian = state.d2nA_dninjs_Vt(target)
    scale = 1.0/(R*state.T)
    # Spinodal criterion: determinant of the scaled Hessian is zero.
    return det([[scale*entry for entry in row] for row in hessian])
def _spinodal_at(self, T=None, P=None, V=None):
    # TODO finish
    # NOTE(review): unfinished debugging scaffolding. It never returns a
    # value; the T branch plots the objective with matplotlib (a side
    # effect unsuitable for library code) and `if 1:` / `a = 1` are
    # leftover debug constructs. The P and V branches define `to_solve`
    # but never call a solver.
    if T is not None:
        def to_solve(V):
            # Spinodal objective at fixed T, variable V.
            return self._spinodal_f((T, None, V))
        if 1:
            from fluids.numerics import linspace
            # Scan volumes from just above the covolume b upward.
            Vs = linspace(self.b*(1+1e-7), self.b*1000, 1000)
            errs = []
            for Vi in Vs:
                try:
                    errs.append(abs(to_solve(Vi)))
                except:
                    # NOTE(review): bare except used as a sentinel for
                    # failed evaluations during the scan.
                    errs.append(1e5)
            import matplotlib.pyplot as plt
            plt.semilogy(Vs, errs)
            plt.show()
            a = 1
    elif P is not None:
        def to_solve(V):
            # Spinodal objective at fixed P, variable V.
            return self._spinodal_f((None, P, V))
    elif V is not None:
        def to_solve(T):
            # Spinodal objective at fixed V, variable T.
            return self._spinodal_f((T, None, V))
def _mechanical_critical_point_f_jac(self, TP):
    '''Residuals and Jacobian for the mechanical critical point solve.

    The criteria for c_goal and d_goal come from a cubic
    'roots_cubic', which uses a `f`, `g`, and `h` parameter. When all of
    them are zero, all three roots are equal. For the eos (a=1), this
    results in the following system of equations:

    from sympy import *
    a = 1
    b, c, d = symbols('b, c, d')
    f = ((3* c / a) - ((b ** 2) / (a ** 2))) / 3
    g = (((2 * (b ** 3)) / (a ** 3)) - ((9* b * c) / (a **2)) + (27 * d / a)) /27
    h = ((g ** 2) / 4 + (f ** 3) / 27)
    solve([Eq(f, 0), Eq(g, 0), Eq(h, 0)], [b, c, d])

    The solution (sympy struggled) is:
    c = b^2/3
    d = b^3/27

    These two variables switch sign at the criteria, so they work well with
    a root finding approach.

    Derived with:

    from sympy import *
    P, T, V, R, b_eos, alpha = symbols('P, T, V, R, b_eos, alpha')
    Tc, Pc, omega = symbols('Tc, Pc, omega')
    delta, epsilon = symbols('delta, epsilon')
    a_alpha = alpha(T)
    eta = b_eos
    B = b_eos*P/(R*T)
    deltas = delta*P/(R*T)
    thetas = a_alpha*P/(R*T)**2
    epsilons = epsilon*(P/(R*T))**2
    etas = eta*P/(R*T)
    b = (deltas - B - 1)
    c = (thetas + epsilons - deltas*(B + 1))
    d = -(epsilons*(B + 1) + thetas*etas)
    c_goal = b*b/3
    d_goal = b*b*b/27
    F1 = c - c_goal
    F2 = d - d_goal
    cse([F1, F2, diff(F1, T), diff(F1, P), diff(F2, T), diff(F2, P)], optimizations='basic')

    Performance analysis:
    77% of this is getting a_alpha and da_alpha_dT.
    71% of the outer solver is getting f and this Jacobian.
    Limited results from optimizing the below code, which was derived with
    sympy.
    '''
    T, P = float(TP[0]), float(TP[1])
    b_eos, delta, epsilon = self.b, self.delta, self.epsilon
    eta = b_eos
    # Drop any cached pairwise a_alpha terms so a_alpha is recomputed at
    # the new temperature below.
    try:
        del self.a_alpha_ijs
        del self.a_alpha_roots
        del self.a_alpha_ij_roots_inv
    except:
        # NOTE(review): bare except, presumably to cover AttributeError
        # when the cache attributes do not exist - confirm and narrow.
        pass
    a_alpha, da_alpha_dT, _ = self.a_alpha_and_derivatives(T, full=True)
    # The x* temporaries below are sympy common-subexpression output;
    # do not reorder - later values depend on earlier ones.
    x6 = R_inv
    x7 = 1.0/T
    x0 = a_alpha
    x1 = R_inv*R_inv
    x2 = x7*x7
    x3 = x1*x2
    x4 = P*P
    x5 = epsilon*x3*x4
    x8 = P*x6*x7
    x9 = delta*x8
    x10 = b_eos*x8
    x11 = x10 + 1.0
    x12 = x11 - x9
    x13 = x12*x12
    x14 = P*x2*x6
    x15 = da_alpha_dT
    x16 = x6*x7
    x17 = x0*x16
    x18 = 2.0*epsilon*x8
    x19 = delta*x10
    x20 = delta*x11
    x21 = b_eos - delta
    x22 = 2.0/3.0*x12*x21
    x23 = P*b_eos*x0*x1*x2
    x24 = b_eos*x5
    x25 = x11*x18
    x26 = x13*x21/9.0
    # Residuals: F1 = c - b^2/3, F2 = d - b^3/27 (triple-root criteria).
    F1 = P*x0*x3 - x11*x9 - x13/3.0 + x5
    F2 = -x11*x5 + x13*x12/27.0 - b_eos*x0*x4*x6*x1*x7*x2
    # Analytic Jacobian entries w.r.t. T and P.
    dF1_dT = x14*(x15*x6 - 2.0*x17 - x18 + x19 + x20 + x22)
    dF1_dP = x16*(x17 + x18 - x19 - x20 - x22)
    dF2_dT = x14*(-P*b_eos*x1*x15*x7 + 3.0*x23 + x24 + x25 - x26)
    dF2_dP = x16*(-2.0*x23 - x24 - x25 + x26)
    return [F1, F2], [[dF1_dT, dF1_dP], [dF2_dT, dF2_dP]]
def mechanical_critical_point(self):
    r'''Method to calculate the mechanical critical point of a mixture
    of defined composition.

    The mechanical critical point is where:

    .. math::
        \frac{\partial P}{\partial \rho}|_T =
        \frac{\partial^2 P}{\partial \rho^2}|_T = 0

    Returns
    -------
    T : float
        Mechanical critical temperature, [K]
    P : float
        Mechanical critical pressure, [Pa]

    Notes
    -----
    One useful application of the mechanical critical temperature is that
    the phase identification approach of Venkatarathnam is valid only up to
    it.

    Note that the equation of state, when solved at these conditions, will
    have fairly large (1e-3 - 1e-6) results for the derivatives; but they
    are the minimum. This is just from floating point precision.

    It can also be checked looking at the calculated molar volumes - all
    three (available with :obj:`sorted_volumes <GCEOSMIX.sorted_volumes>`) will be very close (1e-5
    difference in practice), again differing because of floating point
    error.

    The algorithm here is a custom implementation, using Newton-Raphson's
    method with the initial guesses described in [1] (mole-weighted
    critical pressure average, critical temperature average using a
    quadratic mixing rule). Normally ~4 iterations are needed to solve the
    system. It is relatively fast, as only one evaluation of `a_alpha`
    and `da_alpha_dT` are needed per call to function and its jacobian.

    References
    ----------
    .. [1] Watson, Harry A. J., and Paul I. Barton. "Reliable Flash
       Calculations: Part 3. A Nonsmooth Approach to Density Extrapolation
       and Pseudoproperty Evaluation." Industrial & Engineering Chemistry
       Research, November 11, 2017.
       https://doi.org/10.1021/acs.iecr.7b03233.
    .. [2] Mathias P. M., Boston J. F., and Watanasiri S. "Effective
       Utilization of Equations of State for Thermodynamic Properties in
       Process Simulation." AIChE Journal 30, no. 2 (June 17, 2004):
       182-86. https://doi.org/10.1002/aic.690300203.
    '''
    zs, Tcs, Pcs, N = self.zs, self.Tcs, self.Pcs, self.N
    # Initial guesses per [1]: linear mixing for Pc, quadratic
    # (geometric-mean) mixing for Tc.
    Pmc = 0.0
    for i in range(N):
        Pmc += Pcs[i]*zs[i]
    Tmc = 0.0
    for i in range(N):
        zi_Tci = zs[i]*Tcs[i]
        for j in range(N):
            Tmc += sqrt(Tcs[i]*Tcs[j])*zs[j]*zs[i]
    TP, _ = newton_system(self._mechanical_critical_point_f_jac,
                          x0=[Tmc, Pmc], jac=True, ytol=1e-10,
                          xtol=1e-12,
                          solve_func=solve_2_direct)
    return float(TP[0]), float(TP[1])
def fugacities(self, only_l=False, only_g=False):
    r'''Helper method for calculating fugacity coefficients for any
    phases present, using either the overall mole fractions for both phases
    or using specified mole fractions for each phase.

    Requires :obj:`fugacity_coefficients <GCEOSMIX.fugacity_coefficients>` to be implemented by each subclassing
    EOS.

    In addition to setting `fugacities_l` and/or `fugacities_g`, this also
    sets the fugacity coefficients `phis_l` and/or `phis_g`.

    .. math::
        \hat \phi_i^g = \frac{\hat f_i^g}{y_i P}

    .. math::
        \hat \phi_i^l = \frac{\hat f_i^l}{x_i P}

    Note that in a flash calculation, each phase requires their own EOS
    object.

    Parameters
    ----------
    only_l : bool
        When true, if there is a liquid and a vapor root, only the liquid
        root (and properties) will be set.
    only_g : bool
        When true, if there is a liquid and a vapor root, only the vapor
        root (and properties) will be set.

    Notes
    -----
    It is helpful to check that :obj:`fugacity_coefficients <GCEOSMIX.fugacity_coefficients>` has been
    implemented correctly using the following expression, from [1]_.

    .. math::
        \ln \hat \phi_i = \left[\frac{\partial (n\ln \phi)}{\partial
        n_i}\right]_{T,P,n_j,V_t}

    References
    ----------
    .. [1] Hu, Jiawen, Rong Wang, and Shide Mao. "Some Useful Expressions
       for Deriving Component Fugacity Coefficients from Mixture Fugacity
       Coefficient." Fluid Phase Equilibria 268, no. 1-2 (June 25, 2008):
       7-13. doi:10.1016/j.fluid.2008.03.007.
    .. [2] Walas, Stanley M. Phase Equilibria in Chemical Engineering.
       Butterworth-Heinemann, 1985.
    '''
    P, zs, scalar = self.P, self.zs, self.scalar
    if not only_g and hasattr(self, 'V_l'):
        self.lnphis_l = lnphis_l = self.fugacity_coefficients(self.Z_l)
        if scalar:
            try:
                self.phis_l = [exp(i) for i in lnphis_l]
            except OverflowError:
                # Narrowed from a bare `except:`; math.exp raises
                # OverflowError for huge ln(phi) - clamp via trunc_exp
                # instead of crashing.
                self.phis_l = [trunc_exp(i, trunc=1e308) for i in lnphis_l]
            self.fugacities_l = [phi*x*P for phi, x in zip(self.phis_l, zs)]
        else:
            # Vectorized path; numpy exp saturates to inf, no overflow.
            self.phis_l = phis_l = npexp(lnphis_l)
            self.fugacities_l = zs*P*phis_l
    if not only_l and hasattr(self, 'V_g'):
        self.lnphis_g = lnphis_g = self.fugacity_coefficients(self.Z_g)
        if scalar:
            try:
                self.phis_g = phis_g = [exp(i) for i in lnphis_g]
            except OverflowError:
                # Same overflow clamp as the liquid branch.
                self.phis_g = phis_g = [trunc_exp(i, trunc=1e308) for i in lnphis_g]
            self.fugacities_g = [phi*y*P for phi, y in zip(phis_g, zs)]
        else:
            self.phis_g = phis_g = npexp(lnphis_g)
            self.fugacities_g = zs*P*phis_g
def _eos_lnphis_lowest_Gibbs(self):
try:
try:
if self.G_dep_l < self.G_dep_g:
return self.lnphis_l, 'l'
else:
return self.lnphis_g, 'g'
except:
# Only one root - take it and set the prefered other phase to be a different type
return (self.lnphis_g, 'g') if hasattr(self, 'Z_g') else (self.lnphis_l, 'l')
except:
self.fugacities()
return self._eos_fugacities_lowest_Gibbs()
def _eos_fugacities_lowest_Gibbs(self):
# TODO delete with property_package.py
try:
try:
if self.G_dep_l < self.G_dep_g:
return self.fugacities_l, 'l'
else:
return self.fugacities_g, 'g'
except:
# Only one root - take it and set the prefered other phase to be a different type
return (self.fugacities_g, 'g') if hasattr(self, 'Z_g') else (self.fugacities_l, 'l')
except:
self.fugacities()
return self._eos_fugacities_lowest_Gibbs()
def _dphi_dn(self, zi, i, phase):
# obsolete, should be deleted
z_copy = list(self.zs)
z_copy.pop(i)
z_sum = sum(z_copy) + zi
z_copy = [j/z_sum if j else 0 for j in z_copy]
z_copy.insert(i, zi)
eos = self.to_TP_zs(self.T, self.P, z_copy)
if phase == 'g':
return eos.phis_g[i]
elif phase == 'l':
return eos.phis_l[i]
def _dfugacity_dn(self, zi, i, phase):
# obsolete, should be deleted
z_copy = list(self.zs)
z_copy.pop(i)
z_sum = sum(z_copy) + zi
z_copy = [j/z_sum if j else 0 for j in z_copy]
z_copy.insert(i, zi)
eos = self.to_TP_zs(self.T, self.P, z_copy)
if phase == 'g':
return eos.fugacities_g[i]
elif phase == 'l':
return eos.fugacities_l[i]
def _Stateva_Tsvetkov_TPDF_broken(self, Zz, Zy, zs, ys):
# TODO: delete
z_log_fugacity_coefficients = self.fugacity_coefficients(Zz)
y_log_fugacity_coefficients = self.fugacity_coefficients(Zy)
kis = []
for yi, phi_yi, zi, phi_zi in zip(ys, y_log_fugacity_coefficients, zs, z_log_fugacity_coefficients):
di = log(zi) + phi_zi
try:
ki = phi_yi + log(yi) - di
except ValueError:
ki = phi_yi + log(1e-200) - di
kis.append(ki)
kis.append(kis[0])
tot = 0.0
for i in range(self.N):
t = kis[i+1] - kis[i]
tot += t*t
return tot
def _d_TPD_Michelson_modified(self, Zz, Zy, zs, alphas):
r'''Modified objective function for locating the minima of the
Tangent Plane Distance function according to [1]_, also shown in [2]_
[2]_. The stationary points of a system are all zeros of this function;
so once all zeroes have been located, the stability can be evaluated
at the stationary points only. It may be required to use multiple
guesses to find all stationary points, and there is no method of
confirming all points have been found.
This method does not alter the state of the object.
.. math::
\frac{\partial \; TPD^*}{\partial \alpha_i} = \sqrt{Y_i} \left[
\ln \phi_i(Y) + \ln(Y_i) - h_i\right]
.. math::
\alpha_i = 2 \sqrt{Y_i}
.. math::
d_i(z) = \ln z_i + \ln \phi_i(z)
Parameters
----------
Zz : float
Compressibility factor of the phase undergoing stability testing,
(`test` phase), [-]
Zy : float
Compressibility factor of the trial phase, [-]
zs : list[float]
Mole fraction composition of the phase undergoing stability
testing (`test` phase), [-]
alphas : list[float]
Twice the square root of the mole numbers of each component,
[mol^0.5]
Returns
-------
err : float
Error in solving for stationary points according to the modified
TPD method in [1]_, [-]
Notes
-----
This method is particularly useful because it is not a constrained
objective function. This has been verified to return the same roots as
other stationary point methods.
References
----------
.. [1] Michelsen, Michael L. "The Isothermal Flash Problem. Part I.
Stability." Fluid Phase Equilibria 9, no. 1 (December 1982): 1-19.
.. [2] Qiu, Lu, Yue Wang, Qi Jiao, Hu Wang, and Rolf D. Reitz.
"Development of a Thermodynamically Consistent, Robust and Efficient
Phase Equilibrium Solver and Its Validations." Fuel 115 (January 1,
2014): 1-16
'''
# TODO: delete
Ys = [(alpha/2.)**2 for alpha in alphas]
ys = normalize(Ys)
z_log_fugacity_coefficients = self.fugacity_coefficients(Zz)
y_log_fugacity_coefficients = self.fugacity_coefficients(Zy)
tot = 0
for Yi, phi_yi, zi, phi_zi in zip(Ys, y_log_fugacity_coefficients, zs, z_log_fugacity_coefficients):
di = log(zi) + phi_zi
if Yi != 0:
diff = Yi**0.5*(log(Yi) + phi_yi - di)
tot += abs(diff)
return tot
# def TDP_Michelsen(self, phase):
#
# z_log_fugacity_coefficients = self.fugacity_coefficients(Zz, zs)
# y_log_fugacity_coefficients = self.fugacity_coefficients(Zy, ys)
# tot = 0
# for yi, phi_yi, zi, phi_zi in zip(ys, y_log_fugacity_coefficients, zs, z_log_fugacity_coefficients):
# hi = di = log(zi) + phi_zi # same as di
#
# k = log(yi) + phi_yi - hi
# # Michaelsum doesn't do the exponents.
# Yi = exp(-k)*yi
# tot += Yi*(log(Yi) + phi_yi - hi - 1.)
#
# return 1. + tot
# def TDP_Michelsen_modified(self, Zz, Zy, zs, Ys):
# # https://www.e-education.psu.edu/png520/m17_p7.html
# # Might as well continue
# Ys = [abs(float(Yi)) for Yi in Ys]
# # Ys only need to be positive
# ys = normalize(Ys)
#
# z_log_fugacity_coefficients = self.fugacity_coefficients(Zz, zs)
# y_log_fugacity_coefficients = self.fugacity_coefficients(Zy, ys)
#
# tot = 0
# for Yi, phi_yi, yi, zi, phi_zi in zip(Ys, y_log_fugacity_coefficients, ys, zs, z_log_fugacity_coefficients):
# hi = di = log(zi) + phi_zi # same as di
# tot += Yi*(log(Yi) + phi_yi - di - 1.)
# return (1. + tot)
# # Another formulation, returns the same answers.
## tot += yi*(log(sum(Ys)) +log(yi)+ log(phi_yi) - di - 1.)
## return (1. + sum(Ys)*tot)*1e15
    def solve_T(self, P, V, quick=True, solution=None):
        r'''Generic method to calculate `T` from a specified `P` and `V`.
        Provides SciPy's `newton` solver, and iterates to solve the general
        equation for `P`, recalculating `a_alpha` as a function of temperature
        using :obj:`a_alpha_and_derivatives <GCEOSMIX.a_alpha_and_derivatives>` each iteration.
        Parameters
        ----------
        P : float
            Pressure, [Pa]
        V : float
            Molar volume, [m^3/mol]
        quick : bool, optional
            Unimplemented, although it may be possible to derive explicit
            expressions as done for many pure-component EOS
        solution : str or None, optional
            'l' or 'g' to specify a liquid or vapor solution (if one exists);
            if None, will select a solution more likely to be real (closer to
            STP, attempting to avoid temperatures like 60000 K or 0.0001 K).
        Returns
        -------
        T : float
            Temperature, [K]
        '''
        # Delegate to an ancestor's solve_T: __mro__[-3] selects the class
        # two positions before `object` in the MRO (presumably GCEOS - TODO
        # confirm; the original note said "-4 goes back from object, GCEOS",
        # which does not match the -3 index actually used).
        return super(type(self).__mro__[-3], self).solve_T(P=P, V=V, solution=solution)
    def _err_VL_jacobian(self, lnKsVF, T, P, zs, near_critical=False,
                         err_also=False, info=None):
        '''Analytical Jacobian (and optionally the residual vector) of the
        vapor-liquid equilibrium system in the variables
        ``[lnK_1 .. lnK_N, V/F]`` at fixed `T`, `P` and feed composition
        `zs`.

        When `near_critical` is True, a missing phase root falls back to the
        other available root instead of raising. If `info` is a list it is
        overwritten in-place with ``(VF, xs, ys, eos_l, eos_g)``. Returns
        ``(Fs, J)`` when `err_also` is True, otherwise `J` alone.
        '''
        if info is None:
            info = []
        N = self.N
        # Unpack the iteration variables: log K-factors plus vapor fraction
        lnKs = lnKsVF[:-1]
        Ks = [exp(lnKi) for lnKi in lnKs]
        VF = float(lnKsVF[-1])
        # Rachford-Rice phase compositions implied by (Ks, VF)
        xs = [zi/(1.0 + VF*(Ki - 1.0)) for zi, Ki in zip(zs, Ks)]
        ys = [Ki*xi for Ki, xi in zip(Ks, xs)]
        eos_g = self.to_TP_zs_fast(T=T, P=P, zs=ys, only_g=True) #
        eos_l = self.to_TP_zs_fast(T=T, P=P, zs=xs, only_l=True) #
#        eos_g = self.to_TP_zs(T=T, P=P, zs=ys)
#        eos_l = self.to_TP_zs(T=T, P=P, zs=xs)
        if not near_critical:
#            lnphis_g = eos_g.lnphis_g
#            lnphis_l = eos_l.lnphis_l
            Z_g = eos_g.Z_g
            Z_l = eos_l.Z_l
        else:
            # Near the critical point a phase may lack its own root; fall
            # back to whichever root the EOS produced.
            try:
#                lnphis_g = eos_g.lnphis_g
                Z_g = eos_g.Z_g
            except AttributeError:
#                lnphis_g = eos_g.lnphis_l
                Z_g = eos_g.Z_l
            try:
#                lnphis_l = eos_l.lnphis_l
                Z_l = eos_l.Z_l
            except AttributeError:
#                lnphis_l = eos_l.lnphis_g
                Z_l = eos_l.Z_g
        lnphis_g = eos_g.fugacity_coefficients(Z_g)
        lnphis_l = eos_l.fugacity_coefficients(Z_l)
        size = N + 1
        J = [[None]*size for i in range(size)]
#        d_lnphi_dzs_basic_num
#        d_lnphi_dxs = eos_l.d_lnphi_dzs_basic_num(Z_l, xs)
#        d_lnphi_dys = eos_g.d_lnphi_dzs_basic_num(Z_g, ys)
        d_lnphi_dxs = eos_l.dlnphis_dzs(Z_l)
        d_lnphi_dys = eos_g.dlnphis_dzs(Z_g)
#        # Handle the zeros and the ones
        # Half of this is probably wrong! Only gets set for one set of variables?
        # Numerical jacobian not good enough to tell
#        for i in range(self.N):
#            J[i][-2] = 0.0
#            J[-2][i] = 0.0
        # dF(Rachford-Rice)/d(V/F) trivially occupies the corner element
        J[N][N] = 1.0
        # Last column except last value; believed correct
        # Was not correct when compared to numerical solution
        Ksm1 = [Ki - 1.0 for Ki in Ks]
        RR_denoms_inv2 = []
        for i in range(N):
            t = 1.0 + VF*Ksm1[i]
            RR_denoms_inv2.append(1.0/(t*t))
        RR_terms = [zs[k]*Ksm1[k]*RR_denoms_inv2[k] for k in range(N)]
        for i in range(N):
            value = 0.0
            d_lnphi_dxs_i, d_lnphi_dys_i = d_lnphi_dxs[i], d_lnphi_dys[i]
            for k in range(N):
                # pretty sure indexing is right in the below expression
                value += RR_terms[k]*(d_lnphi_dxs_i[k] - Ks[k]*d_lnphi_dys_i[k])
            J[i][-1] = value
#        def delta(k, j):
#            if k == j:
#                return 1.0
#            return 0.0
        # Main body - expensive to compute! Lots of elements
        # Can flip around the indexing of i, j on the d_lnphi_ds but still no fix
        # unsure of correct order!
        # Reveals bugs in d_lnphi_dxs though.
        zsKsRRinvs2 = [zs[j]*Ks[j]*RR_denoms_inv2[j] for j in range(N)]
        one_m_VF = 1.0 - VF
        for i in range(N): # to N is CORRECT/MATCHES JACOBIAN NUMERICALLY
            Ji = J[i]
            d_lnphi_dxs_is, d_lnphi_dys_is = d_lnphi_dxs[i], d_lnphi_dys[i]
            for j in range(N): # to N is CORRECT/MATCHES JACOBIAN NUMERICALLY
                # Kronecker delta contribution on the diagonal
                value = 1.0 if i == j else 0.0
#                value = 0.0
#                value += delta(i, j)
#                print(i, j, value)
                # Maybe if i == j, can skip the bit below? Tried it once and the solver never converged
#                term = zs[j]*Ks[j]*RR_denoms_inv2[j]
                value += zsKsRRinvs2[j]*(VF*d_lnphi_dxs_is[j] + one_m_VF*d_lnphi_dys_is[j])
                Ji[j] = value
        # Last row except last value - good, working
        # Diff of RR w.r.t each log K
        bottom_row = J[-1]
        for j in range(N):
#            value = 0.0
#            RR_l = -Ks[j]*zs[j]*VF/(1.0 + VF*(Ks[j] - 1.0))**2.0
#            RR_g = Ks[j]*(1.0 - VF)*zs[j]/(1.0 + VF*(Ks[j] - 1.0))**2.0
#            value += # -RR_l
            bottom_row[j] = zsKsRRinvs2[j]*(one_m_VF) + VF*zsKsRRinvs2[j]
#        # Last row except last value - good, working (alternative form)
#        bottom_row = J[-1]
#        for j in range(self.N):
#            value = 0.0
#            for k in range(self.N):
#                if k == j:
#                    RR_l = -Ks[j]*zs[k]*VF/(1.0 + VF*(Ks[k] - 1.0))**2.0
#                    RR_g = Ks[j]*(1.0 - VF)*zs[k]/(1.0 + VF*(Ks[k] - 1.0))**2.0
#                    value += RR_g - RR_l
#            bottom_row[j] = value
        # Last value - good, working, being overwritten
        dF_ncp1_dB = 0.0
        for i in range(N):
            dF_ncp1_dB -= RR_terms[i]*Ksm1[i]
        J[-1][-1] = dF_ncp1_dB
        # Publish the intermediate state for the caller
        info[:] = VF, xs, ys, eos_l, eos_g
        if err_also:
            err_RR = Rachford_Rice_flash_error(VF, zs, Ks)
            Fs = [lnKi - lnphi_l + lnphi_g for lnphi_l, lnphi_g, lnKi in zip(lnphis_l, lnphis_g, lnKs)]
            Fs.append(err_RR)
            return Fs, J
        return J
def _err_VL(self, lnKsVF, T, P, zs, near_critical=False, info=None):
# import numpy as np
# tried autograd without luck
lnKs = lnKsVF[:-1]
# if isinstance(lnKs, np.ndarray):
# lnKs = lnKs.tolist()
# Ks = np.exp(lnKs)
Ks = [exp(lnKi) for lnKi in lnKs]
VF = float(lnKsVF[-1])
# VF = lnKsVF[-1]
if info is None:
info = []
xs = [zi/(1.0 + VF*(Ki - 1.0)) for zi, Ki in zip(zs, Ks)]
ys = [Ki*xi for Ki, xi in zip(Ks, xs)]
err_RR = Rachford_Rice_flash_error(VF, zs, Ks)
eos_g = self.to_TP_zs_fast(T=T, P=P, zs=ys, only_g=True) #
eos_g.fugacities()
eos_l = self.to_TP_zs_fast(T=T, P=P, zs=xs, only_l=True) #
eos_l.fugacities()
if not near_critical:
lnphis_g = eos_g.lnphis_g
lnphis_l = eos_l.lnphis_l
else:
try:
lnphis_g = eos_g.lnphis_g
except AttributeError:
lnphis_g = eos_g.lnphis_l
try:
lnphis_l = eos_l.lnphis_l
except AttributeError:
lnphis_l = eos_l.lnphis_g
# Fs = [fl/fg-1.0 for fl, fg in zip(fugacities_l, fugacities_g)]
Fs = [lnKi - lnphi_l + lnphi_g for lnphi_l, lnphi_g, lnKi in zip(lnphis_l, lnphis_g, lnKs)]
Fs.append(err_RR)
info[:] = VF, xs, ys, eos_l, eos_g
return Fs
    def sequential_substitution_3P(self, Ks_y, Ks_z, beta_y, beta_z=0.0,
                                   maxiter=1000,
                                   xtol=1E-13, near_critical=True,
                                   xs=None, ys=None, zs=None,
                                   trivial_solution_tol=1e-5):
        # NOTE(review): this method appears unfinished. `lnphis_x`,
        # `lnphis_y` and `lnphis_z` used below are never defined, so the
        # final two statements raise NameError at runtime; debug prints also
        # remain, the loop implied by maxiter/xtol is absent, and nothing is
        # returned. Do not call until it is completed.
        print(Ks_y, Ks_z, beta_y, beta_z)
        beta_y, beta_z, xs_new, ys_new, zs_new = Rachford_Rice_solution2(ns=self.zs,
                                                                         Ks_y=Ks_y, Ks_z=Ks_z,
                                                                         beta_y=beta_y, beta_z=beta_z)
        print(beta_y, beta_z, xs_new, ys_new, zs_new)
        Ks_y = [exp(lnphi_x - lnphi_y) for lnphi_x, lnphi_y in zip(lnphis_x, lnphis_y)]
        Ks_z = [exp(lnphi_x - lnphi_z) for lnphi_x, lnphi_z in zip(lnphis_x, lnphis_z)]
def newton_VL(self, Ks_initial=None, maxiter=30,
ytol=1E-7, near_critical=True,
xs=None, ys=None, V_over_F=None):
T, P, zs = self.T, self.P, self.zs
if xs is not None and ys is not None and V_over_F is not None:
pass
else:
if Ks_initial is None:
Ks = [Wilson_K_value(T, P, Tci, Pci, omega) for Pci, Tci, omega in zip(self.Pcs, self.Tcs, self.omegas)]
else:
Ks = Ks_initial
V_over_F, xs, ys = flash_inner_loop(zs, Ks)
lnKs_guess = [log(yi/xi) for yi, xi in zip(ys, xs)] + [V_over_F]
info = []
def err_and_jacobian(lnKs_guess):
err = self._err_VL_jacobian(lnKs_guess, T, P, zs, near_critical=True, err_also=True, info=info)
# print(lnKs_guess[-1], err[0])
return err
ans, count = newton_system(err_and_jacobian, jac=True, x0=lnKs_guess, ytol=ytol, maxiter=maxiter)
V_over_F, xs, ys, eos_l, eos_g = info
return V_over_F, xs, ys, eos_l, eos_g
def broyden2_VL(self, Ks_initial=None, maxiter=30,
ytol=1E-7, xtol=1e-8, near_critical=True,
xs=None, ys=None, V_over_F=None):
T, P, zs = self.T, self.P, self.zs
if xs is not None and ys is not None and V_over_F is not None:
pass
else:
if Ks_initial is None:
Ks = [Wilson_K_value(T, P, Tci, Pci, omega) for Pci, Tci, omega in zip(self.Pcs, self.Tcs, self.omegas)]
else:
Ks = Ks_initial
V_over_F, xs, ys = flash_inner_loop(zs, Ks)
lnKs_guess = [log(yi/xi) for yi, xi in zip(ys, xs)] + [V_over_F]
info = []
def err_and_jacobian(lnKs_guess):
err = self._err_VL_jacobian(lnKs_guess, T, P, zs, near_critical=near_critical, err_also=True, info=info)
# print(lnKs_guess[-1], err[0])
return err[0], err[1]
def err(lnKs_guess):
err = self._err_VL(lnKs_guess, T, P, zs, near_critical=near_critical, info=info)
# print(lnKs_guess[-1], err[0])
return err
ans, count = broyden2(fun=err, jac=err_and_jacobian, xs=lnKs_guess, xtol=xtol, maxiter=maxiter, jac_has_fun=True, skip_J=True)
V_over_F, xs, ys, eos_l, eos_g = info
return V_over_F, xs, ys, eos_l, eos_g, count
    def sequential_substitution_VL(self, Ks_initial=None, maxiter=1000,
                                   xtol=1E-13, near_critical=True, Ks_extra=None,
                                   xs=None, ys=None, trivial_solution_tol=1e-5, info=None,
                                   full_alphas=False):
        '''Two-phase vapor-liquid successive-substitution flash at the
        object's current `T` and `P`. K-factors are updated each iteration
        from the phase log fugacity coefficients until the fugacity-ratio
        error drops below `xtol`.

        Initial compositions may be supplied via `xs`/`ys`; otherwise they
        are generated from `Ks_initial` (or Wilson K-values), with `Ks_extra`
        as fallback guesses if Rachford-Rice fails. Returns
        ``(V_over_F, xs, ys, eos_l, eos_g)``. Raises ValueError on
        convergence to the trivial solution (checked only when
        `near_critical`) or on hitting `maxiter`. If `info` is a list it is
        overwritten with ``(iteration_count, final_error)``.
        '''
#        print(self.zs, Ks)
        T, P, zs = self.T, self.P, self.zs
        V_over_F = None
        if xs is not None and ys is not None:
            pass
        else:
            # TODO use flash_wilson here
            if Ks_initial is None:
                Ks = [Wilson_K_value(T, P, Tci, Pci, omega) for Pci, Tci, omega in zip(self.Pcs, self.Tcs, self.omegas)]
            else:
                Ks = Ks_initial
            xs = None
            try:
                V_over_F, xs, ys = flash_inner_loop(zs, Ks)
            except ValueError as e:
                # Primary K guess failed Rachford-Rice; try the extras
                if Ks_extra is not None:
                    for Ks in Ks_extra:
                        try:
                            V_over_F, xs, ys = flash_inner_loop(zs, Ks)
                            break
                        except ValueError as e:
                            pass
                if xs is None:
                    raise(e)
#        print(xs, ys, 'innerloop')
        for i in range(maxiter):
            if not near_critical:
                eos_g = self.to_TP_zs_fast(T=T, P=P, zs=ys, only_l=False, only_g=True, full_alphas=full_alphas)
                eos_l = self.to_TP_zs_fast(T=T, P=P, zs=xs, only_l=True, only_g=False, full_alphas=full_alphas)
                lnphis_g = eos_g.fugacity_coefficients(eos_g.Z_g)
                lnphis_l = eos_l.fugacity_coefficients(eos_l.Z_l)
            else:
                eos_g = self.to_TP_zs_fast(T=T, P=P, zs=ys, only_l=False, only_g=True, full_alphas=full_alphas)
                eos_l = self.to_TP_zs_fast(T=T, P=P, zs=xs, only_l=True, only_g=False, full_alphas=full_alphas)
                # Near the critical point a phase may lack its own root;
                # fall back to whichever root the EOS produced
                try:
                    lnphis_g = eos_g.fugacity_coefficients(eos_g.Z_g)
                except AttributeError:
                    lnphis_g = eos_g.fugacity_coefficients(eos_g.Z_l)
                try:
                    lnphis_l = eos_l.fugacity_coefficients(eos_l.Z_l)
                except AttributeError:
                    lnphis_l = eos_l.fugacity_coefficients(eos_l.Z_g)
            # (A large block of experimental, commented-out root-selection
            # code - tracking previous Z values and Gibbs-energy-based phase
            # picks - previously lived here; condensed to this note.)
            Ks = [exp(l - g) for l, g in zip(lnphis_l, lnphis_g)] # K_value(phi_l=l, phi_g=g)
#            print(Ks)
            # Hack - no idea if this will work
#            maxK = max(Ks)
#            if maxK < 1:
#                Ks[Ks.index(maxK)] = 1.1
#            minK = min(Ks)
#            if minK >= 1:
#                Ks[Ks.index(minK)] = .9
#            print(Ks, 'Ks into RR')
            V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F)
#            if any(i < 0 for i in xs_new):
#                print('hil', xs_new)
#            if any(i < 0 for i in ys_new):
#                print('hig', ys_new)
            # Replace any negative compositions with their absolute values
            # and renormalize
            for xi in xs_new:
                if xi < 0.0:
                    xs_new_sum = sum(abs(i) for i in xs_new)
                    xs_new = [abs(i)/xs_new_sum for i in xs_new]
                    break
            for yi in ys_new:
                if yi < 0.0:
                    ys_new_sum = sum(abs(i) for i in ys_new)
                    ys_new = [abs(i)/ys_new_sum for i in ys_new]
                    break
            # Claimed error function in CONVENTIONAL AND RAPID FLASH CALCULATIONS FOR THE SOAVE-REDLICH-KWONG AND PENG-ROBINSON EQUATIONS OF STATE
            err3 = 0.0
            # Suggested tolerance 1e-15
            for Ki, xi, yi in zip(Ks, xs, ys):
                # equivalent of fugacity ratio
                # Could divide by the old Ks as well.
                err_i = Ki*xi/yi - 1.0
                err3 += err_i*err_i
            # or use absolute for tolerance...
#            err2 = 0.0
#            for l, g in zip(fugacities_l, fugacities_g):
#                err_i = (l/g-1.0)
#                err2 += err_i*err_i
            # This is a better metric because it does not involve hysterisis
#            err = (sum([abs(x_new - x_old) for x_new, x_old in zip(xs_new, xs)]) +
#                   sum([abs(y_new - y_old) for y_new, y_old in zip(ys_new, ys)]))
            xs, ys = xs_new, ys_new
#            print(i, 'err', err, err2, 'xs, ys', xs, ys, 'VF', V_over_F)
            if near_critical:
                # Guard against collapse onto identical phase compositions
                comp_difference = sum([abs(xi - yi) for xi, yi in zip(xs, ys)])
                if comp_difference < trivial_solution_tol:
                    raise ValueError("Converged to trivial condition, compositions of both phases equal")
            if err3 < xtol:
                break
            if i == maxiter-1:
                raise ValueError('End of SS without convergence')
        if info is not None:
            info[:] = (i, err3)
        return V_over_F, xs, ys, eos_l, eos_g
    def stabiliy_iteration_Michelsen(self, T, P, zs, Ks_initial=None,
                                    maxiter=20, xtol=1E-12, liq=True):
        '''One-sided Michelsen stability iteration: introduces a trial phase
        (liquid when `liq` is True, otherwise vapor) against the current
        composition `zs` and successively substitutes the K-factors until the
        correction factors converge to within `xtol`.

        Returns ``(sum_zs_test, Ks, same_phase)``, where `same_phase` is True
        when the trial phase converged onto the reference phase type.
        NOTE(review): the method name is misspelled ("stabiliy") but is kept,
        since callers reference it by this exact name.
        '''
        # checks stability vs. the current zs, mole fractions
        # liq: whether adding a test liquid phase to see if is stable or not
        eos_ref = self#.to_TP_zs(T=T, P=P, zs=zs)
        # If one phase is present - use that phase as the reference phase.
        # Otherwise, consider the phase with the lowest Gibbs excess energy as
        # the stable phase
        fugacities_ref, fugacities_ref_phase = eos_ref._eos_fugacities_lowest_Gibbs()
        if Ks_initial is None:
            Ks = [Wilson_K_value(T, P, Tci, Pci, omega) for Pci, Tci, omega in zip(self.Pcs, self.Tcs, self.omegas)]
        else:
            Ks = Ks_initial
        same_phase_count = 0.0
        for _ in range(maxiter):
            # Trial-phase mole numbers from the current K-factors
            if liq:
                zs_test = [zi/Ki for zi, Ki in zip(zs, Ks)]
            else:
                zs_test = [zi*Ki for zi, Ki in zip(zs, Ks)]
            sum_zs_test = sum(zs_test)
            zs_test_normalized = [zi/sum_zs_test for zi in zs_test]
            # IT IS NOT PERMISSIBLE TO DO ONLY ONE ROOT! 2019-03-20
            # Breaks lots of stabilities.
            eos_test = self.to_TP_zs_fast(T=T, P=P, zs=zs_test_normalized, only_l=False, only_g=False, full_alphas=False)
            fugacities_test, fugacities_phase = eos_test._eos_fugacities_lowest_Gibbs()
            if fugacities_ref_phase == fugacities_phase:
                same_phase_count += 1.0
            else:
                same_phase_count = 0
            # Successive-substitution correction factors for the K-values
            if liq:
                corrections = [fi/f_ref*sum_zs_test for fi, f_ref in zip(fugacities_test, fugacities_ref)]
            else:
                corrections = [f_ref/(fi*sum_zs_test) for fi, f_ref in zip(fugacities_test, fugacities_ref)]
            Ks = [Ki*corr for Ki, corr in zip(Ks, corrections)]
            corrections_minus_1 = [corr - 1.0 for corr in corrections]
            err = sum([ci*ci for ci in corrections_minus_1])
            if err < xtol:
                break
#            elif same_phase_count > 5:
#                break
            # It is possible to break if the trivial solution is being approached here also
            if _ == maxiter-1 and fugacities_ref_phase != fugacities_phase:
                raise UnconvergedError('End of stability_iteration_Michelsen without convergence')
        # Fails directly if fugacities_ref_phase == fugacities_phase
        # Fugacity error:
        # no, the fugacities are not supposed to be equal
#        err_equifugacity = 0
#        for fi, fref in zip(fugacities_test, fugacities_ref):
#            err_equifugacity += abs(fi - fref)
#        if err_equifugacity/P > 1e-3:
#            sum_zs_test = 1
        return sum_zs_test, Ks, fugacities_ref_phase == fugacities_phase
    def stability_Michelsen(self, T, P, zs, Ks_initial=None, maxiter=20,
                            xtol=1E-12, trivial_criteria=1E-4,
                            stable_criteria=1E-7):
        '''Full Michelsen stability test: runs the one-sided trial-phase
        iteration for both a trial vapor and a trial liquid, then combines
        the outcomes to decide whether the mixture `zs` is stable as a single
        phase at `T` and `P`.

        Returns ``(stable, Ks, [Ks_g, Ks_l])``; when unstable, `Ks` is a
        combined K-factor guess suitable for a subsequent flash.
        '''
        if Ks_initial is None:
            Ks = [Wilson_K_value(T, P, Tci, Pci, omega) for Pci, Tci, omega in zip(self.Pcs, self.Tcs, self.omegas)]
        else:
            Ks = Ks_initial
        # Trial vapor phase test
        zs_sum_g, Ks_g, phase_failure_g = self.stabiliy_iteration_Michelsen(T=T, P=P, zs=zs, Ks_initial=Ks,
                                                              maxiter=maxiter, xtol=xtol, liq=False)
        # Trial liquid phase test
        zs_sum_l, Ks_l, phase_failure_l = self.stabiliy_iteration_Michelsen(T=T, P=P, zs=zs, Ks_initial=Ks,
                                                               maxiter=maxiter, xtol=xtol, liq=True)
        log_Ks_g = [log(Ki) for Ki in Ks_g]
        log_Ks_l = [log(Ki) for Ki in Ks_l]
        lnK_2_tot_g = sum(log_Ki*log_Ki for log_Ki in log_Ks_g)
        lnK_2_tot_l = sum(log_Ki*log_Ki for log_Ki in log_Ks_l)
        sum_g_criteria = zs_sum_g - 1.0
        sum_l_criteria = zs_sum_l - 1.0
        trivial_g, trivial_l = False, False
        # A near-zero sum of squared log K means the iteration collapsed to
        # the trivial (identical-composition) solution
        if lnK_2_tot_g < trivial_criteria:
            trivial_g = True
        if lnK_2_tot_l < trivial_criteria:
            trivial_l = True
        stable = False
        # Table 4.6 Summary of Possible Phase Stability Test Results,
        # Phase Behavior, Whitson and Brule
        # There is a typo where Sl appears in the vapor column; this should be
        # liquid; as shown in https://www.e-education.psu.edu/png520/m17_p7.html
        g_pass, l_pass = False, False # pass means this phase cannot form another phase
        if phase_failure_g:
            g_pass = True
        if phase_failure_l:
            l_pass = True
        if trivial_g:
            g_pass = True
        if trivial_l:
            l_pass = True
        if sum_g_criteria < stable_criteria:
            g_pass = True
        if sum_l_criteria < stable_criteria:
            l_pass = True
#        print(l_pass, g_pass, 'l, g test show stable')
        # Combine the per-trial-phase outcomes into the overall verdict
        if phase_failure_g and phase_failure_l:
            stable = True
        elif trivial_g and trivial_l:
            stable = True
        elif sum_g_criteria < stable_criteria and trivial_l:
            stable = True
        elif trivial_g and sum_l_criteria < stable_criteria:
            stable = True
        elif sum_g_criteria < stable_criteria and sum_l_criteria < stable_criteria:
            stable = True
        # These last two are custom, and it is apparent since they are bad
        # Also did not document well enough the cases they fail in
        # Disabled 2018-12-29
#        elif trivial_l and sum_l_criteria < stable_criteria:
#            stable = True
#        elif trivial_g and sum_g_criteria < stable_criteria:
#            stable = True
        # No need to enumerate unstable results
        if not stable: # One set may be trivial, which means the other set is approx
            # the only use used
            Ks = [K_g*K_l for K_g, K_l in zip(Ks_g, Ks_l)]
        return stable, Ks, [Ks_g, Ks_l]
def _V_over_F_bubble_T_inner(self, T, P, zs, maxiter=20, xtol=1E-3):
eos_l = self.to_TP_zs(T=T, P=P, zs=zs)
if not hasattr(eos_l, 'V_l'):
raise ValueError('At the specified temperature, there is no liquid root')
Ks = [Wilson_K_value(T, P, Tci, Pci, omega) for Pci, Tci, omega in zip(self.Pcs, self.Tcs, self.omegas)]
V_over_F, xs, ys = flash_inner_loop(zs, Ks)
for i in range(maxiter):
eos_g = self.to_TP_zs(T=T, P=P, zs=ys)
if not hasattr(eos_g, 'V_g'):
phis_g = eos_g.phis_l
fugacities_g = eos_g.fugacities_l
else:
phis_g = eos_g.phis_g
fugacities_g = eos_g.fugacities_g
Ks = [K_value(phi_l=l, phi_g=g) for l, g in zip(eos_l.phis_l, phis_g)]
V_over_F, xs, ys = flash_inner_loop(zs, Ks)
err = sum([abs(i-j) for i, j in zip(eos_l.fugacities_l, fugacities_g)])
if err < xtol:
break
if not hasattr(eos_g, 'V_g'):
raise ValueError('At the specified temperature, the solver did not converge to a vapor root')
return V_over_F
# raise Exception('Could not converge to desired tolerance')
def _V_over_F_dew_T_inner(self, T, P, zs, maxiter=20, xtol=1E-10):
eos_g = self.to_TP_zs(T=T, P=P, zs=zs)
if not hasattr(eos_g, 'V_g'):
raise ValueError('At the specified temperature, there is no vapor root')
Ks = [Wilson_K_value(T, P, Tci, Pci, omega) for Pci, Tci, omega in zip(self.Pcs, self.Tcs, self.omegas)]
V_over_F, xs, ys = flash_inner_loop(zs, Ks)
for i in range(maxiter):
eos_l = self.to_TP_zs(T=T, P=P, zs=xs)
if not hasattr(eos_l, 'V_l'):
phis_l = eos_l.phis_g
fugacities_l = eos_l.fugacities_g
else:
phis_l = eos_l.phis_l
fugacities_l = eos_l.fugacities_l
Ks = [K_value(phi_l=l, phi_g=g) for l, g in zip(phis_l, eos_g.phis_g)]
V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks)
err = (sum([abs(x_new - x_old) for x_new, x_old in zip(xs_new, xs)]) +
sum([abs(y_new - y_old) for y_new, y_old in zip(ys_new, ys)]))
xs, ys = xs_new, ys_new
if xtol < 1E-10:
break
if not hasattr(eos_l, 'V_l'):
raise ValueError('At the specified temperature, the solver did not converge to a liquid root')
return V_over_F-1.0
# return abs(V_over_F-1)
    def _V_over_F_dew_T_inner_accelerated(self, T, P, zs, maxiter=20, xtol=1E-10):
        '''This is not working.

        Attempted acceleration of the dew-point successive substitution:
        after the third iteration, while the vapor fraction is interior to
        (0, 1), K-factors are updated as ``K*R**lambda`` instead of the plain
        substitution. Returns ``V_over_F_new - 1.0`` (zero at the dew point).
        '''
        eos_g = self.to_TP_zs(T=T, P=P, zs=zs)
        if not hasattr(eos_g, 'V_g'):
            raise ValueError('At the specified temperature, there is no vapor root')
        Ks = [Wilson_K_value(T, P, Tci, Pci, omega) for Pci, Tci, omega in zip(self.Pcs, self.Tcs, self.omegas)]
        V_over_F_new, xs, ys = flash_inner_loop(zs, Ks)
        for i in range(maxiter):
            eos_l = self.to_TP_zs(T=T, P=P, zs=xs)
            if not hasattr(eos_l, 'V_l'):
                # No liquid root; use the vapor-root properties instead
                phis_l = eos_l.phis_g
                fugacities_l = eos_l.fugacities_g
            else:
                phis_l = eos_l.phis_l
                fugacities_l = eos_l.fugacities_l
            if 0.0 < V_over_F_new < 1.0 and i > 2:
                # Accelerated K update with per-component exponents
                Rs = [K_value(phi_l=l, phi_g=g) for l, g in zip(phis_l, eos_g.phis_g)]
                lambdas = [(Ki - 1.0)/(Ki - Rri) for Rri, Ki in zip(Rs, Ks)]
                Ks = [Ki*Ri**lambda_i for Ki, Ri, lambda_i in zip(Ks, Rs, lambdas)]
            else:
                Ks = [K_value(phi_l=l, phi_g=g) for l, g in zip(phis_l, eos_g.phis_g)]
            V_over_F_new, xs_new, ys_new = flash_inner_loop(zs, Ks)
            err_new = (sum([abs(x_new - x_old) for x_new, x_old in zip(xs_new, xs)]) +
                   sum([abs(y_new - y_old) for y_new, y_old in zip(ys_new, ys)]))
            xs, ys = xs_new, ys_new
            V_over_F_old = V_over_F_new
            # NOTE(review): `err_old` is assigned unconditionally right after
            # this conditional, making the `i == 0` branch dead code; neither
            # `err_old` nor `V_over_F_old` is ever read afterwards.
            if i == 0:
                err_old = err_new
            err_old = err_new
            if err_new < xtol:
                break
        if not hasattr(eos_l, 'V_l'):
            raise ValueError('At the specified temperature, the solver did not converge to a liquid root')
        return V_over_F_new-1.0
#        return abs(V_over_F-1)
# def _a_alpha_j_rows(self):
## try:
## return self.a_alpha_j_rows
## except:
## pass
# zs = self.zs
# N = self.N
# a_alpha_ijs = self.a_alpha_ijs
# a_alpha_j_rows = []
# for i in range(N):
# l = a_alpha_ijs[i]
# sum_term = 0.0
# for j in range(N):
# sum_term += zs[j]*l[j]
# a_alpha_j_rows.append(sum_term)
# self.a_alpha_j_rows = a_alpha_j_rows
# return a_alpha_j_rows
@property
def _a_alpha_j_rows(self):
try:
return self.a_alpha_j_rows
except:
pass
zs, N = self.zs, self.N
a_alpha_ijs = self.a_alpha_ijs
if self.scalar:
a_alpha_j_rows = [0.0]*N
else:
a_alpha_j_rows = zeros(N)
for i in range(N):
l = a_alpha_ijs[i]
for j in range(i):
a_alpha_j_rows[j] += zs[i]*l[j]
a_alpha_j_rows[i] += zs[j]*l[j]
a_alpha_j_rows[i] += zs[i]*l[i]
self.a_alpha_j_rows = a_alpha_j_rows
return a_alpha_j_rows
def _set_alpha_matrices(self):
try:
a_alpha_ijs, a_alpha_roots, a_alpha_ij_roots_inv = a_alpha_aijs_composition_independent(self.a_alphas, self.kijs)
except ZeroDivisionError:
a_alpha_ijs, a_alpha_roots, a_alpha_ij_roots_inv = a_alpha_aijs_composition_independent_support_zeros(self.a_alphas, self.kijs)
_, _, _, a_alpha_ijs, da_alpha_dT_ijs, d2a_alpha_dT2_ijs = a_alpha_and_derivatives_full(
self.a_alphas, self.da_alpha_dTs, self.d2a_alpha_dT2s, self.T, self.zs, self.kijs,
a_alpha_ijs, self.a_alpha_roots, a_alpha_ij_roots_inv)
self._d2a_alpha_dT2_ijs = d2a_alpha_dT2_ijs
self._da_alpha_dT_ijs = da_alpha_dT_ijs
self._a_alpha_ijs = a_alpha_ijs
@property
def a_alpha_ijs(self):
r'''Calculate and return the matrix
:math:`(a\alpha)_{ij} = (1-k_{ij})\sqrt{(a\alpha)_{i}(a\alpha)_{j}}`.
Returns
-------
a_alpha_ijs : list[list[float]]
`a_alpha` terms for each component with every other component,
[J^2/mol^2/Pa]
Notes
-----
In an earlier implementation this matrix was stored each EOS solve;
however, allocating that much memory becomes quite expensive for large
number of component cases and this is now calculated on-demand only.
'''
try:
return self._a_alpha_ijs
except:
self._set_alpha_matrices()
return self._a_alpha_ijs
    @property
    def da_alpha_dT_ijs(self):
        r'''Calculate and return the matrix for the temperature derivatives of
        the alpha terms.
        .. math::
            \frac{\partial (a\alpha)_{ij}}{\partial T} =
            \frac{\sqrt{\operatorname{a\alpha_{i}}{\left(T \right)} \operatorname{a\alpha_{j}}
            {\left(T \right)}} \left(1 - k_{ij}\right) \left(\frac{\operatorname{a\alpha_{i}}
            {\left(T \right)} \frac{d}{d T} \operatorname{a\alpha_{j}}{\left(T \right)}}{2}
            + \frac{\operatorname{a\alpha_{j}}{\left(T \right)} \frac{d}{d T} \operatorname{
            a\alpha_{i}}{\left(T \right)}}{2}\right)}{\operatorname{a\alpha_{i}}{\left(T \right)}
            \operatorname{a\alpha_{j}}{\left(T \right)}}
        Returns
        -------
        da_alpha_dT_ijs : list[list[float]]
            First temperature derivative of `a_alpha` terms for each component
            with every other component, [J^2/mol^2/Pa/K]
        Notes
        -----
        In an earlier implementation this matrix was stored each EOS solve;
        however, allocating that much memory becomes quite expensive for large
        number of component cases and this is now calculated on-demand only.
        '''
        try:
            # Return the cached matrix when already built
            return self._da_alpha_dT_ijs
        except:
            # First access - compute all alpha matrices, then return
            self._set_alpha_matrices()
            return self._da_alpha_dT_ijs
    @property
    def d2a_alpha_dT2_ijs(self):
        r'''Calculate and return the matrix of the second temperature
        derivatives of the alpha terms.
        .. math::
            \frac{\partial^2 (a\alpha)_{ij}}{\partial T^2} =
            - \frac{\sqrt{\operatorname{a\alpha_{i}}{\left(T \right)} \operatorname{a\alpha_{j}}
            {\left(T \right)}} \left(k_{ij} - 1\right) \left(\frac{\left(\operatorname{
            a\alpha_{i}}{\left(T \right)} \frac{d}{d T} \operatorname{a\alpha_{j}}{\left(T \right)}
            + \operatorname{a\alpha_{j}}{\left(T \right)} \frac{d}{d T} \operatorname{a\alpha_{i}}
            {\left(T \right)}\right)^{2}}{4 \operatorname{a\alpha_{i}}{\left(T \right)}
            \operatorname{a\alpha_{j}}{\left(T \right)}} - \frac{\left(\operatorname{a\alpha_{i}}
            {\left(T \right)} \frac{d}{d T} \operatorname{a\alpha_{j}}{\left(T \right)}
            + \operatorname{a\alpha_{j}}{\left(T \right)} \frac{d}{d T}
            \operatorname{a\alpha_{i}}{\left(T \right)}\right) \frac{d}{d T}
            \operatorname{a\alpha_{j}}{\left(T \right)}}{2 \operatorname{a\alpha_{j}}
            {\left(T \right)}} - \frac{\left(\operatorname{a\alpha_{i}}{\left(T \right)}
            \frac{d}{d T} \operatorname{a\alpha_{j}}{\left(T \right)}
            + \operatorname{a\alpha_{j}}{\left(T \right)} \frac{d}{d T}
            \operatorname{a\alpha_{i}}{\left(T \right)}\right) \frac{d}{d T}
            \operatorname{a\alpha_{i}}{\left(T \right)}}{2 \operatorname{a\alpha_{i}}
            {\left(T \right)}} + \frac{\operatorname{a\alpha_{i}}{\left(T \right)}
            \frac{d^{2}}{d T^{2}} \operatorname{a\alpha_{j}}{\left(T \right)}}{2}
            + \frac{\operatorname{a\alpha_{j}}{\left(T \right)} \frac{d^{2}}{d T^{2}}
            \operatorname{a\alpha_{i}}{\left(T \right)}}{2} + \frac{d}{d T}
            \operatorname{a\alpha_{i}}{\left(T \right)} \frac{d}{d T}
            \operatorname{a\alpha_{j}}{\left(T \right)}\right)}
            {\operatorname{a\alpha_{i}}{\left(T \right)} \operatorname{a\alpha_{j}}
            {\left(T \right)}}
        Returns
        -------
        d2a_alpha_dT2_ijs : list[list[float]]
            Second temperature derivative of `a_alpha` terms for each component
            with every other component, [J^2/mol^2/Pa/K^2]
        Notes
        -----
        In an earlier implementation this matrix was stored each EOS solve;
        however, allocating that much memory becomes quite expensive for large
        number of component cases and this is now calculated on-demand only.
        '''
        try:
            # Return the cached matrix when already built
            return self._d2a_alpha_dT2_ijs
        except:
            # First access - compute all alpha matrices, then return
            self._set_alpha_matrices()
            return self._d2a_alpha_dT2_ijs
@property
def _da_alpha_dT_j_rows(self):
try:
return self.da_alpha_dT_j_rows
except:
pass
zs, N, scalar = self.zs, self.N, self.N
da_alpha_dT_ijs = self.da_alpha_dT_ijs
# Handle the case of attempting to avoid a full alpha derivative matrix evaluation
if not da_alpha_dT_ijs:
self.resolve_full_alphas()
da_alpha_dT_ijs = self.da_alpha_dT_ijs
if scalar:
da_alpha_dT_j_rows = [0.0]*N
else:
da_alpha_dT_j_rows = zeros(N)
for i in range(N):
l = da_alpha_dT_ijs[i]
for j in range(i):
da_alpha_dT_j_rows[j] += zs[i]*l[j]
da_alpha_dT_j_rows[i] += zs[j]*l[j]
da_alpha_dT_j_rows[i] += zs[i]*l[i]
self.da_alpha_dT_j_rows = da_alpha_dT_j_rows
return da_alpha_dT_j_rows
@property
def _d2a_alpha_dT2_j_rows(self):
try:
return self.d2a_alpha_dT2_j_rows
except AttributeError:
pass
d2a_alpha_dT2_ijs, N, scalar = self.d2a_alpha_dT2_ijs, self.N, self.scalar
# Handle the case of attempting to avoid a full alpha derivative matrix evaluation
if d2a_alpha_dT2_ijs is None:
self.resolve_full_alphas()
d2a_alpha_dT2_ijs = self.d2a_alpha_dT2_ijs
zs = self.zs
if scalar:
d2a_alpha_dT2_j_rows = [0.0]*N
else:
d2a_alpha_dT2_j_rows = zeros(N)
for i in range(N):
l = d2a_alpha_dT2_ijs[i]
for j in range(i):
d2a_alpha_dT2_j_rows[j] += zs[i]*l[j]
d2a_alpha_dT2_j_rows[i] += zs[j]*l[j]
d2a_alpha_dT2_j_rows[i] += zs[i]*l[i]
self.d2a_alpha_dT2_j_rows = d2a_alpha_dT2_j_rows
return d2a_alpha_dT2_j_rows
@property
def db_dzs(self):
r'''Helper method for calculating the composition derivatives of `b`.
Note this is independent of the phase.
.. math::
\left(\frac{\partial b}{\partial x_i}\right)_{T, P, x_{i\ne j}}
= b_i
Returns
-------
db_dzs : list[float]
Composition derivative of `b` of each component, [m^3/mol]
Notes
-----
This derivative is checked numerically.
'''
return self.bs
@property
def db_dns(self):
r'''Helper method for calculating the mole number derivatives of `b`.
Note this is independent of the phase.
.. math::
\left(\frac{\partial b}{\partial n_i}\right)_{T, P, n_{i\ne j}}
= b_i - b
Returns
-------
db_dns : list[float]
Composition derivative of `b` of each component, [m^3/mol^2]
Notes
-----
This derivative is checked numerically.
'''
b = self.b
if self.scalar:
return [bi - b for bi in self.bs]
else:
return self.bs - b
@property
def dnb_dns(self):
r'''Helper method for calculating the partial molar derivative of `b`.
Note this is independent of the phase.
.. math::
\left(\frac{\partial n \cdot b}{\partial n_i}\right)_{T, P, n_{i\ne j}}
= b_i
Returns
-------
dnb_dns : list[float]
Partial molar derivative of `b` of each component, [m^3/mol]
Notes
-----
This derivative is checked numerically.
'''
return self.bs
@property
def d2b_dzizjs(self):
r'''Helper method for calculating the second partial mole fraction
derivatives of `b`. Note this is independent of the phase.
.. math::
\left(\frac{\partial^2 b}{\partial x_i \partial x_j}
\right)_{T, P,
n_{k \ne i,j}} = 0
Returns
-------
d2b_dzizjs : list[list[float]]
Second mole fraction derivatives of `b` of each component,
[m^3/mol]
Notes
-----
This derivative is checked numerically.
'''
N = self.N
if self.scalar:
return [[0.0]*N for i in range(N)]
return zeros((N, N))
@property
def d2b_dninjs(self):
r'''Helper method for calculating the second partial mole number
derivatives of `b`. Note this is independent of the phase.
.. math::
\left(\frac{\partial^2 b}{\partial n_i \partial n_j}\right)_{T, P,
n_{k\ne i,k}} = 2b - b_i - b_j
Returns
-------
d2b_dninjs : list[list[float]]
Second Composition derivative of `b` of each component,
[m^3/mol^3]
Notes
-----
This derivative is checked numerically.
'''
bb = 2.0*self.b
bs = self.bs
if self.scalar:
d2b_dninjs = []
for bi in bs:
d2b_dninjs.append([bb - bi - bj for bj in bs])
else:
N = self.N
d2b_dninjs = full((N, N), bb)
d2b_dninjs -= bs
d2b_dninjs = d2b_dninjs.transpose()
d2b_dninjs -= bs
return d2b_dninjs
@property
def d3b_dzizjzks(self):
r'''Helper method for calculating the third partial mole fraction
derivatives of `b`. Note this is independent of the phase.
.. math::
\left(\frac{\partial^3 b}{\partial x_i \partial x_j \partial x_k}
\right)_{T, P,
n_{k \ne i,j,k}} = 0
Returns
-------
d3b_dzizjzks : list[list[list[float]]]
Third mole fraction derivatives of `b` of each component,
[m^3/mol]
Notes
-----
This derivative is checked numerically.
'''
N = self.N
if self.scalar:
return [[[0.0]*N for _ in range(N)] for _ in range(N)]
else:
return zeros((N, N, N))
@property
def d3b_dninjnks(self):
r'''Helper method for calculating the third partial mole number
derivatives of `b`. Note this is independent of the phase.
.. math::
\left(\frac{\partial^3 b}{\partial n_i \partial n_j \partial n_k }
\right)_{T, P,
n_{m \ne i,j,k}} = 2(-3b + b_i + b_j + b_k)
Returns
-------
d3b_dninjnks : list[list[list[float]]]
Third mole number derivative of `b` of each component,
[m^3/mol^4]
Notes
-----
This derivative is checked numerically.
'''
bs = self.bs
n6b = -6.0*self.b
if self.scalar:
bs2 = [bi + bi for bi in bs]
d3b_dninjnks = []
for bi2 in bs2:
d3b_dnjnks = []
for bj2 in bs2:
base = n6b + bi2 + bj2
d3b_dnjnks.append([base + bk2 for bk2 in bs2])
d3b_dninjnks.append(d3b_dnjnks)
else:
bs2 = 2.0*self.bs
N = self.N
d3b_dninjnks = full((N, N, N), n6b)
d3b_dninjnks += bs2
d3b_dninjnks = d3b_dninjnks.transpose((2, 1, 0))
d3b_dninjnks += bs2
d3b_dninjnks = d3b_dninjnks.transpose((0, 2, 1))
d3b_dninjnks += bs2
return d3b_dninjnks
@property
def d3epsilon_dzizjzks(self):
r'''Helper method for calculating the third composition derivatives
of `epsilon`. Note this is independent of the phase.
.. math::
\left(\frac{\partial^3 \epsilon}{\partial x_i \partial x_j
\partial x_k }\right)_{T, P, x_{m\ne i,j,k}} = 0
Returns
-------
d2epsilon_dzizjzks : list[list[list[float]]]
Composition derivative of `epsilon` of each component, [m^6/mol^2]
Notes
-----
This derivative is checked numerically.
'''
N = self.N
if self.scalar:
return [[[0.0]*N for _ in range(N)] for _ in range(N)]
else:
return zeros((N, N, N))
@property
def d3delta_dzizjzks(self):
r'''Helper method for calculating the third composition derivatives
of `delta`. Note this is independent of the phase.
.. math::
\left(\frac{\partial^3 \delta}{\partial x_i \partial x_j
\partial x_k }\right)_{T, P, x_{m\ne i,j,k}} = 0
Returns
-------
d3delta_dzizjzks : list[list[list[float]]]
Third composition derivative of `epsilon` of each component,
[m^6/mol^5]
Notes
-----
This derivative is checked numerically.
'''
N = self.N
if self.scalar:
return [[[0.0]*N for _ in range(N)] for _ in range(N)]
else:
return zeros((N, N, N))
@property
def da_alpha_dzs(self):
r'''Helper method for calculating the composition derivatives of
`a_alpha`. Note this is independent of the phase.
.. math::
\left(\frac{\partial a \alpha}{\partial x_i}\right)_{T, P, x_{i\ne j}}
= 2 \cdot \sum_j z_{j} (1 - k_{ij}) \sqrt{ (a \alpha)_i (a \alpha)_j}
Returns
-------
da_alpha_dzs : list[float]
Composition derivative of `alpha` of each component,
[kg*m^5/(mol^2*s^2)]
Notes
-----
This derivative is checked numerically.
'''
try:
a_alpha_j_rows = self.a_alpha_j_rows
except:
a_alpha_j_rows = self._a_alpha_j_rows
if self.scalar:
return [i + i for i in a_alpha_j_rows]
return 2.0*a_alpha_j_rows
@property
def da_alpha_dns(self):
r'''Helper method for calculating the mole number derivatives of
`a_alpha`. Note this is independent of the phase.
.. math::
\left(\frac{\partial a \alpha}{\partial n_i}\right)_{T, P, n_{i\ne j}}
= 2 (-a\alpha + \sum_j z_{j} (1 - k_{ij}) \sqrt{ (a \alpha)_i (a \alpha)_j})
Returns
-------
da_alpha_dns : list[float]
Mole number derivative of `alpha` of each component,
[kg*m^5/(mol^3*s^2)]
Notes
-----
This derivative is checked numerically.
'''
try:
a_alpha_j_rows = self.a_alpha_j_rows
except:
a_alpha_j_rows = self._a_alpha_j_rows
a_alpha_n_2 = -2.0*self.a_alpha
if self.scalar:
return [2.0*t + a_alpha_n_2 for t in a_alpha_j_rows]
return 2.0*a_alpha_j_rows + a_alpha_n_2
@property
def dna_alpha_dns(self):
r'''Helper method for calculating the partial molar derivatives of
`a_alpha`. Note this is independent of the phase.
.. math::
\left(\frac{\partial a \alpha}{\partial n_i}\right)_{T, P, n_{i\ne j}}
= 2 (-0.5 a\alpha + \sum_j z_{j} (1 - k_{ij}) \sqrt{ (a \alpha)_i (a \alpha)_j})
Returns
-------
dna_alpha_dns : list[float]
Partial molar derivative of `alpha` of each component,
[kg*m^5/(mol^2*s^2)]
Notes
-----
This derivative is checked numerically.
'''
try:
a_alpha_j_rows = self.a_alpha_j_rows
except:
a_alpha_j_rows = self._a_alpha_j_rows
a_alpha = self.a_alpha
if self.scalar:
return [t + t - a_alpha for t in a_alpha_j_rows]
return 2.0*a_alpha_j_rows - a_alpha
@property
def d2a_alpha_dzizjs(self):
r'''Helper method for calculating the second composition derivatives of
`a_alpha` (hessian). Note this is independent of the phase.
.. math::
\left(\frac{\partial^2 a \alpha}{\partial x_i \partial
x_j}\right)_{T, P, x_{k\ne i,j}}
= 2 (1-k_{ij})\sqrt{(a\alpha)_{i}(a\alpha)_{j}}
Returns
-------
d2a_alpha_dzizjs : list[float]
Second composition derivative of `alpha` of each component,
[kg*m^5/(mol^2*s^2)]
Notes
-----
This derivative is checked numerically.
'''
a_alpha_ijs = self.a_alpha_ijs
if self.scalar:
return [[i+i for i in row] for row in a_alpha_ijs]
else:
return 2.0*a_alpha_ijs
@property
def d2a_alpha_dninjs(self):
r'''Helper method for calculating the second partial molar derivatives
of `a_alpha` (hessian). Note this is independent of the phase.
.. math::
\left(\frac{\partial^2 a \alpha}{\partial n_i \partial n_j }\right)_{T, P, n_{k\ne i,j}}
= 2\left[3(a \alpha) + (a\alpha)_{ij} -2 (\text{term}_{i,j})
\right]
.. math::
\text{term}_{i,j} = \sum_k z_k\left((a\alpha)_{ik} + (a\alpha)_{jk}
\right)
.. math::
(a\alpha)_{ij} = (1-k_{ij})\sqrt{(a\alpha)_{i}(a\alpha)_{j}}
Returns
-------
d2a_alpha_dninjs : list[float]
Second partial molar derivative of `alpha` of each component,
[kg*m^5/(mol^4*s^2)]
Notes
-----
This derivative is checked numerically.
'''
try:
a_alpha_j_rows = self.a_alpha_j_rows
except:
a_alpha_j_rows = self._a_alpha_j_rows
a_alpha = self.a_alpha
a_alpha_ijs = self.a_alpha_ijs
N = self.N
zs = self.zs
a_alpha3 = 3.0*a_alpha
if self.scalar:
hessian = [[0.0]*N for _ in range(N)]
else:
hessian = zeros((N, N))
for i in range(N):
for j in range(i+1):
if i == j:
term = 2.0*a_alpha_j_rows[i]
else:
term = 0.0
for k in range(N):
term += zs[k]*(a_alpha_ijs[i][k] + a_alpha_ijs[j][k])
hessian[i][j] = hessian[j][i] = 2.0*(a_alpha3 + a_alpha_ijs[i][j] -2.0*term)
# row.append(2.0*(a_alpha3 + a_alpha_ijs[i][j] -2.0*term))
# hessian.append(row)
return hessian
@property
def d3a_alpha_dzizjzks(self):
r'''Helper method for calculating the third composition derivatives of
`a_alpha`. Note this is independent of the phase.
.. math::
\left(\frac{\partial^3 a \alpha}{\partial x_i \partial x_j
\partial x_k}\right)_{T, P, x_{m\ne i,j,k}}
= 0
Returns
-------
d3a_alpha_dzizjzks : list[float]
Third composition derivative of `alpha` of each component,
[kg*m^5/(mol^2*s^2)]
Notes
-----
This derivative is checked numerically.
'''
N = self.N
return [[[0.0]*N for _ in range(N)] for _ in range(N)]
@property
def d3a_alpha_dninjnks(self):
r'''Helper method for calculating the third mole number derivatives of
`a_alpha`. Note this is independent of the phase.
.. math::
\left(\frac{\partial^3 a \alpha}{\partial n_i \partial n_j
\partial n_k}\right)_{T, P, n_{m\ne i,j,k}}
= 4\left(-6 (a \alpha) - [(a \alpha)_{i,j} + (a \alpha)_{i,k}
+ (a \alpha)_{j,k}]
+ 3\sum_m z_m[(a \alpha)_{i,m} + (a \alpha)_{j,m}
+ (a \alpha)_{k,m}]\right)
Returns
-------
d3a_alpha_dninjnks : list[float]
Third mole number derivative of `alpha` of each component,
[kg*m^5/(mol^5*s^2)]
Notes
-----
This derivative is checked numerically.
'''
# Seems correct across diagonal
# Each term is of similar magnitude, so likely would notice if brokwn
a_alpha = self.a_alpha
a_alpha_ijs = self.a_alpha_ijs
N = self.N
zs = self.zs
a_alpha6 = -6.0*a_alpha
matrix = []
for i in range(N):
l = []
for j in range(N):
row = []
for k in range(N):
mid = a_alpha_ijs[i][j] + a_alpha_ijs[i][k] + a_alpha_ijs[j][k]
last = sum(zs[m]*(a_alpha_ijs[i][m] + a_alpha_ijs[j][m] + a_alpha_ijs[k][m]) for m in range(N))
ele = 4.0*(a_alpha6 - mid + 3.0*last)
row.append(ele)
l.append(row)
matrix.append(l)
return matrix
@property
def da_alpha_dT_dzs(self):
r'''Helper method for calculating the composition derivatives of
`da_alpha_dT`. Note this is independent of the phase.
.. math::
\left(\frac{\partial^2 a \alpha}{\partial x_i \partial T}
\right)_{P, x_{i\ne j}}
= 2 \sum_j -z_{j} (k_{ij} - 1) (a \alpha)_i (a \alpha)_j
\frac{\partial (a \alpha)_i}{\partial T} \frac{\partial (a \alpha)_j}{\partial T}
\left({ (a \alpha)_i (a \alpha)_j}\right)^{-0.5}
Returns
-------
da_alpha_dT_dzs : list[float]
Composition derivative of `da_alpha_dT` of each component,
[kg*m^5/(mol^2*s^2*K)]
Notes
-----
This derivative is checked numerically.
'''
try:
da_alpha_dT_j_rows = self.da_alpha_dT_j_rows
except:
da_alpha_dT_j_rows = self._da_alpha_dT_j_rows
return [i + i for i in da_alpha_dT_j_rows]
@property
def da_alpha_dT_dns(self):
r'''Helper method for calculating the mole number derivatives of
`da_alpha_dT`. Note this is independent of the phase.
.. math::
\left(\frac{\partial^2 a \alpha}{\partial n_i \partial T}
\right)_{P, n_{i\ne j}}
= 2 \left[\sum_j -z_{j} (k_{ij} - 1) (a \alpha)_i (a \alpha)_j
\frac{\partial (a \alpha)_i}{\partial T} \frac{\partial (a \alpha)_j}{\partial T}
\left({ (a \alpha)_i (a \alpha)_j}\right)^{-0.5}
- \frac{\partial a \alpha}{\partial T} \right]
Returns
-------
da_alpha_dT_dns : list[float]
Composition derivative of `da_alpha_dT` of each component,
[kg*m^5/(mol^3*s^2*K)]
Notes
-----
This derivative is checked numerically.
'''
try:
da_alpha_dT_j_rows = self.da_alpha_dT_j_rows
except:
da_alpha_dT_j_rows = self._da_alpha_dT_j_rows
da_alpha_dT = self.da_alpha_dT
return [2.0*(t - da_alpha_dT) for t in da_alpha_dT_j_rows]
@property
def dna_alpha_dT_dns(self):
r'''Helper method for calculating the mole number derivatives of
`da_alpha_dT`. Note this is independent of the phase.
.. math::
\left(\frac{\partial^2 n a \alpha}{\partial n_i \partial T}
\right)_{P, n_{i\ne j}}
= 2 \left[\sum_j -z_{j} (k_{ij} - 1) (a \alpha)_i (a \alpha)_j
\frac{\partial (a \alpha)_i}{\partial T} \frac{\partial (a \alpha)_j}{\partial T}
\left({ (a \alpha)_i (a \alpha)_j}\right)^{-0.5}
- 0.5 \frac{\partial a \alpha}{\partial T} \right]
Returns
-------
dna_alpha_dT_dns : list[float]
Composition derivative of `da_alpha_dT` of each component,
[kg*m^5/(mol^2*s^2*K)]
Notes
-----
This derivative is checked numerically.
'''
try:
da_alpha_dT_j_rows = self.da_alpha_dT_j_rows
except:
da_alpha_dT_j_rows = self._da_alpha_dT_j_rows
da_alpha_dT = self.da_alpha_dT
return [t + t - da_alpha_dT for t in da_alpha_dT_j_rows]
@property
def d2a_alpha_dT2_dzs(self):
r'''Helper method for calculating the mole number derivatives of
`d2a_alpha_dT2`. Note this is independent of the phase.
.. math::
\left(\frac{\partial^3 a \alpha}{\partial z_i \partial T^2}
\right)_{P, z_{i\ne j}}
= \text{large expression}
Returns
-------
d2a_alpha_dT2_dzs : list[float]
Composition derivative of `d2a_alpha_dT2` of each component,
[kg*m^5/(mol^2*s^2*K^2)]
Notes
-----
This derivative is checked numerically.
'''
try:
d2a_alpha_dT2_j_rows = self.d2a_alpha_dT2_j_rows
except:
d2a_alpha_dT2_j_rows = self._d2a_alpha_dT2_j_rows
return [i + i for i in d2a_alpha_dT2_j_rows]
@property
def d2a_alpha_dT2_dns(self):
r'''Helper method for calculating the mole number derivatives of
`d2a_alpha_dT2`. Note this is independent of the phase.
.. math::
\left(\frac{\partial^3 a \alpha}{\partial n_i \partial T^2}
\right)_{P, n_{i\ne j}}
= f\left(\left(\frac{\partial^3 a\alpha}{\partial z_i \partial T^2}
\right)_{P, z_{i\ne j}} \right)
Returns
-------
d2a_alpha_dT2_dns : list[float]
Mole number derivative of `d2a_alpha_dT2` of each component,
[kg*m^5/(mol^3*s^2*K^2)]
Notes
-----
This derivative is checked numerically.
'''
try:
d2a_alpha_dT2_j_rows = self.d2a_alpha_dT2_j_rows
except:
d2a_alpha_dT2_j_rows = self._d2a_alpha_dT2_j_rows
d2a_alpha_dT2 = self.d2a_alpha_dT2
return [2.0*(t - d2a_alpha_dT2) for t in d2a_alpha_dT2_j_rows]
def dV_dzs(self, Z):
r'''Calculates the molar volume composition derivative
(where the mole fractions do not sum to 1). Verified numerically.
Used in many other derivatives, and for the molar volume mole number
derivative and partial molar volume calculation.
.. math::
\left(\frac{\partial V}{\partial x_i}\right)_{T, P,
x_{i\ne j}} =
\frac{- R T \left(V^{2}{\left(x \right)} + V{\left(x \right)} \delta{\left(x \right)}
+ \epsilon{\left(x \right)}\right)^{3} \frac{d}{d x} b{\left(x \right)} + \left(V{\left(x \right)}
- b{\left(x \right)}\right)^{2} \left(V^{2}{\left(x \right)} + V{\left(x \right)} \delta{\left(x \right)}
+ \epsilon{\left(x \right)}\right)^{2} \frac{d}{d x} \operatorname{a \alpha}{\left(x \right)}
- \left(V{\left(x \right)} - b{\left(x \right)}\right)^{2} V^{3}{\left(x \right)} \operatorname{a
\alpha}{\left(x \right)} \frac{d}{d x} \delta{\left(x \right)} - \left(V{\left(x \right)} - b{\left(x
\right)}\right)^{2} V^{2}{\left(x \right)} \operatorname{a \alpha}{\left(x \right)} \delta{\left(x
\right)} \frac{d}{d x} \delta{\left(x \right)} - \left(V{\left(x \right)} - b{\left(x \right)}
\right)^{2} V^{2}{\left(x \right)} \operatorname{a \alpha}{\left(x \right)} \frac{d}{d x} \epsilon{
\left(x \right)} - \left(V{\left(x \right)} - b{\left(x \right)}\right)^{2} V{\left(x \right)}
\operatorname{a \alpha}{\left(x \right)} \delta{\left(x \right)} \frac{d}{d x} \epsilon{\left(x
\right)} - \left(V{\left(x \right)} - b{\left(x \right)}\right)^{2} V{\left(x \right)} \operatorname{a
\alpha}{\left(x \right)} \epsilon{\left(x \right)} \frac{d}{d x} \delta{\left(x \right)}
- \left(V{\left(x \right)} - b{\left(x \right)}\right)^{2} \operatorname{a \alpha}{\left(x \right)}
\epsilon{\left(x \right)} \frac{d}{d x} \epsilon{\left(x \right)}}{- R T \left(V^{2}{\left(x \right)}
+ V{\left(x \right)} \delta{\left(x \right)} + \epsilon{\left(x \right)}\right)^{3}
+ 2 \left(V{\left(x \right)} - b{\left(x \right)}\right)^{2} V^{3}{\left(x \right)}
\operatorname{a \alpha}{\left(x \right)} + 3 \left(V{\left(x \right)} - b{\left(x \right)}\right)^{2}
V^{2}{\left(x \right)} \operatorname{a \alpha}{\left(x \right)} \delta{\left(x \right)}
+ \left(V{\left(x \right)} - b{\left(x \right)}\right)^{2} V{\left(x \right)} \operatorname{a
\alpha}{\left(x \right)} \delta^{2}{\left(x \right)} + 2 \left(V{\left(x \right)} - b{\left(x
\right)}\right)^{2} V{\left(x \right)} \operatorname{a \alpha}{\left(x \right)} \epsilon{\left(x
\right)} + \left(V{\left(x \right)} - b{\left(x \right)}\right)^{2} \operatorname{a \alpha}{\left(x
\right)} \delta{\left(x \right)} \epsilon{\left(x \right)}}
Parameters
----------
Z : float
Compressibility of the mixture for a desired phase, [-]
Returns
-------
dV_dzs : float
Molar volume composition derivatives, [m^3/mol]
Notes
-----
The derivation for the derivative is performed as follows using SymPy.
The function source code is an optimized variant created with the `cse`
SymPy function, and hand optimized further.
>>> from sympy import * # doctest:+SKIP
>>> P, T, R, x = symbols('P, T, R, x') # doctest:+SKIP
>>> V, delta, epsilon, a_alpha, b = symbols('V, delta, epsilon, a\ \\alpha, b', cls=Function) # doctest:+SKIP
>>> CUBIC = R*T/(V(x) - b(x)) - a_alpha(x)/(V(x)*V(x) + delta(x)*V(x) + epsilon(x)) - P # doctest:+SKIP
>>> solve(diff(CUBIC, x), Derivative(V(x), x)) # doctest:+SKIP
[(-R*T*(V(x)**2 + V(x)*delta(x) + epsilon(x))**3*Derivative(b(x), x) + (V(x) - b(x))**2*(V(x)**2 + V(x)*delta(x) + epsilon(x))**2*Derivative(a \alpha(x), x) - (V(x) - b(x))**2*V(x)**3*a \alpha(x)*Derivative(delta(x), x) - (V(x) - b(x))**2*V(x)**2*a \alpha(x)*delta(x)*Derivative(delta(x), x) - (V(x) - b(x))**2*V(x)**2*a \alpha(x)*Derivative(epsilon(x), x) - (V(x) - b(x))**2*V(x)*a \alpha(x)*delta(x)*Derivative(epsilon(x), x) - (V(x) - b(x))**2*V(x)*a \alpha(x)*epsilon(x)*Derivative(delta(x), x) - (V(x) - b(x))**2*a \alpha(x)*epsilon(x)*Derivative(epsilon(x), x))/(-R*T*(V(x)**2 + V(x)*delta(x) + epsilon(x))**3 + 2*(V(x) - b(x))**2*V(x)**3*a \alpha(x) + 3*(V(x) - b(x))**2*V(x)**2*a \alpha(x)*delta(x) + (V(x) - b(x))**2*V(x)*a \alpha(x)*delta(x)**2 + 2*(V(x) - b(x))**2*V(x)*a \alpha(x)*epsilon(x) + (V(x) - b(x))**2*a \alpha(x)*delta(x)*epsilon(x))]
'''
return eos_mix_dV_dzs(self.T, self.P, Z, self.b, self.delta, self.epsilon,
self.a_alpha, self.db_dzs, self.ddelta_dzs,
self.depsilon_dzs, self.da_alpha_dzs, self.N)
def dV_dns(self, Z):
r'''Calculates the molar volume mole number derivatives
(where the mole fractions sum to 1). No specific formula is implemented
for this property - it is calculated from the mole fraction derivative.
.. math::
\left(\frac{\partial V}{\partial n_i}\right)_{T, P,
n_{i\ne j}} = f\left( \left(\frac{\partial V}{\partial x_i}\right)_{T, P,
x_{i\ne j}}
\right)
Parameters
----------
Z : float
Compressibility of the mixture for a desired phase, [-]
Returns
-------
dV_dns : float
Molar volume mole number derivatives, [m^3/mol^2]
'''
dV_dns = dxs_to_dns(self.dV_dzs(Z), self.zs)
if not self.scalar:
dV_dns = array(dV_dns)
return dV_dns
def dnV_dns(self, Z):
r'''Calculates the partial molar volume of the specified phase
No specific formula is implemented
for this property - it is calculated from the molar
volume mole fraction derivative.
.. math::
\left(\frac{\partial n V}{\partial n_i}\right)_{T, P,
n_{i\ne j}} = f\left( \left(\frac{\partial V}{\partial x_i}\right)_{T, P,
x_{i\ne j}}
\right)
Parameters
----------
Z : float
Compressibility of the mixture for a desired phase, [-]
Returns
-------
dnV_dns : float
Partial molar volume of the mixture of the specified phase,
[m^3/mol]
'''
V = Z*R*self.T/self.P
return dxs_to_dn_partials(self.dV_dzs(Z), self.zs, V)
    def _d2V_dij_wrapper(self, V, d_Vs, dbs, d2bs, d_epsilons, d2_epsilons,
                         d_deltas, d2_deltas, da_alphas, d2a_alphas):
        '''Shared worker for the second derivatives of `V` with respect to
        two composition variables (mole fractions or mole numbers - the
        caller supplies the matching first/second derivatives of `b`,
        `delta`, `epsilon`, and `a_alpha`).

        The `x0`..`x44` locals are the output of a SymPy ``cse`` pass over
        the implicit second derivative of the generic cubic EOS; they are
        not individually meaningful. Loop-invariant subexpressions are
        hoisted above the double loop.

        Returns an N*N nested list (hessian of `V`).
        '''
        T = self.T
        x0 = V
        x3 = self.b
        x4 = x0 - x3
        x5 = self.epsilon
        x6 = x0*x0
        x7 = self.delta
        x8 = x0*x7
        x9 = x5 + x6 + x8
        x10 = self.a_alpha
        x11 = x10*x4*x4
        x12 = x0 + x0
        x13 = x9*x9
        x14 = R*T
        x17 = x4*x4*x4
        x18 = x10*x17
        x19 = 2*x18
        x22 = 4*x18
        x27 = x12*x18
        x33 = x14*x13*x9
        x34 = x33 + x33
        x37 = x19*x8
        x38 = x17*x9
        x39 = x10*x38
        hessian = []
        N = self.N
        for i in range(N):
            row = []
            for j in range(N):
                # TODO optimize this - symmetric, others
                x15 = d_epsilons[i]
                x16 = d_epsilons[j]
                x20 = x16*x19
                x21 = d_Vs[i]
                x24 = d_Vs[j]
                x23 = x21*x22
                x25 = x15*x24
                x26 = d_deltas[i]
                x28 = d_deltas[j]
                x29 = x21*x24
                x30 = 8*x18*x29
                x31 = x28*x6
                x32 = x24*x26
                x35 = x34*dbs[j]
                x36 = dbs[i]
                x40 = x38*da_alphas[i]
                x41 = x38*da_alphas[j]
                x42 = x21*x41
                x43 = x24*x40
                x44 = x21*x39
                # Second cross-derivatives of the EOS coefficients
                d1 = d2_deltas[i][j] # Derivative(x7, x1, x2)
                d2 = d2a_alphas[i][j] # Derivative(x10, x1, x2)
                d3 = d2bs[i][j] # Derivative(x3, x1, x2)
                d4 = d2_epsilons[i][j] # Derivative(x5, x1, x2)
                v = ((x0*x16*x23 + x0*x22*x25 - x0*x26*x41 - x0*x28*x40
                    - x0*x39*d1 - x12*x42 - x12*x43 + x13*x17*d2 + x15*x20
                    + x15*x27*x28 - x15*x41 + x16*x26*x27 - x16*x40
                    + x19*x25*x7 + x19*x26*x31 + x19*x29*x7**2 + x20*x21*x7
                    + x21*x28*x37 + x21*x35 + x22*x32*x6 + x23*x31
                    + x24*x34*x36 - 2*x24*x44 - x28*x44 - x29*x34 + x30*x6
                    + x30*x8 + x32*x37 - x32*x39 - x33*x4*d3 - x35*x36
                    - x39*d4 - x42*x7 - x43*x7)/(x4*x9*(x11*x12 + x11*x7 - x13*x14)))
                row.append(v)
            hessian.append(row)
        return hessian
def d2V_dzizjs(self, Z):
r'''Calculates the molar volume second composition derivative
(where the mole fractions do not sum to 1). Verified numerically.
Used in many other derivatives, and for the molar volume second mole
number derivative.
.. math::
\left(\frac{\partial^2 V}{\partial x_i \partial x_j}\right)_{T, P,
x_{k \ne i,j}} = \text{run SymPy code to obtain - very long!}
Parameters
----------
Z : float
Compressibility of the mixture for a desired phase, [-]
Returns
-------
d2V_dzizjs : float
Molar volume second composition derivatives, [m^3/mol]
Notes
-----
The derivation for the derivative is performed as follows using SymPy.
The function source code is an optimized variant created with the `cse`
SymPy function, and hand optimized further.
>>> from sympy import * # doctest:+SKIP
>>> P, T, R, x1, x2 = symbols('P, T, R, x1, x2') # doctest:+SKIP
>>> V, delta, epsilon, a_alpha, b = symbols('V, delta, epsilon, a\ \\alpha, b', cls=Function) # doctest:+SKIP
>>> CUBIC = R*T/(V(x1, x2) - b(x1, x2)) - a_alpha(x1, x2)/(V(x1, x2)*V(x1, x2) + delta(x1, x2)*V(x1, x2) + epsilon(x1, x2)) - P # doctest:+SKIP
>>> solve(diff(CUBIC, x1, x2), Derivative(V(x1, x2), x1, x2)) # doctest:+SKIP
'''
V = Z*self.T*R/self.P
dV_dzs = self.dV_dzs(Z)
depsilon_dzs = self.depsilon_dzs
d2epsilon_dzizjs = self.d2epsilon_dzizjs
ddelta_dzs = self.ddelta_dzs
d2delta_dzizjs = self.d2delta_dzizjs
db_dzs = self.db_dzs
d2bs = self.d2b_dzizjs
da_alpha_dzs = self.da_alpha_dzs
d2a_alpha_dzizjs = self.d2a_alpha_dzizjs
return self._d2V_dij_wrapper(V=V, d_Vs=dV_dzs, dbs=db_dzs, d2bs=d2bs,
d_epsilons=depsilon_dzs, d2_epsilons=d2epsilon_dzizjs,
d_deltas=ddelta_dzs, d2_deltas=d2delta_dzizjs,
da_alphas=da_alpha_dzs, d2a_alphas=d2a_alpha_dzizjs)
def d2V_dninjs(self, Z):
r'''Calculates the molar volume second mole number derivatives
(where the mole fractions sum to 1). No specific formula is implemented
for this property - it is calculated from the second mole fraction
derivatives.
.. math::
\left(\frac{\partial^2 V}{\partial n_i \partial n_j}\right)_{T, P,
n_{k\ne i,j}} = f\left( \left(\frac{\partial^2 V}{\partial
x_i\partial x_j}\right)_{T, P, x_{k\ne i,j}}
\right)
Parameters
----------
Z : float
Compressibility of the mixture for a desired phase, [-]
Returns
-------
d2V_dninjs : float
Molar volume second mole number derivatives, [m^3/mol^3]
'''
V = Z*self.T*R/self.P
dV_dns = self.dV_dns(Z)
depsilon_dns = self.depsilon_dns
d2epsilon_dninjs = self.d2epsilon_dninjs
ddelta_dns = self.ddelta_dns
d2delta_dninjs = self.d2delta_dninjs
db_dns = self.db_dns
d2bs = self.d2b_dninjs
da_alpha_dns = self.da_alpha_dns
d2a_alpha_dninjs = self.d2a_alpha_dninjs
return self._d2V_dij_wrapper(V=V, d_Vs=dV_dns, dbs=db_dns, d2bs=d2bs,
d_epsilons=depsilon_dns, d2_epsilons=d2epsilon_dninjs,
d_deltas=ddelta_dns, d2_deltas=d2delta_dninjs,
da_alphas=da_alpha_dns, d2a_alphas=d2a_alpha_dninjs)
def dZ_dzs(self, Z):
r'''Calculates the compressibility composition derivatives
(where the mole fractions do not sum to 1). No specific formula is
implemented for this property - it is calculated from the
composition derivative of molar volume, which does have its formula
implemented.
.. math::
\left(\frac{\partial Z}{\partial x_i}\right)_{T, P,
x_{i\ne j}} = \frac{P }{RT}
\left(\frac{\partial V}{\partial x_i}\right)_{T, P, x_{i\ne j}}
Parameters
----------
Z : float
Compressibility of the mixture for a desired phase, [-]
Returns
-------
dZ_dzs : float
Compressibility composition derivative, [-]
'''
factor = self.P/(self.T*R)
return [dV*factor for dV in self.dV_dzs(Z)]
def dZ_dns(self, Z):
r'''Calculates the compressibility mole number derivatives
(where the mole fractions sum to 1). No specific formula is implemented
for this property - it is calculated from the mole fraction derivative.
.. math::
\left(\frac{\partial Z}{\partial n_i}\right)_{T, P,
n_{i\ne j}} = f\left( \left(\frac{\partial Z}{\partial x_i}\right)_{T, P,
x_{i\ne j}}
\right)
Parameters
----------
Z : float
Compressibility of the mixture for a desired phase, [-]
Returns
-------
dZ_dns : float
Compressibility number derivatives, [1/mol]
'''
return dxs_to_dns(self.dZ_dzs(Z), self.zs)
def dnZ_dns(self, Z):
r'''Calculates the partial compressibility of the specified phase
No specific formula is implemented
for this property - it is calculated from the compressibility
mole fraction derivative.
.. math::
\left(\frac{\partial n Z}{\partial n_i}\right)_{T, P,
n_{i\ne j}} = f\left( \left(\frac{\partial Z}{\partial x_i}\right)_{T, P,
x_{i\ne j}}
\right)
Parameters
----------
Z : float
Compressibility of the mixture for a desired phase, [-]
Returns
-------
dnZ_dns : float
Partial compressibility of the mixture of the specified phase,
[-]
'''
return dxs_to_dn_partials(self.dZ_dzs(Z), self.zs, Z)
def dH_dep_dzs(self, Z):
r'''Calculates the molar departure enthalpy composition derivative
(where the mole fractions do not sum to 1). Verified numerically.
Useful in solving for enthalpy specifications in newton-type methods,
and forms the basis for the molar departure enthalpy mole number
derivative and molar partial departure enthalpy.
.. math::
\left(\frac{\partial H_{dep}}{\partial x_i}\right)_{T, P,
x_{i\ne j}} =
P \frac{d}{d x} V{\left(x \right)} + \frac{2 \left(T \frac{\partial}{\partial T}
\operatorname{a \alpha}{\left(T,x \right)} - \operatorname{a \alpha}{\left(x
\right)}\right) \left(- \delta{\left(x \right)} \frac{d}{d x} \delta{\left(x
\right)} + 2 \frac{d}{d x} \epsilon{\left(x \right)}\right) \operatorname{atanh}
{\left(\frac{2 V{\left(x \right)} + \delta{\left(x \right)}}{\sqrt{\delta^{2}
{\left(x \right)} - 4 \epsilon{\left(x \right)}}} \right)}}{\left(\delta^{2}
{\left(x \right)} - 4 \epsilon{\left(x \right)}\right)^{\frac{3}{2}}}
+ \frac{2 \left(T \frac{\partial}{\partial T} \operatorname{a \alpha}
{\left(T,x \right)} - \operatorname{a \alpha}{\left(x \right)}\right)
\left(\frac{\left(- \delta{\left(x \right)} \frac{d}{d x} \delta{\left(x
\right)} + 2 \frac{d}{d x} \epsilon{\left(x \right)}\right) \left(2
V{\left(x \right)} + \delta{\left(x \right)}\right)}{\left(\delta^{2}{\left(x
\right)} - 4 \epsilon{\left(x \right)}\right)^{\frac{3}{2}}} + \frac{2
\frac{d}{d x} V{\left(x \right)} + \frac{d}{d x} \delta{\left(x \right)}}
{\sqrt{\delta^{2}{\left(x \right)} - 4 \epsilon{\left(x \right)}}}\right)}{\left(
- \frac{\left(2 V{\left(x \right)} + \delta{\left(x \right)}\right)^{2}}{
\delta^{2}{\left(x \right)} - 4 \epsilon{\left(x \right)}} + 1\right) \sqrt{
\delta^{2}{\left(x \right)} - 4 \epsilon{\left(x \right)}}} + \frac{2
\left(T \frac{\partial^{2}}{\partial x\partial T} \operatorname{a \alpha}
{\left(T,x \right)} - \frac{d}{d x} \operatorname{a \alpha}{\left(x \right)}
\right) \operatorname{atanh}{\left(\frac{2 V{\left(x \right)} + \delta{\left(x
\right)}}{\sqrt{\delta^{2}{\left(x \right)} - 4 \epsilon{\left(x \right)}}}
\right)}}{\sqrt{\delta^{2}{\left(x \right)} - 4 \epsilon{\left(x \right)}}}
Parameters
----------
Z : float
Compressibility of the mixture for a desired phase, [-]
Returns
-------
dH_dep_dzs : float
Departure enthalpy composition derivatives, [J/mol]
Notes
-----
The derivation for the derivative is performed as follows using SymPy.
The function source code is an optimized variant created with the `cse`
SymPy function, and hand optimized further.
>>> from sympy import * # doctest:+SKIP
>>> P, T, V, R, b, a, delta, epsilon, x = symbols('P, T, V, R, b, a, delta, epsilon, x') # doctest:+SKIP
>>> V, delta, epsilon, a_alpha, b = symbols('V, delta, epsilon, a_alpha, b', cls=Function) # doctest:+SKIP
>>> H_dep = (P*V(x) - R*T + 2/sqrt(delta(x)**2 - 4*epsilon(x))*(T*Derivative(a_alpha(T, x), T) # doctest:+SKIP
... - a_alpha(x))*atanh((2*V(x)+delta(x))/sqrt(delta(x)**2-4*epsilon(x))))
>>> diff(H_dep, x) # doctest:+SKIP
P*Derivative(V(x), x) + 2*(T*Derivative(a \alpha(T, x), T) - a \alpha(x))*(-delta(x)*Derivative(delta(x), x) + 2*Derivative(epsilon(x), x))*atanh((2*V(x) + delta(x))/sqrt(delta(x)**2 - 4*epsilon(x)))/(delta(x)**2 - 4*epsilon(x))**(3/2) + 2*(T*Derivative(a \alpha(T, x), T) - a \alpha(x))*((-delta(x)*Derivative(delta(x), x) + 2*Derivative(epsilon(x), x))*(2*V(x) + delta(x))/(delta(x)**2 - 4*epsilon(x))**(3/2) + (2*Derivative(V(x), x) + Derivative(delta(x), x))/sqrt(delta(x)**2 - 4*epsilon(x)))/((-(2*V(x) + delta(x))**2/(delta(x)**2 - 4*epsilon(x)) + 1)*sqrt(delta(x)**2 - 4*epsilon(x))) + 2*(T*Derivative(a \alpha(T, x), T, x) - Derivative(a \alpha(x), x))*atanh((2*V(x) + delta(x))/sqrt(delta(x)**2 - 4*epsilon(x)))/sqrt(delta(x)**2 - 4*epsilon(x))
'''
P = self.P
T = self.T
ddelta_dzs = self.ddelta_dzs
depsilon_dzs = self.depsilon_dzs
da_alpha_dzs = self.da_alpha_dzs
da_alpha_dT_dzs = self.da_alpha_dT_dzs
dV_dzs = self.dV_dzs(Z)
x0 = V = Z*R*T/P
x2 = self.delta
x3 = x0 + x0 + x2
x4 = self.epsilon
x5 = x2*x2 - 4.0*x4
try:
x6 = x5**-0.5
except:
# VDW has x5 as zero as delta, epsilon = 0
x6 = 1e50
x7 = 2.0*catanh(x3*x6).real
x8 = x9 = self.a_alpha
x10 = T*self.da_alpha_dT - x8
x13 = x6*x6# 1.0/x5
t0 = x6*x7
t1 = x10*t0*x13
t2 = 2.0*x10*x13/(x13*x3*x3 - 1.0)
x3_x13 = x3*x13
dH_dzs = []
for i in range(self.N):
x1 = dV_dzs[i]
x11 = ddelta_dzs[i]
x12 = x11*x2 - 2.0*depsilon_dzs[i]
value = (P*x1 - x12*t1 + t2*(x12*x3_x13 - x1 - x1 - x11)
+ t0*(T*da_alpha_dT_dzs[i] - da_alpha_dzs[i]))
dH_dzs.append(value)
return dH_dzs
def dS_dep_dzs(self, Z):
r'''Calculates the molar departure entropy composition derivative
(where the mole fractions do not sum to 1). Verified numerically.
Useful in solving for entropy specifications in newton-type methods,
and forms the basis for the molar departure entropy mole number
derivative and molar partial departure entropy.
.. math::
\left(\frac{\partial S_{dep}}{\partial x_i}\right)_{T, P,
x_{i\ne j}} = \frac{1}{T}\left(
\left(\frac{\partial H_{dep}}{\partial x_i}\right)_{T, P,
x_{i\ne j}}
- \left(\frac{\partial G_{dep}}{\partial x_i}\right)_{T, P,
x_{i\ne j}}
\right)
Parameters
----------
Z : float
Compressibility of the mixture for a desired phase, [-]
Returns
-------
dS_dep_dzs : float
Departure entropy composition derivatives, [J/mol/K]
Notes
-----
'''
dH_dep_dzs = self.dH_dep_dzs(Z)
dG_dep_dzs = self.dG_dep_dzs(Z)
T_inv = 1.0/self.T
return [T_inv*(dH_dep_dzs[i] - dG_dep_dzs[i]) for i in range(self.N)]
def dS_dep_dns(self, Z):
r'''Calculates the molar departure entropy mole number derivatives
(where the mole fractions sum to 1). No specific formula is implemented
for this property - it is calculated from the mole fraction derivative.
.. math::
\left(\frac{\partial S_{dep}}{\partial n_i}\right)_{T, P,
n_{i\ne j}} = f\left( \left(\frac{\partial S_{dep}}{\partial x_i}\right)_{T, P,
x_{i\ne j}}
\right)
Parameters
----------
Z : float
Compressibility of the mixture for a desired phase, [-]
Returns
-------
dS_dep_dns : float
Departure entropy mole number derivatives, [J/mol^2/K]
'''
return dxs_to_dns(self.dS_dep_dzs(Z), self.zs)
def dP_dns_Vt(self, phase):
# Checked numerically, working. Evaluated at constant temperature and total volume.
r'''from sympy import *
Vt, P, T, R, n1, n2, n3, no = symbols('Vt, P, T, R, n1, n2, n3, no') # doctest:+SKIP
n, P, V, a_alpha, delta, epsilon, b = symbols('n, P, V, a\ \\alpha, delta, epsilon, b', cls=Function) # doctest:+SKIP
da_alpha_dT, d2a_alpha_dT2 = symbols('da_alpha_dT, d2a_alpha_dT2', cls=Function) # doctest:+SKIP
n = no + n1 + n2 + n3
P = R*T/(Vt/n-b(n1, n2, n3)) - a_alpha(T, n1, n2, n3)/((Vt/n)**2 + delta(n1, n2, n3)*(Vt/n)+epsilon(n1, n2, n3))
V = Vt/n
cse(diff(P, n1))
'''
if phase == 'g':
Vt = self.V_g
else:
Vt = self.V_l
T = self.T
b = self.b
a_alpha = self.a_alpha
epsilon = self.epsilon
Vt2 = Vt*Vt
delta = self.delta
x9 = Vt2 + Vt*delta + epsilon
depsilon_dns = self.depsilon_dns
ddelta_dns = self.ddelta_dns
db_dns = self.db_dns
da_alpha_dns = self.da_alpha_dns
t1 = R*T*1.0/((Vt - b)*(Vt - b))
t2 = 1.0/x9
t3 = a_alpha*t2*t2
t4 = t1*Vt -t3*(Vt*delta + Vt2 + Vt2)
dP_dns_Vt = []
for i in range(self.N):
v = (t4 + t1*db_dns[i] + t3*(Vt*ddelta_dns[i] + depsilon_dns[i]) - t2*da_alpha_dns[i])
dP_dns_Vt.append(v)
return dP_dns_Vt
    def d2P_dninjs_Vt(self, phase):
        r'''Second mole number derivatives of pressure evaluated at constant
        temperature and constant total volume of the phase (`V_g` when
        ``phase == 'g'``, else `V_l`).

        Parameters
        ----------
        phase : str
            'g' selects the gas-like volume root; anything else selects the
            liquid-like root, [-]

        Returns
        -------
        d2P_dninjs_Vt : list[list[float]]
            Symmetric N*N matrix of second mole number derivatives of
            pressure; presumably [Pa/mol^2] - confirm against callers.

        Notes
        -----
        Variable names ``x*``/``t*`` follow a SymPy `cse` output. Only the
        lower triangle is evaluated; symmetry fills the rest.
        '''
        if phase == 'g':
            Vt = self.V_g
        else:
            Vt = self.V_l
        T, N = self.T, self.N
        b = self.b
        a_alpha = self.a_alpha
        epsilon = self.epsilon
        # First and second composition derivatives of the EOS coefficients
        depsilon_dns = self.depsilon_dns
        ddelta_dns = self.ddelta_dns
        db_dns = self.db_dns
        da_alpha_dns = self.da_alpha_dns
        d2delta_dninjs = self.d2delta_dninjs
        d2epsilon_dninjs = self.d2epsilon_dninjs
        d2bs = self.d2b_dninjs
        d2a_alpha_dninjs = self.d2a_alpha_dninjs
        x0 = self.a_alpha
        x1 = self.epsilon
        x2 = Vt*Vt
        x5 = self.delta
        # Attractive-term denominator V^2 + delta*V + epsilon and its powers
        x7 = x1 + x2 + x5*Vt
        x7_inv = 1.0/x7
        x8 = self.b
        x9 = Vt - x8
        x11 = Vt + Vt
        x12 = R*T
        x13 = Vt
        x14 = x7_inv*x7_inv
        x16 = x2 + x2 + x13*x5
        t1 = x0*x14
        x9_inv = 1.0/x9
        x9_inv2 = x9_inv*x9_inv
        x9_inv3 = x9_inv*x9_inv2
        # t2..t5 are independent of (i, j) and hoisted out of the loops
        t2 = t1*(x11*x5 + 6.0*x2) - x12*x11*x9_inv2
        t3 = x12*x9_inv2
        t4 = 2.0*x12*x9_inv3
        t5 = 2.0*x0*x7_inv*x7_inv*x7_inv
        hess = [[0.0]*N for _ in range(N)]
        for i in range(N):
            # i-dependent terms, reused across the inner (j) loop
            x15 = ddelta_dns[i]
            x17 = -x15*Vt + x16 - depsilon_dns[i]
            t50 = -x13*x15
            t51 = t5*x17
            t52 = t4*(x13 + db_dns[i])
            t53 = x14*x17
            t54 = x14*da_alpha_dns[i]
            t55 = (t51 + t54)
            iadd = t1*t50 + t52*x13 - x16*t55
            for j in range(i+1):
                x18 = ddelta_dns[j]
                x19 = x18*Vt + depsilon_dns[j]
                v = (t2 + iadd + t1*(Vt*d2delta_dninjs[i][j] + d2epsilon_dninjs[i][j] - x13*x18)
                     + t52*db_dns[j] - t53*da_alpha_dns[j] + t55*x19
                     + t3*d2bs[i][j] - x7_inv*d2a_alpha_dninjs[i][j])
                # Hessian is symmetric in (i, j)
                hess[i][j] = hess[j][i] = v
        return hess
def d3P_dninjnks_Vt(self, phase):
if phase == 'g':
Vt = self.V_g
else:
Vt = self.V_l
T, N = self.T, self.N
b = self.b
a_alpha = self.a_alpha
epsilon = self.epsilon
depsilon_dns = self.depsilon_dns
ddelta_dns = self.ddelta_dns
db_dns = self.db_dns
da_alpha_dns = self.da_alpha_dns
d2delta_dninjs = self.d2delta_dninjs
d2epsilon_dninjs = self.d2epsilon_dninjs
d2bs = self.d2b_dninjs
d2a_alpha_dninjs = self.d2a_alpha_dninjs
d3epsilon_dninjnks = self.d3epsilon_dninjnks
d3delta_dninjnks = self.d3delta_dninjnks
d3a_alpha_dninjnks = self.d3a_alpha_dninjnks
d3b_dninjnks = self.d3b_dninjnks
mat = [[[0.0]*N for _ in range(N)] for _ in range(N)]
for i in range(N):
for j in range(N):
for k in range(N):
x0 = self.b
x1 = 1.0
x2 = Vt/x1
x3 = -x0 + x2
x4 = 6/x1**4
x5 = Vt*x4
x6 = R*T
x7 = self.a_alpha
x8 = self.epsilon
x9 = Vt**2
x10 = x1**(-2)
x11 = self.delta
x12 = x10*x9 + x11*x2 + x8
x13 = 2/x1**3
x14 = Vt*x13
x15 = Vt*x10
x16 = x6*(x15 + db_dns[k])
x17 = 2/x3**3
x18 = x15 + db_dns[j]
x19 = x17*x6
x20 = x15 + db_dns[i]
x21 = x12**(-2)
x22 = ddelta_dns[i]
x23 = x11*x15 + x13*x9
x24 = -x2*x22 + x23 - depsilon_dns[i]
x25 = ddelta_dns[j]
x26 = -x2*x25 + x23 - depsilon_dns[j]
x27 = ddelta_dns[k]
x28 = -x2*x27 + x23 - depsilon_dns[j]
x29 = da_alpha_dns[k]
x30 = d2delta_dninjs[i][j]
x31 = -x15*x25
x32 = x4*x9
x33 = x11*x14
x34 = -x15*x22 + x32 + x33
x35 = x2*x30 + x31 + x34 + d2epsilon_dninjs[i][j]
x36 = da_alpha_dns[j]
x37 = d2delta_dninjs[i][k]
x38 = -x15*x27
x39 = x2*x37 + x34 + x38 + d2epsilon_dninjs[i][k]
x40 = da_alpha_dns[i]
x41 = d2delta_dninjs[j][k]
x42 = x2*x41 + x31 + x32 + x33 + x38 + d2epsilon_dninjs[j][k]
x43 = 2/x12**3
x44 = x24*x26
x45 = x28*x43
x46 = x43*x7
v = (-x16*x17*(x14 - d2bs[i][j]) + 6*x16*x18*x20/x3**4 - x18*x19*(x14 -d2bs[i][k])
- x19*x20*(x14 - d2bs[j][k]) - x21*x24*d2a_alpha_dninjs[j][k]
- x21*x26*d2a_alpha_dninjs[i][k] - x21*x28*d2a_alpha_dninjs[i][j]
+ x21*x29*x35 + x21*x36*x39 + x21*x40*x42
- x21*x7*(x11*x5 - x14*x22 - x14*x25 - x14*x27 + x15*x30 + x15*x37 + x15*x41
- x2*d3delta_dninjnks[i][j][k] - d3epsilon_dninjnks[i][j][k] + 24*x9/x1**5)
- x24*x36*x45 + x24*x42*x46 + x26*x39*x46 - x26*x40*x45 - x29*x43*x44 + x35*x45*x7
+ x6*(x5 + d3b_dninjnks[i][j][k])/x3**2 - d3a_alpha_dninjnks[i][j][k]/x12 - 6*x28*x44*x7/x12**4)
mat[i][j][k] = v
return mat
def dH_dep_dns(self, Z):
r'''Calculates the molar departure enthalpy mole number derivatives
(where the mole fractions sum to 1). No specific formula is implemented
for this property - it is calculated from the mole fraction derivative.
.. math::
\left(\frac{\partial H_{dep}}{\partial n_i}\right)_{T, P,
n_{i\ne j}} = f\left( \left(\frac{\partial H_{dep}}{\partial x_i}\right)_{T, P,
x_{i\ne j}}
\right)
Parameters
----------
Z : float
Compressibility of the mixture for a desired phase, [-]
Returns
-------
dH_dep_dns : float
Departure enthalpy mole number derivatives, [J/mol^2]
'''
return dxs_to_dns(self.dH_dep_dzs(Z), self.zs)
def dnH_dep_dns(self, Z):
r'''Calculates the partial molar departure enthalpy. No specific
formula is implemented for this property - it is calculated from the
mole fraction derivative.
.. math::
\left(\frac{\partial n H_{dep}}{\partial n_i}\right)_{T, P,
n_{i\ne j}} = f\left( \left(\frac{\partial H_{dep}}{\partial x_i}\right)_{T, P,
x_{i\ne j}}
\right)
Parameters
----------
Z : float
Compressibility of the mixture for a desired phase, [-]
Returns
-------
dnH_dep_dns : float
Partial molar departure enthalpies of the phase, [J/mol]
'''
try:
if Z == self.Z_l:
F = self.H_dep_l
else:
F = self.H_dep_g
except:
F = self.H_dep_g
return dxs_to_dn_partials(self.dH_dep_dzs(Z), self.zs, F)
def _G_dep_lnphi_d_helper(self, Z, dbs, depsilons, ddelta, dVs, da_alphas,
G=True):
return G_dep_lnphi_d_helper(self.T, self.P, self.b, self.delta,
self.epsilon, self.a_alpha, self.N,
Z, dbs, depsilons, ddelta, dVs, da_alphas,
G)
def dlnphi_dzs(self, Z):
r'''Calculates the mixture log *fugacity coefficient* mole fraction
derivatives (where the mole fractions do not sum to 1). No specific
formula is implemented for this property - it is calculated from the
mole fraction derivative of Gibbs free energy.
.. math::
\left(\frac{\partial \ln \phi }{\partial x_i}\right)_{T, P,
x_{i\ne j}} = \frac{1}{RT}\left( \left(\frac{\partial G_{dep}}
{\partial x_i}\right)_{T, P, x_{i\ne j}}
\right)
Parameters
----------
Z : float
Compressibility of the mixture for a desired phase, [-]
Returns
-------
dlnphi_dzs : float
Mixture log fugacity coefficient mole fraction derivatives, [-]
'''
return self._G_dep_lnphi_d_helper(Z, dbs=self.db_dzs, depsilons=self.depsilon_dzs,
ddelta=self.ddelta_dzs, dVs=self.dV_dzs(Z),
da_alphas=self.da_alpha_dzs, G=False)
def dlnphi_dns(self, Z):
r'''Calculates the mixture log *fugacity coefficient* mole number
derivatives (where the mole fractions sum to 1). No specific formula is
implemented for this property - it is calculated from the mole fraction
derivative of Gibbs free energy.
.. math::
\left(\frac{\partial \ln \phi }{\partial n_i}\right)_{T, P,
n_{i\ne j}} = f\left( \left(\frac{\partial G_{dep}}{\partial x_i}\right)_{T, P,
x_{i\ne j}}
\right)
This property can be converted into a partial molar property to obtain
the individual fugacity coefficients.
Parameters
----------
Z : float
Compressibility of the mixture for a desired phase, [-]
Returns
-------
dlnphi_dns : float
Mixture log fugacity coefficient mole number derivatives, [1/mol]
'''
return self._G_dep_lnphi_d_helper(Z, dbs=self.db_dns, depsilons=self.depsilon_dns,
ddelta=self.ddelta_dns, dVs=self.dV_dns(Z),
da_alphas=self.da_alpha_dns, G=False)
def dG_dep_dzs(self, Z):
r'''Calculates the molar departure Gibbs energy composition derivative
(where the mole fractions do not sum to 1). Verified numerically.
Useful in solving for gibbs minimization calculations or for solving
for the true critical point. Also forms the basis for the molar
departure Gibbs energy mole number derivative and molar partial
departure Gibbs energy.
.. math::
\left(\frac{\partial G_{dep}}{\partial x_i}\right)_{T, P,
x_{i\ne j}} =
P \frac{d}{d x} V{\left(x \right)} - \frac{R T \left(\frac{d}{d x}
V{\left(x \right)} - \frac{d}{d x} b{\left(x \right)}\right)}{
V{\left(x \right)} - b{\left(x \right)}} - \frac{2 \left(- \delta{
\left(x \right)} \frac{d}{d x} \delta{\left(x \right)} + 2 \frac{d}
{d x} \epsilon{\left(x \right)}\right) \operatorname{a \alpha}{
\left(x \right)} \operatorname{atanh}{\left(\frac{2 V{\left(x
\right)}}{\sqrt{\delta^{2}{\left(x \right)} - 4 \epsilon{\left(x
\right)}}} + \frac{\delta{\left(x \right)}}{\sqrt{\delta^{2}{\left(
x \right)} - 4 \epsilon{\left(x \right)}}} \right)}}{\left(
\delta^{2}{\left(x \right)} - 4 \epsilon{\left(x \right)}\right)^{
\frac{3}{2}}} - \frac{2 \operatorname{atanh}{\left(\frac{2 V{\left(
x \right)}}{\sqrt{\delta^{2}{\left(x \right)} - 4 \epsilon{\left(x
\right)}}} + \frac{\delta{\left(x \right)}}{\sqrt{\delta^{2}{\left(
x \right)} - 4 \epsilon{\left(x \right)}}} \right)} \frac{d}{d x}
\operatorname{a \alpha}{\left(x \right)}}{\sqrt{\delta^{2}{\left(x
\right)} - 4 \epsilon{\left(x \right)}}} - \frac{2 \left(\frac{2
\left(- \delta{\left(x \right)} \frac{d}{d x} \delta{\left(x
\right)} + 2 \frac{d}{d x} \epsilon{\left(x \right)}\right)
V{\left(x \right)}}{\left(\delta^{2}{\left(x \right)} - 4 \epsilon{
\left(x \right)}\right)^{\frac{3}{2}}} + \frac{\left(- \delta{\left
(x \right)} \frac{d}{d x} \delta{\left(x \right)} + 2 \frac{d}{d x}
\epsilon{\left(x \right)}\right) \delta{\left(x \right)}}{\left(
\delta^{2}{\left(x \right)} - 4 \epsilon{\left(x \right)}\right)^{
\frac{3}{2}}} + \frac{2 \frac{d}{d x} V{\left(x \right)}}{\sqrt{
\delta^{2}{\left(x \right)} - 4 \epsilon{\left(x \right)}}}
+ \frac{\frac{d}{d x} \delta{\left(x \right)}}{\sqrt{\delta^{2}{
\left(x \right)} - 4 \epsilon{\left(x \right)}}}\right)
\operatorname{a \alpha}{\left(x \right)}}{\left(1 - \left(\frac{2
V{\left(x \right)}}{\sqrt{\delta^{2}{\left(x \right)} - 4 \epsilon{
\left(x \right)}}} + \frac{\delta{\left(x \right)}}{\sqrt{
\delta^{2}{\left(x \right)} - 4 \epsilon{\left(x \right)}}}\right
)^{2}\right) \sqrt{\delta^{2}{\left(x \right)}
- 4 \epsilon{\left(x \right)}}}
Parameters
----------
Z : float
Compressibility of the mixture for a desired phase, [-]
Returns
-------
dG_dep_dzs : float
Departure Gibbs free energy composition derivatives, [J/mol]
Notes
-----
The derivation for the derivative is performed as follows using SymPy.
The function source code is an optimized variant created with the `cse`
SymPy function, and hand optimized further.
>>> from sympy import * # doctest:+SKIP
>>> P, T, R, x = symbols('P, T, R, x') # doctest:+SKIP
>>> a_alpha, a, delta, epsilon, V, b, da_alpha_dT = symbols('a\ \\alpha, a, delta, epsilon, V, b, da_alpha_dT', cls=Function) # doctest:+SKIP
>>> S_dep = R*log(P*V(x)/(R*T)) + R*log(V(x)-b(x))+2*da_alpha_dT(x)*atanh((2*V(x)+delta(x))/sqrt(delta(x)**2-4*epsilon(x)))/sqrt(delta(x)**2-4*epsilon(x))-R*log(V(x)) # doctest:+SKIP
>>> H_dep = P*V(x) - R*T + 2*atanh((2*V(x)+delta(x))/sqrt(delta(x)**2-4*epsilon(x)))*(da_alpha_dT(x)*T-a_alpha(x))/sqrt(delta(x)**2-4*epsilon(x)) # doctest:+SKIP
>>> G_dep = simplify(H_dep - T*S_dep) # doctest:+SKIP
>>> diff(G_dep, x) # doctest:+SKIP
P*Derivative(V(x), x) - R*T*(Derivative(V(x), x) - Derivative(b(x), x))/(V(x) - b(x)) - 2*(-delta(x)*Derivative(delta(x), x) + 2*Derivative(epsilon(x), x))*a \alpha(x)*atanh(2*V(x)/sqrt(delta(x)**2 - 4*epsilon(x)) + delta(x)/sqrt(delta(x)**2 - 4*epsilon(x)))/(delta(x)**2 - 4*epsilon(x))**(3/2) - 2*atanh(2*V(x)/sqrt(delta(x)**2 - 4*epsilon(x)) + delta(x)/sqrt(delta(x)**2 - 4*epsilon(x)))*Derivative(a \alpha(x), x)/sqrt(delta(x)**2 - 4*epsilon(x)) - 2*(2*(-delta(x)*Derivative(delta(x), x) + 2*Derivative(epsilon(x), x))*V(x)/(delta(x)**2 - 4*epsilon(x))**(3/2) + (-delta(x)*Derivative(delta(x), x) + 2*Derivative(epsilon(x), x))*delta(x)/(delta(x)**2 - 4*epsilon(x))**(3/2) + 2*Derivative(V(x), x)/sqrt(delta(x)**2 - 4*epsilon(x)) + Derivative(delta(x), x)/sqrt(delta(x)**2 - 4*epsilon(x)))*a \alpha(x)/((1 - (2*V(x)/sqrt(delta(x)**2 - 4*epsilon(x)) + delta(x)/sqrt(delta(x)**2 - 4*epsilon(x)))**2)*sqrt(delta(x)**2 - 4*epsilon(x)))
'''
return self._G_dep_lnphi_d_helper(Z, dbs=self.db_dzs, depsilons=self.depsilon_dzs,
ddelta=self.ddelta_dzs, dVs=self.dV_dzs(Z),
da_alphas=self.da_alpha_dzs, G=True)
def dG_dep_dns(self, Z):
r'''Calculates the molar departure Gibbs energy mole number derivatives
(where the mole fractions sum to 1). No specific formula is implemented
for this property - it is calculated from the mole fraction derivative.
.. math::
\left(\frac{\partial G_{dep}}{\partial n_i}\right)_{T, P,
n_{i\ne j}} = f\left( \left(\frac{\partial G_{dep}}{\partial x_i}\right)_{T, P,
x_{i\ne j}}
\right)
Apart from the ideal term, this is the formulation for chemical
potential.
Parameters
----------
Z : float
Compressibility of the mixture for a desired phase, [-]
Returns
-------
dG_dep_dns : float
Departure Gibbs energy mole number derivatives, [J/mol^2]
'''
return self._G_dep_lnphi_d_helper(Z, dbs=self.db_dns, depsilons=self.depsilon_dns,
ddelta=self.ddelta_dns, dVs=self.dV_dns(Z),
da_alphas=self.da_alpha_dns, G=True)
def dnG_dep_dns(self, Z):
r'''Calculates the partial molar departure Gibbs energy. No specific
formula is implemented for this property - it is calculated from the
mole fraction derivative.
.. math::
\left(\frac{\partial n G_{dep}}{\partial n_i}\right)_{T, P,
n_{i\ne j}} = f\left( \left(\frac{\partial G_{dep}}{\partial x_i}\right)_{T, P,
x_{i\ne j}}
\right)
Parameters
----------
Z : float
Compressibility of the mixture for a desired phase, [-]
Returns
-------
dnG_dep_dns : float
Partial molar departure Gibbs energy of the phase, [J/mol]
'''
try:
if Z == self.Z_l:
F = self.G_dep_l
else:
F = self.G_dep_g
except:
F = self.G_dep_g
dG_dns = self.dG_dep_dns(Z)
return dns_to_dn_partials(dG_dns, F)
def fugacity_coefficients(self, Z):
r'''Generic formula for calculating log fugacity coefficients for each
species in a mixture. Verified numerically. Applicable to all cubic
equations of state which can be cast in the form used here.
Normally this routine is slower than EOS-specific ones, as it does not
make assumptions that certain parameters are zero or equal to other
parameters.
.. math::
\left(\frac{\partial n \ln \phi}{\partial n_i}
\right)_{n_{k \ne i}} = \ln \phi _i = \ln \phi +
n \left(\frac{\partial \ln \phi}{\partial n_i}
\right)_{n_{k\ne i}}
.. math::
\left(\frac{\partial \ln \phi }{\partial n_i}\right)_{T, P,
n_{i\ne j}} = \frac{1}{RT}\left( \left(\frac{\partial G_{dep}}
{\partial n_i}\right)_{T, P, n_{i\ne j}}
\right)
Parameters
----------
Z : float
Compressibility of the mixture for a desired phase, [-]
Returns
-------
log_phis : float
Log fugacity coefficient for each species, [-]
'''
zs = self.zs
try:
if Z == self.Z_l:
F = self.phi_l
else:
F = self.phi_g
except:
F = self.phi_g
# This conversion seems numerically safe anyway
try:
logF = log(F)
except:
logF = -690.7755278982137
log_phis = dns_to_dn_partials(self.dlnphi_dns(Z), logF)
return log_phis if self.scalar else array(log_phis)
    def _d2_G_dep_lnphi_d2_helper(self, V, d_Vs, d2Vs, dbs, d2bs, d_epsilons, d2_epsilons,
                                  d_deltas, d2_deltas, da_alphas, d2a_alphas, G=True):
        r'''Shared second-derivative helper for the departure Gibbs energy
        (``G=True``) and the log fugacity coefficient (``G=False``, which
        scales each element by 1/(RT)).

        Builds the full N*N Hessian from the supplied first and second
        derivatives of V, b, epsilon, delta and a_alpha with respect to two
        composition variables. The ``x*`` names follow a SymPy `cse` output;
        the trailing comments map them to the symbolic derivation (x1, x2 are
        the two differentiation variables, indexed here by i and j).
        '''
        T, P = self.T, self.P
        N = self.N
        RT = T*R
        RT_inv = 1.0/RT
        hess = []
        for i in range(N):
            row = []
            for j in range(N):
                # x1: i
                # x2: j
                x0 = V# V(x1, x2)
                x3 = d2Vs[i][j] #Derivative(x0, x1, x2)
                x4 = self.b#b(x1, x2)
                x5 = x0 - x4
                x6 = R*T
                x7 = d_Vs[i] #Derivative(x0, x1)
                x8 = d_Vs[j] #Derivative(x0, x2)
                x9 = self.delta#delta(x1, x2)
                x10 = self.epsilon#epsilon(x1, x2)
                x11 = -4*x10 + x9**2
                # VDW has delta = epsilon = 0, making x11 zero; substitute a
                # tiny value to avoid division by zero below.
                if x11 == 0.0:
                    x11 = 1e-100
                x12 = 1/sqrt(x11)
                x13 = self.a_alpha#alpha(x1, x2)
                x14 = 2*x0
                x15 = x14 + x9
                # .real: complex atanh handles |argument| > 1 gracefully
                x16 = catanh(x12*x15).real
                x17 = 2*x16
                x18 = d_deltas[i] #Derivative(x9, x1)
                x19 = x18*x9 - 2*d_epsilons[i]#Derivative(x10, x1)
                x20 = da_alphas[j]#Derivative(x13, x2)
                x21 = x17/x11**(3/2)
                x22 = d_deltas[j]#Derivative(x9, x2)
                x23 = x22*x9 - 2*d_epsilons[j]#Derivative(x10, x2)
                x24 = da_alphas[i]#Derivative(x13, x1)
                x25 = d2_deltas[i][j]#Derivative(x9, x1, x2)
                x26 = x18*x22 + x25*x9 - 2*d2_epsilons[i][j]#Derivative(x10, x1, x2)
                x27 = x13*x23
                x28 = 2*x7
                x29 = 1/x11
                x30 = x29*x9
                x31 = x19*x29
                x32 = x14*x31 - x18 + x19*x30 - x28
                x33 = x15**2*x29 - 1
                x34 = 2/x33
                x35 = x29*x34
                x36 = 2*x8
                x37 = x23*x29
                x38 = x14*x37 - x22 + x23*x30 - x36
                x39 = x11**(-2)
                x40 = x19*x39
                x41 = x13*x38
                x42 = x32*x39
                x43 = x23*x40
                v = (P*x3 - x12*x17*d2a_alphas[i][j] + x13*x21*x26
                     - x13*x35*(-6*x0*x43 + x14*x26*x29 + x18*x37 + x22*x31
                                - x25 + x26*x30 + x28*x37 - 2*x3 + x31*x36
                                - 3*x43*x9) - 4*x15*x41*x42/x33**2 + x19*x20*x21
                     - x20*x32*x35 + x21*x23*x24 - x24*x35*x38 + x27*x34*x42
                     + x34*x40*x41 - x6*(x3 - d2bs[i][j])/x5
                     + x6*(x7 - dbs[i])*(x8 - dbs[j])/x5**2
                     - 6*x16*x19*x27/x11**(5/2))
                # G=False converts the Gibbs derivative to d2(ln phi)
                if not G:
                    v *= RT_inv
                row.append(v)
            hess.append(row)
        return hess
def d2lnphi_dzizjs(self, Z):
r'''Calculates the mixture log *fugacity coefficient* second mole
fraction derivatives (where the mole fractions do not sum to 1). No
specific formula is implemented for this property - it is calculated
from the second mole fraction derivative of Gibbs free energy.
.. math::
\left(\frac{\partial^2 \ln \phi }{\partial x_i\partial x_j}\right)_{T, P,
x_{i,j\ne k}} = \frac{1}{RT}\left( \left(\frac{\partial^2 G_{dep}}
{\partial x_j \partial x_i}\right)_{T, P, x_{i,j\ne k}}
\right)
Parameters
----------
Z : float
Compressibility of the mixture for a desired phase, [-]
Returns
-------
d2lnphi_dzizjs : float
Mixture log fugacity coefficient second mole fraction derivatives,
[-]
'''
V = Z*self.T*R/self.P
dV_dzs = self.dV_dzs(Z)
d2Vs = self.d2V_dzizjs(Z)
depsilon_dzs = self.depsilon_dzs
d2epsilon_dzizjs = self.d2epsilon_dzizjs
ddelta_dzs = self.ddelta_dzs
d2delta_dzizjs = self.d2delta_dzizjs
db_dzs = self.db_dzs
d2bs = self.d2b_dzizjs
da_alpha_dzs = self.da_alpha_dzs
d2a_alpha_dzizjs = self.d2a_alpha_dzizjs
return self._d2_G_dep_lnphi_d2_helper(V=V, d_Vs=dV_dzs, d2Vs=d2Vs, dbs=db_dzs, d2bs=d2bs,
d_epsilons=depsilon_dzs, d2_epsilons=d2epsilon_dzizjs,
d_deltas=ddelta_dzs, d2_deltas=d2delta_dzizjs,
da_alphas=da_alpha_dzs, d2a_alphas=d2a_alpha_dzizjs,
G=False)
def d2lnphi_dninjs(self, Z):
r'''Calculates the mixture log *fugacity coefficient* second mole
number derivatives (where the mole fraction sum to 1). No
specific formula is implemented for this property - it is calculated
from the second mole fraction derivative of Gibbs free energy.
.. math::
\left(\frac{\partial^2 \ln \phi }{\partial n_i\partial n_j}\right)_{T, P,
n_{i,j\ne k}} f\left( \left(\frac{\partial^2 G_{dep}}
{\partial x_j \partial x_i}\right)_{T, P, x_{i,j\ne k}}
\right)
Parameters
----------
Z : float
Compressibility of the mixture for a desired phase, [-]
Returns
-------
d2lnphi_dninjs : float
Mixture log fugacity coefficient second mole number derivatives,
[-]
'''
V = Z*self.T*R/self.P
dV_dns = self.dV_dns(Z)
d2Vs = self.d2V_dninjs(Z)
depsilon_dns = self.depsilon_dns
d2epsilon_dninjs = self.d2epsilon_dninjs
ddelta_dns = self.ddelta_dns
d2delta_dninjs = self.d2delta_dninjs
db_dns = self.db_dns
d2bs = self.d2b_dninjs
da_alpha_dns = self.da_alpha_dns
d2a_alpha_dninjs = self.d2a_alpha_dninjs
return self._d2_G_dep_lnphi_d2_helper(V=V, d2Vs=d2Vs, d_Vs=dV_dns, dbs=db_dns, d2bs=d2bs,
d_epsilons=depsilon_dns, d2_epsilons=d2epsilon_dninjs,
d_deltas=ddelta_dns, d2_deltas=d2delta_dninjs,
da_alphas=da_alpha_dns, d2a_alphas=d2a_alpha_dninjs,
G=False)
def d2G_dep_dzizjs(self, Z):
r'''Calculates the molar departure Gibbs energy second composition
derivative (where the mole fractions do not sum to 1). Verified numerically.
Useful in solving for gibbs minimization calculations or for solving
for the true critical point. Also forms the basis for the molar
departure Gibbs energy mole second number derivative.
.. math::
\left(\frac{\partial^2 G_{dep}}{\partial x_j \partial x_i}\right)_{T, P,
x_{i,j\ne k}} = \text{run SymPy code to obtain - very long!}
Parameters
----------
Z : float
Compressibility of the mixture for a desired phase, [-]
Returns
-------
d2G_dep_dzizjs : float
Departure Gibbs free energy second composition derivatives, [J/mol]
Notes
-----
The derivation for the derivative is performed as follows using SymPy.
The function source code is an optimized variant created with the `cse`
SymPy function, and hand optimized further.
>>> from sympy import * # doctest:+SKIP
>>> P, T, R, x1, x2 = symbols('P, T, R, x1, x2') # doctest:+SKIP
>>> a_alpha, delta, epsilon, V, b = symbols('a\ \\alpha, delta, epsilon, V, b', cls=Function) # doctest:+SKIP
>>> da_alpha_dT, d2a_alpha_dT2 = symbols('da_alpha_dT, d2a_alpha_dT2', cls=Function) # doctest:+SKIP
>>> S_dep = R*log(P*V(x1, x2)/(R*T)) + R*log(V(x1, x2)-b(x1, x2))+2*da_alpha_dT(x1, x2)*atanh((2*V(x1, x2)+delta(x1, x2))/sqrt(delta(x1, x2)**2-4*epsilon(x1, x2)))/sqrt(delta(x1, x2)**2-4*epsilon(x1, x2))-R*log(V(x1, x2)) # doctest:+SKIP
>>> H_dep = P*V(x1, x2) - R*T + 2*atanh((2*V(x1, x2)+delta(x1, x2))/sqrt(delta(x1, x2)**2-4*epsilon(x1, x2)))*(da_alpha_dT(x1, x2)*T-a_alpha(x1, x2))/sqrt(delta(x1, x2)**2-4*epsilon(x1, x2)) # doctest:+SKIP
>>> G_dep = simplify(H_dep - T*S_dep) # doctest:+SKIP
>>> diff(G_dep, x1, x2) # doctest:+SKIP
'''
V = Z*self.T*R/self.P
dV_dzs = self.dV_dzs(Z)
d2Vs = self.d2V_dzizjs(Z)
depsilon_dzs = self.depsilon_dzs
d2epsilon_dzizjs = self.d2epsilon_dzizjs
ddelta_dzs = self.ddelta_dzs
d2delta_dzizjs = self.d2delta_dzizjs
db_dzs = self.db_dzs
d2bs = self.d2b_dzizjs
da_alpha_dzs = self.da_alpha_dzs
d2a_alpha_dzizjs = self.d2a_alpha_dzizjs
return self._d2_G_dep_lnphi_d2_helper(V=V, d_Vs=dV_dzs, d2Vs=d2Vs, dbs=db_dzs, d2bs=d2bs,
d_epsilons=depsilon_dzs, d2_epsilons=d2epsilon_dzizjs,
d_deltas=ddelta_dzs, d2_deltas=d2delta_dzizjs,
da_alphas=da_alpha_dzs, d2a_alphas=d2a_alpha_dzizjs,
G=True)
def dlnphis_dns(self, Z):
r'''Generic formula for calculating the mole number derivaitves of
log fugacity coefficients for each species in a mixture. Verified
numerically. Applicable to all cubic equations of state which can be
cast in the form used here.
.. math::
\left(\frac{\partial \ln \phi_i}{\partial n_i}\right)_{P,
n_{j \ne i}}
Parameters
----------
Z : float
Compressibility of the mixture for a desired phase, [-]
Returns
-------
dlnphis_dns : list[list[float]]
Mole number derivatives of log fugacity coefficient for each
species, [-]
Notes
-----
'''
dns = self.dlnphi_dns(Z)
d2ns = self.d2lnphi_dninjs(Z)
return d2ns_to_dn2_partials(d2ns, dns)
def dlnfugacities_dns(self, phase):
r'''Generic formula for calculating the mole number derivaitves of
log fugacities for each species in a mixture. Verified
numerically. Applicable to all cubic equations of state which can be
cast in the form used here.
.. math::
\left(\frac{\partial \ln f_i}{\partial n_i}\right)_{P,
n_{j \ne i}}
Parameters
----------
phase : str
One of 'l' or 'g', [-]
Returns
-------
dlnfugacities_dns : list[list[float]]
Mole number derivatives of log fugacities for each
species, [-]
Notes
-----
'''
zs, N = self.zs, self.N
if phase == 'l':
Z = self.Z_l
try:
fugacities = self.fugacities_l
except AttributeError:
self.fugacities()
fugacities = self.fugacities_l
else:
Z = self.Z_g
try:
fugacities = self.fugacities_g
except AttributeError:
self.fugacities()
fugacities = self.fugacities_g
dlnfugacities_dns = [list(i) for i in self.dfugacities_dns(phase)]
fugacities_inv = [1.0/fi for fi in fugacities]
for i in range(N):
r = dlnfugacities_dns[i]
for j in range(N):
r[j]*= fugacities_inv[i]
return dlnfugacities_dns
def dfugacities_dns(self, phase):
r'''Generic formula for calculating the mole number derivaitves of
fugacities for each species in a mixture. Verified
numerically. Applicable to all cubic equations of state which can be
cast in the form used here.
.. math::
\left(\frac{\partial f_i}{\partial n_i}\right)_{P,
n_{j \ne i}}
Parameters
----------
phase : str
One of 'l' or 'g', [-]
Returns
-------
dfugacities_dns : list[list[float]]
Mole number derivatives of fugacities for each species, [-]
Notes
-----
'''
'''
from sympy import *
phifun1, phifun2 = symbols('phifun1, phifun2', cls=Function)
n1, n2, P = symbols('n1, n2, P')
x1 = n1/(n1+n2)
x2 = n2/(n1+n2)
to_diff = x2*P*exp(phifun1(n1))
diff(to_diff, n1).subs({n1+n1: 1})
'''
zs = self.zs
if phase == 'l':
Z = self.Z_l
try:
phis = self.phis_l
except AttributeError:
self.fugacities()
phis = self.phis_l
else:
Z = self.Z_g
try:
phis = self.phis_g
except AttributeError:
self.fugacities()
phis = self.phis_g
dlnphis_dns = self.dlnphis_dns(Z)
P = self.P
N = self.N
matrix = []
for i in range(N):
phi_P = P*phis[i]
ziPphi = phi_P*zs[i]
r = dlnphis_dns[i]
# row = [ziPphi*(r[j] - 1.0) for j in range(N)]
row = [ziPphi*(dlnphis_dns[j][i] - 1.0) for j in range(N)]
row[i] += phi_P
matrix.append(row)
return matrix
def d2G_dep_dninjs(self, Z):
r'''Calculates the molar departure Gibbs energy mole number derivatives
(where the mole fractions sum to 1). No specific formula is implemented
for this property - it is calculated from the mole fraction derivative.
.. math::
\left(\frac{\partial^2 G_{dep}}{\partial n_j \partial n_i}\right)_{T, P,
n_{i,j\ne k}} = f\left( \left(\frac{\partial^2 G_{dep}}{\partial x_j \partial x_i}\right)_{T, P,
x_{i,j\ne k}}
\right)
Parameters
----------
Z : float
Compressibility of the mixture for a desired phase, [-]
Returns
-------
d2G_dep_dninjs : float
Departure Gibbs energy second mole number derivatives, [J/mol^3]
'''
V = Z*self.T*R/self.P
dV_dns = self.dV_dns(Z)
d2Vs = self.d2V_dninjs(Z)
depsilon_dns = self.depsilon_dns
d2epsilon_dninjs = self.d2epsilon_dninjs
ddelta_dns = self.ddelta_dns
d2delta_dninjs = self.d2delta_dninjs
db_dns = self.db_dns
d2bs = self.d2b_dninjs
da_alpha_dns = self.da_alpha_dns
d2a_alpha_dninjs = self.d2a_alpha_dninjs
return self._d2_G_dep_lnphi_d2_helper(V=V, d2Vs=d2Vs, d_Vs=dV_dns, dbs=db_dns, d2bs=d2bs,
d_epsilons=depsilon_dns, d2_epsilons=d2epsilon_dninjs,
d_deltas=ddelta_dns, d2_deltas=d2delta_dninjs,
da_alphas=da_alpha_dns, d2a_alphas=d2a_alpha_dninjs,
G=True)
def _d2_A_dep_d2_helper(self, V, d_Vs, d2Vs, dbs, d2bs, d_epsilons,
d2_epsilons, d_deltas, d2_deltas, da_alphas,
d2a_alphas):
# pass
r'''from sympy import * # doctest:+SKIP
P, T, R, x1, x2 = symbols('P, T, R, x1, x2') # doctest:+SKIP
a_alpha, delta, epsilon, V, b = symbols('a\ \\alpha, delta, epsilon, V, b', cls=Function) # doctest:+SKIP
da_alpha_dT, d2a_alpha_dT2 = symbols('da_alpha_dT, d2a_alpha_dT2', cls=Function) # doctest:+SKIP
S_dep = R*log(P*V(x1, x2)/(R*T)) + R*log(V(x1, x2)-b(x1, x2))+2*da_alpha_dT(x1, x2)*atanh((2*V(x1, x2)+delta(x1, x2))/sqrt(delta(x1, x2)**2-4*epsilon(x1, x2)))/sqrt(delta(x1, x2)**2-4*epsilon(x1, x2))-R*log(V(x1, x2)) # doctest:+SKIP
H_dep = P*V(x1, x2) - R*T + 2*atanh((2*V(x1, x2)+delta(x1, x2))/sqrt(delta(x1, x2)**2-4*epsilon(x1, x2)))*(da_alpha_dT(x1, x2)*T-a_alpha(x1, x2))/sqrt(delta(x1, x2)**2-4*epsilon(x1, x2)) # doctest:+SKIP
G_dep = simplify(H_dep - T*S_dep) # doctest:+SKIP
V_dep = V(x1, x2) - R*T/P
U_dep = H_dep - P*V_dep
A_dep = simplify(U_dep - T*S_dep)
'''
T, P = self.T, self.P
b = self.b
N = self.N
RT = T*R
hess = []
for i in range(N):
row = []
for j in range(N):
x0 = V
x3 = b
x4 = x0 - x3
x5 = d2Vs[i][j]
x6 = R*T
x7 = d_Vs[i]
x8 = d_Vs[j]
x9 = self.delta
x10 = self.epsilon
x11 = -4*x10 + x9**2
x12 = 1/sqrt(x11)
x13 = self.a_alpha
x14 = 2*x0
x15 = x14 + x9
x16 = catanh(x12*x15).real
x17 = 2*x16
x18 = d_deltas[i]
x19 = x18*x9 - 2*d_epsilons[i]
x20 = da_alphas[j]
x21 = x17/x11**(3/2)
x22 = d_deltas[j]
x23 = x22*x9 - 2*d_epsilons[j]
x24 = da_alphas[i]
x25 = d2_deltas[i][j]
x26 = x18*x22 + x25*x9 - 2*d2_epsilons[i][j]
x27 = x13*x23
x28 = 2*x7
x29 = 1/x11
x30 = x29*x9
x31 = x19*x29
x32 = x14*x31 - x18 + x19*x30 - x28
x33 = x15**2*x29 - 1
x34 = 2/x33
x35 = x29*x34
x36 = 2*x8
x37 = x23*x29
x38 = x14*x37 - x22 + x23*x30 - x36
x39 = x11**(-2)
x40 = x19*x39
x41 = x13*x38
x42 = x32*x39
x43 = x23*x40
v = (-x12*x17*d2a_alphas[i][j] + x13*x21*x26 - x13*x35*(-6*x0*x43
+ x14*x26*x29 + x18*x37 + x22*x31 - x25 + x26*x30 + x28*x37
+ x31*x36 - 3*x43*x9 - 2*x5) - 4*x15*x41*x42/x33**2
+ x19*x20*x21 - x20*x32*x35 + x21*x23*x24 - x24*x35*x38 + x27*x34*x42
+ x34*x40*x41 - x6*(x5 - d2bs[i][j])/x4 + x6*(x7 - dbs[i])*(x8 - dbs[j])/x4**2 - 6*x16*x19*x27/x11**(5/2.))
row.append(v)
hess.append(row)
return hess
def d2A_dep_dninjs(self, Z):
V = Z*self.T*R/self.P
dV_dns = self.dV_dns(Z)
d2Vs = self.d2V_dninjs(Z)
depsilon_dns = self.depsilon_dns
d2epsilon_dninjs = self.d2epsilon_dninjs
ddelta_dns = self.ddelta_dns
d2delta_dninjs = self.d2delta_dninjs
db_dns = self.db_dns
d2bs = self.d2b_dninjs
da_alpha_dns = self.da_alpha_dns
d2a_alpha_dninjs = self.d2a_alpha_dninjs
return self._d2_A_dep_d2_helper(V=V, d2Vs=d2Vs, d_Vs=dV_dns, dbs=db_dns, d2bs=d2bs,
d_epsilons=depsilon_dns, d2_epsilons=d2epsilon_dninjs,
d_deltas=ddelta_dns, d2_deltas=d2delta_dninjs,
da_alphas=da_alpha_dns, d2a_alphas=d2a_alpha_dninjs)
    def dA_dep_dns_Vt(self, phase):
#        pass
        r'''First mole-number derivatives of the departure Helmholtz energy
        at constant temperature and constant total volume `Vt` of the
        requested phase. The expression below was generated with SymPy's
        common subexpression elimination from this derivation:

        from sympy import *
        Vt, P, T, R, n1, n2, n3 = symbols('Vt, P, T, R, n1, n2, n3') # doctest:+SKIP
        P, V, a_alpha, delta, epsilon, b = symbols('P, V, a\ \\alpha, delta, epsilon, b', cls=Function) # doctest:+SKIP
        da_alpha_dT, d2a_alpha_dT2 = symbols('da_alpha_dT, d2a_alpha_dT2', cls=Function) # doctest:+SKIP
        ns = [n1, n2, n3]
        S_dep = R*log(P(n1, n2, n3)*V(n1, n2, n3)/(R*T)) + R*log(V(n1, n2, n3)-b(n1, n2, n3))+2*da_alpha_dT(n1, n2, n3)*atanh((2*V(n1, n2, n3)+delta(n1, n2, n3))/sqrt(delta(n1, n2, n3)**2-4*epsilon(n1, n2, n3)))/sqrt(delta(n1, n2, n3)**2-4*epsilon(n1, n2, n3))-R*log(V(n1, n2, n3))
        H_dep = P(n1, n2, n3)*V(n1, n2, n3) - R*T + 2*atanh((2*V(n1, n2, n3)+delta(n1, n2, n3))/sqrt(delta(n1, n2, n3)**2-4*epsilon(n1, n2, n3)))*(da_alpha_dT(n1, n2, n3)*T-a_alpha(n1, n2, n3))/sqrt(delta(n1, n2, n3)**2-4*epsilon(n1, n2, n3))
        G_dep = simplify(H_dep - T*S_dep)
        V_dep = V(n1, n2, n3) - R*T/P(n1, n2, n3)
        U_dep = H_dep - P(n1, n2, n3)*V_dep
        A_dep = simplify(U_dep - T*S_dep)
        expr = diff(A_dep, n1)
        for ni in ns:
            expr = expr.subs(Derivative(V(n1, n2, n3), ni), -Vt)
        expr = simplify(expr)
        cse(expr, optimizations='basic')
        '''
        # Select the total volume of the requested phase root.
        if phase == 'g':
            Vt = self.V_g
        else:
            Vt = self.V_l
        T, N = self.T, self.N
        b = self.b
        a_alpha = self.a_alpha
        epsilon = self.epsilon
        depsilon_dns = self.depsilon_dns
        ddelta_dns = self.ddelta_dns
        db_dns = self.db_dns
        da_alpha_dns = self.da_alpha_dns
        dP_dns_Vt = self.dP_dns_Vt(phase)
        # x0..x19 are composition-independent CSE intermediates.
        x0 = self.P
        x1 = Vt
        x2 = self.b
        x3 = x1 - x2
        x4 = self.delta
        x5 = x4**2
        x6 = self.epsilon
        x7 = 4*x6
        x8 = x5 - x7
        x9 = x8**(7/2)
        x10 = 2*x1
        x11 = x10 + x4
        x12 = x11**2 - x5 + x7
        x13 = Vt*x0
        x14 = x12*x3
        x15 = R*T*x9
        x16 = x14*x15
        x17 = self.a_alpha
        x18 = x0*x10
        # catanh is the complex atanh; only the real part is used so the
        # expression stays valid when the argument magnitude exceeds 1.
        x19 = x14*catanh(x11*x8**-0.5).real
        jac = []
        for i in range(N):
            # Per-component terms: delta/epsilon/a_alpha/b/P derivatives.
            x20 = ddelta_dns[i]
            x21 = x20*x4 - 2*depsilon_dns[i]
            x22 = x17*x18
            v = (-(-x0*x1*x12*x15*(Vt + db_dns[i]) + x13*x16 - x16*(-x1*dP_dns_Vt[i] + x13)
                + x18*x19*x8**3*da_alpha_dns[i] - x19*x21*x22*x8**2
                + x22*x3*x8**(5/2)*(x11*x21 + x8*(2*Vt - x20)))/(x0*x1*x12*x3*x9))
            jac.append(v)
        return jac
def d2A_dep_dninjs_Vt(self, phase):
if phase == 'g':
Vt = self.V_g
else:
Vt = self.V_l
T, N = self.T, self.N
b = self.b
a_alpha = self.a_alpha
epsilon = self.epsilon
depsilon_dns = self.depsilon_dns
ddelta_dns = self.ddelta_dns
db_dns = self.db_dns
da_alpha_dns = self.da_alpha_dns
d2delta_dninjs = self.d2delta_dninjs
d2epsilon_dninjs = self.d2epsilon_dninjs
d2bs = self.d2b_dninjs
d2a_alpha_dninjs = self.d2a_alpha_dninjs
dP_dns_Vt = self.dP_dns_Vt(phase)
d2P_dninjs_Vt = self.d2P_dninjs_Vt(phase)
hess = [[0.0]*N for i in range(N)]
for i in range(N):
for j in range(i+1):
x0 = self.P
x1 = x0**2
x2 = Vt#V(n1, n2, n3)
x3 = x2**2
x4 = self.b
x5 = x2 - x4
x6 = x5**2
x7 = self.delta
x8 = x7**2
x9 = self.epsilon
x10 = 4*x9
x11 = -x10 + x8
x12 = x11**(25/2)
x13 = 2*x2
x14 = x13 + x7
x15 = x10 + x14**2 - x8
x16 = x15**2
x17 = x1*x6
x18 = R*T*x12*x16
x19 = x17*x18
x20 = x1*x18*x3
x21 = Vt*x0
x22 = dP_dns_Vt[i]
x23 = -x2*x22 + x21
x24 = 2*Vt
x25 = dP_dns_Vt[j]
x26 = x18*x2*x6
x27 = self.a_alpha
x28 = x17*x3
x29 = 2*x28
x30 = x16*catanh(x14/sqrt(x11)).real
x31 = x29*x30
x32 = ddelta_dns[i]
x33 = x32*x7 - 2*depsilon_dns[i]
x34 = ddelta_dns[j]
x35 = x34*x7 - 2*depsilon_dns[j]
x36 = x33*x35
x37 = da_alpha_dns[j]
x38 = da_alpha_dns[i]
x39 = d2delta_dninjs[i][j]
x40 = x32*x34 + x39*x7 - 2*d2epsilon_dninjs[i][j]
x41 = x11*(x24 - x32) + x13*x33 + x33*x7
x42 = x11*(x24 - x34) + x13*x35 + x35*x7
x43 = x11**(21/2)*x27
x44 = x15*x29
x45 = x43*x44
v = (-(Vt**2*x19 - Vt*x13*x19 + x0*x26*(-Vt*x22 - Vt*x25 + x0*x24
+ x2*d2P_dninjs_Vt[i][j]) + x11**(23/2)*x44*(x37*x41 + x38*x42)
+ x11**12*x31*d2a_alpha_dninjs[i][j] - x11**11*x31*(x27*x40 + x33*x37 + x35*x38)
+ 6*x11**10*x27*x28*x30*x36 + 4*x14*x28*x41*x42*x43 - x18*x21*x23*x6
+ x20*x5*(x24 - d2bs[i][j]) - x20*(Vt + db_dns[i])*(Vt + db_dns[j]) + x23*x25*x26
- x45*(x33*x42 + x35*x41) - x45*(x11**2*(4*Vt + x39) - x11*(x13*x40 - x24*x33
- x24*x35 + x32*x35 + x33*x34 + x40*x7) + 3*x14*x36))/(x1*x12*x16*x3*x6))
hess[i][j] = hess[j][i] = v
return hess
# @property
# def SCp0_l(self):
# S_dep = self.S_dep_l
# S_dep -= R*sum([zi*log(zi) for zi in self.zs if zi > 0.0]) # ideal composition entropy composition
# S_dep -= R*log(self.P/101325.0)
# return S_dep
#
# @property
# def ACp0_l(self):
# return self.A_dep_l - self.T*(self.SCp0_l - self.S_dep_l)
#
# @property
# def SCp0_g(self):
# S_dep = self.S_dep_g
# S_dep -= R*sum([zi*log(zi) for zi in self.zs if zi > 0.0]) # ideal composition entropy composition
# S_dep -= R*log(self.P/101325.0)
# return S_dep
#
# @property
# def ACp0_g(self):
# return self.A_dep_g - self.T*(self.SCp0_g - self.S_dep_g)
#
# def Scomp(self, phase):
# v = self.T*R*sum([zi*log(zi) for zi in self.zs if zi > 0.0]) # ideal composition entropy composition
# v += R*self.T*log(self.P/101325.0)
# return v
#
# @property
# def HCp0_g(self):
# return self.H_dep_g
#
# @property
# def HCp0_l(self):
# return self.H_dep_l
#
# @property
# def GCp0_g(self):
# return self.HCp0_g - self.T*self.SCp0_g
#
# @property
# def GCp0_l(self):
# return self.HCp0_l - self.T*self.SCp0_l
def dScomp_dns(self, phase):
dP_dns_Vt = self.dP_dns_Vt(phase)
mRT = -R*self.T
zs, N = self.zs, self.N
logzs = [log(zi) for zi in zs]
tot = 0.0
for i in range(N):
tot += zs[i]*logzs[i]
const = R*self.T/self.P
return [mRT*(tot - logzs[i]) + const*dP_dns_Vt[i] for i in range(N)]
def d2Scomp_dninjs(self, phase):
'''P_ref = symbols('P_ref')
diff(R*T*log(P(n1, n2, n3)/P_ref), n1, n2)
'''
dP_dns_Vt = self.dP_dns_Vt(phase)
d2P_dninjs_Vt = self.d2P_dninjs_Vt(phase)
P = self.P
RT = R*self.T
const = RT/P
zs, N = self.zs, self.N
logzs = [log(zi) for zi in zs]
hess = []
for i in range(N):
row = []
for j in range(N):
t = sum(2.0*zs[i]*logzs[i] + 3.0*zs[i] for i in range(N))
if i != j:
v = RT*(t - logzs[i] - logzs[j] -4.0)
else:
v = RT*(t - 2*logzs[i] - 3 - (zs[i] - 1.0)/zs[i])
v += const*(d2P_dninjs_Vt[i][j] - dP_dns_Vt[i]*dP_dns_Vt[j]/P)
row.append(v)
hess.append(row)
return hess
# TODO fix the implementation below, make it work
tot = 0.0
for i in range(N):
tot += zs[i]*logzs[i]
tot2m1 = tot + tot - 1.0
hess = [[RT*(tot2m1 - logzs[i] - logzs[j]) for i in range(N)] for j in range(N)]
return hess
# return d2xs_to_dxdn_partials(hess, zs)
# return d2ns_to_dn2_partials(hess, self.dScomp_dns)
def d2A_dninjs_Vt(self, phase):
if phase == 'g':
Vt = self.V_g
else:
Vt = self.V_l
N, zs = self.N, self.zs
d2A_dep_dninjs_Vt = self.d2A_dep_dninjs_Vt(phase)
d2Scomp_dninjs = self.d2Scomp_dninjs
hess = [[0.0]*N for i in range(N)]
for i in range(N):
for j in range(N):
hess[i][j] = d2Scomp_dninjs[i][j] + d2A_dep_dninjs_Vt[i][j]
return hess
def d2nA_dninjs_Vt(self, phase):
d2ns = [[i+j for i, j in zip(r1, r2)] for r1, r2 in zip(self.d2A_dep_dninjs_Vt(phase), self.d2Scomp_dninjs(phase))]
dns = [i+j for i, j in zip(self.dA_dep_dns_Vt(phase), self.dScomp_dns(phase))]
return d2ns_to_dn2_partials(d2ns, dns)
def d2A_dninjs_Vt_another(self, phase):
d2ns = [[i+j for i, j in zip(r1, r2)] for r1, r2 in zip(self.d2A_dep_dninjs_Vt(phase), self.d2Scomp_dninjs(phase))]
return d2ns
# dns = [i+j for i, j in zip(self.dA_dep_dns_Vt(phase), self.dScomp_dns(phase))]
# return d2ns_to_dn2_partials(d2ns, dns)
def _d_main_derivatives_and_departures_dnx(self, V, db_dns, ddelta_dns,
depsilon_dns, da_alpha_dns,
da_alpha_dT_dns,
d2a_alpha_dT2_dns, dV_dns):
T = self.T
Z = (self.P*V)/(R*T)
x0 = self.a_alpha
x2 = self.epsilon
x3 = V
x4 = self.delta
x5 = x2 + x3**2 + x3*x4
x6 = 1/x5
x7 = self.b
x8 = x3 - x7
x14 = x5**(-2)
x15 = self.da_alpha_dT
x16 = x14*x15
x18 = 2*x3 + x4
x23 = x5**(-3)
x24 = 2*x23
x27 = x18**2
x28 = x18*x24
dndP_dT_dsn = []
dndP_dV_dns = []
dnd2P_dT2_dns = []
dnd2P_dV2_dns = []
dnd2P_dTdV_dns = []
for i in range(self.N):
x1 = da_alpha_dT_dns[i]
x9 = dV_dns[i]
x10 = R*(x9 - db_dns[i])
x17 = 2*x10/x8**3
x12 = 2*x9
x11 = ddelta_dns[i]
x21 = x11 + x12
x22 = x0*x21
x13 = x11*x3 + x12*x3 + x4*x9 + depsilon_dns[i]
x25 = x0*x13
x26 = x24*x25
x19 = da_alpha_dns[i]
x20 = x14*x19
dndP_dT = -x1*x6 - x10/x8**2 + x13*x16
dndP_dT_dsn.append(dndP_dT)
dndP_dV = T*x17 + x14*x22 + x18*x20 - x18*x26
dndP_dV_dns.append(dndP_dV)
d2a_alpha_dT2_dn = d2a_alpha_dT2_dns[i]
dnd2P_dT2 = x6*(x13*x6*self.d2a_alpha_dT2 - d2a_alpha_dT2_dn)
dnd2P_dT2_dns.append(dnd2P_dT2)
dnd2P_dV2 = -6*T*x10/x8**4 - 2*x19*x23*x27 + 2*x20 - 2*x22*x28 + 6*x25*x27/x5**4 - 2*x26
dnd2P_dV2_dns.append(dnd2P_dV2)
dnd2P_dTdV = x1*x14*x18 - x13*x15*x28 + x16*x21 + x17
dnd2P_dTdV_dns.append(dnd2P_dTdV)
return dndP_dT_dsn, dndP_dV_dns, dnd2P_dT2_dns, dnd2P_dV2_dns, dnd2P_dTdV_dns
def _d_main_derivatives_and_departures_dn(self, V):
Z = (self.P*V)/(R*self.T)
db_dns = self.db_dns
ddelta_dns = self.ddelta_dns
depsilon_dns = self.depsilon_dns
dV_dns = self.dV_dns(Z)
da_alpha_dns = self.da_alpha_dns
da_alpha_dT_dns = self.da_alpha_dT_dns
d2a_alpha_dT2_dns = self.d2a_alpha_dT2_dns
return self._d_main_derivatives_and_departures_dnx(V, db_dns, ddelta_dns,
depsilon_dns, da_alpha_dns,
da_alpha_dT_dns, d2a_alpha_dT2_dns,
dV_dns)
def _d_main_derivatives_and_departures_dz(self, V):
Z = (self.P*V)/(R*self.T)
db_dzs = self.db_dzs
ddelta_dzs = self.ddelta_dzs
depsilon_dzs = self.depsilon_dzs
dV_dzs = self.dV_dzs(Z)
da_alpha_dzs = self.da_alpha_dzs
da_alpha_dT_dzs = self.da_alpha_dT_dzs
d2a_alpha_dT2_dzs = self.d2a_alpha_dT2_dzs
return self._d_main_derivatives_and_departures_dnx(V, db_dzs, ddelta_dzs,
depsilon_dzs, da_alpha_dzs,
da_alpha_dT_dzs, d2a_alpha_dT2_dzs,
dV_dzs)
def _dnz_derivatives_and_departures(self, V, n=True):
try:
if V == self.V_l:
l = True
else:
l = False
except:
l = False
if n:
f = self._d_main_derivatives_and_departures_dn
else:
f = self._d_main_derivatives_and_departures_dz
d2P_dTdns, d2P_dVdns, d3P_dT2dns, d3P_dV2dns, d3P_dTdVdns = f(V)
# Needed in calculation routines
if l:
(dP_dT, dP_dV, dV_dT, dV_dP, dT_dV, dT_dP, d2P_dT2, d2P_dV2, d2V_dT2,
d2V_dP2, d2T_dV2, d2T_dP2, d2V_dPdT, d2P_dTdV, d2T_dPdV) = (self.dP_dT_l,
self.dP_dV_l, self.dV_dT_l, self.dV_dP_l, self.dT_dV_l, self.dT_dP_l,
self.d2P_dT2_l, self.d2P_dV2_l, self.d2V_dT2_l, self.d2V_dP2_l, self.d2T_dV2_l,
self.d2T_dP2_l, self.d2V_dPdT_l, self.d2P_dTdV_l, self.d2T_dPdV_l)
else:
(dP_dT, dP_dV, dV_dT, dV_dP, dT_dV, dT_dP, d2P_dT2, d2P_dV2, d2V_dT2,
d2V_dP2, d2T_dV2, d2T_dP2, d2V_dPdT, d2P_dTdV, d2T_dPdV) = (self.dP_dT_g,
self.dP_dV_g, self.dV_dT_g, self.dV_dP_g, self.dT_dV_g, self.dT_dP_g,
self.d2P_dT2_g, self.d2P_dV2_g, self.d2V_dT2_g, self.d2V_dP2_g, self.d2T_dV2_g,
self.d2T_dP2_g, self.d2V_dPdT_g, self.d2P_dTdV_g, self.d2T_dPdV_g)
d2V_dTdns = []
d2V_dPdns = []
d2T_dVdns = []
d2T_dPdns = []
d3T_dP2dns = []
d3V_dP2dns = []
d3T_dV2dns = []
d3V_dT2dns = []
d3T_dPdVdns = []
d3V_dPdTdns = []
for i in range(self.N):
d2P_dTdn, d2P_dVdn, d3P_dT2dn, d3P_dV2dn, d3P_dTdVdn = (
d2P_dTdns[i], d2P_dVdns[i], d3P_dT2dns[i], d3P_dV2dns[i], d3P_dTdVdns[i])
# First derivative - one over the other
d2V_dTdn = dP_dT*d2P_dVdn/dP_dV**2 - d2P_dTdn/dP_dV
d2V_dTdns.append(d2V_dTdn)
# dP_dT # f
# dP_dV # g
# Second derivative - one over the other
d2V_dPdn = dV_dT*d2P_dTdn/dP_dT**2 - d2V_dTdn/dP_dT
d2V_dPdns.append(d2V_dPdn)
# f = dV_dT
# g = dP_dT
# Third derivative - inverse of other expression
d2T_dVdn = -d2V_dTdn/dV_dT**2
d2T_dVdns.append(d2T_dVdn)
# Fourth derivative - inverse of other expression
d2T_dPdn = -d2P_dTdn/dP_dT**2
d2T_dPdns.append(d2T_dPdn)
# Fifth derivative - starting to get big
f = d2P_dT2
df = d3P_dT2dn
g = dP_dT
dg = d2P_dTdn
d3T_dP2dn = 3*f*dg/g**4 - df/g**3
d3T_dP2dns.append(d3T_dP2dn)
# Sixth derivative
f = d2P_dV2
df = d3P_dV2dn
g = dP_dV
dg = d2P_dVdn
d3V_dP2dn = 3*f*dg/g**4 - df/g**3
d3V_dP2dns.append(d3V_dP2dn)
# Seventh - crazy
f = d2P_dV2
df = d3P_dV2dn
g = dP_dT
dg = d2P_dTdn
h = dP_dV
dh = d2P_dVdn
k = d2P_dTdV
dk = d3P_dTdVdn
j = d2P_dT2
dj = d3P_dT2dn
d3T_dV2dn = (f*g**2*dg - g**3*df + 2*g**2*h*dk + 2*g**2*k*dh - g*h**2*dj - 4*g*h*k*dg - 2*g*h*j*dh + 3*h**2*j*dg)/g**4
d3T_dV2dns.append(d3T_dV2dn)
# ekghth - crazy
f = d2P_dT2
df = d3P_dT2dn
g = dP_dV
dg = d2P_dVdn
h = dP_dT
dh = d2P_dTdn
k = d2P_dTdV
dk = d3P_dTdVdn
j = d2P_dV2
dj = d3P_dV2dn
d3V_dT2dn = (f*g**2*dg - g**3*df + 2*g**2*h*dk + 2*g**2*k*dh - g*h**2*dj - 4*g*h*k*dg - 2*g*h*j*dh + 3*h**2*j*dg)/g**4
d3V_dT2dns.append(d3V_dT2dn)
# nknth
f = d2P_dTdV
df = d3P_dTdVdn
g = dP_dT
dg = d2P_dTdn
h = dP_dV
dh = d2P_dVdn
k = d2P_dT2
dk = d3P_dT2dn
j = dP_dT
dj = d2P_dTdn
d3T_dPdVdn = 3*(f*g - h*k)*dj/j**4 - (f*dg + g*df - h*dk- k*dh)/j**3
d3T_dPdVdns.append(d3T_dPdVdn)
# tenth
f = d2P_dTdV
df = d3P_dTdVdn
g = dP_dV
dg = d2P_dVdn
h = dP_dT
dh = d2P_dTdn
k = d2P_dV2
dk = d3P_dV2dn
j = dP_dV
dj = d2P_dVdn
d3V_dPdTdn = 3*(f*g - h*k)*dj/j**4 - (f*dg + g*df - h*dk- k*dh)/j**3
d3V_dPdTdns.append(d3V_dPdTdn)
return (d2P_dTdns, d2P_dVdns, d2V_dTdns, d2V_dPdns, d2T_dVdns, d2T_dPdns,
d3P_dT2dns, d3P_dV2dns, d3V_dT2dns, d3V_dP2dns, d3T_dV2dns, d3T_dP2dns,
d3V_dPdTdns, d3P_dTdVdns, d3T_dPdVdns)
    def set_dnzs_derivatives_and_departures(self, n=True, x=True, only_l=False,
                                            only_g=False):
        r'''Sets a number of mole number and/or composition partial derivatives
        of thermodynamic partial derivatives.
        The list of properties set is as follows, with all properties suffixed
        with '_l' or '_g'
        if `n` is True:
        d2P_dTdns, d2P_dVdns, d2V_dTdns, d2V_dPdns, d2T_dVdns, d2T_dPdns,
        d3P_dT2dns, d3P_dV2dns, d3V_dT2dns, d3V_dP2dns, d3T_dV2dns, d3T_dP2dns,
        d3V_dPdTdns, d3P_dTdVdns, d3T_dPdVdns, dV_dep_dns, dG_dep_dns,
        dH_dep_dns, dU_dep_dns, dS_dep_dns, dA_dep_dns
        if `x` is True:
        d2P_dTdzs, d2P_dVdzs, d2V_dTdzs, d2V_dPdzs, d2T_dVdzs, d2T_dPdzs,
        d3P_dT2dzs, d3P_dV2dzs, d3V_dT2dzs, d3V_dP2dzs, d3T_dV2dzs, d3T_dP2dzs,
        d3V_dPdTdzs, d3P_dTdVdzs, d3T_dPdVdzs, dV_dep_dzs, dG_dep_dzs,
        dH_dep_dzs, dU_dep_dzs, dS_dep_dzs, dA_dep_dzs
        Parameters
        ----------
        n : bool, optional
            Whether or not to set the mole number derivatives (sums up to one),
            [-]
        x : bool, optional
            Whether or not to set the composition derivatives (does not sum up
            to one), [-]
        only_l : bool, optional
            Whether or not to set only the liquid-like phase properties (if
            there are two phases), [-]
        only_g : bool, optional
            Whether or not to set only the gas-like phase properties (if
            there are two phases), [-]
        Notes
        -----
        '''
        N = self.N
        zs = self.zs
        T, P = self.T, self.P
        # Decide which derivative families (mole-number and/or composition)
        # to compute in the main loop below.
        if n and x:
            ns = [True, False]
        elif n:
            ns = [True]
        elif x:
            ns = [False]
        else:
            return
        if only_l:
            phases = ['l']
        elif only_g:
            phases = ['g']
        else:
            phases = ['l', 'g']
        # NOTE: the loop variable deliberately rebinds the parameter `n`;
        # True selects mole-number derivatives, False mole-fraction ones.
        for n in ns:
            for phase in phases:
                if phase == 'g':
                    Z, V = self.Z_g, self.V_g
                else:
                    Z, V = self.Z_l, self.V_l
                if n:
                    V_fun, G_fun, H_fun = self.dV_dns, self.dG_dep_dns, self.dH_dep_dns
                else:
                    V_fun, G_fun, H_fun = self.dV_dzs, self.dG_dep_dzs, self.dH_dep_dzs
                (d2P_dTdns, d2P_dVdns, d2V_dTdns, d2V_dPdns, d2T_dVdns, d2T_dPdns,
                 d3P_dT2dns, d3P_dV2dns, d3V_dT2dns, d3V_dP2dns, d3T_dV2dns, d3T_dP2dns,
                 d3V_dPdTdns, d3P_dTdVdns, d3T_dPdVdns) = self._dnz_derivatives_and_departures(V, n=n)
                # V
                dV_dep_dns = V_fun(Z)
                # G
                dG_dep_dns = G_fun(Z)
                # H
                dH_dep_dns = H_fun(Z)
                # U: U_dep = H_dep - P*V_dep, differentiated termwise.
                dU_dep_dns = [dH_dep_dns[i] - P*dV_dep_dns[i] for i in range(N)]
                # S: from G_dep = H_dep - T*S_dep.
                dS_dep_dns = [(dG_dep_dns[i] - dH_dep_dns[i])/-T for i in range(N)]
                # A: A_dep = U_dep - T*S_dep, differentiated termwise.
                dA_dep_dns = [dU_dep_dns[i] - T*dS_dep_dns[i] for i in range(N)]
                # Store the results on the appropriate suffixed attributes.
                if n and phase == 'l':
                    self.d2P_dTdns_l, self.d2P_dVdns_l, self.d2V_dTdns_l = d2P_dTdns, d2P_dVdns, d2V_dTdns
                    self.d2V_dPdns_l, self.d2T_dVdns_l, self.d2T_dPdns_l = d2V_dPdns, d2T_dVdns, d2T_dPdns
                    self.d3P_dT2dns_l, self.d3P_dV2dns_l, self.d3V_dT2dns_l = d3P_dT2dns, d3P_dV2dns, d3V_dT2dns
                    self.d3V_dP2dns_l, self.d3T_dV2dns_l, self.d3T_dP2dns_l = d3V_dP2dns, d3T_dV2dns, d3T_dP2dns
                    self.d3V_dPdTdns_l, self.d3P_dTdVdns_l, self.d3T_dPdVdns_l = d3V_dPdTdns, d3P_dTdVdns, d3T_dPdVdns
                    self.dV_dep_dns_l, self.dG_dep_dns_l, self.dH_dep_dns_l = dV_dep_dns, dG_dep_dns, dH_dep_dns
                    self.dU_dep_dns_l, self.dS_dep_dns_l, self.dA_dep_dns_l = dU_dep_dns, dS_dep_dns, dA_dep_dns
                if n and phase == 'g':
                    self.d2P_dTdns_g, self.d2P_dVdns_g, self.d2V_dTdns_g = d2P_dTdns, d2P_dVdns, d2V_dTdns
                    self.d2V_dPdns_g, self.d2T_dVdns_g, self.d2T_dPdns_g = d2V_dPdns, d2T_dVdns, d2T_dPdns
                    self.d3P_dT2dns_g, self.d3P_dV2dns_g, self.d3V_dT2dns_g = d3P_dT2dns, d3P_dV2dns, d3V_dT2dns
                    self.d3V_dP2dns_g, self.d3T_dV2dns_g, self.d3T_dP2dns_g = d3V_dP2dns, d3T_dV2dns, d3T_dP2dns
                    self.d3V_dPdTdns_g, self.d3P_dTdVdns_g, self.d3T_dPdVdns_g = d3V_dPdTdns, d3P_dTdVdns, d3T_dPdVdns
                    self.dV_dep_dns_g, self.dG_dep_dns_g, self.dH_dep_dns_g = dV_dep_dns, dG_dep_dns, dH_dep_dns
                    self.dU_dep_dns_g, self.dS_dep_dns_g, self.dA_dep_dns_g = dU_dep_dns, dS_dep_dns, dA_dep_dns
                if not n and phase == 'g':
                    self.d2P_dTdzs_g, self.d2P_dVdzs_g, self.d2V_dTdzs_g = d2P_dTdns, d2P_dVdns, d2V_dTdns
                    self.d2V_dPdzs_g, self.d2T_dVdzs_g, self.d2T_dPdzs_g = d2V_dPdns, d2T_dVdns, d2T_dPdns
                    self.d3P_dT2dzs_g, self.d3P_dV2dzs_g, self.d3V_dT2dzs_g = d3P_dT2dns, d3P_dV2dns, d3V_dT2dns
                    self.d3V_dP2dzs_g, self.d3T_dV2dzs_g, self.d3T_dP2dzs_g = d3V_dP2dns, d3T_dV2dns, d3T_dP2dns
                    self.d3V_dPdTdzs_g, self.d3P_dTdVdzs_g, self.d3T_dPdVdzs_g = d3V_dPdTdns, d3P_dTdVdns, d3T_dPdVdns
                    self.dV_dep_dzs_g, self.dG_dep_dzs_g, self.dH_dep_dzs_g = dV_dep_dns, dG_dep_dns, dH_dep_dns
                    self.dU_dep_dzs_g, self.dS_dep_dzs_g, self.dA_dep_dzs_g = dU_dep_dns, dS_dep_dns, dA_dep_dns
                if not n and phase == 'l':
                    self.d2P_dTdzs_l, self.d2P_dVdzs_l, self.d2V_dTdzs_l = d2P_dTdns, d2P_dVdns, d2V_dTdns
                    self.d2V_dPdzs_l, self.d2T_dVdzs_l, self.d2T_dPdzs_l = d2V_dPdns, d2T_dVdns, d2T_dPdns
                    self.d3P_dT2dzs_l, self.d3P_dV2dzs_l, self.d3V_dT2dzs_l = d3P_dT2dns, d3P_dV2dns, d3V_dT2dns
                    self.d3V_dP2dzs_l, self.d3T_dV2dzs_l, self.d3T_dP2dzs_l = d3V_dP2dns, d3T_dV2dns, d3T_dP2dns
                    self.d3V_dPdTdzs_l, self.d3P_dTdVdzs_l, self.d3T_dPdVdzs_l = d3V_dPdTdns, d3P_dTdVdns, d3T_dPdVdns
                    self.dV_dep_dzs_l, self.dG_dep_dzs_l, self.dH_dep_dzs_l = dV_dep_dns, dG_dep_dns, dH_dep_dns
                    self.dU_dep_dzs_l, self.dS_dep_dzs_l, self.dA_dep_dzs_l = dU_dep_dns, dS_dep_dns, dA_dep_dns
    def dlnphis_dP(self, phase):
        r'''Generic formula for calculating the pressure derivaitve of
        log fugacity coefficients for each species in a mixture. Verified
        numerically. Applicable to all cubic equations of state which can be
        cast in the form used here.
        Normally this routine is slower than EOS-specific ones, as it does not
        make assumptions that certain parameters are zero or equal to other
        parameters.
        .. math::
            \left(\frac{\partial \ln \phi_i}{\partial P}\right)_{T,
            nj \ne i} = \frac{G_{dep}}{\partial P}_{T, n}
            + \left(\frac{\partial^2 \ln \phi}{\partial P \partial n_i}
            \right)_{T, P, n_{j \ne i}}
        Parameters
        ----------
        phase : str
            One of 'l' or 'g', [-]
        Returns
        -------
        dlnphis_dP : float
            Pressure derivatives of log fugacity coefficient for each species,
            [1/Pa]
        Notes
        -----
        This expression for the partial derivative of the mixture `lnphi` with
        respect to pressure and mole number can be derived as follows; to
        convert to the partial molar `lnphi` pressure and temperature
        derivative, add ::math::`\frac{G_{dep}/(RT)}{\partial P}_{T, n}`.
        >>> from sympy import * # doctest:+SKIP
        >>> P, T, R, n = symbols('P, T, R, n') # doctest:+SKIP
        >>> a_alpha, a, delta, epsilon, V, b, da_alpha_dT, d2a_alpha_dT2 = symbols('a_alpha, a, delta, epsilon, V, b, da_alpha_dT, d2a_alpha_dT2', cls=Function) # doctest:+SKIP
        >>> S_dep = R*log(P*V(n, P)/(R*T)) + R*log(V(n, P)-b(n))+2*da_alpha_dT(n, T)*atanh((2*V(n, P)+delta(n))/sqrt(delta(n)**2-4*epsilon(n)))/sqrt(delta(n)**2-4*epsilon(n))-R*log(V(n, P)) # doctest:+SKIP
        >>> H_dep = P*V(n, P) - R*T + 2*atanh((2*V(n, P)+delta(n))/sqrt(delta(n)**2-4*epsilon(n)))*(da_alpha_dT(n, T)*T-a_alpha(n, T))/sqrt(delta(n)**2-4*epsilon(n)) # doctest:+SKIP
        >>> G_dep = H_dep - T*S_dep # doctest:+SKIP
        >>> lnphi = simplify(G_dep/(R*T)) # doctest:+SKIP
        >>> diff(diff(lnphi, P), n) # doctest:+SKIP
        P*Derivative(V(n, P), P, n)/(R*T) + Derivative(V(n, P), P, n)/V(n, P) - Derivative(V(n, P), P)*Derivative(V(n, P), n)/V(n, P)**2 - Derivative(V(n, P), P, n)/(V(n, P) - b(n)) - (-Derivative(V(n, P), n) + Derivative(b(n), n))*Derivative(V(n, P), P)/(V(n, P) - b(n))**2 + Derivative(V(n, P), n)/(R*T) - 4*(-2*delta(n)*Derivative(delta(n), n) + 4*Derivative(epsilon(n), n))*a_alpha(n, T)*Derivative(V(n, P), P)/(R*T*(1 - (2*V(n, P)/sqrt(delta(n)**2 - 4*epsilon(n)) + delta(n)/sqrt(delta(n)**2 - 4*epsilon(n)))**2)*(delta(n)**2 - 4*epsilon(n))**2) - 4*a_alpha(n, T)*Derivative(V(n, P), P, n)/(R*T*(1 - (2*V(n, P)/sqrt(delta(n)**2 - 4*epsilon(n)) + delta(n)/sqrt(delta(n)**2 - 4*epsilon(n)))**2)*(delta(n)**2 - 4*epsilon(n))) - 4*Derivative(V(n, P), P)*Derivative(a_alpha(n, T), n)/(R*T*(1 - (2*V(n, P)/sqrt(delta(n)**2 - 4*epsilon(n)) + delta(n)/sqrt(delta(n)**2 - 4*epsilon(n)))**2)*(delta(n)**2 - 4*epsilon(n))) - 4*(2*V(n, P)/sqrt(delta(n)**2 - 4*epsilon(n)) + delta(n)/sqrt(delta(n)**2 - 4*epsilon(n)))*(4*(-delta(n)*Derivative(delta(n), n) + 2*Derivative(epsilon(n), n))*V(n, P)/(delta(n)**2 - 4*epsilon(n))**(3/2) + 2*(-delta(n)*Derivative(delta(n), n) + 2*Derivative(epsilon(n), n))*delta(n)/(delta(n)**2 - 4*epsilon(n))**(3/2) + 4*Derivative(V(n, P), n)/sqrt(delta(n)**2 - 4*epsilon(n)) + 2*Derivative(delta(n), n)/sqrt(delta(n)**2 - 4*epsilon(n)))*a_alpha(n, T)*Derivative(V(n, P), P)/(R*T*(1 - (2*V(n, P)/sqrt(delta(n)**2 - 4*epsilon(n)) + delta(n)/sqrt(delta(n)**2 - 4*epsilon(n)))**2)**2*(delta(n)**2 - 4*epsilon(n))) + R*T*(P*Derivative(V(n, P), P)/(R*T) + V(n, P)/(R*T))*Derivative(V(n, P), n)/(P*V(n, P)**2) - R*T*(P*Derivative(V(n, P), P, n)/(R*T) + Derivative(V(n, P), n)/(R*T))/(P*V(n, P))
        '''
        # Select the phase root and its pressure derivatives.
        if phase == 'g':
            V = self.V_g
            Z = self.Z_g
            dV_dP = self.dV_dP_g
            dG_dep_dP = (self.dH_dep_dP_g - self.T*self.dS_dep_dP_g)/(R*self.T)
        else:
            V = self.V_l
            Z = self.Z_l
            dV_dP = self.dV_dP_l
            dG_dep_dP = (self.dH_dep_dP_l - self.T*self.dS_dep_dP_l)/(R*self.T)
        T = self.T
        P = self.P
        dV_dns = self.dV_dns(Z)
        ddelta_dns = self.ddelta_dns
        depsilon_dns = self.depsilon_dns
        da_alpha_dns = self.da_alpha_dns
        db_dns = self.db_dns
        d2V_dPdns = self._dnz_derivatives_and_departures(V)[3]# self.d2V_dPdn
        # Composition-independent CSE intermediates.
        x0 = V
        x2 = 1/(R*T)
        x3 = 1/x0
        x6 = dV_dP
        x8 = self.b
        x9 = x0 - x8
        x10 = 1/P
        x11 = self.delta
        x12 = 2*x0
        x13 = x11 + x12
        x14 = self.epsilon
        x15 = x11**2 - 4*x14
        # Guard the singular case delta**2 == 4*epsilon with a huge
        # sentinel value rather than raising.
        try:
            x16 = 1/x15
        except ZeroDivisionError:
            x16 = 1e50
        x17 = x13**2*x16 - 1
        x18 = 1/x17
        x19 = self.a_alpha
        x20 = 4*x16
        x21 = x2*x6
        x22 = x18*x21
        x25 = 8*x19*x16*x16
        t50 = 1.0/(x0*x0)
        dlnphis_dPs = []
        for i in range(self.N):
            # number dependent calculations
            x1 = dV_dns[i] # Derivative(x0, n)
            x7 = x1*t50
            x4 = d2V_dPdns[i] #Derivative(x0, P, n) # TODO calculate only this - d2V_dPdn; the T one wants d2V_dTdn
            x5 = P*x4
            x23 = ddelta_dns[i]# Derivative(x11, n)
            x24 = x11*x23 - 2.0*depsilon_dns[i]#Derivative(x14, n)
            x26 = x16*x24
            dlnphi_dP = (x1*x2 - x10*x3*(x1 + x5) + x10*x7*(P*x6 + x0)
                - x13*x21*x25*(2*x1 - x11*x26 - x12*x26 + x23)/x17**2
                + x18*x19*x2*x20*x4 + x2*x5 + x20*x22*da_alpha_dns[i]
                - x22*x24*x25 + x3*x4 - x4/x9 - x6*x7 + x6*(x1 - db_dns[i])/x9**2)
            dlnphis_dPs.append(dlnphi_dP + dG_dep_dP)
        return dlnphis_dPs
    def dlnphis_dT(self, phase):
        r'''Generic formula for calculating the temperature derivaitve of
        log fugacity coefficients for each species in a mixture. Verified
        numerically. Applicable to all cubic equations of state which can be
        cast in the form used here.
        Normally this routine is slower than EOS-specific ones, as it does not
        make assumptions that certain parameters are zero or equal to other
        parameters.
        .. math::
            \left(\frac{\partial \ln \phi_i}{\partial T}\right)_{P,
            nj \ne i} = \frac{\frac{G_{dep}}{RT}}{\partial T}_{P, n}
            + \left(\frac{\partial^2 \ln \phi}{\partial T \partial n_i}
            \right)_{P, n_{j \ne i}}
        Parameters
        ----------
        phase : str
            One of 'l' or 'g', [-]
        Returns
        -------
        dlnphis_dT : float
            Temperature derivatives of log fugacity coefficient for each species,
            [1/K]
        Notes
        -----
        This expression for the partial derivative of the mixture `lnphi` with
        respect to pressure and mole number can be derived as follows; to
        convert to the partial molar `lnphi` pressure and temperature
        derivative, add ::math::`\frac{G_{dep}/(RT)}{\partial T}_{P, n}`.
        >>> from sympy import * # doctest:+SKIP
        >>> P, T, R, n = symbols('P, T, R, n') # doctest:+SKIP
        >>> a_alpha, a, delta, epsilon, V, b, da_alpha_dT, d2a_alpha_dT2 = symbols('a_alpha, a, delta, epsilon, V, b, da_alpha_dT, d2a_alpha_dT2', cls=Function) # doctest:+SKIP
        >>> S_dep = R*log(P*V(n, T)/(R*T)) + R*log(V(n, T)-b(n))+2*da_alpha_dT(n, T)*atanh((2*V(n, T)+delta(n))/sqrt(delta(n)**2-4*epsilon(n)))/sqrt(delta(n)**2-4*epsilon(n))-R*log(V(n, T)) # doctest:+SKIP
        >>> H_dep = P*V(n, T) - R*T + 2*atanh((2*V(n, T)+delta(n))/sqrt(delta(n)**2-4*epsilon(n)))*(da_alpha_dT(n, T)*T-a_alpha(n, T))/sqrt(delta(n)**2-4*epsilon(n)) # doctest:+SKIP
        >>> G_dep = H_dep - T*S_dep # doctest:+SKIP
        >>> lnphi = simplify(G_dep/(R*T)) # doctest:+SKIP
        >>> diff(diff(lnphi, T), n) # doctest:+SKIP
        '''
        T, P, zs, N = self.T, self.P, self.zs, self.N
        # Select the phase root; dG_dep_dT follows the quotient rule on
        # G_dep/(R*T) spelled out in the string note below.
        if phase == 'g':
            V = self.V_g
            Z = self.Z_g
            dV_dT = self.dV_dT_g
            dG_dep_dT = (-T*self.dS_dep_dT_g - self.S_dep_g + self.dH_dep_dT_g)/(R*self.T)
            dG_dep_dT -= (-T*self.S_dep_g + self.H_dep_g)/(R*self.T*self.T)
        else:
            V = self.V_l
            Z = self.Z_l
            dV_dT = self.dV_dT_l
            dG_dep_dT = (-T*self.dS_dep_dT_l - self.S_dep_l + self.dH_dep_dT_l)/(R*self.T)
            dG_dep_dT -= (-T*self.S_dep_l + self.H_dep_l)/(R*self.T*self.T)
        '''R, T = symbols('R, T')
        H, S = symbols('H, S', cls=Function)
        print(diff((H(T) - T*S(T))/(R*T), T))
        # (-T*Derivative(S(T), T) - S(T) + Derivative(H(T), T))/(R*T) - (-T*S(T) + H(T))/(R*T**2)
        '''
        d2V_dTdns = self._dnz_derivatives_and_departures(V, n=True)[2]
        dV_dns = self.dV_dns(Z)
        db_dns = self.db_dns
        da_alpha_dns = self.da_alpha_dns
        da_alpha_dT_dns = self.da_alpha_dT_dns
        ddelta_dns = self.ddelta_dns
        depsilon_dns = self.depsilon_dns
        # Composition-independent CSE intermediates.
        x0 = V
        x1 = 1/x0
        x4 = T**(-2)
        x5 = 1/R
        x6 = P*x5
        x7 = 1/T
        x9 = dV_dT
        x11 = self.b
        x12 = x0 - x11
        x13 = self.a_alpha
        x15 = self.delta
        x16 = self.epsilon
        x17 = x15*x15 - 4.0*x16
        # Guard the singular case delta**2 == 4*epsilon with a tiny
        # sentinel value rather than dividing by zero below.
        if x17 == 0.0:
            x17 = 1e-100
        x18 = 1/sqrt(x17)
        x19 = 2*x0
        x20 = x15 + x19
        x21 = 2*x5
        # catanh is the complex atanh; only the real part is needed.
        x22 = x21*catanh(x18*x20).real
        x23 = x18*x22
        x24 = 1/x17
        x25 = x20**2*x24 - 1
        x26 = 1/x25
        x27 = x24*x26
        x28 = 4*x27*x5
        x29 = x7*x9
        x30 = x13*x4
        x34 = x7*self.da_alpha_dT
        x35 = 8*x13*x29*x5/x17**2
        dlnphis_dTs = []
        for i in range(N):
            # Per-component intermediates.
            x2 = d2V_dTdns[i]
            x8 = x2*x7
            x3 = dV_dns[i]
            x10 = x3/x0**2
            x14 = da_alpha_dns[i]
            x31 = ddelta_dns[i]
            x32 = x15*x31 - 2.0*depsilon_dns[i]
            x33 = x22*x32/x17**(3/2)
            x36 = x24*x32
            x37 = -x15*x36 - x19*x36 + 2.0*x3 + x31
            x38 = x21*x27*x37
            dlnphi_dT = (x1*x2 - x1*(x2 - x3*x7) - x10*x9 + x10*(-x0*x7 + x9)
                + x13*x28*x8 + x14*x23*x4 + x14*x28*x29 - x20*x35*x37/x25**2
                - x23*x7*da_alpha_dT_dns[i] - x26*x32*x35 - x3*x4*x6 - x30*x33
                - x30*x38 + x33*x34 + x34*x38 + x6*x8 - x2/x12 + x9*(x3 - db_dns[i])/x12**2)
            dlnphis_dTs.append(dlnphi_dT + dG_dep_dT)
        return dlnphis_dTs
def dlnphis_dzs(self, Z):
r'''Generic formula for calculating the mole fraction derivaitves of
log fugacity coefficients for each species in a mixture. Verified
numerically. Applicable to all cubic equations of state which can be
cast in the form used here.
.. math::
\left(\frac{\partial \ln \phi_i}{\partial z_i}\right)_{P,
z_{j \ne i}}
Parameters
----------
Z : float
Compressibility of the mixture for a desired phase, [-]
Returns
-------
dlnphis_dzs : list[list[float]]
Mole fraction derivatives of log fugacity coefficient for each
species (such that the mole fractions do not sum to 1), [-]
Notes
-----
'''
d2dxs = self.d2lnphi_dzizjs(Z)
d2ns = d2xs_to_dxdn_partials(d2dxs, self.zs)
if sefl.scalar:
return d2ns
return array(d2ns)
class EpsilonZeroMixingRules(object):
    r'''Mixin providing composition and mole-number derivatives of
    `epsilon` for equations of state in which `epsilon` is identically
    zero; consequently every derivative returned here is zero. Results are
    plain lists when ``self.scalar`` is true and numpy arrays otherwise.
    '''

    @property
    def depsilon_dzs(self):
        r'''Helper method for calculating the composition derivatives of
        `epsilon`. Note this is independent of the phase.
        .. math::
            \left(\frac{\partial \epsilon}{\partial x_i}\right)_{T, P, x_{i\ne j}}
            = 0
        Returns
        -------
        depsilon_dzs : list[float]
            Composition derivative of `epsilon` of each component, [m^6/mol^2]
        Notes
        -----
        This derivative is checked numerically.
        '''
        count = self.N
        return [0.0]*count if self.scalar else zeros(count)

    @property
    def depsilon_dns(self):
        r'''Helper method for calculating the mole number derivatives of
        `epsilon`. Note this is independent of the phase.
        .. math::
            \left(\frac{\partial \epsilon}{\partial n_i}\right)_{T, P, n_{i\ne j}}
            = 0
        Returns
        -------
        depsilon_dns : list[float]
            Composition derivative of `epsilon` of each component, [m^6/mol^3]
        Notes
        -----
        This derivative is checked numerically.
        '''
        count = self.N
        return [0.0]*count if self.scalar else zeros(count)

    @property
    def d2epsilon_dzizjs(self):
        r'''Helper method for calculating the second composition derivatives (hessian)
        of `epsilon`. Note this is independent of the phase.
        .. math::
            \left(\frac{\partial^2 \epsilon}{\partial x_i \partial x_j}\right)_{T, P, x_{k\ne i,j}}
            = 0
        Returns
        -------
        d2epsilon_dzizjs : list[list[float]]
            Composition derivative of `epsilon` of each component, [m^6/mol^2]
        Notes
        -----
        This derivative is checked numerically.
        '''
        count = self.N
        if self.scalar:
            return [[0.0]*count for _ in range(count)]
        return zeros((count, count))

    @property
    def d2epsilon_dninjs(self):
        r'''Helper method for calculating the second mole number derivatives (hessian) of
        `epsilon`. Note this is independent of the phase.
        .. math::
            \left(\frac{\partial^2 \epsilon}{\partial n_i n_j}\right)_{T, P, n_{k\ne i,j}}
            = 0
        Returns
        -------
        d2epsilon_dninjs : list[list[float]]
            Second composition derivative of `epsilon` of each component, [m^6/mol^4]
        Notes
        -----
        This derivative is checked numerically.
        '''
        count = self.N
        if self.scalar:
            return [[0.0]*count for _ in range(count)]
        return zeros((count, count))

    @property
    def d3epsilon_dninjnks(self):
        r'''Helper method for calculating the third partial mole number
        derivatives of `epsilon`. Note this is independent of the phase.
        .. math::
            \left(\frac{\partial^3 \epsilon}{\partial n_i \partial n_j \partial n_k }
            \right)_{T, P,
            n_{m \ne i,j,k}} = 0
        Returns
        -------
        d3epsilon_dninjnks : list[list[list[float]]]
            Third mole number derivative of `epsilon` of each component,
            [m^6/mol^5]
        Notes
        -----
        This derivative is checked numerically.
        '''
        count = self.N
        if self.scalar:
            return [[[0.0]*count for _ in range(count)] for _ in range(count)]
        return zeros((count, count, count))
# # Python 2/3 compatibility
# try:
# eos.__dict__['d3epsilon_dninjnks'] = d3epsilon_dninjnks
# eos.__dict__['d2epsilon_dninjs'] = d2epsilon_dninjs
# eos.__dict__['d2epsilon_dzizjs'] = d2epsilon_dzizjs
# eos.__dict__['depsilon_dns'] = depsilon_dns
# eos.__dict__['depsilon_dzs'] = depsilon_dzs
# except:
# setattr(eos, 'd3epsilon_dninjnks', d3epsilon_dninjnks)
# setattr(eos, 'd2epsilon_dninjs', d2epsilon_dninjs)
# setattr(eos, 'd2epsilon_dzizjs', d2epsilon_dzizjs)
# setattr(eos, 'depsilon_dns', depsilon_dns)
# setattr(eos, 'depsilon_dzs', depsilon_dzs)
class PSRKMixingRules(object):
    '''Mixing-rule class implementing the PSRK Gibbs-excess-energy based
    `a_alpha` mixing rule and its temperature derivatives. Intended to be
    combined with an EOS class that provides `b`, `bs`, `zs`, `N`,
    `a_alpha_and_derivatives_vectorized`, and a `ge_model` excess Gibbs
    energy model supporting `GE`, `dGE_dT`, `d2GE_dT2` and `to_T_xs`.
    '''
    # PSRK packing-fraction constant `u` and the derived mixing-rule
    # constant A = log(u/(u + 1)); its inverse is cached for speed.
    u = 1.1
    A = -0.6466271649250525 # log(1.1/(1.1+1))
    A_inv = 1.0/A

    def a_alpha_and_derivatives(self, T, full=True, quick=True,
                                pure_a_alphas=True):
        r'''Method to calculate `a_alpha` and its first and second
        derivatives for an EOS with the PSRK mixing rules. Returns
        `a_alpha`, `da_alpha_dT`, and `d2a_alpha_dT2`.

        For use in some methods, this returns only `a_alpha` if `full` is
        False.

        .. math::
            \alpha = bRT \left[ \sum_i \frac{z_i \alpha_i}{b_i RT}
            + \frac{1}{A}\left(\frac{G^E}{RT} + \sum_i z_i \ln
            \left(\frac{b}{b_i}\right) \right)\right]

        .. math::
            \frac{\partial \alpha}{\partial T} = RTb\left[
            \sum_i \left(\frac{z_i \frac{\partial \alpha_i}{\partial T}}{RTb_i}
            -\frac{z_i\alpha_i}{RT^2b_i} \right)
            + \frac{1}{A}\left(\frac{\frac{\partial G^E}{\partial T}}{RT}
            - \frac{G^E}{RT^2} \right)
            \right] + \frac{\alpha}{T}

        .. math::
            \frac{\partial^2 \alpha}{\partial T^2} = b\left[\sum_i
            \left(\frac{z_i\frac{\partial^2 \alpha_i}{\partial T^2}}{b_i}
            - \frac{2z_i \frac{\partial \alpha_i}{\partial T}}{T b_i}
            + \frac{2z_i\alpha_i}{T^2 b_i}
            \right)
            + \frac{2}{T}\left[\sum_i \left(\frac{z_i\frac{\partial \alpha_i}
            {\partial T}}{b_i}
            - \frac{z_i \alpha_i}{T b_i}
            \right)
            + \frac{1}{A}\left(\frac{\partial G^E}{\partial T} - \frac{G^E}{T}
            \right)
            \right]
            + \frac{1}{A}\left(
            \frac{\partial^2 G^E}{\partial T^2} - \frac{2}{T}
            \frac{\partial G^E}{\partial T} + 2\frac{G^E}{T^2}
            \right)
            \right]

        Parameters
        ----------
        T : float
            Temperature, [K]
        full : bool, optional
            If False, calculates and returns only `a_alpha`
        quick : bool, optional
            Only the quick variant is implemented; it is little faster anyhow
        pure_a_alphas : bool, optional
            Whether or not to recalculate the a_alpha terms of pure components
            (for the case of mixtures only) which stay the same as the
            composition changes (i.e in a PT flash), [-]

        Returns
        -------
        a_alpha : float
            Coefficient calculated by PSRK-specific method, [J^2/mol^2/Pa]
        da_alpha_dT : float
            Temperature derivative of coefficient calculated by PSRK-specific
            method, [J^2/mol^2/Pa/K]
        d2a_alpha_dT2 : float
            Second temperature derivative of coefficient calculated by
            PSRK-specific method, [J^2/mol^2/Pa/K**2]

        Notes
        -----
        '''
        if pure_a_alphas:
            # Recompute and cache the pure-component alpha terms at T
            a_alphas, da_alpha_dTs, d2a_alpha_dT2s = self.a_alpha_and_derivatives_vectorized(T)
            self.a_alphas, self.da_alpha_dTs, self.d2a_alpha_dT2s = a_alphas, da_alpha_dTs, d2a_alpha_dT2s
        else:
            # Reuse cached pure-component terms (composition-only change)
            a_alphas, da_alpha_dTs, d2a_alpha_dT2s = self.a_alphas, self.da_alpha_dTs, self.d2a_alpha_dT2s
        b, zs, bs = self.b, self.zs, self.bs
        ge_model = self.ge_model
        if T != ge_model.T:
            # Re-evaluate the GE model at the requested temperature; cache
            # it so solve_T can reuse it when its T matches.
            # TODO make sure this gets set when solve_T is called
            ge_model = ge_model.to_T_xs(T, zs)
            self._last_ge = ge_model
        GE = ge_model.GE()
        if full:
            dGE_dT = ge_model.dGE_dT()
            d2GE_dT2 = ge_model.d2GE_dT2()
        T_inv = 1.0/T
        T2_inv = T_inv*T_inv
        RT_inv = R_inv*T_inv
        RT2_inv = R_inv*T2_inv
        A_inv = self.A_inv
        N = self.N
        # Composition-sum accumulators:
        #   tot0  - sum z_i*alpha_i/(b_i R T)
        #   tot1  - sum z_i*ln(b/b_i)
        #   d1tot - first-T-derivative sum (scaled by 1/R)
        #   other - same sum as d1tot but without the 1/R factor
        #   d2tot - second-T-derivative sum
        tot0, tot1, d1tot, d2tot, other = 0.0, 0.0, 0.0, 0.0, 0.0
        if full:
            for i in range(N):
                bi_inv = 1.0/bs[i]
                # Main component
                tot0 += zs[i]*a_alphas[i]*bi_inv*RT_inv
                tot1 += zs[i]*log(b*bi_inv)
                d1tot += zs[i]*da_alpha_dTs[i]*RT_inv*bi_inv - zs[i]*a_alphas[i]*RT2_inv*bi_inv
                # TODO go back to just using d1tot
                # TODO optimize all of this
                other += zs[i]*da_alpha_dTs[i]*bi_inv - zs[i]*a_alphas[i]*bi_inv*T_inv
                d2tot += (zs[i]*d2a_alpha_dT2s[i]*bi_inv
                          - 2.0*zs[i]*da_alpha_dTs[i]*T_inv*bi_inv
                          + 2.0*zs[i]*a_alphas[i]*T2_inv*bi_inv)
        else:
            for i in range(N):
                bi_inv = 1.0/bs[i]
                tot0 += zs[i]*a_alphas[i]*bi_inv*RT_inv
                tot1 += zs[i]*log(b*bi_inv)
        a_alpha = R*T*b*(tot0 + A_inv*(GE*RT_inv + tot1))
        if full:
            da_alpha_dT = R*T*b*(d1tot + A_inv*(dGE_dT*RT_inv - GE*RT2_inv)) + a_alpha*T_inv
            d2a_alpha_dT2 = b*(d2tot + 2.0*T_inv*(other + A_inv*(dGE_dT - GE*T_inv))
                               + A_inv*(d2GE_dT2 - 2.0*T_inv*dGE_dT + 2.0*GE*T2_inv))
            return a_alpha, da_alpha_dT, d2a_alpha_dT2
        return a_alpha

    def solve_T(self, P, V, quick=True, solution=None):
        # Solve for T with the generic GCEOS solver, then synchronize the
        # excess Gibbs model to the solved temperature - reusing the model
        # cached by a_alpha_and_derivatives when its temperature matches.
        T = GCEOS.solve_T(self, P, V, solution=solution)
        if hasattr(self, '_last_ge') and self._last_ge.T == T:
            self.ge_model = self._last_ge
            del self._last_ge
        else:
            self.ge_model = self.ge_model.to_T_xs(T, self.zs)
        return T

    # Composition derivatives of a_alpha have not been derived for the PSRK
    # mixing rules; every accessor below raises NotImplementedError.
    @property
    def da_alpha_dzs(self):
        raise NotImplementedError("TODO")

    @property
    def da_alpha_dns(self):
        raise NotImplementedError("TODO")

    @property
    def dna_alpha_dns(self):
        raise NotImplementedError("TODO")

    @property
    def d2a_alpha_dzizjs(self):
        raise NotImplementedError("TODO")

    @property
    def d2a_alpha_dninjs(self):
        raise NotImplementedError("TODO")

    @property
    def d3a_alpha_dzizjzks(self):
        raise NotImplementedError("TODO")

    @property
    def d3a_alpha_dninjnks(self):
        raise NotImplementedError("TODO")

    @property
    def da_alpha_dT_dzs(self):
        raise NotImplementedError("TODO")

    @property
    def da_alpha_dT_dns(self):
        raise NotImplementedError("TODO")

    @property
    def dna_alpha_dT_dns(self):
        raise NotImplementedError("TODO")

    @property
    def d2a_alpha_dT2_dzs(self):
        raise NotImplementedError("TODO")

    @property
    def d2a_alpha_dT2_dns(self):
        raise NotImplementedError("TODO")
class IGMIX(EpsilonZeroMixingRules, GCEOSMIX, IG):
    r'''Class for solving the ideal gas [1]_ [2]_ equation of state for a
    mixture of any number of compounds. Subclasses :obj:`thermo.eos.IG`. Solves
    the EOS on initialization.

    Two of `T`, `P`, and `V` are needed to solve the EOS.

    .. math::
        P =\frac{RT}{V}

    Parameters
    ----------
    zs : list[float]
        Overall mole fractions of all species, [-]
    T : float, optional
        Temperature, [K]
    P : float, optional
        Pressure, [Pa]
    V : float, optional
        Molar volume, [m^3/mol]
    Tcs : list[float], optional
        Critical temperatures of all compounds, [K]
    Pcs : list[float], optional
        Critical pressures of all compounds, [Pa]
    omegas : list[float], optional
        Acentric factors of all compounds - Not used in this equation of
        state!, [-]
    kijs : list[list[float]], optional
        n*n size list of lists with binary interaction parameters for the
        Van der Waals mixing rules, default all 0 and not used, [-]
    fugacities : bool, optional
        Whether or not to calculate fugacity related values (phis, log phis,
        and fugacities); default True, [-]
    only_l : bool, optional
        When true, if there is a liquid and a vapor root, only the liquid
        root (and properties) will be set; default False, [-]
    only_g : bool, optional
        When true, if there is a liquid and a vapor root, only the vapor
        root (and properties) will be set; default False, [-]

    Examples
    --------
    T-P initialization, nitrogen-methane at 115 K and 1 MPa:

    >>> eos = IGMIX(T=115, P=1E6, Tcs=[126.1, 190.6], Pcs=[33.94E5, 46.04E5], omegas=[0.04, .008], zs=[0.5, 0.5])
    >>> eos.phase, eos.V_g
    ('g', 0.0009561632010876225)

    Notes
    -----
    Many properties of this object are zero. Many of the arguments are not used
    and are provided for consistency only.

    References
    ----------
    .. [1] Walas, Stanley M. Phase Equilibria in Chemical Engineering.
       Butterworth-Heinemann, 1985.
    .. [2] Poling, Bruce E. The Properties of Gases and Liquids. 5th
       edition. New York: McGraw-Hill Professional, 2000.
    '''
    eos_pure = IG
    # Pure-component alpha terms are all zero; set lazily in __init__.
    a_alphas = None
    da_alpha_dTs = None
    d2a_alpha_dT2s = None
    nonstate_constants_specific = ()
    kwargs_keys = ('kijs',)
    model_id = 0

    def _zeros1d(self):
        # Shared, never-mutated zero vector of length N
        return self.zeros1d

    def _zeros2d(self):
        # Shared, never-mutated N x N zero matrix
        return self.zeros2d

    def _zeros3d(self):
        # Freshly-allocated N x N x N zero structure (not cached)
        N = self.N
        if self.scalar:
            return [[[0.0]*N for _ in range(N)] for _ in range(N)]
        else:
            return zeros((N, N, N))

    @property
    def a_alpha_roots(self):
        # All zero for the ideal gas
        return self.zeros1d

    @property
    def ddelta_dzs(self):
        r'''Helper method for calculating the composition derivatives of
        `delta`. Note this is independent of the phase.

        .. math::
            \left(\frac{\partial \delta}{\partial x_i}\right)_{T, P, x_{i\ne j}}
            = 0

        Returns
        -------
        ddelta_dzs : list[float]
            Composition derivative of `delta` of each component, [m^3/mol]
        '''
        return self.zeros1d

    @property
    def ddelta_dns(self):
        r'''Helper method for calculating the mole number derivatives of
        `delta`. Note this is independent of the phase.

        .. math::
            \left(\frac{\partial \delta}{\partial n_i}\right)_{T, P, n_{i\ne j}}
            = 0

        Returns
        -------
        ddelta_dns : list[float]
            Mole number derivative of `delta` of each component, [m^3/mol^2]
        '''
        return self.zeros1d

    @property
    def depsilon_dzs(self):
        r'''Helper method for calculating the composition derivatives of
        `epsilon`. Note this is independent of the phase.

        .. math::
            \left(\frac{\partial \epsilon}{\partial x_i}\right)_{T, P, x_{i\ne j}}
            = 0

        Returns
        -------
        depsilon_dzs : list[float]
            Composition derivative of `epsilon` of each component, [m^6/mol^2]
        '''
        return self.zeros1d

    @property
    def depsilon_dns(self):
        r'''Helper method for calculating the mole number derivatives of
        `epsilon`. Note this is independent of the phase.

        .. math::
            \left(\frac{\partial \epsilon}{\partial n_i}\right)_{T, P, n_{i\ne j}}
            = 0

        Returns
        -------
        depsilon_dns : list[float]
            Mole number derivative of `epsilon` of each component, [m^6/mol^3]
        '''
        return self.zeros1d

    @property
    def d2delta_dzizjs(self):
        r'''Helper method for calculating the second composition derivatives
        (hessian) of `delta`. Note this is independent of the phase.

        .. math::
            \left(\frac{\partial^2 \delta}{\partial x_i\partial x_j}\right)_{T, P, x_{k\ne i,j}}
            = 0

        Returns
        -------
        d2delta_dzizjs : list[list[float]]
            Second composition derivative of `delta` of each component,
            [m^3/mol]
        '''
        return self.zeros2d

    @property
    def d2delta_dninjs(self):
        r'''Helper method for calculating the second mole number derivatives
        (hessian) of `delta`. Note this is independent of the phase.

        .. math::
            \left(\frac{\partial^2 \delta}{\partial n_i \partial n_j}\right)_{T, P, n_{k\ne i,j}}
            = 0

        Returns
        -------
        d2delta_dninjs : list[list[float]]
            Second mole number derivative of `delta` of each component,
            [m^3/mol^3]
        '''
        return self.zeros2d

    @property
    def d2epsilon_dzizjs(self):
        r'''Helper method for calculating the second composition derivatives
        (hessian) of `epsilon`. Note this is independent of the phase.

        .. math::
            \left(\frac{\partial^2 \epsilon}{\partial x_i \partial x_j}\right)_{T, P, x_{k\ne i,j}}
            = 0

        Returns
        -------
        d2epsilon_dzizjs : list[list[float]]
            Second composition derivative of `epsilon` of each component,
            [m^6/mol^2]
        '''
        return self.zeros2d

    @property
    def d2epsilon_dninjs(self):
        r'''Helper method for calculating the second mole number derivatives
        (hessian) of `epsilon`. Note this is independent of the phase.

        .. math::
            \left(\frac{\partial^2 \epsilon}{\partial n_i n_j}\right)_{T, P,
            n_{k\ne i,j}} = 0

        Returns
        -------
        d2epsilon_dninjs : list[list[float]]
            Second mole number derivative of `epsilon` of each component,
            [m^6/mol^4]
        '''
        return self.zeros2d

    @property
    def d3delta_dninjnks(self):
        r'''Helper method for calculating the third partial mole number
        derivatives of `delta`. Note this is independent of the phase.

        .. math::
            \left(\frac{\partial^3 \delta}{\partial n_i \partial n_j \partial n_k }
            \right)_{T, P, n_{m \ne i,j,k}} = 0

        Returns
        -------
        d3delta_dninjnks : list[list[list[float]]]
            Third mole number derivative of `delta` of each component,
            [m^3/mol^4]
        '''
        return self._zeros3d()

    @property
    def d3epsilon_dninjnks(self):
        r'''Helper method for calculating the third partial mole number
        derivatives of `epsilon`. Note this is independent of the phase.

        .. math::
            \left(\frac{\partial^3 \epsilon}{\partial n_i \partial n_j \partial n_k }
            \right)_{T, P, n_{m \ne i,j,k}} = 0

        Returns
        -------
        d3epsilon_dninjnks : list[list[list[float]]]
            Third mole number derivative of `epsilon` of each component,
            [m^6/mol^5]
        '''
        return self._zeros3d()

    def __init__(self, zs, T=None, P=None, V=None,
                 Tcs=None, Pcs=None, omegas=None, kijs=None,
                 fugacities=True, only_l=False, only_g=False):
        self.N = N = len(zs)
        self.Tcs = Tcs
        self.Pcs = Pcs
        self.omegas = omegas
        self.zs = zs
        # scalar is True when compositions are plain lists (vs numpy arrays)
        self.scalar = scalar = type(zs) is list
        if scalar:
            self.zeros2d = zeros2d = [[0.0]*N for _ in range(N)]
        else:
            self.zeros2d = zeros2d = zeros((N, N))
        if kijs is None:
            # kijs are unused by the ideal gas; alias the shared zero matrix
            kijs = zeros2d
        self.kijs = kijs
        self.kwargs = {'kijs': kijs}
        self.T = T
        self.P = P
        self.V = V
        self.b = 0.0
        # All of these 1d constants are identically zero for the ideal gas;
        # they all alias the first row of zeros2d, which is never mutated.
        self.bs = self.ais = self.zeros1d = self.a_alphas = self.da_alpha_dTs = self.d2a_alpha_dT2s = zeros2d[0]
        self.solve(only_l=only_l, only_g=only_g)
        if fugacities:
            self.fugacities()

    def _fast_init_specific(self, other):
        # Copy the shared zero structures; nothing composition-dependent
        # needs recomputing for the ideal gas.
        self.bs = other.bs
        self.ais = other.ais
        self.b = other.b
        self.zeros1d = self.a_alphas = self.da_alpha_dTs = self.d2a_alpha_dT2s = other.zeros1d
        self.zeros2d = other.zeros2d

    def a_alphas_vectorized(self, T):
        r'''Method to calculate the pure-component `a_alphas` for the Ideal Gas
        EOS. This vectorized implementation is added for extra speed.

        .. math::
            a\alpha = 0

        Parameters
        ----------
        T : float
            Temperature, [K]

        Returns
        -------
        a_alphas : list[float]
            Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
        '''
        return self.zeros1d

    def a_alpha_and_derivatives_vectorized(self, T):
        r'''Method to calculate the pure-component `a_alphas` and their first
        and second derivatives for the Ideal Gas EOS. This vectorized
        implementation is added for extra speed.

        .. math::
            a\alpha = 0

        .. math::
            \frac{d a\alpha}{dT} = 0

        .. math::
            \frac{d^2 a\alpha}{dT^2} = 0

        Parameters
        ----------
        T : float
            Temperature, [K]

        Returns
        -------
        a_alphas : list[float]
            Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
        da_alpha_dTs : list[float]
            Temperature derivative of coefficient calculated by EOS-specific
            method, [J^2/mol^2/Pa/K]
        d2a_alpha_dT2s : list[float]
            Second temperature derivative of coefficient calculated by
            EOS-specific method, [J^2/mol^2/Pa/K**2]
        '''
        return self.zeros1d, self.zeros1d, self.zeros1d

    def a_alpha_and_derivatives(self, T, full=True, quick=True,
                                pure_a_alphas=True):
        # Saves time
        if full:
            return 0.0, 0.0, 0.0
        return 0.0
    # Best-effort copy of the parent documentation onto the override; the
    # bare except tolerates interpreters where __doc__ is not writable.
    try:
        a_alpha_and_derivatives.__doc__ = GCEOSMIX.a_alpha_and_derivatives.__doc__
    except:
        pass

    def fugacity_coefficients(self, Z):
        r'''Calculate and return the fugacity coefficients of the ideal-gas
        phase (0 by definition).

        Parameters
        ----------
        Z : float
            Compressibility of the mixture for a desired phase, [-]

        Returns
        -------
        log_phis : list[float]
            Log fugacity coefficient for each species, [-]
        '''
        return self.zeros1d

    def dlnphis_dT(self, phase):
        r'''Calculate and return the temperature derivative of fugacity
        coefficients of the ideal-gas phase (0 by definition).

        Parameters
        ----------
        phase : str
            One of 'l' or 'g' (unused; the result is zero regardless), [-]

        Returns
        -------
        dlnphis_dT : list[float]
            Temperature derivatives of log fugacity coefficient for each
            species, [1/K]
        '''
        return self.zeros1d

    def dlnphis_dP(self, phase):
        r'''Calculate and return the pressure derivative of fugacity
        coefficients of the ideal-gas phase (0 by definition).

        Parameters
        ----------
        phase : str
            One of 'l' or 'g' (unused; the result is zero regardless), [-]

        Returns
        -------
        dlnphis_dP : list[float]
            Pressure derivatives of log fugacity coefficient for each
            species, [1/Pa]
        '''
        return self.zeros1d

    @property
    def a_alpha_ijs(self):
        return self.zeros2d
    try:
        a_alpha_ijs.__doc__ = GCEOSMIX.a_alpha_ijs.__doc__
    except:
        pass

    @property
    def da_alpha_dT_ijs(self):
        return self.zeros2d
    try:
        da_alpha_dT_ijs.__doc__ = GCEOSMIX.da_alpha_dT_ijs.__doc__
    except:
        pass

    @property
    def d2a_alpha_dT2_ijs(self):
        return self.zeros2d
    try:
        d2a_alpha_dT2_ijs.__doc__ = GCEOSMIX.d2a_alpha_dT2_ijs.__doc__
    except:
        pass
class RKMIX(EpsilonZeroMixingRules, GCEOSMIX, RK):
    r'''Class for solving the Redlich Kwong [1]_ [2]_ cubic equation of state
    for a mixture of any number of compounds. Subclasses
    :obj:`thermo.eos.RK`. Solves the EOS on initialization and calculates
    fugacities for all components in all phases.

    Two of `T`, `P`, and `V` are needed to solve the EOS.

    .. math::
        P =\frac{RT}{V-b}-\frac{a}{V\sqrt{T}(V+b)}

    .. math::
        a = \sum_i \sum_j z_i z_j {a}_{ij}

    .. math::
        b = \sum_i z_i b_i

    .. math::
        a_{ij} = (1-k_{ij})\sqrt{a_{i}a_{j}}

    .. math::
        a_i =\left(\frac{R^2(T_{c,i})^{2}}{9(\sqrt[3]{2}-1)P_{c,i}} \right)
        =\frac{0.42748\cdot R^2(T_{c,i})^{2}}{P_{c,i}}

    .. math::
        b_i=\left( \frac{(\sqrt[3]{2}-1)}{3}\right)\frac{RT_{c,i}}{P_{c,i}}
        =\frac{0.08664\cdot R T_{c,i}}{P_{c,i}}

    Parameters
    ----------
    Tcs : float
        Critical temperatures of all compounds, [K]
    Pcs : float
        Critical pressures of all compounds, [Pa]
    zs : float
        Overall mole fractions of all species, [-]
    kijs : list[list[float]], optional
        n*n size list of lists with binary interaction parameters for the
        Van der Waals mixing rules, default all 0, [-]
    T : float, optional
        Temperature, [K]
    P : float, optional
        Pressure, [Pa]
    V : float, optional
        Molar volume, [m^3/mol]
    omegas : float, optional
        Acentric factors of all compounds - Not used in this equation of
        state!, [-]
    fugacities : bool, optional
        Whether or not to calculate fugacity related values (phis, log phis,
        and fugacities); default True, [-]
    only_l : bool, optional
        When true, if there is a liquid and a vapor root, only the liquid
        root (and properties) will be set; default False, [-]
    only_g : bool, optional
        When true, if there is a liquid and a vapor root, only the vapor
        root (and properties) will be set; default False, [-]

    Examples
    --------
    T-P initialization, nitrogen-methane at 115 K and 1 MPa:

    >>> eos = RKMIX(T=115, P=1E6, Tcs=[126.1, 190.6], Pcs=[33.94E5, 46.04E5], zs=[0.5, 0.5], kijs=[[0,0],[0,0]])
    >>> eos.V_l, eos.V_g
    (4.048414781e-05, 0.00070060605863)

    Notes
    -----
    The PV solution for `T` is iterative.

    References
    ----------
    .. [1] Walas, Stanley M. Phase Equilibria in Chemical Engineering.
       Butterworth-Heinemann, 1985.
    .. [2] Poling, Bruce E. The Properties of Gases and Liquids. 5th
       edition. New York: McGraw-Hill Professional, 2000.
    '''
    eos_pure = RK
    kwargs_keys = ('kijs',)
    model_id = 10002

    def __init__(self, Tcs, Pcs, zs, omegas=None, kijs=None, T=None, P=None, V=None,
                 fugacities=True, only_l=False, only_g=False):
        self.N = N = len(Tcs)
        cmps = range(N)
        self.Tcs = Tcs
        self.Pcs = Pcs
        self.omegas = omegas
        self.zs = zs
        # scalar is True when compositions are plain lists (vs numpy arrays)
        self.scalar = scalar = type(zs) is list
        if kijs is None:
            if scalar:
                kijs = [[0.0]*N for i in cmps]
            else:
                kijs = zeros((N, N))
        self.kijs = kijs
        self.kwargs = {'kijs': kijs}
        self.T = T
        self.P = P
        self.V = V
        # Pure-component b_i and a_i from the RK constants of the base class
        c1R2_c2R, c2R = self.c1R2_c2R, self.c2R
        if scalar:
            self.bs = bs = [c2R*Tcs[i]/Pcs[i] for i in cmps]
            self.ais = [c1R2_c2R*Tcs[i]*bs[i] for i in cmps]
            b = 0.0
            for i in cmps:
                b += bs[i]*zs[i]
        else:
            self.bs = bs = c2R*Tcs/Pcs
            self.ais = c1R2_c2R*Tcs*bs
            b = float((bs*zs).sum())
        # For RK, delta equals the mixture covolume b (and epsilon is zero,
        # provided by EpsilonZeroMixingRules)
        self.b = self.delta = b
        self.solve(only_l=only_l, only_g=only_g)
        if fugacities:
            self.fugacities()

    def _fast_init_specific(self, other):
        # Recompute only the composition-dependent mixture covolume; the
        # pure-component constants are copied by the generic fast-init path.
        b = 0.0
        if self.scalar:
            for bi, zi in zip(self.bs, self.zs):
                b += bi*zi
        else:
            b = float((self.bs*self.zs).sum())
        self.b = self.delta = b

    def a_alphas_vectorized(self, T):
        r'''Method to calculate the pure-component `a_alphas` for the RK EOS.
        This vectorized implementation is added for extra speed.

        .. math::
            a\alpha = \frac{a}{\sqrt{\frac{T}{Tc}}}

        Parameters
        ----------
        T : float
            Temperature, [K]

        Returns
        -------
        a_alphas : list[float]
            Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]

        Examples
        --------
        >>> eos = RKMIX(T=115, P=1E6, Tcs=[126.1, 190.6], Pcs=[33.94E5, 46.04E5], omegas=[0.04, 0.011], zs=[0.5, 0.5], kijs=[[0,0],[0,0]])
        >>> eos.a_alphas_vectorized(115)
        [0.1449810919468, 0.30019773677]
        '''
        return RK_a_alphas_vectorized(T, self.Tcs, self.ais,
                                      a_alphas=[0.0]*self.N if self.scalar else zeros(self.N))

    def a_alpha_and_derivatives_vectorized(self, T):
        r'''Method to calculate the pure-component `a_alphas` and their first
        and second derivatives for the RK EOS. This vectorized implementation
        is added for extra speed.

        .. math::
            a\alpha = \frac{a}{\sqrt{\frac{T}{Tc}}}

        .. math::
            \frac{d a\alpha}{dT} = - \frac{a}{2 T\sqrt{\frac{T}{Tc}}}

        .. math::
            \frac{d^2 a\alpha}{dT^2} = \frac{3 a}{4 T^{2}\sqrt{\frac{T}{Tc}}}

        Parameters
        ----------
        T : float
            Temperature, [K]

        Returns
        -------
        a_alphas : list[float]
            Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
        da_alpha_dTs : list[float]
            Temperature derivative of coefficient calculated by EOS-specific
            method, [J^2/mol^2/Pa/K]
        d2a_alpha_dT2s : list[float]
            Second temperature derivative of coefficient calculated by
            EOS-specific method, [J^2/mol^2/Pa/K**2]

        Examples
        --------
        >>> eos = RKMIX(T=115, P=1E6, Tcs=[126.1, 190.6], Pcs=[33.94E5, 46.04E5], omegas=[0.04, 0.011], zs=[0.5, 0.5], kijs=[[0,0],[0,0]])
        >>> eos.a_alpha_and_derivatives_vectorized(115)
        ([0.1449810919468, 0.30019773677], [-0.000630352573681, -0.00130520755121], [8.2219900915e-06, 1.7024446320e-05])
        '''
        N = self.N
        if self.scalar:
            a_alphas, da_alpha_dTs, d2a_alpha_dT2s = [0.0]*N, [0.0]*N, [0.0]*N
        else:
            a_alphas, da_alpha_dTs, d2a_alpha_dT2s = zeros(N), zeros(N), zeros(N)
        return RK_a_alpha_and_derivatives_vectorized(T, self.Tcs, self.ais,
                                                     a_alphas=a_alphas, da_alpha_dTs=da_alpha_dTs,
                                                     d2a_alpha_dT2s=d2a_alpha_dT2s)

    def solve_T(self, P, V, solution=None):
        # For a single component, temporarily expose scalar Tc/Pc/a so the
        # analytical pure-species RK solve_T can be reused; otherwise fall
        # through to the generic iterative mixture solver.
        # NOTE(review): the __mro__[-4]/[-3] indexing assumes RKMIX's exact
        # inheritance layout - confirm if the base classes ever change.
        if self.N == 1 and type(self) is RKMIX:
            self.Tc = self.Tcs[0]
            self.Pc = self.Pcs[0]
            self.a = self.ais[0]
            T = super(type(self).__mro__[-4], self).solve_T(P=P, V=V, solution=solution)
            del self.Tc
            del self.Pc
            del self.a
            return T
        else:
            return super(type(self).__mro__[-3], self).solve_T(P=P, V=V, solution=solution)

    @property
    def ddelta_dzs(self):
        r'''Helper method for calculating the composition derivatives of
        `delta`. Note this is independent of the phase.

        .. math::
            \left(\frac{\partial \delta}{\partial x_i}\right)_{T, P, x_{i\ne j}}
            = b_i

        Returns
        -------
        ddelta_dzs : list[float]
            Composition derivative of `delta` of each component, [m^3/mol]

        Notes
        -----
        This derivative is checked numerically.
        '''
        return self.bs

    @property
    def ddelta_dns(self):
        r'''Helper method for calculating the mole number derivatives of
        `delta`. Note this is independent of the phase.

        .. math::
            \left(\frac{\partial \delta}{\partial n_i}\right)_{T, P, n_{i\ne j}}
            = (b_i - b)

        Returns
        -------
        ddelta_dns : list[float]
            Mole number derivative of `delta` of each component, [m^3/mol^2]

        Notes
        -----
        This derivative is checked numerically.
        '''
        b = self.b
        if self.scalar:
            return [(bi - b) for bi in self.bs]
        return self.bs - b

    @property
    def d2delta_dzizjs(self):
        r'''Helper method for calculating the second composition derivatives
        (hessian) of `delta`. Note this is independent of the phase.

        .. math::
            \left(\frac{\partial^2 \delta}{\partial x_i\partial x_j}\right)_{T, P, x_{k\ne i,j}}
            = 0

        Returns
        -------
        d2delta_dzizjs : list[list[float]]
            Second composition derivative of `delta` of each component,
            [m^3/mol]

        Notes
        -----
        This derivative is checked numerically.
        '''
        N = self.N
        if self.scalar:
            return [[0.0]*N for i in range(N)]
        else:
            return zeros((N, N))

    @property
    def d2delta_dninjs(self):
        r'''Helper method for calculating the second mole number derivatives
        (hessian) of `delta`. Note this is independent of the phase.

        .. math::
            \left(\frac{\partial^2 \delta}{\partial n_i \partial n_j}\right)_{T, P, n_{k\ne i,j}}
            = 2b - b_i - b_j

        Returns
        -------
        d2delta_dninjs : list[list[float]]
            Second mole number derivative of `delta` of each component,
            [m^3/mol^3]

        Notes
        -----
        This derivative is checked numerically.
        '''
        # delta == b for RK, so this is exactly the b hessian
        return self.d2b_dninjs

    @property
    def d3delta_dninjnks(self):
        r'''Helper method for calculating the third partial mole number
        derivatives of `delta`. Note this is independent of the phase.

        .. math::
            \left(\frac{\partial^3 \delta}{\partial n_i \partial n_j \partial n_k }
            \right)_{T, P, n_{m \ne i,j,k}} = 2(-3b + b_i + b_j + b_k)

        Returns
        -------
        d3delta_dninjnks : list[list[list[float]]]
            Third mole number derivative of `delta` of each component,
            [m^3/mol^4]

        Notes
        -----
        This derivative is checked numerically.
        '''
        N = self.N
        out = [[[0.0]*N for _ in range(N)] for _ in range(N)] if self.scalar else zeros((N, N, N))
        return RK_d3delta_dninjnks(self.b, self.bs, N, out)
class PRMIX(GCEOSMIX, PR):
r'''Class for solving the Peng-Robinson [1]_ [2]_ cubic equation of state
for a mixture of any number of compounds. Subclasses `PR`. Solves the EOS
on initialization and calculates fugacities for all components in all
phases.
Two of `T`, `P`, and `V` are needed to solve the EOS.
.. math::
P = \frac{RT}{v-b}-\frac{a\alpha(T)}{v(v+b)+b(v-b)}
.. math::
a \alpha = \sum_i \sum_j z_i z_j {(a\alpha)}_{ij}
.. math::
(a\alpha)_{ij} = (1-k_{ij})\sqrt{(a\alpha)_{i}(a\alpha)_{j}}
.. math::
b = \sum_i z_i b_i
.. math::
a_i=0.45724\frac{R^2T_{c,i}^2}{P_{c,i}}
.. math::
b_i=0.07780\frac{RT_{c,i}}{P_{c,i}}
.. math::
\alpha(T)_i=[1+\kappa_i(1-\sqrt{T_{r,i}})]^2
.. math::
\kappa_i=0.37464+1.54226\omega_i-0.26992\omega^2_i
Parameters
----------
Tcs : float
Critical temperatures of all compounds, [K]
Pcs : float
Critical pressures of all compounds, [Pa]
omegas : float
Acentric factors of all compounds, [-]
zs : float
Overall mole fractions of all species, [-]
kijs : list[list[float]], optional
n*n size list of lists with binary interaction parameters for the
Van der Waals mixing rules, default all 0 [-]
T : float, optional
Temperature, [K]
P : float, optional
Pressure, [Pa]
V : float, optional
Molar volume, [m^3/mol]
fugacities : bool, optional
Whether or not to calculate fugacity related values (phis, log phis,
and fugacities); default True, [-]
only_l : bool, optional
When true, if there is a liquid and a vapor root, only the liquid
root (and properties) will be set; default False, [-]
only_g : bool, optional
When true, if there is a liquid and a vapor root, only the vapor
root (and properties) will be set; default False, [-]
Examples
--------
T-P initialization, nitrogen-methane at 115 K and 1 MPa:
>>> eos = PRMIX(T=115, P=1E6, Tcs=[126.1, 190.6], Pcs=[33.94E5, 46.04E5], omegas=[0.04, 0.011], zs=[0.5, 0.5], kijs=[[0,0],[0,0]])
>>> eos.V_l, eos.V_g
(3.6257362939e-05, 0.00070066592313)
>>> eos.fugacities_l, eos.fugacities_g
([793860.83821, 73468.552253], [436530.92470, 358114.63827])
Notes
-----
For P-V initializations, a numerical solver is used to find T.
References
----------
.. [1] Peng, Ding-Yu, and Donald B. Robinson. "A New Two-Constant Equation
of State." Industrial & Engineering Chemistry Fundamentals 15, no. 1
(February 1, 1976): 59-64. doi:10.1021/i160057a011.
.. [2] Robinson, Donald B., Ding-Yu Peng, and Samuel Y-K Chung. "The
Development of the Peng - Robinson Equation and Its Application to Phase
Equilibrium in a System Containing Methanol." Fluid Phase Equilibria 24,
no. 1 (January 1, 1985): 25-41. doi:10.1016/0378-3812(85)87035-7.
'''
eos_pure = PR
nonstate_constants_specific = ('kappas', )
kwargs_keys = ('kijs',)
model_id = 10200
    def __init__(self, Tcs, Pcs, omegas, zs, kijs=None, T=None, P=None, V=None,
                 fugacities=True, only_l=False, only_g=False):
        # Store constants, compute the pure-component PR parameters
        # (b_i, a_i, kappa_i) and the mixture coefficients b, delta = 2b,
        # epsilon = -b^2, then solve the EOS.
        self.N = N = len(Tcs)
        cmps = range(N)
        self.Tcs = Tcs
        self.Pcs = Pcs
        self.omegas = omegas
        self.zs = zs
        # scalar is True when inputs are plain lists (vs numpy arrays)
        self.scalar = scalar = type(zs) is list
        if kijs is None:
            if scalar:
                kijs = [[0.0]*N for i in cmps]
            else:
                kijs = zeros((N, N))
        self.kijs = kijs
        self.kwargs = {'kijs': kijs}
        self.T = T
        self.P = P
        self.V = V
        # optimization, unfortunately
        c1R2_c2R, c2R = self.c1R2_c2R, self.c2R
        # Also tried to store the inverse of Pcs, without success - slows it down
        # NOTE(review): `scalar` is re-derived here from Tcs after being set
        # from zs above; assumes both are the same container type - confirm
        # behavior for mixed list/array inputs.
        self.scalar = scalar = type(Tcs) is list
        if scalar:
            self.bs = bs = [c2R*Tcs[i]/Pcs[i] for i in cmps]
            self.ais = [c1R2_c2R*Tcs[i]*bs[i] for i in cmps]
            # kappa_i = 0.37464 + 1.54226*omega_i - 0.26992*omega_i^2
            self.kappas = [omega*(-0.26992*omega + 1.54226) + 0.37464 for omega in omegas]
            b = 0.0
            for i in cmps:
                b += bs[i]*zs[i]
        else:
            self.bs = bs = c2R*Tcs/Pcs
            self.ais = c1R2_c2R*Tcs*bs
            self.kappas = omegas*(-0.26992*omegas + 1.54226) + 0.37464
            b = float((bs*zs).sum())
        self.b = b
        # PR-specific cubic coefficients
        self.delta = 2.0*b
        self.epsilon = -b*b
        self.solve(only_l=only_l, only_g=only_g)
        if fugacities:
            self.fugacities()
def _fast_init_specific(self, other):
self.kappas = other.kappas
if self.scalar:
b = 0.0
for bi, zi in zip(self.bs, self.zs):
b += bi*zi
else:
b = float((self.bs*self.zs).sum())
self.b = b
self.delta = 2.0*b
self.epsilon = -b*b
def a_alphas_vectorized(self, T):
r'''Method to calculate the pure-component `a_alphas` for the PR EOS.
This vectorized implementation is added for extra speed.
.. math::
a\alpha = a \left(\kappa \left(- \frac{T^{0.5}}{Tc^{0.5}}
+ 1\right) + 1\right)^{2}
Parameters
----------
T : float
Temperature, [K]
Returns
-------
a_alphas : list[float]
Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
'''
return PR_a_alphas_vectorized(T, self.Tcs, self.ais, self.kappas,
a_alphas=[0.0]*self.N if self.scalar else zeros(self.N))
def a_alpha_and_derivatives_vectorized(self, T):
r'''Method to calculate the pure-component `a_alphas` and their first
and second derivatives for the PR EOS. This vectorized implementation
is added for extra speed.
.. math::
a\alpha = a \left(\kappa \left(- \frac{T^{0.5}}{Tc^{0.5}}
+ 1\right) + 1\right)^{2}
.. math::
\frac{d a\alpha}{dT} = - \frac{1.0 a \kappa}{T^{0.5} Tc^{0.5}}
\left(\kappa \left(- \frac{T^{0.5}}{Tc^{0.5}} + 1\right) + 1\right)
.. math::
\frac{d^2 a\alpha}{dT^2} = 0.5 a \kappa \left(- \frac{1}{T^{1.5}
Tc^{0.5}} \left(\kappa \left(\frac{T^{0.5}}{Tc^{0.5}} - 1\right)
- 1\right) + \frac{\kappa}{T^{1.0} Tc^{1.0}}\right)
Parameters
----------
T : float
Temperature, [K]
Returns
-------
a_alphas : list[float]
Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
da_alpha_dTs : list[float]
Temperature derivative of coefficient calculated by EOS-specific
method, [J^2/mol^2/Pa/K]
d2a_alpha_dT2s : list[float]
Second temperature derivative of coefficient calculated by
EOS-specific method, [J^2/mol^2/Pa/K**2]
'''
N = self.N
if self.scalar:
a_alphas, da_alpha_dTs, d2a_alpha_dT2s = [0.0]*N, [0.0]*N, [0.0]*N
else:
a_alphas, da_alpha_dTs, d2a_alpha_dT2s = zeros(N), zeros(N), zeros(N)
return PR_a_alpha_and_derivatives_vectorized(T, self.Tcs, self.ais, self.kappas, a_alphas=a_alphas,
da_alpha_dTs=da_alpha_dTs, d2a_alpha_dT2s=d2a_alpha_dT2s)
@property
def d3a_alpha_dT3(self):
r'''Method to calculate approximately the third temperature derivative
of `a_alpha` for the PR EOS. A rigorous calculation has not been
implemented.
Parameters
----------
T : float
Temperature, [K]
Returns
-------
d3a_alpha_dT3 : float
Third temperature derivative :math:`a \alpha`, [J^2/mol^2/Pa/K^3]
'''
try:
return self._d3a_alpha_dT3
except AttributeError:
pass
tot = 0.0
zs = self.zs
vs = self.d3a_alpha_dT3_vectorized(self.T)
for i in range(self.N):
tot += zs[i]*vs[i]
self._d3a_alpha_dT3 = tot
return tot
def d3a_alpha_dT3_vectorized(self, T):
r'''Method to calculate the third temperature derivative of
pure-component `a_alphas` for the PR EOS. This vectorized implementation
is added for extra speed.
Parameters
----------
T : float
Temperature, [K]
Returns
-------
d3a_alpha_dT3s : list[float]
Third temperature derivative of coefficient calculated by
EOS-specific method, [J^2/mol^2/Pa/K^3]
'''
ais, kappas, Tcs = self.ais, self.kappas, self.Tcs
T_inv = 1.0/T
N = self.N
d3a_alpha_dT3s = [0.0]*N if self.scalar else zeros(N)
for i in range(N):
kappa = kappas[i]
x0 = 1.0/Tcs[i]
x1 = sqrt(T*x0)
v = (-ais[i]*0.75*kappa*(kappa*x0 - x1*(kappa*(x1 - 1.0) - 1.0)*T_inv)*T_inv*T_inv)
d3a_alpha_dT3s[i] = v
return d3a_alpha_dT3s
    def fugacity_coefficients(self, Z):
        r'''Literature formula for calculating fugacity coefficients for each
        species in a mixture. Verified numerically. Applicable to most
        derivatives of the Peng-Robinson equation of state as well.
        Called by :obj:`fugacities <GCEOSMIX.fugacities>` on initialization,
        or by a solver routine which is performing a flash calculation.

        .. math::
            \ln \hat \phi_i = \frac{B_i}{B}(Z-1)-\ln(Z-B) + \frac{A}{2\sqrt{2}B}
            \left[\frac{B_i}{B} - \frac{2}{a\alpha}\sum_i y_i(a\alpha)_{ij}\right]
            \ln\left[\frac{Z + (1+\sqrt{2})B}{Z-(\sqrt{2}-1)B}\right]

        .. math::
            A = \frac{(a\alpha)P}{R^2 T^2}

        .. math::
            B = \frac{b P}{RT}

        Parameters
        ----------
        Z : float
            Compressibility of the mixture for a desired phase, [-]

        Returns
        -------
        log_phis : list[float]
            Log fugacity coefficient for each species, [-]
        '''
        a_alpha = self.a_alpha
        # a_alpha_ijs = self.a_alpha_ijs
        T_inv = 1.0/self.T
        bs, b = self.bs, self.b
        P_T = self.P*T_inv
        # Dimensionless EOS parameters A = a_alpha*P/(R*T)^2, B = b*P/(R*T)
        A = a_alpha*P_T*R2_inv*T_inv
        B = b*P_T*R_inv
        # The two log terms need to use a complex log; typically these are
        # calculated at "liquid" volume solutions which are unstable
        # and cannot exist
        try:
            x0 = log(Z - B)
        except ValueError:
            # less than zero
            x0 = 0.0
        root_two_B = B*root_two
        two_root_two_B = root_two_B + root_two_B
        ZB = Z + B
        try:
            x4 = A*log((ZB + root_two_B)/(ZB - root_two_B))
        except ValueError:
            # less than zero
            x4 = 0.0
        # Precomputed rows sum_j y_j*(a_alpha)_{ij}
        a_alpha_j_rows = self._a_alpha_j_rows
        try:
            t50 = 2.0*x4/(a_alpha*two_root_two_B)
        except ZeroDivisionError:
            # Degenerate case (a_alpha or B is zero): all ln(phi_i) = 0
            return [0.0]*self.N
        t51 = (x4 + (Z - 1.0)*two_root_two_B)/(b*two_root_two_B)
        if self.scalar:
            return [bs[i]*t51 - x0 - t50*a_alpha_j_rows[i]
                    for i in range(self.N)]
        else:
            return bs*t51 - x0 - t50*a_alpha_j_rows
def dlnphis_dT(self, phase):
    r'''Formula for calculating the temperature derivative of
    log fugacity coefficients for each species in a mixture for the
    Peng-Robinson equation of state. Verified numerically.

    .. math::
        \left(\frac{\partial \ln \phi_i}{\partial T}\right)_{P,
        nj \ne i}

    Parameters
    ----------
    phase : str
        One of 'l' or 'g', [-]

    Returns
    -------
    dlnphis_dT : float
        Temperature derivatives of log fugacity coefficient for each species,
        [1/K]

    Notes
    -----
    This expression was derived using SymPy and optimized with the `cse`
    technique.
    '''
    # Select the compressibility root and its temperature derivative for
    # the requested phase.
    if phase == 'g':
        Z = self.Z_g
        dZ_dT = self.dZ_dT_g
    else:
        Z = self.Z_l
        dZ_dT = self.dZ_dT_l
    bs, b = self.bs, self.b
    T_inv = 1.0/self.T
    # Common subexpressions below come from SymPy's cse output; the
    # generated x-names are kept so the code can be diffed against the
    # derivation. Unused cse leftovers (A, B, x54, x56, zs) were removed.
    x2 = T_inv*T_inv
    x3 = R_inv
    x4 = self.P*b*x3
    x5 = x2*x4
    x8 = x4*T_inv
    x10 = self.a_alpha
    x11 = 1.0/self.a_alpha
    x12 = self.da_alpha_dT
    x13 = root_two
    x14 = 1.0/b
    x15 = x13 + 1.0 # root_two plus 1
    x16 = Z + x15*x8
    x17 = x13 - 1.0 # root two minus one
    x18 = x16/(x17*x8 - Z)
    # x18 is negative for physical roots, so -x18 keeps log() real
    x19 = log(-x18)
    x13x14 = x13*x14
    x10x13x14_4 = 0.25*x10*x13x14
    x19x3 = x19*x3
    x24 = x10x13x14_4*x19x3*x2
    x25 = 0.25*x12*x13x14*x19x3*T_inv
    x26 = x10x13x14_4*x3*T_inv*(-dZ_dT + x15*x5 - x18*(dZ_dT + x17*x5))/(x16)
    x50 = -0.5*x13x14*x19x3*T_inv
    x51 = -x11*x12
    x52 = (dZ_dT + x5)/(x8 - Z)
    x53 = 2.0*x11
    x55 = x24 - x25 + x26
    x57 = x53*x55
    x58 = x14*(dZ_dT - x55)
    x59 = x57/x50 + x51
    # Composition-dependent terms
    a_alpha_j_rows = self._a_alpha_j_rows
    da_alpha_dT_j_rows = self._da_alpha_dT_j_rows
    d_lnphis_dTs = [x52 + bs[i]*x58 + x50*(x59*a_alpha_j_rows[i] + da_alpha_dT_j_rows[i])
                    for i in range(self.N)]
    return d_lnphis_dTs
def dlnphis_dP(self, phase):
    r'''Generic formula for calculating the pressure derivative of
    log fugacity coefficients for each species in a mixture for the
    Peng-Robinson EOS. Verified numerically.

    .. math::
        \left(\frac{\partial \ln \phi_i}{\partial P}\right)_{T,
        nj \ne i}

    Parameters
    ----------
    phase : str
        One of 'l' or 'g', [-]

    Returns
    -------
    dlnphis_dP : float
        Pressure derivatives of log fugacity coefficient for each species,
        [1/Pa]

    Notes
    -----
    This expression was derived using SymPy and optimized with the `cse`
    technique.
    '''
    # Select the compressibility root and its pressure derivative for
    # the requested phase.
    if phase == 'l':
        Z, dZ_dP = self.Z_l, self.dZ_dP_l
    else:
        Z, dZ_dP = self.Z_g, self.dZ_dP_g
    a_alpha = self.a_alpha
    bs, b = self.bs, self.b
    T_inv = 1.0/self.T
    # cse intermediates; the unused `zs` local was removed.
    x2 = 1.0/b
    x6 = b*R_inv*T_inv
    x8 = self.P*x6
    x9 = (dZ_dP - x6)/(x8 - Z)
    x13 = Z + root_two_p1*x8
    x15 = (a_alpha*root_two*x2*R_inv*T_inv*(dZ_dP + root_two_p1*x6
           + x13*(dZ_dP - root_two_m1*x6)/(root_two_m1*x8 - Z))/(4.0*x13))
    x16 = dZ_dP + x15
    a_alpha_j_rows = self._a_alpha_j_rows
    x50 = -2.0/a_alpha
    d_lnphi_dPs = []
    for i in range(self.N):
        x3 = bs[i]*x2
        x10 = x50*a_alpha_j_rows[i]
        # equivalent form: d_lnphi_dP = dZ_dP*x3 + x15*(x10 + x3) + x9
        d_lnphi_dP = x16*x3 + x15*x10 + x9
        d_lnphi_dPs.append(d_lnphi_dP)
    return d_lnphi_dPs
def d_lnphi_dzs_analytical0(self, Z, zs):
    # Experimental analytical composition derivatives of ln(phi). The
    # original author left several doubts inline (preserved below); the
    # output of this routine should be treated as UNVERIFIED.
    # TODO try to follow "B.5.2.1 Derivatives of Fugacity Coefficient with Respect to Mole Fraction"
    # "Development of an Equation-of-State Thermal Flooding Simulator"
    N = self.N
    cmps_m1 = range(N-1)  # all components except the last ("nc")
    a_alpha = self.a_alpha
    a_alpha_ijs = self.a_alpha_ijs
    T2 = self.T*self.T
    b = self.b
    # Dimensionless EOS parameters
    A = a_alpha*self.P/(R2*T2)
    B = b*self.P/(R*self.T)
    B2 = B*B
    Z2 = Z*Z
    A_B = A/B
    ZmB = Z - B
    # dZ/dA and dZ/dB from implicit differentiation of the cubic in Z
    dZ_dA = (B - Z)/(3.0*Z2 - 2.0*(1.0 - B)*Z + (A - 2.0*B - 3.0*B2))
    # 2*(3.0*B + 1)*Z may or may not have Z
    # Simple phase stability-testing algorithm in the reduction method.
    dZ_dB = ((-Z2 + 2*(3.0*B + 1)*Z) + (A - 2.0*B - 3.0*B2))/(
        3.0*Z2 - 2.0*(1.0 - B)*Z + (A - 2.0*B - 3.0*B2))
    # Sis[i] = sum_j z_j * (a alpha)_ij
    Sis = []
    for i in range(N):
        tot = 0.0
        for j in range(N):
            tot += zs[j]*a_alpha_ijs[i][j]
        Sis.append(tot)
    Sais = [val/a_alpha for val in Sis]
    Sbis = [bi/b for bi in self.bs]
    Snc = Sis[-1]  # sum for the last component
    const_A = 2.0*self.P/(R2*T2)
    dA_dzis = [const_A*(Si - Snc) for Si in Sis[:-1]]
    const_B = 2.0*self.P/(R*self.T)
    bnc = self.bs[-1]
    dB_dzis = [const_B*(self.bs[i] - bnc) for i in range(N)] # Probably wrong, missing
    dZ_dzs = [dZ_dA*dA_dz_i + dZ_dB*dB_dzi for dA_dz_i, dB_dzi in zip(dA_dzis, dB_dzis)]
    t1 = (Z2 + 2.0*Z*B - B2)
    # complex log; only the real part is used
    t2 = clog((Z + (root_two + 1.)*B)/(Z - (root_two - 1.)*B)).real
    t3 = t2*-A/(B*two_root_two)
    t4 = -t2/(two_root_two*B)
    a_nc = a_alpha_ijs[-1][-1] # no idea if this is right
    # Have some concerns of what Snc really is
    dlnphis_dzs_all = []
    for i in range(self.N):
        # The final derivative is assembled as A + B + C + D terms per
        # the reference's decomposition.
        Diks = [-A_B*(2.0*Sais[i] - Sbis[i])*(Z*dB_dzis[k] - B*dZ_dzs[k])/t1
                for k in cmps_m1]
        Ciks = [t3*(2.0*(a_alpha_ijs[i][k] - a_nc)/a_alpha
                - 4.0*Sais[i]*(Sais[k] - Snc)
                + Sbis[i]*(Sbis[k] - Snc))
                for k in cmps_m1]
        x5 = t4*(2.0*Sais[i] - Sbis[i])
        Biks = [x5*(dA_dzis[k] - A_B*dB_dzis[k])
                for k in cmps_m1 ]
        Aiks = [Sbis[i]*(dZ_dzs[k] - (Sbis[k] - Snc)*(Z - 1.0))
                - (dZ_dzs[k] - dB_dzis[k])/ZmB
                for k in cmps_m1 ]
        dlnphis_dzs = [Aik + Bik + Cik + Dik for Aik, Bik, Cik, Dik in zip(Aiks, Biks, Ciks, Diks)]
        dlnphis_dzs_all.append(dlnphis_dzs)
    return dlnphis_dzs_all
def d_lnphi_dzs_basic_num(self, Z, zs):
    r'''Numerically compute the mole-fraction derivatives of the log
    fugacity coefficients by forward finite differences. Debug and
    validation helper.

    Parameters
    ----------
    Z : float
        Compressibility (unused; kept for signature compatibility), [-]
    zs : list[float]
        Mole fractions, [-]

    Returns
    -------
    d_lnphi_dzs : list[list[float]]
        Derivatives indexed [i][j] for d ln(phi_i)/d z_j, [-]
    '''
    # Reference log fugacity coefficients: prefer the phase with lower
    # departure Gibbs energy; if either G_dep is missing, fall back to
    # whichever phase root exists.
    try:
        if self.G_dep_l < self.G_dep_g:
            lnphis_ref = self.lnphis_l
        else:
            lnphis_ref = self.lnphis_g
    except AttributeError:
        lnphis_ref = self.lnphis_l if hasattr(self, 'G_dep_l') else self.lnphis_g
    N = len(zs)
    dz = 1e-7  # forward-difference step; perturbed zs are NOT renormalized
    all_diffs = []
    for i in range(N):
        zs2 = list(zs)
        zs2[i] = zs2[i] + dz
        eos2 = self.to_TP_zs(T=self.T, P=self.P, zs=zs2)
        # Prefer the gas-phase result of the perturbed EOS, matching the
        # original behavior; use liquid if no gas root was found.
        try:
            lnphis_new = eos2.lnphis_g
        except AttributeError:
            lnphis_new = eos2.lnphis_l
        all_diffs.append([(lnphis_new[j] - lnphis_ref[j])/dz for j in range(N)])
    # Transpose so the result is indexed [i][j] = d ln(phi_i)/d z_j;
    # pure-Python transpose avoids the numpy dependency.
    return [list(col) for col in zip(*all_diffs)]
def d_lnphi_dzs_numdifftools(self, Z, zs):
    r'''Numerically compute the Jacobian of log fugacity coefficients
    with respect to mole fractions using the third-party `numdifftools`
    package. Debug and validation helper.

    Parameters
    ----------
    Z : float
        Compressibility (unused; kept for signature compatibility), [-]
    zs : list[float]
        Mole fractions, [-]

    Returns
    -------
    jac : numpy.ndarray
        Jacobian d ln(phi_i)/d z_j, [-]
    '''
    import numpy as np
    import numdifftools as nd
    def lnphis_from_zs(zs2):
        if isinstance(zs2, np.ndarray):
            zs2 = zs2.tolist()
        # Normalize so perturbed compositions still sum to one.
        # NOTE(review): an original comment claimed normalization breaks
        # the last row of the Jacobian - confirm before relying on it.
        zs2 = normalize(zs2)
        try:
            return np.array(self.to_TP_zs(T=self.T, P=self.P, zs=zs2).lnphis_l)
        except AttributeError:
            # No liquid root for this composition; use the gas root
            return np.array(self.to_TP_zs(T=self.T, P=self.P, zs=zs2).lnphis_g)
    Jfun_partial = nd.Jacobian(lnphis_from_zs, step=1e-4, order=2, method='central')
    return Jfun_partial(zs)
def dlnphis_dzs(self, Z):
    r'''Calculate and return the mole fraction derivatives of
    log fugacity coefficients for each species in a mixture. This formula
    is specific to the Peng-Robinson equation of state.

    .. math::
        \left(\frac{\partial \ln \phi_i}{\partial z_i}\right)_{P,
        z_{j \ne i}}

    Parameters
    ----------
    Z : float
        Compressibility of the mixture for a desired phase, [-]

    Returns
    -------
    dlnphis_dzs : list[list[float]]
        Mole fraction derivatives of log fugacity coefficient for each
        species (such that the mole fractions do not sum to 1), [-]

    Notes
    -----
    This formula is from [1]_ but is validated to match the generic
    implementation.

    Examples
    --------
    >>> kijs = [[0, 0.00076, 0.00171], [0.00076, 0, 0.00061], [0.00171, 0.00061, 0]]
    >>> eos = PRMIX(Tcs=[469.7, 507.4, 540.3], zs=[0.8168, 0.1501, 0.0331], omegas=[0.249, 0.305, 0.349], Pcs=[3.369E6, 3.012E6, 2.736E6], T=322.29, P=101325, kijs=kijs)
    >>> eos.dlnphis_dzs(eos.Z_l)
    [[0.009938069276, 0.0151503498382, 0.018297235797], [-0.038517738793, -0.05958926042, -0.068438990795], [-0.07057106923, -0.10363920720, -0.14116283024]]

    References
    ----------
    .. [1] Chang, Yih-Bor. "Development and Application of an Equation of
       State Compositional Simulator" 1990.
       https://repositories.lib.utexas.edu/handle/2152/80585.
    '''
    T, P, zs = self.T, self.P, self.zs
    T_inv = 1.0/T
    RT_inv = R_inv*T_inv
    bs, b = self.bs, self.b
    a_alpha = self.a_alpha
    a_alpha_ijs = self.a_alpha_ijs
    a_alpha_j_rows = self.a_alpha_j_rows
    N = len(zs)
    b2 = b*b
    b_inv = 1.0/b
    b2_inv = b_inv*b_inv
    a_alpha2 = a_alpha*a_alpha
    # Dimensionless EOS parameters and common terms. (Unused locals T2
    # and a_alphas from the original were removed.)
    A = a_alpha*P*RT_inv*RT_inv
    B = b*P*RT_inv
    B_inv = 1.0/B
    C = 1.0/(Z - B)
    Zm1 = Z - 1.0
    G = (Z + (1.0 + root_two)*B)/(Z + (1.0 - root_two)*B)
    t4 = 2.0/a_alpha
    t5 = -A/(two_root_two*B)
    # Ei terms; ln phi_i = ln(C) + D_i + E_i*ln(G) in this decomposition
    Eis = [t5*(t4*a_alpha_j_rows[i] - bs[i]*b_inv) for i in range(N)]
    t6 = P*RT_inv
    dB_dxks = [t6*bk for bk in bs]
    const = (P+P)*RT_inv*RT_inv
    dA_dxks = [const*term_i for term_i in a_alpha_j_rows]
    # dZ/dx via implicit differentiation of the cubic F(Z, A, B) = 0
    dF_dZ_inv = 1.0/(3.0*Z*Z - 2.0*Z*(1.0 - B) + (A - 3.0*B*B - 2.0*B))
    t15 = (A - 2.0*B - 3.0*B*B + 2.0*(3.0*B + 1.0)*Z - Z*Z)
    BmZ = (B - Z)
    dZ_dxs = [(BmZ*dA_dxks[i] + t15*dB_dxks[i])*dF_dZ_inv for i in range(N)]
    # dC/dx: function only of k
    ZmB = Z - B
    t20 = -1.0/(ZmB*ZmB)
    dC_dxs = [t20*(dZ_dxs[k] - dB_dxks[k]) for k in range(N)]
    # dD/dx
    dD_dxs = []
    t55s = [b*dZ_dxs[k] - bs[k]*Zm1 for k in range(N)]
    for i in range(N):
        b_term_ratio = bs[i]*b2_inv
        dD_dxs.append([b_term_ratio*t55s[k] for k in range(N)])
    # dE/dx: the only term with two free indexes; the matrix is
    # symmetric so only the lower triangle is computed.
    t1 = 1.0/(two_root_two*a_alpha*b*B)
    t2 = t1*A/(a_alpha*b)
    t50s = [B*dA_dxks[k] - A*dB_dxks[k] for k in range(N)]
    b_two = b + b
    t32 = 2.0*a_alpha*b2
    t33 = 4.0*b2
    t34 = t1*B_inv*a_alpha
    t35 = -t1*B_inv*b_two
    dE_dxs = [[0.0]*N for _ in range(N)]
    for i in range(N):
        zm_aim_tot = a_alpha_j_rows[i]
        t30 = t34*bs[i] + t35*zm_aim_tot
        t31 = t33*zm_aim_tot
        a_alpha_ijs_i = a_alpha_ijs[i]
        for k in range(0, i+1):
            # Sign was wrong in article - should be a plus
            second = t2*(t31*a_alpha_j_rows[k] - t32*a_alpha_ijs_i[k] - bs[i]*bs[k]*a_alpha2)
            dE_dxs[i][k] = dE_dxs[k][i] = t30*t50s[k] + second
    # dG/dx
    t59 = (Z + (1.0 - root_two)*B)
    t60 = two_root_two/(t59*t59)
    dG_dxs = [t60*(Z*dB_dxks[k] - B*dZ_dxs[k]) for k in range(N)]
    G_inv = 1.0/G
    logG = log(G)
    C_inv = 1.0/C
    # Assemble d ln(phi_i)/d x_k from the pieces above
    dlnphis_dxs = []
    t61s = [C_inv*dC_dxi for dC_dxi in dC_dxs]
    for i in range(N):
        dD_dxs_i = dD_dxs[i]
        dE_dxs_i = dE_dxs[i]
        E_G = Eis[i]*G_inv
        dlnphis_dxs_i = [t61s[k] + dD_dxs_i[k] + logG*dE_dxs_i[k] + E_G*dG_dxs[k]
                         for k in range(N)]
        dlnphis_dxs.append(dlnphis_dxs_i)
    return dlnphis_dxs
@property
def ddelta_dzs(self):
    r'''Composition derivatives of `delta` for the Peng-Robinson EOS;
    independent of the phase.

    .. math::
        \left(\frac{\partial \delta}{\partial x_i}\right)_{T, P, x_{i\ne j}}
        = 2 b_i

    Returns
    -------
    ddelta_dzs : list[float]
        Composition derivative of `delta` of each component, [m^3/mol]

    Notes
    -----
    This derivative is checked numerically.
    '''
    n = self.N
    if self.scalar:
        buf = [0.0]*n
    else:
        buf = zeros(n)
    return PR_ddelta_dzs(self.bs, n, out=buf)
@property
def ddelta_dns(self):
    r'''Mole number derivatives of `delta` for the Peng-Robinson EOS;
    independent of the phase.

    .. math::
        \left(\frac{\partial \delta}{\partial n_i}\right)_{T, P, n_{i\ne j}}
        = 2 (b_i - b)

    Returns
    -------
    ddelta_dns : list[float]
        Mole number derivative of `delta` of each component, [m^3/mol^2]

    Notes
    -----
    This derivative is checked numerically.
    '''
    n = self.N
    if self.scalar:
        buf = [0.0]*n
    else:
        buf = zeros(n)
    return PR_ddelta_dns(self.bs, self.b, n, out=buf)
@property
def d2delta_dzizjs(self):
    r'''Second composition derivatives (hessian) of `delta`; these are
    identically zero for the Peng-Robinson EOS and independent of phase.

    .. math::
        \left(\frac{\partial^2 \delta}{\partial x_i\partial x_j}\right)_{T, P, x_{k\ne i,j}}
        = 0

    Returns
    -------
    d2delta_dzizjs : list[float]
        Second Composition derivative of `delta` of each component, [m^3/mol]

    Notes
    -----
    This derivative is checked numerically.
    '''
    n = self.N
    if not self.scalar:
        return zeros((n, n))
    return [[0.0]*n for _ in range(n)]
@property
def d2delta_dninjs(self):
    r'''Second mole number derivatives (hessian) of `delta` for the
    Peng-Robinson EOS; independent of the phase.

    .. math::
        \left(\frac{\partial^2 \delta}{\partial n_i \partial n_j}\right)_{T, P, n_{k\ne i,j}}
        = 4b - 2b_i - 2b_j

    Returns
    -------
    d2delta_dninjs : list[list[float]]
        Second mole number derivative of `delta` of each component, [m^3/mol^3]

    Notes
    -----
    This derivative is checked numerically.
    '''
    n = self.N
    if self.scalar:
        hessian = [[0.0]*n for _ in range(n)]
    else:
        hessian = zeros((n, n))
    return PR_d2delta_dninjs(self.b, self.bs, n, hessian)
@property
def d3delta_dninjnks(self):
    r'''Third partial mole number derivatives of `delta` for the
    Peng-Robinson EOS; independent of the phase.

    .. math::
        \left(\frac{\partial^3 \delta}{\partial n_i \partial n_j \partial n_k }
        \right)_{T, P,
        n_{m \ne i,j,k}} = 4(-3b + b_i + b_j + b_k)

    Returns
    -------
    d3delta_dninjnks : list[list[list[float]]]
        Third mole number derivative of `delta` of each component,
        [m^3/mol^4]

    Notes
    -----
    This derivative is checked numerically.
    '''
    n = self.N
    if self.scalar:
        cube = [[[0.0]*n for _ in range(n)] for _ in range(n)]
    else:
        cube = zeros((n, n, n))
    return PR_d3delta_dninjnks(self.b, self.bs, n, cube)
@property
def depsilon_dzs(self):
    r'''Composition derivatives of `epsilon` for the Peng-Robinson EOS;
    independent of the phase.

    .. math::
        \left(\frac{\partial \epsilon}{\partial x_i}\right)_{T, P, x_{i\ne j}}
        = -2 b_i\cdot b

    Returns
    -------
    depsilon_dzs : list[float]
        Composition derivative of `epsilon` of each component, [m^6/mol^2]

    Notes
    -----
    This derivative is checked numerically.
    '''
    n = self.N
    if self.scalar:
        buf = [0.0]*n
    else:
        buf = zeros(n)
    return PR_depsilon_dzs(self.b, self.bs, n, out=buf)
@property
def depsilon_dns(self):
    r'''Mole number derivatives of `epsilon` for the Peng-Robinson EOS;
    independent of the phase.

    .. math::
        \left(\frac{\partial \epsilon}{\partial n_i}\right)_{T, P, n_{i\ne j}}
        = 2b(b - b_i)

    Returns
    -------
    depsilon_dns : list[float]
        Composition derivative of `epsilon` of each component, [m^6/mol^3]

    Notes
    -----
    This derivative is checked numerically.
    '''
    n = self.N
    if self.scalar:
        buf = [0.0]*n
    else:
        buf = zeros(n)
    return PR_depsilon_dns(self.b, self.bs, n, out=buf)
@property
def d2epsilon_dzizjs(self):
    r'''Second composition derivatives (hessian) of `epsilon` for the
    Peng-Robinson EOS; independent of the phase.

    .. math::
        \left(\frac{\partial^2 \epsilon}{\partial x_i \partial x_j}\right)_{T, P, x_{k\ne i,j}}
        = 2 b_i b_j

    Returns
    -------
    d2epsilon_dzizjs : list[list[float]]
        Second composition derivative of `epsilon` of each component, [m^6/mol^2]

    Notes
    -----
    This derivative is checked numerically.
    '''
    n = self.N
    if self.scalar:
        hessian = [[0.0]*n for _ in range(n)]
    else:
        hessian = zeros((n, n))
    return PR_d2epsilon_dzizjs(self.b, self.bs, n, hessian)
@property
def d2epsilon_dninjs(self):
    r'''Second mole number derivatives (hessian) of `epsilon` for the
    Peng-Robinson EOS; independent of the phase.

    .. math::
        \left(\frac{\partial^2 \epsilon}{\partial n_i n_j}\right)_{T, P,
        n_{k\ne i,j}} = -2b(2b - b_i - b_j) - 2(b - b_i)(b - b_j)

    Returns
    -------
    d2epsilon_dninjs : list[list[float]]
        Second mole number derivative of `epsilon` of each component,
        [m^6/mol^4]

    Notes
    -----
    This derivative is checked numerically; note the original author
    flagged the numerical check as not having enough digits to fully
    trust it yet.
    '''
    n = self.N
    if self.scalar:
        hessian = [[0.0]*n for _ in range(n)]
    else:
        hessian = zeros((n, n))
    return PR_d2epsilon_dninjs(self.b, self.bs, n, hessian)
@property
def d3epsilon_dninjnks(self):
    r'''Third partial mole number derivatives of `epsilon` for the
    Peng-Robinson EOS; independent of the phase.

    .. math::
        \left(\frac{\partial^3 \epsilon}{\partial n_i \partial n_j \partial n_k }
        \right)_{T, P,
        n_{m \ne i,j,k}} = 24b^2 - 12b(b_i + b_j + b_k)
        + 4(b_i b_j + b_i b_k + b_j b_k)

    Returns
    -------
    d3epsilon_dninjnks : list[list[list[float]]]
        Third mole number derivative of `epsilon` of each component,
        [m^6/mol^5]

    Notes
    -----
    This derivative is checked numerically.
    '''
    n = self.N
    if self.scalar:
        cube = [[[0.0]*n for _ in range(n)] for _ in range(n)]
    else:
        cube = zeros((n, n, n))
    return PR_d3epsilon_dninjnks(self.b, self.bs, n, cube)
def solve_T(self, P, V, quick=True, solution=None):
    # Solve for T given P and V. For a single-component PRMIX, the
    # scalar pure-component attributes (Tc, Pc, kappa, a) are installed
    # temporarily so an ancestor's analytical pure-component solve_T can
    # run; they are deleted afterwards so no stale state is left on the
    # instance.
    if self.N == 1 and type(self) is PRMIX:
        self.Tc = self.Tcs[0]
        self.Pc = self.Pcs[0]
        self.kappa = self.kappas[0]
        self.a = self.ais[0]
        # NOTE(review): __mro__[-4] picks an ancestor by position
        # relative to `object`; which class that resolves to depends on
        # the full hierarchy not visible here - confirm it is the pure
        # Peng-Robinson implementation before modifying.
        T = super(type(self).__mro__[-4], self).solve_T(P=P, V=V, solution=solution)
        del self.Tc
        del self.Pc
        del self.kappa
        del self.a
        return T
    else:
        # Multi-component (or subclass) case: dispatch further up the
        # MRO (__mro__[-3]) - presumably the generic numerical solver.
        return super(type(self).__mro__[-3], self).solve_T(P=P, V=V, solution=solution)
class PRMIXTranslated(PRMIX):
    r'''Class for solving the Peng-Robinson [1]_ [2]_ translated cubic equation
    of state for a mixture of any number of compounds. Solves the EOS
    on initialization and calculates fugacities for all components in all
    phases.

    Two of `T`, `P`, and `V` are needed to solve the EOS.

    .. math::
        P = \frac{RT}{v + c - b} - \frac{a\alpha(T)}{(v+c)(v + c + b)+b(v
        + c - b)}

    .. math::
        a \alpha = \sum_i \sum_j z_i z_j {(a\alpha)}_{ij}

    .. math::
        (a\alpha)_{ij} = (1-k_{ij})\sqrt{(a\alpha)_{i}(a\alpha)_{j}}

    .. math::
        b = \sum_i z_i b_i

    .. math::
        a_i=0.45724\frac{R^2T_{c,i}^2}{P_{c,i}}

    .. math::
        b_i=0.07780\frac{RT_{c,i}}{P_{c,i}}

    .. math::
        \alpha(T)_i=[1+\kappa_i(1-\sqrt{T_{r,i}})]^2

    .. math::
        \kappa_i=0.37464+1.54226\omega_i-0.26992\omega^2_i

    Parameters
    ----------
    Tcs : float
        Critical temperatures of all compounds, [K]
    Pcs : float
        Critical pressures of all compounds, [Pa]
    omegas : float
        Acentric factors of all compounds, [-]
    zs : float
        Overall mole fractions of all species, [-]
    kijs : list[list[float]], optional
        n*n size list of lists with binary interaction parameters for the
        Van der Waals mixing rules, default all 0 [-]
    cs : list[float], optional
        Volume translation parameters; always zero in the original
        implementation, [m^3/mol]
    T : float, optional
        Temperature, [K]
    P : float, optional
        Pressure, [Pa]
    V : float, optional
        Molar volume, [m^3/mol]
    fugacities : bool, optional
        Whether or not to calculate fugacity related values (phis, log phis,
        and fugacities); default True, [-]
    only_l : bool, optional
        When true, if there is a liquid and a vapor root, only the liquid
        root (and properties) will be set; default False, [-]
    only_g : bool, optional
        When true, if there is a liquid and a vapor root, only the vapor
        root (and properties) will be set; default False, [-]

    Examples
    --------
    T-P initialization, nitrogen-methane at 115 K and 1 MPa:

    >>> eos = PRMIXTranslated(T=115, P=1E6, cs=[-4.4e-6, -4.35e-6], Tcs=[126.1, 190.6], Pcs=[33.94E5, 46.04E5], omegas=[0.04, 0.011], zs=[0.2, 0.8], kijs=[[0,0.03],[0.03,0]])
    >>> eos.V_l, eos.V_g
    (3.9079056337e-05, 0.00060231393016)
    >>> eos.fugacities_l, eos.fugacities_g
    ([442838.8615, 108854.48589], [184396.972, 565531.7709])

    Notes
    -----
    For P-V initializations, a numerical solver is used to find T.

    References
    ----------
    .. [1] Peng, Ding-Yu, and Donald B. Robinson. "A New Two-Constant Equation
       of State." Industrial & Engineering Chemistry Fundamentals 15, no. 1
       (February 1, 1976): 59-64. doi:10.1021/i160057a011.
    .. [2] Robinson, Donald B., Ding-Yu Peng, and Samuel Y-K Chung. "The
       Development of the Peng - Robinson Equation and Its Application to Phase
       Equilibrium in a System Containing Methanol." Fluid Phase Equilibria 24,
       no. 1 (January 1, 1985): 25-41. doi:10.1016/0378-3812(85)87035-7.
    '''

    translated = True
    eos_pure = PRTranslated
    mix_kwargs_to_pure = {'cs': 'c'}
    kwargs_linear = ('cs',)
    # Use the generic GCEOSMIX implementations for the fugacity-related
    # routines rather than the PRMIX-specific ones above.
    fugacity_coefficients = GCEOSMIX.fugacity_coefficients
    dlnphis_dT = GCEOSMIX.dlnphis_dT
    dlnphis_dP = GCEOSMIX.dlnphis_dP
    d_lnphi_dzs = GCEOSMIX.dlnphis_dzs
    P_max_at_V = GCEOSMIX.P_max_at_V
    model_id = 10202
    # All the b derivatives happen to work out to be the same, and are checked numerically
    solve_T = GCEOS.solve_T
    kwargs_keys = ('kijs', 'cs')

    def __init__(self, Tcs, Pcs, omegas, zs, kijs=None, cs=None,
                 T=None, P=None, V=None,
                 fugacities=True, only_l=False, only_g=False):
        self.N = N = len(Tcs)
        cmps = range(N)
        self.Tcs = Tcs
        self.Pcs = Pcs
        self.omegas = omegas
        self.zs = zs
        # Plain lists select the pure-Python code paths; anything else
        # (arrays) selects the vectorized paths
        self.scalar = scalar = type(zs) is list
        if kijs is None:
            if scalar:
                kijs = [[0.0]*N for i in range(N)]
            else:
                kijs = zeros((N, N))
        self.kijs = kijs
        self.T = T
        self.P = P
        self.V = V
        c2R, c1R2_c2R = self.c2R, self.c1R2_c2R
        # Untranslated covolume b0 and attraction parameter a per component
        if scalar:
            b0s = [c2R*Tcs[i]/Pcs[i] for i in cmps]
            self.ais = [c1R2_c2R*Tcs[i]*b0s[i] for i in cmps]
        else:
            b0s = c2R*Tcs/Pcs
            self.ais = c1R2_c2R*Tcs*b0s
        if cs is None:
            if scalar:
                cs = [0.0]*N
            else:
                cs = zeros(N)
        if scalar:
            # Standard 1976 PR kappa correlation
            self.kappas = [omega*(-0.26992*omega + 1.54226) + 0.37464 for omega in omegas]
            b0, c = 0.0, 0.0
            for i in range(N):
                b0 += b0s[i]*zs[i]
                c += cs[i]*zs[i]
            bs = [b0s[i] - cs[i] for i in range(N)]
        else:
            self.kappas = omegas*(-0.26992*omegas + 1.54226) + 0.37464
            b0 = float((b0s*zs).sum())
            c = float((cs*zs).sum())
            bs = b0s - cs
        self.kwargs = {'kijs': kijs, 'cs': cs}
        self.cs = cs
        self.b0s = b0s
        self.bs = bs
        self.c = c
        # Translated mixture parameters: b, delta, epsilon follow from
        # the volume shift c applied to the untranslated b0
        self.b = b = b0 - c
        self.delta = 2.0*(c + b0)
        self.epsilon = -b0*b0 + c*(c + b0 + b0)
        self.solve(only_l=only_l, only_g=only_g)
        if fugacities:
            self.fugacities()

    def _fast_init_specific(self, other):
        # Fast re-initialization path: reuse the pure-component data of
        # `other` and recompute only the composition-dependent terms.
        self.cs = cs = other.cs
        self.kappas = other.kappas
        zs = self.zs
        self.b0s = b0s = other.b0s
        if self.scalar:
            b0, c = 0.0, 0.0
            for i in range(self.N):
                b0 += b0s[i]*zs[i]
                c += cs[i]*zs[i]
        else:
            b0 = float((b0s*zs).sum())
            c = float((cs*zs).sum())
        self.c = c
        self.b = b0 - c
        self.delta = 2.0*(c + b0)
        self.epsilon = -b0*b0 + c*(c + b0 + b0) # Very important to be calculated exactly the same way as the other implementation

    @property
    def ddelta_dzs(self):
        r'''Helper method for calculating the composition derivatives of
        `delta`. Note this is independent of the phase. :math:`b^0` refers to
        the original `b` parameter not involving any translation.

        .. math::
            \left(\frac{\partial \delta}{\partial x_i}\right)_{T, P, x_{i\ne j}}
            = 2 (c_i + b^0_i)

        Returns
        -------
        ddelta_dzs : list[float]
            Composition derivative of `delta` of each component, [m^3/mol]

        Notes
        -----
        This derivative is checked numerically.
        '''
        N = self.N
        return PR_translated_ddelta_dzs(self.b0s, self.cs, N,
                                        [0.0]*N if self.scalar else zeros(N))

    # Second/third composition derivatives of delta are zero in both the
    # translated and untranslated forms; reuse the PRMIX versions.
    d2delta_dzizjs = PRMIX.d2delta_dzizjs
    d3delta_dzizjzks = PRMIX.d3delta_dzizjzks

    @property
    def ddelta_dns(self):
        r'''Helper method for calculating the mole number derivatives of
        `delta`. Note this is independent of the phase. :math:`b^0` refers to
        the original `b` parameter not involving any translation.

        .. math::
            \left(\frac{\partial \delta}{\partial n_i}\right)_{T, P, n_{i\ne j}}
            = 2 (c_i + b^0_i) - \delta

        Returns
        -------
        ddelta_dns : list[float]
            Mole number derivative of `delta` of each component, [m^3/mol^2]

        Notes
        -----
        This derivative is checked numerically.
        '''
        N = self.N
        return PR_translated_ddelta_dns(self.b0s, self.cs, self.delta, N, [0.0]*N if self.scalar else zeros(N))

    @property
    def d2delta_dninjs(self):
        r'''Helper method for calculating the second mole number derivatives (hessian) of
        `delta`. Note this is independent of the phase. :math:`b^0` refers to
        the original `b` parameter not involving any translation.

        .. math::
            \left(\frac{\partial^2 \delta}{\partial n_i \partial n_j}\right)_{T, P, n_{k\ne i,j}}
            = 2\left(\delta - b^0_i - b^0_j - c_i - c_j \right)

        Returns
        -------
        d2delta_dninjs : list[list[float]]
            Second mole number derivative of `delta` of each component, [m^3/mol^3]

        Notes
        -----
        This derivative is checked numerically.
        '''
        N = self.N
        out = [[0.0]*N for _ in range(N)] if self.scalar else zeros((N, N))
        return PR_translated_d2delta_dninjs(self.b0s, self.cs, self.b, self.c, self.delta, N, out)

    @property
    def d3delta_dninjnks(self):
        r'''Helper method for calculating the third partial mole number
        derivatives of `delta`. Note this is independent of the phase. :math:`b^0` refers to
        the original `b` parameter not involving any translation.

        .. math::
            \left(\frac{\partial^3 \delta}{\partial n_i \partial n_j \partial n_k }
            \right)_{T, P,
            n_{m \ne i,j,k}} = 4\left(b^0_i + b^0_j + b^0_k + c_i + c_j
            + c_k \right) - 6 \delta

        Returns
        -------
        d3delta_dninjnks : list[list[list[float]]]
            Third mole number derivative of `delta` of each component,
            [m^3/mol^4]

        Notes
        -----
        This derivative is checked numerically.
        '''
        N = self.N
        out = [[[0.0]*N for _ in range(N)] for _ in range(N)] if self.scalar else zeros((N, N, N))
        return PR_translated_d3delta_dninjnks(self.b0s, self.cs, self.delta, N, out)

    @property
    def depsilon_dzs(self):
        r'''Helper method for calculating the composition derivatives of
        `epsilon`. Note this is independent of the phase. :math:`b^0` refers to
        the original `b` parameter not involving any translation.

        .. math::
            \left(\frac{\partial \epsilon}{\partial x_i}\right)_{T, P, x_{i\ne j}}
            = c_i(2b^0_i + c) + c(2b^0_i + c_i) - 2b^0 b^0_i

        Returns
        -------
        depsilon_dzs : list[float]
            Composition derivative of `epsilon` of each component, [m^6/mol^2]

        Notes
        -----
        This derivative is checked numerically.
        '''
        N = self.N
        return PR_translated_depsilon_dzs(self.epsilon, self.c, self.b, self.b0s, self.cs, N,
                                          [0.0]*N if self.scalar else zeros(N))

    @property
    def depsilon_dns(self):
        r'''Helper method for calculating the mole number derivatives of
        `epsilon`. Note this is independent of the phase. :math:`b^0` refers to
        the original `b` parameter not involving any translation.

        .. math::
            \left(\frac{\partial \epsilon}{\partial n_i}\right)_{T, P, n_{i\ne j}}
            = 2b^0(b^0 - b^0_i) - c(2b^0 - 2b_i^0 + c - c_i) - (c - c_i)(2b^0 + c)

        Returns
        -------
        depsilon_dns : list[float]
            Composition derivative of `epsilon` of each component, [m^6/mol^3]

        Notes
        -----
        This derivative is checked numerically.
        '''
        epsilon, c, b = self.epsilon, self.c, self.b
        N, b0s, cs = self.N, self.b0s, self.cs
        return PR_translated_depsilon_dns(epsilon, c, b, b0s, cs, N, out=([0.0]*N if self.scalar else zeros(N)))

    @property
    def d2epsilon_dzizjs(self):
        r'''Helper method for calculating the second composition derivatives (hessian)
        of `epsilon`. Note this is independent of the phase.

        .. math::
            \left(\frac{\partial^2 \epsilon}{\partial x_i \partial x_j}\right)_{T, P, x_{k\ne i,j}}
            = -2 b^0_i b^0_j + 2b^0_i c_j + 2b^0_j c_i + 2c_i c_j

        Returns
        -------
        d2epsilon_dzizjs : list[list[float]]
            Second composition derivative of `epsilon` of each component, [m^6/mol^2]

        Notes
        -----
        This derivative is checked numerically.
        '''
        N = self.N
        out = [[0.0]*N for _ in range(N)] if self.scalar else zeros((N, N))
        return PR_translated_d2epsilon_dzizjs(self.b0s, self.cs, N=N, out=out)

    # Third composition derivatives of epsilon are all zeros
    d3epsilon_dzizjzks = GCEOSMIX.d3epsilon_dzizjzks # Zeros

    @property
    def d2epsilon_dninjs(self):
        r'''Helper method for calculating the second mole number derivatives (hessian) of
        `epsilon`. Note this is independent of the phase.

        .. math::
            \left(\frac{\partial^2 \epsilon}{\partial n_i n_j}\right)_{T, P, n_{k\ne i,j}}
            = -2b^0(2b^0 - b_i^0 - b_j^0) + c(4b^0 - 2b^0_i - 2b^0_j + 2c - c_i - c_j)
            -2(b^0 - b_i^0)(b^0 - b^0_j)
            + (c - c_i)(2b^0 - 2b^0_j - c_j + c)
            + (c - c_j)(2b^0 - 2b^0_i - c_i + c)
            + (2b^0 + c)(2c-c_i - c_j)

        Returns
        -------
        d2epsilon_dninjs : list[list[float]]
            Second mole number derivative of `epsilon` of each component, [m^6/mol^4]

        Notes
        -----
        This derivative is checked numerically.
        '''
        # Not trusted yet - numerical check does not have enough digits
        N = self.N
        out = [[0.0]*N for _ in range(N)] if self.scalar else zeros((N, N))
        return PR_translated_d2epsilon_dninjs(self.b0s, self.cs, self.b, self.c, N, out=out)

    @property
    def d3epsilon_dninjnks(self):
        r'''Helper method for calculating the third partial mole number
        derivatives of `epsilon`. Note this is independent of the phase.

        .. math::
            \left(\frac{\partial^3 \epsilon}{\partial n_i \partial n_j \partial n_k }
            \right)_{T, P,
            n_{m \ne i,j,k}} = 4b^0(3b^0 - b_i^0 - b_j^0 - b_k^0)
            -2c(6b^0 - 2(b_i^0 + b_j^0 + b_k^0) + 3c - (c_i + c_j + c_k))
            +2(b^0-b_i^0)(2b^0 - b_j^0 - b_k^0) + 2(b^0 - b^0_j)(2b^0 - b_i^0 - b_k^0)
            +2(b^0-b^0_k)(2b^0 - b^0_i-b^0_j)
            -(c-c_i)(4b^0 - 2b^0_j - 2b^0_k + 2c - c_j - c_k)
            -(c-c_j)(4b^0 - 2b^0_i - 2b^0_k + 2c - c_i - c_k)
            -(c-c_k)(4b^0 - 2b^0_j - 2b^0_i + 2c - c_j - c_i)
            -2(c + 2b^0)(3c - c_i - c_j - c_k)
            -(2c - c_i - c_j)(2b^0 + c - 2b^0_k - c_k)
            -(2c - c_i - c_k)(2b^0 + c - 2b^0_j - c_j)
            -(2c - c_j - c_k)(2b^0 + c - 2b^0_i - c_i)

        Returns
        -------
        d3epsilon_dninjnks : list[list[list[float]]]
            Third mole number derivative of `epsilon` of each component,
            [m^6/mol^5]

        Notes
        -----
        This derivative is checked numerically.
        '''
        N = self.N
        out = [[[0.0]*N for _ in range(N)] for _ in range(N)] if self.scalar else zeros((N, N, N))
        return PR_translated_d3epsilon_dninjnks(self.b0s, self.cs, self.b, self.c, self.epsilon, N, out)
class PRMIXTranslatedPPJP(PRMIXTranslated):
    r'''Class for solving the Pina-Martinez, Privat, Jaubert,
    and Peng revision of the Peng-Robinson equation of state.

    Two of `T`, `P`, and `V` are needed to solve the EOS.

    .. math::
        P = \frac{RT}{v + c - b} - \frac{a\alpha(T)}{(v+c)(v + c + b)+b(v
        + c - b)}

    .. math::
        a \alpha = \sum_i \sum_j z_i z_j {(a\alpha)}_{ij}

    .. math::
        (a\alpha)_{ij} = (1-k_{ij})\sqrt{(a\alpha)_{i}(a\alpha)_{j}}

    .. math::
        b = \sum_i z_i b_i

    .. math::
        a_i=0.45724\frac{R^2T_{c,i}^2}{P_{c,i}}

    .. math::
        b_i=0.07780\frac{RT_{c,i}}{P_{c,i}}

    .. math::
        \alpha(T)_i=[1+\kappa_i(1-\sqrt{T_{r,i}})]^2

    .. math::
        \kappa_i=0.3919 + 1.4996 \omega - 0.2721\omega^2 + 0.1063\omega^3

    Parameters
    ----------
    Tcs : float
        Critical temperatures of all compounds, [K]
    Pcs : float
        Critical pressures of all compounds, [Pa]
    omegas : float
        Acentric factors of all compounds, [-]
    zs : float
        Overall mole fractions of all species, [-]
    kijs : list[list[float]], optional
        n*n size list of lists with binary interaction parameters for the
        Van der Waals mixing rules, default all 0 [-]
    cs : list[float], optional
        Volume translation parameters, [m^3/mol]
    T : float, optional
        Temperature, [K]
    P : float, optional
        Pressure, [Pa]
    V : float, optional
        Molar volume, [m^3/mol]
    fugacities : bool, optional
        Whether or not to calculate fugacity related values (phis, log phis,
        and fugacities); default True, [-]
    only_l : bool, optional
        When true, if there is a liquid and a vapor root, only the liquid
        root (and properties) will be set; default False, [-]
    only_g : bool, optional
        When true, if there is a liquid and a vapor root, only the vapor
        root (and properties) will be set; default False, [-]

    Examples
    --------
    T-P initialization, nitrogen-methane at 115 K and 1 MPa:

    >>> eos = PRMIXTranslatedPPJP(T=115, P=1E6, cs=[-4.4e-6, -4.35e-6], Tcs=[126.1, 190.6], Pcs=[33.94E5, 46.04E5], omegas=[0.04, 0.011], zs=[0.2, 0.8], kijs=[[0,0.03],[0.03,0]])
    >>> eos.V_l, eos.V_g
    (3.8989032701e-05, 0.00059686183724)
    >>> eos.fugacities_l, eos.fugacities_g
    ([444791.13707, 104520.280997], [184782.600238, 563352.147])

    Notes
    -----
    For P-V initializations, a numerical solver is used to find T.

    References
    ----------
    .. [1] Pina-Martinez, Andrés, Romain Privat, Jean-Noël Jaubert, and
       Ding-Yu Peng. "Updated Versions of the Generalized Soave α-Function
       Suitable for the Redlich-Kwong and Peng-Robinson Equations of State."
       Fluid Phase Equilibria, December 7, 2018.
       https://doi.org/10.1016/j.fluid.2018.12.007.
    '''
    eos_pure = PRTranslatedPPJP
    mix_kwargs_to_pure = {'cs': 'c'}
    kwargs_linear = ('cs',)
    kwargs_keys = ('kijs', 'cs')
    model_id = 10207

    def __init__(self, Tcs, Pcs, omegas, zs, kijs=None, cs=None,
                 T=None, P=None, V=None,
                 fugacities=True, only_l=False, only_g=False):
        self.N = N = len(Tcs)
        cmps = range(N)
        self.Tcs = Tcs
        self.Pcs = Pcs
        self.omegas = omegas
        self.zs = zs
        # Plain lists select the pure-Python code paths; anything else
        # (arrays) selects the vectorized paths
        self.scalar = scalar = type(zs) is list
        if kijs is None:
            if scalar:
                kijs = [[0.0]*N for i in cmps]
            else:
                kijs = zeros((N, N))
        self.kijs = kijs
        self.T = T
        self.P = P
        self.V = V
        c2R, c1R2_c2R = self.c2R, self.c1R2_c2R
        # Untranslated covolume b0 and attraction parameter a per component
        if scalar:
            b0s = [c2R*Tcs[i]/Pcs[i] for i in cmps]
            self.ais = [c1R2_c2R*Tcs[i]*b0s[i] for i in cmps]
        else:
            b0s = c2R*Tcs/Pcs
            self.ais = c1R2_c2R*Tcs*b0s
        if cs is None:
            if scalar:
                cs = [0.0]*N
            else:
                cs = zeros(N)
        if scalar:
            # PPJP 2018 revised cubic kappa correlation (differs from
            # the 1976 PR correlation used in PRMIXTranslated)
            self.kappas = [omega*(omega*(0.1063*omega - 0.2721) + 1.4996) + 0.3919 for omega in omegas]
            b0, c = 0.0, 0.0
            for i in range(N):
                b0 += b0s[i]*zs[i]
                c += cs[i]*zs[i]
            bs = [b0s[i] - cs[i] for i in range(N)]
        else:
            self.kappas = omegas*(omegas*(0.1063*omegas - 0.2721) + 1.4996) + 0.3919
            b0 = float((b0s*zs).sum())
            c = float((cs*zs).sum())
            bs = b0s - cs
        self.kwargs = {'kijs': kijs, 'cs': cs}
        self.cs = cs
        self.b0s = b0s
        self.bs = bs
        self.c = c
        # Translated mixture parameters, computed identically to
        # PRMIXTranslated.__init__
        self.b = b = b0 - c
        self.delta = 2.0*(c + b0)
        self.epsilon = -b0*b0 + c*(c + b0 + b0)
        self.solve(only_l=only_l, only_g=only_g)
        if fugacities:
            self.fugacities()
class PRMIXTranslatedConsistent(Twu91_a_alpha, PRMIXTranslated):
    r'''Class for solving the volume translated Le Guennec, Privat, and Jaubert
    revision of the Peng-Robinson equation of state according to [1]_.
    Two of `T`, `P`, and `V` are needed to solve the EOS.
    .. math::
        P = \frac{RT}{v + c - b} - \frac{a\alpha(T)}{(v+c)(v + c + b)+b(v
        + c - b)}
    .. math::
        a \alpha = \sum_i \sum_j z_i z_j {(a\alpha)}_{ij}
    .. math::
        (a\alpha)_{ij} = (1-k_{ij})\sqrt{(a\alpha)_{i}(a\alpha)_{j}}
    .. math::
        b = \sum_i z_i b_i
    .. math::
        a_i=0.45724\frac{R^2T_{c,i}^2}{P_{c,i}}
    .. math::
        b_i=0.07780\frac{RT_{c,i}}{P_{c,i}}
    .. math::
        \alpha_i = \left(\frac{T}{T_{c}}\right)^{c_{3} \left(c_{2}
        - 1\right)} e^{c_{1} \left(- \left(\frac{T}{T_{c}}
        \right)^{c_{2} c_{3}} + 1\right)}
    If `c` is not provided, they are estimated as:
    .. math::
        c =\frac{R T_c}{P_c}(0.0198\omega - 0.0065)
    If `alpha_coeffs` is not provided, the parameters `L` and `M` are estimated
    from the acentric factor as follows:
    .. math::
        L = 0.1290\omega^2 + 0.6039\omega + 0.0877
    .. math::
        M = 0.1760\omega^2 - 0.2600\omega + 0.8884
    Parameters
    ----------
    Tcs : float
        Critical temperatures of all compounds, [K]
    Pcs : float
        Critical pressures of all compounds, [Pa]
    omegas : float
        Acentric factors of all compounds, [-]
    zs : float
        Overall mole fractions of all species, [-]
    kijs : list[list[float]], optional
        n*n size list of lists with binary interaction parameters for the
        Van der Waals mixing rules, default all 0 [-]
    cs : list[float], optional
        Volume translation parameters, [m^3/mol]
    alpha_coeffs : list[tuple(float[3])], optional
        Coefficients L, M, N (also called C1, C2, C3) of TWU 1991 form, [-]
    T : float, optional
        Temperature, [K]
    P : float, optional
        Pressure, [Pa]
    V : float, optional
        Molar volume, [m^3/mol]
    fugacities : bool, optional
        Whether or not to calculate fugacity related values (phis, log phis,
        and fugacities); default True, [-]
    only_l : bool, optional
        When true, if there is a liquid and a vapor root, only the liquid
        root (and properties) will be set; default False, [-]
    only_g : bool, optional
        When true, if there is a liquid and a vapor root, only the vapor
        root (and properties) will be set; default False, [-]
    Examples
    --------
    T-P initialization, nitrogen-methane at 115 K and 1 MPa:
    >>> eos = PRMIXTranslatedConsistent(T=115, P=1E6, Tcs=[126.1, 190.6], Pcs=[33.94E5, 46.04E5], omegas=[0.04, 0.011], zs=[0.2, 0.8], kijs=[[0,0.03],[0.03,0]])
    >>> eos.V_l, eos.V_g
    (3.675235812e-05, 0.00059709319879)
    >>> eos.fugacities_l, eos.fugacities_g
    ([443454.9336, 106184.004057], [184122.74082, 563037.785])
    Notes
    -----
    For P-V initializations, a numerical solver is used to find T.
    References
    ----------
    .. [1] Le Guennec, Yohann, Romain Privat, and Jean-Noël Jaubert.
       "Development of the Translated-Consistent Tc-PR and Tc-RK Cubic
       Equations of State for a Safe and Accurate Prediction of Volumetric,
       Energetic and Saturation Properties of Pure Compounds in the Sub- and
       Super-Critical Domains." Fluid Phase Equilibria 429 (December 15, 2016):
       301-12. https://doi.org/10.1016/j.fluid.2016.09.003.
    '''
    eos_pure = PRTranslatedConsistent
    kwargs_linear = ('cs', 'alpha_coeffs')
    mix_kwargs_to_pure = {'cs': 'c', 'alpha_coeffs': 'alpha_coeffs'}
    kwargs_keys = ('kijs', 'alpha_coeffs', 'cs')
    model_id = 10203
    # There is an updated set of correlations - which means a revision flag is needed
    # Analysis of the Combinations of Property Data That Are Suitable for a Safe Estimation of Consistent Twu α-Function Parameters: Updated Parameter Values for the Translated-Consistent tc-PR and tc-RK Cubic Equations of State
    def __init__(self, Tcs, Pcs, omegas, zs, kijs=None, cs=None,
                 alpha_coeffs=None, T=None, P=None, V=None,
                 fugacities=True, only_l=False, only_g=False):
        # Store composition and pure-component critical constants.
        self.N = N = len(Tcs)
        cmps = range(N)
        self.Tcs = Tcs
        self.Pcs = Pcs
        self.omegas = omegas
        self.zs = zs
        # `scalar` selects the plain-list code path vs the numpy-array path.
        self.scalar = scalar = type(zs) is list
        if kijs is None:
            if scalar:
                kijs = [[0.0]*N for i in cmps]
            else:
                kijs = zeros((N, N))
        self.kijs = kijs
        self.T = T
        self.P = P
        self.V = V
        c2R, c1R2_c2R = self.c2R, self.c1R2_c2R
        if scalar:
            # b0s: untranslated covolumes; ais: attractive parameters a_i.
            b0s = [c2R*Tcs[i]/Pcs[i] for i in cmps]
            self.ais = [c1R2_c2R*Tcs[i]*b0s[i] for i in cmps]
        else:
            b0s = c2R*Tcs/Pcs
            self.ais = c1R2_c2R*Tcs*b0s
        if cs is None:
            # Estimate translations from the tc-PR correlation, clamping
            # omega to the correlation's fitted validity range [-0.01, 1.48].
            if scalar:
                cs = [R*Tcs[i]/Pcs[i]*(0.0198*min(max(omegas[i], -0.01), 1.48) - 0.0065)
                      for i in range(N)]
            else:
                # NOTE(review): assumes npmin/npmax are element-wise aliases
                # (numpy.minimum/numpy.maximum); np.min/np.max would reduce
                # instead of clamping - confirm the module-level imports.
                cs = R*Tcs/Pcs*(0.0198*npmin(npmax(omegas, -0.01), 1.48) - 0.0065)
        if alpha_coeffs is None:
            # Estimate Twu91 L and M from omega; third coefficient fixed at 2.
            alpha_coeffs = []
            for i in range(N):
                o = min(max(omegas[i], -0.01), 1.48)
                L = o*(0.1290*o + 0.6039) + 0.0877
                M = o*(0.1760*o - 0.2600) + 0.8884
                alpha_coeffs.append((L, M, 2.0))
        self.kwargs = {'kijs': kijs, 'alpha_coeffs': alpha_coeffs, 'cs': cs}
        self.alpha_coeffs = alpha_coeffs
        self.cs = cs
        if scalar:
            # Mole-fraction-weighted mixture b0 and translation c.
            b0, c = 0.0, 0.0
            for i in range(N):
                b0 += b0s[i]*zs[i]
                c += cs[i]*zs[i]
            bs = [b0s[i] - cs[i] for i in range(N)]
        else:
            b0 = float((b0s*zs).sum())
            c = float((cs*zs).sum())
            bs = b0s - cs
        self.b0s = b0s
        self.bs = bs
        self.c = c
        # Translated-PR cubic coefficients; exact arithmetic form matters
        # for bit-for-bit agreement with _fast_init_specific below.
        self.b = b = b0 - c
        self.delta = 2.0*(c + b0)
        self.epsilon = -b0*b0 + c*(c + b0 + b0)
        self.solve(only_l=only_l, only_g=only_g)
        if fugacities:
            self.fugacities()
    def _fast_init_specific(self, other):
        # Fast re-initialization path: copy constants from `other` (an
        # already-constructed instance) and recompute only the
        # composition-dependent mixture parameters for self.zs.
        self.cs = cs = other.cs
        self.alpha_coeffs = other.alpha_coeffs
        zs = self.zs
        self.b0s = b0s = other.b0s
        if self.scalar:
            b0, c = 0.0, 0.0
            for i in range(self.N):
                b0 += b0s[i]*zs[i]
                c += cs[i]*zs[i]
        else:
            b0 = float((zs*b0s).sum())
            c = float((zs*cs).sum())
        self.c = c
        self.b = b0 - c
        self.delta = 2.0*(c + b0)
        self.epsilon = -b0*b0 + c*(c + b0 + b0) # Very important to be calculated exactly the same way as the other implementation
class SRKMIX(EpsilonZeroMixingRules, GCEOSMIX, SRK):
    r'''Class for solving the Soave-Redlich-Kwong cubic equation of state for a
    mixture of any number of compounds. Solves the EOS on
    initialization and calculates fugacities for all components in all phases.
    The implemented method here is :obj:`fugacity_coefficients <SRKMIX.fugacity_coefficients>`, which implements
    the formula for fugacity coefficients in a mixture as given in [1]_.
    Two of `T`, `P`, and `V` are needed to solve the EOS.
    .. math::
        P = \frac{RT}{V-b} - \frac{a\alpha(T)}{V(V+b)}
    .. math::
        a \alpha = \sum_i \sum_j z_i z_j {(a\alpha)}_{ij}
    .. math::
        (a\alpha)_{ij} = (1-k_{ij})\sqrt{(a\alpha)_{i}(a\alpha)_{j}}
    .. math::
        b = \sum_i z_i b_i
    .. math::
        a_i =\left(\frac{R^2(T_{c,i})^{2}}{9(\sqrt[3]{2}-1)P_{c,i}} \right)
        =\frac{0.42748\cdot R^2(T_{c,i})^{2}}{P_{c,i}}
    .. math::
        b_i =\left( \frac{(\sqrt[3]{2}-1)}{3}\right)\frac{RT_{c,i}}{P_{c,i}}
        =\frac{0.08664\cdot R T_{c,i}}{P_{c,i}}
    .. math::
        \alpha(T)_i = \left[1 + m_i\left(1 - \sqrt{\frac{T}{T_{c,i}}}\right)\right]^2
    .. math::
        m_i = 0.480 + 1.574\omega_i - 0.176\omega_i^2
    Parameters
    ----------
    Tcs : float
        Critical temperatures of all compounds, [K]
    Pcs : float
        Critical pressures of all compounds, [Pa]
    omegas : float
        Acentric factors of all compounds, [-]
    zs : float
        Overall mole fractions of all species, [-]
    kijs : list[list[float]], optional
        n*n size list of lists with binary interaction parameters for the
        Van der Waals mixing rules, default all 0 [-]
    T : float, optional
        Temperature, [K]
    P : float, optional
        Pressure, [Pa]
    V : float, optional
        Molar volume, [m^3/mol]
    fugacities : bool, optional
        Whether or not to calculate fugacity related values (phis, log phis,
        and fugacities); default True, [-]
    only_l : bool, optional
        When true, if there is a liquid and a vapor root, only the liquid
        root (and properties) will be set; default False, [-]
    only_g : bool, optional
        When true, if there is a liquid and a vapor root, only the vapor
        root (and properties) will be set; default False, [-]
    Examples
    --------
    T-P initialization, nitrogen-methane at 115 K and 1 MPa:
    >>> SRK_mix = SRKMIX(T=115, P=1E6, Tcs=[126.1, 190.6], Pcs=[33.94E5, 46.04E5], omegas=[0.04, 0.011], zs=[0.5, 0.5], kijs=[[0,0],[0,0]])
    >>> SRK_mix.V_l, SRK_mix.V_g
    (4.1047569614e-05, 0.0007110158049)
    Notes
    -----
    For P-V initializations, a numerical solver is used to find T.
    References
    ----------
    .. [1] Soave, Giorgio. "Equilibrium Constants from a Modified Redlich-Kwong
       Equation of State." Chemical Engineering Science 27, no. 6 (June 1972):
       1197-1203. doi:10.1016/0009-2509(72)80096-4.
    .. [2] Poling, Bruce E. The Properties of Gases and Liquids. 5th
       edition. New York: McGraw-Hill Professional, 2000.
    .. [3] Walas, Stanley M. Phase Equilibria in Chemical Engineering.
       Butterworth-Heinemann, 1985.
    '''
    eos_pure = SRK
    nonstate_constants_specific = ('ms',)
    kwargs_keys = ('kijs', )
    model_id = 10100
    # delta = b and epsilon = 0 for SRK, the same structure as RKMIX, so the
    # delta derivative helpers can be reused directly from RKMIX.
    ddelta_dzs = RKMIX.ddelta_dzs
    ddelta_dns = RKMIX.ddelta_dns
    d2delta_dzizjs = RKMIX.d2delta_dzizjs
    d2delta_dninjs = RKMIX.d2delta_dninjs
    d3delta_dninjnks = RKMIX.d3delta_dninjnks
    def __init__(self, Tcs, Pcs, omegas, zs, kijs=None, T=None, P=None, V=None,
                 fugacities=True, only_l=False, only_g=False):
        # Store composition and pure-component critical constants.
        self.N = N = len(Tcs)
        cmps = range(N)
        self.Tcs = Tcs
        self.Pcs = Pcs
        self.omegas = omegas
        self.zs = zs
        # `scalar` selects the plain-list code path vs the numpy-array path.
        self.scalar = scalar = type(zs) is list
        if kijs is None:
            if scalar:
                kijs = [[0.0]*N for i in cmps]
            else:
                kijs = zeros((N, N))
        self.kijs = kijs
        self.kwargs = {'kijs': kijs}
        self.T = T
        self.P = P
        self.V = V
        if self.scalar:
            # ais/bs: pure-component SRK parameters; ms: the Soave m(omega)
            # polynomial; b: linear mixing rule for the covolume.
            self.ais = [self.c1*R2*Tc*Tc/Pc for Tc, Pc in zip(Tcs, Pcs)]
            self.bs = [self.c2*R*Tc/Pc for Tc, Pc in zip(Tcs, Pcs)]
            ms = [omega*(1.574 - 0.176*omega) + 0.480 for omega in omegas]
            b = sum(bi*zi for bi, zi in zip(self.bs, self.zs))
        else:
            Tc_Pc_ratio = Tcs/Pcs
            self.ais = self.c1R2*Tcs*Tc_Pc_ratio
            self.bs = bs = self.c2R*Tc_Pc_ratio
            ms = omegas*(1.574 - 0.176*omegas) + 0.480
            b = float((bs*zs).sum())
        self.b = b
        self.ms = ms
        # For SRK the cubic coefficient delta equals b (epsilon is zero via
        # the EpsilonZeroMixingRules base).
        self.delta = self.b
        self.solve(only_l=only_l, only_g=only_g)
        if fugacities:
            self.fugacities()
    def _fast_init_specific(self, other):
        # Fast re-initialization path: copy `ms` from `other` and recompute
        # only the composition-dependent mixture covolume for self.zs.
        self.ms = other.ms
        if self.scalar:
            self.b = b = sum([bi*zi for bi, zi in zip(self.bs, self.zs)])
        else:
            self.b = b = float((self.bs*self.zs).sum())
        self.delta = b
    def a_alphas_vectorized(self, T):
        r'''Method to calculate the pure-component `a_alphas` for the SRK EOS.
        This vectorized implementation is added for extra speed.
        .. math::
            a\alpha = a \left(m \left(- \sqrt{\frac{T}{Tc}} + 1\right)
            + 1\right)^{2}
        Parameters
        ----------
        T : float
            Temperature, [K]
        Returns
        -------
        a_alphas : list[float]
            Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
        '''
        return SRK_a_alphas_vectorized(T, self.Tcs, self.ais, self.ms,
                                       a_alphas=[0.0]*self.N if self.scalar else zeros(self.N))
    def a_alpha_and_derivatives_vectorized(self, T):
        r'''Method to calculate the pure-component `a_alphas` and their first
        and second derivatives for the SRK EOS. This vectorized implementation
        is added for extra speed.
        .. math::
            a\alpha = a \left(m \left(- \sqrt{\frac{T}{Tc}} + 1\right)
            + 1\right)^{2}
        .. math::
            \frac{d a\alpha}{dT} = \frac{a m}{T} \sqrt{\frac{T}{Tc}} \left(m
            \left(\sqrt{\frac{T}{Tc}} - 1\right) - 1\right)
        .. math::
            \frac{d^2 a\alpha}{dT^2} = \frac{a m \sqrt{\frac{T}{Tc}}}{2 T^{2}}
            \left(m + 1\right)
        Parameters
        ----------
        T : float
            Temperature, [K]
        Returns
        -------
        a_alphas : list[float]
            Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
        da_alpha_dTs : list[float]
            Temperature derivative of coefficient calculated by EOS-specific
            method, [J^2/mol^2/Pa/K]
        d2a_alpha_dT2s : list[float]
            Second temperature derivative of coefficient calculated by
            EOS-specific method, [J^2/mol^2/Pa/K**2]
        '''
        N = self.N
        # Preallocate output buffers matching the scalar/array mode.
        if self.scalar:
            a_alphas, da_alpha_dTs, d2a_alpha_dT2s = [0.0]*N, [0.0]*N, [0.0]*N
        else:
            a_alphas, da_alpha_dTs, d2a_alpha_dT2s = zeros(N), zeros(N), zeros(N)
        return SRK_a_alpha_and_derivatives_vectorized(T, self.Tcs, self.ais, self.ms,
                                                      a_alphas=a_alphas, da_alpha_dTs=da_alpha_dTs, d2a_alpha_dT2s=d2a_alpha_dT2s)
    def fugacity_coefficients(self, Z):
        r'''Literature formula for calculating fugacity coefficients for each
        species in a mixture. Verified numerically. Applicable to most
        derivatives of the SRK equation of state as well.
        Called by :obj:`fugacities <GCEOSMIX.fugacities>` on initialization, or by a solver routine
        which is performing a flash calculation.
        .. math::
            \ln \hat \phi_i = \frac{B_i}{B}(Z-1) - \ln(Z-B) + \frac{A}{B}
            \left[\frac{B_i}{B} - \frac{2}{a \alpha}\sum_i y_i(a\alpha)_{ij}
            \right]\ln\left(1+\frac{B}{Z}\right)
        .. math::
            A=\frac{a\alpha P}{R^2T^2}
        .. math::
            B = \frac{bP}{RT}
        Parameters
        ----------
        Z : float
            Compressibility of the mixture for a desired phase, [-]
        Returns
        -------
        log_phis : float
            Log fugacity coefficient for each species, [-]
        '''
        N = self.N
        return SRK_lnphis(self.T, self.P, Z, self.b, self.a_alpha, self.bs, self.a_alpha_j_rows, N,
                          lnphis=[0.0]*N if self.scalar else zeros(N))
    def dlnphis_dT(self, phase):
        r'''Formula for calculating the temperature derivaitve of
        log fugacity coefficients for each species in a mixture for the
        SRK equation of state. Verified numerically.
        .. math::
            \left(\frac{\partial \ln \phi_i}{\partial T}\right)_{P,
            nj \ne i}
        Parameters
        ----------
        phase : str
            One of 'l' or 'g', [-]
        Returns
        -------
        dlnphis_dT : float
            Temperature derivatives of log fugacity coefficient for each
            species, [1/K]
        Notes
        -----
        This expression was derived using SymPy and optimized with the `cse`
        technique.
        '''
        zs = self.zs
        # Pick the compressibility and its T derivative for the phase.
        if phase == 'g':
            Z = self.Z_g
            dZ_dT = self.dZ_dT_g
        else:
            Z = self.Z_l
            dZ_dT = self.dZ_dT_l
        da_alpha_dT_j_rows = self._da_alpha_dT_j_rows
        N = self.N
        P, bs, b = self.P, self.bs, self.b
        T_inv = 1.0/self.T
        A = self.a_alpha*P*R2_inv*T_inv*T_inv
        B = b*P*R_inv*T_inv
        # Composition-independent common subexpressions (SymPy cse output);
        # the x* names come from the code generator and are kept as-is.
        x2 = T_inv*T_inv
        x4 = P*b*R_inv
        x6 = x4*T_inv
        x8 = self.a_alpha
        x9 = 1.0/x8
        x10 = self.da_alpha_dT
        x11 = 1.0/b
        x12 = 1.0/Z
        x13 = x12*x6 + 1.0
        x14 = log(x13)
        x19 = x11*x14*x2*R_inv*x8
        x20 = x10*x11*x14*R_inv*T_inv
        x21 = P*x12*x2*x8*(dZ_dT*x12 + T_inv)/(R2*x13)
        x50 = -x11*x14*R_inv*T_inv
        x51 = -2.0*x10
        x52 = (dZ_dT + x2*x4)/(x6 - Z)
        # Composition stuff
        d_lnphis_dTs = []
        a_alpha_j_rows = self.a_alpha_j_rows
        for i in range(N):
            x7 = a_alpha_j_rows[i]
            x15 = (x50*(x51*x7*x9 + 2.0*da_alpha_dT_j_rows[i]) + x52)
            x16 = bs[i]*x11
            x18 = -x16 + 2.0*x7*x9
            d_lhphi_dT = dZ_dT*x16 + x15 + x18*(x19 - x20 + x21)
            d_lnphis_dTs.append(d_lhphi_dT)
        return d_lnphis_dTs
    def dlnphis_dP(self, phase):
        r'''Generic formula for calculating the pressure derivaitve of
        log fugacity coefficients for each species in a mixture for the
        SRK EOS. Verified numerically.
        .. math::
            \left(\frac{\partial \ln \phi_i}{\partial P}\right)_{T,
            nj \ne i}
        Parameters
        ----------
        phase : str
            One of 'l' or 'g', [-]
        Returns
        -------
        dlnphis_dP : float
            Pressure derivatives of log fugacity coefficient for each species,
            [1/Pa]
        Notes
        -----
        This expression was derived using SymPy and optimized with the `cse`
        technique.
        '''
        zs = self.zs
        # Pick the compressibility and its P derivative for the phase.
        if phase == 'l':
            Z, dZ_dP = self.Z_l, self.dZ_dP_l
        else:
            Z, dZ_dP = self.Z_g, self.dZ_dP_g
        a_alpha = self.a_alpha
        N = self.N
        bs, b = self.bs, self.b
        T_inv = 1.0/self.T
        a_alpha_j_rows = self._a_alpha_j_rows
        RT_inv = T_inv*R_inv
        # Composition-independent common subexpressions (SymPy cse output).
        x0 = Z
        x1 = dZ_dP
        x2 = 1.0/b
        x4 = b*RT_inv
        x5 = self.P*x4
        x6 = (dZ_dP - x4)/(x5 - Z)
        x7 = a_alpha
        x9 = 1./Z
        x10 = a_alpha*x9*(self.P*dZ_dP*x9 - 1.0)*RT_inv*RT_inv/((x5*x9 + 1.0))
        x50 = 2.0/a_alpha
        d_lnphi_dPs = []
        for i in range(N):
            x8 = x50*a_alpha_j_rows[i]
            x3 = bs[i]*x2
            d_lnphi_dP = dZ_dP*x3 + x10*(x8 - x3) + x6
            d_lnphi_dPs.append(d_lnphi_dP)
        return d_lnphi_dPs
class SRKMIXTranslated(SRKMIX):
    r'''Class for solving the volume translated Soave-Redlich-Kwong cubic equation of state for a
    mixture of any number of compounds. Subclasses :obj:`SRKMIX`. Solves the EOS on
    initialization and calculates fugacities for all components in all phases.
    Two of `T`, `P`, and `V` are needed to solve the EOS.
    .. math::
        P = \frac{RT}{V + c - b} - \frac{a\alpha(T)}{(V + c)(V + c + b)}
    .. math::
        a \alpha = \sum_i \sum_j z_i z_j {(a\alpha)}_{ij}
    .. math::
        (a\alpha)_{ij} = (1-k_{ij})\sqrt{(a\alpha)_{i}(a\alpha)_{j}}
    .. math::
        b = \sum_i z_i b_i
    .. math::
        a_i =\left(\frac{R^2(T_{c,i})^{2}}{9(\sqrt[3]{2}-1)P_{c,i}} \right)
        =\frac{0.42748\cdot R^2(T_{c,i})^{2}}{P_{c,i}}
    .. math::
        b_i =\left( \frac{(\sqrt[3]{2}-1)}{3}\right)\frac{RT_{c,i}}{P_{c,i}}
        =\frac{0.08664\cdot R T_{c,i}}{P_{c,i}}
    .. math::
        \alpha(T)_i = \left[1 + m_i\left(1 - \sqrt{\frac{T}{T_{c,i}}}\right)\right]^2
    .. math::
        m_i = 0.480 + 1.574\omega_i - 0.176\omega_i^2
    Parameters
    ----------
    Tcs : float
        Critical temperatures of all compounds, [K]
    Pcs : float
        Critical pressures of all compounds, [Pa]
    omegas : float
        Acentric factors of all compounds, [-]
    zs : float
        Overall mole fractions of all species, [-]
    kijs : list[list[float]], optional
        n*n size list of lists with binary interaction parameters for the
        Van der Waals mixing rules, default all 0 [-]
    cs : list[float], optional
        Volume translation parameters; always zero in the original
        implementation, [m^3/mol]
    T : float, optional
        Temperature, [K]
    P : float, optional
        Pressure, [Pa]
    V : float, optional
        Molar volume, [m^3/mol]
    fugacities : bool, optional
        Whether or not to calculate fugacity related values (phis, log phis,
        and fugacities); default True, [-]
    only_l : bool, optional
        When true, if there is a liquid and a vapor root, only the liquid
        root (and properties) will be set; default False, [-]
    only_g : bool, optional
        When true, if there is a liquid and a vapor root, only the vapor
        root (and properties) will be set; default False, [-]
    Examples
    --------
    T-P initialization, nitrogen-methane at 115 K and 1 MPa:
    >>> eos = SRKMIXTranslated(T=115, P=1E6, cs=[-4.4e-6, -4.35e-6], Tcs=[126.1, 190.6], Pcs=[33.94E5, 46.04E5], omegas=[0.04, 0.011], zs=[0.2, 0.8], kijs=[[0,0.03],[0.03,0]])
    >>> eos.V_l, eos.V_g
    (4.35928920e-05, 0.00060927202)
    Notes
    -----
    For P-V initializations, a numerical solver is used to find T.
    '''
    # SRKMIX's analytical fugacity/derivative formulas assume no volume
    # translation, so fall back to the generic GCEOSMIX implementations here.
    fugacity_coefficients = GCEOSMIX.fugacity_coefficients
    dlnphis_dT = GCEOSMIX.dlnphis_dT
    dlnphis_dP = GCEOSMIX.dlnphis_dP
    d_lnphi_dzs = GCEOSMIX.dlnphis_dzs
    P_max_at_V = GCEOSMIX.P_max_at_V
    solve_T = GCEOS.solve_T
    model_id = 10101
    eos_pure = SRKTranslated
    translated = True
    mix_kwargs_to_pure = {'cs': 'c'}
    kwargs_linear = ('cs',)
    kwargs_keys = ('kijs', 'cs')
    def __init__(self, Tcs, Pcs, omegas, zs, kijs=None, cs=None, T=None, P=None, V=None,
                 fugacities=True, only_l=False, only_g=False):
        # Store composition and pure-component critical constants.
        self.N = N = len(Tcs)
        cmps = range(N)
        self.Tcs = Tcs
        self.Pcs = Pcs
        self.omegas = omegas
        self.zs = zs
        # `scalar` selects the plain-list code path vs the numpy-array path.
        self.scalar = scalar = type(zs) is list
        if cs is None:
            # Default: no volume translation.
            if scalar:
                cs = [0.0]*N
            else:
                cs = zeros(N)
        if kijs is None:
            if scalar:
                kijs = [[0.0]*N for i in cmps]
            else:
                kijs = zeros((N, N))
        self.kijs = kijs
        self.kwargs = {'kijs': kijs, 'cs': cs}
        self.T = T
        self.P = P
        self.V = V
        c2R, c1R2_c2R = self.c2R, self.c1R2_c2R
        if scalar:
            # b0s: untranslated covolumes; ais: attractive parameters;
            # ms: Soave m(omega) polynomial.
            b0s = [c2R*Tcs[i]/Pcs[i] for i in cmps]
            self.ais = [c1R2_c2R*Tcs[i]*b0s[i] for i in cmps]
            self.ms = [0.480 + omega*(1.574 - 0.176*omega) for omega in omegas]
        else:
            b0s = c2R*Tcs/Pcs
            self.ais = c1R2_c2R*Tcs*b0s
            self.ms = 0.480 + omegas*(1.574 - 0.176*omegas)
        self.cs = cs
        if scalar:
            # Mole-fraction-weighted mixture b0 and translation c.
            b0, c = 0.0, 0.0
            for i in range(N):
                b0 += b0s[i]*zs[i]
                c += cs[i]*zs[i]
            bs = [b0s[i] - cs[i] for i in range(N)]
        else:
            b0 = float((b0s*zs).sum())
            c = float((cs*zs).sum())
            bs = b0s - cs
        self.b0s = b0s
        self.bs = bs
        self.c = c
        # Translated-SRK cubic coefficients; exact arithmetic form matters
        # for bit-for-bit agreement with _fast_init_specific below.
        self.b = b = b0 - c
        self.delta = c + c + b0
        self.epsilon = c*(b0 + c)
        self.solve(only_l=only_l, only_g=only_g)
        if fugacities:
            self.fugacities()
    def _fast_init_specific(self, other):
        # Fast re-initialization path: copy constants from `other` and
        # recompute only the composition-dependent mixture parameters.
        self.cs = cs = other.cs
        self.ms = other.ms
        zs = self.zs
        self.b0s = b0s = other.b0s
        if self.scalar:
            b0, c = 0.0, 0.0
            for i in range(self.N):
                b0 += b0s[i]*zs[i]
                c += cs[i]*zs[i]
        else:
            b0 = float((b0s*zs).sum())
            c = float((cs*zs).sum())
        self.c = c
        self.b = b0 - c
        self.delta = c + c + b0
        self.epsilon = c*(b0 + c)
    @property
    def ddelta_dzs(self):
        r'''Helper method for calculating the composition derivatives of
        `delta`. Note this is independent of the phase. :math:`b^0` refers to
        the original `b` parameter not involving any translation.
        .. math::
            \left(\frac{\partial \delta}{\partial x_i}\right)_{T, P, x_{i\ne j}}
            = 2 c_i + b^0_i
        Returns
        -------
        ddelta_dzs : list[float]
            Composition derivative of `delta` of each component, [m^3/mol]
        Notes
        -----
        This derivative is checked numerically.
        '''
        b0s, cs = self.b0s, self.cs
        if self.scalar:
            return [(2.0*cs[i] + b0s[i]) for i in range(self.N)]
        return 2.0*cs + b0s
    # Zero in both cases
    d2delta_dzizjs = PRMIX.d2delta_dzizjs
    d3delta_dzizjzks = PRMIX.d3delta_dzizjzks
    @property
    def ddelta_dns(self):
        r'''Helper method for calculating the mole number derivatives of
        `delta`. Note this is independent of the phase. :math:`b^0` refers to
        the original `b` parameter not involving any translation.
        .. math::
            \left(\frac{\partial \delta}{\partial n_i}\right)_{T, P, n_{i\ne j}}
            = (2 c_i + b^0_i) - \delta
        Returns
        -------
        ddelta_dns : list[float]
            Mole number derivative of `delta` of each component, [m^3/mol^2]
        Notes
        -----
        This derivative is checked numerically.
        '''
        N = self.N
        return SRK_translated_ddelta_dns(self.b0s, self.cs, self.delta, N, out=[0.0]*N if self.scalar else zeros(N))
    @property
    def d2delta_dninjs(self):
        r'''Helper method for calculating the second mole number derivatives (hessian) of
        `delta`. Note this is independent of the phase. :math:`b^0` refers to
        the original `b` parameter not involving any translation.
        .. math::
            \left(\frac{\partial^2 \delta}{\partial n_i \partial n_j}\right)_{T, P, n_{k\ne i,j}}
            = \left(2(b^0 - c_i - c_j) + 4c - b_i^0 - b_j^0\right)
        Returns
        -------
        d2delta_dninjs : list[list[float]]
            Second mole number derivative of `delta` of each component, [m^3/mol^3]
        Notes
        -----
        This derivative is checked numerically.
        '''
        N = self.N
        out = [[0.0]*N for _ in range(N)] if self.scalar else zeros((N, N))
        return SRK_translated_d2delta_dninjs(self.b0s, self.cs, self.b, self.c, self.delta, N, out)
    @property
    def d3delta_dninjnks(self):
        r'''Helper method for calculating the third partial mole number
        derivatives of `delta`. Note this is independent of the phase. :math:`b^0` refers to
        the original `b` parameter not involving any translation.
        .. math::
            \left(\frac{\partial^3 \delta}{\partial n_i \partial n_j \partial n_k }
            \right)_{T, P,
            n_{m \ne i,j,k}} = -6b^0 + 2(b^0_i + b^0_j + b^0_k) + -12c
            +4(c_i + c_j + c_k)
        Returns
        -------
        d3delta_dninjnks : list[list[list[float]]]
            Third mole number derivative of `delta` of each component,
            [m^3/mol^4]
        Notes
        -----
        This derivative is checked numerically.
        '''
        N = self.N
        out = [[[0.0]*N for _ in range(N)] for _ in range(N)] if self.scalar else zeros((N, N, N))
        return SRK_translated_d3delta_dninjnks(self.b0s, self.cs, self.b, self.c, self.delta, N, out)
    @property
    def depsilon_dzs(self):
        r'''Helper method for calculating the composition derivatives of
        `epsilon`. Note this is independent of the phase. :math:`b^0` refers to
        the original `b` parameter not involving any translation.
        .. math::
            \left(\frac{\partial \epsilon}{\partial x_i}\right)_{T, P, x_{i\ne j}}
            = c_i b^0 + 2c c_i + b^0_i c
        Returns
        -------
        depsilon_dzs : list[float]
            Composition derivative of `epsilon` of each component, [m^6/mol^2]
        Notes
        -----
        This derivative is checked numerically.
        '''
        N = self.N
        out = [0.0]*N if self.scalar else zeros(N)
        return SRK_translated_depsilon_dzs(self.b0s, self.cs, self.b, self.c, N, out)
    @property
    def depsilon_dns(self):
        r'''Helper method for calculating the mole number derivatives of
        `epsilon`. Note this is independent of the phase. :math:`b^0` refers to
        the original `b` parameter not involving any translation.
        .. math::
            \left(\frac{\partial \epsilon}{\partial n_i}\right)_{T, P, n_{i\ne j}}
            = -b^0(c - c_i) - c(b^0 - b_i^0) - 2c(c - c_i)
        Returns
        -------
        depsilon_dns : list[float]
            Composition derivative of `epsilon` of each component, [m^6/mol^3]
        Notes
        -----
        This derivative is checked numerically.
        '''
        N = self.N
        return SRK_translated_depsilon_dns(self.b0s, self.cs, self.b, self.c, N, out=[0.0]*N if self.scalar else zeros(N))
    @property
    def d2epsilon_dzizjs(self):
        r'''Helper method for calculating the second composition derivatives (hessian)
        of `epsilon`. Note this is independent of the phase.
        .. math::
            \left(\frac{\partial^2 \epsilon}{\partial x_i \partial x_j}\right)_{T, P, x_{k\ne i,j}}
            = b^0_i c_j + b^0_j c_i + 2c_i c_j
        Returns
        -------
        d2epsilon_dzizjs : list[list[float]]
            Second composition derivative of `epsilon` of each component, [m^6/mol^2]
        Notes
        -----
        This derivative is checked numerically.
        '''
        N = self.N
        out = [[0.0]*N for _ in range(N)] if self.scalar else zeros((N, N))
        return SRK_translated_d2epsilon_dzizjs(self.b0s, self.cs, self.b, self.c, N, out=out)
    d3epsilon_dzizjzks = GCEOSMIX.d3epsilon_dzizjzks # Zeros
    @property
    def d2epsilon_dninjs(self):
        r'''Helper method for calculating the second mole number derivatives (hessian) of
        `epsilon`. Note this is independent of the phase.
        .. math::
            \left(\frac{\partial^2 \epsilon}{\partial n_i n_j}\right)_{T, P, n_{k\ne i,j}}
            = b^0(2c - c_i - c_j) + c(2b^0 - b_i^0 - b_j^0) + 2c(2c - c_i - c_j)
            +(b^0 - b^0_i)(c - c_j) + (b^0 - b_j^0)(c - c_i) + 2(c - c_i)(c - c_j)
        Returns
        -------
        d2epsilon_dninjs : list[list[float]]
            Second mole number derivative of `epsilon` of each component, [m^6/mol^4]
        Notes
        -----
        This derivative is checked numerically.
        '''
        N = self.N
        out = [[0.0]*N for _ in range(N)] if self.scalar else zeros((N, N))
        return SRK_translated_d2epsilon_dninjs(self.b0s, self.cs, self.b, self.c, N, out)
    @property
    def d3epsilon_dninjnks(self):
        r'''Helper method for calculating the third partial mole number
        derivatives of `epsilon`. Note this is independent of the phase.
        .. math::
            \left(\frac{\partial^3 \epsilon}{\partial n_i \partial n_j \partial n_k }
            \right)_{T, P,
            n_{m \ne i,j,k}} = -2b^0(3c - c_i - c_j - c_k)
            - 2c(3b^0 - b^0_i - b^0_j - b^0_k)
            - 4c(3c - c_i - c_j - c_k)
            -(b^0 - b^0_i)(2c - c_j - c_k)
            -(b^0 - b^0_j)(2c - c_i - c_k)
            -(b^0 - b^0_k)(2c - c_i - c_j)
            - (c - c_i)(2b^0 - b^0_j - b^0_k)
            - (c - c_j)(2b^0 - b^0_i - b^0_k)
            - (c - c_k)(2b^0 - b^0_i - b^0_j)
            -2(c - c_i)(2c - c_j - c_k)
            -2(c - c_j)(2c - c_i - c_k)
            -2(c - c_k)(2c - c_i - c_j)
        Returns
        -------
        d3epsilon_dninjnks : list[list[list[float]]]
            Third mole number derivative of `epsilon` of each component,
            [m^6/mol^5]
        Notes
        -----
        This derivative is checked numerically.
        '''
        N = self.N
        out = [[[0.0]*N for _ in range(N)] for _ in range(N)] if self.scalar else zeros((N, N, N))
        return SRK_translated_d3epsilon_dninjnks(self.b0s, self.cs, self.b, self.c, self.epsilon, N, out)
class SRKMIXTranslatedConsistent(Twu91_a_alpha, SRKMIXTranslated):
    r'''Class for solving the volume translated Le Guennec, Privat, and Jaubert
    revision of the SRK equation of state according to [1]_.
    Two of `T`, `P`, and `V` are needed to solve the EOS.
    .. math::
        P = \frac{RT}{V + c - b} - \frac{a\alpha(T)}{(V + c)(V + c + b)}
    .. math::
        a \alpha = \sum_i \sum_j z_i z_j {(a\alpha)}_{ij}
    .. math::
        (a\alpha)_{ij} = (1-k_{ij})\sqrt{(a\alpha)_{i}(a\alpha)_{j}}
    .. math::
        \alpha_i = \left(\frac{T}{T_{c,i}}\right)^{c_{3} \left(c_{2}
        - 1\right)} e^{c_{1} \left(- \left(\frac{T}{T_{c,i}}
        \right)^{c_{2} c_{3}} + 1\right)}
    .. math::
        b = \sum_i z_i b_i
    .. math::
        a_i =\left(\frac{R^2(T_{c,i})^{2}}{9(\sqrt[3]{2}-1)P_{c,i}} \right)
        =\frac{0.42748\cdot R^2(T_{c,i})^{2}}{P_{c,i}}
    .. math::
        b_i =\left( \frac{(\sqrt[3]{2}-1)}{3}\right)\frac{RT_{c,i}}{P_{c,i}}
        =\frac{0.08664\cdot R T_{c,i}}{P_{c,i}}
    If `cs` is not provided, they are estimated as:
    .. math::
        c =\frac{R T_c}{P_c}(0.0172\omega + 0.0096)
    If `alpha_coeffs` is not provided, the parameters `L` and `M` are estimated
    from each of the acentric factors as follows:
    .. math::
        L = 0.0947\omega^2 + 0.6871\omega + 0.1508
    .. math::
        M = 0.1615\omega^2 - 0.2349\omega + 0.8876
    Parameters
    ----------
    Tcs : float
        Critical temperatures of all compounds, [K]
    Pcs : float
        Critical pressures of all compounds, [Pa]
    omegas : float
        Acentric factors of all compounds, [-]
    zs : float
        Overall mole fractions of all species, [-]
    kijs : list[list[float]], optional
        n*n size list of lists with binary interaction parameters for the
        Van der Waals mixing rules, default all 0 [-]
    cs : list[float], optional
        Volume translation parameters, [m^3/mol]
    alpha_coeffs : list[list[float]]
        Coefficients for
        :obj:`thermo.eos_alpha_functions.Twu91_a_alpha`, [-]
    T : float, optional
        Temperature, [K]
    P : float, optional
        Pressure, [Pa]
    V : float, optional
        Molar volume, [m^3/mol]
    fugacities : bool, optional
        Whether or not to calculate fugacity related values (phis, log phis,
        and fugacities); default True, [-]
    only_l : bool, optional
        When true, if there is a liquid and a vapor root, only the liquid
        root (and properties) will be set; default False, [-]
    only_g : bool, optional
        When true, if there is a liquid and a vapor root, only the vapor
        root (and properties) will be set; default False, [-]
    Examples
    --------
    T-P initialization, nitrogen-methane at 115 K and 1 MPa:
    >>> eos = SRKMIXTranslatedConsistent(T=115, P=1E6, Tcs=[126.1, 190.6], Pcs=[33.94E5, 46.04E5], omegas=[0.04, 0.011], zs=[0.2, 0.8], kijs=[[0,0.03],[0.03,0]])
    >>> eos.V_l, eos.V_g
    (3.591044498e-05, 0.0006020501621)
    Notes
    -----
    For P-V initializations, a numerical solver is used to find T.
    References
    ----------
    .. [1] Le Guennec, Yohann, Romain Privat, and Jean-Noël Jaubert.
       "Development of the Translated-Consistent Tc-PR and Tc-RK Cubic
       Equations of State for a Safe and Accurate Prediction of Volumetric,
       Energetic and Saturation Properties of Pure Compounds in the Sub- and
       Super-Critical Domains." Fluid Phase Equilibria 429 (December 15, 2016):
       301-12. https://doi.org/10.1016/j.fluid.2016.09.003.
    '''
    eos_pure = SRKTranslatedConsistent
    mix_kwargs_to_pure = {'cs': 'c', 'alpha_coeffs': 'alpha_coeffs'}
    kwargs_linear = ('cs', 'alpha_coeffs')
    kwargs_keys = ('kijs', 'alpha_coeffs', 'cs')
    model_id = 10102
    def __init__(self, Tcs, Pcs, omegas, zs, kijs=None, cs=None,
                 alpha_coeffs=None, T=None, P=None, V=None,
                 fugacities=True, only_l=False, only_g=False):
        # Store composition and pure-component critical constants.
        self.N = N = len(Tcs)
        cmps = range(N)
        self.Tcs = Tcs
        self.Pcs = Pcs
        self.omegas = omegas
        self.zs = zs
        # `scalar` selects the plain-list code path vs the numpy-array path.
        self.scalar = scalar = type(zs) is list
        if kijs is None:
            if scalar:
                kijs = [[0.0]*N for i in range(N)]
            else:
                kijs = zeros((N, N))
        self.kijs = kijs
        self.T = T
        self.P = P
        self.V = V
        c2R, c1R2_c2R = self.c2R, self.c1R2_c2R
        if scalar:
            # b0s: untranslated covolumes; ais: attractive parameters a_i.
            b0s = [c2R*Tcs[i]/Pcs[i] for i in cmps]
            self.ais = [c1R2_c2R*Tcs[i]*b0s[i] for i in cmps]
        else:
            b0s = c2R*Tcs/Pcs
            self.ais = c1R2_c2R*Tcs*b0s
        if cs is None:
            # Estimate translations from the tc-RK correlation, clamping
            # omega to the correlation's fitted validity range [-0.01, 1.46].
            if scalar:
                cs = [R*Tcs[i]/Pcs[i]*(0.0172*min(max(omegas[i], -0.01), 1.46) + 0.0096)
                      for i in range(N)]
            else:
                # NOTE(review): assumes npmin/npmax are element-wise aliases
                # (numpy.minimum/numpy.maximum); np.min/np.max would reduce
                # instead of clamping - confirm the module-level imports.
                cs = R*Tcs/Pcs*(0.0172*npmin(npmax(omegas, -0.01), 1.46) + 0.0096)
        if alpha_coeffs is None:
            # Estimate Twu91 L and M from omega; third coefficient fixed at 2.
            alpha_coeffs = []
            for i in range(N):
                o = min(max(omegas[i], -0.01), 1.46)
                L = o*(0.0947*o + 0.6871) + 0.1508
                M = o*(0.1615*o - 0.2349) + 0.8876
                alpha_coeffs.append((L, M, 2.0))
        self.kwargs = {'kijs': kijs, 'alpha_coeffs': alpha_coeffs, 'cs': cs}
        self.alpha_coeffs = alpha_coeffs
        self.cs = cs
        if scalar:
            # Mole-fraction-weighted mixture b0 and translation c.
            b0, c = 0.0, 0.0
            for i in range(N):
                b0 += b0s[i]*zs[i]
                c += cs[i]*zs[i]
            bs = [b0s[i] - cs[i] for i in range(N)]
        else:
            b0 = float((b0s*zs).sum())
            c = float((cs*zs).sum())
            bs = b0s - cs
        self.b0s = b0s
        self.bs = bs
        self.c = c
        # Translated-SRK cubic coefficients; exact arithmetic form matters
        # for bit-for-bit agreement with _fast_init_specific below.
        self.b = b = b0 - c
        self.delta = c + c + b0
        self.epsilon = c*(b0 + c)
        self.solve(only_l=only_l, only_g=only_g)
        if fugacities:
            self.fugacities()
    def _fast_init_specific(self, other):
        # Fast re-initialization path: copy constants from `other` and
        # recompute only the composition-dependent mixture parameters.
        self.cs = cs = other.cs
        self.alpha_coeffs = other.alpha_coeffs
        zs = self.zs
        self.b0s = b0s = other.b0s
        if self.scalar:
            b0, c = 0.0, 0.0
            for i in range(self.N):
                b0 += b0s[i]*zs[i]
                c += cs[i]*zs[i]
        else:
            b0 = float((b0s*zs).sum())
            c = float((cs*zs).sum())
        self.c = c
        self.b = b0 - c
        self.delta = c + c + b0
        self.epsilon = c*(b0 + c)
class MSRKMIXTranslated(Soave_1979_a_alpha, SRKMIXTranslatedConsistent):
    r'''Class for solving the volume translated Soave (1980) alpha function,
    revision of the Soave-Redlich-Kwong equation of state
    for a pure compound according to [1]_. Uses two fitting parameters `N` and
    `M` to more accurately fit the vapor pressure of pure species.

    Two of `T`, `P`, and `V` are needed to solve the EOS.

    .. math::
        P = \frac{RT}{V + c - b} - \frac{a\alpha(T)}{(V + c)(V + c + b)}

    .. math::
        a \alpha = \sum_i \sum_j z_i z_j {(a\alpha)}_{ij}

    .. math::
        (a\alpha)_{ij} = (1-k_{ij})\sqrt{(a\alpha)_{i}(a\alpha)_{j}}

    .. math::
        \alpha(T)_i = 1 + (1 - T_{r,i})(M + \frac{N}{T_{r,i}})

    .. math::
        b = \sum_i z_i b_i

    .. math::
        a_i =\left(\frac{R^2(T_{c,i})^{2}}{9(\sqrt[3]{2}-1)P_{c,i}} \right)
        =\frac{0.42748\cdot R^2(T_{c,i})^{2}}{P_{c,i}}

    .. math::
        b_i =\left( \frac{(\sqrt[3]{2}-1)}{3}\right)\frac{RT_{c,i}}{P_{c,i}}
        =\frac{0.08664\cdot R T_{c,i}}{P_{c,i}}

    This is an older correlation that offers lower accuracy on many properties
    which were sacrificed to obtain the vapor pressure accuracy. The alpha
    function of this EOS does not meet any of the consistency requirements for
    alpha functions.

    Coefficients can be found in [2]_, or estimated with the method in [3]_.
    The estimation method in [3]_ works as follows, using the acentric factor
    and true critical compressibility:

    .. math::
        M = 0.4745 + 2.7349(\omega Z_c) + 6.0984(\omega Z_c)^2

    .. math::
        N = 0.0674 + 2.1031(\omega Z_c) + 3.9512(\omega Z_c)^2

    An alternate estimation scheme is provided in [1]_, which provides
    analytical solutions to calculate the parameters `M` and `N` from two
    points on the vapor pressure curve, suggested as 10 mmHg and 1 atm.
    This is used as an estimation method here if the parameters are not
    provided, and the two vapor pressure points are obtained from the original
    SRK equation of state.

    Parameters
    ----------
    Tcs : float
        Critical temperatures of all compounds, [K]
    Pcs : float
        Critical pressures of all compounds, [Pa]
    omegas : float
        Acentric factors of all compounds, [-]
    zs : float
        Overall mole fractions of all species, [-]
    kijs : list[list[float]], optional
        n*n size list of lists with binary interaction parameters for the
        Van der Waals mixing rules, default all 0 [-]
    cs : list[float], optional
        Volume translation parameters, [m^3/mol]
    alpha_coeffs : list[list[float]]
        Coefficients for
        :obj:`thermo.eos_alpha_functions.Soave_1979_a_alpha`, [-]
    T : float, optional
        Temperature, [K]
    P : float, optional
        Pressure, [Pa]
    V : float, optional
        Molar volume, [m^3/mol]
    fugacities : bool, optional
        Whether or not to calculate fugacity related values (phis, log phis,
        and fugacities); default True, [-]
    only_l : bool, optional
        When true, if there is a liquid and a vapor root, only the liquid
        root (and properties) will be set; default False, [-]
    only_g : bool, optional
        When true, if there is a liquid and a vapor root, only the vapor
        root (and properties) will be set; default False, [-]

    Examples
    --------
    T-P initialization, nitrogen-methane at 115 K and 1 MPa:

    >>> eos = MSRKMIXTranslated(T=115, P=1E6, Tcs=[126.1, 190.6], Pcs=[33.94E5, 46.04E5], omegas=[0.04, 0.011], zs=[0.2, 0.8], kijs=[[0,0.03],[0.03,0]])
    >>> eos.V_l, eos.V_g
    (3.9222990198e-05, 0.00060438075638)

    Notes
    -----
    For P-V initializations, a numerical solver is used to find T.

    References
    ----------
    .. [1] Soave, G. "Rigorous and Simplified Procedures for Determining
       the Pure-Component Parameters in the Redlich—Kwong—Soave Equation of
       State." Chemical Engineering Science 35, no. 8 (January 1, 1980):
       1725-30. https://doi.org/10.1016/0009-2509(80)85007-X.
    .. [2] Sandarusi, Jamal A., Arthur J. Kidnay, and Victor F. Yesavage.
       "Compilation of Parameters for a Polar Fluid Soave-Redlich-Kwong
       Equation of State." Industrial & Engineering Chemistry Process Design
       and Development 25, no. 4 (October 1, 1986): 957-63.
       https://doi.org/10.1021/i200035a020.
    .. [3] Valderrama, Jose O., Héctor De la Puente, and Ahmed A. Ibrahim.
       "Generalization of a Polar-Fluid Soave-Redlich-Kwong Equation of State."
       Fluid Phase Equilibria 93 (February 11, 1994): 377-83.
       https://doi.org/10.1016/0378-3812(94)87021-7.
    '''

    kwargs_keys = ('kijs', 'alpha_coeffs', 'cs')
    eos_pure = MSRKTranslated
    model_id = 10103

    def __init__(self, Tcs, Pcs, omegas, zs, kijs=None, cs=None,
                 alpha_coeffs=None, T=None, P=None, V=None,
                 fugacities=True, only_l=False, only_g=False):
        self.N = N = len(Tcs)
        cmps = range(N)
        self.Tcs = Tcs
        self.Pcs = Pcs
        self.omegas = omegas
        self.zs = zs
        # Support both plain Python lists and numpy arrays, consistent with
        # the other mixture EOS classes in this module (previously only the
        # list path worked here).
        self.scalar = scalar = type(zs) is list
        if kijs is None:
            if scalar:
                kijs = [[0.0]*N for i in cmps]
            else:
                kijs = zeros((N, N))
        self.kijs = kijs
        self.T = T
        self.P = P
        self.V = V
        c1R2, c2R = self.c1*R2, self.c2*R
        if scalar:
            self.ais = [c1R2*Tcs[i]*Tcs[i]/Pcs[i] for i in cmps]
            b0s = [c2R*Tcs[i]/Pcs[i] for i in cmps]
        else:
            self.ais = c1R2*Tcs*Tcs/Pcs
            b0s = c2R*Tcs/Pcs
        if cs is None:
            cs = [0.0]*N if scalar else zeros(N) # TODO peneloux? Inherit?
        if alpha_coeffs is None:
            # Estimate (M, N) per component from two SRK vapor-pressure
            # points, as described in [1]_ and the class docstring.
            alpha_coeffs = []
            for i in cmps:
                alpha_coeffs.append(MSRKTranslated.estimate_MN(Tcs[i], Pcs[i], omegas[i], cs[i]))
        self.kwargs = {'kijs': kijs, 'alpha_coeffs': alpha_coeffs, 'cs': cs}
        self.alpha_coeffs = alpha_coeffs
        self.cs = cs
        # Mole-fraction averages of the untranslated covolume and the
        # volume translation; translated covolumes per component in `bs`.
        if scalar:
            b0, c = 0.0, 0.0
            for i in cmps:
                b0 += b0s[i]*zs[i]
                c += cs[i]*zs[i]
            bs = [b0s[i] - cs[i] for i in cmps]
        else:
            b0 = float((b0s*zs).sum())
            c = float((cs*zs).sum())
            bs = b0s - cs
        self.b0s = b0s
        self.bs = bs
        self.c = c
        self.b = b0 - c
        # Translated-SRK mixture forms: delta = 2c + b0, epsilon = c*(b0 + c)
        self.delta = c + c + b0
        self.epsilon = c*(b0 + c)
        self.solve(only_l=only_l, only_g=only_g)
        if fugacities:
            self.fugacities()
class PSRK(Mathias_Copeman_poly_a_alpha, PSRKMixingRules, SRKMIXTranslated):
    r'''Class for solving the Predictive Soave-Redlich-Kwong [1]_ equation of
    state for a mixture of any number of compounds.
    Solves the EOS on initialization.

    Two of `T`, `P`, and `V` are needed to solve the EOS.

    .. warning::
        This class is not complete! Fugacities and their derivatives among
        others are not yet implemented.

    .. math::
        P = \frac{RT}{V-b} - \frac{a\alpha(T)}{V(V+b)}

    .. math::
        b = \sum_i z_i b_i

    .. math::
        a_i =\left(\frac{R^2(T_{c,i})^{2}}{9(\sqrt[3]{2}-1)P_{c,i}} \right)
        =\frac{0.42748\cdot R^2(T_{c,i})^{2}}{P_{c,i}}

    .. math::
        b_i =\left( \frac{(\sqrt[3]{2}-1)}{3}\right)\frac{RT_{c,i}}{P_{c,i}}
        =\frac{0.08664\cdot R T_{c,i}}{P_{c,i}}

    Parameters
    ----------
    Tcs : float
        Critical temperatures of all compounds, [K]
    Pcs : float
        Critical pressures of all compounds, [Pa]
    omegas : float
        Acentric factors of all compounds, [-]
    zs : float
        Overall mole fractions of all species, [-]
    alpha_coeffs : list[list[float]]
        Coefficients for
        :obj:`thermo.eos_alpha_functions.Mathias_Copeman_poly_a_alpha`, [-]
    ge_model : :obj:`thermo.activity.GibbsExcess` object
        Excess Gibbs free energy model; to match the `PSRK` model, this is
        a :obj:`thermo.unifac.UNIFAC` object, [-]
    kijs : list[list[float]], optional
        n*n size list of lists with binary interaction parameters for the
        Van der Waals mixing rules, default all 0 [-]
    cs : list[float], optional
        Volume translation parameters; always zero in the original
        implementation, [m^3/mol]
    T : float, optional
        Temperature, [K]
    P : float, optional
        Pressure, [Pa]
    V : float, optional
        Molar volume, [m^3/mol]
    fugacities : bool, optional
        Whether or not to calculate fugacity related values (phis, log phis,
        and fugacities); default True, [-]
    only_l : bool, optional
        When true, if there is a liquid and a vapor root, only the liquid
        root (and properties) will be set; default False, [-]
    only_g : bool, optional
        When true, if there is a liquid and a vapor root, only the vapor
        root (and properties) will be set; default False, [-]

    Examples
    --------
    T-P initialization, equimolar CO2, n-hexane:

    >>> from thermo.unifac import UNIFAC, PSRKIP, PSRKSG
    >>> Tcs = [304.2, 507.4]
    >>> Pcs = [7.37646e6, 3.014419e6]
    >>> omegas = [0.2252, 0.2975]
    >>> zs = [0.5, 0.5]
    >>> Mathias_Copeman_coeffs = [[-1.7039, 0.2515, 0.8252, 1.0], [2.9173, -1.4411, 1.1061, 1.0]]
    >>> T = 313.
    >>> P = 1E6
    >>> ge_model = UNIFAC.from_subgroups(T=T, xs=zs, chemgroups=[{117: 1}, {1:2, 2:4}], subgroups=PSRKSG, interaction_data=PSRKIP, version=0)
    >>> eos = PSRK(Tcs=Tcs, Pcs=Pcs, omegas=omegas, zs=zs, ge_model=ge_model, alpha_coeffs=Mathias_Copeman_coeffs, T=T, P=P)
    >>> eos
    PSRK(Tcs=[304.2, 507.4], Pcs=[7376460.0, 3014419.0], omegas=[0.2252, 0.2975], kijs=[[0.0, 0.0], [0.0, 0.0]], alpha_coeffs=[[-1.7039, 0.2515, 0.8252, 1.0], [2.9173, -1.4411, 1.1061, 1.0]], cs=[0.0, 0.0], ge_model=UNIFAC(T=313.0, xs=[0.5, 0.5], rs=[1.3, 4.4998000000000005], qs=[0.982, 3.856], Qs=[0.848, 0.54, 0.982], vs=[[0, 2], [0, 4], [1, 0]], psi_abc=([[0.0, 0.0, 919.8], [0.0, 0.0, 919.8], [-38.672, -38.672, 0.0]], [[0.0, 0.0, -3.9132], [0.0, 0.0, -3.9132], [0.8615, 0.8615, 0.0]], [[0.0, 0.0, 0.0046309], [0.0, 0.0, 0.0046309], [-0.0017906, -0.0017906, 0.0]]), version=0), zs=[0.5, 0.5], T=313.0, P=1000000.0)
    >>> eos.phase, eos.V_l, eos.V_g
    ('l/g', 0.000110889753959, 0.00197520225546)

    Notes
    -----

    References
    ----------
    .. [1] Holderbaum, T., and J. Gmehling. "PSRK: A Group Contribution
       Equation of State Based on UNIFAC.” Fluid Phase Equilibria 70, no. 2-3
       (December 30, 1991): 251-65.
       https://doi.org/10.1016/0378-3812(91)85038-V.
    '''

    eos_pure = SRKTranslated
    # Metadata consumed by the shared mixture machinery: which mixture
    # kwargs map to which pure-EOS kwargs, and which mix linearly.
    mix_kwargs_to_pure = {'cs': 'c', 'alpha_coeffs': 'alpha_coeffs'}
    kwargs_linear = ('cs', 'alpha_coeffs')
    kwargs_keys = ('kijs', 'alpha_coeffs', 'cs', 'ge_model')
    model_id = 10300

    def __init__(self, Tcs, Pcs, omegas, zs, alpha_coeffs, ge_model,
                 kijs=None, cs=None,
                 T=None, P=None, V=None,
                 fugacities=True, only_l=False, only_g=False):
        self.N = N = len(Tcs)
        cmps = range(N)
        self.Tcs = Tcs
        self.Pcs = Pcs
        self.omegas = omegas
        self.zs = zs
        # True when compositions are plain Python lists; False for arrays.
        # NOTE(review): the default-kij/c paths below only build lists —
        # array-input support looks incomplete here, consistent with the
        # class-level "not complete" warning.
        self.scalar = scalar = type(zs) is list
        if kijs is None:
            kijs = [[0.0]*N for i in cmps]
        if cs is None:
            cs = [0.0]*N
        self.kijs = kijs
        self.T = T
        self.P = P
        self.V = V
        c1R2, c2R = self.c1*R2, self.c2*R
        # Pure-component a parameters and untranslated covolumes b0
        self.ais = [c1R2*Tcs[i]*Tcs[i]/Pcs[i] for i in cmps]
        b0s = [c2R*Tcs[i]/Pcs[i] for i in cmps]
        self.kwargs = {'kijs': kijs, 'alpha_coeffs': alpha_coeffs, 'cs': cs,
                       'ge_model': ge_model}
        self.alpha_coeffs = alpha_coeffs
        self.cs = cs
        # Rebuild the excess Gibbs model only if its stored T/composition
        # do not already match this EOS's state.
        if zs != ge_model.xs or ge_model.T != T:
            if T is None:
                T = 298.15 # default value, need to check in a_alpha call
            ge_model = ge_model.to_T_xs(T, zs)
        self.ge_model = ge_model
        # Mole-fraction averages; translated-SRK mixture forms follow:
        # b = b0 - c, delta = 2c + b0, epsilon = c*(b0 + c)
        b0, c = 0.0, 0.0
        for i in cmps:
            b0 += b0s[i]*zs[i]
            c += cs[i]*zs[i]
        self.b0s = b0s
        self.bs = [b0s[i] - cs[i] for i in cmps]
        self.c = c
        self.b = b = b0 - c
        self.delta = c + c + b0
        self.epsilon = c*(b0 + c)
        self.solve(only_l=only_l, only_g=only_g)
        # Fugacities are not yet implemented for this class (see the
        # docstring warning), so the usual post-solve call stays disabled.
        # if fugacities:
        #     self.fugacities()

    def _fast_init_specific(self, other):
        # Fast re-initialization: copy composition-independent data from
        # `other`, rebuild the GE model at this T/composition, and recompute
        # only the composition-dependent mixture terms.
        zs = self.zs
        self.ge_model = other.ge_model.to_T_xs(self.T, zs)
        self.cs = cs = other.cs
        self.alpha_coeffs = other.alpha_coeffs
        self.b0s = b0s = other.b0s
        b0, c = 0.0, 0.0
        for i in range(self.N):
            b0 += b0s[i]*zs[i]
            c += cs[i]*zs[i]
        self.c = c
        self.b = b0 - c
        self.delta = c + c + b0
        self.epsilon = c*(b0 + c)
class PR78MIX(PRMIX):
    r'''Class for solving the Peng-Robinson cubic equation of state for a
    mixture of any number of compounds according to the 1978 variant.
    Subclasses `PR`. Solves the EOS on initialization and calculates fugacities
    for all components in all phases.

    Two of `T`, `P`, and `V` are needed to solve the EOS.

    .. math::
        P = \frac{RT}{v-b}-\frac{a\alpha(T)}{v(v+b)+b(v-b)}

    .. math::
        a \alpha = \sum_i \sum_j z_i z_j {(a\alpha)}_{ij}

    .. math::
        (a\alpha)_{ij} = (1-k_{ij})\sqrt{(a\alpha)_{i}(a\alpha)_{j}}

    .. math::
        b = \sum_i z_i b_i

    .. math::
        a_i=0.45724\frac{R^2T_{c,i}^2}{P_{c,i}}

    .. math::
        b_i=0.07780\frac{RT_{c,i}}{P_{c,i}}

    .. math::
        \alpha(T)_i=[1+\kappa_i(1-\sqrt{T_{r,i}})]^2

    .. math::
        \kappa_i = 0.37464+1.54226\omega_i-0.26992\omega_i^2 \text{ if } \omega_i
        \le 0.491

    .. math::
        \kappa_i = 0.379642 + 1.48503 \omega_i - 0.164423\omega_i^2 + 0.016666
        \omega_i^3 \text{ if } \omega_i > 0.491

    Parameters
    ----------
    Tcs : float
        Critical temperatures of all compounds, [K]
    Pcs : float
        Critical pressures of all compounds, [Pa]
    omegas : float
        Acentric factors of all compounds, [-]
    zs : float
        Overall mole fractions of all species, [-]
    kijs : list[list[float]], optional
        n*n size list of lists with binary interaction parameters for the
        Van der Waals mixing rules, default all 0 [-]
    T : float, optional
        Temperature, [K]
    P : float, optional
        Pressure, [Pa]
    V : float, optional
        Molar volume, [m^3/mol]
    fugacities : bool, optional
        Whether or not to calculate fugacity related values (phis, log phis,
        and fugacities); default True, [-]
    only_l : bool, optional
        When true, if there is a liquid and a vapor root, only the liquid
        root (and properties) will be set; default False, [-]
    only_g : bool, optional
        When true, if there is a liquid and a vapor root, only the vapor
        root (and properties) will be set; default False, [-]

    Examples
    --------
    T-P initialization, nitrogen-methane at 115 K and 1 MPa, with modified
    acentric factors to show the difference between :obj:`PRMIX`

    >>> eos = PR78MIX(T=115, P=1E6, Tcs=[126.1, 190.6], Pcs=[33.94E5, 46.04E5], omegas=[0.6, 0.7], zs=[0.5, 0.5], kijs=[[0,0],[0,0]])
    >>> eos.V_l, eos.V_g
    (3.2396438915e-05, 0.00050433802024)
    >>> eos.fugacities_l, eos.fugacities_g
    ([833048.45119, 6160.9088153], [460717.27767, 279598.90103])

    Notes
    -----
    This variant is recommended over the original.

    References
    ----------
    .. [1] Peng, Ding-Yu, and Donald B. Robinson. "A New Two-Constant Equation
       of State." Industrial & Engineering Chemistry Fundamentals 15, no. 1
       (February 1, 1976): 59-64. doi:10.1021/i160057a011.
    .. [2] Robinson, Donald B., Ding-Yu Peng, and Samuel Y-K Chung. "The
       Development of the Peng - Robinson Equation and Its Application to Phase
       Equilibrium in a System Containing Methanol." Fluid Phase Equilibria 24,
       no. 1 (January 1, 1985): 25-41. doi:10.1016/0378-3812(85)87035-7.
    '''

    eos_pure = PR78
    model_id = 10201

    def __init__(self, Tcs, Pcs, omegas, zs, kijs=None, T=None, P=None, V=None,
                 fugacities=True, only_l=False, only_g=False):
        self.N = N = len(Tcs)
        cmps = range(N)
        self.Tcs = Tcs
        self.Pcs = Pcs
        self.omegas = omegas
        self.zs = zs
        # True when inputs are plain Python lists; False for numpy arrays
        self.scalar = scalar = type(zs) is list
        if kijs is None:
            if scalar:
                kijs = [[0.0]*N for i in range(N)]
            else:
                kijs = zeros((N, N))
        self.kijs = kijs
        self.kwargs = {'kijs': kijs}
        self.T = T
        self.P = P
        self.V = V
        c1R2_c2R, c2R = self.c1R2_c2R, self.c2R
        if scalar:
            self.bs = bs = [c2R*Tcs[i]/Pcs[i] for i in cmps]
            self.ais = [c1R2_c2R*Tcs[i]*bs[i] for i in cmps]
            # Standard 1976 kappa correlation for every component first...
            self.kappas = kappas = [omega*(-0.26992*omega + 1.54226) + 0.37464 for omega in omegas]
            # ...then the 1978 revision: components with omega > 0.491 get
            # the higher-order correlation instead (see class docstring).
            for i, omega in enumerate(omegas):
                if omega > 0.491:
                    kappas[i] = omega*(omega*(0.016666*omega - 0.164423) + 1.48503) + 0.379642
            b = 0.0
            for i in cmps:
                b += bs[i]*zs[i]
        else:
            self.bs = bs = c2R*Tcs/Pcs
            self.ais = c1R2_c2R*Tcs*bs
            self.kappas = kappas = omegas*(-0.26992*omegas + 1.54226) + 0.37464
            b = float((bs*zs).sum())
            # Vectorized form of the same high-acentricity switch: overwrite
            # kappas in-place at the indices where omega > 0.491.
            high_omega_idxs = npwhere(omegas > 0.491)
            high_omegas = omegas[high_omega_idxs]
            kappas[high_omega_idxs] = high_omegas*(high_omegas*(0.016666*high_omegas - 0.164423) + 1.48503) + 0.379642
        self.b = b
        # Peng-Robinson cubic coefficients: delta = 2b, epsilon = -b^2
        self.delta = 2.*b
        self.epsilon = -b*b
        self.solve(only_l=only_l, only_g=only_g)
        if fugacities:
            self.fugacities()
class VDWMIX(EpsilonZeroMixingRules, GCEOSMIX, VDW):
    r'''Class for solving the Van der Waals [1]_ [2]_ cubic equation of state for a
    mixture of any number of compounds. Solves the EOS on
    initialization and calculates fugacities for all components in all phases.

    Two of `T`, `P`, and `V` are needed to solve the EOS.

    .. math::
        P=\frac{RT}{V-b}-\frac{a}{V^2}

    .. math::
        a = \sum_i \sum_j z_i z_j {a}_{ij}

    .. math::
        b = \sum_i z_i b_i

    .. math::
        a_{ij} = (1-k_{ij})\sqrt{a_{i}a_{j}}

    .. math::
        a_i=\frac{27}{64}\frac{(RT_{c,i})^2}{P_{c,i}}

    .. math::
        b_i=\frac{RT_{c,i}}{8P_{c,i}}

    Parameters
    ----------
    Tcs : float
        Critical temperatures of all compounds, [K]
    Pcs : float
        Critical pressures of all compounds, [Pa]
    zs : float
        Overall mole fractions of all species, [-]
    kijs : list[list[float]], optional
        n*n size list of lists with binary interaction parameters for the
        Van der Waals mixing rules, default all 0 [-]
    T : float, optional
        Temperature, [K]
    P : float, optional
        Pressure, [Pa]
    V : float, optional
        Molar volume, [m^3/mol]
    omegas : float, optional
        Acentric factors of all compounds - Not used in equation of state!, [-]
    fugacities : bool, optional
        Whether or not to calculate fugacity related values (phis, log phis,
        and fugacities); default True, [-]
    only_l : bool, optional
        When true, if there is a liquid and a vapor root, only the liquid
        root (and properties) will be set; default False, [-]
    only_g : bool, optional
        When true, if there is a liquid and a vapor root, only the vapor
        root (and properties) will be set; default False, [-]

    Examples
    --------
    T-P initialization, nitrogen-methane at 115 K and 1 MPa:

    >>> eos = VDWMIX(T=115, P=1E6, Tcs=[126.1, 190.6], Pcs=[33.94E5, 46.04E5], zs=[0.5, 0.5], kijs=[[0,0],[0,0]])
    >>> eos.V_l, eos.V_g
    (5.881369844883e-05, 0.00077708723758)
    >>> eos.fugacities_l, eos.fugacities_g
    ([854533.266920, 207126.8497276], [448470.736338, 397826.543999])

    Notes
    -----
    For P-V initializations, a numerical solver is used to find T.

    References
    ----------
    .. [1] Walas, Stanley M. Phase Equilibria in Chemical Engineering.
       Butterworth-Heinemann, 1985.
    .. [2] Poling, Bruce E. The Properties of Gases and Liquids. 5th
       edition. New York: McGraw-Hill Professional, 2000.
    '''

    eos_pure = VDW
    nonstate_constants_specific = tuple()
    kwargs_keys = ('kijs',)
    model_id = 10001

    def __init__(self, Tcs, Pcs, zs, kijs=None, T=None, P=None, V=None,
                 omegas=None, fugacities=True, only_l=False, only_g=False):
        self.N = N = len(Tcs)
        self.Tcs = Tcs
        self.Pcs = Pcs
        self.zs = zs
        # True when inputs are plain Python lists; False for numpy arrays
        self.scalar = scalar = type(zs) is list
        if kijs is None:
            if scalar:
                kijs = [[0.0]*self.N for i in range(N)]
            else:
                kijs = zeros((N, N))
        self.kijs = kijs
        self.kwargs = {'kijs': kijs}
        self.T = T
        self.P = P
        self.V = V
        c1R2, c2R = self.c1R2, self.c2R
        # VDW's a and b depend only on Tc and Pc; no alpha function, no
        # acentric factor involved.
        if self.scalar:
            self.ais = [c1R2*Tc*Tc/Pc for Tc, Pc in zip(Tcs, Pcs)]
            self.bs = [c2R*Tc/Pc for Tc, Pc in zip(Tcs, Pcs)]
            self.b = sum(bi*zi for bi, zi in zip(self.bs, self.zs))
        else:
            Tc_Pc_ratio = Tcs/Pcs
            self.ais = c1R2*Tcs*Tc_Pc_ratio
            self.bs = bs = c2R*Tc_Pc_ratio
            self.b = float((bs*zs).sum())
        # omegas are stored for bookkeeping only; see the docstring note
        self.omegas = omegas
        self.solve(only_l=only_l, only_g=only_g)
        if fugacities:
            self.fugacities()

    def _fast_init_specific(self, other):
        # Fast re-initialization helper: for VDW only the composition
        # average `b` needs recomputing (a_i, b_i are state-independent).
        if self.scalar:
            self.b = sum(bi*zi for bi, zi in zip(self.bs, self.zs))
        else:
            self.b = float((self.bs*self.zs).sum())

    def a_alphas_vectorized(self, T):
        r'''Method to calculate the pure-component `a_alphas` for the VDW EOS.
        This vectorized implementation is added for extra speed.

        .. math::
            a\alpha = a

        Parameters
        ----------
        T : float
            Temperature, [K]

        Returns
        -------
        a_alphas : list[float]
            Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
        '''
        # alpha is identically 1 for VDW, so a_alpha_i == a_i at any T
        return self.ais

    def a_alpha_and_derivatives_vectorized(self, T):
        r'''Method to calculate the pure-component `a_alphas` and their first
        and second derivatives for the VDW EOS. This vectorized implementation
        is added for extra speed.

        .. math::
            a\alpha = a

        .. math::
            \frac{d a\alpha}{dT} = 0

        .. math::
            \frac{d^2 a\alpha}{dT^2} = 0

        Parameters
        ----------
        T : float
            Temperature, [K]

        Returns
        -------
        a_alphas : list[float]
            Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
        da_alpha_dTs : list[float]
            Temperature derivative of coefficient calculated by EOS-specific
            method, [J^2/mol^2/Pa/K]
        d2a_alpha_dT2s : list[float]
            Second temperature derivative of coefficient calculated by
            EOS-specific method, [J^2/mol^2/Pa/K**2]
        '''
        # Both temperature derivatives are exactly zero for VDW; the same
        # zero container is returned twice (callers must not mutate it).
        if self.scalar:
            zero_array = [0.0]*self.N
        else:
            zero_array = zeros(self.N)
        return self.ais, zero_array, zero_array

    def fugacity_coefficients(self, Z):
        r'''Literature formula for calculating fugacity coefficients for each
        species in a mixture. Verified numerically.
        Called by `fugacities` on initialization, or by a solver routine
        which is performing a flash calculation.

        .. math::
            \ln \hat \phi_i = \frac{b_i}{V-b} - \ln\left[Z\left(1
            - \frac{b}{V}\right)\right] - \frac{2\sqrt{aa_i}}{RTV}

        Parameters
        ----------
        Z : float
            Compressibility of the mixture for a desired phase, [-]

        Returns
        -------
        log_phis : float
            Log fugacity coefficient for each species, [-]

        References
        ----------
        .. [1] Walas, Stanley M. Phase Equilibria in Chemical Engineering.
           Butterworth-Heinemann, 1985.
        '''
        N = self.N
        # Delegates the arithmetic to the shared VDW_lnphis helper,
        # supplying a pre-allocated output container of the right kind.
        return VDW_lnphis(self.T, self.P, Z, self.b, self.a_alpha, self.bs, self.a_alpha_roots, N,
                          lnphis=[0.0]*N if self.scalar else zeros(N))

    def dlnphis_dT(self, phase):
        r'''Formula for calculating the temperature derivaitve of
        log fugacity coefficients for each species in a mixture for the
        VDW equation of state. Verified numerically.

        .. math::
            \left(\frac{\partial \ln \phi_i}{\partial T}\right)_{P,
            nj \ne i}

        Parameters
        ----------
        phase : str
            One of 'l' or 'g', [-]

        Returns
        -------
        dlnphis_dT : float
            Temperature derivatives of log fugacity coefficient for each
            species, [1/K]

        Notes
        -----
        This expression was derived using SymPy and optimized with the `cse`
        technique.
        '''
        zs = self.zs
        if phase == 'g':
            Z = self.Z_g
            dZ_dT = self.dZ_dT_g
        else:
            Z = self.Z_l
            dZ_dT = self.dZ_dT_l
        N = self.N
        T, P, ais, bs, b = self.T, self.P, self.ais, self.bs, self.b
        T_inv = 1.0/T
        T_inv2 = T_inv*T_inv
        # NOTE(review): A and B below are computed but never used in this
        # method — likely leftovers from the derivation; confirm and remove.
        A = self.a_alpha*P*R2_inv*T_inv2
        B = b*P*R_inv*T_inv
        # x0..x15 are common-subexpression intermediates emitted by SymPy's
        # cse pass (see Notes); their exact order encodes the derivation.
        x0 = self.a_alpha
        x4 = 1.0/Z
        x5 = 4.0*P*R2_inv*x4*T_inv2*T_inv
        x8 = 2*P*R2_inv*T_inv2*dZ_dT/Z**2
        x9 = P*R2_inv*x4*T_inv2*self.da_alpha_dT/x0
        x10 = 1.0/P
        x11 = R*x10*(T*dZ_dT + Z)/(-R*T*x10*Z + b)**2
        x13 = b*T_inv*R_inv
        x14 = P*x13*x4 - 1.0
        x15 = x4*(P*x13*(T_inv + x4*dZ_dT) - x14*dZ_dT)/x14
        # Composition stuff
        d_lnphis_dTs = []
        for i in range(N):
            # sqrt(a_i * a_alpha) is the only component-specific cross term
            x1 = (ais[i]*x0)**0.5
            d_lhphi_dT = -bs[i]*x11 + x1*x5 + x1*x8 - x1*x9 + x15
            d_lnphis_dTs.append(d_lhphi_dT)
        return d_lnphis_dTs

    def dlnphis_dP(self, phase):
        r'''Generic formula for calculating the pressure derivaitve of
        log fugacity coefficients for each species in a mixture for the
        VDW EOS. Verified numerically.

        .. math::
            \left(\frac{\partial \ln \phi_i}{\partial P}\right)_{T,
            nj \ne i}

        Parameters
        ----------
        phase : str
            One of 'l' or 'g', [-]

        Returns
        -------
        dlnphis_dP : float
            Pressure derivatives of log fugacity coefficient for each species,
            [1/Pa]

        Notes
        -----
        This expression was derived using SymPy and optimized with the `cse`
        technique.
        '''
        zs = self.zs
        if phase == 'l':
            Z, dZ_dP = self.Z_l, self.dZ_dP_l
        else:
            Z, dZ_dP = self.Z_g, self.dZ_dP_g
        a_alpha = self.a_alpha
        N = self.N
        T, P, bs, b, ais = self.T, self.P, self.bs, self.b, self.ais
        T_inv = 1.0/T
        RT_inv = T_inv*R_inv
        # x3..x15 are common-subexpression intermediates emitted by SymPy's
        # cse pass (see Notes); their exact order encodes the derivation.
        x3 = T_inv*T_inv
        x5 = 1.0/Z
        x6 = 2.0*R2_inv*x3*x5
        x8 = 2.0*P*R2_inv*x3*dZ_dP*x5*x5
        x9 = 1./P
        x10 = Z*x9
        x11 = R*T*x9*(-x10 + dZ_dP)/(-R*T*x10 + b)**2
        x12 = P*x5
        x13 = b*RT_inv
        x14 = x12*x13 - 1.0
        x15 = -x5*(-x13*(x12*dZ_dP - 1.0) + x14*dZ_dP)/x14
        d_lnphi_dPs = []
        for i in range(N):
            # sqrt(a_i * a_alpha) is the only component-specific cross term
            x1 = (ais[i]*a_alpha)**0.5
            d_lnphi_dP = -bs[i]*x11 - x1*x6 + x1*x8 + x15
            d_lnphi_dPs.append(d_lnphi_dP)
        return d_lnphi_dPs

    @property
    def ddelta_dzs(self):
        r'''Helper method for calculating the composition derivatives of
        `delta`. Note this is independent of the phase.

        .. math::
            \left(\frac{\partial \delta}{\partial x_i}\right)_{T, P, x_{i\ne j}}
            = 0

        Returns
        -------
        ddelta_dzs : list[float]
            Composition derivative of `delta` of each component, [m^3/mol]

        Notes
        -----
        This derivative is checked numerically.
        '''
        # delta is identically zero for VDW, so all its derivatives vanish
        if self.scalar:
            zero_array = [0.0]*self.N
        else:
            zero_array = zeros(self.N)
        return zero_array

    @property
    def ddelta_dns(self):
        r'''Helper method for calculating the mole number derivatives of
        `delta`. Note this is independent of the phase.

        .. math::
            \left(\frac{\partial \delta}{\partial n_i}\right)_{T, P, n_{i\ne j}}
            = 0

        Returns
        -------
        ddelta_dns : list[float]
            Mole number derivative of `delta` of each component, [m^3/mol^2]

        Notes
        -----
        This derivative is checked numerically.
        '''
        if self.scalar:
            zero_array = [0.0]*self.N
        else:
            zero_array = zeros(self.N)
        return zero_array

    @property
    def d2delta_dzizjs(self):
        r'''Helper method for calculating the second composition derivatives (hessian) of
        `delta`. Note this is independent of the phase.

        .. math::
            \left(\frac{\partial^2 \delta}{\partial x_i\partial x_j}\right)_{T, P, x_{k\ne i,j}}
            = 0

        Returns
        -------
        d2delta_dzizjs : list[float]
            Second Composition derivative of `delta` of each component, [m^3/mol]

        Notes
        -----
        This derivative is checked numerically.
        '''
        N = self.N
        if self.scalar:
            zero_array = [[0.0]*N for i in range(N)]
        else:
            zero_array = zeros((N, N))
        return zero_array

    @property
    def d2delta_dninjs(self):
        r'''Helper method for calculating the second mole number derivatives (hessian) of
        `delta`. Note this is independent of the phase.

        .. math::
            \left(\frac{\partial^2 \delta}{\partial n_i \partial n_j}\right)_{T, P, n_{k\ne i,j}}
            = 0

        Returns
        -------
        d2delta_dninjs : list[list[float]]
            Second mole number derivative of `delta` of each component, [m^3/mol^3]

        Notes
        -----
        This derivative is checked numerically.
        '''
        N = self.N
        if self.scalar:
            zero_array = [[0.0]*N for i in range(N)]
        else:
            zero_array = zeros((N, N))
        return zero_array

    @property
    def d3delta_dninjnks(self):
        r'''Helper method for calculating the third partial mole number
        derivatives of `delta`. Note this is independent of the phase.

        .. math::
            \left(\frac{\partial^3 \delta}{\partial n_i \partial n_j \partial n_k }
            \right)_{T, P,
            n_{m \ne i,j,k}} = 0

        Returns
        -------
        d3delta_dninjnks : list[list[list[float]]]
            Third mole number derivative of `delta` of each component,
            [m^3/mol^4]

        Notes
        -----
        This derivative is checked numerically.
        '''
        N = self.N
        if self.scalar:
            zero_array = [[[0.0]*N for _ in range(N)] for _ in range(N)]
        else:
            zero_array = zeros((N, N, N))
        return zero_array
class PRSVMIX(PRMIX, PRSV):
    r'''Class for solving the Peng-Robinson-Stryjek-Vera equations of state for
    a mixture as given in [1]_. Subclasses :obj:`PRMIX` and :obj:`PRSV <thermo.eos.PRSV>`.
    Solves the EOS on initialization and calculates fugacities for all
    components in all phases.

    Inherits the method of calculating fugacity coefficients from :obj:`PRMIX`.
    Two of `T`, `P`, and `V` are needed to solve the EOS.

    .. math::
        P = \frac{RT}{v-b}-\frac{a\alpha(T)}{v(v+b)+b(v-b)}

    .. math::
        a \alpha = \sum_i \sum_j z_i z_j {(a\alpha)}_{ij}

    .. math::
        (a\alpha)_{ij} = (1-k_{ij})\sqrt{(a\alpha)_{i}(a\alpha)_{j}}

    .. math::
        b = \sum_i z_i b_i

    .. math::
        a_i=0.45724\frac{R^2T_{c,i}^2}{P_{c,i}}

    .. math::
        b_i=0.07780\frac{RT_{c,i}}{P_{c,i}}

    .. math::
        \alpha(T)_i=[1+\kappa_i(1-\sqrt{T_{r,i}})]^2

    .. math::
        \kappa_i = \kappa_{0,i} + \kappa_{1,i}(1 + T_{r,i}^{0.5})(0.7 - T_{r,i})

    .. math::
        \kappa_{0,i} = 0.378893 + 1.4897153\omega_i - 0.17131848\omega_i^2
        + 0.0196554\omega_i^3

    Parameters
    ----------
    Tcs : float
        Critical temperatures of all compounds, [K]
    Pcs : float
        Critical pressures of all compounds, [Pa]
    omegas : float
        Acentric factors of all compounds, [-]
    zs : float
        Overall mole fractions of all species, [-]
    kijs : list[list[float]], optional
        n*n size list of lists with binary interaction parameters for the
        Van der Waals mixing rules, default all 0 [-]
    T : float, optional
        Temperature, [K]
    P : float, optional
        Pressure, [Pa]
    V : float, optional
        Molar volume, [m^3/mol]
    kappa1s : list[float], optional
        Fit parameter; available in [1]_ for over 90 compounds, [-]
    fugacities : bool, optional
        Whether or not to calculate fugacity related values (phis, log phis,
        and fugacities); default True, [-]
    only_l : bool, optional
        When true, if there is a liquid and a vapor root, only the liquid
        root (and properties) will be set; default False, [-]
    only_g : bool, optional
        When true, if there is a liquid and a vapor root, only the vapor
        root (and properties) will be set; default False, [-]

    Examples
    --------
    P-T initialization, two-phase, nitrogen and methane

    >>> eos = PRSVMIX(T=115, P=1E6, Tcs=[126.1, 190.6], Pcs=[33.94E5, 46.04E5], omegas=[0.04, 0.011], zs=[0.5, 0.5], kijs=[[0,0],[0,0]])
    >>> eos.phase, eos.V_l, eos.H_dep_l, eos.S_dep_l
    ('l/g', 3.6235536165e-05, -6349.0055583, -49.1240502472)

    Notes
    -----
    [1]_ recommends that `kappa1` be set to 0 for Tr > 0.7. This is not done by
    default; the class boolean `kappa1_Tr_limit` may be set to True and the
    problem re-solved with that specified if desired. `kappa1_Tr_limit` is not
    supported for P-V inputs.

    For P-V initializations, a numerical solver is used to find T.

    [2]_ and [3]_ are two more resources documenting the PRSV EOS. [4]_ lists
    `kappa` values for 69 additional compounds. See also :obj:`PRSV2MIX`. Note that
    tabulated `kappa` values should be used with the critical parameters used
    in their fits. Both [1]_ and [4]_ only considered vapor pressure in fitting
    the parameter.

    References
    ----------
    .. [1] Stryjek, R., and J. H. Vera. "PRSV: An Improved Peng-Robinson
       Equation of State for Pure Compounds and Mixtures." The Canadian Journal
       of Chemical Engineering 64, no. 2 (April 1, 1986): 323-33.
       doi:10.1002/cjce.5450640224.
    .. [2] Stryjek, R., and J. H. Vera. "PRSV - An Improved Peng-Robinson
       Equation of State with New Mixing Rules for Strongly Nonideal Mixtures."
       The Canadian Journal of Chemical Engineering 64, no. 2 (April 1, 1986):
       334-40. doi:10.1002/cjce.5450640225.
    .. [3] Stryjek, R., and J. H. Vera. "Vapor-liquid Equilibrium of
       Hydrochloric Acid Solutions with the PRSV Equation of State." Fluid
       Phase Equilibria 25, no. 3 (January 1, 1986): 279-90.
       doi:10.1016/0378-3812(86)80004-8.
    .. [4] Proust, P., and J. H. Vera. "PRSV: The Stryjek-Vera Modification of
       the Peng-Robinson Equation of State. Parameters for Other Pure Compounds
       of Industrial Interest." The Canadian Journal of Chemical Engineering
       67, no. 1 (February 1, 1989): 170-73. doi:10.1002/cjce.5450670125.
    '''

    eos_pure = PRSV
    nonstate_constants_specific = ('kappa0s', 'kappa1s', 'kappas')
    mix_kwargs_to_pure = {'kappa1s': 'kappa1'}
    kwargs_linear = ('kappa1s',)
    kwargs_keys = ('kijs', 'kappa1s')
    model_id = 10205

    def __init__(self, Tcs, Pcs, omegas, zs, kijs=None, T=None, P=None, V=None,
                 kappa1s=None, fugacities=True, only_l=False, only_g=False):
        self.N = N = len(Tcs)
        cmps = range(N)
        self.Tcs = Tcs
        self.Pcs = Pcs
        self.omegas = omegas
        self.zs = zs
        # True when inputs are plain Python lists; False for numpy arrays
        self.scalar = scalar = type(zs) is list
        if kijs is None:
            if scalar:
                # Float zeros, consistent with the default kijs built by the
                # sibling mixture classes (previously integer 0 literals).
                kijs = [[0.0]*N for i in range(N)]
            else:
                kijs = zeros((N, N))
        self.kijs = kijs
        if kappa1s is None:
            # kappa1 defaults to zero, reducing PRSV's kappa to kappa0
            if scalar:
                kappa1s = [0.0]*N
            else:
                kappa1s = zeros(N)
        self.kwargs = {'kijs': kijs, 'kappa1s': kappa1s}
        self.T = T
        self.P = P
        self.V = V
        c1R2_c2R, c2R = self.c1R2_c2R, self.c2R
        if scalar:
            # kappa0 polynomial in omega from [1]_
            self.kappa0s = [omega*(omega*(0.0196554*omega - 0.17131848) + 1.4897153) + 0.378893 for omega in omegas]
            self.bs = bs = [c2R*Tcs[i]/Pcs[i] for i in cmps]
            self.ais = [c1R2_c2R*Tcs[i]*bs[i] for i in cmps]
            b = 0.0
            for i in cmps:
                b += bs[i]*zs[i]
        else:
            self.kappa0s = omegas*(omegas*(0.0196554*omegas - 0.17131848) + 1.4897153) + 0.378893
            self.bs = bs = c2R*Tcs/Pcs
            self.ais = c1R2_c2R*Tcs*bs
            b = float((bs*zs).sum())
        self.b = b
        # Peng-Robinson cubic coefficients: delta = 2b, epsilon = -b^2
        self.delta = 2.0*b
        self.epsilon = -b*b
        self.check_sufficient_inputs()
        if self.V and self.P:
            # Deal with T-solution here; does NOT support kappa1_Tr_limit.
            self.kappa1s = kappa1s
            solution = 'g' if (only_g and not only_l) else ('l' if only_l else None)
            self.T = self.solve_T(self.P, self.V, solution=solution)
        else:
            # Optionally zero kappa1 above Tr = 0.7 per the recommendation
            # in [1]_ (off by default; see Notes).
            self.kappa1s = [(0 if (T/Tc > 0.7 and self.kappa1_Tr_limit) else kappa1) for kappa1, Tc in zip(kappa1s, Tcs)]
        self.kappas = [kappa0 + kappa1*(1 + (self.T/Tc)**0.5)*(0.7 - (self.T/Tc)) for kappa0, kappa1, Tc in zip(self.kappa0s, self.kappa1s, self.Tcs)]
        self.solve(only_l=only_l, only_g=only_g)
        if fugacities:
            self.fugacities()

    def _fast_init_specific(self, other):
        # Fast re-initialization helper: kappa constants are
        # composition-independent and copied; only b, delta, epsilon change.
        self.kappa0s = other.kappa0s
        self.kappa1s = other.kappa1s
        self.kappas = other.kappas
        if self.scalar:
            b = 0.0
            for bi, zi in zip(self.bs, self.zs):
                b += bi*zi
        else:
            b = float((self.bs*self.zs).sum())
        self.b = b
        self.delta = 2.0*b
        self.epsilon = -b*b

    def a_alphas_vectorized(self, T):
        r'''Method to calculate the pure-component `a_alphas` for the PRSV EOS.
        This vectorized implementation is added for extra speed.

        .. math::
            a\alpha = a \left(\left(\kappa_{0} + \kappa_{1} \left(\sqrt{\frac{
            T}{Tc}} + 1\right) \left(- \frac{T}{Tc} + \frac{7}{10}\right)
            \right) \left(- \sqrt{\frac{T}{Tc}} + 1\right) + 1\right)^{2}

        Parameters
        ----------
        T : float
            Temperature, [K]

        Returns
        -------
        a_alphas : list[float]
            Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
        '''
        return PRSV_a_alphas_vectorized(T, self.Tcs, self.ais, self.kappa0s, self.kappa1s,
                                        a_alphas=[0.0]*self.N if self.scalar else zeros(self.N))

    def a_alpha_and_derivatives_vectorized(self, T):
        r'''Method to calculate the pure-component `a_alphas` and their first
        and second derivatives for the PRSV EOS. This vectorized implementation
        is added for extra speed.

        .. math::
            a\alpha = a \left(\left(\kappa_{0} + \kappa_{1} \left(\sqrt{\frac{
            T}{Tc}} + 1\right) \left(- \frac{T}{Tc} + \frac{7}{10}\right)
            \right) \left(- \sqrt{\frac{T}{Tc}} + 1\right) + 1\right)^{2}

        Parameters
        ----------
        T : float
            Temperature, [K]

        Returns
        -------
        a_alphas : list[float]
            Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
        da_alpha_dTs : list[float]
            Temperature derivative of coefficient calculated by EOS-specific
            method, [J^2/mol^2/Pa/K]
        d2a_alpha_dT2s : list[float]
            Second temperature derivative of coefficient calculated by
            EOS-specific method, [J^2/mol^2/Pa/K**2]
        '''
        N = self.N
        if self.scalar:
            a_alphas, da_alpha_dTs, d2a_alpha_dT2s = [0.0]*N, [0.0]*N, [0.0]*N
        else:
            a_alphas, da_alpha_dTs, d2a_alpha_dT2s = zeros(N), zeros(N), zeros(N)
        return PRSV_a_alpha_and_derivatives_vectorized(T, self.Tcs, self.ais, self.kappa0s, self.kappa1s,
                                                       a_alphas=a_alphas, da_alpha_dTs=da_alpha_dTs, d2a_alpha_dT2s=d2a_alpha_dT2s)
class PRSV2MIX(PRMIX, PRSV2):
    r'''Class for solving the Peng-Robinson-Stryjek-Vera 2 equations of state
    for a Mixture as given in [1]_. Subclasses :obj:`PRMIX` and `PRSV2 <thermo.eos.PRSV2>`.
    Solves the EOS on initialization and calculates fugacities for all
    components in all phases.

    Inherits the method of calculating fugacity coefficients from :obj:`PRMIX`.
    Two of `T`, `P`, and `V` are needed to solve the EOS.

    .. math::
        P = \frac{RT}{v-b}-\frac{a\alpha(T)}{v(v+b)+b(v-b)}

    .. math::
        a \alpha = \sum_i \sum_j z_i z_j {(a\alpha)}_{ij}

    .. math::
        (a\alpha)_{ij} = (1-k_{ij})\sqrt{(a\alpha)_{i}(a\alpha)_{j}}

    .. math::
        b = \sum_i z_i b_i

    .. math::
        a_i=0.45724\frac{R^2T_{c,i}^2}{P_{c,i}}

    .. math::
        b_i=0.07780\frac{RT_{c,i}}{P_{c,i}}

    .. math::
        \alpha(T)_i=[1+\kappa_i(1-\sqrt{T_{r,i}})]^2

    .. math::
        \kappa_i = \kappa_{0,i} + [\kappa_{1,i} + \kappa_{2,i}(\kappa_{3,i} - T_{r,i})(1-T_{r,i}^{0.5})]
        (1 + T_{r,i}^{0.5})(0.7 - T_{r,i})

    .. math::
        \kappa_{0,i} = 0.378893 + 1.4897153\omega_i - 0.17131848\omega_i^2
        + 0.0196554\omega_i^3

    Parameters
    ----------
    Tcs : float
        Critical temperatures of all compounds, [K]
    Pcs : float
        Critical pressures of all compounds, [Pa]
    omegas : float
        Acentric factors of all compounds, [-]
    zs : float
        Overall mole fractions of all species, [-]
    kijs : list[list[float]], optional
        n*n size list of lists with binary interaction parameters for the
        Van der Waals mixing rules, default all 0 [-]
    T : float, optional
        Temperature, [K]
    P : float, optional
        Pressure, [Pa]
    V : float, optional
        Molar volume, [m^3/mol]
    kappa1s : list[float], optional
        Fit parameter; available in [1]_ for over 90 compounds, [-]
    kappa2s : list[float], optional
        Fit parameter; available in [1]_ for over 90 compounds, [-]
    kappa3s : list[float], optional
        Fit parameter; available in [1]_ for over 90 compounds, [-]
    fugacities : bool, optional
        Whether or not to calculate fugacity related values (phis, log phis,
        and fugacities); default True, [-]
    only_l : bool, optional
        When true, if there is a liquid and a vapor root, only the liquid
        root (and properties) will be set; default False, [-]
    only_g : bool, optional
        When true, if there is a liquid and a vapor root, only the vapor
        root (and properties) will be set; default False, [-]

    Examples
    --------
    T-P initialization, nitrogen-methane at 115 K and 1 MPa:

    >>> eos = PRSV2MIX(T=115, P=1E6, Tcs=[126.1, 190.6], Pcs=[33.94E5, 46.04E5], omegas=[0.04, 0.011], zs=[0.5, 0.5], kijs=[[0,0],[0,0]])
    >>> eos.V_l, eos.V_g
    (3.6235536165e-05, 0.00070024238654)
    >>> eos.fugacities_l, eos.fugacities_g
    ([794057.58318, 72851.22327], [436553.65618, 357878.11066])

    Notes
    -----
    For P-V initializations, a numerical solver is used to find T.

    Note that tabulated `kappa` values should be used with the critical
    parameters used in their fits. [1]_ considered only vapor
    pressure in fitting the parameter.

    References
    ----------
    .. [1] Stryjek, R., and J. H. Vera. "PRSV2: A Cubic Equation of State for
       Accurate Vapor-liquid Equilibria Calculations." The Canadian Journal of
       Chemical Engineering 64, no. 5 (October 1, 1986): 820-26.
       doi:10.1002/cjce.5450640516.
    '''

    eos_pure = PRSV2
    # Per-component constants which are not part of the thermodynamic state
    nonstate_constants_specific = ('kappa1s', 'kappa2s', 'kappa3s', 'kappa0s', 'kappas')
    # Mapping of mixture keyword names to pure-component EOS keyword names
    mix_kwargs_to_pure = {'kappa1s': 'kappa1', 'kappa2s': 'kappa2', 'kappa3s': 'kappa3'}
    kwargs_linear = ('kappa1s', 'kappa2s', 'kappa3s')
    kwargs_keys = ('kijs', 'kappa1s', 'kappa2s', 'kappa3s')
    model_id = 10206

    def __init__(self, Tcs, Pcs, omegas, zs, kijs=None, T=None, P=None, V=None,
                 kappa1s=None, kappa2s=None, kappa3s=None,
                 fugacities=True, only_l=False, only_g=False):
        self.N = N = len(Tcs)
        cmps = range(N)
        self.Tcs = Tcs
        self.Pcs = Pcs
        self.omegas = omegas
        self.zs = zs
        # Plain-list mole fractions select the pure-Python code paths; any
        # other container type is treated as a numpy array throughout.
        self.scalar = scalar = type(zs) is list
        if kijs is None:
            # Default: no binary interaction between any component pair
            if scalar:
                kijs = [[0.0]*N for i in cmps]
            else:
                kijs = zeros((N, N))
        self.kijs = kijs
        # All three extra fit parameters default to zero when not supplied
        if scalar:
            if kappa1s is None:
                kappa1s = [0.0]*N
            if kappa2s is None:
                kappa2s = [0.0]*N
            if kappa3s is None:
                kappa3s = [0.0]*N
        else:
            if kappa1s is None:
                kappa1s = zeros(N)
            if kappa2s is None:
                kappa2s = zeros(N)
            if kappa3s is None:
                kappa3s = zeros(N)
        self.kwargs = {'kijs': kijs, 'kappa1s': kappa1s, 'kappa2s': kappa2s, 'kappa3s': kappa3s}
        self.kappa1s = kappa1s
        self.kappa2s = kappa2s
        self.kappa3s = kappa3s
        self.T = T
        self.P = P
        self.V = V
        c2R, c1R2_c2R = self.c2R, self.c1R2_c2R
        if scalar:
            self.bs = bs = [c2R*Tcs[i]/Pcs[i] for i in cmps]
            self.ais = [c1R2_c2R*Tcs[i]*bs[i] for i in cmps]
            # Horner form of the kappa0 acentric-factor polynomial
            self.kappa0s = kappa0s = [omega*(omega*(0.0196554*omega - 0.17131848) + 1.4897153) + 0.378893 for omega in omegas]
            b = 0.0
            for i in cmps:
                b += bs[i]*zs[i]
        else:
            self.bs = bs = c2R*Tcs/Pcs
            self.ais = c1R2_c2R*Tcs*bs
            self.kappa0s = kappa0s = omegas*(omegas*(0.0196554*omegas - 0.17131848) + 1.4897153) + 0.378893
            b = float((bs*zs).sum())
        self.b = b
        # Peng-Robinson family coefficients: delta = 2 b, epsilon = -b^2
        self.delta = 2.0*b
        self.epsilon = -b*b
        if self.V and self.P:
            # P-V initialization: T must be solved for before the
            # temperature-dependent kappas below can be evaluated.
            solution = 'g' if (only_g and not only_l) else ('l' if only_l else None)
            self.T = T = self.solve_T(self.P, self.V, solution=solution)
        if scalar:
            kappas = [0.0]*N
            for i in cmps:
                Tr = T/Tcs[i]
                sqrtTr = sqrt(Tr)
                kappas[i] = kappa0s[i] + ((kappa1s[i] + kappa2s[i]*(kappa3s[i] - Tr)*(1. - sqrtTr))*(1. + sqrtTr)*(0.7 - Tr))
        else:
            Trs = T/Tcs
            sqrtTrs = npsqrt(Trs)
            kappas = kappa0s + ((kappa1s + kappa2s*(kappa3s - Trs)*(1. - sqrtTrs))*(1. + sqrtTrs)*(0.7 - Trs))
        self.kappas = kappas
        self.solve(only_l=only_l, only_g=only_g)
        if fugacities:
            self.fugacities()

    def _fast_init_specific(self, other):
        # Copy the per-component PRSV2 constants from `other` and recompute
        # the mixture covolume for this object's composition.
        # NOTE(review): `kappas` is temperature-dependent; this copy assumes
        # `other` shares this object's temperature -- confirm with callers.
        self.kappa0s = other.kappa0s
        self.kappa1s = other.kappa1s
        self.kappa2s = other.kappa2s
        self.kappa3s = other.kappa3s
        self.kappas = other.kappas
        if self.scalar:
            b = 0.0
            for bi, zi in zip(self.bs, self.zs):
                b += bi*zi
        else:
            b = float((self.bs*self.zs).sum())
        self.b = b
        self.delta = b + b
        self.epsilon = -b*b

    def a_alphas_vectorized(self, T):
        r'''Method to calculate the pure-component `a_alphas` for the PRSV2
        EOS. This vectorized implementation is added for extra speed.

        Parameters
        ----------
        T : float
            Temperature, [K]

        Returns
        -------
        a_alphas : list[float]
            Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]

        Examples
        --------
        >>> eos = PRSV2MIX(T=115, P=1E6, Tcs=[126.1, 190.6], Pcs=[33.94E5, 46.04E5], omegas=[0.04, 0.011], zs=[0.5, 0.5], kijs=[[0,0],[0,0]])
        >>> eos.a_alphas_vectorized(300)
        [0.0860568595, 0.20174345803]
        '''
        return PRSV2_a_alphas_vectorized(T, self.Tcs, self.ais, self.kappa0s, self.kappa1s, self.kappa2s, self.kappa3s,
                                         a_alphas=([0.0]*self.N if self.scalar else zeros(self.N)))

    def a_alpha_and_derivatives_vectorized(self, T):
        r'''Method to calculate the pure-component `a_alphas` and their first
        and second derivatives for the PRSV2 EOS. This vectorized
        implementation is added for extra speed.

        Parameters
        ----------
        T : float
            Temperature, [K]

        Returns
        -------
        a_alphas : list[float]
            Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
        da_alpha_dTs : list[float]
            Temperature derivative of coefficient calculated by EOS-specific
            method, [J^2/mol^2/Pa/K]
        d2a_alpha_dT2s : list[float]
            Second temperature derivative of coefficient calculated by
            EOS-specific method, [J^2/mol^2/Pa/K**2]
        '''
        N = self.N
        # Output containers match the composition container type
        if self.scalar:
            a_alphas, da_alpha_dTs, d2a_alpha_dT2s = [0.0]*N, [0.0]*N, [0.0]*N
        else:
            a_alphas, da_alpha_dTs, d2a_alpha_dT2s = zeros(N), zeros(N), zeros(N)
        return PRSV2_a_alpha_and_derivatives_vectorized(T, self.Tcs, self.ais, self.kappa0s, self.kappa1s, self.kappa2s, self.kappa3s,
                                                        a_alphas=a_alphas, da_alpha_dTs=da_alpha_dTs, d2a_alpha_dT2s=d2a_alpha_dT2s)
class TWUPRMIX(TwuPR95_a_alpha, PRMIX):
    r'''Class for solving the Twu [1]_ variant of the Peng-Robinson cubic
    equation of state for a mixture. Solves the EOS on
    initialization and calculates fugacities for all components in all phases.

    Two of `T`, `P`, and `V` are needed to solve the EOS.

    .. math::
        P = \frac{RT}{v-b}-\frac{a\alpha(T)}{v(v+b)+b(v-b)}

    .. math::
        a \alpha = \sum_i \sum_j z_i z_j {(a\alpha)}_{ij}

    .. math::
        (a\alpha)_{ij} = (1-k_{ij})\sqrt{(a\alpha)_{i}(a\alpha)_{j}}

    .. math::
        b = \sum_i z_i b_i

    .. math::
        a_i=0.45724\frac{R^2T_{c,i}^2}{P_{c,i}}

    .. math::
        b_i=0.07780\frac{RT_{c,i}}{P_{c,i}}

    .. math::
        \alpha_i = \alpha_i^{(0)} + \omega_i(\alpha_i^{(1)}-\alpha_i^{(0)})

    .. math::
        \alpha^{(\text{0 or 1})} = T_{r,i}^{N(M-1)}\exp[L(1-T_{r,i}^{NM})]

    For sub-critical conditions:

    L0, M0, N0 = 0.125283, 0.911807, 1.948150;

    L1, M1, N1 = 0.511614, 0.784054, 2.812520

    For supercritical conditions:

    L0, M0, N0 = 0.401219, 4.963070, -0.2;

    L1, M1, N1 = 0.024955, 1.248089, -8.

    Parameters
    ----------
    Tcs : float
        Critical temperatures of all compounds, [K]
    Pcs : float
        Critical pressures of all compounds, [Pa]
    omegas : float
        Acentric factors of all compounds, [-]
    zs : float
        Overall mole fractions of all species, [-]
    kijs : list[list[float]], optional
        n*n size list of lists with binary interaction parameters for the
        Van der Waals mixing rules, default all 0 [-]
    T : float, optional
        Temperature, [K]
    P : float, optional
        Pressure, [Pa]
    V : float, optional
        Molar volume, [m^3/mol]
    fugacities : bool, optional
        Whether or not to calculate fugacity related values (phis, log phis,
        and fugacities); default True, [-]
    only_l : bool, optional
        When true, if there is a liquid and a vapor root, only the liquid
        root (and properties) will be set; default False, [-]
    only_g : bool, optional
        When true, if there is a liquid and a vapor root, only the vapor
        root (and properties) will be set; default False, [-]

    Examples
    --------
    T-P initialization, nitrogen-methane at 115 K and 1 MPa:

    >>> eos = TWUPRMIX(T=115, P=1E6, Tcs=[126.1, 190.6], Pcs=[33.94E5, 46.04E5], omegas=[0.04, 0.011], zs=[0.5, 0.5], kijs=[[0,0],[0,0]])
    >>> eos.V_l, eos.V_g
    (3.624571041e-05, 0.0007004401318)
    >>> eos.fugacities_l, eos.fugacities_g
    ([792155.022163, 73305.88829], [436468.967764, 358049.2495573])

    Notes
    -----
    For P-V initializations, a numerical solver is used to find T.
    Claimed to be more accurate than the PR, PR78 and PRSV equations.

    References
    ----------
    .. [1] Twu, Chorng H., John E. Coon, and John R. Cunningham. "A New
       Generalized Alpha Function for a Cubic Equation of State Part 1.
       Peng-Robinson Equation." Fluid Phase Equilibria 105, no. 1 (March 15,
       1995): 49-59. doi:10.1016/0378-3812(94)02601-V.
    '''

    eos_pure = TWUPR
    # Use the generic base-class implementations rather than any
    # PR-specialized shortcuts inherited from PRMIX.
    P_max_at_V = GCEOS.P_max_at_V
    solve_T = GCEOS.solve_T
    kwargs_keys = ('kijs', )
    model_id = 10204

    def __init__(self, Tcs, Pcs, omegas, zs, kijs=None, T=None, P=None, V=None,
                 fugacities=True, only_l=False, only_g=False):
        self.N = N = len(Tcs)
        cmps = range(N)
        self.Tcs = Tcs
        self.Pcs = Pcs
        self.omegas = omegas
        self.zs = zs
        # Plain-list mole fractions select the pure-Python code paths; any
        # other container type is treated as a numpy array throughout.
        self.scalar = scalar = type(zs) is list
        if kijs is None:
            # Default: no binary interaction between any component pair
            if scalar:
                kijs = [[0.0]*N for i in cmps]
            else:
                kijs = zeros((N, N))
        self.kijs = kijs
        self.kwargs = {'kijs': kijs}
        self.T = T
        self.P = P
        self.V = V
        c2R, c1R2_c2R = self.c2R, self.c1R2_c2R
        if scalar:
            self.bs = bs = [c2R*Tcs[i]/Pcs[i] for i in cmps]
            self.ais = [c1R2_c2R*Tcs[i]*bs[i] for i in cmps]
            b = 0.0
            for i in cmps:
                b += bs[i]*zs[i]
        else:
            self.bs = bs = c2R*Tcs/Pcs
            self.ais = c1R2_c2R*Tcs*bs
            b = float((bs*zs).sum())
        self.b = b
        # Peng-Robinson family coefficients: delta = 2 b, epsilon = -b^2
        self.delta = 2.*b
        self.epsilon = -b*b
        self.check_sufficient_inputs()
        self.solve(only_l=only_l, only_g=only_g)
        if fugacities:
            self.fugacities()

    def _fast_init_specific(self, other):
        # Recompute only the composition-dependent mixture covolume; the Twu
        # alpha parameters carry no per-object state to copy here.
        if self.scalar:
            b = 0.0
            for bi, zi in zip(self.bs, self.zs):
                b += bi*zi
        else:
            b = float((self.bs*self.zs).sum())
        self.b = b
        self.delta = 2.0*b
        self.epsilon = -b*b
class TWUSRKMIX(TwuSRK95_a_alpha, SRKMIX):
    r'''Class for solving the Twu variant of the Soave-Redlich-Kwong cubic
    equation of state for a mixture. Solves the EOS on
    initialization and calculates fugacities for all components in all phases.

    Two of `T`, `P`, and `V` are needed to solve the EOS.

    .. math::
        P = \frac{RT}{V-b} - \frac{a\alpha(T)}{V(V+b)}

    .. math::
        a_i =\left(\frac{R^2(T_{c,i})^{2}}{9(\sqrt[3]{2}-1)P_{c,i}} \right)
        =\frac{0.42748\cdot R^2(T_{c,i})^{2}}{P_{c,i}}

    .. math::
        b_i =\left( \frac{(\sqrt[3]{2}-1)}{3}\right)\frac{RT_{c,i}}{P_{c,i}}
        =\frac{0.08664\cdot R T_{c,i}}{P_{c,i}}

    .. math::
        a \alpha = \sum_i \sum_j z_i z_j {(a\alpha)}_{ij}

    .. math::
        (a\alpha)_{ij} = (1-k_{ij})\sqrt{(a\alpha)_{i}(a\alpha)_{j}}

    .. math::
        b = \sum_i z_i b_i

    .. math::
        \alpha_i = \alpha^{(0,i)} + \omega_i(\alpha^{(1,i)}-\alpha^{(0,i)})

    .. math::
        \alpha^{(\text{0 or 1, i})} = T_{r,i}^{N(M-1)}\exp[L(1-T_{r,i}^{NM})]

    For sub-critical conditions:

    L0, M0, N0 = 0.141599, 0.919422, 2.496441

    L1, M1, N1 = 0.500315, 0.799457, 3.291790

    For supercritical conditions:

    L0, M0, N0 = 0.441411, 6.500018, -0.20

    L1, M1, N1 = 0.032580, 1.289098, -8.0

    Parameters
    ----------
    Tcs : float
        Critical temperatures of all compounds, [K]
    Pcs : float
        Critical pressures of all compounds, [Pa]
    omegas : float
        Acentric factors of all compounds, [-]
    zs : float
        Overall mole fractions of all species, [-]
    kijs : list[list[float]], optional
        n*n size list of lists with binary interaction parameters for the
        Van der Waals mixing rules, default all 0 [-]
    T : float, optional
        Temperature, [K]
    P : float, optional
        Pressure, [Pa]
    V : float, optional
        Molar volume, [m^3/mol]
    fugacities : bool, optional
        Whether or not to calculate fugacity related values (phis, log phis,
        and fugacities); default True, [-]
    only_l : bool, optional
        When true, if there is a liquid and a vapor root, only the liquid
        root (and properties) will be set; default False, [-]
    only_g : bool, optional
        When true, if there is a liquid and a vapor root, only the vapor
        root (and properties) will be set; default False, [-]

    Examples
    --------
    T-P initialization, nitrogen-methane at 115 K and 1 MPa:

    >>> eos = TWUSRKMIX(T=115, P=1E6, Tcs=[126.1, 190.6], Pcs=[33.94E5, 46.04E5], omegas=[0.04, 0.011], zs=[0.5, 0.5], kijs=[[0,0],[0,0]])
    >>> eos.V_l, eos.V_g
    (4.1087927542e-05, 0.00071170732525)
    >>> eos.fugacities_l, eos.fugacities_g
    ([809692.830826, 74093.6388157], [441783.431489, 362470.3174107])

    Notes
    -----
    For P-V initializations, a numerical solver is used to find T.
    Claimed to be more accurate than the SRK equation.

    References
    ----------
    .. [1] Twu, Chorng H., John E. Coon, and John R. Cunningham. "A New
       Generalized Alpha Function for a Cubic Equation of State Part 2.
       Redlich-Kwong Equation." Fluid Phase Equilibria 105, no. 1 (March 15,
       1995): 61-69. doi:10.1016/0378-3812(94)02602-W.
    '''
    # a_alpha_mro = -5
    kwargs_keys = ('kijs', )
    eos_pure = TWUSRK
    # Use the generic base-class implementations rather than any
    # SRK-specialized shortcuts inherited from SRKMIX.
    P_max_at_V = GCEOS.P_max_at_V
    solve_T = GCEOS.solve_T
    model_id = 10104

    def __init__(self, Tcs, Pcs, omegas, zs, kijs=None, T=None, P=None, V=None,
                 fugacities=True, only_l=False, only_g=False):
        self.N = N = len(Tcs)
        cmps = range(N)
        self.Tcs = Tcs
        self.Pcs = Pcs
        self.omegas = omegas
        self.zs = zs
        # Plain-list mole fractions select the pure-Python code paths; any
        # other container type is treated as a numpy array throughout.
        self.scalar = scalar = type(zs) is list
        if kijs is None:
            # Default: no binary interaction between any component pair
            if scalar:
                kijs = [[0.0]*N for i in range(N)]
            else:
                kijs = zeros((N, N))
        self.kijs = kijs
        self.kwargs = {'kijs': kijs}
        self.T = T
        self.P = P
        self.V = V
        c2R, c1R2_c2R = self.c2R, self.c1R2_c2R
        if scalar:
            self.bs = bs = [c2R*Tcs[i]/Pcs[i] for i in cmps]
            self.ais = [c1R2_c2R*Tcs[i]*bs[i] for i in cmps]
            b = 0.0
            for i in cmps:
                b += bs[i]*zs[i]
        else:
            self.bs = bs = c2R*Tcs/Pcs
            self.ais = c1R2_c2R*Tcs*bs
            b = float((bs*zs).sum())
        # SRK form: delta = b; epsilon is not set here -- presumably provided
        # as a class attribute by the SRK hierarchy (NOTE(review): confirm).
        self.delta = self.b = b
        self.check_sufficient_inputs()
        self.solve(only_l=only_l, only_g=only_g)
        if fugacities:
            self.fugacities()

    def _fast_init_specific(self, other):
        # Recompute only the composition-dependent mixture covolume; the Twu
        # alpha parameters carry no per-object state to copy here.
        b = 0.0
        bs, zs = self.bs, self.zs
        if self.scalar:
            for i in range(self.N):
                b += bs[i]*zs[i]
        else:
            b = float((bs*zs).sum())
        self.delta = self.b = b
class APISRKMIX(SRKMIX, APISRK):
    r'''Class for solving the Refinery Soave-Redlich-Kwong cubic
    equation of state for a mixture of any number of compounds, as shown in the
    API Databook [1]_. Subclasses :obj:`APISRK <thermo.eos.APISRK>`. Solves the EOS on
    initialization and calculates fugacities for all components in all phases.

    Two of `T`, `P`, and `V` are needed to solve the EOS.

    .. math::
        P = \frac{RT}{V-b} - \frac{a\alpha(T)}{V(V+b)}

    .. math::
        a \alpha = \sum_i \sum_j z_i z_j {(a\alpha)}_{ij}

    .. math::
        (a\alpha)_{ij} = (1-k_{ij})\sqrt{(a\alpha)_{i}(a\alpha)_{j}}

    .. math::
        b = \sum_i z_i b_i

    .. math::
        a_i =\left(\frac{R^2(T_{c,i})^{2}}{9(\sqrt[3]{2}-1)P_{c,i}} \right)
        =\frac{0.42748\cdot R^2(T_{c,i})^{2}}{P_{c,i}}

    .. math::
        b_i =\left( \frac{(\sqrt[3]{2}-1)}{3}\right)\frac{RT_{c,i}}{P_{c,i}}
        =\frac{0.08664\cdot R T_{c,i}}{P_{c,i}}

    .. math::
        \alpha(T)_i = \left[1 + S_{1,i}\left(1-\sqrt{T_{r,i}}\right) + S_{2,i}
        \frac{1- \sqrt{T_{r,i}}}{\sqrt{T_{r,i}}}\right]^2

    .. math::
        S_{1,i} = 0.48508 + 1.55171\omega_i - 0.15613\omega_i^2 \text{ if S1 is not tabulated }

    Parameters
    ----------
    Tcs : float
        Critical temperatures of all compounds, [K]
    Pcs : float
        Critical pressures of all compounds, [Pa]
    omegas : float
        Acentric factors of all compounds, [-]
    zs : float
        Overall mole fractions of all species, [-]
    kijs : list[list[float]], optional
        n*n size list of lists with binary interaction parameters for the
        Van der Waals mixing rules, default all 0 [-]
    T : float, optional
        Temperature, [K]
    P : float, optional
        Pressure, [Pa]
    V : float, optional
        Molar volume, [m^3/mol]
    S1s : float, optional
        Fit constant or estimated from acentric factor if not provided [-]
    S2s : float, optional
        Fit constant or 0 if not provided [-]
    fugacities : bool, optional
        Whether or not to calculate fugacity related values (phis, log phis,
        and fugacities); default True, [-]
    only_l : bool, optional
        When true, if there is a liquid and a vapor root, only the liquid
        root (and properties) will be set; default False, [-]
    only_g : bool, optional
        When true, if there is a liquid and a vapor root, only the vapor
        root (and properties) will be set; default False, [-]

    Notes
    -----
    For P-V initializations, a numerical solver is used to find T.

    Examples
    --------
    T-P initialization, nitrogen-methane at 115 K and 1 MPa:

    >>> eos = APISRKMIX(T=115, P=1E6, Tcs=[126.1, 190.6], Pcs=[33.94E5, 46.04E5], omegas=[0.04, 0.011], zs=[0.5, 0.5], kijs=[[0,0],[0,0]])
    >>> eos.V_l, eos.V_g
    (4.101592310e-05, 0.00071046883030)
    >>> eos.fugacities_l, eos.fugacities_g
    ([817882.3033, 71620.4823812], [442158.29113, 361519.79877])

    References
    ----------
    .. [1] API Technical Data Book: General Properties & Characterization.
       American Petroleum Institute, 7E, 2005.
    '''

    eos_pure = APISRK
    nonstate_constants_specific = ('S1s', 'S2s')
    mix_kwargs_to_pure = {'S1s': 'S1', 'S2s': 'S2'}
    kwargs_linear = ('S1s', 'S2s')
    kwargs_keys = ('kijs', 'S1s', 'S2s')
    model_id = 10105

    def __init__(self, Tcs, Pcs, zs, omegas=None, kijs=None, T=None, P=None, V=None,
                 S1s=None, S2s=None, fugacities=True, only_l=False, only_g=False):
        self.N = N = len(Tcs)
        cmps = range(N)
        self.Tcs = Tcs
        self.Pcs = Pcs
        self.omegas = omegas
        self.zs = zs
        # Plain-list mole fractions select the pure-Python code paths; any
        # other container type is treated as a numpy array throughout.
        self.scalar = scalar = type(zs) is list
        if kijs is None:
            # Default: no binary interaction between any component pair
            if scalar:
                kijs = [[0.0]*N for i in cmps]
            else:
                kijs = zeros((N, N))
        self.kijs = kijs
        self.T = T
        self.P = P
        self.V = V
        self.check_sufficient_inputs()
        # Setup S1s and S2s
        if S1s is None and omegas is None:
            # BUG FIX: corrected "of" -> "or" in the error message
            raise ValueError('Either acentric factor or S1 is required')
        if S1s is None:
            # API correlation estimating S1 from the acentric factor
            if scalar:
                self.S1s = [omega*(1.55171 - 0.15613*omega) + 0.48508 for omega in omegas]
            else:
                self.S1s = omegas*(1.55171 - 0.15613*omegas) + 0.48508
        else:
            self.S1s = S1s
        if S2s is None:
            # S2 defaults to zero (plain SRK-style alpha) when not tabulated
            if scalar:
                S2s = [0.0]*N
            else:
                S2s = zeros(N)
        self.S2s = S2s
        # BUG FIX: `kwargs` was previously assigned twice and the second
        # assignment dropped 'kijs', even though `kwargs_keys` declares all
        # three keys; store them together in a single dict.
        self.kwargs = {'kijs': kijs, 'S1s': self.S1s, 'S2s': self.S2s}
        c2R, c1R2_c2R = self.c2R, self.c1R2_c2R
        if scalar:
            self.bs = bs = [c2R*Tcs[i]/Pcs[i] for i in cmps]
            self.ais = [c1R2_c2R*Tcs[i]*bs[i] for i in cmps]
            b = 0.0
            for i in cmps:
                b += bs[i]*zs[i]
        else:
            self.bs = bs = c2R*Tcs/Pcs
            self.ais = c1R2_c2R*Tcs*bs
            b = float((bs*zs).sum())
        # SRK form: delta equals the mixture covolume b
        self.b = self.delta = b
        self.solve(only_l=only_l, only_g=only_g)
        if fugacities:
            self.fugacities()

    def _fast_init_specific(self, other):
        # Copy the per-component alpha constants from `other` and recompute
        # the composition-dependent mixture covolume.
        self.S1s = other.S1s
        self.S2s = other.S2s
        if self.scalar:
            self.delta = self.b = sum([bi*zi for bi, zi in zip(self.bs, self.zs)])
        else:
            self.delta = self.b = float((self.bs*self.zs).sum())

    def a_alphas_vectorized(self, T):
        '''Calculate the pure-component `a_alphas` for the API SRK EOS at `T`
        using the vectorized low-level routine; returns a list or numpy array
        matching the composition container type, [J^2/mol^2/Pa].
        '''
        a_alphas = [0.0]*self.N if self.scalar else zeros(self.N)
        return APISRK_a_alphas_vectorized(T, self.Tcs, self.ais, self.S1s, self.S2s, a_alphas=a_alphas)

    def a_alpha_and_derivatives_vectorized(self, T):
        r'''Method to calculate the pure-component `a_alphas` and their first
        and second derivatives for the API SRK EOS. This vectorized implementation
        is added for extra speed.

        .. math::
            a\alpha(T) = a\left[1 + S_1\left(1-\sqrt{T_r}\right) + S_2\frac{1
            - \sqrt{T_r}}{\sqrt{T_r}}\right]^2

        .. math::
            \frac{d a\alpha}{dT} = a\frac{Tc}{T^{2}} \left(- S_{2} \left(\sqrt{
            \frac{T}{Tc}} - 1\right) + \sqrt{\frac{T}{Tc}} \left(S_{1} \sqrt{
            \frac{T}{Tc}} + S_{2}\right)\right) \left(S_{2} \left(\sqrt{\frac{
            T}{Tc}} - 1\right) + \sqrt{\frac{T}{Tc}} \left(S_{1} \left(\sqrt{
            \frac{T}{Tc}} - 1\right) - 1\right)\right)

        .. math::
            \frac{d^2 a\alpha}{dT^2} = a\frac{1}{2 T^{3}} \left(S_{1}^{2} T
            \sqrt{\frac{T}{Tc}} - S_{1} S_{2} T \sqrt{\frac{T}{Tc}} + 3 S_{1}
            S_{2} Tc \sqrt{\frac{T}{Tc}} + S_{1} T \sqrt{\frac{T}{Tc}}
            - 3 S_{2}^{2} Tc \sqrt{\frac{T}{Tc}} + 4 S_{2}^{2} Tc + 3 S_{2}
            Tc \sqrt{\frac{T}{Tc}}\right)

        Parameters
        ----------
        T : float
            Temperature, [K]

        Returns
        -------
        a_alphas : list[float]
            Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
        da_alpha_dTs : list[float]
            Temperature derivative of coefficient calculated by EOS-specific
            method, [J^2/mol^2/Pa/K]
        d2a_alpha_dT2s : list[float]
            Second temperature derivative of coefficient calculated by
            EOS-specific method, [J^2/mol^2/Pa/K**2]
        '''
        N = self.N
        if self.scalar:
            a_alphas, da_alpha_dTs, d2a_alpha_dT2s = [0.0]*N, [0.0]*N, [0.0]*N
        else:
            a_alphas, da_alpha_dTs, d2a_alpha_dT2s = zeros(N), zeros(N), zeros(N)
        return APISRK_a_alpha_and_derivatives_vectorized(T, self.Tcs, self.ais, self.S1s, self.S2s,
                                                         a_alphas=a_alphas, da_alpha_dTs=da_alpha_dTs,
                                                         d2a_alpha_dT2s=d2a_alpha_dT2s)

    def P_max_at_V(self, V):
        # With a single component and S2 == 0 the APISRK alpha function
        # reduces to the SRK form with m = S1, so the SRK bound applies;
        # `ms` is set temporarily only to satisfy SRK.P_max_at_V.
        if self.N == 1 and self.S2s[0] == 0:
            self.ms = self.S1s
            P_max_at_V = SRK.P_max_at_V(self, V)
            del self.ms
            return P_max_at_V
        return GCEOSMIX.P_max_at_V(self, V)
eos_mix_list = [PRMIX, SRKMIX, PR78MIX, VDWMIX, PRSVMIX, PRSV2MIX, TWUPRMIX,
TWUSRKMIX, APISRKMIX, IGMIX, RKMIX, PRMIXTranslatedConsistent,
PRMIXTranslatedPPJP, SRKMIXTranslatedConsistent,
PRMIXTranslated, SRKMIXTranslated]
'''List of all exported EOS classes.
'''
eos_mix_no_coeffs_list = [PRMIX, SRKMIX, PR78MIX, VDWMIX, TWUPRMIX, TWUSRKMIX,
IGMIX, RKMIX, PRMIXTranslatedConsistent, PRMIXTranslated,
SRKMIXTranslated,
PRMIXTranslatedPPJP, SRKMIXTranslatedConsistent]
'''List of all exported EOS classes that do not require special parameters
or can fill in their special parameters from other specified parameters.
'''
eos_mix_dict = {c.__name__: c for c in eos_mix_list}
'''dict : Dict of all cubic mixture equation of state classes, indexed by their class name.
'''
eos_mix_full_path_dict = {c.__full_path__: c for c in eos_mix_list}
'''dict : Dict of all cubic mixture equation of state classes, indexed by their module path and class name.
'''
eos_mix_full_path_reverse_dict = {c: c.__full_path__ for c in eos_mix_list}
'''dict : Dict of all cubic mixture equation of state classes, indexed by their module path and class name.
'''
|
{"hexsha": "43859292910a325435a452907759586a392f2abf", "size": 422590, "ext": "py", "lang": "Python", "max_stars_repo_path": "thermo/eos_mix.py", "max_stars_repo_name": "RoryKurek/thermo", "max_stars_repo_head_hexsha": "985279467faa028234ab422a19b69385e5100149", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 380, "max_stars_repo_stars_event_min_datetime": "2016-07-04T09:45:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T18:09:45.000Z", "max_issues_repo_path": "thermo/eos_mix.py", "max_issues_repo_name": "RoryKurek/thermo", "max_issues_repo_head_hexsha": "985279467faa028234ab422a19b69385e5100149", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 104, "max_issues_repo_issues_event_min_datetime": "2016-07-10T20:47:12.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T20:43:39.000Z", "max_forks_repo_path": "thermo/eos_mix.py", "max_forks_repo_name": "RoryKurek/thermo", "max_forks_repo_head_hexsha": "985279467faa028234ab422a19b69385e5100149", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 96, "max_forks_repo_forks_event_min_datetime": "2016-07-05T20:54:05.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T03:06:02.000Z", "avg_line_length": 36.4962431989, "max_line_length": 1710, "alphanum_fraction": 0.5428287465, "include": true, "reason": "import numpy,from sympy", "num_tokens": 128725}
|
import gym
from .GridInterface import *
import numpy
import os
class GridTargetSearchAEnv(gym.Env, GridInterface):
    '''Continuous-action target-search environment on a 16x16 grid.

    The agent receives a 2-element continuous action in [-1, 1] and must reach
    the target cell while staying on the board. Episodes end on success
    (reward +1), leaving the board (reward -1), or after 1000 steps (reward 0).

    NOTE(review): grid cell codes (0/1/2/3) are interpreted by GridInterface;
    their exact meanings (free/spawn/obstacle/target) should be confirmed
    against that implementation.
    '''
    def __init__(self, render = False, view_camera_distance = 1.5, view_camera_angle = -80.0):
        gym.Env.__init__(self)
        GridInterface.__init__(self, render, view_camera_distance, view_camera_angle)
        self.grid_map = [
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 2, 2, 0, 0, 0, 0, 0, 0, 2, 2, 0, 0, 0],
            [0, 0, 0, 2, 2, 0, 0, 0, 0, 0, 0, 2, 2, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 2, 2, 0, 0, 0, 0, 0, 0, 2, 2, 0, 0, 0],
            [0, 0, 0, 2, 2, 0, 0, 0, 0, 0, 0, 2, 2, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        ]
        self.reset_interface(self.grid_map)
        obs = self.update_observation()
        # Observation bounds follow the shape reported by the interface.
        self.action_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(2,), dtype=numpy.float32)
        self.observation_space = gym.spaces.Box(low=-1.0, high=1.0, shape=obs.shape, dtype=numpy.float32)

    def step(self, action):
        '''Advance one timestep; returns (observation, reward, done, info).'''
        self.step_interface(action)
        reward = 0.0
        done = False
        if self.steps >= 1000:
            # Episode timeout: neutral terminal reward
            reward = 0.0
            done = True
        elif self.on_target(0, 0):
            reward = 1.0
            done = True
        elif self.out_board(0):
            reward = -1.0
            done = True
        # BUG FIX: the info value must be a dict per the gym.Env API contract;
        # returning None breaks standard wrappers (e.g. gym.wrappers.Monitor).
        return self.update_observation(), reward, done, {}

    def reset(self):
        '''Reset the grid to its initial layout and return the first observation.'''
        self.reset_interface(self.grid_map)
        return self.update_observation()

    def render(self, mode='human'):
        # Rendering is handled by the underlying interface when constructed
        # with render=True; `mode` is accepted for gym API compatibility.
        pass

    def close(self):
        pass
class GridTargetSearchADiscreteEnv(gym.Env):
    '''Discrete-action wrapper over GridTargetSearchAEnv: each of the 16
    discrete action indices is mapped to a fixed continuous 2-D action
    vector before being forwarded to the wrapped environment.
    '''
    def __init__(self, render = False, view_camera_distance = 1.5, view_camera_angle = -80.0):
        gym.Env.__init__(self)
        self.env = GridTargetSearchAEnv(render, view_camera_distance, view_camera_angle)
        self.action_space = gym.spaces.Discrete(16)
        self.observation_space = self.env.observation_space
        # Lookup table: discrete index -> continuous 2-D action vector.
        self.actions = numpy.array([
            [ 0.0,  0.0],
            [ 0.0,  0.2],
            [ 0.2,  0.0],
            [ 0.0, -0.2],
            [-0.2,  0.0],
            [ 0.0,  0.5],
            [ 0.5,  0.0],
            [ 0.0, -0.5],
            [-0.5,  0.0],
            [ 0.0,  1.0],
            [ 1.0,  0.0],
            [ 1.0,  1.0],
            [ 0.5, -0.5],
            [-0.5,  0.5],
            [-0.2, -0.2],
            [-0.5, -0.5],
        ])

    def step(self, action):
        '''Translate the discrete index and step the wrapped environment.'''
        return self.env.step(self.actions[action])

    def reset(self):
        return self.env.reset()

    def render(self):
        pass

    def close(self):
        pass
|
{"hexsha": "d244988980f1e8553c80a1c7f6847eaf2fed747e", "size": 3579, "ext": "py", "lang": "Python", "max_stars_repo_path": "gym-aeris/gym_aeris/envs/grid_target_search_a_env.py", "max_stars_repo_name": "michalnand/gym-aeris", "max_stars_repo_head_hexsha": "e3b924ff767073bf3e42339b2763a736851664c5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gym-aeris/gym_aeris/envs/grid_target_search_a_env.py", "max_issues_repo_name": "michalnand/gym-aeris", "max_issues_repo_head_hexsha": "e3b924ff767073bf3e42339b2763a736851664c5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gym-aeris/gym_aeris/envs/grid_target_search_a_env.py", "max_forks_repo_name": "michalnand/gym-aeris", "max_forks_repo_head_hexsha": "e3b924ff767073bf3e42339b2763a736851664c5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.9553571429, "max_line_length": 106, "alphanum_fraction": 0.4775076837, "include": true, "reason": "import numpy", "num_tokens": 1496}
|
import copy
import numpy as np
from nn_lib import *
def debug_net():
    '''Debug network by calculating gradient and numerical gradient'''
    n_inputs, n_outputs = 60, 10
    hidden_sizes = [40, 20]
    # Check the backpropagated gradient against a finite-difference estimate,
    # first with the regularization term switched off.
    net = NetworkObject(inpt=n_inputs, hidden=hidden_sizes,
                        outpt=n_outputs, lbda=0)
    diff = num_grad(net, n_inputs, 1e-4)
    print("Maximum relative difference in numerical gradient ", end = "")
    print("without regularization = {:.2e}".format(diff))
    # Repeat the check with regularization enabled.
    net = NetworkObject(inpt=n_inputs, hidden=hidden_sizes,
                        outpt=n_outputs, lbda=0.1)
    diff = num_grad(net, n_inputs, 1e-4)
    print("Maximum relative difference in numerical gradient ", end = "")
    print("with regularization = {:.2e}".format(diff))
def num_grad(network, inpt_siz, eps):
    '''Compare the network's analytical gradient against a central-difference
    numerical estimate.

    Parameters
    ----------
    network : NetworkObject
        Network exposing calc_gradient/get_gradient/get_thetas/set_thetas/
        get_cost.
    inpt_siz : int
        Number of input features per example.
    eps : float
        Finite-difference step size.

    Returns
    -------
    float
        Maximum element-wise relative difference between the analytical and
        numerical gradients across all theta matrices.
    '''
    # Create a random batch of inputs, all labelled with the same class.
    nExamples = 100
    lab = 2
    input_arr = np.random.random((nExamples, inpt_siz))
    # Analytical gradient via backpropagation.
    network.calc_gradient(input_arr, [lab]*nExamples, nExamples)
    analyt_grad = network.get_gradient()
    # Copy the parameters so they can be perturbed one element at a time.
    thetas = network.get_thetas()
    thetas_cpy = copy.deepcopy(thetas)
    numeric_grad = [grad * 0 for grad in analyt_grad]
    # Central difference: (J(theta + eps) - J(theta - eps)) / (2 eps)
    for kk in range(len(numeric_grad)):
        nn, mm = np.shape(numeric_grad[kk])
        for ii in range(nn):
            for jj in range(mm):
                # Plus
                thetas_cpy[kk][ii][jj] += eps
                network.set_thetas(thetas_cpy)
                jp = network.get_cost(input_arr, [lab]*nExamples)
                # Minus
                thetas_cpy[kk][ii][jj] -= 2 * eps
                network.set_thetas(thetas_cpy)
                jm = network.get_cost(input_arr, [lab]*nExamples)
                numeric_grad[kk][ii][jj] = (jp - jm) / (2 * eps)
                # Restore the local copy for the next element
                thetas_cpy[kk][ii][jj] = thetas[kk][ii][jj]
    # BUG FIX: the loop above leaves the network holding the last perturbed
    # theta (-eps at the final index) because the restore only updated the
    # local copy; push the original parameters back into the network.
    network.set_thetas(thetas)
    # Maximum relative deviation across every theta matrix.
    rel_diff = None
    for kk in range(len(numeric_grad)):
        diff = np.amax(abs((analyt_grad[kk] - numeric_grad[kk])/analyt_grad[kk]))
        if rel_diff is None:
            rel_diff = diff
        else:
            rel_diff = max(diff, rel_diff)
    return rel_diff
# Run the gradient check when this module is executed (or imported).
debug_net()
|
{"hexsha": "88b181dd77683e4c39f5a3268259e938dad56a2a", "size": 2448, "ext": "py", "lang": "Python", "max_stars_repo_path": "debug_network.py", "max_stars_repo_name": "AndresYague/neural_network_numbers", "max_stars_repo_head_hexsha": "049c6ece317127b96ea50be589c79e14c7f04e32", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "debug_network.py", "max_issues_repo_name": "AndresYague/neural_network_numbers", "max_issues_repo_head_hexsha": "049c6ece317127b96ea50be589c79e14c7f04e32", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "debug_network.py", "max_forks_repo_name": "AndresYague/neural_network_numbers", "max_forks_repo_head_hexsha": "049c6ece317127b96ea50be589c79e14c7f04e32", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.9873417722, "max_line_length": 77, "alphanum_fraction": 0.6102941176, "include": true, "reason": "import numpy", "num_tokens": 629}
|
[STATEMENT]
lemma rel_mset_size: "rel_mset R M N \<Longrightarrow> size M = size N"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. rel_mset R M N \<Longrightarrow> size M = size N
[PROOF STEP]
unfolding multiset.rel_compp_Grp Grp_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ((\<lambda>a b. b = image_mset fst a \<and> a \<in> {x. set_mset x \<subseteq> {(x, y). R x y}})\<inverse>\<inverse> OO (\<lambda>a b. b = image_mset snd a \<and> a \<in> {x. set_mset x \<subseteq> {(x, y). R x y}})) M N \<Longrightarrow> size M = size N
[PROOF STEP]
by auto
|
{"llama_tokens": 240, "file": null, "length": 2}
|
import os
import shutil
import numpy as np
import pandas as pd
import time
from scipy.special import softmax
import sys
import tensorflow as tf
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
import h5py
import argparse
from load_data import *
from model import *
from config_cnn import *
from utils import *
# set gpu number
# Pin the script to a single GPU before TensorFlow initializes its devices.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# ---------------------------------------------------------------------------
# Command-line configuration for ensemble knowledge distillation.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('-model', '--model_type', help="teacher/student", default='teacher', type=str)
parser.add_argument('-bs', '--batch_size', help="batch size used for training", default=64, type=int)
parser.add_argument('-lr', '--learning_rate', help="learning rate for adam", default=1e-4, type=float)
parser.add_argument('-temp', '--temperature', help="distillation temperature(>1)", default=2, type=float)
parser.add_argument('-alpha', '--alpha', help="weight to cce loss with soft targets, 1-alpha to loss with hard targets", default=0.2, type=float)
parser.add_argument('-comb', '--combination', help="combining predictions of models in ensemble (am/gm)", default='am', type=str)
parser.add_argument('-e', '--num_epochs', help="number of epochs to run", default=50, type=int)
parser.add_argument('-es', '--early_stop', default=7, type=int)
parser.add_argument('-rd_lr', '--reduce_lr', default=5, type=int)
parser.add_argument('-seed', '--rand_seed', default=0, type=int)
args = parser.parse_args()
np.random.seed(args.rand_seed)

# Pretrained teacher networks (fixed checkpoint paths from earlier runs).
teacher_lstm = tf.keras.models.load_model('../lstm_scnn_feat/val_acc-0.6886_teacher_tr_acc-0.9730_bestEp-09_bs-64_lr-0.0001.h5')
teacher_cnn = tf.keras.models.load_model('../schluter-cnn/teacher_val_acc-0.7920_tr_acc-0.9405_bestEp-05_bs-32_lr-0.0001_dr-0.2_fs-1.h5')

# Student architecture selection; invalid values fail fast.
if args.model_type=='teacher': student = Leglaive_RNN(timesteps=CNN_INPUT_SIZE)
elif args.model_type=='student': student = RNN_small(timesteps=CNN_INPUT_SIZE)
else:
    print('Invalid model type specified!')
    sys.exit()
opt = Adam(lr=args.learning_rate)

# Removing softmax layers so all three models expose raw logits.
teacher_lstm.pop()
teacher_cnn.pop()
student.pop()

# Student head outputs both the T=1 probabilities (hard-target loss) and the
# temperature-scaled probabilities (soft-target loss), concatenated along axis 1.
student_logits = student.layers[-1].output
student_logits_T = Lambda(lambda x: x/args.temperature)(student_logits)
probs_T = Softmax(axis=1)(student_logits_T)
probs_1 = Softmax(axis=1)(student_logits)
output = Concatenate()([probs_1, probs_T])
student = Model(inputs=student.input, outputs=output)
student.compile(optimizer=opt, loss=kd_loss(args.alpha, args.temperature), metrics=[acc, categorical_crossentropy, kld_loss])
print('\nStudent Model:\n')
print(student.summary())

# Hard labels from the Jamendo dataset.
X_tr, y_train = load_xy_data(None, MEL_JAMENDO_DIR, JAMENDO_LABEL_DIR, 'train')
Y_tr = to_categorical(y_train, 2)
X_val, y_val = load_xy_data(None, MEL_JAMENDO_DIR, JAMENDO_LABEL_DIR, 'valid')
Y_val = to_categorical(y_val, 2)

# Train Logits -> temperature-softened teacher probabilities (soft targets).
teacher_lstm_tr_logits = teacher_lstm.predict(X_tr, verbose=1)
teacher_cnn_tr_logits = teacher_cnn.predict(np.expand_dims(np.swapaxes(X_tr, 1, 2), axis=3), verbose=1)
teacher_lstm_tr_prob_T = softmax(teacher_lstm_tr_logits/args.temperature, axis=1)
teacher_cnn_tr_prob_T = softmax(teacher_cnn_tr_logits/args.temperature, axis=1)
# Val Logits
teacher_lstm_val_logits = teacher_lstm.predict(X_val, verbose=1)
teacher_cnn_val_logits = teacher_cnn.predict(np.expand_dims(np.swapaxes(X_val, 1, 2), axis=3), verbose=1)
teacher_lstm_val_prob_T = softmax(teacher_lstm_val_logits/args.temperature, axis=1)
teacher_cnn_val_prob_T = softmax(teacher_cnn_val_logits/args.temperature, axis=1)

# Combine the two teachers: arithmetic mean ('am') or renormalized geometric
# mean ('gm') of their softened probabilities.
if args.combination=='am':
    Y_tr_soft = (teacher_lstm_tr_prob_T+teacher_cnn_tr_prob_T)/2
    Y_val_soft = (teacher_lstm_val_prob_T+teacher_cnn_val_prob_T)/2
elif args.combination=='gm':
    GM = np.sqrt(teacher_lstm_tr_prob_T*teacher_cnn_tr_prob_T)
    GM /= GM.sum(axis=1)[:, np.newaxis]
    Y_tr_soft = GM
    GM = np.sqrt(teacher_lstm_val_prob_T*teacher_cnn_val_prob_T)
    GM /= GM.sum(axis=1)[:, np.newaxis]
    Y_val_soft = GM
else:
    # BUG FIX: an unrecognized value previously fell through and crashed later
    # with a NameError on Y_tr_soft; fail fast with a clear message instead,
    # mirroring the model_type validation above.
    print('Invalid combination specified!')
    sys.exit()

# Targets are [hard one-hot | soft teacher probs], consumed by kd_loss.
Y_tr = np.concatenate((Y_tr, Y_tr_soft), axis=1)
Y_val = np.concatenate((Y_val, Y_val_soft), axis=1)
print('\nLoading complete!\n')
print("Train Data Shape", X_tr.shape, Y_tr.shape)
print("Val Data Shape", X_val.shape, Y_val.shape)

# Timestamped scratch directory for per-epoch checkpoints (removed at the end).
timestampTime = time.strftime("%H%M%S")
timestampDate = time.strftime("%d%m%Y")
timestampLaunch = timestampDate + '_' + timestampTime
wts_dir = './weights_'+timestampLaunch+'/'
if not os.path.exists(wts_dir): os.makedirs(wts_dir)
score_string = 'val_acc-{val_acc:.4f}_kd_tr_acc-{acc:.4f}_bestEp-{epoch:02d}'
model_save_name = wts_dir+score_string+'_bs-'+str(args.batch_size)+'_lr-'+str(args.learning_rate)+'_temp-'+str(args.temperature)+'_alpha-'+\
                  str(args.alpha)+'.h5'
checkpoint = ModelCheckpoint(filepath=model_save_name, monitor='val_acc', verbose=1, save_weights_only=True, save_best_only=True, mode='auto')
earlyStopping = EarlyStopping(monitor='val_acc', patience=args.early_stop, verbose=1, mode='auto')
reduce_lr = ReduceLROnPlateau(monitor='val_acc', factor=0.8, patience=args.reduce_lr, verbose=1, min_lr=1e-8)
history = student.fit(X_tr, Y_tr, batch_size=args.batch_size, epochs=args.num_epochs, shuffle=True, validation_data=(X_val, Y_val),
                      callbacks=[checkpoint, earlyStopping, reduce_lr])

# Summarize training, evaluate the best checkpoint, persist the history CSV,
# then drop the scratch weights directory.
tr_loss = history.history['loss']
val_loss = history.history['val_loss']
tr_acc = history.history['acc']
val_acc = history.history['val_acc']
idx, best_val_acc = np.argmax(val_acc), np.max(val_acc)
corr_tr_acc = tr_acc[idx]
df_save = pd.DataFrame({'tr_loss':tr_loss, 'val_loss':val_loss, 'tr_acc':tr_acc, 'val_acc':val_acc})
best_model_name = 'val_acc-'+'{0:.4f}_kd'.format(best_val_acc)+'_tr_acc-'+'{0:.4f}'.format(corr_tr_acc)+'_bestEp-'+'{:02d}'.format(idx+1)+\
                  '_bs-'+str(args.batch_size)+'_lr-'+str(args.learning_rate)+'_temp-'+str(args.temperature)+'_alpha-'+ str(args.alpha)+'.h5'
save_path = './results/'+args.model_type+'/'+args.combination+'/'
if not os.path.exists(save_path): os.makedirs(save_path)
scores = test(best_model_name, args.model_type, wts_dir, args)
suffix = best_model_name[:-3]+'_acc-{0:.4f}'.format(scores[0])+'_pr-{0:.4f}'.format(scores[1])+'_re-{0:.4f}'.format(scores[2])+\
         '_f1-{0:.4f}'.format(scores[3])+'_fp-{0:.4f}'.format(scores[4])+'_fn-{0:.4f}'.format(scores[5])
df_save.to_csv(open(save_path + suffix + '.csv', 'w'))
if os.path.exists(wts_dir): shutil.rmtree(wts_dir)
print("Finished!")
|
{"hexsha": "02402d7a8505a94dbe6f7dba7a3fe4069f6aa8bb", "size": 6459, "ext": "py", "lang": "Python", "max_stars_repo_path": "enkd_scnn_feat_student-lstm/main_kd.py", "max_stars_repo_name": "mvp18/KD-SVD", "max_stars_repo_head_hexsha": "35b208a2455256b721189dbb0580e22654479761", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "enkd_scnn_feat_student-lstm/main_kd.py", "max_issues_repo_name": "mvp18/KD-SVD", "max_issues_repo_head_hexsha": "35b208a2455256b721189dbb0580e22654479761", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "enkd_scnn_feat_student-lstm/main_kd.py", "max_forks_repo_name": "mvp18/KD-SVD", "max_forks_repo_head_hexsha": "35b208a2455256b721189dbb0580e22654479761", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-24T09:15:27.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-24T09:15:27.000Z", "avg_line_length": 42.7748344371, "max_line_length": 145, "alphanum_fraction": 0.750116117, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1768}
|
from abc import abstractmethod, ABC
from collections import deque
import numpy as np
import time
import cv2
def timit(func):
    """Decorator that prints the wall-clock runtime of every call to ``func``.

    The wrapped function's return value is passed through unchanged; timing
    uses ``time.perf_counter`` for monotonic, high-resolution measurement.
    """
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        result = func(*args, **kwargs)
        elapsed = time.perf_counter() - start
        print(f"[runtime] --- {func.__name__}: {elapsed:.3f}(s)")
        return result
    return wrapper
class VisualOdom(ABC):
    """Abstract interface every visual-odometry implementation must satisfy."""

    @abstractmethod
    def estimate_motion(self):
        """Estimate the camera motion between two views."""

    @abstractmethod
    def track_motion(self):
        """Consume a new frame and update the accumulated motion estimate."""
class MonoCamVisualOdom(VisualOdom):
    """Monocular visual odometry via the essential matrix.

    Keeps a two-frame sliding window of frames/keypoints/descriptors and chains
    per-pair relative poses into a cumulative list of 4x4 transforms.
    Feature detection and matching are delegated to subclasses.
    """

    RANSAC_METHOD = cv2.RANSAC
    RANSAC_THRESHOLD = 0.75
    RANSAC_CONF = 0.999

    def __init__(self, intrinsic_mtx, nbr_features):
        self._K = intrinsic_mtx
        self._nbr_features = nbr_features  # more features -> more stable odometry
        self._kpts_buffer = deque(maxlen=2)
        self._desc_buffer = deque(maxlen=2)
        self._frames_buffer = deque(maxlen=2)
        self._camTFs = [np.eye(4)]  # cumulative pose chain, starts at identity

    # @timit
    def estimate_motion(self, img1Pts, img2Pts, K):
        """Recover the relative pose between two matched point sets and
        append the new cumulative transform to the pose chain."""
        E, _ = cv2.findEssentialMat(
            img1Pts, img2Pts, K,
            method=MonoCamVisualOdom.RANSAC_METHOD,
            prob=MonoCamVisualOdom.RANSAC_CONF,
            threshold=MonoCamVisualOdom.RANSAC_THRESHOLD
        )
        _, camR, camT, _ = cv2.recoverPose(E, img1Pts, img2Pts, K)
        # Build the inverse transform: world origin expressed in the camera frame.
        camTF = np.eye(4)
        camTF[:-1, :-1] = camR.T
        camTF[:-1, -1:] = -camR.T @ camT
        self._camTFs.append(self._camTFs[-1] @ camTF)

    @abstractmethod
    def get_features(self, frame:np.ndarray)-> tuple:
        """Return (keypoints, descriptors) for a grayscale frame."""

    @abstractmethod
    def match_features(self, desc1:list, desc2:list):
        """Return matches between two descriptor sets."""

    @staticmethod
    def get_matches(matches:list, img1Kpts:list, img2Kpts:list):
        """Convert cv2 match objects into two aligned Nx2 point arrays."""
        pts_query = np.array([img1Kpts[m.queryIdx].pt for m in matches])
        pts_train = np.array([img2Kpts[m.trainIdx].pt for m in matches])
        return pts_query, pts_train

    @property
    def trajectory(self):
        """Camera positions (translation columns) along the pose chain."""
        stacked = np.array(self._camTFs)
        return stacked[:, :3, 3]

    @property
    def cam_tf(self):
        """All accumulated 4x4 transforms as one array."""
        return np.array(self._camTFs)

    @timit
    def track_motion(self, frame):
        """Ingest one frame: detect/match features against the previous frame
        and update the pose chain."""
        if len(frame.shape) == 3:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        self._frames_buffer.append(frame)
        keypoints, descriptors = self.get_features(frame)
        self._kpts_buffer.append(keypoints)
        self._desc_buffer.append(descriptors)
        # Motion needs two buffered frames; the first call only primes the window.
        if len(self._frames_buffer) == self._frames_buffer.maxlen:
            matches = self.match_features(*self._desc_buffer)
            prev_pts, curr_pts = MonoCamVisualOdom.get_matches(matches, *self._kpts_buffer)
            self.estimate_motion(prev_pts, curr_pts, self._K)
class SiftOdom(MonoCamVisualOdom):
    """Odometry front-end using SIFT features with cross-checked L2 matching."""

    CONTRAST_THRESHOLD = 0.15
    EDGE_THRESHOLD = 15
    N_OCTAVE_LAYERS = 5

    def __init__(self, *args, **kwargs):
        super(SiftOdom, self).__init__(*args, **kwargs)
        # NOTE(review): detector kwargs are read here but *args/**kwargs are also
        # forwarded to the parent __init__, which only accepts the two positional
        # parameters — confirm callers pass intrinsic_mtx/nbr_features positionally.
        self._matcher = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
        self._sift = cv2.SIFT_create(
            nfeatures=kwargs.get('nfeatures', self._nbr_features),
            contrastThreshold=kwargs.get('contrastThreshold', SiftOdom.CONTRAST_THRESHOLD),
            edgeThreshold=kwargs.get('edgeThreshold', SiftOdom.EDGE_THRESHOLD),
            nOctaveLayers=kwargs.get('nOctaveLayers', SiftOdom.N_OCTAVE_LAYERS)
        )

    def get_features(self, frame:np.uint8):
        """Detect SIFT keypoints and compute their descriptors."""
        return self._sift.detectAndCompute(frame, None)

    def match_features(self, desc1:list, desc2:list):
        """Cross-checked matches sorted by distance; only the best 50 are kept."""
        candidates = self._matcher.match(desc1, desc2)
        return sorted(candidates, key=lambda m: m.distance)[:50]
class OrbOdom(MonoCamVisualOdom):
    """Odometry front-end using ORB features with cross-checked Hamming matching."""

    EDGE_THRESHOLD = 13
    SCORE_TYPE = cv2.ORB_FAST_SCORE

    def __init__(self, *args, **kwargs):
        super(OrbOdom, self).__init__(*args, **kwargs)
        # NOTE(review): detector kwargs are read here but *args/**kwargs are also
        # forwarded to the parent __init__, which only accepts the two positional
        # parameters — confirm callers pass intrinsic_mtx/nbr_features positionally.
        self._matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        self._orb = cv2.ORB_create(
            nfeatures=kwargs.get('nfeatures', self._nbr_features),
            edgeThreshold=kwargs.get('edgeThreshold', OrbOdom.EDGE_THRESHOLD),
            scoreType=kwargs.get('scoreType', OrbOdom.SCORE_TYPE),
        )

    def get_features(self, frame:np.uint8):
        """Detect ORB keypoints and compute their binary descriptors."""
        return self._orb.detectAndCompute(frame, None)

    def match_features(self, desc1:list, desc2:list):
        """All cross-checked matches sorted by ascending distance."""
        candidates = self._matcher.match(desc1, desc2)
        return sorted(candidates, key=lambda m: m.distance)
|
{"hexsha": "f13f6f0ce9f379ce5f0e1645b4249746ec5c31be", "size": 4742, "ext": "py", "lang": "Python", "max_stars_repo_path": "odom.py", "max_stars_repo_name": "loaywael/VisualOdom", "max_stars_repo_head_hexsha": "c090a78d7166ce12e0b526df0219015949c3de79", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "odom.py", "max_issues_repo_name": "loaywael/VisualOdom", "max_issues_repo_head_hexsha": "c090a78d7166ce12e0b526df0219015949c3de79", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "odom.py", "max_forks_repo_name": "loaywael/VisualOdom", "max_forks_repo_head_hexsha": "c090a78d7166ce12e0b526df0219015949c3de79", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6312056738, "max_line_length": 95, "alphanum_fraction": 0.6465626318, "include": true, "reason": "import numpy", "num_tokens": 1295}
|
import shutil
import sys
import subprocess
import glob
from tqdm import tqdm
import numpy as np
import os
import argparse
from PIL import Image
import torch
from torch import nn
import torch.nn.functional as F
import torchvision.models as models
import transforms as TF
import utils
import torchvision
C, H, W = 3, 112, 112
def extract_feats(params, model, load_img):
    """Extract per-video CNN features and save them as .npy files.

    For each directory of frames under params['video_path'], loads every .jpg
    frame, stacks them into a (N, C, 1, H, W) batch, runs the model under
    torch.no_grad() on GPU, and saves the squeezed features to
    params['output_dir']/<video>.npy. Videos with an existing output file
    (and one known-oversized video) are skipped.

    params: dict with 'output_dir' and 'video_path' keys.
    model: feature extractor; called as model(batch.cuda()).
    load_img: callable mapping an image path to a (C, H, W) tensor
              — assumed from usage; confirm against utils.LoadTransformImage.
    """
    global C, H, W
    model.eval()
    dir_fc = os.path.join(os.getcwd(), params['output_dir'])
    if not os.path.isdir(dir_fc):
        # makedirs handles nested output paths; os.mkdir would fail on them.
        os.makedirs(dir_fc)
    video_list = os.listdir(params['video_path'])
    # Renamed from `nn`, which shadowed the module-level `from torch import nn`.
    processed = 0
    total_len = len(video_list)
    for video in video_list:
        print("\n-->: ", video)
        processed = processed + 1
        dst = video
        if video == 'yz02dWv_shs':
            print(video, " is too large!")
            continue
        outfile = os.path.join(dir_fc, video + '.npy')
        if os.path.exists(outfile):
            print(video, " is already processed!")
            continue
        image_list = sorted(glob.glob(os.path.join(params['video_path'], dst, '*.jpg')))
        # samples = np.round(np.linspace(0, len(image_list) - 1, params['n_frame_steps']))
        # Sampling every frame: the linspace below is an identity mapping, kept
        # for parity with the original n_frame_steps-based subsampling.
        params_frames = len(image_list)
        samples = np.round(np.linspace(0, len(image_list) - 1, params_frames))
        image_list = [image_list[int(sample)] for sample in samples]
        # Temporal depth is fixed at 1, so the //1 and %1 arithmetic of the
        # original reduces to direct indexing.
        images = torch.zeros((len(image_list), C, 1, H, W))
        for idx, img_path in enumerate(image_list):
            images[idx, :, 0, :, :] = load_img(img_path)
        with torch.no_grad():
            fc_feats = model(images.cuda()).squeeze()
        img_feats = fc_feats.cpu().numpy()
        # Save the inception features
        np.save(outfile, img_feats)
        print("Process: ", processed, " / ", total_len, " ------- video id: ", video, " ------- save shape: ", img_feats.shape)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--gpu", dest='gpu', type=str, default='1',
                        help='Set CUDA_VISIBLE_DEVICES environment variable, optional')
    parser.add_argument("--output_dir", dest='output_dir', type=str,
                        default='./data/feats/r2plus1d', help='directory to store features')
    parser.add_argument("--n_frame_steps", dest='n_frame_steps', type=int, default=80,
                        help='how many frames to sampler per video')
    parser.add_argument("--video_path", dest='video_path', type=str,
                        default='./data/frames/', help='path to video dataset')
    parser.add_argument("--model", dest="model", type=str, default='r2plus1d_18',
                        help='the CNN model you want to use to extract_feats')
    args = parser.parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    params = vars(args)
    if params['model'] == 'r2plus1d_18':
        # Pretrained R(2+1)D backbone with its classification head stripped off.
        model = models.video.r2plus1d_18(pretrained=True)
        model = nn.Sequential(*list(model.children())[:-1])
        # Inference only: freeze all weights.
        for param in model.parameters():
            param.requires_grad = False
        T, C, H, W = 1, 3, 112, 112
        load_img = utils.LoadTransformImage()
    else:
        # BUG FIX: previously execution fell through after this message and
        # crashed below with a NameError on `model`; exit explicitly instead.
        print("doesn't support %s" % (params['model']))
        sys.exit(1)
    model = nn.DataParallel(model)
    model = model.cuda()
    extract_feats(params, model, load_img)
|
{"hexsha": "fc58defc84fbda476a6c5db952d810b3b18017e4", "size": 3516, "ext": "py", "lang": "Python", "max_stars_repo_path": "feat_script/extract_visual_feat/extract_3D_feat.py", "max_stars_repo_name": "GeWu-Lab/MUSIC-AVQA_CVPR2022", "max_stars_repo_head_hexsha": "f704130f37a342b5ff861780282c75cc875221b2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2022-03-24T04:01:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T15:10:11.000Z", "max_issues_repo_path": "feat_script/extract_visual_feat/extract_3D_feat.py", "max_issues_repo_name": "GeWu-Lab/MUSIC-AVQA", "max_issues_repo_head_hexsha": "f704130f37a342b5ff861780282c75cc875221b2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "feat_script/extract_visual_feat/extract_3D_feat.py", "max_forks_repo_name": "GeWu-Lab/MUSIC-AVQA", "max_forks_repo_head_hexsha": "f704130f37a342b5ff861780282c75cc875221b2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.1698113208, "max_line_length": 120, "alphanum_fraction": 0.598407281, "include": true, "reason": "import numpy", "num_tokens": 853}
|
[STATEMENT]
lemma proj_same_not_active:
assumes "n \<le> n'"
and "enat (n'-1) < llength t"
and "\<pi>\<^bsub>c\<^esub>(ltake n' t) = \<pi>\<^bsub>c\<^esub>(ltake n t)"
shows "\<nexists>k. k\<ge>n \<and> k<n' \<and> \<parallel>c\<parallel>\<^bsub>lnth t k\<^esub>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<not> (\<exists>k\<ge>n. k < n' \<and> \<parallel>c\<parallel>\<^bsub>lnth t k\<^esub>)
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<exists>k\<ge>n. k < n' \<and> \<parallel>c\<parallel>\<^bsub>lnth t k\<^esub> \<Longrightarrow> False
[PROOF STEP]
assume "\<exists>k. k\<ge>n \<and> k<n' \<and> \<parallel>c\<parallel>\<^bsub>lnth t k\<^esub>"
[PROOF STATE]
proof (state)
this:
\<exists>k\<ge>n. k < n' \<and> \<parallel>c\<parallel>\<^bsub>lnth t k\<^esub>
goal (1 subgoal):
1. \<exists>k\<ge>n. k < n' \<and> \<parallel>c\<parallel>\<^bsub>lnth t k\<^esub> \<Longrightarrow> False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<exists>k\<ge>n. k < n' \<and> \<parallel>c\<parallel>\<^bsub>lnth t k\<^esub>
[PROOF STEP]
obtain i where "i\<ge>n" and "i<n'" and "\<parallel>c\<parallel>\<^bsub>lnth t i\<^esub>"
[PROOF STATE]
proof (prove)
using this:
\<exists>k\<ge>n. k < n' \<and> \<parallel>c\<parallel>\<^bsub>lnth t k\<^esub>
goal (1 subgoal):
1. (\<And>i. \<lbrakk>n \<le> i; i < n'; \<parallel>c\<parallel>\<^bsub>lnth t i\<^esub>\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
n \<le> i
i < n'
\<parallel>c\<parallel>\<^bsub>lnth t i\<^esub>
goal (1 subgoal):
1. \<exists>k\<ge>n. k < n' \<and> \<parallel>c\<parallel>\<^bsub>lnth t k\<^esub> \<Longrightarrow> False
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
n \<le> i
i < n'
\<parallel>c\<parallel>\<^bsub>lnth t i\<^esub>
goal (1 subgoal):
1. \<exists>k\<ge>n. k < n' \<and> \<parallel>c\<parallel>\<^bsub>lnth t k\<^esub> \<Longrightarrow> False
[PROOF STEP]
from \<open>enat (n'-1)<llength t\<close> and \<open>i<n'\<close>
[PROOF STATE]
proof (chain)
picking this:
enat (n' - 1) < llength t
i < n'
[PROOF STEP]
have "i<llength t"
[PROOF STATE]
proof (prove)
using this:
enat (n' - 1) < llength t
i < n'
goal (1 subgoal):
1. enat i < llength t
[PROOF STEP]
by (metis diff_Suc_1 dual_order.strict_trans enat_ord_simps(2) lessE)
[PROOF STATE]
proof (state)
this:
enat i < llength t
goal (1 subgoal):
1. \<exists>k\<ge>n. k < n' \<and> \<parallel>c\<parallel>\<^bsub>lnth t k\<^esub> \<Longrightarrow> False
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
n \<le> i
i < n'
\<parallel>c\<parallel>\<^bsub>lnth t i\<^esub>
enat i < llength t
[PROOF STEP]
have "\<pi>\<^bsub>c\<^esub>(ltake (Suc i) t) =
(\<pi>\<^bsub>c\<^esub>(ltake i t)) @\<^sub>l ((\<sigma>\<^bsub>c\<^esub>(lnth t i)) #\<^sub>l []\<^sub>l)"
[PROOF STATE]
proof (prove)
using this:
n \<le> i
i < n'
\<parallel>c\<parallel>\<^bsub>lnth t i\<^esub>
enat i < llength t
goal (1 subgoal):
1. \<pi>\<^bsub>c\<^esub>ltake (enat (Suc i)) t = \<pi>\<^bsub>c\<^esub>ltake (enat i) t @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l []\<^sub>l)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<pi>\<^bsub>c\<^esub>ltake (enat (Suc i)) t = \<pi>\<^bsub>c\<^esub>ltake (enat i) t @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l []\<^sub>l)
goal (1 subgoal):
1. \<exists>k\<ge>n. k < n' \<and> \<parallel>c\<parallel>\<^bsub>lnth t k\<^esub> \<Longrightarrow> False
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
\<pi>\<^bsub>c\<^esub>ltake (enat (Suc i)) t = \<pi>\<^bsub>c\<^esub>ltake (enat i) t @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l []\<^sub>l)
goal (1 subgoal):
1. \<exists>k\<ge>n. k < n' \<and> \<parallel>c\<parallel>\<^bsub>lnth t k\<^esub> \<Longrightarrow> False
[PROOF STEP]
from \<open>i<n'\<close>
[PROOF STATE]
proof (chain)
picking this:
i < n'
[PROOF STEP]
have "Suc i \<le> n'"
[PROOF STATE]
proof (prove)
using this:
i < n'
goal (1 subgoal):
1. Suc i \<le> n'
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
Suc i \<le> n'
goal (1 subgoal):
1. \<exists>k\<ge>n. k < n' \<and> \<parallel>c\<parallel>\<^bsub>lnth t k\<^esub> \<Longrightarrow> False
[PROOF STEP]
hence "lprefix(\<pi>\<^bsub>c\<^esub>(ltake (Suc i) t)) (\<pi>\<^bsub>c\<^esub>(ltake n' t))"
[PROOF STATE]
proof (prove)
using this:
Suc i \<le> n'
goal (1 subgoal):
1. lprefix (\<pi>\<^bsub>c\<^esub>ltake (enat (Suc i)) t) (\<pi>\<^bsub>c\<^esub>ltake (enat n') t)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
lprefix (\<pi>\<^bsub>c\<^esub>ltake (enat (Suc i)) t) (\<pi>\<^bsub>c\<^esub>ltake (enat n') t)
goal (1 subgoal):
1. \<exists>k\<ge>n. k < n' \<and> \<parallel>c\<parallel>\<^bsub>lnth t k\<^esub> \<Longrightarrow> False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
lprefix (\<pi>\<^bsub>c\<^esub>ltake (enat (Suc i)) t) (\<pi>\<^bsub>c\<^esub>ltake (enat n') t)
[PROOF STEP]
obtain "tl" where "\<pi>\<^bsub>c\<^esub>(ltake n' t) = (\<pi>\<^bsub>c\<^esub>(ltake (Suc i) t)) @\<^sub>l tl"
[PROOF STATE]
proof (prove)
using this:
lprefix (\<pi>\<^bsub>c\<^esub>ltake (enat (Suc i)) t) (\<pi>\<^bsub>c\<^esub>ltake (enat n') t)
goal (1 subgoal):
1. (\<And>tl. \<pi>\<^bsub>c\<^esub>ltake (enat n') t = \<pi>\<^bsub>c\<^esub>ltake (enat (Suc i)) t @\<^sub>l tl \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using lprefix_conv_lappend
[PROOF STATE]
proof (prove)
using this:
lprefix (\<pi>\<^bsub>c\<^esub>ltake (enat (Suc i)) t) (\<pi>\<^bsub>c\<^esub>ltake (enat n') t)
lprefix ?xs ?ys = (\<exists>zs. ?ys = ?xs @\<^sub>l zs)
goal (1 subgoal):
1. (\<And>tl. \<pi>\<^bsub>c\<^esub>ltake (enat n') t = \<pi>\<^bsub>c\<^esub>ltake (enat (Suc i)) t @\<^sub>l tl \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<pi>\<^bsub>c\<^esub>ltake (enat n') t = \<pi>\<^bsub>c\<^esub>ltake (enat (Suc i)) t @\<^sub>l tl
goal (1 subgoal):
1. \<exists>k\<ge>n. k < n' \<and> \<parallel>c\<parallel>\<^bsub>lnth t k\<^esub> \<Longrightarrow> False
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
\<pi>\<^bsub>c\<^esub>ltake (enat n') t = \<pi>\<^bsub>c\<^esub>ltake (enat (Suc i)) t @\<^sub>l tl
goal (1 subgoal):
1. \<exists>k\<ge>n. k < n' \<and> \<parallel>c\<parallel>\<^bsub>lnth t k\<^esub> \<Longrightarrow> False
[PROOF STEP]
from \<open>n\<le>i\<close>
[PROOF STATE]
proof (chain)
picking this:
n \<le> i
[PROOF STEP]
have "lprefix(\<pi>\<^bsub>c\<^esub>(ltake n t)) (\<pi>\<^bsub>c\<^esub>(ltake i t))"
[PROOF STATE]
proof (prove)
using this:
n \<le> i
goal (1 subgoal):
1. lprefix (\<pi>\<^bsub>c\<^esub>ltake (enat n) t) (\<pi>\<^bsub>c\<^esub>ltake (enat i) t)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
lprefix (\<pi>\<^bsub>c\<^esub>ltake (enat n) t) (\<pi>\<^bsub>c\<^esub>ltake (enat i) t)
goal (1 subgoal):
1. \<exists>k\<ge>n. k < n' \<and> \<parallel>c\<parallel>\<^bsub>lnth t k\<^esub> \<Longrightarrow> False
[PROOF STEP]
hence "lprefix(\<pi>\<^bsub>c\<^esub>(ltake n t)) (\<pi>\<^bsub>c\<^esub>(ltake i t))"
[PROOF STATE]
proof (prove)
using this:
lprefix (\<pi>\<^bsub>c\<^esub>ltake (enat n) t) (\<pi>\<^bsub>c\<^esub>ltake (enat i) t)
goal (1 subgoal):
1. lprefix (\<pi>\<^bsub>c\<^esub>ltake (enat n) t) (\<pi>\<^bsub>c\<^esub>ltake (enat i) t)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
lprefix (\<pi>\<^bsub>c\<^esub>ltake (enat n) t) (\<pi>\<^bsub>c\<^esub>ltake (enat i) t)
goal (1 subgoal):
1. \<exists>k\<ge>n. k < n' \<and> \<parallel>c\<parallel>\<^bsub>lnth t k\<^esub> \<Longrightarrow> False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
lprefix (\<pi>\<^bsub>c\<^esub>ltake (enat n) t) (\<pi>\<^bsub>c\<^esub>ltake (enat i) t)
[PROOF STEP]
obtain "hd" where "\<pi>\<^bsub>c\<^esub>(ltake i t) = (\<pi>\<^bsub>c\<^esub>(ltake n t)) @\<^sub>l hd"
[PROOF STATE]
proof (prove)
using this:
lprefix (\<pi>\<^bsub>c\<^esub>ltake (enat n) t) (\<pi>\<^bsub>c\<^esub>ltake (enat i) t)
goal (1 subgoal):
1. (\<And>hd. \<pi>\<^bsub>c\<^esub>ltake (enat i) t = \<pi>\<^bsub>c\<^esub>ltake (enat n) t @\<^sub>l hd \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using lprefix_conv_lappend
[PROOF STATE]
proof (prove)
using this:
lprefix (\<pi>\<^bsub>c\<^esub>ltake (enat n) t) (\<pi>\<^bsub>c\<^esub>ltake (enat i) t)
lprefix ?xs ?ys = (\<exists>zs. ?ys = ?xs @\<^sub>l zs)
goal (1 subgoal):
1. (\<And>hd. \<pi>\<^bsub>c\<^esub>ltake (enat i) t = \<pi>\<^bsub>c\<^esub>ltake (enat n) t @\<^sub>l hd \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<pi>\<^bsub>c\<^esub>ltake (enat i) t = \<pi>\<^bsub>c\<^esub>ltake (enat n) t @\<^sub>l hd
goal (1 subgoal):
1. \<exists>k\<ge>n. k < n' \<and> \<parallel>c\<parallel>\<^bsub>lnth t k\<^esub> \<Longrightarrow> False
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
\<pi>\<^bsub>c\<^esub>ltake (enat (Suc i)) t = \<pi>\<^bsub>c\<^esub>ltake (enat i) t @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l []\<^sub>l)
\<pi>\<^bsub>c\<^esub>ltake (enat n') t = \<pi>\<^bsub>c\<^esub>ltake (enat (Suc i)) t @\<^sub>l tl
\<pi>\<^bsub>c\<^esub>ltake (enat i) t = \<pi>\<^bsub>c\<^esub>ltake (enat n) t @\<^sub>l hd
[PROOF STEP]
have "\<pi>\<^bsub>c\<^esub>(ltake n' t) =
(((\<pi>\<^bsub>c\<^esub>(ltake n t)) @\<^sub>l hd) @\<^sub>l ((\<sigma>\<^bsub>c\<^esub>(lnth t i)) #\<^sub>l []\<^sub>l)) @\<^sub>l tl"
[PROOF STATE]
proof (prove)
using this:
\<pi>\<^bsub>c\<^esub>ltake (enat (Suc i)) t = \<pi>\<^bsub>c\<^esub>ltake (enat i) t @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l []\<^sub>l)
\<pi>\<^bsub>c\<^esub>ltake (enat n') t = \<pi>\<^bsub>c\<^esub>ltake (enat (Suc i)) t @\<^sub>l tl
\<pi>\<^bsub>c\<^esub>ltake (enat i) t = \<pi>\<^bsub>c\<^esub>ltake (enat n) t @\<^sub>l hd
goal (1 subgoal):
1. \<pi>\<^bsub>c\<^esub>ltake (enat n') t = \<pi>\<^bsub>c\<^esub>ltake (enat n) t @\<^sub>l hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l []\<^sub>l) @\<^sub>l tl
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<pi>\<^bsub>c\<^esub>ltake (enat n') t = \<pi>\<^bsub>c\<^esub>ltake (enat n) t @\<^sub>l hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l []\<^sub>l) @\<^sub>l tl
goal (1 subgoal):
1. \<exists>k\<ge>n. k < n' \<and> \<parallel>c\<parallel>\<^bsub>lnth t k\<^esub> \<Longrightarrow> False
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
\<pi>\<^bsub>c\<^esub>ltake (enat n') t = \<pi>\<^bsub>c\<^esub>ltake (enat n) t @\<^sub>l hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l []\<^sub>l) @\<^sub>l tl
goal (1 subgoal):
1. \<exists>k\<ge>n. k < n' \<and> \<parallel>c\<parallel>\<^bsub>lnth t k\<^esub> \<Longrightarrow> False
[PROOF STEP]
have "\<dots> = ((\<pi>\<^bsub>c\<^esub>(ltake n t)) @\<^sub>l hd) @\<^sub>l ((\<sigma>\<^bsub>c\<^esub>(lnth t i)) #\<^sub>l tl)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<pi>\<^bsub>c\<^esub>ltake (enat n) t @\<^sub>l hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l []\<^sub>l) @\<^sub>l tl = \<pi>\<^bsub>c\<^esub>ltake (enat n) t @\<^sub>l hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l tl)
[PROOF STEP]
using lappend_snocL1_conv_LCons2[of "(\<pi>\<^bsub>c\<^esub>(ltake n t)) @\<^sub>l hd" "\<sigma>\<^bsub>c\<^esub>(lnth t i)"]
[PROOF STATE]
proof (prove)
using this:
\<pi>\<^bsub>c\<^esub>ltake (enat n) t @\<^sub>l hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l []\<^sub>l) @\<^sub>l ?ys = \<pi>\<^bsub>c\<^esub>ltake (enat n) t @\<^sub>l hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l ?ys)
goal (1 subgoal):
1. \<pi>\<^bsub>c\<^esub>ltake (enat n) t @\<^sub>l hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l []\<^sub>l) @\<^sub>l tl = \<pi>\<^bsub>c\<^esub>ltake (enat n) t @\<^sub>l hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l tl)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<pi>\<^bsub>c\<^esub>ltake (enat n) t @\<^sub>l hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l []\<^sub>l) @\<^sub>l tl = \<pi>\<^bsub>c\<^esub>ltake (enat n) t @\<^sub>l hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l tl)
goal (1 subgoal):
1. \<exists>k\<ge>n. k < n' \<and> \<parallel>c\<parallel>\<^bsub>lnth t k\<^esub> \<Longrightarrow> False
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
\<pi>\<^bsub>c\<^esub>ltake (enat n) t @\<^sub>l hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l []\<^sub>l) @\<^sub>l tl = \<pi>\<^bsub>c\<^esub>ltake (enat n) t @\<^sub>l hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l tl)
goal (1 subgoal):
1. \<exists>k\<ge>n. k < n' \<and> \<parallel>c\<parallel>\<^bsub>lnth t k\<^esub> \<Longrightarrow> False
[PROOF STEP]
have "\<dots> = (\<pi>\<^bsub>c\<^esub>(ltake n t)) @\<^sub>l (hd @\<^sub>l ((\<sigma>\<^bsub>c\<^esub>(lnth t i)) #\<^sub>l tl))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<pi>\<^bsub>c\<^esub>ltake (enat n) t @\<^sub>l hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l tl) = \<pi>\<^bsub>c\<^esub>ltake (enat n) t @\<^sub>l (hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l tl))
[PROOF STEP]
using lappend_assoc
[PROOF STATE]
proof (prove)
using this:
?xs @\<^sub>l ?ys @\<^sub>l ?zs = ?xs @\<^sub>l (?ys @\<^sub>l ?zs)
goal (1 subgoal):
1. \<pi>\<^bsub>c\<^esub>ltake (enat n) t @\<^sub>l hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l tl) = \<pi>\<^bsub>c\<^esub>ltake (enat n) t @\<^sub>l (hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l tl))
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<pi>\<^bsub>c\<^esub>ltake (enat n) t @\<^sub>l hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l tl) = \<pi>\<^bsub>c\<^esub>ltake (enat n) t @\<^sub>l (hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l tl))
goal (1 subgoal):
1. \<exists>k\<ge>n. k < n' \<and> \<parallel>c\<parallel>\<^bsub>lnth t k\<^esub> \<Longrightarrow> False
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
\<pi>\<^bsub>c\<^esub>ltake (enat n) t @\<^sub>l hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l tl) = \<pi>\<^bsub>c\<^esub>ltake (enat n) t @\<^sub>l (hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l tl))
goal (1 subgoal):
1. \<exists>k\<ge>n. k < n' \<and> \<parallel>c\<parallel>\<^bsub>lnth t k\<^esub> \<Longrightarrow> False
[PROOF STEP]
have "\<pi>\<^bsub>c\<^esub>(ltake n' t) = (\<pi>\<^bsub>c\<^esub>(ltake n' t)) @\<^sub>l []\<^sub>l"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<pi>\<^bsub>c\<^esub>ltake (enat n') t = \<pi>\<^bsub>c\<^esub>ltake (enat n') t @\<^sub>l []\<^sub>l
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<pi>\<^bsub>c\<^esub>ltake (enat n') t = \<pi>\<^bsub>c\<^esub>ltake (enat n') t @\<^sub>l []\<^sub>l
goal (1 subgoal):
1. \<exists>k\<ge>n. k < n' \<and> \<parallel>c\<parallel>\<^bsub>lnth t k\<^esub> \<Longrightarrow> False
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
\<pi>\<^bsub>c\<^esub>ltake (enat n') t @\<^sub>l []\<^sub>l = \<pi>\<^bsub>c\<^esub>ltake (enat n) t @\<^sub>l (hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l tl))
[PROOF STEP]
have "(\<pi>\<^bsub>c\<^esub>(ltake n' t)) @\<^sub>l []\<^sub>l = (\<pi>\<^bsub>c\<^esub>(ltake n t)) @\<^sub>l (hd @\<^sub>l ((\<sigma>\<^bsub>c\<^esub>(lnth t i)) #\<^sub>l tl))"
[PROOF STATE]
proof (prove)
using this:
\<pi>\<^bsub>c\<^esub>ltake (enat n') t @\<^sub>l []\<^sub>l = \<pi>\<^bsub>c\<^esub>ltake (enat n) t @\<^sub>l (hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l tl))
goal (1 subgoal):
1. \<pi>\<^bsub>c\<^esub>ltake (enat n') t @\<^sub>l []\<^sub>l = \<pi>\<^bsub>c\<^esub>ltake (enat n) t @\<^sub>l (hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l tl))
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
\<pi>\<^bsub>c\<^esub>ltake (enat n') t @\<^sub>l []\<^sub>l = \<pi>\<^bsub>c\<^esub>ltake (enat n) t @\<^sub>l (hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l tl))
goal (1 subgoal):
1. \<exists>k\<ge>n. k < n' \<and> \<parallel>c\<parallel>\<^bsub>lnth t k\<^esub> \<Longrightarrow> False
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
\<pi>\<^bsub>c\<^esub>ltake (enat n') t @\<^sub>l []\<^sub>l = \<pi>\<^bsub>c\<^esub>ltake (enat n) t @\<^sub>l (hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l tl))
goal (1 subgoal):
1. \<exists>k\<ge>n. k < n' \<and> \<parallel>c\<parallel>\<^bsub>lnth t k\<^esub> \<Longrightarrow> False
[PROOF STEP]
from assms(3)
[PROOF STATE]
proof (chain)
picking this:
\<pi>\<^bsub>c\<^esub>ltake (enat n') t = \<pi>\<^bsub>c\<^esub>ltake (enat n) t
[PROOF STEP]
have "llength (\<pi>\<^bsub>c\<^esub>(ltake n' t)) = llength (\<pi>\<^bsub>c\<^esub>(ltake n t))"
[PROOF STATE]
proof (prove)
using this:
\<pi>\<^bsub>c\<^esub>ltake (enat n') t = \<pi>\<^bsub>c\<^esub>ltake (enat n) t
goal (1 subgoal):
1. llength (\<pi>\<^bsub>c\<^esub>ltake (enat n') t) = llength (\<pi>\<^bsub>c\<^esub>ltake (enat n) t)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
llength (\<pi>\<^bsub>c\<^esub>ltake (enat n') t) = llength (\<pi>\<^bsub>c\<^esub>ltake (enat n) t)
goal (1 subgoal):
1. \<exists>k\<ge>n. k < n' \<and> \<parallel>c\<parallel>\<^bsub>lnth t k\<^esub> \<Longrightarrow> False
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
\<pi>\<^bsub>c\<^esub>ltake (enat n') t @\<^sub>l []\<^sub>l = \<pi>\<^bsub>c\<^esub>ltake (enat n) t @\<^sub>l (hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l tl))
llength (\<pi>\<^bsub>c\<^esub>ltake (enat n') t) = llength (\<pi>\<^bsub>c\<^esub>ltake (enat n) t)
[PROOF STEP]
have "lfinite (\<pi>\<^bsub>c\<^esub>(ltake n' t)) \<longrightarrow> []\<^sub>l = hd @\<^sub>l ((\<sigma>\<^bsub>c\<^esub>(lnth t i)) #\<^sub>l tl)"
[PROOF STATE]
proof (prove)
using this:
\<pi>\<^bsub>c\<^esub>ltake (enat n') t @\<^sub>l []\<^sub>l = \<pi>\<^bsub>c\<^esub>ltake (enat n) t @\<^sub>l (hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l tl))
llength (\<pi>\<^bsub>c\<^esub>ltake (enat n') t) = llength (\<pi>\<^bsub>c\<^esub>ltake (enat n) t)
goal (1 subgoal):
1. lfinite (\<pi>\<^bsub>c\<^esub>ltake (enat n') t) \<longrightarrow> []\<^sub>l = hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l tl)
[PROOF STEP]
using assms(3) lappend_eq_lappend_conv[of "\<pi>\<^bsub>c\<^esub>(ltake n' t)" "\<pi>\<^bsub>c\<^esub>(ltake n t)" "[]\<^sub>l"]
[PROOF STATE]
proof (prove)
using this:
\<pi>\<^bsub>c\<^esub>ltake (enat n') t @\<^sub>l []\<^sub>l = \<pi>\<^bsub>c\<^esub>ltake (enat n) t @\<^sub>l (hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l tl))
llength (\<pi>\<^bsub>c\<^esub>ltake (enat n') t) = llength (\<pi>\<^bsub>c\<^esub>ltake (enat n) t)
\<pi>\<^bsub>c\<^esub>ltake (enat n') t = \<pi>\<^bsub>c\<^esub>ltake (enat n) t
llength (\<pi>\<^bsub>c\<^esub>ltake (enat n') t) = llength (\<pi>\<^bsub>c\<^esub>ltake (enat n) t) \<Longrightarrow> (\<pi>\<^bsub>c\<^esub>ltake (enat n') t @\<^sub>l []\<^sub>l = \<pi>\<^bsub>c\<^esub>ltake (enat n) t @\<^sub>l ?vs) = (\<pi>\<^bsub>c\<^esub>ltake (enat n') t = \<pi>\<^bsub>c\<^esub>ltake (enat n) t \<and> (lfinite (\<pi>\<^bsub>c\<^esub>ltake (enat n') t) \<longrightarrow> []\<^sub>l = ?vs))
goal (1 subgoal):
1. lfinite (\<pi>\<^bsub>c\<^esub>ltake (enat n') t) \<longrightarrow> []\<^sub>l = hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l tl)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
lfinite (\<pi>\<^bsub>c\<^esub>ltake (enat n') t) \<longrightarrow> []\<^sub>l = hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l tl)
goal (1 subgoal):
1. \<exists>k\<ge>n. k < n' \<and> \<parallel>c\<parallel>\<^bsub>lnth t k\<^esub> \<Longrightarrow> False
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
lfinite (\<pi>\<^bsub>c\<^esub>ltake (enat n') t) \<longrightarrow> []\<^sub>l = hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l tl)
goal (1 subgoal):
1. \<exists>k\<ge>n. k < n' \<and> \<parallel>c\<parallel>\<^bsub>lnth t k\<^esub> \<Longrightarrow> False
[PROOF STEP]
have "lfinite (\<pi>\<^bsub>c\<^esub>(ltake n' t))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. lfinite (\<pi>\<^bsub>c\<^esub>ltake (enat n') t)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
lfinite (\<pi>\<^bsub>c\<^esub>ltake (enat n') t)
goal (1 subgoal):
1. \<exists>k\<ge>n. k < n' \<and> \<parallel>c\<parallel>\<^bsub>lnth t k\<^esub> \<Longrightarrow> False
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
lfinite (\<pi>\<^bsub>c\<^esub>ltake (enat n') t) \<longrightarrow> []\<^sub>l = hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l tl)
lfinite (\<pi>\<^bsub>c\<^esub>ltake (enat n') t)
[PROOF STEP]
have "[]\<^sub>l = hd @\<^sub>l ((\<sigma>\<^bsub>c\<^esub>(lnth t i)) #\<^sub>l tl)"
[PROOF STATE]
proof (prove)
using this:
lfinite (\<pi>\<^bsub>c\<^esub>ltake (enat n') t) \<longrightarrow> []\<^sub>l = hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l tl)
lfinite (\<pi>\<^bsub>c\<^esub>ltake (enat n') t)
goal (1 subgoal):
1. []\<^sub>l = hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l tl)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
[]\<^sub>l = hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l tl)
goal (1 subgoal):
1. \<exists>k\<ge>n. k < n' \<and> \<parallel>c\<parallel>\<^bsub>lnth t k\<^esub> \<Longrightarrow> False
[PROOF STEP]
hence "(\<sigma>\<^bsub>c\<^esub>(lnth t i)) #\<^sub>l tl = []\<^sub>l"
[PROOF STATE]
proof (prove)
using this:
[]\<^sub>l = hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l tl)
goal (1 subgoal):
1. \<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l tl = []\<^sub>l
[PROOF STEP]
using LNil_eq_lappend_iff
[PROOF STATE]
proof (prove)
using this:
[]\<^sub>l = hd @\<^sub>l (\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l tl)
([]\<^sub>l = ?xs @\<^sub>l ?ys) = (?xs = []\<^sub>l \<and> ?ys = []\<^sub>l)
goal (1 subgoal):
1. \<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l tl = []\<^sub>l
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l tl = []\<^sub>l
goal (1 subgoal):
1. \<exists>k\<ge>n. k < n' \<and> \<parallel>c\<parallel>\<^bsub>lnth t k\<^esub> \<Longrightarrow> False
[PROOF STEP]
thus False
[PROOF STATE]
proof (prove)
using this:
\<sigma>\<^bsub>c\<^esub>lnth t i #\<^sub>l tl = []\<^sub>l
goal (1 subgoal):
1. False
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
False
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 10274, "file": "DynamicArchitectures_Configuration_Traces", "length": 69}
|
# Aiyagari (1994) in Continuous Time
#### By [SeHyoun Ahn](http://www.princeton.edu/~sehyouna/) and [Benjamin Moll](http://www.princeton.edu/~moll/)
The material in this notebook is based on [Achdou et al. (2015) "Heterogeneous Agent Models in Continuous Time"](http://www.princeton.edu/~moll/HACT.pdf) and follows closely the material in the paper's [online Appendix](http://www.princeton.edu/~moll/HACTproject/HACT_Numerical_Appendix.pdf). Additional codes (mainly in MATLAB) can be found [here](http://www.princeton.edu/~moll/HACTproject.htm). The code and its performance should be compared to [QuantEcon's discrete-time version of the Aiyagari model](http://quant-econ.net/py/aiyagari.html). The structure of the code in the present notebook is purposely kept as close as possible to that of the discrete-time code. The continuous-time code is considerably faster.
We begin by importing some packages
```python
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from scipy import sparse
from scipy.sparse.linalg import spsolve
```
## Aiyagari Economy
The economy can be represented by the following system of equations which we aim to solve numerically:
$$
\begin{align*}
\rho v_1(a) &= \max_c \ u(c) + v_1'(a)(wz_1 + ra - c) + \lambda_1(v_2(a) - v_1(a))\\
\rho v_2(a) &= \max_c \ u(c) + v_2'(a)(wz_2 + ra - c) + \lambda_2(v_1(a) - v_2(a))\\
0 &= - \frac{d}{da}[s_1(a)g_1(a)] - \lambda_1 g_1(a) + \lambda_2 g_2(a)\\
0 &= - \frac{d}{da}[s_2(a)g_2(a)] - \lambda_2 g_2(a) + \lambda_1 g_1(a)\\
1 &= \int_{\underline{a}}^\infty g_1(a)da + \int_{\underline{a}}^\infty g_2(a)da\\
K &= \int_{\underline{a}}^\infty a g_1(a)da + \int_{\underline{a}}^\infty a g_2(a)da\\
r &= \alpha K^{\alpha-1} - \delta, \quad w=(1-\alpha)K^\alpha
\end{align*}
$$
where $z_1$ < $z_2$ and $s_j(a)=wz_j + ra -c_j(a)$ and $c_j(a) = (u')^{-1}(v_j(a))$ are optimal savings and consumption. Finally, there is a state constraint $a\geq \underline{a}$. The first order condition $u'(c_j(\underline{a}))=v'_j(\underline{a})$ still holds at the borrowing constraint. However, in order to respect the constraint we need $s_j(\underline{a}) = z_j + ra - c_j(\underline{a}) \geq 0$. Combining this with the FOC, the state constraint motivates a boundary condition
$$
\begin{align} v_j'(\underline{a}) \geq u'(z_j + r \underline{a}), \quad j=1,2 \end{align}
$$
We use a finite difference method, and in particular an "implicit upwind scheme." The details are explained in the paper's [online Appendix](http://www.princeton.edu/~moll/HACTproject/HACT_Numerical_Appendix.pdf). We here provide a brief summary. We approximate the functions $(v_1,v_2,g_1,g_2)$ at $I$ discrete points in the space dimension, $a_i,i=1,...,I$. We use equispaced grids, denote by $\Delta a$ the distance between grid points, and use the short-hand notation $v_{i,j} \equiv v_j(a_i)$ and so on. The derivative $v_{i,j}'=v_j'(a_i)$ is approximated with either a forward or a backward difference approximation
$$
\begin{align*}
v_j'(a_i) \approx \frac{v_{i+1,j} - v_{i,j}}{\Delta a} \equiv v_{i,j,F}' \\
v_j'(a_i) \approx \frac{v_{i,j} - v_{i-1,j}}{\Delta a} \equiv v_{i,j,B}'
\end{align*}
$$
An upwind scheme means that we approximate the derivative $v_j'(a_i)$ with a forward difference approximation whenever the drift of the state variable is positive and the backward difference approximation whenever it is negative. An implicit scheme is a particular way of iterating on the value function.
In a nutshell, the discretized HJB equation can be written as
$$\rho v = u + \mathbf{A} v$$
where $\mathbf{A}$ is $N \times N$ transition matrix with $N = 2 \times I$ and where $I$ is the number of wealth grid points. The matrix $\mathbf{A}$ depends on $v$, i.e. this is a nonlinear problem and we therefore need to iterate (this is where the implicit scheme comes in). The matrix $\mathbf{A}$ has the interpretation of a Poisson transition matrix (or "intensity matrix") on the discretized state space $(a_i,z_j)$.
Similarly, one can show that the discretized Kolmogorov Forward equation is
$$0 = \mathbf{A}^T g$$
which is an eigenvalue problem. That is, the discretized stationary distribution $g$ is the eigenvector corresponding to a zero eigenvalue of the transpose of the Poisson transition matrix $\mathbf{A}$. The transpose comes from the fact that the differential operator in the KF equation is the "adjoint" of the operator in the HJB equation. And an "adjoint" is the infinite-dimensional analogue of matrix transpose.
The matrix $\mathbf{A}$ is found from the discretized HJB equation. Skipping a number of steps it can be written as
$$
\begin{align*}
&\frac{v^{n+1}_{i,j} - v^{n}_{i,j}}{\Delta} + \rho v_{i,j}^{n+1} = u(c_{i,j}^n) + v_{i-1,j}^{n+1}x_{i,j} + v^{n+1}_{i,j} y_{i,j} + v_{i+1,j}^{n+1} z_{i,j} + v_{i,-j}^{n+1}\lambda_j \quad \mbox{where}\\
&x_{i,j} = -\frac{(s^n_{i,j,B})^-}{\Delta a},\\
&y_{i,j} = - \frac{(s^n_{i,j,F})^+}{\Delta a} + \frac{ (s^n_{i,j,B})^-}{\Delta a} - \lambda_j,\\
&z_{i,j} = \frac{(s^n_{i,j,F})^+}{\Delta a}
\end{align*}
$$
where $s^n_{i,j,F}$ and $s^n_{i,j,B}$ are the discretized optimal household savings at grid points $(a_i,z_j)$ using forward and backward approximations, and where for any number $x$, the notation $x^+$ means "the positive part of $x$", i.e. $x^+ = \max\{x,0\}$ and analogously $x^{-} = \min\{x,0\}$. This part is what makes it an upwind scheme.
This is a system of $2 \times I$ linear equations which can be written in matrix notation as:
\begin{equation}\frac{1}{\Delta}(v^{n+1} - v^n) + \rho v^{n+1} = u^n + \mathbf{A}^n v^{n+1} \end{equation}
where
$$\mathbf{A}^n = \left[\begin{matrix}y_{1,1} & z_{1,1} & 0 & \cdots & 0 & \lambda_1 & 0 & 0 & \cdots & 0 \\ x_{2,1} & y_{2,1} & z_{2,1} & 0 & \cdots & 0 & \lambda_1 & 0 & 0 & \cdots \\ 0 & x_{3,1} & y_{3,1} & z_{3,1} & 0 & \cdots & 0 & \lambda_1 & 0 & 0 \\ \vdots & \ddots & \ddots & \ddots & \ddots & \ddots & \ddots & \ddots & \ddots & \vdots \\ 0 & \ddots & \ddots & x_{I,1} & y_{I,1} & 0 & 0 & 0 & 0 & \lambda_1\\ \lambda_2 & 0 & 0 & 0 & 0 & y_{1,2} & z_{1,2} & 0 & 0 & 0 \\ 0 & \lambda_2 & 0 & 0 & 0 & x_{2,2} & y_{2,2} & z_{2,2} & 0 & 0 \\ 0 & 0 & \lambda_2 & 0 & 0 & 0 & x_{3,2} & y_{3,2} & z_{3,2} & 0 \\ 0 & 0 & \ddots & \ddots & \ddots & \ddots & \ddots & \ddots & \ddots & \ddots \\ 0 & \cdots & \cdots & 0 & \lambda_2 & 0 & \cdots & 0 & x_{I,2} & y_{I,2} \end{matrix}\right], \quad u^n = \left[\begin{matrix} u(c_{1,1}^n)\\ \vdots \\ \vdots \\ u(c_{I,1}^n)\\ u(c_{1,2}^n) \\ \vdots \\ \vdots \\ u(c_{I,2}^n)\end{matrix}\right]$$
This system can in turn be written as
$$
\begin{align*}\mathbf{B}^n v^{n+1} = b^n, \quad \quad \mathbf{B}^n = \left(\frac{1}{\Delta} + \rho\right)\mathbf{I} - \mathbf{A}^n, \quad b^n = u^n + \frac{1}{\Delta}v^n \end{align*}
$$
This system of linear equations can be solved very efficiently using sparse matrix routines. In Python this is implemented with the function "spsolve()."
#### Summary of Algorithm
First consider the algorithm for solving the HJB equations. Guess $v^0_{i,j},i=1,...,I,j=1,2$ and for $n=0,1,2,...$ follow
1. Compute $(v^n_{i,j})'$ using the current guess of the value function and the upwind scheme (forward difference when drift is positive, backward difference when drift is negative)
2. Compute $c^n$ from $c_{i,j}^n = (u')^{-1}[(v_{i,j}^n)']$
3. Find $v^{n+1}$ by solving the system of linear equations involving the matrix $\mathbf{A}$ described above (implicit scheme)
4. If $v^{n+1}$ is close enough to $v^n$: stop. Otherwise, go to step 1.
After solving the HJB equations, solving the Kolmogorov Forward equation: simply solve the eigenvalue problem $0=\mathbf{A}^T g$ described above. That is, once the HJB equation is solved, we basically get the Kolmogorov Forward equation "for free."
#### Overview of Code
Given this overview of the algorithm, we now briefly describe the code. As in the discrete-time version, we define a "household" class. Household objects contain all the data relevant to solving a household's decision problem. The household class contain:
* economic parameters (e.g., w, r)
* utility parameters (e.g., discount factor $\rho$)
* asset and skill level represented on a grid
The household's decision problem is solved by invoking the function solve_bellman() which solves the HJB equation given the relevant parameters saving the value-function as v.
We also include the stationary wealth distribution of households in the household object. This is a natural thing to do since this stationary distribution is computed using the household's decision rule. Hence, after computing the decision problem of households, the stationary distribution can be found by invoking compute_stationary_distribution()
The definition of the household class is given below.
```python
class Household(object):
# Continuous-time Aiyagari household.
#
# Bundles the model parameters, the discretized grids over assets (a) and
# income states (z), the value function v and the stationary distribution g.
# solve_bellman() solves the HJB equation with an implicit upwind
# finite-difference scheme; compute_stationary_distribution() then solves the
# discretized Kolmogorov Forward equation A^T g = 0 and returns the implied
# aggregate capital.
def __init__(self,
dep=0.05, ###ADDED DEPRECIATION
r=0.03, # interest rate
w=1, # wages
rho=0.04, # discount factor
a_min=1e-10, # minimum asset amount (borrowing constraint)
pi=[[-0.33, 0.33], [0.33, -0.33]], # poisson Jumps (intensity matrix; rows sum to 0)
z_vals=[1.0, 2.0], # exogenous income states
a_max=40, # maximum asset amount
a_size=1000, # number of asset grid points
delta=1000.0): # step size of the implicit scheme (can be large)
# NOTE(review): pi and z_vals are mutable default arguments; they are only
# read and copied via np.asarray below, so this is benign, but defaulting
# to None would be safer.
# Initialize values, and set up grids over a and z
self.r, self.w, self.rho, self.dep = r, w, rho, dep
self.a_min, self.a_max, self.a_size = a_min, a_max, a_size
self.da = (self.a_max-self.a_min)/(self.a_size-1) # asset grid spacing
self.k = 10 # NOTE(review): not used anywhere in this file -- confirm before removing
self.pi = np.asarray(pi)
self.z_vals = np.asarray(z_vals)
self.z_size = len(z_vals)
self.a_vals = np.linspace(self.a_min, self.a_max, self.a_size)
self.n = self.a_size * self.z_size # total number of (a, z) grid points
self.delta = delta
###### ADDED TO MATCH LABOR SUPPLY IN .m
# Stationary mean of the two-state income process (aggregate labor supply)
self.z_ave = (self.z_vals[0]*self.pi[0, 1] +
self.z_vals[1]*self.pi[1, 0]) / \
(self.pi[0, 1] + self.pi[1, 0])
# Initial Guess of Value Function: value of consuming income r*a + w*z
# forever under log utility, i.e. log(r*a + w*z) / rho
self.v = np.log(np.tile(self.a_vals,(self.z_size,1))*self.r
+self.w*np.tile(self.z_vals,(self.a_size,1)).transpose())/self.rho
# Build skill_transition, the matrix summarizing transitions due to the Poisson income shocks
# This is analogous to the Q matrix in the discrete time version of the QuantEcon Aiyagari model
self.z_transition = sparse.kron(self.pi,sparse.eye(self.a_size), format="csr")
# Preallocate the work arrays reused on every Bellman iteration
self.v_old = np.zeros((self.z_size,self.a_size)) # previous value function
self.g = np.zeros((self.z_size,self.a_size)) # stationary distribution
self.dv = np.zeros((self.z_size,self.a_size-1)) # forward differences of v
self.cf = np.zeros((self.z_size,self.a_size-1)) # consumption implied by the FOC
self.c0 = np.zeros((self.z_size,self.a_size)) # income / chosen consumption
self.ssf = np.zeros((self.z_size,self.a_size)) # savings, forward difference
self.ssb = np.zeros((self.z_size,self.a_size)) # savings, backward difference
self.is_forward = np.zeros((self.z_size,self.a_size),'bool') # upwind mask: forward diff
self.is_backward = np.zeros((self.z_size,self.a_size),'bool') # upwind mask: backward diff
self.diag_helper = np.zeros((self.z_size,self.a_size)) # scratch array for the diagonals of A
self.A = self.z_transition.copy() # Poisson transition (intensity) matrix
self.B = self.z_transition.copy() # implicit-scheme system matrix
self.AT = self.z_transition.copy() # transpose of A, for the KF equation
def set_prices(self, r, w):
"""
Reset prices and re-solve the Bellman equation.
Parameters:
-----------------
r : interest rate
w : wage
"""
self.r, self.w = r, w
self.solve_bellman()
def reinitialize_v(self):
"""
Reinitialize the value function to the consume-your-income guess.
Useful if value function iteration diverged and v became NaN.
"""
self.v = np.log(np.tile(self.a_vals,(self.z_size,1))*self.r
+self.w*np.tile(self.z_vals,(self.a_size,1)).transpose())/self.rho
def solve_bellman(self,maxiter=100,crit=1e-6):
"""
Solve the decision problem (HJB equation) with the given parameters,
using implicit upwind value function iteration.
Parameters:
-----------------
maxiter : maximum number of iterations before halting value function iteration
crit : convergence metric, stops if value function does not change more than crit
"""
dist=100.0
for i in range(maxiter):
# compute saving and consumption implied by current guess for value function, using upwind method
self.dv = (self.v[:,1:]-self.v[:,:-1])/self.da
self.cf = 1.0/self.dv # log utility FOC: u'(c) = 1/c = v'  =>  c = 1/v'
self.c0 = np.tile(self.a_vals,(self.z_size,1))*self.r \
+self.w*np.tile(self.z_vals,(self.a_size,1)).transpose()
# compute savings implied by the forward and the backward difference
self.ssf[:,:-1] = self.c0[:,:-1]-self.cf
self.ssb[:,1:] = self.c0[:,1:]-self.cf
# Note that the boundary conditions are handled implicitly as ssf will be zero at a_max and ssb at a_min
self.is_forward = self.ssf>0
self.is_backward = self.ssb<0
# Update consumption based on forward or backward difference based on direction of drift;
# where neither mask is set, c0 stays equal to income (zero saving)
self.c0[:,:-1] += (self.cf-self.c0[:,:-1])*self.is_forward[:,:-1]
self.c0[:,1:] += (self.cf-self.c0[:,1:])*self.is_backward[:,1:]
######
# UNCOMMENT FOR DEBUGGING
#plt.plot(self.a_vals, self.c0.transpose())
#plt.show()
self.c0 = np.log(self.c0) # log utility: overwrite c with u(c)
# Build the matrix A that summarizes the evolution of the process for (a,z)
# This is a Poisson transition matrix (aka intensity matrix) with rows adding up to zero
self.A = self.z_transition.copy()
# main diagonal: outflow rate from each (a, z) state
self.diag_helper = (-self.ssf*self.is_forward/self.da \
+ self.ssb*self.is_backward/self.da).reshape(self.n)
self.A += sparse.spdiags(self.diag_helper,0,self.n,self.n)
# subdiagonal: movement down the asset grid (negative drift)
self.diag_helper = (-self.ssb*self.is_backward/self.da).reshape(self.n)
self.A += sparse.spdiags(self.diag_helper[1:],-1,self.n,self.n)
# superdiagonal: movement up the asset grid (positive drift)
self.diag_helper = (self.ssf*self.is_forward/self.da).reshape(self.n)
self.A += sparse.spdiags(np.hstack((0,self.diag_helper)),1,self.n,self.n)
# Solve the system of linear equations corresponding to implicit finite difference scheme
self.B = sparse.eye(self.n)*(1/self.delta + self.rho) - self.A
self.b = self.c0.reshape(self.n,1) + self.v.reshape(self.n,1)/self.delta
self.v_old = self.v.copy()
self.v = spsolve(self.B,self.b).reshape(self.z_size,self.a_size)
# Compute convergence metric and stop if it satisfies the convergence criterion
dist = np.amax(np.absolute(self.v_old-self.v).reshape(self.n))
if dist < crit:
break
def compute_stationary_distribution(self):
"""
Solve for the stationary distribution given household decision rules.
Output:
Capital level (mean assets) under the stationary distribution
"""
self.AT = self.A.transpose().tocsr()
# The discretized Kolmogorov Forward equation AT*g=0 is an eigenvalue problem
# AT is singular because one of the equations is redundant (the distribution
# adds up to 1). Here we solve the eigenvalue problem by setting g(1,1)=0.1
# and the equation is solved relative to that value.
# Alternatively, one could use a routine for solving eigenvalue problems.
b = np.zeros((self.n,1))
b[0] = 0.1
# Overwrite the first row of AT (in CSR form) with the equation g[0] = 0.1:
# zero out its stored entries, then put a single 1.0 in column 0.
self.AT.data[1:self.AT.indptr[1]] = 0
self.AT.data[0] = 1.0
self.AT.indices[0] = 0
self.AT.eliminate_zeros()
self.g = spsolve(self.AT,b).reshape(self.z_size,self.a_size)
# Since g was solved taking one of g(1,1) as given, g needs to be
# renormalized to add up to 1
self.g = self.g/np.sum(self.g)
return np.sum(self.g*(np.tile(self.a_vals,(self.z_size,1))))
```
For example, with a discount rate of 0.05, an interest rate of 0.02, and a wage of 1, we can initialize a household, solve its decision problem, and find the stationary distribution by running
```python
# Symmetric Poisson switching intensity between the two income states
lam = 0.11
# Generator (intensity) matrix of the income process; rows sum to zero
PI = [[-lam, lam], [lam, -lam]]
am=Household(rho=0.05, r=0.02,w=1, pi=PI)
# Solve the HJB equation, then report aggregate capital implied by the
# stationary wealth distribution
am.solve_bellman()
am.compute_stationary_distribution()
```
0.69274641340853271
Once a household object is created, it can be reused to solve a different problem by changing parameters. For example, if the interest rate were to change to 0.03 and the wage to 0.9, the new problem can be solved by setting the parameters directly by
```python
am.r=0.02
am.w=0.9
```
and running
```python
am.solve_bellman()
am.compute_stationary_distribution()
```
0.62323720534758664
Alternatively, a helper function can be created to automate this process: the set_prices(r, w) method resets the parameters and re-solves the decision problem. For example, you can run the following to the same effect.
```python
am.set_prices(r=0.03,w=0.9)
am.compute_stationary_distribution()
```
1.129833308836365
Given the household class, solving for the steady state becomes really simple. To find the equilibrium, we solve for the capital level that is consistent with household's decisions.
Before solving for the actual steady state, we can visualize the capital demand and capital supply.
```python
A = 0.1
alpha = 0.33
def r_to_w(am, r):
    """Wage implied by the firm's first-order conditions at interest rate r.

    Uses the Cobb-Douglas technology with TFP ``A`` and capital share
    ``alpha``, and the depreciation rate stored on the household ``am.dep``.
    """
    capital_intensity = (alpha * A / (am.dep + r)) ** (alpha / (1 - alpha))
    return A * (1 - alpha) * capital_intensity
def rd(am, K):
    """Inverse capital demand: the interest rate at which firms demand K.

    Net marginal product of capital, with aggregate labor supply ``am.z_ave``.
    """
    return A * alpha * (am.z_ave / K) ** (1 - alpha) - am.dep
def prices_to_capital_stock(am, r):
    """
    Map prices to the induced level of capital stock.

    Parameters:
    ----------
    am : Household
        An instance of the Household class
    r : float
        The interest rate
    """
    # Wage consistent with r, then re-solve the household's Bellman equation...
    am.set_prices(r, r_to_w(am, r))
    # ...and aggregate assets under the resulting stationary distribution.
    return am.compute_stationary_distribution()
```
We make a grid of interest rate points, and plot the resulting capital.
```python
# Interest-rate grid over which to trace out the capital supply curve
num_points = 20
r_vals = np.linspace(0.02, 0.048, num_points)
# Compute supply of capital
# (each grid point re-solves the household problem and its stationary
# distribution, so this loop is the slow part)
k_vals = np.empty(num_points)
for i, r in enumerate(r_vals):
k_vals[i] = prices_to_capital_stock(am,r)
# Plot supply and demand of capital
fig, ax = plt.subplots(figsize=(11, 8))
ax.plot(k_vals, r_vals, lw=2, alpha=0.6, label='supply of capital')
ax.plot(k_vals, rd(am, k_vals), lw=2, alpha=0.6, label='demand for capital')
ax.grid()
ax.set_xlabel('capital')
ax.set_ylabel('interest rate')
ax.legend(loc='upper right')
plt.show()
```
Finally, the equilibrium interest rate can be found by using the bisection method, and we can see the equilibrium distribution of assets.
```python
# Set parameters for bisection method
crit = 1e-6
r_min = 0.02
r_max = 0.05
r = 0.03
# Bisection loop
# At each guess r: solve the household problem at prices (r, w(r)), take the
# implied aggregate capital supply, and map it through the firm's inverse
# demand rd(.) to get the interest rate consistent with that capital stock.
# Stop when the implied rate agrees with the guess.
for i in range(100):
am.set_prices(r,r_to_w(am, r))
r_new = rd(am, am.compute_stationary_distribution())
if np.absolute(r_new-r)<crit:
break
elif r_new > r:
# implied rate above the guess: the equilibrium lies in the upper half
r_min = r
r = (r_max+r_min)/2.
else:
# implied rate below the guess: the equilibrium lies in the lower half
r_max = r
r = (r_max+r_min)/2.
# Plot stationary distribution at the equilibrium
fig, ax = plt.subplots(figsize=(11, 8))
n=50 # Determine the max asset level to show in the plot
ax.plot(am.a_vals[0:n], am.g[0,0:n], lw=2, alpha=0.6, label='low income')
ax.plot(am.a_vals[0:n], am.g[1,0:n], lw=2, alpha=0.6, label='high income')
ax.grid()
ax.set_xlabel('asset position')
ax.set_ylabel('distribution')
ax.legend(loc='upper right')
plt.show()
```
```python
am.r
```
0.04605979919433595
|
{"hexsha": "8cf60ff625fbf1aa0b4fc7f89866c4977734787f", "size": 94602, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "aiyagari_continuous_time.ipynb", "max_stars_repo_name": "a-parida12/QuantEcon.notebooks", "max_stars_repo_head_hexsha": "b8794ae7d869a0cbc585b56e2c71cefcd2d9cdc6", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 3266, "max_stars_repo_stars_event_min_datetime": "2017-08-06T16:51:46.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T07:34:24.000Z", "max_issues_repo_path": "quanteconomics/aiyagari_continuous_time.ipynb", "max_issues_repo_name": "nuhaltinsoy/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials", "max_issues_repo_head_hexsha": "6017441f2d476f9c6c568dd886da43c6c0fd89bd", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 150, "max_issues_repo_issues_event_min_datetime": "2017-08-28T14:59:36.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T23:21:35.000Z", "max_forks_repo_path": "quanteconomics/aiyagari_continuous_time.ipynb", "max_forks_repo_name": "nuhaltinsoy/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials", "max_forks_repo_head_hexsha": "6017441f2d476f9c6c568dd886da43c6c0fd89bd", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1449, "max_forks_repo_forks_event_min_datetime": "2017-08-06T17:40:59.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T12:03:24.000Z", "avg_line_length": 155.5953947368, "max_line_length": 34860, "alphanum_fraction": 0.840468489, "converted": true, "num_tokens": 5797}
|
import jax
import jax.numpy as np
@jax.jit
def interp_dim(x_new, x, y):
    """Batched 1-D linear interpolation.

    Applies ``np.interp`` independently along the leading axis: row ``i``
    of the result interpolates ``x_new[i]`` against the sample points
    ``(x[i], y[i])``.
    """
    row_interp = jax.vmap(np.interp, in_axes=(0, 0, 0))
    return row_interp(x_new, x, y)
def searchsorted(bin_locations, inputs, eps=1e-6):
    """Return, for each input value, the index of the bin it falls into.

    Parameters
    ----------
    bin_locations : array whose last axis holds ascending bin edges.
    inputs : values to bin; compared against the edges by broadcasting.
    eps : small positive shift added to every edge so that a value lying
        exactly on an edge (including 0 on the first edge) is counted as
        below it and stays in the lower bin.

    Returns
    -------
    Integer array with the shape of ``inputs``, counting how many shifted
    edges each value is >= (its bin index).
    """
    # Shift the edges slightly upward to break ties at the bin boundaries.
    edges = bin_locations + eps
    # Broadcast-compare every input against every edge and count the hits
    # along the edge axis; this is a vectorized (bisection-free) bin lookup.
    return np.sum(inputs[..., None] >= edges, axis=-1)
|
{"hexsha": "5f9ac71d77b422d0c231819f83fadc6fd9ded747", "size": 561, "ext": "py", "lang": "Python", "max_stars_repo_path": "rbig_jax/utils.py", "max_stars_repo_name": "jejjohnson/rbig_jax", "max_stars_repo_head_hexsha": "112e064d5b62631aa03b7563c9eb9f115ab23eb0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "rbig_jax/utils.py", "max_issues_repo_name": "jejjohnson/rbig_jax", "max_issues_repo_head_hexsha": "112e064d5b62631aa03b7563c9eb9f115ab23eb0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rbig_jax/utils.py", "max_forks_repo_name": "jejjohnson/rbig_jax", "max_forks_repo_head_hexsha": "112e064d5b62631aa03b7563c9eb9f115ab23eb0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.3913043478, "max_line_length": 68, "alphanum_fraction": 0.6577540107, "include": true, "reason": "import jax", "num_tokens": 160}
|
#**
# Expected-energy-not-served (EENS) cost of a piece of equipment.
#
# For every capacity state in the equipment's capacity-probability table, the
# per-unit curtailment level is matched against the wind power/curtailed-energy
# data and the corresponding curtailed energy is weighted by the state's
# probability. The total is converted to a cost via the NPV horizon and the
# operating energy price ks.E_op.
#
# Arguments:
#   eqp - equipment description; reads eqp.mva (rated power) and eqp.wnd
#         (index into wind_module.wind_profs). TODO confirm field semantics
#         against the eqp struct definition (not visible in this file).
#   ks  - cost constants; only ks.E_op (energy price) is read here.
# Returns the scalar EENS cost.
#
# NOTE(review): relies on the module-level wind_module and npv_years(),
# neither defined in this file -- confirm they are in scope where included.
function cost_eens(eqp,ks)
cp_tbl=cap_prob_table(eqp)#make capacity probability table
eens_all=[]#Create eens array
eens=0.0
for i=1:length(cp_tbl[:,1])#loop through rows of cpt
ratio_curt=cp_tbl[i,1]/eqp.mva#find PU curtailment ratio
diff=wind_module.wind_profs[eqp.wnd].pu.-ratio_curt#find closest PU of wind power series to PU curtail ratio
#i_min=argmin(sqrt.((diff[:]).^2))diff[5240]
i_min=argmin(abs.((diff[:])))
#i_min=argmin(diff[:])
if ratio_curt>=1#check if curt ratio is at or above full power and set ce=curtailed energy to 0
ce=wind_module.wind_profs[eqp.wnd].ce[1]
elseif ratio_curt<=0#check if curt ratio is at or below zero power and set ce to max
ce=wind_module.wind_profs[eqp.wnd].ce[length(wind_module.wind_profs[eqp.wnd].ce)]
#interpolation is not needed it is already accurate to several decimal points
#=elseif i_min < length(diff) && diff[i_min]<0#if curt ratio is a mid point interpolate ce
ce=interpolate(ratio_curt,eqp.wnd.pu[i_min],eqp.wnd.pu[i_min+1],eqp.wnd.ce[i_min],eqp.wnd.ce[i_min+1])
elseif i_min > 1 && diff[i_min]>0
ce=interpolate(ratio_curt,eqp.wnd.pu[i_min-1],eqp.wnd.pu[i_min],eqp.wnd.ce[i_min-1],eqp.wnd.ce[i_min])=#
else#if exact match occurs
ce=wind_module.wind_profs[eqp.wnd].ce[i_min]
end
push!(eens_all, ce*eqp.mva*cp_tbl[i,2])#multiply PU curtailed energy with max power and availability, then store
end
eens=sum(eens_all)*npv_years()*ks.E_op#sum all eens and multiply by cost factors
return eens
end
#The calculation of equipment level capacity probability table **
#**
# Build the equipment-level capacity-probability table.
#
# Unit availability is derived from the failure rate and mean-time-to-repair
# (eqp.relia.fr, eqp.relia.mttr). All 2^eqp.num on/off combinations of the
# identical units are enumerated with blank_table; for each combination the
# delivered power (capped at eqp.mva) and its probability are computed, and
# probabilities of equal power levels are aggregated.
#
# Returns a two-column matrix [power_level probability] on success.
#
# NOTE(review): when the probability-sum check fails, the function only
# prints and falls through, implicitly returning `nothing` (the error() call
# is commented out) -- callers such as cost_eens would then fail. Also the
# guard `maximum(tbl_c1) > eqp.mva && minimum(tbl_c1) > 1` looks suspicious
# (perhaps `minimum(tbl_c1) < 0` was intended) -- confirm before changing.
function cap_prob_table(eqp)
#Calculate Availability of equipment
A_eqp=1.0/(1.0+eqp.relia.fr*(eqp.relia.mttr*30.0*24.0/8760.0))
#Create combinatorial matrix of 0s and 1s
clms=trunc(Int,eqp.num)
rows=trunc(Int, 2.0^clms)
empty_tbl=blank_table(rows,clms)
#Create blank power and availability tables
PWR_tbl=zeros(Float32,rows,1)
AVL_tbl=ones(Float32,rows,1)
#Set powers and availabilities by looping through the CPT
for k=1:clms
for j=1:rows
#if 1 the equipment is functional and the power is added to total
#the availability is multiplied
if trunc(Int,empty_tbl[j,k])==1
AVL_tbl[j]=AVL_tbl[j]*A_eqp
PWR_tbl[j]=min(eqp.mva,PWR_tbl[j]+eqp.elec.mva)
#if 0 the equipment is broken and no power is transmitted
else
AVL_tbl[j]=AVL_tbl[j]*(1-A_eqp)
end
end
end
#all unique power levels are extracted
tbl_c1=unique(PWR_tbl)
tbl_c2=zeros(Float32,length(tbl_c1),1)
for k=1:length(tbl_c1)
for j=1:length(AVL_tbl)
#Availabilities are summed for common power levels
if PWR_tbl[j]==tbl_c1[k]
tbl_c2[k]=tbl_c2[k]+AVL_tbl[j]
end
end
end
#Checks if probability sums to 1 else error is thrown
if sum(tbl_c2) > 1.0001 || sum(tbl_c2) < 0.9999
println("sum is: "*string(sum(tbl_c2)))
#error("probability does not sum to 1")
elseif maximum(tbl_c1) > eqp.mva && minimum(tbl_c1) > 1
#error("power is not correct")
else
return [tbl_c1 tbl_c2]
end
end
#creates a blank capacity probability table **
#**
# Build the rows-by-clms matrix of unit on/off combinations.
#
# Column c alternates runs of ones and zeros of length 2^(clms - c),
# starting with ones, e.g. for clms = 2:
#   1 1
#   1 0
#   0 1
#   0 0
# Assumes rows == 2^clms, as produced by cap_prob_table.
function blank_table(rows,clms)
    combos = zeros(Int8, rows, clms)
    for c = 1:clms
        run_len = 2^(clms - c)
        for r = 1:rows
            # A cell is 1 exactly when its row falls in an even-numbered run.
            if iseven(div(r - 1, run_len))
                combos[r, c] = Int8(1)
            end
        end
    end
    return combos
end
#linearly interpolates 2 points of graph **
#**
function interpolate(true_x,min_x,max_x,min_y,max_y)
    # Fit the straight line through (min_x, min_y) and (max_x, max_y),
    # then evaluate it at true_x.
    gradient=(max_y-min_y)/(max_x-min_x)
    intercept=min_y-gradient*min_x
    return gradient*true_x+intercept
end
|
{"hexsha": "0af769a885101963c50058e1c61956cc4ae7599b", "size": 4053, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/economics/eens/functions.jl", "max_stars_repo_name": "sdwhardy/cordoba.jl", "max_stars_repo_head_hexsha": "49de8a6a5862c6ee9a70f241a498e0a48ef41eed", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/economics/eens/functions.jl", "max_issues_repo_name": "sdwhardy/cordoba.jl", "max_issues_repo_head_hexsha": "49de8a6a5862c6ee9a70f241a498e0a48ef41eed", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/economics/eens/functions.jl", "max_forks_repo_name": "sdwhardy/cordoba.jl", "max_forks_repo_head_hexsha": "49de8a6a5862c6ee9a70f241a498e0a48ef41eed", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.0588235294, "max_line_length": 120, "alphanum_fraction": 0.6469282013, "num_tokens": 1264}
|
"""
pyart.aux_io.d3r_gcpex_nc
=========================
Routines for reading GCPEX D3R files.
.. autosummary::
:toctree: generated/
read_d3r_gcpex_nc
_ncvar_to_dict
"""
import datetime
import numpy as np
import netCDF4
from ..config import FileMetadata
from ..io.common import make_time_unit_str, _test_arguments
from ..core.radar import Radar
# Default mapping from D3R file variable names to Py-ART field names; used by
# read_d3r_gcpex_nc when no ``field_names`` argument is supplied.
D3R_FIELD_NAMES = {
    # corrected reflectivity, horizontal
    'Reflectivity': 'reflectivity',
    # corrected reflectivity, vertical
    # NOTE(review): maps to the same Py-ART name as 'Reflectivity' above, so
    # if both variables are present in a file, whichever is read last will
    # overwrite the other in radar.fields -- confirm whether a distinct name
    # (e.g. a vertical-polarization field) was intended.
    'DBZV': 'reflectivity',
    # differential reflectivity
    'DifferentialReflectivity': 'differential_reflectivity',
    'CrossPolCorrelation': 'cross_correlation_ratio',
    'ClutterPowerH': 'clutter_power_h',
    'ClutterPowerV': 'clutter_power_v',
    'DifferentialPhase': 'differential_phase',
    'KDP': 'specific_differential_phase',
    'NormalizedCoherentPower': 'normalized_coherent_power',
    'Signal+Clutter_toNoise_H': 'signal_to_noise_ratio',
    'Velocity': 'velocity',
    'SpectralWidth': 'spectrum_width',
    'SignalPower_H': 'signal_power_h',
}
def read_d3r_gcpex_nc(filename, field_names=None, additional_metadata=None,
                      file_field_names=False, exclude_fields=None, **kwargs):
    """
    Read a D3R GCPEX netCDF file.

    Parameters
    ----------
    filename : str
        Name of the D3R GCPEX netCDF file to read.
    field_names : dict, optional
        Dictionary mapping D3R field names to radar field names. If a
        data type found in the file does not appear in this dictionary or has
        a value of None it will not be placed in the radar.fields dictionary.
        A value of None, the default, will use the mapping defined in
        ``D3R_FIELD_NAMES``.
    additional_metadata : dict of dicts, optional
        Dictionary of dictionaries to retrieve metadata from during this read.
        This metadata is not used during any successive file reads unless
        explicitly included. A value of None, the default, will not
        introduct any addition metadata and the file specific or default
        metadata as specified by the Py-ART configuration file will be used.
    file_field_names : bool, optional
        True to use the file data type names for the field names. If this
        case the field_names parameter is ignored. The field dictionary will
        likely only have a 'data' key, unless the fields are defined in
        `additional_metadata`.
    exclude_fields : list or None, optional
        List of fields to exclude from the radar object. This is applied
        after the `file_field_names` and `field_names` parameters.

    Returns
    -------
    radar : Radar
        Radar object containing data from the D3R GCPEX netCDF file.
    """
    # TODO before moving to pyart.io
    # * unit test
    # * add default field mapping, etc to default config
    # * auto-detect file type with pyart.io.read function
    # * instrument parameters
    # * add additional checks for HOW attributes
    # * support for other objects (SCAN, XSEC)

    # test for non empty kwargs
    _test_arguments(kwargs)

    # create metadata retrieval object
    if field_names is None:
        field_names = D3R_FIELD_NAMES
    filemetadata = FileMetadata('cfradial', field_names, additional_metadata,
                                file_field_names, exclude_fields)

    # read the data
    ncobj = netCDF4.Dataset(filename)
    ncvars = ncobj.variables

    # One sweep per file
    nsweeps = 1

    # latitude, longitude and altitude
    latitude = filemetadata('latitude')
    longitude = filemetadata('longitude')
    altitude = filemetadata('altitude')
    latitude['data'] = np.array([ncobj.Latitude], dtype='float64')
    longitude['data'] = np.array([ncobj.Longitude], dtype='float64')
    # NOTE(review): altitude is hard-coded to 295 (presumably metres for the
    # GCPEX site) rather than read from the file -- confirm.
    altitude['data'] = np.array([295.], dtype='float64')

    # metadata
    metadata = filemetadata('metadata')
    # NOTE(review): 'source' is assigned twice; the second assignment
    # ("Chandra") silently overwrites the first -- confirm which is intended.
    metadata['source'] = "Colorado State EE - chandrasekar"
    metadata['original_container'] = 'D3R_gcpex_nc'
    metadata['nc_conventions'] = ncobj.NetCDFRevision
    metadata['version'] = ncobj.NetCDFRevision
    metadata['source'] = "Chandra"
    metadata['system'] = ncobj.RadarName
    metadata['software'] = ncobj.NetCDFRevision
    metadata['sw_version'] = ncobj.NetCDFRevision

    # sweep_start_ray_index, sweep_end_ray_index
    sweep_start_ray_index = filemetadata('sweep_start_ray_index')
    sweep_end_ray_index = filemetadata('sweep_end_ray_index')
    # with a single sweep this reduces to start index 0 and end index nrays-1
    rays_per_sweep = np.shape(ncvars['Azimuth'][:])
    ssri = np.cumsum(np.append([0], rays_per_sweep[:-1])).astype('int32')
    seri = np.cumsum(rays_per_sweep).astype('int32') - 1
    sweep_start_ray_index['data'] = ssri
    sweep_end_ray_index['data'] = seri

    # sweep_number
    sweep_number = filemetadata('sweep_number')
    sweep_number['data'] = np.arange(nsweeps, dtype='int32')

    # sweep_mode
    sweep_mode = filemetadata('sweep_mode')
    sweep_mode['data'] = np.array(nsweeps * ['azimuth_surveillance'])

    # scan_type: ScanType == 2 marks a PPI scan, anything else is treated
    # as an RHI scan.
    if ncobj.ScanType == 2:
        scan_type = 'ppi'
    else:
        scan_type = 'rhi'

    # fixed_angle: elevation of the first ray for PPI scans, azimuth of the
    # first ray for RHI scans.
    fixed_angle = filemetadata('fixed_angle')
    if ncobj.ScanType == 2:
        sweep_el = ncvars['Elevation'][0]
    else:
        sweep_el = ncvars['Azimuth'][0]
    fixed_angle['data'] = np.array([sweep_el], dtype='float32')

    # elevation
    elevation = filemetadata('elevation')
    # NOTE(review): unlike 'azimuth' below, the raw netCDF variable is stored
    # here without slicing with [:] -- confirm a materialized array is not
    # required by the Radar object.
    elevation['data'] = ncvars['Elevation']

    # range
    _range = filemetadata('range')
    # check that the gate spacing is constant between sweeps
    rstart = ncvars['StartRange'][:]
    if any(rstart != rstart[0]):
        raise ValueError('range start changes between sweeps')
    rscale = ncvars['GateWidth'][:]/1000.
    if any(rscale != rscale[0]):
        raise ValueError('range scale changes between sweeps')
    nbins = ncobj.NumGates
    # NOTE(review): rscale is divided by 1000 while rstart is multiplied by
    # 1000; the units (km vs m) look inconsistent -- verify against the file
    # conventions for StartRange/GateWidth before relying on 'data' here.
    _range['data'] = (np.arange(nbins, dtype='float32') * rscale[0] +
                      rstart[0] * 1000.)
    _range['meters_to_center_of_first_gate'] = rstart[0]
    _range['meters_between_gates'] = float(rscale[0])

    # azimuth
    azimuth = filemetadata('azimuth')
    azimuth['data'] = ncvars['Azimuth'][:]

    # time: the file stores an epoch timestamp per ray plus a scan start
    # timestamp in ncobj.Time; store per-ray offsets from the start.
    _time = filemetadata('time')
    start_time = datetime.datetime.utcfromtimestamp(ncobj.Time)
    _time['units'] = make_time_unit_str(start_time)
    _time['data'] = (ncvars['Time']-ncobj.Time).astype('float32')

    # fields
    # all variables with dimensions of 'Radial', 'Gate' are fields
    keys = [k for k, v in ncvars.items()
            if v.dimensions == ('Radial', 'Gate')]
    fields = {}
    for key in keys:
        field_name = filemetadata.get_field_name(key)
        if field_name is None:
            # unmapped variables keep their file name unless excluded
            if exclude_fields is not None and key in exclude_fields:
                continue
            field_name = key
        fields[field_name] = _ncvar_to_dict(ncvars[key])

    # instrument_parameters
    instrument_parameters = None

    return Radar(
        _time, _range, fields, metadata, scan_type,
        latitude, longitude, altitude,
        sweep_number, sweep_mode, fixed_angle, sweep_start_ray_index,
        sweep_end_ray_index,
        azimuth, elevation,
        instrument_parameters=instrument_parameters)
def _ncvar_to_dict(ncvar):
""" Convert a NetCDF Dataset variable to a dictionary. """
# copy all attribute except for scaling parameters
d = dict((k, getattr(ncvar, k)) for k in ncvar.ncattrs()
if k not in ['scale_factor', 'add_offset'])
d['data'] = ncvar[:]
if np.isscalar(d['data']):
# netCDF4 1.1.0+ returns a scalar for 0-dim array, we always want
# 1-dim+ arrays with a valid shape.
d['data'] = np.array(d['data'])
d['data'].shape = (1, )
return d
|
{"hexsha": "a3bc28e5a8c0d72f58acf48731b481f1956f676f", "size": 7742, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyart/aux_io/d3r_gcpex_nc.py", "max_stars_repo_name": "josephhardinee/pyart", "max_stars_repo_head_hexsha": "909cd4a36bb4cae34349294d2013bc7ad71d0969", "max_stars_repo_licenses": ["OLDAP-2.6", "Python-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pyart/aux_io/d3r_gcpex_nc.py", "max_issues_repo_name": "josephhardinee/pyart", "max_issues_repo_head_hexsha": "909cd4a36bb4cae34349294d2013bc7ad71d0969", "max_issues_repo_licenses": ["OLDAP-2.6", "Python-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyart/aux_io/d3r_gcpex_nc.py", "max_forks_repo_name": "josephhardinee/pyart", "max_forks_repo_head_hexsha": "909cd4a36bb4cae34349294d2013bc7ad71d0969", "max_forks_repo_licenses": ["OLDAP-2.6", "Python-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.1057268722, "max_line_length": 78, "alphanum_fraction": 0.6679152674, "include": true, "reason": "import numpy", "num_tokens": 1942}
|
[STATEMENT]
lemma chainI:
assumes "Y 0 = false" "\<And> i. Y (Suc i) \<sqsubseteq> Y i"
shows "chain Y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. chain Y
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
Y 0 = false
Y (Suc ?i) \<sqsubseteq> Y ?i
goal (1 subgoal):
1. chain Y
[PROOF STEP]
by (auto simp add: chain_def)
|
{"llama_tokens": 153, "file": "UTP_utp_utp_recursion", "length": 2}
|
"""Generation and plot of an events file : the neurospin/localizer events.
==========================================================================
The protocol described is the simplified version of the so-called
"archi standard" localizer event sequence.
See Pinel et al., BMC neuroscience 2007 for reference.
"""
print(__doc__)
#########################################################################
# Define the onset times in seconds. Those are typically extracted
# from the stimulation software used.
import numpy as np
onset = np.array([
0., 2.4, 8.7, 11.4, 15., 18., 20.7, 23.7, 26.7, 29.7, 33., 35.4, 39.,
41.7, 44.7, 48., 56.4, 59.7, 62.4, 69., 71.4, 75., 83.4, 87., 89.7,
96., 108., 116.7, 119.4, 122.7, 125.4, 131.4, 135., 137.7, 140.4,
143.4, 146.7, 149.4, 153., 156., 159., 162., 164.4, 167.7, 170.4,
173.7, 176.7, 188.4, 191.7, 195., 198., 201., 203.7, 207., 210.,
212.7, 215.7, 218.7, 221.4, 224.7, 227.7, 230.7, 234., 236.7, 246.,
248.4, 251.7, 254.7, 257.4, 260.4, 264., 266.7, 269.7, 275.4, 278.4,
284.4, 288., 291., 293.4, 296.7])
#########################################################################
# Associated trial types: these are numbered between 0 and 5, hence
# correspond to 6 different conditions.
trial_idx = np.array(
[3, 3, 0, 2, 5, 3, 5, 2, 3, 5, 1, 2, 4, 4, 2, 2, 4, 0, 2, 3, 3, 4, 2,
2, 5, 1, 2, 3, 5, 1, 3, 4, 2, 2, 1, 2, 5, 0, 3, 1, 4, 2, 3, 4, 2, 2,
0, 0, 2, 4, 3, 3, 1, 1, 1, 3, 3, 0, 3, 0, 3, 2, 3, 5, 4, 0, 2, 2, 2,
3, 1, 0, 0, 3, 1, 5, 4, 3, 5, 5])
#########################################################################
# We may want to map these indices to explicit condition names.
# For that, we define a list of 10 strings.
condition_ids = ['horizontal checkerboard',
'vertical checkerboard',
'auditory instructions',
'visual instructions',
'visual sentence',
'auditory sentence']
trial_type = np.array([condition_ids[i] for i in trial_idx])
#########################################################################
# We also define a duration (required by BIDS conventions).
duration = np.ones_like(onset)
#########################################################################
# Form an event dataframe from these information.
import pandas as pd
events = pd.DataFrame({'trial_type': trial_type,
'onset': onset,
'duration': duration})
#########################################################################
# Export them to a tsv file.
tsvfile = 'localizer_events.tsv'
events.to_csv(tsvfile, sep='\t', index=False)
print("Created the events file in %s " % tsvfile)
#########################################################################
# Plot the event dataframe.
import matplotlib.pyplot as plt
from nilearn.reporting import plot_event
plot_event(events, figsize=(12, 4))
plt.show()
|
{"hexsha": "94184fce103fa31f63812c9da65e31133127f12f", "size": 2950, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/04_glm_first_level_models/plot_events_file.py", "max_stars_repo_name": "ariekahn/nilearn", "max_stars_repo_head_hexsha": "baa77b18ecee7c4507579214af59d715cc9292f9", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-21T12:07:53.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-21T12:07:53.000Z", "max_issues_repo_path": "examples/04_glm_first_level_models/plot_events_file.py", "max_issues_repo_name": "ariekahn/nilearn", "max_issues_repo_head_hexsha": "baa77b18ecee7c4507579214af59d715cc9292f9", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/04_glm_first_level_models/plot_events_file.py", "max_forks_repo_name": "ariekahn/nilearn", "max_forks_repo_head_hexsha": "baa77b18ecee7c4507579214af59d715cc9292f9", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.5492957746, "max_line_length": 74, "alphanum_fraction": 0.4847457627, "include": true, "reason": "import numpy", "num_tokens": 957}
|
import numpy as np
import scipy.sparse as sp
import torch
import torch.utils.data
import typing as _typing
import torch_geometric
from . import target_dependant_sampler
class _LayerDependentImportanceSampler(
    target_dependant_sampler.BasicLayerWiseTargetDependantSampler
):
    """
    Obsolete implementation, unused.

    Kept for reference only; superseded by the scipy-sparse-based
    ``LayerDependentImportanceSampler`` defined below, which computes the
    same layer-wise importance sampling far more efficiently.
    """

    class _Utility:
        # Stateless helpers used by the sampler.

        @classmethod
        def compute_edge_weights(
            cls, __all_edge_index_with_self_loops: torch.Tensor
        ) -> torch.Tensor:
            # Per-edge weight deg_out(u)^-0.5 * deg_in(v)^-0.5 for each
            # edge (u, v); infinities from zero-degree nodes are zeroed out.
            __out_degree: torch.Tensor = torch_geometric.utils.degree(
                __all_edge_index_with_self_loops[0]
            )
            __in_degree: torch.Tensor = torch_geometric.utils.degree(
                __all_edge_index_with_self_loops[1]
            )
            temp_tensor: torch.Tensor = torch.stack(
                [
                    __out_degree[__all_edge_index_with_self_loops[0]],
                    __in_degree[__all_edge_index_with_self_loops[1]],
                ]
            )
            temp_tensor: torch.Tensor = torch.pow(temp_tensor, -0.5)
            temp_tensor[torch.isinf(temp_tensor)] = 0.0
            return temp_tensor[0] * temp_tensor[1]

        @classmethod
        def get_candidate_source_nodes_probabilities(
            cls,
            all_candidate_edge_indexes: torch.LongTensor,
            all_edge_index_with_self_loops: torch.Tensor,
            all_edge_weights: torch.Tensor,
        ) -> _typing.Tuple[torch.LongTensor, torch.Tensor]:
            """
            Compute, for every distinct source node among the candidate
            edges, its sampling probability: the sum of its candidate-edge
            weights normalized by the total candidate-edge weight.

            :param all_candidate_edge_indexes: positions of candidate edges
            :param all_edge_index_with_self_loops: integral edge index with self-loops
            :param all_edge_weights: per-edge weights for the integral graph
            :return: (all_source_nodes_indexes, all_source_nodes_probabilities)
            """
            all_candidate_edge_indexes: torch.LongTensor = (
                all_candidate_edge_indexes.unique()
            )
            _all_candidate_edges_weights: torch.Tensor = all_edge_weights[
                all_candidate_edge_indexes
            ]
            all_candidate_source_nodes_indexes: torch.LongTensor = (
                all_edge_index_with_self_loops[0, all_candidate_edge_indexes].unique()
            )
            # NOTE: O(nodes x edges) Python-level loop; a major reason this
            # implementation was abandoned.
            all_candidate_source_nodes_probabilities: torch.Tensor = torch.tensor(
                [
                    torch.sum(
                        _all_candidate_edges_weights[
                            all_edge_index_with_self_loops[
                                0, all_candidate_edge_indexes
                            ]
                            == _current_source_node_index
                        ]
                    ).item()
                    / torch.sum(_all_candidate_edges_weights).item()
                    for _current_source_node_index in all_candidate_source_nodes_indexes.tolist()
                ]
            )
            assert (
                all_candidate_source_nodes_indexes.size()
                == all_candidate_source_nodes_probabilities.size()
            )
            return (
                all_candidate_source_nodes_indexes,
                all_candidate_source_nodes_probabilities,
            )

        @classmethod
        def filter_selected_edges_by_source_nodes_and_target_nodes(
            cls,
            all_edges_with_self_loops: torch.Tensor,
            selected_source_node_indexes: torch.LongTensor,
            selected_target_node_indexes: torch.LongTensor,
        ) -> torch.Tensor:
            """
            Keep the edges whose source is in the selected source set AND
            whose target is in the selected target set.

            :param all_edges_with_self_loops: all edges with self loops
            :param selected_source_node_indexes: selected source node indexes
            :param selected_target_node_indexes: selected target node indexes
            :return: filtered edge indexes
            """
            # Boolean mask over edges whose source node was selected.
            selected_edges_mask_for_source_nodes: torch.Tensor = torch.zeros(
                all_edges_with_self_loops.size(1), dtype=torch.bool
            )
            selected_edges_mask_for_source_nodes[
                torch.cat(
                    [
                        torch.where(
                            all_edges_with_self_loops[0]
                            == __current_selected_source_node_index
                        )[0]
                        for __current_selected_source_node_index in selected_source_node_indexes.unique().tolist()
                    ]
                ).unique()
            ] = True
            # Boolean mask over edges whose target node was selected.
            selected_edges_mask_for_target_nodes: torch.Tensor = torch.zeros(
                all_edges_with_self_loops.size(1), dtype=torch.bool
            )
            selected_edges_mask_for_target_nodes[
                torch.cat(
                    [
                        torch.where(
                            all_edges_with_self_loops[1]
                            == __current_selected_target_node_index
                        )[0]
                        for __current_selected_target_node_index in selected_target_node_indexes.unique().tolist()
                    ]
                )
            ] = True
            # Intersection of both masks, returned as edge positions.
            return torch.where(
                selected_edges_mask_for_source_nodes
                & selected_edges_mask_for_target_nodes
            )[0]

    def __init__(
        self,
        edge_index: torch.LongTensor,
        target_nodes_indexes: torch.LongTensor,
        layer_wise_arguments: _typing.Sequence,
        batch_size: _typing.Optional[int] = 1,
        num_workers: int = 0,
        shuffle: bool = True,
        **kwargs
    ):
        # Self-loops are added before handing the graph to the base class,
        # so every target node always has at least one incoming edge.
        super().__init__(
            torch_geometric.utils.add_remaining_self_loops(edge_index)[0],
            target_nodes_indexes,
            layer_wise_arguments,
            batch_size,
            num_workers,
            shuffle,
            **kwargs
        )
        # Precomputed per-edge normalization weights for the whole graph.
        self.__all_edge_weights: torch.Tensor = self._Utility.compute_edge_weights(
            self._edge_index
        )

    def _sample_edges_for_layer(
        self,
        __current_layer_target_nodes_indexes: torch.LongTensor,
        __top_layer_target_nodes_indexes: torch.LongTensor,
        layer_argument: _typing.Any,
        *args,
        **kwargs
    ) -> _typing.Tuple[torch.LongTensor, _typing.Optional[torch.Tensor]]:
        """
        Sample edges for one layer

        :param __current_layer_target_nodes_indexes: target nodes for current layer
        :param __top_layer_target_nodes_indexes: target nodes for top layer
        :param layer_argument: argument for current layer
            (the positive integer source-node sampling budget)
        :param args: remaining positional arguments
        :param kwargs: remaining keyword arguments
        :return: (edge_id_in_integral_graph, edge_weight)
        """
        if type(layer_argument) != int:
            raise TypeError
        elif not layer_argument > 0:
            raise ValueError
        else:
            sampled_node_size_budget: int = layer_argument
        # Candidate edges: every edge pointing at a current-layer target.
        all_candidate_edge_indexes: torch.LongTensor = torch.cat(
            [
                torch.where(self._edge_index[1] == current_target_node_index)[0]
                for current_target_node_index in __current_layer_target_nodes_indexes.unique().tolist()
            ]
        ).unique()
        # Importance probabilities use the squared edge weights.
        (
            __all_candidate_source_nodes_indexes,
            all_candidate_source_nodes_probabilities,
        ) = self._Utility.get_candidate_source_nodes_probabilities(
            all_candidate_edge_indexes,
            self._edge_index,
            self.__all_edge_weights * self.__all_edge_weights,
        )
        assert (
            __all_candidate_source_nodes_indexes.size()
            == all_candidate_source_nodes_probabilities.size()
        )
        """ Sampling """
        # Draw without replacement when the budget is smaller than the
        # candidate pool, otherwise keep every candidate source node.
        if sampled_node_size_budget < __all_candidate_source_nodes_indexes.numel():
            selected_source_node_indexes: torch.LongTensor = (
                __all_candidate_source_nodes_indexes[
                    torch.from_numpy(
                        np.unique(
                            np.random.choice(
                                np.arange(__all_candidate_source_nodes_indexes.numel()),
                                sampled_node_size_budget,
                                p=all_candidate_source_nodes_probabilities.numpy(),
                                replace=False,
                            )
                        )
                    ).unique()
                ].unique()
            )
        else:
            selected_source_node_indexes: torch.LongTensor = (
                __all_candidate_source_nodes_indexes
            )
        # Always keep the top-layer targets as sources (preserves self-loops).
        selected_source_node_indexes: torch.LongTensor = torch.cat(
            [selected_source_node_indexes, __top_layer_target_nodes_indexes]
        ).unique()
        __selected_edges_indexes: torch.LongTensor = (
            self._Utility.filter_selected_edges_by_source_nodes_and_target_nodes(
                self._edge_index,
                selected_source_node_indexes,
                __current_layer_target_nodes_indexes,
            ).unique()
        )
        # Importance-sampling correction: divide each selected edge weight
        # by the probability of its source node having been drawn.
        non_normalized_selected_edges_weight: torch.Tensor = self.__all_edge_weights[
            __selected_edges_indexes
        ] / torch.tensor(
            [
                all_candidate_source_nodes_probabilities[
                    __all_candidate_source_nodes_indexes == current_source_node_index
                ].item()
                for current_source_node_index in self._edge_index[
                    0, __selected_edges_indexes
                ].tolist()
            ]
        )

        def __normalize_edges_weight_by_target_nodes(
            __edge_index: torch.Tensor, __edge_weight: torch.Tensor
        ) -> torch.Tensor:
            # Normalize the weights of the incoming edges of every target
            # node so that they sum to 1 (in-place on __edge_weight).
            if __edge_index.size(1) != __edge_weight.numel():
                raise ValueError
            for current_target_node_index in __edge_index[1].unique().tolist():
                __current_mask_for_edges: torch.BoolTensor = (
                    __edge_index[1] == current_target_node_index
                )
                __edge_weight[__current_mask_for_edges] = __edge_weight[
                    __current_mask_for_edges
                ] / torch.sum(__edge_weight[__current_mask_for_edges])
            return __edge_weight

        normalized_selected_edges_weight: torch.Tensor = (
            __normalize_edges_weight_by_target_nodes(
                self._edge_index[:, __selected_edges_indexes],
                non_normalized_selected_edges_weight,
            )
        )
        return __selected_edges_indexes, normalized_selected_edges_weight
class LayerDependentImportanceSampler(
    target_dependant_sampler.BasicLayerWiseTargetDependantSampler
):
    """
    The layer-dependent importance sampler from the
    `"Layer-Dependent Importance Sampling for Training Deep and Large Graph Convolutional Networks"
    <https://arxiv.org/abs/1911.07323>`_ literature, which allows
    for mini-batch training of GNNs on large-scale graphs where full-batch training is not feasible.

    Arguments
    ------------
    edge_index:
        A :obj:`torch.LongTensor` that defines the underlying graph
        connectivity/message passing flow.
        :obj:`edge_index` holds the indices of a (sparse) adjacency matrix.
        If :obj:`edge_index` is of type :obj:`torch.LongTensor`, its shape
        must be defined as :obj:`[2, num_edges]`, where messages from nodes
        :obj:`edge_index[0]` are sent to nodes in :obj:`edge_index[1]`
        (in case :obj:`flow="source_to_target"`).
    target_nodes_indexes:
        indexes of target nodes to learn representation.
    layer_wise_arguments:
        The number of nodes to sample for each layer.
        It's noteworthy that the target nodes for a specific layer
        always be preserved as source nodes for that layer,
        such that the self loops for those target nodes
        are generally preserved for representation learning.
    batch_size:
        number of target nodes for each mini-batch.
    num_workers:
        num_workers argument for inner :class:`torch.utils.data.DataLoader`
    shuffle:
        whether to shuffle target nodes for mini-batches.
    """

    @classmethod
    def __compute_edge_weight(cls, edge_index: torch.Tensor) -> torch.Tensor:
        # Per-edge weight deg_out(u)^-0.5 * deg_in(v)^-0.5 for each edge
        # (u, v); infinities from zero-degree nodes are zeroed out.
        __num_nodes: int = max(int(edge_index[0].max()), int(edge_index[1].max())) + 1
        _temp_tensor: torch.Tensor = torch.stack(
            [
                torch_geometric.utils.degree(edge_index[0], __num_nodes)[edge_index[0]],
                torch_geometric.utils.degree(edge_index[1], __num_nodes)[edge_index[1]],
            ]
        )
        _temp_tensor: torch.Tensor = torch.pow(_temp_tensor, -0.5)
        _temp_tensor[torch.isinf(_temp_tensor)] = 0
        return _temp_tensor[0] * _temp_tensor[1]

    def __init__(
        self,
        edge_index: torch.LongTensor,
        target_nodes_indexes: torch.LongTensor,
        layer_wise_arguments: _typing.Sequence,
        batch_size: _typing.Optional[int] = 1,
        num_workers: int = 0,
        shuffle: bool = True,
        **kwargs
    ):
        # Self-loops are added before handing the graph to the base class.
        super(LayerDependentImportanceSampler, self).__init__(
            torch_geometric.utils.add_remaining_self_loops(edge_index)[0],
            target_nodes_indexes,
            layer_wise_arguments,
            batch_size,
            num_workers,
            shuffle,
            **kwargs
        )
        self.__edge_weight: torch.Tensor = self.__compute_edge_weight(self._edge_index)
        # Normalized Laplacian-like weight matrix with rows indexed by the
        # TARGET node and columns by the SOURCE node of each edge.
        self.__integral_normalized_l_matrix: sp.csr_matrix = sp.csr_matrix(
            (
                self.__edge_weight.numpy(),
                (self._edge_index[1].numpy(), self._edge_index[0].numpy()),
            )
        )
        # Same (target, source) layout, but each entry stores the positional
        # index of the edge in self._edge_index so edge ids can be recovered
        # after slicing.
        # NOTE(review): edge id 0 is stored as an explicit zero entry; it
        # would be lost should scipy ever eliminate explicit zeros -- confirm.
        self.__integral_edges_indexes_sparse_matrix: sp.csr_matrix = sp.csr_matrix(
            (
                np.arange(self._edge_index.size(1)),
                (self._edge_index[1].numpy(), self._edge_index[0].numpy()),
            )
        )

    def __sample_edges(
        self,
        __current_layer_target_nodes_indexes: np.ndarray,
        __top_layer_target_nodes_indexes: np.ndarray,
        sampled_source_nodes_budget: int,
    ) -> _typing.Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """
        :param __current_layer_target_nodes_indexes: indexes of target nodes for current layer
        :param __top_layer_target_nodes_indexes: indexes of target nodes for top layer
        :param sampled_source_nodes_budget: sampled source nodes budget
        :return: (
                    sampled_edges_indexes,
                    sampled_source_nodes_indexes,
                    corresponding probabilities for sampled_source_nodes_indexes
                )
        """
        # Restrict the weight matrix to the rows of the current targets.
        partial_l_matrix: sp.csr_matrix = self.__integral_normalized_l_matrix[
            __current_layer_target_nodes_indexes, :
        ]
        # Importance of each source node: column sums of squared weights,
        # normalized into a probability distribution p.
        p: np.ndarray = np.array(
            np.sum(partial_l_matrix.multiply(partial_l_matrix), axis=0)
        )[0]
        p: np.ndarray = p / np.sum(p)
        # Never try to draw more nodes than have non-zero probability.
        _number_of_nodes_to_sample = np.min(
            [np.sum(p > 0), sampled_source_nodes_budget]
        )
        # Draw source nodes without replacement and always keep the
        # top-layer targets (preserves their self-loops).
        _selected_source_nodes: np.ndarray = np.unique(
            np.concatenate(
                [
                    np.random.choice(
                        p.size, _number_of_nodes_to_sample, replace=False, p=p
                    ),
                    __top_layer_target_nodes_indexes,
                ]
            )
        )
        # Recover the edge ids whose target row AND source column survived.
        _sampled_edges_indexes_sparse_matrix: sp.csr_matrix = (
            self.__integral_edges_indexes_sparse_matrix[
                __current_layer_target_nodes_indexes, :
            ]
        )
        _sampled_edges_indexes_sparse_matrix: sp.csc_matrix = (
            _sampled_edges_indexes_sparse_matrix.tocsc()[:, _selected_source_nodes]
        )
        _sampled_edges_indexes: np.ndarray = np.unique(
            _sampled_edges_indexes_sparse_matrix.data
        )
        return _sampled_edges_indexes, _selected_source_nodes, p[_selected_source_nodes]

    def _sample_edges_for_layer(
        self,
        __current_layer_target_nodes_indexes: torch.LongTensor,
        __top_layer_target_nodes_indexes: torch.LongTensor,
        layer_argument: _typing.Any,
        *args,
        **kwargs
    ) -> _typing.Tuple[torch.LongTensor, _typing.Optional[torch.Tensor]]:
        """
        Sample edges for one specific layer, expected to be implemented in subclass.

        Parameters
        ------------
        __current_layer_target_nodes_indexes:
            target nodes for current layer
        __top_layer_target_nodes_indexes:
            target nodes for top layer
        layer_argument:
            argument for current layer (the source-node sampling budget)
        args:
            remaining positional arguments
        kwargs:
            remaining keyword arguments

        Returns
        --------
        edge_id_in_integral_graph:
            the corresponding positional indexes for the `edge_index` of integral graph
        edge_weight:
            the optional `edge_weight` for aggregation
        """
        __wrapped_result: _typing.Tuple[
            np.ndarray, np.ndarray, np.ndarray
        ] = self.__sample_edges(
            __current_layer_target_nodes_indexes.numpy(),
            __top_layer_target_nodes_indexes.numpy(),
            layer_argument,
        )
        _sampled_edges_indexes: torch.Tensor = torch.from_numpy(__wrapped_result[0])
        _selected_source_nodes: torch.Tensor = torch.from_numpy(__wrapped_result[1])
        _selected_source_nodes_probabilities: torch.Tensor = torch.from_numpy(
            __wrapped_result[2]
        )
        """ Multiply corresponding discount weights """
        # Map each selected source node to its sampling probability, then
        # divide each edge weight by the probability of its source node
        # (importance-sampling correction).
        __selected_source_node_probability_mapping: _typing.Dict[int, float] = dict(
            zip(
                _selected_source_nodes.tolist(),
                _selected_source_nodes_probabilities.tolist(),
            )
        )
        _selected_edges_weight: torch.Tensor = self.__edge_weight[
            _sampled_edges_indexes
        ]
        _selected_edges_weight: torch.Tensor = _selected_edges_weight / torch.tensor(
            [
                __selected_source_node_probability_mapping.get(
                    _current_source_node_index
                )
                for _current_source_node_index in self._edge_index[
                    0, _sampled_edges_indexes
                ].tolist()
            ]
        )
        """ Normalize edge weight for selected edges by target nodes """
        # Per target node, rescale its incoming sampled edges to sum to 1.
        for _current_target_node_index in (
            self._edge_index[1, _sampled_edges_indexes].unique().tolist()
        ):
            _current_mask_for_selected_edges: torch.BoolTensor = (
                self._edge_index[1, _sampled_edges_indexes]
                == _current_target_node_index
            )
            _selected_edges_weight[
                _current_mask_for_selected_edges
            ] = _selected_edges_weight[_current_mask_for_selected_edges] / torch.sum(
                _selected_edges_weight[_current_mask_for_selected_edges]
            )
        _sampled_edges_indexes: _typing.Union[
            torch.LongTensor, torch.Tensor
        ] = _sampled_edges_indexes
        return _sampled_edges_indexes, _selected_edges_weight
|
{"hexsha": "bc66bdf83323bc3f67dd616bce77dfeebb6ef677", "size": 19191, "ext": "py", "lang": "Python", "max_stars_repo_path": "autogl/module/train/sampling/sampler/layer_dependent_importance_sampler.py", "max_stars_repo_name": "dedsec-9/AutoGL", "max_stars_repo_head_hexsha": "487f2b2f798b9b1363ad5dc100fb410b12222e06", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 824, "max_stars_repo_stars_event_min_datetime": "2020-11-30T14:38:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-19T10:14:04.000Z", "max_issues_repo_path": "autogl/module/train/sampling/sampler/layer_dependent_importance_sampler.py", "max_issues_repo_name": "dedsec-9/AutoGL", "max_issues_repo_head_hexsha": "487f2b2f798b9b1363ad5dc100fb410b12222e06", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 38, "max_issues_repo_issues_event_min_datetime": "2020-12-21T12:32:57.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-31T02:32:05.000Z", "max_forks_repo_path": "autogl/module/train/sampling/sampler/layer_dependent_importance_sampler.py", "max_forks_repo_name": "dedsec-9/AutoGL", "max_forks_repo_head_hexsha": "487f2b2f798b9b1363ad5dc100fb410b12222e06", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 85, "max_forks_repo_forks_event_min_datetime": "2020-12-21T05:16:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T08:44:22.000Z", "avg_line_length": 39.98125, "max_line_length": 114, "alphanum_fraction": 0.6052837267, "include": true, "reason": "import numpy,import scipy", "num_tokens": 3646}
|
import tensorflow as tf
import numpy as np
from pylab import mpl
import matplotlib.pyplot as plt
import math

# Matplotlib setup: render minus signs correctly and use the SimHei font so
# that the Chinese plot labels below display properly.
plt.rcParams['axes.unicode_minus'] = False
mpl.rcParams['font.sans-serif'] = ['SimHei']

# fixed random seeds for reproducibility
tf.set_random_seed(1)
np.random.seed(1)

# hyper-parameters
BATCH_SIZE = 64  # batch size
LR_G = 0.0001  # learning rate of the generator
LR_D = 0.0001  # learning rate of the discriminator
N_IDEAS = 5  # dimensionality of the generator's latent input (5 "ideas" / initial curves)
num_sample = 15  # number of points used to draw the fitted envelope (waveform)

# One x-axis row of num_sample points over [-2*pi, 2*pi], repeated for every
# batch element; the list comprehension replaces an explicit for-loop and
# np.vstack stacks row-wise (axis=0), giving shape (64, 15).
input_data = np.vstack([np.linspace(-2 * math.pi, 2 * math.pi, num_sample) for _ in range(BATCH_SIZE)])  # (64, 15)


def generate_signal():
    # Plain sine sequence, without phase offset or amplitude distortion.
    signal_shape = np.sin(input_data)
    return signal_shape


with tf.variable_scope('Generator'):
    """Network structure: latent input ---> 128 ---> num_sample"""
    G_in = tf.placeholder(tf.float32, [None, N_IDEAS])  # random ideas (drawn from a normal distribution)
    G_l1 = tf.layers.dense(G_in, 128, tf.nn.sigmoid)
    G_out = tf.layers.dense(G_l1, num_sample)  # one generated signal envelope (15 data points)

with tf.variable_scope('Discriminator'):
    """Unlike the discriminator, the generator only receives its own generated
    signal data and never touches the real signal. If it could access real
    data there would be nothing to learn -- feeding it straight into the
    discriminator would already give probability 0.5. In other words, the
    generator can only adjust its weights through the error fed back from the
    discriminator (the optimizer may be gradient descent, SGD, batch gradient
    descent, momentum, or Adam), so it gradually learns to produce realistic
    signal waveforms."""
    # receive the real signal
    true_signal = tf.placeholder(tf.float32, [None, num_sample], name='real_in')
    """
    Back to neural-network basics: the input matrix is (features x samples).
    Whichever optimizer is used (gradient descent, momentum or Adam -- the
    latter two are themselves based on gradient descent), all training
    samples are traversed before the weights are updated, which is slow.
    To help the network learn better, extra samples can be drawn following
    the real signal's waveform to optimize the network.
    """
    # Feed the real signal into the discriminator, which estimates the
    # probability that this data comes from the real signal.
    D_l0 = tf.layers.dense(true_signal, 128, tf.nn.relu, name='Discriminate')
    prob_from_true_signal = tf.layers.dense(D_l0, 1, tf.nn.sigmoid, name='out')
    # Feed the generated signal data (G_out) into the discriminator.
    D_l1 = tf.layers.dense(G_out, 128, tf.nn.relu, name='Discriminate', reuse=True)
    # Probability the discriminator assigns to the generated data.
    prob_from_generate_signal = tf.layers.dense(D_l1, 1, tf.nn.sigmoid, name='out', reuse=True)
    """Note that the dense layers are reused (reuse=True) when the generated
    signal is fed in; minimizing the discriminator loss dynamically adjusts
    these shared weights -- a key step."""

# Discriminator loss: both probabilities are optimized at the same time,
# using a cross-entropy style loss.
D_loss = -tf.reduce_mean(tf.log(prob_from_true_signal) + tf.log(1 - prob_from_generate_signal))
# Generator loss: prob_from_true_signal is fixed here. The generator never
# receives real signal data, so the tf.log term of the real branch is a
# constant and need not be included.
G_loss = tf.reduce_mean(tf.log(1 - prob_from_generate_signal))

train_D = tf.train.AdamOptimizer(LR_D).minimize(
    D_loss, var_list=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Discriminator'))
train_G = tf.train.AdamOptimizer(LR_G).minimize(
    G_loss, var_list=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Generator'))

# Standard TensorFlow session setup.
sess = tf.Session()
sess.run(tf.global_variables_initializer())

plt.ion()  # interactive mode: keep redrawing the same figure
for step in range(5000):
    every_step_generate_signal = generate_signal()  # "real" signal data, regenerated every iteration
    G_ideas = np.random.randn(BATCH_SIZE, N_IDEAS)  # latent inputs for the generator
    '''
    Back to basics again: the input matrix is (features x samples).
    Whether the loss is optimized with gradient descent, momentum or Adam
    (the latter two are based on gradient descent), all training samples are
    traversed -- learning from all the samples benefits the network.
    '''
    # Run one training step for both networks; keep only the first 3 outputs.
    G_signal, pa0, Dl = sess.run([G_out, prob_from_true_signal, D_loss, train_D, train_G],
                                 {G_in: G_ideas, true_signal: every_step_generate_signal})[:3]
    if step % 100 == 0:  # redraw the figure every 100 training steps
        plt.cla()
        # NOTE(review): the two legend labels below appear to be swapped --
        # G_signal is the *generated* envelope and
        # every_step_generate_signal is the real one. Confirm before fixing.
        plt.plot(input_data[0], G_signal[0], c='black', lw=3, label='真实信号包络')
        plt.plot(input_data[0], every_step_generate_signal[0], c='red', lw=3, label='生成信号的包络')
        plt.text(-.5, -2.5, '第{}次训练'.format(step), fontdict={'size': 15})
        plt.text(-.5, -1.3, 'D mean accuracy=%.2f ' % pa0.mean(), fontdict={'size': 15})
        # -1.38 for G to converge
        plt.text(-.5, -1.5, 'D score= %.2f ' % -Dl, fontdict={'size': 15})
        plt.ylim((-2, 3))
        plt.xlim((-2 * math.pi - 1, 2 * math.pi + 1))
        plt.legend(loc='upper right', fontsize=12)
        plt.draw()
        plt.pause(0.01)

plt.ioff()
plt.show()
|
{"hexsha": "9ef885d620cc9cb2ea9d1399165ba056b64131ed", "size": 4034, "ext": "py", "lang": "Python", "max_stars_repo_path": "gan_DAY_2.py", "max_stars_repo_name": "SoulProficiency/MyRepository", "max_stars_repo_head_hexsha": "3738e558190bacb596a89a305408ad6621342930", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-03-26T07:08:09.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-17T15:16:38.000Z", "max_issues_repo_path": "gan_DAY_2.py", "max_issues_repo_name": "SoulProficiency/MyRepository", "max_issues_repo_head_hexsha": "3738e558190bacb596a89a305408ad6621342930", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gan_DAY_2.py", "max_forks_repo_name": "SoulProficiency/MyRepository", "max_forks_repo_head_hexsha": "3738e558190bacb596a89a305408ad6621342930", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-02T03:12:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-02T03:12:09.000Z", "avg_line_length": 38.0566037736, "max_line_length": 116, "alphanum_fraction": 0.6784828954, "include": true, "reason": "import numpy", "num_tokens": 1860}
|
import tensorflow as tf
import numpy as np
#from data_utils import get_batch
import data_utils
import pdb
import json
from mod_core_rnn_cell_impl import LSTMCell #modified to allow initializing bias in lstm
#from tensorflow.contrib.rnn import LSTMCell
tf.logging.set_verbosity(tf.logging.ERROR)
import mmd
from differential_privacy.dp_sgd.dp_optimizer import dp_optimizer
from differential_privacy.dp_sgd.dp_optimizer import sanitizer
from differential_privacy.privacy_accountant.tf import accountant
# --- to do with latent space --- #
def sample_Z(batch_size, seq_length, latent_dim, use_time=False, use_noisy_time=False):
    """Draw latent noise of shape (batch_size, seq_length, latent_dim).

    Entries are standard-normal samples cast to float32. When ``use_time``
    is set, the first latent channel of every sequence is overwritten with
    the same deterministic ramp ``linspace(0, 1/seq_length, seq_length)``.
    ``use_noisy_time`` is accepted for interface compatibility but is
    currently unused.
    """
    noise = np.random.normal(size=[batch_size, seq_length, latent_dim]).astype(np.float32)
    if use_time:
        print('WARNING: use_time has different semantics')
        # Channel 0 becomes a shared, deterministic "time axis" for the batch.
        noise[:, :, 0] = np.linspace(0, 1.0 / seq_length, num=seq_length)
    return noise
def sample_C(batch_size, cond_dim=0, max_val=1, one_hot=False):
    """
    Sample integer-valued conditional inputs for a (C)GAN.

    Returns None when ``cond_dim`` is 0. With ``one_hot=True`` (which
    requires ``max_val == 1``) each row is a one-hot vector over
    ``cond_dim`` classes; otherwise every entry is drawn uniformly from
    {0, ..., max_val}. Result shape is (batch_size, cond_dim).
    """
    if cond_dim == 0:
        return None
    if one_hot:
        # One-hot encoding only makes sense for binary-valued conditions.
        assert max_val == 1
        encoded = np.zeros(shape=(batch_size, cond_dim))
        chosen_classes = np.random.choice(cond_dim, batch_size)
        encoded[np.arange(batch_size), chosen_classes] = 1
        return encoded
    return np.random.choice(max_val + 1, size=(batch_size, cond_dim))
# --- to do with training --- #
def train_epoch(epoch, samples, labels, sess, Z, X, CG, CD, CS, D_loss, G_loss, D_solver, G_solver,
                batch_size, use_time, D_rounds, G_rounds, seq_length,
                latent_dim, num_generated_features, cond_dim, max_val, WGAN_clip, one_hot):
    """
    Train generator and discriminator for one epoch.

    Alternates D_rounds discriminator updates with G_rounds generator
    updates over consecutive minibatches of `samples` (with `labels` in the
    conditional case, cond_dim > 0), then returns the mean D and G losses
    evaluated on the last minibatch seen.

    `sess` is an open TF session; Z, X, CG, CD, CS are the graph
    placeholders and D_loss/G_loss/D_solver/G_solver the loss tensors and
    train ops built elsewhere. Note that `epoch`,
    `num_generated_features` and `max_val` are accepted but unused in this
    body.

    Returns:
        (D_loss_curr, G_loss_curr): scalar mean losses on the final batch.
    """
    # Each outer step consumes D_rounds (+ G_rounds when conditional) batches,
    # so the stride and the end offset of the range match that consumption.
    for batch_idx in range(0, int(len(samples) / batch_size) - (D_rounds + (cond_dim > 0)*G_rounds), D_rounds + (cond_dim > 0)*G_rounds):
        # update the discriminator
        for d in range(D_rounds):
            X_mb, Y_mb = data_utils.get_batch(samples, batch_size, batch_idx + d, labels)
            Z_mb = sample_Z(batch_size, seq_length, latent_dim, use_time)
            if cond_dim > 0:
                # CGAN
                Y_mb = Y_mb.reshape(-1, cond_dim)
                if one_hot:
                    # change all of the labels to a different one
                    offsets = np.random.choice(cond_dim-1, batch_size) + 1
                    new_labels = (np.argmax(Y_mb, axis=1) + offsets) % cond_dim
                    Y_wrong = np.zeros_like(Y_mb)
                    Y_wrong[np.arange(batch_size), new_labels] = 1
                else:
                    # flip all of the bits (assuming binary...)
                    Y_wrong = 1 - Y_mb
                # CS receives the deliberately-wrong labels so the
                # discriminator also learns to reject mislabeled real data.
                _ = sess.run(D_solver, feed_dict={X: X_mb, Z: Z_mb, CD: Y_mb, CS: Y_wrong, CG: Y_mb})
            else:
                _ = sess.run(D_solver, feed_dict={X: X_mb, Z: Z_mb})
            if WGAN_clip:
                # clip the weights
                # NOTE(review): `clip_disc_weights` is not defined in this
                # function or at module level in this file -- confirm it is
                # injected as a global before enabling WGAN_clip, otherwise
                # this raises NameError.
                _ = sess.run([clip_disc_weights])
        # update the generator
        for g in range(G_rounds):
            if cond_dim > 0:
                # note we are essentially throwing these X_mb away...
                X_mb, Y_mb = data_utils.get_batch(samples, batch_size, batch_idx + D_rounds + g, labels)
                _ = sess.run(G_solver,
                        feed_dict={Z: sample_Z(batch_size, seq_length, latent_dim, use_time=use_time), CG: Y_mb})
            else:
                _ = sess.run(G_solver,
                        feed_dict={Z: sample_Z(batch_size, seq_length, latent_dim, use_time=use_time)})
    # at the end, get the loss
    if cond_dim > 0:
        D_loss_curr, G_loss_curr = sess.run([D_loss, G_loss], feed_dict={X: X_mb, Z: sample_Z(batch_size, seq_length, latent_dim, use_time=use_time), CG: Y_mb, CD: Y_mb})
        D_loss_curr = np.mean(D_loss_curr)
        G_loss_curr = np.mean(G_loss_curr)
    else:
        D_loss_curr, G_loss_curr = sess.run([D_loss, G_loss], feed_dict={X: X_mb, Z: sample_Z(batch_size, seq_length, latent_dim, use_time=use_time)})
        D_loss_curr = np.mean(D_loss_curr)
        G_loss_curr = np.mean(G_loss_curr)
    return D_loss_curr, G_loss_curr
def WGAN_loss(Z, X, WGAN_clip=False):
    """
    (Disabled) Wasserstein-GAN loss construction.

    This function raises NotImplementedError immediately; everything after
    the raise is dead code kept for reference. The dead body also refers to
    names that are not in scope here (hidden_units_g, W_out_G, D_fake,
    discriminator_vars, ...), so it cannot run as-is.
    """
    raise NotImplementedError
    G_sample = generator(Z, hidden_units_g, W_out_G, b_out_G, scale_out_G)
    D_real, D_logit_real, D_logit_real_final = discriminator(X, hidden_units_d, seq_length, batch_size)
    D_loss = tf.reduce_mean(D_fake) - tf.reduce_mean(D_real)
    G_loss = -tf.reduce_mean(D_fake)
    if not WGAN_clip:
        # gradient penalty from improved WGAN code
        # ... but it doesn't work in TF for RNNs, so let's skip it for now
        # alpha = np.random.uniform(size=batch_size, low=0.0, high=1.0).reshape(batch_size, 1, 1)
        # interpolates = alpha*X + ((1-alpha)*G_sample)
        # pdb.set_trace()
        # disc_interpolates, _ = discriminator(interpolates, reuse=True)
        # gradients = tf.gradients(disc_interpolates, [interpolates])[0]
        # slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
        # gradient_penalty = tf.reduce_mean((slopes-1)**2)
        # now for my own hack
        # sample a random h
        h = tf.random_normal(shape=X.shape, stddev=0.1)
        D_offset, _ = discriminator(X + h, hidden_units_d)
        gradient_penalty = tf.norm(D_offset - D_real)
        KAPPA = 1.0
        D_loss += KAPPA*gradient_penalty
        clip_disc_weights = None
    else:
        # weight clipping from original WGAN
        # Build an op to do the weight clipping
        clip_ops = []
        for var in discriminator_vars:
            clip_bounds = [-.01, .01]
            clip_ops.append(
                tf.assign(
                    var,
                    tf.clip_by_value(var, clip_bounds[0], clip_bounds[1])
                )
            )
        clip_disc_weights = tf.group(*clip_ops)
    return G_loss, D_loss, clip_disc_weights
def GAN_loss(Z, X, generator_settings, discriminator_settings, kappa, cond, CG, CD, CS, wrong_labels=False):
    """
    Build per-sample GAN (or conditional GAN) losses.

    Args:
        Z, X: latent-noise and real-data placeholders.
        generator_settings, discriminator_settings: dicts of keyword
            arguments forwarded to `generator` and `discriminator`.
        kappa: accepted for interface compatibility; unused in this body.
        cond: if truthy, build the conditional (CGAN) graph using CG/CD/CS.
        CG, CD, CS: condition placeholders for generated samples, real
            samples, and deliberately-wrong labels (only used when `cond`).
        wrong_labels: CGAN only -- when True, also penalise the
            discriminator for accepting real data paired with wrong labels.

    Returns:
        (D_loss, G_loss): per-sample sigmoid cross-entropy losses, reduced
        over the sequence axis only (axis 1), so per-example gradients are
        still available downstream (e.g. for DP-SGD clipping).
    """
    if cond:
        # C-GAN
        G_sample = generator(Z, **generator_settings, c=CG)
        D_real, D_logit_real = discriminator(X, **discriminator_settings, c=CD)
        D_fake, D_logit_fake = discriminator(G_sample, reuse=True, **discriminator_settings, c=CG)
        if wrong_labels:
            # the discriminator must distinguish between real data with fake labels and real data with real labels, too
            D_wrong, D_logit_wrong = discriminator(X, reuse=True, **discriminator_settings, c=CS)
    else:
        # normal GAN
        G_sample = generator(Z, **generator_settings)
        D_real, D_logit_real = discriminator(X, **discriminator_settings)
        D_fake, D_logit_fake = discriminator(G_sample, reuse=True, **discriminator_settings)
    # Real samples should be classified 1, generated samples 0.
    D_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_real, labels=tf.ones_like(D_logit_real)), 1)
    D_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake, labels=tf.zeros_like(D_logit_fake)), 1)
    D_loss = D_loss_real + D_loss_fake
    if cond and wrong_labels:
        # BUG FIX: D_loss_wrong was referenced here but never defined, so
        # wrong_labels=True always raised NameError. Real data with
        # mismatched labels should be classified as fake (label 0).
        D_loss_wrong = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_wrong, labels=tf.zeros_like(D_logit_wrong)), 1)
        D_loss = D_loss + D_loss_wrong
    #G_loss = tf.reduce_mean(tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake, labels=tf.ones_like(D_logit_fake)), axis=1))
    # The generator wants generated samples classified as real (label 1).
    G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake, labels=tf.ones_like(D_logit_fake)), 1)
    return D_loss, G_loss
def GAN_solvers(D_loss, G_loss, learning_rate, batch_size, total_examples,
                l2norm_bound, batches_per_lot, sigma, dp=False):
    """
    Optimizers

    Builds the discriminator and generator train ops. Variables are split
    by name prefix ('discriminator'/'generator'). With dp=True the
    discriminator is trained with the differentially-private SGD optimizer
    (per-example gradients, amortized Gaussian sanitizer, moments
    accountant); otherwise plain SGD on the batch-mean loss. The generator
    always uses Adam on the batch-mean G loss.

    Returns:
        (D_solver, G_solver, priv_accountant) -- priv_accountant is None
        when dp=False.
    """
    discriminator_vars = [v for v in tf.trainable_variables() if v.name.startswith('discriminator')]
    generator_vars = [v for v in tf.trainable_variables() if v.name.startswith('generator')]
    if dp:
        print('Using differentially private SGD to train discriminator!')
        # NOTE(review): these eps/delta placeholders are created but not
        # returned from this function -- presumably looked up from the graph
        # at feed time; confirm against the training script.
        eps = tf.placeholder(tf.float32)
        delta = tf.placeholder(tf.float32)
        priv_accountant = accountant.GaussianMomentsAccountant(total_examples)
        clip = True
        # The clipping bound is scaled down because DP-SGD clips per-example
        # gradients while the loss is summed over the batch.
        l2norm_bound = l2norm_bound/batch_size
        batches_per_lot = 1
        gaussian_sanitizer = sanitizer.AmortizedGaussianSanitizer(
            priv_accountant,
            [l2norm_bound, clip])
        # the trick is that we need to calculate the gradient with respect to
        # each example in the batch, during the DP SGD step
        D_solver = dp_optimizer.DPGradientDescentOptimizer(learning_rate,
                [eps, delta],
                sanitizer=gaussian_sanitizer,
                sigma=sigma,
                batches_per_lot=batches_per_lot).minimize(D_loss, var_list=discriminator_vars)
    else:
        D_loss_mean_over_batch = tf.reduce_mean(D_loss)
        D_solver = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(D_loss_mean_over_batch, var_list=discriminator_vars)
        priv_accountant = None
    G_loss_mean_over_batch = tf.reduce_mean(G_loss)
    G_solver = tf.train.AdamOptimizer().minimize(G_loss_mean_over_batch, var_list=generator_vars)
    return D_solver, G_solver, priv_accountant
# --- to do with the model --- #
def create_placeholders(batch_size, seq_length, latent_dim, num_generated_features, cond_dim):
    """Create the five float32 input placeholders used by the GAN graph:
    Z (latent noise), X (real sequences), and the three condition inputs
    CG/CD/CS, each of shape (batch_size, cond_dim)."""
    Z = tf.placeholder(tf.float32, [batch_size, seq_length, latent_dim])
    X = tf.placeholder(tf.float32, [batch_size, seq_length, num_generated_features])
    cond_shape = [batch_size, cond_dim]
    CG, CD, CS = (tf.placeholder(tf.float32, cond_shape) for _ in range(3))
    return Z, X, CG, CD, CS
def generator(z, hidden_units_g, seq_length, batch_size, num_generated_features, reuse=False, parameters=None, cond_dim=0, c=None, learn_scale=True):
    """
    If parameters are supplied, initialise as such

    Builds the generator network: latent input `z` (optionally concatenated
    with a per-step repeat of condition `c` when cond_dim > 0) is fed
    through a 3-layer GRU stack, then a shared dense tanh output layer,
    giving a tensor of shape (batch, seq_length, num_generated_features).

    NOTE(review): the cell actually used below is a GRU stack, so the
    LSTM-specific values loaded from `parameters` (lstm_initializer,
    bias_start) are assigned but never consumed. Likewise `scale_out_G` is
    created but not applied to the returned output.
    """
    with tf.variable_scope("generator") as scope:
        if reuse:
            scope.reuse_variables()
        if parameters is None:
            # Fresh training run: random output layer, unit output scale.
            W_out_G_initializer = tf.truncated_normal_initializer()
            b_out_G_initializer = tf.truncated_normal_initializer()
            scale_out_G_initializer = tf.constant_initializer(value=1.0)
            lstm_initializer = None
            bias_start = 1.0
        else:
            # Restore from a parameter dict produced by dump_parameters.
            W_out_G_initializer = tf.constant_initializer(value=parameters['generator/W_out_G:0'])
            b_out_G_initializer = tf.constant_initializer(value=parameters['generator/b_out_G:0'])
            try:
                scale_out_G_initializer = tf.constant_initializer(value=parameters['generator/scale_out_G:0'])
            except KeyError:
                scale_out_G_initializer = tf.constant_initializer(value=1)
                assert learn_scale
            lstm_initializer = tf.constant_initializer(value=parameters['generator/rnn/lstm_cell/weights:0'])
            bias_start = parameters['generator/rnn/lstm_cell/biases:0']
        W_out_G = tf.get_variable(name='W_out_G', shape=[hidden_units_g, num_generated_features], initializer=W_out_G_initializer)
        b_out_G = tf.get_variable(name='b_out_G', shape=num_generated_features, initializer=b_out_G_initializer)
        scale_out_G = tf.get_variable(name='scale_out_G', shape=1, initializer=scale_out_G_initializer, trainable=learn_scale)
        if cond_dim > 0:
            # CGAN!
            assert not c is None
            # Repeat the condition vector along the time axis and append it
            # to the latent channels.
            repeated_encoding = tf.stack([c]*seq_length, axis=1)
            inputs = tf.concat([z, repeated_encoding], axis=2)
            #repeated_encoding = tf.tile(c, [1, tf.shape(z)[1]])
            #repeated_encoding = tf.reshape(repeated_encoding, [tf.shape(z)[0], tf.shape(z)[1], cond_dim])
            #inputs = tf.concat([repeated_encoding, z], 2)
        else:
            inputs = z
        ## TIMEGAN SPECIFICATIONS
        def cell():
            # d = LSTMCell(num_units=hidden_units_g,
            #            state_is_tuple=True,
            #            initializer=lstm_initializer,
            #            bias_start=bias_start, reuse=reuse)
            d = tf.nn.rnn_cell.GRUCell(num_units=hidden_units_g, activation=tf.nn.tanh)
            return d
        e_cell = tf.nn.rnn_cell.MultiRNNCell([cell() for _ in range(3)])
        # All sequences are full-length, so every sequence_length entry is
        # seq_length.
        rnn_outputs, rnn_states = tf.nn.dynamic_rnn(e_cell, inputs, dtype=tf.float32, sequence_length = [seq_length]*batch_size)
        # rnn_outputs, rnn_states = tf.nn.dynamic_rnn(
        #     cell=cell,
        #     dtype=tf.float32,
        #     sequence_length=[seq_length]*batch_size,
        #     inputs=inputs)
        # Apply the shared output layer per time step by flattening
        # (batch, time) into one axis.
        rnn_outputs_2d = tf.reshape(rnn_outputs, [-1, hidden_units_g])
        logits_2d = tf.matmul(rnn_outputs_2d, W_out_G) + b_out_G
        output_2d = tf.nn.tanh(logits_2d)
        output_3d = tf.reshape(output_2d, [-1, seq_length, num_generated_features])
        return output_3d
def discriminator(x, hidden_units_d, seq_length, batch_size, reuse=False,
        cond_dim=0, c=None, batch_mean=False):
    """
    Build the discriminator: input sequences `x` (optionally concatenated
    with a per-step repeat of condition `c`) go through a 3-layer GRU
    stack, then a shared per-step dense layer producing one logit per time
    step.

    Returns:
        (output, logits): sigmoid probabilities and raw logits, one per
        time step.

    NOTE(review): when batch_mean=True, `inputs` is rebuilt from the raw
    `x` plus the batch mean, discarding any conditional encoding that was
    concatenated just above -- confirm whether batch_mean and cond_dim are
    ever used together.
    """
    with tf.variable_scope("discriminator") as scope:
        if reuse:
            scope.reuse_variables()
        W_out_D = tf.get_variable(name='W_out_D', shape=[hidden_units_d, 1],
                initializer=tf.truncated_normal_initializer())
        b_out_D = tf.get_variable(name='b_out_D', shape=1,
                initializer=tf.truncated_normal_initializer())
        # W_final_D = tf.get_variable(name='W_final_D', shape=[hidden_units_d, 1],
        #            initializer=tf.truncated_normal_initializer())
        # b_final_D = tf.get_variable(name='b_final_D', shape=1,
        #            initializer=tf.truncated_normal_initializer())
        if cond_dim > 0:
            assert not c is None
            # Repeat the condition along the time axis and append it to the
            # data channels.
            repeated_encoding = tf.stack([c]*seq_length, axis=1)
            inputs = tf.concat([x, repeated_encoding], axis=2)
        else:
            inputs = x
        # add the average of the inputs to the inputs (mode collapse?
        if batch_mean:
            mean_over_batch = tf.stack([tf.reduce_mean(x, axis=0)]*batch_size, axis=0)
            inputs = tf.concat([x, mean_over_batch], axis=2)
        # cell = tf.contrib.rnn.LSTMCell(num_units=hidden_units_d,
        #            state_is_tuple=True,
        #            reuse=reuse)
        # rnn_outputs, rnn_states = tf.nn.dynamic_rnn(
        #     cell=cell,
        #     dtype=tf.float32,
        #     inputs=inputs)
        ## TIMEGAN SPECIFICATIONS
        def cell():
            d = tf.nn.rnn_cell.GRUCell(num_units=hidden_units_d, activation=tf.nn.tanh)
            return d
        e_cell = tf.nn.rnn_cell.MultiRNNCell([cell() for _ in range(3)])
        rnn_outputs, rnn_states = tf.nn.dynamic_rnn(e_cell, inputs, dtype=tf.float32, sequence_length = [seq_length]*batch_size)
        # logit_final = tf.matmul(rnn_outputs[:, -1], W_final_D) + b_final_D
        # Implicit-mode einsum: contracts over k, output indices 'ijm', i.e.
        # a per-time-step dense layer applied across the whole batch.
        logits = tf.einsum('ijk,km', rnn_outputs, W_out_D) + b_out_D
        # rnn_outputs_flat = tf.reshape(rnn_outputs, [-1, hidden_units_d])
        # logits = tf.matmul(rnn_outputs_flat, W_out_D) + b_out_D
        output = tf.nn.sigmoid(logits)
        #return output, logits, logit_final
        return output, logits
# --- to do with saving/loading --- #
def dump_parameters(identifier, sess):
    """
    Save model parmaters to a numpy file

    Evaluates every trainable variable in `sess` and writes a
    {variable_name: value} dict to
    ./experiments/parameters/<identifier>.npy.
    """
    dump_path = './experiments/parameters/' + identifier + '.npy'
    model_parameters = {variable.name: sess.run(variable) for variable in tf.trainable_variables()}
    np.save(dump_path, model_parameters)
    print('Recorded', len(model_parameters), 'parameters to', dump_path)
    return True
def load_parameters(identifier):
    """
    Load parameters from a numpy file

    Reads ./experiments/parameters/<identifier>.npy (written by
    dump_parameters) and returns the {variable_name: value} dict.
    """
    load_path = './experiments/parameters/' + identifier + '.npy'
    # The file stores a pickled dict inside an object array;
    # allow_pickle=True is required to load it on NumPy >= 1.16.3
    # (where it became False by default).
    model_parameters = np.load(load_path, allow_pickle=True).item()
    return model_parameters
# --- to do with trained models --- #
def sample_trained_model(settings, epoch, num_samples, Z_samples=None, C_samples=None):
    """
    Return num_samples samples from a trained model described by settings dict

    `settings` may also be an identifier string, in which case the settings
    file ./experiments/settings/<settings>.txt is loaded. Parameters are
    restored from the dump for `epoch`, a fresh generator graph is built
    (conditional iff C_samples is given), sampled once in a throwaway
    session, and the default graph is reset before returning.

    NOTE(review): in the unconditional branch, cond_dim is still forwarded
    to `generator` without a `c` argument -- with cond_dim > 0 that would
    trip the generator's assertion; presumably unconditional models have
    cond_dim == 0 in their settings. Confirm.
    """
    # if settings is a string, assume it's an identifier and load
    if type(settings) == str:
        settings = json.load(open('./experiments/settings/' + settings + '.txt', 'r'))
    print('Sampling', num_samples, 'samples from', settings['identifier'], 'at epoch', epoch)
    # get the parameters, get other variables
    parameters = load_parameters(settings['identifier'] + '_' + str(epoch))
    # create placeholder, Z samples
    Z = tf.placeholder(tf.float32, [num_samples, settings['seq_length'], settings['latent_dim']])
    CG = tf.placeholder(tf.float32, [num_samples, settings['cond_dim']])
    if Z_samples is None:
        Z_samples = sample_Z(num_samples, settings['seq_length'], settings['latent_dim'], settings['use_time'], use_noisy_time=False)
    else:
        assert Z_samples.shape[0] == num_samples
    # create the generator (GAN or CGAN)
    if C_samples is None:
        # normal GAN
        G_samples = generator(Z, settings['hidden_units_g'], settings['seq_length'],
                num_samples, settings['num_generated_features'],
                reuse=False, parameters=parameters, cond_dim=settings['cond_dim'])
    else:
        assert C_samples.shape[0] == num_samples
        # CGAN
        G_samples = generator(Z, settings['hidden_units_g'], settings['seq_length'],
                num_samples, settings['num_generated_features'],
                reuse=False, parameters=parameters, cond_dim=settings['cond_dim'], c=CG)
    # sample from it
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        if C_samples is None:
            real_samples = sess.run(G_samples, feed_dict={Z: Z_samples})
        else:
            real_samples = sess.run(G_samples, feed_dict={Z: Z_samples, CG: C_samples})
    # Clear the graph so repeated calls don't accumulate variables.
    tf.reset_default_graph()
    return real_samples
# --- to do with inversion --- #
def invert(settings, epoch, samples, g_tolerance=None, e_tolerance=0.1,
        n_iter=None, max_iter=10000, heuristic_sigma=None, C_samples=None):
    """
    Return the latent space points corresponding to a set of a samples
    ( from gradient descent )

    Rebuilds the generator for the model in `settings` at `epoch`, makes Z
    a trainable variable, and minimises an MMD-kernel reconstruction error
    between generated and target samples with RMSProp. The stopping rule
    is, in order of precedence: a fixed n_iter; gradient norm below
    g_tolerance; otherwise error below e_tolerance (all capped by
    max_iter).

    Returns:
        (Zs, error_per_sample, heuristic_sigma): the recovered latent
        points, the per-sample reconstruction error at termination, and
        the RBF kernel bandwidth used.
    """
    # cast samples to float32
    samples = np.float32(samples[:, :, :])
    # get the model
    if type(settings) == str:
        settings = json.load(open('./experiments/settings/' + settings + '.txt', 'r'))
    num_samples = samples.shape[0]
    print('Inverting', num_samples, 'samples using model', settings['identifier'], 'at epoch', epoch,)
    if not g_tolerance is None:
        print('until gradient norm is below', g_tolerance)
    else:
        print('until error is below', e_tolerance)
    # get parameters
    parameters = load_parameters(settings['identifier'] + '_' + str(epoch))
    # assertions
    assert samples.shape[2] == settings['num_generated_features']
    # create VARIABLE Z
    Z = tf.get_variable(name='Z', shape=[num_samples, settings['seq_length'],
        settings['latent_dim']],
        initializer=tf.random_normal_initializer())
    if C_samples is None:
        # create outputs
        G_samples = generator(Z, settings['hidden_units_g'], settings['seq_length'],
                num_samples, settings['num_generated_features'],
                reuse=False, parameters=parameters)
        fd = None
    else:
        CG = tf.placeholder(tf.float32, [num_samples, settings['cond_dim']])
        assert C_samples.shape[0] == samples.shape[0]
        # CGAN
        G_samples = generator(Z, settings['hidden_units_g'], settings['seq_length'],
                num_samples, settings['num_generated_features'],
                reuse=False, parameters=parameters, cond_dim=settings['cond_dim'], c=CG)
        fd = {CG: C_samples}
    # define loss
    if heuristic_sigma is None:
        heuristic_sigma = mmd.median_pairwise_distance(samples) # this is noisy
        print('heuristic_sigma:', heuristic_sigma)
    # Only the cross-kernel Kxy is used: its diagonal measures the
    # similarity between each generated sample and its target.
    Kxx, Kxy, Kyy, wts = mmd._mix_rbf_kernel(G_samples, samples, sigmas=tf.constant(value=heuristic_sigma, shape=(1, 1)))
    similarity_per_sample = tf.diag_part(Kxy)
    reconstruction_error_per_sample = 1 - similarity_per_sample
    #reconstruction_error_per_sample = tf.reduce_sum((tf.nn.l2_normalize(G_samples, dim=1) - tf.nn.l2_normalize(samples, dim=1))**2, axis=[1,2])
    similarity = tf.reduce_mean(similarity_per_sample)
    reconstruction_error = 1 - similarity
    # updater
    # solver = tf.train.AdamOptimizer().minimize(reconstruction_error_per_sample, var_list=[Z])
    #solver = tf.train.RMSPropOptimizer(learning_rate=500).minimize(reconstruction_error, var_list=[Z])
    solver = tf.train.RMSPropOptimizer(learning_rate=0.1).minimize(reconstruction_error_per_sample, var_list=[Z])
    #solver = tf.train.MomentumOptimizer(learning_rate=0.1, momentum=0.9).minimize(reconstruction_error_per_sample, var_list=[Z])
    grad_Z = tf.gradients(reconstruction_error_per_sample, Z)[0]
    grad_per_Z = tf.norm(grad_Z, axis=(1, 2))
    grad_norm = tf.reduce_mean(grad_per_Z)
    #solver = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(reconstruction_error, var_list=[Z])
    print('Finding latent state corresponding to samples...')
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        error = sess.run(reconstruction_error, feed_dict=fd)
        g_n = sess.run(grad_norm, feed_dict=fd)
        print(g_n)
        i = 0
        if not n_iter is None:
            # Fixed iteration budget takes precedence over tolerances.
            while i < n_iter:
                _ = sess.run(solver, feed_dict=fd)
                error = sess.run(reconstruction_error, feed_dict=fd)
                i += 1
        else:
            if not g_tolerance is None:
                # Stop when the mean gradient norm falls below g_tolerance.
                while g_n > g_tolerance:
                    _ = sess.run(solver, feed_dict=fd)
                    error, g_n = sess.run([reconstruction_error, grad_norm], feed_dict=fd)
                    i += 1
                    print(error, g_n)
                    if i > max_iter:
                        break
            else:
                # Stop when the mean reconstruction error is small enough.
                while np.abs(error) > e_tolerance:
                    _ = sess.run(solver, feed_dict=fd)
                    error = sess.run(reconstruction_error, feed_dict=fd)
                    i += 1
                    print(error)
                    if i > max_iter:
                        break
        Zs = sess.run(Z, feed_dict=fd)
        error_per_sample = sess.run(reconstruction_error_per_sample, feed_dict=fd)
        print('Z found in', i, 'iterations with final reconstruction error of', error)
    # Clear the graph so repeated inversions don't collide on variable 'Z'.
    tf.reset_default_graph()
    return Zs, error_per_sample, heuristic_sigma
|
{"hexsha": "e012127130a83978fbced65a3ab2d82372ce86d2", "size": 23651, "ext": "py", "lang": "Python", "max_stars_repo_path": "model.py", "max_stars_repo_name": "gebob19/RGAN", "max_stars_repo_head_hexsha": "cb8c4c36ff7af0395611f10d5b17c8719fff0b00", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "model.py", "max_issues_repo_name": "gebob19/RGAN", "max_issues_repo_head_hexsha": "cb8c4c36ff7af0395611f10d5b17c8719fff0b00", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "model.py", "max_forks_repo_name": "gebob19/RGAN", "max_forks_repo_head_hexsha": "cb8c4c36ff7af0395611f10d5b17c8719fff0b00", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.302, "max_line_length": 170, "alphanum_fraction": 0.6422138599, "include": true, "reason": "import numpy", "num_tokens": 5602}
|
"""
Basics
~~~~~~
Tensor trains are a versatile tensor decomposition. They consist of a list of
order-3 tensors known as `cores`. A tensor train encoding an order `d` dense
tensor, has `d` cores. The second dimension of these cores coincides with the
dimensions of the dense tensor. The first and third dimensions of the cores are
known as the `tensor train rank`.
For example, below we create a random tensor train encoding a shape ``(10, 12,
8)`` tensor, with tensor train ranks ``(3, 4)``
>>> from ttml.tensor_train import TensorTrain
...
... dims = (10, 12, 8)
... tt_rank = (3, 4)
... tt = TensorTrain.random(dims, tt_rank)
... tt
<TensorTrain of order 3 with outer dimensions (10, 12, 8), TT-rank (3, 4),
and orthogonalized at mode 2>
We can access the cores of the tensor train through simple array indexing or
looping. Let's print the shapes of the three cores in this tensor train
>>> for core in tt:
... print(core.shape)
(1, 10, 3)
(3, 12, 4)
(4, 8, 1)
Note that the first dimension of the first core and the last dimension of the
last core is always ``1``.
Orthogonalization
~~~~~~~~~~~~~~~~~
Note in the representation string above, it is mentioned that the tensor train
is ``orthogonalized at mode 2``. This means that the `left-matricization` of the
cores to the left of mode 2 (i.e. the first and second cores) are orthogonal.
This means that the following contraction is the identity matrix:
>>> import numpy as np
...
... np.einsum("abi,abj->ij", tt[0], tt[0])
array([[ 1.00000000e+00, -8.84708973e-17, 3.46944695e-18],
[-8.84708973e-17, 1.00000000e+00, -6.93889390e-18],
[ 3.46944695e-18, -6.93889390e-18, 1.00000000e+00]])
The same contraction is also an identity matrix for ``tt[1]``. Orthogonalization
is extremely important for numerical stability, as well as for working with
tangent vectors (more on that later). We can specify on which mode we want the
tensor train to be orthogonalized at initialization, but we can also change the
orthogonalization mode by using the :meth:`TensorTrain.orthogonalize` method.
For example, we can orthogonalize the tensor train on mode 1 below. The mode
argument can be any integer, or either of the strings ``'l'`` and ``'r'``. Here
``'l'`` corresponds to a `left orthogonalization`, which is an orthogonalization
with respect to `the last mode`. Conversely ``'r'`` corresponds to a `right
orthogonalization`, that is, with respect to `the first mode`.
>>> tt.orthogonalize(mode=1)
...
... A = np.einsum("abi,abj->ij", tt[0], tt[0])
... print(np.allclose(A, np.eye(len(A))))
...
... B = np.einsum("iab,jab->ij", tt[2], tt[2])
... print(np.allclose(B, np.eye(len(B))))
True
True
One consequence of orthogonalization is that computing the norm of the the
tensor train can be done very efficiently; the (Frobenius) norm of a tensor
trained orthogonalized at mode ``mu`` is simply the (Frobenius) norm of core
``mu``.
>>> np.isclose(tt.norm(), np.linalg.norm(tt[1]))
True
Tensor train arithmetic
~~~~~~~~~~~~~~~~~~~~~~~
We can also perform many operations involving two or more tensor trains. Let's
first create two new tensor trains with the same outer dimensions, but different
tt-ranks. We can then for example contract the tensor trains using the
:meth:`TensorTrain.dot` method, or alternatively the ``@`` operator.
>>> dims = (4, 6, 6, 5)
... tt1 = TensorTrain.random(dims, (3, 4, 3))
... tt2 = TensorTrain.random(dims, (2, 2, 2))
... tt1 @ tt2
0.016860730327956833
We can verify that this is indeed the Frobenius inner product of these two
tensors by comparing the result to contracting the two associated dense tensors.
We can turn a tensor train into a dense tensor using the
:meth:`TensorTrain.dense` method.
>>> np.einsum("ijkl,ijkl->", tt1.dense(), tt2.dense())
0.016860730327956833
We can also add/subtract tensor trains or multiply them by scalars.
>>> tt3 = tt1 + 0.1 * tt2
... tt3
<TensorTrain of order 4 with outer dimensions (4, 6, 6, 5), TT-rank (4, 6, 5),
and orthogonalized at mode 3>
Truncation
~~~~~~~~~~
Note that when we add tensor trains, the outer dimensions stay the same but in
principle the tt-rank increases, becoming at most the sum of the tt-ranks.
In many cases the rank of the sum is not the sum of the ranks. For example in
the case above, the first rank of ``tt3`` is 4 and not 5=2+3, since the first
rank is always bounded by the first dimension (which is 4 in this case).
If we now add another copy of ``tt2`` to ``tt3`` we would expect the rank to
stay the same, yet this doesn't always happen due to numerical errors. Note
that the middle tt-rank below is 8, even though ``tt4`` can be expressed
by a rank ``(4, 6, 5)`` tensor train.
>>> tt4 = tt3 + tt2
... tt4
<TensorTrain of order 4 with outer dimensions (4, 6, 6, 5), TT-rank (4, 8, 5),
and orthogonalized at mode 3>
We can truncate the rank of a tensor train by using the
:meth:`TensorTrain.round` method. This uses HOSVD to truncate the tensor train,
and it has two methods for rounding; it can round to a pre-specified tt-rank,
or it can truncate based on singular values. We do the latter below by
specifying the ``eps=1e-16`` keyword, meaning we can round each core in a HOSVD
sweep with relative error up to ``1e-16``.
>>> tt5 = tt4.round(eps=1e-16, inplace=False)
... print(tt5)
... (tt4 - tt5).norm()
<TensorTrain of order 4 with outer dimensions (4, 6, 6, 5), TT-rank (4, 6, 5),
and orthogonalized at mode 3>
3.1508721275986887e-15
Note that the rank has decreased to the correct value, while only gathering an
error on the order of machine epsilon. This is because the last two singular
values of the second unfolding are very small, we can see them using
:meth:`TensorTrain.sing_vals`:
>>> tt4.sing_vals()
[array([1.92032396, 1.1634468 , 0.62421987, 0.4174046 ]),
array([1.77671233e+00, 9.81237260e-01, 7.97148647e-01, 6.93386512e-01,
5.05833036e-01, 3.36895334e-01, 2.17444526e-31, 1.32445319e-31]),
array([1.95129345, 0.99099654, 0.80976405, 0.38830473, 0.09492614])]
We can also round to even lower ranks, at the cost of a higher rounding error.
>>> tt6 = tt4.round(max_rank=4, inplace=False)
... (tt4 - tt6).norm()
0.6129466966082833
Here ``max_rank=4`` means all tt-ranks should be at most 4, but we could also
supply a tuple of ints here, e.g. ``tt4.round(max_rank = (4, 5, 5))``.
Accessing entries
~~~~~~~~~~~~~~~~~
To access any specific entries of the tensor train we can use the
:meth:`TensorTrain.gather` method. We need to supply it a list of entries
we want to access, encoded as an integer array. For example to access entry
(0,0,0,0) and (0,1,0,0) we do the following:
>>> tt4.gather(np.array([[0, 0, 0, 0], [0, 1, 0, 0]]))
array([ 0.02875322, -0.02476423])
Tangent vectors
~~~~~~~~~~~~~~~
For Riemannian optimization of tensor trains we need to work with tangent
vectors on the manifold of tensor trains of specified rank. Tangent vectors
are always associated to a particular tensor train (remember: a tensor train
is a point on the tensor-train manifold). For efficient manipulation of
tangent vectors, we need to have both the left- and right-orthogonalized cores
of a tensor train. Since tensor trains are left-orthogonalized by default, we
just need to compute the right-orthogonal cores.
>>> tt = TensorTrain.random((3, 4, 4, 3), (2, 3, 2))
... right_cores = tt.orthogonalize(mode="r", inplace=False)
... tv = TensorTrainTangentVector.random(tt, right_cores)
... tv
<TensorTrainTangentVector of order 4, outer dimensions (3, 4, 4, 3),
and TT-rank (2, 3, 2)>
The arguably most important thing we can do with tangent vectors is that using
a `'retract'` we can 'move' in the direction of the tangent vector. We can do
this using the :meth:`TensorTrain.apply_grad` method. Below we apply the retract
of ``tv`` to `tt`, after first multiplying it by ``1e-6`` using the ``alpha``
keyword argument.
>>> tt2 = tt.apply_grad(tv, alpha=1e-6)
... (tt-tt2).norm()
8.200339164343568e-07
We see that this changes `tt` on the order of the 'step size' ``alpha`` and the
norm of ``tt2``.
Transporting a tangent vector to a new point is equivalent to projecting the
tangent vector to the tangent space of the new point. This can be done using
:meth:`TensorTrain.grad_proj`:
>>> tv2 = tt2.grad_proj(tv)
<TensorTrainTangentVector of order 4, outer dimensions (3, 4, 4, 3),
and TT-rank (2, 3, 2)>
We can also perform arithmetic with tangent vectors, like multiplying them by
scalars, adding them, or computing inner products (using
:meth:`TensorTrainTangentVector.inner` or the ``@`` operator).
Note that this only makes mathematical sense if the tangent vectors are
associated to the same tensor train.
>>> tv1 = TensorTrainTangentVector.random(tt, right_cores)
... tv2 = TensorTrainTangentVector.random(tt, right_cores)
... print((tv1 + tv2).norm())
... tv3 = tv1 - 0.1 * tv2
... print(tv1 @ tv3)
1.278782626647999
0.6505614686324931
The way we usually create tangent vectors is as the gradient of some
optimization problem. For example we could try to solve a tensor completion
problem. We want to approximate an unknown dense tensor using a tensor train
based on knowing the values of particular entries of the dense tensor. We can
solve this by starting with a random tensor train and applying Riemannian
optimization. At each point the `Euclidean gradient` is just linear residual
error between the entries in the tensor train and the true value. We can convert
this Euclidean gradient into a tangent vector using
:meth:`TensorTrain.rgrad_sparse`. Below we illustrate one step of gradient
descent for the tensor completion problem.
>>> tt = TensorTrain.random((10, 10, 10, 10, 10), (2, 5, 5, 2))
...
... # Generate 100 random values and 100 random indices of `tt`
... N = 100
... y = np.random.normal(size=N)
... idx = [np.random.choice(r, size=N) for r in tt.dims]
... idx = np.stack(idx)
...
... # Compute the initial error and the gradient
... prediction = tt.gather(idx)
... residual = prediction - y
... print(np.linalg.norm(residual))
... grad = tt.rgrad_sparse(-residual, idx)
...
... # Take a step in gradient direction and compute new error
... tt2 = tt.apply_grad(grad, alpha=10)
... prediction = tt2.gather(idx)
... residual = y - prediction
... print(np.linalg.norm(residual))
initial error: 200.76000727481116
error after step: 191.21863957535254
Alternative backends
~~~~~~~~~~~~~~~~~~~~
So far all the objects we have use were encoded as numpy arrays, but other
backends are supported as well. For example to use ``tensorflow`` as a backend
for a tensor train, we just need to supply the ``backend`` keyword:
>>> tt_tf = TensorTrain.random((4, 4, 4), (2, 2), backend="tensorflow")
... print(tt_tf[0])
Here in principle many backends are supported, such as ``pytorch``, ``dask``,
``jax`` or ``cupy``. Support for other backends is handled by
`autoray <https://github.com/jcmgray/autoray>`_. However, not all functionality
has been thoroughly tested for most backends. Moreover, all the functions used
here are not `compiled`, so usually things end up being fastest for numpy. This
may change in the future. In particular :class:`TTML` has very limited
support for backends other than numpy, since the ``scikit-learn`` estimators
used for initialization only support numpy anyway.
"""
|
{"hexsha": "c568f6287003ca87c826ac420aca1c7affe694ee", "size": 11248, "ext": "py", "lang": "Python", "max_stars_repo_path": "ttml/_tensor_train_doc.py", "max_stars_repo_name": "RikVoorhaar/ttml", "max_stars_repo_head_hexsha": "3786cfc02976f7d6cd5f045f213e28793f4ece61", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ttml/_tensor_train_doc.py", "max_issues_repo_name": "RikVoorhaar/ttml", "max_issues_repo_head_hexsha": "3786cfc02976f7d6cd5f045f213e28793f4ece61", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ttml/_tensor_train_doc.py", "max_forks_repo_name": "RikVoorhaar/ttml", "max_forks_repo_head_hexsha": "3786cfc02976f7d6cd5f045f213e28793f4ece61", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.4604316547, "max_line_length": 80, "alphanum_fraction": 0.7223506401, "include": true, "reason": "import numpy", "num_tokens": 3226}
|
# Reference from http://bluewhale.cc/2017-09-22/use-python-opencv-for-image-template-matching-match-template.html
import cv2
import numpy as np
def similarity(img,tar):
    """Count agreement between two images after binarization.

    Both inputs are converted to grayscale and thresholded (Otsu),
    then compared pixel-wise; the return value is the sum of the
    equality mask produced by cv2.compare (255 per matching pixel).
    """
    gray_a = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray_b = cv2.cvtColor(tar, cv2.COLOR_BGR2GRAY)
    binary_a = get_two(gray_a)
    binary_b = get_two(gray_b)
    equal_mask = cv2.compare(binary_a, binary_b, cv2.CMP_EQ)
    return np.sum(equal_mask)
def match(img,tar):
    """Template-match `tar` inside `img` (normalized cross-correlation).

    Returns the cv2.minMaxLoc tuple: (minVal, maxVal, minLoc, maxLoc).
    """
    score_map = cv2.matchTemplate(img, tar, cv2.TM_CCOEFF_NORMED)
    return cv2.minMaxLoc(score_map)
def match_pos(img,tar):
    """Location (maxLoc) of the best template match."""
    _, _, _, max_loc = match(img, tar)
    return max_loc
def match_val(img,tar):
    """Score (maxVal) of the best template match."""
    _, max_val, _, _ = match(img, tar)
    return max_val
def get_arr(image,through=None):
    """Read an image file into an array.

    When `through` is given, only that channel index is kept.
    """
    data = cv2.imread(image)
    if through is not None:
        data = data[:, :, through]
    return data
def get_two(img,thresh=None):
    """Binarize a grayscale image to {0, 255}.

    Without `thresh`, Otsu's method picks the threshold automatically;
    otherwise the given fixed threshold is applied.
    """
    if thresh is None:
        _, binary = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    else:
        _, binary = cv2.threshold(img, thresh, 255, cv2.THRESH_BINARY)
    return binary
def match_diff(img,tar):
    """Per-element difference mask between two equally-shaped images.

    Returns a float array of the same shape holding 0 where the
    absolute per-channel difference is strictly below 100 and 255
    elsewhere.

    Improvements over the original: the O(x*y*z) Python triple loop is
    replaced by a vectorized numpy expression, and the function now
    accepts arrays of any shape (the old version required exactly
    three dimensions).
    """
    # Cast to int first so uint8 inputs do not wrap around on subtraction
    # (matches the original per-element int() conversion).
    diff = img.astype(int) - tar.astype(int)
    return np.where(np.abs(diff) < 100, 0.0, 255.0)
# Smoke message printed when this module is run directly (no CLI).
if __name__=='__main__':
    print("Matcher Here")
|
{"hexsha": "e98258596f9453490514e698b83b93a26beb3653", "size": 1550, "ext": "py", "lang": "Python", "max_stars_repo_path": "matcher.py", "max_stars_repo_name": "Jerry-Terrasse/LlfSystem", "max_stars_repo_head_hexsha": "069d9e6935cfae19f1d2c17dfe3dcf1a75515f53", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2019-05-15T02:24:16.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-31T14:03:21.000Z", "max_issues_repo_path": "matcher.py", "max_issues_repo_name": "Jerry-Terrasse/LlfSystem", "max_issues_repo_head_hexsha": "069d9e6935cfae19f1d2c17dfe3dcf1a75515f53", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2019-05-17T13:41:00.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T23:46:17.000Z", "max_forks_repo_path": "matcher.py", "max_forks_repo_name": "Jerry-Terrasse/LlfSystem", "max_forks_repo_head_hexsha": "069d9e6935cfae19f1d2c17dfe3dcf1a75515f53", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-05-18T03:49:38.000Z", "max_forks_repo_forks_event_max_datetime": "2019-07-27T09:16:05.000Z", "avg_line_length": 26.2711864407, "max_line_length": 113, "alphanum_fraction": 0.62, "include": true, "reason": "import numpy", "num_tokens": 447}
|
#pragma once
#include <Core/RaCore.hpp>
#include <Eigen/Core>
#include <Eigen/Geometry>
namespace Ra {
namespace Core {
namespace Geometry {
/// An oriented bounding box: an axis-aligned box in model space plus an
/// affine placement transform.
class Obb
{
  public:
    using Transform = Eigen::Transform<Scalar, 3, Eigen::Affine>;
    using Aabb      = Eigen::AlignedBox<Scalar, 3>;

    /// Creates an empty bounding box with an identity transform.
    inline Obb() : m_aabb(), m_transform( Transform::Identity() ) {}
    /// Creates an OBB from an AABB and a placement transform.
    inline Obb( const Aabb& aabb, const Transform& tr ) : m_aabb( aabb ), m_transform( tr ) {}
    /// Default copy constructor and assignment operator.
    Obb( const Obb& other ) = default;
    Obb& operator=( const Obb& other ) = default;
    virtual inline ~Obb() {}

    /// Returns the axis-aligned box enclosing this OBB in world space.
    inline Aabb toAabb() const {
        if ( m_aabb.isEmpty() ) { return m_aabb; }
        Aabb enclosing;
        for ( int cornerIdx = 0; cornerIdx < 8; ++cornerIdx )
        {
            enclosing.extend( worldCorner( cornerIdx ) );
        }
        return enclosing;
    }

    /// Extends the OBB so that it contains point \p p (model space).
    inline void addPoint( const Eigen::Matrix<Scalar, 3, 1>& p ) { m_aabb.extend( p ); }

    /// Returns the position of the i-th corner of the AABB (model space).
    inline Eigen::Matrix<Scalar, 3, 1> corner( int i ) const {
        return m_aabb.corner( static_cast<Aabb::CornerType>( i ) );
    }

    /// Returns the position of the i-th corner of the OBB (world space).
    inline Eigen::Matrix<Scalar, 3, 1> worldCorner( int i ) const {
        return m_transform * m_aabb.corner( static_cast<Aabb::CornerType>( i ) );
    }

    /// Mutable access to the obb transformation.
    Transform& transform() { return m_transform; }
    /// Read-only access to the obb transformation.
    const Transform& transform() const { return m_transform; }

  private:
    /// The untransformed AABB.
    Aabb m_aabb;
    /// Placement and orientation of the box.
    Transform m_transform;
};
} // namespace Geometry
} // namespace Core
} // namespace Ra
|
{"hexsha": "fd44f0b1dc52762bd8b2cf14650f3b07e7977089", "size": 2034, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/Core/Geometry/Obb.hpp", "max_stars_repo_name": "Yasoo31/Radium-Engine", "max_stars_repo_head_hexsha": "e22754d0abe192207fd946509cbd63c4f9e52dd4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 78.0, "max_stars_repo_stars_event_min_datetime": "2017-12-01T12:23:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T05:08:09.000Z", "max_issues_repo_path": "src/Core/Geometry/Obb.hpp", "max_issues_repo_name": "neurodiverseEsoteric/Radium-Engine", "max_issues_repo_head_hexsha": "ebebc29d889a9d32e0637e425e589e403d8edef8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 527.0, "max_issues_repo_issues_event_min_datetime": "2017-09-25T13:05:32.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T18:47:44.000Z", "max_forks_repo_path": "src/Core/Geometry/Obb.hpp", "max_forks_repo_name": "neurodiverseEsoteric/Radium-Engine", "max_forks_repo_head_hexsha": "ebebc29d889a9d32e0637e425e589e403d8edef8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 48.0, "max_forks_repo_forks_event_min_datetime": "2018-01-04T22:08:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-03T08:13:41.000Z", "avg_line_length": 29.0571428571, "max_line_length": 94, "alphanum_fraction": 0.6297935103, "num_tokens": 528}
|
import json
import numpy as np
import os
import random
import re
import sklearn.linear_model
import sklearn.preprocessing
import time
EMB_DIR = '/tmp/basilica-embeddings/'

files = [f for f in os.listdir(EMB_DIR)]
random.shuffle(files)
train_size = int(len(files)*0.8)

def _load_split(names):
    """Load embedding vectors and labels for the given file names.

    Each file holds one JSON-encoded 2048-dim embedding.  Label 0 when
    the filename contains 'cat', 1 otherwise.
    """
    xs = np.zeros((len(names), 2048))
    ys = np.zeros(len(names), dtype=int)
    for idx, name in enumerate(names):
        with open(EMB_DIR + name, 'r') as fh:
            xs[idx] = json.load(fh)
        ys[idx] = (0 if re.match('.*cat.*', name) else 1)
    return xs, ys

x_train, y_train = _load_split(files[:train_size])
x_test, y_test = _load_split(files[train_size:])

# L2-normalize each embedding before fitting the classifier.
x_train = sklearn.preprocessing.normalize(x_train)
x_test = sklearn.preprocessing.normalize(x_test)

model = sklearn.linear_model.LogisticRegression()
model.fit(x_train, y_train)
print('Train accuracy: %.3f' % model.score(x_train, y_train))
print('Test accuracy: %.3f' % model.score(x_test, y_test))

# Report the three test items the model was least confident about,
# i.e. lowest predicted probability for the true class.
test_proba = model.predict_proba(x_test)
probabilities = [(pred[y], f) for f, y, pred in zip(files[train_size:], y_test, test_proba)]
probabilities.sort()
for prob, filename in probabilities[:3]:
    print('%s: %.2f' % (filename, prob))
|
{"hexsha": "b128c62dcc41477cd869efb0daf8071ff53646ff", "size": 1406, "ext": "py", "lang": "Python", "max_stars_repo_path": "module2-consuming-data-from-an-api/training_a_classifier.py", "max_stars_repo_name": "nrvanwyck/DS-Unit-3-Sprint-3-Productization-and-Cloud", "max_stars_repo_head_hexsha": "186a2420ddaf0ca1b229982e05867b88d9c66fd5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "module2-consuming-data-from-an-api/training_a_classifier.py", "max_issues_repo_name": "nrvanwyck/DS-Unit-3-Sprint-3-Productization-and-Cloud", "max_issues_repo_head_hexsha": "186a2420ddaf0ca1b229982e05867b88d9c66fd5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-03-19T03:07:23.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:00:28.000Z", "max_forks_repo_path": "cats_dogs_demo/training_a_classifier.py", "max_forks_repo_name": "mjh09/twitoff", "max_forks_repo_head_hexsha": "e1ae76b4dd436d979c46ee5770aac82b35755b0a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.9545454545, "max_line_length": 92, "alphanum_fraction": 0.7012802276, "include": true, "reason": "import numpy", "num_tokens": 388}
|
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.autograd import Variable
from src.models.lang_model.w2v_averager_model import W2vAveragerModel
from src.models.lang_model.embedding_model import EmbeddingModel
import src.models.time_series.ts_models as ts
import src.models.time_series.latent_to_params as latent_to_params
import numpy as np
import matplotlib.animation as animation
class StripTemporalCF(nn.Module):
"""
Stripped Megamodel class.
Comprises:
--- Always Necessary ---
Language Model String - Either Avg or Embedding. If avg, uses average
word vector features. If embedding, uses a
learned embedding from the item id.
Latent to params hidden dims: A list of the sizes of the hidden units
in the MLP that produces the parameter outputs
Time model string: Either LinRegress or OnlyMLP. LinRegress learns slope and intercept
for each user, question pair. OnlyMLP directly produces
the respones.
User Latent Model - Embedding module
Time Model - Matrix Factorization (no time), Linear Regression, Markov Chain
Latent to Time Series Parameters - MLP for Linear Regression/Markov Chain, Dot Product for Matrix Factorization
--- Optional ---
Metadata Model - Linear layer (optional), only for politifact task
----------------------------------------------------------------------------
The models are composed in the following structure:
question_latent_model ----------------\
\
-> latent_to_time_series_parameters -> time_model
/
user_latent_model --------------------/>
----------------------------------------------------------------------------
"""
def __init__(self, language_model_string,
latent_to_params_hidden_dims, time_model_string,
task, use_metadata, time_ordinal,
language_embed_dim, softmax,
metadata_size, num_users,
user_embed_size, include_q_embed,
question_map_dict, temperature=10):
"""
Builds the components of the TemporalCF. These are:
language_model_string (str): language model
latent_to_params_hidden_dims (list): hidden layer dimension sizes for latent to param
MLP
task (str): whether dataset is 'synthetic', 'politifact' or 'fermi'.
Necessary to use the correct time sequence.
use_metadata (bool): whether to use metadata in the model
softmax (bool): whether to use the softmax in the time series model
time_ordinal: whether the time for the linear regression uses times [0, 1, 2] insted
of the actual numerical times.
include_q_embed: Whether to use an item embedding from the question_id
as well as the metadata information.
question_map_dict: Maps question text to id. Necessary for include_q_embed.
temperature: Temperature to use in the softmax.
"""
super(StripTemporalCF, self).__init__()
self.time_model_string = time_model_string
self.include_q_embed = include_q_embed
################################
######## LANGUAGE MODEL ########
################################
assert language_model_string in ["avg", "embedding"], "language model not implemented"
if include_q_embed:
if language_model_string == 'embedding' and question_map_dict is None:
raise ValueError("if using embeddings, must provide a `question map dict`"
+ "to map each question to an embedding")
elif language_model_string == "avg":
transform_layer = nn.Linear(50, language_embed_dim)
self.question_latent_model = nn.Sequential(W2vAveragerModel(False),
transform_layer)
elif language_model_string == 'embedding':
self.question_latent_model = EmbeddingModel(question_map_dict,
language_embed_dim)
################################
######## METADATA ########
################################
if use_metadata:
self.metadata_latent_model = torch.nn.Linear(metadata_size, language_embed_dim)
self.user_latent_model = torch.nn.Embedding(num_users, user_embed_size)
else:
self.metadata_latent_model = None
self.user_latent_model = torch.nn.Embedding(num_users, user_embed_size)
################################
######## TIME SERIES MODEL ########
################################
if time_model_string == 'LinRegress':
if task == 'politifact':
# times from politifact task
times = np.array([30, 90, 360])
else:
# times from Fermi,
times = np.array([20, 60, 180])
if time_ordinal:
times = np.array([0, 1, 2])
# convert times to FloatTensor, using cuda if necessary
self.ts_model = ts.LinRegress(times, temperature=temperature, softmax=softmax)
elif time_model_string == 'OnlyMLP':
self.ts_model = ts.OnlyMLP(temperature=temperature, softmax=softmax)
################################
######## LATENT TO PARAM ########
################################
latent_dimension = user_embed_size
if use_metadata:
latent_dimension += language_embed_dim
if include_q_embed:
latent_dimension += language_embed_dim
self.latent_to_param = latent_to_params.CatMLP(latent_dimension,
latent_to_params_hidden_dims,
self.ts_model.param_dim)
def forward(self, user_ids, item_texts, metadata_items, return_vals=False):
"""Combines the forwards of the TemporalCF. Follows structure of class docstring."""
user_vals = self.user_latent_model.forward(user_ids)
if self.metadata_latent_model is not None:
meta_vals = self.metadata_latent_model.forward(metadata_items)
if self.include_q_embed:
item_vals = self.question_latent_model.forward(item_texts)
item_vals = torch.cat([item_vals, meta_vals], 1)
else:
item_vals = meta_vals
else:
if self.include_q_embed:
item_vals = self.question_latent_model.forward(item_texts)
else:
item_vals = None
params = self.latent_to_param.forward(
user_vals, item_vals, None, None)
ans = self.ts_model.forward(params)
return ans
def cuda(self):
super(StripTemporalCF, self).cuda()
self.question_latent_model.cuda()
self.ts_model.cuda()
def cpu(self):
super(StripTemporalCF, self).cpu()
self.question_latent_model.cpu()
self.ts_model.cpu()
class CompiledStripTemporalCF:
    """Training/evaluation harness for a StripTemporalCF network.

    fit() trains with a weighted MSE over the observed (non-NaN)
    ratings; validate() reports the unweighted MSE.
    """
    def __init__(self, network, weight, num_epochs, batch_size,
                 optim_params, use_cuda=False,
                 l1_coef=0, optimizer="SGD", cross_entropy=False):
        """
        fit method takes a ContentDataset and fits it for num_epochs
        (passed at initialisation).

        Parameters
        ----------
        network (ContentMF): a network that fits using user_ids and item_texts
        weight: per-time-step loss weights broadcast over the ratings
        num_epochs (int): the number of training epochs
        batch_size (int): the size of each training batch
        optim_params (dict): parameters passed to the optimizer class
        use_cuda (bool): set to True to use the GPU
        l1_coef (float): coefficient of the L1 penalty (0 disables it)
        optimizer (str): name of a torch.optim class, e.g. "SGD"
        cross_entropy (bool): accepted for interface compatibility; unused
        """
        self.batch_size = batch_size
        self.network = network
        self.num_epochs = num_epochs
        self.optim_params = optim_params
        self.loss_fn = nn.MSELoss
        self.use_cuda = use_cuda
        # by default there is no L1 loss, l1_coef = 0
        self.l1_coef = l1_coef
        self.optimizer = optimizer
        self.weight = weight
        self.time_model_string = self.network.time_model_string
        # TODO: probably move use_cuda and dataloader_extract to a Superclass / MixIn
        if use_cuda:
            print('using cuda')
            self.network.cuda()
            self.floattype = torch.cuda.FloatTensor
            self.inttype = torch.cuda.LongTensor
            self.bytetype = torch.cuda.ByteTensor
        else:
            self.floattype = torch.FloatTensor
            self.inttype = torch.LongTensor
            self.bytetype = torch.ByteTensor

    def dataloader_extract(self, sample):
        """Unpack one dataloader sample into typed Variables."""
        ratings = Variable(sample['rating'].type(self.floattype)).squeeze()
        user_ids = Variable(sample['user_id'].type(self.inttype))
        item_ids = Variable(sample['item_id'].type(self.inttype))
        item_metadata = Variable(sample['item_metadata'].type(self.floattype))
        item_text = sample['item_text']
        return ratings, user_ids, item_ids, item_metadata, item_text

    def weighted_mse_loss(self, inputs, targets, weights):
        """Sum (not mean) of weights * squared errors."""
        return torch.sum(weights * (inputs - targets) ** 2)

    def fit(self, train_set, train_sampler, val_set, val_sampler, verbose,
            patience=5000, eps=1e-4, schedule_epochs=None,
            schedule_reductions=None, animate=False):
        """Train with early stopping and a learning-rate schedule.

        schedule_epochs / schedule_reductions: at each listed epoch the
        learning rate of every param group is divided by the matching
        reduction.  Defaults: [50] / [5].

        Returns (train_loss_list, mse_val_loss_list, time_list).
        """
        # Fix: the schedules used to be mutable default arguments that were
        # pop()ed below, so a second fit() call saw empty schedules and
        # caller-supplied lists were destroyed.  Build fresh copies per call.
        if schedule_epochs is None:
            schedule_epochs = [50]
        if schedule_reductions is None:
            schedule_reductions = [5]
        assert len(schedule_epochs) == len(schedule_reductions)
        schedule_epochs = list(schedule_epochs)
        schedule_reductions = list(schedule_reductions)
        import time
        if animate:
            fig, axes, ims = self.set_up_animation()
        self.network.train()
        t0 = time.time()
        data_loader = DataLoader(
            train_set, batch_size=self.batch_size, sampler=train_sampler)
        param_groups = self.get_param_groups()
        opt = getattr(optim, self.optimizer)(param_groups)
        # L1 penalty; reduction='sum' is the modern spelling of the
        # deprecated size_average=False.
        l1_crit = nn.L1Loss(reduction='sum')
        train_loss_list = []
        mse_val_loss_list = []
        time_list = []
        stopping_counter = 0
        min_val_loss = None
        for epoch in range(self.num_epochs):
            epoch_loss = 0
            total_scored = 0.0  # Number of ratings we score on
            # Schedule the reduction in the learning rates
            if len(schedule_epochs) > 0:
                if epoch == schedule_epochs[0]:
                    schedule_epochs.pop(0)
                    reduction = schedule_reductions.pop(0)
                    for p in opt.param_groups:
                        p['lr'] = p['lr'] / reduction
            for i, sample in enumerate(data_loader):
                ratings, user_ids, item_ids, item_metadata, item_text = self.dataloader_extract(
                    sample)
                # Skip degenerate batches.  Fix: the original compared the
                # *bound method* `ratings.size` to torch.Size([]) (always
                # False), so a squeezed 0-dim batch crashed in len() below.
                if ratings.size() == torch.Size([]):
                    continue  # If rating is a singleton tensor
                if len(ratings) < self.batch_size:
                    continue
                opt.zero_grad()
                # We form the prediction as a bs x 3 tensor.  We don't have
                # all the actual responses, so filter out the predictions
                # where the responses are NaN.
                pred = self.network.forward(user_ids, item_text, item_metadata)
                ones = torch.Tensor(torch.ones(ratings.shape))
                weight_matrix = (torch.Tensor(self.weight)*ones).type(self.floattype)
                # Bool mask of observed entries.  Fix: byte masks for
                # masked_select are deprecated/rejected in modern torch;
                # torch.isnan also avoids relying on numpy dispatch.
                response_mask = ~torch.isnan(ratings)
                # Now we need to only update on the provided targets
                masked_ratings = torch.masked_select(ratings, response_mask)
                masked_weights = torch.masked_select(weight_matrix, response_mask)
                masked_preds = torch.masked_select(pred, response_mask)
                loss = self.weighted_mse_loss(masked_preds, masked_ratings, masked_weights)
                # Optional L1 regularization over all parameters.
                reg_loss = 0
                for name, param in self.network.named_parameters():
                    reg_loss += l1_crit(param, torch.zeros_like(param))
                loss += self.l1_coef * reg_loss
                loss.backward()
                opt.step()
                epoch_loss += loss.data
                total_scored += len(masked_ratings)
            tdiff = time.time() - t0
            assert total_scored > 0, "Train set is smaller than batch_size"
            av_train_loss = (
                epoch_loss / total_scored)
            val_outs = self.validate(val_set, val_sampler)
            val_mse_loss, masked_preds, masked_ratings, masked_weights, _ = val_outs
            av_val_loss = val_mse_loss
            if animate:
                fig, axes, ims = self.add_animation_panels(fig, axes, ims,
                                                           train_set,
                                                           train_sampler,
                                                           masked_ratings,
                                                           masked_preds,
                                                           response_mask,
                                                           pred, train_loss_list,
                                                           epoch, mse_val_loss_list)
            # Early stopping
            if min_val_loss is None:
                min_val_loss = av_val_loss
            elif av_val_loss < min_val_loss - eps:
                # Reset counter if we have improved validation loss
                min_val_loss = av_val_loss
                stopping_counter = 0
            else:
                # Increment counter, stopping if we have plateaued
                stopping_counter += 1
                if stopping_counter > patience:
                    print('Stopping at epoch {}'.format(epoch))
                    break
            if verbose:
                print('epoch {0:4d}\ttrain_loss = {1:6.5f}\tval_mse_loss = {2:6.5f}\tElapsed time: {3:8.1f}'.format(
                    epoch, av_train_loss, val_mse_loss, tdiff))
            train_loss_list.append(av_train_loss.item())
            mse_val_loss_list.append(val_mse_loss.item())
            time_list.append(tdiff)
        if animate:
            ani = animation.ArtistAnimation(fig, ims, interval=100, blit=True,
                                            repeat_delay=1000, repeat=True)
            ani.save('../results/test{}.mp4'.format(str(time.time()).split('.')[0]), fps=15)
        return train_loss_list, mse_val_loss_list, time_list

    def _extract_data(self, dataset, sampler):
        """Load the whole (sub)dataset as one batch and unpack it."""
        data_loader = DataLoader(dataset, batch_size=len(dataset), sampler=sampler)
        assert len(data_loader) == 1, 'data loader should have size 1'
        sample = next(data_loader.__iter__())  # dataloader takes one (sub)set of the dataset
        ratings, user_ids, item_ids, item_metadata, item_text = self.dataloader_extract(sample)
        assert ratings.size() != torch.Size([]), 'ratings size empty'
        assert len(ratings) >= self.batch_size, 'not enough ratings compared to batch size'
        return ratings, user_ids, item_ids, item_metadata, item_text

    def _predict(self, user_ids, item_text, item_metadata):
        """Forward pass with the network temporarily in eval mode."""
        self.network.eval()
        pred = self.network.forward(user_ids, item_text, item_metadata)
        self.network.train()
        return pred

    def predict(self, dataset, sampler):
        """Predict responses for a dataset; returns a numpy array.

        Fix: the tensor is moved to the CPU before .numpy() so this
        also works when the model is on the GPU.
        """
        ratings, user_ids, item_ids, item_metadata, item_text = self._extract_data(dataset, sampler)
        pred = self._predict(user_ids, item_text, item_metadata)
        return pred.detach().cpu().numpy()

    def validate(self, dataset, sampler):
        """For validate we assume we are looking at a set of responses with
        only slow entries. We set the weight matrix to the unit so that
        the validation mse is the actual MSE (as opposed to the train,
        which can be different due to the weighting of different times).

        Returns (avg_mse, masked_preds, masked_ratings, masked_weights,
        per-item diagnostics list)."""
        ratings, user_ids, item_ids, item_metadata, item_text = self._extract_data(dataset, sampler)
        pred = self._predict(user_ids, item_text, item_metadata)
        weight_matrix = torch.Tensor(torch.ones(ratings.shape)).type(self.floattype)
        # Bool mask of observed (non-NaN) entries; see fit() for rationale.
        response_mask = ~torch.isnan(ratings)
        masked_ratings = torch.masked_select(ratings, response_mask)
        masked_preds = torch.masked_select(pred, response_mask)
        masked_weights = torch.masked_select(weight_matrix, response_mask)
        val_mse_loss = self.weighted_mse_loss(masked_preds, masked_ratings, masked_weights)
        av_val_mse_loss = val_mse_loss.data / len(masked_ratings)
        # Per-item diagnostics.  NOTE(review): `mse` is unmasked while the
        # other columns are masked, so zip truncates when entries are
        # missing -- preserved as-is; verify against callers before changing.
        mse = ((pred - ratings)**2)
        zipped_responses = zip(mse.cpu().data.numpy(),
                               masked_preds.cpu().data.numpy(),
                               masked_ratings.cpu().data.numpy(),
                               user_ids.cpu().data.numpy(),
                               item_ids.cpu().data.numpy(),
                               item_text,
                               item_metadata)
        return av_val_mse_loss, masked_preds, masked_ratings, masked_weights, list(zipped_responses)

    def set_up_animation(self):
        """Create the figure/axes grid used by add_animation_panels."""
        import matplotlib.pyplot as plt
        fig, axes = plt.subplots(3, 5, figsize=(40, 20))
        axes[0, 0].set_xlim([-0.1, 1.1])
        axes[0, 0].set_ylim([-0.1, 1.1])
        axes[0, 1].set_xlim([-0.1, 1.1])
        axes[0, 1].set_ylim([-0.1, 1.1])
        axes[1, 4].set_ylim([0.09, 0.13])
        ims = []
        return fig, axes, ims

    def get_param_groups(self, remove_lang_weight_decay=True):
        """Returns a list of parameter-group dictionaries, one per
        direct child of the network.

        remove_lang_weight_decay is currently unused: every group gets
        whatever optim_params specifies.  Kept for interface
        compatibility.
        """
        param_groups_list = []
        for sub_model in self.network.children():
            params = filter(lambda p: p.requires_grad,
                            sub_model.parameters())
            optim_params = self.optim_params.copy()
            optim_params['params'] = params
            param_groups_list.append(optim_params)
        return param_groups_list

    def add_animation_panels(self, fig, axes, ims, train_set,
                             train_sampler, masked_ratings,
                             masked_preds, response_mask, pred,
                             train_loss_list, epoch, mse_val_loss_list):
        """Append one frame of diagnostic panels to `ims`."""
        # Fix: validate() returns five values (loss, preds, ratings,
        # weights, diagnostics); the original unpacked six and crashed.
        _, train_masked_preds, train_masked_ratings, _, _ = self.validate(train_set, train_sampler)
        im1 = axes[0, 0].scatter(train_masked_ratings.data, train_masked_preds.data, c='b', alpha=0.1)
        im2 = axes[0, 1].scatter(masked_ratings.data, masked_preds.data, c='b', alpha=0.5)
        metadata_weights = self.network.metadata_latent_model.weight.detach().numpy()[0]
        im3 = axes[0, 2].hist(metadata_weights, 20, color='C1', edgecolor='k', linewidth=1)
        axes[0, 2].set_xlabel('Metadata Weights')
        user_weights = self.network.user_latent_model.weight.detach().numpy()
        im4 = axes[0, 3].hist(user_weights, np.linspace(-2, 2, 21), color='C2', linewidth=1, edgecolor='k')
        axes[0, 3].set_xlabel('User Weights')
        latent_to_param_weights_1 = self.network.latent_to_param.net.layers[0].weight.detach().numpy().flatten()
        latent_to_param_weights_2 = self.network.latent_to_param.net.layers[1].weight.detach().numpy().flatten()
        latent_to_param_weights_3_0 = self.network.latent_to_param.net.layers[2].weight[0, :].detach().numpy().flatten()
        latent_to_param_weights_3_1 = self.network.latent_to_param.net.layers[2].weight[1, :].detach().numpy().flatten()
        im5 = axes[0, 4].hist(latent_to_param_weights_1, 10, color='C3', linewidth=1, edgecolor='k')
        axes[0, 4].set_xlabel('Latent to Param Weights Layer 1')
        im6 = axes[1, 0].hist(latent_to_param_weights_2, 10, color='C3', linewidth=1, edgecolor='k')
        axes[1, 0].set_xlabel('Latent to Param Weights Layer 2')
        im7 = axes[1, 1].hist(latent_to_param_weights_3_0, 10, color='C3', linewidth=1, edgecolor='k')
        axes[1, 1].set_xlabel('Latent to Param Weights Layer 3 to Slope')
        im8 = axes[1, 2].hist(latent_to_param_weights_3_1, 10, color='C3', linewidth=1, edgecolor='k')
        axes[1, 2].set_xlabel('Latent to Param Weights Layer 3 to Intercept')
        im9 = []
        for index, response_row in enumerate(response_mask):
            if response_row[0] == 1 and response_row[1] == 1 and response_row[2] == 1:
                # Only plot for full rows:
                line = axes[1, 3].plot([0, 1, 2], pred[index, :].detach().numpy(),
                                       c='k', alpha=0.2)
                im9.append(line[0])
        im10 = []
        if len(train_loss_list) > 0:
            line2 = axes[1, 4].plot(range(epoch), train_loss_list, c='b')
            line20 = axes[1, 4].scatter([epoch-1], train_loss_list[-1], c='b', s=15)
            line3 = axes[1, 4].plot(range(epoch), mse_val_loss_list, c='r')
            line30 = axes[1, 4].scatter([epoch-1], mse_val_loss_list[-1], c='r', s=15)
            im10.extend([line2[0], line3[0], line20, line30])
        to_list = [im1, im2]
        to_list.extend(im3[2])
        to_list.extend(im4[2])
        to_list.extend(im5[2])
        to_list.extend(im6[2])
        to_list.extend(im7[2])
        to_list.extend(im8[2])
        to_list.extend(im9)
        to_list.extend(im10)
        ims.append(to_list)
        return fig, axes, ims
|
{"hexsha": "44f54c4d3ea788c108f41fab41d22180e0d12c39", "size": 22519, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/models/content_aware/strip_temporal_cf.py", "max_stars_repo_name": "oughtinc/psj", "max_stars_repo_head_hexsha": "e7c5e987039ce7978234e137167991a61371604b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2018-07-16T23:01:40.000Z", "max_stars_repo_stars_event_max_datetime": "2019-08-18T14:49:06.000Z", "max_issues_repo_path": "src/models/content_aware/strip_temporal_cf.py", "max_issues_repo_name": "oughtinc/psj", "max_issues_repo_head_hexsha": "e7c5e987039ce7978234e137167991a61371604b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-07-09T17:33:52.000Z", "max_issues_repo_issues_event_max_datetime": "2018-07-09T17:33:52.000Z", "max_forks_repo_path": "src/models/content_aware/strip_temporal_cf.py", "max_forks_repo_name": "oughtinc/psj", "max_forks_repo_head_hexsha": "e7c5e987039ce7978234e137167991a61371604b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.128256513, "max_line_length": 120, "alphanum_fraction": 0.5817753897, "include": true, "reason": "import numpy", "num_tokens": 4751}
|
#-*-coding: utf-8 -*-
# Arithmetic operations on images - blending two images.
import numpy as np
import cv2

# The two images to blend; they must have the same dimensions.
base = cv2.imread('resimler/cameraman.tif')
overlay = cv2.imread('resimler/text.tif')

# Weighted blend following g(x) = (1 - a) * f0(x) + a * f1(x),
# i.e. dst = alpha * img1 + beta * img2 + gamma.
blended = cv2.addWeighted(base, 0.3, overlay, 0.7, 0)

cv2.imshow('birsestir', blended)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
{"hexsha": "1bc34249d85da80faa6d6efd0d8aef3ee70e04c3", "size": 561, "ext": "py", "lang": "Python", "max_stars_repo_path": "Goruntu Isleme/Beginning/ornk12.py", "max_stars_repo_name": "NevzatBOL/Paket-Kurulumlar-", "max_stars_repo_head_hexsha": "f5ce3b8205b11d072b9dadd305c11c278f184388", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 38, "max_stars_repo_stars_event_min_datetime": "2017-11-12T20:26:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-16T08:14:25.000Z", "max_issues_repo_path": "Goruntu Isleme/Beginning/ornk12.py", "max_issues_repo_name": "NevzatBOL/Paket-Kurulumlar-", "max_issues_repo_head_hexsha": "f5ce3b8205b11d072b9dadd305c11c278f184388", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-05-02T08:22:26.000Z", "max_issues_repo_issues_event_max_datetime": "2019-08-19T12:43:05.000Z", "max_forks_repo_path": "Goruntu Isleme/Beginning/ornk12.py", "max_forks_repo_name": "NevzatBOL/Paket-Kurulumlar-", "max_forks_repo_head_hexsha": "f5ce3b8205b11d072b9dadd305c11c278f184388", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 24, "max_forks_repo_forks_event_min_datetime": "2018-02-01T18:21:27.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-15T06:48:47.000Z", "avg_line_length": 29.5263157895, "max_line_length": 61, "alphanum_fraction": 0.7450980392, "include": true, "reason": "import numpy", "num_tokens": 208}
|
\chapter*{Introduction}
\addcontentsline{toc}{section}{Introduction}
\chaptermark{Introduction}
\pagenumbering{arabic}
% ! TODO: Delete this line
\lipsum[1-3]
Random citation \cite{DUMMY:1} embedded in text.
\lipsum[4-6]
|
{"hexsha": "e4434d7cc7bca22bb18871660f8b9bdd3b9d3777", "size": 222, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "chapters/03.Introduction.tex", "max_stars_repo_name": "MECHEDDAL-Hani/Thesis-template", "max_stars_repo_head_hexsha": "dbb83c1686b0be1cc56419a07f81effcad5d6ec6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2022-03-02T16:34:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-14T13:07:30.000Z", "max_issues_repo_path": "chapters/03.Introduction.tex", "max_issues_repo_name": "MECHEDDAL-Hani/Thesis-template", "max_issues_repo_head_hexsha": "dbb83c1686b0be1cc56419a07f81effcad5d6ec6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chapters/03.Introduction.tex", "max_forks_repo_name": "MECHEDDAL-Hani/Thesis-template", "max_forks_repo_head_hexsha": "dbb83c1686b0be1cc56419a07f81effcad5d6ec6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-08T11:52:05.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T11:52:05.000Z", "avg_line_length": 24.6666666667, "max_line_length": 49, "alphanum_fraction": 0.7702702703, "num_tokens": 71}
|
import time
import numpy as np
import collections
import so3g
class Timer():
    """Context manager measuring wall-clock time since construction.

    On block exit it prints the elapsed seconds; report() returns the
    elapsed time at any point without printing.
    """
    def __init__(self, block_name=None):
        # block_name is accepted for call-site readability but unused.
        self.t0 = time.time()
    def __enter__(self):
        return self
    def report(self):
        """Seconds elapsed since construction."""
        return time.time() - self.t0
    def __exit__(self, exc_type, exc_value, exc_traceback):
        elapsed = time.time() - self.t0
        print('Timer block exits after %.6f seconds' % elapsed)
def Qmul(q1, q2, *args):
    """Hamilton product of quaternions stored as (..., 4) arrays in
    (w, x, y, z) order.

    Extra arguments are folded in left to right, so
    Qmul(a, b, c) == Qmul(Qmul(a, b), c).
    """
    w1, v1 = q1[..., :1], q1[..., 1:]
    w2, v2 = q2[..., :1], q2[..., 1:]
    prod = w1 * q2
    prod[..., 0] -= (v1 * v2).sum(axis=-1)
    prod[..., 1:] += v1 * w2
    prod[..., 1:] += np.cross(v1, v2)
    for extra in args:
        prod = Qmul(prod, extra)
    return prod
def Qroti(n, phi):
    """
    Euler quaternion for a rotation by angle phi about coordinate axis
    n (0, 1 or 2).

    phi may be a scalar or array; the result has shape
    phi.shape + (4,) in (w, x, y, z) order.
    """
    half_angle = np.asarray(phi) / 2
    quat = np.zeros(half_angle.shape + (4,))
    quat[..., 0] = np.cos(half_angle)
    quat[..., n + 1] = np.sin(half_angle)
    return quat
# Maps the short projection-system code (used in this module's
# `coord_sys` / `system` arguments) to the capitalized spelling
# embedded in so3g's ProjEng_* class names.  An OrderedDict keeps
# iteration order deterministic.
proj_dict = collections.OrderedDict([
    ('flat', 'Flat'),
    ('car' , 'CAR'),
    ('cea' , 'CEA'),
    ('arc' , 'ARC'),
    ('tan' , 'TAN'),
    ('zea' , 'ZEA'),
])
def get_proj(coord_sys, pol_sys, pxz=None, tiled=False):
    """Look up the so3g projection engine class for the given coordinate
    system and polarization components.  Returns the class itself when
    ``pxz`` is None, otherwise an instance constructed with ``pxz``.
    """
    assert pol_sys in ['T', 'TQU', 'QU']
    suffix = '_Tiled' if tiled else '_NonTiled'
    class_name = f'ProjEng_{proj_dict[coord_sys]}_{pol_sys}{suffix}'
    engine_cls = getattr(so3g, class_name)
    return engine_cls if pxz is None else engine_cls(pxz)
def get_proj_precomp(tiled=False):
    """Instantiate the so3g precomputed-pointing projection engine."""
    suffix = '_Tiled' if tiled else '_NonTiled'
    return getattr(so3g, f'ProjEng_Precomp{suffix}')()
def get_boresight_quat(system, x, y, gamma=None):
    """Return per-sample boresight pointing for the named projection.

    For 'flat' the result is an (n, 4) array of
    (x, y, cos(gamma), sin(gamma)); for the curved systems it is a
    rotation quaternion built from Qroti/Qmul.  ``gamma`` defaults to 0.
    Raises ValueError for an unrecognized system.
    """
    if gamma is None:
        gamma = 0
    if system == 'flat':
        # At each time step, boresight is (x, y, cos(phi), sin(phi)).
        ptg = np.zeros((len(x), 4))
        ptg[..., 0] = x
        ptg[..., 1] = y
        ptg[..., 2] = np.cos(gamma)
        ptg[..., 3] = np.sin(gamma)
        return ptg
    if system in ['car', 'cea']:
        # boresight needs to point to equinox...
        return Qmul(Qroti(2, x),
                    Qroti(1, np.pi/2 - y),
                    Qroti(2, np.pi - gamma))
    if system in ['arc', 'tan', 'zea']:
        # boresight needs to point to pole...
        return Qmul(Qroti(1, y),
                    Qroti(0, x),
                    Qroti(2, gamma))
    raise ValueError('Unknown system: "%s"' % system)
def get_offsets_quat(system, dx, dy, polphi):
    """Per-detector offsets: a flat (n, 4) array of
    (dx, dy, cos(polphi), sin(polphi)) for 'flat', otherwise a rotation
    quaternion built from Qroti/Qmul.
    """
    if system == 'flat':
        return np.transpose([dx, dy, np.cos(polphi), np.sin(polphi)])
    return Qmul(Qroti(0, dx),
                Qroti(1, -dy),
                Qroti(2, polphi))
def linalg_pinv(wmap):
    """Pseudo-inverse of a matrix or a stack of matrices.

    np.linalg.pinv only broadcasts over leading dimensions for
    numpy >= 1.14 (Jan 2018); on older numpy it raises ValueError, in
    which case we fall back to an explicit loop over the first two axes.
    """
    try:
        return np.linalg.pinv(wmap)
    except ValueError:
        result = np.empty(wmap.shape, wmap.dtype)
        for i in range(wmap.shape[0]):
            for j in range(wmap.shape[1]):
                result[i, j] = np.linalg.pinv(wmap[i, j])
        return result
|
{"hexsha": "742c6eabf7f689ee568ae3f1e6866b585334f6f9", "size": 3336, "ext": "py", "lang": "Python", "max_stars_repo_path": "demos/test_utils.py", "max_stars_repo_name": "tskisner/so3g", "max_stars_repo_head_hexsha": "75c1d8dea84f862bdd2c9fa2c2f9d1c5b8da5eec", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2019-09-02T14:17:31.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-21T16:43:14.000Z", "max_issues_repo_path": "demos/test_utils.py", "max_issues_repo_name": "tskisner/so3g", "max_issues_repo_head_hexsha": "75c1d8dea84f862bdd2c9fa2c2f9d1c5b8da5eec", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 70, "max_issues_repo_issues_event_min_datetime": "2019-05-16T23:42:40.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-23T14:35:35.000Z", "max_forks_repo_path": "demos/test_utils.py", "max_forks_repo_name": "tskisner/so3g", "max_forks_repo_head_hexsha": "75c1d8dea84f862bdd2c9fa2c2f9d1c5b8da5eec", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-05-17T18:20:33.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-22T20:35:44.000Z", "avg_line_length": 28.2711864407, "max_line_length": 79, "alphanum_fraction": 0.5263788969, "include": true, "reason": "import numpy", "num_tokens": 1063}
|
"""Data Augmentaion for scaling n.i.O images from powertrain"""
import argparse
from operator import index
import os, cv2, random
import numpy as np
import pandas as pd
import scipy
import tensorflow as tf
import glob
import PIL
from keras_preprocessing.image import ImageDataGenerator, img_to_array, load_img
# ==============================================================================
# = param =
# ==============================================================================
def augmentation(dataset):
    """Augment every .jpg/.png image found directly in ``dataset``.

    One augmented copy is produced per source image and written back
    into the same directory as ``aug_<index>_*.png``.

    Args:
        dataset: directory containing the images to augment.
    """
    source_dir = dataset
    output_dir = dataset
    # Keras generator performing the random transformations.
    datagen = ImageDataGenerator(
        rotation_range=20, width_shift_range=0.2,
        height_shift_range=0.2, rescale=1./255,
        shear_range=0.2, zoom_range=0.2,
        horizontal_flip=True, fill_mode='nearest')
    images = glob.glob(os.path.join(source_dir, '*.jpg')) \
        + glob.glob(os.path.join(source_dir, '*.png'))
    copies_per_image = 1
    total = len(images) * copies_per_image
    for index, path in enumerate(images):
        img = tf.keras.preprocessing.image.load_img(path)
        arr = tf.keras.preprocessing.image.img_to_array(img)
        # Add a leading batch axis: (1, height, width, channels).
        arr = arr.reshape((1,) + arr.shape)
        produced = 0
        # datagen.flow loops forever; stop once enough copies were saved.
        for _ in datagen.flow(arr, batch_size = 1,
                              save_to_dir = output_dir,
                              save_prefix = "aug_%s" %(index),
                              save_format = 'png'):
            produced += 1
            if produced >= copies_per_image:
                break
    print("Successfully created %s augmented Images" % total)
def data_augmentation(dataset, factor):
    """Run image augmentation on ``dataset`` and report the outcome.

    Bug fix: the previous version ignored both of its arguments and
    re-parsed ``sys.argv`` via argparse, so the ``dataset`` supplied by
    the caller was silently discarded (and calls from contexts with
    unrelated command-line arguments could fail).  The explicit argument
    is now honored.

    Args:
        dataset: directory whose images will be augmented.
        factor: currently unused; kept for interface compatibility.
    """
    try:
        augmentation(dataset)
    except FileNotFoundError:
        print("Wrong file/folder path")
    else:
        print("augmentation.py was successful")
|
{"hexsha": "635c1be42f077c875f3954a421bff6eda46db309", "size": 2777, "ext": "py", "lang": "Python", "max_stars_repo_path": "augmentation.py", "max_stars_repo_name": "molu1019/CycleGAN-Tensorflow-2", "max_stars_repo_head_hexsha": "69e51007718b76595313b24ed1fb7c3ee5ea346c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "augmentation.py", "max_issues_repo_name": "molu1019/CycleGAN-Tensorflow-2", "max_issues_repo_head_hexsha": "69e51007718b76595313b24ed1fb7c3ee5ea346c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "augmentation.py", "max_forks_repo_name": "molu1019/CycleGAN-Tensorflow-2", "max_forks_repo_head_hexsha": "69e51007718b76595313b24ed1fb7c3ee5ea346c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.0649350649, "max_line_length": 116, "alphanum_fraction": 0.5866042492, "include": true, "reason": "import numpy,import scipy", "num_tokens": 585}
|
/**
* Copyright (c) 2020 libnuls developers (see AUTHORS)
*
* This file is part of libnuls.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef LIBNULS_SYSTEM_CHAIN_POINT_HPP
#define LIBNULS_SYSTEM_CHAIN_POINT_HPP
#include <cstdint>
#include <istream>
#include <string>
#include <vector>
#include <boost/functional/hash.hpp>
#include <nuls/system/define.hpp>
#include <nuls/system/math/hash.hpp>
#include <nuls/system/utility/data.hpp>
#include <nuls/system/utility/reader.hpp>
#include <nuls/system/utility/writer.hpp>
namespace libnuls {
namespace system {
namespace chain {
/// A transaction outpoint: a transaction hash plus the zero-based index
/// of one of that transaction's outputs.
class BC_API point
{
public:
    /// This is a sentinel used in .index to indicate no output, e.g. coinbase.
    /// This value is serialized and defined by consensus, not implementation.
    static const uint32_t null_index;
    typedef std::vector<point> list;
    typedef std::vector<uint32_t> indexes;
    // Constructors.
    //-------------------------------------------------------------------------
    /// Default construction; NOTE(review): presumably yields an invalid
    /// point (see is_valid) -- confirm against point.cpp.
    point();
    point(point&& other);
    point(const point& other);
    point(hash_digest&& hash, uint32_t index);
    point(const hash_digest& hash, uint32_t index);
    // Operators.
    //-------------------------------------------------------------------------
    /// This class is move assignable and copy assignable.
    point& operator=(point&& other);
    point& operator=(const point& other);
    bool operator<(const point& other) const;
    bool operator==(const point& other) const;
    bool operator!=(const point& other) const;
    // Deserialization.
    //-------------------------------------------------------------------------
    /// Static factories and from_data overloads accept raw bytes, a stream,
    /// or a reader; `wire` selects the serialization layout.
    static point factory(const data_chunk& data, bool wire=true);
    static point factory(std::istream& stream, bool wire=true);
    static point factory(reader& source, bool wire=true);
    bool from_data(const data_chunk& data, bool wire=true);
    bool from_data(std::istream& stream, bool wire=true);
    bool from_data(reader& source, bool wire=true);
    bool is_valid() const;
    // Serialization.
    //-------------------------------------------------------------------------
    data_chunk to_data(bool wire=true) const;
    void to_data(std::ostream& stream, bool wire=true) const;
    void to_data(writer& sink, bool wire=true) const;
    // Properties (size, accessors, cache).
    //-------------------------------------------------------------------------
    static size_t satoshi_fixed_size(bool wire=true);
    size_t serialized_size(bool wire=true) const;
    const hash_digest& hash() const;
    void set_hash(hash_digest&& value);
    void set_hash(const hash_digest& value);
    uint32_t index() const;
    void set_index(uint32_t value);
    // Utilities.
    //-------------------------------------------------------------------------
    /// This is for client-server, not related to consensus or p2p networking.
    uint64_t checksum() const;
    // Validation.
    //-------------------------------------------------------------------------
    /// True if this point's index is the null_index sentinel.
    /// NOTE(review): inferred from the sentinel's documented use -- confirm.
    bool is_null() const;
protected:
    // Validity-aware constructors for use by deserialization paths.
    point(hash_digest&& hash, uint32_t index, bool valid);
    point(const hash_digest& hash, uint32_t index, bool valid);
    void reset();
private:
    hash_digest hash_;
    uint32_t index_;
    bool valid_;
};
} // namespace chain
} // namespace system
} // namespace libnuls
#endif
|
{"hexsha": "ff2e48e5313c8d4f9b9f6d9f01aa249d5352de72", "size": 3989, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/nuls/system/chain/point.hpp", "max_stars_repo_name": "ccccbjcn/nuls-v2-cplusplus-sdk", "max_stars_repo_head_hexsha": "3d5a76452fe0673eba490b26e5a95fea3d5788df", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2020-04-26T07:32:52.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-26T07:32:52.000Z", "max_issues_repo_path": "include/nuls/system/chain/point.hpp", "max_issues_repo_name": "CCC-NULS/nuls-cplusplus-sdk", "max_issues_repo_head_hexsha": "3d5a76452fe0673eba490b26e5a95fea3d5788df", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/nuls/system/chain/point.hpp", "max_forks_repo_name": "CCC-NULS/nuls-cplusplus-sdk", "max_forks_repo_head_hexsha": "3d5a76452fe0673eba490b26e5a95fea3d5788df", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.9224806202, "max_line_length": 79, "alphanum_fraction": 0.6104286789, "num_tokens": 812}
|
from __future__ import print_function
import numpy as np
import theano
import theano.tensor as T
import lasagne
# Load the training corpus.  The decode/encode round-trip strips a UTF-8
# BOM if present.  NOTE(review): str.decode (and xrange in main) mean this
# module targets Python 2 -- confirm before porting.
try:
    input_text = open("shakespeare_input.txt", "r").read()
    input_text = input_text.decode("utf-8-sig").encode("utf-8")
except Exception as e:
    raise IOError("Couldn't read input file")
#Based on training input, predict what follows:
generation_phrase = "First Citizen:\nBefore we proceed any further, hear me speak."
# Character-level vocabulary and both directions of the char <-> index map.
vocabulary = list(set(input_text))
input_size = len(input_text)
vocabulary_size = len(vocabulary)
character_to_ix = {char:i for i, char in enumerate(vocabulary)}
ix_to_character = {i:char for i, char in enumerate(vocabulary)}
# Fix the RNG seed so weight initialisation is reproducible.
lasagne.random.set_rng(np.random.RandomState(1))
#Constants. Constants everywhere.
SEQUENCE_SIZE = 20  #Length of the character window fed to the network.
HIDDEN_SIZE = 512 #Amount of units in the two LSTM layers
LEARNING_RATE = 0.01
GRADIENT_CLAMP = 100 #Remove gradients above this number.
PRINT_INTERVAL = 1 #How often to check output.
EPOCHS = 50 #Number of epochs to train network.
BATCH_SIZE = 128
def generate_data(p, batch_size=BATCH_SIZE, data=input_text, pass_target=True):
    """Build one batch of one-hot encoded character windows.

    Sample ``n`` covers data[p + n : p + n + SEQUENCE_SIZE]; when
    ``pass_target`` is truthy, y[n] holds the vocabulary index of the
    character that immediately follows that window.
    """
    x = np.zeros((batch_size, SEQUENCE_SIZE, vocabulary_size))
    y = np.zeros(batch_size)
    for sample in range(batch_size):
        start = p + sample
        for offset in range(SEQUENCE_SIZE):
            x[sample, offset, character_to_ix[data[start + offset]]] = 1
        if pass_target:
            y[sample] = character_to_ix[data[start + SEQUENCE_SIZE]]
    return x, np.array(y, dtype="int32")
def main(epochs=EPOCHS):
    """Build a two-layer LSTM character model, train it on the corpus and
    periodically print a generated sample seeded with generation_phrase.
    NOTE(review): Python 2 only (xrange, integer division below).
    """
    print("Now building network ...")
    #Build the network, starting at input layer.
    #Recurrent layers need input of shape:
    #(batch_size, SEQUENCE_SIZE, number of feature)
    layer_input = lasagne.layers.InputLayer(shape=(None, None, vocabulary_size))
    #Build Long Short Term Memory layer taking "layer_input" as first input.
    #Clamp the gradient to avoid the problem of exploding gradients.
    #Clamping/Clipping is defined by the "GRADIENT_CLAMP" ...
    layer_forward_01 = lasagne.layers.LSTMLayer(
        layer_input, HIDDEN_SIZE, grad_clipping=GRADIENT_CLAMP,
        nonlinearity=lasagne.nonlinearities.tanh)
    layer_forward_02 = lasagne.layers.LSTMLayer(
        layer_forward_01, HIDDEN_SIZE, grad_clipping=GRADIENT_CLAMP,
        nonlinearity=lasagne.nonlinearities.tanh)
    #The layer_forward creates output of dimension:
    #(batch_size, SEQUENCE_SIZE, HIDDEN_SIZE)
    #We care only about the final prediction, so we
    #isolate that quantity and feed it to the next layer.
    #Output of the sliced layer will be of dimension:
    #(batch_size, vocabulary_size)
    layer_forward_slice = lasagne.layers.SliceLayer(layer_forward_02, -1, 1)
    #The sliced output is parsed through softmax function to create
    #probability distribution
    layer_output = lasagne.layers.DenseLayer(
        layer_forward_slice, num_units=vocabulary_size,
        W=lasagne.init.Normal(), nonlinearity=lasagne.nonlinearities.softmax)
    target_values = T.ivector("target_output")
    network_output = lasagne.layers.get_output(layer_output)
    cost = T.nnet.categorical_crossentropy(network_output, target_values).mean()
    #Receive all trainable parameters from the network.
    all_parameters = lasagne.layers.get_all_params(layer_output, trainable=True)
    #Compute AdaGrad updates for training.
    print("Computing updates ...")
    updates = lasagne.updates.adagrad(cost, all_parameters, LEARNING_RATE)
    print("Compiling functions ...")
    train = theano.function([layer_input.input_var, target_values], cost, updates=updates, allow_input_downcast=True)
    compute_cost = theano.function([layer_input.input_var, target_values], cost, allow_input_downcast=True)
    probs = theano.function([layer_input.input_var], network_output, allow_input_downcast=True)
    def try_stuff(N=200):
        """Greedily generate N characters, sliding the one-hot window."""
        assert(len(generation_phrase)>=SEQUENCE_SIZE)
        sample_ix = []
        x,_ = generate_data(len(generation_phrase) - SEQUENCE_SIZE, 1, generation_phrase,0)
        for i in range(N):
            # Pick the character that got assigned the highest probability
            ix = np.argmax(probs(x).ravel())
            # Alternatively, to sample from the distribution instead:
            # ix = np.random.choice(np.arange(vocab_size), p=probs(x).ravel())
            sample_ix.append(ix)
            # Shift the window left by one and append the new character.
            x[:, 0:SEQUENCE_SIZE - 1,:] = x[:, 1:, :]
            x[:, SEQUENCE_SIZE - 1,:] = 0
            x[0, SEQUENCE_SIZE - 1, sample_ix[-1]] = 1.
        random_snippet = generation_phrase + "".join(ix_to_character[ix] for ix in sample_ix)
        print("----\n %s \n----" % random_snippet)
    print("Training ...")
    print("Seed for generation is: %s" % generation_phrase)
    p = 0
    try:
        for it in xrange(input_size * epochs / BATCH_SIZE):
            try_stuff() #Generate text using p^th character as the start.
            average_cost = 0;
            for _ in range(PRINT_INTERVAL):
                x, y = generate_data(p)
                p += SEQUENCE_SIZE + BATCH_SIZE - 1
                # Wrap back to the start once the corpus is exhausted.
                if(p + BATCH_SIZE + SEQUENCE_SIZE >= input_size):
                    print("Carriage return")
                    p = 0
                average_cost += train(x, y)
            # NOTE(review): the printed "Epoch" value is a fraction of an
            # epoch (data consumed / corpus size), not an integer count.
            print("Epoch {} average loss = {}".format(it * 1.0 * PRINT_INTERVAL / input_size * BATCH_SIZE, average_cost / PRINT_INTERVAL))
    except KeyboardInterrupt:
        pass
if __name__ == "__main__":
main()
|
{"hexsha": "6743f057dc07be234f855ee223216b5ecd6df3cf", "size": 5587, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/Experiments/Shakespeare/shakespeare.py", "max_stars_repo_name": "matthijsvk/convNets", "max_stars_repo_head_hexsha": "7e65db7857a4e6abfbcab264953eb7741319de6c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 53, "max_stars_repo_stars_event_min_datetime": "2017-04-18T10:06:20.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-29T21:26:07.000Z", "max_issues_repo_path": "code/Experiments/Shakespeare/shakespeare.py", "max_issues_repo_name": "matthijsvk/convNets", "max_issues_repo_head_hexsha": "7e65db7857a4e6abfbcab264953eb7741319de6c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/Experiments/Shakespeare/shakespeare.py", "max_forks_repo_name": "matthijsvk/convNets", "max_forks_repo_head_hexsha": "7e65db7857a4e6abfbcab264953eb7741319de6c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 20, "max_forks_repo_forks_event_min_datetime": "2017-05-03T03:27:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T07:07:45.000Z", "avg_line_length": 42.9769230769, "max_line_length": 139, "alphanum_fraction": 0.6703060677, "include": true, "reason": "import numpy,import theano", "num_tokens": 1315}
|
import unittest
import matplotlib.pyplot as plt
import numpy as np
import GooseMPL as gplt
class Test_ticks(unittest.TestCase):
    """
    Functions generating ticks.

    Expected values below are exact fixtures for GooseMPL's log-tick
    helpers; decade ticks are labelled with LaTeX powers of ten.
    """
    def test_log_ticks(self):
        # Decades 10^0 .. 10^3; `keep` blanks all labels except the listed ones.
        ticks, labels = gplt.log_ticks((0, 3))
        self.assertEqual(list(ticks), [1, 10, 100, 1000])
        self.assertEqual(labels, [r"$10^{0}$", r"$10^{1}$", r"$10^{2}$", r"$10^{3}$"])
        ticks, labels = gplt.log_ticks((0, 3), keep=[0, -1])
        self.assertEqual(list(ticks), [1, 10, 100, 1000])
        self.assertEqual(labels, [r"$10^{0}$", "", "", r"$10^{3}$"])
    def test_log_ticks_plot(self):
        # Axis-reading variants: ticks are derived from the current limits.
        fig, ax = plt.subplots()
        ax.set_xlim([1, 1000])
        ax.set_ylim([10, 1000])
        ticks, labels = gplt.log_xticks()
        self.assertEqual(list(ticks), [1, 10, 100, 1000])
        self.assertEqual(labels, [r"$10^{0}$", r"$10^{1}$", r"$10^{2}$", r"$10^{3}$"])
        ticks, labels = gplt.log_yticks()
        self.assertEqual(list(ticks), [10, 100, 1000])
        self.assertEqual(labels, [r"$10^{1}$", r"$10^{2}$", r"$10^{3}$"])
        plt.close(fig)
    def test_log_minorticks(self):
        # Minor ticks within one decade: 2..9.
        ticks, labels = gplt.log_minorticks((1, 10))
        self.assertEqual(list(ticks), [2, 3, 4, 5, 6, 7, 8, 9])
        self.assertEqual(labels, ["2", "3", "4", "5", "6", "7", "8", "9"])
    def test_log_minorticks_plot(self):
        # Minor ticks across several decades, clipped to the axis limits.
        fig, ax = plt.subplots()
        ax.set_xlim([0.1, 10])
        ax.set_ylim([0.01, 0.7])
        ticks, labels = gplt.log_minorxticks()
        self.assertEqual(
            list(ticks), [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 2, 3, 4, 5, 6, 7, 8, 9]
        )
        self.assertEqual(
            labels,
            [
                "0.2",
                "0.3",
                "0.4",
                "0.5",
                "0.6",
                "0.7",
                "0.8",
                "0.9",
                "2",
                "3",
                "4",
                "5",
                "6",
                "7",
                "8",
                "9",
            ],
        )
        ticks, labels = gplt.log_minoryticks()
        self.assertTrue(
            np.allclose(
                ticks,
                [
                    0.02,
                    0.03,
                    0.04,
                    0.05,
                    0.06,
                    0.07,
                    0.08,
                    0.09,
                    0.2,
                    0.3,
                    0.4,
                    0.5,
                    0.6,
                    0.7,
                ],
            )
        )
        self.assertEqual(
            labels,
            [
                "0.02",
                "0.03",
                "0.04",
                "0.05",
                "0.06",
                "0.07",
                "0.08",
                "0.09",
                "0.2",
                "0.3",
                "0.4",
                "0.5",
                "0.6",
                "0.7",
            ],
        )
        plt.close(fig)
class Test_fit_powerlaw(unittest.TestCase):
    """
    Fit a powerlaw y = c * x ** b: recover both parameters, or one while
    the other is pinned.
    """
    def test_prefactor_exponent(self):
        xdata = np.linspace(0, 1, 1000)
        ydata = 1.2 * xdata**3.4
        c, b, _ = gplt.fit_powerlaw(xdata, ydata)
        self.assertTrue(np.isclose(c, 1.2))
        self.assertTrue(np.isclose(b, 3.4))
    def test_prefactor(self):
        # Exponent pinned: only the prefactor is fitted.
        xdata = np.linspace(0, 1, 1000)
        ydata = 1.2 * xdata**3.4
        c, b, _ = gplt.fit_powerlaw(xdata, ydata, exponent=3.4)
        self.assertTrue(np.isclose(c, 1.2))
        self.assertTrue(np.isclose(b, 3.4))
    def test_exponent(self):
        # Prefactor pinned: only the exponent is fitted.
        xdata = np.linspace(0, 1, 1000)
        ydata = 1.2 * xdata**3.4
        c, b, _ = gplt.fit_powerlaw(xdata, ydata, prefactor=1.2)
        self.assertTrue(np.isclose(c, 1.2))
        self.assertTrue(np.isclose(b, 3.4))
class Test_fit_exp(unittest.TestCase):
    """
    Fit an exponential y = c * exp(b * x): recover both parameters, or
    one while the other is pinned.
    """
    def test_prefactor_exponent(self):
        xdata = np.linspace(0, 1, 1000)
        ydata = 1.2 * np.exp(3.4 * xdata)
        c, b, _ = gplt.fit_exp(xdata, ydata)
        self.assertTrue(np.isclose(c, 1.2))
        self.assertTrue(np.isclose(b, 3.4))
    def test_prefactor_negative_exponent(self):
        # Decaying exponential: the fitted exponent must come out negative.
        xdata = np.linspace(0, 1, 1000)
        ydata = 1.2 * np.exp(-3.4 * xdata)
        c, b, _ = gplt.fit_exp(xdata, ydata)
        self.assertTrue(np.isclose(c, 1.2))
        self.assertTrue(np.isclose(b, -3.4))
    def test_prefactor(self):
        # Exponent pinned: only the prefactor is fitted.
        xdata = np.linspace(0, 1, 1000)
        ydata = 1.2 * np.exp(3.4 * xdata)
        c, b, _ = gplt.fit_exp(xdata, ydata, exponent=3.4)
        self.assertTrue(np.isclose(c, 1.2))
        self.assertTrue(np.isclose(b, 3.4))
    def test_exponent(self):
        # Prefactor pinned: only the exponent is fitted.
        xdata = np.linspace(0, 1, 1000)
        ydata = 1.2 * np.exp(3.4 * xdata)
        c, b, _ = gplt.fit_exp(xdata, ydata, prefactor=1.2)
        self.assertTrue(np.isclose(c, 1.2))
        self.assertTrue(np.isclose(b, 3.4))
class Test_fit_log(unittest.TestCase):
    """
    Fit a logarithmic function y = a + b * log(x): recover both
    parameters, or one while the other is pinned.
    """
    def test_prefactor_exponent(self):
        # Skip x == 0 where the logarithm diverges.
        xdata = np.linspace(0, 1, 1000)[1:]
        ydata = 1.2 + 3.4 * np.log(xdata)
        a, b, _ = gplt.fit_log(xdata, ydata)
        self.assertTrue(np.isclose(a, 1.2))
        self.assertTrue(np.isclose(b, 3.4))
    def test_prefactor_negative_prefactor(self):
        xdata = np.linspace(0, 1, 1000)[1:]
        ydata = 1.2 - 3.4 * np.log(xdata)
        a, b, _ = gplt.fit_log(xdata, ydata)
        self.assertTrue(np.isclose(a, 1.2))
        self.assertTrue(np.isclose(b, -3.4))
    def test_prefactor(self):
        # Prefactor pinned: only the offset is fitted.
        xdata = np.linspace(0, 1, 1000)[1:]
        ydata = 1.2 + 3.4 * np.log(xdata)
        a, b, _ = gplt.fit_log(xdata, ydata, prefactor=3.4)
        self.assertTrue(np.isclose(a, 1.2))
        self.assertTrue(np.isclose(b, 3.4))
    def test_exponent(self):
        # Offset pinned: only the prefactor is fitted.
        xdata = np.linspace(0, 1, 1000)[1:]
        ydata = 1.2 + 3.4 * np.log(xdata)
        a, b, _ = gplt.fit_log(xdata, ydata, offset=1.2)
        self.assertTrue(np.isclose(a, 1.2))
        self.assertTrue(np.isclose(b, 3.4))
class Test_fit_linear(unittest.TestCase):
    """
    Fit a linear function y = a + m * x: recover both parameters, or one
    while the other is pinned.
    """
    def test_offset_slope(self):
        xdata = np.linspace(0, 1, 1000)
        ydata = 1.2 + 3.4 * xdata
        a, m, _ = gplt.fit_linear(xdata, ydata)
        self.assertTrue(np.isclose(a, 1.2))
        self.assertTrue(np.isclose(m, 3.4))
    def test_slope(self):
        # Slope pinned: only the offset is fitted.
        xdata = np.linspace(0, 1, 1000)
        ydata = 1.2 + 3.4 * xdata
        a, m, _ = gplt.fit_linear(xdata, ydata, slope=3.4)
        self.assertTrue(np.isclose(a, 1.2))
        self.assertTrue(np.isclose(m, 3.4))
    def test_offset(self):
        # Offset pinned: only the slope is fitted.
        xdata = np.linspace(0, 1, 1000)
        ydata = 1.2 + 3.4 * xdata
        a, m, _ = gplt.fit_linear(xdata, ydata, offset=1.2)
        self.assertTrue(np.isclose(a, 1.2))
        self.assertTrue(np.isclose(m, 3.4))
class Test_cdf(unittest.TestCase):
    """
    Cumulative probability density.
    """
    def test_simple(self):
        # Counts per unique value 0..5, normalised to probabilities.
        data = np.array([0, 0, 1, 2, 3, 3, 4, 4, 4, 5, 5, 5, 5])
        xr = np.array([0, 1, 2, 3, 4, 5])
        pr = np.array([2, 1, 1, 2, 3, 4]) / data.size
        # P(X <= x): plain cumulative sum of the probabilities.
        p, x = gplt.cdf(data, less_equal=True)
        self.assertTrue(np.allclose(x, xr))
        self.assertTrue(np.allclose(p, np.cumsum(pr)))
        # Default cdf is strict, P(X < x): shift the cumulative sum by one.
        p, x = gplt.cdf(data)
        self.assertTrue(np.allclose(x, xr))
        self.assertTrue(np.allclose(p, np.cumsum([0] + pr.tolist())[:-1]))
        # Default ccdf is P(X >= x): reversed cumulative sum.
        p, x = gplt.ccdf(data)
        self.assertTrue(np.allclose(x, xr))
        self.assertTrue(np.allclose(p, np.cumsum(pr[::-1])[::-1]))
        # NOTE(review): `[1::-1]` keeps only two elements of the cumulative
        # sum, which cannot match the 6-element `p` -- this reference
        # expression looks suspect; confirm the intended slice (e.g. [:0:-1]).
        p, x = gplt.ccdf(data, greater_equal=False)
        self.assertTrue(np.allclose(x, xr))
        self.assertTrue(np.allclose(p, np.cumsum([0] + pr[::-1].tolist())[1::-1]))
        # Complementarity: cdf and ccdf on the same data must sum to 1.
        p, x = gplt.cdf(data)
        pc, xc = gplt.ccdf(data)
        self.assertTrue(np.allclose(x, xc))
        self.assertTrue(np.allclose(1 - p, pc))
    def test_random(self):
        # For uniform data the empirical cdf approximates the identity.
        data = np.random.random(10000)
        p, x = gplt.cdf(data)
        xp = np.linspace(0, 1, 100)
        pp = np.interp(xp, x, p)
        self.assertTrue(np.allclose(xp, pp, rtol=1e-1, atol=1e-1))
        p, x = gplt.ccdf(data)
        xp = np.linspace(0, 1, 100)
        pp = np.interp(xp, x, p)
        self.assertTrue(np.allclose(1 - xp, pp, rtol=1e-1, atol=1e-1))
class Test_bin(unittest.TestCase):
    """
    Bin data: per-bin statistics of y grouped by x.
    """
    def test_simple(self):
        xdata = np.array([1, 1, 3, 3, 3, 5, 5])
        ydata = np.array([2, 4, 1, 2, 3, 2, 4])
        bin_edges = np.array([0, 2, 4, 6])
        binned = gplt.bin(xdata, ydata, bin_edges)
        self.assertTrue(np.allclose(binned["x"], [1, 3, 5]))
        self.assertTrue(np.allclose(binned["y"], [3, 2, 3]))
        self.assertTrue(np.allclose(binned["xerr"], [0, 0, 0]))
        expected_yerr = [np.std([2, 4]), np.std([1, 2, 3]), np.std([2, 4])]
        self.assertTrue(np.allclose(binned["yerr"], expected_yerr))
        # For this symmetric data, use_median=False must give identical output.
        mean_binned = gplt.bin(xdata, ydata, bin_edges, use_median=False)
        for key in binned:
            self.assertTrue(np.allclose(binned[key], mean_binned[key]))
class Test_histogram_norm(unittest.TestCase):
    """
    Histogram normalisation.
    """
    def test_density(self):
        data = [0, 0, 0, 1, 1, 2]
        bin_edges = [-0.5, 0.5, 1.5, 2.5]
        count, _ = np.histogram(data, bins=bin_edges)
        normed = gplt.histogram_norm(count, bin_edges)
        # Normalising twice must be idempotent.
        renormed = gplt.histogram_norm(normed, bin_edges)
        # Reference: numpy's own density normalisation.
        reference, _ = np.histogram(data, bins=bin_edges, density=True)
        self.assertTrue(np.allclose(normed, reference))
        self.assertTrue(np.allclose(renormed, reference))
class Test_histogram_bin_edges2midpoint(unittest.TestCase):
    """
    Midpoints of bins.
    """
    def test_simple(self):
        edges = [-0.5, 0.5, 1.5, 2.5]
        expected = [0, 1, 2]
        midpoints = gplt.histogram_bin_edges2midpoint(edges)
        self.assertTrue(np.allclose(midpoints, expected))
class Test_histogram_bin_edges_integer(unittest.TestCase):
    """
    Bin edges: fixtures differing only in where the edge to drop sits
    (front, middle, back of the edge list).
    """
    def test_front(self):
        edges = [0, 0.5, 1.5, 2.5]
        expected = [0, 1.5, 2.5]
        self.assertTrue(np.allclose(gplt.histogram_bin_edges_integer(edges), expected))
    def test_middle(self):
        edges = [0, 1.5, 1.6, 2.5]
        expected = [0, 1.6, 2.5]
        self.assertTrue(np.allclose(gplt.histogram_bin_edges_integer(edges), expected))
    def test_back(self):
        edges = [0, 1.5, 2.5, 2.6]
        expected = [0, 1.5, 2.6]
        self.assertTrue(np.allclose(gplt.histogram_bin_edges_integer(edges), expected))
if __name__ == "__main__":
unittest.main()
|
{"hexsha": "fd3b5f45c31c24a372f40b55c96521b29a67beec", "size": 10751, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/main.py", "max_stars_repo_name": "tdegeus/pyplot_ext", "max_stars_repo_head_hexsha": "d084fb6f5a824d9c9c3d1bf9a3c9ef9e579b4d7f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/main.py", "max_issues_repo_name": "tdegeus/pyplot_ext", "max_issues_repo_head_hexsha": "d084fb6f5a824d9c9c3d1bf9a3c9ef9e579b4d7f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/main.py", "max_forks_repo_name": "tdegeus/pyplot_ext", "max_forks_repo_head_hexsha": "d084fb6f5a824d9c9c3d1bf9a3c9ef9e579b4d7f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.080604534, "max_line_length": 100, "alphanum_fraction": 0.5085108362, "include": true, "reason": "import numpy", "num_tokens": 3301}
|
[STATEMENT]
lemma in_Gr[simp]:
shows "(x,y) \<in> BNF_Def.Gr A f \<longleftrightarrow> x \<in> A \<and> f x = y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ((x, y) \<in> BNF_Def.Gr A f) = (x \<in> A \<and> f x = y)
[PROOF STEP]
unfolding BNF_Def.Gr_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ((x, y) \<in> {(a, f a) |a. a \<in> A}) = (x \<in> A \<and> f x = y)
[PROOF STEP]
by auto
|
{"llama_tokens": 199, "file": "Graph_Saturation_MissingRelation", "length": 2}
|
"""
Test `sinethesizer.envelopes.user_defined` module.
Author: Nikolay Lysenko
"""
from typing import Any, Dict, List
import numpy as np
import pytest
from sinethesizer.envelopes.user_defined import create_user_defined_envelope
from sinethesizer.synth.core import Event
@pytest.mark.parametrize(
    "duration, velocity, frame_rate, "
    "parts, ratio_at_zero_velocity, envelope_values_on_velocity_order, "
    "expected",
    [
        (
            # `duration`
            2,
            # `velocity`
            0.375,
            # `frame_rate`
            10,
            # `parts`
            [
                {
                    'values': [0.0, 1.0, 0.9, 0.1, 0.0],
                    'max_duration': None
                },
            ],
            # `ratio_at_zero_velocity`
            0.2,
            # `envelope_values_on_velocity_order`
            1.0,
            # `expected`
            np.array([
                0.0, 0.1, 0.2, 0.3, 0.4,
                0.5, 0.49, 0.48, 0.47, 0.46,
                0.45, 0.35, 0.25, 0.15, 0.05,
                0.04, 0.03, 0.02, 0.01, 0.0
            ])
        ),
        (
            # `duration`
            2,
            # `velocity`
            1,
            # `frame_rate`
            10,
            # `parts`
            [
                {
                    'values': [0.0, 0.5, 1.0, 0.0],
                    'max_duration': None
                },
            ],
            # `ratio_at_zero_velocity`
            0.2,
            # `envelope_values_on_velocity_order`
            1.0,
            # `expected`
            np.array([
                0, 1 / 12, 2 / 12, 3 / 12, 4 / 12, 5 / 12,
                0.5, 8 / 14, 9 / 14, 10 / 14, 11 / 14, 12 / 14, 13 / 14,
                1, 5 / 6, 4 / 6, 3 / 6, 2 / 6, 1 / 6, 0
            ])
        ),
        (
            # `duration`
            2,
            # `velocity`
            1,
            # `frame_rate`
            10,
            # `parts`
            [
                {
                    'values': [0.0, 0.5, 1.0],
                    'max_duration': 0.2
                },
                {
                    'values': [1.0, 0.7, 0.1],
                    'max_duration': None
                },
            ],
            # `ratio_at_zero_velocity`
            0.2,
            # `envelope_values_on_velocity_order`
            1.0,
            # `expected`
            np.array([
                0.5, 1.0,
                1.0, 0.9625, 0.925, 0.8875, 0.85, 0.8125, 0.775, 0.7375, 0.7,
                19 / 30, 17 / 30, 15 / 30, 13 / 30, 11 / 30, 9 / 30, 7 / 30,
                5 / 30, 3 / 30
            ])
        ),
        (
            # `duration`
            2,
            # `velocity`
            1,
            # `frame_rate`
            10,
            # `parts`
            [
                {
                    'values': [0.0, 0.5, 1.0],
                    'max_duration': None
                },
                {
                    'values': [1.0, 0.7, 0.1],
                    'max_duration': None
                },
            ],
            # `ratio_at_zero_velocity`
            0.2,
            # `envelope_values_on_velocity_order`
            1.0,
            # `expected`
            np.array([
                0, 0.125, 0.25, 0.375, 0.5, 0.6, 0.7, 0.8, 0.9, 1,
                1, 0.925, 0.85, 0.775, 0.7, 0.58, 0.46, 0.34, 0.22, 0.1
            ])
        ),
    ]
)
def test_create_user_defined_envelope(
        duration: float, velocity: float, frame_rate: int,
        parts: List[Dict[str, Any]], ratio_at_zero_velocity: float,
        envelope_values_on_velocity_order: float, expected: np.ndarray
) -> None:
    """Check that `create_user_defined_envelope` produces the expected array."""
    # Only `duration`, `velocity`, and `frame_rate` vary across cases;
    # the remaining event attributes are fixed placeholders.
    event_kwargs = {
        'instrument': 'any_instrument',
        'start_time': 0,
        'duration': duration,
        'frequency': 440,
        'velocity': velocity,
        'effects': '',
        'frame_rate': frame_rate,
    }
    envelope = create_user_defined_envelope(
        Event(**event_kwargs),
        parts,
        ratio_at_zero_velocity,
        envelope_values_on_velocity_order,
    )
    np.testing.assert_almost_equal(envelope, expected)
|
{"hexsha": "e3d2ef7a39e79996234d8eb0ee0538503cdc4745", "size": 4149, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/envelopes/test_user_defined.py", "max_stars_repo_name": "Nikolay-Lysenko/sinethesizer", "max_stars_repo_head_hexsha": "fe6855186a00e701113ea5bb4fac104bf8497035", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2019-07-25T12:17:38.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-04T19:38:21.000Z", "max_issues_repo_path": "tests/envelopes/test_user_defined.py", "max_issues_repo_name": "Nikolay-Lysenko/sinethesizer", "max_issues_repo_head_hexsha": "fe6855186a00e701113ea5bb4fac104bf8497035", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2019-07-20T18:04:54.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-03T17:31:26.000Z", "max_forks_repo_path": "tests/envelopes/test_user_defined.py", "max_forks_repo_name": "Nikolay-Lysenko/sinethesizer", "max_forks_repo_head_hexsha": "fe6855186a00e701113ea5bb4fac104bf8497035", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-10-16T18:44:43.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-16T18:44:43.000Z", "avg_line_length": 27.2960526316, "max_line_length": 79, "alphanum_fraction": 0.3986502772, "include": true, "reason": "import numpy", "num_tokens": 1217}
|
! Traces a 2-D ray between the receiver (xzt,yzt) and the source (xst,yst)
! by the bending method: starting from a straight line, the ray is split
! into an increasing number of segments and each segment is iteratively
! perturbed perpendicular to itself while the travel time decreases.
! IN:  xzt,yzt - receiver coordinates; xst,yst - source coordinates
!      ips     - phase/velocity index, passed through to velocity()
! OUT: tout    - travel time along the final (bent) ray
! Side effects: updates the ray path in common /ray/ (nodes,xray,yray);
! /ray_part/ and /shift/ are used as per-segment work space.
subroutine trace_bend_2d(xzt,yzt,xst,yst,ips, tout)
real xrmin(1000),yrmin(1000)
real t_segm(1000)
integer indexx(100)
real dxtmp(1000),dytmp(1000),xtmp(1000),ytmp(1000)
common/ray_param/ds_ini,ds_segm_min,bend_min0,bend_max0
common/ray/ nodes,xray(1000),yray(1000)
common/ray_part/ npart,xpart(1000),ypart(1000),spart(1000),kod_part(1000)
common/shift/ dxpart(1000),dypart(1000)
totdist=sqrt((xzt-xst)*(xzt-xst)+(yzt-yst)*(yzt-yst))
tout=0
! Degenerate case: end points closer than the minimum segment length.
! Use the straight-line time with the velocity sampled at the midpoint.
if(totdist.lt.ds_segm_min) then
x1=xzt
x2=xst
xm=(x1+x2)/2.
y1=yzt
y2=yst
ym=(y1+y2)/2.
vvv=velocity (xm,ym,ips)
! BUG FIX: this line previously read "tout=ds/vvv", but ds is a local
! variable that is never assigned in this branch (undefined on entry in
! Fortran), so the returned time was garbage. The path length of this
! single-segment ray is totdist.
tout=totdist/vvv
return
end if
nsegm_max = int_best(totdist / ds_segm_min)
if(nsegm_max.eq.0) then
return
end if
! Initial guess: straight line between the end points.
call streight_line(xzt,yzt,xst,yst,ips, tout)
!write(*,*)' streight line: tout=',tout
!write(*,*)ds_ini,ds_segm_min,bend_min0,bend_max0
!pause
tmin=tout
! Progressively refine: bend the ray using 1,2,...,nsegm_max segments.
do nsegm=1,nsegm_max
!open(22,file='nodes.dat')
indexx=0
dpart=1./nsegm
val_cur0=bend_max0/nsegm
xtmp=xray
ytmp=yray
do iseg=1,nsegm
part1=(iseg-1)*dpart
part2=iseg*dpart
call part_ray(xzt,yzt,xst,yst,part1,part2)
!write(*,*)' iseg=',iseg,' part1=',part1,' part2=',part2
!write(22,*) xpart(1),ypart(1)
! Travel time of the unperturbed segment polyline.
ttt=0
do inode=1,npart-1
x1=xpart(inode)
x2=xpart(inode+1)
xm=(x1+x2)/2.
y1=ypart(inode)
y2=ypart(inode+1)
ym=(y1+y2)/2.
ds=sqrt((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1))
vvv=velocity (xm,ym,ips)
ttt=ttt+ds/vvv
!write(*,*)inode,ypart(inode),dypart(inode),' v=',vvv
end do
tini=ttt
!write(*,*)' tini=',tini,' npart=',npart
dxpart=0
dypart=0
dxtmp=0
dytmp=0
valtot=0
tmin=tini
t_segm(iseg)=tmin
val_cur=val_cur0
! Amplitude search: try bending by +/-val_cur, halving the amplitude
! until it falls below bend_min0.
332 continue
do icase=1,2
ind=0
val_bend = (-1)**icase * val_cur
331 continue
val = val_bend
sss2=spart(npart)/2.
sss=0
! Displace interior nodes perpendicular to the local ray direction,
! tapered linearly to zero at both segment ends (max at the middle).
do inode=2,npart-1
x1=xpart(inode-1)
y1=ypart(inode-1)
x2=xpart(inode)
y2=ypart(inode)
x3=xpart(inode+1)
y3=ypart(inode+1)
ds=sqrt((x2-x1)**2+(y2-y1)**2)
sss=sss+ds
dx=x3-x1
dy=y3-y1
ds2=sqrt(dx*dx+dy*dy)
d_value = val * (1-abs(sss-sss2)/sss2)
dxtmp(inode)= - d_value * dy / ds2
dytmp(inode)= d_value * dx / ds2
!write(*,*)inode,d_value,dxpart(inode),dypart(inode)
end do
! Travel time of the trial (shifted) polyline.
ttt=0
sss=0
do inode=1,npart-1
x1= xpart(inode)+ dxpart(inode)+ dxtmp(inode)
x2= xpart(inode+1)+ dxpart(inode+1)+ dxtmp(inode+1)
xm=(x1+x2)/2.
y1= ypart(inode)+ dypart(inode)+ dytmp(inode)
y2= ypart(inode+1)+ dypart(inode+1)+ dytmp(inode+1)
ym=(y1+y2)/2.
ds=sqrt((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1))
sss=sss+ds
vvv=velocity (xm,ym,ips)
ttt=ttt+ds/vvv
!write(*,*)inode,ypart(inode),dypart(inode),' v=',vvv
end do
tout=ttt
!write(*,*)' val_bend=',val_bend,' tout=',tout
! Accept and accumulate the perturbation only while it improves the time.
if(tout.lt.tmin) then
ind=ind+1
tmin=tout
dxpart=dxpart+dxtmp
dypart=dypart+dytmp
valtot=valtot+val_bend
!write(*,*)' valtot=',valtot,' val_bend=',val_bend,' tmin=',tmin
goto 331
else
if(ind.ne.0) exit
end if
end do
val_cur = val_cur / 2.
if(abs(val_cur).gt.bend_min0) goto 332
! Fold the accepted shifts back into the full ray copy (xtmp/ytmp) and
! into the working segment nodes; kod_part maps segment to ray indices.
do ipp=1,npart
kod=kod_part(ipp)
if(kod.eq.0) cycle
xtmp(kod)=xtmp(kod)+dxpart(ipp)
ytmp(kod)=ytmp(kod)+dypart(ipp)
xpart(ipp)=xpart(ipp)+dxpart(ipp)
ypart(ipp)=ypart(ipp)+dypart(ipp)
end do
t_segm(iseg)=tmin
end do
! NOTE: label 333 is never the target of a goto; kept for compatibility.
333 continue
ttot=0
do iseg=1,nsegm
!write(*,*)' indexx(ip)=',indexx(ip)
ttot=ttot+t_segm(iseg)
end do
!write(22,*)xray(nodes),yray(nodes)
!close(22)
xray=xtmp
yray=ytmp
!write(*,*)' val_cur0=',val_cur0,' ttot=',ttot
nrefl=0
call remeshing(nrefl)
!open(21,file='ray_iter.bln')
!write(21,*)nodes
!do i=1,nodes
!write(21,*)xray(i),yray(i)
!end do
!close(21)
!pause
end do
tout=ttot
! Final pass: recompute the travel time along the remeshed ray so that
! tout is consistent with the path actually stored in /ray/.
ttt=0
sss=0
do inode=1,nodes-1
x1= xray(inode)
x2= xray(inode+1)
xm=(x1+x2)/2.
y1= yray(inode)
y2= yray(inode+1)
ym=(y1+y2)/2.
ds=sqrt((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1))
sss=sss+ds
vvv=velocity (xm,ym,ips)
!dv= dv_mod_2d(xm,ym)
! write(*,*)xm,ym,' vvv=',vvv
!call vel_mod_2d(xm,ym, dv000,dv060,dv120)
!write(*,*)' dv000=',dv000,' dv060=',dv060,' dv120=',dv120
ttt=ttt+ds/vvv
!write(*,*)inode,ypart(inode),dypart(inode),' v=',vvv
end do
tout=ttt
return
end
|
{"hexsha": "22e95078a3fb481d57bd35fb9ab467122ddcfdf1", "size": 4453, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "PROGRAMS/subr/trace_2d_iso/trace_bend_2d.f90", "max_stars_repo_name": "ilyasnsk/colima_lotos_2019", "max_stars_repo_head_hexsha": "d3ff4f32034e49a32560f170e980b6847b6ea9c7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-10-28T06:16:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-16T02:52:23.000Z", "max_issues_repo_path": "PROGRAMS/subr/trace_2d_iso/trace_bend_2d.f90", "max_issues_repo_name": "ilyasnsk/colima_lotos_2019", "max_issues_repo_head_hexsha": "d3ff4f32034e49a32560f170e980b6847b6ea9c7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PROGRAMS/subr/trace_2d_iso/trace_bend_2d.f90", "max_forks_repo_name": "ilyasnsk/colima_lotos_2019", "max_forks_repo_head_hexsha": "d3ff4f32034e49a32560f170e980b6847b6ea9c7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.7100840336, "max_line_length": 74, "alphanum_fraction": 0.6009431844, "num_tokens": 1903}
|
module HFMod
#Ensure the project directory is on the load path exactly once so local
#modules (e.g. CTMod) resolve via `using`.
#NOTE(review): findin is Julia 0.6 API and the absolute path is machine-specific.
if length(findin("C:\\Users\\Clinton\\Dropbox\\Projects\\SummerProject17",LOAD_PATH)) == 0
    push!(LOAD_PATH,"C:\\Users\\Clinton\\Dropbox\\Projects\\SummerProject17")
end
#=TODO:
1) IV=#
#2) Run specifications
using DataFrames, Distributions, StatsBase, GZip, JLD, Gadfly, CTMod
#importall CT
#Set this to an optimal value- I find logical cores *(3/4) works well
BLAS.set_num_threads(Int(round((Sys.CPU_CORES*3÷4))))
#if !isdefined(:constDef)
const constDef = true #flag marking that the constants below are defined
#other constants here
const DATA_PATH = pwd() * "\\data" #input data directory
const DATA_FILE_NAME = "HFPerformance_2015-2017" #base name of the live-fund file
const DATA_FILE_NAME_DEAD = DATA_FILE_NAME * "_dead" #dead-fund companion file
const DATE_FORMAT_STR = "yyyymmdd" #format of integer date fields in the raw data
const NOTICE_PERIOD_ADJ = 7 #days to add to the notice (since data are monthly)
const MIN_PERIODS_FOR_INCLUSION = 6 #number of periods required for inclusion
const T_TEST_THRESHOLD = 2.0 #this is a parameter for identification if the flows are lagged
const PERIODS_SAME_AUM_TO_DELETE = 4 #assume 4 consecutive periods of identical aum implies stale data
const LAGS_NOTICE_TO_CAPTURE = 3 #number of monthly notice lags to record
const LAGS_PERFORMANCE_TO_CAPTURE = 12 #number of monthly performance lags to record
const MIN_ASSETS = 10.0 #10,000,000
const START_LAGS_FROM_NOTICE = true #start the performance lags from the notice period (not the redemption date)
const RUN_LM_INTERACTIONS = false #run the interaction regressions (takes a while)
const RUN_2SLS_INTERACTIONS = false #run the 2SLS interaction specs (memory heavy)
const RESULTS_PATH = pwd() * "\\results" #output directory for the LaTeX tables
const FOOTER_NAME = "footer.tex" #LaTeX footer fragment appended to table files
const HEADER_NAME = "header.tex" #LaTeX header fragment prepended to table files
const USE_AGGRESSIVE_GC = false #this enables very aggressive GC which helps memory management at cost of performance
const R_TEST_PATH = "C:\\Users\\Clinton\\Dropbox\\Projects\\Summer17RTester\\Summer17RTester" #R cross-check project
#end
###############HELPER FUNCTIONS
#helper function to extract all positive numbers from a string,
#e.g. "1.5% plus 20" -> [1.5, 20.0]
#IN: A string
#OUT: A vector of floats, possibly empty
function getPosNumsFromString(s::String)
    len::Int = length(s)
    preParseS::Vector{Char} = collect(s)
    #replace non-numerical values with delimitor Ξ; a '.' survives only when it
    #is adjacent to a digit, so stray periods do not create spurious tokens
    for i ∈ 1:len
        preParseS[i] = (isnumber(s[i]) || (
            s[i]=='.' && (i>1 && isnumber(s[i-1]) || (
            i<len && isnumber(s[i+1])))))?s[i]:'Ξ'
    end
    #split the tokens, keeping only numberical strings
    #NOTE(review): split receives a Vector{Char}; presumably Julia 0.6 accepted
    #this — modern Julia requires String(preParseS). Confirm before upgrading.
    sSplit::Vector{String} = split(preParseS,'Ξ',keep=false)
    #parse and return the values as an array of floats
    return broadcast((x::String)->Float64(parse(x)),sSplit)::Vector{Float64}
end
#=helper function which sorts and splits DF on symbol, extracts all positive
numeric values, averages them, and replaces the column
IN: DataFrame, column symbol, a default value, and whether to leave NAs
OUT: None explicit. DataFrame modified in place: col becomes numeric, and the
old string column is preserved under the name col_str=#
function DFStringtoMeanNum!(DF::DataFrame, col::Symbol;
    default::Float64=0.0, leaveNAs::Bool=false, silent::Bool=true)
    oldCol = Symbol(col, "_str") #e.g. :incentive_fee -> :incentive_fee_str
    rename!(DF, col, oldCol)
    DF[:,col] = default
    sort!(DF, cols=(oldCol))
    #each group shares a single string value, so parse it once per group
    by(DF, oldCol) do DFSub::SubDataFrame
        if ~isna(DFSub[1,oldCol])
            parsed::Vector{Float64} = #parse the string
                getPosNumsFromString(DFSub[1,oldCol])
            if length(parsed)>0 #if string wasn't all garbage
                DFSub[:,col] = mean(parsed)
            elseif !silent#Give a heads up given a garbage incentive fee
                println("WARNING: Could not parse ", col, ". Value: ", DFSub[1, oldCol],
                    ", Assuming numeric value of ", default)
            end
        elseif leaveNAs
            DFSub[:,col] = NA
        end
    end
end
#renders a vector as a single string, joining the elements with an
#optional delimiter (defaults to no delimiter)
function vec2String(v::Vector{T}, delim::W = "")::String where
    {T<:Any, W<:Union{AbstractString,Char}}
    asStrings::Vector{String} = map(String, v)
    return join(asStrings, delim)
end
#t statistic for testing the Pearson correlation of two samples against
#zero (Lowry 2017): t = r / sqrt((1 - r^2) / (n - 2)) with r = cor(x, y)
function tStatOfCor(x::Vector{Float64},y::Vector{Float64})
    rho::Float64 = cor(x,y)
    dof::Float64 = length(x) - 2
    return (rho / sqrt((1.0 - rho^2.0) / dof))::Float64
end
#this is a type which stores all of the notice lag column symbols
struct NoticeLagSymbols
    noticeLags::Vector{Symbol} #flow-level notice lags (:noticeLag1, ...)
    noticeLagsNo1st::Vector{Symbol} #same, excluding the first notice month
    noticeLLags::Vector{Symbol} #lFlows-based notice lags (:noticeLLag1, ...)
    noticeLLagsNo1st::Vector{Symbol} #lFlows lags excluding the first month
end
#Constructor for the above type
#IN: Number of lags
#OUT: A container of symbols with the appropriate lags
function NoticeLagSymbols(lags::Int)::NoticeLagSymbols
    noticeLags::Vector{Symbol} = Vector{Symbol}(lags)
    noticeLagsNo1st::Vector{Symbol} = similar(noticeLags)
    noticeLLags::Vector{Symbol} = similar(noticeLags)
    noticeLLagsNo1st::Vector{Symbol} = similar(noticeLags)
    #build the indexed names, e.g. :noticeLag1, :noticeLag1No1st, :noticeLLag1
    for i ∈ 1:lags
        noticeLags[i] = Symbol("noticeLag$i")
        noticeLagsNo1st[i] = Symbol("noticeLag$(i)No1st")
        noticeLLags[i] = Symbol("noticeLLag$i")
        noticeLLagsNo1st[i] = Symbol("noticeLLag$(i)No1st")
    end
    return NoticeLagSymbols(noticeLags,noticeLagsNo1st, noticeLLags,
        noticeLLagsNo1st)
end
#this is a type which stores all of the performance lag column symbols
struct PerformanceLagSymbols
    performanceLags::Vector{Symbol} #(:performanceLag1, :performanceLag2, ...)
end
#Builds the performance-lag symbol container for a given number of lags.
#IN: Number of lags
#OUT: A PerformanceLagSymbols holding :performanceLag1 ... :performanceLagN
function PerformanceLagSymbols(lags::Int)::PerformanceLagSymbols
    lagSymbols::Vector{Symbol} = [Symbol("performanceLag$i") for i ∈ 1:lags]
    return PerformanceLagSymbols(lagSymbols)
end
#convenience overload: renders a model expression (CTExpr) to a string and
#checks whether it contains the substring s; used to build table flag rows
contains(e::T where T<:CTExpr, s::String)::Bool = Base.contains("$e",s)
###########Main Methods
#=Loads the raw hedge-fund CSV exports (live + dead funds), cleans them, backs
out monthly flows, builds the notice/performance lag columns, and serializes
the result to a binary for the analysis stage.
IN: FlowsLaggingProcedure - U for unlagged, L for lagged, C for corrected,
    R for corrected but with a lagged default (see the flow-lagging logic below)
    newBinary - force re-extraction of the gzipped CSVs and rebuild the binary
OUT: nothing explicit; writes "<DATA_FILE_NAME>_<proc>_clean.jls" to DATA_PATH=#
function PreProcessHFData(;FlowsLaggingProcedure::Char = 'U', newBinary::Bool = false)
    nPer::Int = 0 #number of periods
    dtFormat::DateFormat = DateFormat(DATE_FORMAT_STR) ## a date format object
    #this allows us to capture a variable amount of lags
    noticeLS::NoticeLagSymbols = NoticeLagSymbols(LAGS_NOTICE_TO_CAPTURE)
    performanceLS::PerformanceLagSymbols = PerformanceLagSymbols(LAGS_PERFORMANCE_TO_CAPTURE)
    #this makes sure we have a binary of the data (Improves load times 3-4x)
    if !isfile("$DATA_PATH\\$DATA_FILE_NAME.jls") || newBinary
        #extract from the zip file
        gHandle::GZipStream = GZip.open("$DATA_PATH\\$DATA_FILE_NAME.gz")
        write("$DATA_PATH\\$DATA_FILE_NAME.csv", readlines(gHandle,chomp=false))
        close(gHandle)
        gHandle = GZip.open("$DATA_PATH\\$DATA_FILE_NAME_DEAD.gz")
        write("$DATA_PATH\\$DATA_FILE_NAME_DEAD.csv", readlines(gHandle,chomp=false))
        close(gHandle)
        #write the binary (live and dead fund tables stacked together)
        oStream::IOStream = open("$DATA_PATH\\$DATA_FILE_NAME.jls","w+")
        serialize(oStream,
            [readtable("$DATA_PATH\\$DATA_FILE_NAME.csv"); readtable("$DATA_PATH\\$DATA_FILE_NAME_DEAD.csv")])
        close(oStream)
    end
    #load the binary
    iStream::IOStream = open("$DATA_PATH\\$DATA_FILE_NAME.jls")
    HFData::DataFrame = deserialize(iStream)
    close(iStream)
    println("Initial rows: ", size(HFData,1), ", Initial columns: ", size(HFData,2))
    println("Begining pre-processing...")
    #drop some columns
    HFData = HFData[:, [:fund_id, :inception, :main_strategy,
        :sub_strategy, :leverage, :fund_assets, :firm_assets,
        :returns_denomination, :management_fee, :incentive_fee, :high_watermark,
        :hurdle_rate, :subscriptions, :redemptions,
        :advance_notice, :lockup, :fund_assets_as_of,
        :fund_assets_denomin, :sales_fee, :other_fees,
        :date_added_to_db, :fund_status, :ucitsiii, :date, :performance,
        :nav, :assets]]
    #Drop some null valued fields:
    HFData = dropNullsFromDF(HFData, [:date, :performance, :assets,
        :returns_denomination, :management_fee,:advance_notice])
    #only want active funds
    #HFData = HFData[HFData[:,:fund_status].=="Active",:]
    delete!(HFData,:fund_status) #no need for this anymore
    HFData = HFData[HFData[:,:assets] .≥ MIN_ASSETS, :]
    #parse the incentive fees and mangement fees
    DFStringtoMeanNum!(HFData, :incentive_fee)
    DFStringtoMeanNum!(HFData, :management_fee)
    #convert from percentage to decimal
    HFData[:,:incentive_fee] ./= 100.0
    HFData[:,:management_fee] ./= 100.0
    HFData[:,:performance] ./= 100.0
    #broadcast an anonymous function to do the integer-to-Date conversions
    HFData[:,:inception] = ((x::Int)->Date("$x",dtFormat)).(HFData[:,:inception])
    HFData[:,:date_added_to_db] = ((x::Int)->Date("$x",dtFormat)).(HFData[:,:date_added_to_db])
    HFData[:,:date] = ((x::Int)->Date("$x",dtFormat)).(HFData[:,:date])
    HFData[:,:fund_assets_as_of] = ((x::Int)->Date("$x",dtFormat)).(HFData[:,:fund_assets_as_of])
    #sort the data by fund id and date
    sort!(HFData,cols = (:fund_id,:date))
    #delStaleAUMDat!(HFData)
    #clean out some funds with stale aum data
    by(HFData, :fund_id) do HFSub::SubDataFrame
        nPer = size(HFSub,1)
        if nPer > PERIODS_SAME_AUM_TO_DELETE #check if this procedure makes sense
            identicalAUMs::Int = 1
            i::Int = 2
            #count the longest run of identical consecutive AUM values
            while i ≤ nPer && identicalAUMs < PERIODS_SAME_AUM_TO_DELETE
                identicalAUMs = (HFSub[i,:assets]==HFSub[i-1,:assets])?identicalAUMs+1:1
                i+=1
            end
            if identicalAUMs == PERIODS_SAME_AUM_TO_DELETE #set assets to NA to flag for deletion
                HFSub[:,:assets] = NA
            end
        end
    end
    HFData = HFData[~isna(HFData[:,:assets]),:] #delete the identified funds
    #delete funds with gaps in their performance history, or funds with a
    #track record less than six months
    HFData[:,:mcnt]=0
    by(HFData, :fund_id) do HFSub::SubDataFrame
        mcnt::Int = size(HFSub,1)
        #now check for track record breaks
        if maximum(HFSub[:,:date]) ≠
            Dates.lastdayofmonth(minimum(HFSub[:,:date])+Dates.Month(mcnt)-Dates.Month(1))
            mcnt = 0
        end
        HFSub[:,:mcnt] = mcnt
    end
    HFData=HFData[~(HFData[:,:mcnt].<MIN_PERIODS_FOR_INCLUSION),:]
    #get the months of advance notice
    HFData[:,:months_notice] = broadcast((x::Int)->
        ((x+NOTICE_PERIOD_ADJ)÷30)::Int,HFData[:,:advance_notice])
    ###The following sections create nessecary columns with placeholders
    #calculate monthly flows and construct the data set
    HFData[:,:flows] = 0.0 #flows
    HFData[:,:redemp_notice] = 0.0 #notifications about flows
    HFData[:,:rolling_notice] = 0.0 #notifications about flows
    HFData[:,:lFlows] = 0.0 #will hold log flows
    HFData[:,:redemp_notice_dt] = Date(1,1,1)
    #create the appropriate columns for lagged flow notice
    for i::Int ∈ 1:LAGS_NOTICE_TO_CAPTURE
        HFData[:,noticeLS.noticeLags[i]] = 0.0
        HFData[:,noticeLS.noticeLagsNo1st[i]] = 0.0
        HFData[:,noticeLS.noticeLLags[i]] = 0.0
        HFData[:,noticeLS.noticeLLagsNo1st[i]] = 0.0
    end
    #create columns for performance lag (typed Float64, then set to NA)
    for i::Int ∈ 1:LAGS_PERFORMANCE_TO_CAPTURE
        HFData[:,performanceLS.performanceLags[i]] = 0.0
        HFData[:,performanceLS.performanceLags[i]] .= NA
    end
    ctr = 0
    ###This is the main section for manipulating data by fund. We will
    #adjust the lagging of assets and assign to appropriate lagged variables
    by(HFData, :fund_id) do HFSub::SubDataFrame
        nPer = HFSub[1,:mcnt]
        useLaggingProc::Bool = false
        #now calculate the flows in the lagged and unlagged flows
        flowsUnlagged::Vector{Float64} =Vector{Float64}(nPer-1)
        flowsLagged::Vector{Float64} =Vector{Float64}(nPer-1)
        for i ∈ 2:nPer
            #back out the flows:
            #F_unlagged=A(t)-A(t-1)*(1+r(t))
            flowsUnlagged[i-1] = HFSub[i,:assets] -
                (HFSub[i-1,:assets])*(1.0+HFSub[i,:performance])
            #F_lagged=A(t)-A(t-1)*(1+r(t-1))
            flowsLagged[i-1] = HFSub[i,:assets] -
                (HFSub[i-1,:assets])*(1.0+HFSub[i-1,:performance])
        end
        if FlowsLaggingProcedure == 'U' #assume all unlagged
            useLaggingProc = false
        elseif FlowsLaggingProcedure == 'L' #assume all lagged
            useLaggingProc = true
        #below assumes a comparison process
        elseif FlowsLaggingProcedure == 'C' || FlowsLaggingProcedure == 'R'
            #get the differencein performance between lagged and unlagged
            perfDelta::Vector{Float64} = HFSub[2:end,:performance]
                .-HFSub[1:(end-1),:performance]
            #check the correlations
            flowsUnlaggedT::Float64 = tStatOfCor(flowsUnlagged,perfDelta)
            flowsLaggedT::Float64 = tStatOfCor(flowsLagged,perfDelta)
            #='C' Logic: Assume unlagged. If unlagged T test for correlation against
            return deltas is <-2 (or some other threshold), switch to lagged unless
            the lagged T stat is greater than 2
            'R' Logic: Same as C but reverse.=#
            if flowsUnlaggedT > -1.0 * T_TEST_THRESHOLD && flowsLaggedT > 1.0 * T_TEST_THRESHOLD
                useLaggingProc = false
            elseif flowsUnlaggedT < -1.0 * T_TEST_THRESHOLD && flowsLaggedT < 1.0 * T_TEST_THRESHOLD
                useLaggingProc = true
            elseif flowsUnlaggedT * flowsLaggedT ≤ 0.0
                if FlowsLaggingProcedure == 'C'
                    useLaggingProc = false
                elseif FlowsLaggingProcedure == 'R'
                    useLaggingProc = true
                end
            end
        end
        #now execute the appropriate lagging procedure
        if useLaggingProc
            HFSub[end,:flows] = NA
            HFSub[1:(end-1),:flows] = flowsLagged
            HFSub[1:(end-1),:lFlows] = flowsLagged ./
                (HFSub[2:end,:assets] .- flowsLagged)
        else
            HFSub[1,:flows] = NA
            HFSub[1,:lFlows] = NA
            HFSub[2:end,:flows] = flowsUnlagged
            HFSub[2:end,:lFlows] = flowsUnlagged ./
                (HFSub[2:end,:assets] .- flowsUnlagged)
        end
        #now we look up the notice period to determine the appropriate month
        #Only will be for negative flows
        #we will make the table two way, at the expense of redundancy
        for i::Int ∈ 1:nPer
            if !isna(HFSub[i,:flows]) && HFSub[i,:flows] < 0.0 &&
                i::Int > HFSub[i,:months_notice]
                HFSub[i-HFSub[i,:months_notice],:redemp_notice] = HFSub[i,:flows]
                HFSub[(i-HFSub[i,:months_notice]):(i),:rolling_notice] += HFSub[i,:flows]
                HFSub[i-HFSub[i,:months_notice],:redemp_notice_dt] = HFSub[i,:date]
                #record the redemption notificaion lags and performance lags
                if HFSub[i,:months_notice] > 0
                    for l::Int ∈ 1:min(LAGS_NOTICE_TO_CAPTURE, HFSub[i,:months_notice])
                        HFSub[i-l, noticeLS.noticeLags[l]] = HFSub[i,:flows]
                        HFSub[i-l, noticeLS.noticeLLags[l]] = HFSub[i,:lFlows]
                    end
                end
                #record the redemption notificaion lags, excluding the first months' notice
                if HFSub[i,:months_notice] > 1
                    for l::Int ∈ 1:min(LAGS_NOTICE_TO_CAPTURE, HFSub[i,:months_notice]-1)
                        HFSub[i-l, noticeLS.noticeLagsNo1st[l]] = HFSub[i,:flows]
                        HFSub[i-l, noticeLS.noticeLLagsNo1st[l]] = HFSub[i,:lFlows]
                    end
                end
            end
        end
        #now assign to the performance lags
        if START_LAGS_FROM_NOTICE #if true, we start from the end of the notice period
            for i::Int ∈ 2:nPer
                if i - 1 - HFSub[i,:months_notice] ≥ 1 #(make sure its positive number)
                    for l::Int ∈ 1:min(i-1- HFSub[i,:months_notice],LAGS_PERFORMANCE_TO_CAPTURE)
                        HFSub[i, performanceLS.performanceLags[l]] = HFSub[i - l, :performance]
                    end
                end
            end
        else
            for i::Int ∈ 2:nPer
                for l::Int ∈ 1:min(i-1,LAGS_PERFORMANCE_TO_CAPTURE)
                    HFSub[i, performanceLS.performanceLags[l]] = HFSub[i - l, :performance]
                end
            end
        end
    end
    #get a seperate column with net negative flows
    HFData[:,:negLFlows] = HFData[:,:lFlows]
    HFData[(~isna(HFData[:,:lFlows])) & (HFData[:,:lFlows].≥0.0),:negLFlows] .= NA
    #convert string columns to factors
    HFTypes::Vector{Type} = eltypes(HFData) #all types
    HFNames::Vector{String} = names(HFData) #all names
    for i ∈ 1:size(HFData,2) #for all columns
        if HFTypes[i] == String && !(typeof(HFData[:,i]) <: PooledDataVector)
            pool!(HFData,Symbol(HFNames[i])) #convert to factor
        end
    end
    #create a factor column for dates
    HFData[:,:fDate] = HFData[:,:date]
    pool!(HFData,:fDate)
    println("Processing complete.")
    println("Ending rows: ", size(HFData,1), ", Ending columns: ", size(HFData,2))
    #save the binary
    oStream = open("$DATA_PATH\\$(DATA_FILE_NAME)_$(FlowsLaggingProcedure)_clean.jls","w+")
    serialize(oStream, HFData)
    close(oStream)
end
#returns a table with the total flows aggregated by date
#High performance requires an initial sort of HFData by :date
#IN: the cleaned fund-level frame, the number of notice lags to total, and a
#flag for dropping the first notice month
#NOTE(review): noFirstNoticeMonth is never read in the body — confirm intent
#OUT: one row per date with fund counts, AUM, and flow/notice totals
function aggregateOnDate(HFData::DataFrame, lags::Int=5,
    noFirstNoticeMonth::Bool=false)
    nDates::Int = size(unique(HFData[:,:date]),1)
    noticeLS::NoticeLagSymbols = NoticeLagSymbols(lags)
    #preallocate data frame
    HFByDate::DataFrame =
        DataFrame([Date, Int, Float64, Float64, Float64, Float64,Float64],
        [:date, :numFunds, :totAUM, :totFlows, :totRedemp,
        :totRollingRedemp, :totRedempNotice], nDates)
    HFByDate[:,:date] .= unique(HFData[:,:date])
    #allocate the lag columns
    for s ∈ [noticeLS.noticeLags; noticeLS.noticeLagsNo1st]
        HFByDate[:,s] = 0.0
    end
    #make a lookup table from date to output row index
    dateTbl = Dict(HFByDate[i,:date] => i for i::Int ∈ 1:nDates)
    #might be a better way to do this with aggregates and joins,
    #although this is reasonably fast with a low number of comparisons
    #Basically we are getting totals for each symbol by a bunch of symbols by date
    by(HFData, :date) do HFSub::SubDataFrame
        dateIdx::Int = dateTbl[HFSub[1,:date]]
        #NOTE(review): applyToDate is computed but never used below
        applyToDate::Vector{Bool} = HFByDate[:,:date].==HFSub[1,:date]
        HFByDate[dateIdx,:numFunds]=size(unique(HFSub[:,:fund_id]),1)
        HFByDate[dateIdx,:totAUM]=sum(HFSub[~isna(HFSub[:,:assets]),:assets])
        HFByDate[dateIdx,:totFlows]=sum(HFSub[~isna(HFSub[:,:flows]),:flows])
        HFByDate[dateIdx,:totRedemp]=
            sum(HFSub[broadcast((x)->ifelse(~isna(x)&&x<0,true, false),HFSub[:,:flows]),:flows])
        HFByDate[dateIdx,:totRedempNotice]=
            sum(HFSub[~isna(HFSub[:,:redemp_notice]),:redemp_notice])
        HFByDate[dateIdx,:totRollingRedemp]=
            sum(HFSub[~isna(HFSub[:,:rolling_notice]),:rolling_notice])
        #total each notice-lag column on the date as well
        for s ∈ [noticeLS.noticeLags; noticeLS.noticeLagsNo1st]
            HFByDate[dateIdx,s]= sum(HFSub[~isna(HFSub[:,s]),s])
        end
    end
    return HFByDate
end
#this is the main method for analysis: loads the cleaned binary, runs the OLS
#and 2SLS specifications of performance on flows, and writes the LaTeX tables
#IN: FlowsLaggingProcedure - which cleaned variant to load ('U','L','C','R')
#    verbose - print each specification and its estimates as it runs
#OUT: nothing explicit; writes "RegTables_<proc>.tex" to RESULTS_PATH
function AnalyzeAggregates(;FlowsLaggingProcedure::Char = 'U',
    verbose::Bool = true)
    #open the pre-processed data file
    iStream::IOStream =
        open("$DATA_PATH\\$(DATA_FILE_NAME)_$(FlowsLaggingProcedure)_clean.jls")
    HFData::DataFrame = deserialize(iStream)
    sort!(HFData,cols = (:date, :fund_id))
    HFByDate::DataFrame = aggregateOnDate(HFData, LAGS_NOTICE_TO_CAPTURE)
    performanceLS::PerformanceLagSymbols =
        PerformanceLagSymbols(LAGS_PERFORMANCE_TO_CAPTURE)
    #OLS specifications: parallel vectors of RHS expressions and the
    #non-factor coefficient names to report for each
    XLMSpecs::Vector{CTExpr} = [parse("negLFlows")]
    XLMNames::Vector{Vector{Symbol}} = [[:intercept; :negLFlows]] #note only non-factor names
    #####put additional specifications here
    push!(XLMSpecs, parse("negLFlows + fDate"))
    push!(XLMNames, [:intercept; :negLFlows])
    push!(XLMSpecs, parse("negLFlows + main_strategy"))
    push!(XLMNames, [:intercept; :negLFlows])
    push!(XLMSpecs, parse("negLFlows + " *
        "$(vec2String(performanceLS.performanceLags, "+"))"))
    push!(XLMNames, [:intercept; :negLFlows; performanceLS.performanceLags])
    push!(XLMSpecs, parse("negLFlows + " *
        "$(vec2String(performanceLS.performanceLags, "+")) + fDate"))
    push!(XLMNames, [:intercept; :negLFlows; performanceLS.performanceLags])
    push!(XLMSpecs, parse("negLFlows + " *
        "$(vec2String(performanceLS.performanceLags, "+")) + main_strategy"))
    push!(XLMNames, [:intercept; :negLFlows; performanceLS.performanceLags])
    push!(XLMSpecs, parse("negLFlows + " *
        "$(vec2String(performanceLS.performanceLags, "+")) + fDate + main_strategy"))
    push!(XLMNames, [:intercept; :negLFlows; performanceLS.performanceLags])
    if RUN_LM_INTERACTIONS
        push!(XLMSpecs, parse("negLFlows + fDate * main_strategy"))
        push!(XLMNames, [:intercept; :negLFlows])
        push!(XLMSpecs, parse("negLFlows + " *
            "$(vec2String(performanceLS.performanceLags, "+")) + " *
            "fDate * main_strategy"))
        push!(XLMNames, [:intercept; :negLFlows; performanceLS.performanceLags])
    end
    #####
    modelsLM::Vector{CTLM} = Vector{CTLM}()
    #run the LM Specs
    for i ∈ 1:length(XLMSpecs)
        if verbose
            println("Begin Spec RHS: $(XLMSpecs[i])")
        end
        push!(modelsLM, CTLM(HFData, XLMSpecs[i],
            :performance, XNames=XLMNames[i], YName = :performance))
        if verbose
            println("Linear Model X Specification: $(XLMSpecs[i])")
            println("Names: $(XLMNames[i])")
            println("β: $(modelsLM[i].β[1:min(10,end)])")
            println("σ: $(sqrt.(diag(getHomoskedΣ!(modelsLM[i])))[1:min(10,end)])")
            println("Data points: $(size(modelsLM[i].X,1))\n")
        end
    end
    ##########LM IO
    descRowNamesLM::Vector{String} = ["Performance Lags", "Date Fixed Effects",
        "Strategy Fixed Effects", "F.E. Interactions", "N ('000s)"]
    #need to print the descriptive rows
    descContentLM::Vector{Vector{String}} =
        #[fill("",length(modelsLM)) for i∈ 1:length(descRowNamesLM)]
        [Vector{String}(length(modelsLM))for i∈ 1:length(descRowNamesLM)]
    #this builds the descriptive rows. There is Probably a better way to do this,
    #but its fairly specific to the project.
    for i ∈ 1:length(XLMSpecs)
        descContentLM[1][i] = "$(contains(XLMSpecs[i], "performanceLag")?"X":"")"
        descContentLM[2][i] = "$(contains(XLMSpecs[i], "fDate")?"X":"")"
        descContentLM[3][i] = "$(contains(XLMSpecs[i], "main_strategy")?"X":"")"
        descContentLM[4][i] = "$(contains(XLMSpecs[i], "*")?"X":"")"
        descContentLM[5][i] = "$(round(modelsLM[i].N/1000.0,1))"
    end
    TableTextLM::String = texTable(modelsLM, getHomoskedΣ!, [:intercept; :negLFlows],
        caption = "Performance Flows OLS Specifications",
        colNames = [["($i)" for i∈1:length(modelsLM)]],
        contentRowNames = ["intercept", "outflows/AUM x 100"],
        descRowNames = descRowNamesLM,
        descContent = descContentLM,
        decimalDigits = 3,
        columnSepPt = -20,
        scaling = [1000.0,1000.0],
        clearMem = USE_AGGRESSIVE_GC)
    if USE_AGGRESSIVE_GC
        gc()
    end
    #####################2SLS code###################
    #parallel spec vectors: X = focal regressors, W = controls, Z = instruments
    X2SLSSpecs::Vector{CTExpr} = [parse("negLFlows+0")]
    W2SLSSpecs::Vector{CTExpr} = [Symbol("")]
    Z2SLSSpecs::Vector{CTExpr} = [parse("noticeLag1+0")]
    W2SLSNames::Vector{Vector{Symbol}} = [[:intercept]]
    X2SLSNames::Vector{Vector{Symbol}} = [[:negLFlows]]
    Z2SLSNames::Vector{Vector{Symbol}} = [[:noticeLag1]]
    #####put additional 2SLS specifications here
    push!(W2SLSSpecs, parse("fDate"))
    push!(W2SLSNames, [:intercept])
    push!(W2SLSSpecs, parse("main_strategy"))
    push!(W2SLSNames, [:intercept])
    push!(W2SLSSpecs, parse("$(vec2String(performanceLS.performanceLags, "+"))"))
    push!(W2SLSNames, [:intercept; performanceLS.performanceLags])
    push!(W2SLSSpecs, parse("$(vec2String(performanceLS.performanceLags, "+"))" *
        " + fDate"))
    push!(W2SLSNames, [:intercept; performanceLS.performanceLags])
    push!(W2SLSSpecs, parse("$(vec2String(performanceLS.performanceLags, "+"))" *
        " + main_strategy"))
    push!(W2SLSNames, [:intercept; performanceLS.performanceLags])
    push!(W2SLSSpecs, parse("$(vec2String(performanceLS.performanceLags, "+"))" *
        " + fDate + main_strategy"))
    push!(W2SLSNames, [:intercept; performanceLS.performanceLags])
    if RUN_2SLS_INTERACTIONS #needs too much memory. Would need a mem-mapped array or simialr solution to do this.
        push!(W2SLSSpecs, parse("fDate * main_strategy"))
        push!(W2SLSNames, [:intercept])
        push!(W2SLSSpecs, parse("$(vec2String(performanceLS.performanceLags, "+"))" *
            " + fDate * main_strategy"))
        push!(W2SLSNames, [:intercept; performanceLS.performanceLags])
    end
    ################# run the 2SLS models
    models2SLS::Vector{CT2SLS} = Vector{CT2SLS}()
    for iX ∈ 1:length(X2SLSSpecs), iW ∈ 1:length(W2SLSSpecs), iZ ∈ 1:length(Z2SLSSpecs)
        if verbose #print the specification to run
            println("Spec X: $(X2SLSSpecs[iX])
Spec W: $(W2SLSSpecs[iW])
Spec Z: $(Z2SLSSpecs[iZ])")
        end
        #build the regression model
        push!(models2SLS, CT2SLS(HFData, X2SLSSpecs[iX], W2SLSSpecs[iW],
            :performance, Z2SLSSpecs[iZ], XNames=X2SLSNames[iX],
            WNames = W2SLSNames[iW], YName = :performance, ZNames = Z2SLSNames[iZ]))
        println(models2SLS[end].ZNames)
        println(models2SLS[end].Π1)
        if verbose
            println("IV Model Specification:
X: $(X2SLSSpecs[iX])
W: $(W2SLSSpecs[iW])
Z: $(Z2SLSSpecs[iZ])")
            println("WNames: $(W2SLSNames[iW])")
            #BUG FIX: the three lines below previously indexed models2SLS[i],
            #but i is not defined in this (iX,iW,iZ) loop — it was the variable
            #of an earlier, already-closed loop (for-loop variables are local
            #to the loop in Julia) — so verbose=true raised UndefVarError.
            #The just-fitted model is models2SLS[end], matching the usage above.
            println("β: $(models2SLS[end].δ2[1:min(10,end)])")
            println("σ: $(sqrt.(diag(getHomoskedΣ!(models2SLS[end])))[1:min(10,end)])")
            println("Data points: $(size(models2SLS[end].X,1))\n")
        end
    end
    ##################IO Code for 2SLS
    descRowNames2SLS::Vector{String} = ["Performance Lags", "Date Fixed Effects",
        "Strategy Fixed Effects", "N ('000s)"]
    #need to print the descriptive rows
    descContent2SLS::Vector{Vector{String}} =
        #[fill("",length(modelsLM)) for i∈ 1:length(descRowNamesLM)]
        [Vector{String}(length(models2SLS))for i∈ 1:length(descRowNames2SLS)]
    #this builds the descriptive rows. There is Probably a better way to do this,
    #but its fairly specific to the project.
    for i ∈ 1:length(W2SLSSpecs)
        descContent2SLS[1][i] = "$(contains(W2SLSSpecs[i], "performanceLag")?"X":"")"
        descContent2SLS[2][i] = "$(contains(W2SLSSpecs[i], "fDate")?"X":"")"
        descContent2SLS[3][i] = "$(contains(W2SLSSpecs[i], "main_strategy")?"X":"")"
        descContent2SLS[4][i] = "$(round(models2SLS[i].N/1000.0,1))"
    end
    #note this only works for a single focal var
    stage12SLS::Vector{CTLM} = [get1stStage(m)[1] for m ∈ models2SLS]
    TableText1stStage::String = texTable(stage12SLS, getHomoskedΣ!, [:intercept; :noticeLag1],
        caption = "Performance Flows IV 1st Stage (Focal: Outflows)",
        colNames = [["($i)" for i∈1:length(models2SLS)]],
        contentRowNames = ["intercept", "Notifications (frac of fund x1000)"],
        descRowNames = descRowNames2SLS,
        descContent = descContent2SLS,
        decimalDigits = 3,
        columnSepPt = -20,
        scaling = [1000.0,1000.0],
        clearMem = USE_AGGRESSIVE_GC)
    TableText2SLS::String = texTable(models2SLS, getHomoskedΣ!, [:intercept; :negLFlows],
        caption = "Performance Flows 2SLS Specifications",
        colNames = [["Z=1 Month Notification of Flows"],["($i)" for i∈1:length(models2SLS)]],
        contentRowNames = ["intercept", "outflows/AUM x 1000"],
        descRowNames = descRowNames2SLS,
        descContent = descContent2SLS,
        decimalDigits = 3,
        columnSepPt = -20,
        scaling = [1.0,1000.0],
        widthColNames = [[length(W2SLSSpecs)],ones(Int,length(W2SLSSpecs))],
        clearMem = USE_AGGRESSIVE_GC)
    writeTables2File([TableTextLM, TableText1stStage, TableText2SLS], HEADER_NAME, FOOTER_NAME, path=RESULTS_PATH,
        outName = "RegTables_$FlowsLaggingProcedure.tex")
    #=plOut::Plot = plot(melt(HFByDate[:,[:date,
    #:totFlows, :totRedemp, :noticeLag1,
    :totRedempNotice,:noticeLag1,:noticeLag2]],:date),
    x=:date,y=:value,color=:variable,Geom.line,
    Guide.title("Total Fund Flows over Time"),
    Guide.xlabel("Date"),Guide.ylabel("Flows"))
    draw(PNG("$(DATA_FILE_NAME)_$(FlowsLaggingProcedure)_flows.png", 7.5inch, 6inch), plOut)
    =#
    close(iStream)
end
# Round-trip check against R: deserialize the cleaned hedge-fund panel for the
# given lagging procedure and dump it to CSV so the numbers can be re-verified
# in R.
function verify2R(;FlowsLaggingProcedure::Char = 'U')
    #load the cleaned, serialized data set
    inStream::IOStream =
        open("$DATA_PATH\\$(DATA_FILE_NAME)_$(FlowsLaggingProcedure)_clean.jls")
    HFData::DataFrame = deserialize(inStream)
    close(inStream)
    #hand the frame to R as a csv
    writetable("$R_TEST_PATH\\RHFData.csv", HFData)
end
# Single-configuration driver: run the aggregate analysis for lagging
# procedure 'C' and export a CSV for cross-checking in R, timing the whole
# pass.  The pre-processing step is commented out because the cleaned binary
# is assumed to already exist on disk (newBinary=false when it last ran).
@time begin
#PreProcessHFData(FlowsLaggingProcedure='C', newBinary=false)
AnalyzeAggregates(FlowsLaggingProcedure='C', verbose=false)
verify2R(FlowsLaggingProcedure='C')
end
##Full Pass:
#=@time for c ∈ ['C', 'R', 'L', 'U']
PreProcessHFData(FlowsLaggingProcedure=c, newBinary=true)
AnalyzeAggregates(FlowsLaggingProcedure=c)
end=#
end
|
{"hexsha": "0ca49fc85973ab3032d65eb15c726efc61cc84d6", "size": 28318, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "Old/SummerProject17/SAVE- HF Data Functions-preNLRedux.jl", "max_stars_repo_name": "clintonTE/CCA", "max_stars_repo_head_hexsha": "a555cc1fa4b6d5f1464de44e2e322d32336d1e3a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Old/SummerProject17/SAVE- HF Data Functions-preNLRedux.jl", "max_issues_repo_name": "clintonTE/CCA", "max_issues_repo_head_hexsha": "a555cc1fa4b6d5f1464de44e2e322d32336d1e3a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Old/SummerProject17/SAVE- HF Data Functions-preNLRedux.jl", "max_forks_repo_name": "clintonTE/CCA", "max_forks_repo_head_hexsha": "a555cc1fa4b6d5f1464de44e2e322d32336d1e3a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.8244473342, "max_line_length": 118, "alphanum_fraction": 0.6697506886, "num_tokens": 8720}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#==============================================================================
# DOCS
#==============================================================================
"""Move data
"""
#==============================================================================
# IMPORTS
#==============================================================================
import logging
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats
import pandas as pd
from django_extensions.management import shells
from django_extensions.management.shells import import_objects as _oio
from ... import extra_stats as estats
#==============================================================================
# PATCH IMPORT OBJECTS
#==============================================================================
def sk_import_objects(*args, **kwargs):
    """Wrap django-extensions' ``import_objects``, injecting the science stack.

    Delegates to the original implementation, then adds numpy,
    matplotlib.pyplot, scipy.stats, the project's extra_stats module and
    pandas to the shell namespace.
    """
    namespace = _oio(*args, **kwargs)
    extras = {"np": np, "plt": plt, "stats": stats, "estats": estats, "pd": pd}
    namespace.update(extras)
    return namespace


# Monkey-patch so every shell_plus invocation picks up the enriched namespace.
shells.import_objects = sk_import_objects
#==============================================================================
# LOGGER
#==============================================================================
logger = logging.getLogger("carpyncho")
#==============================================================================
# COMMAND
#==============================================================================
from django_extensions.management.commands import shell_plus
class Command(shell_plus.Command):
    # Behaves exactly like django-extensions' shell_plus command; the
    # customization happens through the module-level shells.import_objects
    # patch, not by overriding anything here.
    pass
#==============================================================================
# MAIN
#==============================================================================
if __name__ == "__main__":
    # Run directly (not via manage.py): just print the module docstring.
    print(__doc__)
|
{"hexsha": "bdd0b5a2c7eccc4bf7ae664e464e4eb1b7daf026", "size": 1772, "ext": "py", "lang": "Python", "max_stars_repo_path": "carpyncho1/skdjango/management/commands/skshell.py", "max_stars_repo_name": "carpyncho/yeolde_carpyncho", "max_stars_repo_head_hexsha": "fba72ebf9d4a3e4e4ea18160310058c6812a0457", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "carpyncho1/skdjango/management/commands/skshell.py", "max_issues_repo_name": "carpyncho/yeolde_carpyncho", "max_issues_repo_head_hexsha": "fba72ebf9d4a3e4e4ea18160310058c6812a0457", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-06-05T19:37:26.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-05T19:40:38.000Z", "max_forks_repo_path": "carpyncho1/skdjango/management/commands/skshell.py", "max_forks_repo_name": "carpyncho/yeolde_carpyncho", "max_forks_repo_head_hexsha": "fba72ebf9d4a3e4e4ea18160310058c6812a0457", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.2615384615, "max_line_length": 79, "alphanum_fraction": 0.3374717833, "include": true, "reason": "import numpy,import scipy", "num_tokens": 225}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Computes the spectrogram of a test signal using cupy and cuFFT.
Author: Jan Schlüter
"""
import sys
import os
import timeit
import numpy as np
import cupy as cp
INPUT_ON_GPU = True
OUTPUT_ON_GPU = True
from testfile import make_test_signal
def spectrogram(signal, sample_rate=22050, frame_len=1024, fps=70):
    """
    Computes a magnitude spectrogram at a given sample rate (in Hz), frame
    length (in samples) and frame rate (in Hz), on CUDA using cupy.

    ``signal`` is expected to be already framed into shape
    (frames, frame_len) — see ``main`` — either as a numpy array (when
    INPUT_ON_GPU is False) or a cupy array.  ``sample_rate`` and ``fps`` are
    kept for interface parity with the sibling benchmark scripts but are not
    used here, since the framing is done by the caller.

    Returns the magnitude spectrogram: a cupy array when OUTPUT_ON_GPU is
    set, otherwise a numpy array on the host.
    """
    if not INPUT_ON_GPU:
        # Move the framed float32 signal to the GPU (copy included in timing).
        signal = cp.array(signal.astype(np.float32))
    win = cp.hanning(frame_len).astype(cp.float32)
    # Apply the window function.  NOTE(review): the original in-place
    # `signal *= win` reportedly misbehaved, so keep the out-of-place product.
    signal = signal * win
    # FFT each frame, then keep only magnitudes.
    spect = cp.abs(cp.fft.rfft(signal))
    if OUTPUT_ON_GPU:
        # Ensure all kernels have finished before the timer stops.
        cp.cuda.get_current_stream().synchronize()
        # Fix: this branch previously fell through and returned None.
        return spect
    return spect.get()
def main():
    """Benchmark the cupy spectrogram on a synthetic test signal."""
    # load input; these must be module-level globals because the timeit
    # setup string below imports them via `from __main__ import ...`
    global x, spectrogram
    x = make_test_signal()
    # we do the following here because cupy cannot do stride tricks
    # the actual copying work is included in the benchmark unless INPUT_ON_GPU
    hop_size = 22050 // 70  # samples between frame starts (sample_rate // fps)
    frame_len = 1024
    frames = len(x) - frame_len + 1
    # zero-copy view of all overlapping frames, subsampled to one per hop
    x = np.lib.stride_tricks.as_strided(
        x, (frames, frame_len), (x.strides[0], x.strides[0]))[::hop_size]
    if INPUT_ON_GPU:
        x = cp.array(x.astype(np.float32))
    # benchmark: 5 repeats of 32 calls each; report best average per call
    times = timeit.repeat(
        setup='from __main__ import x, spectrogram',
        stmt='spectrogram(x)',
        repeat=5, number=32)
    print("Took %.3fs." % (min(times) / 32))
    # save result (disabled; only valid when the output is on the host)
    #assert not OUTPUT_ON_GPU
    #np.save(sys.argv[0][:-2] + 'npy', spectrogram(x))
if __name__=="__main__":
    # Script entry point: run the benchmark.
    main()
|
{"hexsha": "bc93ed322f15833ada38ade26d0df82b04900ca0", "size": 1908, "ext": "py", "lang": "Python", "max_stars_repo_path": "bench_cupy.py", "max_stars_repo_name": "zhouxzh/Jetson_nano_stft_benchmark", "max_stars_repo_head_hexsha": "ffa97984f95b9862ac2a10b8459bb7ef241c6c72", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bench_cupy.py", "max_issues_repo_name": "zhouxzh/Jetson_nano_stft_benchmark", "max_issues_repo_head_hexsha": "ffa97984f95b9862ac2a10b8459bb7ef241c6c72", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bench_cupy.py", "max_forks_repo_name": "zhouxzh/Jetson_nano_stft_benchmark", "max_forks_repo_head_hexsha": "ffa97984f95b9862ac2a10b8459bb7ef241c6c72", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.5, "max_line_length": 92, "alphanum_fraction": 0.6493710692, "include": true, "reason": "import numpy,import cupy", "num_tokens": 515}
|
"""Defines a segmentation module for evaluation only."""
import os
import imageio
import numpy as np
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
class SegmentationModule:
    """Evaluation-only wrapper around a detectron2 Mask R-CNN predictor."""

    def __init__(self, load_checkpoint_path: str, debug_dir: str):
        """Builds the predictor from the COCO Mask R-CNN R50-FPN 3x config.

        Args:
            load_checkpoint_path: Path to the model weights to load.
            debug_dir: Directory for debug images, or None to disable dumps.
        """
        self.debug_dir = debug_dir
        config = get_cfg()
        base_config = model_zoo.get_config_file(
            "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
        )
        config.merge_from_file(base_config)
        # Single foreground class; keep only very confident detections.
        config.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128
        config.MODEL.ROI_HEADS.NUM_CLASSES = 1
        config.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.99
        config.MODEL.WEIGHTS = load_checkpoint_path
        self.predictor = DefaultPredictor(config)

    def predict(self, img: np.ndarray, debug_id: int) -> np.ndarray:
        """Predicts binary instance segmentations of objects vs. background
        for a given image.

        Args:
            img: The RGB image to predict segmentations on.
            debug_id: Identifier used to name the per-call debug subfolder.

        Returns:
            A numpy array of shape (N, H, W) holding N binary instance masks.
        """
        if self.debug_dir is not None:
            # Dump the raw input image for offline inspection.
            debug_path = os.path.join(
                self.debug_dir, f"{debug_id:04}", "seg_input.png"
            )
            os.makedirs(os.path.dirname(debug_path), exist_ok=True)
            imageio.imwrite(debug_path, img)
        prediction = self.predictor(img)
        # Extract the (N, H, W) binary mask stack on the CPU.
        return prediction["instances"].to("cpu")._fields["pred_masks"].numpy()
|
{"hexsha": "11bc22236dfdf51889e750ffc409cc22110b387b", "size": 2021, "ext": "py", "lang": "Python", "max_stars_repo_path": "system/seg_module.py", "max_stars_repo_name": "jyf588/pytorch-rl-bullet", "max_stars_repo_head_hexsha": "3ac1835d01e658b2078126895ffa0eb11304abb4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "system/seg_module.py", "max_issues_repo_name": "jyf588/pytorch-rl-bullet", "max_issues_repo_head_hexsha": "3ac1835d01e658b2078126895ffa0eb11304abb4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "system/seg_module.py", "max_forks_repo_name": "jyf588/pytorch-rl-bullet", "max_forks_repo_head_hexsha": "3ac1835d01e658b2078126895ffa0eb11304abb4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.8448275862, "max_line_length": 79, "alphanum_fraction": 0.6120732311, "include": true, "reason": "import numpy", "num_tokens": 493}
|
""" Estimate the correlations in terms of robustness between existing natural and synthetic corruption benchmarks.
The computed correlation scores and their associated p-values are saved in pickles at ../Results/benchmark_correlations.
Require: performances on ImageNet-C and ImageNet-P of the models defined in ../models.py (obtained using test_inet_c.py, test_inet_p.py)
The accuracies of the models defined in ../models.py for all considered natural corruption benchmarks (obtained using test_inet_a.py, test_inet_r.py, test_inet_sk.py, test_inet_v2.py and test_objectnet.py) """
import pandas as pd
import numpy as np
import pickle
import scipy.stats
import os
import sys
p = os.path.abspath('..')
sys.path.insert(1, p)

RESULTS_DIR = "../Results/benchmark_correlations"


def _load_scores(file_name):
    # Read a pickled score vector from the results folder and drop any
    # singleton dimensions.
    with open(os.path.join(RESULTS_DIR, file_name), 'rb') as handle:
        return pickle.load(handle).squeeze()


natural_bench_name = ["inet_a", "inet_r", "inet_v2", "onet", "inet_sk"]
syn_bench_name = ["inet_c", "inet_p"]

# Natural benchmarks store accuracies; ImageNet-P stores mFR scores instead
# (per its file name), so its pickle is named differently.
natural_bench = [_load_scores("{}_accuracies.pkl".format(name))
                 for name in natural_bench_name]
syn_bench = [_load_scores("inet_p_mfr.pkl" if name == "inet_p"
                          else "{}_accuracies.pkl".format(name))
             for name in syn_bench_name]
inet_clean_acc = _load_scores("inet_clean_accuracies.pkl")

correlation_array = np.zeros([len(natural_bench), len(syn_bench)])
p_value_array = np.zeros([len(natural_bench), len(syn_bench)])
for row, natural_acc in enumerate(natural_bench):
    natural_drop = inet_clean_acc - natural_acc
    for col, syn_score in enumerate(syn_bench):
        # Accuracies are converted to robustness drops (clean - corrupted);
        # the ImageNet-P scores are used directly.
        if syn_bench_name[col] == "inet_p":
            syn_drop = syn_score
        else:
            syn_drop = inet_clean_acc - syn_score
        correlation_array[row, col], p_value_array[row, col] = \
            scipy.stats.pearsonr(natural_drop, syn_drop)

# Persist both matrices as pickles and as browsable HTML tables.
correlation_array = pd.DataFrame(correlation_array, index=natural_bench_name, columns=syn_bench_name)
p_value_array = pd.DataFrame(p_value_array, index=natural_bench_name, columns=syn_bench_name)
correlation_array.to_pickle(os.path.join(RESULTS_DIR, "existing_bench_correlations.pkl"))
correlation_array.to_html(os.path.join(RESULTS_DIR, "existing_bench_correlations.html"))
p_value_array.to_pickle(os.path.join(RESULTS_DIR, "existing_bench_p_values.pkl"))
p_value_array.to_html(os.path.join(RESULTS_DIR, "existing_bench_p_values.html"))
|
{"hexsha": "6ebbb44064172bc61a2477aa649a0fd2268dfb16", "size": 2801, "ext": "py", "lang": "Python", "max_stars_repo_path": "bench_correlations/get_existing_bench_correlations.py", "max_stars_repo_name": "bds-ailab/common_corruption_benchmark", "max_stars_repo_head_hexsha": "b6888f1591a2eb03d186628e25550ebd132e0024", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-06-19T08:36:20.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-22T13:34:38.000Z", "max_issues_repo_path": "bench_correlations/get_existing_bench_correlations.py", "max_issues_repo_name": "bds-ailab/common_corruption_benchmark", "max_issues_repo_head_hexsha": "b6888f1591a2eb03d186628e25550ebd132e0024", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-07-22T07:12:58.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-22T07:13:19.000Z", "max_forks_repo_path": "bench_correlations/get_existing_bench_correlations.py", "max_forks_repo_name": "bds-ailab/common_corruption_benchmark", "max_forks_repo_head_hexsha": "b6888f1591a2eb03d186628e25550ebd132e0024", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.2931034483, "max_line_length": 209, "alphanum_fraction": 0.7511602999, "include": true, "reason": "import numpy,import scipy", "num_tokens": 683}
|
from functools import partial
import gc
from multiprocessing import Pool
import sys
import numpy as np
from tqdm import tqdm
from components.spectrum.alignment import pafft
if __name__ == '__main__':
    # Positional CLI arguments: m/z axis CSV, reference-spectrum CSV,
    # spectra .npy path, worker count, output path.
    MZS = sys.argv[1]
    REFERENCE = sys.argv[2]
    SPECTRA = sys.argv[3]
    POOL_SIZE = int(sys.argv[4])
    DESTINATION = sys.argv[5]
    mzs = np.loadtxt(MZS, delimiter=',')
    reference = np.loadtxt(REFERENCE, delimiter=',')
    # Memory-map the spectra so rows are paged in lazily rather than the
    # whole matrix being loaded up front.
    spectra = np.load(SPECTRA, mmap_mode='r')
    # functools.partial (not a lambda) keeps `align` picklable for the pool.
    align = partial(pafft, mzs=mzs, reference_counts=reference)
    with Pool(processes=POOL_SIZE) as pool:
        aligned = pool.map(
            align, tqdm(spectra, desc='Alignment'))
    # Release the mmap before serializing the aligned spectra.
    del spectra
    gc.collect()
    # Write via an open file object so the destination name is used verbatim
    # (np.save on a plain path would append ".npy" if missing).
    with open(DESTINATION, 'wb') as out_file:
        np.save(out_file, aligned)
|
{"hexsha": "aa75cc2538fba88d3926a9a3f035953e045761ee", "size": 785, "ext": "py", "lang": "Python", "max_stars_repo_path": "bin/alignment.py", "max_stars_repo_name": "gmrukwa/msi-preprocessing-pipeline", "max_stars_repo_head_hexsha": "bc6d26daba42575babcdf5287999f1f844cf2e8e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bin/alignment.py", "max_issues_repo_name": "gmrukwa/msi-preprocessing-pipeline", "max_issues_repo_head_hexsha": "bc6d26daba42575babcdf5287999f1f844cf2e8e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2019-11-26T19:13:32.000Z", "max_issues_repo_issues_event_max_datetime": "2019-11-29T08:14:28.000Z", "max_forks_repo_path": "bin/alignment.py", "max_forks_repo_name": "gmrukwa/msi-preprocessing-pipeline", "max_forks_repo_head_hexsha": "bc6d26daba42575babcdf5287999f1f844cf2e8e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.0689655172, "max_line_length": 63, "alphanum_fraction": 0.676433121, "include": true, "reason": "import numpy", "num_tokens": 209}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.