import scipy
import numpy as np
import os
import sys
from data_profiler.labelers.classification_report_utils import classification_report
import warnings
from sklearn.exceptions import UndefinedMetricWarning
warnings.filterwarnings("ignore", category=UndefinedMetricWarning)
# In case the data profiler lives in its own repo, make the data_conversion
# package importable from either location.
_file_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(_file_dir, 'data_conversion'))  # executed from the base repo
sys.path.append(os.path.join(_file_dir, '../data_conversion'))  # executed inside the folder
def f1_report_dict_to_str(f1_report, label_names):
"""
Returns the report string from the f1_report dict.
Example Output:
precision recall f1-score support
class 0 0.00 0.00 0.00 1
class 1 1.00 0.67 0.80 3
micro avg 0.67 0.50 0.57 4
macro avg 0.50 0.33 0.40 4
weighted avg 0.75 0.50 0.60 4
Note: this is generally taken from the `classification_report` function
inside sklearn.
:param f1_report: f1 report dictionary from sklearn
:type f1_report: dict
:param label_names: names of labels included in the report
:type label_names: list(str)
:return: string representing f1_report printout
:rtype: str
"""
sig_figs = 2
headers = ["precision", "recall", "f1-score", "support"]
longest_last_line_heading = 'weighted avg'
name_width = max(len(name) for name in label_names)
width = max(name_width, len(longest_last_line_heading), sig_figs)
head_fmt = '{:>{width}s} ' + ' {:>9}' * len(headers)
report = head_fmt.format('', *headers, width=width)
report += '\n\n'
report_end = '\n'
row_fmt = '{:>{width}s} ' + (' {{{}:>9.{{sig_figs}}f}}' * 3).format(
*headers[:-1]) + ' {support:>9}\n'
for key, row in f1_report.items():
if key not in ['accuracy', 'macro avg', 'weighted avg', 'micro avg']:
report += row_fmt.format(key, **row, width=width, sig_figs=sig_figs)
else:
if key == 'accuracy':
row_fmt_accuracy = '{:>{width}s} ' + \
' {:>9.{sig_figs}}' * 2 + ' {:>9.{sig_figs}f}' + \
' {:>9}\n'
report_end += row_fmt_accuracy.format(key, '', '', row, '',
width=width, sig_figs=sig_figs)
else:
report_end += row_fmt.format(key, **row,
width=width, sig_figs=sig_figs)
report += report_end
return report
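# A minimal usage sketch (hypothetical numbers mirroring the docstring
# example above; not real model output):
def _example_f1_report_str():
    report = {
        'class 0': {'precision': 0.00, 'recall': 0.00, 'f1-score': 0.00, 'support': 1},
        'class 1': {'precision': 1.00, 'recall': 0.67, 'f1-score': 0.80, 'support': 3},
        'micro avg': {'precision': 0.67, 'recall': 0.50, 'f1-score': 0.57, 'support': 4},
    }
    return f1_report_dict_to_str(report, ['class 0', 'class 1'])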
def evaluate_accuracy(predicted_entities_in_index, true_entities_in_index,
num_labels, entity_rev_dict, verbose=True,
omitted_labels=('PAD', 'BACKGROUND'),
confusion_matrix_file=None):
"""
Evaluate the accuracy from comparing the predicted labels with true labels
:param predicted_entities_in_index: predicted encoded labels for input
sentences
:type predicted_entities_in_index: list(array(int))
    :param true_entities_in_index: true encoded labels for input sentences
    :type true_entities_in_index: list(array(int))
    :param num_labels: number of label classes (the dimension of the
        confusion matrix)
    :type num_labels: int
    :param entity_rev_dict: dictionary to convert indices to entities
    :type entity_rev_dict: dict([index, entity])
:param verbose: print additional information for debugging
:type verbose: boolean
:param omitted_labels: labels to omit from the accuracy evaluation
:type omitted_labels: list() of text labels
:param confusion_matrix_file: File name (and dir) for confusion matrix
:type confusion_matrix_file: str
    :return: macro-average f1-score and the full f1 report dictionary
    :rtype: tuple(float, dict)
"""
label_names = None
label_indexes = None
if entity_rev_dict:
label_names = [str(x[1]) for x in
sorted(entity_rev_dict.items(), key=lambda x: x[0]) if
x[1] not in omitted_labels]
label_indexes = [x[0] for x in
sorted(entity_rev_dict.items(), key=lambda x: x[0]) if
x[1] not in omitted_labels]
max_len = len(predicted_entities_in_index[0])
true_labels_padded = np.zeros((len(true_entities_in_index), max_len))
for i, true_labels_row in enumerate(true_entities_in_index):
true_labels_padded[i][:len(true_labels_row)] = true_labels_row
true_labels_flatten = np.hstack(true_labels_padded)
predicted_labels_flatten = np.hstack(predicted_entities_in_index)
if entity_rev_dict:
all_labels = [entity_rev_dict[key] for key in
sorted(entity_rev_dict.keys())]
# From sklearn, description of the confusion matrix:
# By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
# is equal to the number of observations known to be in group :math:`i` but
# predicted to be in group :math:`j`.
conf_mat = np.zeros((num_labels, num_labels), dtype=np.int64)
batch_size = min(2**20, len(true_labels_flatten))
for batch_ind in range(len(true_labels_flatten)//batch_size + 1):
true_label_batch = true_labels_flatten[batch_size*batch_ind:(batch_ind + 1) * batch_size]
pred_label_batch = predicted_labels_flatten[batch_size * batch_ind:(batch_ind + 1) * batch_size]
conf_mat += scipy.sparse.coo_matrix(
(
np.ones((len(pred_label_batch),)),
(true_label_batch, pred_label_batch)
),
shape=(num_labels, num_labels),
dtype=np.int64).toarray()
    # Only write the confusion matrix if a file path was provided
if confusion_matrix_file and entity_rev_dict:
import pandas as pd
conf_mat_pd = pd.DataFrame(
conf_mat,
columns=list(map(lambda x: 'pred:' + x, all_labels)),
index=list(map(lambda x: 'true:' + x, all_labels)))
# Make directory, if required
if os.path.dirname(confusion_matrix_file) \
and not os.path.isdir(os.path.dirname(confusion_matrix_file)):
os.makedirs(os.path.dirname(confusion_matrix_file))
conf_mat_pd.to_csv(confusion_matrix_file)
f1_report = classification_report(
conf_mat,
labels=label_indexes,
target_names=label_names, output_dict=True)
# adjust macro average to be updated only on positive support labels
# note: in sklearn, support is number of occurrences of each label in
# true_labels_flatten
num_labels_with_positive_support = 0
for key, values in f1_report.items():
if key not in ['accuracy', 'macro avg', 'weighted avg', 'micro avg']:
if values['support']:
num_labels_with_positive_support += 1
    # because sklearn does not exclude labels with zero support (f1 = 0.0)
    # from the macro average.
for metric in f1_report['macro avg'].keys():
if metric != 'support':
if not num_labels_with_positive_support:
f1_report['macro avg'][metric] = np.nan
else:
f1_report['macro avg'][metric] *= float(
len(label_names)) / num_labels_with_positive_support
if 'macro avg' in f1_report:
        f1 = f1_report['macro avg']['f1-score']  # macro f1 used as the headline score
else:
# this is the only remaining option for the report
f1 = f1_report['accuracy']
if verbose:
f1_report_str = f1_report_dict_to_str(f1_report, label_names)
print("(After removing non-entity tokens)\n", f1_report_str)
print("\n")
print("F1 Score: ", f1)
return f1, f1_report
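# An end-to-end sketch with toy labels (the entity names and arrays below are
# made up; 'PAD' is dropped by the default omitted_labels):
def _example_evaluate_accuracy():
    predicted = [np.array([1, 2, 2, 0])]
    truth = [np.array([1, 2, 1, 0])]
    rev_dict = {0: 'PAD', 1: 'NAME', 2: 'DATE'}
    return evaluate_accuracy(predicted, truth, num_labels=3,
                             entity_rev_dict=rev_dict, verbose=False)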
|
{"hexsha": "1c2a3f3cf086618942b658e64f967e06bb6596e3", "size": 7719, "ext": "py", "lang": "Python", "max_stars_repo_path": "data_profiler/labelers/labeler_utils.py", "max_stars_repo_name": "gme5078/data-profiler", "max_stars_repo_head_hexsha": "602cc5e4f4463f9b807000abf3893815918d0723", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "data_profiler/labelers/labeler_utils.py", "max_issues_repo_name": "gme5078/data-profiler", "max_issues_repo_head_hexsha": "602cc5e4f4463f9b807000abf3893815918d0723", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data_profiler/labelers/labeler_utils.py", "max_forks_repo_name": "gme5078/data-profiler", "max_forks_repo_head_hexsha": "602cc5e4f4463f9b807000abf3893815918d0723", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.9510869565, "max_line_length": 104, "alphanum_fraction": 0.6237854644, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1828}
|
import argparse
import baltic as bt
import matplotlib.pyplot as plt
import matplotlib.patheffects as path_effects
from matplotlib.gridspec import GridSpec
from matplotlib.patches import Rectangle
from matplotlib.colors import LinearSegmentedColormap
import seaborn as sns
import glob
import ast
from treetime.utils import numeric_date, datetime_from_numeric
import pandas as pd
import datetime
from collections.abc import Iterable
import json
import math
from utils import plot_style
import string
import numpy as np
from collections import Counter
def run():
parser = argparse.ArgumentParser()
parser.add_argument('--metadata')
parser.add_argument('--metadataDelim', default='\t')
parser.add_argument('--metadataCladeCol', default=2)
parser.add_argument('--metadataDateCol', default=1)
parser.add_argument('--config',
help='json file with config information (labels, colors, etc.)')
args = parser.parse_args()
plot_style()
args.metadata = 'data/weighted_downsampling/ga_focused_aligned_masked_weighted_GA_pangolin_dates.csv'
args.config='config/pango_lineage.json'
plot_config = json.load(open(args.config, 'r'))
plot_config['effects'] = eval(plot_config['effects'])
metadata = pd.read_csv(args.metadata, sep=args.metadataDelim, header=None)
print(metadata)
metadata[args.metadataDateCol] = pd.to_datetime(metadata[args.metadataDateCol])
metadata['week_end_date'] = pd.to_datetime(metadata[args.metadataDateCol]).apply(lambda k: k+datetime.timedelta(days= 6 - k.weekday()))
clade_counts = Counter(metadata[args.metadataCladeCol])
to_collapse = {key: f'Other ({key.split(".")[0]})' if value < plot_config['min_size'] else key for key, value in clade_counts.items() }
metadata[args.metadataCladeCol] = metadata[args.metadataCladeCol].map(to_collapse)
n_per_week = metadata.groupby(['week_end_date', args.metadataCladeCol]).size().reset_index()
n_per_week = n_per_week.pivot(index='week_end_date', columns=2, values=0).fillna(0)
n_per_week.index = n_per_week.index.to_series().apply(numeric_date)
fig, ax = plt.subplots(constrained_layout=True, figsize=(6.4*1.25, 4.8))
cm = LinearSegmentedColormap.from_list('clades', [plot_config['colors'][i] for i in n_per_week.columns], N=n_per_week.shape[1])
#(n_per_week.div(n_per_week.sum(axis=1), axis=0)*100).plot.area(ax=ax, colormap=cm, alpha=1.0, zorder=2)
n_per_week.plot.area(ax=ax, colormap=cm, alpha=1.0, zorder=2)
n_per_week['dt'] = n_per_week.index.to_series().apply(lambda k: datetime_from_numeric(k))
    n_per_week.to_csv('.'.join(args.metadata.split('.')[0:-1])+'_n_per_week.csv')
x_ticks = [
datetime.datetime.strptime('2020-03-01', '%Y-%m-%d'),
datetime.datetime.strptime('2020-03-08', '%Y-%m-%d'),
datetime.datetime.strptime('2020-03-15', '%Y-%m-%d'),
datetime.datetime.strptime('2020-03-22', '%Y-%m-%d'),
datetime.datetime.strptime('2020-03-31', '%Y-%m-%d')]
x_labels = \
[i.strftime("%m")+'/'+i.strftime("%d") for i in x_ticks]
x_ticks = [numeric_date(i) for i in x_ticks]
ax.set_xticks(x_ticks)
ax.set_xticklabels(x_labels)
ax.set_ylabel('Sequences/Wk', size=12)
ax.get_legend().remove()
ax.set_xlabel("Date (2020)")
x_pos = 0.8
y_pos = 0.9
ax_height = (ax.get_position().y1 - ax.get_position().y0) * ax.figure.bbox.height
for label_idx, label in enumerate(n_per_week.columns[:-1][::-1]):
font_size = 14+int(ax.figure.bbox.height*0.01)*2.5
text = ax.text(x_pos, y_pos - 0.075*label_idx,
label, color=plot_config['colors'][label],
size=font_size, transform=ax.transAxes,
path_effects=plot_config['effects'])
fig.savefig(f'{plot_config["out_name"]}.pdf')
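# A self-contained sketch of the week_end_date computation above
# (toy dates; the week is taken to end on Sunday):
def _demo_week_end_date():
    # 2020-03-02 (Mon), 2020-03-04 (Wed) and 2020-03-08 (Sun) all map to
    # the same Sunday, 2020-03-08
    s = pd.to_datetime(pd.Series(['2020-03-02', '2020-03-04', '2020-03-08']))
    return s.apply(lambda k: k + datetime.timedelta(days=6 - k.weekday()))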
|
{"hexsha": "e6a18ff3874fc854bc7293b87cc8191c6cc96ed6", "size": 3639, "ext": "py", "lang": "Python", "max_stars_repo_path": "phylogenetic_analysis/scripts/plot_clades_per_week.py", "max_stars_repo_name": "Piantadosi-Lab/SARS-CoV-2_ATL_Introductions", "max_stars_repo_head_hexsha": "cf201410454536006508aafff83ad32aecee19ba", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "phylogenetic_analysis/scripts/plot_clades_per_week.py", "max_issues_repo_name": "Piantadosi-Lab/SARS-CoV-2_ATL_Introductions", "max_issues_repo_head_hexsha": "cf201410454536006508aafff83ad32aecee19ba", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "phylogenetic_analysis/scripts/plot_clades_per_week.py", "max_forks_repo_name": "Piantadosi-Lab/SARS-CoV-2_ATL_Introductions", "max_forks_repo_head_hexsha": "cf201410454536006508aafff83ad32aecee19ba", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.3139534884, "max_line_length": 139, "alphanum_fraction": 0.7375652652, "include": true, "reason": "import numpy", "num_tokens": 969}
|
## ----author info, include=F----------------------------------------------
## Author: Yanchang Zhao
## Email: yanchang@RDataMining.com
## Website: http://www.RDataMining.com
## Date: 9 December 2018
## ----load libraries, include=F, echo=F-----------------------------------
## load required packages
library(dtw)
library(wavelets)
library(party)
## ------------------------------------------------------------------------
a <- ts(1:20, frequency=12, start=c(2011,3))
print(a)
str(a)
attributes(a)
## ----out.width=".85\\textwidth"------------------------------------------
plot(AirPassengers)
## ----out.width=".9\\textwidth"-------------------------------------------
apts <- ts(AirPassengers, frequency = 12)
f <- decompose(apts)
plot(f$figure, type="b") # seasonal figures
## ----out.width=".75\\textwidth"------------------------------------------
plot(f)
## ----tidy=F--------------------------------------------------------------
# build an ARIMA model
fit <- arima(AirPassengers, order=c(1,0,0),
list(order=c(2,1,0), period=12))
fore <- predict(fit, n.ahead=24)
# error bounds at 95% confidence level
U <- fore$pred + 2*fore$se
L <- fore$pred - 2*fore$se
## ----plot-forecast, fig.show="hide", tidy=F------------------------------
ts.plot(AirPassengers, fore$pred, U, L,
col = c(1, 2, 4, 4), lty = c(1, 1, 2, 2))
legend("topleft", col = c(1, 2, 4), lty = c(1, 1, 2),
c("Actual", "Forecast", "Error Bounds (95% Confidence)"))
## ----echo=F, out.width="\\textwidth"-------------------------------------
ts.plot(AirPassengers, fore$pred, U, L,
col = c(1, 2, 4, 4), lty = c(1, 1, 2, 2))
legend("topleft", col = c(1, 2, 4), lty = c(1, 1, 2),
c("Actual", "Forecast", "Error Bounds (95% Confidence)"))
## ----fig.show="hide"-----------------------------------------------------
library(dtw)
idx <- seq(0, 2*pi, len=100)
a <- sin(idx) + runif(100)/10
b <- cos(idx)
align <- dtw(a, b, step=asymmetricP1, keep=T)
dtwPlotTwoWay(align)
## ----echo=F, out.height=".8\\textheight"---------------------------------
dtwPlotTwoWay(align)
## ----fig.show='hide', tidy=F---------------------------------------------
# read data into R
# sep="": the separator is white space, i.e., one
# or more spaces, tabs, newlines or carriage returns
sc <- read.table("./data/synthetic_control.data", header=F, sep="")
# show one sample from each class
idx <- c(1, 101, 201, 301, 401, 501)
sample1 <- t(sc[idx,])
plot.ts(sample1, main="")
## ----echo=F, out.height="\\textheight"-----------------------------------
plot.ts(sample1, main="")
## ----echo=F--------------------------------------------------------------
set.seed(17)
## ----fig.show='hide'-----------------------------------------------------
# sample n cases from every class
n <- 10
s <- sample(1:100, n)
idx <- c(s, 100+s, 200+s, 300+s, 400+s, 500+s)
sample2 <- sc[idx,]
observedLabels <- rep(1:6, each=n)
# hierarchical clustering with Euclidean distance
hc <- hclust(dist(sample2), method="ave")
plot(hc, labels=observedLabels, main="")
## ----echo=F, out.height="\\textheight"-----------------------------------
plot(hc, labels=observedLabels, main="")
## ------------------------------------------------------------------------
# cut tree to get 8 clusters
memb <- cutree(hc, k=8)
table(observedLabels, memb)
## ----fig.show='hide'-----------------------------------------------------
myDist <- dist(sample2, method="DTW")
hc <- hclust(myDist, method="average")
plot(hc, labels=observedLabels, main="")
# cut tree to get 8 clusters
memb <- cutree(hc, k=8)
table(observedLabels, memb)
## ----echo=F, out.height="\\textheight"-----------------------------------
plot(hc, labels=observedLabels, main="")
## ----tidy=F--------------------------------------------------------------
classId <- rep(as.character(1:6), each = 100)
newSc <- data.frame(cbind(classId, sc))
library(party)
ct <- ctree(classId ~ ., data = newSc,
controls = ctree_control(minsplit = 20,
minbucket = 5, maxdepth = 5))
## ------------------------------------------------------------------------
pClassId <- predict(ct)
table(classId, pClassId)
# accuracy
(sum(classId==pClassId)) / nrow(sc)
## ------------------------------------------------------------------------
# extract DWT (with Haar filter) coefficients
library(wavelets)
wtData <- NULL
for (i in 1:nrow(sc)) {
a <- t(sc[i,])
wt <- dwt(a, filter="haar", boundary="periodic")
wtData <- rbind(wtData,
unlist(c(wt@W, wt@V[[wt@level]])))
}
wtData <- as.data.frame(wtData)
wtSc <- data.frame(cbind(classId, wtData))
## ----tidy=F--------------------------------------------------------------
ct <- ctree(classId ~ ., data = wtSc,
controls = ctree_control(minsplit=20, minbucket=5,
maxdepth=5))
pClassId <- predict(ct)
table(classId, pClassId)
(sum(classId==pClassId)) / nrow(wtSc)
## ----fig.width=18, fig.height=8, out.height=".85\\textheight", out.width="1.1\\textwidth"----
plot(ct, ip_args=list(pval=F), ep_args=list(digits=0))
## ----echo=F--------------------------------------------------------------
set.seed(100)
## ------------------------------------------------------------------------
k <- 20
newTS <- sc[501,] + runif(100)*15
distances <- dist(newTS, sc, method="DTW")
s <- sort(as.vector(distances), index.return=TRUE)
# class IDs of k nearest neighbours
table(classId[s$ix[1:k]])
|
{"hexsha": "4b41b74c6e119114ec1dfd10ac7fea2284a529fa", "size": 5482, "ext": "r", "lang": "R", "max_stars_repo_path": "Scripts/RDM-script-time-series-analysis.r", "max_stars_repo_name": "enriqueescobar-askida/Kinito.R.DataMining", "max_stars_repo_head_hexsha": "766ece2ad9a30a0dc78a9fa9b27efdfb1be96ace", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Scripts/RDM-script-time-series-analysis.r", "max_issues_repo_name": "enriqueescobar-askida/Kinito.R.DataMining", "max_issues_repo_head_hexsha": "766ece2ad9a30a0dc78a9fa9b27efdfb1be96ace", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Scripts/RDM-script-time-series-analysis.r", "max_forks_repo_name": "enriqueescobar-askida/Kinito.R.DataMining", "max_forks_repo_head_hexsha": "766ece2ad9a30a0dc78a9fa9b27efdfb1be96ace", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.9171974522, "max_line_length": 95, "alphanum_fraction": 0.4728201386, "num_tokens": 1446}
|
import os
import sys
import random
import math
import numpy as np
import cv2
from mrcnn.config import Config
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn.model import log
import tensorflow as tf
class PlaneConfig(Config):
NAME="multiobject2"
GPU_COUNT=1
IMAGES_PER_GPU=2
NUM_CLASSES=8
IMAGE_MIN_DIM=1024
IMAGE_MAX_DIM=1024
RPN_ANCHOR_SCALES=(16,36,64,128,512)
TRAIN_ROIS_PER_IMAGE=64
STEPS_PER_EPOCH=100
VALIDATION_STEPS=10
LEARNING_RATE=0.002
LOSS_WEIGHTS = {
"rpn_class_loss": 1.,
"rpn_bbox_loss": 1.,
"mrcnn_class_loss": 1.,
"mrcnn_bbox_loss": 1.,
"mrcnn_mask_loss": 0.
}
RPN_TRAIN_ANCHORS_PER_IMAGE=512
    TRAIN_ROIS_PER_IMAGE = 400  # overrides the earlier setting of 64 above
RPN_NMS_THRESHOLD=0.8
DETECTION_NMS_THRESHOLD=0.5
MEAN_PIXEL = np.array([78.4, 82.7, 81.8])
class PlaneDataset(utils.Dataset):
def loadDataset(self,count,path):
self.add_class("object",1,"plane")
self.add_class("object",2,"ship")
self.add_class("object",3,"storage-tank")
#self.add_class("object",4,"baseball-diamond")
self.add_class("object",5,"tennis-court")
#self.add_class("object",6,"basketball-court")
#self.add_class("object",7,"ground-track-field")
self.add_class("object",8,"harbor")
#self.add_class("object",9,"bridge")
self.add_class("object",10,"small-vehicle")
self.add_class("object",11,"large-vehicle")
#self.add_class("object",12,"round-about")
#self.add_class("object",13,"swimming-pool")
#self.add_class("object",14,"helicopter")
#self.add_class("object",15,"soccer-ball-field")
imgList=os.listdir(path+"/image")
boxList=os.listdir(path+"/box")
        assert len(imgList)==len(boxList),"the number of images and boxes are not equal"
        assert len(imgList)>=count,"count is larger than the number of images"
for i in range(count):
img=cv2.imread(path+"/image/"+imgList[i])
self.add_image("object",image_id=i,path=path+"/image/"+imgList[i],maskpath=path+"/box/"+boxList[i],width=img.shape[1],height=img.shape[0])
def load_image(self,image_id):
#print("image: "+str(image_id))
info=self.image_info[image_id]
path=info['path']
return cv2.imread(path)
def load_mask(self,image_id):
#print("mask: "+str(image_id))
info=self.image_info[image_id]
maskpath=info['maskpath']
file=open(maskpath)
listmask=[]
classid=[]
for line in file:
mask=np.zeros((self.image_info[image_id]["height"],self.image_info[image_id]["width"]),dtype=bool)
box=line.split()
if int(box[4])==8:
continue
x1=min(int(box[0]),int(box[2]))
x2=max(int(box[0]),int(box[2]))
y1=min(int(box[1]),int(box[3]))
y2=max(int(box[1]),int(box[3]))
if x2>mask.shape[1]:
x2=mask.shape[1]
if y2>mask.shape[0]:
y2=mask.shape[0]
for i in range(x1,x2-2):
for j in range(y1,y2-2):
mask[j,i]=True
listmask.append(mask)
id=self.map_source_class_id("object."+box[4])
classid.append(id)
if len(classid) ==0:
listmask.append(np.zeros((self.image_info[image_id]["height"],self.image_info[image_id]["width"]),dtype=bool))
classid.append(0)
        listmask=np.stack(listmask,axis=2).astype(bool)  # np.bool is deprecated in newer NumPy
classid=np.array(classid,dtype=np.int32)
return listmask,classid
def image_reference(self,image_id):
info=self.image_info[image_id]
if info["source"]=="object":
return info["path"]
else:
super(PlaneDataset,self).image_reference(image_id)
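# A vectorized equivalent of the per-pixel loops in load_mask above
# (a sketch for reference; the training code below does not use it):
def _rect_mask(height, width, x1, y1, x2, y2):
    mask = np.zeros((height, width), dtype=bool)
    # same region as `for i in range(x1, x2-2): for j in range(y1, y2-2)`
    mask[y1:max(y1, y2 - 2), x1:max(x1, x2 - 2)] = True
    return mask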
dataset_train=PlaneDataset()
dataset_train.loadDataset(14348,"./multiobjectdataset/train")
dataset_train.prepare()
dataset_val=PlaneDataset()
dataset_val.loadDataset(4871,"./multiobjectdataset/val")
dataset_val.prepare()
config=PlaneConfig()
config.display()
model=modellib.MaskRCNN(mode="training",config=config,model_dir="./logs")
#model.load_weights("pretrained_weights.h5",by_name=True,exclude=["mrcnn_class_logits","mrcnn_bbox_fc","mrcnn_bbox","mrcnn_mask"])
model.load_weights(model.find_last(),by_name=True,exclude=["fpn_p1add","fpn_p2upsampled","fpn_c1p1","fpn_p1"])
model.train(dataset_train,dataset_val,learning_rate=config.LEARNING_RATE,epochs=100,layers="heads")
|
{"hexsha": "d9b7d93e258790910250c8fac5a653383d82bd63", "size": 4540, "ext": "py", "lang": "Python", "max_stars_repo_path": "train.py", "max_stars_repo_name": "ChienWong/Mask_RCNN", "max_stars_repo_head_hexsha": "f9d2592d8664a1abd7fd250fd129dc2bdb7c8c18", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-04-24T01:22:27.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-15T04:48:29.000Z", "max_issues_repo_path": "train.py", "max_issues_repo_name": "mohuazheliu/Mask_RCNN", "max_issues_repo_head_hexsha": "f9d2592d8664a1abd7fd250fd129dc2bdb7c8c18", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "train.py", "max_forks_repo_name": "mohuazheliu/Mask_RCNN", "max_forks_repo_head_hexsha": "f9d2592d8664a1abd7fd250fd129dc2bdb7c8c18", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.8333333333, "max_line_length": 150, "alphanum_fraction": 0.6389867841, "include": true, "reason": "import numpy", "num_tokens": 1201}
|
```python
# import libraries and modules
import numpy as np
import sympy as sp
from scipy.integrate import odeint
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from matplotlib.animation import PillowWriter
```
```python
# The symbols and the derivatives which will be used for later calculations
x1, y1, x2, y2 = sp.symbols(r'x_1 y_1 x_2 y_2',real = True)
m1, m2, g , t= sp.symbols(r'm_1 m_2 g t', real = True, positive = True)
th1 = sp.Function(r'\theta_1')(t)
th2 = sp.Function(r'\theta_2')(t)
th1_d = sp.diff(th1, t)
th2_d = sp.diff(th2, t)
th1_dd = sp.diff(th1_d, t)
th2_dd = sp.diff(th2_d, t)
# the lengths of the 2 pendulums
l1 = 2
l2 = 1
```
```python
# The relationship of x and y with the angle of the pendulum
x1 = l1*sp.sin(th1)
y1 = -l1*sp.cos(th1)
x2 = l2*sp.sin(th2) + x1
y2 = -l2*sp.cos(th2) + y1
x1_d = sp.diff(x1, t)
y1_d = sp.diff(y1, t)
x2_d = sp.diff(x2, t)
y2_d = sp.diff(y2, t)
```
```python
# the x and y functions which will be used later to get the coordinates of the two pendulums
x1_f = sp.lambdify(th1, x1)
y1_f = sp.lambdify(th1, y1)
x2_f = sp.lambdify((th1,th2), x2)
y2_f = sp.lambdify((th1, th2), y2)
```
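A quick sanity check of these coordinate functions (illustrative values): with $\theta_1 = \pi/2$ and $\theta_2 = 0$, the first rod is horizontal and the second hangs straight down, so $x_2 = l_1 = 2$ and $y_2 = -l_2 = -1$.

```python
# expect (2.0, -1.0): first rod horizontal, second rod vertical
print(x2_f(np.pi/2, 0), y2_f(np.pi/2, 0))
```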
```python
# Kinetic energy
T1 = 1/2 * m1 * (x1_d**2 + y1_d**2)
T2 = 1/2 * m2 * (x2_d**2 + y2_d**2)
T = T1+T2
# Potential energy
V1 = m1*g*y1
V2 = m2*g*y2
V = V1 + V2
# Lagrangian
L = T-V
L
```
$\displaystyle 2 g m_{1} \cos{\left(\theta_{1}{\left(t \right)} \right)} - g m_{2} \left(- 2 \cos{\left(\theta_{1}{\left(t \right)} \right)} - \cos{\left(\theta_{2}{\left(t \right)} \right)}\right) + 0.5 m_{1} \left(4 \sin^{2}{\left(\theta_{1}{\left(t \right)} \right)} \left(\frac{d}{d t} \theta_{1}{\left(t \right)}\right)^{2} + 4 \cos^{2}{\left(\theta_{1}{\left(t \right)} \right)} \left(\frac{d}{d t} \theta_{1}{\left(t \right)}\right)^{2}\right) + 0.5 m_{2} \left(\left(2 \sin{\left(\theta_{1}{\left(t \right)} \right)} \frac{d}{d t} \theta_{1}{\left(t \right)} + \sin{\left(\theta_{2}{\left(t \right)} \right)} \frac{d}{d t} \theta_{2}{\left(t \right)}\right)^{2} + \left(2 \cos{\left(\theta_{1}{\left(t \right)} \right)} \frac{d}{d t} \theta_{1}{\left(t \right)} + \cos{\left(\theta_{2}{\left(t \right)} \right)} \frac{d}{d t} \theta_{2}{\left(t \right)}\right)^{2}\right)$
```python
# The Lagrange equations
LE1 = sp.diff(sp.diff(L, th1_d), t).simplify() - sp.diff(L, th1)
LE2 = sp.diff(sp.diff(L, th2_d), t).simplify() - sp.diff(L, th2)
```
```python
# solving the Lagrange equations for the second derivatives of theta1 and theta2
sols = sp.solve([LE1, LE2], (th1_dd, th2_dd))
sols[th1_dd]
```
$\displaystyle \frac{g m_{1} \sin{\left(\theta_{1}{\left(t \right)} \right)}}{- 2.0 m_{1} + 2.0 m_{2} \cos^{2}{\left(\theta_{1}{\left(t \right)} - \theta_{2}{\left(t \right)} \right)} - 2.0 m_{2}} + \frac{g m_{2} \sin{\left(\theta_{1}{\left(t \right)} \right)}}{- 2.0 m_{1} + 2.0 m_{2} \cos^{2}{\left(\theta_{1}{\left(t \right)} - \theta_{2}{\left(t \right)} \right)} - 2.0 m_{2}} - \frac{g m_{2} \sin{\left(\theta_{2}{\left(t \right)} \right)} \cos{\left(\theta_{1}{\left(t \right)} - \theta_{2}{\left(t \right)} \right)}}{- 2.0 m_{1} + 2.0 m_{2} \cos^{2}{\left(\theta_{1}{\left(t \right)} - \theta_{2}{\left(t \right)} \right)} - 2.0 m_{2}} + \frac{2.0 m_{2} \sin{\left(\theta_{1}{\left(t \right)} - \theta_{2}{\left(t \right)} \right)} \cos{\left(\theta_{1}{\left(t \right)} - \theta_{2}{\left(t \right)} \right)} \left(\frac{d}{d t} \theta_{1}{\left(t \right)}\right)^{2}}{- 2.0 m_{1} + 2.0 m_{2} \cos^{2}{\left(\theta_{1}{\left(t \right)} - \theta_{2}{\left(t \right)} \right)} - 2.0 m_{2}} - \frac{2.0 m_{2} \sin{\left(\theta_{1}{\left(t \right)} - \theta_{2}{\left(t \right)} \right)} \cos{\left(\theta_{1}{\left(t \right)} - \theta_{2}{\left(t \right)} \right)} \frac{d}{d t} \theta_{1}{\left(t \right)} \frac{d}{d t} \theta_{2}{\left(t \right)}}{- 2.0 m_{1} + 2.0 m_{2} \cos^{2}{\left(\theta_{1}{\left(t \right)} - \theta_{2}{\left(t \right)} \right)} - 2.0 m_{2}} - \frac{m_{2} \sin{\left(\theta_{1}{\left(t \right)} - \theta_{2}{\left(t \right)} \right)} \frac{d}{d t} \theta_{1}{\left(t \right)} \frac{d}{d t} \theta_{2}{\left(t \right)}}{- 2.0 m_{1} + 2.0 m_{2} \cos^{2}{\left(\theta_{1}{\left(t \right)} - \theta_{2}{\left(t \right)} \right)} - 2.0 m_{2}} + \frac{m_{2} \sin{\left(\theta_{1}{\left(t \right)} - \theta_{2}{\left(t \right)} \right)} \left(\frac{d}{d t} \theta_{2}{\left(t \right)}\right)^{2}}{- 2.0 m_{1} + 2.0 m_{2} \cos^{2}{\left(\theta_{1}{\left(t \right)} - \theta_{2}{\left(t \right)} \right)} - 2.0 m_{2}} + \frac{2.0 m_{2} \sin{\left(\theta_{1}{\left(t \right)} \right)} \cos{\left(\theta_{1}{\left(t \right)} - \theta_{2}{\left(t \right)} \right)} \cos{\left(\theta_{2}{\left(t \right)} \right)} \frac{d}{d t} \theta_{1}{\left(t \right)} \frac{d}{d t} \theta_{2}{\left(t \right)}}{- 2.0 m_{1} + 2.0 m_{2} \cos^{2}{\left(\theta_{1}{\left(t \right)} - \theta_{2}{\left(t \right)} \right)} - 2.0 m_{2}} + \frac{m_{2} \sin{\left(\theta_{1}{\left(t \right)} \right)} \cos{\left(\theta_{2}{\left(t \right)} \right)} \frac{d}{d t} \theta_{1}{\left(t \right)} \frac{d}{d t} \theta_{2}{\left(t \right)}}{- 2.0 m_{1} + 2.0 m_{2} \cos^{2}{\left(\theta_{1}{\left(t \right)} - \theta_{2}{\left(t \right)} \right)} - 2.0 m_{2}} - \frac{2.0 m_{2} \sin{\left(\theta_{2}{\left(t \right)} \right)} \cos{\left(\theta_{1}{\left(t \right)} - \theta_{2}{\left(t \right)} \right)} \cos{\left(\theta_{1}{\left(t \right)} \right)} \frac{d}{d t} \theta_{1}{\left(t \right)} \frac{d}{d t} \theta_{2}{\left(t \right)}}{- 2.0 m_{1} + 2.0 m_{2} \cos^{2}{\left(\theta_{1}{\left(t \right)} - \theta_{2}{\left(t \right)} \right)} - 2.0 m_{2}} - \frac{m_{2} \sin{\left(\theta_{2}{\left(t \right)} \right)} \cos{\left(\theta_{1}{\left(t \right)} \right)} \frac{d}{d t} \theta_{1}{\left(t \right)} \frac{d}{d t} \theta_{2}{\left(t \right)}}{- 2.0 m_{1} + 2.0 m_{2} \cos^{2}{\left(\theta_{1}{\left(t \right)} - \theta_{2}{\left(t \right)} \right)} - 2.0 m_{2}}$
```python
# Converting the symbolic second derivatives into numeric functions which can be used to actually solve the differential equations.
deriv2_th2_f = sp.lambdify([g,m1, m2, th1, th2, th1_d, th2_d], sols[th2_dd])
deriv2_th1_f = sp.lambdify([g, m1, m2, th1, th2, th1_d, th2_d], sols[th1_dd])
deriv1_th1_f = sp.lambdify(th1_d, th1_d)
deriv1_th2_f = sp.lambdify(th2_d, th2_d)
```
# Equations to solve in this system are:
- $\frac{d\theta_1}{dt} = v_1$
- $\frac{dv_1}{dt} = sols[th1\_dd]$
- $\frac{d\theta_2}{dt} = v_2$
- $\frac{dv_2}{dt} = sols[th2\_dd]$
$$\therefore S = (\theta_1, v_1, \theta_2, v_2)$$
```python
# The derivative function which depends upon S and t and contains all information regarding the 4 differential equations.
def dSdt(S, t):
return [ deriv1_th1_f(S[1]), #d(theta)/dt
deriv2_th1_f(9.81, 2, 1, S[0], S[2], S[1], S[3]), # (dv1/dt)
deriv1_th2_f(S[3]), #d(theta2}/dt
deriv2_th2_f(9.81, 2, 1, S[0], S[2], S[1], S[3]) #dv2/dt
]
#Initial conditions
th1_0 = np.pi/2
v1_0 = 0
th2_0 = -np.pi/2
v2_0 = 0
S0 = [th1_0, v1_0, th2_0, v2_0]
```
```python
# solving the system at 1001 time points from t = 0 to t = 40
t = np.linspace(0, 40, 1001)
solution = odeint(dSdt, y0 = S0, t = t)
```
```python
# Required solutions
theta1 = solution.T[0]
theta2 = solution.T[2]
```
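As an optional sanity check (a sketch using the same parameters passed to `dSdt` above: $m_1 = 2$, $m_2 = 1$, $l_1 = 2$, $l_2 = 1$, $g = 9.81$), the total energy should stay roughly constant if the integration is accurate.

```python
m1_n, m2_n, g_n, l1_n, l2_n = 2, 1, 9.81, 2, 1
th1_n, v1_n, th2_n, v2_n = solution.T
# Cartesian velocities of both bobs, from x1 = l1*sin(th1), y1 = -l1*cos(th1), etc.
x1_dot = l1_n*np.cos(th1_n)*v1_n
y1_dot = l1_n*np.sin(th1_n)*v1_n
x2_dot = x1_dot + l2_n*np.cos(th2_n)*v2_n
y2_dot = y1_dot + l2_n*np.sin(th2_n)*v2_n
kinetic = 0.5*m1_n*(x1_dot**2 + y1_dot**2) + 0.5*m2_n*(x2_dot**2 + y2_dot**2)
potential = (m1_n*g_n*(-l1_n*np.cos(th1_n))
             + m2_n*g_n*(-l1_n*np.cos(th1_n) - l2_n*np.cos(th2_n)))
energy = kinetic + potential
print("max |energy drift|:", np.max(np.abs(energy - energy[0])))
```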
```python
# The coordinates of the 2 pendulums which will be used to plot the data
x1_pts = x1_f(theta1)
x2_pts = x2_f(theta1, theta2)
y1_pts = y1_f(theta1)
y2_pts = y2_f(theta1, theta2)
```
```python
plt.plot(x2_pts, y2_pts)
plt.plot(x1_pts, y1_pts)
```
```python
# An animation of the above system.
# The solution provides ~25 data points per second of simulated time and the
# animation is saved at 25 fps, so it plays back in (approximately) real time.
fig, ax = plt.subplots(1,1, figsize=(12,10))
ax.set_facecolor('w')
line, = plt.plot([], [], 'ro-', lw=2, markersize=8)
ax.set(ylim=(-4,4), xlim=(-4,4))
def animate(i):
line.set_data([0, x1_pts[i], x2_pts[i]], [0, y1_pts[i], y2_pts[i]])
return line,
anim = FuncAnimation(fig, animate, frames=1000, interval=50)
anim.save('double_pendulum.gif',writer='pillow',fps=25)
```
|
{"hexsha": "581da1d8e8958fda4b68f862c488c657506cfeb4", "size": 113364, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "Double pendulum.ipynb", "max_stars_repo_name": "ScientificArchisman/Simulations", "max_stars_repo_head_hexsha": "b9f3e7cc5d94a150931c12dac5fa21391736c47f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Double pendulum.ipynb", "max_issues_repo_name": "ScientificArchisman/Simulations", "max_issues_repo_head_hexsha": "b9f3e7cc5d94a150931c12dac5fa21391736c47f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Double pendulum.ipynb", "max_forks_repo_name": "ScientificArchisman/Simulations", "max_forks_repo_head_hexsha": "b9f3e7cc5d94a150931c12dac5fa21391736c47f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 342.4894259819, "max_line_length": 87988, "alphanum_fraction": 0.9076249956, "converted": true, "num_tokens": 3388}
|
export eval_apl
# eval
eval_apl(ex) = eval_apl(ex, nothing, nothing)
eval_apl(f, α, ω) = f
eval_apl(v::JlVal, α, ω) = v.val
eval_apl(::Α, α, ω) = α
eval_apl(::Ω, α, ω) = ω
eval_apl(x::Apply, α, ω) = eval_apl(x.f, α, ω)(eval_apl(x.r, α, ω))
eval_apl(x::ConcArr, α, ω) = vcat(eval_apl(x.l, α, ω), eval_apl(x.r, α, ω))
eval_apl(x::Apply2, α, ω) = eval_apl(x.f, α, ω)(eval_apl(x.l, α, ω), eval_apl(x.r, α, ω))
# call methods for primitive functions
mkbody1(x::Symbol) = :($x(ω))
mkbody1(x::Expr) = x
mkbody2(x::Symbol) = :($x(α, ω))
mkbody2(x::Expr) = x
for (sym, fns) in prim_fns
mon, dya = fns
mon != nothing && @eval (f::PrimFn{$sym})(ω) = $(mkbody1(mon))
dya != nothing && @eval (f::PrimFn{$sym})(α, ω) = $(mkbody2(dya))
end
function actuallyreducedim(f, xs::AbstractArray, ident)
dropdims(
reduce(f, xs, dims=ndims(xs), init=ident),
dims=ndims(xs)
)
end
function actuallyreducedim(f, xs::AbstractVector, ident)
reduce(f, xs, init=ident)
end
# call methods for primitive operators
(op::Op1{'/'})(ω) = actuallyreducedim(op.l, ω, identity(op.l, eltype(ω)))
(op::Op1{'⌿'})(ω) = dropdims(reduce(op.l, ω, init=identity(op.l, eltype(ω)), dims=1), dims=1)
(op::Op1{'\\'})(ω) = prefix_scan(op.l, ω, identity(op.l, ω))
(op::Op1{'⍀'})(ω) = prefix_scan(op.l, ω, identity(op.l, ω)) # Todo
(op::Op1{'¨'})(ω) = map(op.l, ω)
(op::Op1{'↔'})(α, ω) = op.l(ω, α)
(op::Op1{'⍨'})(α, ω) = op.l(ω, α)
(op::Op2{'.'})(α, ω) = reduce(op.l, op.r(convert(Array, α), convert(Array, ω)))
(op::Op2{'⋅'})(α) = op.l(op.r(α)) # compose
(op::Op1{'∘'})(α, ω) = [op.l(x, y) for x in α, y in ω]
# user defined functions
(fn::UDefFn{0})() = eval_apl(fn.ast)
(fn::UDefFn{1})(ω) = eval_apl(fn.ast, nothing, ω)
(fn::UDefFn{2})(α, ω) = eval_apl(fn.ast, α, ω)
|
{"hexsha": "e5b89c4a645ec66c73ad37f549593880aa22bd40", "size": 1765, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/eval.jl", "max_stars_repo_name": "JuliaTagBot/APL.jl", "max_stars_repo_head_hexsha": "5806736476ad3547b0955f53af5992f35136a35e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 47, "max_stars_repo_stars_event_min_datetime": "2015-12-14T14:46:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T15:29:14.000Z", "max_issues_repo_path": "src/eval.jl", "max_issues_repo_name": "JuliaTagBot/APL.jl", "max_issues_repo_head_hexsha": "5806736476ad3547b0955f53af5992f35136a35e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2016-10-03T18:45:23.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-01T22:04:14.000Z", "max_forks_repo_path": "src/eval.jl", "max_forks_repo_name": "JuliaTagBot/APL.jl", "max_forks_repo_head_hexsha": "5806736476ad3547b0955f53af5992f35136a35e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2016-10-03T18:36:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-24T07:53:23.000Z", "avg_line_length": 33.9423076923, "max_line_length": 93, "alphanum_fraction": 0.5847025496, "num_tokens": 766}
|
from __future__ import print_function
from __future__ import division
from hoomd import *
from hoomd import hpmc
import hoomd
import numpy
import math
import sys
import os
import unittest
import tempfile
context.initialize()
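# All tests below share the same 8-vertex box shape; a small helper like this
# (a convenience sketch, not part of the original suite) could generate it:
def make_box_vertices(hx=2, hy=1, hz=1):
    return [(sx * hx, sy * hy, sz * hz)
            for sx in (-1, 1) for sy in (-1, 1) for sz in (-1, 1)]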
class convex_polyhedron(unittest.TestCase):
def setUp(self):
# setup the MC integration
snap = data.make_snapshot(N=32, box=data.boxdim(Lx=20, Ly=20, Lz=20, dimensions=3), particle_types=['A', 'B']);
# no need to initialize particles, we are just testing construction of integrators
init.read_snapshot(snap);
if comm.get_rank() == 0:
tmp = tempfile.mkstemp(suffix='.hpmc-test-sdf');
self.tmp_file = tmp[1];
else:
self.tmp_file = "invalid";
def test_8(self):
xmax=0.02
dx=1e-4
mc = hpmc.integrate.convex_polyhedron(seed=10, d=0.1, max_verts=8);
mc.shape_param.set(['A', 'B'], vertices=[(-2,-1,-1),
(-2,1,-1),
(-2,-1,1),
(-2,1,1),
(2,-1,-1),
(2,1,-1),
(2,-1,1),
(2,1,1)]);
hpmc.analyze.sdf(mc=mc, filename=self.tmp_file, xmax=xmax, dx=dx, navg=800, period=10, phase=0)
hpmc.compute.free_volume(mc=mc, seed=123, test_type='A', nsample=1000)
# run
run(1, quiet=True);
def test_8_implicit(self):
xmax=0.02
dx=1e-4
mc = hpmc.integrate.convex_polyhedron(seed=10, d=0.1, max_verts=8, implicit=True);
mc.set_params(nselect=8,nR=3,depletant_type='B')
mc.shape_param.set(['A', 'B'], vertices=[(-2,-1,-1),
(-2,1,-1),
(-2,-1,1),
(-2,1,1),
(2,-1,-1),
(2,1,-1),
(2,-1,1),
(2,1,1)]);
hpmc.analyze.sdf(mc=mc, filename=self.tmp_file, xmax=xmax, dx=dx, navg=800, period=10, phase=0)
hpmc.compute.free_volume(mc=mc, seed=123, test_type='A', nsample=1000)
# run
run(1, quiet=True);
def test_16(self):
xmax=0.02
dx=1e-4
mc = hpmc.integrate.convex_polyhedron(seed=10, d=0.1, max_verts=16);
mc.shape_param.set(['A', 'B'], vertices=[(-2,-1,-1),
(-2,1,-1),
(-2,-1,1),
(-2,1,1),
(2,-1,-1),
(2,1,-1),
(2,-1,1),
(2,1,1)]);
hpmc.analyze.sdf(mc=mc, filename=self.tmp_file, xmax=xmax, dx=dx, navg=800, period=10, phase=0)
hpmc.compute.free_volume(mc=mc, seed=123, test_type='A', nsample=1000)
# run
run(1, quiet=True);
def test_16_implicit(self):
xmax=0.02
dx=1e-4
mc = hpmc.integrate.convex_polyhedron(seed=10, d=0.1, max_verts=16, implicit=True);
mc.set_params(nselect=8,nR=3,depletant_type='B')
mc.shape_param.set(['A', 'B'], vertices=[(-2,-1,-1),
(-2,1,-1),
(-2,-1,1),
(-2,1,1),
(2,-1,-1),
(2,1,-1),
(2,-1,1),
(2,1,1)]);
hpmc.analyze.sdf(mc=mc, filename=self.tmp_file, xmax=xmax, dx=dx, navg=800, period=10, phase=0)
hpmc.compute.free_volume(mc=mc, seed=123, test_type='A', nsample=1000)
# run
run(1, quiet=True);
def test_32(self):
xmax=0.02
dx=1e-4
mc = hpmc.integrate.convex_polyhedron(seed=10, d=0.1, max_verts=32);
mc.shape_param.set(['A', 'B'], vertices=[(-2,-1,-1),
(-2,1,-1),
(-2,-1,1),
(-2,1,1),
(2,-1,-1),
(2,1,-1),
(2,-1,1),
(2,1,1)]);
hpmc.analyze.sdf(mc=mc, filename=self.tmp_file, xmax=xmax, dx=dx, navg=800, period=10, phase=0)
hpmc.compute.free_volume(mc=mc, seed=123, test_type='A', nsample=1000)
# run
run(1, quiet=True);
def test_32_implicit(self):
xmax=0.02
dx=1e-4
mc = hpmc.integrate.convex_polyhedron(seed=10, d=0.1, max_verts=32, implicit=True);
mc.set_params(nselect=8,nR=3,depletant_type='B')
mc.shape_param.set(['A', 'B'], vertices=[(-2,-1,-1),
(-2,1,-1),
(-2,-1,1),
(-2,1,1),
(2,-1,-1),
(2,1,-1),
(2,-1,1),
(2,1,1)]);
hpmc.analyze.sdf(mc=mc, filename=self.tmp_file, xmax=xmax, dx=dx, navg=800, period=10, phase=0)
hpmc.compute.free_volume(mc=mc, seed=123, test_type='A', nsample=1000)
# run
run(1, quiet=True);
def test_64(self):
xmax=0.02
dx=1e-4
mc = hpmc.integrate.convex_polyhedron(seed=10, d=0.1, max_verts=64);
mc.shape_param.set(['A', 'B'], vertices=[(-2,-1,-1),
(-2,1,-1),
(-2,-1,1),
(-2,1,1),
(2,-1,-1),
(2,1,-1),
(2,-1,1),
(2,1,1)]);
hpmc.analyze.sdf(mc=mc, filename=self.tmp_file, xmax=xmax, dx=dx, navg=800, period=10, phase=0)
hpmc.compute.free_volume(mc=mc, seed=123, test_type='A', nsample=1000)
# run
run(1, quiet=True);
def test_64_implicit(self):
xmax=0.02
dx=1e-4
mc = hpmc.integrate.convex_polyhedron(seed=10, d=0.1, max_verts=64, implicit=True);
mc.set_params(nselect=8,nR=3,depletant_type='B')
mc.shape_param.set(['A', 'B'], vertices=[(-2,-1,-1),
(-2,1,-1),
(-2,-1,1),
(-2,1,1),
(2,-1,-1),
(2,1,-1),
(2,-1,1),
(2,1,1)]);
hpmc.analyze.sdf(mc=mc, filename=self.tmp_file, xmax=xmax, dx=dx, navg=800, period=10, phase=0)
hpmc.compute.free_volume(mc=mc, seed=123, test_type='A', nsample=1000)
# run
run(1, quiet=True);
def test_128(self):
xmax=0.02
dx=1e-4
mc = hpmc.integrate.convex_polyhedron(seed=10, d=0.1, max_verts=128);
mc.shape_param.set(['A', 'B'], vertices=[(-2,-1,-1),
(-2,1,-1),
(-2,-1,1),
(-2,1,1),
(2,-1,-1),
(2,1,-1),
(2,-1,1),
(2,1,1)]);
hpmc.analyze.sdf(mc=mc, filename=self.tmp_file, xmax=xmax, dx=dx, navg=800, period=10, phase=0)
hpmc.compute.free_volume(mc=mc, seed=123, test_type='A', nsample=1000)
# run
run(1, quiet=True);
def test_128_implicit(self):
xmax=0.02
dx=1e-4
mc = hpmc.integrate.convex_polyhedron(seed=10, d=0.1, max_verts=128, implicit=True);
mc.set_params(nselect=8,nR=3,depletant_type='B')
mc.shape_param.set(['A', 'B'], vertices=[(-2,-1,-1),
(-2,1,-1),
(-2,-1,1),
(-2,1,1),
(2,-1,-1),
(2,1,-1),
(2,-1,1),
(2,1,1)]);
hpmc.analyze.sdf(mc=mc, filename=self.tmp_file, xmax=xmax, dx=dx, navg=800, period=10, phase=0)
hpmc.compute.free_volume(mc=mc, seed=123, test_type='A', nsample=1000)
# run
run(1, quiet=True);
def test_129(self):
self.assertRaises(Exception, hpmc.integrate.convex_polyhedron, seed=10, d=0.1, max_verts=129);
def tearDown(self):
context.initialize();
if comm.get_rank() == 0:
os.remove(self.tmp_file);
class convex_polyhedron_fl(unittest.TestCase):
def setUp(self):
# setup the MC integration
snap = data.make_snapshot(N=1, box=data.boxdim(Lx=20, Ly=20, Lz=20, dimensions=3), particle_types=['A', 'B']);
# no need to initialize particles, we are just testing construction of integrators
init.read_snapshot(snap);
def test_8(self):
if hoomd.context.exec_conf.isCUDAEnabled():
return;
mc = hpmc.integrate.convex_polyhedron(seed=10, d=0.1, max_verts=8);
mc.shape_param.set(['A', 'B'], vertices=[(-2,-1,-1),
(-2,1,-1),
(-2,-1,1),
(-2,1,1),
(2,-1,-1),
(2,1,-1),
(2,-1,1),
(2,1,1)]);
# run
run(1, quiet=True);
def test_16(self):
if hoomd.context.exec_conf.isCUDAEnabled():
return;
mc = hpmc.integrate.convex_polyhedron(seed=10, d=0.1, max_verts=16);
mc.shape_param.set(['A', 'B'], vertices=[(-2,-1,-1),
(-2,1,-1),
(-2,-1,1),
(-2,1,1),
(2,-1,-1),
(2,1,-1),
(2,-1,1),
(2,1,1)]);
# run
run(1, quiet=True);
def test_32(self):
if hoomd.context.exec_conf.isCUDAEnabled():
return;
mc = hpmc.integrate.convex_polyhedron(seed=10, d=0.1, max_verts=32);
mc.shape_param.set(['A', 'B'], vertices=[(-2,-1,-1),
(-2,1,-1),
(-2,-1,1),
(-2,1,1),
(2,-1,-1),
(2,1,-1),
(2,-1,1),
(2,1,1)]);
# run
run(1, quiet=True);
def test_64(self):
if hoomd.context.exec_conf.isCUDAEnabled():
return;
mc = hpmc.integrate.convex_polyhedron(seed=10, d=0.1, max_verts=64);
mc.shape_param.set(['A', 'B'], vertices=[(-2,-1,-1),
(-2,1,-1),
(-2,-1,1),
(-2,1,1),
(2,-1,-1),
(2,1,-1),
(2,-1,1),
(2,1,1)]);
# run
run(1, quiet=True);
def test_128(self):
if hoomd.context.exec_conf.isCUDAEnabled():
return;
mc = hpmc.integrate.convex_polyhedron(seed=10, d=0.1, max_verts=128);
mc.shape_param.set(['A', 'B'], vertices=[(-2,-1,-1),
(-2,1,-1),
(-2,-1,1),
(-2,1,1),
(2,-1,-1),
(2,1,-1),
(2,-1,1),
(2,1,1)]);
# run
run(1, quiet=True);
def tearDown(self):
context.initialize();
class convex_spheropolyhedron(unittest.TestCase):
def setUp(self):
# setup the MC integration
snap = data.make_snapshot(N=32, box=data.boxdim(Lx=20, Ly=20, Lz=20, dimensions=3), particle_types=['A', 'B']);
# no need to initialize particles, we are just testing construction of integrators
init.read_snapshot(snap);
if comm.get_rank() == 0:
tmp = tempfile.mkstemp(suffix='.hpmc-test-sdf');
self.tmp_file = tmp[1];
else:
self.tmp_file = "invalid";
def test_8(self):
xmax=0.02
dx=1e-4
mc = hpmc.integrate.convex_spheropolyhedron(seed=10, d=0.1, max_verts=8);
mc.shape_param.set(['A', 'B'], vertices=[(-2,-1,-1),
(-2,1,-1),
(-2,-1,1),
(-2,1,1),
(2,-1,-1),
(2,1,-1),
(2,-1,1),
(2,1,1)]);
hpmc.analyze.sdf(mc=mc, filename=self.tmp_file, xmax=xmax, dx=dx, navg=800, period=10, phase=0)
hpmc.compute.free_volume(mc=mc, seed=123, test_type='A', nsample=1000)
# run
run(1, quiet=True);
def test_8_implicit(self):
xmax=0.02
dx=1e-4
mc = hpmc.integrate.convex_spheropolyhedron(seed=10, d=0.1, max_verts=8, implicit=True);
mc.set_params(nselect=8,nR=3,depletant_type='B')
mc.shape_param.set(['A', 'B'], vertices=[(-2,-1,-1),
(-2,1,-1),
(-2,-1,1),
(-2,1,1),
(2,-1,-1),
(2,1,-1),
(2,-1,1),
(2,1,1)]);
hpmc.analyze.sdf(mc=mc, filename=self.tmp_file, xmax=xmax, dx=dx, navg=800, period=10, phase=0)
hpmc.compute.free_volume(mc=mc, seed=123, test_type='A', nsample=1000)
# run
run(1, quiet=True);
def test_16(self):
xmax=0.02
dx=1e-4
mc = hpmc.integrate.convex_spheropolyhedron(seed=10, d=0.1, max_verts=16);
mc.shape_param.set(['A', 'B'], vertices=[(-2,-1,-1),
(-2,1,-1),
(-2,-1,1),
(-2,1,1),
(2,-1,-1),
(2,1,-1),
(2,-1,1),
(2,1,1)]);
hpmc.analyze.sdf(mc=mc, filename=self.tmp_file, xmax=xmax, dx=dx, navg=800, period=10, phase=0)
hpmc.compute.free_volume(mc=mc, seed=123, test_type='A', nsample=1000)
# run
run(1, quiet=True);
def test_16_implicit(self):
xmax=0.02
dx=1e-4
mc = hpmc.integrate.convex_spheropolyhedron(seed=10, d=0.1, max_verts=16, implicit=True);
mc.set_params(nselect=8,nR=3,depletant_type='B')
mc.shape_param.set(['A', 'B'], vertices=[(-2,-1,-1),
(-2,1,-1),
(-2,-1,1),
(-2,1,1),
(2,-1,-1),
(2,1,-1),
(2,-1,1),
(2,1,1)]);
hpmc.analyze.sdf(mc=mc, filename=self.tmp_file, xmax=xmax, dx=dx, navg=800, period=10, phase=0)
hpmc.compute.free_volume(mc=mc, seed=123, test_type='A', nsample=1000)
# run
run(1, quiet=True);
def test_32(self):
xmax=0.02
dx=1e-4
mc = hpmc.integrate.convex_spheropolyhedron(seed=10, d=0.1, max_verts=32);
mc.shape_param.set(['A', 'B'], vertices=[(-2,-1,-1),
(-2,1,-1),
(-2,-1,1),
(-2,1,1),
(2,-1,-1),
(2,1,-1),
(2,-1,1),
(2,1,1)]);
hpmc.analyze.sdf(mc=mc, filename=self.tmp_file, xmax=xmax, dx=dx, navg=800, period=10, phase=0)
hpmc.compute.free_volume(mc=mc, seed=123, test_type='A', nsample=1000)
# run
run(1, quiet=True);
def test_32_implicit(self):
xmax=0.02
dx=1e-4
mc = hpmc.integrate.convex_spheropolyhedron(seed=10, d=0.1, max_verts=32, implicit=True);
mc.set_params(nselect=8,nR=3,depletant_type='B')
mc.shape_param.set(['A', 'B'], vertices=[(-2,-1,-1),
(-2,1,-1),
(-2,-1,1),
(-2,1,1),
(2,-1,-1),
(2,1,-1),
(2,-1,1),
(2,1,1)]);
hpmc.analyze.sdf(mc=mc, filename=self.tmp_file, xmax=xmax, dx=dx, navg=800, period=10, phase=0)
hpmc.compute.free_volume(mc=mc, seed=123, test_type='A', nsample=1000)
# run
run(1, quiet=True);
def test_64(self):
xmax=0.02
dx=1e-4
mc = hpmc.integrate.convex_spheropolyhedron(seed=10, d=0.1, max_verts=64);
mc.shape_param.set(['A', 'B'], vertices=[(-2,-1,-1),
(-2,1,-1),
(-2,-1,1),
(-2,1,1),
(2,-1,-1),
(2,1,-1),
(2,-1,1),
(2,1,1)]);
hpmc.analyze.sdf(mc=mc, filename=self.tmp_file, xmax=xmax, dx=dx, navg=800, period=10, phase=0)
hpmc.compute.free_volume(mc=mc, seed=123, test_type='A', nsample=1000)
# run
run(1, quiet=True);
def test_64_implicit(self):
xmax=0.02
dx=1e-4
mc = hpmc.integrate.convex_spheropolyhedron(seed=10, d=0.1, max_verts=64, implicit=True);
mc.set_params(nselect=8,nR=3,depletant_type='B')
mc.shape_param.set(['A', 'B'], vertices=[(-2,-1,-1),
(-2,1,-1),
(-2,-1,1),
(-2,1,1),
(2,-1,-1),
(2,1,-1),
(2,-1,1),
(2,1,1)]);
hpmc.analyze.sdf(mc=mc, filename=self.tmp_file, xmax=xmax, dx=dx, navg=800, period=10, phase=0)
hpmc.compute.free_volume(mc=mc, seed=123, test_type='A', nsample=1000)
# run
run(1, quiet=True);
def test_128(self):
xmax=0.02
dx=1e-4
mc = hpmc.integrate.convex_spheropolyhedron(seed=10, d=0.1, max_verts=128);
mc.shape_param.set(['A', 'B'], vertices=[(-2,-1,-1),
(-2,1,-1),
(-2,-1,1),
(-2,1,1),
(2,-1,-1),
(2,1,-1),
(2,-1,1),
(2,1,1)]);
hpmc.analyze.sdf(mc=mc, filename=self.tmp_file, xmax=xmax, dx=dx, navg=800, period=10, phase=0)
hpmc.compute.free_volume(mc=mc, seed=123, test_type='A', nsample=1000)
# run
run(1, quiet=True);
def test_128_implicit(self):
xmax=0.02
dx=1e-4
mc = hpmc.integrate.convex_spheropolyhedron(seed=10, d=0.1, max_verts=128, implicit=True);
mc.set_params(nselect=8,nR=3,depletant_type='B')
mc.shape_param.set(['A', 'B'], vertices=[(-2,-1,-1),
(-2,1,-1),
(-2,-1,1),
(-2,1,1),
(2,-1,-1),
(2,1,-1),
(2,-1,1),
(2,1,1)]);
hpmc.analyze.sdf(mc=mc, filename=self.tmp_file, xmax=xmax, dx=dx, navg=800, period=10, phase=0)
hpmc.compute.free_volume(mc=mc, seed=123, test_type='A', nsample=1000)
# run
run(1, quiet=True);
def test_129(self):
self.assertRaises(Exception, hpmc.integrate.convex_spheropolyhedron, seed=10, d=0.1, max_verts=129);
def tearDown(self):
context.initialize();
if comm.get_rank() == 0:
os.remove(self.tmp_file);
class convex_spheropolyhedron_fl(unittest.TestCase):
def setUp(self):
# setup the MC integration
snap = data.make_snapshot(N=1, box=data.boxdim(Lx=20, Ly=20, Lz=20, dimensions=3), particle_types=['A', 'B']);
# no need to initialize particles, we are just testing construction of integrators
init.read_snapshot(snap);
def test_8(self):
if hoomd.context.exec_conf.isCUDAEnabled():
return;
mc = hpmc.integrate.convex_spheropolyhedron(seed=10, d=0.1, max_verts=8);
mc.shape_param.set(['A', 'B'], vertices=[(-2,-1,-1),
(-2,1,-1),
(-2,-1,1),
(-2,1,1),
(2,-1,-1),
(2,1,-1),
(2,-1,1),
(2,1,1)]);
# run
run(1, quiet=True);
def test_16(self):
if hoomd.context.exec_conf.isCUDAEnabled():
return;
mc = hpmc.integrate.convex_spheropolyhedron(seed=10, d=0.1, max_verts=16);
mc.shape_param.set(['A', 'B'], vertices=[(-2,-1,-1),
(-2,1,-1),
(-2,-1,1),
(-2,1,1),
(2,-1,-1),
(2,1,-1),
(2,-1,1),
(2,1,1)]);
# run
run(1, quiet=True);
def test_32(self):
if hoomd.context.exec_conf.isCUDAEnabled():
return;
mc = hpmc.integrate.convex_spheropolyhedron(seed=10, d=0.1, max_verts=32);
mc.shape_param.set(['A', 'B'], vertices=[(-2,-1,-1),
(-2,1,-1),
(-2,-1,1),
(-2,1,1),
(2,-1,-1),
(2,1,-1),
(2,-1,1),
(2,1,1)]);
# run
run(1, quiet=True);
def test_64(self):
if hoomd.context.exec_conf.isCUDAEnabled():
return;
mc = hpmc.integrate.convex_spheropolyhedron(seed=10, d=0.1, max_verts=64);
mc.shape_param.set(['A', 'B'], vertices=[(-2,-1,-1),
(-2,1,-1),
(-2,-1,1),
(-2,1,1),
(2,-1,-1),
(2,1,-1),
(2,-1,1),
(2,1,1)]);
# run
run(1, quiet=True);
def test_128(self):
if hoomd.context.exec_conf.isCUDAEnabled():
return;
mc = hpmc.integrate.convex_spheropolyhedron(seed=10, d=0.1, max_verts=128);
mc.shape_param.set(['A', 'B'], vertices=[(-2,-1,-1),
(-2,1,-1),
(-2,-1,1),
(-2,1,1),
(2,-1,-1),
(2,1,-1),
(2,-1,1),
(2,1,1)]);
# run
run(1, quiet=True);
def tearDown(self):
context.initialize();
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
|
{"hexsha": "c6348138eefa232e1c618802e6f7ee948e7b8277", "size": 26915, "ext": "py", "lang": "Python", "max_stars_repo_path": "hoomd/hpmc/test-py/max_verts.py", "max_stars_repo_name": "kmoskovtsev/HOOMD-Blue-fork", "max_stars_repo_head_hexsha": "99560563a5ba9e082b513764bae51a84f48fdc70", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "hoomd/hpmc/test-py/max_verts.py", "max_issues_repo_name": "kmoskovtsev/HOOMD-Blue-fork", "max_issues_repo_head_hexsha": "99560563a5ba9e082b513764bae51a84f48fdc70", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hoomd/hpmc/test-py/max_verts.py", "max_forks_repo_name": "kmoskovtsev/HOOMD-Blue-fork", "max_forks_repo_head_hexsha": "99560563a5ba9e082b513764bae51a84f48fdc70", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.452681388, "max_line_length": 119, "alphanum_fraction": 0.355006502, "include": true, "reason": "import numpy", "num_tokens": 6388}
|
[STATEMENT]
lemma qbs_bind_return':
assumes "x \<in> monadP_qbs_Px X"
shows "x \<bind> qbs_return X = x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<bind> qbs_return X = x
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. x \<bind> qbs_return X = x
[PROOF STEP]
obtain \<alpha> \<mu> where h1:"qbs_prob X \<alpha> \<mu>" "x = qbs_prob_space (X, \<alpha>, \<mu>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>\<alpha> \<mu>. \<lbrakk>qbs_prob X \<alpha> \<mu>; x = qbs_prob_space (X, \<alpha>, \<mu>)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using assms rep_monadP_qbs_Px
[PROOF STATE]
proof (prove)
using this:
x \<in> monadP_qbs_Px X
?s \<in> monadP_qbs_Px ?X \<Longrightarrow> \<exists>\<alpha> \<mu>. ?s = qbs_prob_space (?X, \<alpha>, \<mu>) \<and> qbs_prob ?X \<alpha> \<mu>
goal (1 subgoal):
1. (\<And>\<alpha> \<mu>. \<lbrakk>qbs_prob X \<alpha> \<mu>; x = qbs_prob_space (X, \<alpha>, \<mu>)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
qbs_prob X \<alpha> \<mu>
x = qbs_prob_space (X, \<alpha>, \<mu>)
goal (1 subgoal):
1. x \<bind> qbs_return X = x
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
qbs_prob X \<alpha> \<mu>
x = qbs_prob_space (X, \<alpha>, \<mu>)
[PROOF STEP]
interpret qp: qbs_prob X \<alpha> \<mu>
[PROOF STATE]
proof (prove)
using this:
qbs_prob X \<alpha> \<mu>
x = qbs_prob_space (X, \<alpha>, \<mu>)
goal (1 subgoal):
1. qbs_prob X \<alpha> \<mu>
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. x \<bind> qbs_return X = x
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<bind> qbs_return X = x
[PROOF STEP]
using qp.qbs_bind_computation[OF h1(2) qbs_return_morphism _ measurable_return_prob_space qbs_return_comp[OF qp.in_Mx]]
[PROOF STATE]
proof (prove)
using this:
\<alpha> \<in> qbs_Mx X \<Longrightarrow> qbs_prob X \<alpha> (\<mu> \<bind> return real_borel)
\<alpha> \<in> qbs_Mx X \<Longrightarrow> x \<bind> qbs_return X = qbs_prob_space (X, \<alpha>, \<mu> \<bind> return real_borel)
goal (1 subgoal):
1. x \<bind> qbs_return X = x
[PROOF STEP]
by(simp add: h1(2) bind_return'' prob_space_return qbs_probI)
[PROOF STATE]
proof (state)
this:
x \<bind> qbs_return X = x
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1038, "file": "Quasi_Borel_Spaces_Monad_QuasiBorel", "length": 11}
|
#include <boost/asio.hpp>
#include <boost/asio/spawn.hpp>
#include <iostream>
using boost::asio::ip::tcp;
using boost::asio::yield_context;
int main() {
boost::asio::io_service svc;
tcp::acceptor a(svc);
a.open(tcp::v4());
a.set_option(tcp::acceptor::reuse_address(true));
    a.bind({{}, 6767}); // bind to port 6767 on all local interfaces
a.listen(5);
spawn(svc, [&a](yield_context yield) {
while (1) {
tcp::socket s(a.get_io_service());
a.async_accept(s, yield);
spawn(yield, [s = std::move(s)](yield_context yield) mutable {
// do a read
char buf[1024] = {};
size_t bytes = s.async_read_some(
boost::asio::buffer(buf), yield);
std::cout << "Echo to " << s.remote_endpoint() << ": "
<< std::string(buf, buf+bytes) << std::endl;
bytes = async_write(s, boost::asio::buffer(buf), yield);
});
}
});
svc.run(); // wait for shutdown (Ctrl-C or failure)
}
|
{"hexsha": "12271fad003488e42f672779db35801cc899692e", "size": 1066, "ext": "cc", "lang": "C++", "max_stars_repo_path": "tests/cpp/hello_boost/echo.cc", "max_stars_repo_name": "resonai/ybt", "max_stars_repo_head_hexsha": "48e9f9b8bc02686c95b2afc29265b799ff9d80da", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2018-10-10T09:13:47.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-18T08:57:19.000Z", "max_issues_repo_path": "tests/cpp/hello_boost/echo.cc", "max_issues_repo_name": "resonai/ybt", "max_issues_repo_head_hexsha": "48e9f9b8bc02686c95b2afc29265b799ff9d80da", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 123.0, "max_issues_repo_issues_event_min_datetime": "2018-02-05T08:36:47.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T07:44:24.000Z", "max_forks_repo_path": "tests/cpp/hello_boost/echo.cc", "max_forks_repo_name": "resonai/ybt", "max_forks_repo_head_hexsha": "48e9f9b8bc02686c95b2afc29265b799ff9d80da", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.6111111111, "max_line_length": 74, "alphanum_fraction": 0.5243902439, "num_tokens": 269}
|
import numpy as np
from scipy.stats import norm
from scipy.optimize import fminbound
__all__ = ["polyserial_correlation"]
def polyserial_correlation(continuous, ordinal):
"""Computes the polyserial correlation.
Estimates the correlation value based on a bivariate
normal distribution.
Args:
continuous: Continuous Measurement
ordinal: Ordinal Measurement
Returns:
        polyserial_correlation: the converged correlation estimate
Notes:
User must handle missing data
"""
# Get the number of ordinal values
values, counts = np.unique(ordinal, return_counts=True)
# Compute the thresholds (tau's)
thresholds = norm.isf(1 - counts.cumsum() / counts.sum())[:-1]
# Standardize the continuous variable
standardized_continuous = ((continuous - continuous.mean())
/ continuous.std(ddof=1))
def _min_func(correlation):
denominator = np.sqrt(1 - correlation * correlation)
k = standardized_continuous * correlation
        neg_log_likelihood = 0
        for ndx, value in enumerate(values):
            mask = ordinal == value
            if ndx == 0:
                numerator = thresholds[ndx] - k[mask]
                probability = norm.cdf(numerator / denominator)
            elif ndx == (values.size - 1):
                numerator = thresholds[ndx - 1] - k[mask]
                probability = 1 - norm.cdf(numerator / denominator)
            else:
                numerator1 = thresholds[ndx] - k[mask]
                numerator2 = thresholds[ndx - 1] - k[mask]
                probability = (norm.cdf(numerator1 / denominator)
                               - norm.cdf(numerator2 / denominator))
            # accumulate the negative log-likelihood, to be minimized below
            neg_log_likelihood -= np.log(probability).sum()
        return neg_log_likelihood
rho = fminbound(_min_func, -.99, .99)
return rho
|
{"hexsha": "f7ab4cf38688f965b63c2bf3c5d3ba9403d12bf5", "size": 1966, "ext": "py", "lang": "Python", "max_stars_repo_path": "common/polyserial.py", "max_stars_repo_name": "eribean/GIRTH", "max_stars_repo_head_hexsha": "daf22773aa9cd1c819bf732e1061ebf5cc4dc40e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 43, "max_stars_repo_stars_event_min_datetime": "2020-03-22T02:34:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T12:56:11.000Z", "max_issues_repo_path": "common/polyserial.py", "max_issues_repo_name": "eribean/GIRTH", "max_issues_repo_head_hexsha": "daf22773aa9cd1c819bf732e1061ebf5cc4dc40e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 117, "max_issues_repo_issues_event_min_datetime": "2020-03-01T13:35:14.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-31T01:13:17.000Z", "max_forks_repo_path": "common/polyserial.py", "max_forks_repo_name": "eribean/GIRTH", "max_forks_repo_head_hexsha": "daf22773aa9cd1c819bf732e1061ebf5cc4dc40e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2020-10-21T17:04:24.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-25T08:49:14.000Z", "avg_line_length": 31.2063492063, "max_line_length": 68, "alphanum_fraction": 0.5727365209, "include": true, "reason": "import numpy,from scipy", "num_tokens": 404}
|
# coding=utf-8
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for learned_optimizers.outer_trainers.full_es."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
from learned_optimization.learned_optimizers import base
from learned_optimization.outer_trainers import full_es
from learned_optimization.outer_trainers import gradient_learner
from learned_optimization.outer_trainers import lopt_truncated_step
from learned_optimization.outer_trainers import test_utils
from learned_optimization.outer_trainers import truncation_schedule
from learned_optimization.tasks import base as tasks_base
from learned_optimization.tasks import quadratics
import numpy as np
class TaskFamilyWithAux(tasks_base.TaskFamily):
def sample(self, key):
return None
def task_fn(self, task_params) -> tasks_base.Task:
class _Task(tasks_base.Task):
datasets = None
def loss_and_aux(self, params, _, data):
return 0.0, {"aux_name": 1.0}
def init(self, key):
return jnp.asarray(1.)
def loss_with_state_and_aux(self, params, state, _, data):
return 0.0, None, {"aux_name": 1.0}
def loss_with_state(self, params, state, _, data):
return 0.0, None
return _Task()
class FullEsTest(parameterized.TestCase):
@parameterized.product(
meta_loss_split=(None, "train"), loss_type=("avg", "last_recompute"))
def test_full_es_trainer(self, meta_loss_split, loss_type):
learned_opt = base.LearnableSGD()
task_family = quadratics.FixedDimQuadraticFamily(10)
trunc_sched = truncation_schedule.NeverEndingTruncationSchedule()
truncated_step = lopt_truncated_step.VectorizedLOptTruncatedStep(
task_family,
learned_opt,
trunc_sched,
num_tasks=5,
meta_loss_split=meta_loss_split)
trunc_sched = truncation_schedule.ConstantTruncationSchedule(10)
trainer = full_es.FullES(
truncated_step,
truncation_schedule=trunc_sched,
steps_per_jit=5,
loss_type=loss_type)
test_utils.trainer_smoketest(trainer)
@parameterized.product(
meta_loss_split=(None, "train"),
loss_type=("avg", "last_recompute", "min"))
def test_full_es_trainer_with_data(self, meta_loss_split, loss_type):
learned_opt = base.LearnableSGD()
task_family = quadratics.FixedDimQuadraticFamilyData(10)
trunc_sched = truncation_schedule.NeverEndingTruncationSchedule()
truncated_step = lopt_truncated_step.VectorizedLOptTruncatedStep(
task_family,
learned_opt,
trunc_sched,
num_tasks=5,
meta_loss_split=meta_loss_split)
trunc_sched = truncation_schedule.ConstantTruncationSchedule(10)
trainer = full_es.FullES(
truncated_step,
steps_per_jit=5,
loss_type=loss_type,
truncation_schedule=trunc_sched)
test_utils.trainer_smoketest(trainer)
def test_full_es_stacked_antithetic_samples(self):
learned_opt = base.LearnableSGD()
task_family = quadratics.FixedDimQuadraticFamilyData(10)
trunc_sched = truncation_schedule.NeverEndingTruncationSchedule()
truncated_step = lopt_truncated_step.VectorizedLOptTruncatedStep(
task_family, learned_opt, trunc_sched, num_tasks=5)
trunc_sched = truncation_schedule.ConstantTruncationSchedule(10)
trainer = full_es.FullES(
truncated_step=truncated_step,
truncation_schedule=trunc_sched,
steps_per_jit=5,
stack_antithetic_samples=True)
test_utils.trainer_smoketest(trainer)
@parameterized.product(
loss_type=("avg", "last_recompute"), meta_loss_split=(None, "train"))
def test_full_es_meta_loss_aux(self, loss_type, meta_loss_split):
learned_opt = base.LearnableSGD()
task_family = TaskFamilyWithAux()
trunc_sched = truncation_schedule.NeverEndingTruncationSchedule()
truncated_step = lopt_truncated_step.VectorizedLOptTruncatedStep(
task_family,
learned_opt,
trunc_sched,
num_tasks=5,
meta_loss_with_aux_key="aux_name",
meta_loss_split=meta_loss_split)
trunc_sched = truncation_schedule.ConstantTruncationSchedule(10)
trainer = full_es.FullES(
truncated_step,
truncation_schedule=trunc_sched,
steps_per_jit=5,
loss_type=loss_type,
)
key = jax.random.PRNGKey(0)
theta = learned_opt.init(key)
worker_weights = gradient_learner.WorkerWeights(
theta, None, gradient_learner.OuterState(1))
state = trainer.init_worker_state(worker_weights, key=key)
out, _ = trainer.compute_gradient_estimate(
worker_weights, key, state, with_summary=True)
np.testing.assert_allclose(out.mean_loss, 1.0)
def test_full_es_throws_exception_when_truncated_step_misconfigured(self):
with self.assertRaises(ValueError):
trunc_sched = truncation_schedule.ConstantTruncationSchedule(10)
learned_opt = base.LearnableSGD()
task_family = quadratics.FixedDimQuadraticFamily(10)
truncated_step = lopt_truncated_step.VectorizedLOptTruncatedStep(
task_family,
learned_opt,
trunc_sched, # This should be never-ending!
num_tasks=5)
full_es.FullES(
truncated_step, truncation_schedule=trunc_sched, steps_per_jit=5)
if __name__ == "__main__":
absltest.main()
|
{"hexsha": "934dfeb9d891b5ec6e9953fc9880aff57b36d120", "size": 5907, "ext": "py", "lang": "Python", "max_stars_repo_path": "learned_optimization/outer_trainers/full_es_test.py", "max_stars_repo_name": "google/learned_optimization", "max_stars_repo_head_hexsha": "1c9ee0159c97815fc6afe79a76224fb28b199053", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 70, "max_stars_repo_stars_event_min_datetime": "2021-12-16T07:12:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T19:13:36.000Z", "max_issues_repo_path": "learned_optimization/outer_trainers/full_es_test.py", "max_issues_repo_name": "google/learned_optimization", "max_issues_repo_head_hexsha": "1c9ee0159c97815fc6afe79a76224fb28b199053", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2021-12-29T10:03:37.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T15:59:55.000Z", "max_forks_repo_path": "learned_optimization/outer_trainers/full_es_test.py", "max_forks_repo_name": "google/learned_optimization", "max_forks_repo_head_hexsha": "1c9ee0159c97815fc6afe79a76224fb28b199053", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-12-16T04:52:35.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-22T03:45:31.000Z", "avg_line_length": 34.5438596491, "max_line_length": 76, "alphanum_fraction": 0.7430167598, "include": true, "reason": "import numpy,import jax", "num_tokens": 1387}
|
include("../src/includes.jl")
const TEST_EXCHANGE = "testExchange"
struct IntSource <: Source{Int}
pollFn::Function
    IntSource(coll) = new(() -> length(coll) > 0 ? pop!(coll) : nothing)
end
v = collect(1:10000)
@async source!(IntSource(v)) |> sink!(TEST_EXCHANGE)
readline()
|
{"hexsha": "f68a78065e6e1b3708467c461719b5285eebd0e7", "size": 285, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/test-pub-with-config.jl", "max_stars_repo_name": "garethhu/ReactiveAmqp.jl", "max_stars_repo_head_hexsha": "2a916a2965c90d25ed1229bde7c2eb8db202c799", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/test-pub-with-config.jl", "max_issues_repo_name": "garethhu/ReactiveAmqp.jl", "max_issues_repo_head_hexsha": "2a916a2965c90d25ed1229bde7c2eb8db202c799", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-03-03T23:30:50.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-04T00:10:53.000Z", "max_forks_repo_path": "examples/test-pub-with-config.jl", "max_forks_repo_name": "garethhu/ReactiveAmqp.jl", "max_forks_repo_head_hexsha": "2a916a2965c90d25ed1229bde7c2eb8db202c799", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.0, "max_line_length": 69, "alphanum_fraction": 0.6596491228, "num_tokens": 81}
|
import numpy as np
input_data = np.array([2,3])
weights = {
'node_0': np.array([1,1]),
'node_1': np.array([-1,1]),
'output': np.array([2,-1])
}
node_0_val = np.dot(input_data, weights['node_0'])
node_1_val = np.dot(input_data, weights['node_1'])
# combine the two hidden-node activations with the output weights
output_val = np.dot(np.array([node_0_val, node_1_val]), weights['output'])
print("Result = {}".format(output_val))
|
{"hexsha": "510cc2d608983c70234a4b4f564e619895454ceb", "size": 393, "ext": "py", "lang": "Python", "max_stars_repo_path": "Deep-Learning-In-Python/Module-1/forward-propagation.py", "max_stars_repo_name": "vishwesh5/Datacamp-Courses", "max_stars_repo_head_hexsha": "f074ec25e373c3d1d2edb1629c5568001aeadec1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Deep-Learning-In-Python/Module-1/forward-propagation.py", "max_issues_repo_name": "vishwesh5/Datacamp-Courses", "max_issues_repo_head_hexsha": "f074ec25e373c3d1d2edb1629c5568001aeadec1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Deep-Learning-In-Python/Module-1/forward-propagation.py", "max_forks_repo_name": "vishwesh5/Datacamp-Courses", "max_forks_repo_head_hexsha": "f074ec25e373c3d1d2edb1629c5568001aeadec1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.5625, "max_line_length": 72, "alphanum_fraction": 0.6208651399, "include": true, "reason": "import numpy", "num_tokens": 125}
|
[STATEMENT]
lemma both_mono1':
"t \<sqsubseteq> t' \<Longrightarrow> t \<otimes>\<otimes> t'' \<sqsubseteq> t' \<otimes>\<otimes> t''"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. t \<sqsubseteq> t' \<Longrightarrow> t \<otimes>\<otimes> t'' \<sqsubseteq> t' \<otimes>\<otimes> t''
[PROOF STEP]
using both_mono1[folded below_set_def, unfolded paths_mono_iff]
[PROOF STATE]
proof (prove)
using this:
?t \<sqsubseteq> ?t' \<Longrightarrow> ?t \<otimes>\<otimes> ?t'' \<sqsubseteq> ?t' \<otimes>\<otimes> ?t''
goal (1 subgoal):
1. t \<sqsubseteq> t' \<Longrightarrow> t \<otimes>\<otimes> t'' \<sqsubseteq> t' \<otimes>\<otimes> t''
[PROOF STEP]
.
|
{"llama_tokens": 244, "file": "Call_Arity_TTree-HOLCF", "length": 2}
|
-- <html>
-- <head>
-- <BASE HREF="http://www.numeric-quest.com/haskell/Orthogonals.html">
-- <title>
-- Indexless linear algebra algorithms
-- </title>
-- </head>
-- <body>
-- <ul>
-- <center>
-- <h1>
-- ***
-- </h1>
-- <h1>
-- Indexless linear algebra algorithms
-- </h1>
-- <b>
-- <br>
-- Orthogonalization, linear equations, eigenvalues and eigenvectors
-- <br>
-- Literate Haskell module <i>Orthogonals.lhs</i>
-- </b>
-- <p>
-- Jan Skibinski, <a href="http://www.numeric-quest.com/news/">
-- Numeric Quest Inc.</a>, Huntsville, Ontario, Canada
-- <p>
-- 1998.09.19, last modified 1998.12.28
-- </center>
-- <hr>
-- <p>
-- It has been argued that the functional paradigm offers more
-- support for scientific computing than the traditional imperative
-- programming, such as greater similarity of functional implementation
-- to mathematical specification of a problem. However, efficiency
-- of scientific algorithms implemented in Haskell is very low compared
-- to efficiencies of C or Fortran implementations - notwithstanding
-- the exceptional descriptive power of Haskell.
-- <dd>
-- It has been also argued that tradition and inertia are partially
-- responsible for this sore state and that many functional algorithms
-- are direct translations of their imperative counterparts.
-- <dd>
-- Arrays - with their indexing schemes and destructive updating
-- are basic tools of imperative programming. But pure functional
-- languages, which prohibit variable reassignments, cannot compete
-- with imperative languages by using the same tools and following
-- similar reasoning and patterns - unless the functional arrays
-- themselves are designed with performance in mind. This is
-- the case with Clean, where one kind of its arrays -- the strict
-- unboxed array -- approaches the efficiency of C.
-- <dd>
-- But this has not been done for Haskell arrays yet. They are
-- lazy, boxed and use auxiliary association lists (index, value)
-- for initialization -- the latter being mostly responsible for
-- low efficiency of those algorithms that create many interim
-- arrays.
-- <dd>
-- It appears that -- as long as an indexing scheme is not used
-- for lookups and updates -- Haskell lists are more efficient
-- than arrays, at least in the current state of Haskell.
-- <p>
-- With this in mind, we are attempting to demonstrate here
-- that the indexing traps can be successfully avoided.
-- This module implements afresh several typical problems from linear
-- algebra. Standard Haskell lists are employed instead of arrays
-- and not a single algorithm ever uses indices for lookups
-- or updates.
-- <dd>
-- We do not claim high efficiency of these algorithms; consider
-- them exploratory. However, we do claim that the clarity of
-- these algorithms is significantly better than of those functionally
-- similar algorithms that employ indexing schemes.
-- <p>
-- Two major algorithms have been invented and implemented in Haskell:
-- one for solving systems of linear equations and one for finding
-- eigenvalues and eigenvectors of almost any type of a square matrix.
-- This includes symmetric, hermitian, general complex or nonsymmetric
-- matrices with real eigenvalues.
-- <dd>
-- Amazingly, both methods are based on the same factorization, akin
-- to QR method, but not exactly the same as the standard QR one.
-- A simple trick allows us to extend this method to nonsymmetric real
-- matrices with complex eigenvalues, and thus one method applies to
-- all types of matrices.
-- It follows that the eigenvalue/eigenvector problem can be treated
-- consistently all across the board. In addition, no boring
-- administrative (housekeeping) trivia is required here, which helps
-- to explain the mechanisms employed clearly.
-- <p>
-- <hr>
-- <p>
-- <b>
-- Contents
-- </b>
-- <p>
-- <ul>
-- <p><li>
-- Notation
-- <p><li>
-- Scalar products and vector normalization
-- <ul>
-- <li><b>
-- bra_ket</b>, scalar product
-- <li><b>
-- sum_product</b>, a cousin of bra_ket
-- <li><b>
-- norm</b>, vector norm
-- <li><b>
-- normalized</b>, vector normalized to one
-- </ul>
-- <p><li>
-- Transposition and adjoining of matrices
-- <ul>
-- <li><b>
-- transposed</b>, transposed matrix
-- <li><b>
-- adjoint</b>, transposed and conjugated matrix
-- </ul>
-- <p><li>
-- Products involving matrices
-- <ul>
-- <li><b>
-- matrix_matrix</b>, product of two matrices as list of rows
-- <li><b>
-- matrix_matrix'</b>, product of two matrices as list of columns
-- <li><b>
-- triangle_matrix'</b>, upper triangular matrix times square matrix
-- <li><b>
-- matrix_ket</b>, matrix times ket vector
-- <li><b>
-- bra_matrix</b>, bra vector times matrix
-- <li><b>
-- bra_matrix_ket</b>, matrix multiplied on both sides by vectors
-- <li><b>
-- scalar_matrix</b>, scalar times matrix
-- </ul>
-- <p><li>
-- Orthogonalization process
-- <ul>
-- <li><b>
-- orthogonals</b>, set of orthogonal vectors
-- <li><b>
-- gram_schmidt</b>, vector perpendicular to a hyperplane
-- </ul>
-- <p><li>
-- Solutions of linear equations by orthogonalization
-- <ul>
-- <li><b>
-- one_ket_triangle</b>, triangularization of one vector equation
-- <li><b>
-- one_ket_solution</b>, solution for one unknown vector
-- <li><b>
-- many_kets_triangle</b>, triangularization of several vector equations
-- <li><b>
-- many_kets_solution</b>, solution for several unknown vectors
-- </ul>
-- <p><li>
-- Matrix inversion
-- <ul>
-- <li><b>
-- inverse</b>, inverse of a matrix
-- </ul>
-- <p><li>
-- QR factorization of matrices provided by "many_kets_triangle"
-- <ul>
-- <li><b>
-- factors_QR</b>, QR alike factorization of matrices
-- <li><b>
-- determinant</b>, computation of the determinant based on the QR factorization
-- </ul>
-- <p><li>
-- Similarity transformations and eigenvalues
-- <ul>
-- <li><b>
-- similar_to</b>, matrix obtained by similarity transformation
-- <li><b>
-- iterated_eigenvalues</b>, list of approximations of eigenvalues
-- <li><b>
-- eigenvalues</b>, final approximation of eigenvalues
-- </ul>
-- <p><li>
-- Preconditioning of real nonsymmetric matrices
-- <ul>
-- <li><b>
-- add_to_diagonal</b>, simple preconditioning method
-- </ul>
-- <p><li>
-- Examples of iterated eigenvalues
-- <ul>
-- <li>
-- Symmetric real matrix
-- <li>
-- Hermitian complex matrix
-- <li>
-- General complex matrix
-- <li>
-- Nonsymmetric real matrix with real eigenvalues
-- <li>
-- Nonsymmetric real matrix with complex eigenvalues
-- </ul>
-- <p><li>
-- Eigenvectors for distinct eigenvalues
-- <ul>
-- <li><b>
-- eigenkets</b>, eigenvectors for distinct eigenvalues
-- </ul>
-- <p><li>
-- Eigenvectors for degenerated eigenvalues
-- <ul>
-- <li><b>
-- eigenket'</b>, eigenvector based on a trial vector
-- </ul>
-- <p><li>
-- Auxiliary functions
-- <ul>
-- <li><b>
-- unit_matrix</b>, a unit matrix with 1's on a diagonal
-- <li><b>
-- unit_vector</b>, a vector with one non-zero component
-- <li><b>
-- diagonals</b>, vector made of a matrix diagonal
-- </ul>
-- </ul>
-- <p>
-- <hr>
-- <p>
-- <b>
-- Notation
-- </b>
-- <p>
-- What follows is written in Dirac's notation, as used
-- in Quantum Mechanics. Matrices are represented by capital
-- letters, while vectors come in two varieties:
-- <ul>
-- <p><li>
-- Bra vector x, written < x |, is represented by one-row matrix
-- <p><li> Ket vector y, written | y >, is represented by one-column matrix
-- </ul>
-- <p>
-- Bra vectors can be obtained from ket vectors by transposition
-- and conjugation of their components. Conjugation is only
-- important for complex vectors.
-- <p>
-- Scalar product of two vectors | x > and | y > is written
-- as
-- <pre>
-- < x | y >
-- </pre>
-- which looks like a bracket and is sometimes called a "bra_ket".
-- This justifies "bra" and "ket" names introduced by Dirac. There
-- is a good reason for conjugating the components of "bra-vector":
-- the scalar product of
-- <pre>
-- < x | x >
-- </pre>
-- should be a square of the norm of the vector "x", and that
-- means that it should be represented by a real number, or by
-- a complex number whose imaginary part equals zero.
-- <p>
-- <hr>
-- <p>
-- <pre>
module Orthogonals where
import Data.Complex
import Data.Ratio
import qualified Data.List as List
-- </pre>
-- <b>
-- Scalar product and vector normalization
-- </b>
-- <p>
-- The scalar product "bra_ket" is a basis of many algorithms
-- presented here.
-- <pre>
bra_ket :: (Scalar a, Num a) => [a] -> [a] -> a
bra_ket u v =
--
-- Scalar product of two vectors u and v,
-- or < u | v > in Dirac's notation.
    -- This is equally valid for both real and complex vectors.
--
sum_product u (map coupled v)
-- </pre>
-- Notice the call to function "coupled" in the above implementation
-- of the scalar product. This function conjugates its argument
-- if it is complex; otherwise it leaves it unchanged. It is defined
-- in the class Scalar - a class designed mainly for this
-- purpose.
-- <dd>
-- This class also defines a norm of a vector that might be used
-- by some algorithms. So far we have been able to avoid this.
-- <pre>
class Eq a => Scalar a where
coupled :: a->a
norm :: [a] -> a
almostZero :: a -> Bool
scaled :: [a] -> [a]
instance Scalar Double where
coupled x = x
norm u = sqrt (bra_ket u u)
almostZero x = (abs x) < 1.0e-8
scaled = scaled'
instance Scalar Float where
coupled x = x
norm u = sqrt (bra_ket u u)
almostZero x = (abs x) < 1.0e-8
scaled = scaled'
instance (Integral a) => Scalar (Ratio a) where
coupled x = x
-- norm u = fromDouble ((sqrt (bra_ket u u))::Double)
-- Intended hack to silently convert to and from Double.
-- But I do not know how to declare it properly.
--
-- Our type Fraction, when used instead of Ratio a, has its own
-- definition of sqrt. No hack would be needed here.
almostZero x = abs x < 1e-8
scaled = scaled'
instance (RealFloat a) => Scalar (Complex a) where
coupled (x:+y) = x:+(-y)
norm u = sqrt (realPart (bra_ket u u)) :+ 0
almostZero z = (realPart (abs z)) < 1.0e-8
scaled u = [(x/m):+(y/m) | x:+y <- u]
where m = maximum [max (abs x) (abs y) | x:+y <- u]
norm1 :: (Num a) => [a] -> a
norm1 = sum . map abs
norminf :: (Num a, Ord a) => [a] -> a
norminf = maximum . map abs
matnorm1 :: (Num a, Ord a) => [[a]] -> a
matnorm1 = matnorminf . transposed
matnorminf :: (Num a, Ord a) => [[a]] -> a
matnorminf = maximum . map norm1
-- </pre>
-- But we also need a slightly different definition of
-- scalar product that will appear in multiplication of matrices
-- by vectors (or vice versa): a straightforward accumulated product
-- of two lists, where no complex conjugation takes place.
-- We will call it a "sum_product".
-- <pre>
sum_product :: Num a => [a] -> [a] -> a
sum_product u v =
--
-- Similar to scalar product but without
-- conjugations of | u > components
-- Used in matrix-vector or vector-matrix products
--
sum (zipWith (*) u v)
-- </pre>
-- Some algorithms might need vectors normalized to one, although
-- we will try to avoid normalization due to its high cost
-- and its inapplicability to rational numbers. Instead, we will
-- scale vectors by their maximal components.
-- <pre>
normalized :: (Scalar a, Fractional a) => [a] -> [a]
normalized u =
map (/norm u) u
scaled' :: (Fractional t, Ord t) => [t] -> [t]
scaled' u =
map (/norminf u) u
-- </pre>
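-- <p>
-- As a quick illustration - a sketch only, with the results worked out
-- by hand rather than captured from an actual run - a GHCi session with
-- the functions above might look as follows:
-- <pre>
--      ghci> bra_ket [1,2,2::Double] [1,2,2]
--      9.0
--      ghci> bra_ket [0:+1] [0:+(1::Double)]    -- i conjugated: i*(-i) = 1
--      1.0 :+ 0.0
--      ghci> sum_product [1,2] [3,4::Double]    -- no conjugation: 3 + 8
--      11.0
--      ghci> norm [1,2,2::Double]               -- sqrt 9
--      3.0
--      ghci> scaled [1,2,2::Double]             -- divided by max component 2
--      [0.5,1.0,1.0]
-- </pre>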
-- <hr>
-- <p>
-- <b>
-- Transposition and adjoining of matrices
-- </b>
-- <p>
-- Matrices are represented here by lists of lists.
-- Function "transposed" converts from row-wise to column-wise
-- representation, or vice versa.
-- <dd>
-- When transposition is combined with complex conjugation
-- the resulting matrix is called "adjoint".
-- <p>
-- A square matrix is called symmetric if it is equal to its transpose
-- <pre>
-- A = A<sup>T</sup>
-- </pre>
-- It is called Hermitian, or self-adjoint, if it equals to
-- its adjoint
-- <pre>
-- A = A<sup>+</sup>
transposed :: [[a]] -> [[a]]
transposed a
| null (head a) = []
| otherwise = ([head mi| mi <- a])
:transposed ([tail mi| mi <- a])
adjoint :: Scalar a => [[a]] -> [[a]]
adjoint a
| null (head a) = []
| otherwise = ([coupled (head mi)| mi <- a])
:adjoint ([tail mi| mi <- a])
-- </pre>
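-- <p>
-- For example (a hand-worked sketch, not output captured from a run):
-- transposing a 2x2 matrix swaps its off-diagonal elements, and
-- adjoining additionally conjugates the entries:
-- <pre>
--      ghci> transposed [[1,2],[3,4::Double]]
--      [[1.0,3.0],[2.0,4.0]]
--      ghci> adjoint [[1:+2],[3:+(4::Double)]]
--      [[1.0 :+ (-2.0),3.0 :+ (-4.0)]]
-- </pre>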
-- <p>
-- <hr>
-- <p>
-- <b>
-- Linear combination and sum of two matrices
-- </b>
-- <p>
-- One can form a linear combination of two matrices, such
-- as
-- <pre>
-- C = alpha A + beta B
-- where
-- alpha and beta are scalars
-- </pre>
-- The most generic form of any combination, not necessary
-- linear, of components of two matrices is given by "matrix_zipWith"
-- function below, which accepts a function "f" describing such
-- combination. For the linear combination with two scalars
-- the function "f" could be defined as:
-- <pre>
-- f alpha beta a b = alpha*a + beta*b
-- </pre>
-- For a straightforward addition of two matrices this auxiliary
-- function is simply "(+)".
-- <pre>
matrix_zipWith :: (a -> b -> c) -> [[a]] -> [[b]] -> [[c]]
matrix_zipWith f a b =
--
-- Matrix made of a combination
-- of matrices a and b - as specified by f
--
[zipWith f ak bk | (ak,bk) <- zip a b]
add_matrices :: (Num a) => t -> t1 -> [[a]] -> [[a]] -> [[a]]
add_matrices _ _ = matrix_zipWith (+)
-- </pre>
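-- <p>
-- For instance (a hand-computed sketch): a plain addition of two
-- matrices, and the linear combination 2 A + 3 B via "matrix_zipWith":
-- <pre>
--      ghci> matrix_zipWith (+) [[1,2],[3,4]] [[10,20],[30,40::Double]]
--      [[11.0,22.0],[33.0,44.0]]
--      ghci> matrix_zipWith (\a b -> 2*a + 3*b) [[1,2],[3,4]] [[10,20],[30,40::Double]]
--      [[32.0,64.0],[96.0,128.0]]
-- </pre>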
-- <p>
-- <hr>
-- <p>
-- <b>
-- Products involving matrices
-- </b>
-- <p>
-- Variety of products involving matrices can be defined.
-- Our Haskell implementation is based on lists of lists
-- and therefore is open to interpretation: sublists
-- can either represent the rows or the columns of a matrix.
-- <dd>
-- The following definitions are somewhat arbitrary, since
-- one can choose alternative interpretations of the lists
-- representing matrices.
-- <p>
-- <b>
-- C = A B
-- </b>
-- <p>
-- Inner product of two matrices A B can be expressed quite simply,
-- providing that matrix A is represented by a list of rows
-- and B - by a list of columns. Function "matrix_matrix"
-- returns a list of rows, while "matrix_matrix'" returns
-- a list of columns.
-- <dd>
-- Major algorithms of this module make use of "triangle_matrix'",
-- which calculates a product of upper triangular matrix
-- with square matrix and returns a rectangular list of columns.
-- <pre>
matrix_matrix :: Num a => [[a]] -> [[a]] -> [[a]]
matrix_matrix a b
--
-- A matrix being an inner product
-- of matrices A and B, where
-- A is represented by a list of rows a
-- B is represented by a list of columns b
-- result is represented by list of rows
    -- Require: length of a is equal to length of b
    -- Require: all sublists are of equal length
| null a = []
| otherwise = ([sum_product (head a) bi | bi <- b])
: matrix_matrix (tail a) b
matrix_matrix' :: (Num a) => [[a]] -> [[a]] -> [[a]]
matrix_matrix' a b
--
-- Similar to "matrix_matrix"
-- but the result is represented by
-- a list of columns
--
| null b = []
| otherwise = ([sum_product ai (head b) | ai <- a])
: matrix_matrix' a (tail b)
triangle_matrix' :: Num a => [[a]] -> [[a]] -> [[a]]
triangle_matrix' r q =
--
        -- List of columns of a product of
        -- upper triangular matrix R and square
        -- matrix Q
        -- where
        --      r is a list of rows of R
        --      q is a list of columns of Q
--
[f r qk | qk <- q]
where
f t u
| null t = []
| otherwise = (sum_product (head t) u)
: (f (tail t) (tail u))
-- </pre>
-- <b>
-- | u > = A | v >
-- </b>
-- <p>
-- Product of a matrix and a ket-vector is another
-- ket vector. The following implementation assumes
-- that list "a" represents rows of matrix A.
-- <pre>
matrix_ket :: Num a => [[a]] -> [a] -> [a]
matrix_ket a v = [sum_product ai v| ai <- a]
-- </pre>
-- <b>
-- < u | = < v | A
-- </b>
-- <p>
-- Bra-vector multiplied by a matrix produces
-- another bra-vector. The implementation below
-- assumes that list "a" represents columns
-- of matrix A. It is also assumed that vector
-- "v" is given in its standard "ket" representation,
-- therefore the definition below uses "bra_ket"
-- instead of "sum_product".
-- <pre>
bra_matrix :: (Scalar a, Num a) => [a] -> [[a]] -> [a]
bra_matrix v a = [bra_ket v ai | ai <- a]
-- </pre>
-- <b>
-- alpha = < u | A | v >
-- </b>
-- <p>
-- This kind of product results in a scalar and is often
-- used to define elements of a new matrix, such as
-- <pre>
-- B[i,j] = < ei | A | ej >
-- </pre>
-- The implementation below assumes that list "a" represents
-- rows of matrix A.
-- <pre>
bra_matrix_ket :: (Scalar a, Num a) => [a] -> [[a]] -> [a] -> a
bra_matrix_ket u a v =
bra_ket u (matrix_ket a v)
-- </pre>
-- <b>
-- B = alpha A
-- </b>
-- <p>
-- Below is a function which multiplies matrix by a scalar:
-- <pre>
scalar_matrix :: Num a => a -> [[a]] -> [[a]]
scalar_matrix alpha a =
[[alpha*aij| aij <- ai] | ai<-a]
-- </pre>
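-- <p>
-- To make the intended representations concrete, here is a small
-- hand-computed sketch (with A = |1 2; 3 4| given as rows where rows
-- are expected, and B = |5 6; 7 8| given as columns where columns are
-- expected):
-- <pre>
--      ghci> matrix_matrix [[1,2],[3,4]] [[5,7],[6,8::Double]]
--      [[19.0,22.0],[43.0,50.0]]      -- rows of A B
--      ghci> matrix_ket [[1,2],[3,4]] [1,1::Double]
--      [3.0,7.0]
--      ghci> bra_matrix_ket [1,0] [[1,2],[3,4]] [0,1::Double]
--      2.0                            -- picks out the element A[1,2]
--      ghci> scalar_matrix 2 [[1,2],[3,4::Double]]
--      [[2.0,4.0],[6.0,8.0]]
-- </pre>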
-- <p>
-- <hr>
-- <p>
-- <b>
-- Orthogonalization process
-- </b>
-- <p>
-- Gram-Schmidt orthogonalization procedure is used here
-- for calculation of sets of mutually orthogonal vectors.
-- <dd>
-- Function "orthogonals" computes a set of mutually orthogonal
-- vectors - all orthogonal to a given vector. Such a set, plus
-- the input vector, forms a basis of the vector space. In other
-- words, they are the base vectors, although we cannot call them
-- unit vectors, since we do not normalize them for two reasons:
-- <ul>
-- <li>
-- None of the algorithms presented here needs this -- quite
-- costly -- normalization.
-- <li>
-- Some algorithms can be used either with doubles or with
-- rationals. The neat output of the latter is sometimes desirable
-- for pedagogical or accuracy reasons. But normalization requires "sqrt"
-- function, which is not defined for rational numbers. We could
-- use our module Fraction instead, where "sqrt" is defined,
-- but we'll leave it for a future revision of this module.
-- </ul>
-- <p>
-- Function "gram_schmidt" computes one vector - orthogonal
-- to an incomplete set of orthogonal vectors, which form a hyperplane
-- in the vector space. Another words, "gram_schmidt" vector is
-- perpendicular to such a hyperlane.
-- <pre>
orthogonals :: (Scalar a, Fractional a) => [a] -> [[a]]
orthogonals x =
--
-- List of (n-1) linearly independent vectors,
-- (mutually orthogonal) and orthogonal to the
-- vector x, but not normalized,
-- where
-- n is a length of x.
--
orth [x] size (next (-1))
where
orth a n m
| n == 1 = drop 1 (reverse a)
| otherwise = orth ((gram_schmidt a u ):a) (n-1) (next m)
where
u = unit_vector m size
size = length x
next i = if (i+1) == k then (i+2) else (i+1)
        k = length (takeWhile (== 0) x) -- index of the first non-zero component of x
gram_schmidt :: (Scalar a, Fractional a) => [[a]] -> [a] -> [a]
gram_schmidt a u =
--
-- Projection of vector | u > on some direction
-- orthogonal to the hyperplane spanned by the list 'a'
-- of mutually orthogonal (linearly independent)
-- vectors.
--
gram_schmidt' a u u
where
gram_schmidt' [] _ w = w
gram_schmidt' (b:bs) v w
| all (== 0) b = gram_schmidt' bs v w
| otherwise = gram_schmidt' bs v w'
where
w' = vectorCombination w (-(bra_ket b v)/(bra_ket b b)) b
vectorCombination x c y
| null x = []
| null y = []
| otherwise = (head x + c * (head y))
: (vectorCombination (tail x) c (tail y))
-- </pre>
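-- <p>
-- A small hand-worked example may help: for x = [1,1] the single vector
-- orthogonal to x produced by "orthogonals" is [-0.5, 0.5] (the unit
-- vector [0,1] minus its projection onto x), and its scalar product
-- with x indeed vanishes:
-- <pre>
--      ghci> orthogonals [1,1::Double]
--      [[-0.5,0.5]]
--      ghci> bra_ket [1,1::Double] [-0.5,0.5]
--      0.0
-- </pre>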
-- <p>
-- <hr>
-- <p>
-- <b>
-- Solutions of linear equations by orthogonalization
-- </b>
-- <p>
-- A matrix equation for unknown vector | x >
-- <pre>
-- A | x > = | b >
-- </pre>
-- can be rewritten as
-- <pre>
-- x1 | 1 > + x2 | 2 > + x3 | 3 > + ... + xn | n > = | b > (7.1)
-- where
-- | 1 >, | 2 >... represent columns of the matrix A
-- </pre>
-- For any n-dimensional vector, such as "1", there exist
-- n-1 linearly independent vectors "ck" that are orthogonal to "1";
-- that is, each satisfies the relation:
-- <pre>
-- < ck | 1 > = 0, for k = 1...m, where m = n - 1
-- </pre>
-- If we could find all such vectors, then we could multiply
-- the equation (7.1) by each of them, and end up with m = n-1
-- following equations
-- <pre>
-- < c1 | 2 > x2 + < c1 | 3 > x3 + ... < c1 | n > xn = < c1 | b >
-- < c2 | 2 > x2 + < c2 | 3 > x3 + ... < c2 | n > xn = < c2 | b >
-- .......
-- < cm | 2 > x2 + < cm | 3 > x3 + ... < cm | n > xn = < cm | b >
-- </pre>
-- But the above is nothing more than a new matrix equation
-- <pre>
-- A' | x' > = | b' >
-- or
-- x2 | 2'> + x3 | 3'> .... + xn | n'> = | b'>
-- where
-- primed vectors | 2' >, etc. are the columns of the new
-- matrix A'.
-- </pre>
-- with the problem dimension reduced by one.
-- <dd>
-- Taking as an example a four-dimensional problem and writing
-- down the successive transformations of the original equation
-- we will end up with the following triangular pattern made of
-- four vector equations:
-- <pre>
-- x1 | 1 > + x2 | 2 > + x3 | 3 > + x4 | 4 > = | b >
-- x2 | 2'> + x3 | 3'> + x4 | 4'> = | b'>
-- x3 | 3''> + x4 | 4''> = | b''>
-- x4 | 4'''> = | b'''>
-- </pre>
-- But if we premultiply each vector equation by a non-zero vector
-- of our choice, say < 1 | , < 2' |, < 3'' |, and < 4''' | - chosen
-- correspondingly for equations 1, 2, 3 and 4, then the above
-- system of vector equations will be converted to a much simpler
-- system of scalar equations. The result is
-- shown below in matrix representation:
-- <pre>
-- | p11 p12 p13 p14 | | x1 | = | q1 |
-- | 0 p22 p23 p24 | | x2 | = | q2 |
-- | 0 0 p33 p34 | | x3 | = | q3 |
-- | 0 0 0 p44 | | x4 | = | q4 |
-- </pre>
-- In effect, we have triangularized our original matrix A.
-- Below is a function that does that for any problem size:
-- <pre>
one_ket_triangle :: (Scalar a, Fractional a) => [[a]] -> [a] -> [([a],a)]
one_ket_triangle a b
--
-- List of pairs: (p, q) representing
-- rows of triangular matrix P and of vector | q >
-- in the equation P | x > = | q >, which
-- has been obtained by linear transformation
-- of the original equation A | x > = | b >
--
| null a = []
| otherwise = (p,q):(one_ket_triangle a' b')
where
p = [bra_ket u ak | ak <- a]
q = bra_ket u b
a' = [[bra_ket ck ai | ck <- orth] | ai <- v]
b' = [ bra_ket ck b | ck <- orth]
orth = orthogonals u
u = head a
v = tail a
-- </pre>
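-- <p>
-- For a concrete feel (hand-computed; exact in this integer-valued
-- case): with the columns a = [[1,0],[1,1]] - that is, A = |1 1; 0 1| -
-- and b = [3,2], the triangularization yields the pairs
-- <pre>
--      ghci> one_ket_triangle [[1,0],[1,1]] [3,2::Double]
--      [([1.0,1.0],3.0),([1.0],2.0)]
-- </pre>
-- which encode the triangular system x1 + x2 = 3, x2 = 2.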
-- The triangular system of equations can be easily solved by
-- successive substitutions - starting with the last equation.
-- <pre>
one_ket_solution :: (Scalar a, Fractional a) => [[a]] -> [a] -> [a]
one_ket_solution a b =
--
-- List representing vector |x>, which is
-- a solution of the matrix equation
-- A |x> = |b>
-- where
-- a is a list of columns of matrix A
-- b is a list representing vector |b>
--
solve' (unzip (reverse (one_ket_triangle a b))) []
where
solve' (d, c) xs
| null d = xs
| otherwise = solve' ((tail d), (tail c)) (x:xs)
where
x = (head c - (sum_product (tail u) xs))/(head u)
u = head d
-- </pre>
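-- <p>
-- Continuing the small example above (hand-computed): solving
-- x1 + x2 = 3, x2 = 2 by back-substitution gives
-- <pre>
--      ghci> one_ket_solution [[1,0],[1,1]] [3,2::Double]
--      [1.0,2.0]
-- </pre>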
-- The triangularization procedure can be easily extended
-- to a list of several ket-vectors | b > on the right hand
-- side of the original equation A | x > = | b > -- instead
-- of just one:
-- <pre>
many_kets_triangle :: (Scalar a, Fractional a) => [[a]] -> [[a]] -> [([a],[a])]
many_kets_triangle a b
--
-- List of pairs: (p, q) representing
-- rows of triangular matrix P and of rectangular matrix Q
-- in the equation P X = Q, which
-- has been obtained by linear transformation
-- of the original equation A X = B
-- where
-- a is a list of columns of matrix A
-- b is a list of columns of matrix B
--
| null a = []
| otherwise = (p,q):(many_kets_triangle a' b')
where
p = [bra_ket u ak | ak <- a]
q = [bra_ket u bk | bk <- b]
a' = [[bra_ket ck ai | ck <- orth] | ai <- v]
b' = [[bra_ket ck bi | ck <- orth] | bi <- b]
orth = orthogonals u
u = head a
v = tail a
-- </pre>
-- Similarly, function 'one_ket_solution' can be generalized
-- to function 'many_kets_solution' that handles cases with
-- several ket-vectors on the right hand side.
-- <pre>
many_kets_solution :: (Scalar a, Fractional a) => [[a]] -> [[a]] -> [[a]]
many_kets_solution a b =
--
-- List of columns of matrix X, which is
-- a solution of the matrix equation
-- A X = B
-- where
-- a is a list of columns of matrix A
-- b is a list of columns of matrix B
--
solve' p q emptyLists
where
(p, q) = unzip (reverse (many_kets_triangle a b))
emptyLists = [[] | _ <- [1..(length (head q))]]
solve' a' b' x
| null a' = x
| otherwise = solve' (tail a') (tail b')
[(f vk xk):xk | (xk, vk) <- (zip x v)]
where
f vk xk = (vk - (sum_product (tail u) xk))/(head u)
u = head a'
v = head b'
-- </pre>
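-- <p>
-- A hand-computed sketch with a diagonal matrix and two right-hand
-- sides (columns [4,9] and [2,3] of B, for A = |2 0; 0 3|):
-- <pre>
--      ghci> many_kets_solution [[2,0],[0,3]] [[4,9],[2,3::Double]]
--      [[2.0,3.0],[1.0,1.0]]
-- </pre>
-- Each returned column solves A | x > = | b > for the corresponding
-- column of B.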
-- <p>
-- <hr>
-- <p>
-- <b>
-- Matrix inversion
-- </b>
-- <p>
-- Function 'many_kets_solution' can be used to compute
-- inverse of matrix A by specializing matrix B to a unit
-- matrix I:
-- <pre>
-- A X = I
-- </pre>
-- It follows that matrix X is an inverse of A; that is X = A<sup>-1</sup>.
-- <pre>
inverse :: (Scalar a, Fractional a) => [[a]] -> [[a]]
inverse a = many_kets_solution a (unit_matrix (length a))
--
-- List of columns of inverse of matrix A
-- where
-- a is list of columns of A
-- </pre>
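-- <p>
-- For example (hand-computed; exact for this diagonal matrix):
-- <pre>
--      ghci> inverse [[2,0],[0,4::Double]]
--      [[0.5,0.0],[0.0,0.25]]
-- </pre>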
-- <p>
-- <hr>
-- <p>
-- <b>
-- QR factorization of matrices
-- </b>
-- <p>
-- The process described above and implemented by
-- 'many_kets_triangle' function transforms the equation
-- <pre>
-- A X = B
-- </pre>
-- into another equation for the same matrix X
-- <pre>
-- R X = S
-- </pre>
-- where R is an upper triangular matrix. All operations
-- performed on matrices A and B during this process are linear,
-- and therefore we should be able to find a square matrix Q
-- that describes the entire process in one step. Indeed, assuming
-- that matrix A can be decomposed as a product of unknown matrix Q
-- and triangular matrix R and that Q<sup>-1</sup> is an inverse of matrix Q
-- we can reach the last equation by following these steps:
-- <pre>
-- A X = B
-- (Q R) X = B
-- Q<sup>-1</sup> Q R X = Q<sup>-1</sup> B
-- R X = S
-- </pre>
-- It follows that during this process a given matrix B
-- transforms to matrix S, as delivered by 'many_kets_triangle':
-- <pre>
-- S = Q<sup>-1</sup> B
-- </pre>
-- from which the inverse of Q can be found:
-- <pre>
-- Q<sup>-1</sup> = S B<sup>-1</sup>
-- </pre>
-- Having freedom in the choice of the right-hand side matrix B,
-- we can choose the unit matrix I in place of B, and thereby
-- simplify the definition of Q<sup>-1</sup>:
-- <pre>
-- Q<sup>-1</sup> = S, if B is unit matrix
-- </pre>
-- It follows that any non-singular matrix A can be decomposed
-- as a product of a matrix Q and a triangular matrix R
-- <pre>
-- A = Q R
-- </pre>
-- where matrices Q<sup>-1</sup> and R are delivered by "many_kets_triangle"
-- as a result of triangularization process of equation:
-- <pre>
-- A X = I
-- </pre>
-- The function below extracts a pair of matrices Q and R
-- from the answer provided by "many_kets_triangle".
-- During this process it inverts matrix Q<sup>-1</sup> to Q.
-- This factorization will be used by a sequence of similarity
-- transformations to be defined in the next section.
-- <pre>
factors_QR :: (Scalar a, Fractional a) => [[a]] -> ([[a]],[[a]])
factors_QR a =
--
-- A pair of matrices (Q, R), such that
-- A = Q R
-- where
-- R is upper triangular matrix in row representation
-- (without redundant zeros)
-- Q is a transformation matrix in column representation
-- A is square matrix given as columns
--
(inverse (transposed q1),r)
where
(r, q1) = unzip (many_kets_triangle a (unit_matrix (length a)))
-- </pre>
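-- <p>
-- A hand-worked sketch of the factorization (shown rounded; the actual
-- floating-point output will carry rounding noise): for the columns
-- a = [[1,2],[3,4]], i.e. A = |1 3; 2 4|,
-- <pre>
--      ghci> factors_QR [[1,2],[3,4::Double]]
--      ([[0.2,0.4],[5.0,-2.5]],[[5.0,11.0],[0.16]])
-- </pre>
-- One can check by hand that Q R = A:
-- <pre>
--      | 0.2  5.0 | | 5  11   |   | 1  3 |
--      | 0.4 -2.5 | | 0  0.16 | = | 2  4 |
-- </pre>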
-- <p>
-- <hr>
-- <p>
-- <b>
-- Computation of the determinant
-- </b>
-- <!-- added by Henning Thielemann -->
-- <pre>
determinant :: (Scalar a, Fractional a) => [[a]] -> a
determinant a =
let (q,r) = factors_QR a
    -- matrix Q is not normed, so we have to respect the norms of its columns
in product (map norm q) * product (map head r)
-- </pre>
-- Naive division-free computation of the determinant by expanding the first column.
-- It consumes n! multiplications.
-- <pre>
determinantNaive :: (Num a) => [[a]] -> a
determinantNaive [] = 1
determinantNaive m =
sum (alternate
(zipWith (*) (map head m)
(map determinantNaive (removeEach (map tail m)))))
-- </pre>
-- Compute the determinant with about n^4 multiplications
-- without division according to the clow decomposition algorithm
-- of Mahajan and Vinay, and Berkowitz
-- as presented by Günter Rote:
-- <a href="http://page.inf.fu-berlin.de/~rote/Papers/pdf/Division-free+algorithms.pdf">
-- Division-Free Algorithms for the Determinant and the Pfaffian:
-- Algebraic and Combinatorial Approaches</a>.
-- <pre>
determinantClow :: (Num a) => [[a]] -> a
determinantClow [] = 1
determinantClow m =
let lm = length m
in parityFlip lm (last (newClow m
(nest (lm-1) (longerClow m)
(take lm (iterate (0:) [1])))))
-- </pre>
-- Compute the weights of all clow sequences
-- where the last clow is closed and a new one is started.
-- <pre>
newClow :: (Num a) => [[a]] -> [[a]] -> [a]
newClow a c =
scanl (-) 0
(sumVec (zipWith (zipWith (*)) (List.transpose a) c))
-- </pre>
-- Compute the weights of all clow sequences
-- where the last (open) clow is extended by a new arc.
-- <pre>
extendClow :: (Num a) => [[a]] -> [[a]] -> [[a]]
extendClow a c =
map (\ai -> sumVec (zipWith scaleVec ai c)) a
-- </pre>
-- Given the matrix of all weights of clows of length l
-- compute the weight matrix for all clows of length (l+1).
-- Take the result of 'newClow' as diagonal
-- and the result of 'extendClow' as lower triangle
-- of the weight matrix.
-- <pre>
longerClow :: (Num a) => [[a]] -> [[a]] -> [[a]]
longerClow a c =
let diagonal = newClow a c
triangle = extendClow a c
in zipWith3 (\i t d -> take i t ++ [d]) [0 ..] triangle diagonal
-- </pre>
-- Auxiliary functions for the clow determinant.
-- <pre>
{- | Compositional power of a function,
i.e. apply the function n times to a value. -}
nest :: Int -> (a -> a) -> a -> a
nest 0 _ x = x
nest n f x = f (nest (n-1) f x)
{- successively select elements from xs and remove one in each result list -}
removeEach :: [a] -> [[a]]
removeEach xs =
zipWith (++) (List.inits xs) (tail (List.tails xs))
alternate :: (Num a) => [a] -> [a]
alternate = zipWith id (cycle [id, negate])
parityFlip :: Num a => Int -> a -> a
parityFlip n x = if even n then x else -x
{-| Weight a list of numbers by a scalar. -}
scaleVec :: (Num a) => a -> [a] -> [a]
scaleVec k = map (k*)
{-| Add corresponding numbers of two lists. -}
{- don't use zipWith because it clips to the shorter list -}
addVec :: (Num a) => [a] -> [a] -> [a]
addVec x [] = x
addVec [] y = y
addVec (x:xs) (y:ys) = x+y : addVec xs ys
{-| Add some lists. -}
sumVec :: (Num a) => [[a]] -> [a]
sumVec = foldl addVec []
-- </pre>
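-- <p>
-- Since the determinant of a matrix equals the determinant of its
-- transpose, the row/column interpretation of the nested lists does
-- not matter here. A tiny hand-checked sketch:
-- <pre>
--      ghci> determinantNaive [[1,2],[3,4]]
--      -2
--      ghci> determinantClow [[1,2],[3,4]]
--      -2
-- </pre>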
-- <p>
-- <hr>
-- <p>
-- <b>
-- Similarity transformations and eigenvalues
-- </b>
-- <p>
-- Two n-square matrices A and B are called similar if there
-- exists a non-singular matrix S such that:
-- <pre>
-- B = S<sup>-1</sup> A S
-- </pre>
-- It can be proven that:
-- <ul>
-- <li>
-- Any two similar matrices have the same eigenvalues
-- <li>
-- Every n-square matrix A is similar to a triangular matrix
-- whose diagonal elements are the eigenvalues of A.
-- </ul>
-- <p>
-- If matrix A can be transformed to a triangular or a diagonal
-- matrix Ak by a sequence of similarity transformations then
-- the eigenvalues of matrix A are the diagonal elements of Ak.
-- <p>
-- Let's construct the sequence of matrices similar to A
-- <pre>
-- A, A1, A2, A3...
-- </pre>
-- by the following iterations - each of which factorizes a matrix
-- by applying the function 'factors_QR' and then forms a product
-- of the factors taken in the reverse order:
-- <pre>
-- A = Q R = Q (R Q) Q<sup>-1</sup> = Q A1 Q<sup>-1</sup> =
-- = Q (Q1 R1) Q<sup>-1</sup> = Q Q1 (R1 Q1) Q1<sup>-1</sup> Q<sup>-1</sup> = Q Q1 A2 Q1<sup>-1</sup> Q<sup>-1</sup> =
-- = Q Q1 (Q2 R2) Q1<sup>-1</sup> Q<sup>-1</sup> = ...
-- </pre>
-- We are hoping that after some number of iterations some matrix
-- Ak would become triangular and therefore its diagonal
-- elements could serve as eigenvalues of matrix A. As long as
-- a matrix has real eigenvalues only, this method should work well.
-- This applies to symmetric and hermitian matrices. It appears
-- that general complex matrices -- hermitian or not -- can also
-- be handled this way. Even more, this method also works for some
-- nonsymmetric real matrices, which have real eigenvalues only.
-- <dd>
-- The only type of matrices that cannot be treated by this algorithm
-- are real nonsymmetric matrices, some of whose eigenvalues are complex.
-- There is no operation in the process that converts real elements
-- to complex ones, which could find their way into diagonal
-- positions of a triangular matrix. But a simple preconditioning
-- of a matrix -- described in the next section -- replaces
-- a real matrix by a complex one, whose eigenvalues are related
-- to the eigenvalues of the matrix being replaced. And this allows
-- us to apply the same method all across the board.
-- <dd>
-- It is worth noting that the process known in the literature as QR
-- factorization is not uniquely defined, and different algorithms
-- are employed for it. The algorithms using QR factorization
-- apply only to symmetric or hermitian matrices, and the Q matrix
-- must be either orthogonal or unitary.
-- <dd>
-- But our transformation matrix Q is neither orthogonal nor unitary,
-- although its first row is orthogonal to all other rows. In fact,
-- this factorization is only similar to QR factorization. We just
-- keep the same name to help identify a category of the methods
-- to which it belongs.
-- <dd>
-- The same factorization is used for tackling two major problems:
-- solving the systems of linear equations and finding the eigenvalues
-- of matrices.
-- <dd>
-- Below is the function 'similar_to', which makes a new matrix that is
-- similar to a given matrix by applying our similarity transformation.
-- <dd>
-- Function 'iterated_eigenvalues' applies this transformation n
-- times - storing diagonals of each new matrix as approximations of
-- eigenvalues.
-- <dd>
-- Function 'eigenvalues' follows the same process but reports the last
-- approximation only.
-- <pre>
similar_to :: (Scalar a, Fractional a) => [[a]] -> [[a]]
similar_to a =
--
-- List of columns of matrix A1 similar to A
-- obtained by factoring A as Q R and then
-- forming the product A1 = R Q = (inverse Q) A Q
-- where
-- a is list of columns of A
--
triangle_matrix' r q
where
(q,r) = factors_QR a
iterated_eigenvalues :: (Scalar a1, Fractional a1, Eq a, Num a) => [[a1]] -> a -> [[a1]]
iterated_eigenvalues a n
--
-- List of vectors representing
-- successive approximations of
-- eigenvalues of matrix A
-- where
-- a is a list of columns of A
-- n is a number of requested iterations
--
| n == 0 = []
| otherwise = (diagonals a)
: iterated_eigenvalues (similar_to a) (n-1)
eigenvalues :: (Scalar a1, Fractional a1, Eq a, Num a) => [[a1]] -> a -> [a1]
eigenvalues a n
--
-- Eigenvalues of matrix A
-- obtained by n similarity iterations
-- where
-- a are the columns of A
--
| n == 0 = diagonals a
| otherwise = eigenvalues (similar_to a) (n-1)
-- </pre>
-- <p>
-- <hr>
-- <p>
-- <b>
-- Preconditioning of real nonsymmetric matrices
-- </b>
-- <p>
-- As mentioned above, our QR-like factorization method works
-- well with almost all kinds of matrices, with the exception
-- of one class: real nonsymmetric matrices that have
-- complex eigenvalues.
-- <dd>
-- There is no mechanism in that method that would be able to
-- produce complex eigenvalues out of the real components of
-- this type of nonsymmetric matrix. A trivial replacement
-- of the real components of a matrix by their complex counterparts
-- does not work, because zero-valued imaginary components do
-- not contribute in any way to the production of nontrivial
-- imaginary components during the factorization process.
-- <dd>
-- What we need is a trick that replaces real nonsymmetric matrix
-- by a nontrivial complex matrix in such a way that the results
-- of such replacements could be undone when the series of
-- similarity transformations finally produced the expected
-- effect in a form of a triangular matrix.
-- <dd>
-- The practical solution is surprisingly simple:
-- it suffices to add any complex number, such as "i", to the
-- main diagonal of a matrix, and when triangularization is done
-- -- subtract it back from the computed eigenvalues.
-- The explanation follows.
-- <p>
-- Consider the eigenproblem for real and nonsymmetric matrix A.
-- <pre>
-- A | x > = a | x >
-- </pre>
-- Let us now define a new complex matrix B, such that:
-- <pre>
-- B = A + alpha I
-- where
-- I is a unit matrix and alpha is a complex scalar
-- </pre>
-- It is obvious that matrices A and B commute; that is:
-- <pre>
-- A B = B A
-- </pre>
-- It can be proven that if two matrices commute then they
-- have the same eigenvectors. Therefore we can use vector
-- | x > of matrix A as an eigenvector of B:
-- <pre>
-- B | x > = b | x >
-- B | x > = A | x > + alpha I | x >
-- = a | x > + alpha | x >
-- = (a + alpha) | x >
-- </pre>
-- It follows that eigenvalues of B are related to the eigenvalues
-- of A by:
-- <pre>
-- b = a + alpha
-- </pre>
-- After the eigenvalues of the complex matrix B have been successfully
-- computed, all that remains is to subtract "alpha" from them
-- to obtain the eigenvalues of A. And nothing has to be done
-- to the eigenvectors of B - they are the same for A as well.
-- Simple and elegant!
-- <p>
-- Below is an auxiliary function that adds a scalar to the
-- diagonal of a matrix:
-- <pre>
add_to_diagonal :: Num a => a -> [[a]] -> [[a]]
add_to_diagonal alpha a =
--
-- Add constant alpha to diagonal of matrix A
--
[f ai ni | (ai,ni) <- zip a [0..(length a -1)]]
where
f b k = p++[head q + alpha]++(tail q)
where
(p,q) = splitAt k b
-- </pre>
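-- <p>
-- For instance (hand-computed), preconditioning the 2x2 matrix used in
-- the nonsymmetric example further below by adding "i" to its diagonal:
-- <pre>
--      ghci> add_to_diagonal (0:+1) [[2,1],[-3,0::Complex Double]]
--      [[2.0 :+ 1.0,1.0 :+ 0.0],[(-3.0) :+ 0.0,0.0 :+ 1.0]]
-- </pre>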
-- <p>
-- <hr>
-- <p>
-- <b>
-- Examples of iterated eigenvalues
-- </b>
-- <p>
-- Here is an example of a symmetric real matrix with results
-- of application of function 'iterated_eigenvalues'.
-- <pre>
-- | 7 -2 1 |
-- |-2 10 -2 |
-- | 1 -2 7 |
-- [[7.0, 10.0, 7.0],
-- [8.66667, 9.05752, 6.27582],
-- [10.7928, 7.11006, 6.09718],
-- [11.5513, 6.40499, 6.04367],
-- [11.7889, 6.18968, 6.02142],
-- [11.8943, 6.09506, 6.01068],
-- [11.9468, 6.04788, 6.00534],
-- [11.9733, 6.02405, 6.00267],
-- [11.9866, 6.01206, 6.00134],
-- [11.9933, 6.00604, 6.00067],
-- [11.9966, 6.00302, 6.00034],
-- [11.9983, 6.00151, 6.00017],
-- [11.9992, 6.00076, 6.00008],
-- [11.9996, 6.00038, 6.00004],
-- [11.9998, 6.00019, 6.00002],
-- [11.9999, 6.00010, 6.00001],
-- [11.9999, 6.00005, 6.00001]]
-- The true eigenvalues are:
-- 12, 6, 6
-- </pre>
-- Here is an example of a hermitian matrix. (Eigenvalues of hermitian
-- matrices are real.) The algorithm works well and converges fast.
-- <pre>
-- |  2  0  i |
-- |  0  1  0 |
-- | -i  0  2 |
-- [[2.8 :+ 0.0, 1.0 :+ 0.0, 1.2 :+ 0.0],
-- [2.93979 :+ 0.0, 1.0 :+ 0.0, 1.06021 :+ 0.0],
-- [2.97972 :+ 0.0, 1.0 :+ 0.0, 1.02028 :+ 0.0],
-- [2.9932 :+ 0.0, 1.0 :+ 0.0, 1.0068 :+ 0.0],
-- [2.99773 :+ 0.0, 1.0 :+ 0.0, 1.00227 :+ 0.0],
-- [2.99924 :+ 0.0, 1.0 :+ 0.0, 1.00076 :+ 0.0],
-- [2.99975 :+ 0.0, 1.0 :+ 0.0, 1.00025 :+ 0.0],
-- [2.99992 :+ 0.0, 1.0 :+ 0.0, 1.00008 :+ 0.0],
-- [2.99997 :+ 0.0, 1.0 :+ 0.0, 1.00003 :+ 0.0],
-- [2.99999 :+ 0.0, 1.0 :+ 0.0, 1.00001 :+ 0.0],
-- [3.0 :+ 0.0, 1.0 :+ 0.0, 1.0 :+ 0.0],
-- [3.0 :+ 0.0, 1.0 :+ 0.0, 1.0 :+ 0.0],
-- [3.0 :+ 0.0, 1.0 :+ 0.0, 1.0 :+ 0.0]]
-- </pre>
-- Here is another example: this is a complex matrix, and it is not
-- even hermitian. Yet the algorithm still works, although it
-- fluctuates around the true values.
-- <pre>
-- | 2-i 0 i |
-- | 0 1+i 0 |
-- | i 0 2-i |
-- [[2.0 :+ (-1.33333), 1.0 :+ 1.0, 2.0 :+ (-0.666667)],
-- [1.89245 :+ (-1.57849), 1.0 :+ 1.0, 2.10755 :+ (-0.421509)],
-- [1.81892 :+ (-1.80271), 1.0 :+ 1.0, 2.18108 :+ (-0.197289)],
-- [1.84565 :+ (-1.99036), 1.0 :+ 1.0, 2.15435 :+ (-0.00964242)],
-- [1.93958 :+ (-2.07773), 1.0 :+ 1.0, 2.06042 :+ 0.0777281],
-- [2.0173 :+ (-2.06818), 1.0 :+ 1.0, 1.9827 :+ 0.0681793],
-- [2.04357 :+ (-2.02437), 1.0 :+ 1.0, 1.95643 :+ 0.0243654],
-- [2.03375 :+ (-1.99072), 1.0 :+ 1.0, 1.96625 :+ (-0.00928429)],
-- [2.01245 :+ (-1.97875), 1.0 :+ 1.0, 1.98755 :+ (-0.0212528)],
-- [1.99575 :+ (-1.98307), 1.0 :+ 1.0, 2.00425 :+ (-0.0169263)],
-- [1.98938 :+ (-1.99359), 1.0 :+ 1.0, 2.01062 :+ (-0.00640583)],
-- [1.99145 :+ (-2.00213), 1.0 :+ 1.0, 2.00855 :+ 0.00212504],
-- [1.9968 :+ (-2.00535), 1.0 :+ 1.0, 2.0032 :+ 0.00535265],
-- [2.00108 :+ (-2.00427), 1.0 :+ 1.0, 1.99892 :+ 0.0042723],
-- [2.00268 :+ (-2.00159), 1.0 :+ 1.0, 1.99732 :+ 0.00158978],
-- [2.00213 :+ (-1.99946), 1.0 :+ 1.0, 1.99787 :+ (-0.000541867)],
-- [2.00079 :+ (-1.99866), 1.0 :+ 1.0, 1.9992 :+ (-0.00133514)],
-- [1.99973 :+ (-1.99893), 1.0 :+ 1.0, 2.00027 :+ (-0.00106525)],
-- [1.99933 :+ (-1.9996) , 1.0 :+ 1.0, 2.00067 :+ (-0.000397997)],
-- [1.99947 :+ (-2.00013), 1.0 :+ 1.0, 2.00053 :+ 0.000134972]]
-- The true eigenvalues are
-- 2 - 2i, 1 + i, 2
-- </pre>
-- Some nonsymmetric real matrices have all real eigenvalues, and
-- our algorithm still works for such cases. Here is one
-- such example, which traditionally would have to be treated
-- by one of the Lanczos-like algorithms specifically designed
-- for nonsymmetric real matrices. Evaluation of
-- <br>
-- <i>
-- iterated_eigenvalues [[2,1,1],[-2,1,3],[3,1,-1::Double]] 20
-- </i>
-- <br>
-- gives the following results
-- <pre>
-- [[3.0, -0.70818,-0.291815],
-- [3.06743, -3.41538, 2.34795],
-- [3.02238, -1.60013, 0.577753],
-- [3.00746, -2.25793, 1.25047],
-- [3.00248, -1.88764, 0.885154],
-- [3.00083, -2.06025, 1.05943],
-- [3.00028, -1.97098, 0.970702],
-- [3.00009, -2.0148, 1.01471],
-- [3.00003, -1.99268, 0.992648],
-- [3.00001, -2.00368, 1.00367],
-- [3.0, -1.99817, 0.998161],
-- [3.0, -2.00092, 1.00092],
-- [3.0, -1.99954, 0.99954],
-- [3.0, -2.00023, 1.00023],
-- [3.0, -1.99989, 0.999885],
-- [3.0, -2.00006, 1.00006],
-- [3.0, -1.99997, 0.999971],
-- [3.0, -2.00001, 1.00001],
-- [3.0, -1.99999, 0.999993],
-- [3.0, -2.0, 1.0]]
-- The true eigenvalues are:
-- 3, -2, 1
-- </pre>
-- Finally, here is a case of a nonsymmetric real matrix with
-- complex eigenvalues:
-- <pre>
-- | 2 -3 |
-- | 1 0 |
-- </pre>
-- The direct application of "iterated_eigenvalues" would
-- fail to produce the expected eigenvalues:
-- <pre>
-- 1 + i sqrt(2) and 1 - i sqrt (2)
-- </pre>
-- But if we first precondition the matrix by adding "i" to its diagonal:
-- <pre>
-- | 2+i  -3 |
-- | 1     i |
-- </pre>
-- and then compute its iterated eigenvalues:
-- <br>
-- <i>
-- iterated_eigenvalues [[2:+1,1],[-3,0:+1]] 20
-- </i>
-- <br>
-- then the method will succeed. Here are the results:
-- <pre>
-- [[1.0 :+ 1.66667, 1.0 :+ 0.333333 ],
-- [0.600936 :+ 2.34977, 1.39906 :+ (-0.349766)],
-- [0.998528 :+ 2.59355, 1.00147 :+ (-0.593555)],
-- [1.06991 :+ 2.413, 0.93009 :+ (-0.412998)],
-- [1.00021 :+ 2.38554, 0.99979 :+ (-0.385543)],
-- [0.988004 :+ 2.41407, 1.012 :+ (-0.414074)],
-- [0.999963 :+ 2.41919, 1.00004 :+ (-0.419191)],
-- [1.00206 :+ 2.41423, 0.99794 :+ (-0.414227)],
-- [1.00001 :+ 2.41336, 0.99999 :+ (-0.413361)],
-- [0.999647 :+ 2.41421, 1.00035 :+ (-0.414211)],
-- [0.999999 :+ 2.41436, 1.0 :+ (-0.41436) ],
-- [1.00006 :+ 2.41421, 0.99993 :+ (-0.414214)],
-- [1.0 :+ 2.41419, 1.0 :+ (-0.414188)],
-- [0.99999 :+ 2.41421, 1.00001 :+ (-0.414213)],
-- [1.0 :+ 2.41422, 1.0 :+ (-0.414218)],
-- [1.0 :+ 2.41421, 0.99999 :+ (-0.414213)],
-- [1.0 :+ 2.41421, 1.0 :+ (-0.414212)],
-- [1.0 :+ 2.41421, 1.0 :+ (-0.414213)],
-- [1.0 :+ 2.41421, 1.0 :+ (-0.414213)],
-- [1.0 :+ 2.41421, 1.0 :+ (-0.414213)]]
-- </pre>
-- After subtracting "i" from the last result, we will get
-- what is expected.
-- <p>
-- <hr>
-- <p>
-- <b>
-- Eigenvectors for distinct eigenvalues
-- </b>
-- <p>
-- Assuming that the eigenvalues of matrix A have already been found,
-- we may now attempt to find the corresponding eigenvectors
-- by solving the following homogeneous equation
-- <pre>
-- (A - a I) | x > = 0
-- </pre>
-- for each eigenvalue "a". The matrix
-- <pre>
-- B = A - a I
-- </pre>
-- is by definition singular, but in most cases it can be
-- triangularized by the familiar "factors_QR" procedure.
-- <pre>
-- B | x > = Q R | x > = 0
-- </pre>
-- It follows that the unknown eigenvector | x > is one of
-- the solutions of the homogeneous equation:
-- <pre>
-- R | x > = 0
-- </pre>
-- where R is a singular, upper triangular matrix with at least one
-- zero on its diagonal.
-- <dd>
-- If | x > is a solution we seek, so is its scaled version
-- alpha | x >. Therefore we have some freedom of scaling choice.
-- Since this is a homogeneous equation, one of the components
-- of | x > can be freely chosen, while the remaining components
-- will depend on that choice.
-- <p>
-- To solve the above, we work from the bottom of the matrix
-- equation up, as illustrated in the example below:
-- <pre>
-- | 0 1 1 3 | | x1 |
-- | 0 1 1 2 | | x2 | /\
-- | 0 0 2 4 | | x3 | = 0 ||
-- | 0 0 0 0 | | x4 | ||
-- </pre>
-- Recall that the diagonal elements of any triangular matrix
-- are its eigenvalues.
-- Our example matrix has three distinct eigenvalues:
-- 0, 1, 2. The eigenvalue 0 is degenerate, with degree two.
-- The presence of degenerate eigenvalues complicates
-- the solution process. The complication arises when we have to
-- decide how to solve the trivial scalar equations
-- with zero coefficients, such as
-- <pre>
-- 0 * x4 = 0
-- </pre>
-- resulting from multiplication of the bottom row by vector | x >.
-- Here we have two choices: "x4" could be set to 0, or to any
-- nonzero number, say 1. By always choosing the "0" option
-- we might end up with the all-zero trivial vector -- which is
-- obviously not what we want. A persistent choice of the "1" option
-- might lead to a conflict between some of the equations, such as
-- equations one and four in our example.
-- <p>
-- So the strategy is as follows.
-- <p>
-- If there is at least one zero on the diagonal, find the topmost
-- row with zero on the diagonal and choose for it the solution "1".
-- Diagonal zeros in other rows force the solution "0".
-- If the diagonal element is not zero, then simply solve
-- the arithmetic equation that arises from substituting the
-- previously computed components of the eigenvector. Since certain
-- inaccuracies accumulate during QR factorization, set to zero all
-- very small elements of matrix R.
-- <p>
-- By applying this strategy to our example we'll end up with the
-- eigenvector
-- <pre>
-- < x | = [1, 0, 0, 0]
-- </pre>
-- <p>
-- If the degree of degeneracy of an eigenvalue of A is 1, then the
-- corresponding eigenvector is unique -- up to scaling.
-- Otherwise an eigenvector found by this method is one of many
-- possible solutions, and any linear combination of such solutions
-- is also an eigenvector. This method is not able to find more than one
-- solution for degenerate eigenvalues. An alternative method, which
-- handles degenerate cases, will be described in the next section.
-- <p>
-- The function below calculates eigenvectors corresponding to
-- distinct selected eigenvalues of any square matrix A, provided
-- that the singular matrix B = A - a I can still be factorized as Q R,
-- where R is an upper triangular matrix.
-- <pre>
eigenkets :: (Scalar a, Fractional a) => [[a]] -> [a] -> [[a]]
eigenkets a u
--
-- List of eigenkets of a square matrix A
-- where
-- a is a list of columns of A
-- u is a list of eigenvalues of A
-- (This list does not need to be complete)
--
| null u = []
| not (null x') = x':(eigenkets a (tail u))
| otherwise = (eigenket_UT (reverse b) d []):(eigenkets a (tail u))
where
a' = add_to_diagonal (-(head u)) a
x' = unit_ket a' 0 (length a')
b = snd (factors_QR a')
d = discriminant [head bk | bk <- b] 1
discriminant v n
| null v = []
| otherwise = x : (discriminant (tail v) m)
where
(x, m)
| (head u) == 0 = (n, 0)
| otherwise = (n, n)
eigenket_UT c e xs
| null c = xs
| otherwise = eigenket_UT (tail c) (tail e) (x:xs)
where
x = solve_row (head c) (head e) xs
solve_row (v:vs) n x
| almostZero v = n
| otherwise = q/v
where
q
| null x = 0
| otherwise = -(sum_product vs x)
unit_ket b' m n
| null b' = []
| all (== 0) (head b') = unit_vector m n
| otherwise = unit_ket (tail b') (m+1) n
-- </pre>
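-- For instance, reusing the real matrix from the eigenvalue
-- demonstration above, a call of the following shape (a sketch
-- only, since eigenvectors are determined only up to scaling)
-- <br>
-- <i>
-- eigenkets [[2,1,1],[-2,1,3],[3,1,-1::Double]] [3,-2,1]
-- </i>
-- <br>
-- should return a list of three eigenkets, one per supplied
-- eigenvalue.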
-- <p>
-- <hr>
-- <p>
-- <b>
-- Eigenvectors for degenerate eigenvalues
-- </b>
-- <p>
-- A few facts:
-- <ul>
-- <li>
-- Eigenvectors of a general matrix A, which does not have any
-- special symmetry, are not generally orthogonal. However, they
-- are orthogonal, or can be made orthogonal, to another set of
-- vectors that are eigenvectors of the adjoint matrix A<sup>+</sup>;
-- that is, the matrix obtained by complex conjugation and transposition
-- of matrix A.
-- <li>
-- Eigenvectors corresponding to nondegenerate eigenvalues of a
-- hermitian or symmetric matrix are orthogonal.
-- <li>
-- Eigenvectors corresponding to degenerate eigenvalues are - in
-- general - neither orthogonal among themselves, nor orthogonal
-- to the remaining eigenvectors corresponding to other
-- eigenvalues. But since any linear combination of such degenerate
-- eigenvectors is also an eigenvector, we can orthogonalize
-- them by the Gram-Schmidt orthogonalization procedure.
-- </ul>
-- Many practical applications deal solely with hermitian
-- or symmetric matrices, and for such cases the orthogonalization
-- is not only possible, but also desired for a variety of reasons.
-- <dd>
-- But the method presented in the previous section is not able
-- to find more than one eigenvector corresponding to a degenerate
-- eigenvalue. For example, the symmetric matrix
-- <pre>
-- | 7 -2 1 |
-- A = | -2 10 -2 |
-- | 1 -2 7 |
-- </pre>
-- has two distinct eigenvalues: 12 and 6 -- the latter
-- being degenerate, with degree two. Two corresponding
-- eigenvectors are:
-- <pre>
-- < x1 | = [1, -2, 1] -- for 12
-- < x2 | = [1, 1, 1] -- for 6
-- </pre>
-- Those vectors are orthogonal -- as they must be, since they are
-- eigenvectors of a symmetric matrix belonging to distinct
-- eigenvalues. However, we are missing a third
-- distinct eigenvector. To find it we need another method.
-- One possibility is presented below and the explanation
-- follows.
-- <pre>
eigenket' :: (Scalar a, Fractional a) => [[a]] -> a -> a -> [a] -> [a]
eigenket' a alpha eps x' =
--
-- Eigenket of matrix A corresponding to eigenvalue alpha
-- where
-- a is a list of columns of matrix A
-- eps is a trial inaccuracy factor
-- artificially introduced to cope
-- with singularities of A - alpha I.
-- One might try eps = 0, 0.00001, 0.001, etc.
-- x' is a trial eigenvector
--
scaled [xk' - dk | (xk', dk) <- zip x' d]
where
b = add_to_diagonal (-alpha*(1+eps)) a
d = one_ket_solution b y
y = matrix_ket (transposed b) x'
-- </pre>
-- Let us assume a trial vector | x' >, such that
-- <pre>
-- | x' > = | x > + | d >
-- where
-- | x > is an eigenvector we seek
-- | d > is an error of our estimation of | x >
-- </pre>
-- We first form a matrix B, such that:
-- <pre>
-- B = A - alpha I
-- </pre>
-- and multiply it by the trial vector | x' >, which
-- results in a vector | y >
-- <pre>
-- B | x' > = |y >
-- </pre>
-- On the other hand:
-- <pre>
-- B | x' > = B | x > + B | d > = B | d >
-- because
-- B | x > = A | x > - alpha | x > = 0
-- </pre>
-- Comparing both equations we end up with:
-- <pre>
-- B | d > = | y >
-- </pre>
-- that is: with the system of linear equations for unknown error | d >.
-- Finally, we subtract error | d > from our trial vector | x' >
-- to obtain the true eigenvector | x >.
-- <p>
-- But there is a problem with this approach: matrix B is
-- by definition singular and, as such, might be difficult
-- to handle. Either of two steps might fail through division
-- by zero: the QR factorization, or the solution of the
-- triangular system of equations.
-- <p>
-- But if we do not insist that matrix B should be exactly singular,
-- but almost singular:
-- <pre>
-- B = A - alpha (1 + eps) I
-- </pre>
-- then this method might succeed. However, the resulting eigenvector
-- will only be an approximation, and we may have to experiment
-- a bit with different values of "eps" to extrapolate the true
-- eigenvector.
-- <p>
-- The trial vector | x' > can be chosen randomly, although some
-- choices would still lead to singularity problems. Aside from
-- this, the method is quite versatile, because:
-- <ul>
-- <li>
-- Any random vector | x' > leads to the same eigenvector
-- for nondegenerate eigenvalues,
-- <li>
-- Different random vectors | x' >, chosen for degenerate
-- eigenvalues, produce -- in most cases -- distinct eigenvectors.
-- And this is what we want. If we need to, we can then always
-- orthogonalize those eigenvectors either internally (always
-- possible) or externally as well (possible only for hermitian
-- or symmetric matrices).
-- </ul>
-- It might be instructive to compute the eigenvectors for
-- the examples used in the demonstration of eigenvalue computation.
-- We'll mostly leave this to the reader, since this module is
-- already too obese, but a starting sketch follows.
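-- <p>
-- For the degenerate eigenvalue 6 of the symmetric matrix A above,
-- such a computation could take the following shape (a sketch only,
-- since the result is approximate and depends on the chosen "eps"
-- and trial vector):
-- <br>
-- <i>
-- eigenket' [[7,-2,1],[-2,10,-2],[1,-2,7::Double]] 6 0.001 [1,0,0]
-- </i>
-- <br>
-- Repeating the call with a different trial vector, such as [0,0,1],
-- should -- in most cases -- produce a second, linearly independent
-- eigenvector for the same eigenvalue.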
-- <p>
-- <hr>
-- <p>
-- <b>
-- Auxiliary functions
-- </b>
-- <p>
-- The functions below are used in the main algorithms of
-- this module, but they can also be used for testing. For example,
-- the easiest way to test the usage of resources is to use easily
-- definable unit matrices and unit vectors, as in:
-- <pre>
-- one_ket_solution (unit_matrix n::[[Double]])
-- (unit_vector 0 n::[Double])
-- where n = 20, etc.
unit_matrix :: Num a => Int -> [[a]]
unit_matrix m =
--
-- Unit square matrix with dimensions m x m
--
[ [ if j==k then 1 else 0 | j <- [0 .. m-1] ] | k <- [0 .. m-1]]
unit_vector :: Num a => Int -> Int -> [a]
unit_vector i m =
--
-- Unit vector of length m
-- with 1 at position i, zero otherwise
map (\k -> if k==i then 1 else 0) [0 .. m-1]
diagonals :: [[a]] -> [a]
diagonals a =
--
-- Vector made of diagonal components
-- of square matrix a
--
diagonals' a 0
where
diagonals' b n
| null b = []
| otherwise =
(head $ drop n $ head b) : (diagonals' (tail b) (n+1))
-- </pre>
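-- For a quick sanity check of the helpers above (a usage sketch):
-- <pre>
-- diagonals (unit_matrix 3::[[Double]]) -- yields [1.0,1.0,1.0]
-- unit_vector 1 3::[Double] -- yields [0.0,1.0,0.0]
-- </pre>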
-- <pre>
-- -----------------------------------------------------------------------------
-- --
-- -- Copyright:
-- --
-- -- (C) 1998 Numeric Quest Inc., All rights reserved
-- --
-- -- Email:
-- --
-- -- jans@numeric-quest.com
-- --
-- -- License:
-- --
-- -- GNU General Public License, GPL
-- --
-- -----------------------------------------------------------------------------
-- </pre>
-- </ul>
-- </body>
-- </html>
|
{"hexsha": "a54d8b00f0d433b414de1bfd766b5ffd6e12f7fe", "size": 65137, "ext": "hs", "lang": "Haskell", "max_stars_repo_path": "src/Orthogonals.hs", "max_stars_repo_name": "rzil/wLPAs", "max_stars_repo_head_hexsha": "d8cde11e4ff40c802d1f79d423f0e676ccd49d59", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Orthogonals.hs", "max_issues_repo_name": "rzil/wLPAs", "max_issues_repo_head_hexsha": "d8cde11e4ff40c802d1f79d423f0e676ccd49d59", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Orthogonals.hs", "max_forks_repo_name": "rzil/wLPAs", "max_forks_repo_head_hexsha": "d8cde11e4ff40c802d1f79d423f0e676ccd49d59", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.9634997316, "max_line_length": 128, "alphanum_fraction": 0.5438076669, "num_tokens": 18609}
|
module Mandelbrot where
import Diagrams.Backend.Cairo.CmdLine
import Diagrams.Prelude
import Data.Complex
quadratic :: Complex Double -> Complex Double -> Complex Double
quadratic c z = z * z + c
orbit :: Complex Double -> Complex Double -> [Complex Double]
orbit c = iterate (quadratic c)
criticalOrbit :: Complex Double -> [Complex Double]
criticalOrbit = flip orbit 0
maxIter :: Int
maxIter = 64
magOut :: Double
magOut = 2.0
pixel :: [Complex Double] -> Int
pixel = length . takeWhile (\z -> magnitude z <= magOut) . take maxIter
edge :: Int
edge = 256
side :: Int -> Double -> Double -> [Double]
side n v0 v1 =
let sv = (v1 - v0) / fromIntegral n
in [v0, (v0 + sv) .. v1]
sideX :: [Double]
sideX = side edge (-2) 2
sideY :: [Double]
sideY = side edge (-2) 2
outerProduct :: (a -> b -> c) -> [a] -> [b] -> [[c]]
outerProduct f as bs = map (\a -> map (f a) bs) as
grid :: [[Complex Double]]
grid = outerProduct (flip (:+)) sideY sideX
toSquare :: Int -> Diagram B
toSquare n = square 1 # lw medium # fc black # opacity (sqrt o)
where
o = fromIntegral n / fromIntegral maxIter
mandel :: [[Diagram B]]
mandel = map (map (toSquare . pixel . criticalOrbit)) grid
mandelImg :: Diagram B
mandelImg = vcat . map hcat $ mandel
mandelBG :: Diagram B
mandelBG = mandelImg # bgFrame 3 pink
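-- A minimal entry point sketch (an addition, not part of the original
-- module): "mainWith" from Diagrams.Backend.Cairo.CmdLine renders a
-- diagram according to command-line options such as -o (output file)
-- and -w (width in pixels).
main :: IO ()
main = mainWith mandelBG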
|
{"hexsha": "6055a8ead0c2234c028e6ae540c9cb96d75d71ca", "size": 1321, "ext": "hs", "lang": "Haskell", "max_stars_repo_path": "src/Mandelbrot.hs", "max_stars_repo_name": "FayeAlephNil/diagrams-fun", "max_stars_repo_head_hexsha": "a59e35a602ef0eb1c8511c86b10a42d3e96c4692", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Mandelbrot.hs", "max_issues_repo_name": "FayeAlephNil/diagrams-fun", "max_issues_repo_head_hexsha": "a59e35a602ef0eb1c8511c86b10a42d3e96c4692", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Mandelbrot.hs", "max_forks_repo_name": "FayeAlephNil/diagrams-fun", "max_forks_repo_head_hexsha": "a59e35a602ef0eb1c8511c86b10a42d3e96c4692", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.3898305085, "max_line_length": 71, "alphanum_fraction": 0.6525359576, "num_tokens": 409}
|
#!/usr/bin/env python
# coding: utf-8
""" BSD 3-Clause License
Copyright (c) 2020, Fred Kellerman
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from pynq import Xlnk
from pynq.lib.dma import DMA
import numpy as np
class CmaBufferFactory():
def __init__(self):
self._xlnk = Xlnk()
def make_cma_buf(self, shape, data_type):
        assert shape != [], "shape must not be empty"
return self._xlnk.cma_array(shape=shape, cacheable=1, dtype=data_type)
def del_cma_buf(self, cma_buf):
cma_buf.close()
"""
This class hides the details of the CMA buffers and DMA itself.
This class can be used with any compatible IP block connected to the DMA.
"""
class SimpleDmaDriver(DMA, CmaBufferFactory):
# This line is always the same for any driver
def __init__(self, description):
DMA.__init__(self, description=description)
CmaBufferFactory.__init__(self)
self.txbuf = []
self.rxbuf = []
bindto = ['xilinx.com:ip:axi_dma:7.1']
def resize_bufs(self, shape, dtype, which='both'):
        assert which in ('rx', 'tx', 'both'), "which must be 'rx', 'tx' or 'both'"
        assert shape != [], "shape must not be empty"
if which == 'tx' or which == 'both' :
if self.txbuf != [] :
self.del_cma_buf(self.txbuf)
self.txbuf = self.make_cma_buf(shape, dtype)
if which == 'rx' or which == 'both' :
if self.rxbuf != [] :
self.del_cma_buf(self.rxbuf)
self.rxbuf = self.make_cma_buf(shape, dtype)
def send_dma(self, wait=True):
self.send_cma_buf(self.txbuf, wait)
def rcv_dma(self, wait=True):
self.rcv_cma_buf(self.rxbuf, wait)
def send_cpy(self, data, wait=True):
"""
Copy data into DMA buffer and send it, waits for send to complete before returning
"""
tx_buf = self.make_cma_buf(data.shape, data.dtype)
tx_buf[0:len(tx_buf)] = data
self.send_cma_buf(tx_buf, wait)
self.del_cma_buf(tx_buf)
def rcv_cpy(self, shape, dtype, wait=True):
"""
Attempts to read up to max_num words, it waits until the transfer is complete before returning
"""
rx_buf = self.make_cma_buf(shape, dtype)
self.rcv_cma_buf(rx_buf, wait)
data = np.array(rx_buf)
self.del_cma_buf(rx_buf)
return data
def rcv_cma_buf(self, cma_only_buf, wait=True):
"""
Attempts to read up to max_num words, it waits until the transfer is complete before returning
"""
self.recvchannel.transfer(cma_only_buf)
if wait == True :
self.recvchannel.wait()
def send_cma_buf(self, cma_only_buf, wait=True):
"""
Copy data into DMA buffer and send it, waits for send to complete before returning
"""
self.sendchannel.transfer(cma_only_buf)
if wait == True :
self.sendchannel.wait()
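# A minimal usage sketch (the bitstream file and IP instance name below are
# hypothetical; running this requires a Zynq board with a matching overlay).
# pynq binds SimpleDmaDriver to every 'xilinx.com:ip:axi_dma:7.1' instance
# found in the loaded overlay via the 'bindto' attribute above.
if __name__ == "__main__":
    from pynq import Overlay
    ol = Overlay("my_design.bit")        # hypothetical bitstream name
    dma = ol.axi_dma_0                   # hypothetical IP instance name
    data = np.arange(1024, dtype=np.uint32)
    dma.send_cpy(data)                   # copy into a CMA buffer and send
    result = dma.rcv_cpy(data.shape, data.dtype)
    print((result == data).all())        # True for a simple loopback design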
|
{"hexsha": "cdc40706667bd3b96e6116a7fae975bc4d1530c7", "size": 4398, "ext": "py", "lang": "Python", "max_stars_repo_path": "axidma.py", "max_stars_repo_name": "FredKellerman/pynq-juliabrot", "max_stars_repo_head_hexsha": "c79165e021a0e50b0bc1318b54090c1de708e700", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2020-08-30T19:48:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T20:29:01.000Z", "max_issues_repo_path": "axidma.py", "max_issues_repo_name": "FredKellerman/pynq-juliabrot", "max_issues_repo_head_hexsha": "c79165e021a0e50b0bc1318b54090c1de708e700", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "axidma.py", "max_forks_repo_name": "FredKellerman/pynq-juliabrot", "max_forks_repo_head_hexsha": "c79165e021a0e50b0bc1318b54090c1de708e700", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.9137931034, "max_line_length": 102, "alphanum_fraction": 0.6784902228, "include": true, "reason": "import numpy", "num_tokens": 1016}
|
"""
AbstractMod{T}
Abstract type for all modules of type T
"""
abstract type AbstractMod{T} end
"""
Mod{T} <: AbstractMod{T}
Structure to store one specific module of type T
"""
# note: passing the input as an array to unique gives an error if dims = 2 is not added
struct Mod{T} <: AbstractMod{T}
m::T
end
# some base base function for the new type
"""
isless(mod1::Mod, mod2::Mod)
Extension of ``Base.isless`` for modules of type ``Mod``.
For more information [see](https://docs.julialang.org/en/v1/base/base/)
"""
Base.isless(mod1::Mod, mod2::Mod) = isless(mod1.m,mod2.m)
"""
isequal(mod1::Mod, mod2::Mod)
Extension of ``Base.isequal`` for modules of type ``Mod``.
For more information [see](https://docs.julialang.org/en/v1/base/base/)
"""
Base.isequal(mod1::Mod, mod2::Mod) = isequal(mod1.m,mod2.m)
"""
Base.length(mod1::Mod)
Extension of ``Base.length`` for modules of type ``Mod``
"""
Base.length(mod1::Mod) = 1
"""
Group_Mod{N <: Mod{T} where T} <: AbstractMod{N}
Structure to group multiple modules.
The input values are first filtered to prevent duplicated modules.
Afterwards, the modules are sorted to give consistent results even if modules are loaded in a different order.
"""
struct Group_Mod{N <: Mod{T} where T} <: AbstractMod{N}
m::Array{N}
Group_Mod(m) = m |> Set |> collect |> sort |> (y -> new{eltype(m)}(y))
end
"""
group_mod(input::Array{T} where T)
A function to facilitate the input of multiple modules. It returns a ``Group_Mod`` structure.
The input is an array containing the data that needs to be transformed to a ``Mod`` and grouped afterwards. See
[`Group_Mod`](@ref)
```jldoctest
Mods = [:a,:b,:c]
grouped_mods = group_mod(Mods)
isa(grouped_mods,Group_Mod)
# output
true
```
"""
group_mod(input::Array{T} where T) = Group_Mod([Mod(newmod) for newmod in input])
"""
	Base.iterate(Group::Group_Mod, state=1)
Extension of ``Base.iterate`` for modules of type ``Group_Mod``.
For more information [see](https://docs.julialang.org/en/v1/manual/interfaces/)
"""
function Base.iterate(Group::Group_Mod, state=1)
if state <= length(Group.m)
mod = Group.m[state]
state += 1
return (mod,state)
else
return
end
end
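# Iteration example (a sketch): `collect` uses the iterate/length/eltype
# methods defined here, so
#   grouped = group_mod([:b, :a])
#   collect(grouped) == grouped.m   # true; the Mods come back in sorted order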
"""
Base.length(m::Group_Mod)
Return the number of `Mod` in `m`
"""
Base.length(m::Group_Mod) = length(m.m)
"""
Base.eltype(m::Group_Mod)
Return the type of the iterator
"""
Base.eltype(::Group_Mod{T}) where {T} = T
|
{"hexsha": "ea62bd4a5d88d1e112c8265b762b0b5cee1528a8", "size": 2477, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/design/Mod.jl", "max_stars_repo_name": "MichielStock/BOMoD.jl", "max_stars_repo_head_hexsha": "b2b9b3cda9e010c5ba1c0815ed3e8a31ae232f99", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/design/Mod.jl", "max_issues_repo_name": "MichielStock/BOMoD.jl", "max_issues_repo_head_hexsha": "b2b9b3cda9e010c5ba1c0815ed3e8a31ae232f99", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/design/Mod.jl", "max_forks_repo_name": "MichielStock/BOMoD.jl", "max_forks_repo_head_hexsha": "b2b9b3cda9e010c5ba1c0815ed3e8a31ae232f99", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.7247706422, "max_line_length": 112, "alphanum_fraction": 0.672991522, "num_tokens": 698}
|
import logging
import time
import numpy
from cqcpy import ft_utils
from cqcpy.ov_blocks import one_e_blocks
from cqcpy.ov_blocks import two_e_blocks
from cqcpy.ov_blocks import two_e_blocks_full
from pyscf import lib
from . import ft_cc_energy
from . import ft_cc_equations
from . import quadrature
einsum = lib.einsum
def form_new_ampl(method, F, I, T1old, T2old, D1, D2, ti, ng, G):
"""Form new amplitudes.
Arguments:
method (str): Amplitude equation type.
F (array): Fock matrix.
I (array): ERI tensor.
T1old (array): T1 amplitudes.
T2old (array): T2 amplitudes.
D1 (array): 1-electron denominators.
D2 (array): 2-electron denominators.
ti (array): time grid.
ng (int): number of time points.
G (array): Quadrature weight matrix.
"""
if method == "CCSD":
T1, T2 = ft_cc_equations.ccsd_stanton(
F, I, T1old, T2old, D1, D2, ti, ng, G)
elif method == "CCD":
T1 = T1old
T2 = ft_cc_equations.ccd_simple(
F, I, T2old, D2, ti, ng, G)
elif method == "LCCSD":
T1, T2 = ft_cc_equations.lccsd_simple(
F, I, T1old, T2old, D1, D2, ti, ng, G)
elif method == "LCCD":
T1 = T1old
T2 = ft_cc_equations.lccd_simple(
F, I, T2old, D2, ti, ng, G)
else:
raise Exception("Unrecognized method keyword")
return T1, T2
def form_new_ampl_u(method, Fa, Fb, Ia, Ib, Iabab, T1aold, T1bold, T2aaold, T2abold, T2bbold,
D1a, D1b, D2aa, D2ab, D2bb, ti, ng, G):
"""Form new amplitudes.
Arguments:
method (str): Amplitude equation type.
F (array): Fock matrix.
I (array): ERI tensor.
T1old (array): T1 amplitudes.
T2old (array): T2 amplitudes.
D1 (array): 1-electron denominators.
D2 (array): 2-electron denominators.
ti (array): time grid.
ng (int): number of time points.
G (array): Quadrature weight matrix.
"""
if method == "CCSD":
T1out, T2out = ft_cc_equations.uccsd_stanton(
Fa, Fb, Ia, Ib, Iabab, T1aold, T1bold, T2aaold, T2abold, T2bbold,
D1a, D1b, D2aa, D2ab, D2bb, ti, ng, G)
#elif method == "CCD":
# T1 = T1old
# T2 = ft_cc_equations.ccd_simple(F,I,T2old,
# D2,ti,ng,G)
#elif method == "LCCSD":
# T1,T2 = ft_cc_equations.lccsd_simple(F,I,T1old,T2old,
# D1,D2,ti,ng,G)
#elif method == "LCCD":
# T1 = T1old
# T2 = ft_cc_equations.lccd_simple(F,I,T2old,
# D2,ti,ng,G)
else:
raise Exception("Unrecognized method keyword for unrestricted calc")
return T1out, T2out
def form_new_ampl_extrap(ig, method, F, I, T1, T2, T1bar, T2bar, D1, D2, ti, ng, G):
if method == "CCSD":
T1, T2 = ft_cc_equations.ccsd_stanton_single(
ig, F, I, T1, T2, T1bar, T2bar, D1, D2, ti, ng, G)
else:
raise Exception("Unrecognized method keyword")
return T1, T2
def form_new_ampl_extrap_u(ig, method, Fa, Fb, Ia, Ib, Iabab,
T1a, T1b, T2aa, T2ab, T2bb, T1bara, T1barb,
T2baraa, T2barab, T2barbb, D1a, D1b,
D2aa, D2ab, D2bb, ti, ng, G):
if method == "CCSD":
T1, T2 = ft_cc_equations.uccsd_stanton_single(
ig, Fa, Fb, Ia, Ib, Iabab, T1a, T1b, T2aa, T2ab, T2bb, T1bara, T1barb,
T2baraa, T2barab, T2barbb, D1a, D1b, D2aa, D2ab, D2bb, ti, ng, G)
else:
raise Exception("Unrecognized method keyword")
return T1, T2
def ft_cc_iter(method, T1old, T2old, F, I, D1, D2, g, G, beta, ng, ti,
iprint, conv_options):
"""Form new amplitudes.
Arguments:
method (str): Amplitude equation type.
F (array): Fock matrix.
I (array): ERI tensor.
T1old (array): T1 amplitudes.
T2old (array): T2 amplitudes.
D1 (array): 1-electron denominators.
D2 (array): 2-electron denominators.
g (array): quadrature weight vector.
G (array): quadrature weight matrix.
beta (float): inverse temperature.
ti (array): time grid.
ng (int): number of time points.
iprint (int): print level.
conv_options (dict): Convergence options.
"""
tbeg = time.time()
converged = False
ethresh = conv_options["econv"]
tthresh = conv_options["tconv"]
max_iter = conv_options["max_iter"]
alpha = conv_options["damp"]
i = 0
Eold = 888888888.888888888
nl1 = numpy.linalg.norm(T1old) + 0.1
nl2 = numpy.linalg.norm(T2old) + 0.1
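    # Damped fixed-point iteration: each sweep forms new amplitudes and mixes
    # them with the previous ones, T_old <- alpha*T_old + (1 - alpha)*T_new,
    # which damps oscillations in slowly convergent cases.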
while i < max_iter and not converged:
# form new T1 and T2
T1, T2 = form_new_ampl(method, F, I, T1old, T2old, D1, D2, ti, ng, G)
res1 = numpy.linalg.norm(T1 - T1old) / nl1
res2 = numpy.linalg.norm(T2 - T2old) / nl2
# damp new T-amplitudes
T1old = alpha*T1old + (1.0 - alpha)*T1
T2old = alpha*T2old + (1.0 - alpha)*T2
nl1 = numpy.linalg.norm(T1old) + 0.1
nl2 = numpy.linalg.norm(T2old) + 0.1
# compute energy
E = ft_cc_energy.ft_cc_energy(
T1old, T2old, F.ov, I.oovv, g, beta)
# determine convergence
if isinstance(E, complex):
logging.info(' %2d %.10f %.3E %.4E' % (i + 1, E.real, E.imag, res1 + res2))
else:
logging.info(' %2d %.10f %.4E' % (i + 1, E, res1 + res2))
i = i + 1
if numpy.abs(E - Eold) < ethresh and res1+res2 < tthresh:
converged = True
Eold = E
if not converged:
logging.warning("{} did not converge!".format(method))
tend = time.time()
logging.info("Total {} time: {:.4f} s".format(method, (tend - tbeg)))
return Eold, T1old, T2old
def ft_cc_iter_extrap(method, F, I, D1, D2, g, G, beta, ng, ti,
iprint, conv_options):
"""Form new amplitudes.
Arguments:
method (str): Amplitude equation type.
F (array): Fock matrix.
I (array): ERI tensor.
D1 (array): 1-electron denominators.
D2 (array): 2-electron denominators.
g (array): quadrature weight vector.
G (array): quadrature weight matrix.
beta (float): inverse temperature.
ti (array): time grid.
ng (int): number of time points.
iprint (int): print level.
conv_options (dict): Convergence options.
"""
thresh = conv_options["tconv"]
max_iter = conv_options["max_iter"]
alpha = conv_options["damp"]
no, nv = F.ov.shape
t1bar = numpy.zeros((ng, nv, no), dtype=F.vo.dtype)
t2bar = numpy.zeros((ng, nv, nv, no, no), dtype=I.vvoo.dtype)
T1new = numpy.zeros((ng, nv, no), dtype=t1bar.dtype)
T2new = numpy.zeros((ng, nv, nv, no, no), dtype=t2bar.dtype)
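    # t1bar/t2bar hold the integrand values consumed by the quadrature below;
    # the first two grid points are seeded with the lowest-order values
    # -F.vo and -I.vvoo.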
# loop over grid points
for ig in range(ng):
if ig == 0:
t1bar[0] = -F.vo
t2bar[0] = -I.vvoo
continue # don't bother computing at T = inf
elif ig == 1:
t1bar[ig] = -F.vo
t2bar[ig] = -I.vvoo
T1new[ig] = quadrature.int_tbar1_single(ng, ig, t1bar, ti, D1, G)
T2new[ig] = quadrature.int_tbar2_single(ng, ig, t2bar, ti, D2, G)
else:
# linear extrapolation
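            # seed the guess at ti[ig] from the converged amplitudes
            # at ti[ig - 1] and ti[ig - 2]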
T1new[ig] = T1new[ig - 1]\
+ (T1new[ig - 2] - T1new[ig - 1])*(ti[ig] - ti[ig - 1])/(ti[ig - 2] - ti[ig - 1])
T2new[ig] = T2new[ig - 1]\
+ (T2new[ig - 2] - T2new[ig - 1])*(ti[ig] - ti[ig - 1])/(ti[ig - 2] - ti[ig - 1])
converged = False
nl1 = numpy.sqrt(float(T1new[ig].size))
nl2 = numpy.sqrt(float(T2new[ig].size))
logging.info("Time point {}".format(ig))
i = 0
while i < max_iter and not converged:
# form new T1 and T2
T1, T2 = form_new_ampl_extrap(ig, method, F, I, T1new[ig], T2new[ig],
t1bar, t2bar, D1, D2, ti, ng, G)
res1 = numpy.linalg.norm(T1 - T1new[ig]) / nl1
res2 = numpy.linalg.norm(T2 - T2new[ig]) / nl2
# damp new T-amplitudes
T1new[ig] = alpha*T1new[ig] + (1.0 - alpha)*T1.copy()
T2new[ig] = alpha*T2new[ig] + (1.0 - alpha)*T2.copy()
# determine convergence
logging.info(' %2d %.4E' % (i + 1, res1 + res2))
i = i + 1
if res1 + res2 < thresh:
converged = True
return T1new, T2new
def ft_ucc_iter(method, T1aold, T1bold, T2aaold, T2abold, T2bbold, Fa, Fb, Ia, Ib, Iabab,
D1a, D1b, D2aa, D2ab, D2bb, g, G, beta, ng, ti, iprint, conv_options):
"""Form new amplitudes.
Arguments:
method (str): Amplitude equation type.
F (array): Fock matrix.
I (array): ERI tensor.
T1old (array): T1 amplitudes.
T2old (array): T2 amplitudes.
D1 (array): 1-electron denominators.
D2 (array): 2-electron denominators.
g (array): quadrature weight vector.
G (array): quadrature weight matrix.
beta (float): inverse temperature.
ti (array): time grid.
ng (int): number of time points.
iprint (int): print level.
conv_options (dict): Convergence options.
"""
tbeg = time.time()
converged = False
ethresh = conv_options["econv"]
tthresh = conv_options["tconv"]
max_iter = conv_options["max_iter"]
alpha = conv_options["damp"]
i = 0
Eold = 888888888.888888888
while i < max_iter and not converged:
T1out, T2out = form_new_ampl_u(
method, Fa, Fb, Ia, Ib, Iabab, T1aold, T1bold, T2aaold,
T2abold, T2bbold, D1a, D1b, D2aa, D2ab, D2bb, ti, ng, G)
nl1 = numpy.linalg.norm(T1aold) + 0.1
nl1 += numpy.linalg.norm(T1bold)
nl2 = numpy.linalg.norm(T2aaold) + 0.1
nl2 += numpy.linalg.norm(T2abold)
nl2 += numpy.linalg.norm(T2bbold)
res1 = numpy.linalg.norm(T1out[0] - T1aold) / nl1
res1 += numpy.linalg.norm(T1out[1] - T1bold) / nl1
res2 = numpy.linalg.norm(T2out[0] - T2aaold) / nl2
res2 += numpy.linalg.norm(T2out[1] - T2abold) / nl2
res2 += numpy.linalg.norm(T2out[2] - T2bbold) / nl2
# damp new T-amplitudes
T1aold = alpha*T1aold + (1.0 - alpha)*T1out[0]
T1bold = alpha*T1bold + (1.0 - alpha)*T1out[1]
T2aaold = alpha*T2aaold + (1.0 - alpha)*T2out[0]
T2abold = alpha*T2abold + (1.0 - alpha)*T2out[1]
T2bbold = alpha*T2bbold + (1.0 - alpha)*T2out[2]
# compute energy
E = ft_cc_energy.ft_ucc_energy(T1aold, T1bold, T2aaold, T2abold, T2bbold,
Fa.ov, Fb.ov, Ia.oovv, Ib.oovv, Iabab.oovv, g, beta)
# determine convergence
if isinstance(E, complex):
logging.info(' %2d %.10f %.3E %.4E' % (i+1, E.real, E.imag, res1+res2))
else:
logging.info(' %2d %.10f %.4E' % (i+1, E, res1+res2))
i = i + 1
if numpy.abs(E - Eold) < ethresh and res1+res2 < tthresh:
converged = True
Eold = E
if not converged:
logging.warning("{} did not converge!".format(method))
tend = time.time()
logging.info("Total {} time: {:.4f} s".format(method, (tend - tbeg)))
return Eold, (T1aold, T1bold), (T2aaold, T2abold, T2bbold)
def ft_ucc_iter_extrap(method, Fa, Fb, Ia, Ib, Iabab, D1a, D1b, D2aa, D2ab, D2bb,
g, G, beta, ng, ti, iprint, conv_options):
"""Form new amplitudes.
Arguments:
method (str): Amplitude equation type.
F (array): Fock matrix.
I (array): ERI tensor.
D1 (array): 1-electron denominators.
D2 (array): 2-electron denominators.
g (array): quadrature weight vector.
G (array): quadrature weight matrix.
beta (float): inverse temperature.
ti (array): time grid.
ng (int): number of time points.
iprint (int): print level.
conv_options (dict): Convergence options.
"""
thresh = conv_options["tconv"]
max_iter = conv_options["max_iter"]
alpha = conv_options["damp"]
noa, nva = Fa.ov.shape
nob, nvb = Fb.ov.shape
t1bara = numpy.zeros((ng, nva, noa), dtype=Fa.vo.dtype)
t1barb = numpy.zeros((ng, nvb, nob), dtype=Fb.vo.dtype)
t2baraa = numpy.zeros((ng, nva, nva, noa, noa), dtype=Ia.vvoo.dtype)
t2barab = numpy.zeros((ng, nva, nvb, noa, nob), dtype=Iabab.vvoo.dtype)
t2barbb = numpy.zeros((ng, nvb, nvb, nob, nob), dtype=Ib.vvoo.dtype)
T1newa = numpy.zeros(t1bara.shape, dtype=t1bara.dtype)
T1newb = numpy.zeros(t1barb.shape, dtype=t1barb.dtype)
T2newaa = numpy.zeros(t2baraa.shape, dtype=t2baraa.dtype)
T2newab = numpy.zeros(t2barab.shape, dtype=t2barab.dtype)
T2newbb = numpy.zeros(t2barbb.shape, dtype=t2barbb.dtype)
# loop over grid points
for ig in range(ng):
if ig == 0:
t1bara[0] = -Fa.vo
t1barb[0] = -Fb.vo
t2baraa[0] = -Ia.vvoo
t2barab[0] = -Iabab.vvoo
t2barbb[0] = -Ib.vvoo
continue # don't bother computing at T = inf
elif ig == 1:
t1bara[ig] = -Fa.vo
t1barb[ig] = -Fb.vo
t2baraa[ig] = -Ia.vvoo
t2barab[ig] = -Iabab.vvoo
t2barbb[ig] = -Ib.vvoo
T1newa[ig] = quadrature.int_tbar1_single(ng, ig, t1bara, ti, D1a, G)
T1newb[ig] = quadrature.int_tbar1_single(ng, ig, t1barb, ti, D1b, G)
T2newaa[ig] = quadrature.int_tbar2_single(ng, ig, t2baraa, ti, D2aa, G)
T2newab[ig] = quadrature.int_tbar2_single(ng, ig, t2barab, ti, D2ab, G)
T2newbb[ig] = quadrature.int_tbar2_single(ng, ig, t2barbb, ti, D2bb, G)
else:
# linear extrapolation
fac = (ti[ig] - ti[ig - 1])/(ti[ig - 2] - ti[ig - 1])
T1newa[ig] = T1newa[ig - 1] + (T1newa[ig - 2] - T1newa[ig - 1])*fac
T1newb[ig] = T1newb[ig - 1] + (T1newb[ig - 2] - T1newb[ig - 1])*fac
T2newaa[ig] = T2newaa[ig - 1] + (T2newaa[ig - 2] - T2newaa[ig - 1])*fac
T2newab[ig] = T2newab[ig - 1] + (T2newab[ig - 2] - T2newab[ig - 1])*fac
T2newbb[ig] = T2newbb[ig - 1] + (T2newbb[ig - 2] - T2newbb[ig - 1])*fac
converged = False
nl1 = numpy.sqrt(float(T1newa[ig].size))
nl2 = numpy.sqrt(float(T2newaa[ig].size))
logging.info("Time point {}".format(ig))
i = 0
while i < max_iter and not converged:
# form new T1 and T2
(T1a, T1b), (T2aa, T2ab, T2bb) = form_new_ampl_extrap_u(ig, method, Fa, Fb, Ia, Ib, Iabab,
T1newa[ig], T1newb[ig], T2newaa[ig], T2newab[ig], T2newbb[ig],
t1bara, t1barb, t2baraa, t2barab, t2barbb, D1a, D1b, D2aa, D2ab, D2bb, ti, ng, G)
res1 = numpy.linalg.norm(T1a - T1newa[ig]) / nl1
res1 += numpy.linalg.norm(T1b - T1newb[ig]) / nl1
res2 = numpy.linalg.norm(T2aa - T2newaa[ig]) / nl2
res2 += numpy.linalg.norm(T2ab - T2newab[ig]) / nl2
res2 += numpy.linalg.norm(T2bb - T2newbb[ig]) / nl2
# damp new T-amplitudes
T1newa[ig] = alpha*T1newa[ig] + (1.0 - alpha)*T1a.copy()
T1newb[ig] = alpha*T1newb[ig] + (1.0 - alpha)*T1b.copy()
T2newaa[ig] = alpha*T2newaa[ig] + (1.0 - alpha)*T2aa.copy()
T2newab[ig] = alpha*T2newab[ig] + (1.0 - alpha)*T2ab.copy()
T2newbb[ig] = alpha*T2newbb[ig] + (1.0 - alpha)*T2bb.copy()
# determine convergence
logging.info(' %2d %.4E' % (i+1, res1+res2))
i = i + 1
if res1 + res2 < thresh:
converged = True
return (T1newa, T1newb), (T2newaa, T2newab, T2newbb)
def ft_lambda_iter(method, L1old, L2old, T1, T2, F, I, D1, D2,
g, G, beta, ng, ti, iprint, conv_options):
"""Form new amplitudes.
Arguments:
method (str): Amplitude equation type.
F (array): Fock matrix.
I (array): ERI tensor.
T1old (array): T1 amplitudes.
T2old (array): T2 amplitudes.
D1 (array): 1-electron denominators.
D2 (array): 2-electron denominators.
beta (float): inverse temperature.
ti (array): time grid.
ng (int): number of time points.
iprint (int): print level.
conv_options (dict): Convergence options.
"""
tbeg = time.time()
converged = False
thresh = conv_options["tconv"]
max_iter = conv_options["max_iter"]
alpha = conv_options["damp"]
i = 0
nl1 = numpy.linalg.norm(L1old) + 0.1
nl2 = numpy.linalg.norm(L2old) + 0.1
while i < max_iter and not converged:
if method == "LCCSD":
L1, L2 = ft_cc_equations.lccsd_lambda_simple(
F, I, T1, T2, L1old, L2old, D1, D2, ti, ng, g, G, beta)
elif method == "LCCD":
L1 = L1old
L2 = ft_cc_equations.lccd_lambda_simple(F, I, T2,
L2old, D2, ti, ng, g, G, beta)
elif method == "CCSD":
L1, L2 = ft_cc_equations.ccsd_lambda_opt(
F, I, T1, T2, L1old, L2old, D1, D2, ti, ng, g, G, beta)
elif method == "CCD":
L1 = L1old
L2 = ft_cc_equations.ccd_lambda_simple(F, I, T2,
L2old, D2, ti, ng, g, G, beta)
else:
raise Exception("Unrecognized method keyword")
res1 = numpy.linalg.norm(L1 - L1old) / nl1
res2 = numpy.linalg.norm(L2 - L2old) / nl2
# compute new L-amplitudes
L1old = alpha*L1old + (1.0 - alpha)*L1
L2old = alpha*L2old + (1.0 - alpha)*L2
nl1 = numpy.linalg.norm(L1old) + 0.1
nl2 = numpy.linalg.norm(L2old) + 0.1
L1 = None
L2 = None
# determine convergence
logging.info(' %2d %.10f' % (i + 1, res1 + res2))
i = i + 1
if res1 + res2 < thresh:
converged = True
    if not converged:
        logging.warning("{} Lambda-equations did not converge!".format(method))
    tend = time.time()
    logging.info("Total {} Lambda time: {:.4f} s".format(method, (tend - tbeg)))
return L1old, L2old
def ft_ulambda_iter(method, L1ain, L1bin, L2aain, L2abin, L2bbin, T1aold, T1bold,
T2aaold, T2abold, T2bbold, Fa, Fb, Ia, Ib, Iabab, D1a, D1b, D2aa, D2ab, D2bb,
g, G, beta, ng, ti, iprint, conv_options):
"""Form new amplitudes.
Arguments:
method (str): Amplitude equation type.
F (array): Fock matrix.
I (array): ERI tensor.
T1old (array): T1 amplitudes.
T2old (array): T2 amplitudes.
D1 (array): 1-electron denominators.
D2 (array): 2-electron denominators.
beta (float): inverse temperature.
ti (array): time grid.
ng (int): number of time points.
iprint (int): print level.
conv_options (dict): Convergence options.
"""
tbeg = time.time()
converged = False
thresh = conv_options["tconv"]
max_iter = conv_options["max_iter"]
alpha = conv_options["damp"]
i = 0
L1aold = L1ain
L1bold = L1bin
L2aaold = L2aain
L2abold = L2abin
L2bbold = L2bbin
nl1 = numpy.linalg.norm(L1aold) + numpy.linalg.norm(L1bold) + 0.1
nl2 = numpy.linalg.norm(L2aaold) + 0.1
nl2 += numpy.linalg.norm(L2bbold)
nl2 += 4*numpy.linalg.norm(L2abold)
while i < max_iter and not converged:
if method == "LCCSD":
raise Exception("U-LCCSD lambda equations not implemented")
elif method == "LCCD":
raise Exception("U-LCCD lambda equations not implemented")
elif method == "CCSD":
L1a, L1b, L2aa, L2ab, L2bb = ft_cc_equations.uccsd_lambda_opt(
Fa, Fb, Ia, Ib, Iabab, T1aold, T1bold, T2aaold, T2abold, T2bbold,
L1aold, L1bold, L2aaold, L2abold, L2bbold, D1a, D1b, D2aa, D2ab, D2bb,
ti, ng, g, G, beta)
elif method == "CCD":
raise Exception("UCCD lambda equations not implemented")
else:
raise Exception("Unrecognized method keyword")
res1 = numpy.linalg.norm(L1a - L1aold) / nl1
res1 += numpy.linalg.norm(L1b - L1bold) / nl1
res2 = numpy.linalg.norm(L2aa - L2aaold) / nl2
res2 += numpy.linalg.norm(L2ab - L2abold) / nl2
res2 += numpy.linalg.norm(L2bb - L2bbold) / nl2
# compute new L-amplitudes
L1aold = alpha*L1aold + (1.0 - alpha)*L1a
L1bold = alpha*L1bold + (1.0 - alpha)*L1b
L2aaold = alpha*L2aaold + (1.0 - alpha)*L2aa
L2abold = alpha*L2abold + (1.0 - alpha)*L2ab
L2bbold = alpha*L2bbold + (1.0 - alpha)*L2bb
nl1 = numpy.linalg.norm(L1aold) + numpy.linalg.norm(L1bold) + 0.1
nl2 = numpy.linalg.norm(L2aaold) + 0.1
nl2 += numpy.linalg.norm(L2bbold)
nl2 += 4*numpy.linalg.norm(L2abold)
L1a = None
L1b = None
L2aa = None
L2ab = None
L2bb = None
# determine convergence
logging.info(' %2d %.10f' % (i + 1, res1 + res2))
i = i + 1
if res1 + res2 < thresh:
converged = True
    if not converged:
        logging.warning("{} Lambda-equations did not converge!".format(method))
    tend = time.time()
    logging.info("Total {} Lambda time: {:.4f} s".format(method, (tend - tbeg)))
return L1aold, L1bold, L2aaold, L2abold, L2bbold
def ft_integrals(sys, en, beta, mu):
"""Return one and two-electron integrals in the general spin orbital basis."""
fo = ft_utils.ff(beta, en, mu)
fv = ft_utils.ffv(beta, en, mu)
sfo = numpy.sqrt(fo)
sfv = numpy.sqrt(fv)
# get FT fock matrix
fmo = sys.g_fock_tot()
fmo = fmo - numpy.diag(en)
# get ERIs
eri = sys.g_aint_tot()
# pre-contract with fermi factors
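    # each block picks up the square roots of the thermal occupations,
    # e.g. Fov[i, a] = fmo[i, a]*sfo[i]*sfv[a]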
Foo = einsum('ij,i,j->ij', fmo, sfo, sfo)
Fov = einsum('ia,i,a->ia', fmo, sfo, sfv)
Fvo = einsum('ai,a,i->ai', fmo, sfv, sfo)
Fvv = einsum('ab,a,b->ab', fmo, sfv, sfv)
F = one_e_blocks(Foo, Fov, Fvo, Fvv)
Ivvvv = einsum('abcd,a,b,c,d->abcd', eri, sfv, sfv, sfv, sfv)
Ivvvo = einsum('abci,a,b,c,i->abci', eri, sfv, sfv, sfv, sfo)
Ivovv = einsum('aibc,a,i,b,c->aibc', eri, sfv, sfo, sfv, sfv)
Ivvoo = einsum('abij,a,b,i,j->abij', eri, sfv, sfv, sfo, sfo)
Ivovo = einsum('ajbi,a,j,b,i->ajbi', eri, sfv, sfo, sfv, sfo)
Ioovv = einsum('ijab,i,j,a,b->ijab', eri, sfo, sfo, sfv, sfv)
Ivooo = einsum('akij,a,k,i,j->akij', eri, sfv, sfo, sfo, sfo)
Iooov = einsum('jkia,j,k,i,a->jkia', eri, sfo, sfo, sfo, sfv)
Ioooo = einsum('klij,k,l,i,j->klij', eri, sfo, sfo, sfo, sfo)
I = two_e_blocks(
vvvv=Ivvvv, vvvo=Ivvvo, vovv=Ivovv, vvoo=Ivvoo,
vovo=Ivovo, oovv=Ioovv, vooo=Ivooo, ooov=Iooov, oooo=Ioooo)
return F, I
def ft_integrals_2e(sys, en, beta, mu):
"""Return one and two-electron integrals in the general spin orbital basis."""
fo = ft_utils.ff(beta, en, mu)
fv = ft_utils.ffv(beta, en, mu)
sfo = numpy.sqrt(fo)
sfv = numpy.sqrt(fv)
# get ERIs
eri = sys.g_aint_tot()
Ivvvv = einsum('abcd,a,b,c,d->abcd', eri, sfv, sfv, sfv, sfv)
Ivvvo = einsum('abci,a,b,c,i->abci', eri, sfv, sfv, sfv, sfo)
Ivovv = einsum('aibc,a,i,b,c->aibc', eri, sfv, sfo, sfv, sfv)
Ivvoo = einsum('abij,a,b,i,j->abij', eri, sfv, sfv, sfo, sfo)
Ivovo = einsum('ajbi,a,j,b,i->ajbi', eri, sfv, sfo, sfv, sfo)
Ioovv = einsum('ijab,i,j,a,b->ijab', eri, sfo, sfo, sfv, sfv)
Ivooo = einsum('akij,a,k,i,j->akij', eri, sfv, sfo, sfo, sfo)
Iooov = einsum('jkia,j,k,i,a->jkia', eri, sfo, sfo, sfo, sfv)
Ioooo = einsum('klij,k,l,i,j->klij', eri, sfo, sfo, sfo, sfo)
I = two_e_blocks(
vvvv=Ivvvv, vvvo=Ivvvo, vovv=Ivovv, vvoo=Ivvoo,
vovo=Ivovo, oovv=Ioovv, vooo=Ivooo, ooov=Iooov, oooo=Ioooo)
return I
def ft_integrals_neq_1e(sys, en, beta, mu, t):
"""Return one and two-electron integrals in the general spin orbital basis."""
fo = ft_utils.ff(beta, en, mu)
fv = ft_utils.ffv(beta, en, mu)
sfo = numpy.sqrt(fo)
sfv = numpy.sqrt(fv)
# get FT fock matrix
fmo = sys.g_fock_tot(t=t)
fmo = fmo - numpy.diag(en)
# pre-contract with fermi factors
Foo = einsum('ij,i,j->ij', fmo, sfo, sfo)
Fov = einsum('ia,i,a->ia', fmo, sfo, sfv)
Fvo = einsum('ai,a,i->ai', fmo, sfv, sfo)
Fvv = einsum('ab,a,b->ab', fmo, sfv, sfv)
F = one_e_blocks(Foo, Fov, Fvo, Fvv)
return F
def get_ft_integrals_neq(sys, en, beta, mu):
"""Return one and two-electron integrals in the general spin orbital basis
including real-time component."""
fo = ft_utils.ff(beta, en, mu)
fv = ft_utils.ffv(beta, en, mu)
# get FT fock matrix
fmo = sys.g_fock_tot(direc='f')
fmo = (fmo - numpy.diag(en)).astype(complex)
# pre-contract with fermi factors
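    # note: unlike the equilibrium integrals above, these contract the full
    # occupations fo/fv rather than their square roots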
Foo = einsum('ij,j->ij', fmo[0], fo)
Fvo = einsum('ai,a,i->ai', fmo[0], fv, fo)
Fvv = einsum('ab,a->ab', fmo[0], fv)
F = one_e_blocks(Foo, fmo[0], Fvo, Fvv)
Foo = einsum('yij,j->yij', fmo, fo)
Fvo = einsum('yai,a,i->yai', fmo, fv, fo)
Fvv = einsum('yab,a->yab', fmo, fv)
Ff = one_e_blocks(Foo, fmo, Fvo, Fvv)
fmo = sys.g_fock_tot(direc='b')
fmo = (fmo - numpy.diag(en)).astype(complex)
Foo = einsum('yij,j->yij', fmo, fo)
Fvo = einsum('yai,a,i->yai', fmo, fv, fo)
Fvv = einsum('yab,a->yab', fmo, fv)
Fb = one_e_blocks(Foo, fmo, Fvo, Fvv)
# get ERIs
eri = sys.g_aint_tot().astype(complex)
Ivvvv = einsum('abcd,a,b->abcd', eri, fv, fv)
Ivvvo = einsum('abci,a,b,i->abci', eri, fv, fv, fo)
Ivovv = einsum('aibc,a->aibc', eri, fv)
Ivvoo = einsum('abij,a,b,i,j->abij', eri, fv, fv, fo, fo)
Ivovo = einsum('ajbi,a,i->ajbi', eri, fv, fo)
Ivooo = einsum('akij,a,i,j->akij', eri, fv, fo, fo)
Iooov = einsum('jkia,i->jkia', eri, fo)
Ioooo = einsum('klij,i,j->klij', eri, fo, fo)
I = two_e_blocks(
vvvv=Ivvvv, vvvo=Ivvvo, vovv=Ivovv, vvoo=Ivvoo,
vovo=Ivovo, oovv=eri, vooo=Ivooo, ooov=Iooov, oooo=Ioooo)
return F, Ff, Fb, I
def uft_integrals(sys, ea, eb, beta, mu):
"""Return one and two-electron integrals in the general spin orbital basis."""
foa = ft_utils.ff(beta, ea, mu)
fva = ft_utils.ffv(beta, ea, mu)
fob = ft_utils.ff(beta, eb, mu)
fvb = ft_utils.ffv(beta, eb, mu)
sfoa = numpy.sqrt(foa)
sfva = numpy.sqrt(fva)
sfob = numpy.sqrt(fob)
sfvb = numpy.sqrt(fvb)
# get FT fock matrix
fa, fb = sys.u_fock_tot()
fa = fa - numpy.diag(ea)
fb = fb - numpy.diag(eb)
# pre-contract with fermi factors
Fooa = einsum('ij,i,j->ij', fa, sfoa, sfoa)
Fova = einsum('ia,i,a->ia', fa, sfoa, sfva)
Fvoa = einsum('ai,a,i->ai', fa, sfva, sfoa)
Fvva = einsum('ab,a,b->ab', fa, sfva, sfva)
Fa = one_e_blocks(Fooa, Fova, Fvoa, Fvva)
Foob = einsum('ij,i,j->ij', fb, sfob, sfob)
Fovb = einsum('ia,i,a->ia', fb, sfob, sfvb)
Fvob = einsum('ai,a,i->ai', fb, sfvb, sfob)
Fvvb = einsum('ab,a,b->ab', fb, sfvb, sfvb)
Fb = one_e_blocks(Foob, Fovb, Fvob, Fvvb)
# get ERIs
eriA, eriB, eriAB = sys.u_aint_tot()
Ivvvv = einsum('abcd,a,b,c,d->abcd', eriA, sfva, sfva, sfva, sfva)
Ivvvo = einsum('abci,a,b,c,i->abci', eriA, sfva, sfva, sfva, sfoa)
Ivovv = einsum('aibc,a,i,b,c->aibc', eriA, sfva, sfoa, sfva, sfva)
Ivvoo = einsum('abij,a,b,i,j->abij', eriA, sfva, sfva, sfoa, sfoa)
Ioovv = einsum('ijab,i,j,a,b->ijab', eriA, sfoa, sfoa, sfva, sfva)
Ivovo = einsum('ajbi,a,j,b,i->ajbi', eriA, sfva, sfoa, sfva, sfoa)
Ivooo = einsum('akij,a,k,i,j->akij', eriA, sfva, sfoa, sfoa, sfoa)
Iooov = einsum('jkia,j,k,i,a->jkia', eriA, sfoa, sfoa, sfoa, sfva)
Ioooo = einsum('klij,k,l,i,j->klij', eriA, sfoa, sfoa, sfoa, sfoa)
Ia = two_e_blocks(
vvvv=Ivvvv, vvvo=Ivvvo, vovv=Ivovv, vvoo=Ivvoo,
vovo=Ivovo, oovv=Ioovv, vooo=Ivooo, ooov=Iooov, oooo=Ioooo)
Ivvvv = einsum('abcd,a,b,c,d->abcd', eriB, sfvb, sfvb, sfvb, sfvb)
Ivvvo = einsum('abci,a,b,c,i->abci', eriB, sfvb, sfvb, sfvb, sfob)
Ivovv = einsum('aibc,a,i,b,c->aibc', eriB, sfvb, sfob, sfvb, sfvb)
Ivvoo = einsum('abij,a,b,i,j->abij', eriB, sfvb, sfvb, sfob, sfob)
Ioovv = einsum('ijab,i,j,a,b->ijab', eriB, sfob, sfob, sfvb, sfvb)
Ivovo = einsum('ajbi,a,j,b,i->ajbi', eriB, sfvb, sfob, sfvb, sfob)
Ivooo = einsum('akij,a,k,i,j->akij', eriB, sfvb, sfob, sfob, sfob)
Iooov = einsum('jkia,j,k,i,a->jkia', eriB, sfob, sfob, sfob, sfvb)
Ioooo = einsum('klij,k,l,i,j->klij', eriB, sfob, sfob, sfob, sfob)
Ib = two_e_blocks(
vvvv=Ivvvv, vvvo=Ivvvo, vovv=Ivovv, vvoo=Ivvoo,
vovo=Ivovo, oovv=Ioovv, vooo=Ivooo, ooov=Iooov, oooo=Ioooo)
Ivvvv = einsum('abcd,a,b,c,d->abcd', eriAB, sfva, sfvb, sfva, sfvb)
Ivvvo = einsum('abci,a,b,c,i->abci', eriAB, sfva, sfvb, sfva, sfob)
Ivvov = einsum('abic,a,b,i,c->abic', eriAB, sfva, sfvb, sfoa, sfvb)
Ivovv = einsum('aibc,a,i,b,c->aibc', eriAB, sfva, sfob, sfva, sfvb)
Iovvv = einsum('iabc,i,a,b,c->iabc', eriAB, sfoa, sfvb, sfva, sfvb)
Ivvoo = einsum('abij,a,b,i,j->abij', eriAB, sfva, sfvb, sfoa, sfob)
Ivovo = einsum('ajbi,a,j,b,i->ajbi', eriAB, sfva, sfob, sfva, sfob)
Iovvo = einsum('jabi,j,a,b,i->jabi', eriAB, sfoa, sfvb, sfva, sfob)
Ivoov = einsum('ajib,a,j,i,b->ajib', eriAB, sfva, sfob, sfoa, sfvb)
Iovov = einsum('jaib,j,a,i,b->jaib', eriAB, sfoa, sfvb, sfoa, sfvb)
Ioovv = einsum('ijab,i,j,a,b->ijab', eriAB, sfoa, sfob, sfva, sfvb)
Ivooo = einsum('akij,a,k,i,j->akij', eriAB, sfva, sfob, sfoa, sfob)
Iovoo = einsum('kaij,k,a,i,j->kaij', eriAB, sfoa, sfvb, sfoa, sfob)
Ioovo = einsum('jkai,j,k,a,i->jkai', eriAB, sfoa, sfob, sfva, sfob)
Iooov = einsum('jkia,j,k,i,a->jkia', eriAB, sfoa, sfob, sfoa, sfvb)
Ioooo = einsum('klij,k,l,i,j->klij', eriAB, sfoa, sfob, sfoa, sfob)
Iabab = two_e_blocks_full(
vvvv=Ivvvv, vvvo=Ivvvo, vvov=Ivvov,
vovv=Ivovv, ovvv=Iovvv, vvoo=Ivvoo,
vovo=Ivovo, ovvo=Iovvo, voov=Ivoov,
ovov=Iovov, oovv=Ioovv, vooo=Ivooo,
ovoo=Iovoo, oovo=Ioovo, ooov=Iooov,
oooo=Ioooo)
return Fa, Fb, Ia, Ib, Iabab
def rft_integrals(sys, en, beta, mu):
"""Return one and two-electron integrals in the general spin orbital basis."""
fo = ft_utils.ff(beta, en, mu)
fv = ft_utils.ffv(beta, en, mu)
sfo = numpy.sqrt(fo)
sfv = numpy.sqrt(fv)
# get FT fock matrix
fmo = sys.r_fock_tot()
fmo = fmo - numpy.diag(en)
# get ERIs
eri = sys.r_int_tot()
# pre-contract with fermi factors
Foo = einsum('ij,i,j->ij', fmo, sfo, sfo)
Fov = einsum('ia,i,a->ia', fmo, sfo, sfv)
Fvo = einsum('ai,a,i->ai', fmo, sfv, sfo)
Fvv = einsum('ab,a,b->ab', fmo, sfv, sfv)
F = one_e_blocks(Foo, Fov, Fvo, Fvv)
Ivvvv = einsum('abcd,a,b,c,d->abcd', eri, sfv, sfv, sfv, sfv)
Ivvvo = einsum('abci,a,b,c,i->abci', eri, sfv, sfv, sfv, sfo)
Ivvov = einsum('abic,a,b,i,c->abic', eri, sfv, sfv, sfo, sfv)
Ivovv = einsum('aibc,a,i,b,c->aibc', eri, sfv, sfo, sfv, sfv)
Iovvv = einsum('iabc,i,a,b,c->iabc', eri, sfo, sfv, sfv, sfv)
Ivvoo = einsum('abij,a,b,i,j->abij', eri, sfv, sfv, sfo, sfo)
Ivovo = einsum('ajbi,a,j,b,i->ajbi', eri, sfv, sfo, sfv, sfo)
Iovvo = einsum('jabi,j,a,b,i->jabi', eri, sfo, sfv, sfv, sfo)
Ivoov = einsum('ajib,a,j,i,b->ajib', eri, sfv, sfo, sfo, sfv)
Iovov = einsum('jaib,j,a,i,b->jaib', eri, sfo, sfv, sfo, sfv)
Ioovv = einsum('ijab,i,j,a,b->ijab', eri, sfo, sfo, sfv, sfv)
Ivooo = einsum('akij,a,k,i,j->akij', eri, sfv, sfo, sfo, sfo)
Iovoo = einsum('kaij,k,a,i,j->kaij', eri, sfo, sfv, sfo, sfo)
Ioovo = einsum('jkai,j,k,a,i->jkai', eri, sfo, sfo, sfv, sfo)
Iooov = einsum('jkia,j,k,i,a->jkia', eri, sfo, sfo, sfo, sfv)
Ioooo = einsum('klij,k,l,i,j->klij', eri, sfo, sfo, sfo, sfo)
I = two_e_blocks_full(
vvvv=Ivvvv, vvvo=Ivvvo, vvov=Ivvov, vovv=Ivovv,
ovvv=Iovvv, vvoo=Ivvoo, vovo=Ivovo, ovvo=Iovvo,
voov=Ivoov, ovov=Iovov, oovv=Ioovv, vooo=Ivooo,
ovoo=Iovoo, oovo=Ioovo, ooov=Iooov, oooo=Ioooo)
return F, I
def ft_active_integrals(sys, en, focc, fvir, iocc, ivir):
"""Return one and two-electron integrals in the general spin orbital basis
with small occupations excluded."""
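    # focc/fvir are the thermal occupations of the retained (active) orbitals;
    # iocc/ivir are the index lists used below to slice the full matrices.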
# get FT Fock matrix
fmo = sys.g_fock_tot()
fmo = fmo - numpy.diag(en)
sfo = numpy.sqrt(focc)
sfv = numpy.sqrt(fvir)
# get ERIs
eri = sys.g_aint_tot()
# pre-contract with fermi factors
Foo = einsum('ij,i,j->ij', fmo[numpy.ix_(iocc, iocc)], sfo, sfo)
Fov = einsum('ia,i,a->ia', fmo[numpy.ix_(iocc, ivir)], sfo, sfv)
Fvo = einsum('ai,a,i->ai', fmo[numpy.ix_(ivir, iocc)], sfv, sfo)
Fvv = einsum('ab,a,b->ab', fmo[numpy.ix_(ivir, ivir)], sfv, sfv)
F = one_e_blocks(Foo, Fov, Fvo, Fvv)
Ivvvv = einsum('abcd,a,b,c,d->abcd', eri[numpy.ix_(ivir, ivir, ivir, ivir)], sfv, sfv, sfv, sfv)
Ivvvo = einsum('abci,a,b,c,i->abci', eri[numpy.ix_(ivir, ivir, ivir, iocc)], sfv, sfv, sfv, sfo)
Ivovv = einsum('aibc,a,i,b,c->aibc', eri[numpy.ix_(ivir, iocc, ivir, ivir)], sfv, sfo, sfv, sfv)
Ivvoo = einsum('abij,a,b,i,j->abij', eri[numpy.ix_(ivir, ivir, iocc, iocc)], sfv, sfv, sfo, sfo)
Ioovv = einsum('ijab,i,j,a,b->ijab', eri[numpy.ix_(iocc, iocc, ivir, ivir)], sfo, sfo, sfv, sfv)
Ivovo = einsum('ajbi,a,j,b,i->ajbi', eri[numpy.ix_(ivir, iocc, ivir, iocc)], sfv, sfo, sfv, sfo)
Ivooo = einsum('akij,a,k,i,j->akij', eri[numpy.ix_(ivir, iocc, iocc, iocc)], sfv, sfo, sfo, sfo)
Iooov = einsum('jkia,j,k,i,a->jkia', eri[numpy.ix_(iocc, iocc, iocc, ivir)], sfo, sfo, sfo, sfv)
Ioooo = einsum('klij,k,l,i,j->klij', eri[numpy.ix_(iocc, iocc, iocc, iocc)], sfo, sfo, sfo, sfo)
I = two_e_blocks(
vvvv=Ivvvv, vvvo=Ivvvo, vovv=Ivovv, vvoo=Ivvoo,
vovo=Ivovo, oovv=Ioovv, vooo=Ivooo, ooov=Iooov, oooo=Ioooo)
return F, I
def uft_active_integrals(sys, ea, eb, foa, fva, fob, fvb, iocca, ivira, ioccb, ivirb):
"""Return one and two-electron integrals in the general spin orbital basis
with small occupations excluded."""
# get FT Fock matrix
fa, fb = sys.u_fock_tot()
fa = fa - numpy.diag(ea)
fb = fb - numpy.diag(eb)
sfoa = numpy.sqrt(foa)
sfva = numpy.sqrt(fva)
sfob = numpy.sqrt(fob)
sfvb = numpy.sqrt(fvb)
# pre-contract with fermi factors
Fooa = einsum('ij,i,j->ij', fa[numpy.ix_(iocca, iocca)], sfoa, sfoa)
Fova = einsum('ia,i,a->ia', fa[numpy.ix_(iocca, ivira)], sfoa, sfva)
Fvoa = einsum('ai,a,i->ai', fa[numpy.ix_(ivira, iocca)], sfva, sfoa)
Fvva = einsum('ab,a,b->ab', fa[numpy.ix_(ivira, ivira)], sfva, sfva)
Fa = one_e_blocks(Fooa, Fova, Fvoa, Fvva)
Foob = einsum('ij,i,j->ij', fb[numpy.ix_(ioccb, ioccb)], sfob, sfob)
Fovb = einsum('ia,i,a->ia', fb[numpy.ix_(ioccb, ivirb)], sfob, sfvb)
Fvob = einsum('ai,a,i->ai', fb[numpy.ix_(ivirb, ioccb)], sfvb, sfob)
Fvvb = einsum('ab,a,b->ab', fb[numpy.ix_(ivirb, ivirb)], sfvb, sfvb)
Fb = one_e_blocks(Foob, Fovb, Fvob, Fvvb)
# get ERIs
eriA, eriB, eriAB = sys.u_aint_tot()
Ivvvv = einsum('abcd,a,b,c,d->abcd', eriA[numpy.ix_(ivira, ivira, ivira, ivira)], sfva, sfva, sfva, sfva)
Ivvvo = einsum('abci,a,b,c,i->abci', eriA[numpy.ix_(ivira, ivira, ivira, iocca)], sfva, sfva, sfva, sfoa)
Ivovv = einsum('aibc,a,i,b,c->aibc', eriA[numpy.ix_(ivira, iocca, ivira, ivira)], sfva, sfoa, sfva, sfva)
Ivvoo = einsum('abij,a,b,i,j->abij', eriA[numpy.ix_(ivira, ivira, iocca, iocca)], sfva, sfva, sfoa, sfoa)
Ioovv = einsum('ijab,i,j,a,b->ijab', eriA[numpy.ix_(iocca, iocca, ivira, ivira)], sfoa, sfoa, sfva, sfva)
Ivovo = einsum('ajbi,a,j,b,i->ajbi', eriA[numpy.ix_(ivira, iocca, ivira, iocca)], sfva, sfoa, sfva, sfoa)
Ivooo = einsum('akij,a,k,i,j->akij', eriA[numpy.ix_(ivira, iocca, iocca, iocca)], sfva, sfoa, sfoa, sfoa)
Iooov = einsum('jkia,j,k,i,a->jkia', eriA[numpy.ix_(iocca, iocca, iocca, ivira)], sfoa, sfoa, sfoa, sfva)
Ioooo = einsum('klij,k,l,i,j->klij', eriA[numpy.ix_(iocca, iocca, iocca, iocca)], sfoa, sfoa, sfoa, sfoa)
Ia = two_e_blocks(
vvvv=Ivvvv, vvvo=Ivvvo, vovv=Ivovv, vvoo=Ivvoo,
vovo=Ivovo, oovv=Ioovv, vooo=Ivooo, ooov=Iooov, oooo=Ioooo)
Ivvvv = einsum('abcd,a,b,c,d->abcd', eriB[numpy.ix_(ivirb, ivirb, ivirb, ivirb)], sfvb, sfvb, sfvb, sfvb)
Ivvvo = einsum('abci,a,b,c,i->abci', eriB[numpy.ix_(ivirb, ivirb, ivirb, ioccb)], sfvb, sfvb, sfvb, sfob)
Ivovv = einsum('aibc,a,i,b,c->aibc', eriB[numpy.ix_(ivirb, ioccb, ivirb, ivirb)], sfvb, sfob, sfvb, sfvb)
Ivvoo = einsum('abij,a,b,i,j->abij', eriB[numpy.ix_(ivirb, ivirb, ioccb, ioccb)], sfvb, sfvb, sfob, sfob)
Ioovv = einsum('ijab,i,j,a,b->ijab', eriB[numpy.ix_(ioccb, ioccb, ivirb, ivirb)], sfob, sfob, sfvb, sfvb)
Ivovo = einsum('ajbi,a,j,b,i->ajbi', eriB[numpy.ix_(ivirb, ioccb, ivirb, ioccb)], sfvb, sfob, sfvb, sfob)
Ivooo = einsum('akij,a,k,i,j->akij', eriB[numpy.ix_(ivirb, ioccb, ioccb, ioccb)], sfvb, sfob, sfob, sfob)
Iooov = einsum('jkia,j,k,i,a->jkia', eriB[numpy.ix_(ioccb, ioccb, ioccb, ivirb)], sfob, sfob, sfob, sfvb)
Ioooo = einsum('klij,k,l,i,j->klij', eriB[numpy.ix_(ioccb, ioccb, ioccb, ioccb)], sfob, sfob, sfob, sfob)
Ib = two_e_blocks(
vvvv=Ivvvv, vvvo=Ivvvo, vovv=Ivovv, vvoo=Ivvoo,
vovo=Ivovo, oovv=Ioovv, vooo=Ivooo, ooov=Iooov, oooo=Ioooo)
Ivvvv = einsum('abcd,a,b,c,d->abcd', eriAB[numpy.ix_(ivira, ivirb, ivira, ivirb)], sfva, sfvb, sfva, sfvb)
Ivvvo = einsum('abci,a,b,c,i->abci', eriAB[numpy.ix_(ivira, ivirb, ivira, ioccb)], sfva, sfvb, sfva, sfob)
Ivvov = einsum('abic,a,b,i,c->abic', eriAB[numpy.ix_(ivira, ivirb, iocca, ivirb)], sfva, sfvb, sfoa, sfvb)
Ivovv = einsum('aibc,a,i,b,c->aibc', eriAB[numpy.ix_(ivira, ioccb, ivira, ivirb)], sfva, sfob, sfva, sfvb)
Iovvv = einsum('iabc,i,a,b,c->iabc', eriAB[numpy.ix_(iocca, ivirb, ivira, ivirb)], sfoa, sfvb, sfva, sfvb)
Ivvoo = einsum('abij,a,b,i,j->abij', eriAB[numpy.ix_(ivira, ivirb, iocca, ioccb)], sfva, sfvb, sfoa, sfob)
Ivovo = einsum('ajbi,a,j,b,i->ajbi', eriAB[numpy.ix_(ivira, ioccb, ivira, ioccb)], sfva, sfob, sfva, sfob)
Iovvo = einsum('jabi,j,a,b,i->jabi', eriAB[numpy.ix_(iocca, ivirb, ivira, ioccb)], sfoa, sfvb, sfva, sfob)
Ivoov = einsum('ajib,a,j,i,b->ajib', eriAB[numpy.ix_(ivira, ioccb, iocca, ivirb)], sfva, sfob, sfoa, sfvb)
Iovov = einsum('jaib,j,a,i,b->jaib', eriAB[numpy.ix_(iocca, ivirb, iocca, ivirb)], sfoa, sfvb, sfoa, sfvb)
Ioovv = einsum('ijab,i,j,a,b->ijab', eriAB[numpy.ix_(iocca, ioccb, ivira, ivirb)], sfoa, sfob, sfva, sfvb)
Ivooo = einsum('akij,a,k,i,j->akij', eriAB[numpy.ix_(ivira, ioccb, iocca, ioccb)], sfva, sfob, sfoa, sfob)
Iovoo = einsum('kaij,k,a,i,j->kaij', eriAB[numpy.ix_(iocca, ivirb, iocca, ioccb)], sfoa, sfvb, sfoa, sfob)
Ioovo = einsum('jkai,j,k,a,i->jkai', eriAB[numpy.ix_(iocca, ioccb, ivira, ioccb)], sfoa, sfob, sfva, sfob)
Iooov = einsum('jkia,j,k,i,a->jkia', eriAB[numpy.ix_(iocca, ioccb, iocca, ivirb)], sfoa, sfob, sfoa, sfvb)
Ioooo = einsum('klij,k,l,i,j->klij', eriAB[numpy.ix_(iocca, ioccb, iocca, ioccb)], sfoa, sfob, sfoa, sfob)
Iabab = two_e_blocks_full(
vvvv=Ivvvv, vvvo=Ivvvo, vvov=Ivvov, vovv=Ivovv,
ovvv=Iovvv, vvoo=Ivvoo, vovo=Ivovo, ovvo=Iovvo,
voov=Ivoov, ovov=Iovov, oovv=Ioovv, vooo=Ivooo,
ovoo=Iovoo, oovo=Ioovo, ooov=Iooov, oooo=Ioooo)
return Fa, Fb, Ia, Ib, Iabab

def rft_active_integrals(sys, en, focc, fvir, iocc, ivir):
"""Return one- and two-electron integrals in the restricted orbital basis
with small occupations excluded."""
# get FT Fock matrix
fmo = sys.r_fock_tot()
fmo = fmo - numpy.diag(en)
# get ERIs
eri = sys.r_int_tot()
# square root of occupation numbers
sfo = numpy.sqrt(focc)
sfv = numpy.sqrt(fvir)
# pre-contract with Fermi factors
Foo = einsum('ij,i,j->ij', fmo[numpy.ix_(iocc, iocc)], sfo, sfo)
Fov = einsum('ia,i,a->ia', fmo[numpy.ix_(iocc, ivir)], sfo, sfv)
Fvo = einsum('ai,a,i->ai', fmo[numpy.ix_(ivir, iocc)], sfv, sfo)
Fvv = einsum('ab,a,b->ab', fmo[numpy.ix_(ivir, ivir)], sfv, sfv)
F = one_e_blocks(Foo, Fov, Fvo, Fvv)
Ivvvv = einsum('abcd,a,b,c,d->abcd', eri[numpy.ix_(ivir, ivir, ivir, ivir)], sfv, sfv, sfv, sfv)
Ivvvo = einsum('abci,a,b,c,i->abci', eri[numpy.ix_(ivir, ivir, ivir, iocc)], sfv, sfv, sfv, sfo)
Ivvov = einsum('abic,a,b,i,c->abic', eri[numpy.ix_(ivir, ivir, iocc, ivir)], sfv, sfv, sfo, sfv)
Ivovv = einsum('aibc,a,i,b,c->aibc', eri[numpy.ix_(ivir, iocc, ivir, ivir)], sfv, sfo, sfv, sfv)
Iovvv = einsum('iabc,i,a,b,c->iabc', eri[numpy.ix_(iocc, ivir, ivir, ivir)], sfo, sfv, sfv, sfv)
Ivvoo = einsum('abij,a,b,i,j->abij', eri[numpy.ix_(ivir, ivir, iocc, iocc)], sfv, sfv, sfo, sfo)
Ivovo = einsum('ajbi,a,j,b,i->ajbi', eri[numpy.ix_(ivir, iocc, ivir, iocc)], sfv, sfo, sfv, sfo)
Iovvo = einsum('jabi,j,a,b,i->jabi', eri[numpy.ix_(iocc, ivir, ivir, iocc)], sfo, sfv, sfv, sfo)
Ivoov = einsum('ajib,a,j,i,b->ajib', eri[numpy.ix_(ivir, iocc, iocc, ivir)], sfv, sfo, sfo, sfv)
Iovov = einsum('jaib,j,a,i,b->jaib', eri[numpy.ix_(iocc, ivir, iocc, ivir)], sfo, sfv, sfo, sfv)
Ioovv = einsum('ijab,i,j,a,b->ijab', eri[numpy.ix_(iocc, iocc, ivir, ivir)], sfo, sfo, sfv, sfv)
Ivooo = einsum('akij,a,k,i,j->akij', eri[numpy.ix_(ivir, iocc, iocc, iocc)], sfv, sfo, sfo, sfo)
Iovoo = einsum('kaij,k,a,i,j->kaij', eri[numpy.ix_(iocc, ivir, iocc, iocc)], sfo, sfv, sfo, sfo)
Ioovo = einsum('jkai,j,k,a,i->jkai', eri[numpy.ix_(iocc, iocc, ivir, iocc)], sfo, sfo, sfv, sfo)
Iooov = einsum('jkia,j,k,i,a->jkia', eri[numpy.ix_(iocc, iocc, iocc, ivir)], sfo, sfo, sfo, sfv)
Ioooo = einsum('klij,k,l,i,j->klij', eri[numpy.ix_(iocc, iocc, iocc, iocc)], sfo, sfo, sfo, sfo)
I = two_e_blocks_full(
vvvv=Ivvvv, vvvo=Ivvvo, vvov=Ivvov, vovv=Ivovv,
ovvv=Iovvv, vvoo=Ivvoo, vovo=Ivovo, ovvo=Iovvo,
voov=Ivoov, ovov=Iovov, oovv=Ioovv, vooo=Ivooo,
ovoo=Iovoo, oovo=Ioovo, ooov=Iooov, oooo=Ioooo)
return F, I

def _form_ft_d_eris(eri, sfo, sfv, dso, dsv):
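"""Form ERI blocks contracted with derivatives of the occupation factors:
for each block, apply the product rule over the four indices, replacing the
square-root occupation factor (sfo/sfv) with its derivative (dso/dsv) on one
index at a time and summing the four terms."""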
Ivvvv = einsum('abcd,a,b,c,d->abcd', eri, dsv, sfv, sfv, sfv)\
+ einsum('abcd,a,b,c,d->abcd', eri, sfv, dsv, sfv, sfv)\
+ einsum('abcd,a,b,c,d->abcd', eri, sfv, sfv, dsv, sfv)\
+ einsum('abcd,a,b,c,d->abcd', eri, sfv, sfv, sfv, dsv)
Ivvvo = einsum('abci,a,b,c,i->abci', eri, dsv, sfv, sfv, sfo)\
+ einsum('abci,a,b,c,i->abci', eri, sfv, dsv, sfv, sfo)\
+ einsum('abci,a,b,c,i->abci', eri, sfv, sfv, dsv, sfo)\
+ einsum('abci,a,b,c,i->abci', eri, sfv, sfv, sfv, dso)
Ivovv = einsum('aibc,a,i,b,c->aibc', eri, dsv, sfo, sfv, sfv)\
+ einsum('aibc,a,i,b,c->aibc', eri, sfv, dso, sfv, sfv)\
+ einsum('aibc,a,i,b,c->aibc', eri, sfv, sfo, dsv, sfv)\
+ einsum('aibc,a,i,b,c->aibc', eri, sfv, sfo, sfv, dsv)
Ivvoo = einsum('abij,a,b,i,j->abij', eri, dsv, sfv, sfo, sfo)\
+ einsum('abij,a,b,i,j->abij', eri, sfv, dsv, sfo, sfo)\
+ einsum('abij,a,b,i,j->abij', eri, sfv, sfv, dso, sfo)\
+ einsum('abij,a,b,i,j->abij', eri, sfv, sfv, sfo, dso)
Ivovo = einsum('ajbi,a,j,b,i->ajbi', eri, dsv, sfo, sfv, sfo)\
+ einsum('ajbi,a,j,b,i->ajbi', eri, sfv, dso, sfv, sfo)\
+ einsum('ajbi,a,j,b,i->ajbi', eri, sfv, sfo, dsv, sfo)\
+ einsum('ajbi,a,j,b,i->ajbi', eri, sfv, sfo, sfv, dso)
Ioovv = einsum('ijab,i,j,a,b->ijab', eri, dso, sfo, sfv, sfv)\
+ einsum('ijab,i,j,a,b->ijab', eri, sfo, dso, sfv, sfv)\
+ einsum('ijab,i,j,a,b->ijab', eri, sfo, sfo, dsv, sfv)\
+ einsum('ijab,i,j,a,b->ijab', eri, sfo, sfo, sfv, dsv)
Ivooo = einsum('akij,a,k,i,j->akij', eri, dsv, sfo, sfo, sfo)\
+ einsum('akij,a,k,i,j->akij', eri, sfv, dso, sfo, sfo)\
+ einsum('akij,a,k,i,j->akij', eri, sfv, sfo, dso, sfo)\
+ einsum('akij,a,k,i,j->akij', eri, sfv, sfo, sfo, dso)
Iooov = einsum('jkia,j,k,i,a->jkia', eri, dso, sfo, sfo, sfv)\
+ einsum('jkia,j,k,i,a->jkia', eri, sfo, dso, sfo, sfv)\
+ einsum('jkia,j,k,i,a->jkia', eri, sfo, sfo, dso, sfv)\
+ einsum('jkia,j,k,i,a->jkia', eri, sfo, sfo, sfo, dsv)
Ioooo = einsum('klij,k,l,i,j->klij', eri, dso, sfo, sfo, sfo)\
+ einsum('klij,k,l,i,j->klij', eri, sfo, dso, sfo, sfo)\
+ einsum('klij,k,l,i,j->klij', eri, sfo, sfo, dso, sfo)\
+ einsum('klij,k,l,i,j->klij', eri, sfo, sfo, sfo, dso)
I = two_e_blocks(
vvvv=Ivvvv, vvvo=Ivvvo, vovv=Ivovv, vvoo=Ivvoo,
vovo=Ivovo, oovv=Ioovv, vooo=Ivooo, ooov=Iooov, oooo=Ioooo)
return I

def _form_ft_d_active_eris(eri, sfo, sfv, dso, dsv, iocc, ivir):
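"""Active-space version of _form_ft_d_eris: slice the ERI tensor to the
active occupied (iocc) and virtual (ivir) indices before each product-rule
contraction."""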
Ivvvv = einsum('abcd,a,b,c,d->abcd', eri[numpy.ix_(ivir, ivir, ivir, ivir)], dsv, sfv, sfv, sfv)\
+ einsum('abcd,a,b,c,d->abcd', eri[numpy.ix_(ivir, ivir, ivir, ivir)], sfv, dsv, sfv, sfv)\
+ einsum('abcd,a,b,c,d->abcd', eri[numpy.ix_(ivir, ivir, ivir, ivir)], sfv, sfv, dsv, sfv)\
+ einsum('abcd,a,b,c,d->abcd', eri[numpy.ix_(ivir, ivir, ivir, ivir)], sfv, sfv, sfv, dsv)
Ivvvo = einsum('abci,a,b,c,i->abci', eri[numpy.ix_(ivir, ivir, ivir, iocc)], dsv, sfv, sfv, sfo)\
+ einsum('abci,a,b,c,i->abci', eri[numpy.ix_(ivir, ivir, ivir, iocc)], sfv, dsv, sfv, sfo)\
+ einsum('abci,a,b,c,i->abci', eri[numpy.ix_(ivir, ivir, ivir, iocc)], sfv, sfv, dsv, sfo)\
+ einsum('abci,a,b,c,i->abci', eri[numpy.ix_(ivir, ivir, ivir, iocc)], sfv, sfv, sfv, dso)
Ivovv = einsum('aibc,a,i,b,c->aibc', eri[numpy.ix_(ivir, iocc, ivir, ivir)], dsv, sfo, sfv, sfv)\
+ einsum('aibc,a,i,b,c->aibc', eri[numpy.ix_(ivir, iocc, ivir, ivir)], sfv, dso, sfv, sfv)\
+ einsum('aibc,a,i,b,c->aibc', eri[numpy.ix_(ivir, iocc, ivir, ivir)], sfv, sfo, dsv, sfv)\
+ einsum('aibc,a,i,b,c->aibc', eri[numpy.ix_(ivir, iocc, ivir, ivir)], sfv, sfo, sfv, dsv)
Ivvoo = einsum('abij,a,b,i,j->abij', eri[numpy.ix_(ivir, ivir, iocc, iocc)], dsv, sfv, sfo, sfo)\
+ einsum('abij,a,b,i,j->abij', eri[numpy.ix_(ivir, ivir, iocc, iocc)], sfv, dsv, sfo, sfo)\
+ einsum('abij,a,b,i,j->abij', eri[numpy.ix_(ivir, ivir, iocc, iocc)], sfv, sfv, dso, sfo)\
+ einsum('abij,a,b,i,j->abij', eri[numpy.ix_(ivir, ivir, iocc, iocc)], sfv, sfv, sfo, dso)
Ivovo = einsum('ajbi,a,j,b,i->ajbi', eri[numpy.ix_(ivir, iocc, ivir, iocc)], dsv, sfo, sfv, sfo)\
+ einsum('ajbi,a,j,b,i->ajbi', eri[numpy.ix_(ivir, iocc, ivir, iocc)], sfv, dso, sfv, sfo)\
+ einsum('ajbi,a,j,b,i->ajbi', eri[numpy.ix_(ivir, iocc, ivir, iocc)], sfv, sfo, dsv, sfo)\
+ einsum('ajbi,a,j,b,i->ajbi', eri[numpy.ix_(ivir, iocc, ivir, iocc)], sfv, sfo, sfv, dso)
Ioovv = einsum('ijab,i,j,a,b->ijab', eri[numpy.ix_(iocc, iocc, ivir, ivir)], dso, sfo, sfv, sfv)\
+ einsum('ijab,i,j,a,b->ijab', eri[numpy.ix_(iocc, iocc, ivir, ivir)], sfo, dso, sfv, sfv)\
+ einsum('ijab,i,j,a,b->ijab', eri[numpy.ix_(iocc, iocc, ivir, ivir)], sfo, sfo, dsv, sfv)\
+ einsum('ijab,i,j,a,b->ijab', eri[numpy.ix_(iocc, iocc, ivir, ivir)], sfo, sfo, sfv, dsv)
Ivooo = einsum('akij,a,k,i,j->akij', eri[numpy.ix_(ivir, iocc, iocc, iocc)], dsv, sfo, sfo, sfo)\
+ einsum('akij,a,k,i,j->akij', eri[numpy.ix_(ivir, iocc, iocc, iocc)], sfv, dso, sfo, sfo)\
+ einsum('akij,a,k,i,j->akij', eri[numpy.ix_(ivir, iocc, iocc, iocc)], sfv, sfo, dso, sfo)\
+ einsum('akij,a,k,i,j->akij', eri[numpy.ix_(ivir, iocc, iocc, iocc)], sfv, sfo, sfo, dso)
Iooov = einsum('jkia,j,k,i,a->jkia', eri[numpy.ix_(iocc, iocc, iocc, ivir)], dso, sfo, sfo, sfv)\
+ einsum('jkia,j,k,i,a->jkia', eri[numpy.ix_(iocc, iocc, iocc, ivir)], sfo, dso, sfo, sfv)\
+ einsum('jkia,j,k,i,a->jkia', eri[numpy.ix_(iocc, iocc, iocc, ivir)], sfo, sfo, dso, sfv)\
+ einsum('jkia,j,k,i,a->jkia', eri[numpy.ix_(iocc, iocc, iocc, ivir)], sfo, sfo, sfo, dsv)
Ioooo = einsum('klij,k,l,i,j->klij', eri[numpy.ix_(iocc, iocc, iocc, iocc)], dso, sfo, sfo, sfo)\
+ einsum('klij,k,l,i,j->klij', eri[numpy.ix_(iocc, iocc, iocc, iocc)], sfo, dso, sfo, sfo)\
+ einsum('klij,k,l,i,j->klij', eri[numpy.ix_(iocc, iocc, iocc, iocc)], sfo, sfo, dso, sfo)\
+ einsum('klij,k,l,i,j->klij', eri[numpy.ix_(iocc, iocc, iocc, iocc)], sfo, sfo, sfo, dso)
I = two_e_blocks(
vvvv=Ivvvv, vvvo=Ivvvo, vovv=Ivovv, vvoo=Ivvoo,
vovo=Ivovo, oovv=Ioovv, vooo=Ivooo, ooov=Iooov, oooo=Ioooo)
return I

def ft_d_integrals(sys, en, fo, fv, dvec):
"""Form integrals contracted with derivatives of the occupation numbers in
the spin-orbital basis."""
# get FT Fock matrix
fmo = sys.g_fock_tot()
fmo = fmo - numpy.diag(en)
fd = sys.g_fock_d_tot(dvec)
# get ERIs
eri = sys.g_aint_tot()
sfo = numpy.sqrt(fo)
sfv = numpy.sqrt(fv)
dso = -0.5*sfo*fv*dvec
dsv = +0.5*sfv*fo*dvec
# form derivative integrals
Foo = einsum('ij,i,j->ij', fd, sfo, sfo)\
+ einsum('ij,i,j->ij', fmo, dso, sfo)\
+ einsum('ij,i,j->ij', fmo, sfo, dso)
Fov = einsum('ia,i,a->ia', fd, sfo, sfv)\
+ einsum('ia,i,a->ia', fmo, dso, sfv)\
+ einsum('ia,i,a->ia', fmo, sfo, dsv)
Fvo = einsum('ai,a,i->ai', fd, sfv, sfo)\
+ einsum('ai,a,i->ai', fmo, dsv, sfo)\
+ einsum('ai,a,i->ai', fmo, sfv, dso)
Fvv = einsum('ab,a,b->ab', fd, sfv, sfv)\
+ einsum('ab,a,b->ab', fmo, dsv, sfv)\
+ einsum('ab,a,b->ab', fmo, sfv, dsv)
F = one_e_blocks(Foo, Fov, Fvo, Fvv)
I = _form_ft_d_eris(eri, sfo, sfv, dso, dsv)
return F, I

def u_ft_d_integrals(sys, ea, eb, foa, fva, fob, fvb, dveca, dvecb):
"""Form unrestricted integrals contracted with derivatives of the
occupation numbers."""
# get FT Fock matrices
fa, fb = sys.u_fock_tot()
fa = fa - numpy.diag(ea)
fb = fb - numpy.diag(eb)
fda, fdb = sys.u_fock_d_tot(dveca, dvecb)
sfoa = numpy.sqrt(foa)
sfva = numpy.sqrt(fva)
dsoa = -0.5*sfoa*fva*dveca
dsva = +0.5*sfva*foa*dveca
sfob = numpy.sqrt(fob)
sfvb = numpy.sqrt(fvb)
dsob = -0.5*sfob*fvb*dvecb
dsvb = +0.5*sfvb*fob*dvecb
Fooa = einsum('ij,i,j->ij', fda, sfoa, sfoa)\
+ einsum('ij,i,j->ij', fa, dsoa, sfoa)\
+ einsum('ij,i,j->ij', fa, sfoa, dsoa)
Fova = einsum('ia,i,a->ia', fda, sfoa, sfva)\
+ einsum('ia,i,a->ia', fa, dsoa, sfva)\
+ einsum('ia,i,a->ia', fa, sfoa, dsva)
Fvoa = einsum('ai,a,i->ai', fda, sfva, sfoa)\
+ einsum('ai,a,i->ai', fa, dsva, sfoa)\
+ einsum('ai,a,i->ai', fa, sfva, dsoa)
Fvva = einsum('ab,a,b->ab', fda, sfva, sfva)\
+ einsum('ab,a,b->ab', fa, dsva, sfva)\
+ einsum('ab,a,b->ab', fa, sfva, dsva)
Fa = one_e_blocks(Fooa, Fova, Fvoa, Fvva)
Foob = einsum('ij,i,j->ij', fdb, sfob, sfob)\
+ einsum('ij,i,j->ij', fb, dsob, sfob)\
+ einsum('ij,i,j->ij', fb, sfob, dsob)
Fovb = einsum('ia,i,a->ia', fdb, sfob, sfvb)\
+ einsum('ia,i,a->ia', fb, dsob, sfvb)\
+ einsum('ia,i,a->ia', fb, sfob, dsvb)
Fvob = einsum('ai,a,i->ai', fdb, sfvb, sfob)\
+ einsum('ai,a,i->ai', fb, dsvb, sfob)\
+ einsum('ai,a,i->ai', fb, sfvb, dsob)
Fvvb = einsum('ab,a,b->ab', fdb, sfvb, sfvb)\
+ einsum('ab,a,b->ab', fb, dsvb, sfvb)\
+ einsum('ab,a,b->ab', fb, sfvb, dsvb)
Fb = one_e_blocks(Foob, Fovb, Fvob, Fvvb)
# get ERIs
Ia, Ib, Iabab = sys.u_aint_tot()
Ia = _form_ft_d_eris(Ia, sfoa, sfva, dsoa, dsva)
Ib = _form_ft_d_eris(Ib, sfob, sfvb, dsob, dsvb)
Ivvvv = +einsum('abcd,a,b,c,d->abcd', Iabab, dsva, sfvb, sfva, sfvb)
Ivvvv += einsum('abcd,a,b,c,d->abcd', Iabab, sfva, dsvb, sfva, sfvb)
Ivvvv += einsum('abcd,a,b,c,d->abcd', Iabab, sfva, sfvb, dsva, sfvb)
Ivvvv += einsum('abcd,a,b,c,d->abcd', Iabab, sfva, sfvb, sfva, dsvb)
Ivvvo = +einsum('abci,a,b,c,i->abci', Iabab, dsva, sfvb, sfva, sfob)
Ivvvo += einsum('abci,a,b,c,i->abci', Iabab, sfva, dsvb, sfva, sfob)
Ivvvo += einsum('abci,a,b,c,i->abci', Iabab, sfva, sfvb, dsva, sfob)
Ivvvo += einsum('abci,a,b,c,i->abci', Iabab, sfva, sfvb, sfva, dsob)
Ivvov = +einsum('abic,a,b,i,c->abic', Iabab, dsva, sfvb, sfoa, sfvb)
Ivvov += einsum('abic,a,b,i,c->abic', Iabab, sfva, dsvb, sfoa, sfvb)
Ivvov += einsum('abic,a,b,i,c->abic', Iabab, sfva, sfvb, dsoa, sfvb)
Ivvov += einsum('abic,a,b,i,c->abic', Iabab, sfva, sfvb, sfoa, dsvb)
Ivovv = +einsum('aibc,a,i,b,c->aibc', Iabab, dsva, sfob, sfva, sfvb)
Ivovv += einsum('aibc,a,i,b,c->aibc', Iabab, sfva, dsob, sfva, sfvb)
Ivovv += einsum('aibc,a,i,b,c->aibc', Iabab, sfva, sfob, dsva, sfvb)
Ivovv += einsum('aibc,a,i,b,c->aibc', Iabab, sfva, sfob, sfva, dsvb)
Iovvv = +einsum('iabc,i,a,b,c->iabc', Iabab, dsoa, sfvb, sfva, sfvb)
Iovvv += einsum('iabc,i,a,b,c->iabc', Iabab, sfoa, dsvb, sfva, sfvb)
Iovvv += einsum('iabc,i,a,b,c->iabc', Iabab, sfoa, sfvb, dsva, sfvb)
Iovvv += einsum('iabc,i,a,b,c->iabc', Iabab, sfoa, sfvb, sfva, dsvb)
Ivvoo = +einsum('abij,a,b,i,j->abij', Iabab, dsva, sfvb, sfoa, sfob)
Ivvoo += einsum('abij,a,b,i,j->abij', Iabab, sfva, dsvb, sfoa, sfob)
Ivvoo += einsum('abij,a,b,i,j->abij', Iabab, sfva, sfvb, dsoa, sfob)
Ivvoo += einsum('abij,a,b,i,j->abij', Iabab, sfva, sfvb, sfoa, dsob)
Ivovo = +einsum('ajbi,a,j,b,i->ajbi', Iabab, dsva, sfob, sfva, sfob)
Ivovo += einsum('ajbi,a,j,b,i->ajbi', Iabab, sfva, dsob, sfva, sfob)
Ivovo += einsum('ajbi,a,j,b,i->ajbi', Iabab, sfva, sfob, dsva, sfob)
Ivovo += einsum('ajbi,a,j,b,i->ajbi', Iabab, sfva, sfob, sfva, dsob)
Iovvo = +einsum('jabi,j,a,b,i->jabi', Iabab, dsoa, sfvb, sfva, sfob)
Iovvo += einsum('jabi,j,a,b,i->jabi', Iabab, sfoa, dsvb, sfva, sfob)
Iovvo += einsum('jabi,j,a,b,i->jabi', Iabab, sfoa, sfvb, dsva, sfob)
Iovvo += einsum('jabi,j,a,b,i->jabi', Iabab, sfoa, sfvb, sfva, dsob)
Ivoov = +einsum('ajib,a,j,i,b->ajib', Iabab, dsva, sfob, sfoa, sfvb)
Ivoov += einsum('ajib,a,j,i,b->ajib', Iabab, sfva, dsob, sfoa, sfvb)
Ivoov += einsum('ajib,a,j,i,b->ajib', Iabab, sfva, sfob, dsoa, sfvb)
Ivoov += einsum('ajib,a,j,i,b->ajib', Iabab, sfva, sfob, sfoa, dsvb)
Iovov = +einsum('jaib,j,a,i,b->jaib', Iabab, dsoa, sfvb, sfoa, sfvb)
Iovov += einsum('jaib,j,a,i,b->jaib', Iabab, sfoa, dsvb, sfoa, sfvb)
Iovov += einsum('jaib,j,a,i,b->jaib', Iabab, sfoa, sfvb, dsoa, sfvb)
Iovov += einsum('jaib,j,a,i,b->jaib', Iabab, sfoa, sfvb, sfoa, dsvb)
Ioovv = +einsum('ijab,i,j,a,b->ijab', Iabab, dsoa, sfob, sfva, sfvb)
Ioovv += einsum('ijab,i,j,a,b->ijab', Iabab, sfoa, dsob, sfva, sfvb)
Ioovv += einsum('ijab,i,j,a,b->ijab', Iabab, sfoa, sfob, dsva, sfvb)
Ioovv += einsum('ijab,i,j,a,b->ijab', Iabab, sfoa, sfob, sfva, dsvb)
Ivooo = +einsum('akij,a,k,i,j->akij', Iabab, dsva, sfob, sfoa, sfob)
Ivooo += einsum('akij,a,k,i,j->akij', Iabab, sfva, dsob, sfoa, sfob)
Ivooo += einsum('akij,a,k,i,j->akij', Iabab, sfva, sfob, dsoa, sfob)
Ivooo += einsum('akij,a,k,i,j->akij', Iabab, sfva, sfob, sfoa, dsob)
Iovoo = +einsum('kaij,k,a,i,j->kaij', Iabab, dsoa, sfvb, sfoa, sfob)
Iovoo += einsum('kaij,k,a,i,j->kaij', Iabab, sfoa, dsvb, sfoa, sfob)
Iovoo += einsum('kaij,k,a,i,j->kaij', Iabab, sfoa, sfvb, dsoa, sfob)
Iovoo += einsum('kaij,k,a,i,j->kaij', Iabab, sfoa, sfvb, sfoa, dsob)
Ioovo = +einsum('jkai,j,k,a,i->jkai', Iabab, dsoa, sfob, sfva, sfob)
Ioovo += einsum('jkai,j,k,a,i->jkai', Iabab, sfoa, dsob, sfva, sfob)
Ioovo += einsum('jkai,j,k,a,i->jkai', Iabab, sfoa, sfob, dsva, sfob)
Ioovo += einsum('jkai,j,k,a,i->jkai', Iabab, sfoa, sfob, sfva, dsob)
Iooov = +einsum('jkia,j,k,i,a->jkia', Iabab, dsoa, sfob, sfoa, sfvb)
Iooov += einsum('jkia,j,k,i,a->jkia', Iabab, sfoa, dsob, sfoa, sfvb)
Iooov += einsum('jkia,j,k,i,a->jkia', Iabab, sfoa, sfob, dsoa, sfvb)
Iooov += einsum('jkia,j,k,i,a->jkia', Iabab, sfoa, sfob, sfoa, dsvb)
Ioooo = +einsum('klij,k,l,i,j->klij', Iabab, dsoa, sfob, sfoa, sfob)
Ioooo += einsum('klij,k,l,i,j->klij', Iabab, sfoa, dsob, sfoa, sfob)
Ioooo += einsum('klij,k,l,i,j->klij', Iabab, sfoa, sfob, dsoa, sfob)
Ioooo += einsum('klij,k,l,i,j->klij', Iabab, sfoa, sfob, sfoa, dsob)
Iabab = two_e_blocks_full(
vvvv=Ivvvv, vvvo=Ivvvo, vvov=Ivvov, vovv=Ivovv,
ovvv=Iovvv, vvoo=Ivvoo, vovo=Ivovo, ovvo=Iovvo,
voov=Ivoov, ovov=Iovov, oovv=Ioovv, vooo=Ivooo,
ovoo=Iovoo, oovo=Ioovo, ooov=Iooov, oooo=Ioooo)
return Fa, Fb, Ia, Ib, Iabab

def ft_d_active_integrals(sys, en, fo, fv, iocc, ivir, dvec):
"""Return derivatives of the one- and two-electron integrals in the general
spin-orbital basis with small occupations excluded."""
# get FT Fock matrix
fmo = sys.g_fock_tot()
fmo = fmo - numpy.diag(en)
fd = sys.g_fock_d_tot(dvec)
sfo = numpy.sqrt(fo)
sfv = numpy.sqrt(fv)
dso = -0.5*sfo[numpy.ix_(iocc)]*fv[numpy.ix_(iocc)]*dvec[numpy.ix_(iocc)]
dsv = +0.5*sfv[numpy.ix_(ivir)]*fo[numpy.ix_(ivir)]*dvec[numpy.ix_(ivir)]
sfo = sfo[numpy.ix_(iocc)]
sfv = sfv[numpy.ix_(ivir)]
# form derivative integrals
Foo = einsum('ij,i,j->ij', fd[numpy.ix_(iocc, iocc)], sfo, sfo)\
+ einsum('ij,i,j->ij', fmo[numpy.ix_(iocc, iocc)], dso, sfo)\
+ einsum('ij,i,j->ij', fmo[numpy.ix_(iocc, iocc)], sfo, dso)
Fov = einsum('ia,i,a->ia', fd[numpy.ix_(iocc, ivir)], sfo, sfv)\
+ einsum('ia,i,a->ia', fmo[numpy.ix_(iocc, ivir)], dso, sfv)\
+ einsum('ia,i,a->ia', fmo[numpy.ix_(iocc, ivir)], sfo, dsv)
Fvo = einsum('ai,a,i->ai', fd[numpy.ix_(ivir, iocc)], sfv, sfo)\
+ einsum('ai,a,i->ai', fmo[numpy.ix_(ivir, iocc)], dsv, sfo)\
+ einsum('ai,a,i->ai', fmo[numpy.ix_(ivir, iocc)], sfv, dso)
Fvv = einsum('ab,a,b->ab', fd[numpy.ix_(ivir, ivir)], sfv, sfv)\
+ einsum('ab,a,b->ab', fmo[numpy.ix_(ivir, ivir)], dsv, sfv)\
+ einsum('ab,a,b->ab', fmo[numpy.ix_(ivir, ivir)], sfv, dsv)
F = one_e_blocks(Foo, Fov, Fvo, Fvv)
# get ERIs
eri = sys.g_aint_tot()
I = _form_ft_d_active_eris(eri, sfo, sfv, dso, dsv, iocc, ivir)
return F, I

def uft_d_active_integrals(sys, ea, eb, foa, fva, fob, fvb,
                           iocca, ivira, ioccb, ivirb, dveca, dvecb):
"""Return derivatives of unrestricted one- and two-electron integrals
with small occupations excluded."""
# get FT Fock matrices
fa, fb = sys.u_fock_tot()
fa = fa - numpy.diag(ea)
fb = fb - numpy.diag(eb)
fda, fdb = sys.u_fock_d_tot(dveca, dvecb)
sfoa = numpy.sqrt(foa)
sfva = numpy.sqrt(fva)
sfob = numpy.sqrt(fob)
sfvb = numpy.sqrt(fvb)
dsoa = -0.5*(sfoa*fva*dveca)[numpy.ix_(iocca)]
dsva = +0.5*(sfva*foa*dveca)[numpy.ix_(ivira)]
dsob = -0.5*(sfob*fvb*dvecb)[numpy.ix_(ioccb)]
dsvb = +0.5*(sfvb*fob*dvecb)[numpy.ix_(ivirb)]
sfoa = sfoa[numpy.ix_(iocca)]
sfva = sfva[numpy.ix_(ivira)]
sfob = sfob[numpy.ix_(ioccb)]
sfvb = sfvb[numpy.ix_(ivirb)]
# form derivative integrals
Foo = einsum('ij,i,j->ij', fda[numpy.ix_(iocca, iocca)], sfoa, sfoa)\
+ einsum('ij,i,j->ij', fa[numpy.ix_(iocca, iocca)], dsoa, sfoa)\
+ einsum('ij,i,j->ij', fa[numpy.ix_(iocca, iocca)], sfoa, dsoa)
Fov = einsum('ia,i,a->ia', fda[numpy.ix_(iocca, ivira)], sfoa, sfva)\
+ einsum('ia,i,a->ia', fa[numpy.ix_(iocca, ivira)], dsoa, sfva)\
+ einsum('ia,i,a->ia', fa[numpy.ix_(iocca, ivira)], sfoa, dsva)
Fvo = einsum('ai,a,i->ai', fda[numpy.ix_(ivira, iocca)], sfva, sfoa)\
+ einsum('ai,a,i->ai', fa[numpy.ix_(ivira, iocca)], dsva, sfoa)\
+ einsum('ai,a,i->ai', fa[numpy.ix_(ivira, iocca)], sfva, dsoa)
Fvv = einsum('ab,a,b->ab', fda[numpy.ix_(ivira, ivira)], sfva, sfva)\
+ einsum('ab,a,b->ab', fa[numpy.ix_(ivira, ivira)], dsva, sfva)\
+ einsum('ab,a,b->ab', fa[numpy.ix_(ivira, ivira)], sfva, dsva)
Fa = one_e_blocks(Foo, Fov, Fvo, Fvv)
Foo = einsum('ij,i,j->ij', fdb[numpy.ix_(ioccb, ioccb)], sfob, sfob)\
+ einsum('ij,i,j->ij', fb[numpy.ix_(ioccb, ioccb)], dsob, sfob)\
+ einsum('ij,i,j->ij', fb[numpy.ix_(ioccb, ioccb)], sfob, dsob)
Fov = einsum('ia,i,a->ia', fdb[numpy.ix_(ioccb, ivirb)], sfob, sfvb)\
+ einsum('ia,i,a->ia', fb[numpy.ix_(ioccb, ivirb)], dsob, sfvb)\
+ einsum('ia,i,a->ia', fb[numpy.ix_(ioccb, ivirb)], sfob, dsvb)
Fvo = einsum('ai,a,i->ai', fdb[numpy.ix_(ivirb, ioccb)], sfvb, sfob)\
+ einsum('ai,a,i->ai', fb[numpy.ix_(ivirb, ioccb)], dsvb, sfob)\
+ einsum('ai,a,i->ai', fb[numpy.ix_(ivirb, ioccb)], sfvb, dsob)
Fvv = einsum('ab,a,b->ab', fdb[numpy.ix_(ivirb, ivirb)], sfvb, sfvb)\
+ einsum('ab,a,b->ab', fb[numpy.ix_(ivirb, ivirb)], dsvb, sfvb)\
+ einsum('ab,a,b->ab', fb[numpy.ix_(ivirb, ivirb)], sfvb, dsvb)
Fb = one_e_blocks(Foo, Fov, Fvo, Fvv)
# get ERIs
eriA, eriB, eriAB = sys.u_aint_tot()
Ia = _form_ft_d_active_eris(eriA, sfoa, sfva, dsoa, dsva, iocca, ivira)
Ib = _form_ft_d_active_eris(eriB, sfob, sfvb, dsob, dsvb, ioccb, ivirb)
Ivvvv = einsum('abcd,a,b,c,d->abcd', eriAB[numpy.ix_(ivira, ivirb, ivira, ivirb)], dsva, sfvb, sfva, sfvb)\
+ einsum('abcd,a,b,c,d->abcd', eriAB[numpy.ix_(ivira, ivirb, ivira, ivirb)], sfva, dsvb, sfva, sfvb)\
+ einsum('abcd,a,b,c,d->abcd', eriAB[numpy.ix_(ivira, ivirb, ivira, ivirb)], sfva, sfvb, dsva, sfvb)\
+ einsum('abcd,a,b,c,d->abcd', eriAB[numpy.ix_(ivira, ivirb, ivira, ivirb)], sfva, sfvb, sfva, dsvb)
Ivvvo = einsum('abci,a,b,c,i->abci', eriAB[numpy.ix_(ivira, ivirb, ivira, ioccb)], dsva, sfvb, sfva, sfob)\
+ einsum('abci,a,b,c,i->abci', eriAB[numpy.ix_(ivira, ivirb, ivira, ioccb)], sfva, dsvb, sfva, sfob)\
+ einsum('abci,a,b,c,i->abci', eriAB[numpy.ix_(ivira, ivirb, ivira, ioccb)], sfva, sfvb, dsva, sfob)\
+ einsum('abci,a,b,c,i->abci', eriAB[numpy.ix_(ivira, ivirb, ivira, ioccb)], sfva, sfvb, sfva, dsob)
Ivvov = einsum('abic,a,b,i,c->abic', eriAB[numpy.ix_(ivira, ivirb, iocca, ivirb)], dsva, sfvb, sfoa, sfvb)\
+ einsum('abic,a,b,i,c->abic', eriAB[numpy.ix_(ivira, ivirb, iocca, ivirb)], sfva, dsvb, sfoa, sfvb)\
+ einsum('abic,a,b,i,c->abic', eriAB[numpy.ix_(ivira, ivirb, iocca, ivirb)], sfva, sfvb, sfoa, dsvb)\
+ einsum('abic,a,b,i,c->abic', eriAB[numpy.ix_(ivira, ivirb, iocca, ivirb)], sfva, sfvb, dsoa, sfvb)
Ivovv = einsum('aibc,a,i,b,c->aibc', eriAB[numpy.ix_(ivira, ioccb, ivira, ivirb)], dsva, sfob, sfva, sfvb)\
+ einsum('aibc,a,i,b,c->aibc', eriAB[numpy.ix_(ivira, ioccb, ivira, ivirb)], sfva, dsob, sfva, sfvb)\
+ einsum('aibc,a,i,b,c->aibc', eriAB[numpy.ix_(ivira, ioccb, ivira, ivirb)], sfva, sfob, dsva, sfvb)\
+ einsum('aibc,a,i,b,c->aibc', eriAB[numpy.ix_(ivira, ioccb, ivira, ivirb)], sfva, sfob, sfva, dsvb)
Iovvv = einsum('iabc,i,a,b,c->iabc', eriAB[numpy.ix_(iocca, ivirb, ivira, ivirb)], sfoa, dsvb, sfva, sfvb)\
+ einsum('iabc,i,a,b,c->iabc', eriAB[numpy.ix_(iocca, ivirb, ivira, ivirb)], dsoa, sfvb, sfva, sfvb)\
+ einsum('iabc,i,a,b,c->iabc', eriAB[numpy.ix_(iocca, ivirb, ivira, ivirb)], sfoa, sfvb, dsva, sfvb)\
+ einsum('iabc,i,a,b,c->iabc', eriAB[numpy.ix_(iocca, ivirb, ivira, ivirb)], sfoa, sfvb, sfva, dsvb)
Ivvoo = einsum('abij,a,b,i,j->abij', eriAB[numpy.ix_(ivira, ivirb, iocca, ioccb)], dsva, sfvb, sfoa, sfob)\
+ einsum('abij,a,b,i,j->abij', eriAB[numpy.ix_(ivira, ivirb, iocca, ioccb)], sfva, dsvb, sfoa, sfob)\
+ einsum('abij,a,b,i,j->abij', eriAB[numpy.ix_(ivira, ivirb, iocca, ioccb)], sfva, sfvb, dsoa, sfob)\
+ einsum('abij,a,b,i,j->abij', eriAB[numpy.ix_(ivira, ivirb, iocca, ioccb)], sfva, sfvb, sfoa, dsob)
Ivovo = einsum('ajbi,a,j,b,i->ajbi', eriAB[numpy.ix_(ivira, ioccb, ivira, ioccb)], dsva, sfob, sfva, sfob)\
+ einsum('ajbi,a,j,b,i->ajbi', eriAB[numpy.ix_(ivira, ioccb, ivira, ioccb)], sfva, dsob, sfva, sfob)\
+ einsum('ajbi,a,j,b,i->ajbi', eriAB[numpy.ix_(ivira, ioccb, ivira, ioccb)], sfva, sfob, dsva, sfob)\
+ einsum('ajbi,a,j,b,i->ajbi', eriAB[numpy.ix_(ivira, ioccb, ivira, ioccb)], sfva, sfob, sfva, dsob)
Ioovv = einsum('ijab,i,j,a,b->ijab', eriAB[numpy.ix_(iocca, ioccb, ivira, ivirb)], dsoa, sfob, sfva, sfvb)\
+ einsum('ijab,i,j,a,b->ijab', eriAB[numpy.ix_(iocca, ioccb, ivira, ivirb)], sfoa, dsob, sfva, sfvb)\
+ einsum('ijab,i,j,a,b->ijab', eriAB[numpy.ix_(iocca, ioccb, ivira, ivirb)], sfoa, sfob, dsva, sfvb)\
+ einsum('ijab,i,j,a,b->ijab', eriAB[numpy.ix_(iocca, ioccb, ivira, ivirb)], sfoa, sfob, sfva, dsvb)
Iovvo = einsum('jabi,j,a,b,i->jabi', eriAB[numpy.ix_(iocca, ivirb, ivira, ioccb)], sfoa, dsvb, sfva, sfob)\
+ einsum('jabi,j,a,b,i->jabi', eriAB[numpy.ix_(iocca, ivirb, ivira, ioccb)], dsoa, sfvb, sfva, sfob)\
+ einsum('jabi,j,a,b,i->jabi', eriAB[numpy.ix_(iocca, ivirb, ivira, ioccb)], sfoa, sfvb, dsva, sfob)\
+ einsum('jabi,j,a,b,i->jabi', eriAB[numpy.ix_(iocca, ivirb, ivira, ioccb)], sfoa, sfvb, sfva, dsob)
Ivoov = einsum('ajib,a,j,i,b->ajib', eriAB[numpy.ix_(ivira, ioccb, iocca, ivirb)], dsva, sfob, sfoa, sfvb)\
+ einsum('ajib,a,j,i,b->ajib', eriAB[numpy.ix_(ivira, ioccb, iocca, ivirb)], sfva, dsob, sfoa, sfvb)\
+ einsum('ajib,a,j,i,b->ajib', eriAB[numpy.ix_(ivira, ioccb, iocca, ivirb)], sfva, sfob, sfoa, dsvb)\
+ einsum('ajib,a,j,i,b->ajib', eriAB[numpy.ix_(ivira, ioccb, iocca, ivirb)], sfva, sfob, dsoa, sfvb)
Iovov = einsum('jaib,j,a,i,b->jaib', eriAB[numpy.ix_(iocca, ivirb, iocca, ivirb)], sfoa, dsvb, sfoa, sfvb)\
+ einsum('jaib,j,a,i,b->jaib', eriAB[numpy.ix_(iocca, ivirb, iocca, ivirb)], dsoa, sfvb, sfoa, sfvb)\
+ einsum('jaib,j,a,i,b->jaib', eriAB[numpy.ix_(iocca, ivirb, iocca, ivirb)], sfoa, sfvb, sfoa, dsvb)\
+ einsum('jaib,j,a,i,b->jaib', eriAB[numpy.ix_(iocca, ivirb, iocca, ivirb)], sfoa, sfvb, dsoa, sfvb)
Ivooo = einsum('akij,a,k,i,j->akij', eriAB[numpy.ix_(ivira, ioccb, iocca, ioccb)], dsva, sfob, sfoa, sfob)\
+ einsum('akij,a,k,i,j->akij', eriAB[numpy.ix_(ivira, ioccb, iocca, ioccb)], sfva, dsob, sfoa, sfob)\
+ einsum('akij,a,k,i,j->akij', eriAB[numpy.ix_(ivira, ioccb, iocca, ioccb)], sfva, sfob, dsoa, sfob)\
+ einsum('akij,a,k,i,j->akij', eriAB[numpy.ix_(ivira, ioccb, iocca, ioccb)], sfva, sfob, sfoa, dsob)
Iovoo = einsum('kaij,k,a,i,j->kaij', eriAB[numpy.ix_(iocca, ivirb, iocca, ioccb)], sfoa, dsvb, sfoa, sfob)\
+ einsum('kaij,k,a,i,j->kaij', eriAB[numpy.ix_(iocca, ivirb, iocca, ioccb)], dsoa, sfvb, sfoa, sfob)\
+ einsum('kaij,k,a,i,j->kaij', eriAB[numpy.ix_(iocca, ivirb, iocca, ioccb)], sfoa, sfvb, dsoa, sfob)\
+ einsum('kaij,k,a,i,j->kaij', eriAB[numpy.ix_(iocca, ivirb, iocca, ioccb)], sfoa, sfvb, sfoa, dsob)
Ioovo = einsum('jkai,j,k,a,i->jkai', eriAB[numpy.ix_(iocca, ioccb, ivira, ioccb)], dsoa, sfob, sfva, sfob)\
+ einsum('jkai,j,k,a,i->jkai', eriAB[numpy.ix_(iocca, ioccb, ivira, ioccb)], sfoa, dsob, sfva, sfob)\
+ einsum('jkai,j,k,a,i->jkai', eriAB[numpy.ix_(iocca, ioccb, ivira, ioccb)], sfoa, sfob, sfva, dsob)\
+ einsum('jkai,j,k,a,i->jkai', eriAB[numpy.ix_(iocca, ioccb, ivira, ioccb)], sfoa, sfob, dsva, sfob)
Iooov = einsum('jkia,j,k,i,a->jkia', eriAB[numpy.ix_(iocca, ioccb, iocca, ivirb)], dsoa, sfob, sfoa, sfvb)\
+ einsum('jkia,j,k,i,a->jkia', eriAB[numpy.ix_(iocca, ioccb, iocca, ivirb)], sfoa, dsob, sfoa, sfvb)\
+ einsum('jkia,j,k,i,a->jkia', eriAB[numpy.ix_(iocca, ioccb, iocca, ivirb)], sfoa, sfob, dsoa, sfvb)\
+ einsum('jkia,j,k,i,a->jkia', eriAB[numpy.ix_(iocca, ioccb, iocca, ivirb)], sfoa, sfob, sfoa, dsvb)
Ioooo = einsum('klij,k,l,i,j->klij', eriAB[numpy.ix_(iocca, ioccb, iocca, ioccb)], dsoa, sfob, sfoa, sfob)\
+ einsum('klij,k,l,i,j->klij', eriAB[numpy.ix_(iocca, ioccb, iocca, ioccb)], sfoa, dsob, sfoa, sfob)\
+ einsum('klij,k,l,i,j->klij', eriAB[numpy.ix_(iocca, ioccb, iocca, ioccb)], sfoa, sfob, dsoa, sfob)\
+ einsum('klij,k,l,i,j->klij', eriAB[numpy.ix_(iocca, ioccb, iocca, ioccb)], sfoa, sfob, sfoa, dsob)
Iabab = two_e_blocks_full(
vvvv=Ivvvv, vvvo=Ivvvo, vvov=Ivvov, vovv=Ivovv,
ovvv=Iovvv, vvoo=Ivvoo, vovo=Ivovo, ovvo=Iovvo,
voov=Ivoov, ovov=Iovov, oovv=Ioovv, vooo=Ivooo,
ovoo=Iovoo, oovo=Ioovo, ooov=Iooov, oooo=Ioooo)
return Fa, Fb, Ia, Ib, Iabab

def g_n2rdm_full(beta, sfo, sfv, P2):
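"""Scale the 2-RDM blocks in P2 by the square-root occupation factors and
1/beta and assemble them into a single tensor, antisymmetrizing the mixed
occupied/virtual blocks by explicit index permutation."""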
n2rdm = +(1.0/beta)*einsum('cdab,c,d,a,b->cdab', P2[0], sfv, sfv, sfv, sfv)
n2rdm += (1.0/beta)*einsum('ciab,c,i,a,b->ciab', P2[1], sfv, sfo, sfv, sfv)
n2rdm -= (1.0/beta)*einsum('ciab,c,i,a,b->icab', P2[1], sfv, sfo, sfv, sfv)
n2rdm += (1.0/beta)*einsum('bcai,b,c,a,i->bcai', P2[2], sfv, sfv, sfv, sfo)
n2rdm -= (1.0/beta)*einsum('bcai,b,c,a,i->bcia', P2[2], sfv, sfv, sfv, sfo)
n2rdm += (1.0/beta)*einsum('ijab,i,j,a,b->ijab', P2[3], sfo, sfo, sfv, sfv)
n2rdm += (1.0/beta)*einsum('bjai,b,j,a,i->bjai', P2[4], sfv, sfo, sfv, sfo)
n2rdm -= (1.0/beta)*einsum('bjai,b,j,a,i->bjia', P2[4], sfv, sfo, sfv, sfo)
n2rdm -= (1.0/beta)*einsum('bjai,b,j,a,i->jbai', P2[4], sfv, sfo, sfv, sfo)
n2rdm += (1.0/beta)*einsum('bjai,b,j,a,i->jbia', P2[4], sfv, sfo, sfv, sfo)
n2rdm += (1.0/beta)*einsum('abij,a,b,i,j->abij', P2[5], sfv, sfv, sfo, sfo)
n2rdm += (1.0/beta)*einsum('jkai,j,k,a,i->jkai', P2[6], sfo, sfo, sfv, sfo)
n2rdm -= (1.0/beta)*einsum('jkai,j,k,a,i->jkia', P2[6], sfo, sfo, sfv, sfo)
n2rdm += (1.0/beta)*einsum('kaij,k,a,i,j->kaij', P2[7], sfo, sfv, sfo, sfo)
n2rdm -= (1.0/beta)*einsum('kaij,k,a,i,j->akij', P2[7], sfo, sfv, sfo, sfo)
n2rdm += (1.0/beta)*einsum('klij,k,l,i,j->klij', P2[8], sfo, sfo, sfo, sfo)
return n2rdm

def g_n2rdm_full_active(beta, n, iocc, ivir, sfo, sfv, P2):
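"""Active-space version of g_n2rdm_full: scatter each scaled block into an
(n, n, n, n) array at the active occupied/virtual index positions."""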
n2rdm = numpy.zeros((n, n, n, n), dtype=P2[0].dtype)
n2rdm[numpy.ix_(ivir, ivir, ivir, ivir)] += \
(1.0/beta)*einsum('cdab,c,d,a,b->cdab', P2[0], sfv, sfv, sfv, sfv)
n2rdm[numpy.ix_(ivir, iocc, ivir, ivir)] += \
(1.0/beta)*einsum('ciab,c,i,a,b->ciab', P2[1], sfv, sfo, sfv, sfv)
n2rdm[numpy.ix_(iocc, ivir, ivir, ivir)] -= \
(1.0/beta)*einsum('ciab,c,i,a,b->icab', P2[1], sfv, sfo, sfv, sfv)
n2rdm[numpy.ix_(ivir, ivir, ivir, iocc)] += \
(1.0/beta)*einsum('bcai,b,c,a,i->bcai', P2[2], sfv, sfv, sfv, sfo)
n2rdm[numpy.ix_(ivir, ivir, iocc, ivir)] -= \
(1.0/beta)*einsum('bcai,b,c,a,i->bcia', P2[2], sfv, sfv, sfv, sfo)
n2rdm[numpy.ix_(iocc, iocc, ivir, ivir)] += \
(1.0/beta)*einsum('ijab,i,j,a,b->ijab', P2[3], sfo, sfo, sfv, sfv)
n2rdm[numpy.ix_(ivir, iocc, ivir, iocc)] += \
(1.0/beta)*einsum('bjai,b,j,a,i->bjai', P2[4], sfv, sfo, sfv, sfo)
n2rdm[numpy.ix_(ivir, iocc, iocc, ivir)] -= \
(1.0/beta)*einsum('bjai,b,j,a,i->bjia', P2[4], sfv, sfo, sfv, sfo)
n2rdm[numpy.ix_(iocc, ivir, ivir, iocc)] -= \
(1.0/beta)*einsum('bjai,b,j,a,i->jbai', P2[4], sfv, sfo, sfv, sfo)
n2rdm[numpy.ix_(iocc, ivir, iocc, ivir)] += \
(1.0/beta)*einsum('bjai,b,j,a,i->jbia', P2[4], sfv, sfo, sfv, sfo)
n2rdm[numpy.ix_(ivir, ivir, iocc, iocc)] += \
(1.0/beta)*einsum('abij,a,b,i,j->abij', P2[5], sfv, sfv, sfo, sfo)
n2rdm[numpy.ix_(iocc, iocc, ivir, iocc)] += \
(1.0/beta)*einsum('jkai,j,k,a,i->jkai', P2[6], sfo, sfo, sfv, sfo)
n2rdm[numpy.ix_(iocc, iocc, iocc, ivir)] -= \
(1.0/beta)*einsum('jkai,j,k,a,i->jkia', P2[6], sfo, sfo, sfv, sfo)
n2rdm[numpy.ix_(iocc, ivir, iocc, iocc)] += \
(1.0/beta)*einsum('kaij,k,a,i,j->kaij', P2[7], sfo, sfv, sfo, sfo)
n2rdm[numpy.ix_(ivir, iocc, iocc, iocc)] -= \
(1.0/beta)*einsum('kaij,k,a,i,j->akij', P2[7], sfo, sfv, sfo, sfo)
n2rdm[numpy.ix_(iocc, iocc, iocc, iocc)] += \
(1.0/beta)*einsum('klij,k,l,i,j->klij', P2[8], sfo, sfo, sfo, sfo)
return n2rdm

def u_n2rdm_full(beta, sfoa, sfva, sfob, sfvb, P2):
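"""Unrestricted version of g_n2rdm_full: assemble the aa, bb, and ab spin
blocks of the occupation-scaled 2-RDM; the extra mixed-spin pieces
(P2[i][3] and beyond) are mapped into the ab block by transposition."""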
na = sfoa.size
nb = sfob.size
P2aa = numpy.zeros((na, na, na, na), dtype=P2[0][0].dtype)
P2aa += (1.0/beta)*einsum('cdab,c,d,a,b->cdab', P2[0][0], sfva, sfva, sfva, sfva)
P2aa += (1.0/beta)*einsum('ciab,c,i,a,b->ciab', P2[1][0], sfva, sfoa, sfva, sfva)
P2aa -= (1.0/beta)*einsum('ciab,c,i,a,b->icab', P2[1][0], sfva, sfoa, sfva, sfva)
P2aa += (1.0/beta)*einsum('bcai,b,c,a,i->bcai', P2[2][0], sfva, sfva, sfva, sfoa)
P2aa -= (1.0/beta)*einsum('bcai,b,c,a,i->bcia', P2[2][0], sfva, sfva, sfva, sfoa)
P2aa += (1.0/beta)*einsum('ijab,i,j,a,b->ijab', P2[3][0], sfoa, sfoa, sfva, sfva)
P2aa += (1.0/beta)*einsum('bjai,b,j,a,i->bjai', P2[4][0], sfva, sfoa, sfva, sfoa)
P2aa -= (1.0/beta)*einsum('bjai,b,j,a,i->bjia', P2[4][0], sfva, sfoa, sfva, sfoa)
P2aa -= (1.0/beta)*einsum('bjai,b,j,a,i->jbai', P2[4][0], sfva, sfoa, sfva, sfoa)
P2aa += (1.0/beta)*einsum('bjai,b,j,a,i->jbia', P2[4][0], sfva, sfoa, sfva, sfoa)
P2aa += (1.0/beta)*einsum('abij,a,b,i,j->abij', P2[5][0], sfva, sfva, sfoa, sfoa)
P2aa += (1.0/beta)*einsum('jkai,j,k,a,i->jkai', P2[6][0], sfoa, sfoa, sfva, sfoa)
P2aa -= (1.0/beta)*einsum('jkai,j,k,a,i->jkia', P2[6][0], sfoa, sfoa, sfva, sfoa)
P2aa += (1.0/beta)*einsum('kaij,k,a,i,j->kaij', P2[7][0], sfoa, sfva, sfoa, sfoa)
P2aa -= (1.0/beta)*einsum('kaij,k,a,i,j->akij', P2[7][0], sfoa, sfva, sfoa, sfoa)
P2aa += (1.0/beta)*einsum('klij,k,l,i,j->klij', P2[8][0], sfoa, sfoa, sfoa, sfoa)
P2bb = numpy.zeros((nb, nb, nb, nb), dtype=P2[0][1].dtype)
P2bb += (1.0/beta)*einsum('cdab,c,d,a,b->cdab', P2[0][1], sfvb, sfvb, sfvb, sfvb)
P2bb += (1.0/beta)*einsum('ciab,c,i,a,b->ciab', P2[1][1], sfvb, sfob, sfvb, sfvb)
P2bb -= (1.0/beta)*einsum('ciab,c,i,a,b->icab', P2[1][1], sfvb, sfob, sfvb, sfvb)
P2bb += (1.0/beta)*einsum('bcai,b,c,a,i->bcai', P2[2][1], sfvb, sfvb, sfvb, sfob)
P2bb -= (1.0/beta)*einsum('bcai,b,c,a,i->bcia', P2[2][1], sfvb, sfvb, sfvb, sfob)
P2bb += (1.0/beta)*einsum('ijab,i,j,a,b->ijab', P2[3][1], sfob, sfob, sfvb, sfvb)
P2bb += (1.0/beta)*einsum('bjai,b,j,a,i->bjai', P2[4][1], sfvb, sfob, sfvb, sfob)
P2bb -= (1.0/beta)*einsum('bjai,b,j,a,i->bjia', P2[4][1], sfvb, sfob, sfvb, sfob)
P2bb -= (1.0/beta)*einsum('bjai,b,j,a,i->jbai', P2[4][1], sfvb, sfob, sfvb, sfob)
P2bb += (1.0/beta)*einsum('bjai,b,j,a,i->jbia', P2[4][1], sfvb, sfob, sfvb, sfob)
P2bb += (1.0/beta)*einsum('abij,a,b,i,j->abij', P2[5][1], sfvb, sfvb, sfob, sfob)
P2bb += (1.0/beta)*einsum('jkai,j,k,a,i->jkai', P2[6][1], sfob, sfob, sfvb, sfob)
P2bb -= (1.0/beta)*einsum('jkai,j,k,a,i->jkia', P2[6][1], sfob, sfob, sfvb, sfob)
P2bb += (1.0/beta)*einsum('kaij,k,a,i,j->kaij', P2[7][1], sfob, sfvb, sfob, sfob)
P2bb -= (1.0/beta)*einsum('kaij,k,a,i,j->akij', P2[7][1], sfob, sfvb, sfob, sfob)
P2bb += (1.0/beta)*einsum('klij,k,l,i,j->klij', P2[8][1], sfob, sfob, sfob, sfob)
P2ab = numpy.zeros((na, nb, na, nb), dtype=P2[0][2].dtype)
P2ab += (1.0/beta)*einsum('cdab,c,d,a,b->cdab', P2[0][2], sfva, sfvb, sfva, sfvb)
P2ab += (1.0/beta)*einsum('ciab,c,i,a,b->ciab', P2[1][2], sfva, sfob, sfva, sfvb)
P2ab += (1.0/beta)*einsum('bcai,b,c,a,i->bcai', P2[2][2], sfva, sfvb, sfva, sfob)
P2ab += (1.0/beta)*einsum('ijab,i,j,a,b->ijab', P2[3][2], sfoa, sfob, sfva, sfvb)
P2ab += (1.0/beta)*einsum('bjai,b,j,a,i->bjai', P2[4][2], sfva, sfob, sfva, sfob)
P2ab += (1.0/beta)*einsum('abij,a,b,i,j->abij', P2[5][2], sfva, sfvb, sfoa, sfob)
P2ab += (1.0/beta)*einsum('jkai,j,k,a,i->jkai', P2[6][2], sfoa, sfob, sfva, sfob)
P2ab += (1.0/beta)*einsum('kaij,k,a,i,j->kaij', P2[7][2], sfoa, sfvb, sfoa, sfob)
P2ab += (1.0/beta)*einsum('klij,k,l,i,j->klij', P2[8][2], sfoa, sfob, sfoa, sfob)
P2ab += (1.0/beta)*einsum('ciab,c,i,a,b->ciab', P2[1][3], sfvb, sfoa, sfvb, sfva).transpose((1, 0, 3, 2))
P2ab += (1.0/beta)*einsum('bcai,b,c,a,i->bcai', P2[2][3], sfvb, sfva, sfvb, sfoa).transpose((1, 0, 3, 2))
P2ab += (1.0/beta)*einsum('jkai,j,k,a,i->jkai', P2[6][3], sfob, sfoa, sfvb, sfoa).transpose((1, 0, 3, 2))
P2ab += (1.0/beta)*einsum('kaij,k,a,i,j->kaij', P2[7][3], sfob, sfva, sfob, sfoa).transpose((1, 0, 3, 2))
P2ab -= (1.0/beta)*einsum('bjai,b,j,a,i->bjai', P2[4][3], sfva, sfob, sfvb, sfoa).transpose((0, 1, 3, 2))
P2ab -= (1.0/beta)*einsum('bjai,b,j,a,i->bjai', P2[4][4], sfvb, sfoa, sfva, sfob).transpose((1, 0, 2, 3))
P2ab += (1.0/beta)*einsum('bjai,b,j,a,i->bjai', P2[4][5], sfvb, sfoa, sfvb, sfoa).transpose((1, 0, 3, 2))
return (P2aa, P2bb, P2ab)

def u_n2rdm_full_active(beta, na, nb, iocca, ivira, ioccb, ivirb, sfoa, sfva, sfob, sfvb, P2):
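"""Active-space version of u_n2rdm_full: scatter each scaled spin block into
full-size (na- and nb-dimensioned) arrays at the active index positions."""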
P2aa = numpy.zeros((na, na, na, na), dtype=P2[0][0].dtype)
P2aa[numpy.ix_(ivira, ivira, ivira, ivira)] += (1.0/beta)*einsum('cdab,c,d,a,b->cdab', P2[0][0], sfva, sfva, sfva, sfva)
P2aa[numpy.ix_(ivira, iocca, ivira, ivira)] += (1.0/beta)*einsum('ciab,c,i,a,b->ciab', P2[1][0], sfva, sfoa, sfva, sfva)
P2aa[numpy.ix_(iocca, ivira, ivira, ivira)] -= (1.0/beta)*einsum('ciab,c,i,a,b->icab', P2[1][0], sfva, sfoa, sfva, sfva)
P2aa[numpy.ix_(ivira, ivira, ivira, iocca)] += (1.0/beta)*einsum('bcai,b,c,a,i->bcai', P2[2][0], sfva, sfva, sfva, sfoa)
P2aa[numpy.ix_(ivira, ivira, iocca, ivira)] -= (1.0/beta)*einsum('bcai,b,c,a,i->bcia', P2[2][0], sfva, sfva, sfva, sfoa)
P2aa[numpy.ix_(iocca, iocca, ivira, ivira)] += (1.0/beta)*einsum('ijab,i,j,a,b->ijab', P2[3][0], sfoa, sfoa, sfva, sfva)
P2aa[numpy.ix_(ivira, iocca, ivira, iocca)] += (1.0/beta)*einsum('bjai,b,j,a,i->bjai', P2[4][0], sfva, sfoa, sfva, sfoa)
P2aa[numpy.ix_(ivira, iocca, iocca, ivira)] -= (1.0/beta)*einsum('bjai,b,j,a,i->bjia', P2[4][0], sfva, sfoa, sfva, sfoa)
P2aa[numpy.ix_(iocca, ivira, ivira, iocca)] -= (1.0/beta)*einsum('bjai,b,j,a,i->jbai', P2[4][0], sfva, sfoa, sfva, sfoa)
P2aa[numpy.ix_(iocca, ivira, iocca, ivira)] += (1.0/beta)*einsum('bjai,b,j,a,i->jbia', P2[4][0], sfva, sfoa, sfva, sfoa)
P2aa[numpy.ix_(ivira, ivira, iocca, iocca)] += (1.0/beta)*einsum('abij,a,b,i,j->abij', P2[5][0], sfva, sfva, sfoa, sfoa)
P2aa[numpy.ix_(iocca, iocca, ivira, iocca)] += (1.0/beta)*einsum('jkai,j,k,a,i->jkai', P2[6][0], sfoa, sfoa, sfva, sfoa)
P2aa[numpy.ix_(iocca, iocca, iocca, ivira)] -= (1.0/beta)*einsum('jkai,j,k,a,i->jkia', P2[6][0], sfoa, sfoa, sfva, sfoa)
P2aa[numpy.ix_(iocca, ivira, iocca, iocca)] += (1.0/beta)*einsum('kaij,k,a,i,j->kaij', P2[7][0], sfoa, sfva, sfoa, sfoa)
P2aa[numpy.ix_(ivira, iocca, iocca, iocca)] -= (1.0/beta)*einsum('kaij,k,a,i,j->akij', P2[7][0], sfoa, sfva, sfoa, sfoa)
P2aa[numpy.ix_(iocca, iocca, iocca, iocca)] += (1.0/beta)*einsum('klij,k,l,i,j->klij', P2[8][0], sfoa, sfoa, sfoa, sfoa)
P2bb = numpy.zeros((nb, nb, nb, nb), dtype=P2[0][1].dtype)
P2bb[numpy.ix_(ivirb, ivirb, ivirb, ivirb)] += (1.0/beta)*einsum('cdab,c,d,a,b->cdab', P2[0][1], sfvb, sfvb, sfvb, sfvb)
P2bb[numpy.ix_(ivirb, ioccb, ivirb, ivirb)] += (1.0/beta)*einsum('ciab,c,i,a,b->ciab', P2[1][1], sfvb, sfob, sfvb, sfvb)
P2bb[numpy.ix_(ioccb, ivirb, ivirb, ivirb)] -= (1.0/beta)*einsum('ciab,c,i,a,b->icab', P2[1][1], sfvb, sfob, sfvb, sfvb)
P2bb[numpy.ix_(ivirb, ivirb, ivirb, ioccb)] += (1.0/beta)*einsum('bcai,b,c,a,i->bcai', P2[2][1], sfvb, sfvb, sfvb, sfob)
P2bb[numpy.ix_(ivirb, ivirb, ioccb, ivirb)] -= (1.0/beta)*einsum('bcai,b,c,a,i->bcia', P2[2][1], sfvb, sfvb, sfvb, sfob)
P2bb[numpy.ix_(ioccb, ioccb, ivirb, ivirb)] += (1.0/beta)*einsum('ijab,i,j,a,b->ijab', P2[3][1], sfob, sfob, sfvb, sfvb)
P2bb[numpy.ix_(ivirb, ioccb, ivirb, ioccb)] += (1.0/beta)*einsum('bjai,b,j,a,i->bjai', P2[4][1], sfvb, sfob, sfvb, sfob)
P2bb[numpy.ix_(ivirb, ioccb, ioccb, ivirb)] -= (1.0/beta)*einsum('bjai,b,j,a,i->bjia', P2[4][1], sfvb, sfob, sfvb, sfob)
P2bb[numpy.ix_(ioccb, ivirb, ivirb, ioccb)] -= (1.0/beta)*einsum('bjai,b,j,a,i->jbai', P2[4][1], sfvb, sfob, sfvb, sfob)
P2bb[numpy.ix_(ioccb, ivirb, ioccb, ivirb)] += (1.0/beta)*einsum('bjai,b,j,a,i->jbia', P2[4][1], sfvb, sfob, sfvb, sfob)
P2bb[numpy.ix_(ivirb, ivirb, ioccb, ioccb)] += (1.0/beta)*einsum('abij,a,b,i,j->abij', P2[5][1], sfvb, sfvb, sfob, sfob)
P2bb[numpy.ix_(ioccb, ioccb, ivirb, ioccb)] += (1.0/beta)*einsum('jkai,j,k,a,i->jkai', P2[6][1], sfob, sfob, sfvb, sfob)
P2bb[numpy.ix_(ioccb, ioccb, ioccb, ivirb)] -= (1.0/beta)*einsum('jkai,j,k,a,i->jkia', P2[6][1], sfob, sfob, sfvb, sfob)
P2bb[numpy.ix_(ioccb, ivirb, ioccb, ioccb)] += (1.0/beta)*einsum('kaij,k,a,i,j->kaij', P2[7][1], sfob, sfvb, sfob, sfob)
P2bb[numpy.ix_(ivirb, ioccb, ioccb, ioccb)] -= (1.0/beta)*einsum('kaij,k,a,i,j->akij', P2[7][1], sfob, sfvb, sfob, sfob)
P2bb[numpy.ix_(ioccb, ioccb, ioccb, ioccb)] += (1.0/beta)*einsum('klij,k,l,i,j->klij', P2[8][1], sfob, sfob, sfob, sfob)
P2ab = numpy.zeros((na, nb, na, nb), dtype=P2[0][2].dtype)
P2ab[numpy.ix_(ivira, ivirb, ivira, ivirb)] += (1.0/beta)*einsum('cdab,c,d,a,b->cdab', P2[0][2], sfva, sfvb, sfva, sfvb)
P2ab[numpy.ix_(ivira, ioccb, ivira, ivirb)] += (1.0/beta)*einsum('ciab,c,i,a,b->ciab', P2[1][2], sfva, sfob, sfva, sfvb)
P2ab[numpy.ix_(ivira, ivirb, ivira, ioccb)] += (1.0/beta)*einsum('bcai,b,c,a,i->bcai', P2[2][2], sfva, sfvb, sfva, sfob)
P2ab[numpy.ix_(iocca, ioccb, ivira, ivirb)] += (1.0/beta)*einsum('ijab,i,j,a,b->ijab', P2[3][2], sfoa, sfob, sfva, sfvb)
P2ab[numpy.ix_(ivira, ioccb, ivira, ioccb)] += (1.0/beta)*einsum('bjai,b,j,a,i->bjai', P2[4][2], sfva, sfob, sfva, sfob)
P2ab[numpy.ix_(ivira, ivirb, iocca, ioccb)] += (1.0/beta)*einsum('abij,a,b,i,j->abij', P2[5][2], sfva, sfvb, sfoa, sfob)
P2ab[numpy.ix_(iocca, ioccb, ivira, ioccb)] += (1.0/beta)*einsum('jkai,j,k,a,i->jkai', P2[6][2], sfoa, sfob, sfva, sfob)
P2ab[numpy.ix_(iocca, ivirb, iocca, ioccb)] += (1.0/beta)*einsum('kaij,k,a,i,j->kaij', P2[7][2], sfoa, sfvb, sfoa, sfob)
P2ab[numpy.ix_(iocca, ioccb, iocca, ioccb)] += (1.0/beta)*einsum('klij,k,l,i,j->klij', P2[8][2], sfoa, sfob, sfoa, sfob)
P2ab[numpy.ix_(iocca, ivirb, ivira, ivirb)] += (1.0/beta)*einsum('ciab,c,i,a,b->ciab', P2[1][3], sfvb, sfoa, sfvb, sfva).transpose((1, 0, 3, 2))
P2ab[numpy.ix_(ivira, ivirb, iocca, ivirb)] += (1.0/beta)*einsum('bcai,b,c,a,i->bcai', P2[2][3], sfvb, sfva, sfvb, sfoa).transpose((1, 0, 3, 2))
P2ab[numpy.ix_(iocca, ioccb, iocca, ivirb)] += (1.0/beta)*einsum('jkai,j,k,a,i->jkai', P2[6][3], sfob, sfoa, sfvb, sfoa).transpose((1, 0, 3, 2))
P2ab[numpy.ix_(ivira, ioccb, iocca, ioccb)] += (1.0/beta)*einsum('kaij,k,a,i,j->kaij', P2[7][3], sfob, sfva, sfob, sfoa).transpose((1, 0, 3, 2))
P2ab[numpy.ix_(ivira, ioccb, iocca, ivirb)] -= (1.0/beta)*einsum('bjai,b,j,a,i->bjai', P2[4][3], sfva, sfob, sfvb, sfoa).transpose((0, 1, 3, 2))
P2ab[numpy.ix_(iocca, ivirb, ivira, ioccb)] -= (1.0/beta)*einsum('bjai,b,j,a,i->bjai', P2[4][4], sfvb, sfoa, sfva, sfob).transpose((1, 0, 2, 3))
P2ab[numpy.ix_(iocca, ivirb, iocca, ivirb)] += (1.0/beta)*einsum('bjai,b,j,a,i->bjai', P2[4][5], sfvb, sfoa, sfvb, sfoa).transpose((1, 0, 3, 2))
return (P2aa, P2bb, P2ab)

def g_Fd_on(Fd, ndia, ndba, ndji, ndai):
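"""Contract the three-index Fock derivative tensor Fd with the one-particle
density blocks and accumulate the negated traces into a single vector."""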
temp = -einsum('ia,aik->k', ndia, Fd)
temp -= einsum('ba,abk->k', ndba, Fd)
temp -= einsum('ji,ijk->k', ndji, Fd)
temp -= einsum('ai,iak->k', ndai, Fd)
return temp

def g_Fd_on_active(Fd, iocc, ivir, ndia, ndba, ndji, ndai):
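"""Active-space version of g_Fd_on: slice the Fock derivative tensor into
its vo, vv, oo, and ov blocks over the active indices before contracting."""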
Fdai = Fd[numpy.ix_(ivir, iocc)]
Fdab = Fd[numpy.ix_(ivir, ivir)]
Fdij = Fd[numpy.ix_(iocc, iocc)]
Fdia = Fd[numpy.ix_(iocc, ivir)]
temp = -einsum('ia,aik->k', ndia, Fdai)
temp -= einsum('ba,abk->k', ndba, Fdab)
temp -= einsum('ji,ijk->k', ndji, Fdij)
temp -= einsum('ai,iak->k', ndai, Fdia)
return temp

def g_d_on_oo(dso, F, I, dia, dji, dai, P2, jitemp):
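"""Accumulate into jitemp (in place) the occupied-index contributions to the
occupation-number derivative, contracting the density (dia, dji, dai) and
2-RDM (P2) blocks with the Fock and ERI blocks and weighting by dso."""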
jitemp -= 0.5*einsum('ia,ai->i', dia, F.vo)*dso
jitemp -= 0.5*einsum('ji,ij->i', dji, F.oo)*dso
jitemp -= 0.5*einsum('ji,ij->j', dji, F.oo)*dso
jitemp -= 0.5*einsum('ai,ia->i', dai, F.ov)*dso
jitemp -= 0.5*0.5*einsum('ciab,abci->i', P2[1], I.vvvo)*dso
jitemp -= 0.5*0.5*einsum('bcai,aibc->i', P2[2], I.vovv)*dso
jitemp -= 0.5*0.5*einsum('ijab,abij->i', P2[3], I.vvoo)*dso
jitemp -= 0.5*1.0*einsum('bjai,aibj->i', P2[4], I.vovo)*dso
jitemp -= 0.5*1.0*einsum('bjai,aibj->j', P2[4], I.vovo)*dso
jitemp -= 0.5*0.5*einsum('abij,ijab->i', P2[5], I.oovv)*dso
jitemp -= 0.5*0.5*einsum('jkai,aijk->i', P2[6], I.vooo)*dso
jitemp -= 0.5*1.0*einsum('jkai,aijk->j', P2[6], I.vooo)*dso
jitemp -= 0.5*1.0*einsum('kaij,ijka->i', P2[7], I.ooov)*dso
jitemp -= 0.5*0.5*einsum('kaij,ijka->k', P2[7], I.ooov)*dso
jitemp -= 0.5*0.5*einsum('klij,ijkl->i', P2[8], I.oooo)*dso
jitemp -= 0.5*0.5*einsum('klij,ijkl->k', P2[8], I.oooo)*dso

def g_d_on_vv(dsv, F, I, dia, dba, dai, P2, batemp):
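"""Accumulate into batemp (in place) the virtual-index contributions to the
occupation-number derivative, analogous to g_d_on_oo but weighted by dsv."""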
batemp += 0.5*einsum('ia,ai->a', dia, F.vo)*dsv
batemp += 0.5*einsum('ba,ab->b', dba, F.vv)*dsv
batemp += 0.5*einsum('ba,ab->a', dba, F.vv)*dsv
batemp += 0.5*einsum('ai,ia->a', dai, F.ov)*dsv
batemp += 0.5*0.5*einsum('cdab,abcd->c', P2[0], I.vvvv)*dsv
batemp += 0.5*0.5*einsum('cdab,abcd->a', P2[0], I.vvvv)*dsv
batemp += 0.5*0.5*einsum('ciab,abci->c', P2[1], I.vvvo)*dsv
batemp += 0.5*1.0*einsum('ciab,abci->a', P2[1], I.vvvo)*dsv
batemp += 0.5*1.0*einsum('bcai,aibc->b', P2[2], I.vovv)*dsv
batemp += 0.5*0.5*einsum('bcai,aibc->a', P2[2], I.vovv)*dsv
batemp += 0.5*0.5*einsum('ijab,abij->a', P2[3], I.vvoo)*dsv
batemp += 0.5*1.0*einsum('bjai,aibj->a', P2[4], I.vovo)*dsv
batemp += 0.5*1.0*einsum('bjai,aibj->b', P2[4], I.vovo)*dsv
batemp += 0.5*0.5*einsum('abij,ijab->a', P2[5], I.oovv)*dsv
batemp += 0.5*0.5*einsum('jkai,aijk->a', P2[6], I.vooo)*dsv
batemp += 0.5*0.5*einsum('kaij,ijka->a', P2[7], I.ooov)*dsv

def u_Fd_on(Fdaa, Fdab, Fdba, Fdbb, ndia, ndba, ndji, ndai):
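"""Unrestricted version of g_Fd_on: contract the alpha (index 0) and beta
(index 1) density blocks with the corresponding Fock derivative tensors and
return separate alpha and beta vectors."""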
tempA = -einsum('ia,aik->k', ndia[0], Fdaa)
tempA -= einsum('ba,abk->k', ndba[0], Fdaa)
tempA -= einsum('ji,ijk->k', ndji[0], Fdaa)
tempA -= einsum('ai,iak->k', ndai[0], Fdaa)
tempA -= einsum('ia,aik->k', ndia[1], Fdba)
tempA -= einsum('ba,abk->k', ndba[1], Fdba)
tempA -= einsum('ji,ijk->k', ndji[1], Fdba)
tempA -= einsum('ai,iak->k', ndai[1], Fdba)
tempB = -einsum('ia,aik->k', ndia[1], Fdbb)
tempB -= einsum('ba,abk->k', ndba[1], Fdbb)
tempB -= einsum('ji,ijk->k', ndji[1], Fdbb)
tempB -= einsum('ai,iak->k', ndai[1], Fdbb)
tempB -= einsum('ia,aik->k', ndia[0], Fdab)
tempB -= einsum('ba,abk->k', ndba[0], Fdab)
tempB -= einsum('ji,ijk->k', ndji[0], Fdab)
tempB -= einsum('ai,iak->k', ndai[0], Fdab)
return tempA, tempB

def u_Fd_on_active(Fdaa, Fdab, Fdba, Fdbb, iocca, ivira, ioccb, ivirb, ndia, ndba, ndji, ndai):
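"""Active-space version of u_Fd_on: slice each Fock derivative tensor into
its occupied/virtual blocks over the active alpha and beta indices before
contracting."""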
Fdaik = Fdaa[numpy.ix_(ivira, iocca)]
Fdabk = Fdaa[numpy.ix_(ivira, ivira)]
Fdijk = Fdaa[numpy.ix_(iocca, iocca)]
Fdiak = Fdaa[numpy.ix_(iocca, ivira)]
FdaiK = Fdab[numpy.ix_(ivira, iocca)]
FdabK = Fdab[numpy.ix_(ivira, ivira)]
FdijK = Fdab[numpy.ix_(iocca, iocca)]
FdiaK = Fdab[numpy.ix_(iocca, ivira)]
FdAIK = Fdbb[numpy.ix_(ivirb, ioccb)]
FdABK = Fdbb[numpy.ix_(ivirb, ivirb)]
FdIJK = Fdbb[numpy.ix_(ioccb, ioccb)]
FdIAK = Fdbb[numpy.ix_(ioccb, ivirb)]
FdAIk = Fdba[numpy.ix_(ivirb, ioccb)]
FdABk = Fdba[numpy.ix_(ivirb, ivirb)]
FdIJk = Fdba[numpy.ix_(ioccb, ioccb)]
FdIAk = Fdba[numpy.ix_(ioccb, ivirb)]
tempA = -einsum('ia,aik->k', ndia[0], Fdaik)
tempA -= einsum('ba,abk->k', ndba[0], Fdabk)
tempA -= einsum('ji,ijk->k', ndji[0], Fdijk)
tempA -= einsum('ai,iak->k', ndai[0], Fdiak)
tempA -= einsum('ia,aik->k', ndia[1], FdAIk)
tempA -= einsum('ba,abk->k', ndba[1], FdABk)
tempA -= einsum('ji,ijk->k', ndji[1], FdIJk)
tempA -= einsum('ai,iak->k', ndai[1], FdIAk)
tempB = -einsum('ia,aik->k', ndia[1], FdAIK)
tempB -= einsum('ba,abk->k', ndba[1], FdABK)
tempB -= einsum('ji,ijk->k', ndji[1], FdIJK)
tempB -= einsum('ai,iak->k', ndai[1], FdIAK)
tempB -= einsum('ia,aik->k', ndia[0], FdaiK)
tempB -= einsum('ba,abk->k', ndba[0], FdabK)
tempB -= einsum('ji,ijk->k', ndji[0], FdijK)
tempB -= einsum('ai,iak->k', ndai[0], FdiaK)
return tempA, tempB

def u_d_on_oo(dsoa, dsob, Fa, Fb, Ia, Ib, Iabab, dia, dji, dai, P2, jitempa, jitempb):
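"""Unrestricted version of g_d_on_oo: accumulate the alpha and beta
occupied-index contributions into jitempa and jitempb (in place), including
the mixed-spin (Iabab) terms."""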
jitempa -= 0.5*einsum('ia,ai->i', dia[0], Fa.vo)*dsoa
jitempa -= 0.5*einsum('ji,ij->i', dji[0], Fa.oo)*dsoa
jitempa -= 0.5*einsum('ji,ij->j', dji[0], Fa.oo)*dsoa
jitempa -= 0.5*einsum('ai,ia->i', dai[0], Fa.ov)*dsoa
jitempb -= 0.5*einsum('ia,ai->i', dia[1], Fb.vo)*dsob
jitempb -= 0.5*einsum('ji,ij->i', dji[1], Fb.oo)*dsob
jitempb -= 0.5*einsum('ji,ij->j', dji[1], Fb.oo)*dsob
jitempb -= 0.5*einsum('ai,ia->i', dai[1], Fb.ov)*dsob
jitempa -= 0.5*0.5*einsum('ijab,abij->i', P2[3][0], Ia.vvoo)*dsoa
jitempb -= 0.5*0.5*einsum('ijab,abij->i', P2[3][1], Ib.vvoo)*dsob
jitempa -= 0.5*1.0*einsum('iJaB,aBiJ->i', P2[3][2], Iabab.vvoo)*dsoa
jitempb -= 0.5*1.0*einsum('iJaB,aBiJ->J', P2[3][2], Iabab.vvoo)*dsob
jitempa -= 0.5*0.5*einsum('ciab,abci->i', P2[1][0], Ia.vvvo)*dsoa
jitempb -= 0.5*0.5*einsum('ciab,abci->i', P2[1][1], Ib.vvvo)*dsob
jitempb -= 0.5*1.0*einsum('ciab,abci->i', P2[1][2], Iabab.vvvo)*dsob
jitempa -= 0.5*1.0*einsum('ciab,baic->i', P2[1][3], Iabab.vvov)*dsoa
jitempa -= 0.5*0.5*einsum('jkai,aijk->i', P2[6][0], Ia.vooo)*dsoa
jitempb -= 0.5*0.5*einsum('jkai,aijk->i', P2[6][1], Ib.vooo)*dsob
jitempb -= 0.5*1.0*einsum('jKaI,aIjK->I', P2[6][2], Iabab.vooo)*dsob
jitempa -= 0.5*1.0*einsum('JkAi,iAkJ->i', P2[6][3], Iabab.ovoo)*dsoa
jitempa -= 0.5*1.0*einsum('jkai,aijk->j', P2[6][0], Ia.vooo)*dsoa
jitempb -= 0.5*1.0*einsum('jkai,aijk->j', P2[6][1], Ib.vooo)*dsob
jitempa -= 0.5*1.0*einsum('jKaI,aIjK->j', P2[6][2], Iabab.vooo)*dsoa
jitempb -= 0.5*1.0*einsum('JkAi,iAkJ->J', P2[6][3], Iabab.ovoo)*dsob
jitempb -= 0.5*1.0*einsum('jKaI,aIjK->K', P2[6][2], Iabab.vooo)*dsob
jitempa -= 0.5*1.0*einsum('JkAi,iAkJ->k', P2[6][3], Iabab.ovoo)*dsoa
jitempa -= 0.5*1.0*einsum('bjai,aibj->i', P2[4][0], Ia.vovo)*dsoa
jitempa -= 0.5*1.0*einsum('bjai,aibj->j', P2[4][0], Ia.vovo)*dsoa
jitempb -= 0.5*1.0*einsum('BJAI,AIBJ->I', P2[4][1], Ib.vovo)*dsob
jitempb -= 0.5*1.0*einsum('BJAI,AIBJ->J', P2[4][1], Ib.vovo)*dsob
jitempb -= 0.5*1.0*einsum('bJaI,aIbJ->I', P2[4][2], Iabab.vovo)*dsob
jitempb -= 0.5*1.0*einsum('bJaI,aIbJ->J', P2[4][2], Iabab.vovo)*dsob
jitempa += 0.5*1.0*einsum('bJAi,iAbJ->i', P2[4][3], Iabab.ovvo)*dsoa
jitempb += 0.5*1.0*einsum('bJAi,iAbJ->J', P2[4][3], Iabab.ovvo)*dsob
jitempb += 0.5*1.0*einsum('BjaI,aIjB->I', P2[4][4], Iabab.voov)*dsob
jitempa += 0.5*1.0*einsum('BjaI,aIjB->j', P2[4][4], Iabab.voov)*dsoa
jitempa -= 0.5*1.0*einsum('BjAi,iAjB->i', P2[4][5], Iabab.ovov)*dsoa
jitempa -= 0.5*1.0*einsum('BjAi,iAjB->j', P2[4][5], Iabab.ovov)*dsoa
jitempa -= 0.5*0.5*einsum('klij,ijkl->i', P2[8][0], Ia.oooo)*dsoa
jitempa -= 0.5*0.5*einsum('klij,ijkl->k', P2[8][0], Ia.oooo)*dsoa
jitempb -= 0.5*0.5*einsum('klij,ijkl->i', P2[8][1], Ib.oooo)*dsob
jitempb -= 0.5*0.5*einsum('klij,ijkl->k', P2[8][1], Ib.oooo)*dsob
jitempa -= 0.5*1.0*einsum('kLiJ,iJkL->i', P2[8][2], Iabab.oooo)*dsoa
jitempb -= 0.5*1.0*einsum('kLiJ,iJkL->J', P2[8][2], Iabab.oooo)*dsob
jitempa -= 0.5*1.0*einsum('kLiJ,iJkL->k', P2[8][2], Iabab.oooo)*dsoa
jitempb -= 0.5*1.0*einsum('kLiJ,iJkL->L', P2[8][2], Iabab.oooo)*dsob
jitempa -= 0.5*0.5*einsum('bcai,aibc->i', P2[2][0], Ia.vovv)*dsoa
jitempb -= 0.5*0.5*einsum('bcai,aibc->i', P2[2][1], Ib.vovv)*dsob
jitempb -= 0.5*1.0*einsum('bCaI,aIbC->I', P2[2][2], Iabab.vovv)*dsob
jitempa -= 0.5*1.0*einsum('BcAi,iAcB->i', P2[2][3], Iabab.ovvv)*dsoa
jitempa -= 0.5*1.0*einsum('kaij,ijka->i', P2[7][0], Ia.ooov)*dsoa
jitempa -= 0.5*0.5*einsum('kaij,ijka->k', P2[7][0], Ia.ooov)*dsoa
jitempb -= 0.5*1.0*einsum('kaij,ijka->i', P2[7][1], Ib.ooov)*dsob
jitempb -= 0.5*0.5*einsum('kaij,ijka->k', P2[7][1], Ib.ooov)*dsob
jitempa -= 0.5*1.0*einsum('kAiJ,iJkA->i', P2[7][2], Iabab.ooov)*dsoa
jitempb -= 0.5*1.0*einsum('kAiJ,iJkA->J', P2[7][2], Iabab.ooov)*dsob
jitempa -= 0.5*1.0*einsum('kAiJ,iJkA->k', P2[7][2], Iabab.ooov)*dsoa
jitempb -= 0.5*1.0*einsum('KaIj,jIaK->I', P2[7][3], Iabab.oovo)*dsob
jitempa -= 0.5*1.0*einsum('KaIj,jIaK->j', P2[7][3], Iabab.oovo)*dsoa
jitempb -= 0.5*1.0*einsum('KaIj,jIaK->K', P2[7][3], Iabab.oovo)*dsob
jitempa -= 0.5*0.5*einsum('abij,ijab->i', P2[5][0], Ia.oovv)*dsoa
jitempb -= 0.5*0.5*einsum('abij,ijab->i', P2[5][1], Ib.oovv)*dsob
jitempa -= 0.5*1.0*einsum('aBiJ,iJaB->i', P2[5][2], Iabab.oovv)*dsoa
jitempb -= 0.5*1.0*einsum('aBiJ,iJaB->J', P2[5][2], Iabab.oovv)*dsob

def u_d_on_vv(dsva, dsvb, Fa, Fb, Ia, Ib, Iabab, dia, dba, dai, P2, batempa, batempb):
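"""Unrestricted version of g_d_on_vv: accumulate the alpha and beta
virtual-index contributions into batempa and batempb (in place)."""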
batempa += 0.5*einsum('ia,ai->a', dia[0], Fa.vo)*dsva
batempa += 0.5*einsum('ba,ab->a', dba[0], Fa.vv)*dsva
batempa += 0.5*einsum('ba,ab->b', dba[0], Fa.vv)*dsva
batempa += 0.5*einsum('ai,ia->a', dai[0], Fa.ov)*dsva
batempb += 0.5*einsum('ia,ai->a', dia[1], Fb.vo)*dsvb
batempb += 0.5*einsum('ba,ab->a', dba[1], Fb.vv)*dsvb
batempb += 0.5*einsum('ba,ab->b', dba[1], Fb.vv)*dsvb
batempb += 0.5*einsum('ai,ia->a', dai[1], Fb.ov)*dsvb
batempa += 0.5*0.5*einsum('ijab,abij->a', P2[3][0], Ia.vvoo)*dsva
batempb += 0.5*0.5*einsum('ijab,abij->a', P2[3][1], Ib.vvoo)*dsvb
batempa += 0.5*1.0*einsum('iJaB,aBiJ->a', P2[3][2], Iabab.vvoo)*dsva
batempb += 0.5*1.0*einsum('iJaB,aBiJ->B', P2[3][2], Iabab.vvoo)*dsvb
batempa += 0.5*1.0*einsum('ciab,abci->a', P2[1][0], Ia.vvvo)*dsva
batempa += 0.5*0.5*einsum('ciab,abci->c', P2[1][0], Ia.vvvo)*dsva
batempb += 0.5*1.0*einsum('ciab,abci->a', P2[1][1], Ib.vvvo)*dsvb
batempb += 0.5*0.5*einsum('ciab,abci->c', P2[1][1], Ib.vvvo)*dsvb
batempa += 0.5*1.0*einsum('cIaB,aBcI->a', P2[1][2], Iabab.vvvo)*dsva
batempb += 0.5*1.0*einsum('cIaB,aBcI->B', P2[1][2], Iabab.vvvo)*dsvb
batempa += 0.5*1.0*einsum('cIaB,aBcI->c', P2[1][2], Iabab.vvvo)*dsva
batempb += 0.5*1.0*einsum('CiAb,bAiC->A', P2[1][3], Iabab.vvov)*dsvb
batempa += 0.5*1.0*einsum('CiAb,bAiC->b', P2[1][3], Iabab.vvov)*dsva
batempb += 0.5*1.0*einsum('CiAb,bAiC->C', P2[1][3], Iabab.vvov)*dsvb
batempa += 0.5*0.5*einsum('jkai,aijk->a', P2[6][0], Ia.vooo)*dsva
batempb += 0.5*0.5*einsum('jkai,aijk->a', P2[6][1], Ib.vooo)*dsvb
batempa += 0.5*1.0*einsum('jKaI,aIjK->a', P2[6][2], Iabab.vooo)*dsva
batempb += 0.5*1.0*einsum('JkAi,iAkJ->A', P2[6][3], Iabab.ovoo)*dsvb
batempa += 0.5*0.5*einsum('cdab,abcd->a', P2[0][0], Ia.vvvv)*dsva
batempa += 0.5*0.5*einsum('cdab,abcd->c', P2[0][0], Ia.vvvv)*dsva
batempb += 0.5*0.5*einsum('cdab,abcd->a', P2[0][1], Ib.vvvv)*dsvb
batempb += 0.5*0.5*einsum('cdab,abcd->c', P2[0][1], Ib.vvvv)*dsvb
batempa += 0.5*1.0*einsum('cDaB,aBcD->a', P2[0][2], Iabab.vvvv)*dsva
batempb += 0.5*1.0*einsum('cDaB,aBcD->B', P2[0][2], Iabab.vvvv)*dsvb
batempa += 0.5*1.0*einsum('cDaB,aBcD->c', P2[0][2], Iabab.vvvv)*dsva
batempb += 0.5*1.0*einsum('cDaB,aBcD->D', P2[0][2], Iabab.vvvv)*dsvb
batempa += 0.5*1.0*einsum('bjai,aibj->a', P2[4][0], Ia.vovo)*dsva
batempa += 0.5*1.0*einsum('bjai,aibj->b', P2[4][0], Ia.vovo)*dsva
batempb += 0.5*1.0*einsum('BJAI,AIBJ->A', P2[4][1], Ib.vovo)*dsvb
batempb += 0.5*1.0*einsum('BJAI,AIBJ->B', P2[4][1], Ib.vovo)*dsvb
batempa += 0.5*1.0*einsum('bJaI,aIbJ->a', P2[4][2], Iabab.vovo)*dsva
batempa += 0.5*1.0*einsum('bJaI,aIbJ->b', P2[4][2], Iabab.vovo)*dsva
batempb -= 0.5*1.0*einsum('bJAi,iAbJ->A', P2[4][3], Iabab.ovvo)*dsvb
batempa -= 0.5*1.0*einsum('bJAi,iAbJ->b', P2[4][3], Iabab.ovvo)*dsva
batempa -= 0.5*1.0*einsum('BjaI,aIjB->a', P2[4][4], Iabab.voov)*dsva
batempb -= 0.5*1.0*einsum('BjaI,aIjB->B', P2[4][4], Iabab.voov)*dsvb
batempb += 0.5*1.0*einsum('BjAi,iAjB->A', P2[4][5], Iabab.ovov)*dsvb
batempb += 0.5*1.0*einsum('BjAi,iAjB->B', P2[4][5], Iabab.ovov)*dsvb
batempa += 0.5*0.5*einsum('bcai,aibc->a', P2[2][0], Ia.vovv)*dsva
batempa += 0.5*1.0*einsum('bcai,aibc->b', P2[2][0], Ia.vovv)*dsva
batempb += 0.5*0.5*einsum('bcai,aibc->a', P2[2][1], Ib.vovv)*dsvb
batempb += 0.5*1.0*einsum('bcai,aibc->b', P2[2][1], Ib.vovv)*dsvb
batempa += 0.5*1.0*einsum('bCaI,aIbC->a', P2[2][2], Iabab.vovv)*dsva
batempa += 0.5*1.0*einsum('bCaI,aIbC->b', P2[2][2], Iabab.vovv)*dsva
batempb += 0.5*1.0*einsum('bCaI,aIbC->C', P2[2][2], Iabab.vovv)*dsvb
batempb += 0.5*1.0*einsum('BcAi,iAcB->A', P2[2][3], Iabab.ovvv)*dsvb
batempb += 0.5*1.0*einsum('BcAi,iAcB->B', P2[2][3], Iabab.ovvv)*dsvb
batempa += 0.5*1.0*einsum('BcAi,iAcB->c', P2[2][3], Iabab.ovvv)*dsva
batempa += 0.5*0.5*einsum('kaij,ijka->a', P2[7][0], Ia.ooov)*dsva
batempb += 0.5*0.5*einsum('kaij,ijka->a', P2[7][1], Ib.ooov)*dsvb
batempb += 0.5*1.0*einsum('kAiJ,iJkA->A', P2[7][2], Iabab.ooov)*dsvb
batempa += 0.5*1.0*einsum('KaIj,jIaK->a', P2[7][3], Iabab.oovo)*dsva
batempa += 0.5*0.5*einsum('abij,ijab->a', P2[5][0], Ia.oovv)*dsva
batempb += 0.5*0.5*einsum('abij,ijab->a', P2[5][1], Ib.oovv)*dsvb
batempa += 0.5*1.0*einsum('aBiJ,iJaB->a', P2[5][2], Iabab.oovv)*dsva
batempb += 0.5*1.0*einsum('aBiJ,iJaB->B', P2[5][2], Iabab.oovv)*dsvb

def r_Fd_on(Fdss, Fdos, ndia, ndba, ndji, ndai):
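"""Restricted version of g_Fd_on: contract the density blocks with both
Fock derivative tensors (Fdss and Fdos) and accumulate the negated traces."""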
temp = -einsum('ia,aik->k', ndia, Fdss)
temp -= einsum('ba,abk->k', ndba, Fdss)
temp -= einsum('ji,ijk->k', ndji, Fdss)
temp -= einsum('ai,iak->k', ndai, Fdss)
temp -= einsum('ia,aik->k', ndia, Fdos)
temp -= einsum('ba,abk->k', ndba, Fdos)
temp -= einsum('ji,ijk->k', ndji, Fdos)
temp -= einsum('ai,iak->k', ndai, Fdos)
return temp

def r_Fd_on_active(Fdss, Fdos, iocc, ivir, ndia, ndba, ndji, ndai):
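"""Active-space version of r_Fd_on: slice Fdss and Fdos into their
occupied/virtual blocks before contracting."""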
Fdaik = Fdss[numpy.ix_(ivir, iocc)]
Fdabk = Fdss[numpy.ix_(ivir, ivir)]
Fdijk = Fdss[numpy.ix_(iocc, iocc)]
Fdiak = Fdss[numpy.ix_(iocc, ivir)]
FdAIk = Fdos[numpy.ix_(ivir, iocc)]
FdABk = Fdos[numpy.ix_(ivir, ivir)]
FdIJk = Fdos[numpy.ix_(iocc, iocc)]
FdIAk = Fdos[numpy.ix_(iocc, ivir)]
temp = -einsum('ia,aik->k', ndia, Fdaik)
temp -= einsum('ba,abk->k', ndba, Fdabk)
temp -= einsum('ji,ijk->k', ndji, Fdijk)
temp -= einsum('ai,iak->k', ndai, Fdiak)
temp -= einsum('ia,aik->k', ndia, FdAIk)
temp -= einsum('ba,abk->k', ndba, FdABk)
temp -= einsum('ji,ijk->k', ndji, FdIJk)
temp -= einsum('ai,iak->k', ndai, FdIAk)
return temp

def r_d_on_oo(dso, F, I, dia, dji, dai, P2, jitemp):
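"""Restricted version of g_d_on_oo: accumulate the occupied-index
contributions into jitemp (in place), recovering the same-spin combinations
from the restricted P2 blocks by explicit transposes."""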
jitemp -= 0.5*einsum('ia,ai->i', dia, F.vo)*dso
jitemp -= 0.5*einsum('ji,ij->i', dji, F.oo)*dso
jitemp -= 0.5*einsum('ji,ij->j', dji, F.oo)*dso
jitemp -= 0.5*einsum('ai,ia->i', dai, F.ov)*dso
jitemp -= 0.5*0.5*einsum('ijab,abij->i', P2[3] - P2[3].transpose((0, 1, 3, 2)), I.vvoo - I.vvoo.transpose((0, 1, 3, 2)))*dso
jitemp -= 0.5*1.0*einsum('iJaB,aBiJ->i', P2[3], I.vvoo)*dso
jitemp -= 0.5*0.5*einsum('ciab,abci->i', P2[1] - P2[1].transpose((0, 1, 3, 2)), I.vvvo - I.vvov.transpose((0, 1, 3, 2)))*dso
jitemp -= 0.5*1.0*einsum('ciab,baic->i', P2[1], I.vvov)*dso
jitemp -= 0.5*0.5*einsum('jkai,aijk->i', P2[7] - P2[7].transpose((1, 0, 2, 3)), I.vooo - I.vooo.transpose((0, 1, 3, 2)))*dso
jitemp -= 0.5*1.0*einsum('JkAi,iAkJ->i', P2[7], I.ovoo)*dso
jitemp -= 0.5*1.0*einsum('jkai,aijk->j', P2[7] - P2[7].transpose((1, 0, 2, 3)), I.vooo - I.vooo.transpose((0, 1, 3, 2)))*dso
jitemp -= 0.5*1.0*einsum('jKaI,aIjK->j', P2[7], I.vooo)*dso
jitemp -= 0.5*1.0*einsum('JkAi,iAkJ->k', P2[7], I.ovoo)*dso
jitemp -= 0.5*1.0*einsum('bjai,aibj->i', P2[4] - P2[5].transpose((0, 1, 3, 2)), I.vovo - I.voov.transpose((0, 1, 3, 2)))*dso
jitemp -= 0.5*1.0*einsum('bjai,aibj->j', P2[4] - P2[5].transpose((0, 1, 3, 2)), I.vovo - I.voov.transpose((0, 1, 3, 2)))*dso
jitemp -= 0.5*1.0*einsum('bJAi,iAbJ->i', P2[5].transpose((0, 1, 3, 2)), I.ovvo)*dso
jitemp -= 0.5*1.0*einsum('BjaI,aIjB->j', P2[5].transpose((0, 1, 3, 2)), I.voov)*dso
jitemp -= 0.5*1.0*einsum('BjAi,iAjB->i', P2[4], I.ovov)*dso
jitemp -= 0.5*1.0*einsum('BjAi,iAjB->j', P2[4], I.ovov)*dso
jitemp -= 0.5*0.5*einsum('klij,ijkl->i', P2[9] - P2[9].transpose((0, 1, 3, 2)), I.oooo - I.oooo.transpose((0, 1, 3, 2)))*dso
jitemp -= 0.5*0.5*einsum('klij,ijkl->k', P2[9] - P2[9].transpose((0, 1, 3, 2)), I.oooo - I.oooo.transpose((0, 1, 3, 2)))*dso
jitemp -= 0.5*1.0*einsum('kLiJ,iJkL->i', P2[9], I.oooo)*dso
jitemp -= 0.5*1.0*einsum('kLiJ,iJkL->k', P2[9], I.oooo)*dso
jitemp -= 0.5*0.5*einsum('bcai,aibc->i', P2[2] - P2[2].transpose((1, 0, 2, 3)), I.vovv - I.vovv.transpose((0, 1, 3, 2)))*dso
jitemp -= 0.5*1.0*einsum('BcAi,iAcB->i', P2[2], I.ovvv)*dso
jitemp -= 0.5*1.0*einsum('kaij,ijka->i', P2[8] - P2[8].transpose((0, 1, 3, 2)), I.ooov - I.oovo.transpose((0, 1, 3, 2)))*dso
jitemp -= 0.5*0.5*einsum('kaij,ijka->k', P2[8] - P2[8].transpose((0, 1, 3, 2)), I.ooov - I.oovo.transpose((0, 1, 3, 2)))*dso
jitemp -= 0.5*1.0*einsum('kAiJ,iJkA->i', P2[8], I.ooov)*dso
jitemp -= 0.5*1.0*einsum('kAiJ,iJkA->k', P2[8], I.ooov)*dso
jitemp -= 0.5*1.0*einsum('KaIj,jIaK->j', P2[8], I.oovo)*dso
jitemp -= 0.5*0.5*einsum('abij,ijab->i', P2[6] - P2[6].transpose((1, 0, 2, 3)), I.oovv - I.oovv.transpose((0, 1, 3, 2)))*dso
jitemp -= 0.5*1.0*einsum('aBiJ,iJaB->i', P2[6], I.oovv)*dso
def r_d_on_vv(dsv, F, I, dia, dba, dai, P2, batemp):
batemp += 0.5*einsum('ia,ai->a', dia, F.vo)*dsv
batemp += 0.5*einsum('ba,ab->a', dba, F.vv)*dsv
batemp += 0.5*einsum('ba,ab->b', dba, F.vv)*dsv
batemp += 0.5*einsum('ai,ia->a', dai, F.ov)*dsv
batemp += 0.5*0.5*einsum('ijab,abij->a', P2[3] - P2[3].transpose((0, 1, 3, 2)), I.vvoo - I.vvoo.transpose((0, 1, 3, 2)))*dsv
batemp += 0.5*1.0*einsum('iJaB,aBiJ->a', P2[3], I.vvoo)*dsv
batemp += 0.5*1.0*einsum('ciab,abci->a', P2[1] - P2[1].transpose((0, 1, 3, 2)), I.vvvo - I.vvov.transpose((0, 1, 3, 2)))*dsv
batemp += 0.5*0.5*einsum('ciab,abci->c', P2[1] - P2[1].transpose((0, 1, 3, 2)), I.vvvo - I.vvov.transpose((0, 1, 3, 2)))*dsv
batemp += 0.5*1.0*einsum('cIaB,aBcI->a', P2[1], I.vvvo)*dsv
batemp += 0.5*1.0*einsum('cIaB,aBcI->c', P2[1], I.vvvo)*dsv
batemp += 0.5*1.0*einsum('CiAb,bAiC->b', P2[1], I.vvov)*dsv
batemp += 0.5*0.5*einsum('jkai,aijk->a', P2[7] - P2[7].transpose((1, 0, 2, 3)), I.vooo - I.vooo.transpose((0, 1, 3, 2)))*dsv
batemp += 0.5*1.0*einsum('jKaI,aIjK->a', P2[7], I.vooo)*dsv
batemp += 0.5*0.5*einsum('cdab,abcd->a', P2[0] - P2[0].transpose((0, 1, 3, 2)), I.vvvv - I.vvvv.transpose((0, 1, 3, 2)))*dsv
batemp += 0.5*0.5*einsum('cdab,abcd->c', P2[0] - P2[0].transpose((0, 1, 3, 2)), I.vvvv - I.vvvv.transpose((0, 1, 3, 2)))*dsv
batemp += 0.5*1.0*einsum('cDaB,aBcD->a', P2[0], I.vvvv)*dsv
batemp += 0.5*1.0*einsum('cDaB,aBcD->c', P2[0], I.vvvv)*dsv
batemp += 0.5*1.0*einsum('bjai,aibj->a', P2[4] - P2[5].transpose((0, 1, 3, 2)), I.vovo - I.voov.transpose((0, 1, 3, 2)))*dsv
batemp += 0.5*1.0*einsum('bjai,aibj->b', P2[4] - P2[5].transpose((0, 1, 3, 2)), I.vovo - I.voov.transpose((0, 1, 3, 2)))*dsv
batemp += 0.5*1.0*einsum('bJaI,aIbJ->a', P2[4], I.vovo)*dsv
batemp += 0.5*1.0*einsum('bJaI,aIbJ->b', P2[4], I.vovo)*dsv
batemp += 0.5*1.0*einsum('bJAi,iAbJ->b', P2[5].transpose((0, 1, 3, 2)), I.ovvo)*dsv
batemp += 0.5*1.0*einsum('BjaI,aIjB->a', P2[5].transpose((0, 1, 3, 2)), I.voov)*dsv
batemp += 0.5*0.5*einsum('bcai,aibc->a', P2[2] - P2[2].transpose((1, 0, 2, 3)), I.vovv - I.vovv.transpose((0, 1, 3, 2)))*dsv
batemp += 0.5*1.0*einsum('bcai,aibc->b', P2[2] - P2[2].transpose((1, 0, 2, 3)), I.vovv - I.vovv.transpose((0, 1, 3, 2)))*dsv
batemp += 0.5*1.0*einsum('bCaI,aIbC->a', P2[2], I.vovv)*dsv
batemp += 0.5*1.0*einsum('bCaI,aIbC->b', P2[2], I.vovv)*dsv
batemp += 0.5*1.0*einsum('BcAi,iAcB->c', P2[2], I.ovvv)*dsv
batemp += 0.5*0.5*einsum('kaij,ijka->a', P2[8] - P2[8].transpose((0, 1, 3, 2)), I.ooov - I.oovo.transpose((0, 1, 3, 2)))*dsv
batemp += 0.5*1.0*einsum('KaIj,jIaK->a', P2[8], I.oovo)*dsv
batemp += 0.5*0.5*einsum('abij,ijab->a', P2[6] - P2[6].transpose((0, 1, 3, 2)), I.oovv - I.oovv.transpose((0, 1, 3, 2)))*dsv
batemp += 0.5*1.0*einsum('aBiJ,iJaB->a', P2[6], I.oovv)*dsv
def g_full_rdm2(fo, n1rdm, rdm2):
rdm2 += einsum('pr,qs->pqrs', numpy.diag(fo), numpy.diag(fo))
rdm2 -= einsum('pr,qs->pqsr', numpy.diag(fo), numpy.diag(fo))
rdm2 += 0.5*einsum('pr,qs->pqrs', numpy.diag(fo), n1rdm)
rdm2 -= 0.5*einsum('pr,qs->pqsr', numpy.diag(fo), n1rdm)
rdm2 += 0.5*einsum('pr,qs->pqrs', n1rdm, numpy.diag(fo))
rdm2 -= 0.5*einsum('pr,qs->pqsr', n1rdm, numpy.diag(fo))
def g_full_rdm2_active(focc, iocc, iall, n1rdm, rdm2):
rdm2[numpy.ix_(iocc, iocc, iocc, iocc)] += einsum('pr,qs->pqrs', numpy.diag(focc), numpy.diag(focc))
rdm2[numpy.ix_(iocc, iocc, iocc, iocc)] -= einsum('pr,qs->pqsr', numpy.diag(focc), numpy.diag(focc))
rdm2[numpy.ix_(iocc, iall, iocc, iall)] += 0.5*einsum('pr,qs->pqrs', numpy.diag(focc), n1rdm)
rdm2[numpy.ix_(iocc, iall, iall, iocc)] -= 0.5*einsum('pr,qs->pqsr', numpy.diag(focc), n1rdm)
rdm2[numpy.ix_(iall, iocc, iall, iocc)] += 0.5*einsum('pr,qs->pqrs', n1rdm, numpy.diag(focc))
rdm2[numpy.ix_(iall, iocc, iocc, iall)] -= 0.5*einsum('pr,qs->pqsr', n1rdm, numpy.diag(focc))
def u_full_rdm2(foa, fob, n1rdm, rdm2):
rdm2[0] += einsum('pr,qs->pqrs', numpy.diag(foa), numpy.diag(foa))
rdm2[0] -= einsum('pr,qs->pqsr', numpy.diag(foa), numpy.diag(foa))
rdm2[0] += 0.5*einsum('pr,qs->pqrs', numpy.diag(foa), n1rdm[0])
rdm2[0] -= 0.5*einsum('pr,qs->pqsr', numpy.diag(foa), n1rdm[0])
rdm2[0] += 0.5*einsum('pr,qs->pqrs', n1rdm[0], numpy.diag(foa))
rdm2[0] -= 0.5*einsum('pr,qs->pqsr', n1rdm[0], numpy.diag(foa))
rdm2[1] += einsum('pr,qs->pqrs', numpy.diag(fob), numpy.diag(fob))
    rdm2[1] -= einsum('pr,qs->pqsr', numpy.diag(fob), numpy.diag(fob))  # both factors are beta (foa here was a copy-paste slip; cf. the active-space version below)
rdm2[1] += 0.5*einsum('pr,qs->pqrs', numpy.diag(fob), n1rdm[1])
rdm2[1] -= 0.5*einsum('pr,qs->pqsr', numpy.diag(fob), n1rdm[1])
rdm2[1] += 0.5*einsum('pr,qs->pqrs', n1rdm[1], numpy.diag(fob))
rdm2[1] -= 0.5*einsum('pr,qs->pqsr', n1rdm[1], numpy.diag(fob))
rdm2[2] += einsum('pr,qs->pqrs', numpy.diag(foa), numpy.diag(fob))
rdm2[2] += 0.5*einsum('pr,qs->pqrs', numpy.diag(foa), n1rdm[1])
rdm2[2] += 0.5*einsum('pr,qs->pqrs', n1rdm[0], numpy.diag(fob))
def u_full_rdm2_active(focca, foccb, iocca, ioccb, ialla, iallb, n1rdm, rdm2):
rdm2[0][numpy.ix_(iocca, iocca, iocca, iocca)] += einsum('pr,qs->pqrs', numpy.diag(focca), numpy.diag(focca))
rdm2[0][numpy.ix_(iocca, iocca, iocca, iocca)] -= einsum('pr,qs->pqsr', numpy.diag(focca), numpy.diag(focca))
rdm2[0][numpy.ix_(iocca, ialla, iocca, ialla)] += 0.5*einsum('pr,qs->pqrs', numpy.diag(focca), n1rdm[0])
rdm2[0][numpy.ix_(iocca, ialla, ialla, iocca)] -= 0.5*einsum('pr,qs->pqsr', numpy.diag(focca), n1rdm[0])
rdm2[0][numpy.ix_(ialla, iocca, ialla, iocca)] += 0.5*einsum('pr,qs->pqrs', n1rdm[0], numpy.diag(focca))
rdm2[0][numpy.ix_(ialla, iocca, iocca, ialla)] -= 0.5*einsum('pr,qs->pqsr', n1rdm[0], numpy.diag(focca))
rdm2[1][numpy.ix_(ioccb, ioccb, ioccb, ioccb)] += einsum('pr,qs->pqrs', numpy.diag(foccb), numpy.diag(foccb))
rdm2[1][numpy.ix_(ioccb, ioccb, ioccb, ioccb)] -= einsum('pr,qs->pqsr', numpy.diag(foccb), numpy.diag(foccb))
rdm2[1][numpy.ix_(ioccb, iallb, ioccb, iallb)] += 0.5*einsum('pr,qs->pqrs', numpy.diag(foccb), n1rdm[1])
rdm2[1][numpy.ix_(ioccb, iallb, iallb, ioccb)] -= 0.5*einsum('pr,qs->pqsr', numpy.diag(foccb), n1rdm[1])
rdm2[1][numpy.ix_(iallb, ioccb, iallb, ioccb)] += 0.5*einsum('pr,qs->pqrs', n1rdm[1], numpy.diag(foccb))
rdm2[1][numpy.ix_(iallb, ioccb, ioccb, iallb)] -= 0.5*einsum('pr,qs->pqsr', n1rdm[1], numpy.diag(foccb))
rdm2[2][numpy.ix_(iocca, ioccb, iocca, ioccb)] += einsum('pr,qs->pqrs', numpy.diag(focca), numpy.diag(foccb))
rdm2[2][numpy.ix_(iocca, iallb, iocca, iallb)] += 0.5*einsum('pr,qs->pqrs', numpy.diag(focca), n1rdm[1])
rdm2[2][numpy.ix_(ialla, ioccb, ialla, ioccb)] += 0.5*einsum('pr,qs->pqrs', n1rdm[0], numpy.diag(foccb))
|
{"hexsha": "8ff56292136f2ea65dca497fbd944735f6a798bb", "size": 108567, "ext": "py", "lang": "Python", "max_stars_repo_path": "kelvin/cc_utils.py", "max_stars_repo_name": "MoleOrbitalHybridAnalyst/kelvin", "max_stars_repo_head_hexsha": "99538f8360975e2f80941446d8fbf2e848f74cf9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-08-05T15:53:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-05T15:53:46.000Z", "max_issues_repo_path": "kelvin/cc_utils.py", "max_issues_repo_name": "MoleOrbitalHybridAnalyst/kelvin", "max_issues_repo_head_hexsha": "99538f8360975e2f80941446d8fbf2e848f74cf9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kelvin/cc_utils.py", "max_forks_repo_name": "MoleOrbitalHybridAnalyst/kelvin", "max_forks_repo_head_hexsha": "99538f8360975e2f80941446d8fbf2e848f74cf9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-13T18:41:06.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-13T18:41:06.000Z", "avg_line_length": 52.3466730955, "max_line_length": 148, "alphanum_fraction": 0.5824790222, "include": true, "reason": "import numpy", "num_tokens": 49460}
|
import cv2
import numpy as np
import picamera
import serial
import time
def identifySq(pt, w, h):
tlx = 80
tly = 210
ppx = 94
ppy = 82
    sqx = (pt[0]-(tlx-ppx//2))//ppx  # integer division so the result is a usable cell index in Python 3
    sqy = (pt[1]-(tly-ppy//2))//ppy
# print ("ID",pt, w, h, sqx, sqy)
if sqx < 0 or sqx >= 4 or sqy < 0 or sqy >= 4:
return 0, False
return sqy*4 + sqx, True
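# Worked example of the mapping above: a template hit at pt = (80, 210), the
# nominal centre of the top-left tile, gives
#   sqx = (80 - (80 - 94//2)) // 94 = 47 // 94 = 0
#   sqy = (210 - (210 - 82//2)) // 82 = 41 // 82 = 0
# i.e. square index 0 with sqValid == True.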
def extractBoard(cam):
# Acquire source image.
cam.capture('newimg.jpg')
# Read source image.
im_src = cv2.imread('newimg.jpg')
# Resize image
newWidth = 640.0
rat1 = newWidth / im_src.shape[1]
dim1 = (int(newWidth), int(im_src.shape[0] * rat1))
im_small = cv2.resize(im_src, dim1, interpolation = cv2.INTER_AREA)
# Four corners of the book in source image
pts_src = np.array([[57, 368], [98, 22], [585, 28], [626, 374]], dtype=float)
# Read destination image.
im_dst = cv2.imread('destimg2.jpg')
# Four corners of the book in destination image.
pts_dst = np.array([[0, 0], [511, 0], [511, 639], [0, 639]], dtype=float)
# Calculate Homography
h, status = cv2.findHomography(pts_src, pts_dst)
# Warp source image to destination based on homography
im_out = cv2.warpPerspective(im_small, h, (im_dst.shape[1], im_dst.shape[0]))
im_grey = cv2.cvtColor(im_out, cv2.COLOR_BGR2GRAY)
cv2.imwrite('img23.png', im_out)
# Match to template tiles
tileFiles = ['tile000002.png', 'tile000004.png', 'tile000008.png',
'tile000016.png', 'tile000032.png', 'tile000064.png',
'tile000128.png', 'tile000256.png', 'tile000512.png',
'tile001024.png']
lineThicknessIdx = 1
tileVal = 2
boardCells = [0] * 16
candidatesFound = []
for tileFile in tileFiles:
tile = cv2.imread(tileFile, 0)
w, h = tile.shape[::-1]
# Apply template Matching
method = cv2.TM_CCOEFF_NORMED
res = cv2.matchTemplate(im_grey, tile, method)
threshold = 0.9
loc = np.where(res >= threshold)
for pt in zip(*loc[::-1]):
sq, sqValid = identifySq(pt, w, h)
if sqValid:
candidatesFound.append([sq, tileVal, res])
tileVal *= 2
candidatesFound.sort(key=lambda cand: cand[0])
posVals = []
curSq = 0
for cand in candidatesFound:
sq = cand[0]
tileVal = cand[1]
if sq != curSq:
posVals.sort(key=lambda pos: pos[1])
            if len(posVals) > 0 and posVals[-1][0] != 0:
                boardCells[curSq] = posVals[-1][0]
posVals = []
curSq = sq
else:
foundVal = False
for posVal in posVals:
if tileVal == posVal[0]:
posVal[1] += 1
foundVal = True
if not foundVal:
posVals.append([tileVal, 1])
posVals.sort(key=lambda pos: pos[1])
    if len(posVals) > 0 and posVals[-1][0] != 0:
        boardCells[curSq] = posVals[-1][0]
# if boardCells[sq] == 0:
# boardCells[sq] = tileVal
# for cell in boardCells:
# cv2.putText(im_out, str(tileVal), (pt[0],pt[1]+h/3),cv2.FONT_HERSHEY_SCRIPT_COMPLEX, 1, 0, 1)
#print(sq, tileVal)
# print(pt, tileVal, w, h)
#cv2.rectangle(im_out, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), lineThicknessIdx)
# lineThicknessIdx += 1
# print("Found", len(zip(*loc[::-1])),"tiles of", tileVal)
# cv2.imshow("Matched One", im_out)
# cv2.waitKey(1000)
return boardCells
centrePos = b'G0 0 130'
straightPos = b'G0 0 200'
def writeSerialCmd(cmd, delay=0.2):
serialPort.write(cmd)
serialPort.write(b'\r\n')
time.sleep(delay)
def makeMove(serialPort, dirn, afterDelay=0.0):
downPen = b'P1'
upPen = b'P0'
leftPos = b'G0 0 110'
upPos = b'G0 -20 130'
rightPos = b'G0 0 150'
downPos = b'G0 15 130'
moves = []
if dirn == 'left':
moves = [downPen, leftPos, upPen, centrePos]
elif dirn == 'up':
moves = [downPen, upPos, upPen, centrePos]
elif dirn == 'right':
moves = [downPen, rightPos, upPen, centrePos]
elif dirn == 'down':
moves = [downPen, downPos, upPen, centrePos]
for move in moves:
writeSerialCmd(move)
time.sleep(afterDelay)
def algo1(boardCells):
dirns = ['left','up','down','right']
bestDir = ""
bestCombines = 0
bestCombineSum = 0
bestMoveCount = 0
bestBoard = []
for dir in dirns:
cellMap = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
if dir == 'left':
cellMap = [12,8,4,0,13,9,5,1,14,10,6,2,15,11,7,3]
elif dir == 'down':
cellMap = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
elif dir == 'right':
cellMap = [3,7,11,15,2,6,10,14,1,5,9,13,0,4,8,12]
newBoard = boardCells[:]
combines = 0
combineSum = 0
moveCount = 0
movesMade = True
while(movesMade):
movesMade = False
for cellIdx in range(12):
if newBoard[cellMap[cellIdx]] == 0:
                    for idx in range(3 - cellIdx//4):  # integer division (Python 3)
if newBoard[cellMap[cellIdx+4+idx*4]] != 0:
newBoard[cellMap[cellIdx+idx*4]] = newBoard[cellMap[cellIdx+4+idx*4]]
newBoard[cellMap[cellIdx+4+idx*4]] = 0
movesMade = True
moveCount += 1
for cellIdx in range(12):
if newBoard[cellMap[cellIdx]] != 0 and newBoard[cellMap[cellIdx]] == newBoard[cellMap[cellIdx+4]]:
combines += 1
combineSum += boardCells[cellMap[cellIdx]] * 2
newBoard[cellMap[cellIdx]] *= 2
newBoard[cellMap[cellIdx+4]] = 0
                for idx in range(2 - cellIdx//4):  # integer division (Python 3)
newBoard[cellMap[cellIdx+4+idx*4]] = newBoard[cellMap[cellIdx+8+idx*4]]
newBoard[cellMap[cellIdx+8+idx*4]] = 0
print(dir,"num combines", combines, "Sum", combineSum,"MoveCount", moveCount)
if (bestCombines + bestMoveCount == 0 and combines + moveCount > 0) or \
(bestCombineSum < combineSum) or \
(bestMoveCount < moveCount and combineSum == bestCombineSum):
bestCombines = combines
bestDir = dir
bestCombineSum = combineSum
bestMoveCount = moveCount
bestBoard = newBoard[:]
print("Best Dir", bestDir)
return bestBoard, bestDir
def compareToPrev(curBoard, prevBoard):
if len(prevBoard) != 16:
return
numDiffs = 0
for ii in range(len(curBoard)):
if curBoard[ii] != prevBoard[ii]:
if numDiffs == 0:
if curBoard[ii] == 2 or curBoard[ii] == 4:
print("New tile is", curBoard[ii],"at pos", ii)
else:
print("ERROR IN BOARD POS AT", ii, "VAL NOT 2 OR 4")
else:
print("ERROR IN BOARD POS AT", ii, "MORE THAN ONE DIFF")
numDiffs += 1
if __name__ == '__main__' :
cam = picamera.PiCamera()
with serial.Serial("/dev/ttyAMA0", baudrate=115200, timeout=3.0) as serialPort:
writeSerialCmd(centrePos, 2.0)
prevBoard = []
for moveIdx in range(1000):
boardCells = extractBoard(cam)
if sum(boardCells) == 0:
print("Can't see board")
break
print("Current", boardCells)
compareToPrev(boardCells, prevBoard)
newBoard, bestDir = algo1(boardCells)
if bestDir == "":
print("No moves can be made")
break
makeMove(serialPort, bestDir, 1.0)
print("Expected", newBoard)
prevBoard = newBoard[:]
writeSerialCmd(straightPos, 2.0)
|
{"hexsha": "0aa75cc857ce6615a7c8a612ace54be1b4f547fa", "size": 7970, "ext": "py", "lang": "Python", "max_stars_repo_path": "Tests/TestImgRecognitionAndMotorControl/Test2048Detect5.py", "max_stars_repo_name": "robdobsn/RobotPlay2048", "max_stars_repo_head_hexsha": "0715fd67313ccf6015871c2a73f38de3ca014f10", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Tests/TestImgRecognitionAndMotorControl/Test2048Detect5.py", "max_issues_repo_name": "robdobsn/RobotPlay2048", "max_issues_repo_head_hexsha": "0715fd67313ccf6015871c2a73f38de3ca014f10", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Tests/TestImgRecognitionAndMotorControl/Test2048Detect5.py", "max_forks_repo_name": "robdobsn/RobotPlay2048", "max_forks_repo_head_hexsha": "0715fd67313ccf6015871c2a73f38de3ca014f10", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.0598290598, "max_line_length": 110, "alphanum_fraction": 0.5447929737, "include": true, "reason": "import numpy", "num_tokens": 2368}
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
__weights_dict = dict()
def load_weights(weight_file):
    if weight_file is None:
        return
    try:
        # allow_pickle is required by modern numpy to load the object dict
        weights_dict = np.load(weight_file, allow_pickle=True).item()
    except:
        weights_dict = np.load(weight_file, allow_pickle=True, encoding='bytes').item()
    return weights_dict
class KitModel(nn.Module):
def __init__(self, weight_file):
super(KitModel, self).__init__()
global __weights_dict
__weights_dict = load_weights(weight_file)
self.vgg_16_conv1_conv1_1_Conv2D = self.__conv(2, name='vgg_16/conv1/conv1_1/Conv2D', in_channels=3, out_channels=64, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.vgg_16_conv1_conv1_2_Conv2D = self.__conv(2, name='vgg_16/conv1/conv1_2/Conv2D', in_channels=64, out_channels=64, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.vgg_16_conv2_conv2_1_Conv2D = self.__conv(2, name='vgg_16/conv2/conv2_1/Conv2D', in_channels=64, out_channels=128, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.vgg_16_conv2_conv2_2_Conv2D = self.__conv(2, name='vgg_16/conv2/conv2_2/Conv2D', in_channels=128, out_channels=128, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.vgg_16_conv3_conv3_1_Conv2D = self.__conv(2, name='vgg_16/conv3/conv3_1/Conv2D', in_channels=128, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.vgg_16_conv3_conv3_2_Conv2D = self.__conv(2, name='vgg_16/conv3/conv3_2/Conv2D', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.vgg_16_conv3_conv3_3_Conv2D = self.__conv(2, name='vgg_16/conv3/conv3_3/Conv2D', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.vgg_16_conv4_conv4_1_Conv2D = self.__conv(2, name='vgg_16/conv4/conv4_1/Conv2D', in_channels=256, out_channels=512, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.vgg_16_conv4_conv4_2_Conv2D = self.__conv(2, name='vgg_16/conv4/conv4_2/Conv2D', in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.vgg_16_conv4_conv4_3_Conv2D = self.__conv(2, name='vgg_16/conv4/conv4_3/Conv2D', in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.vgg_16_conv5_conv5_1_Conv2D = self.__conv(2, name='vgg_16/conv5/conv5_1/Conv2D', in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.vgg_16_conv5_conv5_2_Conv2D = self.__conv(2, name='vgg_16/conv5/conv5_2/Conv2D', in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.vgg_16_conv5_conv5_3_Conv2D = self.__conv(2, name='vgg_16/conv5/conv5_3/Conv2D', in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
self.vgg_16_fc6_Conv2D = self.__conv(2, name='vgg_16/fc6/Conv2D', in_channels=512, out_channels=4096, kernel_size=(7, 7), stride=(1, 1), groups=1, bias=True)
self.vgg_16_fc7_Conv2D = self.__conv(2, name='vgg_16/fc7/Conv2D', in_channels=4096, out_channels=4096, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
self.vgg_16_fc8_Conv2D = self.__conv(2, name='vgg_16/fc8/Conv2D', in_channels=4096, out_channels=110, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
def forward(self, x,get_feature=False):
vgg_16_conv1_conv1_1_Conv2D_pad = F.pad(x, (1, 1, 1, 1))
vgg_16_conv1_conv1_1_Conv2D = self.vgg_16_conv1_conv1_1_Conv2D(vgg_16_conv1_conv1_1_Conv2D_pad)
vgg_16_conv1_conv1_1_Relu = F.relu(vgg_16_conv1_conv1_1_Conv2D)
vgg_16_conv1_conv1_2_Conv2D_pad = F.pad(vgg_16_conv1_conv1_1_Relu, (1, 1, 1, 1))
vgg_16_conv1_conv1_2_Conv2D = self.vgg_16_conv1_conv1_2_Conv2D(vgg_16_conv1_conv1_2_Conv2D_pad)
vgg_16_conv1_conv1_2_Relu = F.relu(vgg_16_conv1_conv1_2_Conv2D)
vgg_16_pool1_MaxPool = F.max_pool2d(vgg_16_conv1_conv1_2_Relu, kernel_size=(2, 2), stride=(2, 2), padding=0, ceil_mode=False)
vgg_16_conv2_conv2_1_Conv2D_pad = F.pad(vgg_16_pool1_MaxPool, (1, 1, 1, 1))
vgg_16_conv2_conv2_1_Conv2D = self.vgg_16_conv2_conv2_1_Conv2D(vgg_16_conv2_conv2_1_Conv2D_pad)
vgg_16_conv2_conv2_1_Relu = F.relu(vgg_16_conv2_conv2_1_Conv2D)
vgg_16_conv2_conv2_2_Conv2D_pad = F.pad(vgg_16_conv2_conv2_1_Relu, (1, 1, 1, 1))
vgg_16_conv2_conv2_2_Conv2D = self.vgg_16_conv2_conv2_2_Conv2D(vgg_16_conv2_conv2_2_Conv2D_pad)
vgg_16_conv2_conv2_2_Relu = F.relu(vgg_16_conv2_conv2_2_Conv2D)
vgg_16_pool2_MaxPool = F.max_pool2d(vgg_16_conv2_conv2_2_Relu, kernel_size=(2, 2), stride=(2, 2), padding=0, ceil_mode=False)
vgg_16_conv3_conv3_1_Conv2D_pad = F.pad(vgg_16_pool2_MaxPool, (1, 1, 1, 1))
vgg_16_conv3_conv3_1_Conv2D = self.vgg_16_conv3_conv3_1_Conv2D(vgg_16_conv3_conv3_1_Conv2D_pad)
vgg_16_conv3_conv3_1_Relu = F.relu(vgg_16_conv3_conv3_1_Conv2D)
vgg_16_conv3_conv3_2_Conv2D_pad = F.pad(vgg_16_conv3_conv3_1_Relu, (1, 1, 1, 1))
vgg_16_conv3_conv3_2_Conv2D = self.vgg_16_conv3_conv3_2_Conv2D(vgg_16_conv3_conv3_2_Conv2D_pad)
vgg_16_conv3_conv3_2_Relu = F.relu(vgg_16_conv3_conv3_2_Conv2D)
vgg_16_conv3_conv3_3_Conv2D_pad = F.pad(vgg_16_conv3_conv3_2_Relu, (1, 1, 1, 1))
vgg_16_conv3_conv3_3_Conv2D = self.vgg_16_conv3_conv3_3_Conv2D(vgg_16_conv3_conv3_3_Conv2D_pad)
vgg_16_conv3_conv3_3_Relu = F.relu(vgg_16_conv3_conv3_3_Conv2D)
vgg_16_pool3_MaxPool = F.max_pool2d(vgg_16_conv3_conv3_3_Relu, kernel_size=(2, 2), stride=(2, 2), padding=0, ceil_mode=False)
vgg_16_conv4_conv4_1_Conv2D_pad = F.pad(vgg_16_pool3_MaxPool, (1, 1, 1, 1))
vgg_16_conv4_conv4_1_Conv2D = self.vgg_16_conv4_conv4_1_Conv2D(vgg_16_conv4_conv4_1_Conv2D_pad)
vgg_16_conv4_conv4_1_Relu = F.relu(vgg_16_conv4_conv4_1_Conv2D)
vgg_16_conv4_conv4_2_Conv2D_pad = F.pad(vgg_16_conv4_conv4_1_Relu, (1, 1, 1, 1))
vgg_16_conv4_conv4_2_Conv2D = self.vgg_16_conv4_conv4_2_Conv2D(vgg_16_conv4_conv4_2_Conv2D_pad)
vgg_16_conv4_conv4_2_Relu = F.relu(vgg_16_conv4_conv4_2_Conv2D)
vgg_16_conv4_conv4_3_Conv2D_pad = F.pad(vgg_16_conv4_conv4_2_Relu, (1, 1, 1, 1))
vgg_16_conv4_conv4_3_Conv2D = self.vgg_16_conv4_conv4_3_Conv2D(vgg_16_conv4_conv4_3_Conv2D_pad)
vgg_16_conv4_conv4_3_Relu = F.relu(vgg_16_conv4_conv4_3_Conv2D)
vgg_16_pool4_MaxPool = F.max_pool2d(vgg_16_conv4_conv4_3_Relu, kernel_size=(2, 2), stride=(2, 2), padding=0, ceil_mode=False)
vgg_16_conv5_conv5_1_Conv2D_pad = F.pad(vgg_16_pool4_MaxPool, (1, 1, 1, 1))
vgg_16_conv5_conv5_1_Conv2D = self.vgg_16_conv5_conv5_1_Conv2D(vgg_16_conv5_conv5_1_Conv2D_pad)
vgg_16_conv5_conv5_1_Relu = F.relu(vgg_16_conv5_conv5_1_Conv2D)
vgg_16_conv5_conv5_2_Conv2D_pad = F.pad(vgg_16_conv5_conv5_1_Relu, (1, 1, 1, 1))
vgg_16_conv5_conv5_2_Conv2D = self.vgg_16_conv5_conv5_2_Conv2D(vgg_16_conv5_conv5_2_Conv2D_pad)
vgg_16_conv5_conv5_2_Relu = F.relu(vgg_16_conv5_conv5_2_Conv2D)
vgg_16_conv5_conv5_3_Conv2D_pad = F.pad(vgg_16_conv5_conv5_2_Relu, (1, 1, 1, 1))
vgg_16_conv5_conv5_3_Conv2D = self.vgg_16_conv5_conv5_3_Conv2D(vgg_16_conv5_conv5_3_Conv2D_pad)
vgg_16_conv5_conv5_3_Relu = F.relu(vgg_16_conv5_conv5_3_Conv2D)
vgg_16_pool5_MaxPool = F.max_pool2d(vgg_16_conv5_conv5_3_Relu, kernel_size=(2, 2), stride=(2, 2), padding=0, ceil_mode=False)
vgg_16_fc6_Conv2D = self.vgg_16_fc6_Conv2D(vgg_16_pool5_MaxPool)
vgg_16_fc6_Relu = F.relu(vgg_16_fc6_Conv2D)
vgg_16_fc7_Conv2D = self.vgg_16_fc7_Conv2D(vgg_16_fc6_Relu)
vgg_16_fc7_Relu = F.relu(vgg_16_fc7_Conv2D)
vgg_16_fc8_Conv2D = self.vgg_16_fc8_Conv2D(vgg_16_fc7_Relu)
vgg_16_fc8_squeezed = torch.squeeze(vgg_16_fc8_Conv2D)
return vgg_16_fc8_squeezed
@staticmethod
def __conv(dim, name, **kwargs):
if dim == 1: layer = nn.Conv1d(**kwargs)
elif dim == 2: layer = nn.Conv2d(**kwargs)
elif dim == 3: layer = nn.Conv3d(**kwargs)
else: raise NotImplementedError()
layer.state_dict()['weight'].copy_(torch.from_numpy(__weights_dict[name]['weights']))
if 'bias' in __weights_dict[name]:
layer.state_dict()['bias'].copy_(torch.from_numpy(__weights_dict[name]['bias']))
return layer
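# Minimal usage sketch (not part of the converted file): 'vgg16_weights.npy'
# is a hypothetical MMdnn-style weight dump; any float NCHW tensor of shape
# (1, 3, 224, 224) produces 110 logits after the final squeeze.
if __name__ == '__main__':
    model = KitModel('vgg16_weights.npy')
    model.eval()
    with torch.no_grad():
        logits = model(torch.randn(1, 3, 224, 224))
    print(logits.shape)  # expected: torch.Size([110])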
|
{"hexsha": "9a7a842b68c4f61e8944a20671fa0b27cc9644dc", "size": 8502, "ext": "py", "lang": "Python", "max_stars_repo_path": "models_old/tf_to_pytorch_vgg16.py", "max_stars_repo_name": "jiangyangzhou/Non-targeted-Attack-IJCAI2019-ColdRiver", "max_stars_repo_head_hexsha": "f9f26b4e00241c7831a2e46a0a2c965457fe99e5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2019-06-19T15:16:07.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-19T01:33:10.000Z", "max_issues_repo_path": "models_old/tf_to_pytorch_vgg16.py", "max_issues_repo_name": "jiangyangzhou/Non-targeted-Attack-IJCAI2019-ColdRiver", "max_issues_repo_head_hexsha": "f9f26b4e00241c7831a2e46a0a2c965457fe99e5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "models_old/tf_to_pytorch_vgg16.py", "max_forks_repo_name": "jiangyangzhou/Non-targeted-Attack-IJCAI2019-ColdRiver", "max_forks_repo_head_hexsha": "f9f26b4e00241c7831a2e46a0a2c965457fe99e5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-06-04T09:27:10.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-12T06:13:16.000Z", "avg_line_length": 76.5945945946, "max_line_length": 184, "alphanum_fraction": 0.742178311, "include": true, "reason": "import numpy", "num_tokens": 3418}
|
import numpy as np
import numpy.matlib
def get_rigid_transform(A, B):
cenA = np.mean(A, 0) # 3
cenB = np.mean(B, 0) # 3
N = A.shape[0] # 24
H = np.dot((B - np.matlib.repmat(cenB, N, 1)).transpose(), (A - np.matlib.repmat(cenA, N, 1)))
[U, _, V] = np.linalg.svd(H)
    # numpy's svd returns V already transposed, so U @ V is the rotation
    R = np.dot(U, V)  # matlab returns the transpose: .transpose()
    if np.linalg.det(R) < 0:
        # Reflection case: flip the last column of U and rebuild with the same
        # product as the main branch (the original used V.transpose() here,
        # which is inconsistent with the computation above)
        U[:, 2] = -U[:, 2]
        R = np.dot(U, V)
t = np.dot(-R, cenA.transpose()) + cenB.transpose()
return R, t
def get_affine_trans(target, source):
rigid_transform = get_rigid_transform(source, target)
# Concatenate rotation and translation
rigid_transform = np.asarray(
np.concatenate((rigid_transform[0], np.matrix(rigid_transform[1]).T), axis=1)
)
rigid_transform = np.concatenate((rigid_transform, np.array([[0, 0, 0, 1]])))
return rigid_transform
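# Minimal self-check sketch (not part of the original module): recover a known
# rotation and translation from two synthetic point clouds.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    source = rng.normal(size=(24, 3))
    theta = 0.3
    R_true = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                       [np.sin(theta), np.cos(theta), 0.0],
                       [0.0, 0.0, 1.0]])
    target = source @ R_true.T + np.array([1.0, -2.0, 0.5])
    T = get_affine_trans(target, source)
    print(np.round(T, 3))  # top-left 3x3 block ~ R_true, last column ~ translation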
|
{"hexsha": "96c5665eda74a7ad5e0837fa5e2870c6c62e0084", "size": 899, "ext": "py", "lang": "Python", "max_stars_repo_path": "meshreg/datasets/coordutils.py", "max_stars_repo_name": "pgrady3/handobjectconsist", "max_stars_repo_head_hexsha": "9651c569c328707cc1ad1e4797b9e4b32083c446", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 103, "max_stars_repo_stars_event_min_datetime": "2020-06-05T10:55:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T06:26:45.000Z", "max_issues_repo_path": "meshreg/datasets/coordutils.py", "max_issues_repo_name": "jonashein/handobjectnet_baseline", "max_issues_repo_head_hexsha": "29175be4528f68b8a2aa6dc6aa37ee0a042f93ab", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 21, "max_issues_repo_issues_event_min_datetime": "2020-06-17T21:26:38.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-27T03:24:37.000Z", "max_forks_repo_path": "meshreg/datasets/coordutils.py", "max_forks_repo_name": "jonashein/handobjectnet_baseline", "max_forks_repo_head_hexsha": "29175be4528f68b8a2aa6dc6aa37ee0a042f93ab", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2020-06-07T07:24:26.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T06:26:48.000Z", "avg_line_length": 31.0, "max_line_length": 98, "alphanum_fraction": 0.6129032258, "include": true, "reason": "import numpy", "num_tokens": 288}
|
function metID = findMetIDs(model, metList)
% Finds metabolite numbers in a model
%
% USAGE:
%
%    metID = findMetIDs(model, metList)
%
% INPUTS:
% model: COBRA model structure
% metList: List of metabolites
%
% OUTPUT:
% metID: List of metabolite IDs corresponding to `metList`
%
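% EXAMPLE:
%
%    % illustrative only -- assumes the model contains these metabolite IDs
%    metIDs = findMetIDs(model, {'atp[c]'; 'adp[c]'});
%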
% .. Author: - Jan Schellenberger 8/15/08
if (iscell(metList))
[tmp,metID] = ismember(metList,model.mets);
else
metID = find(strcmp(model.mets,metList));
if (isempty(metID))
metID = 0;
end
if (length(metID) > 1)
metID = metID(1);
end
end
|
{"author": "opencobra", "repo": "cobratoolbox", "sha": "e60274d127f65d518535fd0814d20c53dc530f73", "save_path": "github-repos/MATLAB/opencobra-cobratoolbox", "path": "github-repos/MATLAB/opencobra-cobratoolbox/cobratoolbox-e60274d127f65d518535fd0814d20c53dc530f73/src/analysis/exploration/findMetIDs.m"}
|
subsection {* Weakest precondition calculus *}
theory utp_wp
imports "../hoare/utp_hoare"
begin
text {* A very quick implementation of wp -- more laws still needed! *}
named_theorems wp
method wp_tac = (simp add: wp)
consts
uwp :: "'a \<Rightarrow> 'b \<Rightarrow> 'c" (infix "wp" 60)
definition wp_upred :: "('\<alpha>, '\<beta>) rel \<Rightarrow> '\<beta> cond \<Rightarrow> '\<alpha> cond" where
"wp_upred Q r = \<lfloor>\<not> (Q ;; (\<not> \<lceil>r\<rceil>\<^sub><)) :: ('\<alpha>, '\<beta>) rel\<rfloor>\<^sub><"
adhoc_overloading
uwp wp_upred
declare wp_upred_def [urel_defs]
theorem wp_assigns_r [wp]:
"\<langle>\<sigma>\<rangle>\<^sub>a wp r = \<sigma> \<dagger> r"
by rel_auto
theorem wp_skip_r [wp]:
"II wp r = r"
by rel_auto
theorem wp_true [wp]:
"r \<noteq> true \<Longrightarrow> true wp r = false"
by rel_auto
theorem wp_conj [wp]:
"P wp (q \<and> r) = (P wp q \<and> P wp r)"
by rel_auto
theorem wp_seq_r [wp]: "(P ;; Q) wp r = P wp (Q wp r)"
by rel_auto
theorem wp_cond [wp]: "(P \<triangleleft> b \<triangleright>\<^sub>r Q) wp r = ((b \<Rightarrow> P wp r) \<and> ((\<not> b) \<Rightarrow> Q wp r))"
by rel_auto
theorem wp_hoare_link:
"\<lbrace>p\<rbrace>Q\<lbrace>r\<rbrace>\<^sub>u \<longleftrightarrow> (Q wp r \<sqsubseteq> p)"
by rel_auto
text {* If two programs have the same weakest precondition for any postcondition then the programs
are the same. *}
theorem wp_eq_intro: "\<lbrakk> \<And> r. P wp r = Q wp r \<rbrakk> \<Longrightarrow> P = Q"
by (rel_auto robust, fastforce+)
end
|
{"author": "git-vt", "repo": "orca", "sha": "92bda0f9cfe5cc680b9c405fc38f07a960087a36", "save_path": "github-repos/isabelle/git-vt-orca", "path": "github-repos/isabelle/git-vt-orca/orca-92bda0f9cfe5cc680b9c405fc38f07a960087a36/Archive/Programming-Languages-Semantics/WP11-C-semantics/src/orca/utp/utp_wp.thy"}
|
import numpy as np
from multiprocessing import Pool
import os
from sklearn.feature_extraction import image
def _denoise_pixel(img, x, y, K, L, sig):
def getBlock(x, y):
return img[x - halfK: x + halfK + 1, y - halfK: y + halfK + 1]
# def mse(block):
# return np.mean((block - target)**2)
halfK = K//2
halfL = L//2
# Dimension of each block vector (= number of rows in the training matrix)
m = K**2
# Number of columns in the training matrix
n = m * 8 + 1
# Block centered around x,y
target = getBlock(x, y)
# Assemble a pool of blocks.
dim1, dim2 = img.shape
rng = halfL - halfK
    # image.extract_patches was removed from scikit-learn; extract_patches_2d
    # yields the same K x K patches in the same row-major order
    blocks = image.extract_patches_2d(
        img[max(K, x - rng) - halfK : min(x + rng + 1, dim2 - K) + halfK,
            max(K, y - rng) - halfK : min(y + rng + 1, dim1 - K) + halfK], (K, K)
    )
# Sort by MSE
sortIndexes = ((blocks - target)**2).reshape(blocks.shape[0], m, order = 'F').mean(axis = 1).argsort()
# Construct the training matrix with the target and the best blocks reshaped into columns.
trainingMatrix = blocks[sortIndexes].reshape(blocks.shape[0], m, order = 'F').swapaxes(1, 0)[:,:n+1]
mean = trainingMatrix.mean(axis=1)
trainingMatrix = trainingMatrix - mean.reshape(m, 1)
noiseCov = sig**2 * np.eye(m, m)
inputCov = (trainingMatrix @ trainingMatrix.T)/n
eigvectors = np.linalg.eig(inputCov)[1]
PX = eigvectors.T
transInput = PX @ trainingMatrix
transNoiseCov = PX @ noiseCov @ PX.T
transInputCov = (transInput @ transInput.T)/n
transDenoisedOutCov = np.maximum(np.zeros(transInputCov.shape), transInputCov - transNoiseCov)
shrinkCoef = np.diag(transDenoisedOutCov)/(np.diag(transDenoisedOutCov) + np.diag(transInputCov))
Y1 = transInput[:, 0] * shrinkCoef
X1 = PX.T @ Y1 + mean
return X1[m//2]
def _denoise_row(img, x, left_y, right_y, K, L, sig, log):
if log:
print(x)
return (x, left_y, right_y,
[_denoise_pixel(img, x, y, K, L, sig) for y in range(left_y, right_y)])
def _denoise_image(img, K, L, sig, log):
global outImg
outImg = np.copy(img)
width, height = img.shape
halfL = L // 2
halfK = K // 2
def denoiseRowCallback(result):
global outImg
x, y_left, y_right, data = result
outImg[x, y_left:y_right] = data
global pool
# parallel
progress = [pool.apply_async(_denoise_row, (img, x, halfK, height - halfK, K, L, sig, log,), callback=denoiseRowCallback) for x in range(halfK, width - halfK)]
for each in progress:
each.wait()
# non-parallel:
# for x in range(halfK, width - halfK):
# if log:
# print(x)
# for y in range(halfK, height - halfK):
# outImg[x, y] = _denoise_pixel(img, x, y, K, L, sig)
return outImg
def denoise(noised_img, sig1, K=3, L=21, log=False):
global pool
try:
pool # pool already exists
except NameError:
# creating new pool
pool = Pool(os.cpu_count() - 1) # don't use all cores, your UI may start to lag
stage1 = _denoise_image(noised_img, K, L, sig1, log)
    # Stage-2 noise estimate from the LPG-PCA paper: sig_s = c_s * sqrt(sig^2 - MSE)
    sig2 = 0.35 * np.sqrt(sig1**2 - np.mean((stage1 - noised_img)**2))
if log:
print('sig2 = ', sig2)
stage2 = _denoise_image(stage1, K, L, sig2, log)
# pool.terminate()
return stage2
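# Minimal smoke-test sketch (not from the original repo); assumes a float
# grayscale image, and keeps sizes tiny because the per-pixel loop is slow.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    clean = rng.uniform(0, 255, size=(40, 40))
    sigma = 10.0
    noisy = clean + rng.normal(0, sigma, size=clean.shape)
    restored = denoise(noisy, sigma)
    print('input MSE :', np.mean((noisy - clean)**2))
    print('output MSE:', np.mean((restored - clean)**2))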
|
{"hexsha": "edcef0c537fb57e44c80a68a94ef9ead44d9a0e2", "size": 3493, "ext": "py", "lang": "Python", "max_stars_repo_path": "lpg_pca_impl.py", "max_stars_repo_name": "delmarrerikaine/LPG-PCA", "max_stars_repo_head_hexsha": "deb631ee2c4c88190ce4204fcbc0765ae5cd8f53", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-07T01:00:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-07T01:00:18.000Z", "max_issues_repo_path": "lpg_pca_impl.py", "max_issues_repo_name": "delmarrerikaine/LPG-PCA", "max_issues_repo_head_hexsha": "deb631ee2c4c88190ce4204fcbc0765ae5cd8f53", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lpg_pca_impl.py", "max_forks_repo_name": "delmarrerikaine/LPG-PCA", "max_forks_repo_head_hexsha": "deb631ee2c4c88190ce4204fcbc0765ae5cd8f53", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-06-29T16:30:32.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-18T17:40:47.000Z", "avg_line_length": 30.3739130435, "max_line_length": 164, "alphanum_fraction": 0.5903235042, "include": true, "reason": "import numpy", "num_tokens": 1064}
|
from wordcloud import WordCloud
import numpy as np
import jieba
from PIL import Image
import os
from os import path
import matplotlib.pyplot as plt
def draw_wordCloud():
    comment_text = open('text.txt', 'r', encoding='utf-8').read()
    # Join the segmented words with spaces so WordCloud can tokenize them again
    cut_text = " ".join(jieba.cut(comment_text))
    # scipy.misc.imread was removed from SciPy; load the mask image through PIL
    color_mask = np.array(Image.open('a770e9a89504c1cfd3be62c3b6b3b2c6.jpg'))
temp_path = path.dirname(__file__)
cloud = WordCloud(
font_path = path.join(temp_path, 'font.ttf'),
background_color = 'white',
mask = color_mask,
max_words = 2000,
max_font_size = 80
)
word_cloud = cloud.generate(cut_text)
word_cloud.to_file('result.jpg')
    os.startfile('result.jpg')  # Windows-only; elsewhere use the matplotlib preview below
# plt.imshow(word_cloud)
# plt.axis('off')
# plt.show()
def main():
    draw_wordCloud()
if __name__ == '__main__':
    main()
|
{"hexsha": "17fd2019deb2b7d429dc226f83dac98e47419fb3", "size": 820, "ext": "py", "lang": "Python", "max_stars_repo_path": "test.py", "max_stars_repo_name": "mental2008/wordcloud", "max_stars_repo_head_hexsha": "ff9c2d83ddc438e7663d2315860915ca1106d334", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test.py", "max_issues_repo_name": "mental2008/wordcloud", "max_issues_repo_head_hexsha": "ff9c2d83ddc438e7663d2315860915ca1106d334", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test.py", "max_forks_repo_name": "mental2008/wordcloud", "max_forks_repo_head_hexsha": "ff9c2d83ddc438e7663d2315860915ca1106d334", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.1176470588, "max_line_length": 63, "alphanum_fraction": 0.6731707317, "include": true, "reason": "import numpy,from scipy", "num_tokens": 221}
|
import Mercury as Hg
import ProtoBuf
using Sockets
using ZMQ
using BenchmarkTools
using Test
using Logging
Logging.disable_logging(Logging.Info)
# Generate ProtoBuf julia files
outdir = joinpath(@__DIR__, "jlout")
if !isdir(outdir)
Base.Filesystem.mkdir(outdir)
end
protodir = joinpath(@__DIR__, "proto")
msgfile = joinpath(protodir, "test_msg.proto")
ProtoBuf.protoc(`-I=$protodir --julia_out=$outdir $msgfile`)
include(joinpath(@__DIR__, "jlout", "test_msg_pb.jl"))
include("publisher_tests.jl")
include("subscriber_tests.jl")
if Sys.islinux()
include("rate_limiter_tests.jl")
end
include("node_tests.jl")
|
{"hexsha": "8de25d28410bc3f3c03e289cc254d23420bc0e5d", "size": 619, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "RoboticExplorationLab/Mercury.jl", "max_stars_repo_head_hexsha": "8d000b623ee1a2d5ca676ea10847de3abe6f46b5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-09-08T16:53:22.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-14T03:35:25.000Z", "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "RoboticExplorationLab/Mercury.jl", "max_issues_repo_head_hexsha": "8d000b623ee1a2d5ca676ea10847de3abe6f46b5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2021-09-09T21:27:50.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-03T14:59:25.000Z", "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "RoboticExplorationLab/Mercury.jl", "max_forks_repo_head_hexsha": "8d000b623ee1a2d5ca676ea10847de3abe6f46b5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.9259259259, "max_line_length": 60, "alphanum_fraction": 0.7689822294, "num_tokens": 177}
|
import logging
import ransac.core as ransac
import ransac.models.circle as circle_model
import random
import math
import matplotlib.pyplot as plt
import numpy as np
logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(levelname)s \t%(message)s')
def main():
logging.info("create_circle_modeler.py main()")
real_center = (5, 3)
real_radius = 4.5
number_of_inliers = 200
inliers_noise = 0.5
number_of_outliers = 50
outliers_range = [-12, 12]
number_of_trials = 100
acceptable_error = 0.7
xy_tuples = []
for inlierNdx in range(number_of_inliers):
theta = random.uniform(0, 2 * math.pi)
p = [real_center[0] + real_radius * math.cos(theta),
real_center[1] + real_radius * math.sin(theta)]
p[0] += random.uniform(-inliers_noise, inliers_noise)
p[1] += random.uniform(-inliers_noise, inliers_noise)
xy_tuples.append((p, 0))
for outlierNdx in range(number_of_outliers):
p = (random.uniform(outliers_range[0], outliers_range[1]),
random.uniform(outliers_range[0], outliers_range[1]))
xy_tuples.append((p, 0))
# Create the circle modeler
circle_modeler = ransac.Modeler(circle_model.Circle, number_of_trials, acceptable_error)
consensus_circle, inliers_list, outliers_list = circle_modeler.ConsensusModel(xy_tuples)
logging.info("consensus_circle.center = {}; consensus_circle.radius = {}".format(consensus_circle.center, consensus_circle.radius))
real_circle_list = [(real_center[0] + real_radius * math.cos(theta),
real_center[1] + real_radius * math.sin(theta))
for theta in np.arange(0, 2 * math.pi, 0.01)]
found_circle_list = [(consensus_circle.center[0] + consensus_circle.radius * math.cos(theta),
consensus_circle.center[1] + consensus_circle.radius * math.sin(theta))
for theta in np.arange(0, 2 * math.pi, 0.01)]
# Display the results
fig, ax = plt.subplots()
ax.scatter([inlier[0][0] for inlier in inliers_list], [inlier[0][1] for inlier in inliers_list],
c='green', label='inliers')
ax.scatter([outlier[0][0] for outlier in outliers_list], [outlier[0][1] for outlier in outliers_list],
c='red', label='outliers')
ax.scatter([p[0] for p in real_circle_list], [p[1] for p in real_circle_list],
c='blue', label='real circle', s=1)
ax.scatter([p[0] for p in found_circle_list], [p[1] for p in found_circle_list],
c='fuchsia', label='consensus circle', s=1)
ax.legend()
ax.grid(True)
plt.show()
if __name__ == '__main__':
main()
|
{"hexsha": "30c3228bc6dc2fd713a50be8117295f9074c1d04", "size": 2702, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/create_circle_modeler.py", "max_stars_repo_name": "sebastiengilbert73/ransac", "max_stars_repo_head_hexsha": "4c4d683e58b6b73e7877b18d9700b7c63045710a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/create_circle_modeler.py", "max_issues_repo_name": "sebastiengilbert73/ransac", "max_issues_repo_head_hexsha": "4c4d683e58b6b73e7877b18d9700b7c63045710a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/create_circle_modeler.py", "max_forks_repo_name": "sebastiengilbert73/ransac", "max_forks_repo_head_hexsha": "4c4d683e58b6b73e7877b18d9700b7c63045710a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.5692307692, "max_line_length": 135, "alphanum_fraction": 0.651369356, "include": true, "reason": "import numpy", "num_tokens": 705}
|
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import SGDClassifier
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from data_helper_ml import load_data_and_labels
import numpy as np
categories = ['good', 'bad', 'mid']
# data_helper_ml implements the text-cleaning steps, e.g. input normalization and stop-word removal
x_text, y = load_data_and_labels("./data/good_cut_jieba.txt", "./data/bad_cut_jieba.txt", "./data/mid_cut_jieba.txt")
# Split the dataset
x_train, x_test, y_train, y_test = train_test_split(x_text, y, test_size=0.2, random_state=42)
y = y.ravel()
y_train = y_train.ravel()
y_test = y_test.ravel()
print("Train/Test split: {:d}/{:d}".format(len(y_train), len(y_test)))
""" Naive Bayes classifier """
# sklearn's mature Pipeline API makes it quick to assemble an end-to-end model
bayes_clf = Pipeline([('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', MultinomialNB())
])
bayes_clf.fit(x_train, y_train)
""" Predict the test dataset using Naive Bayes"""
predicted = bayes_clf.predict(x_test)
print('Naive Bayes correct prediction: {:4.4f}'.format(np.mean(predicted == y_test)))
# Report precision, recall, f1-score and related metrics
print(metrics.classification_report(y_test, predicted, target_names=categories))
""" Support Vector Machine (SVM) classifier"""
svm_clf = Pipeline([('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier(loss='hinge', penalty='l2', alpha=1e-3, max_iter=5, random_state=42)),
])
svm_clf.fit(x_train, y_train)
predicted = svm_clf.predict(x_test)
print('SVM correct prediction: {:4.4f}'.format(np.mean(predicted == y_test)))
print(metrics.classification_report(y_test, predicted, target_names=categories))
# Print the confusion matrix
print("Confusion Matrix:")
print(metrics.confusion_matrix(y_test, predicted))
print('\n')
""" 10-折交叉验证 """
clf_b = make_pipeline(CountVectorizer(), TfidfTransformer(), MultinomialNB())
clf_s = make_pipeline(CountVectorizer(), TfidfTransformer(),
                      SGDClassifier(loss='hinge', penalty='l2', alpha=1e-3, max_iter=5, random_state=42))
bayes_10_fold = cross_val_score(clf_b, x_text, y, cv=10)
svm_10_fold = cross_val_score(clf_s, x_text, y, cv=10)
print('Naives Bayes 10-fold correct prediction: {:4.4f}'.format(np.mean(bayes_10_fold)))
print('SVM 10-fold correct prediction: {:4.4f}'.format(np.mean(svm_10_fold)))
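# Quick sanity check (illustrative): classify one held-out review with the
# fitted Bayes pipeline; entries of x_test are already space-separated tokens.
print('Sample text :', x_test[0][:50])
print('Sample label:', bayes_clf.predict(x_test[:1]))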
|
{"hexsha": "f294cb2b0652e0eecc17fd7120b15a25f42a484b", "size": 2702, "ext": "py", "lang": "Python", "max_stars_repo_path": "alpha/prediction/or.py", "max_stars_repo_name": "MingJerry/Guide", "max_stars_repo_head_hexsha": "0ac6ee9d20a579a93bcf9a90c53937179fdf6875", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "alpha/prediction/or.py", "max_issues_repo_name": "MingJerry/Guide", "max_issues_repo_head_hexsha": "0ac6ee9d20a579a93bcf9a90c53937179fdf6875", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-06-06T01:23:28.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T14:19:58.000Z", "max_forks_repo_path": "alpha/prediction/or.py", "max_forks_repo_name": "MingJerry/Guide", "max_forks_repo_head_hexsha": "0ac6ee9d20a579a93bcf9a90c53937179fdf6875", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.0333333333, "max_line_length": 118, "alphanum_fraction": 0.7124352332, "include": true, "reason": "import numpy", "num_tokens": 750}
|
[STATEMENT]
lemma aL_circ_ext:
"|x\<^sup>\<star>]y \<le> |aL * x\<^sup>\<circ>]y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. | x\<^sup>\<star> ] y \<le> | aL * x\<^sup>\<circ> ] y
[PROOF STEP]
by (simp add: aL_circ box_left_antitone)
|
{"llama_tokens": 115, "file": "Correctness_Algebras_Hoare_Modal", "length": 1}
|
import numpy as np
from ..utils import fix_dim_gmm
from .base import Acquisition, AcquisitionWeighted
class IVR(Acquisition):
"""A class for Integrated Variance Reduction.
Parameters
----------
model, inputs : see parent class (Acquisition)
Attributes
----------
model, inputs : see Parameters
"""
def evaluate(self, x):
x = np.atleast_2d(x)
_, var = self.model.predict_noiseless(x) # Noiseless ghost point
#_, var = self.model.predict(x)
integral = self.integrate_covariance(x)
if self.model.normalizer:
var /= self.model.normalizer.std**2
ivr = integral / var
return -ivr
def jacobian(self, x):
x = np.atleast_2d(x)
_, var = self.model.predict_noiseless(x) # Noiseless ghost point
#_, var = self.model.predict(x)
_, var_jac = self.model.predictive_gradients(x)
integral = self.integrate_covariance(x)
integral_jac = self.integrate_covariance_jacobian(x)
if self.model.normalizer:
var /= self.model.normalizer.std**2
ivr_jac = (integral_jac*var - integral*var_jac) / var**2
return -ivr_jac
def integrate_covariance(self, x):
"""Compute \int cov(x,x')^2 dx'."""
K = self.model.kern
X = self.model.X
Skk_inv = self.model.posterior.woodbury_inv
y_k = np.dot(Skk_inv, K.K(X,x))
term1 = K.IntKK(x)
term2 = np.dot(K.IntKK(X), y_k)
term3 = K.IntKK(X,x)
int_cov = term1 + np.dot(y_k.T, term2 - 2*term3)
int_cov = np.diag(int_cov)[:,None]
return int_cov
def integrate_covariance_jacobian(self, x):
"""Compute d/dx \int cov(x,x')^2 dx'."""
K = self.model.kern
X = self.model.X
Skk_inv = self.model.posterior.woodbury_inv
jac_ker = -K.gradients_X(np.ones((1,x.shape[0])), X, x) # Beware minus sign!
y_k = np.dot(Skk_inv, K.K(X,x))
jac_y_k = np.dot(Skk_inv, jac_ker)
dterm1_dX = 2*K.dIntKK_dX(x)
dterm2_dX = np.dot(K.IntKK(X), jac_y_k)
dterm3_dX = K.dIntKK_dX(x,X)
int_jac = dterm1_dX + 2*np.dot(y_k.T, dterm2_dX) \
- 2*np.dot(y_k.T, dterm3_dX) \
- 2*np.dot(K.IntKK(X,x).T, jac_y_k)
return int_jac
class IVR_LW(AcquisitionWeighted, IVR):
"""A class for Likelihood-Weighted Integrated Variance Reduction.
Parameters
----------
model, inputs : see parent class (AcquisitionWeighted)
Attributes
----------
model, inputs : see Parameters
Notes
-----
This subclass overrides `integrate_covariance` and
`integrate_covariance_jacobian` of the `IVR` class.
"""
def integrate_covariance(self, x):
"""Compute \int cov(x,x')^2 w_gmm(x') dx'."""
K = self.model.kern
X = self.model.X
Skk_inv = self.model.posterior.woodbury_inv
y_k = np.dot(Skk_inv, K.K(X,x))
int_cov = 0.0
gmm = self.likelihood.gmm
covs = fix_dim_gmm(gmm, matrix_type="covariance")
for ii in range(gmm.n_components):
mu_i = gmm.means_[ii]
cov_i = covs[ii]
wei = gmm.weights_[ii]
term1 = K.IntKKNorm(x, x, mu_i, cov_i)
term2 = np.dot( K.IntKKNorm(X, X, mu_i, cov_i), y_k )
term3 = K.IntKKNorm(X, x, mu_i, cov_i)
tmp_i = term1 + np.dot(y_k.T, term2 - 2*term3)
int_cov += wei*tmp_i
int_cov = np.diag(int_cov)[:,None]
return int_cov
def integrate_covariance_jacobian(self, x):
"""Compute d/dx \int cov(x,x')^2 w_gmm(x') dx'."""
K = self.model.kern
X = self.model.X
Skk_inv = self.model.posterior.woodbury_inv
jac_ker = -K.gradients_X(np.ones((1,x.shape[0])), X, x) # Beware minus sign!
y_k = np.dot(Skk_inv, K.K(X,x))
jac_y_k = np.dot(Skk_inv, jac_ker)
int_jac = 0.0
gmm = self.likelihood.gmm
covs = fix_dim_gmm(gmm, matrix_type="covariance")
for ii in range(gmm.n_components):
mu_i = gmm.means_[ii]
cov_i = covs[ii]
wei = gmm.weights_[ii]
dterm1_dX = 2*K.dIntKKNorm_dX(x, x, mu_i, cov_i)
dterm2_dX = np.dot( K.IntKKNorm(X,X,mu_i,cov_i), jac_y_k)
dterm3_dX = K.dIntKKNorm_dX(x, X, mu_i, cov_i)
tmp_i = dterm1_dX + 2*np.dot(y_k.T, dterm2_dX) \
- 2*np.dot(y_k.T, dterm3_dX) \
- 2*np.dot(K.IntKKNorm(X,x,mu_i,cov_i).T, jac_y_k)
int_jac += wei*tmp_i
return int_jac
|
{"hexsha": "f1130173d164418d864cbc6827304b98d585ef52", "size": 4636, "ext": "py", "lang": "Python", "max_stars_repo_path": "gpsearch/core/acquisitions/ivr.py", "max_stars_repo_name": "Fluid-Dynamics-Group/gpsearch", "max_stars_repo_head_hexsha": "8c5758c9fb2b623ef79952c3e9c113cb157d79bc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-07-13T00:02:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T08:49:27.000Z", "max_issues_repo_path": "gpsearch/core/acquisitions/ivr.py", "max_issues_repo_name": "Fluid-Dynamics-Group/gpsearch", "max_issues_repo_head_hexsha": "8c5758c9fb2b623ef79952c3e9c113cb157d79bc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gpsearch/core/acquisitions/ivr.py", "max_forks_repo_name": "Fluid-Dynamics-Group/gpsearch", "max_forks_repo_head_hexsha": "8c5758c9fb2b623ef79952c3e9c113cb157d79bc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2020-07-18T13:29:46.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-22T15:14:14.000Z", "avg_line_length": 30.701986755, "max_line_length": 84, "alphanum_fraction": 0.5707506471, "include": true, "reason": "import numpy", "num_tokens": 1383}
|
From sflib Require Import sflib.
From Paco Require Import paco.
Require Import Coq.Classes.RelationClasses Lia Program.
From Fairness Require Export ITreeLib WFLibLarge FairBeh NatStructsLarge Mod pind.
Set Implicit Arguments.
Module WMod.
Variant output (state: Type) (ident: ID) (mident: ID) (R: Type) :=
| normal (st: state) (r: R) (fm: fmap (id_sum ident mident))
| stuck
| disabled
.
Record function (state: Type) (ident: ID) (mident: ID): Type :=
mk_fun {
type: ident;
A: Type;
R: Type;
body: A -> state -> output state ident mident R -> Prop;
}.
Record t: Type :=
mk {
state: Type;
ident: ID;
mident: ID;
st_init: state;
funs: list (fname * function state ident mident);
}.
Section INTERP.
Variable m: t.
Definition interp_state := (m.(state) * NatMap.t m.(ident))%type.
Definition interp_ident := id_sum thread_id m.(mident).
Definition interp_fmap
(fm: fmap (id_sum m.(ident) m.(mident))) (ts: NatMap.t m.(ident)) : fmap interp_ident :=
fun i =>
match i with
| inl i =>
match NatMap.find i ts with
| Some i => fm (inl i)
| None => Flag.emp
end
| inr i => fm (inr i)
end.
Definition interp_fun (f: function m.(state) m.(ident) m.(mident))
: ktree (programE interp_ident interp_state) f.(A) f.(R) :=
fun (arg: f.(A)) =>
_ <- trigger Yield;;
tid <- trigger (GetTid);;
'(st, ts) <- trigger (Get id);;
let ts := NatMap.add tid f.(type) ts in
_ <- trigger (Put (st, ts));;
_ <- trigger (Fair (prism_fmap inlp (fun i => if tid_dec i tid then Flag.success else Flag.emp)));;
ITree.iter
(fun (_: unit) =>
b <- trigger (Choose bool);;
if (b: bool)
then
_ <- trigger (Fair (prism_fmap inlp (fun i => if tid_dec i tid then Flag.fail else Flag.emp)));;
_ <- trigger Yield;; Ret (inl tt)
else
'(st, ts) <- trigger (Get id);;
next <- trigger (Choose (sig (f.(body) arg st)));;
match proj1_sig next with
| normal st r fm =>
let ts := NatMap.remove tid ts in
_ <- trigger (Fair (interp_fmap fm ts));;
_ <- trigger (Put (st, ts));;
_ <- trigger Yield;;
Ret (inr r)
| stuck _ _ _ _ => UB
| disabled _ _ _ _ => _ <- trigger Yield;; Ret (inl tt)
end) tt
.
Definition interp_fun_register (tid: thread_id) (i: m.(ident)): itree (programE interp_ident interp_state) unit :=
'(st, ts) <- trigger (Get id);;
let ts := NatMap.add tid i ts in
_ <- trigger (Put (st, ts));;
_ <- trigger (Fair (prism_fmap inlp (fun i => if tid_dec i tid then Flag.success else Flag.emp)));;
Ret tt
.
Definition interp_fun_body R (tid: thread_id)
(step: m.(state) -> output m.(state) m.(ident) m.(mident) R -> Prop)
: itree (programE interp_ident interp_state) R :=
ITree.iter
(fun (_: unit) =>
b <- trigger (Choose bool);;
if (b: bool) then
_ <- trigger (Fair (prism_fmap inlp (fun i => if tid_dec i tid then Flag.fail else Flag.emp)));;
_ <- trigger Yield;; Ret (inl tt)
else
'(st, ts) <- trigger (Get id);;
next <- trigger (Choose (sig (step st)));;
match proj1_sig next with
| normal st r fm =>
let ts := NatMap.remove tid ts in
_ <- trigger (Fair (interp_fmap fm ts));;
_ <- trigger (Put (st, ts));;
_ <- trigger Yield;;
Ret (inr r)
| stuck _ _ _ _ => UB
| disabled _ _ _ _ => _ <- trigger Yield;; Ret (inl tt)
end) tt.
Lemma interp_fun_unfold f arg
:
interp_fun f arg
=
_ <- trigger Yield;;
tid <- trigger (GetTid);;
_ <- (interp_fun_register tid f.(type));;
interp_fun_body tid (f.(body) arg)
.
Proof.
unfold interp_fun, interp_fun_register, interp_fun_body. grind.
Qed.
Lemma interp_loop_unfold
R (tid: thread_id)
(step: m.(state) -> output m.(state) m.(ident) m.(mident) R -> Prop)
:
interp_fun_body tid step
=
b <- trigger (Choose bool);;
if (b: bool) then
_ <- trigger (Fair (prism_fmap inlp (fun i => if tid_dec i tid then Flag.fail else Flag.emp)));;
_ <- trigger Yield;;
tau;; interp_fun_body tid step
else
'(st, ts) <- trigger (Get id);;
next <- trigger (Choose (sig (step st)));;
match proj1_sig next with
| normal st r fm =>
let ts := NatMap.remove tid ts in
_ <- trigger (Fair (interp_fmap fm ts));;
_ <- trigger (Put (st, ts));;
_ <- trigger Yield;;
Ret r
| stuck _ _ _ _ => UB
| disabled _ _ _ _ => _ <- trigger Yield;; tau;; interp_fun_body tid step
end.
Proof.
unfold interp_fun_body at 1. rewrite unfold_iter_eq.
unfold interp_fun_body, UB. grind.
Qed.
Definition interp_mod: Mod.t :=
Mod.mk
(m.(st_init), NatMap.empty m.(ident))
(Mod.get_funs (List.map (fun '(fn, f) => (fn, Mod.wrap_fun (interp_fun f))) m.(funs)))
.
End INTERP.
End WMod.
Arguments WMod.disabled {_ _ _ _}.
Arguments WMod.stuck {_ _ _ _}.
|
{"author": "snu-sf", "repo": "fairness", "sha": "170bd1ade88d32ac6ab661ed0c272af8a00d9ea1", "save_path": "github-repos/coq/snu-sf-fairness", "path": "github-repos/coq/snu-sf-fairness/fairness-170bd1ade88d32ac6ab661ed0c272af8a00d9ea1/src/semantics/Wrapper.v"}
|
# Importing stock libraries
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler
import json
from typing import List
# Importing the GPT2 modules from huggingface/transformers
from transformers import GPT2LMHeadModel, GPT2Tokenizer
# Import os for env variables via Beaker
import os
# WandB – Import the wandb library
import wandb
import logging
from torch import cuda
from split.utils import write_items
from optparse import OptionParser
device = 'cuda' if cuda.is_available() else 'cpu'
logger = logging.getLogger("gpt2-comet")
logging.basicConfig(level=logging.DEBUG)
# logger.info for allenai beaker verification
logger.info(device)
logger.info(torch.cuda.device_count())
from mosaic.infra.modeling import train, validate, beam_generations
from mosaic.datasets.KGDataset import KGDataset
DEBUG = False
NUM_INST = 100
def read_jsonl_lines(input_file: str) -> List[dict]:
with open(input_file) as f:
lines = f.readlines()
return [json.loads(l.strip()) for l in lines]
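# All run settings are read from environment variables inside main() below.
# A hypothetical invocation (the variable names match the os.environ lookups
# in main(); the values shown are illustrative only):
#
#   TRAIN_BATCH_SIZE=4 TRAIN_EPOCHS=1 DO_TRAIN=True DO_PRED=False \
#   GPT2_MODEL=gpt2 OUT_DIR=/tmp/models python comet_gpt2.py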
def main():
wandb.init(project="gpt2_comet_atomic")
config = wandb.config
config.TRAIN_BATCH_SIZE = int(os.environ.get("TRAIN_BATCH_SIZE", 2))
config.VALID_BATCH_SIZE = int(os.environ.get("VALID_BATCH_SIZE", 2))
config.TRAIN_EPOCHS = int(os.environ.get("TRAIN_EPOCHS", 3))
config.VAL_EPOCHS = int(os.environ.get("VAL_EPOCHS", 1))
config.LEARNING_RATE = float(os.environ.get("LEARNING_RATE", "1e-5"))
config.SEED = int(os.environ.get("SEED", 42))
config.IN_LEN = int(os.environ.get("IN_LEN", 16))
config.OUT_LEN = int(os.environ.get("OUT_LEN", 34))
config.SUMMARY_LEN = 0 # Used for t5
config.OUT_DIR = os.environ.get("OUT_DIR", "/models")
config.DO_TRAIN = os.environ.get("DO_TRAIN", "False") == "True"
config.DO_PRED = os.environ.get("DO_PRED", "True") == "True"
config.PRED_FILE = str(os.environ.get("PRED_FILE", ""))
config.TOP_K = int(os.environ.get("TOP_K", 40))
config.PRED_BATCH = 64
config.TOKENIZER = os.environ.get('TOKENIZER', "gpt2-xl")
torch.manual_seed(config.SEED) # pytorch random seed
np.random.seed(config.SEED) # numpy random seed
torch.backends.cudnn.deterministic = True
model_name = "gpt2" if 'GPT2_MODEL' not in os.environ else os.environ['GPT2_MODEL']
    try:
        tokenizer = GPT2Tokenizer.from_pretrained(model_name)
    except Exception:
        # fall back to the tokenizer named by the TOKENIZER setting if the
        # model name does not resolve to tokenizer files
        tokenizer = GPT2Tokenizer.from_pretrained(config.TOKENIZER)
tokenizer.add_special_tokens({
'eos_token': '[EOS]',
'additional_special_tokens': [
'LocationOfAction',
'HinderedBy',
'HasFirstSubevent',
'NotHasProperty',
'NotHasA',
'HasA',
'AtLocation',
'NotCapableOf',
'CausesDesire',
'HasPainCharacter',
'NotDesires',
'MadeUpOf',
'InstanceOf',
'SymbolOf',
'xReason',
'isAfter',
'HasPrerequisite',
'UsedFor',
'MadeOf',
'MotivatedByGoal',
'Causes',
'oEffect',
'CreatedBy',
'ReceivesAction',
'NotMadeOf',
'xWant',
'PartOf',
'DesireOf',
'HasPainIntensity',
'xAttr',
'DefinedAs',
'oReact',
'xIntent',
'HasSubevent',
'oWant',
'HasProperty',
'IsA',
'HasSubEvent',
'LocatedNear',
'Desires',
'isFilledBy',
'isBefore',
'InheritsFrom',
'xNeed',
'xEffect',
'xReact',
'HasLastSubevent',
'RelatedTo',
'CapableOf',
'NotIsA',
'ObjectUse',
'[GEN]'
]
})
tokenizer.add_special_tokens({'pad_token': '[PAD]'})
train_dataset = pd.read_csv(
os.environ.get('TRAIN_DATA_PATH', "/tmp/gpt2data/atomic_train.tsv"),
encoding='latin-1', sep="\t")
if DEBUG:
train_dataset = train_dataset.head(NUM_INST)
# train_dataset = train_dataset[['head_event', 'tail_event', 'relation']]
train_dataset.head_event = train_dataset.head_event + ' ' + train_dataset.relation \
+ " [GEN]"
train_dataset.tail_event = train_dataset.tail_event + ' [EOS]'
logger.info(train_dataset.head())
logger.info(train_dataset.tail_event)
val_dataset = pd.read_csv(os.environ.get('DEV_DATA_PATH', "/tmp/gpt2data/atomic_dev.tsv"), encoding='latin-1', sep="\t")
if DEBUG:
val_dataset = val_dataset.head(NUM_INST)
val_dataset = val_dataset[['head_event', 'tail_event', 'relation']]
val_dataset.head_event = val_dataset.head_event + ' ' + val_dataset.relation + " [GEN]"
val_dataset.tail_event = val_dataset.tail_event + ' [EOS]'
logger.info(val_dataset.tail_event)
logger.info(val_dataset.head())
test_dataset = pd.read_csv(os.environ.get('TEST_DATA_PATH', "/tmp/gpt2data/atomic_test.tsv"), encoding='latin-1', sep="\t")
if DEBUG:
test_dataset = test_dataset.head(NUM_INST)
test_dataset = test_dataset[['head_event', 'tail_event', 'relation']]
test_dataset.head_event = test_dataset.head_event + ' ' + test_dataset.relation \
+ " [GEN]"
test_dataset.tail_event = test_dataset.tail_event + ' [EOS]'
logger.info(test_dataset.tail_event)
logger.info(test_dataset.head())
val_dataset_mini = pd.read_csv(os.environ.get('DEV_DATA_PATH', "/tmp/gpt2data/atomic_dev.tsv"), encoding='latin-1', sep="\t")
if DEBUG:
val_dataset_mini = val_dataset_mini.head(5)
    # DataFrame.size counts cells, not rows; dividing by the 3 relevant
    # columns approximates the row count before sampling
    val_dataset_mini = val_dataset_mini.sample(n=min(int(val_dataset_mini.size / 3), 100),
                                               random_state=config.SEED)
val_dataset_mini = val_dataset_mini[['head_event', 'tail_event', 'relation']]
val_dataset_mini.head_event = val_dataset_mini.head_event + ' ' + val_dataset_mini.relation + " [GEN]"
val_dataset_mini.tail_event = val_dataset_mini.tail_event + ' [EOS]'
logger.info(val_dataset_mini.tail_event)
logger.info(val_dataset_mini.head())
logger.info("TRAIN Dataset tuple count: {}".format(train_dataset.shape))
logger.info("DEV Dataset tuple_count: {}".format(val_dataset.shape))
logger.info("DEV MINI Dataset tuple_count: {}".format(val_dataset_mini.shape))
training_set = KGDataset(train_dataset, tokenizer, config.OUT_LEN, config.SUMMARY_LEN, model="gpt2")
val_set = KGDataset(val_dataset, tokenizer, config.IN_LEN, config.OUT_LEN - config.IN_LEN, model="gpt2", is_eval=True)
    # use the sampled mini dev set prepared above
    val_set_mini = KGDataset(val_dataset_mini, tokenizer, config.IN_LEN, config.OUT_LEN - config.IN_LEN, model="gpt2", is_eval=True)
test_set = KGDataset(test_dataset, tokenizer, config.IN_LEN, config.OUT_LEN - config.IN_LEN, model="gpt2", is_eval=True)
train_params = {
'batch_size': config.TRAIN_BATCH_SIZE,
'shuffle': True,
'num_workers': 0
}
val_params = {
'batch_size': 1,
'shuffle': False,
'num_workers': 0
}
training_loader = DataLoader(training_set, **train_params, drop_last=True)
val_loader = DataLoader(val_set, **val_params, drop_last=True)
test_loader = DataLoader(test_set, **val_params, drop_last=True)
val_loader_mini = DataLoader(val_set_mini, **val_params, drop_last=True)
logging.info("Loading model from {}".format(model_name))
model = GPT2LMHeadModel.from_pretrained(model_name, use_cdn=False)
logging.info("Move model to device {}".format(device))
model = model.to(device)
model.resize_token_embeddings(len(tokenizer))
optimizer = torch.optim.Adam(params=model.parameters(), lr=config.LEARNING_RATE)
wandb.watch(model, log="all")
if config.DO_TRAIN:
logger.info('Initiating Fine-Tuning for the model on our dataset')
for epoch in range(config.TRAIN_EPOCHS):
train(epoch, tokenizer, model, device, training_loader, optimizer, val_loader_mini, model_class="gpt2")
model.save_pretrained('{}/checkpoint_{}'.format(config.OUT_DIR, epoch))
tokenizer.save_pretrained('{}/checkpoint_{}'.format(config.OUT_DIR, epoch))
model.save_pretrained('/models')
if config.DO_PRED:
if config.PRED_FILE.endswith("jsonl"):
records = read_jsonl_lines(config.PRED_FILE)
pred_dataset = pd.DataFrame.from_records(records)
pred_dataset = pred_dataset.rename(columns={"head": "head_event", "tails": "tail_event"})
pred_dataset = pred_dataset.explode('tail_event')
else:
pred_dataset = pd.read_csv(config.PRED_FILE, encoding='latin-1', sep="\t")
if DEBUG:
pred_dataset = pred_dataset.head(NUM_INST)
pred_dataset = pred_dataset.drop_duplicates(['head_event', 'relation'], ignore_index=True)
pred_dataset.head_event = pred_dataset.head_event + ' ' + pred_dataset.relation + " [GEN]"
pred_dataset.tail_event = pred_dataset.tail_event + ' [EOS]'
logger.info(pred_dataset.tail_event)
logger.info(pred_dataset.head())
pred_set = KGDataset(pred_dataset, tokenizer, config.IN_LEN, config.OUT_LEN - config.IN_LEN, model="gpt2", is_eval=True)
pred_loader = DataLoader(pred_set, **val_params, drop_last=False)
pred_generations = beam_generations(tokenizer, model, device, pred_loader, top_k=config.TOP_K)
write_items(os.path.join(config.OUT_DIR, "pred_generations.jsonl"),
[json.dumps(r) for r in pred_generations])
# Resave the model to keep generations and model associated
model.save_pretrained('/models')
tokenizer.save_pretrained('/models')
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-t", "--test_install",
action="store_true", default=False,
help="Test install, without running any modeling code.")
(options, args) = parser.parse_args()
if not options.test_install:
main()
|
{"hexsha": "23c99f53efc52c496d05c9f34a6ebff0d11b131d", "size": 10278, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/comet_atomic2020_gpt2/comet_gpt2.py", "max_stars_repo_name": "anudeep23/CS7634-FinalProject-COMET2020", "max_stars_repo_head_hexsha": "ef86531719a9016f2597516d84dbcf010fb8699c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 94, "max_stars_repo_stars_event_min_datetime": "2021-02-16T07:55:54.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-19T14:58:45.000Z", "max_issues_repo_path": "models/comet_atomic2020_gpt2/comet_gpt2.py", "max_issues_repo_name": "anudeep23/CS7634-FinalProject-COMET2020", "max_issues_repo_head_hexsha": "ef86531719a9016f2597516d84dbcf010fb8699c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2021-02-20T04:45:27.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-10T18:26:35.000Z", "max_forks_repo_path": "models/comet_atomic2020_gpt2/comet_gpt2.py", "max_forks_repo_name": "anudeep23/CS7634-FinalProject-COMET2020", "max_forks_repo_head_hexsha": "ef86531719a9016f2597516d84dbcf010fb8699c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 23, "max_forks_repo_forks_event_min_datetime": "2021-02-17T07:31:15.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T22:15:06.000Z", "avg_line_length": 38.6390977444, "max_line_length": 139, "alphanum_fraction": 0.649250827, "include": true, "reason": "import numpy", "num_tokens": 2399}
|
% SPDX-FileCopyrightText: © 2021 Martin Michlmayr <tbm@cyrius.com>
% SPDX-License-Identifier: CC-BY-4.0
\setchapterimage[9.5cm]{images/code}
\chapter{Licensing and copyright}
\labch{copyright}
The licensing of open source projects is a widely discussed topic. The choice of a license can greatly influence the impact and growth of a project. Many resources exist to help projects make the right decision, such as the ``\href{https://choosealicense.com/}{choose an open source license}'' site.
In addition to the license itself, there are other aspects related to licensing and copyright that projects have to consider, and where FOSS foundations can play an important role.
Some projects require contributors to sign a Contributor License Agreement (CLA), which gives certain rights to the project. If a CLA is required, a FOSS foundation would be a good home for the agreements. However, CLAs are often seen as \href{https://opensource.com/article/19/2/cla-problems}{harmful} and the majority of projects rely on a system commonly referred to as \href{https://opensource.com/law/11/7/trouble-harmony-part-1}{inbound=outbound} whereby contributions to the project are provided under the project's license without the need for another agreement. Some projects additionally use a simple self-attestation mechanism to confirm the origin of the contribution, such as the \href{https://developercertificate.org/}{Developer Certificate of Origin} (DCO) used by the Linux kernel and other projects.
Another area where FOSS foundations can help is with the enforcement of open source licenses. While the idea of open source is to share the code widely, there are certain conditions attached to the distribution of the software. Unfortunately, these conditions are sometimes not followed (many times inadvertently, although sometimes intentionally). FOSS foundations can work with those organizations to resolve these types of issues, or, if necessary, to initiate legal action.
Finally, while we don't like to think about death, it's important to plan ahead. A project may want to change its license or move to a new version of a license in the future, but this can only be done with the explicit permission of all copyright holders, which becomes impossible to obtain once a contributor has died. A FOSS organization could act as a custodian and make such decisions on behalf of deceased contributors who granted it that permission in advance.
|
{"hexsha": "7452815be12f5db12c6cd46914c76639acc774c8", "size": 2369, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "chapters/legal/copyright.tex", "max_stars_repo_name": "tbm/foss-foundations-primer", "max_stars_repo_head_hexsha": "1c7370b86f9ea5133f6a077d9b7b0105729f21ac", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-04-29T20:30:34.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-06T12:32:59.000Z", "max_issues_repo_path": "chapters/legal/copyright.tex", "max_issues_repo_name": "tbm/foss-foundations-primer", "max_issues_repo_head_hexsha": "1c7370b86f9ea5133f6a077d9b7b0105729f21ac", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chapters/legal/copyright.tex", "max_forks_repo_name": "tbm/foss-foundations-primer", "max_forks_repo_head_hexsha": "1c7370b86f9ea5133f6a077d9b7b0105729f21ac", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 118.45, "max_line_length": 820, "alphanum_fraction": 0.8003376952, "num_tokens": 513}
|
import csv
import math
import pprint
import time
from argparse import ArgumentParser
import numpy as np
import airsim
import setup_path
class DroneEnv:
    def __init__(self):
        self.client = airsim.CarClient()
        self.client.confirmConnection()
        self.client.enableApiControl(True)
        self.car_controls = airsim.CarControls()
    def step(self, action):
        car_controls = self.interpret_action(action)
        self.client.setCarControls(car_controls)
        car_state = self.client.getCarState()
        reward = self.compute_reward(car_state)
        done = self.isDone(car_state, car_controls, reward)
        return car_state, reward, done
def reset(self):
self.client = airsim.CarClient()
self.client.confirmConnection()
self.client.enableApiControl(True)
        self.car_controls = airsim.CarControls()
responses = self.client.simGetImages(
[airsim.ImageRequest("1", airsim.ImageType.Scene, False, False)]
)
obs = self.transform_input(responses)
return obs
def get_obs(self):
responses = self.client.simGetImages(
[airsim.ImageRequest("1", airsim.ImageType.Scene, False, False)]
)
obs = self.transform_input(responses)
return obs
def compute_reward(self, car_state):
MAX_SPEED = 300
MIN_SPEED = 10
thresh_dist = 3.5
beta = 3
z = 0
pts = [
np.array([0, -1, z]),
np.array([130, -1, z]),
np.array([130, 125, z]),
np.array([0, 125, z]),
np.array([0, -1, z]),
np.array([130, -1, z]),
np.array([130, -128, z]),
np.array([0, -128, z]),
np.array([0, -1, z]),
]
pd = car_state.kinematics_estimated.position
car_pt = np.array([pd.x_val, pd.y_val, pd.z_val])
dist = 10000000
for i in range(0, len(pts) - 1):
dist = min(
dist,
np.linalg.norm(np.cross((car_pt - pts[i]), (car_pt - pts[i + 1])))
/ np.linalg.norm(pts[i] - pts[i + 1]),
)
if dist > thresh_dist:
reward = -3
else:
reward_dist = math.exp(-beta * dist) - 0.5
reward_speed = (
(car_state.speed - MIN_SPEED) / (MAX_SPEED - MIN_SPEED)
) - 0.5
reward = reward_dist + reward_speed
return reward
    def isDone(self, car_state, car_controls, reward):
        done = 0
        if reward < -1:
            done = 1
            self.client.reset()
            self.client.enableApiControl(False)
        if car_controls.brake == 0:
            if car_state.speed <= 5:
                done = 1
                self.client.reset()
                self.client.enableApiControl(False)
        return done
def transform_input(self, responses):
response = responses[0]
        # frombuffer replaces the deprecated np.fromstring for raw byte input
        img1d = np.frombuffer(
            response.image_data_uint8, dtype=np.uint8
        )
img_rgba = img1d.reshape(
response.height, response.width, 4
)
print("height, width: ", response.height, response.width)
img2d = np.flipud(img_rgba)
from PIL import Image
image = Image.fromarray(img2d)
im_final = np.array(image.resize((84, 84)).convert("L"))
return im_final
    def interpret_action(self, action):
        car_controls = airsim.CarControls()
        car_controls.brake = 0
        car_controls.throttle = 1
if action == 0:
car_controls.throttle = 0
car_controls.brake = 1
elif action == 1:
car_controls.steering = 0
elif action == 2:
car_controls.steering = 0.5
elif action == 3:
car_controls.steering = -0.5
elif action == 4:
car_controls.steering = 0.25
else:
car_controls.steering = -0.25
return car_controls
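# Minimal usage sketch (illustration only; assumes a running AirSim simulator
# and an externally defined `policy` object with an `act(obs) -> int` method,
# which is NOT provided here). Note that despite its name, DroneEnv drives the
# AirSim *car* client:
#
#   env = DroneEnv()
#   obs = env.reset()
#   done = False
#   while not done:
#       action = policy.act(obs)  # hypothetical policy
#       car_state, reward, done = env.step(action)
#       obs = env.get_obs()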
|
{"hexsha": "3727ad164466bbea23d762b797b5e10bec926067", "size": 3978, "ext": "py", "lang": "Python", "max_stars_repo_path": "env_car.py", "max_stars_repo_name": "ysbsb/code_demo", "max_stars_repo_head_hexsha": "d6ed52506439b7b0fecc01f7c831f257064f97f7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-11T14:42:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-11T14:42:46.000Z", "max_issues_repo_path": "env_car.py", "max_issues_repo_name": "subinlab/code_demo", "max_issues_repo_head_hexsha": "d6ed52506439b7b0fecc01f7c831f257064f97f7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "env_car.py", "max_forks_repo_name": "subinlab/code_demo", "max_forks_repo_head_hexsha": "d6ed52506439b7b0fecc01f7c831f257064f97f7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.618705036, "max_line_length": 82, "alphanum_fraction": 0.5485168426, "include": true, "reason": "import numpy", "num_tokens": 958}
|
import os
from typing import Tuple
import numpy as np
def parse_icd9_range(range_: str) -> Tuple[str, str, int, int]:
ranges = range_.lstrip().split('-')
if ranges[0][0] == 'V':
prefix = 'V'
format_ = '%02d'
start, end = int(ranges[0][1:]), int(ranges[1][1:])
elif ranges[0][0] == 'E':
prefix = 'E'
format_ = '%03d'
start, end = int(ranges[0][1:]), int(ranges[1][1:])
else:
prefix = ''
format_ = '%03d'
if len(ranges) == 1:
start = int(ranges[0])
end = start
else:
start, end = int(ranges[0]), int(ranges[1])
return prefix, format_, start, end
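# Worked examples for parse_icd9_range (added for illustration; the values
# follow directly from the branches above):
#   parse_icd9_range('001-009')   -> ('',  '%03d', 1,   9)
#   parse_icd9_range('V01-V09')   -> ('V', '%02d', 1,   9)
#   parse_icd9_range('E800-E807') -> ('E', '%03d', 800, 807)
#   parse_icd9_range('280')       -> ('',  '%03d', 280, 280)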
def generate_code_levels(path, code_map: dict) -> np.ndarray:
print('generating code levels ...')
three_level_code_set = set(code.split('.')[0] for code in code_map)
icd9_path = os.path.join(path, 'icd9.txt')
icd9_range = list(open(icd9_path, 'r', encoding='utf-8').readlines())
three_level_dict = dict()
level1, level2, level3 = (1, 1, 1)
level1_can_add = False
for range_ in icd9_range:
range_ = range_.rstrip()
if range_[0] == ' ':
prefix, format_, start, end = parse_icd9_range(range_)
            level2_cannot_add = True  # flipped to False once a code in this range is used
for i in range(start, end + 1):
code = prefix + format_ % i
if code in three_level_code_set:
three_level_dict[code] = [level1, level2, level3]
level3 += 1
level1_can_add = True
level2_cannot_add = False
if not level2_cannot_add:
level2 += 1
else:
if level1_can_add:
level1 += 1
level1_can_add = False
code_level = dict()
for code, cid in code_map.items():
three_level_code = code.split('.')[0]
three_level = three_level_dict[three_level_code]
code_level[code] = three_level + [cid]
code_level_matrix = np.zeros((len(code_map) + 1, 4), dtype=int)
for code, cid in code_map.items():
code_level_matrix[cid] = code_level[code]
return code_level_matrix
def generate_subclass_map(code_level_matrix: np.ndarray) -> list:
code_num, level_num = code_level_matrix.shape
max_level = np.max(code_level_matrix, axis=0)
    # subclass_map[i][l - 1] holds the zero-based indices of the distinct
    # level-(i + 2) values occurring under level-(i + 1) value l
    subclass_map = [[np.array(list(set((code_level_matrix[np.where(code_level_matrix[:, i] == l)[0]][:, i + 1]))),
                     dtype=int) - 1
                     for l in range(1, max_level[i] + 1)] for i in range(level_num - 1)]
return subclass_map
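# Illustration of the subclass_map structure (not from the original source).
# With rows of the form [level1, level2, level3, cid], e.g.
#   code_level_matrix = np.array([[0, 0, 0, 0],
#                                 [1, 1, 1, 1],
#                                 [1, 1, 2, 2],
#                                 [1, 2, 3, 3]])
# generate_subclass_map(code_level_matrix) yields, up to ordering within each
# array,
#   [[array([0, 1])],
#    [array([0, 1]), array([2])],
#    [array([0]), array([1]), array([2])]]
# i.e. each class at one level maps to the zero-based indices of its children
# at the next level.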
def generate_code_code_adjacent(pids: np.ndarray, patient_admission: dict, admission_codes_encoded: dict, code_num: int) -> np.ndarray:
print('generating code code adjacent matrix ...')
n = code_num + 1
result = np.zeros((n, n), dtype=float)
for i, pid in enumerate(pids):
print('\r\t%d / %d' % (i, len(pids)), end='')
for admission in patient_admission[pid]:
codes = admission_codes_encoded[admission['admission_id']]
for row in range(len(codes) - 1):
for col in range(row + 1, len(codes)):
c_i = codes[row]
c_j = codes[col]
result[c_i, c_j] += 1
result[c_j, c_i] += 1
print('\r\t%d / %d' % (len(pids), len(pids)))
s = result.sum(axis=-1, keepdims=True)
s[s == 0] = 1
result = result / s
    # add heavily weighted self-loops (weight 9) before the final row
    # normalisation below
    result = result + np.eye(result.shape[0]) * 9
# rowsum = result.sum(axis=-1)
# degree_mat_inv_sqrt = np.diag(np.power(rowsum, -0.5).flatten())
# result = result.dot(degree_mat_inv_sqrt).transpose().dot(degree_mat_inv_sqrt)
result = result / result.sum(axis=-1, keepdims=True)
return result.astype(np.float32)
|
{"hexsha": "07eb121b53c5428e32450879474b504d0883d89f", "size": 3782, "ext": "py", "lang": "Python", "max_stars_repo_path": "preprocess/auxiliary.py", "max_stars_repo_name": "LuChang-CS/sherbet", "max_stars_repo_head_hexsha": "d1061aca108eab8e0ccbd2202460e25261fdf1d5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-01-26T05:38:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T08:54:18.000Z", "max_issues_repo_path": "preprocess/auxiliary.py", "max_issues_repo_name": "LuChang-CS/sherbet", "max_issues_repo_head_hexsha": "d1061aca108eab8e0ccbd2202460e25261fdf1d5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "preprocess/auxiliary.py", "max_forks_repo_name": "LuChang-CS/sherbet", "max_forks_repo_head_hexsha": "d1061aca108eab8e0ccbd2202460e25261fdf1d5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.82, "max_line_length": 135, "alphanum_fraction": 0.5684822845, "include": true, "reason": "import numpy", "num_tokens": 1030}
|
import glob
import json
import os
import shutil
import operator
import sys
import argparse
import math
import numpy as np
from copy import deepcopy
parser = argparse.ArgumentParser()
parser.add_argument('-na', '--no-animation', help="no animation is shown.", action="store_true")
parser.add_argument('-np', '--no-plot', help="no plot is shown.", action="store_true")
parser.add_argument('-q', '--quiet', help="minimalistic console output.", action="store_true")
# argparse receiving list of classes to be ignored (e.g., python main.py --ignore person book)
parser.add_argument('-i', '--ignore', nargs='+', type=str, help="ignore a list of classes.")
# argparse receiving list of classes with specific IoU (e.g., python main.py --set-class-iou person 0.7)
parser.add_argument('--set-class-iou', nargs='+', type=str, help="set IoU for a specific class.")
parser.add_argument('--classes', type=str, help='names of classes as "name1,name2" in order as in labels.txt ')
parser.add_argument('--train-gt-folder', type=str, help='folder containing training labels')
parser.add_argument('--log', type=str, help='just a text inserted into logs')
args = parser.parse_args()
'''
    0,0 ------> x (width)
     |
     |  (Left,Top)
     |      *_________
     |      |         |
     |      |         |
     y      |_________|
  (height)            *
                (Right,Bottom)
'''
# if there are no classes to ignore then replace None by empty list
if args.ignore is None:
args.ignore = []
specific_iou_flagged = False
if args.set_class_iou is not None:
specific_iou_flagged = True
# make sure that the cwd() is the location of the python script (so that every path makes sense)
os.chdir(os.path.dirname(os.path.abspath(__file__)))
GT_PATH = os.path.join(os.getcwd(), 'input', 'ground-truth')
DR_PATH = os.path.join(os.getcwd(), 'input', 'detection-results')
# if there are no images then no animation can be shown
IMG_PATH = os.path.join(os.getcwd(), 'input', 'images-optional')
if os.path.exists(IMG_PATH):
for dirpath, dirnames, files in os.walk(IMG_PATH):
if not files:
# no image files found
args.no_animation = True
else:
args.no_animation = True
# try to import OpenCV if the user didn't choose the option --no-animation
show_animation = False
if not args.no_animation:
try:
import cv2
show_animation = True
except ImportError:
print("\"opencv-python\" not found, please install to visualize the results.")
args.no_animation = True
# try to import Matplotlib if the user didn't choose the option --no-plot
draw_plot = False
if not args.no_plot:
try:
import matplotlib.pyplot as plt
draw_plot = True
except ImportError:
print("\"matplotlib\" not found, please install it to get the resulting plots.")
args.no_plot = True
def log_average_miss_rate(prec, rec, num_images):
"""
log-average miss rate:
Calculated by averaging miss rates at 9 evenly spaced FPPI points
    between 1e-2 and 1e0, in log-space.
output:
lamr | log-average miss rate
mr | miss rate
fppi | false positives per image
references:
[1] Dollar, Piotr, et al. "Pedestrian Detection: An Evaluation of the
State of the Art." Pattern Analysis and Machine Intelligence, IEEE
Transactions on 34.4 (2012): 743 - 761.
"""
# if there were no detections of that class
if prec.size == 0:
lamr = 0
mr = 1
fppi = 0
return lamr, mr, fppi
fppi = (1 - prec)
mr = (1 - rec)
fppi_tmp = np.insert(fppi, 0, -1.0)
mr_tmp = np.insert(mr, 0, 1.0)
# Use 9 evenly spaced reference points in log-space
ref = np.logspace(-2.0, 0.0, num=9)
for i, ref_i in enumerate(ref):
# np.where() will always find at least 1 index, since min(ref) = 0.01 and min(fppi_tmp) = -1.0
j = np.where(fppi_tmp <= ref_i)[-1][-1]
ref[i] = mr_tmp[j]
# log(0) is undefined, so we use the np.maximum(1e-10, ref)
lamr = math.exp(np.mean(np.log(np.maximum(1e-10, ref))))
return lamr, mr, fppi
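# Worked example for log_average_miss_rate (illustration only). With
# prec = np.array([0.5]) and rec = np.array([0.5]), fppi_tmp = [-1.0, 0.5] and
# mr_tmp = [1.0, 0.5]; seven of the nine log-spaced reference points fall
# below 0.5 (miss rate 1.0) and two at or above it (miss rate 0.5), so
# lamr = exp(mean(log([1.0] * 7 + [0.5] * 2))) ~= 0.857. Note that the
# num_images argument is accepted but not used by this implementation.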
"""
throw error and exit
"""
def error(msg):
print(msg)
sys.exit(0)
"""
check if the number is a float between 0.0 and 1.0
"""
def is_float_between_0_and_1(value):
try:
val = float(value)
if val > 0.0 and val < 1.0:
return True
else:
return False
except ValueError:
return False
"""
Calculate the AP given the recall and precision array
1st) We compute a version of the measured precision/recall curve with
precision monotonically decreasing
2nd) We compute the AP as the area under this curve by numerical integration.
"""
def voc_ap(rec, prec):
"""
--- Official matlab code VOC2012---
mrec=[0 ; rec ; 1];
mpre=[0 ; prec ; 0];
for i=numel(mpre)-1:-1:1
mpre(i)=max(mpre(i),mpre(i+1));
end
i=find(mrec(2:end)~=mrec(1:end-1))+1;
ap=sum((mrec(i)-mrec(i-1)).*mpre(i));
"""
    rec.insert(0, 0.0)  # insert 0.0 at beginning of list
rec.append(1.0) # insert 1.0 at end of list
mrec = rec[:]
    prec.insert(0, 0.0)  # insert 0.0 at beginning of list
prec.append(0.0) # insert 0.0 at end of list
mpre = prec[:]
"""
This part makes the precision monotonically decreasing
(goes from the end to the beginning)
matlab: for i=numel(mpre)-1:-1:1
mpre(i)=max(mpre(i),mpre(i+1));
"""
# matlab indexes start in 1 but python in 0, so I have to do:
# range(start=(len(mpre) - 2), end=0, step=-1)
# also the python function range excludes the end, resulting in:
# range(start=(len(mpre) - 2), end=-1, step=-1)
for i in range(len(mpre) - 2, -1, -1):
mpre[i] = max(mpre[i], mpre[i + 1])
"""
This part creates a list of indexes where the recall changes
matlab: i=find(mrec(2:end)~=mrec(1:end-1))+1;
"""
i_list = []
for i in range(1, len(mrec)):
if mrec[i] != mrec[i - 1]:
i_list.append(i) # if it was matlab would be i + 1
"""
The Average Precision (AP) is the area under the curve
(numerical integration)
matlab: ap=sum((mrec(i)-mrec(i-1)).*mpre(i));
"""
ap = 0.0
for i in i_list:
ap += ((mrec[i] - mrec[i - 1]) * mpre[i])
return ap, mrec, mpre
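# Worked example for voc_ap (illustration only). Starting from the measured
# points rec = [0.5, 1.0], prec = [1.0, 0.5], the sentinel insertions give
# mrec = [0.0, 0.5, 1.0, 1.0] and the monotone envelope
# mpre = [1.0, 1.0, 0.5, 0.0], so
#   ap = (0.5 - 0.0) * 1.0 + (1.0 - 0.5) * 0.5 = 0.75
# Note that the function mutates the rec/prec lists it is given (insert and
# append), so pass copies if the originals are needed afterwards.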
def compute_ap(recall, precision):
""" Compute the average precision, given the recall and precision curves
# Arguments
recall: The recall curve (list)
precision: The precision curve (list)
# Returns
Average precision, precision curve, recall curve
"""
# Append sentinel values to beginning and end
mrec = np.concatenate(([0.0], recall, [1.0]))
mpre = np.concatenate(([1.0], precision, [0.0]))
# Compute the precision envelope
mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))
# Integrate area under curve
method = 'interp' # methods: 'continuous', 'interp'
if method == 'interp':
x = np.linspace(0, 1, 101) # 101-point interp (COCO)
ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate
else: # 'continuous'
i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve
return ap, mrec, mpre
"""
Convert the lines of a file to a list
"""
def file_lines_to_list(path):
# open txt file lines to a list
with open(path) as f:
content = f.readlines()
# remove whitespace characters like `\n` at the end of each line
content = [x.strip() for x in content]
return content
"""
Draws text in image
"""
def draw_text_in_image(img, text, pos, color, line_width):
font = cv2.FONT_HERSHEY_PLAIN
fontScale = 1
lineType = 1
bottomLeftCornerOfText = pos
cv2.putText(img, text,
bottomLeftCornerOfText,
font,
fontScale,
color,
lineType)
text_width, _ = cv2.getTextSize(text, font, fontScale, lineType)[0]
return img, (line_width + text_width)
"""
Plot - adjust axes
"""
def adjust_axes(r, t, fig, axes):
# get text width for re-scaling
bb = t.get_window_extent(renderer=r)
text_width_inches = bb.width / fig.dpi
# get axis width in inches
current_fig_width = fig.get_figwidth()
new_fig_width = current_fig_width + text_width_inches
    proportion = new_fig_width / current_fig_width
    # get axis limit
    x_lim = axes.get_xlim()
    axes.set_xlim([x_lim[0], x_lim[1] * proportion])
"""
Draw plot using Matplotlib
"""
def draw_plot_func(dictionary, n_classes, window_title, plot_title, x_label, output_path, to_show, plot_color,
true_p_bar):
# sort the dictionary by decreasing value, into a list of tuples
sorted_dic_by_value = sorted(dictionary.items(), key=operator.itemgetter(1))
# unpacking the list of tuples into two lists
sorted_keys, sorted_values = zip(*sorted_dic_by_value)
#
if true_p_bar != "":
"""
Special case to draw in:
- green -> TP: True Positives (object detected and matches ground-truth)
- red -> FP: False Positives (object detected but does not match ground-truth)
- pink -> FN: False Negatives (object not detected but present in the ground-truth)
"""
fp_sorted = []
tp_sorted = []
for key in sorted_keys:
fp_sorted.append(dictionary[key] - true_p_bar[key])
tp_sorted.append(true_p_bar[key])
plt.barh(range(n_classes), fp_sorted, align='center', color='crimson', label='False Positive')
plt.barh(range(n_classes), tp_sorted, align='center', color='forestgreen', label='True Positive',
left=fp_sorted)
# add legend
plt.legend(loc='lower right')
"""
Write number on side of bar
"""
fig = plt.gcf() # gcf - get current figure
axes = plt.gca()
r = fig.canvas.get_renderer()
for i, val in enumerate(sorted_values):
fp_val = fp_sorted[i]
tp_val = tp_sorted[i]
fp_str_val = " " + str(fp_val)
tp_str_val = fp_str_val + " " + str(tp_val)
# trick to paint multicolor with offset:
# first paint everything and then repaint the first number
t = plt.text(val, i, tp_str_val, color='forestgreen', va='center', fontweight='bold')
plt.text(val, i, fp_str_val, color='crimson', va='center', fontweight='bold')
if i == (len(sorted_values) - 1): # largest bar
adjust_axes(r, t, fig, axes)
else:
plt.barh(range(n_classes), sorted_values, color=plot_color)
"""
Write number on side of bar
"""
fig = plt.gcf() # gcf - get current figure
axes = plt.gca()
r = fig.canvas.get_renderer()
for i, val in enumerate(sorted_values):
str_val = " " + str(val) # add a space before
if val < 1.0:
str_val = " {0:.2f}".format(val)
t = plt.text(val, i, str_val, color=plot_color, va='center', fontweight='bold')
# re-set axes to show number inside the figure
if i == (len(sorted_values) - 1): # largest bar
adjust_axes(r, t, fig, axes)
# set window title
fig.canvas.set_window_title(window_title)
# write classes in y axis
tick_font_size = 12
plt.yticks(range(n_classes), sorted_keys, fontsize=tick_font_size)
"""
Re-scale height accordingly
"""
init_height = fig.get_figheight()
    # compute the matrix height in points and inches
dpi = fig.dpi
height_pt = n_classes * (tick_font_size * 1.4) # 1.4 (some spacing)
height_in = height_pt / dpi
# compute the required figure height
top_margin = 0.15 # in percentage of the figure height
bottom_margin = 0.05 # in percentage of the figure height
figure_height = height_in / (1 - top_margin - bottom_margin)
# set new height
if figure_height > init_height:
fig.set_figheight(figure_height)
# set plot title
plt.title(plot_title, fontsize=14)
# set axis titles
# plt.xlabel('classes')
plt.xlabel(x_label, fontsize='large')
# adjust size of window
fig.tight_layout()
# save the plot
fig.savefig(output_path)
# show image
if to_show:
plt.show()
# close the plot
plt.close()
"""
Create a ".temp_files/" and "output/" directory
"""
TEMP_FILES_PATH = ".temp_files"
if not os.path.exists(TEMP_FILES_PATH): # if it doesn't exist already
os.makedirs(TEMP_FILES_PATH)
output_files_path = "output"
if os.path.exists(output_files_path): # if it exist already
# reset the output directory
shutil.rmtree(output_files_path)
os.makedirs(output_files_path)
if draw_plot:
os.makedirs(os.path.join(output_files_path, "classes"))
if show_animation:
os.makedirs(os.path.join(output_files_path, "images", "detections_one_by_one"))
"""
ground-truth
Load each of the ground-truth files into a temporary ".json" file.
Create a list of all the class names present in the ground-truth (gt_classes).
"""
# get a list with the ground-truth files
ground_truth_files_list = glob.glob(GT_PATH + '/*.txt')
if len(ground_truth_files_list) == 0:
error("Error: No ground-truth files found!")
ground_truth_files_list.sort()
# dictionary with counter per class
gt_counter_per_class = {}
counter_images_per_class = {}
gt_files = []
for txt_file in ground_truth_files_list:
# print(txt_file)
file_id = txt_file.split(".txt", 1)[0]
file_id = os.path.basename(os.path.normpath(file_id))
# check if there is a correspondent detection-results file
temp_path = os.path.join(DR_PATH, (file_id + ".txt"))
if not os.path.exists(temp_path):
error_msg = "Error. File not found: {}\n".format(temp_path)
error_msg += "(You can avoid this error message by running extra/intersect-gt-and-dr.py)"
error(error_msg)
lines_list = file_lines_to_list(txt_file)
# create ground-truth dictionary
bounding_boxes = []
is_difficult = False
already_seen_classes = []
for line in lines_list:
try:
if "difficult" in line:
class_name, left, top, right, bottom, _difficult = line.split()
is_difficult = True
else:
class_name, left, top, right, bottom = line.split()
except ValueError:
error_msg = "Error: File " + txt_file + " in the wrong format.\n"
error_msg += " Expected: <class_name> <left> <top> <right> <bottom> ['difficult']\n"
error_msg += " Received: " + line
error_msg += "\n\nIf you have a <class_name> with spaces between words you should remove them\n"
error_msg += "by running the script \"remove_space.py\" or \"rename_class.py\" in the \"extra/\" folder."
error(error_msg)
# check if class is in the ignore list, if yes skip
if class_name in args.ignore:
continue
bbox = left + " " + top + " " + right + " " + bottom
if is_difficult:
bounding_boxes.append({"class_name": class_name, "bbox": bbox, "used": False, "difficult": True})
is_difficult = False
else:
bounding_boxes.append({"class_name": class_name, "bbox": bbox, "used": False})
# count that object
if class_name in gt_counter_per_class:
gt_counter_per_class[class_name] += 1
else:
# if class didn't exist yet
gt_counter_per_class[class_name] = 1
if class_name not in already_seen_classes:
if class_name in counter_images_per_class:
counter_images_per_class[class_name] += 1
else:
# if class didn't exist yet
counter_images_per_class[class_name] = 1
already_seen_classes.append(class_name)
# dump bounding_boxes into a ".json" file
new_temp_file = TEMP_FILES_PATH + "/" + file_id + "_ground_truth.json"
gt_files.append(new_temp_file)
with open(new_temp_file, 'w') as outfile:
json.dump(bounding_boxes, outfile)
gt_classes = list(gt_counter_per_class.keys())
# let's sort the classes alphabetically
gt_classes = sorted(gt_classes)
n_classes = len(gt_classes)
# print(gt_classes)
# print(gt_counter_per_class)
"""
Check format of the flag --set-class-iou (if used)
e.g. check if class exists
"""
if specific_iou_flagged:
n_args = len(args.set_class_iou)
error_msg = \
'\n --set-class-iou [class_1] [IoU_1] [class_2] [IoU_2] [...]'
if n_args % 2 != 0:
error('Error, missing arguments. Flag usage:' + error_msg)
# [class_1] [IoU_1] [class_2] [IoU_2]
# specific_iou_classes = ['class_1', 'class_2']
specific_iou_classes = args.set_class_iou[::2] # even
# iou_list = ['IoU_1', 'IoU_2']
iou_list = args.set_class_iou[1::2] # odd
if len(specific_iou_classes) != len(iou_list):
error('Error, missing arguments. Flag usage:' + error_msg)
for tmp_class in specific_iou_classes:
if tmp_class not in gt_classes:
error('Error, unknown class \"' + tmp_class + '\". Flag usage:' + error_msg)
for num in iou_list:
if not is_float_between_0_and_1(num):
error('Error, IoU must be between 0.0 and 1.0. Flag usage:' + error_msg)
"""
detection-results
Load each of the detection-results files into a temporary ".json" file.
"""
# get a list with the detection-results files
dr_files_list = glob.glob(DR_PATH + '/*.txt')
dr_files_list.sort()
for class_index, class_name in enumerate(gt_classes):
bounding_boxes = []
for txt_file in dr_files_list:
# print(txt_file)
# the first time it checks if all the corresponding ground-truth files exist
file_id = txt_file.split(".txt", 1)[0]
file_id = os.path.basename(os.path.normpath(file_id))
temp_path = os.path.join(GT_PATH, (file_id + ".txt"))
if class_index == 0:
if not os.path.exists(temp_path):
error_msg = "Error. File not found: {}\n".format(temp_path)
error_msg += "(You can avoid this error message by running extra/intersect-gt-and-dr.py)"
error(error_msg)
lines = file_lines_to_list(txt_file)
for line in lines:
try:
tmp_class_name, confidence, left, top, right, bottom = line.split()
except ValueError:
error_msg = "Error: File " + txt_file + " in the wrong format.\n"
error_msg += " Expected: <class_name> <confidence> <left> <top> <right> <bottom>\n"
error_msg += " Received: " + line
error(error_msg)
if tmp_class_name == class_name:
# print("match")
bbox = left + " " + top + " " + right + " " + bottom
bounding_boxes.append({"confidence": confidence, "file_id": file_id, "bbox": bbox})
# print(bounding_boxes)
# sort detection-results by decreasing confidence
bounding_boxes.sort(key=lambda x: float(x['confidence']), reverse=True)
with open(TEMP_FILES_PATH + "/" + class_name + "_dr.json", 'w') as outfile:
json.dump(bounding_boxes, outfile)
"""
Calculate the AP for each class
"""
count_true_positives = {}
ap_dictionary = {}
lamr_dictionary = {}
def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=()):
""" Compute the average precision, given the recall and precision curves.
Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
# Arguments
tp: True positives (nparray, nx1 or nx10).
conf: Objectness value from 0-1 (nparray).
pred_cls: Predicted object classes (nparray).
target_cls: True object classes (nparray).
plot: Plot precision-recall curve at mAP@0.5
save_dir: Plot save directory
# Returns
The average precision as computed in py-faster-rcnn.
"""
# Sort by objectness
i = np.argsort(-conf)
tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]
# Find unique classes
unique_classes = np.unique(target_cls)
nc = unique_classes.shape[0] # number of classes, number of detections
# Create Precision-Recall curve and compute AP for each class
px, py = np.linspace(0, 1, 1000), [] # for plotting
ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000))
nt = [] #number of targets per class
for ci, c in enumerate(unique_classes):
i = pred_cls == c
n_l = (target_cls == c).sum() # number of labels
nt.append(n_l)
n_p = i.sum() # number of predictions
if n_p == 0 or n_l == 0:
continue
else:
# Accumulate FPs and TPs
fpc = (1 - tp[i]).cumsum(0)
tpc = tp[i].cumsum(0)
# Recall
recall = tpc / (n_l + 1e-16) # recall curve
r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases
# Precision
precision = tpc / (tpc + fpc) # precision curve
p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score
# AP from recall-precision curve
for j in range(tp.shape[1]):
ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])
if plot and j == 0:
py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5
# Compute F1 (harmonic mean of precision and recall)
f1 = 2 * p * r / (p + r + 1e-16)
i = f1.mean(0).argmax() # max F1 index
return p[:, i], r[:, i], ap, f1[:, i], unique_classes.astype('int32'), np.array(nt)
def calculate_ap_for_each_class():
min_overlaps = [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]
tp = None # of size [x, 10] means if a detection is tp for 0.5,0.55,0.6 ... 0.95
conf = None #np.ndarray(1) conf for every detection
pred_cls = None #np.ndarray(1) predicted class for every detection
target_cls = np.array([], dtype=np.float64) # just all the target classes
used_file_ids = []
with open(output_files_path + "/output.txt", 'w') as output_file:
output_file.write("# AP and precision/recall per class\n")
for class_index, class_name in enumerate(gt_classes):
count_true_positives[class_name] = 0
"""
Load detection-results of that class
"""
dr_file = TEMP_FILES_PATH + "/" + class_name + "_dr.json"
dr_data = json.load(open(dr_file))
"""
Assign detection-results to ground-truth objects
"""
nd = len(dr_data)
tp_start = 0 #from where we start writing every tp
if tp is None:
tp = np.ndarray([nd, 10], dtype=bool)
tp[:,:] = False
else:
tp_start = tp.shape[0]
tp = np.append(tp, np.ndarray([nd, 10], dtype=bool), axis=0)
tp[tp_start:, :] = False
            conf_start = 0  # from where we start writing every confidence value
if conf is None:
conf = np.ndarray([nd], dtype=float)
conf[:] = 0
else:
conf_start = conf.shape[0]
conf = np.append(conf, np.ndarray([nd], dtype=float))
conf[conf_start:] = 0
            pred_start = 0  # from where we start writing every predicted class
if pred_cls is None:
pred_cls = np.ndarray([nd], dtype=np.float64)
pred_cls[:] = class_index
else:
pred_start = pred_cls.shape[0]
pred_cls = np.append(pred_cls, np.ndarray([nd], dtype=np.float64))
pred_cls[pred_start:] = class_index
for idx, detection in enumerate(dr_data):
conf[conf_start + idx] = float(detection['confidence'])
file_id = detection["file_id"]
# assign detection-results to ground truth object if any
# open ground-truth with that file_id
gt_file = TEMP_FILES_PATH + "/" + file_id + "_ground_truth.json"
ground_truth_data = json.load(open(gt_file))
ovmax = -1
gt_match = -1
# load detected object bounding-box
bb = [float(x) for x in detection["bbox"].split()]
for obj in ground_truth_data:
if file_id not in used_file_ids:
target_cls = np.append(target_cls, np.array( gt_classes.index(obj["class_name"]) ))
# look for a class_name match
if obj["class_name"] == class_name:
bbgt = [float(x) for x in obj["bbox"].split()]
bi = [max(bb[0], bbgt[0]), max(bb[1], bbgt[1]), min(bb[2], bbgt[2]), min(bb[3], bbgt[3])]
iw = bi[2] - bi[0] + 1
ih = bi[3] - bi[1] + 1
if iw > 0 and ih > 0:
# compute overlap (IoU) = area of intersection / area of union
ua = (bb[2] - bb[0] + 1) * (bb[3] - bb[1] + 1) + (bbgt[2] - bbgt[0]
+ 1) * (bbgt[3] - bbgt[1] + 1) - iw * ih
ov = iw * ih / ua
if ov > ovmax:
ovmax = ov
gt_match = obj
used_file_ids.append(file_id)
if gt_match != -1 and "difficult" not in gt_match:
if not bool(gt_match["used"]):
do_the_rest = False
for i, min_overlap in enumerate(min_overlaps):
if ovmax >= min_overlap:
# true positive
if do_the_rest is False:
do_the_rest = True
tp[tp_start + idx, i] = True
if do_the_rest:
gt_match["used"] = True
count_true_positives[class_name] += 1
# update the ".json" file
with open(gt_file, 'w') as f:
f.write(json.dumps(ground_truth_data))
else:
# false positive (multiple detection)
pass
return tp, conf, pred_cls, target_cls
tp, conf, pred_cls, target_cls = calculate_ap_for_each_class()
p, r, ap, f1, ap_class, nt = ap_per_class(tp, conf, pred_cls, target_cls)
ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95
mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
out_log = open("log_out.txt", "a")
out_log.write("*"*20)
out_log.write("\n")
out_log.write("*"*20)
out_log.write("\n")
out_log.write("*"*20)
out_log.write("\n")
out_log.write(args.log + "\n")
# Print results
s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
print(s)
out_log.write(s+"\n")
pf = '%20s' + '%11i' * 2 + '%11.3g' * 4 # print format
print(pf % ('all', len(ground_truth_files_list), nt.sum(), mp, mr, map50, map))
out_log.write(pf % ('all', len(ground_truth_files_list), nt.sum(), mp, mr, map50, map)+"\n")
print("-"*100)
out_log.write("-"*100 + "\n")
for i, c in enumerate(ap_class):
s = pf % (args.classes.split(',')[c] if args.classes is not None else c, len(ground_truth_files_list), nt[c], p[i], r[i], ap50[i], ap[i])
print(s)
out_log.write(s + "\n")
#show amount of labels per class in train set
if args.train_gt_folder:
path = args.train_gt_folder
files = os.listdir(path)
dicti = {}
for file in files:
if '.txt' in file and "labels" not in file:
with open(path + file, 'r') as f:
lines = f.readlines()
for line in lines:
nr = line.split(" ")[0]
if nr in dicti:
dicti[nr] = dicti[nr] + 1
else:
dicti[nr] = 1
train = {key: int(dicti[key]) for key in sorted(dicti.keys(), key=int)}
print("\n\nlabels per class in train set:")
print(args.classes if args.classes is not None else "")
print(train)
out_log.write("\n\nlabels per class in train set:"+"\n")
out_log.write(args.classes if args.classes is not None else "")
out_log.write(str(train))
print("\n\n\n----- for easy insertion into a sheet just copy this by row")
out_log.write("\n\n\n----- for easy insertion into a sheet just copy this by row"+"\n")
a = ['all', str(len(ground_truth_files_list)), str(nt.sum()), '%2.3g'%mp, '%2.3g'%mr, '%2.3g'%map50, '%2.3g'%map]
print("\t".join(a))
out_log.write("\t".join(a) + "\n")
for i, c in enumerate(ap_class):
a = [args.classes.split(',')[c] if args.classes is not None else c
, str(len(ground_truth_files_list)), str(nt[c]), '%2.3g'%p[i], '%2.3g'%r[i], '%2.3g'%ap50[i], '%2.3g'%ap[i]]
print( "\t".join(a) )
out_log.write("\t".join(a) + "\n")
out_log.close()
"""
Draw false negatives
"""
if show_animation:
pink = (203, 192, 255)
for tmp_file in gt_files:
ground_truth_data = json.load(open(tmp_file))
# print(ground_truth_data)
# get name of corresponding image
start = TEMP_FILES_PATH + '/'
img_id = tmp_file[tmp_file.find(start) + len(start):tmp_file.rfind('_ground_truth.json')]
img_cumulative_path = output_files_path + "/images/" + img_id + ".jpg"
img = cv2.imread(img_cumulative_path)
if img is None:
img_path = IMG_PATH + '/' + img_id + ".jpg"
img = cv2.imread(img_path)
# draw false negatives
for obj in ground_truth_data:
if not obj['used']:
bbgt = [int(round(float(x))) for x in obj["bbox"].split()]
cv2.rectangle(img, (bbgt[0], bbgt[1]), (bbgt[2], bbgt[3]), pink, 2)
cv2.imwrite(img_cumulative_path, img)
# remove the temp_files directory
shutil.rmtree(TEMP_FILES_PATH)
"""
Count total of detection-results
"""
# iterate through all the files
det_counter_per_class = {}
for txt_file in dr_files_list:
# get lines to list
lines_list = file_lines_to_list(txt_file)
for line in lines_list:
class_name = line.split()[0]
# check if class is in the ignore list, if yes skip
if class_name in args.ignore:
continue
# count that object
if class_name in det_counter_per_class:
det_counter_per_class[class_name] += 1
else:
# if class didn't exist yet
det_counter_per_class[class_name] = 1
# print(det_counter_per_class)
dr_classes = list(det_counter_per_class.keys())
"""
Plot the total number of occurrences of each class in the ground-truth
"""
if draw_plot:
window_title = "ground-truth-info"
plot_title = "ground-truth\n"
plot_title += "(" + str(len(ground_truth_files_list)) + " files and " + str(n_classes) + " classes)"
x_label = "Number of objects per class"
output_path = output_files_path + "/ground-truth-info.png"
to_show = False
plot_color = 'forestgreen'
draw_plot_func(
gt_counter_per_class,
n_classes,
window_title,
plot_title,
x_label,
output_path,
to_show,
plot_color,
'',
)
"""
Write number of ground-truth objects per class to results.txt
"""
with open(output_files_path + "/output.txt", 'a') as output_file:
output_file.write("\n# Number of ground-truth objects per class\n")
for class_name in sorted(gt_counter_per_class):
output_file.write(class_name + ": " + str(gt_counter_per_class[class_name]) + "\n")
"""
Finish counting true positives
"""
for class_name in dr_classes:
# if class exists in detection-result but not in ground-truth then there are no true positives in that class
if class_name not in gt_classes:
count_true_positives[class_name] = 0
# print(count_true_positives)
"""
Plot the total number of occurrences of each class in the "detection-results" folder
"""
if draw_plot:
window_title = "detection-results-info"
# Plot title
plot_title = "detection-results\n"
plot_title += "(" + str(len(dr_files_list)) + " files and "
count_non_zero_values_in_dictionary = sum(int(x) > 0 for x in list(det_counter_per_class.values()))
plot_title += str(count_non_zero_values_in_dictionary) + " detected classes)"
# end Plot title
x_label = "Number of objects per class"
output_path = output_files_path + "/detection-results-info.png"
to_show = False
plot_color = 'forestgreen'
true_p_bar = count_true_positives
draw_plot_func(
det_counter_per_class,
len(det_counter_per_class),
window_title,
plot_title,
x_label,
output_path,
to_show,
plot_color,
true_p_bar
)
"""
Write number of detected objects per class to output.txt
"""
with open(output_files_path + "/output.txt", 'a') as output_file:
output_file.write("\n# Number of detected objects per class\n")
for class_name in sorted(dr_classes):
n_det = det_counter_per_class[class_name]
text = class_name + ": " + str(n_det)
text += " (tp:" + str(count_true_positives[class_name]) + ""
text += ", fp:" + str(n_det - count_true_positives[class_name]) + ")\n"
output_file.write(text)
"""
Draw log-average miss rate plot (Show lamr of all classes in decreasing order)
"""
if draw_plot:
    window_title = "lamr"
    # lamr_dictionary is never populated in this refactored script, so the
    # resulting plot is empty
    plot_title = "log-average miss rate"
x_label = "log-average miss rate"
output_path = output_files_path + "/lamr.png"
to_show = False
plot_color = 'royalblue'
draw_plot_func(
lamr_dictionary,
n_classes,
window_title,
plot_title,
x_label,
output_path,
to_show,
plot_color,
""
)
"""
Draw mAP plot (Show AP's of all classes in decreasing order)
"""
if draw_plot:
    window_title = "mAP"
    # ap_dictionary is never populated in this refactored script, so the bars
    # are empty; the aggregate value in the title comes from ap_per_class
    plot_title = "mAP@.5:.95 = {0:.2f}%".format(map * 100)
x_label = "Average Precision"
output_path = output_files_path + "/mAP.png"
to_show = True
plot_color = 'royalblue'
draw_plot_func(
ap_dictionary,
n_classes,
window_title,
plot_title,
x_label,
output_path,
to_show,
plot_color,
""
)
|
{"hexsha": "a020629b55ab6abfc8cf8bf22253da2b57e92a2b", "size": 35019, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "vadimen/mAP", "max_stars_repo_head_hexsha": "6b284707d91706d5e261da69c9c7376cd57ee386", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main.py", "max_issues_repo_name": "vadimen/mAP", "max_issues_repo_head_hexsha": "6b284707d91706d5e261da69c9c7376cd57ee386", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "vadimen/mAP", "max_forks_repo_head_hexsha": "6b284707d91706d5e261da69c9c7376cd57ee386", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.592476489, "max_line_length": 141, "alphanum_fraction": 0.5944201719, "include": true, "reason": "import numpy", "num_tokens": 9006}
|
"""Helper methods for deep learning.
--- NOTATION ---
The following letters will be used throughout this module.
E = number of examples (storm objects)
M = number of rows per radar image
N = number of columns per radar image
H_r = number of heights per radar image
F_r = number of radar fields (not including different heights)
H_s = number of height levels per sounding
F_s = number of sounding fields (not including different heights)
C = number of field/height pairs per radar image
K = number of classes for target variable
T = number of file times (time steps or SPC dates)
"""
import copy
import pickle
import os.path
import numpy
from gewittergefahr.gg_utils import target_val_utils
from gewittergefahr.gg_utils import radar_utils
from gewittergefahr.gg_utils import soundings
from gewittergefahr.gg_utils import moisture_conversions
from gewittergefahr.gg_utils import temperature_conversions
from gewittergefahr.gg_utils import standard_atmosphere as standard_atmo
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.gg_utils import error_checking
TOLERANCE_FOR_FREQUENCY_SUM = 1e-3
DEFAULT_REFL_MASK_THRESHOLD_DBZ = 15.
PASCALS_TO_MB = 0.01
METRES_PER_SECOND_TO_KT = 3.6 / 1.852
MEAN_VALUE_COLUMN = 'mean_value'
STANDARD_DEVIATION_COLUMN = 'standard_deviation'
MIN_VALUE_COLUMN = 'min_value'
MAX_VALUE_COLUMN = 'max_value'
NORMALIZATION_COLUMNS_NO_HEIGHT = [
MEAN_VALUE_COLUMN, STANDARD_DEVIATION_COLUMN, MIN_VALUE_COLUMN,
MAX_VALUE_COLUMN
]
NORMALIZATION_COLUMNS_WITH_HEIGHT = [
MEAN_VALUE_COLUMN, STANDARD_DEVIATION_COLUMN
]
MINMAX_NORMALIZATION_TYPE_STRING = 'minmax'
Z_NORMALIZATION_TYPE_STRING = 'z_score'
VALID_NORMALIZATION_TYPE_STRINGS = [
MINMAX_NORMALIZATION_TYPE_STRING, Z_NORMALIZATION_TYPE_STRING]
DEFAULT_MIN_NORMALIZED_VALUE = -1.
DEFAULT_MAX_NORMALIZED_VALUE = 1.
def _check_normalization_type(normalization_type_string):
"""Ensures that normalization type is valid.
:param normalization_type_string: Normalization type.
:raises: ValueError: if
`normalization_type_string not in VALID_NORMALIZATION_TYPE_STRINGS`.
"""
error_checking.assert_is_string(normalization_type_string)
if normalization_type_string not in VALID_NORMALIZATION_TYPE_STRINGS:
error_string = (
'\n\n{0:s}\nValid normalization types (listed above) do not include'
' "{1:s}".'
).format(
str(VALID_NORMALIZATION_TYPE_STRINGS), normalization_type_string
)
raise ValueError(error_string)
def check_class_fractions(sampling_fraction_by_class_dict, target_name):
"""Error-checks sampling fractions (one for each class of target variable).
:param sampling_fraction_by_class_dict: Dictionary, where each key is
the integer representing a class (-2 for "dead storm") and each value is
the corresponding sampling fraction.
:param target_name: Name of target variable (must be accepted by
`target_val_utils.target_name_to_params`).
:raises: KeyError: if dictionary does not contain the expected keys (class
integers).
:raises: ValueError: if sum(class_fractions) != 1.
"""
num_classes = target_val_utils.target_name_to_num_classes(
target_name=target_name, include_dead_storms=False)
num_extended_classes = target_val_utils.target_name_to_num_classes(
target_name=target_name, include_dead_storms=True)
expected_keys = numpy.linspace(
0, num_classes - 1, num=num_classes, dtype=int
).tolist()
if num_extended_classes > num_classes:
expected_keys.append(target_val_utils.DEAD_STORM_INTEGER)
actual_keys = list(sampling_fraction_by_class_dict.keys())
if set(expected_keys) != set(actual_keys):
error_string = (
'\n{0:s}\nExpected sampling_fraction_by_class_dict to contain the'
' keys listed above. Instead, contains the keys listed below.'
'\n{1:s}'
).format(
str(expected_keys), str(actual_keys)
)
raise KeyError(error_string)
class_fractions = numpy.array(list(
sampling_fraction_by_class_dict.values()
))
sum_of_class_fractions = numpy.sum(class_fractions)
absolute_diff = numpy.absolute(sum_of_class_fractions - 1.)
if absolute_diff > TOLERANCE_FOR_FREQUENCY_SUM:
error_string = (
'\n{0:s}\nSum of sampling fractions (listed above) should be 1. '
'Instead, got sum = {1:.4f}.'
).format(
str(class_fractions), sum_of_class_fractions
)
raise ValueError(error_string)
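# Illustrative call (the target name is hypothetical; it must be one accepted
# by target_val_utils.target_name_to_num_classes). For a two-class target with
# dead storms, the dictionary must have exactly the keys {0, 1, -2} and its
# values must sum to 1 within TOLERANCE_FOR_FREQUENCY_SUM, e.g.:
#   check_class_fractions(
#       sampling_fraction_by_class_dict={0: 0.5, 1: 0.4, -2: 0.1},
#       target_name=SOME_BINARY_TARGET_NAME)  # hypothetical name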
def class_fractions_to_num_examples(
sampling_fraction_by_class_dict, target_name, num_examples_total):
"""For each target class, converts sampling fraction to number of examples.
:param sampling_fraction_by_class_dict: See doc for `check_class_fractions`.
:param target_name: Same.
:param num_examples_total: Total number of examples to draw.
:return: num_examples_by_class_dict: Dictionary, where each key is the
integer representing a class (-2 for "dead storm") and each value is the
corresponding number of examples to draw.
"""
check_class_fractions(
sampling_fraction_by_class_dict=sampling_fraction_by_class_dict,
target_name=target_name)
class_keys = list(sampling_fraction_by_class_dict.keys())
num_extended_classes = len(class_keys)
error_checking.assert_is_integer(num_examples_total)
error_checking.assert_is_geq(num_examples_total, num_extended_classes)
num_examples_by_class_dict = {}
num_examples_used = 0
num_classes_used = 0
for this_key in class_keys[:-1]:
this_num_examples = int(numpy.round(
sampling_fraction_by_class_dict[this_key] * num_examples_total
))
this_num_examples = max([this_num_examples, 1])
num_classes_used += 1
num_classes_left = num_extended_classes - num_classes_used
this_num_examples = min([
this_num_examples,
num_examples_total - num_examples_used - num_classes_left
])
num_examples_by_class_dict.update({this_key: this_num_examples})
num_examples_used += this_num_examples
this_num_examples = num_examples_total - num_examples_used
num_examples_by_class_dict.update({class_keys[-1]: this_num_examples})
return num_examples_by_class_dict
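# Hedged worked example (`MY_TARGET_NAME` is again a placeholder, assuming a
# three-class target with no dead-storm class): with fractions
# {0: 0.5, 1: 0.3, 2: 0.2} and num_examples_total = 100, the first two
# classes get round(0.5 * 100) = 50 and round(0.3 * 100) = 30 examples, and
# the last class gets the remainder, 100 - 80 = 20.
#
#     >>> class_fractions_to_num_examples(
#     ...     sampling_fraction_by_class_dict={0: 0.5, 1: 0.3, 2: 0.2},
#     ...     target_name=MY_TARGET_NAME, num_examples_total=100)
#     {0: 50, 1: 30, 2: 20}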
def event_probs_to_multiclass(event_probabilities):
"""Converts 1-D array of event probabilities to 2-D array.
E = number of examples
:param event_probabilities: length-E numpy array of event probabilities.
:return: class_probability_matrix: E-by-2 numpy array, where second column
contains probabilities of event and first column contains probabilities
of non-event.
"""
error_checking.assert_is_numpy_array(event_probabilities, num_dimensions=1)
error_checking.assert_is_geq_numpy_array(event_probabilities, 0.)
error_checking.assert_is_leq_numpy_array(event_probabilities, 1.)
these_probs = numpy.reshape(
event_probabilities, (len(event_probabilities), 1)
)
return numpy.hstack((1. - these_probs, these_probs))
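# Minimal sketch of the conversion: each event probability p becomes the row
# [1 - p, p].
#
#     >>> event_probs_to_multiclass(numpy.array([0.2, 0.9]))
#     array([[0.8, 0.2],
#            [0.1, 0.9]])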
def class_fractions_to_weights(
sampling_fraction_by_class_dict, target_name, binarize_target):
"""For each target class, converts sampling fraction to loss-fctn weight.
:param sampling_fraction_by_class_dict: See doc for `check_class_fractions`.
:param target_name: Same.
:param binarize_target: Boolean flag. If True, the target variable will be
binarized, so that the highest class = 1 and all other classes = 0.
Otherwise, the original number of classes will be retained, except that
-2 ("dead storm") will be mapped to 0 (the lowest class).
:return: lf_weight_by_class_dict: Dictionary, where each key is the integer
representing a class and each value is the corresponding loss-function
weight.
"""
check_class_fractions(
sampling_fraction_by_class_dict=sampling_fraction_by_class_dict,
target_name=target_name)
error_checking.assert_is_boolean(binarize_target)
class_keys = list(sampling_fraction_by_class_dict.keys())
if binarize_target:
max_key = numpy.max(numpy.array(class_keys))
positive_fraction = sampling_fraction_by_class_dict[max_key]
negative_fraction = 1. - positive_fraction
new_sampling_fraction_dict = {
0: negative_fraction, 1: positive_fraction
}
else:
new_sampling_fraction_dict = copy.deepcopy(
sampling_fraction_by_class_dict)
if target_val_utils.DEAD_STORM_INTEGER in class_keys:
new_sampling_fraction_dict[0] = (
new_sampling_fraction_dict[0] +
new_sampling_fraction_dict[target_val_utils.DEAD_STORM_INTEGER]
)
del new_sampling_fraction_dict[target_val_utils.DEAD_STORM_INTEGER]
class_keys = list(new_sampling_fraction_dict.keys())
class_fractions = numpy.array(list(
new_sampling_fraction_dict.values()
))
loss_function_weights = 1. / class_fractions
loss_function_weights = (
loss_function_weights / numpy.sum(loss_function_weights)
)
return dict(list(zip(class_keys, loss_function_weights)))
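# Hedged worked example (placeholder target name; two classes, no dead
# storms, no binarization): with fractions {0: 0.8, 1: 0.2}, the raw weights
# are 1 / 0.8 = 1.25 and 1 / 0.2 = 5, which normalize to 1.25 / 6.25 = 0.2
# and 5 / 6.25 = 0.8.  The rarer class thus gets the larger loss-function
# weight.
#
#     >>> class_fractions_to_weights(
#     ...     sampling_fraction_by_class_dict={0: 0.8, 1: 0.2},
#     ...     target_name=MY_TARGET_NAME, binarize_target=False)
#     {0: 0.2, 1: 0.8}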
def check_radar_images(
radar_image_matrix, min_num_dimensions=3, max_num_dimensions=5):
"""Error-checks storm-centered radar images.
:param radar_image_matrix: numpy array of radar images. Dimensions may be
E x M x N, E x M x N x C, or E x M x N x H_r x F_r.
:param min_num_dimensions: Minimum dimensionality of `radar_image_matrix`.
:param max_num_dimensions: Maximum dimensionality of `radar_image_matrix`.
"""
error_checking.assert_is_integer(min_num_dimensions)
error_checking.assert_is_geq(min_num_dimensions, 3)
error_checking.assert_is_leq(min_num_dimensions, 5)
error_checking.assert_is_integer(max_num_dimensions)
error_checking.assert_is_geq(max_num_dimensions, 3)
error_checking.assert_is_leq(max_num_dimensions, 5)
error_checking.assert_is_geq(max_num_dimensions, min_num_dimensions)
error_checking.assert_is_numpy_array_without_nan(radar_image_matrix)
num_dimensions = len(radar_image_matrix.shape)
error_checking.assert_is_geq(num_dimensions, min_num_dimensions)
error_checking.assert_is_leq(num_dimensions, max_num_dimensions)
def check_soundings(sounding_matrix, num_examples=None, num_height_levels=None,
num_fields=None):
"""Error-checks storm-centered soundings.
:param sounding_matrix: numpy array (E x H_s x F_s) of soundings.
:param num_examples: Number of examples in `sounding_matrix`.
:param num_height_levels: Number of height levels expected in
`sounding_matrix`.
:param num_fields: Number of fields expected in `sounding_matrix`.
"""
error_checking.assert_is_real_numpy_array(sounding_matrix)
error_checking.assert_is_numpy_array(sounding_matrix, num_dimensions=3)
expected_dimensions = []
if num_examples is None:
expected_dimensions += [sounding_matrix.shape[0]]
else:
expected_dimensions += [num_examples]
if num_height_levels is None:
expected_dimensions += [sounding_matrix.shape[1]]
else:
expected_dimensions += [num_height_levels]
if num_fields is None:
expected_dimensions += [sounding_matrix.shape[2]]
else:
expected_dimensions += [num_fields]
error_checking.assert_is_numpy_array(
sounding_matrix, exact_dimensions=numpy.array(expected_dimensions))
def check_target_array(target_array, num_dimensions, num_classes):
"""Error-checks target values.
:param target_array: numpy array in one of two formats.
[1] length-E integer numpy array of target values. All values are -2
("dead storm") or 0...[K - 1], where K = number of classes.
[2] E-by-K numpy array, where each value is 0 or 1. If target_array[i, k] =
1, the [i]th storm object belongs to the [k]th class. Classes are
mutually exclusive and collectively exhaustive, so the sum across each
row of the matrix is 1.
:param num_dimensions: Number of dimensions expected in `target_array`.
:param num_classes: Number of classes that should be represented in
`target_array`.
"""
error_checking.assert_is_integer(num_dimensions)
error_checking.assert_is_geq(num_dimensions, 1)
error_checking.assert_is_leq(num_dimensions, 2)
error_checking.assert_is_integer(num_classes)
error_checking.assert_is_geq(num_classes, 2)
num_examples = target_array.shape[0]
if num_dimensions == 1:
error_checking.assert_is_integer_numpy_array(target_array)
these_expected_dim = numpy.array([num_examples], dtype=int)
error_checking.assert_is_numpy_array(
target_array, exact_dimensions=these_expected_dim)
# TODO(thunderhoser): This is a HACK. Should do better input-checking.
# live_storm_object_indices = numpy.where(
# target_array != target_val_utils.DEAD_STORM_INTEGER
# )[0]
# error_checking.assert_is_geq_numpy_array(
# target_array[live_storm_object_indices], 0
# )
error_checking.assert_is_geq_numpy_array(
target_array, target_val_utils.DEAD_STORM_INTEGER)
error_checking.assert_is_less_than_numpy_array(
target_array, num_classes)
else:
error_checking.assert_is_geq_numpy_array(target_array, 0)
error_checking.assert_is_leq_numpy_array(target_array, 1)
these_expected_dim = numpy.array([num_examples, num_classes], dtype=int)
error_checking.assert_is_numpy_array(
target_array, exact_dimensions=these_expected_dim)
def stack_radar_fields(tuple_of_3d_matrices):
"""Stacks radar images with different field/height pairs.
:param tuple_of_3d_matrices: length-C tuple, where each item is an
E-by-M-by-N numpy array of radar images.
:return: radar_image_matrix: E-by-M-by-N-by-C numpy array of radar images.
"""
radar_image_matrix = numpy.stack(tuple_of_3d_matrices, axis=-1)
check_radar_images(
radar_image_matrix=radar_image_matrix, min_num_dimensions=4,
max_num_dimensions=4)
return radar_image_matrix
def stack_radar_heights(tuple_of_4d_matrices):
"""Stacks radar images with different heights.
:param tuple_of_4d_matrices: tuple (length H_r), where each item is a numpy
array (E x M x N x F_r) of radar images.
:return: radar_image_matrix: numpy array (E x M x N x H_r x F_r) of radar
images.
"""
radar_image_matrix = numpy.stack(tuple_of_4d_matrices, axis=-2)
check_radar_images(
radar_image_matrix=radar_image_matrix, min_num_dimensions=5,
max_num_dimensions=5)
return radar_image_matrix
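# Shape-only sketch of the two stacking methods (array contents are random
# and purely illustrative), assuming E = 10 examples on a 32-by-32 grid:
#
#     >>> these_3d_matrices = tuple(
#     ...     numpy.random.rand(10, 32, 32) for _ in range(3))
#     >>> stack_radar_fields(these_3d_matrices).shape          # C = 3
#     (10, 32, 32, 3)
#     >>> these_4d_matrices = tuple(
#     ...     numpy.random.rand(10, 32, 32, 3) for _ in range(12))
#     >>> stack_radar_heights(these_4d_matrices).shape  # H_r = 12, F_r = 3
#     (10, 32, 32, 12, 3)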
def normalize_radar_images(
radar_image_matrix, field_names, normalization_type_string,
normalization_param_file_name, test_mode=False, min_normalized_value=0.,
max_normalized_value=1., normalization_table=None):
"""Normalizes radar images.
If normalization_type_string = "z", z-score normalization is done for each
field independently. Means and standard deviations are read from the
normalization file.
If normalization_type_string = "minmax", min-max normalization is done for
each field independently, using the following equations. Climatological
minima and maxima are read from the normalization file.
x_unscaled(i, j) = [x(i, j) - x_min] / [x_max - x_min]
x_scaled(i, j) = x_unscaled(i, j) * [
max_normalized_value - min_normalized_value
] + min_normalized_value
x(i, j) = original value at pixel (i, j)
x_min = climatological minimum for field x
    x_max = climatological maximum for field x
x_unscaled(i, j) = normalized but unscaled value at pixel (i, j)
min_normalized_value: from input args
max_normalized_value: from input args
x_scaled(i, j) = normalized and scaled value at pixel (i, j)
:param radar_image_matrix: numpy array of radar images. Dimensions may be
E x M x N x C or E x M x N x H_r x F_r.
:param field_names: 1-D list with names of radar fields, in the order that
they appear in radar_image_matrix. If radar_image_matrix is
4-dimensional, field_names must have length C. If radar_image_matrix is
5-dimensional, field_names must have length F_r. Each field name must
be accepted by `radar_utils.check_field_name`.
:param normalization_type_string: Normalization type (must be accepted by
`_check_normalization_type`).
:param normalization_param_file_name: Path to file with normalization
params. Will be read by `read_normalization_params_from_file`.
:param test_mode: For testing only. Leave this alone.
:param min_normalized_value:
[used only if normalization_type_string = "minmax"]
Minimum normalized value.
:param max_normalized_value:
[used only if normalization_type_string = "minmax"]
        Maximum normalized value.
:param normalization_table: For testing only. Leave this alone.
:return: radar_image_matrix: Normalized version of input, with the same
dimensions.
"""
error_checking.assert_is_boolean(test_mode)
if not test_mode:
normalization_table = read_normalization_params_from_file(
normalization_param_file_name)[0]
check_radar_images(
radar_image_matrix=radar_image_matrix, min_num_dimensions=4,
max_num_dimensions=5)
num_fields = radar_image_matrix.shape[-1]
error_checking.assert_is_string_list(field_names)
error_checking.assert_is_numpy_array(
numpy.array(field_names),
exact_dimensions=numpy.array([num_fields]))
_check_normalization_type(normalization_type_string)
if normalization_type_string == MINMAX_NORMALIZATION_TYPE_STRING:
error_checking.assert_is_greater(
max_normalized_value, min_normalized_value)
for j in range(num_fields):
if normalization_type_string == MINMAX_NORMALIZATION_TYPE_STRING:
this_min_value = normalization_table[
MIN_VALUE_COLUMN].loc[field_names[j]]
this_max_value = normalization_table[
MAX_VALUE_COLUMN].loc[field_names[j]]
radar_image_matrix[..., j] = (
(radar_image_matrix[..., j] - this_min_value) /
(this_max_value - this_min_value))
radar_image_matrix[..., j] = min_normalized_value + (
radar_image_matrix[..., j] *
(max_normalized_value - min_normalized_value))
else:
this_mean = normalization_table[
MEAN_VALUE_COLUMN].loc[field_names[j]]
this_standard_deviation = normalization_table[
STANDARD_DEVIATION_COLUMN].loc[field_names[j]]
radar_image_matrix[..., j] = (
(radar_image_matrix[..., j] - this_mean) /
this_standard_deviation)
return radar_image_matrix
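# Hedged numeric sketch of the min-max equations above, assuming a
# climatological range of [0, 70] dBZ for reflectivity and normalized limits
# of [-1, 1]: a pixel value of 35 dBZ maps to
# x_unscaled = (35 - 0) / (70 - 0) = 0.5, then to
# x_scaled = 0.5 * [1 - (-1)] + (-1) = 0.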
def denormalize_radar_images(
radar_image_matrix, field_names, normalization_type_string,
normalization_param_file_name, test_mode=False, min_normalized_value=0.,
max_normalized_value=1., normalization_table=None):
"""Denormalizes radar images.
This method is the inverse of `normalize_radar_images`.
:param radar_image_matrix: See doc for `normalize_radar_images`.
:param field_names: Same.
:param normalization_type_string: Same.
:param normalization_param_file_name: Path to file with normalization
params. Will be read by `read_normalization_params_from_file`.
:param test_mode: For testing only. Leave this alone.
:param min_normalized_value: Same.
:param max_normalized_value: Same.
:param normalization_table: For testing only. Leave this alone.
:return: radar_image_matrix: Denormalized version of input, with the same
dimensions.
"""
error_checking.assert_is_boolean(test_mode)
if not test_mode:
normalization_table = read_normalization_params_from_file(
normalization_param_file_name)[0]
check_radar_images(
radar_image_matrix=radar_image_matrix, min_num_dimensions=4,
max_num_dimensions=5)
num_fields = radar_image_matrix.shape[-1]
error_checking.assert_is_string_list(field_names)
error_checking.assert_is_numpy_array(
numpy.array(field_names),
exact_dimensions=numpy.array([num_fields]))
_check_normalization_type(normalization_type_string)
if normalization_type_string == MINMAX_NORMALIZATION_TYPE_STRING:
error_checking.assert_is_greater(
max_normalized_value, min_normalized_value)
# error_checking.assert_is_geq_numpy_array(
# radar_image_matrix, min_normalized_value)
# error_checking.assert_is_leq_numpy_array(
# radar_image_matrix, max_normalized_value)
for j in range(num_fields):
if normalization_type_string == MINMAX_NORMALIZATION_TYPE_STRING:
this_min_value = normalization_table[
MIN_VALUE_COLUMN].loc[field_names[j]]
this_max_value = normalization_table[
MAX_VALUE_COLUMN].loc[field_names[j]]
radar_image_matrix[..., j] = (
(radar_image_matrix[..., j] - min_normalized_value) /
(max_normalized_value - min_normalized_value))
radar_image_matrix[..., j] = this_min_value + (
radar_image_matrix[..., j] * (this_max_value - this_min_value))
else:
this_mean = normalization_table[
MEAN_VALUE_COLUMN].loc[field_names[j]]
this_standard_deviation = normalization_table[
STANDARD_DEVIATION_COLUMN].loc[field_names[j]]
radar_image_matrix[..., j] = this_mean + (
this_standard_deviation * radar_image_matrix[..., j])
return radar_image_matrix
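# Round-trip note (sketch): for either normalization type,
# `denormalize_radar_images` inverts `normalize_radar_images` when given the
# same field names, normalization type, parameter file, and limits.  Both
# methods write into `radar_image_matrix` in place as well as returning it,
# so pass a copy if the unnormalized values are still needed.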
def mask_low_reflectivity_pixels(
radar_image_matrix_3d, field_names,
reflectivity_threshold_dbz=DEFAULT_REFL_MASK_THRESHOLD_DBZ):
"""Masks pixels with low reflectivity.
Specifically, at each pixel with low reflectivity, this method sets all
variables to zero.
:param radar_image_matrix_3d: numpy array of radar images. Dimensions must
be E x M x N x H_r x F_r.
:param field_names: List of field names (length F_r). Each field name must
be accepted by `radar_utils.check_field_name`.
:param reflectivity_threshold_dbz: Threshold used to define "low
reflectivity" (units of dBZ).
:return: radar_image_matrix_3d: Same as input, but low-reflectivity pixels
are masked.
:raises: ValueError: if `"reflectivity_dbz" not in field_names`.
"""
# TODO(thunderhoser): Maybe values shouldn't always be set to zero?
check_radar_images(
radar_image_matrix=radar_image_matrix_3d, min_num_dimensions=5,
max_num_dimensions=5)
num_fields = radar_image_matrix_3d.shape[-1]
error_checking.assert_is_string_list(field_names)
error_checking.assert_is_numpy_array(
numpy.array(field_names), exact_dimensions=numpy.array([num_fields]))
error_checking.assert_is_greater(reflectivity_threshold_dbz, 0.)
if radar_utils.REFL_NAME not in field_names:
error_string = (
'Cannot find "{0:s}" (needed to find low-reflectivity pixels) in '
'the following list:\n{1:s}'
).format(radar_utils.REFL_NAME, str(field_names))
raise ValueError(error_string)
reflectivity_index = field_names.index(radar_utils.REFL_NAME)
tuple_of_bad_indices = numpy.where(
radar_image_matrix_3d[..., reflectivity_index] <
reflectivity_threshold_dbz)
num_bad_values = len(tuple_of_bad_indices[0])
for j in range(num_fields):
these_last_indices = numpy.full(num_bad_values, j, dtype=int)
radar_image_matrix_3d[
tuple_of_bad_indices[0], tuple_of_bad_indices[1],
tuple_of_bad_indices[2], tuple_of_bad_indices[3],
these_last_indices] = 0.
return radar_image_matrix_3d
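# Hedged sketch: every variable is zeroed at pixels below the reflectivity
# threshold.  `radar_utils.VIL_NAME` is an assumption here; substitute any
# second field name accepted by `radar_utils.check_field_name`.
#
#     >>> this_matrix = numpy.random.uniform(
#     ...     low=0., high=60., size=(10, 32, 32, 12, 2))
#     >>> this_matrix = mask_low_reflectivity_pixels(
#     ...     radar_image_matrix_3d=this_matrix,
#     ...     field_names=[radar_utils.REFL_NAME, radar_utils.VIL_NAME],
#     ...     reflectivity_threshold_dbz=15.)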
def normalize_soundings(
sounding_matrix, field_names, normalization_type_string,
normalization_param_file_name, test_mode=False, min_normalized_value=0.,
max_normalized_value=1., normalization_table=None):
"""Normalizes soundings.
This method uses the same equations as `normalize_radar_images`.
:param sounding_matrix: numpy array (E x H_s x F_s) of soundings.
:param field_names: list (length F_s) of field names, in the order that they
appear in `sounding_matrix`.
:param normalization_type_string: Normalization type (must be accepted by
`_check_normalization_type`).
:param normalization_param_file_name: Path to file with normalization
params. Will be read by `read_normalization_params_from_file`.
:param test_mode: For testing only. Leave this alone.
:param min_normalized_value:
[used only if normalization_type_string = "minmax"]
Minimum normalized value.
:param max_normalized_value:
[used only if normalization_type_string = "minmax"]
        Maximum normalized value.
:param normalization_table: For testing only. Leave this alone.
:return: sounding_matrix: Normalized version of input, with the same
dimensions.
"""
error_checking.assert_is_boolean(test_mode)
if not test_mode:
normalization_table = read_normalization_params_from_file(
normalization_param_file_name)[2]
error_checking.assert_is_string_list(field_names)
error_checking.assert_is_numpy_array(
numpy.array(field_names), num_dimensions=1)
num_fields = len(field_names)
check_soundings(sounding_matrix=sounding_matrix, num_fields=num_fields)
_check_normalization_type(normalization_type_string)
if normalization_type_string == MINMAX_NORMALIZATION_TYPE_STRING:
error_checking.assert_is_greater(
max_normalized_value, min_normalized_value)
for j in range(num_fields):
if normalization_type_string == MINMAX_NORMALIZATION_TYPE_STRING:
this_min_value = normalization_table[
MIN_VALUE_COLUMN].loc[field_names[j]]
this_max_value = normalization_table[
MAX_VALUE_COLUMN].loc[field_names[j]]
sounding_matrix[..., j] = (
(sounding_matrix[..., j] - this_min_value) /
(this_max_value - this_min_value)
)
sounding_matrix[..., j] = min_normalized_value + (
sounding_matrix[..., j] *
(max_normalized_value - min_normalized_value)
)
else:
this_mean = normalization_table[
MEAN_VALUE_COLUMN].loc[field_names[j]]
this_standard_deviation = normalization_table[
STANDARD_DEVIATION_COLUMN
].loc[field_names[j]]
sounding_matrix[..., j] = (
(sounding_matrix[..., j] - this_mean) / this_standard_deviation
)
return sounding_matrix
def denormalize_soundings(
sounding_matrix, field_names, normalization_type_string,
normalization_param_file_name, test_mode=False, min_normalized_value=0.,
max_normalized_value=1., normalization_table=None):
"""Denormalizes soundings.
This method is the inverse of `normalize_soundings`.
:param sounding_matrix: See doc for `normalize_soundings`.
:param field_names: Same.
:param normalization_type_string: Same.
:param normalization_param_file_name: Path to file with normalization
params. Will be read by `read_normalization_params_from_file`.
:param test_mode: For testing only. Leave this alone.
:param min_normalized_value: Same.
:param max_normalized_value: Same.
:param normalization_table: For testing only. Leave this alone.
:return: sounding_matrix: Denormalized version of input, with the same
dimensions.
"""
error_checking.assert_is_boolean(test_mode)
if not test_mode:
normalization_table = read_normalization_params_from_file(
normalization_param_file_name)[2]
error_checking.assert_is_string_list(field_names)
error_checking.assert_is_numpy_array(
numpy.array(field_names), num_dimensions=1)
num_fields = len(field_names)
check_soundings(sounding_matrix=sounding_matrix, num_fields=num_fields)
_check_normalization_type(normalization_type_string)
if normalization_type_string == MINMAX_NORMALIZATION_TYPE_STRING:
error_checking.assert_is_greater(
max_normalized_value, min_normalized_value)
# error_checking.assert_is_geq_numpy_array(
# sounding_matrix, min_normalized_value)
# error_checking.assert_is_leq_numpy_array(
# sounding_matrix, max_normalized_value)
for j in range(num_fields):
if normalization_type_string == MINMAX_NORMALIZATION_TYPE_STRING:
this_min_value = normalization_table[
MIN_VALUE_COLUMN].loc[field_names[j]]
this_max_value = normalization_table[
MAX_VALUE_COLUMN].loc[field_names[j]]
sounding_matrix[..., j] = (
(sounding_matrix[..., j] - min_normalized_value) /
(max_normalized_value - min_normalized_value)
)
sounding_matrix[..., j] = this_min_value + (
sounding_matrix[..., j] * (this_max_value - this_min_value)
)
else:
this_mean = normalization_table[
MEAN_VALUE_COLUMN].loc[field_names[j]]
this_standard_deviation = normalization_table[
STANDARD_DEVIATION_COLUMN
].loc[field_names[j]]
sounding_matrix[..., j] = this_mean + (
this_standard_deviation * sounding_matrix[..., j]
)
return sounding_matrix
def soundings_to_metpy_dictionaries(
sounding_matrix, field_names, height_levels_m_agl=None,
storm_elevations_m_asl=None):
"""Converts soundings to format required by MetPy.
If `sounding_matrix` contains pressures, `height_levels_m_agl` and
`storm_elevations_m_asl` will not be used.
Otherwise, `height_levels_m_agl` and `storm_elevations_m_asl` will be used
to estimate the pressure levels for each sounding.
:param sounding_matrix: numpy array (E x H_s x F_s) of soundings.
:param field_names: list (length F_s) of field names, in the order that they
appear in `sounding_matrix`.
:param height_levels_m_agl: numpy array (length H_s) of height levels
(metres above ground level), in the order that they appear in
`sounding_matrix`.
:param storm_elevations_m_asl: length-E numpy array of storm elevations
(metres above sea level).
:return: list_of_metpy_dictionaries: length-E list of dictionaries. The
format of each dictionary is described in the input doc for
`sounding_plotting.plot_sounding`.
"""
error_checking.assert_is_string_list(field_names)
error_checking.assert_is_numpy_array(
numpy.array(field_names), num_dimensions=1)
check_soundings(
sounding_matrix=sounding_matrix, num_fields=len(field_names))
try:
pressure_index = field_names.index(soundings.PRESSURE_NAME)
pressure_matrix_pascals = sounding_matrix[..., pressure_index]
except ValueError:
error_checking.assert_is_geq_numpy_array(height_levels_m_agl, 0)
error_checking.assert_is_numpy_array(
height_levels_m_agl, num_dimensions=1)
error_checking.assert_is_numpy_array_without_nan(storm_elevations_m_asl)
error_checking.assert_is_numpy_array(
storm_elevations_m_asl, num_dimensions=1)
num_height_levels = len(height_levels_m_agl)
num_examples = len(storm_elevations_m_asl)
check_soundings(
sounding_matrix=sounding_matrix, num_examples=num_examples,
num_height_levels=num_height_levels)
height_matrix_m_asl = numpy.full(
(num_examples, num_height_levels), numpy.nan
)
for i in range(num_examples):
height_matrix_m_asl[i, ...] = (
height_levels_m_agl + storm_elevations_m_asl[i]
)
pressure_matrix_pascals = standard_atmo.height_to_pressure(
height_matrix_m_asl)
try:
temperature_index = field_names.index(soundings.TEMPERATURE_NAME)
temperature_matrix_kelvins = sounding_matrix[..., temperature_index]
except ValueError:
virtual_pot_temp_index = field_names.index(
soundings.VIRTUAL_POTENTIAL_TEMPERATURE_NAME)
temperature_matrix_kelvins = (
temperature_conversions.temperatures_from_potential_temperatures(
potential_temperatures_kelvins=sounding_matrix[
..., virtual_pot_temp_index],
total_pressures_pascals=pressure_matrix_pascals)
)
try:
relative_humidity_index = field_names.index(
soundings.RELATIVE_HUMIDITY_NAME
)
dewpoint_matrix_kelvins = (
moisture_conversions.relative_humidity_to_dewpoint(
relative_humidities=
sounding_matrix[..., relative_humidity_index],
temperatures_kelvins=temperature_matrix_kelvins,
total_pressures_pascals=pressure_matrix_pascals
)
)
except ValueError:
specific_humidity_index = field_names.index(
soundings.SPECIFIC_HUMIDITY_NAME
)
dewpoint_matrix_kelvins = (
moisture_conversions.specific_humidity_to_dewpoint(
specific_humidities_kg_kg01=
sounding_matrix[..., specific_humidity_index],
temperatures_kelvins=temperature_matrix_kelvins,
total_pressures_pascals=pressure_matrix_pascals
)
)
temperature_matrix_celsius = temperature_conversions.kelvins_to_celsius(
temperature_matrix_kelvins)
dewpoint_matrix_celsius = temperature_conversions.kelvins_to_celsius(
dewpoint_matrix_kelvins)
try:
u_wind_index = field_names.index(soundings.U_WIND_NAME)
v_wind_index = field_names.index(soundings.V_WIND_NAME)
include_wind = True
except ValueError:
include_wind = False
num_examples = sounding_matrix.shape[0]
list_of_metpy_dictionaries = [None] * num_examples
for i in range(num_examples):
list_of_metpy_dictionaries[i] = {
soundings.PRESSURE_COLUMN_METPY:
pressure_matrix_pascals[i, :] * PASCALS_TO_MB,
soundings.TEMPERATURE_COLUMN_METPY:
temperature_matrix_celsius[i, :],
soundings.DEWPOINT_COLUMN_METPY: dewpoint_matrix_celsius[i, :],
}
if include_wind:
list_of_metpy_dictionaries[i].update({
soundings.U_WIND_COLUMN_METPY:
(sounding_matrix[i, ..., u_wind_index] *
METRES_PER_SECOND_TO_KT),
soundings.V_WIND_COLUMN_METPY:
(sounding_matrix[i, ..., v_wind_index] *
METRES_PER_SECOND_TO_KT)
})
return list_of_metpy_dictionaries
def sample_by_class(
sampling_fraction_by_class_dict, target_name, target_values,
num_examples_total, test_mode=False):
"""Randomly draws examples, respecting the fraction for each target class.
In other words, this method allows "oversampling" and "undersampling" of
different classes.
:param sampling_fraction_by_class_dict: See doc for `check_class_fractions`.
:param target_name: Same.
:param target_values: See doc for `check_target_array` (format 1).
:param num_examples_total: See doc for `class_fractions_to_num_examples`.
:param test_mode: Leave this argument alone.
:return: indices_to_keep: 1-D numpy array with indices of examples to keep.
These are array indices into `target_values`.
"""
num_desired_examples_by_class_dict = class_fractions_to_num_examples(
sampling_fraction_by_class_dict=sampling_fraction_by_class_dict,
target_name=target_name, num_examples_total=num_examples_total)
num_classes = target_val_utils.target_name_to_num_classes(
target_name=target_name, include_dead_storms=False)
check_target_array(
target_array=target_values, num_dimensions=1, num_classes=num_classes)
indices_to_keep_by_class_dict = {}
num_desired_examples_by_extended_class = []
num_avail_examples_by_extended_class = []
class_keys = list(num_desired_examples_by_class_dict.keys())
for this_key in class_keys:
num_desired_examples_by_extended_class.append(
num_desired_examples_by_class_dict[this_key]
)
these_indices = numpy.where(target_values == this_key)[0]
num_avail_examples_by_extended_class.append(len(these_indices))
if (num_desired_examples_by_extended_class[-1] > 0 and
num_avail_examples_by_extended_class[-1] == 0):
return None
indices_to_keep_by_class_dict.update({this_key: these_indices})
num_desired_examples_by_extended_class = numpy.array(
num_desired_examples_by_extended_class)
num_avail_examples_by_extended_class = numpy.array(
num_avail_examples_by_extended_class)
if numpy.any(num_avail_examples_by_extended_class <
num_desired_examples_by_extended_class):
avail_to_desired_ratio_by_extended_class = (
num_avail_examples_by_extended_class.astype(float) /
num_desired_examples_by_extended_class)
num_examples_total = int(numpy.floor(
num_examples_total *
numpy.min(avail_to_desired_ratio_by_extended_class)))
num_desired_examples_by_class_dict = class_fractions_to_num_examples(
sampling_fraction_by_class_dict=sampling_fraction_by_class_dict,
target_name=target_name, num_examples_total=num_examples_total)
indices_to_keep = numpy.array([], dtype=int)
class_keys = list(indices_to_keep_by_class_dict.keys())
for this_key in class_keys:
if not test_mode:
numpy.random.shuffle(indices_to_keep_by_class_dict[this_key])
this_num_examples = num_desired_examples_by_class_dict[this_key]
indices_to_keep = numpy.concatenate((
indices_to_keep,
indices_to_keep_by_class_dict[this_key][:this_num_examples]
))
return indices_to_keep
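# Hedged sketch (placeholder target name, two classes): with target values
# [0, 0, 0, 1, 1] and fractions {0: 0.5, 1: 0.5}, asking for 4 examples
# returns 2 randomly chosen indices from each class.  If any requested class
# had no available examples, the method would return None instead.
#
#     >>> these_indices = sample_by_class(
#     ...     sampling_fraction_by_class_dict={0: 0.5, 1: 0.5},
#     ...     target_name=MY_TARGET_NAME,
#     ...     target_values=numpy.array([0, 0, 0, 1, 1], dtype=int),
#     ...     num_examples_total=4)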
def write_normalization_params(
pickle_file_name, radar_table_no_height, radar_table_with_height,
sounding_table_no_height, sounding_table_with_height):
"""Writes normalization parameters to Pickle file.
:param pickle_file_name: Path to output file.
:param radar_table_no_height: Single-indexed pandas DataFrame. Each index
is a field name (accepted by `radar_utils.check_field_name`). Must
contain the following columns.
radar_table_no_height.mean_value: Mean value for the given field.
radar_table_no_height.standard_deviation: Standard deviation.
radar_table_no_height.min_value: Minimum value.
    radar_table_no_height.max_value: Maximum value.
:param radar_table_with_height: Double-indexed pandas DataFrame. Each index
is a tuple with (field_name, height_m_agl), where `field_name` is
accepted by `radar_utils.check_field_name` and `height_m_agl` is in
metres above ground level. Must contain the following columns.
radar_table_with_height.mean_value: Mean value for the given field.
radar_table_with_height.standard_deviation: Standard deviation.
:param sounding_table_no_height: Single-indexed pandas DataFrame. Each
index is a field name (accepted by `soundings.check_field_name`).
Columns should be the same as in `radar_table_no_height`.
:param sounding_table_with_height: Double-indexed pandas DataFrame. Each
index is a tuple with (field_name, height_m_agl), where `field_name` is
accepted by `soundings.check_field_name` and `height_m_agl` is in metres
above ground level. Columns should be the same as in
`radar_table_with_height`.
"""
# TODO(thunderhoser): Move this to normalization.py or something.
error_checking.assert_columns_in_dataframe(
radar_table_no_height, NORMALIZATION_COLUMNS_NO_HEIGHT)
error_checking.assert_columns_in_dataframe(
radar_table_with_height, NORMALIZATION_COLUMNS_WITH_HEIGHT)
error_checking.assert_columns_in_dataframe(
sounding_table_no_height, NORMALIZATION_COLUMNS_NO_HEIGHT)
error_checking.assert_columns_in_dataframe(
sounding_table_with_height, NORMALIZATION_COLUMNS_WITH_HEIGHT)
file_system_utils.mkdir_recursive_if_necessary(file_name=pickle_file_name)
pickle_file_handle = open(pickle_file_name, 'wb')
pickle.dump(radar_table_no_height, pickle_file_handle)
pickle.dump(radar_table_with_height, pickle_file_handle)
pickle.dump(sounding_table_no_height, pickle_file_handle)
pickle.dump(sounding_table_with_height, pickle_file_handle)
pickle_file_handle.close()
def read_normalization_params_from_file(pickle_file_name):
"""Reads normalization parameters from Pickle file.
:param pickle_file_name: Path to input file.
:return: radar_table_no_height: See doc for `write_normalization_params`.
:return: radar_table_with_height: Same.
:return: sounding_table_no_height: Same.
:return: sounding_table_with_height: Same.
"""
if not os.path.isfile(pickle_file_name):
pickle_file_name = pickle_file_name.replace(
'/condo/swatwork/ralager', '/scratch/ralager')
if not os.path.isfile(pickle_file_name):
pickle_file_name = pickle_file_name.replace(
'/scratch/ralager', '/glade/scratch/ryanlage')
if not os.path.isfile(pickle_file_name):
pickle_file_name = pickle_file_name.replace(
'/glade/scratch/ryanlage', '/glade/work/ryanlage')
if not os.path.isfile(pickle_file_name):
pickle_file_name = pickle_file_name.replace(
'/glade/work/ryanlage', '/condo/swatwork/ralager')
if not os.path.isfile(pickle_file_name):
pickle_file_name = pickle_file_name.replace(
'/condo/swatwork/ralager', '/condo/swatcommon/common')
if not os.path.isfile(pickle_file_name):
pickle_file_name = pickle_file_name.replace(
'/condo/swatwork/ralager', '/scratch/ralager')
# TODO(thunderhoser): Move this to normalization.py or something.
pickle_file_handle = open(pickle_file_name, 'rb')
radar_table_no_height = pickle.load(pickle_file_handle)
radar_table_with_height = pickle.load(pickle_file_handle)
sounding_table_no_height = pickle.load(pickle_file_handle)
sounding_table_with_height = pickle.load(pickle_file_handle)
pickle_file_handle.close()
error_checking.assert_columns_in_dataframe(
radar_table_no_height, NORMALIZATION_COLUMNS_NO_HEIGHT)
error_checking.assert_columns_in_dataframe(
radar_table_with_height, NORMALIZATION_COLUMNS_WITH_HEIGHT)
error_checking.assert_columns_in_dataframe(
sounding_table_no_height, NORMALIZATION_COLUMNS_NO_HEIGHT)
error_checking.assert_columns_in_dataframe(
sounding_table_with_height, NORMALIZATION_COLUMNS_WITH_HEIGHT)
return (radar_table_no_height, radar_table_with_height,
sounding_table_no_height, sounding_table_with_height)
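# Hedged round-trip sketch (the file name is hypothetical): the four tables
# are pickled in a fixed order, so `read_normalization_params_from_file`
# returns them in the same order that `write_normalization_params` wrote
# them.
#
#     >>> write_normalization_params(
#     ...     pickle_file_name='normalization_params.p',
#     ...     radar_table_no_height=radar_table_no_height,
#     ...     radar_table_with_height=radar_table_with_height,
#     ...     sounding_table_no_height=sounding_table_no_height,
#     ...     sounding_table_with_height=sounding_table_with_height)
#     >>> (radar_table_no_height, radar_table_with_height,
#     ...  sounding_table_no_height, sounding_table_with_height
#     ... ) = read_normalization_params_from_file('normalization_params.p')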
|
{"hexsha": "af731b4838f3ea3d542e20d80ee02c8cbf06fb52", "size": 43975, "ext": "py", "lang": "Python", "max_stars_repo_path": "gewittergefahr/deep_learning/deep_learning_utils.py", "max_stars_repo_name": "dopplerchase/GewitterGefahr", "max_stars_repo_head_hexsha": "4415b08dd64f37eba5b1b9e8cc5aa9af24f96593", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 26, "max_stars_repo_stars_event_min_datetime": "2018-10-04T01:07:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-29T08:49:32.000Z", "max_issues_repo_path": "gewittergefahr/deep_learning/deep_learning_utils.py", "max_issues_repo_name": "liuximarcus/GewitterGefahr", "max_issues_repo_head_hexsha": "d819874d616f98a25187bfd3091073a2e6d5279e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2017-12-25T02:01:08.000Z", "max_issues_repo_issues_event_max_datetime": "2018-12-19T01:54:21.000Z", "max_forks_repo_path": "gewittergefahr/deep_learning/deep_learning_utils.py", "max_forks_repo_name": "liuximarcus/GewitterGefahr", "max_forks_repo_head_hexsha": "d819874d616f98a25187bfd3091073a2e6d5279e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2017-12-10T23:05:29.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-29T08:49:33.000Z", "avg_line_length": 40.1231751825, "max_line_length": 80, "alphanum_fraction": 0.7191586128, "include": true, "reason": "import numpy", "num_tokens": 9534}
|
##### Beginning of file
function _is_filesystem_root(path::AbstractString)::Bool
    path::String = abspath(strip(path))
    return path == dirname(path)
end
function _is_package_directory(path::AbstractString)::Bool
    path::String = abspath(strip(path))
    return isfile(joinpath(path, "Project.toml"))
end
function _find_package_directory(path::AbstractString)::String
path::String = abspath(strip(path))
if _is_package_directory(path)
return path
elseif _is_filesystem_root(path)
error(string("Could not find the Project.toml file"))
else
result = _find_package_directory(dirname(path))
return result
end
end
"""
package_directory()::String
Return the Snapshots package directory.
"""
function package_directory()::String
result::String = _find_package_directory(abspath(strip(@__FILE__)))
return result
end
function _location(m::Method)::String
result::String = abspath(first(functionloc(m)))
return result
end
function _location(f::Function)::String
result::String = abspath(first(functionloc(f)))
return result
end
function _location(f::Function, types::Tuple)::String
result::String = abspath(first(functionloc(f, types)))
return result
end
function _location(m::Module)::String
result::String = abspath(_location(getfield(m, :eval)))
return result
end
"""
package_directory(parts...)::String
Equivalent to `abspath(joinpath(abspath(package_directory()), parts...))`.
"""
function package_directory(parts...)::String
result::String = abspath(joinpath(abspath(package_directory()), parts...))
return result
end
"""
package_directory(m::Method)::String
If method `m`
is part of a Julia package, returns the package root directory.
If method `m`
is not part of a Julia package, throws an error.
"""
function package_directory(m::Method)::String
m_module_directory::String = abspath(_location(m))
m_package_directory::String = abspath(
_find_package_directory(m_module_directory)
)
return m_package_directory
end
"""
package_directory(m::Method, parts...)::String
Equivalent to
`result = abspath(joinpath(abspath(package_directory(m)), parts...))`.
"""
function package_directory(m::Method, parts...)::String
result::String = abspath(joinpath(abspath(package_directory(m)), parts...))
return result
end
"""
package_directory(f::Function)::String
If function `f`
is part of a Julia package, returns the package root directory.
If function `f`
is not part of a Julia package, throws an error.
"""
function package_directory(f::Function)::String
m_module_directory::String = abspath(_location(f))
m_package_directory::String = abspath(
_find_package_directory(m_module_directory)
)
return m_package_directory
end
"""
package_directory(f::Function, parts...)::String
Equivalent to
`result = abspath(joinpath(abspath(package_directory(f)), parts...))`.
"""
function package_directory(f::Function, parts...)::String
result::String = abspath(joinpath(abspath(package_directory(f)), parts...))
return result
end
"""
package_directory(f::Function, types::Tuple)::String
If function `f` with type signature `types`
is part of a Julia package, returns the package root directory.
If function `f` with type signature `types`
is not part of a Julia package, throws an error.
"""
function package_directory(f::Function, types::Tuple)::String
m_module_directory::String = abspath(_location(f, types))
m_package_directory::String = abspath(
_find_package_directory(m_module_directory)
)
return m_package_directory
end
"""
package_directory(f::Function, types::Tuple, parts...)::String
Equivalent to
`result = abspath(joinpath(abspath(package_directory(f, types)), parts...))`.
"""
function package_directory(f::Function, types::Tuple, parts...)::String
result::String = abspath(joinpath(abspath(package_directory(f, types)), parts...))
return result
end
"""
package_directory(m::Module)::String
If module `m`
is part of a Julia package, returns the package root directory.
If module `m`
is not part of a Julia package, throws an error.
"""
function package_directory(m::Module)::String
m_module_directory::String = abspath(_location(m))
m_package_directory::String = abspath(
_find_package_directory(m_module_directory)
)
return m_package_directory
end
"""
package_directory(m::Module, parts...)::String
Equivalent to
`result = abspath(joinpath(abspath(package_directory(m)), parts...))`.
"""
function package_directory(m::Module, parts...)::String
result::String = abspath(joinpath(abspath(package_directory(m)), parts...))
return result
end
##### End of file
|
{"hexsha": "233b515cac8dd06b1fa786482809fd550355eb0e", "size": 4876, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/package_directory.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/Snapshots.jl-44eb87bc-f37b-45e8-9f53-3bcb453a652d", "max_stars_repo_head_hexsha": "7d31297350f9ad4af022d8735c19a78335bed917", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/package_directory.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/Snapshots.jl-44eb87bc-f37b-45e8-9f53-3bcb453a652d", "max_issues_repo_head_hexsha": "7d31297350f9ad4af022d8735c19a78335bed917", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/package_directory.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/Snapshots.jl-44eb87bc-f37b-45e8-9f53-3bcb453a652d", "max_forks_repo_head_hexsha": "7d31297350f9ad4af022d8735c19a78335bed917", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.0748663102, "max_line_length": 86, "alphanum_fraction": 0.7120590648, "num_tokens": 1108}
|
function DEM_demo_fMRI_HMM
% Demonstration of Hidden Markov models for fMRI
%__________________________________________________________________________
% This demonstration routine illustrates the modelling of state
% transitions generating resting state fMRI timeseries. The hidden states
% are modelled as a hidden Markov model, where each state corresponds to a
% particular point in the parameter space of effective connectivity. This
% effective connectivity then generates complex cross spectral data
% features of the observed timeseries. Model specification requires prior
% constraints on the probability transition matrix among hidden states,
% which implicitly specifies the number of hidden states. The user also
% has to specify the number of windows for epochs to apply to the
% timeseries, where each epoch places a lower bound on the duration of
% each (discrete) state.
% We first generate synthetic data using regular transitions among
% three hidden states (cf. a discrete version of a heteroclinic
% cycle or orbit). The data are then inverted by a routine that
% combines a parametric empirical Bayesian model and a hidden Markov model
% (implemented as a special case of a Markov decision process). This
% inversion is repeated for each model specified in terms of the
% transition matrices (as prior Dirichlet concentration parameters).
% Setting a prior transition parameter to 0 precludes that transition. In
% this way, several different models of transitions and number of hidden
% states can be scored in terms of the variational free energy.
% Following inversion, the results are plotted in terms of expected
% state transitions, fluctuations in connections that are allowed to
% change (specified in the usual way by DCM.b), the deviations in
% connectivity associated with each hidden state and the expected
% probability transition matrix.
% Finally, we consider Bayesian model comparison in terms of group
% differences (here, simply the difference between the first and second
% simulated subject). Bayesian model comparison is simple to do in this
% context by comparing the free energy of a hidden Markov model in which
% both groups share the same state dependent connections and transition
% probabilities, with two independent models. These can be evaluated
% efficiently using Bayesian model reduction implicit in PEB. In this
% example, we did not introduce any differences between the two groups
% (i.e., subjects) and therefore expected to infer no group effect.
%__________________________________________________________________________
% Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging
% Karl Friston
% $Id: DEM_demo_fMRI_HMM.m 7679 2019-10-24 15:54:07Z spm $
% (I) Simulate fMRI timeseries
%==========================================================================
rng('default')
% Assume we have P sessions with N epochs of T scans with a TR of 2 secs:
% -------------------------------------------------------------------------
% These epochs could be from a single subject or result from the
% concatenation of multiple sessions (under the assumption that they share
% the same modes of connectivity).
% -------------------------------------------------------------------------
P = 2; % number of sessions (e.g., subjects)
S = 3; % number of latent (hidden) states
N = 9; % number of epochs or windows
T = 128; % number of observations (per epoch)
TR = 2; % repetition time or timing
t = (1:(T*N))*TR; % observation times (seconds)
n = 3; % number of regions or nodes
% setup model for generating timeseries
% -------------------------------------------------------------------------
options.nonlinear = 0;
options.two_state = 0;
options.stochastic = 0;
options.induced = 1;
% get priors to generate simulated data
% -------------------------------------------------------------------------
a = ones(n,n);
b = zeros(n,n,0);
c = zeros(n,n);
d = zeros(n,n,0);
pP = spm_dcm_fmri_priors(a,b,c,d,options);
% average parameters - a simple hierarchy of three nodes
% -------------------------------------------------------------------------
pP.A = [ 0 -.3 0;
.4 0 -.1;
0 .3 0];
pP.C = eye(n,n);
pP.transit = randn(n,1)/64;
% generate spectral density of neuronal fluctuations and observation noise
% -------------------------------------------------------------------------
[Gu,Gn,Hz,dt] = spm_csd_fmri_gu(pP,TR);
Gu = Gu(:,1,1)*ones(1,n);
Gn = Gn(:,1,1)*ones(1,n);
% specify and generate Markovian succession of hidden states
%==========================================================================
% connections associated with hidden states: here, intrinsic connectivity
% -------------------------------------------------------------------------
B = zeros(n,n,S);
B(1,1,1) = 1/2;
B(2,2,2) = 1/2;
B(3,3,3) = 1/2;
pP.B = any(B,3); % state-dependent connections
% generate sequence of hidden states: here, a simple orbit
% -------------------------------------------------------------------------
bb = spm_speye(S,S,-1); bb(1,S) = 1;
o = kron(ones(1,N),1:S);
o = o(1:N);
% State-dependent deviations in connectivity from average
% -------------------------------------------------------------------------
for i = 1:N
tB(:,:,i) = B(:,:,o(i));
end
for i = 1:S
for j = 1:S
tB(i,j,:) = tB(i,j,:) - mean(tB(i,j,:));
end
end
%% simulate epoch-specific responses to endogenous fluctuations
%==========================================================================
M.x = sparse(n,5);
M.f = 'spm_fx_fmri';
X = cell(N,1);
Y = cell(N,1);
E = cell(N,1);
u = cell(N,1);
for p = 1:P
% parameters for this epoch, plus a small random effect
% ---------------------------------------------------------------------
gu = spm_rand_power_law(Gu,Hz,dt,N*T);
ge = spm_rand_power_law(Gn,Hz,dt,N*T);
for s = 1:N
% parameters for this epoch, plus a small random effect
% -----------------------------------------------------------------
tP = pP;
tP.A = tP.A + tB(:,:,s);
tP.A = tP.A.*(1 + randn(n,n)/64);
tP.C = eye(n,n);
% integrate states with endogenous fluctuations (gu)
% -----------------------------------------------------------------
j = (1:T) + (s - 1)*T;
M.f = 'spm_fx_fmri';
U.u = gu(j,:);
U.dt = TR;
x = spm_int_J(tP,M,U);
M.x = spm_unvec(x(end,:),M.x);
% haemodynamic observer function to produce BOLD signal
% -----------------------------------------------------------------
for i = 1:T
y(i,:) = spm_gx_fmri(spm_unvec(x(i,:),M.x),[],tP)';
end
% response with observation noise (ge)
% -----------------------------------------------------------------
e = ge(j,:);
X{s} = x;
Y{s} = y + e;
E{s} = e;
u{s} = U.u;
TP(s,p) = tP;
end
% concatenate epochs into a single timeseries
%----------------------------------------------------------------------
xY.dt = TR;
xY.y = spm_cat(Y);
xY.u = spm_cat(u);
xY.X = spm_cat(X);
xY.E = spm_cat(E);
% and create DCM cell array
%----------------------------------------------------------------------
DCM{1,p}.options = options;
DCM{1,p}.a = logical(pP.A);
DCM{1,p}.b = logical(pP.B);
DCM{1,p}.c = zeros(n,0);
DCM{1,p}.d = zeros(n,n,0);
DCM{1,p}.Y = xY;
end
%% show simulated responses and windows
%--------------------------------------------------------------------------
spm_figure('Getwin','Figure 1'); clf
subplot(3,2,1), plot(t,xY.u)
title('Endogenous fluctuations','FontSize',16)
xlabel('Time (seconds)'), ylabel('Amplitude'), axis square, spm_axis tight
subplot(3,2,2), hold off
plot(t,xY.X(:,(n + 1):end),'c'), hold on
plot(t,xY.X(:,1:n)), hold off
title('Hidden states','FontSize',16)
xlabel('Time (seconds)'), ylabel('Amplitude'), axis square, spm_axis tight
subplot(3,2,3)
plot(t,xY.y,t,xY.E,':')
title('Hemodynamic response and noise','FontSize',16)
xlabel('Time (seconds)'), ylabel('Amplitude'), axis square, spm_axis tight
% This completes the simulation of the data. We now turn to inverting the
% data to see if one can recover the number of hidden states, the form of
% the state transitions and the connectivity modes associated with each
% state:
%--------------------------------------------------------------------------
% (II) Inversion under a hidden Markov model
%==========================================================================
% Specify model space as a cell array of probability transition matrices:
% here, the model space at the level of the HMM will allow all
% transitions among one to 4 hidden states. These models are specified in
% terms of Dirichlet priors; starting with a small value of allowable
% transitions (1/16)
%--------------------------------------------------------------------------
for i = 1:4
b{i} = ones(i,i)/16;
end
% invert hidden Markov model: this is the routine demonstrated
%--------------------------------------------------------------------------
[HMM,CSD] = spm_dcm_HMM(DCM,N,b);
% This completes the inversion. We now just need to look at the results:
%--------------------------------------------------------------------------
%% (III) report analysis
%==========================================================================
spm_figure('Getwin','Figure 1');
% plot windows
% -------------------------------------------------------------------------
subplot(3,2,3), hold on
for i = 1:N, plot(t,CSD{i,end}.W - 1), end, hold off
% show estimates for a single session
% -------------------------------------------------------------------------
subplot(3,2,4)
spm_plot_ci(CSD{end}.Ep,CSD{end}.Cp), hold on
bar(TP(end).A(:),1/4), hold off, axis square
title('True and MAP connections (Deterministic)','FontSize',16)
% show state-dependent changes in connectivity over sessions
% -------------------------------------------------------------------------
for i = 1:numel(CSD)
tp(i,:) = spm_vec(TP(i).A);
qp(i,:) = spm_vec(CSD{i}.Ep.A);
pp(i,:) = spm_vec(HMM(S).Ep{i}.A);
end
subplot(3,3,7); imagesc(tp)
title('True connections','FontSize',16), axis square
subplot(3,3,8); imagesc(qp)
title('MAP estimates', 'FontSize',16), axis square
subplot(3,3,9); imagesc(pp)
title('PEB estimates', 'FontSize',16), axis square
% report hidden Markov model
%==========================================================================
spm_dcm_HMM_plot(HMM,S)
% And overlay true values, as cyan dots
%==========================================================================
% true state transitions
%--------------------------------------------------------------------------
x = sparse(o,1:N,1,S,N);
x = kron(ones(1,P),x);
N = size(x,2);
% associate true and discovered states - and reorder
%--------------------------------------------------------------------------
r = x*HMM(S).X';
j = zeros(S,1);
for i = 1:S
[d,m] = max(r(:,i));
j(i) = m;
r(m,:) = 0;
end
[o,i] = find(x(j,:));
B = B(:,:,j);
bb = bb(j,j);
% superimpose true values
%--------------------------------------------------------------------------
spm_figure('Getwin','HMM')
% hidden states
%--------------------------------------------------------------------------
subplot(4,1,1), hold on
for i = 1:N, plot(i,o(i),'.c','MarkerSize',32), end, hold off
% state-dependent parameters - fluctuations
%--------------------------------------------------------------------------
subplot(4,1,2), hold on
for i = 1:N
[j,k] = max(spm_vec(TP(i).A - pP.A));
plot(i,k,'.c','MarkerSize',32)
end, hold off
subplot(4,1,3), hold on
for i = 1:N, pA(:,i) = spm_vec(TP(i).A); end
plot(1:N,pA(HMM(S).iP,:),'-.'), hold off, spm_axis tight
% state-dependent parameters - expectations
%--------------------------------------------------------------------------
subplot(4,2,7), hold on
for i = 1:S
c = spm_vec(B(:,:,i));
[j,k] = max(c(HMM(S).iP));
plot(i,k,'.c','MarkerSize',32)
end, hold off
% expected transition probabilities
%--------------------------------------------------------------------------
subplot(4,2,8), hold on
for i = 1:S, [j,k] = max(bb(:,i)); plot(i,k,'.c','MarkerSize',32), end
hold off
%% Bayesian model comparison in terms of group (i.e., subject) differences
%==========================================================================
% model as a single group or two separate groups
%--------------------------------------------------------------------------
hmm0 = HMM(S);
hmm1 = spm_dcm_HMM(CSD(:,1),b(S));
hmm2 = spm_dcm_HMM(CSD(:,2),b(S));
% compare the free energy of the combined groups with the combined
% free energy:
%--------------------------------------------------------------------------
F = [hmm0.F; hmm1.F + hmm2.F];
F = F - min(F);
% report model comparison in terms of free energy (i.e., log evidence)
%--------------------------------------------------------------------------
spm_figure('Getwin','HMM-F')
subplot(2,2,3)
bar(F,'c'), title('Group difference','FontSize',16)
xlabel('Effect'), ylabel('Log evidence'), axis square
set(gca,'XTickLabel',{'None','Effect'})
% and show the independent maximum a posteriori estimates of state
% dependent connectivity
%--------------------------------------------------------------------------
subplot(4,2,6)
bar(hmm1.qP), title('Group 1','FontSize',16)
xlabel('Parameter'), ylabel('Connectivity (log)'), axis square
subplot(4,2,8)
bar(hmm2.qP), title('Group 2','FontSize',16)
xlabel('Parameter'), ylabel('Connectivity (log)'), axis square
return
|
{"author": "spm", "repo": "spm12", "sha": "3085dac00ac804adb190a7e82c6ef11866c8af02", "save_path": "github-repos/MATLAB/spm-spm12", "path": "github-repos/MATLAB/spm-spm12/spm12-3085dac00ac804adb190a7e82c6ef11866c8af02/toolbox/DEM/DEM_demo_fMRI_HMM.m"}
|
import os
import argparse
import time
import numpy as np
import pickle
import torch
from torch.autograd import Variable
from PIL import Image
from yolov2 import Yolov2
from dataset.factory import get_imdb
from dataset.roidb import RoiDataset
from yolo_eval import yolo_eval
from util.visualize import draw_detection_boxes
import matplotlib.pyplot as plt
from util.network import WeightLoader
from torch.utils.data import DataLoader
#from config import config as cfg
import config as cfg
def parse_args():
parser = argparse.ArgumentParser('Yolo v2')
parser.add_argument('--dataset', dest='dataset',
default='voc07test', type=str)
parser.add_argument('--output_dir', dest='output_dir',
default='output', type=str)
parser.add_argument('--model_name', dest='model_name',
default='yolov2_epoch_160', type=str)
parser.add_argument('--nw', dest='num_workers',
help='number of workers to load training data',
default=1, type=int)
parser.add_argument('--bs', dest='batch_size',
default=2, type=int)
parser.add_argument('--cuda', dest='use_cuda',
default=False, type=bool)
parser.add_argument('--vis', dest='vis',
default=False, type=bool)
args = parser.parse_args()
return args
def prepare_im_data(img):
"""
Prepare image data that will be feed to network.
Arguments:
img -- PIL.Image object
Returns:
im_data -- tensor of shape (3, H, W).
im_info -- dictionary {height, width}
"""
im_info = dict()
im_info['width'], im_info['height'] = img.size
    # resize the image to the network input size; PIL's Image.resize expects
    # (width, height), so pass (W, H) explicitly
    H, W = cfg.input_size
    im_data = img.resize((W, H))
# to torch tensor
im_data = torch.from_numpy(np.array(im_data)).float() / 255
im_data = im_data.permute(2, 0, 1).unsqueeze(0)
return im_data, im_info
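# Hedged usage sketch: 'dog.jpg' is a hypothetical path, and the printed
# shape assumes cfg.input_size == (416, 416) and a 640 x 480 source image.
#
#     >>> img = Image.open('dog.jpg')
#     >>> im_data, im_info = prepare_im_data(img)
#     >>> im_data.shape                    # batch of one, CHW layout
#     torch.Size([1, 3, 416, 416])
#     >>> im_info
#     {'width': 640, 'height': 480}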
def test():
args = parse_args()
args.conf_thresh = 0.005
args.nms_thresh = 0.45
if args.vis:
args.conf_thresh = 0.5
print('Called with args:')
print(args)
# prepare dataset
if args.dataset == 'voc07trainval':
args.imdbval_name = 'voc_2007_trainval'
elif args.dataset == 'voc07test':
args.imdbval_name = 'voc_2007_test'
else:
raise NotImplementedError
val_imdb = get_imdb(args.imdbval_name)
val_dataset = RoiDataset(val_imdb, train=False)
val_dataloader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False)
# load model
model = Yolov2()
# weight_loader = WeightLoader()
# weight_loader.load(model, 'yolo-voc.weights')
# print('loaded')
model_path = os.path.join(args.output_dir, args.model_name+'.pth')
print('loading model from {}'.format(model_path))
if torch.cuda.is_available():
checkpoint = torch.load(model_path)
else:
checkpoint = torch.load(model_path, map_location='cpu')
model.load_state_dict(checkpoint['model'])
if args.use_cuda:
model.cuda()
model.eval()
print('model loaded')
dataset_size = len(val_imdb.image_index)
all_boxes = [[[] for _ in range(dataset_size)] for _ in range(val_imdb.num_classes)]
det_file = os.path.join(args.output_dir, 'detections.pkl')
img_id = -1
with torch.no_grad():
for batch, (im_data, im_infos) in enumerate(val_dataloader):
if args.use_cuda:
im_data_variable = Variable(im_data).cuda()
else:
im_data_variable = Variable(im_data)
yolo_outputs = model(im_data_variable)
for i in range(im_data.size(0)):
img_id += 1
output = [item[i].data for item in yolo_outputs]
im_info = {'width': im_infos[i][0], 'height': im_infos[i][1]}
detections = yolo_eval(output, im_info, conf_threshold=args.conf_thresh,
nms_threshold=args.nms_thresh)
print('im detect [{}/{}]'.format(img_id+1, len(val_dataset)))
if len(detections) > 0:
for cls in range(val_imdb.num_classes):
inds = torch.nonzero(detections[:, -1] == cls).view(-1)
if inds.numel() > 0:
cls_det = torch.zeros((inds.numel(), 5))
cls_det[:, :4] = detections[inds, :4]
cls_det[:, 4] = detections[inds, 4] * detections[inds, 5]
all_boxes[cls][img_id] = cls_det.cpu().numpy()
if args.vis:
img = Image.open(val_imdb.image_path_at(img_id))
if len(detections) == 0:
continue
det_boxes = detections[:, :5].cpu().numpy()
det_classes = detections[:, -1].long().cpu().numpy()
im2show = draw_detection_boxes(img, det_boxes, det_classes, class_names=val_imdb.classes)
plt.figure()
plt.imshow(im2show)
plt.show()
with open(det_file, 'wb') as f:
pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
val_imdb.evaluate_detections(all_boxes, output_dir=args.output_dir)
if __name__ == '__main__':
test()
|
{"hexsha": "0c8706c654e9383989e0c18406a17c8f85deff78", "size": 5405, "ext": "py", "lang": "Python", "max_stars_repo_path": "test.py", "max_stars_repo_name": "minji-o-j/YOLO", "max_stars_repo_head_hexsha": "5f2d12a80879c80d4b04b4b9acd937c290d0fbd8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-26T12:25:17.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-26T12:25:17.000Z", "max_issues_repo_path": "test.py", "max_issues_repo_name": "minji-o-j/YOLOv2", "max_issues_repo_head_hexsha": "5f2d12a80879c80d4b04b4b9acd937c290d0fbd8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 19, "max_issues_repo_issues_event_min_datetime": "2020-10-22T11:56:10.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-23T15:25:39.000Z", "max_forks_repo_path": "test.py", "max_forks_repo_name": "minji-o-j/YOLO", "max_forks_repo_head_hexsha": "5f2d12a80879c80d4b04b4b9acd937c290d0fbd8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.861878453, "max_line_length": 109, "alphanum_fraction": 0.5922294172, "include": true, "reason": "import numpy", "num_tokens": 1240}
|
// Copyright Ricardo Calheiros de Miranda Cosme 2018.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#pragma once
#include <boost/fusion/include/as_vector.hpp>
#include <boost/mpl/vector.hpp>
#include <occi.h>
#include <cassert>
#include <string>
namespace picapau { namespace oracle { namespace core {
//RAII for ::oracle::occi::ResultSet
//This type isn't Copyable but it is Movable
template<typename... Columns>
struct result_set
{
using columns_t = boost::mpl::vector<Columns...>;
using tuple_t = typename boost::fusion::result_of::as_vector<
columns_t>::type;
using expected_tuple_t = boost::expected<tuple_t, std::string>;
result_set() = default;
result_set(::oracle::occi::Statement& stmt,
::oracle::occi::ResultSet& rs)
: _stmt(&stmt)
, _rs(&rs)
{}
result_set(result_set&& rhs) noexcept
: _stmt(rhs._stmt)
, _rs(rhs._rs)
{
rhs._stmt = nullptr;
rhs._rs = nullptr;
}
    result_set& operator=(result_set&& rhs) noexcept
    {
        if(this != &rhs)
        {
            //release the result set currently owned, if any, before
            //taking ownership of rhs's resources
            if(_stmt != nullptr)
                _stmt->closeResultSet(_rs);
            _stmt = rhs._stmt;
            _rs = rhs._rs;
            rhs._stmt = nullptr;
            rhs._rs = nullptr;
        }
        return *this;
    }
~result_set()
{
if(_stmt == nullptr) return;
_stmt->closeResultSet(_rs);
}
::oracle::occi::ResultSet* get() const noexcept
{ return _rs; }
::oracle::occi::ResultSet& operator*() const noexcept
{
assert(_rs != nullptr);
return *_rs;
}
::oracle::occi::ResultSet* operator->() const noexcept
{
assert(_rs != nullptr);
return _rs;
}
::oracle::occi::Statement* stmt() const noexcept
{ return _stmt; }
private:
::oracle::occi::Statement* _stmt{nullptr};
::oracle::occi::ResultSet* _rs{nullptr};
};
template<typename... Columns>
inline bool operator==(const result_set<Columns...>& lhs,
const result_set<Columns...>& rhs)
{ return lhs.stmt() == rhs.stmt() && lhs.get() == rhs.get(); }
template<typename... Columns>
inline bool operator!=(const result_set<Columns...>& lhs,
const result_set<Columns...>& rhs)
{ return !(lhs == rhs); }
}}}
|
{"hexsha": "36cd24d9e5b8659d8b824aae6cfe7f837143c745", "size": 2349, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/picapau/oracle/core/result_set.hpp", "max_stars_repo_name": "ricardocosme/picapau", "max_stars_repo_head_hexsha": "751b946b3911f3ff15e19d177b0b561412c5f8d1", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2020-10-23T20:01:15.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-23T20:01:15.000Z", "max_issues_repo_path": "include/picapau/oracle/core/result_set.hpp", "max_issues_repo_name": "ricardocosme/picapau", "max_issues_repo_head_hexsha": "751b946b3911f3ff15e19d177b0b561412c5f8d1", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/picapau/oracle/core/result_set.hpp", "max_forks_repo_name": "ricardocosme/picapau", "max_forks_repo_head_hexsha": "751b946b3911f3ff15e19d177b0b561412c5f8d1", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.2580645161, "max_line_length": 67, "alphanum_fraction": 0.5836526181, "num_tokens": 587}
|
[STATEMENT]
lemma eval_red_Hcomp:
assumes "Ide a" and "Ide b"
shows "\<lbrace>(a \<^bold>\<star> b)\<^bold>\<down>\<rbrace> = \<lbrace>\<^bold>\<lfloor>a\<^bold>\<rfloor> \<^bold>\<Down> \<^bold>\<lfloor>b\<^bold>\<rfloor>\<rbrace> \<cdot> (\<lbrace>a\<^bold>\<down>\<rbrace> \<star> \<lbrace>b\<^bold>\<down>\<rbrace>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrace>(a \<^bold>\<star> b)\<^bold>\<down>\<rbrace> = \<lbrace>\<^bold>\<lfloor>a\<^bold>\<rfloor> \<^bold>\<Down> \<^bold>\<lfloor>b\<^bold>\<rfloor>\<rbrace> \<cdot> (\<lbrace>a\<^bold>\<down>\<rbrace> \<star> \<lbrace>b\<^bold>\<down>\<rbrace>)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<lbrace>(a \<^bold>\<star> b)\<^bold>\<down>\<rbrace> = \<lbrace>\<^bold>\<lfloor>a\<^bold>\<rfloor> \<^bold>\<Down> \<^bold>\<lfloor>b\<^bold>\<rfloor>\<rbrace> \<cdot> (\<lbrace>a\<^bold>\<down>\<rbrace> \<star> \<lbrace>b\<^bold>\<down>\<rbrace>)
[PROOF STEP]
have "Nml (a \<^bold>\<star> b) \<Longrightarrow> ?thesis"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Nml (a \<^bold>\<star> b) \<Longrightarrow> \<lbrace>(a \<^bold>\<star> b)\<^bold>\<down>\<rbrace> = \<lbrace>\<^bold>\<lfloor>a\<^bold>\<rfloor> \<^bold>\<Down> \<^bold>\<lfloor>b\<^bold>\<rfloor>\<rbrace> \<cdot> (\<lbrace>a\<^bold>\<down>\<rbrace> \<star> \<lbrace>b\<^bold>\<down>\<rbrace>)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. Nml (a \<^bold>\<star> b) \<Longrightarrow> \<lbrace>(a \<^bold>\<star> b)\<^bold>\<down>\<rbrace> = \<lbrace>\<^bold>\<lfloor>a\<^bold>\<rfloor> \<^bold>\<Down> \<^bold>\<lfloor>b\<^bold>\<rfloor>\<rbrace> \<cdot> (\<lbrace>a\<^bold>\<down>\<rbrace> \<star> \<lbrace>b\<^bold>\<down>\<rbrace>)
[PROOF STEP]
assume 1: "Nml (a \<^bold>\<star> b)"
[PROOF STATE]
proof (state)
this:
Nml (a \<^bold>\<star> b)
goal (1 subgoal):
1. Nml (a \<^bold>\<star> b) \<Longrightarrow> \<lbrace>(a \<^bold>\<star> b)\<^bold>\<down>\<rbrace> = \<lbrace>\<^bold>\<lfloor>a\<^bold>\<rfloor> \<^bold>\<Down> \<^bold>\<lfloor>b\<^bold>\<rfloor>\<rbrace> \<cdot> (\<lbrace>a\<^bold>\<down>\<rbrace> \<star> \<lbrace>b\<^bold>\<down>\<rbrace>)
[PROOF STEP]
hence 2: "Nml a \<and> Nml b \<and> Src a = Trg b"
[PROOF STATE]
proof (prove)
using this:
Nml (a \<^bold>\<star> b)
goal (1 subgoal):
1. Nml a \<and> Nml b \<and> Src a = Trg b
[PROOF STEP]
using Nml_HcompD(3-4,7)
[PROOF STATE]
proof (prove)
using this:
Nml (a \<^bold>\<star> b)
Nml (?t \<^bold>\<star> ?u) \<Longrightarrow> Nml ?t
Nml (?t \<^bold>\<star> ?u) \<Longrightarrow> Nml ?u
Nml (?t \<^bold>\<star> ?u) \<Longrightarrow> Src ?t = Trg ?u
goal (1 subgoal):
1. Nml a \<and> Nml b \<and> Src a = Trg b
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
Nml a \<and> Nml b \<and> Src a = Trg b
goal (1 subgoal):
1. Nml (a \<^bold>\<star> b) \<Longrightarrow> \<lbrace>(a \<^bold>\<star> b)\<^bold>\<down>\<rbrace> = \<lbrace>\<^bold>\<lfloor>a\<^bold>\<rfloor> \<^bold>\<Down> \<^bold>\<lfloor>b\<^bold>\<rfloor>\<rbrace> \<cdot> (\<lbrace>a\<^bold>\<down>\<rbrace> \<star> \<lbrace>b\<^bold>\<down>\<rbrace>)
[PROOF STEP]
have "\<lbrace>(a \<^bold>\<star> b)\<^bold>\<down>\<rbrace> = \<lbrace>a\<rbrace> \<star> \<lbrace>b\<rbrace>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrace>(a \<^bold>\<star> b)\<^bold>\<down>\<rbrace> = \<lbrace>a\<rbrace> \<star> \<lbrace>b\<rbrace>
[PROOF STEP]
using 1 Nml_HcompD
[PROOF STATE]
proof (prove)
using this:
Nml (a \<^bold>\<star> b)
Nml (?t \<^bold>\<star> ?u) \<Longrightarrow> \<^bold>\<langle>un_Prim ?t\<^bold>\<rangle> = ?t
Nml (?t \<^bold>\<star> ?u) \<Longrightarrow> C.arr (un_Prim ?t)
Nml (?t \<^bold>\<star> ?u) \<Longrightarrow> Nml ?t
Nml (?t \<^bold>\<star> ?u) \<Longrightarrow> Nml ?u
Nml (?t \<^bold>\<star> ?u) \<Longrightarrow> \<not> is_Prim\<^sub>0 ?u
Nml (?t \<^bold>\<star> ?u) \<Longrightarrow> \<^bold>\<langle>src\<^sub>C (un_Prim ?t)\<^bold>\<rangle>\<^sub>0 = Trg ?u
Nml (?t \<^bold>\<star> ?u) \<Longrightarrow> Src ?t = Trg ?u
goal (1 subgoal):
1. \<lbrace>(a \<^bold>\<star> b)\<^bold>\<down>\<rbrace> = \<lbrace>a\<rbrace> \<star> \<lbrace>b\<rbrace>
[PROOF STEP]
by (metis eval.simps(3) red_Nml)
[PROOF STATE]
proof (state)
this:
\<lbrace>(a \<^bold>\<star> b)\<^bold>\<down>\<rbrace> = \<lbrace>a\<rbrace> \<star> \<lbrace>b\<rbrace>
goal (1 subgoal):
1. Nml (a \<^bold>\<star> b) \<Longrightarrow> \<lbrace>(a \<^bold>\<star> b)\<^bold>\<down>\<rbrace> = \<lbrace>\<^bold>\<lfloor>a\<^bold>\<rfloor> \<^bold>\<Down> \<^bold>\<lfloor>b\<^bold>\<rfloor>\<rbrace> \<cdot> (\<lbrace>a\<^bold>\<down>\<rbrace> \<star> \<lbrace>b\<^bold>\<down>\<rbrace>)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
\<lbrace>(a \<^bold>\<star> b)\<^bold>\<down>\<rbrace> = \<lbrace>a\<rbrace> \<star> \<lbrace>b\<rbrace>
goal (1 subgoal):
1. Nml (a \<^bold>\<star> b) \<Longrightarrow> \<lbrace>(a \<^bold>\<star> b)\<^bold>\<down>\<rbrace> = \<lbrace>\<^bold>\<lfloor>a\<^bold>\<rfloor> \<^bold>\<Down> \<^bold>\<lfloor>b\<^bold>\<rfloor>\<rbrace> \<cdot> (\<lbrace>a\<^bold>\<down>\<rbrace> \<star> \<lbrace>b\<^bold>\<down>\<rbrace>)
[PROOF STEP]
have "... = \<lbrace>\<^bold>\<lfloor>a\<^bold>\<rfloor> \<^bold>\<Down> \<^bold>\<lfloor>b\<^bold>\<rfloor>\<rbrace> \<cdot> (\<lbrace>a\<^bold>\<down>\<rbrace> \<star> \<lbrace>b\<^bold>\<down>\<rbrace>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrace>a\<rbrace> \<star> \<lbrace>b\<rbrace> = \<lbrace>\<^bold>\<lfloor>a\<^bold>\<rfloor> \<^bold>\<Down> \<^bold>\<lfloor>b\<^bold>\<rfloor>\<rbrace> \<cdot> (\<lbrace>a\<^bold>\<down>\<rbrace> \<star> \<lbrace>b\<^bold>\<down>\<rbrace>)
[PROOF STEP]
using assms 1 2 ide_eval_Ide Nmlize_in_Hom red2_Nml Nmlize_Nml
[PROOF STATE]
proof (prove)
using this:
Ide a
Ide b
Nml (a \<^bold>\<star> b)
Nml a \<and> Nml b \<and> Src a = Trg b
Ide ?t \<Longrightarrow> ide \<lbrace>?t\<rbrace>
Arr ?t \<Longrightarrow> \<^bold>\<lfloor>?t\<^bold>\<rfloor> \<in> HHom (Src ?t) (Trg ?t)
Arr ?t \<Longrightarrow> \<^bold>\<lfloor>?t\<^bold>\<rfloor> \<in> VHom \<^bold>\<lfloor>Dom ?t\<^bold>\<rfloor> \<^bold>\<lfloor>Cod ?t\<^bold>\<rfloor>
Nml (?a \<^bold>\<star> ?b) \<Longrightarrow> ?a \<^bold>\<Down> ?b = ?a \<^bold>\<star> ?b
Nml ?t \<Longrightarrow> \<^bold>\<lfloor>?t\<^bold>\<rfloor> = ?t
goal (1 subgoal):
1. \<lbrace>a\<rbrace> \<star> \<lbrace>b\<rbrace> = \<lbrace>\<^bold>\<lfloor>a\<^bold>\<rfloor> \<^bold>\<Down> \<^bold>\<lfloor>b\<^bold>\<rfloor>\<rbrace> \<cdot> (\<lbrace>a\<^bold>\<down>\<rbrace> \<star> \<lbrace>b\<^bold>\<down>\<rbrace>)
[PROOF STEP]
by (simp add: eval_simps')
[PROOF STATE]
proof (state)
this:
\<lbrace>a\<rbrace> \<star> \<lbrace>b\<rbrace> = \<lbrace>\<^bold>\<lfloor>a\<^bold>\<rfloor> \<^bold>\<Down> \<^bold>\<lfloor>b\<^bold>\<rfloor>\<rbrace> \<cdot> (\<lbrace>a\<^bold>\<down>\<rbrace> \<star> \<lbrace>b\<^bold>\<down>\<rbrace>)
goal (1 subgoal):
1. Nml (a \<^bold>\<star> b) \<Longrightarrow> \<lbrace>(a \<^bold>\<star> b)\<^bold>\<down>\<rbrace> = \<lbrace>\<^bold>\<lfloor>a\<^bold>\<rfloor> \<^bold>\<Down> \<^bold>\<lfloor>b\<^bold>\<rfloor>\<rbrace> \<cdot> (\<lbrace>a\<^bold>\<down>\<rbrace> \<star> \<lbrace>b\<^bold>\<down>\<rbrace>)
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
\<lbrace>(a \<^bold>\<star> b)\<^bold>\<down>\<rbrace> = \<lbrace>\<^bold>\<lfloor>a\<^bold>\<rfloor> \<^bold>\<Down> \<^bold>\<lfloor>b\<^bold>\<rfloor>\<rbrace> \<cdot> (\<lbrace>a\<^bold>\<down>\<rbrace> \<star> \<lbrace>b\<^bold>\<down>\<rbrace>)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<lbrace>(a \<^bold>\<star> b)\<^bold>\<down>\<rbrace> = \<lbrace>\<^bold>\<lfloor>a\<^bold>\<rfloor> \<^bold>\<Down> \<^bold>\<lfloor>b\<^bold>\<rfloor>\<rbrace> \<cdot> (\<lbrace>a\<^bold>\<down>\<rbrace> \<star> \<lbrace>b\<^bold>\<down>\<rbrace>)
goal (1 subgoal):
1. \<lbrace>(a \<^bold>\<star> b)\<^bold>\<down>\<rbrace> = \<lbrace>\<^bold>\<lfloor>a\<^bold>\<rfloor> \<^bold>\<Down> \<^bold>\<lfloor>b\<^bold>\<rfloor>\<rbrace> \<cdot> (\<lbrace>a\<^bold>\<down>\<rbrace> \<star> \<lbrace>b\<^bold>\<down>\<rbrace>)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<lbrace>(a \<^bold>\<star> b)\<^bold>\<down>\<rbrace> = \<lbrace>\<^bold>\<lfloor>a\<^bold>\<rfloor> \<^bold>\<Down> \<^bold>\<lfloor>b\<^bold>\<rfloor>\<rbrace> \<cdot> (\<lbrace>a\<^bold>\<down>\<rbrace> \<star> \<lbrace>b\<^bold>\<down>\<rbrace>)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
Nml (a \<^bold>\<star> b) \<Longrightarrow> \<lbrace>(a \<^bold>\<star> b)\<^bold>\<down>\<rbrace> = \<lbrace>\<^bold>\<lfloor>a\<^bold>\<rfloor> \<^bold>\<Down> \<^bold>\<lfloor>b\<^bold>\<rfloor>\<rbrace> \<cdot> (\<lbrace>a\<^bold>\<down>\<rbrace> \<star> \<lbrace>b\<^bold>\<down>\<rbrace>)
goal (1 subgoal):
1. \<lbrace>(a \<^bold>\<star> b)\<^bold>\<down>\<rbrace> = \<lbrace>\<^bold>\<lfloor>a\<^bold>\<rfloor> \<^bold>\<Down> \<^bold>\<lfloor>b\<^bold>\<rfloor>\<rbrace> \<cdot> (\<lbrace>a\<^bold>\<down>\<rbrace> \<star> \<lbrace>b\<^bold>\<down>\<rbrace>)
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
Nml (a \<^bold>\<star> b) \<Longrightarrow> \<lbrace>(a \<^bold>\<star> b)\<^bold>\<down>\<rbrace> = \<lbrace>\<^bold>\<lfloor>a\<^bold>\<rfloor> \<^bold>\<Down> \<^bold>\<lfloor>b\<^bold>\<rfloor>\<rbrace> \<cdot> (\<lbrace>a\<^bold>\<down>\<rbrace> \<star> \<lbrace>b\<^bold>\<down>\<rbrace>)
goal (1 subgoal):
1. \<lbrace>(a \<^bold>\<star> b)\<^bold>\<down>\<rbrace> = \<lbrace>\<^bold>\<lfloor>a\<^bold>\<rfloor> \<^bold>\<Down> \<^bold>\<lfloor>b\<^bold>\<rfloor>\<rbrace> \<cdot> (\<lbrace>a\<^bold>\<down>\<rbrace> \<star> \<lbrace>b\<^bold>\<down>\<rbrace>)
[PROOF STEP]
have "\<not> Nml (a \<^bold>\<star> b) \<Longrightarrow> ?thesis"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<not> Nml (a \<^bold>\<star> b) \<Longrightarrow> \<lbrace>(a \<^bold>\<star> b)\<^bold>\<down>\<rbrace> = \<lbrace>\<^bold>\<lfloor>a\<^bold>\<rfloor> \<^bold>\<Down> \<^bold>\<lfloor>b\<^bold>\<rfloor>\<rbrace> \<cdot> (\<lbrace>a\<^bold>\<down>\<rbrace> \<star> \<lbrace>b\<^bold>\<down>\<rbrace>)
[PROOF STEP]
using assms Can_red2
[PROOF STATE]
proof (prove)
using this:
Ide a
Ide b
\<lbrakk>Ide ?a; Nml ?a; Ide ?b; Nml ?b; Src ?a = Trg ?b\<rbrakk> \<Longrightarrow> Can (?a \<^bold>\<Down> ?b)
\<lbrakk>Ide ?a; Nml ?a; Ide ?b; Nml ?b; Src ?a = Trg ?b\<rbrakk> \<Longrightarrow> ?a \<^bold>\<Down> ?b \<in> VHom (?a \<^bold>\<star> ?b) \<^bold>\<lfloor>?a \<^bold>\<star> ?b\<^bold>\<rfloor>
goal (1 subgoal):
1. \<not> Nml (a \<^bold>\<star> b) \<Longrightarrow> \<lbrace>(a \<^bold>\<star> b)\<^bold>\<down>\<rbrace> = \<lbrace>\<^bold>\<lfloor>a\<^bold>\<rfloor> \<^bold>\<Down> \<^bold>\<lfloor>b\<^bold>\<rfloor>\<rbrace> \<cdot> (\<lbrace>a\<^bold>\<down>\<rbrace> \<star> \<lbrace>b\<^bold>\<down>\<rbrace>)
[PROOF STEP]
by (simp add: Can_red(1) iso_eval_Can)
[PROOF STATE]
proof (state)
this:
\<not> Nml (a \<^bold>\<star> b) \<Longrightarrow> \<lbrace>(a \<^bold>\<star> b)\<^bold>\<down>\<rbrace> = \<lbrace>\<^bold>\<lfloor>a\<^bold>\<rfloor> \<^bold>\<Down> \<^bold>\<lfloor>b\<^bold>\<rfloor>\<rbrace> \<cdot> (\<lbrace>a\<^bold>\<down>\<rbrace> \<star> \<lbrace>b\<^bold>\<down>\<rbrace>)
goal (1 subgoal):
1. \<lbrace>(a \<^bold>\<star> b)\<^bold>\<down>\<rbrace> = \<lbrace>\<^bold>\<lfloor>a\<^bold>\<rfloor> \<^bold>\<Down> \<^bold>\<lfloor>b\<^bold>\<rfloor>\<rbrace> \<cdot> (\<lbrace>a\<^bold>\<down>\<rbrace> \<star> \<lbrace>b\<^bold>\<down>\<rbrace>)
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
Nml (a \<^bold>\<star> b) \<Longrightarrow> \<lbrace>(a \<^bold>\<star> b)\<^bold>\<down>\<rbrace> = \<lbrace>\<^bold>\<lfloor>a\<^bold>\<rfloor> \<^bold>\<Down> \<^bold>\<lfloor>b\<^bold>\<rfloor>\<rbrace> \<cdot> (\<lbrace>a\<^bold>\<down>\<rbrace> \<star> \<lbrace>b\<^bold>\<down>\<rbrace>)
\<not> Nml (a \<^bold>\<star> b) \<Longrightarrow> \<lbrace>(a \<^bold>\<star> b)\<^bold>\<down>\<rbrace> = \<lbrace>\<^bold>\<lfloor>a\<^bold>\<rfloor> \<^bold>\<Down> \<^bold>\<lfloor>b\<^bold>\<rfloor>\<rbrace> \<cdot> (\<lbrace>a\<^bold>\<down>\<rbrace> \<star> \<lbrace>b\<^bold>\<down>\<rbrace>)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
Nml (a \<^bold>\<star> b) \<Longrightarrow> \<lbrace>(a \<^bold>\<star> b)\<^bold>\<down>\<rbrace> = \<lbrace>\<^bold>\<lfloor>a\<^bold>\<rfloor> \<^bold>\<Down> \<^bold>\<lfloor>b\<^bold>\<rfloor>\<rbrace> \<cdot> (\<lbrace>a\<^bold>\<down>\<rbrace> \<star> \<lbrace>b\<^bold>\<down>\<rbrace>)
\<not> Nml (a \<^bold>\<star> b) \<Longrightarrow> \<lbrace>(a \<^bold>\<star> b)\<^bold>\<down>\<rbrace> = \<lbrace>\<^bold>\<lfloor>a\<^bold>\<rfloor> \<^bold>\<Down> \<^bold>\<lfloor>b\<^bold>\<rfloor>\<rbrace> \<cdot> (\<lbrace>a\<^bold>\<down>\<rbrace> \<star> \<lbrace>b\<^bold>\<down>\<rbrace>)
goal (1 subgoal):
1. \<lbrace>(a \<^bold>\<star> b)\<^bold>\<down>\<rbrace> = \<lbrace>\<^bold>\<lfloor>a\<^bold>\<rfloor> \<^bold>\<Down> \<^bold>\<lfloor>b\<^bold>\<rfloor>\<rbrace> \<cdot> (\<lbrace>a\<^bold>\<down>\<rbrace> \<star> \<lbrace>b\<^bold>\<down>\<rbrace>)
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<lbrace>(a \<^bold>\<star> b)\<^bold>\<down>\<rbrace> = \<lbrace>\<^bold>\<lfloor>a\<^bold>\<rfloor> \<^bold>\<Down> \<^bold>\<lfloor>b\<^bold>\<rfloor>\<rbrace> \<cdot> (\<lbrace>a\<^bold>\<down>\<rbrace> \<star> \<lbrace>b\<^bold>\<down>\<rbrace>)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 5392, "file": "Bicategory_Coherence", "length": 26}
|
#ifndef STAN_MATH_PRIM_SCAL_PROB_UNIFORM_RNG_HPP
#define STAN_MATH_PRIM_SCAL_PROB_UNIFORM_RNG_HPP
#include <boost/random/uniform_real_distribution.hpp>
#include <boost/random/variate_generator.hpp>
#include <stan/math/prim/scal/err/check_consistent_sizes.hpp>
#include <stan/math/prim/scal/err/check_finite.hpp>
#include <stan/math/prim/scal/err/check_greater.hpp>
#include <stan/math/prim/scal/err/check_not_nan.hpp>
#include <stan/math/prim/scal/fun/constants.hpp>
#include <stan/math/prim/scal/fun/value_of.hpp>
#include <stan/math/prim/scal/meta/VectorBuilder.hpp>
#include <stan/math/prim/scal/meta/include_summand.hpp>
namespace stan {
namespace math {
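/**
 * Return a pseudorandom uniform variate on the interval [alpha, beta)
 * using the specified random number generator. The checks below require
 * finite bounds with beta > alpha.
 */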
template <class RNG>
inline double uniform_rng(double alpha, double beta, RNG& rng) {
using boost::random::uniform_real_distribution;
using boost::variate_generator;
static const char* function = "uniform_rng";
check_finite(function, "Lower bound parameter", alpha);
check_finite(function, "Upper bound parameter", beta);
check_greater(function, "Upper bound parameter", beta, alpha);
variate_generator<RNG&, uniform_real_distribution<> > uniform_rng(
rng, uniform_real_distribution<>(alpha, beta));
return uniform_rng();
}
} // namespace math
} // namespace stan
#endif
|
{"hexsha": "40ddbbd61a89ed0b7c15f54942680113851a4870", "size": 1261, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "stan/math/prim/scal/prob/uniform_rng.hpp", "max_stars_repo_name": "sakrejda/math", "max_stars_repo_head_hexsha": "3cc99955807cf1f4ea51efd79aa3958b74d24af2", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "stan/math/prim/scal/prob/uniform_rng.hpp", "max_issues_repo_name": "sakrejda/math", "max_issues_repo_head_hexsha": "3cc99955807cf1f4ea51efd79aa3958b74d24af2", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "stan/math/prim/scal/prob/uniform_rng.hpp", "max_forks_repo_name": "sakrejda/math", "max_forks_repo_head_hexsha": "3cc99955807cf1f4ea51efd79aa3958b74d24af2", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.0810810811, "max_line_length": 68, "alphanum_fraction": 0.7819191118, "num_tokens": 305}
|
// __BEGIN_LICENSE__
// Copyright (c) 2006-2013, United States Government as represented by the
// Administrator of the National Aeronautics and Space Administration. All
// rights reserved.
//
// The NASA Vision Workbench is licensed under the Apache License,
// Version 2.0 (the "License"); you may not use this file except in
// compliance with the License. You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// __END_LICENSE__
#include <vw/Cartography/PointImageManipulation.h>
#include <boost/math/special_functions/fpclassify.hpp>
#include <limits>
using namespace vw;
Vector3 cartography::GeodeticToCartesian::operator()( Vector3 const& v ) const {
if ( boost::math::isnan(v[2]) )
return Vector3();
return m_datum.geodetic_to_cartesian(v);
}
Vector3 cartography::CartesianToGeodetic::operator()( Vector3 const& v ) const {
if ( v == Vector3() )
return Vector3(0,0,std::numeric_limits<double>::quiet_NaN());
return m_datum.cartesian_to_geodetic(v);
}
Vector3 cartography::GeodeticToProjection::operator()( Vector3 const& v ) const {
if ( boost::math::isnan(v[2]) )
return v;
Vector2 pix = m_reference.lonlat_to_pixel( subvector(v, 0, 2) );
return Vector3( pix[0], pix[1], v[2] );
}
Vector3 cartography::ProjectionToGeodetic::operator()( Vector3 const& v ) const {
Vector2 ll = m_reference.pixel_to_lonlat( subvector( v, 0, 2 ) );
return Vector3( ll[0], ll[1], v[2] );
}
Vector3 cartography::GeodeticToPoint::operator()( Vector3 const& v ) const {
if ( boost::math::isnan(v[2]) )
return v;
Vector2 pix = m_reference.lonlat_to_point( subvector(v, 0, 2) );
return Vector3( pix[0], pix[1], v[2] );
}
Vector3 cartography::PointToGeodetic::operator()( Vector3 const& v ) const {
Vector2 ll = m_reference.point_to_lonlat( subvector( v, 0, 2 ) );
return Vector3( ll[0], ll[1], v[2] );
}
|
{"hexsha": "e925f49edbcfa4c32a5e99f2700ba464212b59eb", "size": 2191, "ext": "cc", "lang": "C++", "max_stars_repo_path": "src/vw/Cartography/PointImageManipulation.cc", "max_stars_repo_name": "maxerbubba/visionworkbench", "max_stars_repo_head_hexsha": "b06ba0597cd3864bb44ca52671966ca580c02af1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 318.0, "max_stars_repo_stars_event_min_datetime": "2015-01-02T16:37:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T07:12:20.000Z", "max_issues_repo_path": "src/vw/Cartography/PointImageManipulation.cc", "max_issues_repo_name": "maxerbubba/visionworkbench", "max_issues_repo_head_hexsha": "b06ba0597cd3864bb44ca52671966ca580c02af1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 39.0, "max_issues_repo_issues_event_min_datetime": "2015-07-30T22:22:42.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-23T16:11:55.000Z", "max_forks_repo_path": "src/vw/Cartography/PointImageManipulation.cc", "max_forks_repo_name": "maxerbubba/visionworkbench", "max_forks_repo_head_hexsha": "b06ba0597cd3864bb44ca52671966ca580c02af1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 135.0, "max_forks_repo_forks_event_min_datetime": "2015-01-19T00:57:20.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T13:51:40.000Z", "avg_line_length": 37.1355932203, "max_line_length": 81, "alphanum_fraction": 0.7161113647, "num_tokens": 620}
|
import tensorflow as tf
import tensorflow_io as tfio
import numpy as np
import matplotlib.pyplot as plt
from scipy.fftpack import fft
from scipy.io import wavfile as wav
'''
Tensorflow conversion to spectrograms
Maybe use if we go with Mel spectrograms
'''
def print_FFT(song_path):
    rate, data = wav.read(song_path)
    fft_out = fft(data)
    # plot the FFT magnitude against sample index
    plt.plot(np.abs(fft_out))
    plt.show()
def convert_audio_tensor(song_path):
# Convert wav file to Tensor
audio = tfio.audio.AudioIOTensor(song_path)
# Splice the audio file
audio_splice = audio[100:]
# Remove last dimension
audio_tensor = tf.squeeze(audio_splice, axis=[-1])
rate, data = wav.read(song_path)
max_data = max(data)
# Prepare the audio file for printing
# Cast to float value
tensor = tf.cast(audio_tensor, tf.float32) / max_data
    spectrogram = tfio.experimental.audio.spectrogram(
        tensor, nfft=512, window=512, stride=1024)
    return spectrogram
# Function to plot the spectrogram from TensorFlow conversion
def plot_spectrogram(spectrogram):
plt.figure()
plt.imshow(tf.math.log(spectrogram).numpy())
plt.show()
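# Example usage (hypothetical path):
# spectrogram = convert_audio_tensor('songs/track01.wav')
# plot_spectrogram(spectrogram)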
|
{"hexsha": "ad7d908389e071f2d689cd1b2d7c4520758f4d25", "size": 1141, "ext": "py", "lang": "Python", "max_stars_repo_path": "Python Files/Dataset_Formating/Tensor_audio_formating.py", "max_stars_repo_name": "brennanMosher/Music-Genre-Recognition-using-a-Machine-Learning-Appraoch", "max_stars_repo_head_hexsha": "7834fe5d709e894322ad76ef118067febaa78bce", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-13T16:22:27.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-13T16:22:27.000Z", "max_issues_repo_path": "Python Files/Dataset_Formating/Tensor_audio_formating.py", "max_issues_repo_name": "brennanMosher/Music-Genre-Recognition-using-a-Machine-Learning-Appraoch", "max_issues_repo_head_hexsha": "7834fe5d709e894322ad76ef118067febaa78bce", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Python Files/Dataset_Formating/Tensor_audio_formating.py", "max_forks_repo_name": "brennanMosher/Music-Genre-Recognition-using-a-Machine-Learning-Appraoch", "max_forks_repo_head_hexsha": "7834fe5d709e894322ad76ef118067febaa78bce", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.2857142857, "max_line_length": 61, "alphanum_fraction": 0.7142857143, "include": true, "reason": "import numpy,from scipy", "num_tokens": 283}
|
# coding: utf-8
# Copyright (c) Materials Virtual Lab
# Distributed under the terms of the BSD License.
import numpy as np
class Preprocessing(object):
"""
Preprocessing class used for spectrum preprocessing.
"""
def __init__(self, spectrum):
"""
        Create a Preprocessing object
Args:
spectrum (pymatgen.core.spectrum.Spectrum): Spectrum object used to
initialize preprocessing class.
"""
self.spectrum = spectrum
self.process_tag = []
self.proc_dict = {
'1st_der': 'first_derivative',
'2nd_der': 'second_derivative',
'vecnorm': 'vector_norm_normalize',
'maxnorm': 'maximum_intensity_norm',
'areanorm': 'area_normalize',
'snvnorm': 'snv_norm',
'square': 'square_root_squashing',
'sigmoid': 'sigmoid_squashing',
'1st_wt': 'weighted_first_derivative',
'2nd_wt': 'weighted_second_derivative',
'intnorm': 'intensity_normalize'
}
@property
def preprocessing_method(self):
"""
Returns: a list of available preprocessing methods
"""
return list(self.proc_dict.values())
def first_derivative(self):
"""
Return first derivative as spectrum
"""
deriv_x, deriv_y = self.derivative_spect(self.spectrum, 1)
self.spectrum.x, self.spectrum.y = np.copy(deriv_x), np.copy(deriv_y)
def second_derivative(self):
"""
Return second derivative as spectrum
"""
deriv_x, deriv_y = self.derivative_spect(self.spectrum, 2)
self.spectrum.x, self.spectrum.y = np.copy(deriv_x), np.copy(deriv_y)
def weighted_first_derivative(self):
"""
Return weighted first derivative spectrum as spectrum
"""
deriv_x, deriv_y = self.derivative_spect(self.spectrum, 1)
self.spectrum.x, self.spectrum.y = deriv_x, np.multiply(
self.spectrum.y[:-1], deriv_y)
def weighted_second_derivative(self):
"""
Return weighted second derivative spectrum as spectrum
"""
deriv_x, deriv_y = self.derivative_spect(self.spectrum, 2)
self.spectrum.x, self.spectrum.y = deriv_x, np.multiply(
self.spectrum.y[:-2], deriv_y)
def intensity_normalize(self):
"""
Normalize with respect to the intensity sum
"""
self.spectrum.normalize('sum')
def maximum_intensity_norm(self):
"""
Normalize with respect to the maximum intensity
"""
self.spectrum.normalize('max')
def vector_norm_normalize(self):
"""
Normalize with respect to the norm of the spectrum as a vector
"""
spect_norm = np.linalg.norm(self.spectrum.y)
self.spectrum.y /= spect_norm
def area_normalize(self):
"""
Normalize the peak intensity using under curve area, i.e. normalized
curve's under curve area should equals 1
"""
under_curve_area = np.trapz(self.spectrum.y, self.spectrum.x)
self.spectrum.y /= under_curve_area
    def snv_norm(self):
        """
        Normalize with respect to the standard deviation of the spectrum
        intensity (standard normal variate) and return abs. spectrum
        """
        inten_mean = np.mean(self.spectrum.y)
        inten_std = np.std(self.spectrum.y)
        normalized_mu = np.divide(np.subtract(self.spectrum.y, inten_mean), inten_std)
# Since snv norm will return negative absorption value after
# normalization, need to add
# the minimum absorption value and shift the baseline back to zero
min_norm_mu = np.abs(np.min(normalized_mu))
normalized_mu = np.add(normalized_mu, min_norm_mu)
self.spectrum.y = normalized_mu
def square_root_squashing(self):
"""
Squashing the spectrum using square root of the spectrum
"""
squashed_mu = np.sqrt(np.abs(self.spectrum.y))
self.spectrum.y = squashed_mu
def sigmoid_squashing(self):
"""
        Squashing the spectrum using the sigmoid function, i.e.
squashed_y = (1 - cos(pi*spectrum.y))/2
"""
squashed_mu = np.divide(np.subtract(1, np.cos(np.pi * self.spectrum.y)),
2)
self.spectrum.y = squashed_mu
def derivative_spect(self, spect1, order):
"""
Calculate derivative of a given spectrum, to keep returned spectrum
dimension consistent, endpoints are not pad with endvalues
Args:
spect1: Given spectrum with spect1.x corresponding to energy.
spect1.y corresponding to absorption
order: The number of times the spectrum are differenced
Returns: Differenciated x and y
"""
deriv_x = np.copy(spect1.x)
deriv_y = np.copy(spect1.y)
def first_derivative(x, y):
derivative = np.diff(y) / np.diff(x)
return x[:-1], derivative
while order >= 1:
deriv_x, deriv_y = first_derivative(deriv_x, deriv_y)
order -= 1
return deriv_x, deriv_y
def spectrum_process(self, process_seq):
"""
Preprocess the self.spectrum object using the preprocess method listed
in process_seq
Args:
process_seq (list/tuple/string): preprocessing methods
"""
        if (process_seq is not None) and isinstance(process_seq, (list, tuple)):
for pro in process_seq:
getattr(self, self.proc_dict[pro])()
self.process_tag.append(pro)
if (process_seq is not None) and isinstance(process_seq, str):
getattr(self, self.proc_dict[process_seq])()
self.process_tag.append(process_seq)
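# Example usage (hypothetical spectrum object):
# proc = Preprocessing(spectrum)
# proc.spectrum_process(['maxnorm', '1st_der'])  # max-normalize, then take first derivative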
|
{"hexsha": "949a5975e0f1bbcde2799fac9b149a31761ac288", "size": 5913, "ext": "py", "lang": "Python", "max_stars_repo_path": "veidt/elsie/preprocessing.py", "max_stars_repo_name": "yimingchen95/veidt", "max_stars_repo_head_hexsha": "90f201f856d2f71c578f74b7391c0c9ff284986b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2018-01-18T08:32:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-06T16:56:30.000Z", "max_issues_repo_path": "veidt/elsie/preprocessing.py", "max_issues_repo_name": "yimingchen95/veidt", "max_issues_repo_head_hexsha": "90f201f856d2f71c578f74b7391c0c9ff284986b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 77, "max_issues_repo_issues_event_min_datetime": "2017-08-16T18:09:07.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-03T13:17:17.000Z", "max_forks_repo_path": "veidt/elsie/preprocessing.py", "max_forks_repo_name": "yimingchen95/veidt", "max_forks_repo_head_hexsha": "90f201f856d2f71c578f74b7391c0c9ff284986b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2017-09-11T17:23:26.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-30T02:19:21.000Z", "avg_line_length": 33.7885714286, "max_line_length": 86, "alphanum_fraction": 0.6057838661, "include": true, "reason": "import numpy", "num_tokens": 1268}
|
###########################################################################################
# Adapted from https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py #
# Mainly changed the model forward() function #
###########################################################################################
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal
class MaskFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, input, i, r, c, size):
# ctx.save_for_backward(i, r, c, size)
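        # zero out a size x size square at (r, c), across all channels, of
        # batch element i; everything else passes through unchanged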
mask = torch.zeros(input.shape).to(input.device)
mask[i,:, r:(r+size), c:(c+size)] = 1
return torch.where(mask == 1, torch.tensor(0.).to(input.device), input)
@staticmethod
def backward(ctx, grad_output):
# i,r,c, size = ctx.saved_tensors
# if we want to mark mask on the backwards pass
# mask = torch.ones(grad_output.shape).to(grad_output.device)
# mask[i,:, r:(r+size), c:(c+size)] = 1
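        # straight-through estimator: gradients pass unchanged, so the mask
        # only affects the forward pass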
return grad_output, None, None, None, None
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None, dohisto=False):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.dohisto = dohisto
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
# if self.dohisto:
# temp = out
# print(np.count_nonzero(temp.cpu().detach().numpy()))
# print(torch.max(out), torch.min(out))
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
# if self.dohisto:
# print(torch.max(out), torch.min(out))
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
# if self.dohisto:
# temp = identity
# print(np.count_nonzero(temp.cpu().detach().numpy()))
# print(torch.max(out), torch.min(out))
return out
class ResNet(nn.Module):
temp = 30
def testhook(self, module, input, output):
# print(torch.max(input[0]), torch.min(input[0]))
# print(output[0])
self._get_histo(output[0], self.temp)
self.temp+=1
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None, clip_range=None, aggregation = 'mean',
dohisto=False, collapsefunc=None, ret_mask_activations=False,
doforwardmask=True):
super(ResNet, self).__init__()
self.i = 0
self.clip_range = clip_range
self.aggregation = aggregation
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0],
dohisto =dohisto)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0],
dohisto = dohisto)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1],
dohisto = dohisto)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2],
dohisto = dohisto)
# for i in range(len(self.layer1)):
# self.layer1[i].relu.register_forward_hook(self.testhook)
# for i in range(len(self.layer2)):
# self.layer2[i].relu.register_forward_hook(self.testhook)
# for i in range(len(self.layer3)):
# self.layer3[i].relu.register_forward_hook(self.testhook)
# for i in range(len(self.layer4)):
# self.layer4[i].relu.register_forward_hook(self.testhook)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
self.dohisto = dohisto
self.collapsefunc = collapsefunc
self.mask = MaskFunction.apply
self.ret_mask_activations = ret_mask_activations
self.doforwardmask = doforwardmask
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False, dohisto = False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer, dohisto=dohisto))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
# save a fig of the activations at the layer
def _get_histo(self, x, layer):
if self.dohisto:
batch = x.cpu().detach().numpy()
for i in range(len(x)):
out = batch[i]
if layer==-1:
# unnormalize
out[0] = out[0] * 0.2023 + 0.4914
out[1] = out[1] * 0.1994 + 0.4822
out[2] = out[2] * 0.2010 + 0.4465
out = np.transpose(out, (1,2,0))
s = '00'
else:
out = np.max(out, axis=0)
s = str(layer)
plt.figure()
plt.imshow(out)
plt.savefig('image_dumps/adaptive/'+str(i) + '_layer_' + s)
plt.close()
def _mask(self, x, mean, stddev, patchsizes):
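        # Detect a square high-activation region (a candidate adversarial
        # patch): collapse channels, smooth with box filters over the given
        # range of patch sizes, and mask the best-scoring window when its
        # score exceeds mean + 2 * stddev.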
# analyze in np for ease of use - TODO: parallelize in pytorch
temp = x.cpu().detach().numpy()
mean_ = mean.cpu().detach().numpy()
stddev_ = stddev.cpu().detach().numpy()
# collapse over channels
if self.collapsefunc == 'max':
collapsed = np.max(temp, axis=1)
mean_ = np.max(mean_)
stddev_ = np.max(stddev_)
elif self.collapsefunc == 'l2':
collapsed = np.linalg.norm(temp, axis=1)
mean_ = np.linalg.norm(mean_)
stddev_ = np.linalg.norm(stddev_)
else:
return x, None
masked_act = torch.zeros(collapsed.shape).to(x.device)
for i in range(len(collapsed)):
max_=-1
r,c = 0,0
size = patchsizes[0]
# find patch in scale space
for s in range(patchsizes[0], patchsizes[1]):
# 1/s box kernel
f = np.ones((s,s,))/ (s)
smoothed = scipy.signal.convolve2d(collapsed[i,:,:], f, mode='valid')
curr_max = smoothed.max()
if curr_max > max_:
max_ = curr_max
r,c, = np.unravel_index(smoothed.argmax(), smoothed.shape)
size = s
            # the 1/s box kernel leaves a factor of s in the sum, so
            # max_/size is the window's mean activation
if max_/size > mean_ + 2 * stddev_:
# for adaptive attack, return the masked activations
masked_act[i, r:(r+size), c:(c+size)] = torch.max(x[i, :, r:(r+size), c:(c+size)], dim=0)[0]
# for adaptive attack, do not do forward mask
if self.doforwardmask:
x = self.mask(x, torch.tensor(i), torch.tensor(r), torch.tensor(c), torch.tensor(size))
return x, masked_act
def _forward_impl(self, x):
# See note [TorchScript super()]
# print(x.shape)
activation_list = []
self._get_histo(x, -1)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x, masked_act = self._mask(x, self.bn1.bias, self.bn1.weight, (15,24))
if self.ret_mask_activations:
activation_list.append(masked_act)
self._get_histo(x, 0)
x = self.maxpool(x)
x = self.layer1(x)
# print('layer1', self.layer1[2].bn3.bias, self.layer1[2].bn3.weight)
x, masked_act = self._mask(x, torch.add(self.layer1[2].bn3.bias, self.layer1[1].bn3.bias),
torch.sqrt(torch.add(torch.pow(self.layer1[2].bn3.weight, 2), torch.pow(self.layer1[1].bn3.weight, 2))),
(5, 12))
if self.ret_mask_activations:
activation_list.append(masked_act * 4) # make each layer equal besides downsampling
self._get_histo(x, 1)
x = self.layer2(x)
x, masked_act = self._mask(x, torch.add(self.layer2[3].bn3.bias, self.layer2[2].bn3.bias),
torch.sqrt(torch.add(torch.pow(self.layer2[3].bn3.weight, 2), torch.pow(self.layer2[2].bn3.weight, 2))),
(3,10))
if self.ret_mask_activations:
activation_list.append(masked_act * 16)
self._get_histo(x, 2)
x = self.layer3(x)
self._get_histo(x, 3)
x = self.layer4(x)
self._get_histo(x, 4)
x = x.permute(0,2,3,1)
x = self.fc(x)
if self.clip_range is not None:
x = torch.clamp(x,self.clip_range[0],self.clip_range[1])
if self.aggregation == 'mean':
x = torch.mean(x,dim=(1,2))
elif self.aggregation == 'median':
x = x.view([x.size()[0],-1,10])
x = torch.median(x,dim=1)
return x.values
elif self.aggregation =='cbn': # clipping function from Clipped BagNet
x = torch.tanh(x*0.05-1)
x = torch.mean(x,dim=(1,2))
elif self.aggregation == 'none':
pass
# print(x.shape)
if self.ret_mask_activations:
return x, activation_list
return x
def forward(self, x):
return self._forward_impl(x)
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
def resnet18(pretrained=False, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
r"""ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
**kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
r"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
    The model is the same as ResNet except that the number of bottleneck
    channels is twice as large in every block. The number of channels in the
    outer 1x1 convolutions is the same, e.g. the last block in ResNet-50 has
    2048-512-2048 channels, while in Wide ResNet-50-2 it has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
    The model is the same as ResNet except that the number of bottleneck
    channels is twice as large in every block. The number of channels in the
    outer 1x1 convolutions is the same, e.g. the last block in ResNet-50 has
    2048-512-2048 channels, while in Wide ResNet-50-2 it has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
|
{"hexsha": "aad6c46523b0e8a743f17dd3fc5b60c3e4c4fba2", "size": 21760, "ext": "py", "lang": "Python", "max_stars_repo_path": "nets/resnet.py", "max_stars_repo_name": "nicksum107/thesiswork", "max_stars_repo_head_hexsha": "5d175d0e110b08b7da2926fc64287086f503e086", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nets/resnet.py", "max_issues_repo_name": "nicksum107/thesiswork", "max_issues_repo_head_hexsha": "5d175d0e110b08b7da2926fc64287086f503e086", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nets/resnet.py", "max_forks_repo_name": "nicksum107/thesiswork", "max_forks_repo_head_hexsha": "5d175d0e110b08b7da2926fc64287086f503e086", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.5267175573, "max_line_length": 116, "alphanum_fraction": 0.5972886029, "include": true, "reason": "import numpy,import scipy", "num_tokens": 5538}
|
import pandas as pd
import numpy as np
import xgboost as xgb
import lightgbm as lgb
import pyarrow as pa
import pyarrow.parquet as pq
import json
import traceback
from utils import *
import argparse
# specify the version.
parser = argparse.ArgumentParser()
parser.add_argument('--version', '-v', default=1, help='version')
args = parser.parse_args()
version = int(args.version)
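# number of differently-seeded LightGBM runs whose predictions are averaged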
num_average = 10
try:
sub = pd.DataFrame()
sub["item_id"] = pd.read_csv("../input/test.csv")["item_id"]
sub["deal_probability"] = 0
X_train = read_parquet("../tmp/X_train.parquet")
X_test = read_parquet("../tmp/X_test.parquet")
y_train = read_parquet("../tmp/y_train.parquet").values.ravel()
X_tr_sta, X_te_sta, _ = read_train_test_data_stacking()
X_train = pd.concat([X_train, X_tr_sta],axis=1)
X_test = pd.concat([X_test, X_te_sta],axis=1)
del X_tr_sta, X_te_sta; gc.collect()
nogain_features =[]
    with open("../tmp/no_gain_features_stack_version.txt") as f:
        for l in f.readlines():
            nogain_features.append(l.replace("\n", ""))
drop_cols = [col for col in X_train.columns if col in nogain_features]
X_train = X_train.drop(drop_cols, axis=1)
X_test = X_test.drop(drop_cols, axis=1)
print("start cross validation")
print("X_train shape: ", X_train.shape)
print("X_test shape: ", X_test.shape)
features = X_train.columns
print("features: ", features)
d_train = lgb.Dataset(X_train, label=y_train)
del X_train; gc.collect()
for i in range(num_average):
print("ITERATION {}".format(i))
seed = np.random.randint(0,100000)
params = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'xentropy',#'regression'
'metric': 'rmse',#'xentropy'
'num_leaves': 200,
# 'min_sum_hessian_in_leaf': 10,
'max_depth': 10,
'learning_rate': 0.02,
'feature_fraction': 0.2235,
'bagging_fraction': 0.9,
# 'bagging_freq': 5,
'lambda_l1': 10,
'lambda_l2': 2,
'verbose': 0,
'feature_fraction_seed':seed,
'bagging_seed':seed,
'seed':seed
}
print("parameters...")
print(params)
print("Start CV...")
cvresult = lgb.cv(
params
, d_train
, 20000
, early_stopping_rounds=200
, verbose_eval=100
, nfold=5
, stratified=False
)['rmse-mean']
num_rounds = int(len(cvresult))
print("Done CV. best iteration: {}".format(num_rounds))
bst = lgb.train(
params
, d_train
, num_rounds
, verbose_eval=100
)
sub["deal_probability"] += bst.predict(X_test) / num_average
sub["deal_probability"] = sub["deal_probability"].clip(0.0, 1.0)
sub.to_csv("../output/lgb_with_stack_seed_average_{}.csv".format(num_average), index=False)
notify_line("Seed Average Done:")
except:
print(traceback.format_exc())
notify_line(traceback.format_exc())
|
{"hexsha": "5821b92614b4d0040f62076c17262ab22757fa75", "size": 3402, "ext": "py", "lang": "Python", "max_stars_repo_path": "yuki/avito/src/lgbm_with_stack_seed_average.py", "max_stars_repo_name": "RandLive/Avito-Demand-Prediction-Challenge", "max_stars_repo_head_hexsha": "eb2955c6cb799907071d8bbf7b31b73b163c604f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "yuki/avito/src/lgbm_with_stack_seed_average.py", "max_issues_repo_name": "RandLive/Avito-Demand-Prediction-Challenge", "max_issues_repo_head_hexsha": "eb2955c6cb799907071d8bbf7b31b73b163c604f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "yuki/avito/src/lgbm_with_stack_seed_average.py", "max_forks_repo_name": "RandLive/Avito-Demand-Prediction-Challenge", "max_forks_repo_head_hexsha": "eb2955c6cb799907071d8bbf7b31b73b163c604f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.0943396226, "max_line_length": 95, "alphanum_fraction": 0.5552616108, "include": true, "reason": "import numpy", "num_tokens": 815}
|
# Orthogonal polynomials
Copyright (C) 2020 Andreas Kloeckner
<details>
<summary>MIT License</summary>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
</details>
```python
import numpy as np
import numpy.linalg as la
import matplotlib.pyplot as pt
```
## Mini-Introduction to `sympy`
```python
import sympy as sym
# Enable "pretty-printing" in IPython
sym.init_printing()
```
Make a new `Symbol` and work with it:
```python
# a minimal definition for the examples below
x = sym.Symbol("x")
```
```python
myexpr = (x**2-3)**2
myexpr
myexpr.expand()
```
```python
sym.integrate(myexpr, x)
```
```python
sym.integrate(myexpr, (x, -1, 1))
```
## Orthogonal polynomials
Now write a function `inner_product(f, g)`:
```python
# one possible definition: the L^2 inner product on [-1, 1]
def inner_product(f, g):
    return sym.integrate(f*g, (x, -1, 1))
```
Show that it works:
```python
inner_product(1, x)  # odd integrand, so this should be 0
```
```python
inner_product(x, x)  # should be 2/3
```
Next, define a `basis` consisting of a few monomials:
```python
basis = [x**i for i in range(5)]
```
And run Gram-Schmidt on it:
```python
orth_basis = []
for q in basis:
for prev_q in orth_basis:
q = q - inner_product(prev_q, q)*prev_q / inner_product(prev_q,prev_q)
orth_basis.append(q)
legendre_basis = [orth_basis[0],]
# to compute the Legendre polynomials, normalize so that q(1)=1 rather than ||q||=1
for q in orth_basis[1:]:
q = q / q.subs(x,1)
legendre_basis.append(q)
```
```python
legendre_basis
```
These are called the *Legendre polynomials*.
--------------------
What do they look like?
```python
mesh = np.linspace(-1, 1, 100)
pt.figure(figsize=(8,8))
for f in legendre_basis:
f = sym.lambdify(x, f)
pt.plot(mesh, [f(xi) for xi in mesh])
```
-----
These functions are important enough to be included in `scipy.special` as `eval_legendre`:
```python
import scipy.special as sps
for i in range(10):
pt.plot(mesh, sps.eval_legendre(i, mesh))
```
What can we find out about the conditioning of the generalized Vandermonde matrix for Legendre polynomials?
```python
#keep
n = 20
xs = np.linspace(-1, 1, n)
V = np.array([
sps.eval_legendre(i, xs)
for i in range(n)
]).T
la.cond(V)
```
The Chebyshev basis can similarly be defined by Gram-Schmidt, but now with respect to a different inner-product weight function,
$$w(x) = 1/\sqrt{1-x^2}.$$
```python
# a sketch: the same Gram-Schmidt loop, but with the weighted inner product
def cheb_inner_product(f, g):
    return sym.integrate(f*g/sym.sqrt(1 - x**2), (x, -1, 1))

cheb_basis = []
for q in basis:
    for prev_q in cheb_basis:
        q = q - cheb_inner_product(prev_q, q)*prev_q / cheb_inner_product(prev_q, prev_q)
    cheb_basis.append(q)

cheb_basis
```
```python
for i in range(10):
pt.plot(mesh, np.cos(i*np.arccos(mesh)))
```
Chebyshev polynomials achieve similarly good, but still imperfect, conditioning on a uniform grid (and essentially perfect conditioning on a grid of Chebyshev nodes).
```python
#keep
n = 20
xs = np.linspace(-1, 1, n)
V = np.array([
np.cos(i*np.arccos(xs))
for i in range(n)
]).T
la.cond(V)
```
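For comparison, a quick check of the same construction on a grid of Chebyshev nodes (a sketch following the claim above; by discrete orthogonality the condition number should come out as a small constant, $\sqrt{2}$):
```python
n = 20
xs = np.cos((2*np.arange(n) + 1)/(2*n) * np.pi)  # Chebyshev nodes
V = np.array([
    np.cos(i*np.arccos(xs))
    for i in range(n)
]).T
la.cond(V)
```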
```python
```
|
{"hexsha": "aeb5d48d99398b895b8b12191e8997e85b9e1577", "size": 8701, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "cleared-demos/interpolation/Orthogonal Polynomials.ipynb", "max_stars_repo_name": "xywei/numerics-notes", "max_stars_repo_head_hexsha": "70e67e17d855b7bb06a0de7e3570d40ad50f941b", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 20, "max_stars_repo_stars_event_min_datetime": "2021-01-24T21:12:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-02T19:58:25.000Z", "max_issues_repo_path": "cleared-demos/interpolation/Orthogonal Polynomials.ipynb", "max_issues_repo_name": "xywei/numerics-notes", "max_issues_repo_head_hexsha": "70e67e17d855b7bb06a0de7e3570d40ad50f941b", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-08-24T17:48:50.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-14T21:22:02.000Z", "max_forks_repo_path": "cleared-demos/interpolation/Orthogonal Polynomials.ipynb", "max_forks_repo_name": "xywei/numerics-notes", "max_forks_repo_head_hexsha": "70e67e17d855b7bb06a0de7e3570d40ad50f941b", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2020-11-23T09:56:26.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-24T17:30:26.000Z", "avg_line_length": 24.86, "max_line_length": 155, "alphanum_fraction": 0.4650040225, "converted": true, "num_tokens": 936}
|
import copy
from liegroups import SO3, SE3
import transforms3d as tf3d
from transforms3d.quaternions import mat2quat
from numpy.linalg import lstsq
import numpy as np
TASK_DIM = 6
JOINT_NAMES = 1
JOINT_ACTIVE = 3
LINK_NAMES = 12
ZERO_DISP = [0, 0, 0]
POS = range(0, 3)
ROT = range(3, 6)
KI = .01
# originally from https://github.com/utiasSTARS/pyb-manipulator/tree/manipulator-learning
def pose_error(p1, q1, p2, q2):
"""
    Twist representing the distance between two poses, expressed in the world frame.
    The transform between the poses is computed in the pose-1 frame, then rotated back to the world frame.
"""
r1 = SO3.from_quaternion(q1, 'xyzw')
r2 = SO3.from_quaternion(q2, 'xyzw')
t1 = SE3(r1, p1)
t2 = SE3(r2, p2)
xi = SE3.log((t1.inv().dot(t2)))
return r1.dot(xi[POS]), r1.dot(xi[ROT])
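# Usage sketch (hypothetical values): with identity rotations (quaternion
# (0, 0, 0, 1) in xyzw order) the rotational error is zero and the translational
# error is just the position offset, e.g.
#   pose_error(np.array([0., 0., 0.5]), np.array([0., 0., 0., 1.]),
#              np.array([0.1, 0., 0.5]), np.array([0., 0., 0., 1.]))
# returns approximately (array([0.1, 0., 0.]), array([0., 0., 0.])).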
class Manipulator:
"""
Provides a pybullet API wrapper for simpler interfacing and manipulator-specific functions.
The update() function should be called in a loop in order to store joint states and update joint controls.
"""
def __init__(self,
pb_client,
urdf_path,
ee_link_index,
tool_link_index,
control_method,
gripper_control_method,
gripper_indices=(),
arm_indices=(),
gripper_max=(),
base_pos=(0,0,0),
base_rot=(0, 0, 0, 1),
get_velocities=False,
self_collision=False,
get_ee_ft=True,
use_ft_impedance=False,
force_gravity_sub=0,
max_gripper_vel=0.8,
gripper_force=10,
pos_ctrl_max_arm_force=None):
self._pb_client = pb_client
self.self_collision = self_collision
self.urdf_path = urdf_path
self.init_base_pos = base_pos
self.init_base_rot = base_rot
# user selected parameters -- non-private can be modified on the fly
if not self_collision:
self._arm = [self._pb_client.loadURDF(urdf_path)] # arm object
else:
self._arm = [self._pb_client.loadURDF(urdf_path, flags=pb_client.URDF_USE_SELF_COLLISION)] # arm object
self._num_jnt = self._pb_client.getNumJoints(self._arm[0]) # number of joints
        self._num_lnk = self._pb_client.getNumJoints(self._arm[0])  # number of links (equals the number of joints in PyBullet)
self._jnt_infos = [self._pb_client.getJointInfo(self._arm[0], i) for i in range(self._num_jnt)] # list of joint info objects
self._active_ind = [j for j, i in zip(range(len(self._jnt_infos)), self._jnt_infos) if
i[JOINT_ACTIVE] > -1] # indices of active joints
self._true_active_ind = self._active_ind[:]
        self._gripper_ind = gripper_indices  # gripper joint indices
if len(arm_indices) > 0:
self._arm_ind = list(arm_indices)
self._active_ind = list(arm_indices) + list(gripper_indices)
else:
self._arm_ind = [e for e in self._active_ind if e not in tuple(self._gripper_ind)] # arm joint indices
self._num_jnt_gripper = len(self._gripper_ind) # number of gripper joints
self._num_jnt_arm = len(self._active_ind) - self._num_jnt_gripper # number of arm joints
self._control_method = control_method # ee control method
self._gripper_control_method = gripper_control_method # gripper control method
self._ee_link_ind = ee_link_index # index of end effector link
self._tool_link_ind = tool_link_index
self._get_velocities = get_velocities
self.pos_control_max_velocity = 1e10 # max joint velocity in pos control, can be set by user in set_frame_pose_goal
self.pos_ctrl_max_arm_force = pos_ctrl_max_arm_force
# define containers for states, poses, jacobians
self.lnk_state = [None] * self._num_lnk
self.lnk_pose = [None] * self._num_lnk
self.lnk_pose_ref_frames = {}
self.lnk_vel = [None] * self._num_lnk
self.lnk_vel_ref_frames = {}
self.J = np.zeros([self._num_lnk, TASK_DIM, self._num_jnt_arm])
self.H = np.zeros([self._num_lnk, TASK_DIM, self._num_jnt_arm, self._num_jnt_arm])
# gripper options
self.gripper_max = gripper_max # open max and closed max respectively
self.max_gripper_vel = max_gripper_vel
self.gripper_force = gripper_force
self.gripper_p = 5 # for p control
# initial values
self.vel_cmd = np.zeros(len(self._active_ind))
self.pos_cmd = np.zeros(len(self._active_ind))
# set starting base position and orientation
self._pb_client.resetBasePositionAndOrientation(self._arm[0], base_pos, base_rot)
self.get_joint_states()
self._reset_all_flags() # reset all flags
# error used in I PID component
self._e = 0
# force torque
if get_ee_ft:
self.ee_ft = np.zeros(6)
self._pb_client.enableJointForceTorqueSensor(self._arm[0], self._arm_ind[-1], enableSensor=True)
self.ft_gravity = [0, 0, force_gravity_sub]
self.use_ft_impedance = False
if use_ft_impedance:
self.use_ft_impedance = True
self.ft_gravity_buffer_size = 50
self.ft_gravity_buffer = []
# get fixed transform from force torque to tool frame
ft_pos, ft_orient = self._pb_client.getLinkState(self._arm[0], self._arm_ind[-1])[4:6]
t_pos, t_orient = self._pb_client.getLinkState(self._arm[0], self._tool_link_ind)[4:6]
T_world_ft = self.pb_pos_orient_to_mat(ft_pos, ft_orient)
T_world_tool = self.pb_pos_orient_to_mat(t_pos, t_orient)
self.T_ft_to_tool = np.linalg.inv(T_world_ft).dot(T_world_tool)
self.T_tool_to_ft = np.linalg.inv(self.T_ft_to_tool)
def _reset_all_flags(self):
"""
Reset all flags to false
"""
self.__have_state = [False] * self._num_lnk
self.__have_pose = [False] * self._num_lnk
self.__have_pose_with_ref = []
self.__have_vel_with_ref = []
self.__have_vel = [False] * self._num_lnk
self.__have_J = [False] * self._num_lnk
self.__have_H = [False] * self._num_lnk
# GET - PRIVATE
# --------------------------------------------------------------------------------------------------------------
def get_link_names(self):
"""
Returns a list of all link names
"""
names = []
for info in self._jnt_infos:
names.append(info[LINK_NAMES])
return names
def get_joint_names(self):
"""
Returns a list of all joint names
"""
names = []
for info in self._jnt_infos:
names.append(info[JOINT_NAMES])
return names
def get_ee_ft(self):
"""
Get force torque reading of final joint
"""
jnt_state = self._pb_client.getJointState(self._arm[0], self._arm_ind[-1])
ft = np.array(jnt_state[2])
# rotate gravity to subtract from force measurement
ft_pos, ft_orient = self._pb_client.getLinkState(self._arm[0], self._arm_ind[-1])[4:6]
R_ft = self.pb_pos_orient_to_mat(ft_pos, ft_orient)[:3, :3]
gravity_in_ft = R_ft.dot(self.ft_gravity)
ft[:3] = ft[:3] - gravity_in_ft
self.ee_ft = ft
return self.ee_ft
def get_joint_states(self):
"""
Get positions, velocities and torques of active joints (as opposed to passive, fixed joints)
"""
jnt_states = self._pb_client.getJointStates(self._arm[0], range(self._pb_client.getNumJoints(self._arm[0])))
# jnt_states = [j for j, i in zip(jnt_states, self._jnt_infos) if i[3] > -1] # get only active states
jnt_states = [jnt_states[i] for i in self._active_ind]
self.jnt_pos = np.array([state[0] for state in jnt_states])
self.jnt_vel = np.array([state[1] for state in jnt_states])
self.jnt_torq = np.array([state[3] for state in jnt_states])
return self.jnt_pos, self.jnt_vel, self.jnt_torq
def get_link_state(self, link_index):
"""
Returns information on the link URDF frame and centre of mass poses in the world frame
"""
if not self.__have_state[link_index]:
self.lnk_state[link_index] = self._pb_client.getLinkState(self._arm[0],
linkIndex=link_index,
computeLinkVelocity=self._get_velocities)
self.__have_state[link_index] = True
return self.lnk_state[link_index]
def get_link_pose(self, link_index, ref_frame_index=None):
"""
        Get a link's pose, in the world frame or relative to ref_frame_index if given,
        as a 7-dimensional vector containing the position (x, y, z) and quaternion (x, y, z, w).
:param link_index: Index for link to get pose of.
:param ref_frame_index: Index for link to get pose relative to.
"""
key = str([ref_frame_index, link_index])
if [ref_frame_index, link_index] not in self.__have_pose_with_ref:
lnk_state = self.get_link_state(link_index)
lnk_frame_pos = np.asarray(lnk_state[4])
lnk_frame_rot = np.asarray(lnk_state[5])
if ref_frame_index is not None:
ref_pose = self.get_link_pose(ref_frame_index)
ref_pose_mat = np.eye(4)
ref_pose_mat[:3, :3] = SO3.from_quaternion(ref_pose[3:], 'xyzw').as_matrix()
ref_pose_mat[:3, 3] = ref_pose[:3]
tf_pose_world_to_ref = np.linalg.inv(ref_pose_mat)
lnk_pose_mat = np.eye(4)
lnk_pose_mat[:3, :3] = SO3.from_quaternion(lnk_frame_rot, 'xyzw').as_matrix()
lnk_pose_mat[:3, 3] = lnk_frame_pos
lnk_pose_in_ref_mat = np.dot(tf_pose_world_to_ref, lnk_pose_mat)
lnk_frame_pos = lnk_pose_in_ref_mat[:3, 3]
                lnk_frame_rot_wxyz = mat2quat(lnk_pose_in_ref_mat[:3, :3])
                # reorder quaternion from (w, x, y, z) to (x, y, z, w)
                lnk_frame_rot = np.array([*lnk_frame_rot_wxyz[1:4], lnk_frame_rot_wxyz[0]])
            self.lnk_pose_ref_frames[key] = np.concatenate((lnk_frame_pos, lnk_frame_rot))
self.__have_pose_with_ref.append([ref_frame_index, link_index])
return self.lnk_pose_ref_frames[key]
def get_link_vel(self, link_index, ref_frame_index=None):
"""
Get a link's velocity in the given reference frame as a 6 dimensional vector containing
translational and rotational velocity.
:param link_index:
:return:
"""
if not self._get_velocities:
raise AttributeError("Manipulator variable _get_velocities must be True to get velocity values.")
key = str([ref_frame_index, link_index])
if [ref_frame_index, link_index] not in self.__have_vel_with_ref:
lnk_state = self.get_link_state(link_index)
lnk_frame_lin_vel = np.asarray(lnk_state[6])
lnk_frame_rot_vel = np.asarray(lnk_state[7])
if ref_frame_index is not None:
cur_rot = self.get_link_pose(ref_frame_index)[3:]
lnk_frame_lin_vel = lnk_frame_lin_vel.dot(SO3.from_quaternion(cur_rot, 'xyzw').as_matrix())
lnk_frame_rot_vel = lnk_frame_rot_vel.dot(SO3.from_quaternion(cur_rot, 'xyzw').as_matrix())
# self.lnk_vel[link_index] = np.concatenate((lnk_frame_lin_vel, lnk_frame_rot_vel))
# self.__have_vel[link_index] = True
# return self.lnk_vel[link_index]
self.lnk_vel_ref_frames[key] = np.concatenate((lnk_frame_lin_vel, lnk_frame_rot_vel))
self.__have_vel_with_ref.append([ref_frame_index, link_index])
return self.lnk_vel_ref_frames[key]
def get_link_acc(self):
"""
Get a link's acceleration in the given reference frame as a 6 dimensional vector containing
translational and rotational acceleration.
:param link_index:
:return:
"""
raise NotImplementedError("Getting accelerations not yet implemented.")
def _get_link_jacobian(self, link_index):
"""
Get the Jacobian of a link frame in the form 6xN [J_trans; J_rot]
"""
if not self.__have_J[link_index]:
jnt_pos = self.jnt_pos
j_t, j_r = self._pb_client.calculateJacobian(self._arm[0], link_index, ZERO_DISP, list(jnt_pos),
[0] * len(jnt_pos), [0] * len(jnt_pos))
j = np.concatenate((j_t, j_r), axis=0)
self.J[link_index, :, :] = j[:, :self._num_jnt_arm] # we don't need columns associated with the gripper
self.__have_J[link_index] = True
return self.J[link_index, :, :]
def _get_link_hessian(self, link_index):
"""
Compute the Jacobian derivative w.r.t joint angles
Ref: Arjang Hourtash, 2005.
"""
if not self.__have_H[link_index]:
j = self._get_link_jacobian(link_index)
for k in range(1, self._num_jnt_arm):
j_k = j[:, k]
for l in range(1, self._num_jnt_arm):
j_l = j[:, l]
h = (np.cross(j_k[ROT], j_l[POS]), np.cross(j_k[ROT], j_l[ROT]))
self.H[link_index, :, l, k] = np.concatenate(h, axis=0).T
self.__have_H[link_index] = True
return self.H[link_index, :, :, :]
# JOINT CONTROL - PRIVATE
# --------------------------------------------------------------------------------------------------------------
def _hard_set_joint_positions(self, cmd):
"""
Set joint positions without simulating actual control loops
"""
k = 0
        cmd_ind = [j for j, i in zip(range(self._pb_client.getNumJoints(self._arm[0])), self._jnt_infos) if i[JOINT_ACTIVE] > -1]
for j in cmd_ind:
self._pb_client.resetJointState(self._arm[0], j, cmd[k])
k = k + 1
def _joint_position_control(self, cmd, gripper_only=False, arm_only=False):
"""
Position control of joints.
"""
if not gripper_only:
args = [self._arm[0]]
for i in range(self._num_jnt_arm):
kwargs = dict(jointIndex=self._active_ind[i], controlMode=self._pb_client.POSITION_CONTROL,
targetPosition=cmd[i], maxVelocity=self.pos_control_max_velocity)
if self.pos_ctrl_max_arm_force is not None:
kwargs['force'] = self.pos_ctrl_max_arm_force
self._pb_client.setJointMotorControl2(*args, **kwargs)
# enforce max velocity for gripper joints
if not arm_only:
for i in range(1, self._num_jnt_gripper + 1):
self._pb_client.setJointMotorControl2(
self._arm[0], jointIndex=self._active_ind[-i],
controlMode=self._pb_client.POSITION_CONTROL, targetPosition=cmd[-i],
maxVelocity=self.max_gripper_vel, force=self.gripper_force
)
def _joint_velocity_control(self, cmd, arm_only=False):
"""
Velocity control of joints.
"""
self._e = self._e + (cmd - self.jnt_vel) # integrate error
self._e[-self._num_jnt_gripper:] = 0 # no integral for gripper joints
forces = [1000] * self._num_jnt_arm
# forces = [50] * self._num_jnt_arm
if arm_only: # don't include gripper joints
if self._num_jnt_gripper > 0:
cmd = cmd[:-self._num_jnt_gripper]
e = self._e[:-self._num_jnt_gripper]
ji = self._active_ind[:-self._num_jnt_gripper]
else:
e = self._e
ji = self._active_ind
self._pb_client.setJointMotorControlArray(
self._arm[0], jointIndices=ji,
controlMode=self._pb_client.VELOCITY_CONTROL, targetVelocities=cmd + KI * e,
forces=forces)
else: # include gripper joints
forces = forces + [1] * self._num_jnt_gripper
self._pb_client.setJointMotorControlArray(self._arm[0], jointIndices=self._active_ind,
controlMode=self._pb_client.VELOCITY_CONTROL,
targetVelocities=cmd + KI * self._e, forces=forces)
# OTHER
# ----------------------------------------------------------------------------------------------------------------
def check_contact(self, objects=()):
"""
        Checks for contacts between the manipulator and a given list of link indices.
"""
if not objects:
objects = range(self._num_jnt)
for i in objects:
cont = self._pb_client.getContactPoints(self._arm[0], -1, i)
if cont:
return True
return False
# SET GOALS
# ----------------------------------------------------------------------------------------------------------------
def set_control_method(self, m):
"""
Sets the control method variable
"""
self._control_method = m
def set_joint_position_goal(self, cmd):
"""
Set goal joint position
"""
self.pos_cmd = cmd
def set_joint_velocity_goal(self, cmd):
"""
Set goal joint velocity
"""
self.vel_cmd = cmd
def set_frame_pose_goal(self, index, t_pos, t_rot, ref_frame_index=None, max_joint_velocity=None):
        """ Set a pose goal for an arbitrary frame. """
if ref_frame_index is not None:
ref_frame_pose = self.get_link_pose(link_index=ref_frame_index)
T_world_to_ref = np.eye(4)
T_world_to_ref[:3, 3] = ref_frame_pose[:3]
T_world_to_ref[:3, :3] = SO3.from_quaternion(ref_frame_pose[3:], 'xyzw').as_matrix()
T_ref_to_des = np.eye(4)
T_ref_to_des[:3, 3] = t_pos
T_ref_to_des[:3, :3] = SO3.from_quaternion(t_rot, 'xyzw').as_matrix()
T_world_to_des = np.dot(T_world_to_ref, T_ref_to_des)
t_pos = T_world_to_des[:3, 3]
t_rot_wxyz = mat2quat(T_world_to_des[:3, :3])
t_rot = np.array([*t_rot_wxyz[1:4], t_rot_wxyz[0]])
result = self._pb_client.calculateInverseKinematics(self._arm[0], index, targetPosition=t_pos.tolist(),
targetOrientation=t_rot.tolist(), maxNumIterations=200,
residualThreshold=0.002)
        joint_angles = np.array(result)
        if max_joint_velocity is not None:
            self.pos_control_max_velocity = max_joint_velocity
        self.set_joint_position_goal(np.concatenate((joint_angles[:self._num_jnt_arm], np.zeros(self._num_jnt_gripper))))
def set_frame_velocity_goal(self, index, t_vel, task, impedance_K=np.diag([300] * 3 + [30] * 3)):
"""
Set Cartesian velocity goal for arbitrary frame, optionally modifying action for simple impedance control
with ee ft sensor.
"""
j = self._get_link_jacobian(index)
if self.use_ft_impedance and index == self._tool_link_ind:
t_vel = self.impedance_mod_vel(t_vel, 5, .5, impedance_K) # this doesn't work too well at the moment
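        # least-squares solve for the joint velocities: dq minimizes ||J[task, :] dq - t_vel[task]||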
dq, res, rank, a = lstsq(j[task, :],t_vel[task],rcond = None) # LS solver
self.set_joint_velocity_goal(np.concatenate((dq, np.zeros(self._num_jnt_gripper)))) # Add zeros for gripper
def impedance_mod_vel(self, vel, f_max, t_max, K=np.eye(6) * 1e3):
"""
Modify a velocity command using ee force torque sensor with basic impedance control
"""
norm = np.linalg.norm
inv = np.linalg.inv
vel_mod = copy.deepcopy(vel)
ft = np.array(self.get_ee_ft())
# for automatically generating ft_gravity, but can cause issues for use with real env, so currently unused
if self.ft_gravity is None:
self.ft_gravity_buffer.append(ft[:3])
if len(self.ft_gravity_buffer) == self.ft_gravity_buffer_size:
self.ft_gravity = np.array([0, 0, np.linalg.norm(np.array(self.ft_gravity_buffer).mean(axis=0))])
return vel_mod
# rotate gravity to subtract from force measurement
ft_pos, ft_orient = self._pb_client.getLinkState(self._arm[0], self._arm_ind[-1])[4:6]
R_ft = self.pb_pos_orient_to_mat(ft_pos, ft_orient)[:3, :3]
gravity_in_ft = R_ft.dot(self.ft_gravity)
ft[:3] = ft[:3] - gravity_in_ft
# transform ee ft measurement to tool pose
force_tool = self.T_ft_to_tool[:3, :3].dot(ft[:3])
torque_tool = self.T_ft_to_tool[:3, :3].dot(ft[3:])
t_norm = norm(torque_tool)
if t_norm > t_max and False:
new_torque = inv(K[3:, 3:]).dot(ft[3:])
new_t_norm = norm(new_torque)
new_t_max = 1 / K[3, 3] * t_max
R_t_ext = tf3d.axangles.axangle2mat(new_torque / new_t_norm, new_t_norm - new_t_max, )
T_R_mod = np.eye(4)
T_R_mod[:3, :3] = R_t_ext
T_mod_torque = self.T_tool_to_ft.dot(T_R_mod).dot(self.T_ft_to_tool)
# get T_mod_torque as ax angle and delta pos
T_mod_torque_ax, T_mod_torque_ang = tf3d.axangles.mat2axangle(T_mod_torque[:3, :3])
vel_mod[:3] = vel[:3] - T_mod_torque[:3, 3]
vel_mod[3:] = vel[3:] - T_mod_torque_ax * T_mod_torque_ang
f_norm = norm(force_tool)
if f_norm > f_max:
new_force = force_tool - (force_tool / f_norm) * f_max
vel_mod[:3] = inv(K[:3, :3]).dot(new_force) + vel_mod[:3]
return vel_mod
def pb_pos_orient_to_mat(self, pos, orient):
"""
Get a 4x4 transformation matrix given a pb pos and orientation
"""
mat = np.eye(4)
mat[:3, :3] = SO3.from_quaternion(orient, 'xyzw').as_matrix()
mat[:3, 3] = pos
return mat
def invert_transform(self, mat):
"""
Inverse transform of 4x4 matrix
"""
mat_out = np.eye(4)
C_out_inv = mat[:3, :3].T
mat_out[:3, :3] = C_out_inv
mat_out[:3, 3] = -C_out_inv.dot(mat[:3, 3])
return mat_out
def close_gripper(self):
"""
Close the robot gripper (modifies the current joint position command)
"""
if self._num_jnt_gripper > 0:
if self._gripper_control_method == 'p':
self.pos_cmd[-self._num_jnt_gripper:] = self.gripper_max[1] * np.ones(self._num_jnt_gripper)
elif self._gripper_control_method == 'v':
# self.vel_cmd[-self._num_jnt_gripper:] = self.gripper_p * (self.jnt_pos[-self._num_jnt_gripper:]
# <= self.gripper_max[1]).astype(float)
self.vel_cmd[-self._num_jnt_gripper:] = self.gripper_p * (self.gripper_max[1] - self.jnt_pos[-self._num_jnt_gripper:]).astype(float)
self.vel_cmd[-self._num_jnt_gripper:] = np.clip(self.vel_cmd[-self._num_jnt_gripper:],
-np.inf, self.max_gripper_vel)
def open_gripper(self):
"""
Open the robot gripper (modifies the current joint position command)
"""
if self._num_jnt_gripper > 0:
if self._gripper_control_method == 'p':
self.pos_cmd[-self._num_jnt_gripper:] = self.gripper_max[0] * np.ones(self._num_jnt_gripper)
elif self._gripper_control_method == 'v':
self.vel_cmd[-self._num_jnt_gripper:] = -self.gripper_p * (self.jnt_pos[-self._num_jnt_gripper:]
- self.gripper_max[0]).astype(float)
self.vel_cmd[-self._num_jnt_gripper:] = np.clip(self.vel_cmd[-self._num_jnt_gripper:],
-self.max_gripper_vel, np.inf)
def set_gripper_cmd(self, cmd):
"""
Set the current gripper command to a specific value, clipped to limits.
"""
if self._num_jnt_gripper > 0:
if self._gripper_control_method == 'p':
self.pos_cmd[-self._num_jnt_gripper:] = np.clip(cmd, self.gripper_max[1], self.gripper_max[0])
elif self._gripper_control_method == 'v':
                self.vel_cmd[-self._num_jnt_gripper:] = np.clip(cmd, -self.max_gripper_vel, self.max_gripper_vel)
# UPDATE INTERNALLY
# ----------------------------------------------------------------------------------------------------------------
def update(self):
"""
        Run one iteration of the control loop, then refresh the cached joint states and flags.
"""
# run iteration of control loop
if self._control_method == 'p' and self._gripper_control_method == 'p':
self._joint_position_control(self.pos_cmd, arm_only=False)
elif self._control_method == 'v' and self._gripper_control_method == 'v':
self._joint_velocity_control(self.vel_cmd, arm_only=False)
elif self._control_method == 'v' and self._gripper_control_method == 'p':
self._joint_velocity_control(self.vel_cmd, arm_only=True)
self._joint_position_control(self.pos_cmd, gripper_only=True)
# get joint positions, velocities, torques
self.get_joint_states()
self._reset_all_flags()
def reset_commands(self):
"""
Set all commands to zero.
"""
self.vel_cmd = np.zeros(len(self._active_ind))
self.pos_cmd = np.zeros(len(self._active_ind))
def reload_urdf(self):
"""
Reload the urdf of the robot after deleting the current robot.
"""
# this checks to see if the object currently exists
if self._pb_client.getBodyUniqueId(self._arm[0]) >= 0:
self._pb_client.removeBody(self._arm[0])
self.__init__(self._pb_client, self.urdf_path, self._ee_link_ind, self._tool_link_ind,
self._control_method, self._gripper_control_method,
self._gripper_ind, self._arm_ind, self.gripper_max,
self.init_base_pos, self.init_base_rot, self._get_velocities, self.self_collision)
# if not self.self_collision:
# self._arm = [self._pb_client.loadURDF(self.urdf_path)] # arm object
# else:
# self._arm = [self._pb_client.loadURDF(self.urdf_path,
# flags=self._pb_client.URDF_USE_SELF_COLLISION)] # arm object
|
{"hexsha": "4e83fe92a15113e803278c7ffeef93f58c769270", "size": 26902, "ext": "py", "lang": "Python", "max_stars_repo_path": "manipulator_learning/sim/robots/manipulator.py", "max_stars_repo_name": "utiasSTARS/manipulator_learning", "max_stars_repo_head_hexsha": "9a0e0c66c0a3c07124331f010bd04bb52eaf95bb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-12-16T02:24:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-16T02:27:00.000Z", "max_issues_repo_path": "manipulator_learning/sim/robots/manipulator.py", "max_issues_repo_name": "utiasSTARS/manipulator_learning", "max_issues_repo_head_hexsha": "9a0e0c66c0a3c07124331f010bd04bb52eaf95bb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "manipulator_learning/sim/robots/manipulator.py", "max_forks_repo_name": "utiasSTARS/manipulator_learning", "max_forks_repo_head_hexsha": "9a0e0c66c0a3c07124331f010bd04bb52eaf95bb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.3204508857, "max_line_length": 148, "alphanum_fraction": 0.5940078805, "include": true, "reason": "import numpy,from numpy", "num_tokens": 6616}
|
[STATEMENT]
lemma mult_minus_eq_nat:
fixes x::nat and y ::nat and z::nat
assumes " x+y = z"
shows " -x-y = -z "
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. - int x - int y = - int z
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
x + y = z
goal (1 subgoal):
1. - int x - int y = - int z
[PROOF STEP]
by linarith
|
{"llama_tokens": 155, "file": "Amicable_Numbers_Amicable_Numbers", "length": 2}
|
export parallel
parallel() = schedule_on(ThreadsScheduler())
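# Usage sketch (hypothetical `source` and `actor`): subscribe!(source |> parallel(), actor)
# schedules the processing of `source` on a thread-based scheduler via `schedule_on`.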
|
{"hexsha": "e0966ea7ae0b903d7543c22f728afea3852a79af", "size": 62, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/operators/parallel.jl", "max_stars_repo_name": "hgeorgako/Rocket.jl", "max_stars_repo_head_hexsha": "9661dad340e9a079ebd6ed57dcf9e5db31af637f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 109, "max_stars_repo_stars_event_min_datetime": "2020-02-04T00:32:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-21T06:39:36.000Z", "max_issues_repo_path": "src/operators/parallel.jl", "max_issues_repo_name": "biaslab/Rx.jl", "max_issues_repo_head_hexsha": "ffaf60bbcd3c104ca8132fe22149d3ce2e26be03", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 18, "max_issues_repo_issues_event_min_datetime": "2020-03-18T09:44:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-03T11:08:28.000Z", "max_forks_repo_path": "src/operators/parallel.jl", "max_forks_repo_name": "biaslab/Rx.jl", "max_forks_repo_head_hexsha": "ffaf60bbcd3c104ca8132fe22149d3ce2e26be03", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2020-02-26T15:49:08.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-06T17:25:43.000Z", "avg_line_length": 15.5, "max_line_length": 44, "alphanum_fraction": 0.7903225806, "num_tokens": 12}
|
# Copyright 2021 Mechanics of Microstructures Group
# at The University of Manchester
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import ast
from typing import List
from scipy.stats import linregress
from scipy.stats._stats_mstats_common import LinregressResult
import pandas as pd
from defdap.plotting import Plot, GrainPlot
from defdap import hrdic
class GrainInspector:
"""
Class containing the interactive grain inspector tool for slip trace analysis
and relative displacement ratio analysis.
"""
def __init__(self,
currMap: 'hrdic.Map',
vmax: float = 0.1,
corrAngle: float = 0):
# Initialise some values
self.grainID = 0
self.currMap = currMap
self.currEBSDMap = self.currMap.ebsdMap
self.currDICGrain = self.currMap[self.grainID]
self.currEBSDGrain = self.currDICGrain.ebsdGrain
self.vmax = vmax
self.corrAngle = corrAngle
self.filename = str(self.currMap.retrieveName()) + '_RDR.txt'
# Draw the figure
self.draw()
def draw(self):
""" Draw the main window, buttons, text boxes and axes.
"""
# Plot window
self.plot = Plot(ax=None, makeInteractive=True, figsize=(14,8), title='Grain Inspector')
######## Buttons
self.plot.addButton(
'Save\nLine', self.saveLine, (0.73, 0.48, 0.05, 0.04))
self.plot.addButton(
'Previous\nGrain', lambda e, p: self.gotoGrain(self.grainID-1, p), (0.73, 0.94, 0.05, 0.04))
self.plot.addButton(
'Next\nGrain', lambda e, p: self.gotoGrain(self.grainID+1, p), (0.79, 0.94, 0.05, 0.04))
self.plot.addButton(
'Run\nAll STA', self.batchRunSTA, (0.85, 0.07, 0.11, 0.04))
self.plot.addButton(
'Clear\nAll Lines', self.clearAllLines, (0.89, 0.48, 0.05, 0.04))
self.plot.addButton(
'Load\nFile', self.loadFile, (0.85, 0.02, 0.05, 0.04))
self.plot.addButton(
'Save\nFile', self.saveFile, (0.91, 0.02, 0.05, 0.04))
# Text boxes
self.plot.addTextBox(label='', loc=(0.7, 0.02, 0.13, 0.04),
changeHandler=self.updateFilename, initial = self.filename)
self.plot.addTextBox(label='Go to \ngrain ID:', loc=(0.9, 0.94, 0.05, 0.04),
submitHandler=self.gotoGrain)
self.plot.addTextBox(label='Remove\nID:', loc=(0.83, 0.48, 0.05, 0.04),
submitHandler=self.removeLine)
self.RDRGroupBox = self.plot.addTextBox(label='Run RDR only\non group:', loc=(0.78, 0.07, 0.05, 0.04),
submitHandler=self.runRDRGroup)
# Axes
self.maxShearAx = self.plot.addAxes((0.05, 0.4, 0.65, 0.55))
self.slipTraceAx = self.plot.addAxes((0.2, 0.05, 0.6, 0.3))
self.unitCellAx = self.plot.addAxes((0.05, 0.055, 0.15, 0.3), proj='3d')
self.grainInfoAx = self.plot.addAxes((0.73, 0.86, 0.25, 0.06))
self.lineInfoAx = self.plot.addAxes((0.73, 0.55, 0.25, 0.3))
self.groupsInfoAx = self.plot.addAxes((0.73, 0.15, 0.25, 0.3))
self.grainPlot = self.currMap[self.grainID].plotMaxShear(fig=self.plot.fig, ax=self.maxShearAx,
vmax=self.vmax, plotScaleBar=True, plotColourBar=True)
self.plot.ax.axis('off')
        # Draw the stuff that will need to be redrawn often in a separate function
self.redraw()
def gotoGrain(self,
event: int,
plot):
""" Go to a specified grain ID.
Parameters
----------
event
Grain ID to go to.
"""
## Go to grain ID specified in event
self.grainID=int(event)
self.grainPlot.arrow=None
self.currDICGrain = self.currMap[self.grainID]
self.currEBSDGrain = self.currDICGrain.ebsdGrain
self.redraw()
def saveLine(self,
event: np.ndarray,
plot):
""" Save the start point, end point and angle of drawn line into the grain.
Parameters
----------
event
Start x, start y, end x, end y point of line passed from drawn line.
"""
# Get angle of lines
lineAngle = 90-np.rad2deg(np.arctan2(self.grainPlot.p2[1]-self.grainPlot.p1[1],
self.grainPlot.p2[0]-self.grainPlot.p1[0]))
if lineAngle > 180: lineAngle -= 180
elif lineAngle < 0: lineAngle += 180
lineAngle -= self.corrAngle
# Two decimal places
points = [float("{:.2f}".format(point)) for point in self.grainPlot.p1+self.grainPlot.p2]
lineAngle = float("{:.2f}".format(lineAngle))
# Save drawn line to the DIC grain
self.currDICGrain.pointsList.append([points, lineAngle, -1])
# Group lines and redraw
self.groupLines()
self.redrawLine()
def groupLines(self,
grain: 'defdap.hrdic.Grain'=None):
"""
        Group the lines drawn in the current grain by angle: a line joins an existing
        group when its angle is within 10 degrees of that group's average, otherwise it
        starts a new group; the group average is updated and active slip planes detected.
        groupsList is a list of line groups: [id, angle, [slip plane ids], [angular deviations]]
Parameters
----------
grain
Grain for which to group the slip lines.
"""
        if grain is None:
grain = self.currDICGrain
if grain.pointsList == []:
grain.groupsList = []
else:
for i, line in enumerate(grain.pointsList):
angle = line[1]
if i == 0:
line[2]=0 # Make group 0 for first detected angle
grain.groupsList = [[0, angle, 0, 0, 0]]
nextGroup=1
                else:  # if there is more than one angle
                    if np.any(np.abs(np.array([x[1] for x in grain.groupsList]) - angle) < 10):
                        # if within +/- 10 degrees of an existing group's average angle, join that group
                        group = np.argmin(np.abs(np.array([x[1] for x in grain.groupsList]) - angle))
                        grain.pointsList[i][2] = group
                        # update the group's average angle
                        newAv = float('{0:.2f}'.format(np.average([x[1] for x in grain.pointsList if x[2] == group])))
                        grain.groupsList[group][1] = newAv
else:
# Make new group and set
grain.groupsList.append([nextGroup, angle, 0, 0, 0])
line[2]=nextGroup
nextGroup += 1
# Detect active slip systems in each group
for group in grain.groupsList:
activePlanes = []
deviation = []
experimentalAngle = group[1]
for idx, theoreticalAngle in enumerate(np.rad2deg(grain.ebsdGrain.slipTraceAngles)):
if theoreticalAngle-5 < experimentalAngle < theoreticalAngle+5:
activePlanes.append(idx)
deviation.append(float('{0:.2f}'.format(experimentalAngle-theoreticalAngle)))
group[2] = activePlanes
group[3] = deviation
def clearAllLines(self,
event,
plot):
""" Clear all lines in a given grain.
"""
self.currDICGrain.pointsList = []
self.currDICGrain.groupsList = []
self.redraw()
def removeLine(self,
event: int,
plot):
""" Remove single line [runs after submitting a text box].
Parameters
----------
event
Line ID to remove.
"""
## Remove single line
del self.currDICGrain.pointsList[int(event)]
self.groupLines()
self.redraw()
def redraw(self):
"""Draw items which need to be redrawn when changing grain ID.
"""
# Plot max shear for grain
self.maxShearAx.clear()
self.grainPlot = self.currMap[self.grainID].plotMaxShear(
fig=self.plot.fig, ax=self.maxShearAx, vmax=self.vmax, plotColourBar=False, plotScaleBar=True)
# Draw unit cell
self.unitCellAx.clear()
self.currEBSDGrain.plotUnitCell(fig=self.plot.fig, ax=self.unitCellAx)
# Write grain info text
self.grainInfoAx.clear()
self.grainInfoAx.axis('off')
grainInfoText = 'Grain ID: {0} / {1}\n'.format(self.grainID, len(self.currMap.grainList)-1)
grainInfoText += 'Min: {0:.1f} % Mean:{1:.1f} % Max: {2:.1f} %'.format(
np.min(self.currDICGrain.maxShearList)*100,
np.mean(self.currDICGrain.maxShearList)*100,
np.max(self.currDICGrain.maxShearList)*100)
self.plot.addText(self.grainInfoAx, 0, 1, grainInfoText, va='top', ha='left', fontsize=10)
# Detect lines
self.plot.addEventHandler('button_press_event',lambda e, p: self.grainPlot.lineSlice(e, p))
self.plot.addEventHandler('button_release_event', lambda e, p: self.grainPlot.lineSlice(e, p))
self.redrawLine()
def redrawLine(self):
"""
Draw items which need to be redrawn when adding a line.
"""
# Write lines text and draw lines
linesTxt = 'List of lines\n\nLineID x0 y0 x1 y1 Angle Group\n'
if self.currDICGrain.pointsList != []:
for idx, points in enumerate(self.currDICGrain.pointsList):
linesTxt += '{0} {1:.1f} {2:.1f} {3:.1f} {4:.1f} {5:.1f} {6}\n'.format(idx,
points[0][0],points[0][1],points[0][2],points[0][3],points[1],points[2])
self.grainPlot.addArrow(startEnd=points[0], clearPrev=False, persistent=True, label=idx)
self.lineInfoAx.clear()
self.lineInfoAx.axis('off')
self.plot.addText(self.lineInfoAx, 0, 1, linesTxt, va='top', fontsize=10)
# Write groups info text
groupsTxt = 'List of groups\n\nGroupID Angle System Dev RDR\n'
if self.currDICGrain.groupsList != []:
for idx, group in enumerate(self.currDICGrain.groupsList):
groupsTxt += '{0} {1:.1f} {2} {3} {4:.2f}\n'.format(
idx, group[1], group[2], np.round(group[3], 3), group[4])
self.groupsInfoAx.clear()
self.groupsInfoAx.axis('off')
self.plot.addText(self.groupsInfoAx, 0, 1, groupsTxt, va='top', fontsize=10)
# Draw slip traces
self.slipTraceAx.clear()
self.slipTraceAx.set_aspect('equal', 'box')
slipPlot = GrainPlot(fig=self.plot.fig, callingGrain=self.currMap[self.grainID], ax=self.slipTraceAx)
traces = slipPlot.addSlipTraces(topOnly=True)
self.slipTraceAx.axis('off')
# Draw slip bands
bands = [elem[1] for elem in self.currDICGrain.groupsList]
        if self.currDICGrain.groupsList is not None:
slipPlot.addSlipBands(topOnly=True, angles=list(np.deg2rad(bands)))
def runRDRGroup(self,
event: int,
plot):
""" Run RDR on a specified group, upon submitting a text box.
Parameters
----------
event
Group ID specified from text box.
"""
## Run RDR for group of lines
if event != '':
self.calcRDR(grain = self.currDICGrain, group=int(event))
self.RDRGroupBox.set_val('')
def batchRunSTA(self,
event,
plot):
""" Run slip trace analysis on all grains which hve slip trace lines drawn.
"""
# Print header
print("Grain\tEul1\tEul2\tEul3\tMaxSF\tGroup\tAngle\tSystem\tDev\RDR")
# Print information for each grain
for idx, grain in enumerate(self.currMap):
if grain.pointsList != []:
for group in grain.groupsList:
maxSF = np.max([item for sublist in grain.ebsdGrain.averageSchmidFactors for item in sublist])
eulers = self.currEBSDGrain.refOri.eulerAngles()*180/np.pi
text = '{0}\t{1:.1f}\t{2:.1f}\t{3:.1f}\t{4:.3f}\t'.format(
idx, eulers[0], eulers[1], eulers[2], maxSF)
text += '{0}\t{1:.1f}\t{2}\t{3}\t{4:.2f}'.format(
group[0], group[1], group[2], np.round(group[3],3), group[4])
print(text)
def calcRDR(self,
grain: int,
group: int,
showPlot: bool = True,
length: float = 2.5):
""" Calculates the relative displacement ratio for a given grain and group.
Parameters
----------
grain
DIC grain ID to run RDR on.
group
group ID to run RDR on.
showPlot
if True, show plot window.
length
length of perpendicular lines used for RDR.
"""
ulist=[]; vlist=[]; allxlist = []; allylist = [];
# Get all lines belonging to group
points = []
for point in grain.pointsList:
if point[2] == group:
points.append(point[0])
for point in points:
x0=point[0]; y0=point[1]; x1=point[2]; y1=point[3];
grad = (y1-y0)/(x1-x0)
invgrad = -1/grad
profile_length = np.sqrt((y1-y0)**2+(x1-x0)**2)
num = np.round(profile_length*2)
### Calculate positions for each point along slip trace line (x,y)
x, y = np.round(np.linspace(x0, x1, int(num))), np.round(np.linspace(y0, y1, int(num)))
df = pd.DataFrame({'x':x, 'y':y}).drop_duplicates()
x,y = df['x'].values.tolist(),df['y'].values.tolist()
## Calculate deviation from (0,0) for points along line with angle perpendicular to slip line (xnew,ynew)
x0new = np.sqrt(length/(invgrad**2+1))*np.sign(grad)
y0new = -np.sqrt(length/(1/invgrad**2+1))
x1new = -np.sqrt(length/(invgrad**2+1))*np.sign(grad)
y1new = np.sqrt(length/(1/invgrad**2+1))
profile_length=np.sqrt((y1new-y0new)**2+(x1new-x0new)**2)
num = np.round(profile_length)
xnew, ynew = np.linspace(x0new, x1new, int(num)), np.linspace(y0new, y1new, int(num))
xnew, ynew = np.around(xnew).astype(int), np.around(ynew).astype(int)
df = pd.DataFrame({'x':xnew, 'y':ynew}).drop_duplicates()
xnew,ynew = df['x'].values.tolist(), df['y'].values.tolist()
for x,y in zip(x,y):
xperp = []; yperp = [];
for xdiff, ydiff in zip(xnew, ynew):
xperp.append(int(x+xdiff))
yperp.append(int(y+ydiff))
allxlist.append(xperp)
allylist.append(yperp)
xmap = self.currDICGrain.extremeCoords[0] + xperp
ymap = self.currDICGrain.extremeCoords[1] + yperp
### For all points, append u and v to list
u = []; v = [];
for xmap, ymap in zip(xmap,ymap):
u.append((self.currMap.crop(self.currMap.x_map))[ymap, xmap])
v.append((self.currMap.crop(self.currMap.y_map))[ymap, xmap])
### Take away mean
u = u-np.mean(u); v = v-np.mean(v)
### Append to main lists (ulist,vlist)
ulist.extend(u)
vlist.extend(v)
### Linear regression of ucentered against vcentered
linRegResults = linregress(x=vlist,y=ulist)
# Save measured RDR
grain.groupsList[group][4] = linRegResults.slope
if showPlot: self.plotRDR(grain, group, ulist, vlist, allxlist, allylist, linRegResults)
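    # Usage sketch: invoked from the 'Run RDR only on group' text box, e.g.
    #   self.calcRDR(grain=self.currDICGrain, group=0)
    # stores the measured RDR (the regression slope) in grain.groupsList[group][4].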
def plotRDR(self,
grain: int,
group: int,
ulist: List[float],
vlist: List[float],
allxlist: List[float],
allylist: List[float],
linRegResults: 'LinregressResult'):
"""
Plot RDR figure, including location of perpendicular lines and scatter plot of ucentered vs vcentered.
Parameters
----------
grain
DIC grain to plot.
group
Group ID to plot.
ulist
List of ucentered values.
vlist
List of vcentered values.
allxlist
List of all x values.
allylist
List of all y values.
linRegResults
Results from linear regression of ucentered vs vcentered
{slope, intercept, rvalue, pvalue, stderr}.
"""
# Draw window and axes
self.rdrPlot = Plot(ax=None, makeInteractive=True, title='RDR Calculation', figsize=(21, 7))
self.rdrPlot.ax.axis('off')
self.rdrPlot.grainAx = self.rdrPlot.addAxes((0.05, 0.07, 0.20, 0.85))
self.rdrPlot.textAx = self.rdrPlot.addAxes((0.27, 0.07, 0.20, 0.85))
self.rdrPlot.textAx.axis('off')
self.rdrPlot.numLineAx = self.rdrPlot.addAxes((0.48, 0.07, 0.2, 0.85))
self.rdrPlot.numLineAx.axis('off')
self.rdrPlot.plotAx = self.rdrPlot.addAxes((0.75, 0.07, 0.2, 0.85))
## Draw grain plot
self.rdrPlot.grainPlot = self.currDICGrain.plotMaxShear(fig=self.rdrPlot.fig, ax=self.rdrPlot.grainAx,
plotColourBar=False, plotScaleBar = True)
self.rdrPlot.grainPlot.addColourBar(label='Effective Shear Strain', fraction=0.046, pad=0.04)
## Draw all points
self.rdrPlot.grainAx.plot(allxlist, allylist, 'rx',lw=0.5)
for xlist, ylist in zip(allxlist, allylist):
self.rdrPlot.grainAx.plot(xlist, ylist, '-',lw=1)
## Generate scatter plot
slope = linRegResults.slope
r_value = linRegResults.rvalue
intercept = linRegResults.intercept
std_err = linRegResults.stderr
self.rdrPlot.plotAx.scatter(x=vlist,y=ulist,marker='x', lw=1)
self.rdrPlot.plotAx.plot(
[np.min(vlist), np.max(vlist)],[slope*np.min(vlist)+intercept,slope*np.max(vlist)+intercept], '-')
self.rdrPlot.plotAx.set_xlabel('v-centered')
self.rdrPlot.plotAx.set_ylabel('u-centered')
self.rdrPlot.addText(self.rdrPlot.plotAx, 0.95, 0.01, 'Slope = {0:.3f} ± {1:.3f}\nR-squared = {2:.3f}\nn={3}'
.format(slope,std_err,r_value**2,len(ulist)), va='bottom', ha='right',
transform=self.rdrPlot.plotAx.transAxes, fontsize=10);
## Write grain info
ebsdGrain = grain.ebsdGrain
ebsdGrain.calcSlipTraces()
if ebsdGrain.averageSchmidFactors is None:
raise Exception("Run 'calcAverageGrainSchmidFactors' first")
eulers = np.rad2deg(ebsdGrain.refOri.eulerAngles())
text = 'Average angle: {0:.2f}\n'.format(grain.groupsList[group][1])
text += 'Eulers: {0:.1f} {1:.1f} {2:.1f}\n\n'.format(eulers[0], eulers[1], eulers[2])
self.rdrPlot.addText(self.rdrPlot.textAx, 0.15, 1, text, fontsize=10, va='top')
## Write slip system info
RDRs = []; offset = 0;
for idx, (ssGroup, sfGroup, slipTraceAngle) in enumerate(
zip(grain.ebsdMap.slipSystems, ebsdGrain.averageSchmidFactors, np.rad2deg(ebsdGrain.slipTraceAngles))):
text = "{0:s} {1:.1f}\n".format(ssGroup[0].slipPlaneLabel, slipTraceAngle)
tempRDRs = [];
for ss, sf in zip(ssGroup, sfGroup):
slipDirSample = ebsdGrain.refOri.conjugate.transformVector(ss.slipDir)
text = text + " {0:s} SF: {1:.3f} RDR: {2:.3f}\n".format\
(ss.slipDirLabel, sf,-slipDirSample[0]/slipDirSample[1])
RDR = -slipDirSample[0]/slipDirSample[1]
tempRDRs.append(RDR)
RDRs.append(tempRDRs)
if idx in grain.groupsList[group][2]:
self.rdrPlot.addText(self.rdrPlot.textAx, 0.15, 0.9-offset, text, weight='bold', fontsize=10, va='top')
else:
self.rdrPlot.addText(self.rdrPlot.textAx, 0.15, 0.9-offset, text, fontsize=10, va='top')
offset += 0.0275 * text.count('\n')
# Plot RDR values on number line
uniqueRDRs = set()
for x in [item for sublist in RDRs for item in sublist]: uniqueRDRs.add(x)
self.rdrPlot.numLineAx.axvline(x=0, ymin=-20, ymax=20, c='k')
        self.rdrPlot.numLineAx.plot(np.zeros(len(uniqueRDRs)), list(uniqueRDRs), 'bo', label='Theoretical RDR values')
self.rdrPlot.numLineAx.plot([0], slope, 'ro', label='Measured RDR value')
self.rdrPlot.addText(self.rdrPlot.numLineAx, -0.009, slope-0.01, '{0:.3f}'.format(float(slope)))
self.rdrPlot.numLineAx.legend(bbox_to_anchor=(1.15, 1.05))
# Label RDRs by slip system on number line
for RDR in list(uniqueRDRs):
self.rdrPlot.addText(self.rdrPlot.numLineAx, -0.009, RDR-0.01, '{0:.3f}'.format(float(RDR)))
txt = ''
for idx, ssGroup in enumerate(RDRs):
for idx2, rdr in enumerate(ssGroup):
if rdr == RDR:
txt += str('{0} {1} '.format(self.currEBSDMap.slipSystems[idx][idx2].slipPlaneLabel,
self.currEBSDMap.slipSystems[idx][idx2].slipDirLabel))
self.rdrPlot.addText(self.rdrPlot.numLineAx,0.002, RDR-0.01, txt)
self.rdrPlot.numLineAx.set_ylim(slope-1, slope+1)
self.rdrPlot.numLineAx.set_xlim(-0.01, 0.05)
def updateFilename(self,
event: str,
plot):
""" Update class variable filename, based on text input from textbox handler.
event:
Text in textbox.
"""
self.filename = event
def saveFile(self,
event,
plot):
""" Save a file which contains definitions of slip lines drawn in grains
[(x0, y0, x1, y1), angle, groupID]
        and groups of lines, defined by an average angle and identified slip plane
[groupID, angle, [slip plane id(s)], [angular deviation(s)]]
"""
with open(self.currMap.path + str(self.filename), 'w') as file:
file.write('# This is a file generated by defdap which contains definitions of slip lines drawn in grains by grainInspector\n')
file.write('# [(x0, y0, x1, y1), angle, groupID]\n')
            file.write('# and groups of lines, defined by an average angle and identified slip plane\n')
file.write('# [groupID, angle, [slip plane id], [angular deviation]\n\n')
for i, grain in enumerate(self.currMap):
if grain.pointsList != []:
file.write('Grain {0}\n'.format(i))
file.write('{0} Lines\n'.format(len(grain.pointsList)))
for point in grain.pointsList:
file.write(str(point)+'\n')
file.write('{0} Groups\n'.format(len(grain.groupsList)))
for group in grain.groupsList:
file.write(str(group)+'\n')
file.write('\n')
def loadFile(self,
event,
plot):
""" Load a file which contains definitions of slip lines drawn in grains
[(x0, y0, x1, y1), angle, groupID]
        and groups of lines, defined by an average angle and identified slip plane
[groupID, angle, [slip plane id(s)], [angular deviation(s)]]
"""
with open(self.currMap.path + str(self.filename), 'r') as file:
lines = file.readlines()
# Parse file and make list of
# [start index, grain ID, number of lines, number of groups]
indexlist=[]
for i, line in enumerate(lines):
if line[0] != '#' and len(line) >1:
if ('Grain') in line:
grainID = int(line.split(' ')[-1])
startIndex = i
if ('Lines') in line:
numLines = int(line.split(' ')[0])
if ('Groups') in line:
numGroups = int(line.split(' ')[0])
indexlist.append([startIndex, grainID, numLines, numGroups])
# Write data from file into grain
for startIndex, grainID, numLines, numGroups in indexlist:
startIndexLines = startIndex+2
grainPoints = lines[startIndexLines:startIndexLines+numLines]
for point in grainPoints:
self.currMap[grainID].pointsList.append(ast.literal_eval(point.split('\\')[0]))
startIndexGroups = startIndex+3+numLines
grainGroups = lines[startIndexGroups:startIndexGroups+numGroups]
for group in grainGroups:
self.currMap[grainID].groupsList.append(ast.literal_eval(group.split('\\')[0]))
self.redraw()
|
{"hexsha": "10e3a72b02ffc19c0e561d284b451942da95bdcc", "size": 25820, "ext": "py", "lang": "Python", "max_stars_repo_path": "defdap/inspector.py", "max_stars_repo_name": "MechMicroMan/DefDAP", "max_stars_repo_head_hexsha": "d8769c9255b6a64ab528d99057afa5c05b8f5cac", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2019-08-29T12:55:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-07T15:58:58.000Z", "max_issues_repo_path": "defdap/inspector.py", "max_issues_repo_name": "MechMicroMan/DefDAP", "max_issues_repo_head_hexsha": "d8769c9255b6a64ab528d99057afa5c05b8f5cac", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 38, "max_issues_repo_issues_event_min_datetime": "2019-12-01T17:36:34.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-26T11:17:27.000Z", "max_forks_repo_path": "defdap/inspector.py", "max_forks_repo_name": "MechMicroMan/DefDAP", "max_forks_repo_head_hexsha": "d8769c9255b6a64ab528d99057afa5c05b8f5cac", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2020-04-06T11:36:54.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-20T15:45:45.000Z", "avg_line_length": 41.2460063898, "max_line_length": 139, "alphanum_fraction": 0.5581332301, "include": true, "reason": "import numpy,from scipy", "num_tokens": 6735}
|
[STATEMENT]
lemma trace_ft_append: "trace_between s (tr1@tr2) s'
\<longleftrightarrow> (\<exists>sh. trace_between s tr1 sh \<and> trace_between sh tr2 s')"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. trace_between s (tr1 @ tr2) s' = (\<exists>sh. trace_between s tr1 sh \<and> trace_between sh tr2 s')
[PROOF STEP]
apply (induction tr2 arbitrary: s' rule: rev_induct)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>s'. trace_between s (tr1 @ []) s' = (\<exists>sh. trace_between s tr1 sh \<and> trace_between sh [] s')
2. \<And>x xs s'. (\<And>s'. trace_between s (tr1 @ xs) s' = (\<exists>sh. trace_between s tr1 sh \<and> trace_between sh xs s')) \<Longrightarrow> trace_between s (tr1 @ xs ## x) s' = (\<exists>sh. trace_between s tr1 sh \<and> trace_between sh (xs ## x) s')
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x xs s'. (\<And>s'. trace_between s (tr1 @ xs) s' = (\<exists>sh. trace_between s tr1 sh \<and> trace_between sh xs s')) \<Longrightarrow> trace_between s (tr1 @ xs ## x) s' = (\<exists>sh. trace_between s tr1 sh \<and> trace_between sh (xs ## x) s')
[PROOF STEP]
apply (subst append_assoc[symmetric], subst trace_ft_snoc)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x xs s'. (\<And>s'. trace_between s (tr1 @ xs) s' = (\<exists>sh. trace_between s tr1 sh \<and> trace_between sh xs s')) \<Longrightarrow> (\<exists>sh a ou. x = Trans sh a ou s' \<and> trace_between s (tr1 @ xs) sh \<and> step sh a = (ou, s')) = (\<exists>sh. trace_between s tr1 sh \<and> trace_between sh (xs ## x) s')
[PROOF STEP]
apply (auto simp: trace_ft_snoc)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 652, "file": "CoCon_Traceback_Properties", "length": 5}
|
#generate community
p = random_micrm_params(2,2,0.5)
#convert to ODESystem
@named sys = micrm_system(p)
@testset "MTK system" begin
@test length(states(sys)) == 4
@test length(parameters(sys)) == 14
end
#convert to problem
#define starting mass
u0 = fill(0.1, 4)
u0 = [states(sys)[i] => u0[i] for i = eachindex(u0)]
tspan = (0.0, 100.0) #define tspan
prob = ODEProblem(sys, u0, tspan, [], jac = true)
sol = solve(prob)
|
{"hexsha": "20cd60a0ec582172c618b69046c4efc2900842e1", "size": 434, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/MTK_test.jl", "max_stars_repo_name": "CleggTom/MiCRM.jl", "max_stars_repo_head_hexsha": "578a31774b81927a444eb39c459e4af4281448b7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/MTK_test.jl", "max_issues_repo_name": "CleggTom/MiCRM.jl", "max_issues_repo_head_hexsha": "578a31774b81927a444eb39c459e4af4281448b7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/MTK_test.jl", "max_forks_repo_name": "CleggTom/MiCRM.jl", "max_forks_repo_head_hexsha": "578a31774b81927a444eb39c459e4af4281448b7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.7, "max_line_length": 52, "alphanum_fraction": 0.6612903226, "num_tokens": 157}
|
#include <Eigen/Core>
#include <iostream>
using namespace Eigen;
using namespace std;
void PolygonToEquations(const MatrixX2d& pts, MatrixX2d& ab, VectorXd& c) {
// ax + by + c <= 0
// assume polygon is convex
for (int i=0; i < pts.rows(); ++i) {
int i1 = (i+1) % pts.rows();
double x0 = pts(i,0),
y0 = pts(i,1),
x1 = pts(i1,0),
y1 = pts(i1,1);
ab(i,0) = -(y1 - y0);
ab(i,1) = x1 - x0;
ab.row(i).normalize();
c(i) = -ab.row(i).dot(pts.row(i));
}
}
int main() {
MatrixX2d m(4,2), ab(4,2);
VectorXd c(4);
m << 0,0,
0,1,
1,1,
1,0;
PolygonToEquations(m, ab, c);
cout << "ab: " << ab << endl;
cout << "c: " << c.transpose() << endl;
}
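// Expected output for the clockwise-ordered unit square above (hand check):
//   ab rows: (-1,0), (0,1), (1,0), (0,-1)   c: (0, -1, -1, 0)
// i.e. the four half-planes x >= 0, y <= 1, x <= 1, y >= 0.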
|
{"hexsha": "176d0b83e605d5d5c3ec9862f4f5338b3efe44a5", "size": 748, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/sandbox/polygon_expt.cpp", "max_stars_repo_name": "HARPLab/trajopt", "max_stars_repo_head_hexsha": "40e2260d8f1e4d0a6a7a8997927bd65e5f36c3a4", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 250.0, "max_stars_repo_stars_event_min_datetime": "2015-01-13T04:38:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T15:52:54.000Z", "max_issues_repo_path": "src/sandbox/polygon_expt.cpp", "max_issues_repo_name": "HARPLab/trajopt", "max_issues_repo_head_hexsha": "40e2260d8f1e4d0a6a7a8997927bd65e5f36c3a4", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 31.0, "max_issues_repo_issues_event_min_datetime": "2015-08-19T13:14:56.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T08:08:26.000Z", "max_forks_repo_path": "src/sandbox/polygon_expt.cpp", "max_forks_repo_name": "HARPLab/trajopt", "max_forks_repo_head_hexsha": "40e2260d8f1e4d0a6a7a8997927bd65e5f36c3a4", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 118.0, "max_forks_repo_forks_event_min_datetime": "2015-01-08T16:06:50.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-19T11:44:00.000Z", "avg_line_length": 20.7777777778, "max_line_length": 75, "alphanum_fraction": 0.5093582888, "num_tokens": 287}
|
@memoize function result(x, y=True, maxdepth=2, truth_table=truths) # whether x ⟹ y: returns True, False or Unknowable
    # hardcoded things
    if y == True && class(x) <: ASubset
sub, super = args(x)
if sub == super
return True
elseif super == Ω
return True
end
        if class(sub) <: AConstantSymbol && class(super) <: AConstantSymbol
if sub == Statements && super == Sets
return False
elseif sub == Sets && super == Statements
return False
elseif sub == Ω && super == Statements
return False
elseif sub == Ω && super == Sets
return False
end
end
end
depth = 0
values = Term{<:ABasic}[x]
old_values = values # checked trees
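    # breadth-first search over rewrites of x: expand the frontier, drop terms already
    # seen, and stop once y or ~y has been derived (or maxdepth is reached)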
while !(y in values) && !(~(y) in values) && (depth < maxdepth)
println("$depth $maxdepth $x $y")
depth += 1
println("$depth $maxdepth")
new_values = []
for value in values
append!(new_values, rewrites(value, truth_table))
end
values = setdiff(new_values, old_values)
append!(old_values, values)
# values = values[map(count_ops, values) .<= maxops]
println("$depth $maxdepth")
println(length(values))
end
if y in values
return True
elseif ~(y) in values
return False
else
return Unknowable
end
end
export result
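# A self-contained sketch of the breadth-first rewrite search that result()
# implements: expand a frontier of terms with a rewrite step, prune anything
# already seen, and stop when the goal appears or the depth budget runs out.
# `rewrite_step` stands in for rewrites(value, truth_table); the integer toy
# example at the end is illustrative only.
function bfs_prove(seed, goal, rewrite_step; maxdepth = 2)
    seen = Any[seed]
    frontier = Any[seed]
    for _ in 1:maxdepth
        (goal in frontier) && return :proved
        next = Any[]
        for t in frontier
            append!(next, rewrite_step(t))
        end
        frontier = setdiff(next, seen)    # keep only unexplored terms
        append!(seen, frontier)
    end
    goal in frontier ? :proved : :unknown
end

bfs_prove(1, 4, n -> [2n, n + 1]; maxdepth = 3)   # :proved via 1 -> 2 -> 4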
|
{"hexsha": "d6979bc6ea76cb14c551649505ce2141d38064a2", "size": 1493, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/tree_type3/proving/Proving.jl", "max_stars_repo_name": "Maelstrom6/Breadth.jl", "max_stars_repo_head_hexsha": "5ccb6ec063e1d0337856257608ad887a7bd53eb8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/tree_type3/proving/Proving.jl", "max_issues_repo_name": "Maelstrom6/Breadth.jl", "max_issues_repo_head_hexsha": "5ccb6ec063e1d0337856257608ad887a7bd53eb8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-01-26T00:50:17.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-26T00:50:17.000Z", "max_forks_repo_path": "src/tree_type3/proving/Proving.jl", "max_forks_repo_name": "Maelstrom6/Breadth.jl", "max_forks_repo_head_hexsha": "5ccb6ec063e1d0337856257608ad887a7bd53eb8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.86, "max_line_length": 117, "alphanum_fraction": 0.5351640991, "num_tokens": 368}
|
[STATEMENT]
lemma alternativelistconc2[rule_format]:
"a \<in> set (net_list_aux [x]) \<longrightarrow> a \<in> set (net_list_aux [y,x])"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. a \<in> set (net_list_aux [x]) \<longrightarrow> a \<in> set (net_list_aux [y, x])
[PROOF STEP]
by (induct y, simp_all)
|
{"llama_tokens": 132, "file": "UPF_Firewall_FWNormalisation_NormalisationGenericProofs", "length": 1}
|
#include "CorePch.h"
#include <rtp++/network/TcpRtpConnection.h>
#include <boost/bind.hpp>
#include <boost/asio/ip/multicast.hpp>
#include <boost/asio/ip/udp.hpp>
#include <boost/asio/placeholders.hpp>
#include <boost/make_shared.hpp>
#include <cpputil/OBitStream.h>
#include <rtp++/RtpTime.h>
#include <rtp++/network/NetworkPacket.h>
#ifndef _WIN32
#define LOG_TCP_INFO
#ifdef LOG_TCP_INFO
#include <netinet/tcp.h>
#endif
#endif
using namespace boost::asio::ip;
namespace rtp_plus_plus
{
TcpRtpConnection::ptr TcpRtpConnection::create(boost::asio::io_service& ioService, const std::string& sBindIp, unsigned short uiBindPort)
{
return boost::make_shared<TcpRtpConnection>(boost::ref(ioService), boost::ref(sBindIp), uiBindPort);
}
TcpRtpConnection::TcpRtpConnection(boost::asio::io_service& ioService)
:m_rIoService(ioService),
m_timer(ioService),
m_socket(m_rIoService),
m_bConnectionInProgress(false)
{
VLOG(2) << "TcpRtpConnection constructor";
}
#define BIND_TO_0_0_0_0_FOR_EC2_NAT_TRAVERSAL
TcpRtpConnection::TcpRtpConnection(boost::asio::io_service& ioService, const std::string& sBindIp, unsigned short uiBindPort)
:m_rIoService(ioService),
m_timer(ioService),
#ifdef BIND_TO_0_0_0_0_FOR_EC2_NAT_TRAVERSAL
m_sIpAddress("0.0.0.0"),
#else
m_sIpAddress(sBindIp),
#endif
m_uiPort(uiBindPort),
m_address(boost::asio::ip::address::from_string(m_sIpAddress)),
m_endpoint(m_address, uiBindPort),
#if 0
m_socket(m_rIoService, boost::asio::ip::tcp::endpoint(boost::asio::ip::tcp::v4(), uiBindPort)),
#else
m_socket(m_rIoService),
#endif
m_bConnectionInProgress(false)
{
initialise();
VLOG(2) << "TcpRtpConnection constructor: socket bound to " << sBindIp << ":" << uiBindPort;
}
TcpRtpConnection::~TcpRtpConnection()
{
DLOG(INFO) << "[" << this << "] Destructor";
}
#define TCP_RTP_HEADER_SIZE 2
void TcpRtpConnection::start(tcp::resolver::iterator endpoint_iter)
{
connect(endpoint_iter);
}
void TcpRtpConnection::start()
{
boost::asio::async_read(m_socket,
m_streamBuffer, boost::asio::transfer_at_least(TCP_RTP_HEADER_SIZE),
boost::bind(&TcpRtpConnection::readHeaderHandler, shared_from_this(),
boost::asio::placeholders::error, boost::asio::placeholders::bytes_transferred));
//m_socket.async_read_some(boost::asio::buffer(m_buffer),
// boost::bind(&TcpRtpConnection::readCompletionHandler, shared_from_this(),
// boost::asio::placeholders::error,
// boost::asio::placeholders::bytes_transferred));
//if (m_bTimeOut)
//{
// /* Create a task that will be called if we wait more than 300ms */
// m_timer.expires_from_now(boost::posix_time::milliseconds(m_uiTimeoutMs));
// m_timer.async_wait(boost::bind(&TcpRtpConnection::timeoutHandler, shared_from_this(), boost::asio::placeholders::error));
//}
}
void TcpRtpConnection::stop()
{
m_socket.close();
}
void TcpRtpConnection::connect(tcp::resolver::iterator endpoint_iter)
{
//tcp::resolver resolver(m_rIoService);
//// TODO:
//std::string sRemoteIp = "127.0.0.1";
//uint16_t uiRemotePort = 49170;
//tcp::resolver::query query(sRemoteIp, toString(uiRemotePort));
//tcp::resolver::iterator iterator = resolver.resolve(query);
tcp::endpoint endpoint = *endpoint_iter;
socket().async_connect(endpoint,
boost::bind(&TcpRtpConnection::handleConnect, shared_from_this(), boost::asio::placeholders::error, ++endpoint_iter));
m_bConnectionInProgress = true;
}
void TcpRtpConnection::handleConnect( const boost::system::error_code& error, tcp::resolver::iterator endpointIterator )
{
if (!error)
{
#ifdef RTVC_DEBUG_XMLP_SOCKET
LOG_DBG1(rLogger, LOG_FUNCTION, "[%1%] ### Connection Ok ###", this);
#endif
LOG(INFO) << "Connection success";
m_bConnectionInProgress = false;
// start read
start();
// check for queued writes
// send next sample if one was queued in the meantime
if ( !m_vDeliveryQueue.empty() )
{
#if 0
DLOG(INFO) << "Sending queued packet Queue size: " << m_vDeliveryQueue.size();
#endif
NetworkPackage_t package = m_vDeliveryQueue.front();
Buffer networkPacket = package.first;
EndPoint ep = package.second;
boost::asio::async_write( m_socket,
boost::asio::buffer(networkPacket.data(), networkPacket.getSize()),
boost::bind(&TcpRtpConnection::writeCompletionHandler,
shared_from_this(),
boost::asio::placeholders::error,
boost::asio::placeholders::bytes_transferred)
);
}
}
else
{
if (error == boost::asio::error::invalid_argument)
{
if (endpointIterator == tcp::resolver::iterator())
{
// Failed to connect
LOG(WARNING) << "Failed to connect using endpoint! No more endpoints.";
// TODO: how to notify application: will a read or write on the unconnected socket trigger
// an error?
//m_writeCompletionHandler(error, pRequest, shared_from_this());
}
else
{
tcp::endpoint endpoint = *endpointIterator;
socket().async_connect(endpoint, boost::bind(&TcpRtpConnection::handleConnect,
shared_from_this(), boost::asio::placeholders::error, ++endpointIterator));
}
}
else
{
// TODO: how to notify application: will a read or write on the unconnected socket trigger
// an error?
//m_writeCompletionHandler(error, pRequest, shared_from_this());
}
}
}
void TcpRtpConnection::send(Buffer networkPacket, const EndPoint& endpoint)
{
VLOG(10) << "Outgoing data packet of size " << networkPacket.getSize();
// prepend size for TCP framing
Buffer newBuffer(new uint8_t[networkPacket.getSize() + 2], networkPacket.getSize() + 2);
OBitStream out(newBuffer);
uint16_t uiSize = networkPacket.getSize();
out.write(uiSize, 16);
const uint8_t* pDest = networkPacket.data();
out.writeBytes(pDest, uiSize);
boost::mutex::scoped_lock l(m_lock);
bool bBusyWriting = !m_vDeliveryQueue.empty();
m_vDeliveryQueue.push_back( std::make_pair(newBuffer, endpoint) );
if (!m_bConnectionInProgress && !bBusyWriting)
{
boost::asio::async_write(m_socket, boost::asio::buffer(newBuffer.data(), newBuffer.getSize()),
boost::bind(&TcpRtpConnection::writeCompletionHandler,
shared_from_this(),
boost::asio::placeholders::error,
boost::asio::placeholders::bytes_transferred)
);
}
else
{
#if 1
VLOG(10) << "Packet queued, connection or write in progress";
#endif
}
}
void TcpRtpConnection::close()
{
VLOG(1) << "[" << this << "] Closing socket [" << m_sIpAddress << ":" << m_uiPort << "]";
m_socket.close();
}
void TcpRtpConnection::initialise()
{
m_socket.open(m_endpoint.protocol());
// Binding to port 0 should result in the OS selecting the port
VLOG(1) << "[" << this << "] Binding socket [" << m_sIpAddress << ":" << m_uiPort << "]";
// this needs to happen before the bind
boost::asio::socket_base::reuse_address option(true);
m_socket.set_option(option);
boost::system::error_code ec;
//m_socket.bind(m_endpoint, ec);
//m_socket.bind(boost::asio::ip::udp::endpoint(boost::asio::ip::udp::v4(), m_uiPort), ec);
m_socket.bind(m_endpoint, ec);
if (ec)
{
LOG(WARNING) << "Error binding socket to " << m_sIpAddress << ":" << m_uiPort << " " << ec.message();
}
// it helps to increase the buffer size to lessen packet loss
//boost::asio::socket_base::receive_buffer_size option(udp_receiver_buffer_size_kb);
//m_socket.set_option(option);
}
void TcpRtpConnection::writeCompletionHandler(const boost::system::error_code& ec, std::size_t bytes_transferred)
{
boost::mutex::scoped_lock l(m_lock);
#ifdef LOG_TCP_INFO
/* Fill tcp_info structure with data */
  struct tcp_info tcp_info;
  socklen_t tcp_info_length = sizeof(tcp_info);
boost::asio::ip::tcp::socket::native_handle_type tcpSocket = m_socket.native_handle();
if ( getsockopt( tcpSocket, SOL_TCP, TCP_INFO, (void *)&tcp_info, &tcp_info_length ) == 0 )
{
VLOG(2) << "[" << this << "] Wrote " << bytes_transferred << " TCP Stats: "
<< " sent: " << tcp_info.tcpi_last_data_sent
<< " recv: " << tcp_info.tcpi_last_data_recv
<< " cwnd: " << tcp_info.tcpi_snd_cwnd
<< " send_sst " << tcp_info.tcpi_snd_ssthresh
<< " recv_sst " << tcp_info.tcpi_rcv_ssthresh
<< " rtt: " << tcp_info.tcpi_rtt
<< " rttvar: " << tcp_info.tcpi_rttvar
<< " unack: " << tcp_info.tcpi_unacked
<< " sack: " << tcp_info.tcpi_sacked
<< " lost: " << tcp_info.tcpi_lost
<< " rtx: " << tcp_info.tcpi_retrans
<< " fackets: " << tcp_info.tcpi_fackets;
#if 0
fprintf(statistics,"%.6f %u %u %u %u %u %u %u %u %u %u %u %u\n",
time_to_seconds( &time_start, &time_now ),
tcp_info.tcpi_last_data_sent,
tcp_info.tcpi_last_data_recv,
tcp_info.tcpi_snd_cwnd,
tcp_info.tcpi_snd_ssthresh,
tcp_info.tcpi_rcv_ssthresh,
tcp_info.tcpi_rtt,
tcp_info.tcpi_rttvar,
tcp_info.tcpi_unacked,
tcp_info.tcpi_sacked,
tcp_info.tcpi_lost,
tcp_info.tcpi_retrans,
tcp_info.tcpi_fackets
);
if ( fflush(statistics) != 0 ) {
fprintf(stderr, "Cannot flush buffers: %s\n", strerror(errno) );
}
#endif
}
#endif
NetworkPackage_t package = m_vDeliveryQueue.front();
m_vDeliveryQueue.pop_front();
if (m_fnOnSend)
{
m_fnOnSend(ec, shared_from_this(), package.first, package.second);
}
if (!ec)
{
VLOG(10) << "[" << this << "][" << m_sIpAddress << ":" << m_uiPort << "] Sent " << bytes_transferred << " to " << package.second.getAddress() << ":" << package.second.getPort();
// send next sample if one was queued in the meantime
if ( !m_vDeliveryQueue.empty() )
{
#if 0
DLOG(INFO) << "Sending queued packet Queue size: " << m_vDeliveryQueue.size();
#endif
NetworkPackage_t package = m_vDeliveryQueue.front();
Buffer networkPacket = package.first;
EndPoint ep = package.second;
boost::asio::async_write( m_socket,
boost::asio::buffer(networkPacket.data(), networkPacket.getSize()),
boost::bind(&TcpRtpConnection::writeCompletionHandler,
shared_from_this(),
boost::asio::placeholders::error,
boost::asio::placeholders::bytes_transferred)
);
}
}
else
{
LOG(WARNING) << "Send failed: " << ec.message();
}
}
void TcpRtpConnection::timeoutHandler(const boost::system::error_code& ec)
{
if (ec != boost::asio::error::operation_aborted)
{
// socket read timed out
if (m_fnOnTimeout) m_fnOnTimeout(shared_from_this());
}
}
void TcpRtpConnection::readCompletionHandler(const boost::system::error_code& ec, std::size_t bytes_received, uint32_t uiPacketSize)
{
// #define DEBUG_NTP
#ifdef DEBUG_NTP
DLOG(INFO) << "NTP arrival time: " << convertNtpTimestampToPosixTime(getNTPTimeStamp());
#endif
#ifdef LOG_TCP_INFO
/* Fill tcp_info structure with data */
  struct tcp_info tcp_info;
  socklen_t tcp_info_length = sizeof(tcp_info);
boost::asio::ip::tcp::socket::native_handle_type tcpSocket = m_socket.native_handle();
if ( getsockopt( tcpSocket, SOL_TCP, TCP_INFO, (void *)&tcp_info, &tcp_info_length ) == 0 )
{
VLOG(2) << "[" << this << "] Read " << bytes_received << " TCP Stats: "
<< " sent: " << tcp_info.tcpi_last_data_sent
<< " recv: " << tcp_info.tcpi_last_data_recv
<< " cwnd: " << tcp_info.tcpi_snd_cwnd
<< " send_sst " << tcp_info.tcpi_snd_ssthresh
<< " recv_sst " << tcp_info.tcpi_rcv_ssthresh
<< " rtt: " << tcp_info.tcpi_rtt
<< " rttvar: " << tcp_info.tcpi_rttvar
<< " unack: " << tcp_info.tcpi_unacked
<< " sack: " << tcp_info.tcpi_sacked
<< " lost: " << tcp_info.tcpi_lost
<< " rtx: " << tcp_info.tcpi_retrans
<< " fackets: " << tcp_info.tcpi_fackets;
#if 0
fprintf(statistics,"%.6f %u %u %u %u %u %u %u %u %u %u %u %u\n",
time_to_seconds( &time_start, &time_now ),
tcp_info.tcpi_last_data_sent,
tcp_info.tcpi_last_data_recv,
tcp_info.tcpi_snd_cwnd,
tcp_info.tcpi_snd_ssthresh,
tcp_info.tcpi_rcv_ssthresh,
tcp_info.tcpi_rtt,
tcp_info.tcpi_rttvar,
tcp_info.tcpi_unacked,
tcp_info.tcpi_sacked,
tcp_info.tcpi_lost,
tcp_info.tcpi_retrans,
tcp_info.tcpi_fackets
);
if ( fflush(statistics) != 0 ) {
fprintf(stderr, "Cannot flush buffers: %s\n", strerror(errno) );
}
#endif
}
#endif
if (!ec)
{
std::string sHostIp = m_socket.remote_endpoint().address().to_string();
uint16_t uiHostPort = m_socket.remote_endpoint().port();
VLOG(10) << "[" << this << "][" << m_sIpAddress << ":" << m_uiPort << "] Received " << bytes_received << " from " << sHostIp << ":" << uiHostPort;
EndPoint ep;
ep.setAddress(sHostIp);
ep.setPort(uiHostPort);
uint32_t uiSBSize = m_streamBuffer.size();
std::istream is(&m_streamBuffer);
// read media sample
NetworkPacket networkPacket(RtpTime::getNTPTimeStamp());
networkPacket.setData(new uint8_t[uiPacketSize], uiPacketSize);
is.read((char*)networkPacket.data(), uiPacketSize);
if (m_fnOnRecv)
m_fnOnRecv(ec, shared_from_this(), networkPacket, ep);
// start next read
do
{
uint32_t uiNextRead = 0;
uiSBSize = m_streamBuffer.size();
if (uiSBSize >= 2)
{
is.read((char*)m_sizeBuffer, TCP_RTP_HEADER_SIZE);
uint16_t uiPacketSize = (m_sizeBuffer[0] << 8) | m_sizeBuffer[1];
        // the enclosing do/while keeps parsing until no complete packet remains
if (m_streamBuffer.size() >= uiPacketSize )
{
// read media sample
NetworkPacket networkPacket(RtpTime::getNTPTimeStamp());
networkPacket.setData(new uint8_t[uiPacketSize], uiPacketSize);
is.read((char*)networkPacket.data(), uiPacketSize);
if (m_fnOnRecv)
m_fnOnRecv(ec, shared_from_this(), networkPacket, ep);
}
else
{
uiNextRead = uiPacketSize - m_streamBuffer.size();
boost::asio::async_read(m_socket,
m_streamBuffer, boost::asio::transfer_at_least(uiNextRead),
boost::bind(&TcpRtpConnection::readCompletionHandler, shared_from_this(),
boost::asio::placeholders::error, boost::asio::placeholders::bytes_transferred, uiPacketSize));
break;
}
}
else
{
// read next header
uiNextRead = TCP_RTP_HEADER_SIZE - m_streamBuffer.size();
boost::asio::async_read(m_socket,
m_streamBuffer, boost::asio::transfer_at_least(uiNextRead),
boost::bind(&TcpRtpConnection::readHeaderHandler, shared_from_this(),
boost::asio::placeholders::error, boost::asio::placeholders::bytes_transferred));
break;
}
}while (true);
}
else
{
if (ec != boost::asio::error::operation_aborted)
{
LOG(WARNING) << "Receive failed: " << ec.message();
if (m_fnOnRecv)
m_fnOnRecv(ec, shared_from_this(), NetworkPacket(), EndPoint());
}
}
}
void TcpRtpConnection::readHeaderHandler(const boost::system::error_code& ec, std::size_t bytes_received)
{
VLOG(15) << "Read header: bytes received: " << bytes_received;
// #define DEBUG_NTP
#ifdef DEBUG_NTP
DLOG(INFO) << "NTP arrival time: " << convertNtpTimestampToPosixTime(getNTPTimeStamp());
#endif
#ifdef LOG_TCP_INFO
/* Fill tcp_info structure with data */
  struct tcp_info tcp_info;
  socklen_t tcp_info_length = sizeof(tcp_info);
boost::asio::ip::tcp::socket::native_handle_type tcpSocket = m_socket.native_handle();
if ( getsockopt( tcpSocket, SOL_TCP, TCP_INFO, (void *)&tcp_info, &tcp_info_length ) == 0 )
{
VLOG(2) << "[" << this << "] Read " << bytes_received << " TCP Stats: "
<< " sent: " << tcp_info.tcpi_last_data_sent
<< " recv: " << tcp_info.tcpi_last_data_recv
<< " cwnd: " << tcp_info.tcpi_snd_cwnd
<< " send_sst " << tcp_info.tcpi_snd_ssthresh
<< " recv_sst " << tcp_info.tcpi_rcv_ssthresh
<< " rtt: " << tcp_info.tcpi_rtt
<< " rttvar: " << tcp_info.tcpi_rttvar
<< " unack: " << tcp_info.tcpi_unacked
<< " sack: " << tcp_info.tcpi_sacked
<< " lost: " << tcp_info.tcpi_lost
<< " rtx: " << tcp_info.tcpi_retrans
<< " fackets: " << tcp_info.tcpi_fackets;
#if 0
fprintf(statistics,"%.6f %u %u %u %u %u %u %u %u %u %u %u %u\n",
time_to_seconds( &time_start, &time_now ),
tcp_info.tcpi_last_data_sent,
tcp_info.tcpi_last_data_recv,
tcp_info.tcpi_snd_cwnd,
tcp_info.tcpi_snd_ssthresh,
tcp_info.tcpi_rcv_ssthresh,
tcp_info.tcpi_rtt,
tcp_info.tcpi_rttvar,
tcp_info.tcpi_unacked,
tcp_info.tcpi_sacked,
tcp_info.tcpi_lost,
tcp_info.tcpi_retrans,
tcp_info.tcpi_fackets
);
if ( fflush(statistics) != 0 ) {
fprintf(stderr, "Cannot flush buffers: %s\n", strerror(errno) );
}
#endif
}
#endif
if (!ec)
{
std::string sHostIp = m_socket.remote_endpoint().address().to_string();
uint16_t uiHostPort = m_socket.remote_endpoint().port();
VLOG(10) << "[" << this << "][" << m_sIpAddress << ":" << m_uiPort << "] Received " << bytes_received << " from " << sHostIp << ":" << uiHostPort;
EndPoint ep;
ep.setAddress(sHostIp);
ep.setPort(uiHostPort);
// read size
std::istream is(&m_streamBuffer);
size_t size = m_streamBuffer.size();
assert (size > 0);
uint32_t uiNextRead = 0;
do
{
if (m_streamBuffer.size() >= 2)
{
is.read((char*)m_sizeBuffer, TCP_RTP_HEADER_SIZE);
uint16_t uiPacketSize = (m_sizeBuffer[0] << 8) | m_sizeBuffer[1];
uint32_t uiSBSize = m_streamBuffer.size();
        // the enclosing do/while keeps parsing until no complete packet remains
if (uiSBSize >= uiPacketSize )
{
// read media sample
NetworkPacket networkPacket(RtpTime::getNTPTimeStamp());
networkPacket.setData(new uint8_t[uiPacketSize], uiPacketSize);
is.read((char*)networkPacket.data(), uiPacketSize);
if (m_fnOnRecv)
m_fnOnRecv(ec, shared_from_this(), networkPacket, ep);
}
else
{
uiNextRead = uiPacketSize - m_streamBuffer.size();
boost::asio::async_read(m_socket,
m_streamBuffer, boost::asio::transfer_at_least(uiNextRead),
boost::bind(&TcpRtpConnection::readCompletionHandler, shared_from_this(),
boost::asio::placeholders::error, boost::asio::placeholders::bytes_transferred, uiPacketSize));
break;
}
}
else
{
// read next header
uiNextRead = TCP_RTP_HEADER_SIZE - m_streamBuffer.size();
boost::asio::async_read(m_socket,
m_streamBuffer, boost::asio::transfer_at_least(uiNextRead),
boost::bind(&TcpRtpConnection::readHeaderHandler, shared_from_this(),
boost::asio::placeholders::error, boost::asio::placeholders::bytes_transferred));
break;
}
}while (true);
}
else
{
if (ec != boost::asio::error::operation_aborted)
{
LOG(WARNING) << "Receive failed: " << ec.message();
if (m_fnOnRecv)
m_fnOnRecv(ec, shared_from_this(), NetworkPacket(), EndPoint());
}
}
}
}
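# A hedged Julia sketch of the 2-byte, big-endian length framing that send()
# and the read handlers above implement over the TCP byte stream (the C++
# prepends uiSize via OBitStream and re-parses it in readHeaderHandler).
# frame_packet and deframe! are illustrative names, not part of the library.
frame_packet(payload::Vector{UInt8}) =
    vcat(UInt8[(length(payload) >> 8) & 0xff, length(payload) & 0xff], payload)

# Consume as many complete [size | payload] frames as the buffer holds,
# leaving any trailing partial frame in `buf` for the next read.
function deframe!(buf::Vector{UInt8})
    packets = Vector{UInt8}[]
    while length(buf) >= 2
        sz = (UInt16(buf[1]) << 8) | buf[2]   # network byte order
        length(buf) >= 2 + sz || break        # wait for more bytes
        push!(packets, buf[3:2 + sz])
        deleteat!(buf, 1:2 + sz)
    end
    packets
end

buf = vcat(frame_packet(UInt8[1, 2, 3]), frame_packet(UInt8[9]))
deframe!(buf)   # -> [[0x01, 0x02, 0x03], [0x09]]; buf is now empty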
|
{"hexsha": "a91cac243d49b01c169fbea522276642f5cc7192", "size": 20257, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/Lib/network/TcpRtpConnection.cpp", "max_stars_repo_name": "miseri/rtp_plus_plus", "max_stars_repo_head_hexsha": "244ddd86f40f15247dd39ae7f9283114c2ef03a2", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2021-07-14T08:15:05.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-14T08:15:05.000Z", "max_issues_repo_path": "src/Lib/network/TcpRtpConnection.cpp", "max_issues_repo_name": "7956968/rtp_plus_plus", "max_issues_repo_head_hexsha": "244ddd86f40f15247dd39ae7f9283114c2ef03a2", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Lib/network/TcpRtpConnection.cpp", "max_forks_repo_name": "7956968/rtp_plus_plus", "max_forks_repo_head_hexsha": "244ddd86f40f15247dd39ae7f9283114c2ef03a2", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2021-07-14T08:15:02.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-14T08:56:10.000Z", "avg_line_length": 35.1074523397, "max_line_length": 184, "alphanum_fraction": 0.6289677642, "num_tokens": 5242}
|
# This file is auto-generated by AWSMetadata.jl
using AWS
using AWS.AWSServices: directory_service
using AWS.Compat
using AWS.UUIDs
"""
AcceptSharedDirectory()
Accepts a directory sharing request that was sent from the directory owner account.
# Required Parameters
- `SharedDirectoryId`: Identifier of the shared directory in the directory consumer account. This identifier is different for each directory owner account.
"""
accept_shared_directory(SharedDirectoryId; aws_config::AWSConfig=global_aws_config()) = directory_service("AcceptSharedDirectory", Dict{String, Any}("SharedDirectoryId"=>SharedDirectoryId); aws_config=aws_config)
accept_shared_directory(SharedDirectoryId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("AcceptSharedDirectory", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("SharedDirectoryId"=>SharedDirectoryId), args)); aws_config=aws_config)
"""
AddIpRoutes()
If the DNS server for your on-premises domain uses a publicly addressable IP address, you must add a CIDR address block to correctly route traffic to and from your Microsoft AD on Amazon Web Services. AddIpRoutes adds this address block. You can also use AddIpRoutes to facilitate routing traffic that uses public IP ranges from your Microsoft AD on AWS to a peer VPC. Before you call AddIpRoutes, ensure that all of the required permissions have been explicitly granted through a policy. For details about what permissions are required to run the AddIpRoutes operation, see AWS Directory Service API Permissions: Actions, Resources, and Conditions Reference.
# Required Parameters
- `DirectoryId`: Identifier (ID) of the directory to which to add the address block.
- `IpRoutes`: IP address blocks, using CIDR format, of the traffic to route. This is often the IP address block of the DNS server used for your on-premises domain.
# Optional Parameters
- `UpdateSecurityGroupForDirectoryControllers`: If set to true, updates the inbound and outbound rules of the security group that has the description: \"AWS created security group for directory ID directory controllers.\" Following are the new rules: Inbound: Type: Custom UDP Rule, Protocol: UDP, Range: 88, Source: 0.0.0.0/0 Type: Custom UDP Rule, Protocol: UDP, Range: 123, Source: 0.0.0.0/0 Type: Custom UDP Rule, Protocol: UDP, Range: 138, Source: 0.0.0.0/0 Type: Custom UDP Rule, Protocol: UDP, Range: 389, Source: 0.0.0.0/0 Type: Custom UDP Rule, Protocol: UDP, Range: 464, Source: 0.0.0.0/0 Type: Custom UDP Rule, Protocol: UDP, Range: 445, Source: 0.0.0.0/0 Type: Custom TCP Rule, Protocol: TCP, Range: 88, Source: 0.0.0.0/0 Type: Custom TCP Rule, Protocol: TCP, Range: 135, Source: 0.0.0.0/0 Type: Custom TCP Rule, Protocol: TCP, Range: 445, Source: 0.0.0.0/0 Type: Custom TCP Rule, Protocol: TCP, Range: 464, Source: 0.0.0.0/0 Type: Custom TCP Rule, Protocol: TCP, Range: 636, Source: 0.0.0.0/0 Type: Custom TCP Rule, Protocol: TCP, Range: 1024-65535, Source: 0.0.0.0/0 Type: Custom TCP Rule, Protocol: TCP, Range: 3268-33269, Source: 0.0.0.0/0 Type: DNS (UDP), Protocol: UDP, Range: 53, Source: 0.0.0.0/0 Type: DNS (TCP), Protocol: TCP, Range: 53, Source: 0.0.0.0/0 Type: LDAP, Protocol: TCP, Range: 389, Source: 0.0.0.0/0 Type: All ICMP, Protocol: All, Range: N/A, Source: 0.0.0.0/0 Outbound: Type: All traffic, Protocol: All, Range: All, Destination: 0.0.0.0/0 These security rules impact an internal network interface that is not exposed publicly.
"""
add_ip_routes(DirectoryId, IpRoutes; aws_config::AWSConfig=global_aws_config()) = directory_service("AddIpRoutes", Dict{String, Any}("DirectoryId"=>DirectoryId, "IpRoutes"=>IpRoutes); aws_config=aws_config)
add_ip_routes(DirectoryId, IpRoutes, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("AddIpRoutes", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DirectoryId"=>DirectoryId, "IpRoutes"=>IpRoutes), args)); aws_config=aws_config)
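# An illustrative call of the wrappers above; the directory ID, CIDR block and
# description are placeholders. IpRoutes entries follow the IpRoute shape
# (CidrIp plus an optional Description), and the trailing Dict carries the
# optional UpdateSecurityGroupForDirectoryControllers flag.
add_ip_routes("d-1234567890",
    [Dict("CidrIp" => "10.0.0.0/24", "Description" => "on-prem DNS block")],
    Dict{String, Any}("UpdateSecurityGroupForDirectoryControllers" => true))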
"""
AddRegion()
Adds two domain controllers in the specified Region for the specified directory.
# Required Parameters
- `DirectoryId`: The identifier of the directory to which you want to add Region replication.
- `RegionName`: The name of the Region where you want to add domain controllers for replication. For example, us-east-1.
- `VPCSettings`:
"""
add_region(DirectoryId, RegionName, VPCSettings; aws_config::AWSConfig=global_aws_config()) = directory_service("AddRegion", Dict{String, Any}("DirectoryId"=>DirectoryId, "RegionName"=>RegionName, "VPCSettings"=>VPCSettings); aws_config=aws_config)
add_region(DirectoryId, RegionName, VPCSettings, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("AddRegion", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DirectoryId"=>DirectoryId, "RegionName"=>RegionName, "VPCSettings"=>VPCSettings), args)); aws_config=aws_config)
"""
AddTagsToResource()
Adds or overwrites one or more tags for the specified directory. Each directory can have a maximum of 50 tags. Each tag consists of a key and optional value. Tag keys must be unique to each resource.
# Required Parameters
- `ResourceId`: Identifier (ID) for the directory to which to add the tag.
- `Tags`: The tags to be assigned to the directory.
"""
add_tags_to_resource(ResourceId, Tags; aws_config::AWSConfig=global_aws_config()) = directory_service("AddTagsToResource", Dict{String, Any}("ResourceId"=>ResourceId, "Tags"=>Tags); aws_config=aws_config)
add_tags_to_resource(ResourceId, Tags, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("AddTagsToResource", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("ResourceId"=>ResourceId, "Tags"=>Tags), args)); aws_config=aws_config)
"""
CancelSchemaExtension()
Cancels an in-progress schema extension to a Microsoft AD directory. Once a schema extension has started replicating to all domain controllers, the task can no longer be canceled. A schema extension can be canceled during any of the following states; Initializing, CreatingSnapshot, and UpdatingSchema.
# Required Parameters
- `DirectoryId`: The identifier of the directory whose schema extension will be canceled.
- `SchemaExtensionId`: The identifier of the schema extension that will be canceled.
"""
cancel_schema_extension(DirectoryId, SchemaExtensionId; aws_config::AWSConfig=global_aws_config()) = directory_service("CancelSchemaExtension", Dict{String, Any}("DirectoryId"=>DirectoryId, "SchemaExtensionId"=>SchemaExtensionId); aws_config=aws_config)
cancel_schema_extension(DirectoryId, SchemaExtensionId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("CancelSchemaExtension", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DirectoryId"=>DirectoryId, "SchemaExtensionId"=>SchemaExtensionId), args)); aws_config=aws_config)
"""
ConnectDirectory()
Creates an AD Connector to connect to an on-premises directory. Before you call ConnectDirectory, ensure that all of the required permissions have been explicitly granted through a policy. For details about what permissions are required to run the ConnectDirectory operation, see AWS Directory Service API Permissions: Actions, Resources, and Conditions Reference.
# Required Parameters
- `ConnectSettings`: A DirectoryConnectSettings object that contains additional information for the operation.
- `Name`: The fully qualified name of the on-premises directory, such as corp.example.com.
- `Password`: The password for the on-premises user account.
- `Size`: The size of the directory.
# Optional Parameters
- `Description`: A description for the directory.
- `ShortName`: The NetBIOS name of the on-premises directory, such as CORP.
- `Tags`: The tags to be assigned to AD Connector.
"""
connect_directory(ConnectSettings, Name, Password, Size; aws_config::AWSConfig=global_aws_config()) = directory_service("ConnectDirectory", Dict{String, Any}("ConnectSettings"=>ConnectSettings, "Name"=>Name, "Password"=>Password, "Size"=>Size); aws_config=aws_config)
connect_directory(ConnectSettings, Name, Password, Size, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("ConnectDirectory", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("ConnectSettings"=>ConnectSettings, "Name"=>Name, "Password"=>Password, "Size"=>Size), args)); aws_config=aws_config)
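# A hypothetical ConnectSettings payload for connect_directory; the field
# names follow the DirectoryConnectSettings structure referenced above and
# all values are placeholders.
settings = Dict(
    "VpcId"            => "vpc-0123456789abcdef0",
    "SubnetIds"        => ["subnet-aaaa1111", "subnet-bbbb2222"],
    "CustomerDnsIps"   => ["10.0.0.2"],
    "CustomerUserName" => "Admin",
)
connect_directory(settings, "corp.example.com", "Passw0rd!", "Small")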
"""
CreateAlias()
Creates an alias for a directory and assigns the alias to the directory. The alias is used to construct the access URL for the directory, such as http://<alias>.awsapps.com. After an alias has been created, it cannot be deleted or reused, so this operation should only be used when absolutely necessary.
# Required Parameters
- `Alias`: The requested alias. The alias must be unique amongst all aliases in AWS. This operation throws an EntityAlreadyExistsException error if the alias already exists.
- `DirectoryId`: The identifier of the directory for which to create the alias.
"""
create_alias(Alias, DirectoryId; aws_config::AWSConfig=global_aws_config()) = directory_service("CreateAlias", Dict{String, Any}("Alias"=>Alias, "DirectoryId"=>DirectoryId); aws_config=aws_config)
create_alias(Alias, DirectoryId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("CreateAlias", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("Alias"=>Alias, "DirectoryId"=>DirectoryId), args)); aws_config=aws_config)
"""
CreateComputer()
Creates an Active Directory computer object in the specified directory.
# Required Parameters
- `ComputerName`: The name of the computer account.
- `DirectoryId`: The identifier of the directory in which to create the computer account.
- `Password`: A one-time password that is used to join the computer to the directory. You should generate a random, strong password to use for this parameter.
# Optional Parameters
- `ComputerAttributes`: An array of Attribute objects that contain any LDAP attributes to apply to the computer account.
- `OrganizationalUnitDistinguishedName`: The fully-qualified distinguished name of the organizational unit to place the computer account in.
"""
create_computer(ComputerName, DirectoryId, Password; aws_config::AWSConfig=global_aws_config()) = directory_service("CreateComputer", Dict{String, Any}("ComputerName"=>ComputerName, "DirectoryId"=>DirectoryId, "Password"=>Password); aws_config=aws_config)
create_computer(ComputerName, DirectoryId, Password, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("CreateComputer", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("ComputerName"=>ComputerName, "DirectoryId"=>DirectoryId, "Password"=>Password), args)); aws_config=aws_config)
"""
CreateConditionalForwarder()
Creates a conditional forwarder associated with your AWS directory. Conditional forwarders are required in order to set up a trust relationship with another domain. The conditional forwarder points to the trusted domain.
# Required Parameters
- `DirectoryId`: The directory ID of the AWS directory for which you are creating the conditional forwarder.
- `DnsIpAddrs`: The IP addresses of the remote DNS server associated with RemoteDomainName.
- `RemoteDomainName`: The fully qualified domain name (FQDN) of the remote domain with which you will set up a trust relationship.
"""
create_conditional_forwarder(DirectoryId, DnsIpAddrs, RemoteDomainName; aws_config::AWSConfig=global_aws_config()) = directory_service("CreateConditionalForwarder", Dict{String, Any}("DirectoryId"=>DirectoryId, "DnsIpAddrs"=>DnsIpAddrs, "RemoteDomainName"=>RemoteDomainName); aws_config=aws_config)
create_conditional_forwarder(DirectoryId, DnsIpAddrs, RemoteDomainName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("CreateConditionalForwarder", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DirectoryId"=>DirectoryId, "DnsIpAddrs"=>DnsIpAddrs, "RemoteDomainName"=>RemoteDomainName), args)); aws_config=aws_config)
"""
CreateDirectory()
Creates a Simple AD directory. For more information, see Simple Active Directory in the AWS Directory Service Admin Guide. Before you call CreateDirectory, ensure that all of the required permissions have been explicitly granted through a policy. For details about what permissions are required to run the CreateDirectory operation, see AWS Directory Service API Permissions: Actions, Resources, and Conditions Reference.
# Required Parameters
- `Name`: The fully qualified name for the directory, such as corp.example.com.
- `Password`: The password for the directory administrator. The directory creation process creates a directory administrator account with the user name Administrator and this password. If you need to change the password for the administrator account, you can use the ResetUserPassword API call. The regex pattern for this string is made up of the following conditions: Length (?=^.{8,64}) – Must be between 8 and 64 characters AND any 3 of the following password complexity rules required by Active Directory: Numbers and upper case and lowercase (?=.*\d)(?=.*[A-Z])(?=.*[a-z]) Numbers and special characters and lower case (?=.*\d)(?=.*[^A-Za-z0-9\s])(?=.*[a-z]) Special characters and upper case and lower case (?=.*[^A-Za-z0-9\s])(?=.*[A-Z])(?=.*[a-z]) Numbers and upper case and special characters (?=.*\d)(?=.*[A-Z])(?=.*[^A-Za-z0-9\s]) For additional information about how Active Directory passwords are enforced, see Password must meet complexity requirements on the Microsoft website.
- `Size`: The size of the directory.
# Optional Parameters
- `Description`: A description for the directory.
- `ShortName`: The NetBIOS name of the directory, such as CORP.
- `Tags`: The tags to be assigned to the Simple AD directory.
- `VpcSettings`: A DirectoryVpcSettings object that contains additional information for the operation.
"""
create_directory(Name, Password, Size; aws_config::AWSConfig=global_aws_config()) = directory_service("CreateDirectory", Dict{String, Any}("Name"=>Name, "Password"=>Password, "Size"=>Size); aws_config=aws_config)
create_directory(Name, Password, Size, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("CreateDirectory", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("Name"=>Name, "Password"=>Password, "Size"=>Size), args)); aws_config=aws_config)
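# A hedged usage sketch of the two generated call forms above: required
# arguments are positional; optional API parameters travel in the trailing
# Dict. All values below are placeholders.
create_directory("corp.example.com", "Str0ngPassw0rd!", "Small")
create_directory("corp.example.com", "Str0ngPassw0rd!", "Small",
    Dict{String, Any}("Description" => "demo directory", "ShortName" => "CORP"))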
"""
CreateLogSubscription()
Creates a subscription to forward real-time Directory Service domain controller security logs to the specified Amazon CloudWatch log group in your AWS account.
# Required Parameters
- `DirectoryId`: Identifier of the directory to which you want to subscribe and receive real-time logs to your specified CloudWatch log group.
- `LogGroupName`: The name of the CloudWatch log group where the real-time domain controller logs are forwarded.
"""
create_log_subscription(DirectoryId, LogGroupName; aws_config::AWSConfig=global_aws_config()) = directory_service("CreateLogSubscription", Dict{String, Any}("DirectoryId"=>DirectoryId, "LogGroupName"=>LogGroupName); aws_config=aws_config)
create_log_subscription(DirectoryId, LogGroupName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("CreateLogSubscription", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DirectoryId"=>DirectoryId, "LogGroupName"=>LogGroupName), args)); aws_config=aws_config)
"""
CreateMicrosoftAD()
Creates a Microsoft AD directory in the AWS Cloud. For more information, see AWS Managed Microsoft AD in the AWS Directory Service Admin Guide. Before you call CreateMicrosoftAD, ensure that all of the required permissions have been explicitly granted through a policy. For details about what permissions are required to run the CreateMicrosoftAD operation, see AWS Directory Service API Permissions: Actions, Resources, and Conditions Reference.
# Required Parameters
- `Name`: The fully qualified domain name for the AWS Managed Microsoft AD directory, such as corp.example.com. This name will resolve inside your VPC only. It does not need to be publicly resolvable.
- `Password`: The password for the default administrative user named Admin. If you need to change the password for the administrator account, you can use the ResetUserPassword API call.
- `VpcSettings`: Contains VPC information for the CreateDirectory or CreateMicrosoftAD operation.
# Optional Parameters
- `Description`: A description for the directory. This label will appear on the AWS console Directory Details page after the directory is created.
- `Edition`: AWS Managed Microsoft AD is available in two editions: Standard and Enterprise. Enterprise is the default.
- `ShortName`: The NetBIOS name for your domain, such as CORP. If you don't specify a NetBIOS name, it will default to the first part of your directory DNS. For example, CORP for the directory DNS corp.example.com.
- `Tags`: The tags to be assigned to the AWS Managed Microsoft AD directory.
"""
create_microsoft_ad(Name, Password, VpcSettings; aws_config::AWSConfig=global_aws_config()) = directory_service("CreateMicrosoftAD", Dict{String, Any}("Name"=>Name, "Password"=>Password, "VpcSettings"=>VpcSettings); aws_config=aws_config)
create_microsoft_ad(Name, Password, VpcSettings, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("CreateMicrosoftAD", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("Name"=>Name, "Password"=>Password, "VpcSettings"=>VpcSettings), args)); aws_config=aws_config)
"""
CreateSnapshot()
Creates a snapshot of a Simple AD or Microsoft AD directory in the AWS cloud. You cannot take snapshots of AD Connector directories.
# Required Parameters
- `DirectoryId`: The identifier of the directory of which to take a snapshot.
# Optional Parameters
- `Name`: The descriptive name to apply to the snapshot.
"""
create_snapshot(DirectoryId; aws_config::AWSConfig=global_aws_config()) = directory_service("CreateSnapshot", Dict{String, Any}("DirectoryId"=>DirectoryId); aws_config=aws_config)
create_snapshot(DirectoryId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("CreateSnapshot", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DirectoryId"=>DirectoryId), args)); aws_config=aws_config)
"""
CreateTrust()
AWS Directory Service for Microsoft Active Directory allows you to configure trust relationships. For example, you can establish a trust between your AWS Managed Microsoft AD directory, and your existing on-premises Microsoft Active Directory. This would allow you to provide users and groups access to resources in either domain, with a single set of credentials. This action initiates the creation of the AWS side of a trust relationship between an AWS Managed Microsoft AD directory and an external domain. You can create either a forest trust or an external trust.
# Required Parameters
- `DirectoryId`: The Directory ID of the AWS Managed Microsoft AD directory for which to establish the trust relationship.
- `RemoteDomainName`: The Fully Qualified Domain Name (FQDN) of the external domain for which to create the trust relationship.
- `TrustDirection`: The direction of the trust relationship.
- `TrustPassword`: The trust password. It must be the same password that was used when creating the trust relationship on the external domain.
# Optional Parameters
- `ConditionalForwarderIpAddrs`: The IP addresses of the remote DNS server associated with RemoteDomainName.
- `SelectiveAuth`: Optional parameter to enable selective authentication for the trust.
- `TrustType`: The trust relationship type. Forest is the default.
"""
create_trust(DirectoryId, RemoteDomainName, TrustDirection, TrustPassword; aws_config::AWSConfig=global_aws_config()) = directory_service("CreateTrust", Dict{String, Any}("DirectoryId"=>DirectoryId, "RemoteDomainName"=>RemoteDomainName, "TrustDirection"=>TrustDirection, "TrustPassword"=>TrustPassword); aws_config=aws_config)
create_trust(DirectoryId, RemoteDomainName, TrustDirection, TrustPassword, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("CreateTrust", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DirectoryId"=>DirectoryId, "RemoteDomainName"=>RemoteDomainName, "TrustDirection"=>TrustDirection, "TrustPassword"=>TrustPassword), args)); aws_config=aws_config)
"""
DeleteConditionalForwarder()
Deletes a conditional forwarder that has been set up for your AWS directory.
# Required Parameters
- `DirectoryId`: The directory ID for which you are deleting the conditional forwarder.
- `RemoteDomainName`: The fully qualified domain name (FQDN) of the remote domain with which you are deleting the conditional forwarder.
"""
delete_conditional_forwarder(DirectoryId, RemoteDomainName; aws_config::AWSConfig=global_aws_config()) = directory_service("DeleteConditionalForwarder", Dict{String, Any}("DirectoryId"=>DirectoryId, "RemoteDomainName"=>RemoteDomainName); aws_config=aws_config)
delete_conditional_forwarder(DirectoryId, RemoteDomainName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("DeleteConditionalForwarder", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DirectoryId"=>DirectoryId, "RemoteDomainName"=>RemoteDomainName), args)); aws_config=aws_config)
"""
DeleteDirectory()
Deletes an AWS Directory Service directory. Before you call DeleteDirectory, ensure that all of the required permissions have been explicitly granted through a policy. For details about what permissions are required to run the DeleteDirectory operation, see AWS Directory Service API Permissions: Actions, Resources, and Conditions Reference.
# Required Parameters
- `DirectoryId`: The identifier of the directory to delete.
"""
delete_directory(DirectoryId; aws_config::AWSConfig=global_aws_config()) = directory_service("DeleteDirectory", Dict{String, Any}("DirectoryId"=>DirectoryId); aws_config=aws_config)
delete_directory(DirectoryId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("DeleteDirectory", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DirectoryId"=>DirectoryId), args)); aws_config=aws_config)
"""
DeleteLogSubscription()
Deletes the specified log subscription.
# Required Parameters
- `DirectoryId`: Identifier of the directory whose log subscription you want to delete.
"""
delete_log_subscription(DirectoryId; aws_config::AWSConfig=global_aws_config()) = directory_service("DeleteLogSubscription", Dict{String, Any}("DirectoryId"=>DirectoryId); aws_config=aws_config)
delete_log_subscription(DirectoryId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("DeleteLogSubscription", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DirectoryId"=>DirectoryId), args)); aws_config=aws_config)
"""
DeleteSnapshot()
Deletes a directory snapshot.
# Required Parameters
- `SnapshotId`: The identifier of the directory snapshot to be deleted.
"""
delete_snapshot(SnapshotId; aws_config::AWSConfig=global_aws_config()) = directory_service("DeleteSnapshot", Dict{String, Any}("SnapshotId"=>SnapshotId); aws_config=aws_config)
delete_snapshot(SnapshotId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("DeleteSnapshot", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("SnapshotId"=>SnapshotId), args)); aws_config=aws_config)
"""
DeleteTrust()
Deletes an existing trust relationship between your AWS Managed Microsoft AD directory and an external domain.
# Required Parameters
- `TrustId`: The Trust ID of the trust relationship to be deleted.
# Optional Parameters
- `DeleteAssociatedConditionalForwarder`: Delete a conditional forwarder as part of a DeleteTrustRequest.
"""
delete_trust(TrustId; aws_config::AWSConfig=global_aws_config()) = directory_service("DeleteTrust", Dict{String, Any}("TrustId"=>TrustId); aws_config=aws_config)
delete_trust(TrustId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("DeleteTrust", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("TrustId"=>TrustId), args)); aws_config=aws_config)
"""
DeregisterCertificate()
Deletes from the system the certificate that was registered for a secured LDAP connection.
# Required Parameters
- `CertificateId`: The identifier of the certificate.
- `DirectoryId`: The identifier of the directory.
"""
deregister_certificate(CertificateId, DirectoryId; aws_config::AWSConfig=global_aws_config()) = directory_service("DeregisterCertificate", Dict{String, Any}("CertificateId"=>CertificateId, "DirectoryId"=>DirectoryId); aws_config=aws_config)
deregister_certificate(CertificateId, DirectoryId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("DeregisterCertificate", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("CertificateId"=>CertificateId, "DirectoryId"=>DirectoryId), args)); aws_config=aws_config)
"""
DeregisterEventTopic()
Removes the specified directory as a publisher to the specified SNS topic.
# Required Parameters
- `DirectoryId`: The Directory ID to remove as a publisher. This directory will no longer send messages to the specified SNS topic.
- `TopicName`: The name of the SNS topic from which to remove the directory as a publisher.
"""
deregister_event_topic(DirectoryId, TopicName; aws_config::AWSConfig=global_aws_config()) = directory_service("DeregisterEventTopic", Dict{String, Any}("DirectoryId"=>DirectoryId, "TopicName"=>TopicName); aws_config=aws_config)
deregister_event_topic(DirectoryId, TopicName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("DeregisterEventTopic", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DirectoryId"=>DirectoryId, "TopicName"=>TopicName), args)); aws_config=aws_config)
"""
DescribeCertificate()
Displays information about the certificate registered for a secured LDAP connection.
# Required Parameters
- `CertificateId`: The identifier of the certificate.
- `DirectoryId`: The identifier of the directory.
"""
describe_certificate(CertificateId, DirectoryId; aws_config::AWSConfig=global_aws_config()) = directory_service("DescribeCertificate", Dict{String, Any}("CertificateId"=>CertificateId, "DirectoryId"=>DirectoryId); aws_config=aws_config)
describe_certificate(CertificateId, DirectoryId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("DescribeCertificate", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("CertificateId"=>CertificateId, "DirectoryId"=>DirectoryId), args)); aws_config=aws_config)
"""
DescribeConditionalForwarders()
Obtains information about the conditional forwarders for this account. If no input parameters are provided for RemoteDomainNames, this request describes all conditional forwarders for the specified directory ID.
# Required Parameters
- `DirectoryId`: The directory ID for which to get the list of associated conditional forwarders.
# Optional Parameters
- `RemoteDomainNames`: The fully qualified domain names (FQDN) of the remote domains for which to get the list of associated conditional forwarders. If this member is null, all conditional forwarders are returned.
"""
describe_conditional_forwarders(DirectoryId; aws_config::AWSConfig=global_aws_config()) = directory_service("DescribeConditionalForwarders", Dict{String, Any}("DirectoryId"=>DirectoryId); aws_config=aws_config)
describe_conditional_forwarders(DirectoryId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("DescribeConditionalForwarders", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DirectoryId"=>DirectoryId), args)); aws_config=aws_config)
"""
DescribeDirectories()
Obtains information about the directories that belong to this account. You can retrieve information about specific directories by passing the directory identifiers in the DirectoryIds parameter. Otherwise, all directories that belong to the current account are returned. This operation supports pagination with the use of the NextToken request and response parameters. If more results are available, the DescribeDirectoriesResult.NextToken member contains a token that you pass in the next call to DescribeDirectories to retrieve the next set of items. You can also specify a maximum number of return results with the Limit parameter.
# Optional Parameters
- `DirectoryIds`: A list of identifiers of the directories for which to obtain the information. If this member is null, all directories that belong to the current account are returned. An empty list results in an InvalidParameterException being thrown.
- `Limit`: The maximum number of items to return. If this value is zero, the maximum number of items is specified by the limitations of the operation.
- `NextToken`: The DescribeDirectoriesResult.NextToken value from a previous call to DescribeDirectories. Pass null if this is the first call.
"""
describe_directories(; aws_config::AWSConfig=global_aws_config()) = directory_service("DescribeDirectories"; aws_config=aws_config)
describe_directories(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("DescribeDirectories", args; aws_config=aws_config)
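# A minimal pagination sketch following the NextToken handshake described
# above, assuming the default Dict-like response parsing of AWS.jl; the
# helper name all_directories is illustrative.
function all_directories(; aws_config = global_aws_config())
    dirs = Any[]
    args = Dict{String, Any}()
    while true
        resp = describe_directories(args; aws_config = aws_config)
        append!(dirs, get(resp, "DirectoryDescriptions", []))
        token = get(resp, "NextToken", nothing)
        token === nothing && return dirs
        args["NextToken"] = token
    end
end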
"""
DescribeDomainControllers()
Provides information about any domain controllers in your directory.
# Required Parameters
- `DirectoryId`: Identifier of the directory for which to retrieve the domain controller information.
# Optional Parameters
- `DomainControllerIds`: A list of identifiers for the domain controllers whose information will be provided.
- `Limit`: The maximum number of items to return.
- `NextToken`: The DescribeDomainControllers.NextToken value from a previous call to DescribeDomainControllers. Pass null if this is the first call.
"""
describe_domain_controllers(DirectoryId; aws_config::AWSConfig=global_aws_config()) = directory_service("DescribeDomainControllers", Dict{String, Any}("DirectoryId"=>DirectoryId); aws_config=aws_config)
describe_domain_controllers(DirectoryId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("DescribeDomainControllers", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DirectoryId"=>DirectoryId), args)); aws_config=aws_config)
"""
DescribeEventTopics()
Obtains information about which SNS topics receive status messages from the specified directory. If no input parameters are provided, such as DirectoryId or TopicName, this request describes all of the associations in the account.
# Optional Parameters
- `DirectoryId`: The Directory ID for which to get the list of associated SNS topics. If this member is null, associations for all Directory IDs are returned.
- `TopicNames`: A list of SNS topic names for which to obtain the information. If this member is null, all associations for the specified Directory ID are returned. An empty list results in an InvalidParameterException being thrown.
"""
describe_event_topics(; aws_config::AWSConfig=global_aws_config()) = directory_service("DescribeEventTopics"; aws_config=aws_config)
describe_event_topics(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("DescribeEventTopics", args; aws_config=aws_config)
"""
DescribeLDAPSSettings()
Describes the status of LDAP security for the specified directory.
# Required Parameters
- `DirectoryId`: The identifier of the directory.
# Optional Parameters
- `Limit`: Specifies the number of items that should be displayed on one page.
- `NextToken`: The type of next token used for pagination.
- `Type`: The type of LDAP security to enable. Currently only the value Client is supported.
"""
describe_ldapssettings(DirectoryId; aws_config::AWSConfig=global_aws_config()) = directory_service("DescribeLDAPSSettings", Dict{String, Any}("DirectoryId"=>DirectoryId); aws_config=aws_config)
describe_ldapssettings(DirectoryId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("DescribeLDAPSSettings", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DirectoryId"=>DirectoryId), args)); aws_config=aws_config)
"""
DescribeRegions()
Provides information about the Regions that are configured for multi-Region replication.
# Required Parameters
- `DirectoryId`: The identifier of the directory.
# Optional Parameters
- `NextToken`: The DescribeRegionsResult.NextToken value from a previous call to DescribeRegions. Pass null if this is the first call.
- `RegionName`: The name of the Region. For example, us-east-1.
"""
describe_regions(DirectoryId; aws_config::AWSConfig=global_aws_config()) = directory_service("DescribeRegions", Dict{String, Any}("DirectoryId"=>DirectoryId); aws_config=aws_config)
describe_regions(DirectoryId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("DescribeRegions", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DirectoryId"=>DirectoryId), args)); aws_config=aws_config)
"""
DescribeSharedDirectories()
Returns the shared directories in your account.
# Required Parameters
- `OwnerDirectoryId`: Returns the identifier of the directory in the directory owner account.
# Optional Parameters
- `Limit`: The number of shared directories to return in the response object.
- `NextToken`: The DescribeSharedDirectoriesResult.NextToken value from a previous call to DescribeSharedDirectories. Pass null if this is the first call.
- `SharedDirectoryIds`: A list of identifiers of all shared directories in your account.
"""
describe_shared_directories(OwnerDirectoryId; aws_config::AWSConfig=global_aws_config()) = directory_service("DescribeSharedDirectories", Dict{String, Any}("OwnerDirectoryId"=>OwnerDirectoryId); aws_config=aws_config)
describe_shared_directories(OwnerDirectoryId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("DescribeSharedDirectories", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("OwnerDirectoryId"=>OwnerDirectoryId), args)); aws_config=aws_config)
"""
DescribeSnapshots()
Obtains information about the directory snapshots that belong to this account. This operation supports pagination with the use of the NextToken request and response parameters. If more results are available, the DescribeSnapshots.NextToken member contains a token that you pass in the next call to DescribeSnapshots to retrieve the next set of items. You can also specify a maximum number of return results with the Limit parameter.
# Optional Parameters
- `DirectoryId`: The identifier of the directory for which to retrieve snapshot information.
- `Limit`: The maximum number of objects to return.
- `NextToken`: The DescribeSnapshotsResult.NextToken value from a previous call to DescribeSnapshots. Pass null if this is the first call.
- `SnapshotIds`: A list of identifiers of the snapshots to obtain the information for. If this member is null or empty, all snapshots are returned using the Limit and NextToken members.
"""
describe_snapshots(; aws_config::AWSConfig=global_aws_config()) = directory_service("DescribeSnapshots"; aws_config=aws_config)
describe_snapshots(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("DescribeSnapshots", args; aws_config=aws_config)
"""
DescribeTrusts()
Obtains information about the trust relationships for this account. If no input parameters are provided, such as DirectoryId or TrustIds, this request describes all the trust relationships belonging to the account.
# Optional Parameters
- `DirectoryId`: The Directory ID of the AWS directory that is a part of the requested trust relationship.
- `Limit`: The maximum number of objects to return.
- `NextToken`: The DescribeTrustsResult.NextToken value from a previous call to DescribeTrusts. Pass null if this is the first call.
- `TrustIds`: A list of identifiers of the trust relationships for which to obtain the information. If this member is null, all trust relationships that belong to the current account are returned. An empty list results in an InvalidParameterException being thrown.
"""
describe_trusts(; aws_config::AWSConfig=global_aws_config()) = directory_service("DescribeTrusts"; aws_config=aws_config)
describe_trusts(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("DescribeTrusts", args; aws_config=aws_config)
"""
DisableClientAuthentication()
Disable client authentication for smart cards.
# Required Parameters
- `DirectoryId`: The identifier of the directory in which to disable client authentication for smart cards.
- `Type`: The type of client authentication to disable.
"""
disable_client_authentication(DirectoryId, Type; aws_config::AWSConfig=global_aws_config()) = directory_service("DisableClientAuthentication", Dict{String, Any}("DirectoryId"=>DirectoryId, "Type"=>Type); aws_config=aws_config)
disable_client_authentication(DirectoryId, Type, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("DisableClientAuthentication", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DirectoryId"=>DirectoryId, "Type"=>Type), args)); aws_config=aws_config)
"""
DisableLDAPS()
Deactivates LDAP secure calls for the specified directory.
# Required Parameters
- `DirectoryId`: The identifier of the directory.
- `Type`: The type of LDAP security to enable. Currently only the value Client is supported.
"""
disable_ldaps(DirectoryId, Type; aws_config::AWSConfig=global_aws_config()) = directory_service("DisableLDAPS", Dict{String, Any}("DirectoryId"=>DirectoryId, "Type"=>Type); aws_config=aws_config)
disable_ldaps(DirectoryId, Type, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("DisableLDAPS", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DirectoryId"=>DirectoryId, "Type"=>Type), args)); aws_config=aws_config)
"""
DisableRadius()
Disables multi-factor authentication (MFA) with the Remote Authentication Dial In User Service (RADIUS) server for an AD Connector or Microsoft AD directory.
# Required Parameters
- `DirectoryId`: The identifier of the directory for which to disable MFA.
"""
disable_radius(DirectoryId; aws_config::AWSConfig=global_aws_config()) = directory_service("DisableRadius", Dict{String, Any}("DirectoryId"=>DirectoryId); aws_config=aws_config)
disable_radius(DirectoryId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("DisableRadius", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DirectoryId"=>DirectoryId), args)); aws_config=aws_config)
"""
DisableSso()
Disables single sign-on for a directory.
# Required Parameters
- `DirectoryId`: The identifier of the directory for which to disable single sign-on.
# Optional Parameters
- `Password`: The password of an alternate account to use to disable single sign-on. This is only used for AD Connector directories. For more information, see the UserName parameter.
- `UserName`: The username of an alternate account to use to disable single sign-on. This is only used for AD Connector directories. This account must have privileges to remove a service principal name. If the AD Connector service account does not have privileges to remove a service principal name, you can specify an alternate account with the UserName and Password parameters. These credentials are only used to disable single sign-on and are not stored by the service. The AD Connector service account is not changed.
"""
disable_sso(DirectoryId; aws_config::AWSConfig=global_aws_config()) = directory_service("DisableSso", Dict{String, Any}("DirectoryId"=>DirectoryId); aws_config=aws_config)
disable_sso(DirectoryId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("DisableSso", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DirectoryId"=>DirectoryId), args)); aws_config=aws_config)
"""
EnableClientAuthentication()
Enable client authentication for smart cards.
# Required Parameters
- `DirectoryId`: The identifier of the directory in which to enable client authentication for smart cards.
- `Type`: The type of client authentication to enable.
"""
enable_client_authentication(DirectoryId, Type; aws_config::AWSConfig=global_aws_config()) = directory_service("EnableClientAuthentication", Dict{String, Any}("DirectoryId"=>DirectoryId, "Type"=>Type); aws_config=aws_config)
enable_client_authentication(DirectoryId, Type, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("EnableClientAuthentication", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DirectoryId"=>DirectoryId, "Type"=>Type), args)); aws_config=aws_config)
"""
EnableLDAPS()
Activates the switch for the specified directory to always use LDAP secure calls.
# Required Parameters
- `DirectoryId`: The identifier of the directory.
- `Type`: The type of LDAP security to enable. Currently only the value Client is supported.
"""
enable_ldaps(DirectoryId, Type; aws_config::AWSConfig=global_aws_config()) = directory_service("EnableLDAPS", Dict{String, Any}("DirectoryId"=>DirectoryId, "Type"=>Type); aws_config=aws_config)
enable_ldaps(DirectoryId, Type, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("EnableLDAPS", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DirectoryId"=>DirectoryId, "Type"=>Type), args)); aws_config=aws_config)
"""
EnableRadius()
Enables multi-factor authentication (MFA) with the Remote Authentication Dial In User Service (RADIUS) server for an AD Connector or Microsoft AD directory.
# Required Parameters
- `DirectoryId`: The identifier of the directory for which to enable MFA.
- `RadiusSettings`: A RadiusSettings object that contains information about the RADIUS server.
"""
enable_radius(DirectoryId, RadiusSettings; aws_config::AWSConfig=global_aws_config()) = directory_service("EnableRadius", Dict{String, Any}("DirectoryId"=>DirectoryId, "RadiusSettings"=>RadiusSettings); aws_config=aws_config)
enable_radius(DirectoryId, RadiusSettings, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("EnableRadius", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DirectoryId"=>DirectoryId, "RadiusSettings"=>RadiusSettings), args)); aws_config=aws_config)
"""
EnableSso()
Enables single sign-on for a directory. Single sign-on allows users in your directory to access certain AWS services from a computer joined to the directory without having to enter their credentials separately.
# Required Parameters
- `DirectoryId`: The identifier of the directory for which to enable single sign-on.
# Optional Parameters
- `Password`: The password of an alternate account to use to enable single sign-on. This is only used for AD Connector directories. For more information, see the UserName parameter.
- `UserName`: The username of an alternate account to use to enable single sign-on. This is only used for AD Connector directories. This account must have privileges to add a service principal name. If the AD Connector service account does not have privileges to add a service principal name, you can specify an alternate account with the UserName and Password parameters. These credentials are only used to enable single sign-on and are not stored by the service. The AD Connector service account is not changed.
"""
enable_sso(DirectoryId; aws_config::AWSConfig=global_aws_config()) = directory_service("EnableSso", Dict{String, Any}("DirectoryId"=>DirectoryId); aws_config=aws_config)
enable_sso(DirectoryId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("EnableSso", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DirectoryId"=>DirectoryId), args)); aws_config=aws_config)
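# Hedged usage sketch: optional parameters such as UserName and Password travel
# in the `args` dict of the two-argument method, which is merged over the
# required parameters. The directory id and credentials below are placeholders.
#
#     enable_sso("d-1234567890",
#                Dict{String, Any}("UserName" => "svc-sso", "Password" => "..."))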
"""
GetDirectoryLimits()
Obtains directory limit information for the current Region.
"""
get_directory_limits(; aws_config::AWSConfig=global_aws_config()) = directory_service("GetDirectoryLimits"; aws_config=aws_config)
get_directory_limits(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("GetDirectoryLimits", args; aws_config=aws_config)
"""
GetSnapshotLimits()
Obtains the manual snapshot limits for a directory.
# Required Parameters
- `DirectoryId`: Contains the identifier of the directory to obtain the limits for.
"""
get_snapshot_limits(DirectoryId; aws_config::AWSConfig=global_aws_config()) = directory_service("GetSnapshotLimits", Dict{String, Any}("DirectoryId"=>DirectoryId); aws_config=aws_config)
get_snapshot_limits(DirectoryId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("GetSnapshotLimits", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DirectoryId"=>DirectoryId), args)); aws_config=aws_config)
"""
ListCertificates()
For the specified directory, lists all the certificates registered for a secured LDAP connection.
# Required Parameters
- `DirectoryId`: The identifier of the directory.
# Optional Parameters
- `Limit`: The number of items that should show up on one page.
- `NextToken`: A token for requesting another page of certificates if the NextToken response element indicates that more certificates are available. Use the value of the returned NextToken element in your request until the token comes back as null. Pass null if this is the first call.
"""
list_certificates(DirectoryId; aws_config::AWSConfig=global_aws_config()) = directory_service("ListCertificates", Dict{String, Any}("DirectoryId"=>DirectoryId); aws_config=aws_config)
list_certificates(DirectoryId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("ListCertificates", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DirectoryId"=>DirectoryId), args)); aws_config=aws_config)
"""
ListIpRoutes()
Lists the address blocks that you have added to a directory.
# Required Parameters
- `DirectoryId`: Identifier (ID) of the directory for which you want to retrieve the IP addresses.
# Optional Parameters
- `Limit`: Maximum number of items to return. If this value is zero, the maximum number of items is specified by the limitations of the operation.
- `NextToken`: The ListIpRoutes.NextToken value from a previous call to ListIpRoutes. Pass null if this is the first call.
"""
list_ip_routes(DirectoryId; aws_config::AWSConfig=global_aws_config()) = directory_service("ListIpRoutes", Dict{String, Any}("DirectoryId"=>DirectoryId); aws_config=aws_config)
list_ip_routes(DirectoryId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("ListIpRoutes", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DirectoryId"=>DirectoryId), args)); aws_config=aws_config)
"""
ListLogSubscriptions()
Lists the active log subscriptions for the AWS account.
# Optional Parameters
- `DirectoryId`: If a DirectoryID is provided, lists only the log subscription associated with that directory. If no DirectoryId is provided, lists all log subscriptions associated with your AWS account. If there are no log subscriptions for the AWS account or the directory, an empty list will be returned.
- `Limit`: The maximum number of items returned.
- `NextToken`: The token for the next set of items to return.
"""
list_log_subscriptions(; aws_config::AWSConfig=global_aws_config()) = directory_service("ListLogSubscriptions"; aws_config=aws_config)
list_log_subscriptions(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("ListLogSubscriptions", args; aws_config=aws_config)
"""
ListSchemaExtensions()
Lists all schema extensions applied to a Microsoft AD Directory.
# Required Parameters
- `DirectoryId`: The identifier of the directory from which to retrieve the schema extension information.
# Optional Parameters
- `Limit`: The maximum number of items to return.
- `NextToken`: The ListSchemaExtensions.NextToken value from a previous call to ListSchemaExtensions. Pass null if this is the first call.
"""
list_schema_extensions(DirectoryId; aws_config::AWSConfig=global_aws_config()) = directory_service("ListSchemaExtensions", Dict{String, Any}("DirectoryId"=>DirectoryId); aws_config=aws_config)
list_schema_extensions(DirectoryId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("ListSchemaExtensions", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DirectoryId"=>DirectoryId), args)); aws_config=aws_config)
"""
ListTagsForResource()
Lists all tags on a directory.
# Required Parameters
- `ResourceId`: Identifier (ID) of the directory for which you want to retrieve tags.
# Optional Parameters
- `Limit`: Reserved for future use.
- `NextToken`: Reserved for future use.
"""
list_tags_for_resource(ResourceId; aws_config::AWSConfig=global_aws_config()) = directory_service("ListTagsForResource", Dict{String, Any}("ResourceId"=>ResourceId); aws_config=aws_config)
list_tags_for_resource(ResourceId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("ListTagsForResource", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("ResourceId"=>ResourceId), args)); aws_config=aws_config)
"""
RegisterCertificate()
Registers a certificate for secured LDAP connection.
# Required Parameters
- `CertificateData`: The certificate PEM string that needs to be registered.
- `DirectoryId`: The identifier of the directory.
# Optional Parameters
- `ClientCertAuthSettings`: A ClientCertAuthSettings object that contains client certificate authentication settings.
- `Type`: The certificate type to register for the request.
"""
register_certificate(CertificateData, DirectoryId; aws_config::AWSConfig=global_aws_config()) = directory_service("RegisterCertificate", Dict{String, Any}("CertificateData"=>CertificateData, "DirectoryId"=>DirectoryId); aws_config=aws_config)
register_certificate(CertificateData, DirectoryId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("RegisterCertificate", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("CertificateData"=>CertificateData, "DirectoryId"=>DirectoryId), args)); aws_config=aws_config)
"""
RegisterEventTopic()
Associates a directory with an SNS topic. This establishes the directory as a publisher to the specified SNS topic. You can then receive email or text (SMS) messages when the status of your directory changes. You get notified if your directory goes from an Active status to an Impaired or Inoperable status. You also receive a notification when the directory returns to an Active status.
# Required Parameters
- `DirectoryId`: The Directory ID that will publish status messages to the SNS topic.
- `TopicName`: The SNS topic name to which the directory will publish status messages. This SNS topic must be in the same region as the specified Directory ID.
"""
register_event_topic(DirectoryId, TopicName; aws_config::AWSConfig=global_aws_config()) = directory_service("RegisterEventTopic", Dict{String, Any}("DirectoryId"=>DirectoryId, "TopicName"=>TopicName); aws_config=aws_config)
register_event_topic(DirectoryId, TopicName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("RegisterEventTopic", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DirectoryId"=>DirectoryId, "TopicName"=>TopicName), args)); aws_config=aws_config)
"""
RejectSharedDirectory()
Rejects a directory sharing request that was sent from the directory owner account.
# Required Parameters
- `SharedDirectoryId`: Identifier of the shared directory in the directory consumer account. This identifier is different for each directory owner account.
"""
reject_shared_directory(SharedDirectoryId; aws_config::AWSConfig=global_aws_config()) = directory_service("RejectSharedDirectory", Dict{String, Any}("SharedDirectoryId"=>SharedDirectoryId); aws_config=aws_config)
reject_shared_directory(SharedDirectoryId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("RejectSharedDirectory", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("SharedDirectoryId"=>SharedDirectoryId), args)); aws_config=aws_config)
"""
RemoveIpRoutes()
Removes IP address blocks from a directory.
# Required Parameters
- `CidrIps`: IP address blocks that you want to remove.
- `DirectoryId`: Identifier (ID) of the directory from which you want to remove the IP addresses.
"""
remove_ip_routes(CidrIps, DirectoryId; aws_config::AWSConfig=global_aws_config()) = directory_service("RemoveIpRoutes", Dict{String, Any}("CidrIps"=>CidrIps, "DirectoryId"=>DirectoryId); aws_config=aws_config)
remove_ip_routes(CidrIps, DirectoryId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("RemoveIpRoutes", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("CidrIps"=>CidrIps, "DirectoryId"=>DirectoryId), args)); aws_config=aws_config)
"""
RemoveRegion()
Stops all replication and removes the domain controllers from the specified Region. You cannot remove the primary Region with this operation. Instead, use the DeleteDirectory API.
# Required Parameters
- `DirectoryId`: The identifier of the directory for which you want to remove Region replication.
"""
remove_region(DirectoryId; aws_config::AWSConfig=global_aws_config()) = directory_service("RemoveRegion", Dict{String, Any}("DirectoryId"=>DirectoryId); aws_config=aws_config)
remove_region(DirectoryId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("RemoveRegion", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DirectoryId"=>DirectoryId), args)); aws_config=aws_config)
"""
RemoveTagsFromResource()
Removes tags from a directory.
# Required Parameters
- `ResourceId`: Identifier (ID) of the directory from which to remove the tag.
- `TagKeys`: The tag key (name) of the tag to be removed.
"""
remove_tags_from_resource(ResourceId, TagKeys; aws_config::AWSConfig=global_aws_config()) = directory_service("RemoveTagsFromResource", Dict{String, Any}("ResourceId"=>ResourceId, "TagKeys"=>TagKeys); aws_config=aws_config)
remove_tags_from_resource(ResourceId, TagKeys, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("RemoveTagsFromResource", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("ResourceId"=>ResourceId, "TagKeys"=>TagKeys), args)); aws_config=aws_config)
"""
ResetUserPassword()
Resets the password for any user in your AWS Managed Microsoft AD or Simple AD directory. You can reset the password for any user in your directory with the following exceptions: For Simple AD, you cannot reset the password for any user that is a member of either the Domain Admins or Enterprise Admins group except for the administrator user. For AWS Managed Microsoft AD, you can only reset the password for a user that is in an OU based off of the NetBIOS name that you typed when you created your directory. For example, you cannot reset the password for a user in the AWS Reserved OU. For more information about the OU structure for an AWS Managed Microsoft AD directory, see What Gets Created in the AWS Directory Service Administration Guide.
# Required Parameters
- `DirectoryId`: Identifier of the AWS Managed Microsoft AD or Simple AD directory in which the user resides.
- `NewPassword`: The new password that will be reset.
- `UserName`: The user name of the user whose password will be reset.
"""
reset_user_password(DirectoryId, NewPassword, UserName; aws_config::AWSConfig=global_aws_config()) = directory_service("ResetUserPassword", Dict{String, Any}("DirectoryId"=>DirectoryId, "NewPassword"=>NewPassword, "UserName"=>UserName); aws_config=aws_config)
reset_user_password(DirectoryId, NewPassword, UserName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("ResetUserPassword", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DirectoryId"=>DirectoryId, "NewPassword"=>NewPassword, "UserName"=>UserName), args)); aws_config=aws_config)
"""
RestoreFromSnapshot()
Restores a directory using an existing directory snapshot. When you restore a directory from a snapshot, any changes made to the directory after the snapshot date are overwritten. This action returns as soon as the restore operation is initiated. You can monitor the progress of the restore operation by calling the DescribeDirectories operation with the directory identifier. When the DirectoryDescription.Stage value changes to Active, the restore operation is complete.
# Required Parameters
- `SnapshotId`: The identifier of the snapshot to restore from.
"""
restore_from_snapshot(SnapshotId; aws_config::AWSConfig=global_aws_config()) = directory_service("RestoreFromSnapshot", Dict{String, Any}("SnapshotId"=>SnapshotId); aws_config=aws_config)
restore_from_snapshot(SnapshotId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("RestoreFromSnapshot", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("SnapshotId"=>SnapshotId), args)); aws_config=aws_config)
"""
ShareDirectory()
Shares a specified directory (DirectoryId) in your AWS account (directory owner) with another AWS account (directory consumer). With this operation you can use your directory from any AWS account and from any Amazon VPC within an AWS Region. When you share your AWS Managed Microsoft AD directory, AWS Directory Service creates a shared directory in the directory consumer account. This shared directory contains the metadata to provide access to the directory within the directory owner account. The shared directory is visible in all VPCs in the directory consumer account. The ShareMethod parameter determines whether the specified directory can be shared between AWS accounts inside the same AWS organization (ORGANIZATIONS). It also determines whether you can share the directory with any other AWS account either inside or outside of the organization (HANDSHAKE). The ShareNotes parameter is only used when HANDSHAKE is called, which sends a directory sharing request to the directory consumer.
# Required Parameters
- `DirectoryId`: Identifier of the AWS Managed Microsoft AD directory that you want to share with other AWS accounts.
- `ShareMethod`: The method used when sharing a directory to determine whether the directory should be shared within your AWS organization (ORGANIZATIONS) or with any AWS account by sending a directory sharing request (HANDSHAKE).
- `ShareTarget`: Identifier for the directory consumer account with whom the directory is to be shared.
# Optional Parameters
- `ShareNotes`: A directory share request that is sent by the directory owner to the directory consumer. The request includes a typed message to help the directory consumer administrator determine whether to approve or reject the share invitation.
"""
share_directory(DirectoryId, ShareMethod, ShareTarget; aws_config::AWSConfig=global_aws_config()) = directory_service("ShareDirectory", Dict{String, Any}("DirectoryId"=>DirectoryId, "ShareMethod"=>ShareMethod, "ShareTarget"=>ShareTarget); aws_config=aws_config)
share_directory(DirectoryId, ShareMethod, ShareTarget, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("ShareDirectory", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DirectoryId"=>DirectoryId, "ShareMethod"=>ShareMethod, "ShareTarget"=>ShareTarget), args)); aws_config=aws_config)
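# Hedged usage sketch: ShareTarget is assumed here to be an object with an "Id"
# (the consumer AWS account id) and a "Type" of "ACCOUNT"; both ids below are
# placeholders.
#
#     share_directory("d-1234567890", "HANDSHAKE",
#                     Dict{String, Any}("Id" => "111122223333", "Type" => "ACCOUNT"))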
"""
StartSchemaExtension()
Applies a schema extension to a Microsoft AD directory.
# Required Parameters
- `CreateSnapshotBeforeSchemaExtension`: If true, creates a snapshot of the directory before applying the schema extension.
- `Description`: A description of the schema extension.
- `DirectoryId`: The identifier of the directory to which the schema extension will be applied.
- `LdifContent`: The LDIF file represented as a string. To construct the LdifContent string, precede each line as it would be formatted in an ldif file with \\n. See the example request below for more details. The file size can be no larger than 1MB.
"""
start_schema_extension(CreateSnapshotBeforeSchemaExtension, Description, DirectoryId, LdifContent; aws_config::AWSConfig=global_aws_config()) = directory_service("StartSchemaExtension", Dict{String, Any}("CreateSnapshotBeforeSchemaExtension"=>CreateSnapshotBeforeSchemaExtension, "Description"=>Description, "DirectoryId"=>DirectoryId, "LdifContent"=>LdifContent); aws_config=aws_config)
start_schema_extension(CreateSnapshotBeforeSchemaExtension, Description, DirectoryId, LdifContent, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("StartSchemaExtension", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("CreateSnapshotBeforeSchemaExtension"=>CreateSnapshotBeforeSchemaExtension, "Description"=>Description, "DirectoryId"=>DirectoryId, "LdifContent"=>LdifContent), args)); aws_config=aws_config)
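# Hedged sketch of an LdifContent string using the literal "\n" separators the
# docstring describes; the schema attribute and ids are made-up placeholders.
#
#     ldif = "dn: CN=myAttr,CN=Schema,CN=Configuration,DC=example,DC=com\n" *
#            "changetype: add\n" *
#            "objectClass: attributeSchema"
#     start_schema_extension(true, "add myAttr", "d-1234567890", ldif)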
"""
UnshareDirectory()
Stops the directory sharing between the directory owner and consumer accounts.
# Required Parameters
- `DirectoryId`: The identifier of the AWS Managed Microsoft AD directory that you want to stop sharing.
- `UnshareTarget`: Identifier for the directory consumer account with whom the directory will be unshared.
"""
unshare_directory(DirectoryId, UnshareTarget; aws_config::AWSConfig=global_aws_config()) = directory_service("UnshareDirectory", Dict{String, Any}("DirectoryId"=>DirectoryId, "UnshareTarget"=>UnshareTarget); aws_config=aws_config)
unshare_directory(DirectoryId, UnshareTarget, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("UnshareDirectory", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DirectoryId"=>DirectoryId, "UnshareTarget"=>UnshareTarget), args)); aws_config=aws_config)
"""
UpdateConditionalForwarder()
Updates a conditional forwarder that has been set up for your AWS directory.
# Required Parameters
- `DirectoryId`: The directory ID of the AWS directory for which to update the conditional forwarder.
- `DnsIpAddrs`: The updated IP addresses of the remote DNS server associated with the conditional forwarder.
- `RemoteDomainName`: The fully qualified domain name (FQDN) of the remote domain with which you will set up a trust relationship.
"""
update_conditional_forwarder(DirectoryId, DnsIpAddrs, RemoteDomainName; aws_config::AWSConfig=global_aws_config()) = directory_service("UpdateConditionalForwarder", Dict{String, Any}("DirectoryId"=>DirectoryId, "DnsIpAddrs"=>DnsIpAddrs, "RemoteDomainName"=>RemoteDomainName); aws_config=aws_config)
update_conditional_forwarder(DirectoryId, DnsIpAddrs, RemoteDomainName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("UpdateConditionalForwarder", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DirectoryId"=>DirectoryId, "DnsIpAddrs"=>DnsIpAddrs, "RemoteDomainName"=>RemoteDomainName), args)); aws_config=aws_config)
"""
UpdateNumberOfDomainControllers()
Adds or removes domain controllers to or from the directory. Based on the difference between current value and new value (provided through this API call), domain controllers will be added or removed. It may take up to 45 minutes for any new domain controllers to become fully active once the requested number of domain controllers is updated. During this time, you cannot make another update request.
# Required Parameters
- `DesiredNumber`: The number of domain controllers desired in the directory.
- `DirectoryId`: Identifier of the directory to which the domain controllers will be added or removed.
"""
update_number_of_domain_controllers(DesiredNumber, DirectoryId; aws_config::AWSConfig=global_aws_config()) = directory_service("UpdateNumberOfDomainControllers", Dict{String, Any}("DesiredNumber"=>DesiredNumber, "DirectoryId"=>DirectoryId); aws_config=aws_config)
update_number_of_domain_controllers(DesiredNumber, DirectoryId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("UpdateNumberOfDomainControllers", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DesiredNumber"=>DesiredNumber, "DirectoryId"=>DirectoryId), args)); aws_config=aws_config)
"""
UpdateRadius()
Updates the Remote Authentication Dial In User Service (RADIUS) server information for an AD Connector or Microsoft AD directory.
# Required Parameters
- `DirectoryId`: The identifier of the directory for which to update the RADIUS server information.
- `RadiusSettings`: A RadiusSettings object that contains information about the RADIUS server.
"""
update_radius(DirectoryId, RadiusSettings; aws_config::AWSConfig=global_aws_config()) = directory_service("UpdateRadius", Dict{String, Any}("DirectoryId"=>DirectoryId, "RadiusSettings"=>RadiusSettings); aws_config=aws_config)
update_radius(DirectoryId, RadiusSettings, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("UpdateRadius", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("DirectoryId"=>DirectoryId, "RadiusSettings"=>RadiusSettings), args)); aws_config=aws_config)
"""
UpdateTrust()
Updates the trust that has been set up between your AWS Managed Microsoft AD directory and an on-premises Active Directory.
# Required Parameters
- `TrustId`: Identifier of the trust relationship.
# Optional Parameters
- `SelectiveAuth`: Updates selective authentication for the trust.
"""
update_trust(TrustId; aws_config::AWSConfig=global_aws_config()) = directory_service("UpdateTrust", Dict{String, Any}("TrustId"=>TrustId); aws_config=aws_config)
update_trust(TrustId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("UpdateTrust", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("TrustId"=>TrustId), args)); aws_config=aws_config)
"""
VerifyTrust()
AWS Directory Service for Microsoft Active Directory allows you to configure and verify trust relationships. This action verifies a trust relationship between your AWS Managed Microsoft AD directory and an external domain.
# Required Parameters
- `TrustId`: The unique Trust ID of the trust relationship to verify.
"""
verify_trust(TrustId; aws_config::AWSConfig=global_aws_config()) = directory_service("VerifyTrust", Dict{String, Any}("TrustId"=>TrustId); aws_config=aws_config)
verify_trust(TrustId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = directory_service("VerifyTrust", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("TrustId"=>TrustId), args)); aws_config=aws_config)
#! /usr/bin/env python
# Author: S.Rodney
# Created : 2014.04.21
def reportDone( pid, dayspan=1, emailto='',emailuser='',emailpass='',
logfile=None, verbose=False ):
""" Check for visits executed in the last <ndays> days.
Fetch the visit status page, parse the visit info, print a report to stdout
and/or the logfile.
If the email options are provided, the report is also emailed.
"""
import time
soup = fetchPID( pid )
visdict = parseStatus(soup)
preface = """
%s
Daily HST Execution Update for program %i
"""%( time.asctime(), pid )
footer = """
Visit Status page: http://www.stsci.edu/cgi-bin/get-visit-status?id=%i
MAST archive page: http://archive.stsci.edu/hst/search.php?sci_pep_id=%i&action=Search&outputformat=HTML_Table&max_records=100
--------------------------------------
"""%( pid, pid )
report = checkDone(visdict, dayspan=dayspan )
if report and verbose :
print( preface + report + footer )
elif verbose :
print( "Twice Daily HST Visit status for PID %i: nothing to report."%pid)
    if report and logfile :
        fout = open(logfile,'a')
        print(preface+report+footer, file=fout)
        fout.close()
    elif logfile :
        fout = open(logfile,'a')
        print(time.asctime() + " Twice-daily HST Visit status for PID %i: nothing to report."%pid, file=fout)
        fout.close()
if report and emailto and emailuser and emailpass :
# send a notice for visits completed in the last 1 day
sendgmail( emailuser, emailpass, emailto,
'HST Visits completed : PID %i'%pid, preface+report )
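# Usage sketch (hypothetical values): print a one-day execution report for
# program 12345 to stdout, without logging or emailing:
#   reportDone(12345, dayspan=1, verbose=True)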
def reportComing( pid, dayspan=7, emailto='',emailuser='',emailpass='',
logfile=None, verbose=False ):
"""Construct a report listing any visits scheduled for execution in the
next <dayspan> days. Fetch the visit status page, parse the visit info,
and print a report. If the email options are provided, the report is
also emailed.
"""
import time
soup = fetchPID( pid )
visdict = parseStatus(soup)
preface = """
%s
Weekly HST Visit Status Update for program %i
"""%( time.asctime(), pid )
footer = """
Visit Status page: http://www.stsci.edu/cgi-bin/get-visit-status?id=%i
MAST archive page: http://archive.stsci.edu/hst/search.php?sci_pep_id=%i&action=Search&outputformat=HTML_Table&max_records=100
--------------------------------------
"""%( pid , pid )
report = checkComing(visdict, dayspan )
if report and verbose :
print( preface + report + footer )
elif verbose :
print( "Weekly HST Visit status for PID %i: nothing to report."%pid)
    if report and logfile :
        fout = open(logfile,'a')
        print(preface+report+footer, file=fout)
        fout.close()
    elif logfile :
        fout = open(logfile,'a')
        print(time.asctime() + " : Weekly HST Visit status for PID %i: nothing to report."%pid, file=fout)
        fout.close()
if report and emailto and emailuser and emailpass :
# send a notice for visits scheduled in the next 7 days
sendgmail( emailuser, emailpass, emailto,
'Weekly HST schedule update for PID %i'%pid, preface + report + footer )
def sendgmail( username, password, toaddr, subject, message, ccaddr=''):
"""Send an email using gmail.
"""
import smtplib
    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText
fromaddr = "%s@gmail.com"%username
    msg = MIMEMultipart()
msg['From'] = fromaddr
msg['To'] = toaddr
msg['CC'] = ccaddr
msg['Subject'] = subject
    msg.attach( MIMEText( message, 'plain'))
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
server.ehlo()
server.login( username, password)
text = msg.as_string()
server.sendmail(fromaddr, toaddr, text)
server.quit()
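# Usage sketch (hypothetical credentials; gmail typically requires an
# app-specific password for SMTP logins like this):
#   sendgmail('someuser', 'app-password', 'pi@example.edu',
#             'HST visit update', 'Visit 01 executed.')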
def fetchPID( pid ):
""" Read in the HST visit status page for the given program ID.
"""
import sys
try:
from bs4 import BeautifulSoup as bs
except ImportError :
print("Error: hstMonitor requires BeautifulSoup4.")
print(" http://www.crummy.com/software/BeautifulSoup")
print("Install it via pip (or, if you prefer, easy_install)")
print(" pip install beautifulsoup4")
sys.exit()
try:
import requests
except ImportError :
print("Error: hstMonitor requires .")
print(" http://docs.python-requests.org/en/latest")
print("Install it via pip (or, if you prefer, easy_install)")
print(" pip install requests")
sys.exit()
r = requests.get("http://www.stsci.edu/cgi-bin/get-visit-status?id=%i&markupFormat=html&observatory=HST"%pid)
data = r.text
    soup = bs(data, 'html.parser')
return( soup )
def parseStatus( soup ) :
""" Parse the status page soup into a dictionary of visits.
"""
import numpy as np
from dateutil.parser import parse as dtparse
# from dateutil.relativedelta import relativedelta
import datetime
today = datetime.datetime.today()
visdict = {}
trowlist = soup.findAll('tr')
for trow in trowlist :
rowtext = trow.getText().strip().split('\n')
rowtext = np.array( rowtext )[ np.where( [ len(rt) for rt in rowtext ] )[0] ]
if rowtext[0] == 'Visit' : continue
visit=rowtext[0]
status=rowtext[1]
targets=rowtext[2].replace('ANY','')
configs=rowtext[3]
startdate = dtparse( '9999-09-09' )
enddate = dtparse( '9999-09-09')
if status in ['Archived', 'Executed', 'Scheduled'] :
startdate = dtparse( rowtext[4].split('-')[0] )
enddate = dtparse( rowtext[5].split('-')[0] )
elif status in ['Scheduling','Implementation'] :
planwindowlist = rowtext[4:]
for planwindow in planwindowlist :
if '-' not in planwindow : continue
try :
startdate0 = dtparse( planwindow.split('-')[0] )
enddate0 = dtparse( planwindow.split('-')[1].split('(')[0] )
                except Exception :
                    continue
                daystostart0 = (startdate0 - today ).days
                daystoend0 = (enddate0 - today ).days
if daystostart0>=0 and daystoend0>=0 :
startdate = startdate0
enddate = enddate0
break
daystostart = ( startdate - today ).days
daystoend = ( enddate - today ).days
visdict[visit] = {'visit':visit,
'status':status,
'targets':targets,
'configs':configs,
'startdate':startdate,
'enddate':enddate,
'daystostart':daystostart,
'daystoend':daystoend,
}
return( visdict )
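# Each visdict entry has this shape (illustrative values only):
#   {'visit': '01', 'status': 'Scheduled', 'targets': 'SN-A', 'configs': 'WFC3/IR',
#    'startdate': datetime(...), 'enddate': datetime(...),
#    'daystostart': 3, 'daystoend': 4}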
def checkDone( visdict, dayspan=0.5 ):
"""Check for any visits that have been archived/executed/scheduled in the
last <dayspan> days, and return a string reporting them to the user.
    If nothing has been done lately, returns an empty string.
"""
daynames = ['Mon','Tue','Wed','Thu','Fri','Sat','Sun']
archivedThisWeek = [ k for k in visdict.keys()
if ( visdict[k]['status']=='Archived'
and -dayspan < visdict[k]['daystoend'] < 1 )
]
scheduledThisWeek = [ k for k in visdict.keys()
if ( visdict[k]['status']=='Scheduled'
and -dayspan < visdict[k]['daystostart'] < 1 )
]
executedThisWeek = [ k for k in visdict.keys()
if ( visdict[k]['status']=='Executed'
and -dayspan < visdict[k]['daystoend'] < 1 )
]
doneLately = [ k for k in visdict.keys()
if ( visdict[k]['status'] in
[ 'Archived', 'Executed', 'Scheduled' ]
and -dayspan < visdict[k]['daystoend'] < 1 )
]
if len(doneLately)==0 : return('')
report = "\n"
report += "Archived in the last %.1f days : "%dayspan
report += ','.join(archivedThisWeek).strip(',') + '\n'
report += "Executed in the last %.1f days : "%dayspan
report += ','.join(executedThisWeek).strip(',') + '\n'
report += "Scheduled for the last %.1f days : "%dayspan
report += ','.join(scheduledThisWeek).strip(',') + '\n'
datekey = lambda x : visdict[x]['enddate'].isoformat()
doneVisits = sorted(doneLately,key=datekey,reverse=False)
report += '\n Visits Completed in the Last %.1f days:\n\n'%dayspan
for vis in doneVisits :
datestr = visdict[vis]['enddate'].date().isoformat()
timestr = visdict[vis]['enddate'].time().isoformat()[:5]
target = visdict[vis]['targets']
weekday = daynames[ visdict[vis]['enddate'].date().weekday() ]
report += '%s %s %s %s (%s)\n'%(
vis, weekday, datestr, timestr, target )
report += '\n'
return( report )
def checkComing( visdict, dayspan=8 ):
"""Check for any visits that might be scheduled for execution in the
next <dayspan> days, and return a string reporting them to the user.
    If nothing is coming up, returns an empty string.
"""
daynames = ['Mon','Tue','Wed','Thu','Fri','Sat','Sun']
if dayspan <= 8 :
schedlist = ['Scheduled']
else :
schedlist = ['Scheduled','Scheduling','Implementation']
comingSoon = [ k for k in visdict.keys()
if ( visdict[k]['status'] in schedlist )
and ( (0 < visdict[k]['daystostart'] < dayspan)
or (0 < visdict[k]['daystoend'] < dayspan) )
]
datekey = lambda x : visdict[x]['enddate'].isoformat()
comingVisits = sorted(comingSoon,key=datekey,reverse=False)
if len(comingVisits)==0 : return('')
report = '\n Visits Scheduled (or schedulable) for the Next %i days:\n\n'%dayspan
for vis in comingVisits :
datestr = visdict[vis]['enddate'].date().isoformat()
timestr = visdict[vis]['enddate'].time().isoformat()[:5]
target = visdict[vis]['targets']
weekday = daynames[ visdict[vis]['enddate'].date().weekday() ]
report += '%s %s %s %s (%s)\n'%(
vis, weekday, datestr,timestr,target)
report += '\n'
return( report )
def mkReport( visdict, lookback=1, lookahead=7 ):
"""Construct a report of the visits that have been
archived/executed in the last <lookback> days
and those scheduled for execution in the next <lookahead> days.
"""
    report1 = checkDone( visdict, dayspan=lookback )
report2 = checkComing( visdict, dayspan=lookahead )
return( report1 + report2 )
if __name__ == "__main__":
import argparse
import datetime
import os
import sys
import logging
try:
from apscheduler.scheduler import Scheduler
except ImportError :
print("Error: hstMonitor requires APScheduler.")
print(" http://pythonhosted.org/APScheduler")
print("Install it via pip (or, if you prefer, easy_install)")
print(" pip install apscheduler")
sys.exit()
logging.basicConfig()
parser = argparse.ArgumentParser(
description='Fetch the visit status page, parse the visit info, print a report.')
# Required positional argument
parser.add_argument('PID', type=str, help='HST Program ID to check.')
# optional arguments
# parser.add_argument('--lookback', metavar='N', type=float,
# help='Number of days before today to search for completed visits.',
# default=1)
parser.add_argument('--lookahead', metavar='N', type=float,
help='Number of days after today to search for scheduled visits.',
default=7)
parser.add_argument('--quiet', dest='verbose', action='store_false', help='Suppress all stdout print statements', default=True)
parser.add_argument('--logfile', metavar='hstMonitor.log', type=str, help='Name of the .log file.', default='hstMonitor.log')
parser.add_argument('--clobber', action='store_true', help='Clobber any existing .log file.')
mailpar = parser.add_argument_group( "Options for e-mailing reports via gmail")
    mailpar.add_argument('--emailto', metavar='A,B,C', type=str, help='email addresses to send reports to.', default='')
    mailpar.add_argument('--emailuser', metavar='X', type=str, help='gmail username, for sending reports.', default='')
    mailpar.add_argument('--emailpass', metavar='Y', type=str, help='gmail password, for sending reports.', default='')
argv = parser.parse_args()
pidlist = argv.PID.split(',')
if argv.clobber and os.path.exists( argv.logfile ):
os.remove( argv.logfile )
sched = Scheduler( standalone=True )
for pid in pidlist :
# twice every day, check for newly-completed visits
sched.add_cron_job( reportDone, hour='0,12',
name='hstMon: Twice-daily Execution Check for %i'%int(pid),
args=[int(pid)],
kwargs={'dayspan':0.5,
'logfile':argv.logfile,
'emailto':argv.emailto,
'emailuser':argv.emailuser,
'emailpass':argv.emailpass,
'verbose':argv.verbose }
)
# every week, check for newly-scheduled visits
sched.add_cron_job( reportComing,
day_of_week='sun',
name='hstMon: Weekly Schedule Check for %i'%int(pid),
args=[int(pid)],
kwargs={'dayspan':argv.lookahead,
'logfile':argv.logfile,
'emailto':argv.emailto,
'emailuser':argv.emailuser,
'emailpass':argv.emailpass,
'verbose':argv.verbose }
)
sched.start()
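    # Example invocation (hypothetical PID and addresses):
    #   python hstMonitor.py 12345 --lookahead 7 --logfile hstMonitor.log \
    #       --emailto pi@example.edu --emailuser someuser --emailpass app-password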
function allen_ccf_2pi(tv,av,st)
% allen_ccf_2pi(tv,av,st)
% written by Samuel Picard (samuel.picard@ucl.ac.uk)
% based on original allen_ccf_npx tool written by Andy Peters (peters.andrew.j@gmail.com)
%
% GUI for planning a 2p-imaging (2pi) chronic window implant with the Allen CCF
% Part of repository: https://github.com/cortex-lab/allenCCF
% - directions for installing atlas in repository readme
% - some dependent functions from that repository
%
% (optional inputs - if CCF path is set in allen_atlas_path below, loaded automatically)
% tv, av, st = CCF template volume, annotated volume, structure tree
% tv = readNPY('template_volume_10um.npy');
% av = readNPY('annotation_volume_10um_by_index.npy');
% st = loadStructureTree('structure_tree_safe_2017.csv');
% Initialize gui_data structure
gui_data = struct;
% Allen CCF-bregma transform (estimated from eyeballing Paxinos->CCF)
% [AP,DV,ML]
bregma = [540,0,570];
% If not already loaded in, load in atlas
if nargin < 3
allen_atlas_path = 'C:\Users\Samuel\Documents\GitHub\allenCCF'; %put path to atlas files here
if isempty(allen_atlas_path)
        error('Set the path where the Allen CCF is stored in allen_atlas_path above');
end
tv = readNPY([allen_atlas_path filesep 'template_volume_10um.npy']); % grey-scale "background signal intensity"
av = readNPY([allen_atlas_path filesep 'annotation_volume_10um_by_index.npy']); % the number at each pixel labels the area, see note below
st = loadStructureTree([allen_atlas_path filesep 'structure_tree_safe_2017.csv']); % a table of what all the labels mean
end
% Load the colormap (located in the repository, find by associated fcn)
allenCCF_path = fileparts(which('allenCCFbregma'));
cmap_filename = [allenCCF_path filesep 'allen_ccf_colormap_2017.mat'];
load(cmap_filename);
% Set up the gui
window_atlas_gui = figure('Toolbar','none','Menubar','none','color','w', ...
'Name','Atlas-window viewer','Units','normalized','Position',[0.2,0.2,0.7,0.7]);
% Set up the atlas axes
axes_atlas = subplot(2,2,[1 3]);
[~, brain_outline] = plotBrainGrid([],axes_atlas);
hold(axes_atlas,'on');
axis vis3d equal off manual
view([-30,25]);
caxis([0 300]);
[ap_max,dv_max,ml_max] = size(tv);
xlim([-10,ap_max+10])
ylim([-10,ml_max+10])
zlim([-10,dv_max+10])
% Set up the window area axes
%top of window
axes_window_areas = subplot(2,2,2);
axes_window_areas.ActivePositionProperty = 'position';
window_areas_plot = image(nan(166,166)); %initialized for 5mm window at 30um resolution
axis off auto
window_areas_text = text(axes_window_areas,zeros(1,100),zeros(1,100),repmat({''}, 1, 100));
set(axes_window_areas,'FontSize',11,'XLimSpec', 'Tight','YLimSpec', 'Tight');
colormap(axes_window_areas,cmap);
caxis([1,size(cmap,1)])
%bottom of window
axes_window_areas_bottom = subplot(2,2,4);
axes_window_areas_bottom.ActivePositionProperty = 'position';
window_areas_bottom_plot = image(nan(166,166)); %initialized for 5mm window at 30um resolution
axis off auto
window_areas_bottom_text = text(axes_window_areas_bottom,zeros(1,100),zeros(1,100),repmat({''}, 1, 100));
set(axes_window_areas_bottom,'FontSize',11,'XLimSpec', 'Tight','YLimSpec', 'Tight');
colormap(axes_window_areas_bottom,cmap);
caxis([1,size(cmap,1)])
% Position the axes
set(axes_atlas,'Position',[-0.15,-0.1,1,1.2]);
set(axes_window_areas,'Position',[0.7,0.55,0.2,0.34]);
set(axes_window_areas_bottom,'Position',[0.7,0.1,0.2,0.34]);
% Set the current axes to the atlas (dirty, but some gca requirements)
axes(axes_atlas);
% Set up the window reference/actual
window_centre = [bregma(1),bregma(3),0];
window_depth = 60; % effective z-range of 2p microscope (in 10 ums)
window_diameter = 500; % diameter in 10 ums
window_vector = [window_centre',[window_centre(1),window_centre(2),window_depth]'];
window_vector_line = line(window_vector(1,:),window_vector(2,:),window_vector(3,:),'linewidth',3,'color','b','linestyle','-');
window_rads = linspace(-pi,pi,100);
window_circle = line(0.5*window_diameter*cos(window_rads)+window_centre(1),...
0.5*window_diameter*sin(window_rads)+window_centre(2),...
zeros(size(window_rads))+window_centre(3),...
'linewidth',1.5,'color','r','linestyle','--');
% add titles for window plots
title(axes_window_areas,'Top of window');
title(axes_window_areas_bottom,[sprintf('Bottom (%i ',10*window_depth),'\mu','m)']);
% Set up the text to display coordinates
window_coordinates_text = uicontrol('Style','text','String','', ...
'Units','normalized','Position',[0,0.95,1,0.05], ...
'BackgroundColor','w','HorizontalAlignment','left','FontSize',12);
% Store data
gui_data.tv = tv; % Intensity atlas
gui_data.av = av; % Annotated atlas
gui_data.st = st; % Labels table
gui_data.cmap = cmap; % Atlas colormap
gui_data.bregma = bregma; % Bregma for external referencing
gui_data.window_depth = window_depth; % Effective z-depth under window
gui_data.structure_plot_idx = []; % Plotted structures
gui_data.window_angle = [0;0]; % window angles in ML/DV
gui_data.window_centre = window_centre; % Window reference centre on 3D atlas
gui_data.window_diameter = window_diameter;
gui_data.ref_centre = window_centre;
%Store handles
gui_data.handles.cortex_outline = brain_outline;
gui_data.handles.structure_patch = []; % Plotted structures
gui_data.handles.axes_atlas = axes_atlas; % Axes with 3D atlas
gui_data.handles.axes_window_areas = axes_window_areas; % Axes with window areas
gui_data.handles.axes_window_areas_bottom = axes_window_areas_bottom; % Axes with window areas
gui_data.handles.slice_plot = surface('EdgeColor','none'); % Slice on 3D atlas
gui_data.handles.slice_volume = 'tv'; % The volume shown in the slice
gui_data.handles.window_vector = window_vector_line; % Window centre vector on 3D atlas
gui_data.handles.window_circle = window_circle; % Window circle on 3D atlas
gui_data.handles.window_areas_plot = window_areas_plot; % Color-coded window regions
gui_data.handles.window_areas_bottom_plot = window_areas_bottom_plot; % Color-coded window regions
gui_data.handles.window_areas_text = window_areas_text; % Labels for window regions
gui_data.handles.window_areas_bottom_text = window_areas_bottom_text; % Labels for window regions
gui_data.window_coordinates_text = window_coordinates_text; % Window coordinates text
% Make 3D rotation the default state (toggle on/off with 'r')
h = rotate3d(axes_atlas);
h.Enable = 'on';
% Update the slice whenever a rotation is completed
%h.ActionPostCallback = @update_slice;
% Set functions for key presses
hManager = uigetmodemanager(window_atlas_gui);
[hManager.WindowListenerHandles.Enabled] = deal(false);
set(window_atlas_gui,'KeyPressFcn',@key_press);
set(window_atlas_gui,'KeyReleaseFcn',@key_release);
% Upload gui_data
guidata(window_atlas_gui, gui_data);
% Display the first slice and update the window position
update_slice(window_atlas_gui);
update_window_coordinates(window_atlas_gui);
% Display controls
display_controls;
end
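% Units note (derived from the constants above): the atlas is 10 um/voxel, so the
% default window_diameter of 500 voxels is a 5 mm window and window_depth of 60
% voxels is a 600 um effective imaging range; each arrow-key press in key_press
% below moves the window by 10 voxels (100 um). As a sketch, to start the window
% centre 2 mm posterior and 1.5 mm lateral (right) of bregma one could set:
%   window_centre = [bregma(1)+200, bregma(3)+150, 0];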
function key_press(window_atlas_gui,eventdata)
% Get guidata
gui_data = guidata(window_atlas_gui);
switch eventdata.Key
case 'uparrow'
if isempty(eventdata.Modifier)
% Up: move window anterior
ap_offset = -10;
set(gui_data.handles.window_circle,'XData',get(gui_data.handles.window_circle,'XData') + ap_offset);
set(gui_data.handles.window_vector,'XData',get(gui_data.handles.window_vector,'XData') + ap_offset);
gui_data.window_centre = gui_data.window_centre + [ap_offset,0,0];
elseif any(strcmp(eventdata.Modifier,'shift'))
% Ctrl-up: increase DV angle
angle_change = [1;0];
gui_data = update_window_angle(window_atlas_gui,angle_change);
elseif any(strcmp(eventdata.Modifier,'alt'))
% Alt-up: raise window
dv_offset = -10;
old_window_vector = cell2mat(get(gui_data.handles.window_vector,{'XData','YData','ZData'})');
old_window_circle = cell2mat(get(gui_data.handles.window_circle,{'XData','YData','ZData'})');
move_vector = diff(old_window_vector,[],2)./ ...
norm(diff(old_window_vector,[],2))*dv_offset;
new_window_vector = bsxfun(@plus,old_window_vector,move_vector);
new_window_circle = bsxfun(@plus,old_window_circle,move_vector);
set(gui_data.handles.window_vector,'XData',new_window_vector(1,:), ...
'YData',new_window_vector(2,:),'ZData',new_window_vector(3,:));
set(gui_data.handles.window_circle,'XData',new_window_circle(1,:), ...
'YData',new_window_circle(2,:),'ZData',new_window_circle(3,:));
gui_data.window_centre = gui_data.window_centre + move_vector';
end
case 'downarrow'
if isempty(eventdata.Modifier)
% Down: move window posterior
ap_offset = 10;
set(gui_data.handles.window_circle,'XData',get(gui_data.handles.window_circle,'XData') + ap_offset);
set(gui_data.handles.window_vector,'XData',get(gui_data.handles.window_vector,'XData') + ap_offset);
gui_data.window_centre = gui_data.window_centre + [ap_offset,0,0];
elseif any(strcmp(eventdata.Modifier,'shift'))
% Ctrl-down: decrease DV angle
angle_change = [-1;0];
gui_data = update_window_angle(window_atlas_gui,angle_change);
elseif any(strcmp(eventdata.Modifier,'alt'))
% Alt-down: lower window
dv_offset = 10;
old_window_vector = cell2mat(get(gui_data.handles.window_vector,{'XData','YData','ZData'})');
old_window_circle = cell2mat(get(gui_data.handles.window_circle,{'XData','YData','ZData'})');
move_vector = diff(old_window_vector,[],2)./ ...
norm(diff(old_window_vector,[],2))*dv_offset;
new_window_vector = bsxfun(@plus,old_window_vector,move_vector);
new_window_circle = bsxfun(@plus,old_window_circle,move_vector);
set(gui_data.handles.window_vector,'XData',new_window_vector(1,:), ...
'YData',new_window_vector(2,:),'ZData',new_window_vector(3,:));
set(gui_data.handles.window_circle,'XData',new_window_circle(1,:), ...
'YData',new_window_circle(2,:),'ZData',new_window_circle(3,:));
gui_data.window_centre = gui_data.window_centre + move_vector';
end
case 'rightarrow'
if isempty(eventdata.Modifier)
% Right: move window right
ml_offset = 10;
set(gui_data.handles.window_circle,'YData',get(gui_data.handles.window_circle,'YData') + ml_offset);
set(gui_data.handles.window_vector,'YData',get(gui_data.handles.window_vector,'YData') + ml_offset);
gui_data.window_centre = gui_data.window_centre + [0,ml_offset,0];
elseif any(strcmp(eventdata.Modifier,'shift'))
% Ctrl-right: increase vertical angle
angle_change = [0;1];
gui_data = update_window_angle(window_atlas_gui,angle_change);
end
case 'leftarrow'
if isempty(eventdata.Modifier)
% Left: move window left
ml_offset = -10;
set(gui_data.handles.window_circle,'YData',get(gui_data.handles.window_circle,'YData') + ml_offset);
set(gui_data.handles.window_vector,'YData',get(gui_data.handles.window_vector,'YData') + ml_offset);
gui_data.window_centre = gui_data.window_centre + [0,ml_offset,0];
elseif any(strcmp(eventdata.Modifier,'shift'))
% Ctrl-left: decrease vertical angle
angle_change = [0;-1];
gui_data = update_window_angle(window_atlas_gui,angle_change);
end
case 'c'
% Bring up controls again
display_controls;
case 'b'
% Toggle brain outline visibility
current_visibility = gui_data.handles.cortex_outline.Visible;
switch current_visibility; case 'on'; new_visibility = 'off'; case 'off'; new_visibility = 'on'; end;
set(gui_data.handles.cortex_outline,'Visible',new_visibility);
case 'a'
% Toggle plotted structure visibility
if ~isempty(gui_data.structure_plot_idx)
current_alpha = get(gui_data.handles.structure_patch(1),'FaceAlpha');
switch current_alpha
case 0
new_alpha = 0.2;
case 0.2
new_alpha = 1;
case 1
new_alpha = 0;
end
set(gui_data.handles.structure_patch,'FaceAlpha',new_alpha);
end
case 's'
% Toggle slice volume/visibility
slice_volumes = {'tv','av','none'};
new_slice_volume = slice_volumes{circshift( ...
strcmp(gui_data.handles.slice_volume,slice_volumes),[0,1])};
if strcmp(new_slice_volume,'none')
set(gui_data.handles.slice_plot,'Visible','off');
else
set(gui_data.handles.slice_plot,'Visible','on');
end
gui_data.handles.slice_volume = new_slice_volume;
guidata(window_atlas_gui, gui_data);
update_slice(window_atlas_gui);
case 'w'
% Toggle window visibility
current_visibility = gui_data.handles.window_circle.Visible;
switch current_visibility; case 'on'; new_visibility = 'off'; case 'off'; new_visibility = 'on'; end;
set(gui_data.handles.window_circle,'Visible',new_visibility);
case 'm'
% Set window position manually and find window angle automatically
set_window_position(window_atlas_gui);
% Get updated guidata
gui_data = guidata(window_atlas_gui);
case {'equal','add'}
% Add structure(s) to display
slice_spacing = 10;
% Prompt for which structures to show (only structures which are
% labelled in the slice-spacing downsampled annotated volume)
if ~any(strcmp(eventdata.Modifier,'shift'))
% (no shift: list in native CCF order)
parsed_structures = unique(reshape(gui_data.av(1:slice_spacing:end, ...
1:slice_spacing:end,1:slice_spacing:end),[],1));
if ~any(strcmp(eventdata.Modifier,'alt'))
% (no alt: list all)
plot_structures_parsed = listdlg('PromptString','Select a structure to plot:', ...
'ListString',gui_data.st.safe_name(parsed_structures),'ListSize',[520,500]);
plot_structures = parsed_structures(plot_structures_parsed);
else
% (alt: search list)
structure_search = lower(inputdlg('Search structures'));
structure_match = find(contains(lower(gui_data.st.safe_name),structure_search));
list_structures = intersect(parsed_structures,structure_match);
if isempty(list_structures)
error('No structure search results')
end
plot_structures_parsed = listdlg('PromptString','Select a structure to plot:', ...
'ListString',gui_data.st.safe_name(list_structures),'ListSize',[520,500]);
plot_structures = list_structures(plot_structures_parsed);
end
if ~isempty(plot_structures)
for curr_plot_structure = reshape(plot_structures,1,[])
% If this label isn't used, don't plot
if ~any(reshape(gui_data.av( ...
1:slice_spacing:end,1:slice_spacing:end,1:slice_spacing:end),[],1) == curr_plot_structure)
disp(['"' gui_data.st.safe_name{curr_plot_structure} '" is not parsed in the atlas'])
continue
end
gui_data.structure_plot_idx(end+1) = curr_plot_structure;
plot_structure_color = hex2dec(reshape(gui_data.st.color_hex_triplet{curr_plot_structure},2,[])')./255;
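% (color_hex_triplet is a 6-character hex string; reshape(...,2,[])' splits it
% into its 'RR','GG','BB' pairs, and hex2dec maps each pair to 0-255)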
structure_3d = isosurface(permute(gui_data.av(1:slice_spacing:end, ...
1:slice_spacing:end,1:slice_spacing:end) == curr_plot_structure,[3,1,2]),0);
if isempty(gui_data.handles.structure_patch)
structure_alpha = 0.2;
else
structure_alpha = get(gui_data.handles.structure_patch(1),'FaceAlpha');
end
gui_data.handles.structure_patch(end+1) = patch('Vertices',structure_3d.vertices*slice_spacing, ...
'Faces',structure_3d.faces, ...
'FaceColor',plot_structure_color,'EdgeColor','none','FaceAlpha',structure_alpha);
end
end
elseif any(strcmp(eventdata.Modifier,'shift'))
% (shift: use hierarchy search)
if ~any(strcmp(eventdata.Modifier,'control'))
% (no ctrl, choose between all structures)
plot_structures = hierarchicalSelect(gui_data.st);
else
% (ctrl: take pre-defined regions: isocortex, cerebellum, SC & IC)
plot_structures = [7,13,31,108,115,122,158,221,239,246,253,279,298,340,361,368,374,381,809,813,1016,1017];
end
if ~isempty(plot_structures) % will be empty if dialog was cancelled
% get all children of this one
for plot_structure = plot_structures
thisID = gui_data.st.id(plot_structure);
idStr = sprintf('/%d/', thisID);
theseCh = find(cellfun(@(x)contains(x,idStr), gui_data.st.structure_id_path));
% plot the structure
slice_spacing = 5;
plot_structure_color = hex2dec(reshape(gui_data.st.color_hex_triplet{plot_structure},2,[])')./255;
structure_3d = isosurface(permute(ismember(gui_data.av(1:slice_spacing:end, ...
1:slice_spacing:end,1:slice_spacing:end),theseCh),[3,1,2]),0);
if isempty(gui_data.handles.structure_patch)
structure_alpha = 0.2;
else
structure_alpha = get(gui_data.handles.structure_patch(1),'FaceAlpha');
end
gui_data.structure_plot_idx(end+1) = plot_structure;
gui_data.handles.structure_patch(end+1) = patch('Vertices',structure_3d.vertices*slice_spacing, ...
'Faces',structure_3d.faces, ...
'FaceColor',plot_structure_color,'EdgeColor','none','FaceAlpha',structure_alpha);
end
end
end
case {'hyphen','subtract'}
% Remove structure(s) already plotted
if ~isempty(gui_data.structure_plot_idx)
remove_structures = listdlg('PromptString','Select a structure to remove:', ...
'ListString',gui_data.st.safe_name(gui_data.structure_plot_idx));
delete(gui_data.handles.structure_patch(remove_structures))
gui_data.structure_plot_idx(remove_structures) = [];
gui_data.handles.structure_patch(remove_structures) = [];
end
case 'x'
% Export the window vector coordinates in Allen CCF to the workspace
window_vector = cell2mat(get(gui_data.handles.window_vector,{'XData','YData','ZData'})');
window_vector_ccf = round(window_vector([1,3,2],:))';
assignin('base','window_vector_ccf',window_vector_ccf)
disp('Copied window vector coordinates to workspace');
end
% Upload gui_data
guidata(window_atlas_gui, gui_data);
end
function key_release(window_atlas_gui,eventdata)
% Get guidata
gui_data = guidata(window_atlas_gui);
switch eventdata.Key
case {'rightarrow','leftarrow','uparrow','downarrow'}
% Update the window info/slice on arrow release
update_window_coordinates(window_atlas_gui);
update_slice(window_atlas_gui);
end
% Upload gui_data
guidata(window_atlas_gui, gui_data);
end
function update_slice(window_atlas_gui,varargin)
% Get guidata
gui_data = guidata(window_atlas_gui);
% Only update the slice if it's visible
if strcmp(gui_data.handles.slice_plot(1).Visible,'on')
offsets = [0,gui_data.window_depth];
plot_handles = {'window_areas','window_areas_bottom'};
for islice = 1:length(offsets)
% Get current window outline coordinate
curr_window_circle = cell2mat(get(gui_data.handles.window_circle,{'XData','YData','ZData'})');
window_vector = cell2mat(get(gui_data.handles.window_vector,{'XData','YData','ZData'})');
move_vector = diff(window_vector,[],2)./ ...
norm(diff(window_vector,[],2))*offsets(islice);
window_circle = bsxfun(@plus,curr_window_circle,move_vector);
window_centre = gui_data.window_centre + move_vector';
% Get two vectors on the window plane
vlength = size(window_circle,2);
window_vector_1 = window_circle(:,1)' - window_circle(:,round(0.4*vlength))';
window_vector_2 = window_circle(:,1)' - window_circle(:,round(0.7*vlength))';
%get the normal vector of the plane
normal_vector = cross(window_vector_1,window_vector_2);
% Get the plane offset through the window centre
window_top = window_centre;
plane_offset = -(normal_vector*window_top');
% Define a plane of points to index
% (the plane grid is defined based on which cardinal plane is most
% orthogonal to the plotted plane; this is janky, but it works)
slice_px_space = 3;
[~,cam_plane] = max(abs(normal_vector./norm(normal_vector)));
switch cam_plane
case 1
[plane_y,plane_z] = meshgrid(1:slice_px_space:size(gui_data.tv,3),1:slice_px_space:size(gui_data.tv,2));
plane_x = ...
(normal_vector(2)*plane_y+normal_vector(3)*plane_z + plane_offset)/ ...
-normal_vector(1);
case 2
[plane_x,plane_z] = meshgrid(1:slice_px_space:size(gui_data.tv,1),1:slice_px_space:size(gui_data.tv,2));
plane_y = ...
(normal_vector(1)*plane_x+normal_vector(3)*plane_z + plane_offset)/ ...
-normal_vector(2);
case 3
[plane_x,plane_y] = meshgrid(1:slice_px_space:size(gui_data.tv,1),1:slice_px_space:size(gui_data.tv,3));
plane_z = ...
(normal_vector(1)*plane_x+normal_vector(2)*plane_y + plane_offset)/ ...
-normal_vector(3);
end
% Get the coordinates on the plane
x_idx = round(plane_x);
y_idx = round(plane_y);
z_idx = round(plane_z);
% make a mask for the window (plane points within one window radius of the window centre)
window_mask = uint8(((plane_x-window_centre(1)).^2 + (plane_y-window_centre(2)).^2 + (plane_z-window_centre(3)).^2)<(gui_data.window_diameter/2)^2);
% Find plane coordinates in bounds with the volume and the window
use_xd = x_idx > 0 & x_idx < size(gui_data.tv,1);
use_yd = y_idx > 0 & y_idx < size(gui_data.tv,3);
use_zd = z_idx > 0 & z_idx < size(gui_data.tv,2);
use_idx = use_xd & use_yd & use_zd & window_mask;
curr_slice_idx = sub2ind(size(gui_data.tv),x_idx(use_idx),z_idx(use_idx),y_idx(use_idx));
% Find plane coordinates that contain brain
curr_slice_isbrain = false(size(use_idx));
curr_slice_isbrain(use_idx) = gui_data.av(curr_slice_idx) > 1;
% Index coordinates in bounds + with brain
grab_pix_idx = sub2ind(size(gui_data.tv),x_idx(curr_slice_isbrain),z_idx(curr_slice_isbrain),y_idx(curr_slice_isbrain));
% Grab pixels from (selected) volume
curr_slice = nan(size(use_idx));
switch gui_data.handles.slice_volume
case 'tv'
curr_slice(curr_slice_isbrain) = gui_data.tv(grab_pix_idx);
colormap(gui_data.handles.axes_atlas,'gray');
caxis([0,255]);
case 'av'
curr_slice(curr_slice_isbrain) = gui_data.av(grab_pix_idx);
colormap(gui_data.handles.axes_atlas,gui_data.cmap);
caxis([1,size(gui_data.cmap,1)]);
end
% Update the slice display
if islice==2 %1 = top, 2 = bottom
set(gui_data.handles.slice_plot,'XData',plane_x,'YData',plane_y,'ZData',plane_z,'CData',curr_slice);
end
% Update the title with new depth
if islice==2
set(gui_data.handles.(['axes_' plot_handles{islice}]).Title,'String',[sprintf('Bottom (%i ',10*offsets(islice)),'\mu','m)']);
end
% Update the window overview
curr_slice_av = nan(size(use_idx));
curr_slice_av(curr_slice_isbrain) = gui_data.av(grab_pix_idx);
x_edges = [find(sum(window_mask,1),1,'first'),find(sum(window_mask,1),1,'last')];
y_edges = [find(sum(window_mask,2),1,'first'),find(sum(window_mask,2),1,'last')];
cropped_window = curr_slice_av(y_edges(1):y_edges(2),x_edges(1):x_edges(2))';
set(gui_data.handles.([plot_handles{islice} '_plot']),'CData',cropped_window);
%update area labels based on which areas are contained in window
window_areas = cropped_window;
unique_areas = unique(window_areas(~isnan(window_areas(:))));
unique_parent_struct = unique(gui_data.st.parent_structure_id(unique_areas));
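% (assumes up to 100 pre-allocated area text handles; slots beyond the number
% of parsed parent structures are blanked in the loop below)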
for i_struct = 1:100
if i_struct<=length(unique_parent_struct)
areas_in_parent_struct = find(gui_data.st.parent_structure_id==unique_parent_struct(i_struct));
area_bm = ismember(window_areas,areas_in_parent_struct);
area_bm_largest = zeros(size(area_bm));
cc = bwconncomp(area_bm);
[~,largest_blob] = max(cellfun('length',cc.PixelIdxList));
area_bm_largest(cc.PixelIdxList{largest_blob}) = 1;
[area_x,area_y] = find(area_bm_largest==1);
area_centre = [mean(area_y),mean(area_x)];
area_string = gui_data.st.acronym{find(gui_data.st.id==unique_parent_struct(i_struct))};
else
area_centre = [0,0];
area_string = '';
end
set(gui_data.handles.([plot_handles{islice} '_text'])(i_struct), ...
    'Position',area_centre,'String',area_string, ...
    'VerticalAlignment','middle','HorizontalAlignment','center');
end
end
% Upload gui_data
guidata(window_atlas_gui, gui_data);
end
end
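% A minimal sketch of the plane-sampling trick used in update_slice above,
% assuming a z-dominant normal (cam_plane == 3); all variable names below are
% illustrative only. A plane through point p0 with normal n satisfies
% n*x' + d = 0 with d = -n*p0', so z follows from the gridded x/y:
%   n = [0.1,0.2,1]; n = n/norm(n); p0 = [50,60,70]; d = -(n*p0');
%   [gx,gy] = meshgrid(1:10:100,1:10:100);
%   gz = (n(1)*gx + n(2)*gy + d)/-n(3);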
function set_window_position(window_atlas_gui,varargin) %this function should place window at tangent plane
% Get guidata
gui_data = guidata(window_atlas_gui);
prompt_text = { ...
'AP position (mm from bregma)', ...
'ML position (mm from bregma)', ...
'window diameter (mm)', ...
'maximum depth (microns)'};
new_window_position = cellfun(@str2num,inputdlg(prompt_text,'Set window position',1));
% Convert centre position: mm->CCF
window_centre_ccf_coords = round(gui_data.bregma([1,3])' - 100*[new_window_position(1);-new_window_position(2)]);%not sure why we need this sign change...
window_diameter = new_window_position(3)*100;
window_depth = new_window_position(4)/10;
%load surface data or generate it from scratch
if ~exist('surf_coords','var')
try
load('dorsalsurface_nomidline_ccf_coords.mat')
catch
%CODE TO GENERATE SURFACE MAP
surf_coords = nan(size(gui_data.av,1),size(gui_data.av,3));
for i=1:size(gui_data.av,1)
for j=1:size(gui_data.av,3)
idxs = find(squeeze(gui_data.av(i,:,j))>1,1,'first');
if ~isempty(idxs)
surf_coords(i,j) = idxs;
end
end
idx_c = round(size(gui_data.av,3)/2);
[m,idx_m] = min(surf_coords(i,:));
idx_m_c = idx_m-idx_c;
surf_coords(i,idx_c-abs(idx_m_c):idx_c+abs(idx_m_c)) = m;
end
save('dorsalsurface_nomidline_ccf_coords.mat','surf_coords');
end
end
%figure out angle of tangent plane
surf_depth_coords = surf_coords(window_centre_ccf_coords(1),window_centre_ccf_coords(2));
centre_surf_ccf_coords = [window_centre_ccf_coords(1); surf_depth_coords; window_centre_ccf_coords(2)];
centre_patch = surf_coords(window_centre_ccf_coords(1)-50:window_centre_ccf_coords(1)+50,window_centre_ccf_coords(2)-50:window_centre_ccf_coords(2)+50);
centre_patch_smooth = imgaussfilt(centre_patch,'FilterSize',9);
[fx,fy] = gradient(centre_patch_smooth,1);
avg_gradient = [-nanmean(fy(:));nanmean(fx(:))];
window_angle_rad = atan(avg_gradient);
window_angle_deg = (window_angle_rad/(2*pi))*360;
%compute new window vector and circle (horizontal)
%window_diameter = gui_data.window_diameter;
window_rads = linspace(-pi,pi,100);
window_centre = [window_centre_ccf_coords', surf_depth_coords-gui_data.ref_centre(3)];
window_circle = [0.5*window_diameter*cos(window_rads)+window_centre(1);...
0.5*window_diameter*sin(window_rads)+window_centre(2);...
zeros(size(window_rads))+window_centre(3)];
window_vector = [window_centre',(window_centre+[0,0,window_depth])'];
%rotate these by the tangent angle
eul = [0;window_angle_rad]; %[yaw; pitch; roll]
R = eul2rotm(eul'); %find rotation matrix
window_circle_centred = window_circle - window_centre'; %translate to origin by subtracting its centre coords
window_vector_centred = window_vector - window_centre';
new_window_circle_centred = R*window_circle_centred; %rotate around origin
new_window_vector_centred = R*window_vector_centred;
new_window_circle = new_window_circle_centred + window_centre'; %translate back to its position
new_window_vector = new_window_vector_centred + window_centre';
% update gui_data
set(gui_data.handles.window_circle,'XData',new_window_circle(1,:), ...
'YData',new_window_circle(2,:), ...
'ZData',new_window_circle(3,:));
set(gui_data.handles.window_vector,'XData',new_window_vector(1,:), ...
'YData',new_window_vector(2,:), ...
'ZData',new_window_vector(3,:));
gui_data.window_angle = window_angle_deg;
gui_data.window_centre = [window_centre_ccf_coords', surf_depth_coords];
gui_data.window_diameter = window_diameter;
gui_data.window_depth = window_depth;
% Upload gui_data
guidata(window_atlas_gui, gui_data);
% Update the slice and window coordinates
update_slice(window_atlas_gui);
update_window_coordinates(window_atlas_gui);
end
function gui_data = update_window_angle(window_atlas_gui,angle_change)
% Get guidata
gui_data = guidata(window_atlas_gui);
% Set new angle
new_angle = gui_data.window_angle + angle_change;
gui_data.window_angle = new_angle;
% Get the positions of the window circle and vector
window_circle = cell2mat(get(gui_data.handles.window_circle,{'XData','YData','ZData'})');
window_vector = cell2mat(get(gui_data.handles.window_vector,{'XData','YData','ZData'})');
% Compute the rotation matrix
eul = [0;(angle_change/360)*2*pi];
R = eul2rotm(eul');
% temporarily translate window position to origin
window_circle_centred = window_circle - gui_data.window_centre';
window_vector_centred = window_vector - gui_data.window_centre';
% rotate the vector around the origin using rotation matrix
new_window_circle_centred = R*window_circle_centred;
new_window_vector_centred = R*window_vector_centred;
% translate rotated window back to the original centre point
new_window_circle = new_window_circle_centred + gui_data.window_centre';
new_window_vector = new_window_vector_centred + gui_data.window_centre';
% update gui_data
set(gui_data.handles.window_circle,'XData',new_window_circle(1,:), ...
'YData',new_window_circle(2,:), ...
'ZData',new_window_circle(3,:));
set(gui_data.handles.window_vector,'XData',new_window_vector(1,:), ...
'YData',new_window_vector(2,:), ...
'ZData',new_window_vector(3,:));
% Upload gui_data
guidata(window_atlas_gui, gui_data);
end
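% A minimal sketch of the translate-rotate-translate pattern used in
% update_window_angle, assuming eul2rotm (Robotics System Toolbox) is
% available; names are illustrative only:
%   R = eul2rotm([0, pitch_rad, roll_rad]); % default 'ZYX' = [yaw pitch roll]
%   rotated = R*(points - centre') + centre'; % points: 3xN, centre: 1x3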
function update_window_coordinates(window_atlas_gui,varargin)
% Get guidata
gui_data = guidata(window_atlas_gui);
window_bregma_coordinate = round((gui_data.bregma([1,3,2]) - gui_data.window_centre)*10);
% Update the text
window_text = ['Window position: ' ...
num2str(window_bregma_coordinate(1)) ' AP, ', ...
num2str(-window_bregma_coordinate(2)) ' ML, ', ...
num2str(-window_bregma_coordinate(3)) ' DV, ', ...
num2str(round(gui_data.window_angle(1))) char(176) ' pitch, ' ...
num2str(round(gui_data.window_angle(2))) char(176) ' roll'];
set(gui_data.window_coordinates_text,'String',window_text);
% Upload gui_data
guidata(window_atlas_gui, gui_data);
end
function display_controls
% Print controls
CreateStruct.Interpreter = 'tex';
CreateStruct.WindowStyle = 'non-modal';
msgbox( ...
{'\fontsize{12}' ...
'\bf Window: \rm' ...
'Arrow keys : translate window' ...
'Alt/Option up/down : raise/lower window' ...
'Shift arrow keys : change window angle' ...
'm : set location manually (tangent to surface)', ...
'\bf 3D brain areas: \rm' ...
' =/+ : add (list selector)' ...
' Alt/Option =/+ : add (search)' ...
' Shift =/+ : add (hierarchy selector)' ...
' Ctrl Shift =/+ : add all regions on surface' ...
' - : remove', ...
'\bf Visibility: \rm' ...
's : atlas slice (toggle tv/av/off)' ...
'b : brain outline' ...
'w : window outline' ...
'a : 3D brain areas (toggle transparency)' ...
'\bf Other: \rm' ...
'x : export window coordinates to workspace' ...
'c : bring up controls box'}, ...
'Controls',CreateStruct);
end
|
{"author": "cortex-lab", "repo": "allenCCF", "sha": "0bbff55fc906fd3f023da81ce1d0e4b8726d4fd0", "save_path": "github-repos/MATLAB/cortex-lab-allenCCF", "path": "github-repos/MATLAB/cortex-lab-allenCCF/allenCCF-0bbff55fc906fd3f023da81ce1d0e4b8726d4fd0/Browsing Functions/allen_ccf_2pi.m"}
|
module Main
import Collie
import Interface
import Data.Version
import System.Directory.Extra
import Command
%hide Collie.(.handleWith)
exitError : HasIO io => String -> io a
exitError err = do
  putStrLn ""
  putStrLn err
  putStrLn ""
  exitFailure

exitSuccess : HasIO io => String -> io a
exitSuccess msg = do
  putStrLn ""
  putStrLn msg
  putStrLn ""
  exitSuccess
--
-- Entrypoint
--
-- Handling more complex commands (with arguments and modifiers) first
-- makes compile times shorter.
handleCommand' : Command.idv ~~> IO ()
handleCommand' =
  [ const $ do putStrLn "Expected a subcommand."
               exitError idv.usage
  , "install" ::= [ (\args => let version = args.arguments
                                  -- Installing the LSP requires installing the API
                                  -- just as installing the API requires installing
                                  -- Idris.
                              in if args.modifiers.project "--lsp"
                                   then installLSPCommand version
                                   else if args.modifiers.project "--api"
                                          then installAPICommand version
                                          else installCommand version True
                    )
                  ]
  , "uninstall" ::= [ (\args => let version = args.arguments
                                in uninstallCommand version
                      )
                    ]
  , "select" ::= [ (\args => let version = args.arguments
                             in selectCommand version )
                 , "system" ::= [ const selectSystemCommand ]
                 ]
  , "--help" ::= [ const . exitSuccess $ idv.usage ]
  , "list" ::= [ const listVersionsCommand ]
  ]
main : IO ()
main = do
  Just _ <- inDir idvLocation $ idv.handleWith handleCommand'
    | Nothing => exitError "Could not access \{idvLocation}."
  pure ()
|
{"hexsha": "5ad97c534ca4b0f70dae15190aca648dba7a7dcb", "size": 1980, "ext": "idr", "lang": "Idris", "max_stars_repo_path": "cli/src/Main.idr", "max_stars_repo_name": "memoryruins/idv", "max_stars_repo_head_hexsha": "7631bd1c0bdea2cfb672fa178918b4e6191a738c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cli/src/Main.idr", "max_issues_repo_name": "memoryruins/idv", "max_issues_repo_head_hexsha": "7631bd1c0bdea2cfb672fa178918b4e6191a738c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cli/src/Main.idr", "max_forks_repo_name": "memoryruins/idv", "max_forks_repo_head_hexsha": "7631bd1c0bdea2cfb672fa178918b4e6191a738c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.0, "max_line_length": 85, "alphanum_fraction": 0.5136363636, "num_tokens": 393}
|
! path: $Source: /storm/rc1/cvsroot/rc/rrtmg_lw/src/mcica_subcol_gen_lw.1col.f90,v $
! author: $Author: mike $
! revision: $Revision: 1.5 $
! created: $Date: 2009/05/22 21:04:30 $
!
module mcica_subcol_gen_lw
! --------------------------------------------------------------------------
! |                                                                          |
! |  Copyright 2006-2009, Atmospheric & Environmental Research, Inc. (AER).  |
! |  This software may be used, copied, or redistributed as long as it is    |
! |  not sold and this copyright notice is reproduced on each copy made.     |
! |  This model is provided as is without any express or implied warranties. |
! |                       (http://www.rtweb.aer.com/)                        |
! |                                                                          |
! --------------------------------------------------------------------------
! Purpose: Create McICA stochastic arrays for cloud physical or optical properties.
! Two options are possible:
! 1) Input cloud physical properties: cloud fraction, ice and liquid water
! paths, ice fraction, and particle sizes. Output will be stochastic
! arrays of these variables. (inflag = 1)
! 2) Input cloud optical properties directly: cloud optical depth, single
! scattering albedo and asymmetry parameter. Output will be stochastic
! arrays of these variables. (inflag = 0; longwave scattering is not
! yet available, ssac and asmc are for future expansion)
! --------- Modules ----------
use parkind, only : im => kind_im, rb => kind_rb
use parrrtm, only : nbndlw, ngptlw
use rrlw_con, only: grav
use rrlw_wvn, only: ngb
use rrlw_vsn
implicit none
! public interfaces/functions/subroutines
public :: mcica_subcol_lw, generate_stochastic_clouds
contains
!------------------------------------------------------------------
! Public subroutines
!------------------------------------------------------------------
subroutine mcica_subcol_lw(iplon, nlayers, icld, ims, irng, play, &
cldfrac, ciwp, clwp, rei, rel, tauc, cldfmc, &
ciwpmc, clwpmc, reicmc, relqmc, taucmc)
! Control
integer(kind=im), intent(in) :: iplon ! column/longitude dimension
integer(kind=im), intent(in) :: nlayers ! number of model layers
integer(kind=im), intent(in) :: icld ! clear/cloud, cloud overlap flag
integer(kind=im), intent(in) :: ims ! mcica statistical loop index; also
! value for changing mcica permute seed
integer(kind=im), intent(inout) :: irng ! flag for random number generator
! 0 = kissvec
! 1 = Mersenne Twister
! Atmosphere
real(kind=rb), intent(in) :: play(:) ! layer pressures (mb)
! Dimensions: (nlayers)
! Atmosphere/clouds - cldprop
real(kind=rb), intent(in) :: cldfrac(:) ! layer cloud fraction
! Dimensions: (nlayers)
real(kind=rb), intent(in) :: tauc(:,:) ! in-cloud optical depth
! Dimensions: (nbndlw,nlayers)
! real(kind=rb), intent(in) :: ssac(:,:) ! in-cloud single scattering albedo
! Dimensions: (nbndlw,nlayers)
! real(kind=rb), intent(in) :: asmc(:,:) ! in-cloud asymmetry parameter
! Dimensions: (nbndlw,nlayers)
real(kind=rb), intent(in) :: ciwp(:) ! in-cloud ice water path
! Dimensions: (nlayers)
real(kind=rb), intent(in) :: clwp(:) ! in-cloud liquid water path
! Dimensions: (nlayers)
real(kind=rb), intent(in) :: rei(:) ! cloud ice particle size
! Dimensions: (nlayers)
real(kind=rb), intent(in) :: rel(:) ! cloud liquid particle size
! Dimensions: (nlayers)
! ----- Output -----
! Atmosphere/clouds - cldprmc [mcica]
real(kind=rb), intent(out) :: cldfmc(:,:) ! cloud fraction [mcica]
! Dimensions: (ngptlw,nlayers)
real(kind=rb), intent(out) :: ciwpmc(:,:) ! in-cloud ice water path [mcica]
! Dimensions: (ngptlw,nlayers)
real(kind=rb), intent(out) :: clwpmc(:,:) ! in-cloud liquid water path [mcica]
! Dimensions: (ngptlw,nlayers)
real(kind=rb), intent(out) :: relqmc(:) ! liquid particle size (microns)
! Dimensions: (nlayers)
real(kind=rb), intent(out) :: reicmc(:) ! ice particle size (microns)
! Dimensions: (nlayers)
real(kind=rb), intent(out) :: taucmc(:,:) ! in-cloud optical depth [mcica]
! Dimensions: (ngptlw,nlayers)
! real(kind=rb), intent(out) :: ssacmc(:,:) ! in-cloud single scattering albedo [mcica]
! Dimensions: (ngptlw,nlayers)
! real(kind=rb), intent(out) :: asmcmc(:,:) ! in-cloud asymmetry parameter [mcica]
! Dimensions: (ngptlw,nlayers)
! ----- Local -----
! Stochastic cloud generator variables [mcica]
integer(kind=im), parameter :: nsubclw = ngptlw ! number of sub-columns (g-point intervals)
integer(kind=im) :: permuteseed ! if the cloud generator is called multiple times,
! permute the seed between each call.
integer(kind=im) :: ilev ! loop index
real(kind=rb) :: pmid(nlayers) ! layer pressures (Pa)
! real(kind=rb) :: pdel(nlayers) ! layer pressure thickness (Pa)
! real(kind=rb) :: qi(nlayers) ! ice water (specific humidity)
! real(kind=rb) :: ql(nlayers) ! liq water (specific humidity)
! Return if clear sky; or stop if icld out of range
if (icld.eq.0) return
if (icld.lt.0.or.icld.gt.3) then
stop 'MCICA_SUBCOL: INVALID ICLD'
endif
! Set permute seed for random number generator
! For single column mode, permuteseed must be different for each (ims) sample performed
permuteseed = ims*nsubclw
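! (e.g. with ngptlw = 140 g-point intervals, sample ims = 2 starts the
! kissvec stream 280 draws further along than sample ims = 1)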
! Pass particle sizes to new arrays, no subcolumns for these properties yet
! Convert pressures from mb to Pa
reicmc(:nlayers) = rei(:nlayers)
relqmc(:nlayers) = rel(:nlayers)
pmid(:nlayers) = play(:nlayers)*1.e2_rb
! Convert input ice and liquid cloud water paths to specific humidity ice and liquid components
! cwp = (q * pdel * 1000.) / gravit)
! = (kg/kg * kg m-1 s-2 *1000.) / m s-2
! = (g m-2)
!
! q = (cwp * gravit) / (pdel *1000.)
! = (g m-2 * m s-2) / (kg m-1 s-2 * 1000.)
! = kg/kg
! do ilev = 1, nlayers
! qi(ilev) = (ciwp(ilev) * grav) / (pdel(ilev) * 1000._rb)
! ql(ilev) = (clwp(ilev) * grav) / (pdel(ilev) * 1000._rb)
! enddo
! Generate the stochastic subcolumns of cloud optical properties for the longwave;
call generate_stochastic_clouds (nlayers, icld, irng, pmid, cldfrac, clwp, ciwp, tauc, &
cldfmc, clwpmc, ciwpmc, taucmc, permuteseed)
end subroutine mcica_subcol_lw
!-------------------------------------------------------------------------------------------------
subroutine generate_stochastic_clouds(nlayers, icld, irng, pmid, cld, clwp, ciwp, tauc, &
cld_stoch, clwp_stoch, ciwp_stoch, tauc_stoch, changeSeed)
!-------------------------------------------------------------------------------------------------
!----------------------------------------------------------------------------------------------------------------
! ---------------------
! Contact: Cecile Hannay (hannay@ucar.edu)
!
! Original code: Based on Raisanen et al., QJRMS, 2004.
!
! Modifications: Generalized for use with RRTMG and added Mersenne Twister as the default
! random number generator, which can be changed to the optional kissvec random number generator
! with flag 'irng'. Some extra functionality has been commented or removed.
! Michael J. Iacono, AER, Inc., February 2007
!
! Given a profile of cloud fraction, cloud water and cloud ice, we produce a set of subcolumns.
! Each layer within each subcolumn is homogeneous, with cloud fraction equal to zero or one
! and uniform cloud liquid and cloud ice concentration.
! The ensemble as a whole reproduces the probability function of cloud liquid and ice within each layer
! and obeys an overlap assumption in the vertical.
!
! Overlap assumption:
! The clouds are consistent with 4 overlap assumptions: random, maximum, maximum-random and exponential.
! The default option is maximum-random (option 2)
! The options are: 1=random overlap, 2=maximum-random, 3=maximum overlap, 4=exponential overlap
! This is set with the variable "overlap"
!mji - Exponential overlap option (overlap=4) has been deactivated in this version
! The exponential overlap also uses a length scale, Zo. (real, parameter :: Zo = 2500. )
!
! Seed:
! If the stochastic cloud generator is called several times during the same timestep,
! one should change the seed between the calls to ensure that the subcolumns are different.
! This is done by changing the argument 'changeSeed'
! For example, if one wants to create a set of columns for the shortwave and another set for the longwave,
! use 'changeSeed = 1' for the first call and 'changeSeed = 2' for the second call
!
! PDF assumption:
! We can use arbitrarily complicated PDFs.
! In the present version, we produce homogeneous clouds (the simplest case).
! Future developments include using the PDF scheme of Ben Johnson.
!
! History file:
! Option to add diagnostics variables in the history file. (using FINCL in the namelist)
! nsubcol = number of subcolumns
! overlap = overlap type (1-3)
! Zo = length scale
! CLOUD_S = mean of the subcolumn cloud fraction ('_S" means Stochastic)
! CLDLIQ_S = mean of the subcolumn cloud water
! CLDICE_S = mean of the subcolumn cloud ice
!
! Note:
! Here we force the cloud condensate to be consistent with the cloud fraction,
! i.e. we only have cloud condensate when the cell is cloudy.
! In CAM: The cloud condensate and the cloud fraction are obtained from 2 different equations
! and the 2 quantities can be inconsistent (i.e. CAM can produce cloud fraction
! without cloud condensate or the opposite).
!---------------------------------------------------------------------------------------------------------------
use mcica_random_numbers
! The Mersenne Twister random number engine
use MersenneTwister, only: randomNumberSequence, &
new_RandomNumberSequence, getRandomReal
type(randomNumberSequence) :: randomNumbers
! -- Arguments
integer(kind=im), intent(in) :: nlayers ! number of layers
integer(kind=im), intent(in) :: icld ! clear/cloud, cloud overlap flag
integer(kind=im), intent(inout) :: irng ! flag for random number generator
! 0 = kissvec
! 1 = Mersenne Twister
integer(kind=im), optional, intent(in) :: changeSeed ! allows permuting the seed
! Column state (cloud fraction, cloud water, cloud ice) + variables needed to read physics state
real(kind=rb), intent(in) :: pmid(:) ! layer pressure (Pa)
! Dimensions: (nlayers)
real(kind=rb), intent(in) :: cld(:) ! cloud fraction
! Dimensions: (nlayers)
real(kind=rb), intent(in) :: clwp(:) ! in-cloud liquid water path
! Dimensions: (nlayers)
real(kind=rb), intent(in) :: ciwp(:) ! in-cloud ice water path
! Dimensions: (nlayers)
real(kind=rb), intent(in) :: tauc(:,:) ! in-cloud optical depth
! Dimensions: (nbndlw,nlayers)
! real(kind=rb), intent(in) :: ssac(:,:) ! in-cloud single scattering albedo
! Dimensions: (nbndlw,nlayers)
! inactive - for future expansion
! real(kind=rb), intent(in) :: asmc(:,:) ! in-cloud asymmetry parameter
! Dimensions: (nbndlw,nlayers)
! inactive - for future expansion
real(kind=rb), intent(out) :: cld_stoch(:,:) ! subcolumn cloud fraction
! Dimensions: (ngptlw,nlayers)
real(kind=rb), intent(out) :: clwp_stoch(:,:) ! subcolumn in-cloud liquid water path
! Dimensions: (ngptlw,nlayers)
real(kind=rb), intent(out) :: ciwp_stoch(:,:) ! subcolumn in-cloud ice water path
! Dimensions: (ngptlw,nlayers)
real(kind=rb), intent(out) :: tauc_stoch(:,:) ! subcolumn in-cloud optical depth
! Dimensions: (ngptlw,nlayers)
! real(kind=rb), intent(out) :: ssac_stoch(:,:) ! subcolumn in-cloud single scattering albedo
! Dimensions: (ngptlw,nlayers)
! inactive - for future expansion
! real(kind=rb), intent(out) :: asmc_stoch(:,:) ! subcolumn in-cloud asymmetry parameter
! Dimensions: (ngptlw,nlayers)
! inactive - for future expansion
! -- Local variables
integer(kind=im), parameter :: nsubcol = ngptlw ! number of sub-columns (g-point intervals)
real(kind=rb) :: cldf(nlayers) ! cloud fraction
! Mean over the subcolumns (cloud fraction, cloud water , cloud ice) - inactive
! real(kind=rb) :: mean_cld_stoch(nlayers) ! cloud fraction
! real(kind=rb) :: mean_clwp_stoch(nlayers) ! cloud water
! real(kind=rb) :: mean_ciwp_stoch(nlayers) ! cloud ice
! real(kind=rb) :: mean_tauc_stoch(nlayers) ! cloud optical depth
! real(kind=rb) :: mean_ssac_stoch(nlayers) ! cloud single scattering albedo
! real(kind=rb) :: mean_asmc_stoch(nlayers) ! cloud asymmetry parameter
! Set overlap
integer(kind=im) :: overlap ! 1 = random overlap, 2 = maximum/random,
! 3 = maximum overlap,
! real(kind=rb), parameter :: Zo = 2500._rb ! length scale (m)
! real(kind=rb) :: zm(nlayers) ! Height of midpoints (above surface)
! real(kind=rb), dimension(nlayers) :: alpha=0.0_rb ! overlap parameter
! Constants (min value for cloud fraction and cloud water and ice)
real(kind=rb), parameter :: cldmin = 1.0e-20_rb ! min cloud fraction
! real(kind=rb), parameter :: qmin = 1.0e-10_rb ! min cloud water and cloud ice (not used)
! Variables related to random number and seed
real(kind=rb), dimension(nsubcol, nlayers) :: CDF, CDF2 ! random numbers
integer(kind=im) :: seed1, seed2, seed3, seed4 ! seed to create random number (kissvec)
real(kind=rb) :: rand_num ! random number (kissvec)
integer(kind=im) :: iseed ! seed to create random number (Mersenne Twister)
real(kind=rb) :: rand_num_mt ! random number (Mersenne Twister)
! Flag to identify cloud fraction in subcolumns
logical, dimension(nsubcol, nlayers) :: iscloudy ! flag that says whether a gridbox is cloudy
! Indices
integer(kind=im) :: ilev, isubcol, i, n ! indices
!------------------------------------------------------------------------------------------
! Ensure irng is a valid flag: any value other than 0 selects the Mersenne Twister (irng = 1)
if (irng .ne. 0) irng = 1
! Pass input cloud overlap setting to local variable
overlap = icld
! Ensure that cloud fractions are in bounds
do ilev = 1, nlayers
cldf(ilev) = cld(ilev)
if (cldf(ilev) < cldmin) then
cldf(ilev) = 0._rb
endif
enddo
! ----- Create seed --------
! Advance random number generator by changeSeed values
if (irng.eq.0) then
! For kissvec, create a seed that depends on the state of the columns. Maybe not the best way, but it works.
! Must use pmid from bottom four layers.
if (pmid(1).lt.pmid(2)) then
stop 'MCICA_SUBCOL: KISSVEC SEED GENERATOR REQUIRES PMID FROM BOTTOM FOUR LAYERS.'
endif
seed1 = (pmid(1) - int(pmid(1))) * 1000000000_im
seed2 = (pmid(2) - int(pmid(2))) * 1000000000_im
seed3 = (pmid(3) - int(pmid(3))) * 1000000000_im
seed4 = (pmid(4) - int(pmid(4))) * 1000000000_im
do i=1,changeSeed
call kissvec(seed1, seed2, seed3, seed4, rand_num)
enddo
elseif (irng.eq.1) then
randomNumbers = new_RandomNumberSequence(seed = changeSeed)
endif
! ------ Apply overlap assumption --------
! generate the random numbers
select case (overlap)
case(1)
! Random overlap
! i) pick a random value at every level
if (irng.eq.0) then
do isubcol = 1,nsubcol
do ilev = 1,nlayers
call kissvec(seed1, seed2, seed3, seed4, rand_num) ! we get different random number for each level
CDF(isubcol, ilev) = rand_num
enddo
enddo
elseif (irng.eq.1) then
do isubcol = 1, nsubcol
do ilev = 1, nlayers
rand_num_mt = getRandomReal(randomNumbers)
CDF(isubcol,ilev) = rand_num_mt
enddo
enddo
endif
case(2)
! Maximum-Random overlap
! i) pick a random number for top layer.
! ii) walk down the column:
! - if the layer above is cloudy, we use the same random number as in the layer above
! - if the layer above is clear, we use a new random number
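! e.g. with cldf(above) = 0.6 the cloudy threshold is 1-0.6 = 0.4: a draw of
! 0.7 above (cloudy) is reused in this layer, whereas a draw of 0.3 above
! (clear) means this layer's fresh draw is rescaled by (1-0.6) instead.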
if (irng.eq.0) then
do isubcol = 1,nsubcol
do ilev = 1,nlayers
call kissvec(seed1, seed2, seed3, seed4, rand_num)
CDF(isubcol, ilev) = rand_num
enddo
enddo
elseif (irng.eq.1) then
do isubcol = 1, nsubcol
do ilev = 1, nlayers
rand_num_mt = getRandomReal(randomNumbers)
CDF(isubcol,ilev) = rand_num_mt
enddo
enddo
endif
do ilev = 2,nlayers
do isubcol = 1, nsubcol
if (CDF(isubcol, ilev-1) > 1._rb - cldf(ilev-1) ) then
CDF(isubcol,ilev) = CDF(isubcol,ilev-1)
else
CDF(isubcol,ilev) = CDF(isubcol,ilev) * (1._rb - cldf(ilev-1))
endif
enddo
enddo
case(3)
! Maximum overlap
! i) pick the same random number at every level
if (irng.eq.0) then
do isubcol = 1,nsubcol
call kissvec(seed1, seed2, seed3, seed4, rand_num)
do ilev = 1,nlayers
CDF(isubcol, ilev) = rand_num
enddo
enddo
elseif (irng.eq.1) then
do isubcol = 1, nsubcol
rand_num_mt = getRandomReal(randomNumbers)
do ilev = 1, nlayers
CDF(isubcol,ilev) = rand_num_mt
enddo
enddo
endif
! case(4) - inactive
! ! Exponential overlap: weighting between maximum and random overlap increases with the distance.
! ! The random numbers for exponential overlap verify:
! ! j=1 RAN(j)=RND1
! ! j>1 if RND1 < alpha(j,j-1) => RAN(j) = RAN(j-1)
! ! RAN(j) = RND2
! ! alpha is obtained from the equation
! ! alpha = exp(- (Zi-Zj-1)/Zo) where Zo is a characteristic length scale
! ! compute alpha
! zm = state%zm
! alpha(:, 1) = 0._rb
! do ilev = 2,nlayers
! alpha(:, ilev) = exp( -( zm (:, ilev-1) - zm (:, ilev)) / Zo)
! end do
! ! generate 2 streams of random numbers
! do isubcol = 1,nsubcol
! do ilev = 1,nlayers
! call kissvec(seed1, seed2, seed3, seed4, rand_num)
! CDF(isubcol, ilev) = rand_num
! call kissvec(seed1, seed2, seed3, seed4, rand_num)
! CDF2(isubcol, ilev) = rand_num
! end do
! end do
! ! generate random numbers
! do ilev = 2,nlayers
! where (CDF2(:, ilev) < spread(alpha (:,ilev), dim=1, nCopies=nsubcol) )
! CDF(:,ilev) = CDF(:,ilev-1)
! end where
! end do
end select
! -- generate subcolumns for homogeneous clouds -----
do ilev = 1,nlayers
iscloudy(:,ilev) = (CDF(:,ilev) >= 1._rb - spread(cldf(ilev), dim=1, nCopies=nsubcol) )
enddo
! where the subcolumn is cloudy, the subcolumn cloud fraction is 1;
! where the subcolumn is not cloudy, the subcolumn cloud fraction is 0;
! where there is a cloud, define the subcolumn cloud properties,
! otherwise set these to zero
do ilev = 1,nlayers
do isubcol = 1, nsubcol
if ( iscloudy(isubcol,ilev) ) then
cld_stoch(isubcol,ilev) = 1._rb
clwp_stoch(isubcol,ilev) = clwp(ilev)
ciwp_stoch(isubcol,ilev) = ciwp(ilev)
n = ngb(isubcol)
tauc_stoch(isubcol,ilev) = tauc(n,ilev)
! ssac_stoch(isubcol,ilev) = ssac(n,ilev)
! asmc_stoch(isubcol,ilev) = asmc(n,ilev)
else
cld_stoch(isubcol,ilev) = 0._rb
clwp_stoch(isubcol,ilev) = 0._rb
ciwp_stoch(isubcol,ilev) = 0._rb
tauc_stoch(isubcol,ilev) = 0._rb
! ssac_stoch(isubcol,ilev) = 1._rb
! asmc_stoch(isubcol,ilev) = 1._rb
endif
enddo
enddo
! -- compute the means of the subcolumns ---
! mean_cld_stoch(:) = 0._rb
! mean_clwp_stoch(:) = 0._rb
! mean_ciwp_stoch(:) = 0._rb
! mean_tauc_stoch(:) = 0._rb
! mean_ssac_stoch(:) = 0._rb
! mean_asmc_stoch(:) = 0._rb
! do i = 1, nsubcol
! mean_cld_stoch(:) = cld_stoch(i,:) + mean_cld_stoch(:)
! mean_clwp_stoch( :) = clwp_stoch( i,:) + mean_clwp_stoch( :)
! mean_ciwp_stoch( :) = ciwp_stoch( i,:) + mean_ciwp_stoch( :)
! mean_tauc_stoch( :) = tauc_stoch( i,:) + mean_tauc_stoch( :)
! mean_ssac_stoch( :) = ssac_stoch( i,:) + mean_ssac_stoch( :)
! mean_asmc_stoch( :) = asmc_stoch( i,:) + mean_asmc_stoch( :)
! end do
! mean_cld_stoch(:) = mean_cld_stoch(:) / nsubcol
! mean_clwp_stoch( :) = mean_clwp_stoch( :) / nsubcol
! mean_ciwp_stoch( :) = mean_ciwp_stoch( :) / nsubcol
! mean_tauc_stoch( :) = mean_tauc_stoch( :) / nsubcol
! mean_ssac_stoch( :) = mean_ssac_stoch( :) / nsubcol
! mean_asmc_stoch( :) = mean_asmc_stoch( :) / nsubcol
end subroutine generate_stochastic_clouds
!------------------------------------------------------------------
! Private subroutines
!------------------------------------------------------------------
!--------------------------------------------------------------------------------------------------
subroutine kissvec(seed1,seed2,seed3,seed4,ran_arr)
!--------------------------------------------------------------------------------------------------
! public domain code
! made available from http://www.fortran.com/
! downloaded by pjr on 03/16/04
! converted to vector form, functions inlined by pjr,mvr on 05/10/2004
! The KISS (Keep It Simple Stupid) random number generator. Combines:
! (1) The congruential generator x(n)=69069*x(n-1)+1327217885, period 2^32.
! (2) A 3-shift shift-register generator, period 2^32-1,
! (3) Two 16-bit multiply-with-carry generators, period 597273182964842497>2^59
! Overall period>2^123;
!
real(kind=rb), intent(inout) :: ran_arr
integer(kind=im), intent(inout) :: seed1,seed2,seed3,seed4
! integer(kind=im) :: i,sz,kiss
integer(kind=im) :: kiss
integer(kind=im) :: m, k, n
! inline function
m(k, n) = ieor (k, ishft (k, n) )
! sz = size(ran_arr)
! do i = 1, sz
seed1 = 69069_im * seed1 + 1327217885_im
seed2 = m (m (m (seed2, 13_im), - 17_im), 5_im)
seed3 = 18000_im * iand (seed3, 65535_im) + ishft (seed3, - 16_im)
seed4 = 30903_im * iand (seed4, 65535_im) + ishft (seed4, - 16_im)
kiss = seed1 + seed2 + ishft (seed3, 16_im) + seed4
ran_arr = kiss*2.328306e-10_rb + 0.5_rb
! end do
end subroutine kissvec
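! Usage sketch (illustrative seed values only): the four seeds are updated in
! place, so repeated calls advance the stream:
!   seed1 = 123_im; seed2 = 456_im; seed3 = 789_im; seed4 = 101112_im
!   call kissvec(seed1, seed2, seed3, seed4, rand_num)  ! rand_num in [0,1)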
end module mcica_subcol_gen_lw
|
{"hexsha": "1af55f35ec8ac551799ddd8b78f7dfb2ff3d9e4e", "size": 26312, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "rrtmg_lw/src/mcica_subcol_gen_lw.1col.f90", "max_stars_repo_name": "danielkoll/PyRADS_vs_RRTMG", "max_stars_repo_head_hexsha": "72361b22fbebd96022f9082c306ac30fb8f46b6b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "rrtmg_lw/src/mcica_subcol_gen_lw.1col.f90", "max_issues_repo_name": "danielkoll/PyRADS_vs_RRTMG", "max_issues_repo_head_hexsha": "72361b22fbebd96022f9082c306ac30fb8f46b6b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rrtmg_lw/src/mcica_subcol_gen_lw.1col.f90", "max_forks_repo_name": "danielkoll/PyRADS_vs_RRTMG", "max_forks_repo_head_hexsha": "72361b22fbebd96022f9082c306ac30fb8f46b6b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-13T08:49:58.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-13T08:49:58.000Z", "avg_line_length": 47.6666666667, "max_line_length": 117, "alphanum_fraction": 0.5239054424, "num_tokens": 6805}
|
from nose.tools import raises

import networkx as nx


# smoke tests for exceptions
@raises(nx.NetworkXException)
def test_raises_networkx_exception():
    raise nx.NetworkXException


@raises(nx.NetworkXError)
def test_raises_networkx_error():
    raise nx.NetworkXError


@raises(nx.NetworkXPointlessConcept)
def test_raises_networkx_pointless_concept():
    raise nx.NetworkXPointlessConcept


@raises(nx.NetworkXAlgorithmError)
def test_raises_networkx_algorithm_error():
    raise nx.NetworkXAlgorithmError


@raises(nx.NetworkXUnfeasible)
def test_raises_networkx_unfeasible():
    raise nx.NetworkXUnfeasible


@raises(nx.NetworkXNoPath)
def test_raises_networkx_no_path():
    raise nx.NetworkXNoPath


@raises(nx.NetworkXUnbounded)
def test_raises_networkx_unbounded():
    raise nx.NetworkXUnbounded
{"hexsha": "78923ef7c8d57158d71fd027baf4317899917572", "size": 837, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/prism-fruit/Games-DQL/examples/games/car/networkx/tests/test_exceptions.py", "max_stars_repo_name": "kushgrover/apt-vs-dift", "max_stars_repo_head_hexsha": "250f64e6c442f6018cab65ec6979d9568a842f57", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/prism-fruit/Games-DQL/examples/games/car/networkx/tests/test_exceptions.py", "max_issues_repo_name": "kushgrover/apt-vs-dift", "max_issues_repo_head_hexsha": "250f64e6c442f6018cab65ec6979d9568a842f57", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/prism-fruit/Games-DQL/examples/games/car/networkx/tests/test_exceptions.py", "max_forks_repo_name": "kushgrover/apt-vs-dift", "max_forks_repo_head_hexsha": "250f64e6c442f6018cab65ec6979d9568a842f57", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.6176470588, "max_line_length": 46, "alphanum_fraction": 0.7897252091, "include": true, "reason": "import networkx", "num_tokens": 187}
|
[STATEMENT]
lemma measurable_bind2:
assumes "f \<in> measurable M (subprob_algebra N)" and "g \<in> measurable N (subprob_algebra R)"
shows "(\<lambda>x. bind (f x) g) \<in> measurable M (subprob_algebra R)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. f x \<bind> g) \<in> M \<rightarrow>\<^sub>M subprob_algebra R
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
f \<in> M \<rightarrow>\<^sub>M subprob_algebra N
g \<in> N \<rightarrow>\<^sub>M subprob_algebra R
goal (1 subgoal):
1. (\<lambda>x. f x \<bind> g) \<in> M \<rightarrow>\<^sub>M subprob_algebra R
[PROOF STEP]
by (intro measurable_bind' measurable_const) auto
|
{"llama_tokens": 252, "file": null, "length": 2}
|
#!/usr/bin/env python3
import sys
import numpy
import pylab
pylab.rcParams["font.size"]=8
pylab.rcParams["legend.fontsize"]=8
#pylab.rcParams["lines.linewidth"]=1
#pylab.rcParams["axes.linewidth"]=2
#pylab.rcParams["axes.labelsize"]="large"
#pylab.rcParams["axes.labelweight"]="bold"
pylab.rcParams["xtick.major.size"]=0
pylab.rcParams["xtick.minor.size"]=0
pylab.rcParams["ytick.major.size"]=0
pylab.rcParams["ytick.minor.size"]=0
#pylab.rcParams["xtick.direction"]="out"
#pylab.rcParams["ytick.direction"]="out"
#pylab.rcParams["figure.figsize"]=(3, 3)
#activity
colormap="hot" #"jet", "bwr"
spike=numpy.loadtxt(sys.argv[1], delimiter=",")
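# assumed input layout: column 0 = time in ms, columns 1: = per-neuron
# activity (converted to seconds / Hz by the scalings below)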
pylab.clf()
pylab.figure(figsize=(3,2))
pylab.imshow(spike[:, 1:].T, aspect="auto", interpolation="none", cmap=colormap, extent=[spike[0,0]/1000.0, spike[-1,0]/1000.0, len(spike[0,1:]), 1])
#limit=numpy.max(numpy.abs(xE[:,1:]))
#pylab.clim([-limit, limit])
pylab.colorbar()
pylab.xlabel("Time [s]")
pylab.ylabel("Neuron #")
pylab.tight_layout()
pylab.savefig("spike.pdf")
#part
part_len=10*100
part_num=int(len(spike[:,0])//part_len)
for i in range(part_num):
    pylab.clf()
    pylab.figure(figsize=(3,4))
    pylab.subplot(2,1,1)
    pylab.imshow(spike[i*part_len:(i+1)*part_len, 1:].T, aspect="auto", interpolation="none", cmap=colormap, extent=[spike[i*part_len,0]/1000.0, spike[(i+1)*part_len-1,0]/1000.0, len(spike[0,1:]), 1])
    pylab.colorbar()
    pylab.xlabel("Time [s]")
    pylab.ylabel("Neuron #")
    pylab.tight_layout()
    pylab.subplot(2,1,2)
    # convert time from ms to s to match the axis label
    pylab.plot(spike[i*part_len:(i+1)*part_len, 0]/1000.0, numpy.mean(spike[i*part_len:(i+1)*part_len, 1:]*1000.0, axis=1))
    pylab.xlabel("Time [s]")
    pylab.ylabel("Mean rate [Hz]")
    pylab.tight_layout()
    pylab.savefig("spike_part"+str(i)+".pdf")
|
{"hexsha": "8e8c70a570140873f2f3e5b2b1133e4ec943ffc0", "size": 1773, "ext": "py", "lang": "Python", "max_stars_repo_path": "Fig1/ADPmod/plot_spike.py", "max_stars_repo_name": "TatsuyaHaga/reversereplaymodel_codes", "max_stars_repo_head_hexsha": "503d545449efab603e18d224fc2f94158d967530", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2019-01-12T14:16:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-07T20:51:03.000Z", "max_issues_repo_path": "Fig2/symmetric_noSTDmod/plot_spike.py", "max_issues_repo_name": "elifesciences-publications/reversereplaymodel_codes", "max_issues_repo_head_hexsha": "579009d260f32b259994d77c8a66877cf6304dee", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Fig2/symmetric_noSTDmod/plot_spike.py", "max_forks_repo_name": "elifesciences-publications/reversereplaymodel_codes", "max_forks_repo_head_hexsha": "579009d260f32b259994d77c8a66877cf6304dee", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.1052631579, "max_line_length": 200, "alphanum_fraction": 0.6807670615, "include": true, "reason": "import numpy", "num_tokens": 577}
|
"""Mass budget-related quantities."""
try:
from animal_spharm import SpharmInterface
except ImportError:
pass
from aospy.constants import grav
from aospy.utils.vertcoord import (d_deta_from_pfull, d_deta_from_phalf,
to_pfull_from_phalf, dp_from_ps, int_dp_g,
integrate)
import numpy as np
from .. import PFULL_STR, PHALF_STR, PLEVEL_STR, TIME_STR
from .numerics import (d_dx_from_latlon, d_dy_from_lat, d_dp_from_p,
d_dx_at_const_p_from_eta, d_dy_at_const_p_from_eta)
from .advection import horiz_advec, horiz_advec_spharm
from .tendencies import (time_tendency_first_to_last,
time_tendency_each_timestep)
def horiz_divg(u, v, radius):
    """Mass horizontal divergence."""
    du_dx = d_dx_from_latlon(u, radius)
    dv_dy = d_dy_from_lat(v, radius, vec_field=True)
    return du_dx + dv_dy


def horiz_divg_spharm(u, v, radius):
    sph_int = SpharmInterface(u, v, rsphere=radius, make_vectorwind=True)
    del u, v
    divg = sph_int.vectorwind.divergence()
    return sph_int.to_xarray(divg)


def horiz_divg_from_eta(u, v, ps, radius, bk, pk):
    return (d_dx_at_const_p_from_eta(u, ps, radius, bk, pk) +
            d_dy_at_const_p_from_eta(v, ps, radius, bk, pk, vec_field=True))


def vert_divg(omega, p):
    """Mass vertical divergence."""
    return d_dp_from_p(omega, p)


def divg_3d(u, v, omega, radius, p):
    """Total (3-D) divergence.  Should nearly equal 0 by continuity."""
    return horiz_divg(u, v, radius) + vert_divg(omega, p)


def dp(ps, bk, pk, arr):
    """Pressure thickness of hybrid coordinate levels from surface pressure."""
    return dp_from_ps(bk, pk, ps, arr[PFULL_STR])


def mass_column(ps):
    """Total mass per square meter of atmospheric column."""
    return ps / grav.value


def mass_column_integral(bk, pk, ps):
    """
    Total mass per square meter of atmospheric column.

    Explicitly computed by integrating over pressure, rather than implicitly
    using surface pressure.  Useful for checking if model data conserves mass.
    """
    dp = dp_from_ps(bk, pk, ps)
    return dp.sum(dim=PFULL_STR)
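# A minimal, hedged consistency check of the two column-mass measures above
# (`bk`, `pk`, and `ps` are hypothetical xarray inputs on the model's hybrid
# levels, not defined in this module):
#
#     implicit = mass_column(ps)                          # ps / g
#     explicit = mass_column_integral(bk, pk, ps) / grav.value
#     # the two should agree up to vertical-discretization error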
def mass_column_source(evap, precip):
    """Source term of column mass budget."""
    return grav.value * (evap - precip)


def mass_column_divg(u, v, radius, dp):
    """Horizontal divergence of vertically integrated flow."""
    u_int = integrate(u, dp, is_pressure=True)
    v_int = integrate(v, dp, is_pressure=True)
    return horiz_divg(u_int, v_int, radius)


def mass_column_divg_spharm(u, v, radius, dp):
    """Horizontal divergence of vertically integrated flow."""
    u_int = integrate(u, dp, is_pressure=True)
    v_int = integrate(v, dp, is_pressure=True)
    return horiz_divg_spharm(u_int, v_int, radius)


def budget_residual(tendency, transport, source=None, freq='1M'):
    """Compute residual between tendency and transport terms.

    Resamples the transport and source terms to the specified frequency,
    since tendencies are often computed at monthly intervals while the
    transport is at much higher frequency (e.g. 3- or 6-hourly).
    """
    resid = (tendency +
             transport.resample(freq, TIME_STR, how='mean').dropna(TIME_STR))
    if source is not None:
        resid -= source.resample(freq, TIME_STR, how='mean').dropna(TIME_STR)
    return resid
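# Hedged usage sketch: with monthly tendencies and 6-hourly transport,
# `budget_residual(tend, transport, freq='1M')` first averages the transport
# (and the source, if given) to monthly means before differencing.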
def mass_column_budget_lhs(ps, u, v, radius, dp, freq='1M'):
    """Tendency plus flux terms in the column-integrated mass budget.

    Theoretically the sum of the tendency and transport terms exactly equals
    the source term; however, artifacts introduced by numerics and other
    things yield a residual.
    """
    # tendency = time_tendency_first_to_last(ps, freq=freq)
    tendency = time_tendency_each_timestep(ps)
    transport = mass_column_divg_spharm(u, v, radius, dp)
    return budget_residual(tendency, transport, freq=freq)


def mass_column_budget_with_adj_lhs(ps, u, v, q, radius, dp, freq='1M'):
    """Tendency plus flux terms in the column-integrated mass budget.

    Theoretically the sum of the tendency and transport terms exactly equals
    the source term; however, artifacts introduced by numerics and other
    things yield a residual.
    """
    tendency = time_tendency_first_to_last(ps, freq=freq)
    transport = mass_column_divg_adj(u, v, q, ps, radius, dp)
    return budget_residual(tendency, transport, freq=freq)


def mass_column_budget_residual(ps, u, v, evap, precip, radius, dp, freq='1M'):
    """Residual in the mass budget.

    Theoretically the sum of the tendency and transport terms exactly equals
    the source term; however, artifacts introduced by numerics and other
    things yield a residual.
    """
    # tendency = time_tendency_first_to_last(ps, freq=freq)
    tendency = time_tendency_each_timestep(ps)
    transport = mass_column_divg_spharm(u, v, radius, dp)
    source = mass_column_source(evap, precip)
    return tendency + transport - source
    # return budget_residual(tendency, transport, source, freq=freq)
def uv_column_budget_adjustment(u, v, residual, col_integral, radius):
    """Generic column budget conservation adjustment to apply to horiz wind."""
    for p_str in [PFULL_STR, PHALF_STR, PLEVEL_STR]:
        if hasattr(u, p_str):
            dim = p_str
            break
    else:
        raise AttributeError("Couldn't find vertical dimension "
                             "of {}".format(u))
    sph_int = SpharmInterface(u.isel(**{dim: 0}), v.isel(**{dim: 0}),
                              rsphere=radius, make_spharmt=True, squeeze=True)
    # Assume residual stems entirely from divergent flow.
    resid_spectral = SpharmInterface.prep_for_spharm(residual)
    resid_spectral = sph_int.spharmt.grdtospec(resid_spectral)
    vort_spectral = np.zeros_like(resid_spectral)
    u_adj, v_adj = sph_int.spharmt.getuv(vort_spectral, resid_spectral)
    u_arr, v_arr = sph_int.to_xarray(u_adj), sph_int.to_xarray(v_adj)
    return u_arr / col_integral, v_arr / col_integral
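# A hedged reading of the adjustment above: the column residual is attributed
# entirely to the irrotational part of the flow -- the vorticity spectral
# coefficients are zeroed, so `getuv` returns the purely divergent wind whose
# column divergence matches the residual.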
def uv_mass_adjustment(ps, u, v, evap, precip, radius, dp, freq='1M'):
    """Adjustment to horizontal winds to enforce column mass budget closure."""
    residual = mass_column_budget_residual(ps, u, v, evap, precip, radius, dp,
                                           freq=freq)
    return uv_column_budget_adjustment(u, v, residual, ps, radius)


def uv_mass_adjusted(ps, u, v, evap, precip, radius, dp, freq='1M'):
    """Horizontal winds adjusted to impose column mass budget closure."""
    u_adj, v_adj = uv_mass_adjustment(ps, u, v, evap, precip, radius, dp,
                                      freq=freq)
    return u - u_adj, v - v_adj


def u_mass_adjustment(ps, u, v, evap, precip, radius, dp, freq='1M'):
    """Adjustment to zonal wind to enforce column mass budget closure."""
    u_adj, _ = uv_mass_adjustment(ps, u, v, evap, precip, radius, dp,
                                  freq=freq)
    return u_adj


def v_mass_adjustment(ps, u, v, evap, precip, radius, dp, freq='1M'):
    """Adjustment to meridional wind to enforce column mass budget closure."""
    _, v_adj = uv_mass_adjustment(ps, u, v, evap, precip, radius, dp,
                                  freq=freq)
    return v_adj


def u_mass_adjusted(ps, u, v, evap, precip, radius, dp, freq='1M'):
    """Zonal wind adjusted to impose column mass budget closure."""
    u_adj, _ = uv_mass_adjusted(ps, u, v, evap, precip, radius, dp, freq=freq)
    return u_adj


def v_mass_adjusted(ps, u, v, evap, precip, radius, dp, freq='1M'):
    """Meridional wind adjusted to impose column mass budget closure."""
    _, v_adj = uv_mass_adjusted(ps, u, v, evap, precip, radius, dp, freq=freq)
    return v_adj
def mass_column_divg_adj(ps, u, v, evap, precip, radius, dp, freq='1M'):
    u_adj, v_adj = uv_mass_adjusted(ps, u, v, evap, precip, radius, dp,
                                    freq=freq)
    return mass_column_divg_spharm(u_adj, v_adj, radius, dp)


def mass_column_budget_adj_residual(ps, u, v, evap, precip, radius, dp,
                                    freq='1M'):
    tendency = time_tendency_each_timestep(ps)
    u_adj, v_adj = uv_mass_adjusted(ps, u, v, evap, precip, radius, dp,
                                    freq=freq)
    transport = mass_column_divg_spharm(u_adj, v_adj, radius, dp)
    source = mass_column_source(evap, precip)
    return tendency + transport - source


def column_flux_divg(arr, u, v, radius, dp):
    """Column flux divergence, with the field defined per unit mass of air."""
    return horiz_divg_spharm(int_dp_g(arr*u, dp), int_dp_g(arr*v, dp), radius)
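# Hedged usage sketch (`sphum`, `u`, `v`, and `dp` are hypothetical
# xarray.DataArrays on the model grid, not defined here):
#
#     mfd = column_flux_divg(sphum, u, v, radius, dp)  # moisture flux divg.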
def column_flux_divg_adj(arr, ps, u, v, evap, precip, radius, dp, freq='1M'):
    """Column flux divergence, with the field defined per unit mass of air."""
    u_adj, v_adj = uv_mass_adjusted(ps, u, v, evap, precip, radius, dp,
                                    freq=freq)
    return horiz_divg_spharm(int_dp_g(arr*u_adj, dp), int_dp_g(arr*v_adj, dp),
                             radius)


def horiz_divg_mass_adj(u, v, evap, precip, ps, radius, dp):
    u_adj, v_adj = uv_mass_adjusted(ps, u, v, evap, precip, radius, dp)
    return horiz_divg(u_adj, v_adj, radius)


def horiz_divg_mass_adj_spharm(u, v, evap, precip, ps, radius, dp):
    u_adj, v_adj = uv_mass_adjusted(ps, u, v, evap, precip, radius, dp)
    return horiz_divg_spharm(u_adj, v_adj, radius)


def horiz_divg_mass_adj_from_eta(u, v, evap, precip, ps, radius, dp, bk, pk):
    """Mass-balance adjusted horizontal divergence from model coordinates."""
    u_adj, v_adj = uv_mass_adjusted(ps, u, v, evap, precip, radius, dp)
    divg_eta = horiz_divg_spharm(u_adj, v_adj, radius)
    du_deta, dv_deta = d_deta_from_pfull(u_adj), d_deta_from_pfull(v_adj)
    pfull_coord = u[PFULL_STR]
    bk_at_pfull = to_pfull_from_phalf(bk, pfull_coord)
    da_deta = d_deta_from_phalf(pk, pfull_coord)
    db_deta = d_deta_from_phalf(bk, pfull_coord)
    return (divg_eta - (bk_at_pfull / (da_deta + db_deta*ps)) *
            horiz_advec_spharm(ps, du_deta, dv_deta, radius))


def horiz_advec_mass_adj(arr, u, v, evap, precip, ps, radius, dp):
    u_adj, v_adj = uv_mass_adjusted(ps, u, v, evap, precip, radius, dp)
    return horiz_advec(arr, u_adj, v_adj, radius)


def horiz_advec_mass_adj_spharm(arr, u, v, evap, precip, ps, radius, dp):
    u_adj, v_adj = uv_mass_adjusted(ps, u, v, evap, precip, radius, dp)
    return horiz_advec_spharm(arr, u_adj, v_adj, radius)


def ps_horiz_advec(ps, u, v, evap, precip, radius, dp):
    """Horizontal advection of surface pressure."""
    u_adj, v_adj = uv_mass_adjusted(ps, u, v, evap, precip, radius, dp)
    sfc_sel = {PFULL_STR: u_adj[PFULL_STR].max()}

    def sel(arr):
        """Grab the value at the level nearest the surface."""
        return arr.sel(**sfc_sel).drop(PFULL_STR)

    u_adj = sel(u_adj)
    v_adj = sel(v_adj)
    return horiz_advec_spharm(ps, u_adj, v_adj, radius)
def column_dry_air_mass(ps, wvp):
    """Total mass of dry air in an atmospheric column (from Trenberth 1991)"""
    return ps / grav.value - wvp


def dry_mass_column_tendency(ps, q, dp, freq='1M'):
    """Combined time-tendency term in column mass budget equation.

    See e.g. Trenberth 1991, Eq. 9.
    """
    return (time_tendency_first_to_last(ps, freq=freq) -
            grav.value * time_tendency_first_to_last(int_dp_g(q, dp),
                                                     freq=freq))
def dry_mass_column_divg(u, v, q, radius, dp):
"""Transport term of atmospheric column mass budget.
E.g. Trenberth 1991, Eq. 9
"""
u_int = integrate((1. - q)*u, dp, is_pressure=True)
v_int = integrate((1. - q)*v, dp, is_pressure=True)
return horiz_divg(u_int, v_int, radius)
def dry_mass_column_budget_residual(ps, u, v, q, radius, dp, freq='1M'):
"""Residual in the dry mass budget.
Theoretically the sum of the tendency and transport terms is exactly zero,
however artifacts introduced by numerics and other things yield a
residual.
"""
tendency = dry_mass_column_tendency(ps, q, dp, freq=freq)
transport = dry_mass_column_divg(u, v, q, radius, dp)
return budget_residual(tendency, transport, freq=freq)
def uv_dry_mass_adjustment(ps, u, v, q, radius, dp, freq='1M'):
"""Adjustment to horiz. winds to enforce column dry mass budget closure."""
residual = dry_mass_column_budget_residual(ps, u, v, q, radius,
dp, freq=freq)
return uv_column_budget_adjustment(u, v, residual, ps, radius)
def uv_dry_mass_adjusted(ps, u, v, q, radius, dp, freq='1M'):
"""Horizontal winds adjusted to impose column dry mass budget closure."""
u_adj, v_adj = uv_dry_mass_adjustment(ps, u, v, q, radius, dp, freq=freq)
return u - u_adj, v - v_adj
def dry_mass_column_divg_adj(ps, u, v, q, radius, dp, freq='1M'):
"""Column divergence of dry mass with budget correction applied."""
u_adj, v_adj = uv_dry_mass_adjusted(ps, u, v, q, radius, dp, freq=freq)
return column_flux_divg(1 - q, u_adj, v_adj, radius, dp)
def dry_mass_column_budget_adj_residual(ps, u, v, q, radius, dp, freq='1M'):
"""Residual in column mass budget when flow is adjusted for balance."""
tendency = dry_mass_column_tendency(ps, q, dp, freq=freq)
    transport = dry_mass_column_divg_adj(ps, u, v, q, radius, dp, freq=freq)
return budget_residual(tendency, transport, freq=freq)
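# Usage sketch (illustrative, not part of the library; `ps`, `u`, `v`, `evap`,
# `precip`, `radius`, `dp` are assumed to be xarray objects on the model grid):
#
#     u_adj, v_adj = uv_mass_adjusted(ps, u, v, evap, precip, radius, dp)
#     resid = mass_column_budget_adj_residual(ps, u, v, evap, precip,
#                                             radius, dp, freq='1M')
#     # resid should be near zero once the wind adjustment closes the budget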
|
{"hexsha": "6cad343ed6f0a202290eac77297896b37706d938", "size": 13401, "ext": "py", "lang": "Python", "max_stars_repo_path": "aospy_user/calcs/mass.py", "max_stars_repo_name": "spencerahill/aospy-obj-lib", "max_stars_repo_head_hexsha": "76803806e8c6b0042c901735eed1c88042d4e4ed", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2015-10-27T19:32:17.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-07T12:41:30.000Z", "max_issues_repo_path": "aospy_user/calcs/mass.py", "max_issues_repo_name": "spencerahill/aospy-obj-lib", "max_issues_repo_head_hexsha": "76803806e8c6b0042c901735eed1c88042d4e4ed", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2015-09-25T15:45:59.000Z", "max_issues_repo_issues_event_max_datetime": "2020-03-31T13:50:29.000Z", "max_forks_repo_path": "aospy_user/calcs/mass.py", "max_forks_repo_name": "spencerahill/aospy-obj-lib", "max_forks_repo_head_hexsha": "76803806e8c6b0042c901735eed1c88042d4e4ed", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.2991202346, "max_line_length": 79, "alphanum_fraction": 0.6763674353, "include": true, "reason": "import numpy", "num_tokens": 3648}
|
import matplotlib.pyplot as plt
import numpy as np
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('data', help='File with the word counts')
parser.add_argument('title', help='Chart title')
args = parser.parse_args()
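# Usage (assumed invocation): python plot_cont.py <counts_file> "<chart title>"
# where each line of <counts_file> has the form "word - count".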
image_name = args.title.lower().replace(' ', '_') + '.png'
words_freq = []
words = []
freq = []
plt.rcdefaults()
fig, ax = plt.subplots(figsize=(20, 10))
def read_words(data):
    with open(data, 'r') as words_file:
        for line in words_file:
            line = line.replace('\n', '')
            try:
                word, freq = line.split(' - ')
            except ValueError:
                # skip empty or malformed lines
                continue
            yield (word, freq)
for w, f in read_words(args.data):
words_freq.append((w, int(f)))
words_freq = sorted(words_freq, key=lambda x: x[0])
words = [wf[0] for wf in words_freq]
freq = [wf[1] for wf in words_freq]
position = np.arange(len(words))
ax.barh(position, freq, color='green')
ax.set_yticks(position)
ax.set_yticklabels(words)
ax.invert_yaxis()
ax.set_xlabel('Occurrences')
ax.set_title(args.title)
for i, v in enumerate(freq):
ax.text(v, i, str(v), color='black', va='center')
#plt.show()
plt.savefig(image_name)
|
{"hexsha": "5e3e4db961805bb5b58ea04dc1478c2e015000db", "size": 1257, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/plot_cont.py", "max_stars_repo_name": "Vnicius/filter", "max_stars_repo_head_hexsha": "0e478c5bc02c5152151308a1ca750c458c982135", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/plot_cont.py", "max_issues_repo_name": "Vnicius/filter", "max_issues_repo_head_hexsha": "0e478c5bc02c5152151308a1ca750c458c982135", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/plot_cont.py", "max_forks_repo_name": "Vnicius/filter", "max_forks_repo_head_hexsha": "0e478c5bc02c5152151308a1ca750c458c982135", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.6530612245, "max_line_length": 73, "alphanum_fraction": 0.6451869531, "include": true, "reason": "import numpy", "num_tokens": 313}
|
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* (C) Copyright 2013 Andrey Semashev
*/
/*!
* \file exceptions.hpp
*
* \brief This header includes all exception types.
*/
#ifndef BOOST_SYNC_EXCEPTIONS_HPP_INCLUDED_
#define BOOST_SYNC_EXCEPTIONS_HPP_INCLUDED_
#include <boost/sync/detail/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#include <boost/sync/exceptions/runtime_exception.hpp>
#include <boost/sync/exceptions/resource_error.hpp>
#include <boost/sync/exceptions/lock_error.hpp>
#include <boost/sync/exceptions/wait_error.hpp>
#include <boost/sync/exceptions/overflow_error.hpp>
#endif // BOOST_SYNC_EXCEPTIONS_HPP_INCLUDED_
|
{"hexsha": "25f5c14fc4a8c2fb8773fcebcbb4cfbcd2b0564b", "size": 789, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "boost/sync/exceptions.hpp", "max_stars_repo_name": "ballisticwhisper/boost", "max_stars_repo_head_hexsha": "f72119ab640b564c4b983bd457457046b52af9ee", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2015-01-02T14:24:56.000Z", "max_stars_repo_stars_event_max_datetime": "2015-01-02T14:25:17.000Z", "max_issues_repo_path": "boost/sync/exceptions.hpp", "max_issues_repo_name": "ballisticwhisper/boost", "max_issues_repo_head_hexsha": "f72119ab640b564c4b983bd457457046b52af9ee", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2019-01-13T23:45:51.000Z", "max_issues_repo_issues_event_max_datetime": "2019-02-03T08:13:26.000Z", "max_forks_repo_path": "boost/sync/exceptions.hpp", "max_forks_repo_name": "ballisticwhisper/boost", "max_forks_repo_head_hexsha": "f72119ab640b564c4b983bd457457046b52af9ee", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2018-04-04T10:55:01.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-23T18:52:06.000Z", "avg_line_length": 26.3, "max_line_length": 61, "alphanum_fraction": 0.7693282636, "num_tokens": 185}
|
/*
* @name BookFiler Library - Sort Filter Table Widget
* @author Branden Lee
* @version 1.00
* @license MIT
* @brief sqlite3 based table widget.
*/
#ifndef BOOKFILER_LIBRARY_SORT_FILTER_TABLE_WIDGET_MAIN_WIDGET_H
#define BOOKFILER_LIBRARY_SORT_FILTER_TABLE_WIDGET_MAIN_WIDGET_H
// config
#include "../core/config.hpp"
// C++17
//#include <filesystem>
#include <algorithm>
#include <cstdlib>
#include <fstream>
#include <functional>
#include <initializer_list>
#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <thread>
#include <utility>
#include <vector>
/* boost 1.72.0
* License: Boost Software License (similar to BSD and MIT)
*/
#include <boost/signals2.hpp>
/* sqlite3 3.33.0
* License: PublicDomain
*/
#include <sqlite3.h>
/* QT 5.13.2
* License: LGPLv3
*/
#include <QTableView>
#include <QWidget>
// Local Project
#include "ItemModel.hpp"
#include "ItemDelegate.hpp"
/*
* bookfiler - widget
*/
namespace bookfiler {
namespace widget {
class TableImpl : public QTableView {
Q_OBJECT
private:
std::shared_ptr<sqlite3> database;
std::string tableName, idColumn, viewRootId;
std::shared_ptr<TableItemModel> itemModel;
std::shared_ptr<TableItemDelegate> itemDelegate;
boost::signals2::signal<void(std::vector<std::string>,
std::vector<std::string>,
std::vector<std::string>)>
updateSignal;
public:
TableImpl();
~TableImpl();
/* Sets the database to use for the view widget.
 * @param database sqlite3 database that this table widget will be synced
 * with
* @param tableName the table name
* @param idColumn The name of the ID column in the database table.
* for example the sql schema may be: "guid" text(32) NOT NULL
* @return 0 on success, else error code
*/
int setData(std::shared_ptr<sqlite3> database, std::string tableName,
std::string idColumn);
/* Called when the sqlite3 database is updated by another widget, thread, or
 * process. Needs to rebuild the entire internal representation of the table
 * because no hint at which rows were added, updated, or deleted is provided.
* @return 0 on success, else error code
*/
int update();
/* Called when the sqlite3 database is updated by another widget, thread, or
* process.
* @param addedIdList a list of id that were added. Only the
* row id provided was added, not the children, unless the child id is
* also listed
* @param updatedIdList a list of id that were updated. Only the
* row id provided was updated, not the children, unless the child id is
* also listed
* @param deletedIdList a list of id that were deleted. Only the
* row id provided was deleted, not the children, unless the child id is
* also listed
* @return 0 on success, else error code
*/
int updateIdHint(std::vector<std::string> addedIdList,
std::vector<std::string> updatedIdList,
std::vector<std::string> deletedIdList);
/* Connect a function that will be signaled when the database is updated by
* this widget
* @param addedIdList a list of id that were added. Only the
* row id provided was added, not the children, unless the child id is
* also listed
* @param updatedIdList a list of id that were updated. Only the
* row id provided was updated, not the children, unless the child id is
* also listed
* @param deletedIdList a list of id that were deleted. Only the
* row id provided was deleted, not the children, unless the child id is
* also listed
* @return 0 on success, else error code
*/
int connectUpdateIdHint(
std::function<void(std::vector<std::string>, std::vector<std::string>,
std::vector<std::string>)>);
/*
* @param columnNum The column number that the editor widget will be used for
* starting from 0
* @param editorWidgetCreator A function to call that will create a new
* std::shared_ptr for the item editor widget
*/
int setItemEditorWidget(
int columnNum,
std::function<std::shared_ptr<QWidget>()> editorWidgetCreator);
};
} // namespace widget
} // namespace bookfiler
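/* Usage sketch (illustrative only; `db` is an assumed std::shared_ptr<sqlite3>):
 *
 *   auto table = std::make_shared<bookfiler::widget::TableImpl>();
 *   table->setData(db, "books", "guid");
 *   table->connectUpdateIdHint(
 *       [](std::vector<std::string> added, std::vector<std::string> updated,
 *          std::vector<std::string> deleted) {
 *         // forward the hint to other widgets sharing the database
 *       });
 */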
#endif
// end BOOKFILER_LIBRARY_SORT_FILTER_TABLE_WIDGET_MAIN_WIDGET_H
|
{"hexsha": "3e97c9ee73682ba26e003666a14f0476ef7177e7", "size": 4274, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/UI/MainWidget.hpp", "max_stars_repo_name": "bradosia/BookFiler-Lib-Sort-Filter-Table-Widget", "max_stars_repo_head_hexsha": "6d4b99ed27eb6b43f6ac0495a8adb02bec5c801e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2021-02-25T05:09:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-12T03:27:26.000Z", "max_issues_repo_path": "src/UI/MainWidget.hpp", "max_issues_repo_name": "bradosia/BookFiler-Lib-Sort-Filter-Table-Widget", "max_issues_repo_head_hexsha": "6d4b99ed27eb6b43f6ac0495a8adb02bec5c801e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/UI/MainWidget.hpp", "max_forks_repo_name": "bradosia/BookFiler-Lib-Sort-Filter-Table-Widget", "max_forks_repo_head_hexsha": "6d4b99ed27eb6b43f6ac0495a8adb02bec5c801e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2020-12-11T17:06:44.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-11T17:06:44.000Z", "avg_line_length": 31.4264705882, "max_line_length": 79, "alphanum_fraction": 0.6958352831, "num_tokens": 1057}
|
using EasyDataAugmentation
using Documenter
DocMeta.setdocmeta!(EasyDataAugmentation, :DocTestSetup, :(using EasyDataAugmentation); recursive=true)
makedocs(;
modules=[EasyDataAugmentation],
authors="lilianabs <lilianabsmath@google.com> and contributors",
repo="https://github.com/lilianabs/EasyDataAugmentation.jl/blob/{commit}{path}#{line}",
sitename="EasyDataAugmentation.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://lilianabs.github.io/EasyDataAugmentation.jl",
assets=String[],
),
pages=[
"Home" => "index.md",
],
)
deploydocs(;
repo="github.com/lilianabs/EasyDataAugmentation.jl",
devbranch="main",
)
|
{"hexsha": "e3bb8ae4269ebdd2446ff47d0d0b033ee74c92d7", "size": 732, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "docs/make.jl", "max_stars_repo_name": "lilianabs/EasyDataAugmentationNLP.jl", "max_stars_repo_head_hexsha": "bb54e163ef74f10f8dc4e21a1bbc04bb35a3e24a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-23T18:01:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-23T18:01:18.000Z", "max_issues_repo_path": "docs/make.jl", "max_issues_repo_name": "lilianabs/EasyDataAugmentation.jl", "max_issues_repo_head_hexsha": "bb54e163ef74f10f8dc4e21a1bbc04bb35a3e24a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/make.jl", "max_forks_repo_name": "lilianabs/EasyDataAugmentation.jl", "max_forks_repo_head_hexsha": "bb54e163ef74f10f8dc4e21a1bbc04bb35a3e24a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.28, "max_line_length": 103, "alphanum_fraction": 0.6926229508, "num_tokens": 196}
|
import numpy as np
from scipy import stats, linalg
import os
import pandas as pd
import neurolab as nl
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
import pickle
import itertools as it
def make_data(data, n_bin=6):
data_bin = np.vsplit(data, n_bin)
data_init = [data_bin[i] for i in range(n_bin-1)]
data_fin = [data_bin[i+1] for i in range(n_bin-1)]
data_midpt = [(data_bin[i]+data_bin[i+1])*0.5 for i in range(n_bin-1)]
data_deriv = [np.sign(data_bin[i+1]-data_bin[i]) for i in range(n_bin-1)]
return np.vstack(data_init), np.vstack(data_fin), np.vstack(data_midpt), np.vstack(data_deriv)
def make_data_diff(data, n_bin=6):
data_bin = np.vsplit(data, n_bin)
data_diff = [data_bin[i+1]-data_bin[i] for i in range(n_bin-1)]
return np.vstack(data_diff)
def make_data_boundary(data, boundary=0, n_bin=6):
    data_bin = np.vsplit(data, n_bin)
    data_init = [data_bin[i] for i in range(n_bin-1)]
    data_fin = [data_bin[i+1] for i in range(n_bin-1)]
    data_midpt = [(data_bin[i]+data_bin[i+1])*0.5 for i in range(n_bin-1)]
    data_diff = [data_bin[i+1]-data_bin[i] for i in range(n_bin-1)]
    # single-pass classification: differences above the boundary map to 1,
    # the rest to -1 (two sequential masked assignments could overwrite each
    # other when boundary < -1)
    data_deriv = np.where(np.vstack(data_diff) > boundary, 1.0, -1.0)
    return np.vstack(data_init), np.vstack(data_fin), np.vstack(data_midpt), data_deriv
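# Minimal self-check sketch (illustrative, with assumed array sizes): splits a
# synthetic 12x5 array into 6 bins and verifies the stacked output shapes.
if __name__ == "__main__":
    demo = np.arange(60.0).reshape(12, 5)  # 6 bins of 2 rows each
    init, fin, midpt, deriv = make_data(demo, n_bin=6)
    # each output stacks the 5 consecutive bin pairs: (6-1)*2 = 10 rows
    assert init.shape == fin.shape == midpt.shape == deriv.shape == (10, 5)
    print(init.shape, fin.shape, midpt.shape, deriv.shape)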
|
{"hexsha": "7ec6d98eaa1508addabc5b008369c19b8eb6e354", "size": 1526, "ext": "py", "lang": "Python", "max_stars_repo_path": "f_data_prep.py", "max_stars_repo_name": "jungminshan/drosophila", "max_stars_repo_head_hexsha": "8efccfdaaac1404811eac2d81a90f5f42b1d24c1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "f_data_prep.py", "max_issues_repo_name": "jungminshan/drosophila", "max_issues_repo_head_hexsha": "8efccfdaaac1404811eac2d81a90f5f42b1d24c1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "f_data_prep.py", "max_forks_repo_name": "jungminshan/drosophila", "max_forks_repo_head_hexsha": "8efccfdaaac1404811eac2d81a90f5f42b1d24c1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.2432432432, "max_line_length": 98, "alphanum_fraction": 0.7201834862, "include": true, "reason": "import numpy,from scipy", "num_tokens": 439}
|
/*
* libasiotap - A portable TAP adapter extension for Boost::ASIO.
* Copyright (C) 2010-2011 Julien KAUFFMANN <julien.kauffmann@freelan.org>
*
* This file is part of libasiotap.
*
* libasiotap is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 3 of
* the License, or (at your option) any later version.
*
* libasiotap is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/>.
*
* In addition, as a special exception, the copyright holders give
* permission to link the code of portions of this program with the
* OpenSSL library under certain conditions as described in each
* individual source file, and distribute linked combinations
* including the two.
* You must obey the GNU General Public License in all respects
* for all of the code used other than OpenSSL. If you modify
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you
* do not wish to do so, delete this exception statement from your
* version. If you delete this exception statement from all source
* files in the program, then also delete it here.
*
* If you intend to use libasiotap in a commercial software, please
* contact me : we may arrange this for a small fee or no fee at all,
* depending on the nature of your project.
*/
/**
* \file base_dns_servers_manager.hpp
* \author Julien KAUFFMANN <julien.kauffmann@freelan.org>
* \brief The base DNS servers manager class.
*/
#pragma once
#include <map>
#include <boost/asio.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/weak_ptr.hpp>
#include <boost/function.hpp>
#include <boost/bind.hpp>
#include <boost/system/system_error.hpp>
#include "types/ip_endpoint.hpp"
namespace asiotap
{
/**
* \brief Handle DNS servers.
*/
template <typename DNSServersManagerType>
class base_dns_servers_manager
{
public:
struct dns_server_type
{
std::string interface_name;
ip_address dns_server_address;
friend bool operator<(const dns_server_type& lhs, const dns_server_type& rhs) {
if (lhs.interface_name == rhs.interface_name) {
return (lhs.dns_server_address < rhs.dns_server_address);
} else {
return (lhs.interface_name < rhs.interface_name);
}
}
friend bool operator==(const dns_server_type& lhs, const dns_server_type& rhs) {
return ((lhs.interface_name == rhs.interface_name) && (lhs.dns_server_address == rhs.dns_server_address));
}
friend std::ostream& operator<<(std::ostream& os, const dns_server_type& value) {
return os << value.interface_name << " - " << value.dns_server_address;
}
};
class entry_type_impl
{
public:
~entry_type_impl()
{
if (m_success)
{
m_dns_servers_manager.unregister_dns_server(m_dns_server);
}
}
entry_type_impl(const entry_type_impl&) = delete;
entry_type_impl& operator=(const entry_type_impl&) = delete;
entry_type_impl(entry_type_impl&&) = delete;
entry_type_impl& operator=(entry_type_impl&&) = delete;
const dns_server_type& dns_server() const
{
return m_dns_server;
}
private:
entry_type_impl(base_dns_servers_manager& dns_server_manager, const dns_server_type& _dns_server) :
m_dns_servers_manager(dns_server_manager),
m_dns_server(_dns_server),
m_success(m_dns_servers_manager.register_dns_server(m_dns_server))
{
}
base_dns_servers_manager& m_dns_servers_manager;
dns_server_type m_dns_server;
bool m_success;
friend class base_dns_servers_manager<DNSServersManagerType>;
};
/**
* \brief The entry implementation type.
*/
typedef boost::shared_ptr<entry_type_impl> entry_type;
/**
* \brief The add handler type.
*/
typedef boost::function<bool (const dns_server_type&)> dns_server_add_handler_type;
/**
* \brief The remove handler type.
*/
typedef boost::function<bool (const dns_server_type&)> dns_server_remove_handler_type;
/**
* \brief The registration success handler type.
*/
typedef boost::function<void(const dns_server_type&)> dns_server_registration_success_handler_type;
/**
* \brief The registration failure handler type.
*/
typedef boost::function<void(const dns_server_type&, const boost::system::system_error&)> dns_server_registration_failure_handler_type;
/**
* \brief The unregistration success handler type.
*/
typedef boost::function<void(const dns_server_type&)> dns_server_unregistration_success_handler_type;
/**
* \brief The unregistration failure handler type.
*/
typedef boost::function<void(const dns_server_type&, const boost::system::system_error&)> dns_server_unregistration_failure_handler_type;
explicit base_dns_servers_manager(boost::asio::io_service& io_service_) :
m_io_service(io_service_),
m_dns_server_add_handler(),
m_dns_server_remove_handler(),
m_dns_server_registration_success_handler(),
m_dns_server_registration_failure_handler(),
m_dns_server_unregistration_success_handler(),
m_dns_server_unregistration_failure_handler()
{
}
base_dns_servers_manager(const base_dns_servers_manager&) = delete;
base_dns_servers_manager& operator=(const base_dns_servers_manager&) = delete;
base_dns_servers_manager(base_dns_servers_manager&&) = delete;
base_dns_servers_manager& operator=(base_dns_servers_manager&&) = delete;
boost::asio::io_service& io_service()
{
return m_io_service;
}
void set_dns_server_add_handler(dns_server_add_handler_type handler)
{
m_dns_server_add_handler = handler;
}
void set_dns_server_remove_handler(dns_server_remove_handler_type handler)
{
m_dns_server_remove_handler = handler;
}
void set_dns_server_registration_success_handler(dns_server_registration_success_handler_type handler)
{
m_dns_server_registration_success_handler = handler;
}
void set_dns_server_registration_failure_handler(dns_server_registration_failure_handler_type handler)
{
m_dns_server_registration_failure_handler = handler;
}
void set_dns_server_unregistration_success_handler(dns_server_unregistration_success_handler_type handler)
{
m_dns_server_unregistration_success_handler = handler;
}
void set_dns_server_unregistration_failure_handler(dns_server_unregistration_failure_handler_type handler)
{
m_dns_server_unregistration_failure_handler = handler;
}
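/* For register_dns_server and unregister_dns_server below: the user-supplied
 * add/remove handler gets the first chance to act; only when it is absent or
 * returns false does the call fall through to the platform-specific
 * implementation in the CRTP-derived class. The outcome is then reported via
 * the corresponding success/failure handlers.
 */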
bool register_dns_server(const dns_server_type& dns_server)
{
try
{
bool result = false;
if (m_dns_server_add_handler) {
result = m_dns_server_add_handler(dns_server);
}
if (!result) {
static_cast<DNSServersManagerType*>(this)->register_dns_server(dns_server);
}
if (m_dns_server_registration_success_handler)
{
m_dns_server_registration_success_handler(dns_server);
}
}
catch (boost::system::system_error& ex)
{
if (m_dns_server_registration_failure_handler)
{
m_dns_server_registration_failure_handler(dns_server, ex);
}
return false;
}
return true;
}
bool unregister_dns_server(const dns_server_type& dns_server)
{
try
{
bool result = false;
if (m_dns_server_remove_handler) {
result = m_dns_server_remove_handler(dns_server);
}
if (!result) {
static_cast<DNSServersManagerType*>(this)->unregister_dns_server(dns_server);
}
if (m_dns_server_unregistration_success_handler)
{
m_dns_server_unregistration_success_handler(dns_server);
}
}
catch (boost::system::system_error& ex)
{
if (m_dns_server_unregistration_failure_handler)
{
m_dns_server_unregistration_failure_handler(dns_server, ex);
}
return false;
}
return true;
}
entry_type get_dns_server_entry(const dns_server_type& dns_server)
{
entry_type entry = m_entry_table[dns_server].lock();
if (!entry)
{
entry = boost::shared_ptr<entry_type_impl>(new entry_type_impl(*this, dns_server));
m_entry_table[dns_server] = entry;
}
return entry;
}
protected:
typedef std::map<dns_server_type, boost::weak_ptr<entry_type_impl>> entry_table_type;
private:
boost::asio::io_service& m_io_service;
entry_table_type m_entry_table;
dns_server_add_handler_type m_dns_server_add_handler;
dns_server_remove_handler_type m_dns_server_remove_handler;
dns_server_registration_success_handler_type m_dns_server_registration_success_handler;
dns_server_registration_failure_handler_type m_dns_server_registration_failure_handler;
dns_server_unregistration_success_handler_type m_dns_server_unregistration_success_handler;
dns_server_unregistration_failure_handler_type m_dns_server_unregistration_failure_handler;
};
}
|
{"hexsha": "0c43ce2b983d57ba1887439ac449e43445f8dc63", "size": 9320, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "blades/freelan/libs/asiotap/include/asiotap/base_dns_servers_manager.hpp", "max_stars_repo_name": "krattai/AEBL", "max_stars_repo_head_hexsha": "a7b12c97479e1236d5370166b15ca9f29d7d4265", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 4.0, "max_stars_repo_stars_event_min_datetime": "2016-04-26T03:43:54.000Z", "max_stars_repo_stars_event_max_datetime": "2016-11-17T08:09:04.000Z", "max_issues_repo_path": "blades/freelan/libs/asiotap/include/asiotap/base_dns_servers_manager.hpp", "max_issues_repo_name": "krattai/AEBL", "max_issues_repo_head_hexsha": "a7b12c97479e1236d5370166b15ca9f29d7d4265", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 17.0, "max_issues_repo_issues_event_min_datetime": "2015-01-05T21:06:22.000Z", "max_issues_repo_issues_event_max_datetime": "2015-12-07T20:45:44.000Z", "max_forks_repo_path": "blades/freelan/libs/asiotap/include/asiotap/base_dns_servers_manager.hpp", "max_forks_repo_name": "krattai/AEBL", "max_forks_repo_head_hexsha": "a7b12c97479e1236d5370166b15ca9f29d7d4265", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2016-04-26T03:43:55.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-06T11:02:08.000Z", "avg_line_length": 30.1618122977, "max_line_length": 140, "alphanum_fraction": 0.7401287554, "num_tokens": 2148}
|
"""
Reimplementing segan paper as close as possible.
Deepak Baby, UGent, June 2018.
"""
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.layers import xavier_initializer, flatten, fully_connected
import numpy as np
from keras.layers import Subtract, Activation, Input
from keras.models import Model, model_from_json
from keras.optimizers import Adam
from keras.layers.merge import _Merge
from keras.callbacks import TensorBoard
import keras.backend as K
from data_ops import *
from file_ops import *
from models import *
from wgan_ops import *
from functools import partial
import time
from tqdm import *
import h5py
import os,sys
import scipy.io.wavfile as wavfile
BATCH_SIZE = 100
GRADIENT_PENALTY_WEIGHT = 10 # need to tune
class RandomWeightedAverage (_Merge):
def _merge_function (self, inputs):
weights = K.random_uniform((BATCH_SIZE, 1, 1))
return (weights * inputs[0]) + ((1 - weights) * inputs[1])
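# Draws eps ~ U[0,1] per sample and returns eps*x_real + (1-eps)*x_fake;
# the gradient penalty below is evaluated at these random interpolates,
# following the WGAN-GP formulation.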
if __name__ == '__main__':
# Various GAN options
opts = {}
opts ['dirhead'] = 'RSGAN_GP' + str(GRADIENT_PENALTY_WEIGHT)
opts ['gp_weight'] = GRADIENT_PENALTY_WEIGHT
    ##########################
    opts ['z_off'] = True # set to True to omit the latent noise input
    # normalization
    #################################
    # Only one of the following should be set to True, or all of them can be False
    opts ['applybn'] = False
    opts ['applyinstancenorm'] = True # Works even without any normalization
    ##################################
# Show model summary
opts ['show_summary'] = False
## Set the matfiles
clean_train_matfile = "./data/clean_train_segan1d.mat"
noisy_train_matfile = "./data/noisy_train_segan1d.mat"
noisy_test_matfile = "./data/noisy_test_segan1d.mat"
####################################################
# Other fixed options
opts ['window_length'] = 2**14
opts ['featdim'] = 1 # 1 since it is just 1d time samples
opts ['filterlength'] = 31
opts ['strides'] = 2
opts ['padding'] = 'SAME'
opts ['g_enc_numkernels'] = [16, 32, 32, 64, 64, 128, 128, 256, 256, 512, 1024]
opts ['g_enc_lstm_cells'] = [1024]
opts ['d_fmaps'] = opts ['g_enc_numkernels'] # We use the same structure for discriminator
opts ['d_lstms'] = opts ['g_enc_lstm_cells']
opts['leakyrelualpha'] = 0.3
opts ['batch_size'] = BATCH_SIZE
opts ['applyprelu'] = True
opts ['d_activation'] = 'leakyrelu'
g_enc_numkernels = opts ['g_enc_numkernels']
opts ['g_dec_numkernels'] = g_enc_numkernels[:-1][::-1] + [1]
opts ['gt_stride'] = 2
    opts ['g_l1loss'] = 200.
    # pre-emphasis coefficient: assumed value (0.95, as in the SEGAN paper);
    # the key is read during training/testing but was never set in the options
    opts ['preemph'] = 0.95
    opts ['d_lr'] = 2e-4
    opts ['g_lr'] = 2e-4
opts ['random_seed'] = 111
n_epochs = 81
fs = 16000
# set flags for training or testing
TRAIN_SEGAN = True
SAVE_MODEL = True
LOAD_SAVED_MODEL = False
TEST_SEGAN = True
modeldir = get_modeldirname(opts)
print ("The model directory is " + modeldir)
print ("_____________________________________")
if not os.path.exists(modeldir):
os.makedirs(modeldir)
# Obtain the generator and the discriminator
D = discriminator(opts)
G = generator(opts)
# Define optimizers
    g_opt = Adam(lr=opts['g_lr'])
    d_opt = Adam(lr=opts['d_lr'])
# The G model has the wav and the noise inputs
wav_shape = (opts['window_length'], opts['featdim'])
z_dim1 = int(opts['window_length']/ (opts ['strides'] ** len(opts ['g_enc_numkernels'])))
z_dim2 = opts ['g_enc_numkernels'][-1]
wav_in_clean = Input(shape=wav_shape, name="main_input_clean")
wav_in_noisy = Input(shape=wav_shape, name="main_input_noisy")
if not opts ['z_off']:
z = Input (shape=(z_dim1, z_dim2), name="noise_input")
G_wav = G([wav_in_noisy, z])
G_model = Model([wav_in_noisy, z], G_wav)
else :
G_wav = G(wav_in_noisy)
G_model = Model(wav_in_noisy, G_wav)
d_out = D([wav_in_clean, wav_in_noisy])
D = Model([wav_in_clean, wav_in_noisy], d_out)
G_model.summary()
D.summary()
# ADDING RELATIVISTIC LOSS AT OUTPUT
for layer in D.layers :
layer.trainable = False
D.trainable = False
if not opts ['z_off']:
G_wav = G([wav_in_noisy, z])
else :
G_wav = G(wav_in_noisy)
D_out_for_G = D([G_wav, wav_in_noisy])
D_out_for_real = D([wav_in_clean, wav_in_noisy])
d_outG = Subtract()([D_out_for_G, D_out_for_real])
d_outG = Activation('sigmoid', name="DoutG")(d_outG)
if not opts ['z_off']:
G_D = Model(inputs=[wav_in_clean, wav_in_noisy, z], outputs = [d_outG, G_wav])
else :
G_D = Model(inputs=[wav_in_clean, wav_in_noisy], outputs = [d_outG, G_wav])
G_D.summary()
G_D.compile(optimizer=g_opt,
loss={'model_2': 'mean_absolute_error', 'DoutG': 'binary_crossentropy'},
loss_weights = {'model_2' : opts['g_l1loss'], 'DoutG': 1} )
print (G_D.metrics_names)
# Now we need D model so that gradient penalty can be incorporated
for layer in D.layers :
layer.trainable = True
for layer in G.layers :
layer.trainable = False
D.trainable = True
G.trainable = False
if not opts ['z_off']:
G_wav_for_D = G([wav_in_noisy, z])
else :
G_wav_for_D = G(wav_in_noisy)
d_out_for_G = D([G_wav_for_D, wav_in_noisy])
d_out_for_real = D([wav_in_clean, wav_in_noisy])
# for gradient penalty
averaged_samples = RandomWeightedAverage()([wav_in_clean, G_wav_for_D])
# We will need to this also through D, for computing the gradients
d_out_for_averaged = D([averaged_samples, wav_in_noisy])
# compute the GP loss by means of partial function in keras
partial_gp_loss = partial(gradient_penalty_loss,
averaged_samples = averaged_samples,
gradient_penalty_weight=GRADIENT_PENALTY_WEIGHT)
partial_gp_loss.__name__ = 'gradient_penalty'
d_outD = Subtract()([d_out_for_real, d_out_for_G])
d_outD = Activation('sigmoid', name="DoutD")(d_outD)
if not opts ['z_off']:
D_final = Model(inputs = [wav_in_clean, wav_in_noisy, z],
outputs = [d_outD, d_out_for_averaged])
else :
D_final = Model(inputs = [wav_in_clean, wav_in_noisy],
outputs = [d_outD, d_out_for_averaged])
D_final.compile(optimizer = d_opt,
loss = ['binary_crossentropy', partial_gp_loss ])
D_final.summary()
print (D_final.metrics_names)
# create label vectors for training
positive_y = np.ones((BATCH_SIZE, 1), dtype=np.float32)
negative_y = -1 * positive_y
dummy_y = np.zeros((BATCH_SIZE, 1), dtype=np.float32) # for GP Loss
if TEST_SEGAN:
ftestnoisy = h5py.File(noisy_test_matfile)
noisy_test_data = ftestnoisy['feat_data']
noisy_test_dfi = ftestnoisy['dfi']
print ("Number of test files: " + str(noisy_test_dfi.shape[1]) )
# Begin the training part
if TRAIN_SEGAN:
fclean = h5py.File(clean_train_matfile)
clean_train_data = np.array(fclean['feat_data'])
fnoisy = h5py.File(noisy_train_matfile)
noisy_train_data = np.array(fnoisy['feat_data'])
print ("********************************************")
print (" SEGAN TRAINING ")
print ("********************************************")
print ("Shape of clean feats mat " + str(clean_train_data.shape))
print ("Shape of noisy feats mat " + str(noisy_train_data.shape))
numtrainsamples = clean_train_data.shape[1]
# Tensorboard stuff
log_path = './logs/' + modeldir
callback = TensorBoard(log_path)
callback.set_model(G_D)
train_names = ['G_loss', 'G_adv_loss', 'G_l1Loss']
idx_all = np.arange(numtrainsamples)
# set random seed
np.random.seed(opts['random_seed'])
batch_size = opts['batch_size']
num_batches_per_epoch = int(np.floor(clean_train_data.shape[1]/batch_size))
for epoch in range(n_epochs):
# train D with minibatch
np.random.shuffle(idx_all) # shuffle the indices for the next epoch
for batch_idx in range(num_batches_per_epoch):
start_time = time.time()
idx_beg = batch_idx * batch_size
idx_end = idx_beg + batch_size
idx = np.sort(np.array(idx_all[idx_beg:idx_end]))
#print ("Batch idx " + str(idx[:5]) +" ... " + str(idx[-5:]))
cleanwavs = np.array(clean_train_data[:,idx]).T
cleanwavs = data_preprocess(cleanwavs, preemph=opts['preemph'])
cleanwavs = np.expand_dims(cleanwavs, axis = 2)
noisywavs = np.array(noisy_train_data[:,idx]).T
noisywavs = data_preprocess(noisywavs, preemph=opts['preemph'])
noisywavs = np.expand_dims(noisywavs, axis = 2)
if not opts ['z_off']:
noiseinput = np.random.normal(0, 1,
(batch_size, z_dim1, z_dim2))
[_, d_loss, d_gploss] = D_final.train_on_batch({'main_input_clean': cleanwavs,
'main_input_noisy': noisywavs, 'noise_input': noiseinput},
{'DoutD': positive_y, 'model_4': dummy_y} )
[g_loss, g_dLoss, g_l1loss] = G_D.train_on_batch({'main_input_clean': cleanwavs,
'main_input_noisy': noisywavs, 'noise_input': noiseinput},
{'model_2': cleanwavs, 'DoutG': positive_y} )
else:
[_, d_loss, d_gploss] = D_final.train_on_batch({'main_input_clean': cleanwavs,
'main_input_noisy': noisywavs,},
{'DoutD': positive_y, 'model_4': dummy_y} )
[g_loss, g_dLoss, g_l1loss] = G_D.train_on_batch({'main_input_clean': cleanwavs,
'main_input_noisy': noisywavs},
{'model_2': cleanwavs,
'DoutG': positive_y} )
time_taken = time.time() - start_time
printlog = "E%d/%d:B%d/%d [D loss: %f] [D_GP loss: %f] [G loss: %f] [G_D loss: %f] [G_L1 loss: %f] [Exec. time: %f]" % (epoch, n_epochs, batch_idx, num_batches_per_epoch, d_loss, d_gploss, g_loss, g_dLoss, g_l1loss, time_taken)
print (printlog)
# Tensorboard stuff
logs = [g_loss, g_dLoss, g_l1loss]
write_log(callback, train_names, logs, epoch)
if (TEST_SEGAN and epoch % 10 == 0) or epoch == n_epochs - 1:
print ("********************************************")
print (" SEGAN TESTING ")
print ("********************************************")
resultsdir = modeldir + "/test_results_epoch" + str(epoch)
if not os.path.exists(resultsdir):
os.makedirs(resultsdir)
if LOAD_SAVED_MODEL:
print ("Loading model from " + modeldir + "/Gmodel")
json_file = open(modeldir + "/Gmodel.json", "r")
loaded_model_json = json_file.read()
json_file.close()
G_loaded = model_from_json(loaded_model_json)
G_loaded.compile(loss='mean_squared_error', optimizer=g_opt)
G_loaded.load_weights(modeldir + "/Gmodel.h5")
else:
G_loaded = G
print ("Saving Results to " + resultsdir)
for test_num in tqdm(range(noisy_test_dfi.shape[1])) :
test_beg = noisy_test_dfi[0, test_num]
test_end = noisy_test_dfi[1, test_num]
#print ("Reading indices " + str(test_beg) + " to " + str(test_end))
noisywavs = np.array(noisy_test_data[:,test_beg:test_end]).T
noisywavs = data_preprocess(noisywavs, preemph=opts['preemph'])
noisywavs = np.expand_dims(noisywavs, axis = 2)
if not opts['z_off']:
noiseinput = np.random.normal(0, 1, (noisywavs.shape[0], z_dim1, z_dim2))
cleaned_wavs = G_loaded.predict([noisywavs, noiseinput])
else :
cleaned_wavs = G_loaded.predict(noisywavs)
cleaned_wavs = np.reshape(cleaned_wavs, (noisywavs.shape[0], noisywavs.shape[1]))
cleanwav = reconstruct_wav(cleaned_wavs)
cleanwav = np.reshape(cleanwav, (-1,)) # make it to 1d by dropping the extra dimension
if opts['preemph'] > 0:
cleanwav = de_emph(cleanwav, coeff=opts['preemph'])
destfilename = resultsdir + "/testwav_%d.wav" % (test_num)
wavfile.write(destfilename, fs, cleanwav)
# Finally, save the model
if SAVE_MODEL:
model_json = G.to_json()
with open(modeldir + "/Gmodel.json", "w") as json_file:
json_file.write(model_json)
G.save_weights(modeldir + "/Gmodel.h5")
print ("Model saved to " + modeldir)
|
{"hexsha": "c79696f6cc4b23b6cc0f151867546dbad929fbf3", "size": 13823, "ext": "py", "lang": "Python", "max_stars_repo_path": "run_rsgan-gp_se.py", "max_stars_repo_name": "samiulshuvo/se_relativisticgan", "max_stars_repo_head_hexsha": "5501c4d96faa03eb3c1fd776b232b68940183f4d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 55, "max_stars_repo_stars_event_min_datetime": "2019-02-17T11:40:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T10:10:00.000Z", "max_issues_repo_path": "run_rsgan-gp_se.py", "max_issues_repo_name": "samiulshuvo/se_relativisticgan", "max_issues_repo_head_hexsha": "5501c4d96faa03eb3c1fd776b232b68940183f4d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2019-02-17T11:47:12.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-21T11:15:14.000Z", "max_forks_repo_path": "run_rsgan-gp_se.py", "max_forks_repo_name": "deepakbaby/se_relativisticgan", "max_forks_repo_head_hexsha": "5501c4d96faa03eb3c1fd776b232b68940183f4d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 18, "max_forks_repo_forks_event_min_datetime": "2018-11-03T15:05:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-12T03:46:30.000Z", "avg_line_length": 42.9285714286, "max_line_length": 244, "alphanum_fraction": 0.5686175215, "include": true, "reason": "import numpy,import scipy", "num_tokens": 3452}
|
#This is a code for thresholding the CAM image and output a mask
import numpy as np
import scipy.misc as misc
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pandas as pd
import cv2
root = "./result/"
img_path = root+"00436515-870c-4b36-a041-de91049b9ab4-densenet121-cam.jpg"
img = mpimg.imread(img_path)
img_name = (img_path.split("/")[2]).split(".")[0]
img_id = "00436515-870c-4b36-a041-de91049b9ab4"
csv_file = "/home/tianshu/pneumonia/dataset/stage_2_train_labels/stage_2_train_labels.csv"
def rgb2gray(rgb):
return np.dot(rgb[...,:3], [0.2989, 0.5870, 0.1140])
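# (0.2989, 0.5870, 0.1140) are the ITU-R BT.601 luma weights.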
def thresholding(img):
    plt.figure()
    gray = rgb2gray(img).astype("uint8")
    arr = np.asarray(gray, dtype="uint8")
    # keep CAM activations in the [60, 180] band as foreground (vectorized
    # equivalent of a per-pixel loop)
    arr = np.where((arr >= 60) & (arr <= 180), 255, 0).astype("uint8")
    im2, contours, hierarchy = cv2.findContours(arr, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
C = []
for contour in contours:
area = cv2.contourArea(contour)
if area<10000 and area>1000:
C.append(contour)
#cv2.drawContours(img, C, -1, (0,255,0), 3)
#assume only 1 bbox detected
    location = []
    for i in range(len(C)):
        location = list(cv2.boundingRect(C[i]))
        x, y, w, h = location
    print(location)
    #resize mask to original size
    factor = 1024.0/224.0
    for i in range(len(location)):
        location[i] = int(location[i]*factor)
    print(location)
#plt.figure()
#cv2.rectangle(img,(x,y),(x+w,y+h),(255, 0, 0),2)
#original_size = (1024, 1024)
#img = cv2.resize(img, original_size, interpolation=cv2.INTER_AREA)
#plt.imshow(img)
#plt.savefig("BBox-%s.png" %(img_name))
    #draw ground truth (pandas imported at top of file)
    df = pd.read_csv(csv_file)
    index = 0
    for i in range(df.shape[0]):
        if(df.loc[i]['patientId']==img_id):
            index = i
            break
    x, y, w, h = df.iloc[index][1:-1].astype("int")
#plt.figure()
#cv2.rectangle(img,(x,y), (x+w, y+h), (0,255,0),2)
#plt.imshow(img)
#plt.savefig("IoU-%s.png" %(img_name))
thresholding(img)
|
{"hexsha": "239e4bd97ae40b4086438d36a175d60cdd90b56f", "size": 2204, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/thresholding.py", "max_stars_repo_name": "hizircanbayram/Explainable-Pneumonia-Learning-A-Comprehensive-Study", "max_stars_repo_head_hexsha": "56269d80ca6d5626dc7683d9f699964d6f54044a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-05T07:52:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-05T07:52:57.000Z", "max_issues_repo_path": "code/thresholding.py", "max_issues_repo_name": "hizircanbayram/Explainable-Pneumonia-Learning-A-Comprehensive-Study", "max_issues_repo_head_hexsha": "56269d80ca6d5626dc7683d9f699964d6f54044a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/thresholding.py", "max_forks_repo_name": "hizircanbayram/Explainable-Pneumonia-Learning-A-Comprehensive-Study", "max_forks_repo_head_hexsha": "56269d80ca6d5626dc7683d9f699964d6f54044a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0422535211, "max_line_length": 92, "alphanum_fraction": 0.6084392015, "include": true, "reason": "import numpy,import scipy", "num_tokens": 686}
|
import time
import networks
import pdb
from data.frankenstein_dataset import FrankensteinDataset
from data.horizon_dataset import HorizonDataset
from data.eval_dataset import EvalDataset
import matplotlib.pyplot as plt
from scipy.misc import imsave
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch
import numpy as np
batch_size = 1
NUM_SLICES = 10
device = torch.device("cuda")
model = networks.define_D(6, 64, 'n_layers', n_layers_D=3, use_sigmoid=False)
chkpt = torch.load('checkpoints/patch/179.pth')
model.load_state_dict(chkpt['state_dict'])
model.to(device)
patch_loss = networks.GANLoss()
dataset = EvalDataset()
#dataset.initialize('../../../data/semanticLandscapes512/train_img', allrandom=True, return_idx=True)
dataset.initialize('../../../data/MITCVCL/coast', allrandom=True)
def convertImage(im):
# undo right image flip when cat-ing
im = np.concatenate((im[:3,:,:], im[3:,:,::-1]), 2)
return im.transpose(1,2,0)
def convertPano(best_params):
# Set left_aux to original left_aux
dataset.left_aux = best_params[0]
# Get original pair and use it to start pano
data, aux = dataset.get_deterministic(best_params[1])
pair = data.numpy()
pano = np.concatenate((pair[:3,:,:], pair[3:,:,::-1]), 2)
# Iterate through rest of the pairs
for i in range(2, len(best_params)):
data, aux = dataset.get_deterministic(best_params[i])
pair = data.numpy()
pano = np.concatenate((pano, pair[3:,:,::-1]), 2)
return pano.transpose(1,2,0)
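# convertPano replays the stored crop parameters: it seeds the panorama with
# the first left/right pair, then appends only the un-flipped right half of
# each subsequent best-scoring pair.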
best_params = [dataset.left_aux]
used = [dataset.left_aux['idx']]
model.eval()
for im_idx in range(NUM_SLICES):
preds = []
crop_params = []
# Look at all pairs in dataset
for i in range(len(dataset)):
if i % 100 == 0:
print(i)
# Skips same image
if i == dataset.left_aux['idx'] or i in used:
preds.append(0)
crop_params.append({})
continue
data, aux = dataset[i]
data = data.unsqueeze(0)
data = data.to(device)
pred = model(data)
preds.append(pred.mean().item())
crop_params.append(aux)
    # Squash preds to (0, 1) with a sigmoid (not strictly necessary for ranking)
    preds = np.array(preds)
    preds = 1 / (1 + np.exp(-preds))
# Argsort image pairs
crop_params = [crop_params[i] for i in np.argsort(preds)]
indices = np.arange(len(dataset))[np.argsort(preds)]
preds = preds[np.argsort(preds)]
# Get best pair
best_index = indices[-1]
best_param = crop_params[-1]
data, aux = dataset.get_deterministic(best_param)
# Save stuff
used.append(best_index)
best_params.append(best_param)
#imsave('pano/test_{}.jpg'.format(im_idx), convertImage(data.numpy()))
#np.save('pano/test_{}.npy'.format(im_idx), data.numpy())
# Set left params for next round
dataset.set_left(best_param)
# Fine tune horizons
# `best_params` contains all info needed for each image slice
dataset.left_aux = best_params[0]
best_params_horizons = [best_params[0]]
best_preds = []
# Iterate through all slices
for i in range(1, len(best_params)):
preds = []
crop_params = []
# Iterate through all possible y_crops
for params in dataset.y_offsets(best_params[i]):
data, aux = dataset.get_deterministic(params)
data = data.unsqueeze(0)
data = data.to(device)
pred = model(data)
preds.append(pred.mean().item())
crop_params.append(aux)
imsave('pano/{}_{}.jpg'.format(i, params['y_crop']), convertImage(data.detach().cpu().numpy()[0]))
    # Squash preds to (0, 1) with a sigmoid (not strictly necessary for ranking)
    preds = np.array(preds)
    preds = 1 / (1 + np.exp(-preds))
    np.save('pano/preds_{}.npy'.format(i), preds)
    # Sort the candidate y-crops by score
    crop_params = [crop_params[j] for j in np.argsort(preds)]
    preds = preds[np.argsort(preds)]
# Replace best_params[i]
best_param = crop_params[-1]
best_params_horizons.append(best_param)
dataset.left_aux = best_param
best_preds.append(preds[-1])
print(best_params)
print(best_params_horizons)
print(best_preds)
imsave('pano/pano.jpg', convertPano(best_params_horizons))
|
{"hexsha": "74b9fca82746618fe516f50253b2cb96bcc604c7", "size": 4270, "ext": "py", "lang": "Python", "max_stars_repo_path": "discriminator/vanilla/generate_pano_noGAN.py", "max_stars_repo_name": "dangeng/infiniteGANorama", "max_stars_repo_head_hexsha": "92c9cbe0638cf9fcdc05020759772e36aebf788c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "discriminator/vanilla/generate_pano_noGAN.py", "max_issues_repo_name": "dangeng/infiniteGANorama", "max_issues_repo_head_hexsha": "92c9cbe0638cf9fcdc05020759772e36aebf788c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "discriminator/vanilla/generate_pano_noGAN.py", "max_forks_repo_name": "dangeng/infiniteGANorama", "max_forks_repo_head_hexsha": "92c9cbe0638cf9fcdc05020759772e36aebf788c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.8513513514, "max_line_length": 106, "alphanum_fraction": 0.668852459, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1087}
|
#!/usr/bin/env python
#!python
#command='Produce.Simulated.FussyJuncs.py heterozygous --reference /mnt/EXT/Mills-scratch2/reference/GRCh37/human_g1k_v37.fasta --input-sim /mnt/EXT/Mills-scratch2/Xuefang/Simulate.FussyJunc/Simulate.het.rerun.test.20150901/het.sim --output-prefix /mnt/EXT/Mills-scratch2/Xuefang/Simulate.FussyJunc/Simulate.het.rerun.test.20150901/het'
#sys.argv=command.split()
import os
import sys
import getopt
import re
import pickle
import time
import datetime
import random
import numpy
import glob
import numpy as np
from scipy.stats import scoreatpercentile
script_name=sys.argv[0]
if len(sys.argv)<2:
print 'Produce.Simulated.FussyJuncs.py Last Update:2015-08-20'
print ''
print 'this script is used to randomly simulate simple/complex SVs and form a corresponding altered reference genome'
print ''
print 'Usage:'
print 'Produce.Simulated.FussyJuncs.py [options] <parameters>'
print ' '
print 'Options:'
print 'heterozygous: simulate simple heterozygous SVs'
print 'homozygous: simulate simple homozygous SVs'
print 'complex: simulate complex SVs'
print ' '
print 'Parameters:'
print '--reference: reference genome'
print '--input-sim: input sim format,see example'
print '--input-rec: input rec format, specially designed for complex events,see example'
print '--output-prefix: prefix of output files'
else:
function_name=sys.argv[1]
def insert_read_decide(bp_list):
#decide which class to simulate, ClassI~71%, ClassII~29%
SV_class_decide=random.choice(range(100))
if SV_class_decide>70:#if ClassII
sub_class_decide=random.choice(range(100))
if sub_class_decide<60:#2-20bp micro insertion of random seqs
return produce_random_seqs(random.choice(range(2,20)))
else: #over 20bp insertion
sub2_class_decide=random.choice(range(100))
if sub2_class_decide<25: #25%, 20-50bp random seqs
return produce_random_seqs(random.choice(range(20,50)))
elif sub2_class_decide<50: #25%, 20-50bp seqs from another chromosome
temp=[]
for x in seq_ins_pools.keys():
if not x==bp_list[0]:
temp.append(x)
return random.choice(seq_ins_pools[random.choice(temp)])
else: #50%, 20-50bp seqs from the same chromosome
if bp_list[0] in seq_ins_pools.keys():
return random.choice(seq_ins_pools[bp_list[0]])
else:
return ''
else:#if ClassI
return ''
if function_name=='heterozygous':
def sv_rec_2(sv_info):
for k1ab in sorted(sv_info.keys()):
for k2ab in sv_info[k1ab].keys():
if not k2ab==k1ab:
k1aba=k1ab.split('/')[0]
k2aba=k2ab.split('/')[0]
k2abb=k2ab.split('/')[1]
flaga=[]
flagb=[]
test=[[],[]]
if flaga==[] and not k1aba==k2aba:
if k2aba=='':
csv1=[[i for i in k1aba],[],[],0]
else:
csv1=simple_flag_SA(k1aba,k2aba)
add_csv_info(csv1,1,k1ab,k2ab)
if flagb==[] and not k1aba==k2abb:
                            if k2abb=='':
                                csv1=[[i for i in k1aba],[],[],0]
else:
csv1=simple_flag_SA(k1aba,k2abb)
add_csv_info(csv1,2,k1ab,k2ab)
score_Cff=-20
def hash_reorder():
for ka1 in del1.keys():
if not ka1 in sv_out.keys():
sv_out[ka1]={}
for ka2 in del1[ka1]:
#fref=os.popen(r'''samtools faidx %s %s:%s-%s'''%(ref,ka1,str(ka2[0]+1),str(ka2[0]+1)))
#tre=fref.readline().strip().split()
#REF_AL=fref.readline().strip().split()[0]
REF_AL='N'
Pass_Sign='PASS'
if ka2[3]<score_Cff:
Pass_Sign='LowQual'
if ka2[2]=='heta':
GenoType='1|0'
elif ka2[2]=='hetb':
GenoType='0|1'
elif ka2[2]=='homo':
GenoType='1|1'
ka_new=[ka1,ka2[0],ka2[-1],REF_AL,'<DEL>',ka2[3],Pass_Sign,'SVTYPE=DEL;END='+str(ka2[1]),'GT',GenoType]
if not ka2[-1] in sv_out[ka1].keys():
sv_out[ka1][ka2[-1]]=[]
if not ka_new in sv_out[ka1][ka2[-1]]:
sv_out[ka1][ka2[-1]].append(ka_new)
for ka1 in inv1.keys():
if not ka1 in sv_out.keys():
sv_out[ka1]={}
for ka2 in inv1[ka1]:
#fref=os.popen(r'''samtools faidx %s %s:%s-%s'''%(ref,ka1,str(ka2[0]+1),str(ka2[0]+1)))
#tre=fref.readline().strip().split()
#REF_AL=fref.readline().strip().split()[0]
REF_AL='N'
Pass_Sign='PASS'
if ka2[3]<score_Cff:
Pass_Sign='LowQual'
if ka2[2]=='heta':
GenoType='1|0'
elif ka2[2]=='hetb':
GenoType='0|1'
elif ka2[2]=='homo':
GenoType='1|1'
ka_new=[ka1,ka2[0],ka2[-1],REF_AL,'<INV>',ka2[3],Pass_Sign,'SVTYPE=INV;END='+str(ka2[1]),'GT',GenoType]
if not ka2[-1] in sv_out[ka1].keys():
sv_out[ka1][ka2[-1]]=[]
if not ka_new in sv_out[ka1][ka2[-1]]:
sv_out[ka1][ka2[-1]].append(ka_new)
for ka1 in dup1.keys():
if not ka1 in sv_out.keys():
sv_out[ka1]={}
for ka2 in dup1[ka1]:
#fref=os.popen(r'''samtools faidx %s %s:%s-%s'''%(ref,ka1,str(ka2[0]+1),str(ka2[0]+1)))
#tre=fref.readline().strip().split()
#REF_AL=fref.readline().strip().split()[0]
REF_AL='N'
CopyNumber=str(ka2[-1])
Pass_Sign='PASS'
if ka2[3]<score_Cff:
Pass_Sign='LowQual'
if ka2[2]=='heta':
GenoType='1|0'
elif ka2[2]=='hetb':
GenoType='0|1'
elif ka2[2]=='homo':
GenoType='1|1'
ka_new=[ka1,ka2[0],ka2[-2],REF_AL,'<DUP>',ka2[3],Pass_Sign,'SVTYPE=DUP;END='+str(ka2[1]),'GT:CN',GenoType+':'+CopyNumber]
if not ka2[-2] in sv_out[ka1].keys():
sv_out[ka1][ka2[-2]]=[]
if not ka_new in sv_out[ka1][ka2[-2]]:
sv_out[ka1][ka2[-2]].append(ka_new)
for ka1 in tra1.keys():
ks1=ka1.split('_')[0]
ks2='_'.join(ka1.split('_')[:-1])
SV_Score=float(ka1.split('_')[-1])
Pass_Sign='PASS'
if SV_Score<score_Cff:
Pass_Sign='LowQual'
if not ks1 in sv_out.keys():
sv_out[ks1]={}
if not ks2 in sv_out[ks1].keys():
sv_out[ks1][ks2]=[]
for ka2 in tra1[ka1].keys():
                    hetx='het'+ka2
                    if ka2=='a':
                        GenoType='1|0'
                    elif ka2=='b':
                        GenoType='0|1'
for ka3 in tra1[ka1][ka2]:
ka_new=ka3[:2]+[ks2,ka3[2]]+ka3[3:]+[SV_Score,Pass_Sign,'SVTYPE=TRA','GT',GenoType]
if not ka_new in sv_out[ks1][ks2]:
sv_out[ks1][ks2].append(ka_new)
def write_VCF_header(output_file):
fo=open(output_file,'w')
print output_file
print>>fo, '##fileformat=VCFv4.1'
print>>fo,'##fileDate='+time.strftime("%Y%m%d")
print>>fo,'##reference=hg19'
print>>fo,'##INFO=<ID=BKPTID,Number=.,Type=String,Description="ID of the assembled alternate allele in the assembly file">'
print>>fo,'##INFO=<ID=CIEND,Number=2,Type=Integer,Description="Confidence interval around END for imprecise variants">'
print>>fo,'##INFO=<ID=CIPOS,Number=2,Type=Integer,Description="Confidence interval around POS for imprecise variants">'
print>>fo,'##INFO=<ID=END,Number=1,Type=Integer,Description="End position of the variant described in this record">'
print>>fo,'##INFO=<ID=HOMLEN,Number=.,Type=Integer,Description="Length of base pair identical micro-homology at event breakpoints">'
print>>fo,'##INFO=<ID=HOMSEQ,Number=.,Type=String,Description="Sequence of base pair identical micro-homology at event breakpoints">'
print>>fo,'##INFO=<ID=IMPRECISE,Number=0,Type=Flag,Description="Imprecise structural variation">'
print>>fo,'##INFO=<ID=MEINFO,Number=4,Type=String,Description="Mobile element info of the form NAME,START,END,POLARITY">'
print>>fo,'##INFO=<ID=SVLEN,Number=.,Type=Integer,Description="Difference in length between REF and ALT alleles">'
print>>fo,'##INFO=<ID=SVTYPE,Number=1,Type=String,Description="Type of structural variant">'
print>>fo,'##FILTER=<ID=LowQual,Description="Score of final structural - Theoretical Score <-50">'
print>>fo,'##ALT=<ID=DEL,Description="Deletion">'
print>>fo,'##ALT=<ID=DEL:ME:ALU,Description="Deletion of ALU element">'
print>>fo,'##ALT=<ID=DEL:ME:L1,Description="Deletion of L1 element">'
print>>fo,'##ALT=<ID=DUP,Description="Duplication">'
print>>fo,'##ALT=<ID=DUP_TANDEM,Description="Tandem Duplication">'
print>>fo,'##ALT=<ID=INS,Description="Insertion of novel sequence">'
print>>fo,'##ALT=<ID=INS:ME:ALU,Description="Insertion of ALU element">'
print>>fo,'##ALT=<ID=INS:ME:L1,Description="Insertion of L1 element">'
print>>fo,'##ALT=<ID=INV,Description="Inversion">'
print>>fo,'##ALT=<ID=CNV,Description="Copy number variable region">'
print>>fo,'##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">'
print>>fo,'##FORMAT=<ID=GQ,Number=1,Type=Float,Description="Genotype quality">'
print>>fo,'##FORMAT=<ID=CN,Number=1,Type=Integer,Description="Copy number genotype for imprecise events">'
print>>fo,'##FORMAT=<ID=CNQ,Number=1,Type=Float,Description="Copy number genotype quality for imprecise events">'
print>>fo,'\t'.join(['#CHROM','POS','ID','REF','ALT','QUAL','FILTER','INFO','FORMAT',output_file.split('/')[-1].replace('.vcf','')])
fo.close()
def write_VCF_main(output_file):
fo=open(output_file,'a')
print output_file
sv_reorganize={}
for k1 in sv_out.keys():
sv_reorganize[k1]={}
for k2 in sv_out[k1].keys():
start=int(k2.split('_')[1])
if not start in sv_reorganize[k1].keys():
sv_reorganize[k1][start]={}
SVtemp_a=[]
SVtemp_b=[]
for k3 in sv_out[k1][k2]:
if not k3[:-1] in SVtemp_a:
SVtemp_a.append(k3[:-1])
SVtemp_b.append([k3[-1]])
else:
SVtemp_b[SVtemp_a.index(k3[:-1])].append(k3[-1])
SVtemp=[]
sv_reorganize[k1][start][k2]=[]
for k3 in range(len(SVtemp_a)):
if len(SVtemp_b[k3])==2 and SVtemp_b[k3] in [['0|1', '1|0'],['1|0', '0|1']]:
SVtemp_b[k3]=['1|1']
for k3 in range(len(SVtemp_a)):
for k4 in SVtemp_b[k3]:
sv_reorganize[k1][start][k2].append(SVtemp_a[k3]+[k4])
for k1 in chromos:
if k1 in sv_reorganize.keys():
for k2 in sorted(sv_reorganize[k1].keys()):
for k3 in sorted(sv_reorganize[k1][k2].keys()):
for k4 in sv_reorganize[k1][k2][k3]:
if k4[3]=='N':
k4[3]=ref_base_returnN(ref,k4[0],k4[1])
print >>fo, '\t'.join([str(i) for i in k4])
fo.close()
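#Genotype merging illustrated (editor's hedged sketch): if the same record was
#emitted once per haplotype, the two phased genotypes collapse into one
#homozygous call, e.g.
#  ['chr1', 100, ..., '1|0'] + ['chr1', 100, ..., '0|1']  ->  ['chr1', 100, ..., '1|1']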
def simple_flag_SA(k1,k2):
temp=[]
break_flag=0
for i in k2:
if not i=='^':
temp.append(i)
else:
temp[-1]+=i
temp2=[temp[0]]
for i in range(len(temp[1:])):
if not '^' in temp[i] and not '^' in temp[i+1] and ord(temp[i+1])-ord(temp[i])==1:
temp2[-1]+=temp[i+1]
elif '^' in temp[i] and '^' in temp[i+1] and ord(temp[i+1][0])-ord(temp[i][0])==-1:
temp2[-1]=temp[i+1][0]+temp2[-1]
else:
temp2.append(temp[i+1])
outdel=[]
outinv=[]
outdup=[]
outtra=0
for i in range(len(temp2)):
j=temp2[i]
if '^' in j:
if not j.replace('^','') in outinv:
outinv.append(j.replace('^',''))
temp2[i]=j.replace('^','')
temp3=''.join(temp2)
for i in range(len(temp3)-1):
if ord(temp3[i+1])-ord(temp3[i])<0:
outtra=1
if not temp3==k1:
temp4=[]
for i in temp3:
if temp3.count(i)>1:
if not i in outdup:
outdup.append(i)
if not i in temp4:
temp4.append(i)
if not ''.join(temp4)==k1:
for i in k1:
if not i in temp4:
outdel.append(i)
if not outdup==[]:
dupuni=unit_produce(outdup)
outdup2=[]
k3=k2
for i in dupuni:
ia=i
ib=''.join([j+'^' for j in i[::-1]])
if len(i)>1:
if temp2.count(ia)+temp2.count(ib)>1:
outdup2.append([i,temp2.count(ia)+temp2.count(ib)])
k3=k3.replace(ia,'')
k3=k3.replace(ib,'')
elif len(i)==1:
if k3.count(ia)+k3.count(ib)>1:
outdup2.append([i,k3.count(ia)])
k3=k3.replace(ia,'')
k3=k3.replace(ib,'')
else:
outdup2=[]
return [outdel,outinv,outdup2,outtra]
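#Return value: [deleted_blocks, inverted_blocks, [dup_unit, copy_count] pairs,
#translocation_flag]. A worked example (editor's trace, not from the source):
#  simple_flag_SA('ab','ab^')  ->  [[], ['b'], [], 0]   (block b inverted)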
def add_csv_info(csv1,flag_sex,k1,k2):
#flag_sex=1: Maternal
#flag_sex=2: Paternal
if flag_sex==1:
del_let=[csv1[0],[]]
inv_let=[csv1[1],[]]
dup_let=[csv1[2],[]]
else:
del_let=[[],csv1[0]]
inv_let=[[],csv1[1]]
dup_let=[[],csv1[2]]
for k3 in sv_info[k1][k2]:
del_info_add(k3,del_let)
inv_info_add(k3,inv_let)
dup_info_2_add(k3,dup_let)
if csv1[3]==1:
tra_info_add(k1,k2)
def del_info_add(k3,del_let):
tempa=bp_to_hash(k3[:-1],del_let[0])
tempb=bp_to_hash(k3[:-1],del_let[1])
for k1 in tempa:
if k1 in tempb:
tempc='homo' #hash_reorder checks for 'homo'; the original tag 'hom' never matched it
tempb.remove(k1)
else:
tempc='heta'
if not k1[0] in del1.keys():
del1[k1[0]]=[]
del1[k1[0]].append(k1[1:]+[tempc,k3[-1],'_'.join(k3[:-1])])
for k1 in tempb:
if not k1[0] in del1.keys():
del1[k1[0]]=[]
del1[k1[0]].append(k1[1:]+['hetb',k3[-1],'_'.join(k3[:-1])])
def dup_info_add(k3,dup_let):
#dup_let=[k2i,k2j]
for k2x in dup_let:
for k4 in k2x:
temp=bp_to_hash(k3[:-1],[i for i in k4])
for k5 in temp:
if not k5[0] in dup1.keys():
dup1[k5[0]]=[]
dup1[k5[0]].append(k5[1:]+[k3[-1],'_'.join(k3[:-1]),k2a.count(k4)])
def dup_info_2_add(k3,dup_let):
temprec=-1
for k2x in dup_let:
temprec+=1
hetx=['heta','hetb'][temprec]
for k4 in k2x:
temp=bp_to_hash(k3[:-1],[i for i in k4[0]])
for k5 in temp:
if not k5[0] in dup1.keys():
dup1[k5[0]]=[]
if k4[1]>1:
dup1[k5[0]].append(k5[1:]+[hetx,k3[-1],'_'.join(k3[:-1]),k4[1]])
def inv_info_add(k3,inv_let):
#inv_let=[k2m,k2n]
temprec=-1
for k2x in inv_let:
temprec+=1
hetx=['heta','hetb'][temprec]
for k4 in k2x:
temp=bp_to_hash(k3[:-1],[i for i in k4])
for k5 in temp:
if not k5[0] in inv1.keys():
inv1[k5[0]]=[]
inv1[k5[0]].append(k5[1:]+[hetx,k3[-1],'_'.join(k3[:-1])])
def let_reclust(vec_in):
if vec_in==[]:
return []
else:
k2e=[]
k2e=[vec_in[0]]
for k3 in range(len(vec_in)-1):
if '^' in vec_in[k3+1]:
if '^' in vec_in[k3] and ord(vec_in[k3][0])-ord(vec_in[k3+1][0])==1:
k2e[-1]+=vec_in[k3+1]
else:
k2e.append(vec_in[k3+1])
else:
if ord(vec_in[k3+1][0])-ord(vec_in[k3][0])==1 and not '^' in vec_in[k3]:
k2e[-1]+=vec_in[k3+1]
else:
k2e.append(vec_in[k3+1])
k2f=[]
for k3 in k2e:
if '^' in k3:
k5=''
for k4 in range(len(k3)/2):
k5+=k3[2*k4]
k6=k5[::-1]+'^'
if not k6 in k2f:
k2f.append(k6)
else:
k2f.append(k3)
return k2f
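#let_reclust merges consecutive blocks into runs and rewrites inverted runs in
#forward order with a trailing '^'. Worked examples (editor's trace):
#  let_reclust(['a','b','d'])  ->  ['ab','d']
#  let_reclust(['b^','a^'])    ->  ['ab^']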
def dup_let_recombind(vec_in):
if vec_in==[]:
return []
else:
vec2=sorted(vec_in)
vec=[[vec2[0]]]
for ka in vec2[1:]:
if ord(ka)-ord(vec[-1][-1])==1:
vec[-1].append(ka)
else:
vec.append([ka])
vec3=[]
for ka in vec:
if len(ka)==1:
vec3.append(ka)
else:
for kb in range(2,len(ka)+1):
for kc in ka[:(1-kb)]:
vec3.append([])
for kd in range(kb):
vec3[-1].append(ka[ka.index(kc)+kd])
vec4=[''.join(i) for i in vec3]
return vec4
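#dup_let_recombind enumerates every contiguous sub-run of length >=2 within each
#run of consecutive duplicated letters (singletons pass through). Editor's trace:
#  dup_let_recombind(['a','b','c'])  ->  ['ab','bc','abc']
#  dup_let_recombind(['a','d'])      ->  ['a','d']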
def comp_info_reorganize(k1,k2):
del_let=[[],[]]
dup_let=[[],[]]
inv_let=[[],[]]
tra_let=[[],[]]
k2a=k2.split('/')[0]
k2b=k2.split('/')[1]
k2c=[]
k2d=[]
for k3 in k2a:
if not k3=='^':
k2c.append(k3)
else:
k2c[-1]+=k3
for k3 in k2b:
if not k3=='^':
k2d.append(k3)
else:
k2d[-1]+=k3
for k3 in k1.split('/')[0]:
if k2a.count(k3)==0:
del_let[0].append(k3)
if k2b.count(k3)==0:
del_let[1].append(k3)
if k2a.count(k3)>1:
dup_let[0].append(k3)
if k2b.count(k3)>1:
dup_let[1].append(k3)
k2e=let_reclust(k2c)
k2f=let_reclust(k2d)
k2g=dup_let_recombind(dup_let[0])
k2h=dup_let_recombind(dup_let[1])
k2i=[]
#integrated dup sections
k2j=[]
#integrated dup sections
for k3 in k2g:
flag1=0
for k4 in k2e:
if k3 in k4:
flag1+=1
if flag1>1:
k2i.append(k3)
for k3 in dup_let[0]:
if k2e.count(k3[0])+k2e.count(k3[0]+'^')>0:
if not k3[0] in k2i:
k2i.append(k3[0])
for k3 in k2h:
flag1=0
for k4 in k2e:
if k3 in k4:
flag1+=1
if flag1>1:
k2j.append(k3)
for k3 in dup_let[1]:
if k2e.count(k3[0])+k2e.count(k3[0]+'^')>0:
if not k3[0] in k2j:
k2j.append(k3[0])
k2m=[]
for k3 in k2e:
if k3[-1]=='^':
k2m.append(k3)
k2n=[]
for k3 in k2f:
if k3[-1]=='^':
k2n.append(k3)
for k3 in sv_info[k1][k2]:
del_info_add(k3,del_let)
dup_info_add(k3,[k2i,k2j])
inv_info_add(k3,[k2m,k2n])
def bp_to_hash(bp_list,sv_let):
bp_hash={}
block_rec=0
block_hash=[]
sv_let=[i[0] for i in sv_let]
for a3 in bp_list:
if a3 in chromos or not a3.isdigit():
block_hash.append([a3])
else:
block_hash[-1].append(a3)
for a3 in block_hash:
for a4 in range(len(a3)-2):
bp_hash[chr(97+block_rec)]=[a3[0],a3[a4+1],a3[a4+2]]
block_rec+=1
out=[]
if not sv_let==[]:
if len(sv_let)==1:
out=[bp_hash[sv_let[0]]]
else:
out.append(bp_hash[sv_let[0]])
for ka in range(len(sv_let)-1):
if ord(sv_let[ka+1])-ord(sv_let[ka])==1 and bp_hash[sv_let[ka+1]][0]==bp_hash[sv_let[ka]][0]:
out[-1]+=bp_hash[sv_let[ka+1]][1:]
else:
out.append(bp_hash[sv_let[ka+1]])
out2=[]
for ka in out:
out2.append([ka[0],int(ka[1]),int(ka[-1])])
return out2
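#bp_to_hash maps block letters back to coordinates and merges adjacent blocks on
#the same chromosome into one span. Editor's trace:
#  bp_to_hash(['chr1','100','200','300'], ['a'])      ->  [['chr1', 100, 200]]
#  bp_to_hash(['chr1','100','200','300'], ['a','b'])  ->  [['chr1', 100, 300]]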
def tra_info_add(k1,k2):
for k3 in sv_info[k1][k2]:
SV_ID='_'.join([str(i) for i in k3])
tra1[SV_ID]={}
k2a=k2.split('/')[0]
k2b=k2.split('/')[1]
bp_hash={}
block_rec=0
block_hash=[]
for a3 in k3[:-1]:
if a3 in chromos or not a3.isdigit():
block_hash.append([a3])
else:
block_hash[-1].append(a3)
for a3 in block_hash:
for a4 in range(len(a3)-2):
bp_hash[chr(97+block_rec)]=[a3[0],a3[a4+1],a3[a4+2]]
block_rec+=1
for a3 in bp_hash.keys():
temp=[]
for a4 in bp_hash[a3][1:]:
temp.append(int(a4)-1)
temp.append(int(a4))
bp_hash[a3][1:]=temp
#ref_allele['left']=[ref_allele[k1[0]][0]]
#ref_allele['right']=[ref_allele[k1[-1]][1]]
bp_hash['left']=[bp_hash[k1[0]][0],bp_hash[k1[0]][1],bp_hash[k1[0]][2]]
bp_hash['right']=[bp_hash[k1[-1]][0],bp_hash[k1[-1]][3],bp_hash[k1[-1]][4]]
ref_allele={}
for a3 in bp_hash.keys():
ref_allele[a3]=[bp_hash[a3][0]]
for a4 in bp_hash[a3][1:]:
ref_allele[a3].append(ref_base_returnN(ref,bp_hash[a3][0],a4))
if not k2a==k1.split('/')[0] and del_flag_SA(k1.split('/')[0],k2a)==0:
flag1=0#flag1==0:w/o inversion in the alt structure
if '^' in k2a:
flag1+=1
flag2=0#flag2==0:w/o duplication in the alt structure
for j in k2a:
if k2a.count(j)>1:
flag2+=1
flag3=0 #flag3==0: w/o translocation
if len(k2a)>1:
for i in range(len(k2a)-1):
if not ord(k2a[i+1])>ord(k2a[i]):
flag3+=1
if flag1+flag2+flag3==0:
heta_Del_block=[]
for a1 in k1.split('/')[0]:
if not a1 in k2a:
heta_Del_block.append(a1)
tra1[SV_ID]['a']=[]
block_hash=[]
del_hash={}
block_rec=0
for a3 in k3[:-1]: #assumed fix: the original referenced an undefined 'a2'; k3[:-1] matches the breakpoint parsing above
if a3 in chromos:
block_hash.append([a3])
else:
block_hash[-1].append(a3)
for a3 in block_hash:
for a4 in range(len(a3)-2):
del_hash[chr(97+block_rec)]=[a3[0],a3[a4+1],a3[a4+2]]
block_rec+=1
if not heta_Del_block==[]:
a_heta=0
heta_Del_new=[heta_Del_block[0]]
while True:
a_heta+=1
if a_heta==len(heta_Del_block):break
if ord(heta_Del_block[a_heta])-ord(heta_Del_block[a_heta-1])==1 and del_hash[heta_Del_block[a_heta]][0]==del_hash[heta_Del_block[a_heta-1]][0]:
heta_Del_new[-1]+=heta_Del_block[a_heta]
else:
heta_Del_new.append(heta_Del_block[a_heta])
for a3 in heta_Del_new:
a4=a3[0]
tra1[SV_ID]['a'].append(['DEL',del_hash[a4][0],int(del_hash[a4][1]),ref_allele[a4][2]])
a4=a3[-1]
tra1[SV_ID]['a'][-1].append(int(del_hash[a4][2])-1)
else:
tra1[SV_ID]['a']=[]
t1=[]
for a3 in k2a:
if not a3=='^':
t1.append(a3)
else:
t1[-1]+=a3
t2=[t1[0]]
for a3 in t1[1:]:
if not '^' in a3 and not '^' in t2[-1] and ord(a3)-ord(t2[-1][-1])==1 and bp_hash[a3[0]][0]==bp_hash[t2[-1][-1]][0]:
t2[-1]+=a3
elif '^' in a3 and '^' in t2[-1] and ord(t2[-1][-2])-ord(a3[0])==1 and bp_hash[a3[0]][0]==bp_hash[t2[-1][-2]][0]:
t2[-1]+=a3
else:
t2.append(a3)
a3='left'
a4=t2[0]
l_chr=bp_hash[a3][0]
r_chr=bp_hash[a4[0]][0]
if not '^' in a4:
if not a4[0]==k1[0]:
tra1[SV_ID]['a'].append([r_chr,bp_hash[a4[0]][2],ref_allele[a4[0]][2],']'+l_chr+':'+str(bp_hash[a3][1])+']'+ref_allele[a4[0]][2]])
tra1[SV_ID]['a'].append([l_chr,bp_hash[a3][1],ref_allele[a3][1],ref_allele[a3][1]+'['+r_chr+':'+str(bp_hash[a4[0]][2])+'['])
elif '^' in a4:
tra1[SV_ID]['a'].append([r_chr, bp_hash[a4[0]][3],ref_allele[a4[0]][3],ref_allele[a4[0]][3]+']'+l_chr+':'+str(bp_hash[a3][1])+']'])
tra1[SV_ID]['a'].append([l_chr,bp_hash[a3][1],ref_allele[a3][1],ref_allele[a3][1]+']'+r_chr+':'+str(bp_hash[a4[0]][3])+']'])
for t3 in range(len(t2)-1):
a3=t2[t3]
a4=t2[t3+1]
l_chr=bp_hash[a3[0]][0]
r_chr=bp_hash[a4[0]][0]
if not '^' in a3 and not '^' in a4:
tra1[SV_ID]['a'].append([r_chr,bp_hash[a4[0]][2],ref_allele[a4[0]][2],']'+l_chr+':'+str(bp_hash[a3[-1]][3])+']'+ref_allele[a4[0]][2]])
tra1[SV_ID]['a'].append([l_chr,bp_hash[a3[-1]][3],ref_allele[a3[-1]][3],ref_allele[a3[-1]][3]+'['+bp_hash[a4[0]][0]+':'+str(bp_hash[a4[0]][2])+'['])
elif '^' in a3 and not '^' in a4:
tra1[SV_ID]['a'].append([r_chr,bp_hash[a4[0]][2],ref_allele[a4[0]][2],'['+l_chr+':'+str(bp_hash[a3[-2]][2])+'['+ref_allele[a4[0]][2]])
tra1[SV_ID]['a'].append([l_chr,bp_hash[a3[-2]][2],ref_allele[a3[-2]][2],'['+bp_hash[a4[0]][0]+':'+str(bp_hash[a4[0]][2])+'['+ref_allele[a3[-2]][2]])
elif not '^' in a3 and '^' in a4:
tra1[SV_ID]['a'].append([r_chr,bp_hash[a4[0]][3],ref_allele[a4[0]][3],ref_allele[a4[0]][3]+']'+l_chr+':'+str(bp_hash[a3[-1]][3])+']'])
tra1[SV_ID]['a'].append([l_chr,bp_hash[a3[-1]][3],ref_allele[a3[-1]][3],ref_allele[a3[-1]][3]+']'+r_chr+':'+str(bp_hash[a4[0]][3])+']'])
elif '^' in a3 and '^' in a4:
tra1[SV_ID]['a'].append([r_chr,bp_hash[a4[0]][3],ref_allele[a4[0]][3],ref_allele[a4[0]][3]+'['+l_chr+':'+str(bp_hash[a3[-2]][2])+'['])
tra1[SV_ID]['a'].append([l_chr,bp_hash[a3[-2]][2],ref_allele[a3[-2]][2], ']'+r_chr+':'+str(bp_hash[a4[0]][3])+']'+ref_allele[a3[-2]][2]])
if len(t2)>1:
a3=t2[t3+1]
else:
a3=t2[0]
a4='right'
l_chr=bp_hash[a3[0]][0]
r_chr=bp_hash[a4][0]
if not '^' in a3:
if not a3[-1]==k1[-1]:
tra1[SV_ID]['a'].append([r_chr,bp_hash[a4][2],ref_allele[a4][2],']'+l_chr+':'+str(bp_hash[a3[-1]][3])+']'+ref_allele[a4][2]])
tra1[SV_ID]['a'].append([l_chr,bp_hash[a3[-1]][3],ref_allele[a3[-1]][3],ref_allele[a3[-1]][3]+'['+bp_hash[a4][0]+':'+str(bp_hash[a4][2])+'['])
if '^' in a3:
tra1[SV_ID]['a'].append([r_chr,bp_hash[a4][2],ref_allele[a4][2],'['+l_chr+':'+str(bp_hash[a3[-2]][2])+'['+ref_allele[a4][2]])
tra1[SV_ID]['a'].append([l_chr,bp_hash[a3[-2]][2],ref_allele[a3[-2]][2],'['+bp_hash[a4][0]+':'+str(bp_hash[a4][2])+'['+ref_allele[a3[-2]][2]])
#print [k1,k2]
if not k2b==k1.split('/')[1] and del_flag_SA(k1.split('/')[1],k2b)==0:
flag1=0#flag1==0:w/o inversion in the alt structure
if '^' in k2b:
flag1+=1
flag2=0#flag2==0:w/o duplication in the alt structure
for j in k2b:
if k2b.count(j)>1:
flag2+=1
flag3=0 #flag3==0: w/o translocation
if len(k2b)>1:
for i in range(len(k2b)-1):
if not ord(k2b[i+1])>ord(k2b[i]):
flag3+=1
if flag1+flag2+flag3==0:
heta_Del_block=[]
for a1 in k1.split('/')[1]:
if not a1 in k2b:
heta_Del_block.append(a1)
tra1[SV_ID]['b']=[]
block_hash=[]
del_hash={}
block_rec=0
for a3 in k3[:-1]: #assumed fix: the original referenced an undefined 'a2'; k3[:-1] matches the breakpoint parsing above
if a3 in chromos:
block_hash.append([a3])
else:
block_hash[-1].append(a3)
for a3 in block_hash:
for a4 in range(len(a3)-2):
del_hash[chr(97+block_rec)]=[a3[0],a3[a4+1],a3[a4+2]]
block_rec+=1
if not heta_Del_block==[]:
a_heta=0
heta_Del_new=[heta_Del_block[0]]
while True:
a_heta+=1
if a_heta==len(heta_Del_block):break
if ord(heta_Del_block[a_heta])-ord(heta_Del_block[a_heta-1])==1 and del_hash[heta_Del_block[a_heta]][0]==del_hash[heta_Del_block[a_heta-1]][0]:
heta_Del_new[-1]+=heta_Del_block[a_heta]
else:
heta_Del_new.append(heta_Del_block[a_heta])
for a3 in heta_Del_new:
a4=a3[0]
tra1[SV_ID]['b'].append(['DEL',del_hash[a4][0],int(del_hash[a4][1]),ref_allele[a4][2]])
a4=a3[-1]
tra1[SV_ID]['b'][-1].append(int(del_hash[a4][2])-1)
else:
tra1[SV_ID]['b']=[]
t1=[]
for a3 in k2b:
if not a3=='^':
t1.append(a3)
else:
t1[-1]+=a3
t2=[t1[0]]
for a3 in t1[1:]:
if not '^' in a3 and not '^' in t2[-1] and ord(a3)-ord(t2[-1][-1])==1 and bp_hash[a3[0]][0]==bp_hash[t2[-1][-1]][0]:
t2[-1]+=a3
elif '^' in a3 and '^' in t2[-1] and ord(t2[-1][-2])-ord(a3[0])==1 and bp_hash[a3[0]][0]==bp_hash[t2[-1][-2]][0]:
t2[-1]+=a3
else:
t2.append(a3)
a3='left'
a4=t2[0]
l_chr=bp_hash[a3][0]
r_chr=bp_hash[a4[0]][0]
if not '^' in a4:
if not a4[0]==k1[0]:
tra1[SV_ID]['b'].append([r_chr,bp_hash[a4[0]][2],ref_allele[a4[0]][2],']'+l_chr+':'+str(bp_hash[a3][1])+']'+ref_allele[a4[0]][2]])
tra1[SV_ID]['b'].append([l_chr,bp_hash[a3][1],ref_allele[a3][1],ref_allele[a3][1]+'['+r_chr+':'+str(bp_hash[a4[0]][2])+'['])
elif '^' in a4:
tra1[SV_ID]['b'].append([r_chr, bp_hash[a4[0]][3],ref_allele[a4[0]][3],ref_allele[a4[0]][3]+']'+l_chr+':'+str(bp_hash[a3][1])+']'])
tra1[SV_ID]['b'].append([l_chr,bp_hash[a3][1],ref_allele[a3][1],ref_allele[a3][1]+']'+r_chr+':'+str(bp_hash[a4[0]][3])+']'])
for t3 in range(len(t2)-1):
a3=t2[t3]
a4=t2[t3+1]
l_chr=bp_hash[a3[0]][0]
r_chr=bp_hash[a4[0]][0]
if not '^' in a3 and not '^' in a4:
tra1[SV_ID]['b'].append([r_chr,bp_hash[a4[0]][2],ref_allele[a4[0]][2],']'+l_chr+':'+str(bp_hash[a3[-1]][3])+']'+ref_allele[a4[0]][2]])
tra1[SV_ID]['b'].append([l_chr,bp_hash[a3[-1]][3],ref_allele[a3[-1]][3],ref_allele[a3[-1]][3]+'['+bp_hash[a4[0]][0]+':'+str(bp_hash[a4[0]][2])+'['])
elif '^' in a3 and not '^' in a4:
tra1[SV_ID]['b'].append([r_chr,bp_hash[a4[0]][2],ref_allele[a4[0]][2],'['+l_chr+':'+str(bp_hash[a3[-2]][2])+'['+ref_allele[a4[0]][2]])
tra1[SV_ID]['b'].append([l_chr,bp_hash[a3[-2]][2],ref_allele[a3[-2]][2],'['+bp_hash[a4[0]][0]+':'+str(bp_hash[a4[0]][2])+'['+ref_allele[a3[-2]][2]])
elif not '^' in a3 and '^' in a4:
tra1[SV_ID]['b'].append([r_chr,bp_hash[a4[0]][3],ref_allele[a4[0]][3],ref_allele[a4[0]][3]+']'+l_chr+':'+str(bp_hash[a3[-1]][3])+']'])
tra1[SV_ID]['b'].append([l_chr,bp_hash[a3[-1]][3],ref_allele[a3[-1]][3],ref_allele[a3[-1]][3]+']'+r_chr+':'+str(bp_hash[a4[0]][3])+']'])
elif '^' in a3 and '^' in a4:
tra1[SV_ID]['b'].append([r_chr,bp_hash[a4[0]][3],ref_allele[a4[0]][3],ref_allele[a4[0]][3]+'['+l_chr+':'+str(bp_hash[a3[-2]][2])+'['])
tra1[SV_ID]['b'].append([l_chr,bp_hash[a3[-2]][2],ref_allele[a3[-2]][2], ']'+r_chr+':'+str(bp_hash[a4[0]][3])+']'+ref_allele[a3[-2]][2]])
if len(t2)>1:
a3=t2[t3+1]
else:
a3=t2[0]
a4='right'
l_chr=bp_hash[a3[0]][0]
r_chr=bp_hash[a4][0]
if not '^' in a3:
if not a3[-1]==k1[-1]:
tra1[SV_ID]['b'].append([r_chr,bp_hash[a4][2],ref_allele[a4][2],']'+l_chr+':'+str(bp_hash[a3[-1]][3])+']'+ref_allele[a4][2]])
tra1[SV_ID]['b'].append([l_chr,bp_hash[a3[-1]][3],ref_allele[a3[-1]][3],ref_allele[a3[-1]][3]+'['+bp_hash[a4][0]+':'+str(bp_hash[a4][2])+'['])
if '^' in a3:
tra1[SV_ID]['b'].append([r_chr,bp_hash[a4][2],ref_allele[a4][2],'['+l_chr+':'+str(bp_hash[a3[-2]][2])+'['+ref_allele[a4][2]])
tra1[SV_ID]['b'].append([l_chr,bp_hash[a3[-2]][2],ref_allele[a3[-2]][2],'['+bp_hash[a4][0]+':'+str(bp_hash[a4][2])+'['+ref_allele[a3[-2]][2]])
def sv_homo_initial():
sv_homo_info['DEL']=[]
sv_homo_info['DUP']=[]
sv_homo_info['INV']=[]
sv_homo_info['TRA']=[]
sv_homo_info['DUP_TANDEM']=[]
def produce_keys(key):
if key=='DEL':
ka='a/a'
kb='/'
elif key=='DUP_TANDEM':
ka='a/a'
dup_num=random.sample(range(2,20),1)
kb='/'.join([''.join(['a' for i in range(dup_num[0])]),''.join(['a' for i in range(dup_num[0])])])
elif key=='INV':
ka='a/a'
kb='a^/a^'
elif key=='TRA':
ka='ab/ab'
kb='ba/ba'
elif key=='DUP':
ka='ab/ab'
kb='aba/aba'
return [ka,kb]
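#produce_keys returns [reference_structure, alternate_structure] in the block
#notation used throughout ('/' separates haplotypes, '^' marks inversion), e.g.
#  produce_keys('INV')  ->  ['a/a', 'a^/a^']
#  produce_keys('TRA')  ->  ['ab/ab', 'ba/ba']
#DUP_TANDEM draws a random copy number in [2,20), so its kb varies per call.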
def sv_homo_produce():
for k1 in SV_region:
sv_len=k1[2]-k1[1]
k2=k1[-1]
sv_homo_info[k2].append(k1+produce_keys(k2))
def sv_het_produce():
for k1 in sv_homo_info.keys():
sv_het_info[k1]=[]
for k2 in sv_homo_info[k1]:
allele=random.choice(range(2))
allele_pool=[k2[-2].split('/')[0],k2[-1].split('/')[0]]
k2[-1]='/'.join([allele_pool[allele],allele_pool[1-allele]])
sv_het_info[k1].append(k2)
def sv_rec_homo_produce():
for k1 in sv_homo_info.keys():
fo=open(dict_opts['--output-prefix']+'.homo.'+k1+'.rec','w')
print dict_opts['--output-prefix']+'.homo.'+k1+'.rec'
for k2 in sv_homo_info[k1]:
print >>fo, ' '.join([str(i) for i in k2])
fo.close()
def sv_rec_het_produce():
for k1 in sv_het_info.keys():
fo=open(dict_opts['--output-prefix']+'.het.'+k1+'.rec','w')
print dict_opts['--output-prefix']+'.het.'+k1+'.rec'
for k2 in sv_het_info[k1]:
print >>fo, ' '.join([str(i) for i in k2])
fo.close()
def sv_info_rewrite(sv_h_info):
for k1 in sv_h_info.keys():
for k2 in sv_h_info[k1]:
if not k2[-2] in sv_info.keys():
sv_info[k2[-2]]={}
if not k2[-1] in sv_info[k2[-2]].keys():
sv_info[k2[-2]][k2[-1]]=[]
sv_info[k2[-2]][k2[-1]].append([str(i) for i in k2[:-3]]+[0.0])
def sv_stat_calcu(sv_hash,key):
out=[]
for k1 in sv_hash[key]:
sv_min=int(k1[1])
sv_max=int(k1[2])
sv_int=(int(k1[2])-int(k1[1]))/3
out.append([k1[0],sv_min,sv_min+sv_int, sv_max-sv_int,sv_max])
return out
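#sv_stat_calcu turns each [count, min_size, max_size] row into tercile boundaries.
#Editor's trace: a row [30, 100, 400] gives sv_int=100 and the output
#  [30, 100, 200, 300, 400]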
def sv_size_pick(sv_stat):
out=[]
for k1 in sv_stat:
out+=[random.choice(range(int(k1[1]),int(k1[2]))) for i in range(int(k1[0]/3))]
out+=[random.choice(range(int(k1[2]),int(k1[3]))) for i in range(int(int(k1[0])-int(k1[0]/3))/2)]
out+=[random.choice(range(int(k1[3]),int(k1[4]))) for i in range(int(k1[0])-int(k1[0]/3)-int(int(k1[0])-int(k1[0]/3))/2)]
out=random.sample(out,len(out)) #shuffle; the original built 'permute' but returned the unshuffled list
return out
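#sv_size_pick draws roughly count/3 sizes from each tercile, so for the
#[30, 100, 200, 300, 400] row above it returns 10 sizes from [100,200),
#10 from [200,300) and 10 from [300,400), in shuffled order.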
def chromos_readin(refs):
fin=open(refs+'.fai')
chromos=[]
chromo_length=[]
genome_length=0
for line in fin:
pin=line.strip().split()
chromos.append(pin[0])
genome_length+=int(pin[1])
chromo_length.append(int(pin[1]))
fin.close()
chromo_num_region=[]
for k1 in chromo_length:
chromo_num_region.append(int(round(float(k1)/float(genome_length)*sv_total_num)))
chrom_to_remove=[]
out_num_region=[]
out_chromos=[]
out_length={}
for i in range(len(chromo_num_region)):
if chromo_num_region[i]>1:
out_chromos.append(chromos[i])
out_num_region.append(chromo_num_region[i])
out_length[chromos[i]]=chromo_length[i]
return [genome_length]+[out_chromos]+[out_num_region]+[out_length]
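#chromos_readin parses the samtools .fai index, whose tab-separated columns are
#  name  length  offset  linebases  linewidth
#Only the first two are used. Chromosomes allotted fewer than two SVs are
#dropped, and the function returns [genome_length, chromos, per-chromosome SV
#counts, {chromosome: length}].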
def sv_hash_add(list_in,key):
for i in list_in:
if not i in sv_hash.keys():
sv_hash[i]=[key]
else:
sv_hash[i]+=[key]
def sv_region_pick():
#pick random regions across the genome
SV_region=[]
rec=-1
sv_size=del_size+dup_size+inv_size+tra_size+dup2_size
sv_size=random.sample(sv_size,len(sv_size))
for k1 in range(len(chromos)):
chromosome=chromos[k1]
num_region=chromo_num_region[k1]
range_region=chromo_length[chromosome]
temp_start_region=sorted(random.sample(range(1000, range_region-1000),num_region+1))
temp_end_region=[]
for k2 in range(num_region):
start=temp_start_region[k2]
start2=temp_start_region[k2+1]
if start2-start<1000: continue
rec+=1
temp_sv_size=sv_size[rec]
sv_type=sv_hash[sv_size[rec]][0]
del sv_hash[sv_size[rec]][0]
end=start+temp_sv_size
if not end<start2-300:
end=random.choice(range(start,int(numpy.mean([start,start2]))))
if sv_type=='TRA':
end2=random.choice(range(end+100,start2-100))
temp_end_region.append(end)
if sv_type=='TRA':
SV_region.append([chromos[k1],start,end,end2,sv_type])
else:
SV_region.append([chromos[k1],start,end,sv_type])
return SV_region
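#Each SV_region entry is [chrom, start, end, type], with an extra coordinate for
#translocations: [chrom, start, end, end2, 'TRA']. Starts are sampled at least
#1kb apart and 1kb away from the chromosome ends.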
def ref_base_returnN(ref,chromo,pos):
return 'N' #placeholder REF base; ref_base_readin below performs the real samtools lookup
def ref_base_readin(ref,chromo,pos):
fref=os.popen(r'''samtools faidx %s %s:%s-%s'''%(ref,chromo,str(pos),str(pos)))
tre=fref.readline().strip().split()
REF_AL=fref.readline().strip().split()
if not REF_AL==[]:
return REF_AL[0]
else:
return 'N'
def del_flag_SA(k1,k2):
out=0
if not '^' in k2:
flagdup=0
for i in k2:
if k2.count(i)>1:
flagdup+=1
if flagdup==0:
flagtra=0
for i in range(len(k2)-1):
if ord(k2[i+1])-ord(k2[i])<1:
flagtra+=1
if flagtra==0:
if not k1==k2:
out=1
return out
def order_SV_Homo_write(sv_info):
for k1 in sv_info.keys():
for k2 in sv_info[k1].keys():
for k3 in sv_info[k1][k2]:
if not k3[0] in order_SV_Pos.keys():
order_SV_Pos[k3[0]]={}
if not int(k3[1]) in order_SV_Pos[k3[0]].keys():
order_SV_Pos[k3[0]][int(k3[1])]=[]
order_SV_Pos[k3[0]][int(k3[1])].append([[k3[0]]+[int(i) for i in k3[1:-1]],[k2.split('/')[0]]])
def order_SV_Het_write(sv_info):
for k1 in sv_info.keys():
for k2 in sv_info[k1].keys():
for k3 in sv_info[k1][k2]:
if not k3[0] in order_SV_Pos.keys():
order_SV_Pos[k3[0]]={}
if not int(k3[1]) in order_SV_Pos[k3[0]].keys():
order_SV_Pos[k3[0]][int(k3[1])]=[]
order_SV_Pos[k3[0]][int(k3[1])].append([[k3[0]]+[int(i) for i in k3[1:-1]],[k2.split('/')[0],k2.split('/')[1],k1.split('/')[0]]])
def Ref_Alt_Produce(ChromoList,bp_list,letter_new,Ref_Seq_File):
#ChromoList: list of chromosome names, used to split bp_list into per-chromosome blocks
#bp_list: chromosome name(s) followed by breakpoint coordinates,
#e.g. bp_list: [184569179, 184569775, 184571064, 184572009, 184572016]
#letter_new: block structure of the alternate allele, e.g. 'aba^'
if letter_new=='':
return insert_read_decide(bp_list)
else:
bp_hash={}
bp_seq=[]
for k1 in bp_list:
if k1 in ChromoList:
bp_seq.append([k1])
else:
bp_seq[-1].append(k1)
rec=0
for k1 in bp_seq:
for k2 in range(len(k1)-2):
rec+=1
bp_hash[chr(96+rec)]=[k1[0],k1[k2+1],k1[k2+2]]
letter_seq={}
for k1 in bp_hash.keys():
Chromo=bp_hash[k1][0]
region_left=bp_hash[k1][1]
region_right=bp_hash[k1][2]
seq=os.popen(r'''samtools faidx %s %s:%d-%d'''%(Ref_Seq_File,Chromo,region_left,region_right))
seq.readline().strip().split()
lines=[]
while True:
line=seq.readline().strip().split()
if not line: break
lines.append(line)
Seq1=lines[0][0]
for j in range(len(lines))[1:]:
Seq1=''.join([Seq1,lines[j][0]])
letter_seq[k1]=Seq1
letter_seq[k1+'^']=reverse(complementary(Seq1))
new_Seq=''
new_letter=[]
for k1 in letter_new:
if not k1=='^':
new_letter.append(k1)
else:
new_letter[-1]+=k1
for k1 in new_letter:
new_Seq+=letter_seq[k1]
new_Seq+=insert_read_decide(bp_list)
return new_Seq
def Ref_Ref_Produce(Chromo,bp_list,Ref_Seq_File):
start=int(bp_list[0])
end=int(bp_list[-1])
new1_ref=''
fin=os.popen(r'''samtools faidx %s %s:%d-%d'''%(Ref_Seq_File, Chromo, start,end))
fin.readline().strip().split()
for line in fin:
pin=line.strip().split()
new1_ref+=pin[0]
fin.close()
return new1_ref
def reverse(seq):
seq2=[]
for i in seq[::-1]:
seq2.append(i)
return ''.join(seq2)
def complementary(seq):
seq2=[]
for i in seq:
if i in 'ATGCN':
seq2.append('ATGCN'['TACGN'.index(i)])
elif i in 'atgcn':
seq2.append('atgcn'['tacgn'.index(i)])
return ''.join(seq2)
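#Reverse complement is obtained by composing the two helpers. Editor's trace:
#  complementary('ATGC')           ->  'TACG'
#  reverse(complementary('ATGC'))  ->  'GCAT'
#Characters outside ATGCN/atgcn are silently dropped by complementary().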
def unit_produce(letters): #parameter renamed: 'list' shadowed the builtin
temp1=[sorted(letters)[0]]
for k1 in sorted(letters)[1:]:
if ord(k1)-ord(temp1[-1][-1])==1:
temp1[-1]+=k1
else:
temp1.append(k1)
temp2=[]
for k1 in temp1:
for k2 in range(len(k1)+1)[1:]:
for k3 in range(len(k1)-k2+1):
temp2.append(k1[k3:(k3+k2)])
return temp2[::-1]
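#unit_produce expands runs of consecutive letters into every contiguous
#sub-unit, longest first. Editor's trace:
#  unit_produce(['a','b','d'])  ->  ['d', 'ab', 'b', 'a']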
def fasta_homo_write(fasta_out):
fo=open(fasta_out,'w')
print fasta_out
for k1 in chromos:
print >>fo, '>'+k1
new1_ref=''
rec1_start=0
for k2 in sorted(order_SV_Pos[k1].keys()):
rec1_start+=1
k3=order_SV_Pos[k1][k2]
start=int(k3[0][0][1])
end=int(k3[0][0][-1])
new1_ref+=Ref_Ref_Produce(k1,[rec1_start,start-1],ref)
new1_ref+=Ref_Alt_Produce(chromos,k3[0][0],k3[0][1][0],ref)
rec1_start=end
rec1_start+=1
new1_ref+=Ref_Ref_Produce(k1,[rec1_start,chromo_length[k1]],ref)
new1_seq=[]
for k1 in range(len(new1_ref)/60):
new1_seq.append(new1_ref[k1*60:(k1+1)*60])
new1_seq.append(new1_ref[(len(new1_ref)/60)*60:]) #remainder line; avoids NameError when new1_ref is shorter than 60bp
for k1 in new1_seq:
if not k1=='':
print >>fo, k1
fo.close()
def fasta_het_write_a(fasta_out):
fo1=open(fasta_out.replace('.het.fa','.het1.fa'),'w')
#fo2=open(fasta_out.replace('.het.fa','.het2.fa'),'w')
fo1.close()
#fo2.close()
print fasta_out.replace('.het.fa','.het1.fa')
#print fasta_out.replace('.het.fa','.het2.fa')
for k1 in chromos:
fo1=open(fasta_out.replace('.het.fa','.het1.fa'),'a')
#fo2=open(fasta_out.replace('.het.fa','.het2.fa'),'a')
print >>fo1, '>'+k1
#print >>fo2, '>'+k1
new1_ref=''
rec1_start=0
#new2_ref=''
#rec2_start=0
for k2 in sorted(order_SV_Pos[k1].keys()):
print [k1,k2]
rec1_start+=1
k3=order_SV_Pos[k1][k2]
start=int(k3[0][0][1])
end=int(k3[0][0][-1])
new1_ref+=Ref_Ref_Produce(k1,[rec1_start,start-1],ref)
if not k3[0][1][0]==k3[0][1][2]:
new1_ref+=Ref_Alt_Produce(chromos,k3[0][0],k3[0][1][0],ref)
else:
new1_ref+=Ref_Ref_Produce(k1,[start,end],ref)
rec1_start=end
#rec2_start+=1
#new2_ref+=Ref_Ref_Produce(k1,[rec2_start,start-1],ref)
#if not k3[0][1][1]==k3[0][1][2]:
# new2_ref+=Ref_Alt_Produce(chromos,k3[0][0],k3[0][1][1],ref)
#else:
# new2_ref+=Ref_Ref_Produce(k1,[start,end],ref)
#rec2_start=end
rec1_start+=1
#rec2_start+=1
new1_ref+=Ref_Ref_Produce(k1,[rec1_start,chromo_length[k1]],ref)
new1_seq=[]
for ka1 in range(len(new1_ref)/60):
new1_seq.append(new1_ref[ka1*60:(ka1+1)*60])
new1_seq.append(new1_ref[(len(new1_ref)/60)*60:]) #remainder line; avoids NameError when new1_ref is shorter than 60bp
for ka1 in new1_seq:
if not ka1=='':
print >>fo1, ka1
#new2_ref+=Ref_Ref_Produce(k1,[rec2_start,chromo_length[k1]],ref)
#new2_seq=[]
#for ka1 in range(len(new2_ref)/60):
# new2_seq.append(new2_ref[ka1*60:(ka1+1)*60])
#new2_seq.append(new2_ref[(ka1+1)*60:])
#for ka1 in new2_seq:
# if not ka1=='':
# print >>fo2, ka1
fo1.close()
#fo2.close()
def fasta_het_write_b(fasta_out):
#fo1=open(fasta_out.replace('.het.fa','.het1.fa'),'w')
fo2=open(fasta_out.replace('.het.fa','.het2.fa'),'w')
#fo1.close()
fo2.close()
#print fasta_out.replace('.het.fa','.het1.fa')
print fasta_out.replace('.het.fa','.het2.fa')
for k1 in chromos:
#fo1=open(fasta_out.replace('.het.fa','.het1.fa'),'a')
fo2=open(fasta_out.replace('.het.fa','.het2.fa'),'a')
#print >>fo1, '>'+k1
print >>fo2, '>'+k1
#new1_ref=''
#rec1_start=0
new2_ref=''
rec2_start=0
for k2 in sorted(order_SV_Pos[k1].keys()):
print [k1,k2]
k3=order_SV_Pos[k1][k2]
start=int(k3[0][0][1])
end=int(k3[0][0][-1])
#rec1_start+=1
#new1_ref+=Ref_Ref_Produce(k1,[rec1_start,start-1],ref)
#if not k3[0][1][0]==k3[0][1][2]:
# new1_ref+=Ref_Alt_Produce(chromos,k3[0][0],k3[0][1][0],ref)
#else:
# new1_ref+=Ref_Ref_Produce(k1,[start,end],ref)
#rec1_start=end
rec2_start+=1
new2_ref+=Ref_Ref_Produce(k1,[rec2_start,start-1],ref)
if not k3[0][1][1]==k3[0][1][2]:
new2_ref+=Ref_Alt_Produce(chromos,k3[0][0],k3[0][1][1],ref)
else:
new2_ref+=Ref_Ref_Produce(k1,[start,end],ref)
rec2_start=end
#rec1_start+=1
rec2_start+=1
#new1_ref+=Ref_Ref_Produce(k1,[rec1_start,chromo_length[k1]],ref)
#new1_seq=[]
#for ka1 in range(len(new1_ref)/60):
# new1_seq.append(new1_ref[ka1*60:(ka1+1)*60])
#new1_seq.append(new1_ref[(ka1+1)*60:])
#for ka1 in new1_seq:
# if not ka1=='':
# print >>fo1, ka1
new2_ref+=Ref_Ref_Produce(k1,[rec2_start,chromo_length[k1]],ref)
new2_seq=[]
for ka1 in range(len(new2_ref)/60):
new2_seq.append(new2_ref[ka1*60:(ka1+1)*60])
new2_seq.append(new2_ref[(len(new2_ref)/60)*60:]) #remainder line; avoids NameError when new2_ref is shorter than 60bp
for ka1 in new2_seq:
if not ka1=='':
print >>fo2, ka1
#fo1.close()
fo2.close()
def Sample_info_ReadIn(Sam_File):
fi=open(Sam_File)
for line in fi:
pin=line.strip().split()
if not pin==[]:
if not pin[0] in sv_hash.keys():
sv_hash[pin[0]]=[]
sv_hash[pin[0]].append([int(i) for i in pin[1:]])
sv_hash[pin[0]][-1][0]=int(sv_hash[pin[0]][-1][0]*1.25)
else:
sv_hash[pin[0]].append([int(i) for i in pin[1:]])
sv_hash[pin[0]][-1][0]=int(sv_hash[pin[0]][-1][0]*1.25)
fi.close()
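#Expected --input-sim format (editor's reading of the parser above): whitespace-
#separated lines of  SVTYPE  count  min_size  max_size , e.g.
#  DEL 30 100 400
#Counts are inflated by 25% up front to compensate for regions filtered out later.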
def sv_total_num_calcu():
sv_total_num=0
for k1 in del_stat:
sv_total_num+=k1[0]
for k1 in dup_stat:
sv_total_num+=k1[0]
for k1 in inv_stat:
sv_total_num+=k1[0]
for k1 in tra_stat:
sv_total_num+=k1[0]
for k1 in dup2_stat:
sv_total_num+=k1[0]
return sv_total_num
def pick_random_seqs(ref,sv_total_num,chromo_length):
#12% of all SVs carry micro-insertions at either/both ends
#so twice that number of sequences are randomly sampled from the genome as long micro-insertions
num_micro_ins_over20bp=float(sv_total_num)*0.12*2
genome_length=0
chromos_num_regions={}
chrom_seqs={}
for x in chromo_length.keys():
if not 'GL' in x and not x in ['X','Y','MT']:
genome_length+=chromo_length[x]
for x in chromo_length.keys():
if not 'GL' in x and not x in ['X','Y','MT']:
chromos_num_regions[x]=float(chromo_length[x])/float(genome_length)*num_micro_ins_over20bp
for x in chromos_num_regions.keys():
chrom_seqs[x]=[]
int_num=int(round(chromos_num_regions[x]))
seq_pick=random.sample(range(10000,chromo_length[x]-10000),int_num)
for y in sorted(seq_pick):
length_pick=random.sample(range(20,50),1)[0]
seqs=os.popen(r'''samtools faidx %s %s:%d-%d'''%(ref,x,y,y+length_pick))
seqs.readline()
test=seqs.readline().strip()
if not 'NNNNNNNN' in test:
chrom_seqs[x].append(test)
seqs.close()
return chrom_seqs
def produce_random_seqs(length):
out=[]
for x in range(length):
out.append(random.choice(['A','T','G','C']))
return ''.join(out)
opts,args=getopt.getopt(sys.argv[2:],'',['reference=','input-sim=','input-rec=','output-prefix='])
dict_opts=dict(opts)
Sam_File=dict_opts['--input-sim']
sv_hash={}
Sample_info_ReadIn(Sam_File)
del_stat=sv_stat_calcu(sv_hash,'DEL')
dup_stat=sv_stat_calcu(sv_hash,'DUP_TANDEM')
dup2_stat=sv_stat_calcu(sv_hash,'DUP')
dup3_stat=[]
for i in dup2_stat:
dup3_stat.append([i[0]]+[j+1000 for j in i[1:]])
dup2_stat=dup3_stat
inv_stat=sv_stat_calcu(sv_hash,'INV')
tra_stat=sv_stat_calcu(sv_hash,'TRA')
sv_total_num=sv_total_num_calcu()
del_size=sv_size_pick(del_stat)
dup_size=sv_size_pick(dup_stat)
dup2_size=sv_size_pick(dup2_stat)
inv_size=sv_size_pick(inv_stat)
tra_size=sv_size_pick(tra_stat)
refs=dict_opts['--reference']
ref=refs
if not os.path.isfile(refs):
print 'Error: reference genome not found!'
if not os.path.isfile(refs+'.fai'):
print 'Error: reference genome not indexed! Run samtools faidx on it first.'
chromos_TOTAL=chromos_readin(refs)
genome_length=chromos_TOTAL[0]
chromos=chromos_TOTAL[1]
chromo_num_region=chromos_TOTAL[2]
chromo_length=chromos_TOTAL[3]
sv_hash={}
sv_hash_add(del_size,'DEL')
sv_hash_add(dup_size,'DUP_TANDEM')
sv_hash_add(dup2_size,'DUP')
sv_hash_add(inv_size,'INV')
sv_hash_add(tra_size,'TRA')
SV_region=sv_region_pick()
SV_region_filter=[]
for x in SV_region:
if x[-1]=='DUP' and x[2]-x[1]<1100: continue
else:
SV_region_filter.append(x)
SV_region=SV_region_filter
sv_homo_info={}
sv_homo_initial()
sv_homo_produce()
sv_het_info={}
sv_het_produce()
for y in range(len(sv_het_info['DUP'])):
x=sv_het_info['DUP'][y]
if x[2]-x[1]<2000:
z=random.choice([x[1]+1000,x[2]-1000])
else:
z=random.choice(range(x[1]+800,x[1]+1200)+range(x[2]-1200,x[2]-800))
sv_het_info['DUP'][y]=x[:2]+[z]+x[2:]
sv_rec_het_produce()
sv_info={}
sv_info_rewrite(sv_het_info)
dup1={}
inv1={}
del1={}
tra1={}
sv_rec_2(sv_info)
sv_out={}
hash_reorder()
vcf_out=dict_opts['--output-prefix']+'.vcf'
write_VCF_header(vcf_out)
write_VCF_main(vcf_out)
fasta_out=dict_opts['--output-prefix']+'.het.fa'
#produce fasta files containing all simulated SVs for the heterozygous genome
order_SV_Pos={}
order_SV_Het_write(sv_info)
seq_ins_pools=pick_random_seqs(ref,sv_total_num,chromo_length)
fasta_het_write_a(fasta_out)
fasta_het_write_b(fasta_out)
os.system(r'''samtools faidx %s'''%(fasta_out.replace('.het.fa','.het1.fa')))
os.system(r'''samtools faidx %s'''%(fasta_out.replace('.het.fa','.het2.fa')))
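#End-to-end sketch of this branch (hedged; assumes sys.argv[1] selects the mode):
#  python <this script> heterozygous --reference hg19.fa \
#      --input-sim sv.sim.stat --output-prefix sim
#produces sim.het.<TYPE>.rec records, sim.vcf, and one fasta per haplotype
#(sim.het1.fa / sim.het2.fa), each indexed with samtools faidx.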
elif function_name=='homozygous':
def sv_rec_2(sv_info):
for k1ab in sorted(sv_info.keys()):
for k2ab in sv_info[k1ab].keys():
if not k2ab==k1ab:
k1aba=k1ab.split('/')[0]
k2aba=k2ab.split('/')[0]
k2abb=k2ab.split('/')[1]
flaga=[]
flagb=[]
test=[[],[]]
if flaga==[] and not k1aba==k2aba:
if k2aba=='':
csv1=[[i for i in k1aba],[],[],0]
else:
csv1=simple_flag_SA(k1aba,k2aba)
add_csv_info(csv1,1,k1ab,k2ab)
if flagb==[] and not k1aba==k2abb:
if k2abb=='':
csv1=[[i for i in k1aba],[],[],0] #assumed fix: mirrors the k2aba=='' branch above; k2abb is empty here, so iterating it yields nothing
else:
csv1=simple_flag_SA(k1aba,k2abb)
add_csv_info(csv1,2,k1ab,k2ab)
score_Cff=-20
def hash_reorder():
for ka1 in del1.keys():
if not ka1 in sv_out.keys():
sv_out[ka1]={}
for ka2 in del1[ka1]:
#fref=os.popen(r'''samtools faidx %s %s:%s-%s'''%(ref,ka1,str(ka2[0]+1),str(ka2[0]+1)))
#tre=fref.readline().strip().split()
#REF_AL=fref.readline().strip().split()[0]
REF_AL='N'
Pass_Sign='PASS'
if ka2[3]<score_Cff:
Pass_Sign='LowQual'
if ka2[2]=='heta':
GenoType='1|0'
elif ka2[2]=='hetb':
GenoType='0|1'
elif ka2[2]=='homo':
GenoType='1|1'
ka_new=[ka1,ka2[0],ka2[-1],REF_AL,'<DEL>',ka2[3],Pass_Sign,'SVTYPE=DEL;END='+str(ka2[1]),'GT',GenoType]
if not ka2[-1] in sv_out[ka1].keys():
sv_out[ka1][ka2[-1]]=[]
if not ka_new in sv_out[ka1][ka2[-1]]:
sv_out[ka1][ka2[-1]].append(ka_new)
for ka1 in inv1.keys():
if not ka1 in sv_out.keys():
sv_out[ka1]={}
for ka2 in inv1[ka1]:
#fref=os.popen(r'''samtools faidx %s %s:%s-%s'''%(ref,ka1,str(ka2[0]+1),str(ka2[0]+1)))
#tre=fref.readline().strip().split()
#REF_AL=fref.readline().strip().split()[0]
REF_AL='N'
Pass_Sign='PASS'
if ka2[3]<score_Cff:
Pass_Sign='LowQual'
if ka2[2]=='heta':
GenoType='1|0'
elif ka2[2]=='hetb':
GenoType='0|1'
elif ka2[2]=='homo':
GenoType='1|1'
ka_new=[ka1,ka2[0],ka2[-1],REF_AL,'<INV>',ka2[3],Pass_Sign,'SVTYPE=INV;END='+str(ka2[1]),'GT',GenoType]
if not ka2[-1] in sv_out[ka1].keys():
sv_out[ka1][ka2[-1]]=[]
if not ka_new in sv_out[ka1][ka2[-1]]:
sv_out[ka1][ka2[-1]].append(ka_new)
for ka1 in dup1.keys():
if not ka1 in sv_out.keys():
sv_out[ka1]={}
for ka2 in dup1[ka1]:
#fref=os.popen(r'''samtools faidx %s %s:%s-%s'''%(ref,ka1,str(ka2[0]+1),str(ka2[0]+1)))
#tre=fref.readline().strip().split()
#REF_AL=fref.readline().strip().split()[0]
REF_AL='N'
CopyNumber=str(ka2[-1])
Pass_Sign='PASS'
if ka2[3]<score_Cff:
Pass_Sign='LowQual'
if ka2[2]=='heta':
GenoType='1|0'
elif ka2[2]=='hetb':
GenoType='0|1'
elif ka2[2]=='homo':
GenoType='1|1'
ka_new=[ka1,ka2[0],ka2[-2],REF_AL,'<DUP>',ka2[3],Pass_Sign,'SVTYPE=DUP;END='+str(ka2[1]),'GT:CN',GenoType+':'+CopyNumber]
if not ka2[-2] in sv_out[ka1].keys():
sv_out[ka1][ka2[-2]]=[]
if not ka_new in sv_out[ka1][ka2[-2]]:
sv_out[ka1][ka2[-2]].append(ka_new)
for ka1 in tra1.keys():
ks1=ka1.split('_')[0]
ks2='_'.join(ka1.split('_')[:-1])
SV_Score=float(ka1.split('_')[-1])
Pass_Sign='PASS'
if SV_Score<score_Cff:
Pass_Sign='LowQual'
if not ks1 in sv_out.keys():
sv_out[ks1]={}
if not ks2 in sv_out[ks1].keys():
sv_out[ks1][ks2]=[]
for ka2 in tra1[ka1].keys():
hetx='het'+ka2
if ka2=='a':
GenoType='1|0'
elif ka2=='b':
GenoType='0|1'
for ka3 in tra1[ka1][ka2]:
ka_new=ka3[:2]+[ks2,ka3[2]]+ka3[3:]+[SV_Score,Pass_Sign,'SVTYPE=TRA','GT',GenoType]
if not ka_new in sv_out[ks1][ks2]:
sv_out[ks1][ks2].append(ka_new)
def write_VCF_header(output_file):
fo=open(output_file,'w')
print output_file
print>>fo, '##fileformat=VCFv4.1'
print>>fo,'##fileDate='+time.strftime("%Y%m%d")
print>>fo,'##reference=hg19'
print>>fo,'##INFO=<ID=BKPTID,Number=.,Type=String,Description="ID of the assembled alternate allele in the assembly file">'
print>>fo,'##INFO=<ID=CIEND,Number=2,Type=Integer,Description="Confidence interval around END for imprecise variants">'
print>>fo,'##INFO=<ID=CIPOS,Number=2,Type=Integer,Description="Confidence interval around POS for imprecise variants">'
print>>fo,'##INFO=<ID=END,Number=1,Type=Integer,Description="End position of the variant described in this record">'
print>>fo,'##INFO=<ID=HOMLEN,Number=.,Type=Integer,Description="Length of base pair identical micro-homology at event breakpoints">'
print>>fo,'##INFO=<ID=HOMSEQ,Number=.,Type=String,Description="Sequence of base pair identical micro-homology at event breakpoints">'
print>>fo,'##INFO=<ID=IMPRECISE,Number=0,Type=Flag,Description="Imprecise structural variation">'
print>>fo,'##INFO=<ID=MEINFO,Number=4,Type=String,Description="Mobile element info of the form NAME,START,END,POLARITY">'
print>>fo,'##INFO=<ID=SVLEN,Number=.,Type=Integer,Description="Difference in length between REF and ALT alleles">'
print>>fo,'##INFO=<ID=SVTYPE,Number=1,Type=String,Description="Type of structural variant">'
print>>fo,'##FILTER=<ID=LowQual,Description="Structural variant score below the quality cutoff">'
print>>fo,'##ALT=<ID=DEL,Description="Deletion">'
print>>fo,'##ALT=<ID=DEL:ME:ALU,Description="Deletion of ALU element">'
print>>fo,'##ALT=<ID=DEL:ME:L1,Description="Deletion of L1 element">'
print>>fo,'##ALT=<ID=DUP,Description="Duplication">'
print>>fo,'##ALT=<ID=DUP_TANDEM,Description="Tandem Duplication">'
print>>fo,'##ALT=<ID=INS,Description="Insertion of novel sequence">'
print>>fo,'##ALT=<ID=INS:ME:ALU,Description="Insertion of ALU element">'
print>>fo,'##ALT=<ID=INS:ME:L1,Description="Insertion of L1 element">'
print>>fo,'##ALT=<ID=INV,Description="Inversion">'
print>>fo,'##ALT=<ID=CNV,Description="Copy number variable region">'
print>>fo,'##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">'
print>>fo,'##FORMAT=<ID=GQ,Number=1,Type=Float,Description="Genotype quality">'
print>>fo,'##FORMAT=<ID=CN,Number=1,Type=Integer,Description="Copy number genotype for imprecise events">'
print>>fo,'##FORMAT=<ID=CNQ,Number=1,Type=Float,Description="Copy number genotype quality for imprecise events">'
print>>fo,'\t'.join(['#CHROM','POS','ID','REF','ALT','QUAL','FILTER','INFO','FORMAT',output_file.split('/')[-1].replace('.vcf','')])
fo.close()
def write_VCF_main(output_file):
fo=open(output_file,'a')
print output_file
sv_reorganize={}
for k1 in sv_out.keys():
sv_reorganize[k1]={}
for k2 in sv_out[k1].keys():
start=int(k2.split('_')[1])
if not start in sv_reorganize[k1].keys():
sv_reorganize[k1][start]={}
SVtemp_a=[]
SVtemp_b=[]
for k3 in sv_out[k1][k2]:
if not k3[:-1] in SVtemp_a:
SVtemp_a.append(k3[:-1])
SVtemp_b.append([k3[-1]])
else:
SVtemp_b[SVtemp_a.index(k3[:-1])].append(k3[-1])
SVtemp=[]
sv_reorganize[k1][start][k2]=[]
for k3 in range(len(SVtemp_a)):
if len(SVtemp_b[k3])==2 and SVtemp_b[k3] in [['0|1', '1|0'],['1|0', '0|1']]:
SVtemp_b[k3]=['1|1']
for k3 in range(len(SVtemp_a)):
for k4 in SVtemp_b[k3]:
sv_reorganize[k1][start][k2].append(SVtemp_a[k3]+[k4])
for k1 in chromos:
if k1 in sv_reorganize.keys():
for k2 in sorted(sv_reorganize[k1].keys()):
for k3 in sorted(sv_reorganize[k1][k2].keys()):
for k4 in sv_reorganize[k1][k2][k3]:
if k4[3]=='N':
k4[3]=ref_base_returnN(ref,k4[0],k4[1])
print >>fo, '\t'.join([str(i) for i in k4])
fo.close()
def simple_flag_SA(k1,k2):
temp=[]
break_flag=0
for i in k2:
if not i=='^':
temp.append(i)
else:
temp[-1]+=i
temp2=[temp[0]]
for i in range(len(temp[1:])):
if not '^' in temp[i] and not '^' in temp[i+1] and ord(temp[i+1])-ord(temp[i])==1:
temp2[-1]+=temp[i+1]
elif '^' in temp[i] and '^' in temp[i+1] and ord(temp[i+1][0])-ord(temp[i][0])==-1:
temp2[-1]=temp[i+1][0]+temp2[-1]
else:
temp2.append(temp[i+1])
outdel=[]
outinv=[]
outdup=[]
outtra=0
for i in range(len(temp2)):
j=temp2[i]
if '^' in j:
if not j.replace('^','') in outinv:
outinv.append(j.replace('^',''))
temp2[i]=j.replace('^','')
temp3=''.join(temp2)
for i in range(len(temp3)-1):
if ord(temp3[i+1])-ord(temp3[i])<0:
outtra=1
if not temp3==k1:
temp4=[]
for i in temp3:
if temp3.count(i)>1:
if not i in outdup:
outdup.append(i)
if not i in temp4:
temp4.append(i)
if not ''.join(temp4)==k1:
for i in k1:
if not i in temp4:
outdel.append(i)
if not outdup==[]:
dupuni=unit_produce(outdup)
outdup2=[]
k3=k2
for i in dupuni:
ia=i
ib=''.join([j+'^' for j in i[::-1]])
if len(i)>1:
if temp2.count(ia)+temp2.count(ib)>1:
outdup2.append([i,temp2.count(ia)+temp2.count(ib)])
k3=k3.replace(ia,'')
k3=k3.replace(ib,'')
elif len(i)==1:
if k3.count(ia)+k3.count(ib)>1:
outdup2.append([i,k3.count(ia)])
k3=k3.replace(ia,'')
k3=k3.replace(ib,'')
else:
outdup2=[]
return [outdel,outinv,outdup2,outtra]
def add_csv_info(csv1,flag_sex,k1,k2):
#flag_sex=1: Maternal
#flag_sex=2: Paternal
if flag_sex==1:
del_let=[csv1[0],[]]
inv_let=[csv1[1],[]]
dup_let=[csv1[2],[]]
else:
del_let=[[],csv1[0]]
inv_let=[[],csv1[1]]
dup_let=[[],csv1[2]]
for k3 in sv_info[k1][k2]:
del_info_add(k3,del_let)
inv_info_add(k3,inv_let)
dup_info_2_add(k3,dup_let)
if csv1[3]==1:
tra_info_add(k1,k2)
def del_info_add(k3,del_let):
tempa=bp_to_hash(k3[:-1],del_let[0])
tempb=bp_to_hash(k3[:-1],del_let[1])
for k1 in tempa:
if k1 in tempb:
tempc='homo' #hash_reorder checks for 'homo'; the original tag 'hom' never matched it
tempb.remove(k1)
else:
tempc='heta'
if not k1[0] in del1.keys():
del1[k1[0]]=[]
del1[k1[0]].append(k1[1:]+[tempc,k3[-1],'_'.join(k3[:-1])])
for k1 in tempb:
if not k1[0] in del1.keys():
del1[k1[0]]=[]
del1[k1[0]].append(k1[1:]+['hetb',k3[-1],'_'.join(k3[:-1])])
def dup_info_add(k3,dup_let):
#dup_let=[k2i,k2j]
for k2x in dup_let:
for k4 in k2x:
temp=bp_to_hash(k3[:-1],[i for i in k4])
for k5 in temp:
if not k5[0] in dup1.keys():
dup1[k5[0]]=[]
dup1[k5[0]].append(k5[1:]+[k3[-1],'_'.join(k3[:-1]),k2a.count(k4)])
def dup_info_2_add(k3,dup_let):
temprec=-1
for k2x in dup_let:
temprec+=1
hetx=['heta','hetb'][temprec]
for k4 in k2x:
temp=bp_to_hash(k3[:-1],[i for i in k4[0]])
for k5 in temp:
if not k5[0] in dup1.keys():
dup1[k5[0]]=[]
if k4[1]>1:
dup1[k5[0]].append(k5[1:]+[hetx,k3[-1],'_'.join(k3[:-1]),k4[1]])
def inv_info_add(k3,inv_let):
#inv_let=[k2m,k2n]
temprec=-1
for k2x in inv_let:
temprec+=1
hetx=['heta','hetb'][temprec]
for k4 in k2x:
temp=bp_to_hash(k3[:-1],[i for i in k4])
for k5 in temp:
if not k5[0] in inv1.keys():
inv1[k5[0]]=[]
inv1[k5[0]].append(k5[1:]+[hetx,k3[-1],'_'.join(k3[:-1])])
def let_reclust(vec_in):
if vec_in==[]:
return []
else:
k2e=[]
k2e=[vec_in[0]]
for k3 in range(len(vec_in)-1):
if '^' in vec_in[k3+1]:
if '^' in vec_in[k3] and ord(vec_in[k3][0])-ord(vec_in[k3+1][0])==1:
k2e[-1]+=vec_in[k3+1]
else:
k2e.append(vec_in[k3+1])
else:
if ord(vec_in[k3+1][0])-ord(vec_in[k3][0])==1 and not '^' in vec_in[k3]:
k2e[-1]+=vec_in[k3+1]
else:
k2e.append(vec_in[k3+1])
k2f=[]
for k3 in k2e:
if '^' in k3:
k5=''
for k4 in range(len(k3)/2):
k5+=k3[2*k4]
k6=k5[::-1]+'^'
if not k6 in k2f:
k2f.append(k6)
else:
k2f.append(k3)
return k2f
def dup_let_recombind(vec_in):
if vec_in==[]:
return []
else:
vec2=sorted(vec_in)
vec=[[vec2[0]]]
for ka in vec2[1:]:
if ord(ka)-ord(vec[-1][-1])==1:
vec[-1].append(ka)
else:
vec.append([ka])
vec3=[]
for ka in vec:
if len(ka)==1:
vec3.append(ka)
else:
for kb in range(2,len(ka)+1):
for kc in ka[:(1-kb)]:
vec3.append([])
for kd in range(kb):
vec3[-1].append(ka[ka.index(kc)+kd])
vec4=[''.join(i) for i in vec3]
return vec4
def comp_info_reorganize(k1,k2):
del_let=[[],[]]
dup_let=[[],[]]
inv_let=[[],[]]
tra_let=[[],[]]
k2a=k2.split('/')[0]
k2b=k2.split('/')[1]
k2c=[]
k2d=[]
for k3 in k2a:
if not k3=='^':
k2c.append(k3)
else:
k2c[-1]+=k3
for k3 in k2b:
if not k3=='^':
k2d.append(k3)
else:
k2d[-1]+=k3
for k3 in k1.split('/')[0]:
if k2a.count(k3)==0:
del_let[0].append(k3)
if k2b.count(k3)==0:
del_let[1].append(k3)
if k2a.count(k3)>1:
dup_let[0].append(k3)
if k2b.count(k3)>1:
dup_let[1].append(k3)
k2e=let_reclust(k2c)
k2f=let_reclust(k2d)
k2g=dup_let_recombind(dup_let[0])
k2h=dup_let_recombind(dup_let[1])
k2i=[]
#integrated dup sections
k2j=[]
#integrated dup sections
for k3 in k2g:
flag1=0
for k4 in k2e:
if k3 in k4:
flag1+=1
if flag1>1:
k2i.append(k3)
for k3 in dup_let[0]:
if k2e.count(k3[0])+k2e.count(k3[0]+'^')>0:
if not k3[0] in k2i:
k2i.append(k3[0])
for k3 in k2h:
flag1=0
for k4 in k2e:
if k3 in k4:
flag1+=1
if flag1>1:
k2j.append(k3)
for k3 in dup_let[1]:
if k2e.count(k3[0])+k2e.count(k3[0]+'^')>0:
if not k3[0] in k2j:
k2j.append(k3[0])
k2m=[]
for k3 in k2e:
if k3[-1]=='^':
k2m.append(k3)
k2n=[]
for k3 in k2f:
if k3[-1]=='^':
k2n.append(k3)
for k3 in sv_info[k1][k2]:
del_info_add(k3,del_let)
dup_info_add(k3,[k2i,k2j])
inv_info_add(k3,[k2m,k2n])
def bp_to_hash(bp_list,sv_let):
bp_hash={}
block_rec=0
block_hash=[]
sv_let=[i[0] for i in sv_let]
for a3 in bp_list:
if a3 in chromos or not a3.isdigit():
block_hash.append([a3])
else:
block_hash[-1].append(a3)
for a3 in block_hash:
for a4 in range(len(a3)-2):
bp_hash[chr(97+block_rec)]=[a3[0],a3[a4+1],a3[a4+2]]
block_rec+=1
out=[]
if not sv_let==[]:
if len(sv_let)==1:
out=[bp_hash[sv_let[0]]]
else:
out.append(bp_hash[sv_let[0]])
for ka in range(len(sv_let)-1):
if ord(sv_let[ka+1])-ord(sv_let[ka])==1 and bp_hash[sv_let[ka+1]][0]==bp_hash[sv_let[ka]][0]:
out[-1]+=bp_hash[sv_let[ka+1]][1:]
else:
out.append(bp_hash[sv_let[ka+1]])
out2=[]
for ka in out:
out2.append([ka[0],int(ka[1]),int(ka[-1])])
return out2
def tra_info_add(k1,k2):
for k3 in sv_info[k1][k2]:
SV_ID='_'.join([str(i) for i in k3])
tra1[SV_ID]={}
k2a=k2.split('/')[0]
k2b=k2.split('/')[1]
bp_hash={}
block_rec=0
block_hash=[]
for a3 in k3[:-1]:
if a3 in chromos or not a3.isdigit():
block_hash.append([a3])
else:
block_hash[-1].append(a3)
for a3 in block_hash:
for a4 in range(len(a3)-2):
bp_hash[chr(97+block_rec)]=[a3[0],a3[a4+1],a3[a4+2]]
block_rec+=1
for a3 in bp_hash.keys():
temp=[]
for a4 in bp_hash[a3][1:]:
temp.append(int(a4)-1)
temp.append(int(a4))
bp_hash[a3][1:]=temp
#ref_allele['left']=[ref_allele[k1[0]][0]]
#ref_allele['right']=[ref_allele[k1[-1]][1]]
bp_hash['left']=[bp_hash[k1[0]][0],bp_hash[k1[0]][1],bp_hash[k1[0]][2]]
bp_hash['right']=[bp_hash[k1[-1]][0],bp_hash[k1[-1]][3],bp_hash[k1[-1]][4]]
ref_allele={}
for a3 in bp_hash.keys():
ref_allele[a3]=[bp_hash[a3][0]]
for a4 in bp_hash[a3][1:]:
ref_allele[a3].append(ref_base_returnN(ref,bp_hash[a3][0],a4))
if not k2a==k1.split('/')[0] and del_flag_SA(k1.split('/')[0],k2a)==0:
flag1=0#flag1==0:w/o inversion in the alt structure
if '^' in k2a:
flag1+=1
flag2=0#flag2==0:w/o duplication in the alt structure
for j in k2a:
if k2a.count(j)>1:
flag2+=1
flag3=0 #flag3==0: w/o translocation
if len(k2a)>1:
for i in range(len(k2a)-1):
if not ord(k2a[i+1])>ord(k2a[i]):
flag3+=1
if flag1+flag2+flag3==0:
heta_Del_block=[]
for a1 in k1.split('/')[0]:
if not a1 in k2a:
heta_Del_block.append(a1)
tra1[SV_ID]['a']=[]
block_hash=[]
del_hash={}
block_rec=0
for a3 in k3[:-1]: #assumed fix: the original referenced an undefined 'a2'; k3[:-1] matches the breakpoint parsing above
if a3 in chromos:
block_hash.append([a3])
else:
block_hash[-1].append(a3)
for a3 in block_hash:
for a4 in range(len(a3)-2):
del_hash[chr(97+block_rec)]=[a3[0],a3[a4+1],a3[a4+2]]
block_rec+=1
if not heta_Del_block==[]:
a_heta=0
heta_Del_new=[heta_Del_block[0]]
while True:
a_heta+=1
if a_heta==len(heta_Del_block):break
if ord(heta_Del_block[a_heta])-ord(heta_Del_block[a_heta-1])==1 and del_hash[heta_Del_block[a_heta]][0]==del_hash[heta_Del_block[a_heta-1]][0]:
heta_Del_new[-1]+=heta_Del_block[a_heta]
else:
heta_Del_new.append(heta_Del_block[a_heta])
for a3 in heta_Del_new:
a4=a3[0]
tra1[SV_ID]['a'].append(['DEL',del_hash[a4][0],int(del_hash[a4][1]),ref_allele[a4][2]])
a4=a3[-1]
tra1[SV_ID]['a'][-1].append(int(del_hash[a4][2])-1)
else:
tra1[SV_ID]['a']=[]
t1=[]
for a3 in k2a:
if not a3=='^':
t1.append(a3)
else:
t1[-1]+=a3
t2=[t1[0]]
for a3 in t1[1:]:
if not '^' in a3 and not '^' in t2[-1] and ord(a3)-ord(t2[-1][-1])==1 and bp_hash[a3[0]][0]==bp_hash[t2[-1][-1]][0]:
t2[-1]+=a3
elif '^' in a3 and '^' in t2[-1] and ord(t2[-1][-2])-ord(a3[0])==1 and bp_hash[a3[0]][0]==bp_hash[t2[-1][-2]][0]:
t2[-1]+=a3
else:
t2.append(a3)
a3='left'
a4=t2[0]
l_chr=bp_hash[a3][0]
r_chr=bp_hash[a4[0]][0]
if not '^' in a4:
if not a4[0]==k1[0]:
tra1[SV_ID]['a'].append([r_chr,bp_hash[a4[0]][2],ref_allele[a4[0]][2],']'+l_chr+':'+str(bp_hash[a3][1])+']'+ref_allele[a4[0]][2]])
tra1[SV_ID]['a'].append([l_chr,bp_hash[a3][1],ref_allele[a3][1],ref_allele[a3][1]+'['+r_chr+':'+str(bp_hash[a4[0]][2])+'['])
elif '^' in a4:
tra1[SV_ID]['a'].append([r_chr, bp_hash[a4[0]][3],ref_allele[a4[0]][3],ref_allele[a4[0]][3]+']'+l_chr+':'+str(bp_hash[a3][1])+']'])
tra1[SV_ID]['a'].append([l_chr,bp_hash[a3][1],ref_allele[a3][1],ref_allele[a3][1]+']'+r_chr+':'+str(bp_hash[a4[0]][3])+']'])
for t3 in range(len(t2)-1):
a3=t2[t3]
a4=t2[t3+1]
l_chr=bp_hash[a3[0]][0]
r_chr=bp_hash[a4[0]][0]
if not '^' in a3 and not '^' in a4:
tra1[SV_ID]['a'].append([r_chr,bp_hash[a4[0]][2],ref_allele[a4[0]][2],']'+l_chr+':'+str(bp_hash[a3[-1]][3])+']'+ref_allele[a4[0]][2]])
tra1[SV_ID]['a'].append([l_chr,bp_hash[a3[-1]][3],ref_allele[a3[-1]][3],ref_allele[a3[-1]][3]+'['+bp_hash[a4[0]][0]+':'+str(bp_hash[a4[0]][2])+'['])
elif '^' in a3 and not '^' in a4:
tra1[SV_ID]['a'].append([r_chr,bp_hash[a4[0]][2],ref_allele[a4[0]][2],'['+l_chr+':'+str(bp_hash[a3[-2]][2])+'['+ref_allele[a4[0]][2]])
tra1[SV_ID]['a'].append([l_chr,bp_hash[a3[-2]][2],ref_allele[a3[-2]][2],'['+bp_hash[a4[0]][0]+':'+str(bp_hash[a4[0]][2])+'['+ref_allele[a3[-2]][2]])
elif not '^' in a3 and '^' in a4:
tra1[SV_ID]['a'].append([r_chr,bp_hash[a4[0]][3],ref_allele[a4[0]][3],ref_allele[a4[0]][3]+']'+l_chr+':'+str(bp_hash[a3[-1]][3])+']'])
tra1[SV_ID]['a'].append([l_chr,bp_hash[a3[-1]][3],ref_allele[a3[-1]][3],ref_allele[a3[-1]][3]+']'+r_chr+':'+str(bp_hash[a4[0]][3])+']'])
elif '^' in a3 and '^' in a4:
tra1[SV_ID]['a'].append([r_chr,bp_hash[a4[0]][3],ref_allele[a4[0]][3],ref_allele[a4[0]][3]+'['+l_chr+':'+str(bp_hash[a3[-2]][2])+'['])
tra1[SV_ID]['a'].append([l_chr,bp_hash[a3[-2]][2],ref_allele[a3[-2]][2], ']'+r_chr+':'+str(bp_hash[a4[0]][3])+']'+ref_allele[a3[-2]][2]])
if len(t2)>1:
a3=t2[t3+1]
else:
a3=t2[0]
a4='right'
l_chr=bp_hash[a3[0]][0]
r_chr=bp_hash[a4][0]
if not '^' in a3:
if not a3[-1]==k1[-1]:
tra1[SV_ID]['a'].append([r_chr,bp_hash[a4][2],ref_allele[a4][2],']'+l_chr+':'+str(bp_hash[a3[-1]][3])+']'+ref_allele[a4][2]])
tra1[SV_ID]['a'].append([l_chr,bp_hash[a3[-1]][3],ref_allele[a3[-1]][3],ref_allele[a3[-1]][3]+'['+bp_hash[a4][0]+':'+str(bp_hash[a4][2])+'['])
if '^' in a3:
tra1[SV_ID]['a'].append([r_chr,bp_hash[a4][2],ref_allele[a4][2],'['+l_chr+':'+str(bp_hash[a3[-2]][2])+'['+ref_allele[a4][2]])
tra1[SV_ID]['a'].append([l_chr,bp_hash[a3[-2]][2],ref_allele[a3[-2]][2],'['+bp_hash[a4][0]+':'+str(bp_hash[a4][2])+'['+ref_allele[a3[-2]][2]])
#print [k1,k2]
if not k2b==k1.split('/')[1] and del_flag_SA(k1.split('/')[1],k2b)==0:
flag1=0#flag1==0:w/o inversion in the alt structure
if '^' in k2b:
flag1+=1
flag2=0#flag2==0:w/o duplication in the alt structure
for j in k2b:
if k2b.count(j)>1:
flag2+=1
flag3=0 #flag3==0: w/o translocation
if len(k2b)>1:
for i in range(len(k2b)-1):
if not ord(k2b[i+1])>ord(k2b[i]):
flag3+=1
if flag1+flag2+flag3==0:
heta_Del_block=[]
for a1 in k1.split('/')[1]:
if not a1 in k2b:
heta_Del_block.append(a1)
tra1[SV_ID]['b']=[]
block_hash=[]
del_hash={}
block_rec=0
for a3 in k3[:-1]: #assumed fix: the original referenced an undefined 'a2'; k3[:-1] matches the breakpoint parsing above
if a3 in chromos:
block_hash.append([a3])
else:
block_hash[-1].append(a3)
for a3 in block_hash:
for a4 in range(len(a3)-2):
del_hash[chr(97+block_rec)]=[a3[0],a3[a4+1],a3[a4+2]]
block_rec+=1
if not heta_Del_block==[]:
a_heta=0
heta_Del_new=[heta_Del_block[0]]
while True:
a_heta+=1
if a_heta==len(heta_Del_block):break
if ord(heta_Del_block[a_heta])-ord(heta_Del_block[a_heta-1])==1 and del_hash[heta_Del_block[a_heta]][0]==del_hash[heta_Del_block[a_heta-1]][0]:
heta_Del_new[-1]+=heta_Del_block[a_heta]
else:
heta_Del_new.append(heta_Del_block[a_heta])
for a3 in heta_Del_new:
a4=a3[0]
tra1[SV_ID]['b'].append(['DEL',del_hash[a4][0],int(del_hash[a4][1]),ref_allele[a4][2]])
a4=a3[-1]
tra1[SV_ID]['b'][-1].append(int(del_hash[a4][2])-1)
else:
tra1[SV_ID]['b']=[]
t1=[]
for a3 in k2b:
if not a3=='^':
t1.append(a3)
else:
t1[-1]+=a3
t2=[t1[0]]
for a3 in t1[1:]:
if not '^' in a3 and not '^' in t2[-1] and ord(a3)-ord(t2[-1][-1])==1 and bp_hash[a3[0]][0]==bp_hash[t2[-1][-1]][0]:
t2[-1]+=a3
elif '^' in a3 and '^' in t2[-1] and ord(t2[-1][-2])-ord(a3[0])==1 and bp_hash[a3[0]][0]==bp_hash[t2[-1][-2]][0]:
t2[-1]+=a3
else:
t2.append(a3)
a3='left'
a4=t2[0]
l_chr=bp_hash[a3][0]
r_chr=bp_hash[a4[0]][0]
if not '^' in a4:
if not a4[0]==k1[0]:
tra1[SV_ID]['b'].append([r_chr,bp_hash[a4[0]][2],ref_allele[a4[0]][2],']'+l_chr+':'+str(bp_hash[a3][1])+']'+ref_allele[a4[0]][2]])
tra1[SV_ID]['b'].append([l_chr,bp_hash[a3][1],ref_allele[a3][1],ref_allele[a3][1]+'['+r_chr+':'+str(bp_hash[a4[0]][2])+'['])
elif '^' in a4:
tra1[SV_ID]['b'].append([r_chr, bp_hash[a4[0]][3],ref_allele[a4[0]][3],ref_allele[a4[0]][3]+']'+l_chr+':'+str(bp_hash[a3][1])+']'])
tra1[SV_ID]['b'].append([l_chr,bp_hash[a3][1],ref_allele[a3][1],ref_allele[a3][1]+']'+r_chr+':'+str(bp_hash[a4[0]][3])+']'])
for t3 in range(len(t2)-1):
a3=t2[t3]
a4=t2[t3+1]
l_chr=bp_hash[a3[0]][0]
r_chr=bp_hash[a4[0]][0]
if not '^' in a3 and not '^' in a4:
tra1[SV_ID]['b'].append([r_chr,bp_hash[a4[0]][2],ref_allele[a4[0]][2],']'+l_chr+':'+str(bp_hash[a3[-1]][3])+']'+ref_allele[a4[0]][2]])
tra1[SV_ID]['b'].append([l_chr,bp_hash[a3[-1]][3],ref_allele[a3[-1]][3],ref_allele[a3[-1]][3]+'['+bp_hash[a4[0]][0]+':'+str(bp_hash[a4[0]][2])+'['])
elif '^' in a3 and not '^' in a4:
tra1[SV_ID]['b'].append([r_chr,bp_hash[a4[0]][2],ref_allele[a4[0]][2],'['+l_chr+':'+str(bp_hash[a3[-2]][2])+'['+ref_allele[a4[0]][2]])
tra1[SV_ID]['b'].append([l_chr,bp_hash[a3[-2]][2],ref_allele[a3[-2]][2],'['+bp_hash[a4[0]][0]+':'+str(bp_hash[a4[0]][2])+'['+ref_allele[a3[-2]][2]])
elif not '^' in a3 and '^' in a4:
tra1[SV_ID]['b'].append([r_chr,bp_hash[a4[0]][3],ref_allele[a4[0]][3],ref_allele[a4[0]][3]+']'+l_chr+':'+str(bp_hash[a3[-1]][3])+']'])
tra1[SV_ID]['b'].append([l_chr,bp_hash[a3[-1]][3],ref_allele[a3[-1]][3],ref_allele[a3[-1]][3]+']'+r_chr+':'+str(bp_hash[a4[0]][3])+']'])
elif '^' in a3 and '^' in a4:
tra1[SV_ID]['b'].append([r_chr,bp_hash[a4[0]][3],ref_allele[a4[0]][3],ref_allele[a4[0]][3]+'['+l_chr+':'+str(bp_hash[a3[-2]][2])+'['])
tra1[SV_ID]['b'].append([l_chr,bp_hash[a3[-2]][2],ref_allele[a3[-2]][2], ']'+r_chr+':'+str(bp_hash[a4[0]][3])+']'+ref_allele[a3[-2]][2]])
if len(t2)>1:
a3=t2[t3+1]
else:
a3=t2[0]
a4='right'
l_chr=bp_hash[a3[0]][0]
r_chr=bp_hash[a4][0]
if not '^' in a3:
if not a3[-1]==k1[-1]:
tra1[SV_ID]['b'].append([r_chr,bp_hash[a4][2],ref_allele[a4][2],']'+l_chr+':'+str(bp_hash[a3[-1]][3])+']'+ref_allele[a4][2]])
tra1[SV_ID]['b'].append([l_chr,bp_hash[a3[-1]][3],ref_allele[a3[-1]][3],ref_allele[a3[-1]][3]+'['+bp_hash[a4][0]+':'+str(bp_hash[a4][2])+'['])
if '^' in a3:
tra1[SV_ID]['b'].append([r_chr,bp_hash[a4][2],ref_allele[a4][2],'['+l_chr+':'+str(bp_hash[a3[-2]][2])+'['+ref_allele[a4][2]])
tra1[SV_ID]['b'].append([l_chr,bp_hash[a3[-2]][2],ref_allele[a3[-2]][2],'['+bp_hash[a4][0]+':'+str(bp_hash[a4][2])+'['+ref_allele[a3[-2]][2]])
def sv_homo_initial():
sv_homo_info['DEL']=[]
sv_homo_info['DUP']=[]
sv_homo_info['INV']=[]
sv_homo_info['TRA']=[]
sv_homo_info['DUP_TANDEM']=[]
def produce_keys(key):
if key=='DEL':
ka='a/a'
kb='/'
elif key=='DUP_TANDEM':
ka='a/a'
dup_num=random.sample(range(2,20),1)
kb='/'.join([''.join(['a' for i in range(dup_num[0])]),''.join(['a' for i in range(dup_num[0])])])
elif key=='DUP':
ka='ab/ab'
kb='aba/aba'
elif key=='INV':
ka='a/a'
kb='a^/a^'
elif key=='TRA':
ka='ab/ab'
kb='ba/ba'
return [ka,kb]
def sv_homo_produce():
for k1 in SV_region:
sv_len=k1[2]-k1[1]
k2=k1[-1]
sv_homo_info[k2].append(k1+produce_keys(k2))
def sv_het_produce():
for k1 in sv_homo_info.keys():
sv_het_info[k1]=[]
for k2 in sv_homo_info[k1]:
allele=random.choice(range(2))
allele_pool=[k2[-2].split('/')[0],k2[-1].split('/')[0]]
k2[-1]='/'.join([allele_pool[allele],allele_pool[1-allele]])
sv_het_info[k1].append(k2)
def sv_rec_homo_produce():
for k1 in sv_homo_info.keys():
fo=open(dict_opts['--output-prefix']+'.homo.'+k1+'.rec','w')
print dict_opts['--output-prefix']+'.homo.'+k1+'.rec'
for k2 in sv_homo_info[k1]:
print >>fo, ' '.join([str(i) for i in k2])
fo.close()
def sv_rec_het_produce():
for k1 in sv_het_info.keys():
fo=open(dict_opts['--output-prefix']+'.het.'+k1+'.rec','w')
print dict_opts['--output-prefix']+'.het.'+k1+'.rec'
for k2 in sv_het_info[k1]:
print >>fo, ' '.join([str(i) for i in k2])
fo.close()
def sv_info_rewrite(sv_h_info):
for k1 in sv_h_info.keys():
for k2 in sv_h_info[k1]:
if not k2[-2] in sv_info.keys():
sv_info[k2[-2]]={}
if not k2[-1] in sv_info[k2[-2]].keys():
sv_info[k2[-2]][k2[-1]]=[]
sv_info[k2[-2]][k2[-1]].append([str(i) for i in k2[:-3]]+[0.0])
def sv_stat_calcu(sv_hash,key):
out=[]
for k1 in sv_hash[key]:
sv_min=int(k1[1])
sv_max=int(k1[2])
sv_int=(int(k1[2])-int(k1[1]))/3
out.append([k1[0],sv_min,sv_min+sv_int, sv_max-sv_int,sv_max])
return out
def sv_size_pick(sv_stat):
out=[]
for k1 in sv_stat:
out+=[random.choice(range(int(k1[1]),int(k1[2]))) for i in range(int(k1[0]/3))]
out+=[random.choice(range(int(k1[2]),int(k1[3]))) for i in range(int(int(k1[0])-int(k1[0]/3))/2)]
out+=[random.choice(range(int(k1[3]),int(k1[4]))) for i in range(int(k1[0])-int(k1[0]/3)-int(int(k1[0])-int(k1[0]/3))/2)]
out=random.sample(out,len(out)) #shuffle; the original built 'permute' but returned the unshuffled list
return out
def chromos_readin(refs):
fin=open(refs+'.fai')
chromos=[]
chromo_length=[]
genome_length=0
for line in fin:
pin=line.strip().split()
chromos.append(pin[0])
genome_length+=int(pin[1])
chromo_length.append(int(pin[1]))
fin.close()
chromo_num_region=[]
for k1 in chromo_length:
chromo_num_region.append(int(round(float(k1)/float(genome_length)*sv_total_num)))
chrom_to_remove=[]
out_num_region=[]
out_chromos=[]
out_length={}
for i in range(len(chromo_num_region)):
if chromo_num_region[i]>1:
out_chromos.append(chromos[i])
out_num_region.append(chromo_num_region[i])
out_length[chromos[i]]=chromo_length[i]
return [genome_length]+[out_chromos]+[out_num_region]+[out_length]
def sv_hash_add(list_in,key):
for i in list_in:
if not i in sv_hash.keys():
sv_hash[i]=[key]
else:
sv_hash[i]+=[key]
def sv_region_pick():
#pick random regions across the genome
SV_region=[]
rec=-1
sv_size=del_size+dup_size+inv_size+tra_size+dup2_size
sv_size=random.sample(sv_size,len(sv_size))
for k1 in range(len(chromos)):
chromosome=chromos[k1]
num_region=chromo_num_region[k1]
range_region=chromo_length[chromosome]
temp_start_region=sorted(random.sample(range(1000, range_region-1000),num_region+1))
temp_end_region=[]
for k2 in range(num_region):
start=temp_start_region[k2]
start2=temp_start_region[k2+1]
if start2-start<1000: continue
rec+=1
temp_sv_size=sv_size[rec]
sv_type=sv_hash[sv_size[rec]][0]
del sv_hash[sv_size[rec]][0]
end=start+temp_sv_size
if not end<start2-300:
end=random.choice(range(start,int(numpy.mean([start,start2]))))
if sv_type=='TRA':
end2=random.choice(range(end+100,start2-100))
temp_end_region.append(end)
if sv_type=='TRA':
SV_region.append([chromos[k1],start,end,end2,sv_type])
else:
SV_region.append([chromos[k1],start,end,sv_type])
return SV_region
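	#sv_region_pick samples non-overlapping start positions per chromosome, then assigns each region
	#a size and type from the shuffled sv_size pool; an end that would run within 300 bp of the next
	#start is re-drawn below the midpoint, and TRA records carry a second end coordinate (end2).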
def ref_base_returnN(ref,chromo,pos):
return 'N'
def ref_base_readin(ref,chromo,pos):
fref=os.popen(r'''samtools faidx %s %s:%s-%s'''%(ref,chromo,str(pos),str(pos)))
tre=fref.readline().strip().split()
REF_AL=fref.readline().strip().split()
if not REF_AL==[]:
return REF_AL[0]
else:
return 'N'
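	#ref_base_returnN is a fast placeholder that always reports 'N' instead of querying the fasta;
	#write_VCF_main later re-resolves 'N' REF fields through it, so substituting ref_base_readin
	#there yields real reference bases at the cost of one samtools faidx call per record.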
def del_flag_SA(k1,k2):
out=0
if not '^' in k2:
flagdup=0
for i in k2:
if k2.count(i)>1:
flagdup+=1
if flagdup==0:
flagtra=0
for i in range(len(k2)-1):
if ord(k2[i+1])-ord(k2[i])<1:
flagtra+=1
if flagtra==0:
if not k1==k2:
out=1
return out
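	#del_flag_SA returns 1 only when the alt haplotype is a pure deletion of the reference string,
	#e.g. del_flag_SA('ab','a') -> 1, while del_flag_SA('ab','ba') -> 0 (reordered, so not a deletion).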
def order_SV_Homo_write(sv_info):
for k1 in sv_info.keys():
for k2 in sv_info[k1].keys():
for k3 in sv_info[k1][k2]:
if not k3[0] in order_SV_Pos.keys():
order_SV_Pos[k3[0]]={}
if not int(k3[1]) in order_SV_Pos[k3[0]].keys():
order_SV_Pos[k3[0]][int(k3[1])]=[]
order_SV_Pos[k3[0]][int(k3[1])].append([[k3[0]]+[int(i) for i in k3[1:-1]],[k2.split('/')[0]]])
def order_SV_Het_write(sv_info):
for k1 in sv_info.keys():
for k2 in sv_info[k1].keys():
for k3 in sv_info[k1][k2]:
if not k3[0] in order_SV_Pos.keys():
order_SV_Pos[k3[0]]={}
if not int(k3[1]) in order_SV_Pos[k3[0]].keys():
order_SV_Pos[k3[0]][int(k3[1])]=[]
order_SV_Pos[k3[0]][int(k3[1])].append([[k3[0]]+[int(i) for i in k3[1:-1]],[k2.split('/')[0],k2.split('/')[1],k1.split('/')[0]]])
def Ref_Alt_Produce(ChromoList,bp_list,letter_new,Ref_Seq_File):
		#ChromoList: chromosome names, used to split bp_list into per-chromosome blocks
		#bp_list: chromosome name(s) followed by breakpoint positions, e.g. [184569179, 184569775, 184571064, 184572009, 184572016]
		#letter_new: rearranged structure to build, e.g. 'aba' or 'ab^'; '' means every block is deleted
		#Ref_Seq_File: indexed fasta queried through samtools faidx
if letter_new=='':
return insert_read_decide(bp_list)
else:
bp_hash={}
bp_seq=[]
for k1 in bp_list:
if k1 in ChromoList:
bp_seq.append([k1])
else:
bp_seq[-1].append(k1)
rec=0
for k1 in bp_seq:
for k2 in range(len(k1)-2):
rec+=1
bp_hash[chr(96+rec)]=[k1[0],k1[k2+1],k1[k2+2]]
letter_seq={}
for k1 in bp_hash.keys():
Chromo=bp_hash[k1][0]
region_left=bp_hash[k1][1]
region_right=bp_hash[k1][2]
seq=os.popen(r'''samtools faidx %s %s:%d-%d'''%(Ref_Seq_File,Chromo,region_left,region_right))
seq.readline().strip().split()
lines=[]
while True:
line=seq.readline().strip().split()
if not line: break
lines.append(line)
Seq1=lines[0][0]
for j in range(len(lines))[1:]:
Seq1=''.join([Seq1,lines[j][0]])
letter_seq[k1]=Seq1
letter_seq[k1+'^']=reverse(complementary(Seq1))
new_Seq=''
new_letter=[]
for k1 in letter_new:
if not k1=='^':
new_letter.append(k1)
else:
new_letter[-1]+=k1
for k1 in new_letter:
new_Seq+=letter_seq[k1]
new_Seq+=insert_read_decide(bp_list)
return new_Seq
def Ref_Ref_Produce(Chromo,bp_list,Ref_Seq_File):
start=int(bp_list[0])
end=int(bp_list[-1])
new1_ref=''
fin=os.popen(r'''samtools faidx %s %s:%d-%d'''%(Ref_Seq_File, Chromo, start,end))
fin.readline().strip().split()
for line in fin:
pin=line.strip().split()
new1_ref+=pin[0]
fin.close()
return new1_ref
def reverse(seq):
seq2=[]
for i in seq[::-1]:
seq2.append(i)
return ''.join(seq2)
def complementary(seq):
seq2=[]
for i in seq:
if i in 'ATGCN':
seq2.append('ATGCN'['TACGN'.index(i)])
elif i in 'atgcn':
seq2.append('atgcn'['tacgn'.index(i)])
return ''.join(seq2)
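	#Together these two helpers give the reverse complement, e.g. reverse(complementary('ATGC')) == 'GCAT'.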
def unit_produce(list):
temp1=[sorted(list)[0]]
for k1 in sorted(list)[1:]:
if ord(k1)-ord(temp1[-1][-1])==1:
temp1[-1]+=k1
else:
temp1.append(k1)
temp2=[]
for k1 in temp1:
for k2 in range(len(k1)+1)[1:]:
for k3 in range(len(k1)-k2+1):
temp2.append(k1[k3:(k3+k2)])
return temp2[::-1]
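	#unit_produce merges sorted letters into maximal consecutive runs and returns every contiguous
	#sub-run, longest first, e.g. unit_produce(['a','b','c']) -> ['abc', 'bc', 'ab', 'c', 'b', 'a'].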
def fasta_homo_write(fasta_out):
fo=open(fasta_out,'w')
print fasta_out
for k1 in chromos:
print >>fo, '>'+k1
new1_ref=''
rec1_start=0
for k2 in sorted(order_SV_Pos[k1].keys()):
print [k1,k2]
rec1_start+=1
k3=order_SV_Pos[k1][k2]
start=int(k3[0][0][1])
end=int(k3[0][0][-1])
new1_ref+=Ref_Ref_Produce(k1,[rec1_start,start-1],ref)
new1_ref+=Ref_Alt_Produce(chromos,k3[0][0],k3[0][1][0],ref)
rec1_start=end
rec1_start+=1
new1_ref+=Ref_Ref_Produce(k1,[rec1_start,chromo_length[k1]],ref)
new1_seq=[]
for k1 in range(len(new1_ref)/60):
new1_seq.append(new1_ref[k1*60:(k1+1)*60])
new1_seq.append(new1_ref[(k1+1)*60:])
for k1 in new1_seq:
if not k1=='':
print >>fo, k1
fo.close()
def fasta_homo_write_test(fasta_out):
fo=open(fasta_out,'w')
print fasta_out
for k1 in chromos[:1]:
print >>fo, '>'+k1
new1_ref=''
rec1_start=0
for k2 in sorted(order_SV_Pos[k1].keys()):
print [k1,k2]
rec1_start+=1
k3=order_SV_Pos[k1][k2]
start=int(k3[0][0][1])
end=int(k3[0][0][-1])
new1_ref+=Ref_Ref_Produce(k1,[rec1_start,start-1],ref)
new1_ref+=Ref_Alt_Produce(chromos,k3[0][0],k3[0][1][0],ref)
rec1_start=end
rec1_start+=1
new1_ref+=Ref_Ref_Produce(k1,[rec1_start,chromo_length[k1]],ref)
new1_seq=[]
for k1 in range(len(new1_ref)/60):
new1_seq.append(new1_ref[k1*60:(k1+1)*60])
new1_seq.append(new1_ref[(k1+1)*60:])
for k1 in new1_seq:
if not k1=='':
print >>fo, k1
fo.close()
def fasta_het_write(fasta_out):
fo1=open(fasta_out.replace('.het.fa','.het1.fa'),'w')
fo2=open(fasta_out.replace('.het.fa','.het2.fa'),'w')
print fasta_out.replace('.het.fa','.het1.fa')
print fasta_out.replace('.het.fa','.het2.fa')
for k1 in chromos:
print >>fo1, '>'+k1
print >>fo2, '>'+k1
new1_ref=''
rec1_start=0
new2_ref=''
rec2_start=0
for k2 in sorted(order_SV_Pos[k1].keys()):
rec1_start+=1
k3=order_SV_Pos[k1][k2]
start=int(k3[0][0][1])
end=int(k3[0][0][-1])
new1_ref+=Ref_Ref_Produce(k1,[rec1_start,start-1],ref)
new1_ref+=Ref_Alt_Produce(chromos,k3[0][0],k3[0][1][0],ref)
rec1_start=end
rec2_start+=1
new2_ref+=Ref_Ref_Produce(k1,[rec2_start,start-1],ref)
new2_ref+=Ref_Alt_Produce(chromos,k3[0][0],k3[0][1][1],ref)
rec2_start=end
rec1_start+=1
rec2_start+=1
new1_ref+=Ref_Ref_Produce(k1,[rec1_start,chromo_length[k1]],ref)
new1_seq=[]
for k1 in range(len(new1_ref)/60):
new1_seq.append(new1_ref[k1*60:(k1+1)*60])
new1_seq.append(new1_ref[(k1+1)*60:])
for k1 in new1_seq:
if not k1=='':
print >>fo1, k1
new2_ref+=Ref_Ref_Produce(k1,[rec2_start,chromo_length[k1]],ref)
new2_seq=[]
for k1 in range(len(new2_ref)/60):
new2_seq.append(new2_ref[k1*60:(k1+1)*60])
new2_seq.append(new2_ref[(k1+1)*60:])
for k1 in new2_seq:
if not k1=='':
print >>fo2, k1
fo1.close()
fo2.close()
def Sample_info_ReadIn(Sam_File):
fi=open(Sam_File)
for line in fi:
pin=line.strip().split()
if not pin==[]:
if not pin[0] in sv_hash.keys():
sv_hash[pin[0]]=[]
sv_hash[pin[0]].append([int(i) for i in pin[1:]])
sv_hash[pin[0]][-1][0]=int(sv_hash[pin[0]][-1][0]*1.25)
else:
sv_hash[pin[0]].append([int(i) for i in pin[1:]])
sv_hash[pin[0]][-1][0]=int(sv_hash[pin[0]][-1][0]*1.25)
fi.close()
def write_axiom_pbs_header(fout,JobToDo):
fo=open(fout,'w')
print >>fo, '#!/bin/bash'
print >>fo, ' '
print >>fo, '#PBS -N '+JobToDo
print >>fo, '#PBS -l mem=4gb,walltime=100:0:0,nodes=compute-4-3'
print >>fo, '#PBS -m a'
print >>fo, '#PBS -M xuefzhao@umich.edu'
print >>fo, '#PBS -o '+JobToDo+'.log'
print >>fo, '#PBS -e '+JobToDo+'.err'
print >>fo, '#PBS -V'
print >>fo, '#PBS -d .'
fo.close()
def sv_total_num_calcu():
sv_total_num=0
for k1 in del_stat:
sv_total_num+=k1[0]
for k1 in dup_stat:
sv_total_num+=k1[0]
for k1 in dup2_stat:
sv_total_num+=k1[0]
for k1 in inv_stat:
sv_total_num+=k1[0]
for k1 in tra_stat:
sv_total_num+=k1[0]
return sv_total_num
def pick_random_seqs(ref,sv_total_num,chromo_length):
		#About 12% of all SVs carry micro-insertions at one or both ends, so twice that many
		#sequences are picked at random from the genome to serve as the long (>20 bp) micro-insertions.
num_micro_ins_over20bp=float(sv_total_num)*0.12*2
genome_length=0
chromos_num_regions={}
chrom_seqs={}
for x in chromo_length.keys():
if not 'GL' in x and not x in ['X','Y','MT']:
genome_length+=chromo_length[x]
for x in chromo_length.keys():
if not 'GL' in x and not x in ['X','Y','MT']:
chromos_num_regions[x]=float(chromo_length[x])/float(genome_length)*num_micro_ins_over20bp
for x in chromos_num_regions.keys():
chrom_seqs[x]=[]
int_num=int(round(chromos_num_regions[x]))
seq_pick=random.sample(range(10000,chromo_length[x]-10000),int_num)
for y in sorted(seq_pick):
length_pick=random.sample(range(20,50),1)[0]
seqs=os.popen(r'''samtools faidx %s %s:%d-%d'''%(ref,x,y,y+length_pick))
seqs.readline()
test=seqs.readline().strip()
if not 'NNNNNNNN' in test:
chrom_seqs[x].append(test)
seqs.close()
return chrom_seqs
def produce_random_seqs(length):
out=[]
for x in range(length):
out.append(random.choice(['A','T','G','C']))
return ''.join(out)
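	#Command-line handling for this simulation mode: the mode itself is taken from sys.argv[1]
	#(dispatched through function_name elsewhere in this script), so getopt parses sys.argv[2:] only.
	#A typical invocation might look like (paths hypothetical):
	#  python Produce.Simulated.FussyJuncs.py <mode> --reference genome.fa --input-sim sim.stats --output-prefix out/sim
	#--input-rec is accepted here but only consumed by the 'complex' mode further down.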
opts,args=getopt.getopt(sys.argv[2:],'',['reference=','input-sim=','input-rec=','output-prefix='])
dict_opts=dict(opts)
Sam_File=dict_opts['--input-sim']
sv_hash={}
Sample_info_ReadIn(Sam_File)
del_stat=sv_stat_calcu(sv_hash,'DEL')
dup_stat=sv_stat_calcu(sv_hash,'DUP_TANDEM')
dup2_stat=sv_stat_calcu(sv_hash,'DUP')
dup3_stat=[]
for i in dup2_stat:
dup3_stat.append([i[0]]+[j+1000 for j in i[1:]])
dup2_stat=dup3_stat
inv_stat=sv_stat_calcu(sv_hash,'INV')
tra_stat=sv_stat_calcu(sv_hash,'TRA')
del_size=sv_size_pick(del_stat)
dup_size=sv_size_pick(dup_stat)
dup2_size=sv_size_pick(dup2_stat)
inv_size=sv_size_pick(inv_stat)
tra_size=sv_size_pick(tra_stat)
sv_total_num=sv_total_num_calcu()
refs=dict_opts['--reference']
ref=refs
	if not os.path.isfile(refs):
		print 'Error: reference genome not found!'
	if not os.path.isfile(refs+'.fai'):
		print 'Error: reference genome not indexed (run samtools faidx first)!'
chromos_TOTAL=chromos_readin(refs)
genome_length=chromos_TOTAL[0]
chromos=chromos_TOTAL[1]
chromo_num_region=chromos_TOTAL[2]
chromo_length=chromos_TOTAL[3]
sv_hash={}
sv_hash_add(del_size,'DEL')
sv_hash_add(dup2_size,'DUP')
sv_hash_add(dup_size,'DUP_TANDEM')
sv_hash_add(inv_size,'INV')
sv_hash_add(tra_size,'TRA')
SV_region=sv_region_pick()
SV_region_filter=[]
for x in SV_region:
if x[-1]=='DUP' and x[2]-x[1]<1100: continue
else:
SV_region_filter.append(x)
SV_region=SV_region_filter
sv_homo_info={}
sv_homo_initial()
sv_homo_produce()
temp_dup=[]
for y in range(len(sv_homo_info['DUP'])):
x=sv_homo_info['DUP'][y]
if x[2]-x[1]<2000 and x[2]-x[1]>1100:
z=random.choice([x[1]+500,x[2]-500])
temp_dup.append(x[:2]+[z]+x[2:])
#sv_homo_info['DUP'][y]=x[:2]+[z]+x[2:]
elif x[2]-x[1]>1999:
z=random.choice(range(x[1]+800,x[1]+1200)+range(x[2]-1200,x[2]-800))
temp_dup.append(x[:2]+[z]+x[2:])
#sv_homo_info['DUP'][y]=x[:2]+[z]+x[2:]
elif x[2]-x[1]<1101:
continue
sv_homo_info['DUP']=temp_dup
#write homo sv rec
sv_rec_homo_produce()
sv_info={}
sv_info_rewrite(sv_homo_info)
dup1={}
inv1={}
del1={}
tra1={}
sv_rec_2(sv_info)
sv_out={}
hash_reorder()
vcf_out=dict_opts['--output-prefix']+'.vcf'
write_VCF_header(vcf_out)
write_VCF_main(vcf_out)
fasta_out=dict_opts['--output-prefix']+'.homo.fa'
seq_ins_pools=pick_random_seqs(ref,sv_total_num,chromo_length)
	#produce the fasta file carrying all homozygous SVs
order_SV_Pos={}
order_SV_Homo_write(sv_info)
fasta_homo_write(fasta_out)
os.system(r'''samtools faidx %s'''%(fasta_out))
elif function_name=='complex':
def bp_to_let(del_info_unit):
flag=0
for i in del_info_unit[0]:
if i in chromos or not i.isdigit():
flag+=1
if not flag==0:
letter=''.join([chr(i+97) for i in range(len(del_info_unit[0])-2*flag)])
letters='/'.join([letter,letter])
return letters
else:
return 0
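	#bp_to_let maps one breakpoint record to its reference letter string; assuming '1' is a name in
	#chromos, bp_to_let([['1','100','200','300']]) -> 'ab/ab' (two blocks, 100-200 and 200-300).
	#Records containing no chromosome name return 0.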
def chromo_readin(ref):
fin=open(ref+'.fai')
out=[]
for line in fin:
pin=line.strip().split()
out.append(pin[0])
fin.close()
return out
def sv_sample_readin(path):
if not path[-1]=='/':
path+='/'
out={}
for k1 in os.listdir(path):
path1=path+k1+'/'
if os.path.isdir(path1):
for k2 in os.listdir(path1):
path2=path1+k2+'/'
for k3 in os.listdir(path2):
if k3.split('.')[-1]=='coverge':
fin=open(path2+k3)
while True:
pin1=fin.readline().strip().split()
if not pin1: break
pin2=fin.readline().strip().split()
if not pin2: break
pin3=fin.readline().strip().split()
pin4=fin.readline().strip().split()
pin5=fin.readline().strip().split()
k1=bp_to_let([pin1])
k2=pin2[0]
if not k1 in out.keys():
out[k1]=[]
if not k2 in out[k1]:
out[k1].append(k2)
fin.close()
return out
def sv_decide_caller(k1,k2):
if k2==k1:
return 'Right'
else:
return 'Error'
def simple_del_caller(k1,k2):
out='Error'
if '^' in k2:
return out
else:
test=0
for x in k2:
if k2.count(x)>2:
test+=1
if not test==0:
return out
else:
k1a=k1.split('/')[0]
k1b=k1.split('/')[1]
k2a=k2.split('/')[0]
k2b=k2.split('/')[1]
test=0
if not len(k2a)==1:
for x in range(len(k2a)-1):
if ord(k2a[x+1])-ord(k2a[x])<1:
test+=1
if not len(k2b)==1:
for x in range(len(k2b)-1):
if ord(k2b[x+1])-ord(k2b[x])<1:
test+=1
if not test==0:
return out
else:
return 'Right'
def simple_del_let_pick(k1,k2):
k2_new=letter_seg_1(k2)
k1_new=letter_seg_1(k1)
out=[]
out.append([])
for x in k1_new[0]:
if not x in k2_new[0]:
out[0].append(x)
out.append([])
for x in k1_new[1]:
if not x in k2_new[1]:
out[1].append(x)
out2=[[],[]]
if not out[0]==[]:
out2[0]=[out[0][0]]
if not out[1]==[]:
out2[1]=[out[1][0]]
letter_seg_2(out,out2,0)
letter_seg_2(out,out2,1)
return out2
def letter_seg_1(k2):
lets=[[],[]]
for x in k2.split('/')[0]:
if not x=='^':
lets[0].append(x)
else:
lets[0][-1]+=x
for x in k2.split('/')[1]:
if not x=='^':
lets[1].append(x)
else:
lets[1][-1]+=x
return lets
def letter_seg_2(lets,let2,index):
for x in range(len(lets[index]))[1:]:
if not '^' in lets[index][x-1] and not '^' in lets[index][x]:
if ord(lets[index][x])-ord(lets[index][x-1])==1:
let2[index][-1]+=lets[index][x]
else:
let2[index].append(lets[index][x])
elif '^' in lets[index][x-1] and '^' in lets[index][x]:
if ord(lets[index][x][0])-ord(lets[index][x-1][-2])==-1:
let2[index][-1]+=lets[index][x]
else:
let2[index].append(lets[index][x])
else:
let2[index].append(lets[index][x])
def letter_seg_into_blocks(k2):
lets=letter_seg_1(k2)
let2=[[],[]]
if not lets[0]==[]:
let2[0]=[lets[0][0]]
if not lets[1]==[]:
let2[1]=[lets[1][0]]
letter_seg_2(lets,let2,0)
letter_seg_2(lets,let2,1)
for x in range(len(let2[0])):
if '^' in let2[0][x] and len(let2[0][x])>2:
temp=let2[0][x][::-1].replace('^','')+'^'
let2[0][x]=temp
for x in range(len(let2[1])):
if '^' in let2[1][x] and len(let2[1][x])>2:
temp=let2[1][x][::-1].replace('^','')+'^'
let2[1][x]=temp
return let2
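	#e.g. letter_seg_into_blocks('ab^a^/ab') -> [['a', 'ab^'], ['ab']]: the inverted run b^a^ is
	#normalized back to reference order as 'ab^', and the uninverted a,b merge into one block 'ab'.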
def simple_inv_caller(k1,k2):
if not '^' in k2:
return 'Error'
else:
k2_blocks=letter_seg_into_blocks(k2)
k2_new='/'.join([''.join([i.replace('^','') for i in k2_blocks[0]]),
''.join([i.replace('^','') for i in k2_blocks[1]])])
if k2_new==k1:
return 'Right'
else:
return 'Error'
def simple_dup_caller(k1,k2):
if '^' in k2:
return 'Error'
else:
k2_new=letter_seg_1(k2)
k3=[]
for x in k2_new:
if not x==[]:
k3.append([x[0]])
for y in x[1:]:
if not y==k3[-1][-1]:
k3[-1].append(y)
else:
k3.append(x)
k3_new='/'.join([''.join(k3[0]),''.join(k3[1])])
if k3_new==k1:
return 'Right'
else:
return 'Error'
def simple_tra_caller(k1,k2):
if '^' in k2:
return 'Error'
else:
flag1=0
for i in k2:
if not k2.count(i)==2:
flag1+=1
if not flag1==0:
return 'Error'
else:
return 'Right'
def simple_SV_filter(sv_hash):
out={}
for k1 in sv_hash.keys():
for k2 in sv_hash[k1]:
if sv_decide_caller(k1,k2)=='Error':
if simple_del_caller(k1,k2)=='Error':
if simple_inv_caller(k1,k2)=='Error':
if simple_dup_caller(k1,k2)=='Error':
#if simple_tra_caller(k1,k2)=='Error':
if not k1 in out.keys():
out[k1]=[]
if not k2 in out[k1]:
out[k1].append(k2)
return out
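	#simple_SV_filter keeps only ref/alt pairs that none of the simple callers can explain:
	#identical structures, pure deletions, pure inversions and pure duplications are dropped in
	#turn (the translocation check is commented out), leaving genuinely complex rearrangements.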
def csv_region_pick(sv_size):
#pick random regions across the genome
SV_region=[]
rec=-1
sv_size=random.sample(sv_size,len(sv_size))
for k1 in range(len(chromos)):
chromosome=chromos[k1]
num_region=chromo_num_region[k1]
range_region=chromo_length[chromosome]
temp_start_region=sorted(random.sample(range(1000, range_region-1000),num_region+1))
temp_end_region=[]
k2=-1
while True:
if k2==num_region-1: break
k2+=1
print [rec,k2,len(SV_region)]
start=temp_start_region[k2]
start2=temp_start_region[k2+1]
if start2-start<1000: continue
rec+=1
temp_sv_size=random.choice(sv_size)
sv_type=random.choice(csv1_keys)
if sv_type in csv_hash.keys():
rearranged_SV=random.choice(csv1_csv2_hash[sv_type])
num_blocks=len(sv_type.split('/')[0])
end=start+temp_sv_size
if not temp_sv_size/num_blocks>200 or end>start2-300:
rec-=1
k2-=1
continue
else:
num_of_bps=num_blocks-1
mid_length=temp_sv_size/num_blocks
bps_out=[start]
for x in range(num_of_bps):
bps_out.append(random.choice(range(bps_out[-1]+100,start+(x+1)*mid_length-100)))
bps_out.append(end)
SV_region.append([chromos[k1]]+bps_out+[sv_type,rearranged_SV])
return SV_region
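	#csv_region_pick mirrors sv_region_pick for complex SVs: the structure type is drawn from
	#csv1_keys with frequency weighting and its rearranged form from csv1_csv2_hash; the region is
	#split into num_blocks blocks with internal breakpoints at least 100 bp apart, and a slot whose
	#blocks would average under 200 bp is re-drawn.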
def csv_info_rewrite(sv_h_info):
sv_info={}
for k2 in sv_h_info:
if not k2[-2] in sv_info.keys():
sv_info[k2[-2]]={}
if not k2[-1] in sv_info[k2[-2]].keys():
sv_info[k2[-2]][k2[-1]]=[]
sv_info[k2[-2]][k2[-1]].append([str(i) for i in k2[:-2]]+[0.0])
return sv_info
def csv_rec_write(SV_region):
out_hash={}
for x1 in SV_region:
if not x1[0] in out_hash.keys():
out_hash[x1[0]]={}
if not x1[1] in out_hash[x1[0]].keys():
out_hash[x1[0]][x1[1]]=[]
if not x1 in out_hash[x1[0]][x1[1]]:
out_hash[x1[0]][x1[1]].append(x1)
fout=dict_opts['--output-prefix']+'.SV.rec'
fo=open(fout,'w')
print fout
for x1 in chromos:
if x1 in out_hash.keys():
for x2 in sorted(out_hash[x1].keys()):
for x3 in out_hash[x1][x2]:
print >>fo, ' '.join([str(i) for i in x3])
fo.close()
return out_hash
def tra_info_add(k1,k2):
for k3 in sv_info[k1][k2]:
SV_ID='_'.join([str(i) for i in k3[:-1]])
tra1[SV_ID]={}
k2a=k2.split('/')[0]
k2b=k2.split('/')[1]
bp_hash={}
block_rec=0
block_hash=[]
for a3 in k3[:-1]:
if a3 in chromos or not a3.isdigit():
block_hash.append([a3])
else:
block_hash[-1].append(a3)
for a3 in block_hash:
for a4 in range(len(a3)-2):
bp_hash[chr(97+block_rec)]=[a3[0],a3[a4+1],a3[a4+2]]
block_rec+=1
for a3 in bp_hash.keys():
temp=[]
for a4 in bp_hash[a3][1:]:
temp.append(int(a4)-1)
temp.append(int(a4))
bp_hash[a3][1:]=temp
#ref_allele['left']=[ref_allele[k1[0]][0]]
#ref_allele['right']=[ref_allele[k1[-1]][1]]
bp_hash['left']=[bp_hash[k1[0]][0],bp_hash[k1[0]][1],bp_hash[k1[0]][2]]
bp_hash['right']=[bp_hash[k1[-1]][0],bp_hash[k1[-1]][3],bp_hash[k1[-1]][4]]
ref_allele={}
for a3 in bp_hash.keys():
ref_allele[a3]=[bp_hash[a3][0]]
for a4 in bp_hash[a3][1:]:
ref_allele[a3].append(ref_base_returnN(ref,bp_hash[a3][0],a4))
if not k2a==k1.split('/')[0] and del_flag_SA(k1.split('/')[0],k2a)==0:
flag1=0#flag1==0:w/o inversion in the alt structure
if '^' in k2a:
flag1+=1
flag2=0#flag2==0:w/o duplication in the alt structure
for j in k2a:
if k2a.count(j)>1:
flag2+=1
flag3=0 #flag3==0: w/o translocation
if len(k2a)>1:
for i in range(len(k2a)-1):
if not ord(k2a[i+1])>ord(k2a[i]):
flag3+=1
if flag1+flag2+flag3==0:
heta_Del_block=[]
for a1 in k1.split('/')[0]:
if not a1 in k2a:
heta_Del_block.append(a1)
tra1[SV_ID]['a']=[]
block_hash=[]
del_hash={}
block_rec=0
					for a3 in k3[:-1]: #assumed fix: the original iterated over 'a2[0]', which is undefined here; k3[:-1] is the record parsed into block_hash above
if a3 in chromos:
block_hash.append([a3])
else:
block_hash[-1].append(a3)
for a3 in block_hash:
for a4 in range(len(a3)-2):
del_hash[chr(97+block_rec)]=[a3[0],a3[a4+1],a3[a4+2]]
block_rec+=1
if not heta_Del_block==[]:
a_heta=0
heta_Del_new=[heta_Del_block[0]]
while True:
a_heta+=1
if a_heta==len(heta_Del_block):break
if ord(heta_Del_block[a_heta])-ord(heta_Del_block[a_heta-1])==1 and del_hash[heta_Del_block[a_heta]][0]==del_hash[heta_Del_block[a_heta-1]][0]:
heta_Del_new[-1]+=heta_Del_block[a_heta]
else:
heta_Del_new.append(heta_Del_block[a_heta])
for a3 in heta_Del_new:
a4=a3[0]
tra1[SV_ID]['a'].append(['DEL',del_hash[a4][0],int(del_hash[a4][1]),ref_allele[a4][2]])
a4=a3[-1]
tra1[SV_ID]['a'][-1].append(int(del_hash[a4][2])-1)
else:
tra1[SV_ID]['a']=[]
t1=[]
for a3 in k2a:
if not a3=='^':
t1.append(a3)
else:
t1[-1]+=a3
t2=[t1[0]]
for a3 in t1[1:]:
if not '^' in a3 and not '^' in t2[-1] and ord(a3)-ord(t2[-1][-1])==1 and bp_hash[a3[0]][0]==bp_hash[t2[-1][-1]][0]:
t2[-1]+=a3
elif '^' in a3 and '^' in t2[-1] and ord(t2[-1][-2])-ord(a3[0])==1 and bp_hash[a3[0]][0]==bp_hash[t2[-1][-2]][0]:
t2[-1]+=a3
else:
t2.append(a3)
a3='left'
a4=t2[0]
l_chr=bp_hash[a3][0]
r_chr=bp_hash[a4[0]][0]
if not '^' in a4:
if not a4[0]==k1[0]:
tra1[SV_ID]['a'].append([r_chr,bp_hash[a4[0]][2],ref_allele[a4[0]][2],']'+l_chr+':'+str(bp_hash[a3][1])+']'+ref_allele[a4[0]][2]])
tra1[SV_ID]['a'].append([l_chr,bp_hash[a3][1],ref_allele[a3][1],ref_allele[a3][1]+'['+r_chr+':'+str(bp_hash[a4[0]][2])+'['])
elif '^' in a4:
tra1[SV_ID]['a'].append([r_chr, bp_hash[a4[0]][3],ref_allele[a4[0]][3],ref_allele[a4[0]][3]+']'+l_chr+':'+str(bp_hash[a3][1])+']'])
tra1[SV_ID]['a'].append([l_chr,bp_hash[a3][1],ref_allele[a3][1],ref_allele[a3][1]+']'+r_chr+':'+str(bp_hash[a4[0]][3])+']'])
for t3 in range(len(t2)-1):
a3=t2[t3]
a4=t2[t3+1]
l_chr=bp_hash[a3[0]][0]
r_chr=bp_hash[a4[0]][0]
if not '^' in a3 and not '^' in a4:
tra1[SV_ID]['a'].append([r_chr,bp_hash[a4[0]][2],ref_allele[a4[0]][2],']'+l_chr+':'+str(bp_hash[a3[-1]][3])+']'+ref_allele[a4[0]][2]])
tra1[SV_ID]['a'].append([l_chr,bp_hash[a3[-1]][3],ref_allele[a3[-1]][3],ref_allele[a3[-1]][3]+'['+bp_hash[a4[0]][0]+':'+str(bp_hash[a4[0]][2])+'['])
elif '^' in a3 and not '^' in a4:
tra1[SV_ID]['a'].append([r_chr,bp_hash[a4[0]][2],ref_allele[a4[0]][2],'['+l_chr+':'+str(bp_hash[a3[-2]][2])+'['+ref_allele[a4[0]][2]])
tra1[SV_ID]['a'].append([l_chr,bp_hash[a3[-2]][2],ref_allele[a3[-2]][2],'['+bp_hash[a4[0]][0]+':'+str(bp_hash[a4[0]][2])+'['+ref_allele[a3[-2]][2]])
elif not '^' in a3 and '^' in a4:
tra1[SV_ID]['a'].append([r_chr,bp_hash[a4[0]][3],ref_allele[a4[0]][3],ref_allele[a4[0]][3]+']'+l_chr+':'+str(bp_hash[a3[-1]][3])+']'])
tra1[SV_ID]['a'].append([l_chr,bp_hash[a3[-1]][3],ref_allele[a3[-1]][3],ref_allele[a3[-1]][3]+']'+r_chr+':'+str(bp_hash[a4[0]][3])+']'])
elif '^' in a3 and '^' in a4:
tra1[SV_ID]['a'].append([r_chr,bp_hash[a4[0]][3],ref_allele[a4[0]][3],ref_allele[a4[0]][3]+'['+l_chr+':'+str(bp_hash[a3[-2]][2])+'['])
tra1[SV_ID]['a'].append([l_chr,bp_hash[a3[-2]][2],ref_allele[a3[-2]][2], ']'+r_chr+':'+str(bp_hash[a4[0]][3])+']'+ref_allele[a3[-2]][2]])
if len(t2)>1:
a3=t2[t3+1]
else:
a3=t2[0]
a4='right'
l_chr=bp_hash[a3[0]][0]
r_chr=bp_hash[a4][0]
if not '^' in a3:
if not a3[-1]==k1[-1]:
tra1[SV_ID]['a'].append([r_chr,bp_hash[a4][2],ref_allele[a4][2],']'+l_chr+':'+str(bp_hash[a3[-1]][3])+']'+ref_allele[a4][2]])
tra1[SV_ID]['a'].append([l_chr,bp_hash[a3[-1]][3],ref_allele[a3[-1]][3],ref_allele[a3[-1]][3]+'['+bp_hash[a4][0]+':'+str(bp_hash[a4][2])+'['])
if '^' in a3:
tra1[SV_ID]['a'].append([r_chr,bp_hash[a4][2],ref_allele[a4][2],'['+l_chr+':'+str(bp_hash[a3[-2]][2])+'['+ref_allele[a4][2]])
tra1[SV_ID]['a'].append([l_chr,bp_hash[a3[-2]][2],ref_allele[a3[-2]][2],'['+bp_hash[a4][0]+':'+str(bp_hash[a4][2])+'['+ref_allele[a3[-2]][2]])
#print [k1,k2]
if not k2b==k1.split('/')[1] and del_flag_SA(k1.split('/')[1],k2b)==0:
flag1=0#flag1==0:w/o inversion in the alt structure
if '^' in k2b:
flag1+=1
flag2=0#flag2==0:w/o duplication in the alt structure
for j in k2b:
if k2b.count(j)>1:
flag2+=1
flag3=0 #flag3==0: w/o translocation
if len(k2b)>1:
for i in range(len(k2b)-1):
if not ord(k2b[i+1])>ord(k2b[i]):
flag3+=1
if flag1+flag2+flag3==0:
heta_Del_block=[]
for a1 in k1.split('/')[1]:
if not a1 in k2b:
heta_Del_block.append(a1)
tra1[SV_ID]['b']=[]
block_hash=[]
del_hash={}
block_rec=0
					for a3 in k3[:-1]: #assumed fix: the original iterated over 'a2[0]', which is undefined here; k3[:-1] is the record parsed into block_hash above
if a3 in chromos:
block_hash.append([a3])
else:
block_hash[-1].append(a3)
for a3 in block_hash:
for a4 in range(len(a3)-2):
del_hash[chr(97+block_rec)]=[a3[0],a3[a4+1],a3[a4+2]]
block_rec+=1
if not heta_Del_block==[]:
a_heta=0
heta_Del_new=[heta_Del_block[0]]
while True:
a_heta+=1
if a_heta==len(heta_Del_block):break
if ord(heta_Del_block[a_heta])-ord(heta_Del_block[a_heta-1])==1 and del_hash[heta_Del_block[a_heta]][0]==del_hash[heta_Del_block[a_heta-1]][0]:
heta_Del_new[-1]+=heta_Del_block[a_heta]
else:
heta_Del_new.append(heta_Del_block[a_heta])
for a3 in heta_Del_new:
a4=a3[0]
tra1[SV_ID]['b'].append(['DEL',del_hash[a4][0],int(del_hash[a4][1]),ref_allele[a4][2]])
a4=a3[-1]
tra1[SV_ID]['b'][-1].append(int(del_hash[a4][2])-1)
else:
tra1[SV_ID]['b']=[]
t1=[]
for a3 in k2b:
if not a3=='^':
t1.append(a3)
else:
t1[-1]+=a3
t2=[t1[0]]
for a3 in t1[1:]:
if not '^' in a3 and not '^' in t2[-1] and ord(a3)-ord(t2[-1][-1])==1 and bp_hash[a3[0]][0]==bp_hash[t2[-1][-1]][0]:
t2[-1]+=a3
elif '^' in a3 and '^' in t2[-1] and ord(t2[-1][-2])-ord(a3[0])==1 and bp_hash[a3[0]][0]==bp_hash[t2[-1][-2]][0]:
t2[-1]+=a3
else:
t2.append(a3)
a3='left'
a4=t2[0]
l_chr=bp_hash[a3][0]
r_chr=bp_hash[a4[0]][0]
if not '^' in a4:
if not a4[0]==k1[0]:
tra1[SV_ID]['b'].append([r_chr,bp_hash[a4[0]][2],ref_allele[a4[0]][2],']'+l_chr+':'+str(bp_hash[a3][1])+']'+ref_allele[a4[0]][2]])
tra1[SV_ID]['b'].append([l_chr,bp_hash[a3][1],ref_allele[a3][1],ref_allele[a3][1]+'['+r_chr+':'+str(bp_hash[a4[0]][2])+'['])
elif '^' in a4:
tra1[SV_ID]['b'].append([r_chr, bp_hash[a4[0]][3],ref_allele[a4[0]][3],ref_allele[a4[0]][3]+']'+l_chr+':'+str(bp_hash[a3][1])+']'])
tra1[SV_ID]['b'].append([l_chr,bp_hash[a3][1],ref_allele[a3][1],ref_allele[a3][1]+']'+r_chr+':'+str(bp_hash[a4[0]][3])+']'])
for t3 in range(len(t2)-1):
a3=t2[t3]
a4=t2[t3+1]
l_chr=bp_hash[a3[0]][0]
r_chr=bp_hash[a4[0]][0]
if not '^' in a3 and not '^' in a4:
tra1[SV_ID]['b'].append([r_chr,bp_hash[a4[0]][2],ref_allele[a4[0]][2],']'+l_chr+':'+str(bp_hash[a3[-1]][3])+']'+ref_allele[a4[0]][2]])
tra1[SV_ID]['b'].append([l_chr,bp_hash[a3[-1]][3],ref_allele[a3[-1]][3],ref_allele[a3[-1]][3]+'['+bp_hash[a4[0]][0]+':'+str(bp_hash[a4[0]][2])+'['])
elif '^' in a3 and not '^' in a4:
tra1[SV_ID]['b'].append([r_chr,bp_hash[a4[0]][2],ref_allele[a4[0]][2],'['+l_chr+':'+str(bp_hash[a3[-2]][2])+'['+ref_allele[a4[0]][2]])
tra1[SV_ID]['b'].append([l_chr,bp_hash[a3[-2]][2],ref_allele[a3[-2]][2],'['+bp_hash[a4[0]][0]+':'+str(bp_hash[a4[0]][2])+'['+ref_allele[a3[-2]][2]])
elif not '^' in a3 and '^' in a4:
tra1[SV_ID]['b'].append([r_chr,bp_hash[a4[0]][3],ref_allele[a4[0]][3],ref_allele[a4[0]][3]+']'+l_chr+':'+str(bp_hash[a3[-1]][3])+']'])
tra1[SV_ID]['b'].append([l_chr,bp_hash[a3[-1]][3],ref_allele[a3[-1]][3],ref_allele[a3[-1]][3]+']'+r_chr+':'+str(bp_hash[a4[0]][3])+']'])
elif '^' in a3 and '^' in a4:
tra1[SV_ID]['b'].append([r_chr,bp_hash[a4[0]][3],ref_allele[a4[0]][3],ref_allele[a4[0]][3]+'['+l_chr+':'+str(bp_hash[a3[-2]][2])+'['])
tra1[SV_ID]['b'].append([l_chr,bp_hash[a3[-2]][2],ref_allele[a3[-2]][2], ']'+r_chr+':'+str(bp_hash[a4[0]][3])+']'+ref_allele[a3[-2]][2]])
if len(t2)>1:
a3=t2[t3+1]
else:
a3=t2[0]
a4='right'
l_chr=bp_hash[a3[0]][0]
r_chr=bp_hash[a4][0]
if not '^' in a3:
if not a3[-1]==k1[-1]:
tra1[SV_ID]['b'].append([r_chr,bp_hash[a4][2],ref_allele[a4][2],']'+l_chr+':'+str(bp_hash[a3[-1]][3])+']'+ref_allele[a4][2]])
tra1[SV_ID]['b'].append([l_chr,bp_hash[a3[-1]][3],ref_allele[a3[-1]][3],ref_allele[a3[-1]][3]+'['+bp_hash[a4][0]+':'+str(bp_hash[a4][2])+'['])
if '^' in a3:
tra1[SV_ID]['b'].append([r_chr,bp_hash[a4][2],ref_allele[a4][2],'['+l_chr+':'+str(bp_hash[a3[-2]][2])+'['+ref_allele[a4][2]])
tra1[SV_ID]['b'].append([l_chr,bp_hash[a3[-2]][2],ref_allele[a3[-2]][2],'['+bp_hash[a4][0]+':'+str(bp_hash[a4][2])+'['+ref_allele[a3[-2]][2]])
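	#The bracketed ALT strings assembled above follow the VCF 4.1 breakend (BND) notation:
	#t[p[ and t]p] join sequence after base t, while ]p]t and [p[t join it before t, with the
	#bracket direction giving the orientation of the mate at position p.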
def hash_reorder():
for ka1 in del1.keys():
if not ka1 in sv_out.keys():
sv_out[ka1]={}
for ka2 in del1[ka1]:
#fref=os.popen(r'''samtools faidx %s %s:%s-%s'''%(ref,ka1,str(ka2[0]+1),str(ka2[0]+1)))
#tre=fref.readline().strip().split()
#REF_AL=fref.readline().strip().split()[0]
REF_AL='N'
Pass_Sign='PASS'
if ka2[3]<score_Cff:
Pass_Sign='LowQual'
if ka2[2]=='heta':
GenoType='1|0'
elif ka2[2]=='hetb':
GenoType='0|1'
elif ka2[2]=='homo':
GenoType='1|1'
else:
print ka2[2]
ka_new=[ka1,ka2[0],ka2[-1],REF_AL,'<DEL>',ka2[3],Pass_Sign,'SVTYPE=DEL;END='+str(ka2[1]),'GT',GenoType]
if not ka2[-1] in sv_out[ka1].keys():
sv_out[ka1][ka2[-1]]=[]
if not ka_new in sv_out[ka1][ka2[-1]]:
sv_out[ka1][ka2[-1]].append(ka_new)
for ka1 in inv1.keys():
if not ka1 in sv_out.keys():
sv_out[ka1]={}
for ka2 in inv1[ka1]:
#fref=os.popen(r'''samtools faidx %s %s:%s-%s'''%(ref,ka1,str(ka2[0]+1),str(ka2[0]+1)))
#tre=fref.readline().strip().split()
#REF_AL=fref.readline().strip().split()[0]
REF_AL='N'
Pass_Sign='PASS'
if ka2[3]<score_Cff:
Pass_Sign='LowQual'
if ka2[2]=='heta':
GenoType='1|0'
elif ka2[2]=='hetb':
GenoType='0|1'
elif ka2[2]=='homo':
GenoType='1|1'
else:
print ka2[2]
ka_new=[ka1,ka2[0],ka2[-1],REF_AL,'<INV>',ka2[3],Pass_Sign,'SVTYPE=INV;END='+str(ka2[1]),'GT',GenoType]
if not ka2[-1] in sv_out[ka1].keys():
sv_out[ka1][ka2[-1]]=[]
if not ka_new in sv_out[ka1][ka2[-1]]:
sv_out[ka1][ka2[-1]].append(ka_new)
for ka1 in dup1.keys():
if not ka1 in sv_out.keys():
sv_out[ka1]={}
for ka2 in dup1[ka1]:
#fref=os.popen(r'''samtools faidx %s %s:%s-%s'''%(ref,ka1,str(ka2[0]+1),str(ka2[0]+1)))
#tre=fref.readline().strip().split()
#REF_AL=fref.readline().strip().split()[0]
REF_AL='N'
CopyNumber=str(ka2[-1])
Pass_Sign='PASS'
if ka2[3]<score_Cff:
Pass_Sign='LowQual'
if ka2[2]=='heta':
GenoType='1|0'
elif ka2[2]=='hetb':
GenoType='0|1'
elif ka2[2]=='homo':
GenoType='1|1'
else:
print ka2[2]
ka_new=[ka1,ka2[0],ka2[-2],REF_AL,'<DUP>',ka2[3],Pass_Sign,'SVTYPE=DUP;END='+str(ka2[1]),'GT:CN',GenoType+':'+CopyNumber]
if not ka2[-2] in sv_out[ka1].keys():
sv_out[ka1][ka2[-2]]=[]
if not ka_new in sv_out[ka1][ka2[-2]]:
sv_out[ka1][ka2[-2]].append(ka_new)
for ka1 in tra1.keys():
ks1=ka1.split('_')[0]
ks2='_'.join(ka1.split('_')[:-1])
SV_Score=float(ka1.split('_')[-1])
Pass_Sign='PASS'
if SV_Score<score_Cff:
Pass_Sign='LowQual'
if not ks1 in sv_out.keys():
sv_out[ks1]={}
if not ks2 in sv_out[ks1].keys():
sv_out[ks1][ks2]=[]
for ka2 in tra1[ka1].keys():
hetx='het'+ka2
if ka2=='a':
GenoType='1|0'
elif ka2=='b':
GenoType='0|1'
else:
print ka2[2]
for ka3 in tra1[ka1][ka2]:
ka_new=ka3[:2]+[ks2,ka3[2]]+ka3[3:]+[SV_Score,Pass_Sign,'SVTYPE=TRA','GT',GenoType]
if not ka_new in sv_out[ks1][ks2]:
sv_out[ks1][ks2].append(ka_new)
def fasta_comp_write_a(fasta_out):
fo1=open(fasta_out.replace('.comp.fa','.comp1.fa'),'w')
#fo2=open(fasta_out.replace('.het.fa','.het2.fa'),'w')
fo1.close()
#fo2.close()
print fasta_out.replace('.comp.fa','.comp1.fa')
#print fasta_out.replace('.het.fa','.het2.fa')
for k1 in chromos:
fo1=open(fasta_out.replace('.comp.fa','.comp1.fa'),'a')
#fo2=open(fasta_out.replace('.het.fa','.het2.fa'),'a')
print >>fo1, '>'+k1
#print >>fo2, '>'+k1
new1_ref=''
rec1_start=0
#new2_ref=''
#rec2_start=0
for k2 in sorted(order_SV_Pos[k1].keys()):
print [k1,k2]
rec1_start+=1
k3=order_SV_Pos[k1][k2]
start=int(k3[0][0][1])
end=int(k3[0][0][-1])
new1_ref+=Ref_Ref_Produce(k1,[rec1_start,start-1],ref)
if not k3[0][1][0]==k3[0][1][2]:
new1_ref+=Ref_Alt_Produce(chromos,k3[0][0],k3[0][1][0],ref)
else:
new1_ref+=Ref_Ref_Produce(k1,[start,end],ref)
rec1_start=end
rec1_start+=1
#rec2_start+=1
new1_ref+=Ref_Ref_Produce(k1,[rec1_start,chromo_length[k1]],ref)
new1_seq=[]
for ka1 in range(len(new1_ref)/60):
new1_seq.append(new1_ref[ka1*60:(ka1+1)*60])
new1_seq.append(new1_ref[(ka1+1)*60:])
for ka1 in new1_seq:
if not ka1=='':
print >>fo1, ka1
fo1.close()
def fasta_comp_write_b(fasta_out):
#fo1=open(fasta_out.replace('.het.fa','.het1.fa'),'w')
fo2=open(fasta_out.replace('.comp.fa','.comp2.fa'),'w')
#fo1.close()
fo2.close()
#print fasta_out.replace('.het.fa','.het1.fa')
print fasta_out.replace('.comp.fa','.comp2.fa')
for k1 in chromos:
#fo1=open(fasta_out.replace('.het.fa','.het1.fa'),'a')
fo2=open(fasta_out.replace('.comp.fa','.comp2.fa'),'a')
#print >>fo1, '>'+k1
print >>fo2, '>'+k1
#new1_ref=''
#rec1_start=0
new2_ref=''
rec2_start=0
for k2 in sorted(order_SV_Pos[k1].keys()):
print [k1,k2]
k3=order_SV_Pos[k1][k2]
start=int(k3[0][0][1])
end=int(k3[0][0][-1])
rec2_start+=1
new2_ref+=Ref_Ref_Produce(k1,[rec2_start,start-1],ref)
if not k3[0][1][1]==k3[0][1][2]:
new2_ref+=Ref_Alt_Produce(chromos,k3[0][0],k3[0][1][1],ref)
else:
new2_ref+=Ref_Ref_Produce(k1,[start,end],ref)
rec2_start=end
#rec1_start+=1
rec2_start+=1
new2_ref+=Ref_Ref_Produce(k1,[rec2_start,chromo_length[k1]],ref)
new2_seq=[]
for ka1 in range(len(new2_ref)/60):
new2_seq.append(new2_ref[ka1*60:(ka1+1)*60])
new2_seq.append(new2_ref[(ka1+1)*60:])
for ka1 in new2_seq:
if not ka1=='':
print >>fo2, ka1
#fo1.close()
fo2.close()
def write_VCF_header(output_file):
fo=open(output_file,'w')
print output_file
print>>fo, '##fileformat=VCFv4.1'
print>>fo,'##fileDate='+time.strftime("%Y%m%d")
print>>fo,'##reference=hg19'
print>>fo,'##INFO=<ID=BKPTID,Number=.,Type=String,Description="ID of the assembled alternate allele in the assembly file">'
print>>fo,'##INFO=<ID=CIEND,Number=2,Type=Integer,Description="Confidence interval around END for imprecise variants">'
print>>fo,'##INFO=<ID=CIPOS,Number=2,Type=Integer,Description="Confidence interval around POS for imprecise variants">'
print>>fo,'##INFO=<ID=END,Number=1,Type=Integer,Description="End position of the variant described in this record">'
print>>fo,'##INFO=<ID=HOMLEN,Number=.,Type=Integer,Description="Length of base pair identical micro-homology at event breakpoints">'
print>>fo,'##INFO=<ID=HOMSEQ,Number=.,Type=String,Description="Sequence of base pair identical micro-homology at event breakpoints">'
print>>fo,'##INFO=<ID=IMPRECISE,Number=0,Type=Flag,Description="Imprecise structural variation">'
print>>fo,'##INFO=<ID=MEINFO,Number=4,Type=String,Description="Mobile element info of the form NAME,START,END,POLARITY">'
print>>fo,'##INFO=<ID=SVLEN,Number=.,Type=Integer,Description="Difference in length between REF and ALT alleles">'
print>>fo,'##INFO=<ID=SVTYPE,Number=1,Type=String,Description="Type of structural variant">'
		print>>fo,'##FILTER=<ID=LowQual,Description="Score of final structural variant - theoretical score < -50">'
print>>fo,'##ALT=<ID=DEL,Description="Deletion">'
print>>fo,'##ALT=<ID=DEL:ME:ALU,Description="Deletion of ALU element">'
print>>fo,'##ALT=<ID=DEL:ME:L1,Description="Deletion of L1 element">'
print>>fo,'##ALT=<ID=DUP,Description="Duplication">'
print>>fo,'##ALT=<ID=DUP:TANDEM,Description="Tandem Duplication">'
print>>fo,'##ALT=<ID=INS,Description="Insertion of novel sequence">'
print>>fo,'##ALT=<ID=INS:ME:ALU,Description="Insertion of ALU element">'
print>>fo,'##ALT=<ID=INS:ME:L1,Description="Insertion of L1 element">'
print>>fo,'##ALT=<ID=INV,Description="Inversion">'
print>>fo,'##ALT=<ID=CNV,Description="Copy number variable region">'
print>>fo,'##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">'
print>>fo,'##FORMAT=<ID=GQ,Number=1,Type=Float,Description="Genotype quality">'
print>>fo,'##FORMAT=<ID=CN,Number=1,Type=Integer,Description="Copy number genotype for imprecise events">'
print>>fo,'##FORMAT=<ID=CNQ,Number=1,Type=Float,Description="Copy number genotype quality for imprecise events">'
print>>fo,'\t'.join(['#CHROM','POS','ID','REF','ALT','QUAL','FILTER','INFO','FORMAT',output_file.split('/')[-1].replace('.vcf','')])
fo.close()
def write_VCF_main(output_file):
fo=open(output_file,'a')
print output_file
sv_reorganize={}
for k1 in sv_out.keys():
sv_reorganize[k1]={}
for k2 in sv_out[k1].keys():
start=int(k2.split('_')[1])
if not start in sv_reorganize[k1].keys():
sv_reorganize[k1][start]={}
SVtemp_a=[]
SVtemp_b=[]
for k3 in sv_out[k1][k2]:
if not k3[:-1] in SVtemp_a:
SVtemp_a.append(k3[:-1])
SVtemp_b.append([k3[-1]])
else:
SVtemp_b[SVtemp_a.index(k3[:-1])].append(k3[-1])
SVtemp=[]
sv_reorganize[k1][start][k2]=[]
for k3 in range(len(SVtemp_a)):
if len(SVtemp_b[k3])==2 and SVtemp_b[k3] in [['0|1', '1|0'],['1|0', '0|1']]:
SVtemp_b[k3]=['1|1']
for k3 in range(len(SVtemp_a)):
for k4 in SVtemp_b[k3]:
sv_reorganize[k1][start][k2].append(SVtemp_a[k3]+[k4])
for k1 in chromos:
if k1 in sv_reorganize.keys():
for k2 in sorted(sv_reorganize[k1].keys()):
for k3 in sorted(sv_reorganize[k1][k2].keys()):
for k4 in sv_reorganize[k1][k2][k3]:
if k4[3]=='N':
k4[3]=ref_base_returnN(ref,k4[0],k4[1])
print >>fo, '\t'.join([str(i) for i in k4])
fo.close()
def simple_flag_SA(k1,k2):
temp=[]
break_flag=0
for i in k2:
if not i=='^':
temp.append(i)
else:
temp[-1]+=i
temp2=[temp[0]]
for i in range(len(temp[1:])):
if not '^' in temp[i] and not '^' in temp[i+1] and ord(temp[i+1])-ord(temp[i])==1:
temp2[-1]+=temp[i+1]
elif '^' in temp[i] and '^' in temp[i+1] and ord(temp[i+1][0])-ord(temp[i][0])==-1:
temp2[-1]=temp[i+1][0]+temp2[-1]
else:
temp2.append(temp[i+1])
outdel=[]
outinv=[]
outdup=[]
outtra=0
for i in range(len(temp2)):
j=temp2[i]
if '^' in j:
if not j.replace('^','') in outinv:
outinv.append(j.replace('^',''))
temp2[i]=j.replace('^','')
temp3=''.join(temp2)
for i in range(len(temp3)-1):
if ord(temp3[i+1])-ord(temp3[i])<0:
outtra=1
if not temp3==k1:
temp4=[]
for i in temp3:
if temp3.count(i)>1:
if not i in outdup:
outdup.append(i)
if not i in temp4:
temp4.append(i)
if not ''.join(temp4)==k1:
for i in k1:
if not i in temp4:
outdel.append(i)
if not outdup==[]:
dupuni=unit_produce(outdup)
outdup2=[]
k3=k2
for i in dupuni:
ia=i
ib=''.join([j+'^' for j in i[::-1]])
if len(i)>1:
if temp2.count(ia)+temp2.count(ib)>1:
outdup2.append([i,temp2.count(ia)+temp2.count(ib)])
k3=k3.replace(ia,'')
k3=k3.replace(ib,'')
elif len(i)==1:
if k3.count(ia)+k3.count(ib)>1:
outdup2.append([i,k3.count(ia)])
k3=k3.replace(ia,'')
k3=k3.replace(ib,'')
else:
outdup2=[]
return [outdel,outinv,outdup2,outtra]
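	#simple_flag_SA decomposes one alt haplotype against its reference string into
	#[deleted letters, inverted units, duplicated units with copy numbers, translocation flag], e.g.
	#simple_flag_SA('ab','b') -> [['a'], [], [], 0] and simple_flag_SA('ab','aab') -> [[], [], [['a', 2]], 0].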
def add_csv_info(csv1,flag_sex,k1,k2):
#flag_sex=1: Maternal
#flag_sex=2: Paternal
if flag_sex==1:
del_let=[csv1[0],[]]
inv_let=[csv1[1],[]]
dup_let=[csv1[2],[]]
else:
del_let=[[],csv1[0]]
inv_let=[[],csv1[1]]
dup_let=[[],csv1[2]]
for k3 in sv_info[k1][k2]:
del_info_add(k3,del_let)
inv_info_add(k3,inv_let)
dup_info_2_add(k3,dup_let)
if csv1[3]==1:
tra_info_add(k1,k2)
def del_info_add(k3,del_let):
tempa=bp_to_hash(k3[:-1],del_let[0])
tempb=bp_to_hash(k3[:-1],del_let[1])
for k1 in tempa:
if k1 in tempb:
				tempc='homo' #was 'hom', which never matched the 'homo' genotype key tested in hash_reorder
tempb.remove(k1)
else:
tempc='heta'
if not k1[0] in del1.keys():
del1[k1[0]]=[]
del1[k1[0]].append(k1[1:]+[tempc,k3[-1],'_'.join(k3[:-1])])
for k1 in tempb:
if not k1[0] in del1.keys():
del1[k1[0]]=[]
del1[k1[0]].append(k1[1:]+['hetb',k3[-1],'_'.join(k3[:-1])])
def dup_info_add(k3,dup_let):
#dup_let=[k2i,k2j]
for k2x in dup_let:
for k4 in k2x:
temp=bp_to_hash(k3[:-1],[i for i in k4])
for k5 in temp:
if not k5[0] in dup1.keys():
dup1[k5[0]]=[]
dup1[k5[0]].append(k5[1:]+[k3[-1],'_'.join(k3[:-1]),k2a.count(k4)])
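					#NOTE: k2a is not defined in this function's scope; the line above raises NameError unless a global k2a happens to exist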
def dup_info_2_add(k3,dup_let):
temprec=-1
for k2x in dup_let:
temprec+=1
hetx=['heta','hetb'][temprec]
for k4 in k2x:
temp=bp_to_hash(k3[:-1],[i for i in k4[0]])
for k5 in temp:
if not k5[0] in dup1.keys():
dup1[k5[0]]=[]
if k4[1]>1:
dup1[k5[0]].append(k5[1:]+[hetx,k3[-1],'_'.join(k3[:-1]),k4[1]])
def inv_info_add(k3,inv_let):
#inv_let=[k2m,k2n]
temprec=-1
for k2x in inv_let:
temprec+=1
hetx=['heta','hetb'][temprec]
for k4 in k2x:
temp=bp_to_hash(k3[:-1],[i for i in k4])
for k5 in temp:
if not k5[0] in inv1.keys():
inv1[k5[0]]=[]
inv1[k5[0]].append(k5[1:]+[hetx,k3[-1],'_'.join(k3[:-1])])
def let_reclust(vec_in):
if vec_in==[]:
return []
else:
k2e=[]
k2e=[vec_in[0]]
for k3 in range(len(vec_in)-1):
if '^' in vec_in[k3+1]:
if '^' in vec_in[k3] and ord(vec_in[k3][0])-ord(vec_in[k3+1][0])==1:
k2e[-1]+=vec_in[k3+1]
else:
k2e.append(vec_in[k3+1])
else:
if ord(vec_in[k3+1][0])-ord(vec_in[k3][0])==1 and not '^' in vec_in[k3]:
k2e[-1]+=vec_in[k3+1]
else:
k2e.append(vec_in[k3+1])
k2f=[]
for k3 in k2e:
if '^' in k3:
k5=''
for k4 in range(len(k3)/2):
k5+=k3[2*k4]
k6=k5[::-1]+'^'
if not k6 in k2f:
k2f.append(k6)
else:
k2f.append(k3)
return k2f
def dup_let_recombind(vec_in):
if vec_in==[]:
return []
else:
vec2=sorted(vec_in)
vec=[[vec2[0]]]
for ka in vec2[1:]:
if ord(ka)-ord(vec[-1][-1])==1:
vec[-1].append(ka)
else:
vec.append([ka])
vec3=[]
for ka in vec:
if len(ka)==1:
vec3.append(ka)
else:
for kb in range(2,len(ka)+1):
for kc in ka[:(1-kb)]:
vec3.append([])
for kd in range(kb):
vec3[-1].append(ka[ka.index(kc)+kd])
vec4=[''.join(i) for i in vec3]
return vec4
def comp_info_reorganize(k1,k2):
del_let=[[],[]]
dup_let=[[],[]]
inv_let=[[],[]]
tra_let=[[],[]]
k2a=k2.split('/')[0]
k2b=k2.split('/')[1]
k2c=[]
k2d=[]
for k3 in k2a:
if not k3=='^':
k2c.append(k3)
else:
k2c[-1]+=k3
for k3 in k2b:
if not k3=='^':
k2d.append(k3)
else:
k2d[-1]+=k3
for k3 in k1.split('/')[0]:
if k2a.count(k3)==0:
del_let[0].append(k3)
if k2b.count(k3)==0:
del_let[1].append(k3)
if k2a.count(k3)>1:
dup_let[0].append(k3)
if k2b.count(k3)>1:
dup_let[1].append(k3)
k2e=let_reclust(k2c)
k2f=let_reclust(k2d)
k2g=dup_let_recombind(dup_let[0])
k2h=dup_let_recombind(dup_let[1])
		k2i=[] #integrated duplicated sections on haplotype a
		k2j=[] #integrated duplicated sections on haplotype b
for k3 in k2g:
flag1=0
for k4 in k2e:
if k3 in k4:
flag1+=1
if flag1>1:
k2i.append(k3)
for k3 in dup_let[0]:
if k2e.count(k3[0])+k2e.count(k3[0]+'^')>0:
if not k3[0] in k2i:
k2i.append(k3[0])
for k3 in k2h:
flag1=0
for k4 in k2e:
if k3 in k4:
flag1+=1
if flag1>1:
k2j.append(k3)
for k3 in dup_let[1]:
if k2e.count(k3[0])+k2e.count(k3[0]+'^')>0:
if not k3[0] in k2j:
k2j.append(k3[0])
k2m=[]
for k3 in k2e:
if k3[-1]=='^':
k2m.append(k3)
k2n=[]
for k3 in k2f:
if k3[-1]=='^':
k2n.append(k3)
for k3 in sv_info[k1][k2]:
del_info_add(k3,del_let)
dup_info_add(k3,[k2i,k2j])
inv_info_add(k3,[k2m,k2n])
def bp_to_hash(bp_list,sv_let):
bp_hash={}
block_rec=0
block_hash=[]
sv_let=[i[0] for i in sv_let]
for a3 in bp_list:
if a3 in chromos or not a3.isdigit():
block_hash.append([a3])
else:
block_hash[-1].append(a3)
for a3 in block_hash:
for a4 in range(len(a3)-2):
bp_hash[chr(97+block_rec)]=[a3[0],a3[a4+1],a3[a4+2]]
block_rec+=1
out=[]
if not sv_let==[]:
if len(sv_let)==1:
out=[bp_hash[sv_let[0]]]
else:
out.append(bp_hash[sv_let[0]])
for ka in range(len(sv_let)-1):
if ord(sv_let[ka+1])-ord(sv_let[ka])==1 and bp_hash[sv_let[ka+1]][0]==bp_hash[sv_let[ka]][0]:
out[-1]+=bp_hash[sv_let[ka+1]][1:]
else:
out.append(bp_hash[sv_let[ka+1]])
out2=[]
for ka in out:
out2.append([ka[0],int(ka[1]),int(ka[-1])])
return out2
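	#bp_to_hash converts letters back to coordinates, merging adjacent same-chromosome blocks;
	#with bp_list=['1','100','200','300'] (assuming '1' is in chromos),
	#bp_to_hash(bp_list,['a','b']) -> [['1', 100, 300]] and bp_to_hash(bp_list,['b']) -> [['1', 200, 300]].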
def sv_homo_initial():
sv_homo_info['DEL']=[]
sv_homo_info['DUP']=[]
sv_homo_info['INV']=[]
sv_homo_info['TRA']=[]
def produce_keys(key):
if key=='DEL':
ka='a/a'
kb='/'
elif key=='DUP':
ka='a/a'
dup_num=random.sample(range(2,20),1)
kb='/'.join([''.join(['a' for i in range(dup_num[0])]),''.join(['a' for i in range(dup_num[0])])])
elif key=='INV':
ka='a/a'
kb='a^/a^'
elif key=='TRA':
ka='ab/ab'
kb='ba/ba'
return [ka,kb]
def sv_homo_produce():
for k1 in SV_region:
sv_len=k1[2]-k1[1]
k2=k1[-1]
sv_homo_info[k2].append(k1+produce_keys(k2))
def sv_het_produce():
for k1 in sv_homo_info.keys():
sv_het_info[k1]=[]
for k2 in sv_homo_info[k1]:
allele=random.choice(range(2))
alle_poor=[k2[-2].split('/')[0],k2[-1].split('/')[0]]
k2[-1]='/'.join([alle_poor[allele],alle_poor[1-allele]])
sv_het_info[k1].append(k2)
def sv_rec_homo_produce():
for k1 in sv_homo_info.keys():
fo=open(dict_opts['--output-prefix']+'.homo.'+k1+'.rec','w')
print dict_opts['--output-prefix']+'.homo.'+k1+'.rec'
for k2 in sv_homo_info[k1]:
print >>fo, ' '.join([str(i) for i in k2])
fo.close()
def sv_rec_het_produce():
for k1 in sv_het_info.keys():
fo=open(dict_opts['--output-prefix']+'.het.'+k1+'.rec','w')
print dict_opts['--output-prefix']+'.het.'+k1+'.rec'
for k2 in sv_het_info[k1]:
print >>fo, ' '.join([str(i) for i in k2])
fo.close()
def sv_info_rewrite(sv_h_info):
for k1 in sv_h_info.keys():
for k2 in sv_h_info[k1]:
if not k2[-2] in sv_info.keys():
sv_info[k2[-2]]={}
if not k2[-1] in sv_info[k2[-2]].keys():
sv_info[k2[-2]][k2[-1]]=[]
sv_info[k2[-2]][k2[-1]].append([str(i) for i in k2[:-3]]+[0.0])
def sv_stat_calcu(sv_hash,key):
out=[]
for k1 in sv_hash[key]:
sv_min=int(k1[1])
sv_max=int(k1[2])
sv_int=(int(k1[2])-int(k1[1]))/3
out.append([k1[0],sv_min,sv_min+sv_int, sv_max-sv_int,sv_max])
return out
def sv_size_pick(sv_stat):
out=[]
for k1 in sv_stat:
out+=[random.choice(range(int(k1[1]),int(k1[2]))) for i in range(int(k1[0]/3))]
out+=[random.choice(range(int(k1[2]),int(k1[3]))) for i in range(int(int(k1[0])-int(k1[0]/3))/2)]
out+=[random.choice(range(int(k1[3]),int(k1[4]))) for i in range(int(k1[0])-int(k1[0]/3)-int(int(k1[0])-int(k1[0]/3))/2)]
		out=random.sample(out,len(out)) #shuffle in place; the original assigned the shuffle to an unused 'permute' and returned the unshuffled list
		return out
def chromos_readin(refs):
fin=open(refs+'.fai')
chromos=[]
chromo_length=[]
genome_length=0
for line in fin:
pin=line.strip().split()
chromos.append(pin[0])
genome_length+=int(pin[1])
chromo_length.append(int(pin[1]))
fin.close()
chromo_num_region=[]
for k1 in chromo_length:
chromo_num_region.append(int(round(float(k1)/float(genome_length)*sv_total_num)))
chrom_to_remove=[]
out_num_region=[]
out_chromos=[]
out_length={}
for i in range(len(chromo_num_region)):
if chromo_num_region[i]>1:
out_chromos.append(chromos[i])
out_num_region.append(chromo_num_region[i])
out_length[chromos[i]]=chromo_length[i]
return [genome_length]+[out_chromos]+[out_num_region]+[out_length]
def sv_hash_add(list_in,key):
for i in list_in:
if not i in sv_hash.keys():
sv_hash[i]=[key]
else:
sv_hash[i]+=[key]
def sv_region_pick():
#pick random regions across the genome
SV_region=[]
rec=-1
sv_size=del_size+dup_size+inv_size+tra_size
sv_size=random.sample(sv_size,len(sv_size))
for k1 in range(len(chromos)):
chromosome=chromos[k1]
num_region=chromo_num_region[k1]
range_region=chromo_length[chromosome]
temp_start_region=sorted(random.sample(range(1000, range_region-1000),num_region+1))
temp_end_region=[]
for k2 in range(num_region):
start=temp_start_region[k2]
start2=temp_start_region[k2+1]
if start2-start<1000: continue
rec+=1
temp_sv_size=sv_size[rec]
sv_type=sv_hash[sv_size[rec]][0]
del sv_hash[sv_size[rec]][0]
end=start+temp_sv_size
if not end<start2-300:
end=random.choice(range(start,int(numpy.mean([start,start2]))))
if sv_type=='TRA':
end2=random.choice(range(end+100,start2-100))
temp_end_region.append(end)
if sv_type=='TRA':
SV_region.append([chromos[k1],start,end,end2,sv_type])
else:
SV_region.append([chromos[k1],start,end,sv_type])
return SV_region
def ref_base_returnN(ref,chromo,pos):
return 'N'
def ref_base_readin(ref,chromo,pos):
fref=os.popen(r'''samtools faidx %s %s:%s-%s'''%(ref,chromo,str(pos),str(pos)))
tre=fref.readline().strip().split()
REF_AL=fref.readline().strip().split()
if not REF_AL==[]:
return REF_AL[0]
else:
return 'N'
def del_flag_SA(k1,k2):
out=0
if not '^' in k2:
flagdup=0
for i in k2:
if k2.count(i)>1:
flagdup+=1
if flagdup==0:
flagtra=0
for i in range(len(k2)-1):
if ord(k2[i+1])-ord(k2[i])<1:
flagtra+=1
if flagtra==0:
if not k1==k2:
out=1
return out
def order_SV_Homo_write(sv_info):
for k1 in sv_info.keys():
for k2 in sv_info[k1].keys():
for k3 in sv_info[k1][k2]:
if not k3[0] in order_SV_Pos.keys():
order_SV_Pos[k3[0]]={}
if not int(k3[1]) in order_SV_Pos[k3[0]].keys():
order_SV_Pos[k3[0]][int(k3[1])]=[]
order_SV_Pos[k3[0]][int(k3[1])].append([[k3[0]]+[int(i) for i in k3[1:-1]],[k2.split('/')[0]]])
def order_SV_Het_write(sv_info):
for k1 in sv_info.keys():
for k2 in sv_info[k1].keys():
for k3 in sv_info[k1][k2]:
if not k3[0] in order_SV_Pos.keys():
order_SV_Pos[k3[0]]={}
if not int(k3[1]) in order_SV_Pos[k3[0]].keys():
order_SV_Pos[k3[0]][int(k3[1])]=[]
order_SV_Pos[k3[0]][int(k3[1])].append([[k3[0]]+[int(i) for i in k3[1:-1]],[k2.split('/')[0],k2.split('/')[1],k1.split('/')[0]]])
def order_SV_Comp_write(sv_info):
fo=open(dict_opts['--output-prefix']+'.comp.CSV.rec','w')
rec=0
for k1 in sv_info.keys():
for k2 in sv_info[k1].keys():
for k3 in sv_info[k1][k2]:
rec+=1
print >>fo, ' '.join([str(i) for i in k3+[k1,k2]])
fo.close()
def Ref_Alt_Produce(ChromoList,bp_list,letter_new,Ref_Seq_File):
		#ChromoList: chromosome names, used to split bp_list into per-chromosome blocks
		#bp_list: chromosome name(s) followed by breakpoint positions, e.g. [184569179, 184569775, 184571064, 184572009, 184572016]
		#letter_new: rearranged structure to build, e.g. 'aba' or 'ab^'; '' means every block is deleted
		#Ref_Seq_File: indexed fasta queried through samtools faidx
if letter_new=='':
return ''
else:
bp_hash={}
bp_seq=[]
for k1 in bp_list:
if k1 in ChromoList:
bp_seq.append([k1])
else:
bp_seq[-1].append(k1)
rec=0
for k1 in bp_seq:
for k2 in range(len(k1)-2):
rec+=1
bp_hash[chr(96+rec)]=[k1[0],k1[k2+1],k1[k2+2]]
letter_seq={}
for k1 in bp_hash.keys():
Chromo=bp_hash[k1][0]
region_left=bp_hash[k1][1]
region_right=bp_hash[k1][2]
seq=os.popen(r'''samtools faidx %s %s:%d-%d'''%(Ref_Seq_File,Chromo,region_left,region_right))
seq.readline().strip().split()
lines=[]
while True:
line=seq.readline().strip().split()
if not line: break
lines.append(line)
Seq1=lines[0][0]
for j in range(len(lines))[1:]:
Seq1=''.join([Seq1,lines[j][0]])
letter_seq[k1]=Seq1
letter_seq[k1+'^']=reverse(complementary(Seq1))
new_Seq=''
new_letter=[]
for k1 in letter_new:
if not k1=='^':
new_letter.append(k1)
else:
new_letter[-1]+=k1
for k1 in new_letter:
new_Seq+=letter_seq[k1]
return new_Seq
def Ref_Ref_Produce(Chromo,bp_list,Ref_Seq_File):
start=int(bp_list[0])
end=int(bp_list[-1])
new1_ref=''
fin=os.popen(r'''samtools faidx %s %s:%d-%d'''%(Ref_Seq_File, Chromo, start,end))
fin.readline().strip().split()
for line in fin:
pin=line.strip().split()
new1_ref+=pin[0]
fin.close()
return new1_ref
def reverse(seq):
seq2=[]
for i in seq[::-1]:
seq2.append(i)
return ''.join(seq2)
def complementary(seq):
seq2=[]
for i in seq:
if i in 'ATGCN':
seq2.append('ATGCN'['TACGN'.index(i)])
elif i in 'atgcn':
seq2.append('atgcn'['tacgn'.index(i)])
return ''.join(seq2)
def unit_produce(list):
temp1=[sorted(list)[0]]
for k1 in sorted(list)[1:]:
if ord(k1)-ord(temp1[-1][-1])==1:
temp1[-1]+=k1
else:
temp1.append(k1)
temp2=[]
for k1 in temp1:
for k2 in range(len(k1)+1)[1:]:
for k3 in range(len(k1)-k2+1):
temp2.append(k1[k3:(k3+k2)])
return temp2[::-1]
def fasta_homo_write(fasta_out):
fo=open(fasta_out,'w')
print fasta_out
for k1 in chromos:
print >>fo, '>'+k1
new1_ref=''
rec1_start=0
for k2 in sorted(order_SV_Pos[k1].keys()):
rec1_start+=1
k3=order_SV_Pos[k1][k2]
start=int(k3[0][0][1])
end=int(k3[0][0][-1])
new1_ref+=Ref_Ref_Produce(k1,[rec1_start,start-1],ref)
new1_ref+=Ref_Alt_Produce(chromos,k3[0][0],k3[0][1][0],ref)
rec1_start=end
rec1_start+=1
new1_ref+=Ref_Ref_Produce(k1,[rec1_start,chromo_length[k1]],ref)
new1_seq=[]
for k1 in range(len(new1_ref)/60):
new1_seq.append(new1_ref[k1*60:(k1+1)*60])
new1_seq.append(new1_ref[(k1+1)*60:])
for k1 in new1_seq:
if not k1=='':
print >>fo, k1
fo.close()
def fasta_het_write_a(fasta_out):
fo1=open(fasta_out.replace('.het.fa','.het1.fa'),'w')
#fo2=open(fasta_out.replace('.het.fa','.het2.fa'),'w')
fo1.close()
#fo2.close()
print fasta_out.replace('.het.fa','.het1.fa')
#print fasta_out.replace('.het.fa','.het2.fa')
for k1 in chromos:
fo1=open(fasta_out.replace('.het.fa','.het1.fa'),'a')
#fo2=open(fasta_out.replace('.het.fa','.het2.fa'),'a')
print >>fo1, '>'+k1
#print >>fo2, '>'+k1
new1_ref=''
rec1_start=0
#new2_ref=''
#rec2_start=0
for k2 in sorted(order_SV_Pos[k1].keys()):
print [k1,k2]
rec1_start+=1
k3=order_SV_Pos[k1][k2]
start=int(k3[0][0][1])
end=int(k3[0][0][-1])
new1_ref+=Ref_Ref_Produce(k1,[rec1_start,start-1],ref)
if not k3[0][1][0]==k3[0][1][2]:
new1_ref+=Ref_Alt_Produce(chromos,k3[0][0],k3[0][1][0],ref)
else:
new1_ref+=Ref_Ref_Produce(k1,[start,end],ref)
rec1_start=end
#rec2_start+=1
#new2_ref+=Ref_Ref_Produce(k1,[rec2_start,start-1],ref)
#if not k3[0][1][1]==k3[0][1][2]:
# new2_ref+=Ref_Alt_Produce(chromos,k3[0][0],k3[0][1][1],ref)
#else:
# new2_ref+=Ref_Ref_Produce(k1,[start,end],ref)
#rec2_start=end
rec1_start+=1
#rec2_start+=1
new1_ref+=Ref_Ref_Produce(k1,[rec1_start,chromo_length[k1]],ref)
new1_seq=[]
for ka1 in range(len(new1_ref)/60):
new1_seq.append(new1_ref[ka1*60:(ka1+1)*60])
new1_seq.append(new1_ref[(ka1+1)*60:])
for ka1 in new1_seq:
if not ka1=='':
print >>fo1, ka1
#new2_ref+=Ref_Ref_Produce(k1,[rec2_start,chromo_length[k1]],ref)
#new2_seq=[]
#for ka1 in range(len(new2_ref)/60):
# new2_seq.append(new2_ref[ka1*60:(ka1+1)*60])
#new2_seq.append(new2_ref[(ka1+1)*60:])
#for ka1 in new2_seq:
# if not ka1=='':
# print >>fo2, ka1
fo1.close()
#fo2.close()
def fasta_het_write_b(fasta_out):
#fo1=open(fasta_out.replace('.het.fa','.het1.fa'),'w')
fo2=open(fasta_out.replace('.het.fa','.het2.fa'),'w')
#fo1.close()
fo2.close()
#print fasta_out.replace('.het.fa','.het1.fa')
print fasta_out.replace('.het.fa','.het2.fa')
for k1 in chromos:
#fo1=open(fasta_out.replace('.het.fa','.het1.fa'),'a')
fo2=open(fasta_out.replace('.het.fa','.het2.fa'),'a')
#print >>fo1, '>'+k1
print >>fo2, '>'+k1
#new1_ref=''
#rec1_start=0
new2_ref=''
rec2_start=0
for k2 in sorted(order_SV_Pos[k1].keys()):
print [k1,k2]
k3=order_SV_Pos[k1][k2]
start=int(k3[0][0][1])
end=int(k3[0][0][-1])
#rec1_start+=1
#new1_ref+=Ref_Ref_Produce(k1,[rec1_start,start-1],ref)
#if not k3[0][1][0]==k3[0][1][2]:
# new1_ref+=Ref_Alt_Produce(chromos,k3[0][0],k3[0][1][0],ref)
#else:
# new1_ref+=Ref_Ref_Produce(k1,[start,end],ref)
#rec1_start=end
rec2_start+=1
new2_ref+=Ref_Ref_Produce(k1,[rec2_start,start-1],ref)
if not k3[0][1][1]==k3[0][1][2]:
new2_ref+=Ref_Alt_Produce(chromos,k3[0][0],k3[0][1][1],ref)
else:
new2_ref+=Ref_Ref_Produce(k1,[start,end],ref)
rec2_start=end
#rec1_start+=1
rec2_start+=1
#new1_ref+=Ref_Ref_Produce(k1,[rec1_start,chromo_length[k1]],ref)
#new1_seq=[]
#for ka1 in range(len(new1_ref)/60):
# new1_seq.append(new1_ref[ka1*60:(ka1+1)*60])
#new1_seq.append(new1_ref[(ka1+1)*60:])
#for ka1 in new1_seq:
# if not ka1=='':
# print >>fo1, ka1
new2_ref+=Ref_Ref_Produce(k1,[rec2_start,chromo_length[k1]],ref)
new2_seq=[]
for ka1 in range(len(new2_ref)/60):
new2_seq.append(new2_ref[ka1*60:(ka1+1)*60])
new2_seq.append(new2_ref[(ka1+1)*60:])
for ka1 in new2_seq:
if not ka1=='':
print >>fo2, ka1
#fo1.close()
fo2.close()
def Sample_info_ReadIn(Sam_File):
fi=open(Sam_File)
for line in fi:
pin=line.strip().split()
if not pin==[]:
if not pin[0] in sv_hash.keys():
sv_hash[pin[0]]=[]
sv_hash[pin[0]].append([int(i) for i in pin[1:]])
sv_hash[pin[0]][-1][0]=int(sv_hash[pin[0]][-1][0]*1.25)
else:
sv_hash[pin[0]].append([int(i) for i in pin[1:]])
sv_hash[pin[0]][-1][0]=int(sv_hash[pin[0]][-1][0]*1.25)
fi.close()
def sv_total_num_calcu():
sv_total_num=0
for k1 in del_stat:
sv_total_num+=k1[0]
for k1 in dup_stat:
sv_total_num+=k1[0]
for k1 in inv_stat:
sv_total_num+=k1[0]
for k1 in tra_stat:
sv_total_num+=k1[0]
return sv_total_num
def pick_random_seqs(ref,sv_total_num,chromo_length):
		#About 12% of all SVs carry micro-insertions at one or both ends, so twice that many
		#sequences are picked at random from the genome to serve as the long (>20 bp) micro-insertions.
num_micro_ins_over20bp=float(sv_total_num)*0.12*2
genome_length=0
chromos_num_regions={}
chrom_seqs={}
for x in chromo_length.keys():
if not 'GL' in x and not x in ['X','Y','MT']:
genome_length+=chromo_length[x]
for x in chromo_length.keys():
if not 'GL' in x and not x in ['X','Y','MT']:
chromos_num_regions[x]=float(chromo_length[x])/float(genome_length)*num_micro_ins_over20bp
for x in chromos_num_regions.keys():
chrom_seqs[x]=[]
int_num=int(round(chromos_num_regions[x]))
seq_pick=random.sample(range(10000,chromo_length[x]-10000),int_num)
for y in sorted(seq_pick):
length_pick=random.sample(range(20,50),1)[0]
seqs=os.popen(r'''samtools faidx %s %s:%d-%d'''%(ref,x,y,y+length_pick))
seqs.readline()
test=seqs.readline().strip()
if not 'NNNNNNNN' in test:
chrom_seqs[x].append(test)
seqs.close()
return chrom_seqs
def produce_random_seqs(length):
out=[]
for x in range(length):
out.append(random.choice(['A','T','G','C']))
return ''.join(out)
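	#NOTE: this second definition of Ref_Alt_Produce shadows the earlier one in this branch at
	#runtime (Python keeps the last binding); it additionally tolerates multi-line or empty faidx
	#output and appends a micro-insertion via insert_read_decide, a helper expected to be defined
	#earlier in this script.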
def Ref_Alt_Produce(ChromoList,bp_list,letter_new,Ref_Seq_File):
		#ChromoList: chromosome names, used to split bp_list into per-chromosome blocks
		#bp_list: chromosome name(s) followed by breakpoint positions, e.g. [184569179, 184569775, 184571064, 184572009, 184572016]
		#letter_new: rearranged structure to build, e.g. 'aba' or 'ab^'; '' means every block is deleted
		#Ref_Seq_File: indexed fasta queried through samtools faidx
if letter_new=='':
return insert_read_decide(bp_list)
else:
bp_hash={}
bp_seq=[]
for k1 in bp_list:
if k1 in ChromoList:
bp_seq.append([k1])
else:
bp_seq[-1].append(k1)
rec=0
for k1 in bp_seq:
for k2 in range(len(k1)-2):
rec+=1
bp_hash[chr(96+rec)]=[k1[0],k1[k2+1],k1[k2+2]]
letter_seq={}
for k1 in bp_hash.keys():
Chromo=bp_hash[k1][0]
region_left=bp_hash[k1][1]
region_right=bp_hash[k1][2]
seq=os.popen(r'''samtools faidx %s %s:%d-%d'''%(Ref_Seq_File,Chromo,region_left,region_right))
seq.readline().strip().split()
lines=[]
while True:
line=seq.readline().strip().split()
if not line: break
lines.append(line)
Seq1=lines[0][0]
if len(lines)>1:
for j in range(len(lines))[1:]:
if not lines[j]==[]:
Seq1=''.join([Seq1,lines[j][0]])
letter_seq[k1]=Seq1
letter_seq[k1+'^']=reverse(complementary(Seq1))
new_Seq=''
new_letter=[]
for k1 in letter_new:
if not k1=='^':
new_letter.append(k1)
else:
new_letter[-1]+=k1
for k1 in new_letter:
new_Seq+=letter_seq[k1]
new_Seq+=insert_read_decide(bp_list)
return new_Seq
opts,args=getopt.getopt(sys.argv[2:],'',['reference=','input-sim=','input-rec=','output-prefix='])
dict_opts=dict(opts)
refs=dict_opts['--reference']
ref=refs
score_Cff=-20
Sam_File=dict_opts['--input-sim']
sv_hash={}
Sample_info_ReadIn(Sam_File)
sv_stat=sv_stat_calcu(sv_hash,'DEL')
sv_size=sv_size_pick(sv_stat)
sv_total_num=sum([i[0] for i in sv_hash[sv_hash.keys()[0]]])
chromos_TOTAL=chromos_readin(refs)
genome_length=chromos_TOTAL[0]
chromos=chromos_TOTAL[1]
chromo_num_region=chromos_TOTAL[2]
chromo_length=chromos_TOTAL[3]
csv_hash={}
fin=open(dict_opts['--input-rec'])
csv1_hash={}
csv2_hash={}
for line in fin:
pin=line.strip().split()
if not pin[0] in csv_hash.keys():
csv_hash[pin[0]]=[]
if not pin[1] in csv_hash[pin[0]]:
csv_hash[pin[0]].append(pin[1])
if not pin[0] in csv1_hash.keys():
csv1_hash[pin[0]]=0
csv1_hash[pin[0]]+=int(pin[-1])
if not pin[1] in csv2_hash.keys():
csv2_hash[pin[1]]=0
csv2_hash[pin[1]]+=int(pin[-1])
fin.close()
csv1_keys=[]
for i in csv_hash.keys():
csv1_keys+=[i for j in range(csv1_hash[i])]
csv1_csv2_hash={}
for k1 in csv_hash.keys():
csv1_csv2_hash[k1]=[]
for k2 in csv_hash[k1]:
csv1_csv2_hash[k1]+=[k2 for j in range(csv2_hash[k2])]
overlap_hash={}
SV_region=csv_region_pick(sv_size)
ordered_sv_info=csv_rec_write(SV_region)
sv_info=csv_info_rewrite(SV_region)
del1={}
dup1={}
inv1={}
tra1={}
for k1ab in sorted(sv_info.keys()):
for k2ab in sv_info[k1ab].keys():
if not k2ab==k1ab:
tra_info_add(k1ab,k2ab)
sv_out={}
hash_reorder()
vcf_out=dict_opts['--output-prefix']+'.vcf'
write_VCF_header(vcf_out)
write_VCF_main(vcf_out)
fasta_out=dict_opts['--output-prefix']+'.comp.fa'
#produce fasta files containing all homozygous SVs
order_SV_Pos={}
order_SV_Comp_write(sv_info)
order_SV_Het_write(sv_info)
seq_ins_pools=pick_random_seqs(ref,sv_total_num,chromo_length)
fasta_comp_write_a(fasta_out)
fasta_comp_write_b(fasta_out)
os.system(r'''samtools faidx %s'''%(fasta_out.replace('.comp.fa','.comp1.fa')))
os.system(r'''samtools faidx %s'''%(fasta_out.replace('.comp.fa','.comp2.fa')))
|
{"hexsha": "54b73d4492f7b5357d54e5870dd217f28ae6f560", "size": 152415, "ext": "py", "lang": "Python", "max_stars_repo_path": "Support.Scripts/Produce.Simulated.FussyJuncs.py", "max_stars_repo_name": "mills-lab/svelter", "max_stars_repo_head_hexsha": "d318b06d588483fe8a8ebcac8c8a6c7878f2c2b3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 21, "max_stars_repo_stars_event_min_datetime": "2015-11-02T06:31:52.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-20T03:14:04.000Z", "max_issues_repo_path": "Support.Scripts/Produce.Simulated.FussyJuncs.py", "max_issues_repo_name": "mills-lab/svelter", "max_issues_repo_head_hexsha": "d318b06d588483fe8a8ebcac8c8a6c7878f2c2b3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2016-03-02T21:12:53.000Z", "max_issues_repo_issues_event_max_datetime": "2019-08-02T20:01:02.000Z", "max_forks_repo_path": "Support.Scripts/Produce.Simulated.FussyJuncs.py", "max_forks_repo_name": "mills-lab/svelter", "max_forks_repo_head_hexsha": "d318b06d588483fe8a8ebcac8c8a6c7878f2c2b3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2015-08-19T18:33:02.000Z", "max_forks_repo_forks_event_max_datetime": "2017-05-16T03:42:57.000Z", "avg_line_length": 36.9043583535, "max_line_length": 336, "alphanum_fraction": 0.5508906604, "include": true, "reason": "import numpy,from scipy", "num_tokens": 53196}
|
import pymysql
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import font_manager
import pandas as pd
import numpy as np
import jieba
import jieba.analyse as analyse
username = 'root'
password = 'mysql'
url = '127.0.0.1'
port = 3306
database = 'campus'
conn = pymysql.connect(host=url, port=port, database=database, user=username, password=password)
cursor = conn.cursor()
table_name = 'campus_2020_04_13_10_08_31'
# sql = 'select reply_num from {}'.format(table_name)
sql = 'select title from {}'.format(table_name)
cursor.execute(sql)
rows = cursor.fetchall()
df = pd.DataFrame(rows)
ll = []
for title_text in df[0]:
    # extract the top-2 keywords of each title
    keywords = analyse.extract_tags(title_text, topK=2, withWeight=False, allowPOS=(), withFlag=False)
    ll.extend(keywords)
print(ll)
print(len(ll))
lld = pd.DataFrame(ll, columns=['title'] )
lla = pd.DataFrame([1 for _ in range(len(lld))], columns=['count'])
llf = pd.concat([lld, lla], axis=1)
# print(llf)
lldd = llf.groupby('title').count()
lt = lldd.sort_values(by='count')[-20:]
print(lt)
title = lt.index
print(title)
data = lt['count']
my_font = font_manager.FontProperties(fname='/usr/share/fonts/noto-cjk/NotoSansCJK-Light.ttc')
# enable Chinese font support
plt.rcParams['font.sans-serif'] = ['WenQuanYi Zen Hei']  # render Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly
ps = plt.bar(title, data, label='', align='center')
# for p in ps[1]:
# p.set_fontproperties(my_font)
plt.xlabel('Hot Words')
plt.xticks(rotation=90)
plt.ylabel('Number')
plt.title('Hot-Graph')
plt.show()
print(type(lldd))
# jieba.analyse.extract_tags(sentence, topK=3, withWeight=False, allowPOS=(), withFlag=False)
# topK: number of top-weighted keywords to return; None returns all of them
# withWeight: if True, return a list of (word, weight) tuples instead of words
# allowPOS: keep only the listed parts of speech; empty means no filtering
# jieba.analyse.textrank(self, sentence, topK=20, withWeight=False, allowPOS=('ns', 'n', 'vn', 'v'), withFlag=False)
# similar to the TF-IDF method, but note that allowPOS has a non-empty default, so some parts of speech are filtered out by default
# bins = [i for i in range(0,200,10)]
# print(bins)
# plt.hist(df[0], bins, histtype='bar')
# plt.show()
# for i in rows:
#     print(i[0])
#     print(type(i[0]))
# print(rows)
|
{"hexsha": "e690e2b33475b8ca9cc4bd7aaedbf5be4e136c6a", "size": 2066, "ext": "py", "lang": "Python", "max_stars_repo_path": "01_crawl_cases/campus_public_opinion/huitu.py", "max_stars_repo_name": "zlj-zz/anti-crawlCase", "max_stars_repo_head_hexsha": "a6ed670ad332bd456572eeff707bd5fc14186b3d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-04-23T23:34:14.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-23T23:34:14.000Z", "max_issues_repo_path": "01_crawl_cases/campus_public_opinion/huitu.py", "max_issues_repo_name": "zlj-zz/Anti-crawlCase", "max_issues_repo_head_hexsha": "a6ed670ad332bd456572eeff707bd5fc14186b3d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "01_crawl_cases/campus_public_opinion/huitu.py", "max_forks_repo_name": "zlj-zz/Anti-crawlCase", "max_forks_repo_head_hexsha": "a6ed670ad332bd456572eeff707bd5fc14186b3d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.1518987342, "max_line_length": 116, "alphanum_fraction": 0.706195547, "include": true, "reason": "import numpy", "num_tokens": 690}
|
// Copyright Abel Sinkovics (abel@sinkovics.hu) 2010.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <mpllibs/metamonad/lambda.hpp>
#include <mpllibs/metamonad/lazy.hpp>
#include <mpllibs/metamonad/let.hpp>
#include <mpllibs/metamonad/eval_let.hpp>
#include <mpllibs/metamonad/returns.hpp>
#include <mpllibs/metamonad/syntax.hpp>
#include <mpllibs/metamonad/eval_syntax.hpp>
#include <mpllibs/metamonad/apply.hpp>
#include <boost/test/unit_test.hpp>
#include "common.hpp"
#include <boost/mpl/int.hpp>
#include <boost/mpl/equal_to.hpp>
#include <boost/mpl/plus.hpp>
#include <boost/mpl/assert.hpp>
#include <boost/type_traits.hpp>
BOOST_AUTO_TEST_CASE(test_lambda)
{
using boost::mpl::plus;
using boost::mpl::equal_to;
using boost::is_same;
using mpllibs::metamonad::lambda;
using mpllibs::metamonad::lazy;
using mpllibs::metamonad::eval_let;
using mpllibs::metamonad::let;
using mpllibs::metamonad::_;
using mpllibs::metamonad::returns;
using mpllibs::metamonad::syntax;
using mpllibs::metamonad::eval_syntax;
using mpllibs::metamonad::apply;
// test_metaprogramming_value
BOOST_MPL_ASSERT((
equal_to<int13, lambda<syntax<plus<int2, int11> > >::type::apply<>::type>
));
// test_no_argument
BOOST_MPL_ASSERT((
equal_to<int13, lambda<syntax<plus<int2,int11> > >::apply<>::type>
));
// test_simple_lambda
BOOST_MPL_ASSERT((
equal_to<int13, apply<lambda<x, syntax<plus<x, int11> > >, int2>::type>
));
// test_nested_lambda
BOOST_MPL_ASSERT((
equal_to<
int13,
apply<lambda<x, y, syntax<plus<x, y> > >, int2, int11>::type
>
));
// test_currying
BOOST_MPL_ASSERT((
equal_to<
int13,
apply<apply<lambda<x, y, syntax<plus<x, y> > >, int2>, int11>::type
>
));
// test_let_and_lambda3_arg
BOOST_MPL_ASSERT((
equal_to<
int11,
eval_syntax<
let<
s, syntax<int11>,
syntax<apply<lambda<x, s, z, syntax<int11> >, int0, int1, int2> >
>
>::type
>
));
// test_unused_arg
BOOST_MPL_ASSERT((
is_same<_, apply<lambda<_, syntax<returns<_> > >, int13>::type>
));
// test_laziness
BOOST_MPL_ASSERT((
equal_to<
int13,
apply<lambda<returns<x>, returns<syntax<plus<int2, x> > > >, int11>::type
>
));
// test_currying_and_lambda
BOOST_MPL_ASSERT((
equal_to<
int13,
apply<
eval_let<
y, syntax<int1>,
syntax<apply<lambda<x, y, syntax<y> >, int11> >
>,
int13
>::type
>
));
}
|
{"hexsha": "b12d5b1b56cb93939cc0302cb47bb8bfdbfb940e", "size": 2678, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "libs/metamonad/test/lambda.cpp", "max_stars_repo_name": "sabel83/mpllibs", "max_stars_repo_head_hexsha": "8e245aedcf658fe77bb29537aeba1d4e1a619a19", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 70.0, "max_stars_repo_stars_event_min_datetime": "2015-01-15T09:05:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-08T15:49:31.000Z", "max_issues_repo_path": "libs/metamonad/test/lambda.cpp", "max_issues_repo_name": "sabel83/mpllibs", "max_issues_repo_head_hexsha": "8e245aedcf658fe77bb29537aeba1d4e1a619a19", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 4.0, "max_issues_repo_issues_event_min_datetime": "2015-06-18T19:25:34.000Z", "max_issues_repo_issues_event_max_datetime": "2016-05-13T19:49:51.000Z", "max_forks_repo_path": "libs/metamonad/test/lambda.cpp", "max_forks_repo_name": "sabel83/mpllibs", "max_forks_repo_head_hexsha": "8e245aedcf658fe77bb29537aeba1d4e1a619a19", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 5.0, "max_forks_repo_forks_event_min_datetime": "2015-07-10T08:18:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-01T07:17:57.000Z", "avg_line_length": 23.0862068966, "max_line_length": 79, "alphanum_fraction": 0.6497386109, "num_tokens": 802}
|
from autoconf import conf
import numba
"""
Depending on whether we're running on a supercomputer, we want two different numba decorators:
If on laptop:
@numba.jit(nopython=True, cache=True, parallel=False)
If on super computer:
@numba.jit(nopython=True, cache=False, parallel=True)
"""
try:
nopython = conf.instance["general"]["numba"]["nopython"]
cache = conf.instance["general"]["numba"]["cache"]
parallel = conf.instance["general"]["numba"]["parallel"]
except Exception:
nopython = True
cache = True
parallel = False
def jit(nopython=nopython, cache=cache, parallel=parallel):
def wrapper(func):
return numba.jit(func, nopython=nopython, cache=cache, parallel=parallel)
return wrapper
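# --- Usage sketch (an assumption, not part of the original module) ---
# The function below is illustrative only; any numeric function can be
# decorated the same way. numba compiles it on first call with the flags
# resolved above.
@jit()
def _example_total(values):
    total = 0.0
    for v in values:
        total += v
    return total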
|
{"hexsha": "2d0a64c5cb29d05754ea444598a6958d21d95e40", "size": 758, "ext": "py", "lang": "Python", "max_stars_repo_path": "autolens/decorator_util.py", "max_stars_repo_name": "rakaar/PyAutoLens", "max_stars_repo_head_hexsha": "bc140c5d196c426092c1178b8abfa492c6fab859", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "autolens/decorator_util.py", "max_issues_repo_name": "rakaar/PyAutoLens", "max_issues_repo_head_hexsha": "bc140c5d196c426092c1178b8abfa492c6fab859", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "autolens/decorator_util.py", "max_forks_repo_name": "rakaar/PyAutoLens", "max_forks_repo_head_hexsha": "bc140c5d196c426092c1178b8abfa492c6fab859", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.4516129032, "max_line_length": 86, "alphanum_fraction": 0.6781002639, "include": true, "reason": "import numba", "num_tokens": 185}
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from numpy import pi
from numpy.random import random
from modules.growth import spawn_curl
from modules.growth import spawn
from numpy import zeros
NMAX = 10**6
SIZE = 800
ONE = 1./SIZE
PROCS = 2
INIT_RAD = 25*ONE
INIT_NUM = 40
STP = ONE*0.4
NEARL = 6*ONE
FARL = 60*ONE
MID = 0.5
LINEWIDTH = 5.*ONE
BACK = [1,1,1,1]
FRONT = [0,0,0,5]
RED = [1,0,0,0.3]
TWOPI = pi*2.
i = 0
def steps(df):
from time import time
from modules.helpers import print_stats
global i
t1 = time()
df.optimize_position(STP)
spawn_curl(df, NEARL)
#spawn(df, NEARL, 0.05)
if df.safe_vertex_positions(3*STP)<0:
print('vertices reached the boundary. stopping.')
return False
t2 = time()
print_stats(i, t2-t1, df)
return True
np_coords = zeros(shape=(NMAX,4), dtype='float')
np_vert_coords = zeros(shape=(NMAX,2), dtype='float')
def main():
from iutils.render import Animate
from differentialLine import DifferentialLine
from modules.show import show_closed
from modules.show import show_detail
from modules.show import show
DF = DifferentialLine(NMAX, FARL*2, NEARL, FARL, PROCS)
angles = sorted(random(INIT_NUM)*TWOPI)
DF.init_circle_segment(MID,MID,INIT_RAD, angles)
def wrap(render):
global i
# animation stops when res is False
res = steps(DF)
## if fn is a path each image will be saved to that path
fn = None
## render outline with marked circles
num = DF.np_get_edges_coordinates(np_coords)
show_detail(render,np_coords[:num,:],fn)
i += 1
return res
render = Animate(SIZE, BACK, FRONT, wrap)
render.start()
if __name__ == '__main__':
main()
|
{"hexsha": "f449fb773b8144122adcf5b18dc8454949b031c5", "size": 1698, "ext": "py", "lang": "Python", "max_stars_repo_path": "generator/main_detail_ani.py", "max_stars_repo_name": "stevejaxon/leonardo-dao-vinci", "max_stars_repo_head_hexsha": "e1074f872ac83a69a70115e5e5e4376ff4462b36", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 695, "max_stars_repo_stars_event_min_datetime": "2015-07-24T00:10:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T10:24:24.000Z", "max_issues_repo_path": "generator/main_detail_ani.py", "max_issues_repo_name": "stevejaxon/leonardo-dao-vinci", "max_issues_repo_head_hexsha": "e1074f872ac83a69a70115e5e5e4376ff4462b36", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2016-08-28T01:29:20.000Z", "max_issues_repo_issues_event_max_datetime": "2018-09-27T19:52:51.000Z", "max_forks_repo_path": "generator/main_detail_ani.py", "max_forks_repo_name": "stevejaxon/leonardo-dao-vinci", "max_forks_repo_head_hexsha": "e1074f872ac83a69a70115e5e5e4376ff4462b36", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 55, "max_forks_repo_forks_event_min_datetime": "2015-07-24T19:24:05.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T22:30:29.000Z", "avg_line_length": 15.5779816514, "max_line_length": 60, "alphanum_fraction": 0.6790341578, "include": true, "reason": "from numpy", "num_tokens": 531}
|
"""
Visualization functions for forest of trees-based ensemble methods for Uplift modeling on Classification
Problem.
"""
from collections import defaultdict
import numpy as np
import pydotplus
def uplift_tree_string(decisionTree, x_names):
'''
Convert the tree to string for print.
Args
----
decisionTree : object
object of DecisionTree class
x_names : list
List of feature names
Returns
-------
A string representation of the tree.
'''
# Column Heading
dcHeadings = {}
for i, szY in enumerate(x_names + ['treatment_group_key']):
szCol = 'Column %d' % i
dcHeadings[szCol] = str(szY)
def toString(decisionTree, indent=''):
if decisionTree.results is not None: # leaf node
return str(decisionTree.results)
else:
szCol = 'Column %s' % decisionTree.col
if szCol in dcHeadings:
szCol = dcHeadings[szCol]
if isinstance(decisionTree.value, int) or isinstance(decisionTree.value, float):
decision = '%s >= %s?' % (szCol, decisionTree.value)
else:
decision = '%s == %s?' % (szCol, decisionTree.value)
trueBranch = indent + 'yes -> ' + toString(decisionTree.trueBranch, indent + '\t\t')
falseBranch = indent + 'no -> ' + toString(decisionTree.falseBranch, indent + '\t\t')
return (decision + '\n' + trueBranch + '\n' + falseBranch)
print(toString(decisionTree))
def uplift_tree_plot(decisionTree, x_names):
'''
Convert the tree to dot graph for plots.
Args
----
decisionTree : object
object of DecisionTree class
x_names : list
List of feature names
Returns
-------
Dot class representing the tree graph.
'''
# Column Heading
dcHeadings = {}
for i, szY in enumerate(x_names + ['treatment_group_key']):
szCol = 'Column %d' % i
dcHeadings[szCol] = str(szY)
dcNodes = defaultdict(list)
"""Plots the obtained decision tree. """
def toString(iSplit, decisionTree, bBranch, szParent="null", indent='', indexParent=0, upliftScores=list()):
if decisionTree.results is not None: # leaf node
lsY = []
for szX, n in decisionTree.results.items():
lsY.append('%s:%.2f' % (szX, n))
dcY = {"name": "%s" % ', '.join(lsY), "parent": szParent}
dcSummary = decisionTree.summary
upliftScores += [dcSummary['matchScore']]
dcNodes[iSplit].append(['leaf', dcY['name'], szParent, bBranch,
str(-round(float(decisionTree.summary['impurity']), 3)), dcSummary['samples'],
dcSummary['group_size'], dcSummary['upliftScore'], dcSummary['matchScore'],
indexParent])
else:
szCol = 'Column %s' % decisionTree.col
if szCol in dcHeadings:
szCol = dcHeadings[szCol]
if isinstance(decisionTree.value, int) or isinstance(decisionTree.value, float):
decision = '%s >= %s' % (szCol, decisionTree.value)
else:
decision = '%s == %s' % (szCol, decisionTree.value)
indexOfLevel = len(dcNodes[iSplit])
toString(iSplit + 1, decisionTree.trueBranch, True, decision, indent + '\t\t', indexOfLevel, upliftScores)
toString(iSplit + 1, decisionTree.falseBranch, False, decision, indent + '\t\t', indexOfLevel, upliftScores)
dcSummary = decisionTree.summary
upliftScores += [dcSummary['matchScore']]
dcNodes[iSplit].append([iSplit + 1, decision, szParent, bBranch,
str(-round(float(decisionTree.summary['impurity']), 3)), dcSummary['samples'],
dcSummary['group_size'], dcSummary['upliftScore'], dcSummary['matchScore'],
indexParent])
upliftScores = list()
toString(0, decisionTree, None, upliftScores=upliftScores)
upliftScoreToColor = dict()
try:
# calculate colors for nodes based on uplifts
minUplift = min(upliftScores)
maxUplift = max(upliftScores)
upliftLevels = [(uplift-minUplift)/(maxUplift-minUplift) for uplift in upliftScores] # min max scaler
baseUplift = float(decisionTree.summary.get('matchScore'))
baseUpliftLevel = (baseUplift - minUplift) / (maxUplift - minUplift) # min max scaler normalization
white = np.array([255., 255., 255.])
blue = np.array([31., 119., 180.])
green = np.array([0., 128., 0.])
for i, upliftLevel in enumerate(upliftLevels):
if upliftLevel >= baseUpliftLevel: # go blue
color = upliftLevel * blue + (1 - upliftLevel) * white
else: # go green
color = (1 - upliftLevel) * green + upliftLevel * white
color = [int(c) for c in color]
upliftScoreToColor[upliftScores[i]] = ('#%2x%2x%2x' % tuple(color)).replace(' ', '0') # color code
except Exception as e:
print(e)
lsDot = ['digraph Tree {',
'node [shape=box, style="filled, rounded", color="black", fontname=helvetica] ;',
'edge [fontname=helvetica] ;'
]
i_node = 0
dcParent = {}
totalSample = int(decisionTree.summary.get('samples')) # initialize the value with the total sample size at root
for nSplit in range(len(dcNodes.items())):
lsY = dcNodes[nSplit]
indexOfLevel = 0
for lsX in lsY:
iSplit, decision, szParent, bBranch, szImpurity, szSamples, szGroup, \
upliftScore, matchScore, indexParent = lsX
sampleProportion = round(int(szSamples)*100./totalSample, 1)
if type(iSplit) is int:
szSplit = '%d-%d' % (iSplit, indexOfLevel)
dcParent[szSplit] = i_node
                lsDot.append('%d [label=<%s<br/> impurity %s<br/> total_sample %s (%s%%)<br/>group_sample %s <br/> '
'uplift score: %s <br/> uplift p_value %s <br/> '
'validation uplift score %s>, fillcolor="%s"] ;' % (
i_node, decision.replace('>=', '≥').replace('?', ''), szImpurity, szSamples,
str(sampleProportion), szGroup, str(upliftScore[0]), str(upliftScore[1]),
str(matchScore), upliftScoreToColor.get(matchScore, '#e5813900')
))
else:
                lsDot.append('%d [label=< impurity %s<br/> total_sample %s (%s%%)<br/>group_sample %s <br/> '
'uplift score: %s <br/> uplift p_value %s <br/> validation uplift score %s <br/> '
'mean %s>, fillcolor="%s"] ;' % (
i_node, szImpurity, szSamples, str(sampleProportion), szGroup, str(upliftScore[0]),
str(upliftScore[1]), str(matchScore), decision,
upliftScoreToColor.get(matchScore, '#e5813900')
))
if szParent != 'null':
if bBranch:
szAngle = '45'
szHeadLabel = 'True'
else:
szAngle = '-45'
szHeadLabel = 'False'
szSplit = '%d-%d' % (nSplit, indexParent)
p_node = dcParent[szSplit]
if nSplit == 1:
lsDot.append('%d -> %d [labeldistance=2.5, labelangle=%s, headlabel="%s"] ;' % (p_node,
i_node, szAngle,
szHeadLabel))
else:
lsDot.append('%d -> %d ;' % (p_node, i_node))
i_node += 1
indexOfLevel += 1
lsDot.append('}')
dot_data = '\n'.join(lsDot)
graph = pydotplus.graph_from_dot_data(dot_data)
return graph
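# --- Usage sketch (an assumption, not part of the original module) ---
# `uplift_model` stands for a fitted uplift tree model and `features` for its
# feature-name list; pydotplus Dot objects can be rendered straight to PNG.
# graph = uplift_tree_plot(uplift_model.fitted_uplift_tree, features)
# graph.write_png('uplift_tree.png')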
|
{"hexsha": "1648c75c30512fd3aef04a6175c668b32fd7d314", "size": 8236, "ext": "py", "lang": "Python", "max_stars_repo_path": "causalml/inference/tree/plot.py", "max_stars_repo_name": "lleiou/causalml", "max_stars_repo_head_hexsha": "2d3cacacad5ed3b0e57b593803a33c61c554f3b2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2919, "max_stars_repo_stars_event_min_datetime": "2019-08-12T23:02:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T21:59:34.000Z", "max_issues_repo_path": "causalml/inference/tree/plot.py", "max_issues_repo_name": "lleiou/causalml", "max_issues_repo_head_hexsha": "2d3cacacad5ed3b0e57b593803a33c61c554f3b2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 317, "max_issues_repo_issues_event_min_datetime": "2019-08-13T14:16:22.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-26T08:44:06.000Z", "max_forks_repo_path": "causalml/inference/tree/plot.py", "max_forks_repo_name": "lleiou/causalml", "max_forks_repo_head_hexsha": "2d3cacacad5ed3b0e57b593803a33c61c554f3b2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 466, "max_forks_repo_forks_event_min_datetime": "2019-08-18T01:45:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T08:11:53.000Z", "avg_line_length": 43.3473684211, "max_line_length": 120, "alphanum_fraction": 0.5330257407, "include": true, "reason": "import numpy", "num_tokens": 1957}
|
import numpy as np
import itertools
import pandas as pd
import re
import json
import matplotlib.pyplot as plt
import scipy.io.wavfile
import librosa
import librosa.display
import IPython.display as ipd
from random import randint
import os
from numpy import random as rd
from pandas.api.types import is_string_dtype, is_numeric_dtype
from collections import defaultdict
from functools import reduce
from sklearn import preprocessing
from scipy.cluster.hierarchy import dendrogram, linkage
from numpy import linalg
from sklearn.cluster import KMeans
from pathlib import Path, PurePath
from tqdm.notebook import tqdm
from sklearn.metrics import silhouette_score
import scipy.spatial.distance as metric
from yellowbrick.cluster import KElbowVisualizer
import subprocess
import math
from sklearn.decomposition import PCA
import sys
import plotly.graph_objects as go
import traceback
from sklearn import metrics
path = '/content/drive/MyDrive/HomeworkIV'
############### QUESTION 1 ###############
def convert_mp3_to_wav(audio:str) -> str:
"""Convert an input MP3 audio track into a WAV file.
Args:
audio (str): An input audio track.
Returns:
[str]: WAV filename.
"""
if audio[-3:] == "mp3":
wav_audio = audio[:-3] + "wav"
if not Path(wav_audio).exists():
subprocess.check_output(f"ffmpeg -i {audio} {wav_audio}", shell=True)
return wav_audio
return audio
def plot_spectrogram_and_picks(track:np.ndarray, sr:int, peaks:np.ndarray, onset_env:np.ndarray) -> None:
"""[summary]
Args:
track (np.ndarray): A track.
sr (int): Aampling rate.
peaks (np.ndarray): Indices of peaks in the track.
onset_env (np.ndarray): Vector containing the onset strength envelope.
"""
HOP_SIZE = 512
times = librosa.frames_to_time(np.arange(len(onset_env)),
sr=sr, hop_length=HOP_SIZE)
plt.figure()
ax = plt.subplot(2, 1, 2)
D = librosa.stft(track)
librosa.display.specshow(librosa.amplitude_to_db(np.abs(D), ref=np.max),
y_axis='log', x_axis='time')
plt.subplot(2, 1, 1, sharex=ax)
plt.plot(times, onset_env, alpha=0.8, label='Onset strength')
plt.vlines(times[peaks], 0,
onset_env.max(), color='r', alpha=0.8,
label='Selected peaks')
plt.legend(frameon=True, framealpha=0.8)
plt.axis('tight')
plt.tight_layout()
plt.show()
def load_audio_picks(audio, duration, hop_size):
"""[summary]
Args:
audio (string, int, pathlib.Path or file-like object): [description]
duration (int): [description]
hop_size (int):
Returns:
tuple: Returns the audio time series (track) and sampling rate (sr), a vector containing the onset strength envelope
(onset_env), and the indices of peaks in track (peaks).
"""
    try:
        track, sr = librosa.load(audio, duration=duration)
        onset_env = librosa.onset.onset_strength(track, sr=sr, hop_length=hop_size)
        peaks = librosa.util.peak_pick(onset_env, 10, 10, 10, 10, 0.5, 0.5)
    except Exception as e:
        print('An error occurred processing ', str(audio))
        print(e)
        raise  # without this, the return below would hit unbound variables
    return track, sr, onset_env, peaks
def preprocessing_converter(N_TRACKS, mp3_tracks):
for track in tqdm(mp3_tracks, total=N_TRACKS):
convert_mp3_to_wav(str(track))
def audio_signals(HOP_SIZE, DURATION, tracks):
for idx, audio in enumerate(tracks):
if idx >= 2:
break
track, sr, onset_env, peaks = load_audio_picks(audio, DURATION, HOP_SIZE)
plot_spectrogram_and_picks(track, sr, peaks, onset_env)
def collect_query(query_tracks):
query_list= []
for q in query_tracks:
query_list.append(str(q))
return query_list
def get_tracks_informations(tracks, DURATION, HOP_SIZE):
    ''' This function returns the titles and peak frequencies of all songs in our dataset '''
titles_tracks = []
peaks_freq = []
for audio in tqdm(tracks):
# find the titles of the songs
titles_tracks.append(re.search('\d+-(.+?).wav', audio).group(1).replace('_',' '))
# use the (given) function to get the peacks and their frequencies
_, _, onset_env, peaks = load_audio_picks(audio, DURATION, HOP_SIZE)
f = [ '%.1f' % elem for elem in onset_env[peaks]]
peaks_freq.append(list(map(float,f)))
return titles_tracks, peaks_freq
def load_json_list(path, file_output_name):
    ''' This function loads and parses a .json file containing
    a list of lists from the given path. '''
f = open(path)
file_output_name = json.load(f)
return file_output_name
def load_json_dict(path, file_output_name):
    ''' This function loads and parses a .json file containing
    a dictionary from the given path. '''
with open(path) as json_file:
file_output_name = json.load(json_file)
return file_output_name
def create_shingles(peaks_freqencies):
    ''' This function creates a list of all unique 'peak frequency' values,
    called shingles. The input peaks_freqencies must be a list of lists. '''
    # define an empty set to collect shingles
    shingles = set()
    # collect every 'peak frequency' that appears in the input array
for sublist in peaks_freqencies:
for item in sublist:
shingles.add(round(item,1))
return np.array(list(shingles))
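# --- Usage sketch (an assumption, not part of the original pipeline) ---
# peaks are rounded to one decimal and deduplicated across all songs
assert set(create_shingles([[1.23, 4.5], [4.5]])) == {1.2, 4.5}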
def create_characteristic_matrix(peaks_freqencies, shingles):
'''This function creates a matrix C that has all shingles values as rows and
all different songs as columns. Its generic value C_ij is equal to one if the j-th
song has the i-th 'peak frequency' among its 'peaks frequencies' and 0 otherwise.
The input peaks_freqencies must be a list of lists'''
# set the shape of the output matrix
n = len(shingles) # number of rows
m = len(peaks_freqencies) # number of columns
# define a matrix of zeros with the correct shape
characteristic_matrix = [ [ 0 for j in range(m) ] for i in range(n) ]
for i in range(n):
for j in range(m):
# set '1' if the j-th song has the i-th 'peak frequency' among its 'peaks frequencies'
if shingles[i] in np.array(peaks_freqencies[j]).round(1):
characteristic_matrix[i][j] = 1
return np.array(characteristic_matrix)
def update_signature_matrix(characteristic_matrix):
    ''' This function returns a list corresponding to one row of the signature matrix
    (one entry per song/column).
    For each column of the input characteristic matrix it records the row index
    of the first value equal to '1'. '''
# create an empty array
perm_row = []
for j in range(len(characteristic_matrix[0])):
for i in range(len(characteristic_matrix)):
            # search for the first value different from 0
if characteristic_matrix[i][j] == 1:
perm_row.append(i)
# for loop must stop when the first 1 has been found
break
return perm_row
def get_perm(M):
    ''' This function returns a random permutation of the integers from 0 to M-1 '''
result = np.arange(M)
return np.random.permutation(result)
def signature_matrix(peaks_freqencies, shingles):
''' This function returns the signature matrix taking in input only the peaks' frequencies.
It calls the function 'create_characteristic_matrix' to compute characteristic matrix'''
characteristic_matrix = create_characteristic_matrix(peaks_freqencies, shingles)
signature_matrix = []
list_permutation = [] # list to memorize index-permutations
    # set 20 as the number of permutations --> number of rows of the signature matrix
for i in range(20):
# get permuted rows' indexes
permutation = get_perm(len(characteristic_matrix))
list_permutation.append(permutation)
# create the permuted characteristic matrix
perm_mat = characteristic_matrix[permutation,:]
# create the row of the signature matrix
perm_row = update_signature_matrix(perm_mat)
# fill the matrix
signature_matrix.append(perm_row)
return signature_matrix, list_permutation
def define_buckets(signature_matrix, r = 4):
    ''' This function takes in input a signature matrix of shape (n_permutations, n_songs)
    and returns a dictionary mapping each 'hash' (band) value to all the songs that fall into that bucket.
    With the default r = 4, the 20-row matrix is split into b = 5 bands
    of r = 4 rows each. '''
# create an empty dictionary
buckets = dict()
for col_idx, col in enumerate(np.transpose(signature_matrix)):
        # each band spans r = 4 rows, so step through the column r rows at a time
for i in range(0, len(col), r):
hash = tuple(col[i:i+r])
# fill the dictionary
if hash in buckets:
buckets[hash].add(str(col_idx))
else:
buckets[hash] = {str(col_idx)}
return(buckets)
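# --- Illustration (an assumption, not part of the original pipeline) ---
# with 20 signature rows and r = 4 each song is hashed into b = 5 bands;
# songs colliding in at least one band become candidate matches
_toy_signature = [[0] * 3 for _ in range(20)]   # 20 permutations, 3 identical songs
assert define_buckets(_toy_signature, r=4) == {(0, 0, 0, 0): {'0', '1', '2'}}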
def get_matches(DURATION, HOP_SIZE, titles_tracks, peaks_freq, queries, r = 4):
    ''' This function takes in input the queries (array), an array of songs' titles and a list of songs' peak frequencies.
    It computes the signature matrix for the tracks and builds a dictionary mapping each bucket to the list of songs
    hashed into it (from the signature matrix).
    Each query is then compared only with the songs that fall into the same buckets as the query. The 'best match' of a query is
    the song that shares a bucket with it most frequently.'''
# call previous functions to get all objects to compute matching
#titles_tracks, peaks_freq = get_tracks_informations(tracks)
shingles = create_shingles(peaks_freq)
signature_mat, permutations = signature_matrix(peaks_freq, shingles)
bucket_dict = define_buckets(signature_mat, r)
matching_dict = dict()
titles_query = []
for query in queries:
# find the 'title' of the query
title_query = str(query).split('/')[-1].split('.')[0]
_, _, onset_env_query, peaks_query = load_audio_picks(query, DURATION, HOP_SIZE)
f = [ '%.1f' % elem for elem in onset_env_query[peaks_query]]
peaks_freq_query = [list(map(float,f))]
bucket_query = []
characteristic_matrix_query = create_characteristic_matrix(peaks_freq_query,shingles)
# Get the signature matrix (vector) for a single query with the
# same permutations of the songs' signature matrix
for perm in permutations:
car_q = characteristic_matrix_query[perm]
bucket_query.append(update_signature_matrix(car_q))
# covert bucket_query (list of lists) into array
b = np.matrix(bucket_query)
bucket_query = list(np.array(b).reshape(-1,))
# create a set of buckets to which that query has been mapped
buckets = set()
for i in range(0, len(bucket_query), r):
buckets.add(tuple(bucket_query[i:i+r]))
        # search for the indexes of the tracks that were mapped into the same buckets as the query
comparing_tracks_index = []
for b in buckets:
if b in bucket_dict:
comparing_buckets = bucket_dict[b]
for track in comparing_buckets:
comparing_tracks_index.append(track)
        # count how often each song shares a bucket with the query
matching_tracks = []
scores = []
unique, counts = np.unique(comparing_tracks_index, return_counts=True)
frequecy_song = dict(zip(unique, counts))
matching = max(frequecy_song, key=frequecy_song.get)
matching_dict[title_query] = titles_tracks[int(matching)]
return matching_dict
def get_tracks_list(tracks):
tracks_list = []
for i in tracks:
tracks_list.append(str(i))
return tracks_list
def save_peaks_freq(peaks_freq):
with open(path+'/peaks_freq.json', 'w') as f:
json.dump(peaks_freq, f)
def save_titles_tracks(titles_tracks):
with open(path+'/titles_tracks.json', 'w') as f:
json.dump(titles_tracks, f)
def save_matching_dict(matching_dict):
with open(path+'/matching_dict.json', 'w') as f:
json.dump(matching_dict, f)
def print_matching(input_track, matches_dict):
track = list(matches_dict.keys())[int(input_track)-1]
fig = go.Figure(data=[go.Table(
columnwidth = 80,
header=dict(values=['MATCHING SONG'],
line_color='silver',
fill_color='silver',
align='center',
font=dict(color='snow', size=12),
height=30),
cells=dict(values=['<b>{}<b>'.format(matches_dict[track])],
line_color='white',
fill = dict(color='dodgerblue'),
font=dict(color='snow', size=24),
height=50)
)])
fig.update_layout(width=1000, height=300)
fig.show()
def handle_q_1():
N_TRACKS = 1413
HOP_SIZE = 512
DURATION = 30 # TODO: to be tuned!
THRESHOLD = 0 # TODO: to be tuned!
data_folder = Path(path + "/mp3s-32k/")
mp3_tracks = data_folder.glob("*/*/*.mp3")
tracks = data_folder.glob("*/*/*.wav")
preprocessing_converter(N_TRACKS, mp3_tracks)
tracks_list = get_tracks_list(tracks)
data_folder_query = Path(path + "/query/")
query_tracks = data_folder_query.glob("*.wav")
query_list = collect_query(query_tracks)
titles_tracks, peaks_freq = get_tracks_informations(tracks_list, DURATION, HOP_SIZE)
save_peaks_freq(peaks_freq)
save_titles_tracks(titles_tracks)
matching_dict = get_matches(DURATION, HOP_SIZE, titles_tracks,peaks_freq, query_list, r = 4)
save_matching_dict(matching_dict)
matches = dict()
matches = load_json_dict(path+'/matching_dict.json', matches)
return matches
############### QUESTION 2 ###############
def remove_NaN_values(dataset):
    ''' Given a dataset, this function fills NaN values: numeric columns get the column mean, text columns get an empty string '''
for col in dataset.columns:
if dataset[col].isnull().any() == True:
if is_numeric_dtype(dataset[col]) == True: #case of numeric column
dataset[col] = dataset[col].fillna(dataset[col].mean())
elif is_string_dtype(dataset[col]) == True: # case of string column
            dataset[col] = dataset[col].fillna("")
def remove_object_col(dataset):
''' This function removes object type columns from the given dataset and returns a cleaned one '''
dataset_cleaned = dataset.select_dtypes(exclude = 'object')
return dataset_cleaned
def scaling_features(dataset):
    ''' This function returns a dataset with all features scaled:
    each feature is standardized by subtracting the mean and dividing by the standard deviation. '''
scaler = preprocessing.StandardScaler()
    # The track_id column (the first one) is excluded from the scaling
dataset_features = pd.DataFrame(scaler.fit_transform(dataset[dataset.columns[1:]].values), columns = dataset.columns[1:])
# Add again track_id
final_df = pd.concat([dataset[dataset.columns[:1]], dataset_features], axis = 1)
return final_df
def number_of_components(pca, ratio = 0.755):
''' This function returns the number of components that explain about 75% (default) of the total variance'''
n_components = len(pca.explained_variance_ratio_)
n = len([np.cumsum(pca.explained_variance_ratio_)[i] for i in range(n_components) if np.cumsum(pca.explained_variance_ratio_)[i] <= ratio])
return(n)
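# --- Usage sketch (an assumption, not part of the original analysis) ---
# `scaled_dataset` stands for the output of scaling_features above.
# _pca = PCA().fit(scaled_dataset[scaled_dataset.columns[1:]].values)
# _n = number_of_components(_pca, ratio = 0.755)   # components explaining ~75% of variance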
def kmeans_scratch(k, ds):
    '''Given the number of clusters k and a dataset ds, this function runs k-means from scratch
    and returns the cluster labels, the distance matrix and the cluster-membership dictionary'''
if isinstance(ds, pd.DataFrame):
values = ds.values # Creating a values array with the values inside ds
else:
values = ds
m = values.shape[1] # Columns
n = values.shape[0] # Rows
    centroids = values[rd.choice(n, size = k, replace=False)] # Randomly initialize the centroids with k distinct data points
    prior_cr = np.zeros((n,k)) # Stores, during the i-th iteration, the centroids of the (i-1)-th iteration
    it = 0 # number of iterations
    # Iterate until the centroids stop changing, up to a maximum of 10 iterations
    while it < 10 and not np.array_equal(centroids, prior_cr):
        prior_cr = centroids.copy() # snapshot the centroids, so the in-place updates below don't alias them
        d = np.zeros((n,k)) # Euclidean distance matrix
cs = defaultdict(list) # Clusters collected into a dict
clusters = [] # Identified cluster
# Euclidean distances from each point to each centroid
for i in range(n):
for j in range(k):
d[i][j] += linalg.norm(values[i]-centroids[j])
        # Assign each element of the dataset to the cluster whose centroid
        # is closest to it.
        clusters.append(np.where(d[i] == min(d[i]))[0][0]+1)
# Dictionary that maps each cluster to the observations that belong to it
cs[clusters[i]].append(i)
for a in range(k):
for j in range(m):
my_values = []
for i in cs[a+1]: # Clusters' labels are 1, 2, ..., K
# Take the values of the observation belonging to the i-th cluster
my_values.append(values[i][j])
# New centroids for each cluster
centroids[a][j] = np.mean(my_values)
it += 1
return clusters, d, cs
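# --- Usage sketch (an assumption, not part of the original analysis) ---
# cluster 60 random 4-dimensional points into k = 2 groups;
# labels returned by kmeans_scratch are 1-based cluster indices
_demo_labels, _demo_d, _demo_cs = kmeans_scratch(2, rd.rand(60, 4))
assert len(_demo_labels) == 60 and set(_demo_labels) <= {1, 2}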
def Elbow_scratch(data, K):
    ''' This function plots a curve describing how the within-cluster variance
    changes with the number of clusters.
    It is used to choose the number of clusters with the elbow method. '''
distortions = []
for n_cluster in tqdm(K):
# Take the distance matrix from kmeans function
clusters, distance_matrix, cs = kmeans_scratch(n_cluster, data)
        # compute the sum of squared distances of each data point from its centroid
        distortions.append(sum(distance_matrix.min(axis=1)**2))
# plot the curve
plt.figure(figsize=(12,5))
plt.xlabel("Numbers of clusters")
plt.ylabel("Distortions")
plt.title('Elbow Curve')
plt.plot(K,distortions, linestyle='-', marker='o', color='tomato', lw = 3, alpha = .7, markersize = 8 )
plt.show()
def gap_stat(data, k):
    ''' This function plots a curve describing how the gap statistic
    changes with the number of clusters.
    It can be used to choose the number of clusters. '''
    # Generate a matrix of Uniform random samples with the same shape as the input data
    randomReference = np.random.random_sample(size=data.shape)
cost_r = []
gaps = []
s_k = []
for n_cluster in tqdm(range(2,k)):
        # run k-means and keep only the distance matrices
        _, distance_matrix_random, _ = kmeans_scratch(n_cluster, randomReference) # for the random data-matrix
        _, distance_matrix, _ = kmeans_scratch(n_cluster, data) # for our data
        # compute the sum of squared distances of each data point from its centroid
        costo = sum(distance_matrix.min(axis=1)**2) # for our data
        costr = sum(distance_matrix_random.min(axis=1)**2) # for the random data-matrix
cost_r.append(costr)
# compute the gap statistic
gap = np.log(np.mean(cost_r)) - np.log(costo)
# keep the value of gap statistic for each value of k
gaps.append(gap)
        # compute the standard deviation of the log reference costs (random part)
        s_k.append(np.std(np.log(cost_r))*np.sqrt(1+(1/len(cost_r))))
# plot the curve
plt.figure(figsize=(12,5))
plt.plot(list(range(2,k)), gaps, linestyle='-', marker='o', color='lightseagreen', lw = 3, alpha = .7, markersize = 6)
plt.xlabel("Numbers of clusters")
plt.ylabel("Gap Stistics")
plt.title('Gap Stistic Curve')
    # Get the best value of k according to the gap statistic
    # By definition k_star is the min {k | G[k] >= G[k+1] - s_k[k+1]}
k_ = []
for z in range(0, k-3):
if (gaps[z] >= gaps[z+1] - s_k[z+1]):
k_.append(z)
k_star = min(k_)
plt.plot(k_star+2, gaps[k_star], marker='o',markerfacecolor="darkcyan", markersize = 10)
plt.show()
return(k_star+2)
def Pivot(echonest, tracks, merged_df, clusters):
pivot = pd.DataFrame() #create a new dataFrame
a = []
#
for i in range(1,5):
pivot.insert(i-1, echonest.columns[i], pd.cut(echonest[echonest.columns[i]], bins = 4,labels=["1", "2", "3","4"])) # pd.cut() function is used to separate the array elements into different bins
#pd.cut will choose the bins to be evenly spaced according to the values themselves
pivot["track_genre_top"] = tracks.track_genre_top
pivot.insert(0,"track_duration", pd.qcut(merged_df[merged_df.columns[7]], q = 4,labels=["1", "2", "3","4"])) #pd.qcut() tries to divide up the underlying data into equal sized bins.
    # aggregate the categories of track_language_code: we keep only four classes (english, french, spanish and other)
for lang in range(len(tracks.track_language_code)):
if tracks.track_language_code[lang] not in ['en','es','fr','']:
tracks.track_language_code[lang] = 'oth'
pivot["track_language"] = tracks.track_language_code
pivot.insert(0, "Clusters", clusters)
return pivot
def PivotDuration(pivot):
#duration
t = np.zeros((4,6))
l = []
for j in range(0,4):
for i in range(0,6):
            # count the pivot-table rows whose track_duration falls in bin j+1 and which belong to cluster i+1
t[j][i] = ((pivot[(pivot.track_duration == str(j+1)) & (pivot.Clusters == i+1)].count()[0]))
for i in range(t.shape[1]):
t[:,i] = np.around((t[:,i]/sum(t)[i])*100,2) #I transform values into percentages
l = []
for i in range(1,7):
l.append("Cluster "+str(i))
track_duration_pivot = pd.DataFrame(t)
track_duration_pivot.columns = l
track_duration_pivot.loc[4] = sum(t)
track_duration_pivot = track_duration_pivot.rename(index={0: 'Low',1: 'Medium-Low',2: 'Medium-High', 3: 'High',4: 'Tot'})
return track_duration_pivot
def pivotAcousticness(pivot):
#acousticness
t = np.zeros((4,6))
l = []
for j in range(0,4):
for i in range(0,6):
            # count the pivot-table rows whose audio_features_acousticness falls in bin j+1 and which belong to cluster i+1
t[j][i] = ((pivot[(pivot.audio_features_acousticness == str(j+1)) & (pivot.Clusters == i+1)].count()[0]))
for i in range(t.shape[1]):
t[:,i] = np.around((t[:,i]/sum(t)[i])*100,2)
l = []
for i in range(1,7):
l.append("Cluster "+str(i))
audio_features_acousticness_pivot = pd.DataFrame(t)
audio_features_acousticness_pivot.columns = l
audio_features_acousticness_pivot.loc[4] = sum(t)
audio_features_acousticness_pivot = audio_features_acousticness_pivot.rename(index={0: 'Low',1: 'Medium-Low',2: 'Medium-High', 3: 'High',4: 'Tot'})
return audio_features_acousticness_pivot
def pivotDanceability(pivot):
#danceability
t = np.zeros((4,6))
l = []
for j in range(0,4):
for i in range(0,6):
            # count the pivot-table rows whose audio_features_danceability falls in bin j+1 and which belong to cluster i+1
t[j][i] = ((pivot[(pivot.audio_features_danceability == str(j+1)) & (pivot.Clusters == i+1)].count()[0]))
for i in range(t.shape[1]):
t[:,i] = np.around((t[:,i]/sum(t)[i])*100,2)
l = []
for i in range(1,7):
l.append("Cluster "+str(i))
audio_features_danceability_pivot = pd.DataFrame(t)
audio_features_danceability_pivot.columns = l
audio_features_danceability_pivot.loc[4] = sum(t)
audio_features_danceability_pivot = audio_features_danceability_pivot.rename(index={0: 'Low',1: 'Medium-Low',2: 'Medium-High', 3: 'High',4: 'Tot'})
return audio_features_danceability_pivot
def pivotEnergy(pivot):
#energy
t = np.zeros((4,6))
l = []
for j in range(0,4):
for i in range(0,6):
            # count the pivot-table rows whose audio_features_energy falls in bin j+1 and which belong to cluster i+1
t[j][i] = ((pivot[(pivot.audio_features_energy == str(j+1)) & (pivot.Clusters == i+1)].count()[0]))
for i in range(t.shape[1]):
t[:,i] = np.around((t[:,i]/sum(t)[i])*100,2)
l = []
for i in range(1,7):
l.append("Cluster "+str(i))
audio_features_energy_pivot = pd.DataFrame(t)
audio_features_energy_pivot.columns = l
audio_features_energy_pivot.loc[4] = sum(t)
audio_features_energy_pivot = audio_features_energy_pivot.rename(index={0: 'Low',1: 'Medium-Low',2: 'Medium-High', 3: 'High',4: 'Tot'})
return audio_features_energy_pivot
def pivotInstrumentalness (pivot):
#instrumentalness
t = np.zeros((4,6))
l = []
for j in range(0,4):
for i in range(0,6):
            # count the pivot-table rows whose audio_features_instrumentalness falls in bin j+1 and which belong to cluster i+1
t[j][i] = ((pivot[(pivot.audio_features_instrumentalness == str(j+1)) & (pivot.Clusters == i+1)].count()[0]))
for i in range(t.shape[1]):
t[:,i] = np.around((t[:,i]/sum(t)[i])*100,2)
l = []
for i in range(1,7):
l.append("Cluster "+str(i))
audio_features_instrumentalness_pivot = pd.DataFrame(t)
audio_features_instrumentalness_pivot.columns = l
audio_features_instrumentalness_pivot.loc[4] = sum(t)
audio_features_instrumentalness_pivot = audio_features_instrumentalness_pivot.rename(index={0: 'Low',1: 'Medium-Low',2: 'Medium-High', 3: 'High',4: 'Tot'})
return audio_features_instrumentalness_pivot
def pivotLanguage(pivot):
#language
t = np.zeros((len(pivot.track_language.unique()),6))
l = []
c = 0
for j in pivot.track_language.unique():
for i in range(0,6):
t[c][i] = ((pivot[(pivot.track_language == j) & (pivot.Clusters == i+1)].count()[0]))
c += 1
for i in range(t.shape[1]):
t[:,i] = np.around((t[:,i]/sum(t)[i])*100,2)
l = []
for i in range(1,7):
l.append("Cluster "+str(i))
track_language_pivot = pd.DataFrame(t[:4])
track_language_pivot.columns = l
track_language_pivot.loc[len(pivot.track_language.unique())] = sum(t)
track_language_pivot = track_language_pivot.rename(index={0: 'English', 1: 'Spanish', 2: 'Other', 3: 'French', 5: 'Total'})
return track_language_pivot
def pivotGenre(pivot):
#genre
t = np.zeros((len(pivot.track_genre_top.unique()),6))
l = []
c = 0
for j in pivot.track_genre_top.unique():
for i in range(0,6):
t[c][i] = ((pivot[(pivot.track_genre_top == j) & (pivot.Clusters == i+1)].count()[0]))
c += 1
for i in range(t.shape[1]):
t[:,i] = np.around((t[:,i]/sum(t)[i])*100,2)
l = []
for i in range(1,7):
l.append("Cluster "+str(i))
track_genre_top_pivot = pd.DataFrame(t)
track_genre_top_pivot = track_genre_top_pivot.drop(2)
track_genre_top_pivot.columns = l
track_genre_top_pivot.loc[len(pivot.track_genre_top.unique())+1] = sum(t)
track_genre_top_pivot = track_genre_top_pivot.rename(index={0: 'Hip-Hop', 1: 'Pop', 3:'Rock', 4: 'Experimental', 5: 'Folk',
6:'Jazz',7: 'Electronic', 8:'Spoken',9:'International', 10:'Soul-RnB',
11: 'BluesCountry', 12:'Classical', 13: 'Old-Time / Historic',
14:'Instrumental', 15:'Easy', 16: 'Listening', 18: 'Tot'})
return track_genre_top_pivot
def SpecialPivot(dataTracksFeatures, cluster):
pivot = pd.DataFrame()
a = []
pivot["track_genre_top_x"] = dataTracksFeatures.track_genre_top_x
pivot.insert(0,"track_duration_x", pd.qcut(dataTracksFeatures[dataTracksFeatures.columns[5]], q = 3,labels=["1", "2", "3"]))
pivot.insert(0, 'track_bit_rate', pd.qcut(dataTracksFeatures[dataTracksFeatures.columns[559]], q = 3,labels=["1", "2", "3"]))
pivot["track_language_code_y"] = dataTracksFeatures.track_language_code_y
pivot["track_location_x"] = dataTracksFeatures.artist_location_x
pivot.insert(0, "Clusters", cluster)
return pivot
def SPecialPivotBitrate(pivot):
    #bit rate
t = np.zeros((4,6))
l = []
for j in range(0,4):
for i in range(0,6):
            # count the pivot-table rows whose track_bit_rate falls in bin j+1 and which belong to cluster i+1
t[j][i] = ((pivot[(pivot.track_bit_rate == str(j+1)) & (pivot.Clusters == i+1)].count()[0]))
for i in range(t.shape[1]):
t[:,i] = np.around((t[:,i]/sum(t)[i])*100,2)
l = []
for i in range(1,7):
l.append("Cluster "+str(i))
special_bitrate = pd.DataFrame(t)
special_bitrate.columns = l
special_bitrate.loc[4] = sum(t)
special_bitrate = special_bitrate.rename(index={0: 'Low',1: 'Medium-Low',2: 'Medium-High', 3: 'High',4: 'Tot'})
return special_bitrate
def SPecialPivotDuration(pivot):
    #duration
t = np.zeros((4,6))
l = []
for j in range(0,4):
for i in range(0,6):
            # count the pivot-table rows whose track_duration_x falls in bin j+1 and which belong to cluster i+1
t[j][i] = ((pivot[(pivot.track_duration_x == str(j+1)) & (pivot.Clusters == i+1)].count()[0]))
for i in range(t.shape[1]):
t[:,i] = np.around((t[:,i]/sum(t)[i])*100,2)
l = []
for i in range(1,7):
l.append("Cluster "+str(i))
special_duration = pd.DataFrame(t)
special_duration.columns = l
special_duration.loc[4] = sum(t)
special_duration = special_duration.rename(index={0: 'Low',1: 'Medium-Low',2: 'Medium-High', 3: 'High',4: 'Tot'})
return special_duration
def SpecialPivotGenre(pivot):
#genre
t = np.zeros((len(pivot.track_genre_top_x.unique()),6))
l = []
c = 0
    for j in pivot.track_genre_top_x.unique():
for i in range(0,6):
t[c][i] = ((pivot[(pivot.track_genre_top_x == j) & (pivot.Clusters == i+1)].count()[0]))
c += 1
for i in range(t.shape[1]):
t[:,i] = np.around((t[:,i]/sum(t)[i])*100,2)
l = []
for i in range(1,7):
l.append("Cluster "+str(i))
track_genre_top_pivot = pd.DataFrame(t)
track_genre_top_pivot = track_genre_top_pivot.drop(2)
track_genre_top_pivot.columns = l
    track_genre_top_pivot.loc[len(pivot.track_genre_top_x.unique())+1] = sum(t)
track_genre_top_pivot = track_genre_top_pivot.rename(index={0: 'Hip-Hop', 1: 'Pop', 3:'Rock', 4: 'Experimental', 5: 'Folk',
6:'Jazz',7: 'Electronic', 8:'Spoken',9:'International', 10:'Soul-RnB',
11: 'BluesCountry', 12:'Classical', 13: 'Old-Time / Historic',
14:'Instrumental', 15:'Easy', 16: 'Listening', 18: 'Tot'})
return track_genre_top_pivot
def SpecialPivotLanguage(pivot):
#language
t = np.zeros((len(pivot.track_language_code_y.unique()),6))
l = []
c = 0
for j in pivot.track_language_code_y.unique():
for i in range(0,6):
t[c][i] = ((pivot[(pivot.track_language_code_y == j) & (pivot.Clusters == i+1)].count()[0]))
c += 1
for i in range(t.shape[1]):
t[:,i] = np.around((t[:,i]/sum(t)[i])*100,2)
l = []
for i in range(1,7):
l.append("Cluster "+str(i))
track_language_pivot = pd.DataFrame(t[:4])
track_language_pivot.columns = l
track_language_pivot.loc[len(pivot.track_language_code_y.unique())] = sum(t)
track_language_pivot = track_language_pivot.rename(index={0: 'English', 1: 'Spanish', 2: 'Other', 3: 'French', 5: 'Total'})
return track_language_pivot
def SpecialPivotLocation(pivot):
#language
t = np.zeros((len(pivot.track_location_x.unique()),6))
l = []
c = 0
for j in pivot.track_location_x.unique():
for i in range(0,6):
t[c][i] = ((pivot[(pivot.track_location_x == j) & (pivot.Clusters == i+1)].count()[0]))
c += 1
for i in range(t.shape[1]):
t[:,i] = np.around((t[:,i]/sum(t)[i])*100,2)
l = []
for i in range(1,7):
l.append("Cluster "+str(i))
location_special = pd.DataFrame(t[:4])
location_special.columns = l
location_special.loc[len(pivot.track_location_x.unique())] = sum(t)
location_special = location_special.rename(index={0: 'Brooklyn, NY', 1: 'France', 2: 'Other', 3: 'New York, NY', 5: 'Total'})
return location_special
############### QUESTION 3 ###############
def find_values_equal_s(A, s):
myPairs = []
    for pair in itertools.combinations(A,2): # generate all possible pairs
        if (not pair in myPairs) and (not tuple(reversed(pair)) in myPairs) and (s == pair[0] + pair[1]): # skip (x,y)/(y,x) duplicates and keep pairs summing to s
myPairs.append(pair)
if len(myPairs) > 0:
print(myPairs)
else:
print("There isn't any pairs whose sum is equal to", s)
|
{"hexsha": "10940bf2a46497aa043bd8de25f05cb06302856a", "size": 34576, "ext": "py", "lang": "Python", "max_stars_repo_path": "functions.py", "max_stars_repo_name": "benedettacandelori/ADM4_group12", "max_stars_repo_head_hexsha": "95a3efe27ec481e1d28a96daef30fd52a9e1419d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "functions.py", "max_issues_repo_name": "benedettacandelori/ADM4_group12", "max_issues_repo_head_hexsha": "95a3efe27ec481e1d28a96daef30fd52a9e1419d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "functions.py", "max_forks_repo_name": "benedettacandelori/ADM4_group12", "max_forks_repo_head_hexsha": "95a3efe27ec481e1d28a96daef30fd52a9e1419d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.2046511628, "max_line_length": 200, "alphanum_fraction": 0.6359613605, "include": true, "reason": "import numpy,from numpy,import scipy,from scipy", "num_tokens": 8806}
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
from torch.utils.cpp_extension import BuildExtension
import numpy
# Get the numpy include directory.
numpy_include_dir = numpy.get_include()
# Extensions
# pykdtree (kd tree)
pykdtree = Extension(
'im2mesh.utils.libkdtree.pykdtree.kdtree',
sources=[
'im2mesh/utils/libkdtree/pykdtree/kdtree.c',
'im2mesh/utils/libkdtree/pykdtree/_kdtree_core.c'
],
language='c',
extra_compile_args=['-std=c99', '-O3', '-fopenmp'],
extra_link_args=['-lgomp'],
)
# mcubes (marching cubes algorithm)
mcubes_module = Extension(
'im2mesh.utils.libmcubes.mcubes',
sources=[
'im2mesh/utils/libmcubes/mcubes.pyx',
'im2mesh/utils/libmcubes/pywrapper.cpp',
'im2mesh/utils/libmcubes/marchingcubes.cpp'
],
language='c++',
extra_compile_args=['-std=c++11'],
include_dirs=[numpy_include_dir]
)
# triangle hash (efficient mesh intersection)
triangle_hash_module = Extension(
'im2mesh.utils.libmesh.triangle_hash',
sources=[
'im2mesh/utils/libmesh/triangle_hash.pyx'
],
libraries=['m'] # Unix-like specific
)
# mise (efficient mesh extraction)
mise_module = Extension(
'im2mesh.utils.libmise.mise',
sources=[
'im2mesh/utils/libmise/mise.pyx'
],
)
# simplify (efficient mesh simplification)
simplify_mesh_module = Extension(
'im2mesh.utils.libsimplify.simplify_mesh',
sources=[
'im2mesh/utils/libsimplify/simplify_mesh.pyx'
]
)
# voxelization (efficient mesh voxelization)
voxelize_module = Extension(
'im2mesh.utils.libvoxelize.voxelize',
sources=[
'im2mesh/utils/libvoxelize/voxelize.pyx'
],
libraries=['m'] # Unix-like specific
)
# Gather all extension modules
ext_modules = [
pykdtree,
mcubes_module,
triangle_hash_module,
mise_module,
simplify_mesh_module,
]
setup(
ext_modules=cythonize(ext_modules),
cmdclass={
'build_ext': BuildExtension
}
)
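# Typical build invocation (an assumption, not part of this file):
#   python setup.py build_ext --inplace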
|
{"hexsha": "d9d21e37c3c740161997946751fd6b3246548c52", "size": 2116, "ext": "py", "lang": "Python", "max_stars_repo_path": "setup.py", "max_stars_repo_name": "ray8828/occupancy_flow", "max_stars_repo_head_hexsha": "09c172262bb151895d450eb323e2383a5c88841c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 146, "max_stars_repo_stars_event_min_datetime": "2019-10-14T14:55:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-17T09:07:15.000Z", "max_issues_repo_path": "setup.py", "max_issues_repo_name": "ray8828/occupancy_flow", "max_issues_repo_head_hexsha": "09c172262bb151895d450eb323e2383a5c88841c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2020-03-23T09:56:05.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-04T05:43:20.000Z", "max_forks_repo_path": "setup.py", "max_forks_repo_name": "ray8828/occupancy_flow", "max_forks_repo_head_hexsha": "09c172262bb151895d450eb323e2383a5c88841c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 21, "max_forks_repo_forks_event_min_datetime": "2019-11-03T12:26:37.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-14T06:39:30.000Z", "avg_line_length": 23.7752808989, "max_line_length": 57, "alphanum_fraction": 0.6975425331, "include": true, "reason": "import numpy", "num_tokens": 548}
|
### CONCRETE TYPE: DIRECT PROX EVALUATION
# prox! is computed using a Cholesky factorization of A'A + I/(lambda*gamma)
# or AA' + I/(lambda*gamma), according to which matrix is smaller.
# The factorization is cached and recomputed whenever gamma changes
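# In closed form, the prox solves the normal equations
#   (A'A + I/(lambda*gamma)) y = A'b + x/(lambda*gamma),
# which is what prox_naive at the bottom of this file does directly. When A
# is fat (m < n), the matrix inversion lemma turns this into the smaller
# m-by-m system
#   y = lambda*gamma*(q - A'((A*A' + I/(lambda*gamma)) \ (A*q))),
# with q = A'b + x/(lambda*gamma), matching the :Fat branch of solve_step!.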
using LinearAlgebra
using SparseArrays
using SuiteSparse
mutable struct LeastSquaresDirect{R <: Real, C <: RealOrComplex{R}, M <: AbstractMatrix{C}, V <: AbstractVector{C}, F <: Factorization} <: LeastSquares
A::M # m-by-n matrix
b::V
Atb::V
lambda::R
gamma::R
shape::Symbol
S::M
res::Vector{C} # m-sized buffer
q::Vector{C} # n-sized buffer
fact::F
function LeastSquaresDirect{R, C, M, V, F}(A::M, b::V, lambda::R) where {R <: Real, C <: RealOrComplex{R}, M <: AbstractMatrix{C}, V <: AbstractVector{C}, F <: Factorization}
if size(A, 1) != length(b)
error("A and b have incompatible dimensions")
end
if lambda <= 0
error("lambda must be positive")
end
m, n = size(A)
if m >= n
S = A'*A
shape = :Tall
else
S = A*A'
shape = :Fat
end
new(A, b, A'*b, lambda, -1, shape, S, zeros(C, m), zeros(C, n))
end
end
function LeastSquaresDirect(A::M, b::V, lambda::R) where {R <: Real, C <: Union{R, Complex{R}}, M <: DenseMatrix{C}, V <: AbstractVector{C}}
LeastSquaresDirect{R, C, M, V, Cholesky{C, M}}(A, b, lambda)
end
function LeastSquaresDirect(A::M, b::V, lambda::R) where {R <: Real, C <: Union{R, Complex{R}}, I <: Integer, M <: SparseMatrixCSC{C, I}, V <: AbstractVector{C}}
LeastSquaresDirect{R, C, M, V, SuiteSparse.CHOLMOD.Factor{C}}(A, b, lambda)
end
# Adjoint/Transpose versions
function LeastSquaresDirect(A::M, b::V, lambda::R) where {R <: Real, C <: Union{R, Complex{R}}, M <: TransposeOrAdjoint{<:DenseMatrix{C}}, V <: AbstractVector{C}}
LeastSquaresDirect(copy(A), b, lambda)
end
function LeastSquaresDirect(A::M, b::V, lambda::R) where {R <: Real, C <: Union{R, Complex{R}}, I <: Integer, M <: TransposeOrAdjoint{<:SparseMatrixCSC{C, I}}, V <: AbstractVector{C}}
LeastSquaresDirect(copy(A), b, lambda)
end
function LeastSquaresDirect(A::M, b::V, lambda::R) where {R <: Real, C <: Union{R, Complex{R}}, M <: AbstractMatrix{C}, V <: AbstractVector{C}}
@warn "Could not infer type of Factorization for $M in LeastSquaresDirect, this type will be type-unstable"
LeastSquaresDirect{R, C, M, V, Factorization}(A, b, lambda)
end
function (f::LeastSquaresDirect)(x::AbstractVector)
mul!(f.res, f.A, x)
f.res .-= f.b
return (f.lambda/2)*norm(f.res, 2)^2
end
function prox!(y::AbstractVector{C}, f::LeastSquaresDirect{R, C, M, V, F}, x::AbstractVector{C}, gamma::R=R(1)) where {R, C, M, V, F}
# if gamma different from f.gamma then call factor_step!
if gamma != f.gamma
factor_step!(f, gamma)
end
solve_step!(y, f, x, gamma)
mul!(f.res, f.A, y)
f.res .-= f.b
return (f.lambda/2)*norm(f.res, 2)^2
end
function factor_step!(f::LeastSquaresDirect{R, C, M, V, F}, gamma::R) where {R, C, M, V, F}
lamgam = f.lambda*gamma
f.fact = cholesky(f.S + I/lamgam)
f.gamma = gamma
end
function factor_step!(f::LeastSquaresDirect{R, C, M, V, F}, gamma::R) where {R, C, M <: SparseMatrixCSC, V, F}
lamgam = f.lambda*gamma
f.fact = ldlt(f.S; shift = R(1)/lamgam)
f.gamma = gamma
end
function solve_step!(y::AbstractVector{C}, f::LeastSquaresDirect{R, C, M, V, F}, x::AbstractVector{C}, gamma::R) where {R, C, M, V, F <: Cholesky{C, M}}
lamgam = f.lambda*gamma
f.q .= f.Atb .+ x./lamgam
# two cases: (1) tall A, (2) fat A
if f.shape == :Tall
# y .= f.fact\f.q
y .= f.q
LAPACK.trtrs!('U', 'C', 'N', f.fact.factors, y)
LAPACK.trtrs!('U', 'N', 'N', f.fact.factors, y)
else # f.shape == :Fat
# y .= lamgam*(f.q - (f.A'*(f.fact\(f.A*f.q))))
mul!(f.res, f.A, f.q)
LAPACK.trtrs!('U', 'C', 'N', f.fact.factors, f.res)
LAPACK.trtrs!('U', 'N', 'N', f.fact.factors, f.res)
mul!(y, adjoint(f.A), f.res)
y .-= f.q
y .*= -lamgam
end
end
function solve_step!(y::AbstractVector{C}, f::LeastSquaresDirect{R, C, M, V, F}, x::AbstractVector{C}, gamma::R) where {R, C, M, V, F}
lamgam = f.lambda*gamma
f.q .= f.Atb .+ x./lamgam
# two cases: (1) tall A, (2) fat A
if f.shape == :Tall
y .= f.fact\f.q
else # f.shape == :Fat
# y .= lamgam*(f.q - (f.A'*(f.fact\(f.A*f.q))))
mul!(f.res, f.A, f.q)
f.res .= f.fact\f.res
mul!(y, adjoint(f.A), f.res)
y .-= f.q
y .*= -lamgam
end
end
function gradient!(y::AbstractVector{C}, f::LeastSquaresDirect{R, C, M, V, F}, x::AbstractVector{C}) where {R, C, M, V, F}
mul!(f.res, f.A, x)
f.res .-= f.b
mul!(y, adjoint(f.A), f.res)
y .*= f.lambda
fy = (f.lambda/2)*dot(f.res, f.res)
end
function prox_naive(f::LeastSquaresDirect{R, C}, x::AbstractVector{C}, gamma::R=R(1)) where {R, C <: RealOrComplex{R}}
lamgam = f.lambda*gamma
y = (f.A'*f.A + I/lamgam)\(f.Atb + x/lamgam)
fy = (f.lambda/2)*norm(f.A*y-f.b)^2
return y, fy
end
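# Hedged usage sketch (the names below are illustrative, not from this file):
#   A, b = randn(50, 200), randn(50)    # fat system, so AA' gets factored
#   f = LeastSquaresDirect(A, b, 1.0)
#   y = zeros(200)
#   fy = prox!(y, f, randn(200), 0.1)   # refactors only when gamma changes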
|
{"hexsha": "a2b3638949dc4d289bcdbf8183d06e1eff7703b8", "size": 5202, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/functions/leastSquaresDirect.jl", "max_stars_repo_name": "UnofficialJuliaMirror/ProximalOperators.jl-a725b495-10eb-56fe-b38b-717eba820537", "max_stars_repo_head_hexsha": "0e77f72cae83cceb27543a7a91af0762fc5f1d88", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/functions/leastSquaresDirect.jl", "max_issues_repo_name": "UnofficialJuliaMirror/ProximalOperators.jl-a725b495-10eb-56fe-b38b-717eba820537", "max_issues_repo_head_hexsha": "0e77f72cae83cceb27543a7a91af0762fc5f1d88", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/functions/leastSquaresDirect.jl", "max_forks_repo_name": "UnofficialJuliaMirror/ProximalOperators.jl-a725b495-10eb-56fe-b38b-717eba820537", "max_forks_repo_head_hexsha": "0e77f72cae83cceb27543a7a91af0762fc5f1d88", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.8936170213, "max_line_length": 183, "alphanum_fraction": 0.5849673203, "num_tokens": 1828}
|
import numpy as np
import matplotlib.pyplot as plt
'''
Equations taken from:
Rahvar, S., Mehrabi, A., & Dominik, M. 2011, MNRAS, 410, 912
'''
# speed of light, c, (au/day)
# orbital radius, a, (au)
# mass of source star, m_star, mass of lens, M (solar masses)
# inclination angle with respect to observer-lens line of sight, φ (radians)
# the time of maximum brightness due to lensing, set at 0 hours
# the Einstein crossing time, the time taken for the source star to cross the black hole's Einstein radius, t_e (hours)
# the smallest angular separation between the source star and the black hole, u_0 (unitless)
# the angular separation between the source star and the black hole in terms of t, u_t (unitless)
# the universal gravitational constant, G (au^3 * day^-2 * solar mass^-1)
# the Schwarzschild radius, the radius of the event horizon of a black hole, R_S (au)
# required inputs: set of time values (t_vals, 100000 values), t_0 (set to 0), φ, M, m_star, a
def rahvar_main(t, t_0, φ, M, m_star, a):
# constants
c = 173.145 # (au/day)
G = 0.000295913010 # au * 1/solar mass * (au/day)^2
# equation 2
R_S = (2 * M * G)/(c ** 2) # au
# equation 17
t_E = ((2 * a)/c) * (np.sqrt(M/(m_star + M))) * 1/24
# equation 19
u_0 = np.sqrt(φ * (a / ((2 * R_S))))
# equation 2
u_t = np.sqrt((u_0 ** 2) + (((t - t_0)/t_E) ** 2))
    # equation 14
Amplitude = (2/np.pi) * ((1 + 1/(u_t ** 2)) * np.arcsin(1/(np.sqrt(1 + 1/(u_t ** 2)))) + 1/u_t)
return Amplitude
def rahvar_init(t_vals, t_0, φ, M, m_star, a): # call to run rahvar_main
    amp_vals = np.zeros(len(t_vals))
additive = 0
for element in t_vals:
amp_vals[additive] = rahvar_main(element, t_0, φ, M, m_star, a)
additive += 1
return amp_vals # list of amplitude values for each given time
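# note: rahvar_main is built from vectorized NumPy operations, so the loop in
# rahvar_init is equivalent to a single array call (a sketch with the same
# arguments assumed):
#   amp_vals = rahvar_main(t_vals, t_0, φ, M, m_star, a)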
# example inputs below
t_0 = 0.0  # hours
φ = 1.59989 * (10.0 ** (-6))  # radians
M = 8.5  # solar mass
m_star = 0.35  # solar mass
a = 17  # au
t_vals = np.linspace(-2.5, 2.5, 100000)  # hours
amp_vals = rahvar_init(t_vals, t_0, φ, M, m_star, a)
plt.figure(1)
plt.plot(t_vals, amp_vals, label="Rahvar")
plt.xlabel("Time [Hours]")
plt.ylabel("Magnification")
plt.show()
|
{"hexsha": "6500259239f3a044b2a508e735a6b95c0aa59ff1", "size": 2213, "ext": "py", "lang": "Python", "max_stars_repo_path": "troia/kartik_eli/rahvar.py", "max_stars_repo_name": "tdaylan/troia", "max_stars_repo_head_hexsha": "55751fbbcab2faddcd157b22b7a127e1afffeeae", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "troia/kartik_eli/rahvar.py", "max_issues_repo_name": "tdaylan/troia", "max_issues_repo_head_hexsha": "55751fbbcab2faddcd157b22b7a127e1afffeeae", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "troia/kartik_eli/rahvar.py", "max_forks_repo_name": "tdaylan/troia", "max_forks_repo_head_hexsha": "55751fbbcab2faddcd157b22b7a127e1afffeeae", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.0126582278, "max_line_length": 119, "alphanum_fraction": 0.6357885224, "include": true, "reason": "import numpy", "num_tokens": 769}
|
from sympy.core.symbol import Symbol
from .input_size import input_size
from .base import call_method_or_dispatch, create_registerer
from sklearn.base import BaseEstimator
def syms_x(estimator):
return [Symbol('x%d' % d) for d in range(input_size(estimator))]
syms_dispatcher = {
BaseEstimator: syms_x,
}
syms = call_method_or_dispatch('syms', syms_dispatcher)
register_syms = create_registerer(syms_dispatcher, 'register_syms')
|
{"hexsha": "102be98ba62583f8b69c26795709ca4702b8899c", "size": 472, "ext": "py", "lang": "Python", "max_stars_repo_path": "sklearntools/sym/syms.py", "max_stars_repo_name": "modusdatascience/sklearntools", "max_stars_repo_head_hexsha": "6cb87edcb501440266622fe4c738be3f9015a859", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-06-13T21:13:07.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-19T20:37:47.000Z", "max_issues_repo_path": "sklearntools/sym/syms.py", "max_issues_repo_name": "jcrudy/sklearntools", "max_issues_repo_head_hexsha": "6cb87edcb501440266622fe4c738be3f9015a859", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sklearntools/sym/syms.py", "max_forks_repo_name": "jcrudy/sklearntools", "max_forks_repo_head_hexsha": "6cb87edcb501440266622fe4c738be3f9015a859", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-12-06T00:28:58.000Z", "max_forks_repo_forks_event_max_datetime": "2019-12-06T00:28:58.000Z", "avg_line_length": 36.3076923077, "max_line_length": 68, "alphanum_fraction": 0.7415254237, "include": true, "reason": "from sympy", "num_tokens": 112}
|
from functools import partial
import haiku as hk
import jax
import jax.numpy as jnp
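# EncoderBlock below is a pre-activation residual block: id_path is the
# identity (or a 1x1 conv when the channel counts differ) and res_path is a
# stack of 3x3 convs. The residual branch is scaled by
# post_gain = 1 / n_layers**2; in the DALL-E d-VAE design this port follows,
# that scaling keeps deep stacks stable at initialization (the rationale is
# an assumption here; the scaling itself is in the code).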
class EncoderBlock(hk.Module):
def __init__(
self,
n_in: int,
n_out: int,
n_layers: int,
name: str = "EncoderBlock"
):
super().__init__(name=name)
n_hid = n_out // 4
self.post_gain = 1 / (n_layers ** 2)
self.id_path = hk.Conv2D(n_out, 1, name="id_path", data_format="NCHW") if n_in != n_out else lambda x: x
with hk.experimental.name_scope("res_path"):
self.res_path = hk.Sequential([
jax.nn.relu,
hk.Conv2D(n_hid, 3, name="conv_1", data_format="NCHW"),
jax.nn.relu,
hk.Conv2D(n_hid, 3, name="conv_2", data_format="NCHW"),
jax.nn.relu,
hk.Conv2D(n_hid, 3, name="conv_3", data_format="NCHW"),
jax.nn.relu,
hk.Conv2D(n_out, 1, name="conv_4", data_format="NCHW")])
def __call__(
self,
x: jnp.ndarray,
) -> jnp.ndarray:
return self.id_path(x) + self.post_gain * self.res_path(x)
class Encoder(hk.Module):
def __init__(
self,
n_hid: int,
n_blk_per_group: int,
input_channels: int,
vocab_size: int,
):
super().__init__()
self.input_channels = input_channels
group_count = 4
blk_range = range(n_blk_per_group)
n_layers = group_count * n_blk_per_group
make_blk = partial(EncoderBlock, n_layers=n_layers)
maxpool = partial(hk.max_pool, window_shape=2, strides=2, channel_axis=1, padding="SAME")
with hk.experimental.name_scope("blocks"):
self.blocks = hk.Sequential([
hk.Conv2D(n_hid, 7, name="input", data_format="NCHW"),
hk.Sequential([
*[make_blk(1 * n_hid, 1 * n_hid, name=f'group_1__block_{i + 1}') for i in blk_range],
maxpool
]),
hk.Sequential([
*[make_blk(1 * n_hid if i == 0 else 2 * n_hid, 2 * n_hid, name=f'group_2__block_{i + 1}') for i in blk_range],
maxpool
]),
hk.Sequential([
*[make_blk(2 * n_hid if i == 0 else 4 * n_hid, 4 * n_hid, name=f'group_3__block_{i + 1}') for i in blk_range],
maxpool
]),
hk.Sequential([
*[make_blk(4 * n_hid if i == 0 else 8 * n_hid, 8 * n_hid, name=f'group_4__block_{i + 1}') for i in blk_range],
]),
hk.Sequential([
jax.nn.relu,
hk.Conv2D(vocab_size, 1, name="output__conv", data_format="NCHW"),
]),
])
def __call__(self, x: jnp.ndarray) -> jnp.ndarray:
if len(x.shape) != 4:
raise ValueError(f'input shape {x.shape} is not 4d')
if x.shape[1] != self.input_channels:
raise ValueError(f'input has {x.shape[1]} channels but model built for {self.input_channels}')
        if x.dtype != jnp.float32:
            raise ValueError('input must have dtype jnp.float32')
return self.blocks(x)
|
{"hexsha": "263dc0dbf9ff53c68f918d1f285f14388557958d", "size": 3272, "ext": "py", "lang": "Python", "max_stars_repo_path": "dall_e_jax/encoder.py", "max_stars_repo_name": "kingoflolz/DALL-E", "max_stars_repo_head_hexsha": "d3f3e9a57a31b1e1cc74a449a9e6e5a0442f0ac7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2021-04-10T15:03:37.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-05T02:49:51.000Z", "max_issues_repo_path": "dall_e_jax/encoder.py", "max_issues_repo_name": "kingoflolz/DALL-E", "max_issues_repo_head_hexsha": "d3f3e9a57a31b1e1cc74a449a9e6e5a0442f0ac7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dall_e_jax/encoder.py", "max_forks_repo_name": "kingoflolz/DALL-E", "max_forks_repo_head_hexsha": "d3f3e9a57a31b1e1cc74a449a9e6e5a0442f0ac7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-10-01T07:47:41.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-01T07:47:41.000Z", "avg_line_length": 36.3555555556, "max_line_length": 130, "alphanum_fraction": 0.5223105134, "include": true, "reason": "import jax", "num_tokens": 841}
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
COCO dataset which returns image_id for evaluation.
Mostly copy-paste from https://github.com/pytorch/vision/blob/13b35ff/references/detection/coco_utils.py
"""
from pathlib import Path
import json
import os
import numpy as np
import torch
import torch.utils.data
import torchvision
from pycocotools import mask as coco_mask
import datasets.transforms as T
class CocoDetection(torchvision.datasets.CocoDetection):
def __init__(self, img_folder, ann_file, transforms, return_masks):
super(CocoDetection, self).__init__(img_folder, ann_file)
self._transforms = transforms
self.prepare = ConvertCocoPolysToMask(return_masks)
def __getitem__(self, idx):
img, target = super(CocoDetection, self).__getitem__(idx)
image_id = self.ids[idx]
target = {'image_id': image_id, 'annotations': target}
img, target = self.prepare(img, target)
if self._transforms is not None:
img, target = self._transforms(img, target)
return img, target
def convert_coco_poly_to_mask(segmentations, height, width):
masks = []
for polygons in segmentations:
rles = coco_mask.frPyObjects(polygons, height, width)
mask = coco_mask.decode(rles)
if len(mask.shape) < 3:
mask = mask[..., None]
mask = torch.as_tensor(mask, dtype=torch.uint8)
mask = mask.any(dim=2)
masks.append(mask)
if masks:
masks = torch.stack(masks, dim=0)
else:
masks = torch.zeros((0, height, width), dtype=torch.uint8)
return masks
class ConvertCocoPolysToMask(object):
def __init__(self, return_masks=False):
self.return_masks = return_masks
def __call__(self, image, target):
w, h = image.size
image_id = target["image_id"]
image_id = torch.tensor([image_id])
anno = target["annotations"]
anno = [obj for obj in anno if 'iscrowd' not in obj or obj['iscrowd'] == 0]
boxes = [obj["bbox"] for obj in anno]
# guard against no boxes via resizing
boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)
boxes[:, 2:] += boxes[:, :2]
boxes[:, 0::2].clamp_(min=0, max=w)
boxes[:, 1::2].clamp_(min=0, max=h)
classes = [obj["category_id"] for obj in anno]
classes = torch.tensor(classes, dtype=torch.int64)
if self.return_masks:
segmentations = [obj["segmentation"] for obj in anno]
masks = convert_coco_poly_to_mask(segmentations, h, w)
keypoints = None
if anno and "keypoints" in anno[0]:
keypoints = [obj["keypoints"] for obj in anno]
keypoints = torch.as_tensor(keypoints, dtype=torch.float32)
num_keypoints = keypoints.shape[0]
if num_keypoints:
keypoints = keypoints.view(num_keypoints, -1, 3)
keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
boxes = boxes[keep]
classes = classes[keep]
if self.return_masks:
masks = masks[keep]
if keypoints is not None:
keypoints = keypoints[keep]
target = {}
target["boxes"] = boxes
target["labels"] = classes
if self.return_masks:
target["masks"] = masks
target["image_id"] = image_id
if keypoints is not None:
target["keypoints"] = keypoints
# for conversion to coco api
area = torch.tensor([obj["area"] for obj in anno])
iscrowd = torch.tensor([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in anno])
target["area"] = area[keep]
target["iscrowd"] = iscrowd[keep]
target["orig_size"] = torch.as_tensor([int(h), int(w)])
target["size"] = torch.as_tensor([int(h), int(w)])
return image, target
def make_coco_transforms(image_set):
normalize = T.Compose([
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
scales = [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800]
if image_set == 'train':
return T.Compose([
T.RandomHorizontalFlip(),
T.RandomSelect(
T.RandomResize(scales, max_size=1333),
T.Compose([
T.RandomResize([400, 500, 600]),
T.RandomSizeCrop(384, 600),
T.RandomResize(scales, max_size=1333),
])
),
normalize,
])
if image_set == 'val':
return T.Compose([
T.RandomResize([800], max_size=1333),
normalize,
])
raise ValueError(f'unknown {image_set}')
def radiate_to_coco(root_dir, folders, rrpn=False):
license_dicts = [{'url':'https://creativecommons.org/licenses/by-nc-sa/4.0/', 'id':1,
'name':'Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License'}]
image_dicts = []
    # For vehicle detection, just a single category
category_dicts = [{'supercategory':'vehicle', 'id':0, 'name':'vehicle'}]
annotation_dicts = []
idd = 0
an_id = 0
folder_size = len(folders)
for folder in folders:
radar_folder = os.path.join(root_dir, folder, 'Navtech_Cartesian')
annotation_path = os.path.join(root_dir,
folder, 'annotations', 'annotations.json')
with open(annotation_path, 'r') as f_annotation:
annotation = json.load(f_annotation)
radar_files = os.listdir(radar_folder)
radar_files.sort()
for frame_number in range(len(radar_files)):
record = {}
objs = []
bb_created = False
idd += 1
filename = os.path.join(
radar_folder, radar_files[frame_number])
if (not os.path.isfile(filename)):
print(filename)
continue
record["license"] = 1
record["file_name"] = filename
record["id"] = idd
record["height"] = 1152
record["width"] = 1152
image_dicts.append(record)
for object in annotation:
if (object['bboxes'][frame_number]):
class_obj = object['class_name']
if (class_obj != 'pedestrian' and class_obj != 'group_of_pedestrians'):
bbox = object['bboxes'][frame_number]['position']
angle = object['bboxes'][frame_number]['rotation']
bb_created = True
if rrpn:
cx = bbox[0] + bbox[2] / 2
cy = bbox[1] + bbox[3] / 2
wid = bbox[2]
hei = bbox[3]
obj = {
"bbox": [cx, cy, wid, hei, angle],
#"bbox_mode": BoxMode.XYWHA_ABS,
"category_id": 0,
"iscrowd": 0,
"area" : wid*hei
}
else:
xmin, ymin, xmax, ymax = gen_boundingbox(
bbox, angle)
obj = {
"bbox": [xmin, ymin, xmax, ymax],
#"bbox_mode": BoxMode.XYXY_ABS,
"category_id": 0,
"iscrowd": 0,
"area": (xmax-xmin)*(ymax-ymin)
}
obj["image_id"] = idd
obj["id"] = an_id
an_id += 1
annotation_dicts.append(obj)
return {"licenses":license_dicts, "images":image_dicts, "annotations":annotation_dicts,
"categories":category_dicts}
def gen_boundingbox(bbox, angle):
theta = np.deg2rad(-angle)
R = np.array([[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]])
points = np.array([[bbox[0], bbox[1]],
[bbox[0] + bbox[2], bbox[1]],
[bbox[0] + bbox[2], bbox[1] + bbox[3]],
[bbox[0], bbox[1] + bbox[3]]]).T
cx = bbox[0] + bbox[2] / 2
cy = bbox[1] + bbox[3] / 2
T = np.array([[cx], [cy]])
points = points - T
points = np.matmul(R, points) + T
points = points.astype(int)
min_x = np.min(points[0, :])
min_y = np.min(points[1, :])
max_x = np.max(points[0, :])
max_y = np.max(points[1, :])
#cast to standard ints to allow for json serialization
return int(min_x), int(min_y), int(max_x), int(max_y)
def build(image_set, args):
root = Path(args.coco_path).absolute()
assert root.exists(), f'provided COCO path {root} does not exist'
if args.dataset_file == 'radiate':
img_folder = root / image_set
folders=[]
#TODO - don't hard code this
if image_set == 'train':
folders = ['city_1_0', 'city_1_1']
else:
folders = ['city_1_3']
#TODO - use distinct test/val sets
folders=['tiny_foggy']
json_dict = radiate_to_coco(img_folder, folders)
#save as a file so we can then read it in
ann_file = img_folder / 'coco_annotations.json'
with open(ann_file, 'w') as outfile:
json.dump(json_dict, outfile)
else:
mode = 'instances'
PATHS = {
"train": (root / "train2017", root / "annotations" / f'{mode}_train2017.json'),
"val": (root / "val2017", root / "annotations" / f'{mode}_val2017.json'),
}
img_folder, ann_file = PATHS[image_set]
dataset = CocoDetection(img_folder, ann_file, transforms=make_coco_transforms(image_set), return_masks=args.masks)
return dataset
|
{"hexsha": "720253ec629410a0cd967e8e81fccee488c898a0", "size": 10033, "ext": "py", "lang": "Python", "max_stars_repo_path": "datasets/coco.py", "max_stars_repo_name": "wdurhamh/detr_radiate", "max_stars_repo_head_hexsha": "2c9d53914816dd15fc4a6d176d5ea013703db7b3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "datasets/coco.py", "max_issues_repo_name": "wdurhamh/detr_radiate", "max_issues_repo_head_hexsha": "2c9d53914816dd15fc4a6d176d5ea013703db7b3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "datasets/coco.py", "max_forks_repo_name": "wdurhamh/detr_radiate", "max_forks_repo_head_hexsha": "2c9d53914816dd15fc4a6d176d5ea013703db7b3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.5780141844, "max_line_length": 118, "alphanum_fraction": 0.5392205721, "include": true, "reason": "import numpy", "num_tokens": 2405}
|
#coding:utf-8
"""
#Author : Arijit Mukherjee
#Date : June 2016
#B.P. Poddar Institute of Management and Technology
#Intelligent Human-Computer Interaction with depth prediction using a normal webcam and IR LEDs
#Inspired by : http://research.microsoft.com/pubs/220845/depth4free_SIGGRAPH.pdf
Demo Application to estimate hand-pose and triggering mouse events by hand gestures
and dynamic gesture recognition 8 DOF using moosegesture
up-left up up-right
7 8 9
left 4 6 right
1 2 3
down-left down down-right
"""
#default python libraries
import time
#Opencv and dependencies
import cv2
import numpy as np
#our libraries
import util as ut
import svm_train as st
import hand_util as hu
#other dependencies
from pymouse import PyMouse
from pykeyboard import PyKeyboard
import moosegesture as mges
#PyMouse the library to control mouse movements from python
m1 = PyMouse()
k1 = PyKeyboard()
#capturing device
cam = int(input("Enter Camera Index : "))
cap=cv2.VideoCapture(cam)
#training the svm
model=st.trainSVM(3,40,'TrainData')
# initializing values
thresh=120
frame_count=0
color=(0,0,255)
res=ut.get_screen_res()
w_screen=int(res['w'])+200
h_screen=int(res['h'])+200
font = cv2.FONT_HERSHEY_SIMPLEX
#loop 1 to calculate the mean threshhold
while(cap.isOpened()):
# for fps calc
t=time.time()
#capturing frame
_,img=cap.read()
#converting frame to grayscale
gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    # setting up the ROI for the hand position
cv2.rectangle(img,(270,165),(370,315),color,3)
#fps calc
fps=int(1/(time.time()-t))
cv2.putText(img,"FPS: "+str(fps),(50,50), font,1,(255,255,255),2,cv2.LINE_AA)
cv2.imshow('Frame',img)
frame_count+=1
#getting input
k = 0xFF & cv2.waitKey(10)
if k==27:
break
if frame_count==80:
color=(0,255,0)
if frame_count==100:
thresh=cv2.mean(gray[165:315,270:370])
thresh=thresh[0]-15
break
# initializing values
pressed=False
mouse_enable=False
event_que=[]
gesFound=0
msg=''
#the main event loop
while(cap.isOpened()):
t=time.time()
l=[]
press_count=0
#grabbing a frame
_,img=cap.read()
#preprocessing
gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret,th1 =cv2.threshold(gray,thresh,255,cv2.THRESH_TOZERO)
cv2.imshow('threshold',th1)
# contour detection and getting the contours with minArea and maxArea
_,contours,hierarchy = cv2.findContours(th1.copy(),cv2.RETR_EXTERNAL, 2)
cnts=ut.getContourBiggerThan(contours,minArea=3000,maxArea=40000)
if len(cnts)==1:
mouse_enable=True
else:
mouse_enable=False
    # processing the contours
for cnt in cnts:
x,y,w,h = cv2.boundingRect(cnt)
#predicting the hand pose
_,resp=ut.getGestureImg(cnt,img,th1,model)
#calculating the centroid of the hand
M = cv2.moments(cnt)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
l.append((cx,cy))
#mark the centroid in the image
cv2.circle(img,(cx,cy),5,[0,255,0],-1)
#get mouse location
mx=int((int(w_screen)/640)*cx)
my=int((int(h_screen)/480)*cy)
        print(mx, my)
#mouse events by hand pose
if int(resp)==1 and mouse_enable:
if pressed:
pressed=False
m1.release(mx,my)
m1.move(mx,my)
if int(resp)==2:
press_count+=1
if int(resp)==2 and mouse_enable:
pressed=True
m1.press(mx,my)
#put the hand pose on the display
cv2.putText(img,resp,(x,y), font,1,(255,255,255),2,cv2.LINE_AA)
if len(l)==2:
if len(event_que)==10:
angle_change=int(event_que[9][1])
dist_change=int(event_que[9][0])
event_que.pop(0)
if abs(dist_change)>0 or abs(angle_change)>30:
msg=str(dist_change)
gesFound=10
if dist_change>200:
k1.tap_key('-',n=2)
#msg+=' Z out'
else:
k1.tap_key(k1.numpad_keys['Add'],n=2)
#msg+=' Z in'
event_que.append((ut.getDist(l[0],l[1]),ut.getSlope(l[0],l[1])))
cv2.putText(img,'que-> '+str(len(event_que))+' '+str(len(l))+' '+str(press_count),(300,50), font,1,(255,255,255),2,cv2.LINE_AA)
if gesFound>0:
cv2.putText(img,msg,(100,100), font,2,(255,255,255),10,cv2.LINE_AA)
gesFound-=1
#fps calc
fps=int(1/(time.time()-t))
cv2.putText(img,"FPS: "+str(fps),(50,50), font,1,(255,255,255),2,cv2.LINE_AA)
cv2.imshow('Frame',img)
#key press events
k = 0xFF & cv2.waitKey(10)
if k == 27:
break
if k == ord('r'):
mouse_enable=not mouse_enable
# release the capture resource and destroy all windows in the end
cap.release()
cv2.destroyAllWindows()
|
{"hexsha": "71d3d3d3284b0c3606c6e23a4830a2c621b083cf", "size": 4440, "ext": "py", "lang": "Python", "max_stars_repo_path": "python-code/opencv-learning/tiny-apps/handgesture/multitouch.py", "max_stars_repo_name": "juxiangwu/image-processing", "max_stars_repo_head_hexsha": "c644ef3386973b2b983c6b6b08f15dc8d52cd39f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2018-09-07T02:29:07.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-18T08:40:09.000Z", "max_issues_repo_path": "python-code/opencv-learning/tiny-apps/handgesture/multitouch.py", "max_issues_repo_name": "juxiangwu/image-processing", "max_issues_repo_head_hexsha": "c644ef3386973b2b983c6b6b08f15dc8d52cd39f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python-code/opencv-learning/tiny-apps/handgesture/multitouch.py", "max_forks_repo_name": "juxiangwu/image-processing", "max_forks_repo_head_hexsha": "c644ef3386973b2b983c6b6b08f15dc8d52cd39f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-06-20T00:09:39.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-15T10:14:36.000Z", "avg_line_length": 21.7647058824, "max_line_length": 128, "alphanum_fraction": 0.6932432432, "include": true, "reason": "import numpy", "num_tokens": 1425}
|
# Composite pattern - Option
import numpy as np
import scipy.stats as si
class Asset:
def price(self, scenario):
raise NotImplementedError("Abstract asset does not have a price")
def volatility(self, scenario):
raise NotImplementedError("Abstract asset does not have a volatility")
@staticmethod
def day(scenario):
return scenario["DAY"]
def clone(self):
raise NotImplementedError("Abstract asset clone not implemented")
def position(self, amount):
return Position(amount, self.clone())
def children(self):
return []
    def descendants(self):
        for x in self.children():
            yield x
            yield from x.descendants()
def __str__(self):
return self.__class__.__name__
class VanillaCallOption(Asset):
def __init__(self, strike, maturity, asset):
self.strike = strike
self.asset = asset
self.maturity = maturity
def clone(self):
# return VanillaCallOption(self.strike, self.maturity, self.asset) # Would not work in subclasses
return self.__class__(self.strike, self.maturity, self.asset)
def children(self):
return [self.asset]
def interest_rate(self, scenario):
return scenario["EONIA"]
def price(self, scenario):
# from https://aaronschlegel.me/black-scholes-formula-python.html
# S: spot price
S = self.asset.price(scenario)
# K: strike price
K = self.strike
# T: time to maturity
T = self.maturity - self.day(scenario)
# r: interest rate
r = self.interest_rate(scenario)
# sigma: volatility of underlying asset
sigma = self.asset.volatility(scenario)
d1 = (np.log(S / K) + (r + 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))
d2 = (np.log(S / K) + (r - 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))
return S * si.norm.cdf(d1, 0.0, 1.0) - K * np.exp(-r * T) * si.norm.cdf(
d2, 0.0, 1.0
)
    def volatility(self, scenario):
        # returns the option's vega (sensitivity of the price to sigma)
        # S: spot price
        S = self.asset.price(scenario)
        # K: strike price
        K = self.strike
        # T: time to maturity
        T = self.maturity - self.day(scenario)
        # r: interest rate
        r = self.interest_rate(scenario)
        # sigma: volatility of underlying asset
        sigma = self.asset.volatility(scenario)
        d1 = (np.log(S / K) + (r + 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))
        # no dividend yield is modeled, so the usual exp(-q*T) factor is 1
        vega = (
            1
            / np.sqrt(2 * np.pi)
            * S
            * np.exp(-(d1 ** 2) * 0.5)
            * np.sqrt(T)
        )
        return vega
class Equity(Asset):
def __init__(self, name):
self.name = name
def call_option(self, strike, maturity):
return VanillaCallOption(strike, maturity, self.clone())
def clone(self):
return Equity(self.name)
def price(self, scenario):
return scenario[self.name]
def volatility(self, scenario):
return scenario[self.name + "-volatility"]
def __str__(self):
return self.name
class Position(Asset):
def __init__(self, amount, asset):
self.amount = amount
self.asset = asset
def clone(self):
return Position(self.amount, self.asset.clone())
def price(self, scenario):
return self.amount * self.asset.price(scenario)
def volatility(self, scenario):
return self.amount * self.asset.volatility(scenario)
def children(self):
yield self.asset
def __str__(self):
return f"{self.amount} {self.asset}"
class Portfolio(Asset):
def __init__(self, assets=None): # Don't use list as a default!
self.assets = assets or []
def with_asset(self, asset): # builder
self.assets.append(asset)
return self
def equity_position(self, amount, name): # builder
return self.with_asset(Equity(name).position(amount))
def clone(self):
return Portfolio([x.clone() for x in self.assets])
def price(self, scenario):
return sum(x.price(scenario) for x in self.assets)
def volatility(self, scenario):
raise NotImplementedError("This is too complicated due to correlations")
def children(self):
return self.assets
class VanillaCallOptionDK(VanillaCallOption):
def interest_rate(self, scenario):
return scenario["DKKLIBOR-1W"]
# Prototypes
NORDEA_TICK = "CPH:NDA-DK"
DANSKE_TICK = "CPH:DANSKE"
NORDEA = Equity(NORDEA_TICK)
DANSKE = Equity(DANSKE_TICK)
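# NORDEA and DANSKE serve as shared prototypes: position() and call_option()
# clone() them first, so portfolios built from these never alias or mutate
# the shared instances.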
if __name__ == "__main__":
portfolio = Portfolio(
[
NORDEA.position(100.0),
DANSKE.position(10.0),
NORDEA.call_option(70.0, 10.0),
]
)
portfolio = (
Portfolio()
.equity_position(100.0, NORDEA_TICK)
.equity_position(10.0, DANSKE_TICK)
.with_asset(NORDEA.call_option(strike=70.0, maturity=10.0))
)
scenario = {
"DAY": 1,
"CPH:NDA-DK": 67.63,
"CPH:DANSKE": 106.1,
"CPH:NDA-DK-volatility": 1,
"EONIA": -0.0049,
"DKKLIBOR-1W": -0.00002,
}
print(portfolio.price(scenario), "DKK")
for x in portfolio.descendants():
print(str(x))
|
{"hexsha": "54dd2d01fa360c45930d777b83609fa205e2f3ad", "size": 5321, "ext": "py", "lang": "Python", "max_stars_repo_path": "portfolio/portfolio5-creation.py", "max_stars_repo_name": "orest-d/design-patterns-finance", "max_stars_repo_head_hexsha": "5878912dfa5b34925b00c38da978e7b9e4735a14", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "portfolio/portfolio5-creation.py", "max_issues_repo_name": "orest-d/design-patterns-finance", "max_issues_repo_head_hexsha": "5878912dfa5b34925b00c38da978e7b9e4735a14", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "portfolio/portfolio5-creation.py", "max_forks_repo_name": "orest-d/design-patterns-finance", "max_forks_repo_head_hexsha": "5878912dfa5b34925b00c38da978e7b9e4735a14", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.0990566038, "max_line_length": 106, "alphanum_fraction": 0.5895508363, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1361}
|
"""
Figure 4K: learning angle between
habituation and recall population vectors.
"""
import pickle
import numpy as np
from scipy.io import savemat
import matplotlib.pyplot as plt
import seaborn as sns
from src.data_utils import get_per_mouse_boutons
from src.population_utils import compute_angle, get_learning_angles
sns.set_palette("colorblind")
sns.set_context("poster")
# Compute angles for both pseudo and fear conditioning
pc_angles = {}
afc_angles = {}
for stim in ['cs1', 'cs2']:
pc_angles[stim] = get_learning_angles(stim, 'Pseudo')
for stim in ['csm', 'csp']:
afc_angles[stim] = get_learning_angles(stim, 'AFC')
# Save data
angles = {'pc': pc_angles, 'afc': afc_angles}
fname = '../data/learning_angles'
print(f"Save data to {fname}.pickle and {fname}.mat")
with open(f'{fname}.pickle', 'wb') as handle:
pickle.dump(angles, handle, protocol=pickle.HIGHEST_PROTOCOL)
savemat(f"{fname}.mat", angles)
# Plot the result
alpha = 0.5
s = 30
# First pseudo-conditioning. We average angles for CS1 and CS2
# since neither is special
for i, resp in enumerate(['exc', 'inh']):
pc_avg = (pc_angles['cs1'][resp]+ pc_angles['cs2'][resp])/2
# Mean across mice
plt.bar(i, pc_avg.mean(), color='gray', alpha=alpha)
# individual mice
plt.scatter([i] * len(pc_avg), pc_avg, s=s, alpha=alpha)
# Now conditioning. Don't average since CS+ is special
# Means across mice
plt.bar(3, afc_angles['csm']['exc'].mean(), color='tab:green', alpha=alpha)
plt.bar(4, afc_angles['csm']['inh'].mean(), color='tab:green', alpha=alpha)
plt.bar(6, afc_angles['csp']['exc'].mean(), color='tab:blue', alpha=alpha)
plt.bar(7, afc_angles['csp']['inh'].mean(), color='tab:blue', alpha=alpha)
# individual mice
plt.scatter([3]* len(afc_angles['csm']['exc']), afc_angles['csm']['exc'], s=s, alpha=alpha)
plt.scatter([4]* len(afc_angles['csm']['inh']), afc_angles['csm']['inh'], s=s, alpha=alpha)
plt.scatter([6]* len(afc_angles['csp']['exc']), afc_angles['csp']['exc'], s=s, alpha=alpha)
plt.scatter([7]* len(afc_angles['csp']['inh']), afc_angles['csp']['inh'], s=s, alpha=alpha)
# Add labels
plt.xticks([0, 1, 3, 4, 6, 7], ['PN', 'NR']*3)
plt.text(0, -50, "CS1/2", fontsize=20)
plt.text(3, -50, "CS-", fontsize=20)
plt.text(6, -50, "CS+", fontsize=20)
plt.ylabel(r"Learning $\theta$ (deg.)")
sns.despine()
plt.tight_layout()
fname = '../figures/learning_angle.pdf'
print(f"Save to {fname}")
plt.savefig(fname, dpi=300)
|
{"hexsha": "a37ff2185fe9c0c8b164ae82647368d9d5e4b2dd", "size": 2432, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/learning_angle.py", "max_stars_repo_name": "sprekelerlab/long-range-inhibition", "max_stars_repo_head_hexsha": "61aa94ee853e666304b1ac544cb300528eb3f591", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/learning_angle.py", "max_issues_repo_name": "sprekelerlab/long-range-inhibition", "max_issues_repo_head_hexsha": "61aa94ee853e666304b1ac544cb300528eb3f591", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/learning_angle.py", "max_forks_repo_name": "sprekelerlab/long-range-inhibition", "max_forks_repo_head_hexsha": "61aa94ee853e666304b1ac544cb300528eb3f591", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.3150684932, "max_line_length": 91, "alphanum_fraction": 0.6854440789, "include": true, "reason": "import numpy,from scipy", "num_tokens": 737}
|
#include <boost/type_traits/is_polymorphic.hpp>
|
{"hexsha": "25c0e229a4bcef4e15c16a1ec10ffa3acb1a39b9", "size": 48, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_type_traits_is_polymorphic.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_type_traits_is_polymorphic.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_type_traits_is_polymorphic.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 24.0, "max_line_length": 47, "alphanum_fraction": 0.8333333333, "num_tokens": 11}
|
import re
import torch
import jpegio
import shutil
import numpy as np
from pathlib import Path
from functools import partial
from argus import load_model
from src.ema import ModelEma
from src import config
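# deep_chunk recursively splits a nested batch into `chunks` pieces along
# `dim`: tensors are chunked, tuples/lists/dicts are rebuilt element-wise,
# and any other leaf is replicated into every chunk. A hedged sketch:
#   deep_chunk({'x': torch.zeros(8, 3)}, chunks=2)
#   # -> [{'x': <4x3 tensor>}, {'x': <4x3 tensor>}]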
def deep_chunk(input, chunks, dim=0):
partial_deep_chunk = partial(deep_chunk, chunks=chunks, dim=dim)
if isinstance(input, torch.Tensor):
return torch.chunk(input, chunks, dim=dim)
if isinstance(input, tuple) and len(input) > 0:
return list(zip(*map(partial_deep_chunk, input)))
if isinstance(input, list) and len(input) > 0:
return list(map(list, zip(*map(partial_deep_chunk, input))))
if isinstance(input, dict) and len(input) > 0:
return list(map(type(input), zip(*map(partial_deep_chunk, input.items()))))
return [input for _ in range(chunks)]
def target2altered(probs):
altered = probs[:, config.altered_targets]
if isinstance(altered, torch.Tensor):
altered = torch.sum(altered, dim=1)
else:
altered = np.sum(altered, axis=1)
return altered
def initialize_amp(model,
opt_level='O1',
keep_batchnorm_fp32=None,
loss_scale='dynamic'):
from apex import amp
model.nn_module, model.optimizer = amp.initialize(
model.nn_module, model.optimizer,
opt_level=opt_level,
keep_batchnorm_fp32=keep_batchnorm_fp32,
loss_scale=loss_scale
)
model.amp = amp
def initialize_ema(model, decay=0.9999, device='', resume=''):
model.model_ema = ModelEma(model.nn_module,
decay=decay,
device=device,
resume=resume)
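# get_image_quality infers the JPEG quality factor from the first entry of
# the quantization table; the 2/3/8 -> 95/90/75 mapping matches the three
# quality factors used in the ALASKA2 images (an assumption based on this
# repository's target dataset).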
def get_image_quality(image_path):
jpeg = jpegio.read(str(image_path))
first_element = jpeg.quant_tables[0][0, 0]
if first_element == 2:
return 95
elif first_element == 3:
return 90
elif first_element == 8:
return 75
else:
raise Exception(f"Unknown image quality, quant tables: {jpeg.quant_tables}")
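# get_best_model_path scans *.pth checkpoints whose filenames end with the
# validation score and returns the best one; e.g. a hypothetical
# 'model-012-0.9321.pth' parses to a score of 0.9321.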
def get_best_model_path(dir_path, return_score=False):
dir_path = Path(dir_path)
model_scores = []
for model_path in dir_path.glob('*.pth'):
score = re.search(r'-(\d+(?:\.\d+)?).pth', str(model_path))
if score is not None:
score = float(score.group(0)[1:-4])
model_scores.append((model_path, score))
if not model_scores:
return None
model_score = sorted(model_scores, key=lambda x: x[1])
best_model_path = model_score[-1][0]
if return_score:
best_score = model_score[-1][1]
return best_model_path, best_score
else:
return best_model_path
def load_pretrain_weigths(model, pretrain_path):
pretrain_model = load_model(pretrain_path, device=model.device)
nn_state_dict = pretrain_model.get_nn_module().state_dict()
model.get_nn_module().load_state_dict(nn_state_dict)
return model
def check_dir_not_exist(dir_path, remove=False):
dir_path = Path(dir_path)
if dir_path.exists():
if remove:
shutil.rmtree(dir_path)
print(f"Folder '{dir_path}' removed")
else:
response = input(f"Remove '{dir_path}' (y/n)? ")
if response.lower() == 'y':
shutil.rmtree(dir_path)
print(f"Folder '{dir_path}' removed")
else:
return False
return True
|
{"hexsha": "bc76ff0a7092d5b657f19a89624f08bea7aba32c", "size": 3448, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/utils.py", "max_stars_repo_name": "lRomul/argus-alaska", "max_stars_repo_head_hexsha": "f45dca1781b4a5f1336ebf826e3102ad5a6c0aeb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-07-22T07:37:34.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-29T18:27:25.000Z", "max_issues_repo_path": "src/utils.py", "max_issues_repo_name": "lRomul/argus-alaska", "max_issues_repo_head_hexsha": "f45dca1781b4a5f1336ebf826e3102ad5a6c0aeb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/utils.py", "max_forks_repo_name": "lRomul/argus-alaska", "max_forks_repo_head_hexsha": "f45dca1781b4a5f1336ebf826e3102ad5a6c0aeb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.5132743363, "max_line_length": 84, "alphanum_fraction": 0.6337006961, "include": true, "reason": "import numpy", "num_tokens": 809}
|
import io
import pathlib
import re
from dataclasses import dataclass
from functools import singledispatch
from typing import Tuple, Any
import ipywidgets as widgets
import numpy as np
from PIL import Image, ImageEnhance, ImageOps
URL_REGEX = re.compile(
r"^(http:\/\/www\.|https:\/\/www\.|http:\/\/|https:\/\/)?"
+ r"[a-z0-9]+([\-\.]{1}[a-z0-9]+)*\.[a-z]{2,5}(:[0-9]{1,5})"
+ r"?(\/.*)?$"
)
@dataclass
class URL:
value: str
def __bool__(self):
return bool(URL_REGEX.match(self.value))
def pil_to_widget(image: Image.Image) -> widgets.Image:
buffer = io.BytesIO()
image.convert("RGB").save(buffer, "JPEG")
buffer.seek(0)
return widgets.Image(value=buffer.read(), format="jpg")
def widget_to_pil(image: widgets.Image):
return Image.open(io.BytesIO(image.value))
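# fit_image letterboxes an image into `size`: scale by the limiting ratio,
# centre on a white canvas, and return both the padded image and the
# (x, y, width, height) box that the original occupies inside it.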
def fit_image(
img: Image.Image, size
) -> Tuple[Image.Image, Tuple[int, int, int, int]]:
img_width, img_height = img.size
desired_width, desired_height = size
ratio = min(desired_width / img_width, desired_height / img_height)
img = ImageOps.scale(img, ratio)
width, height = img.size
x, y = ((desired_width - width) // 2, (desired_height - height) // 2)
border = (x, y, desired_width - x - width, desired_height - y - height)
img = ImageOps.expand(img, border=border, fill="white")
return img, (x, y, width, height)
def adjust(
img: Image.Image, contrast_factor: float, brightness_factor: float
) -> Image.Image:
img = ImageEnhance.Contrast(img).enhance(contrast_factor)
img = ImageEnhance.Brightness(img).enhance(brightness_factor)
return img
@singledispatch
def load_img(img: Any):
"""
Load an image, whether it's from a URL, a file, an array, or an already
in-memory image.
"""
raise ValueError(f"Can not load object of type {type(img)} as image.")
@load_img.register(widgets.Image)
def _img_already_widget(img: widgets.Image):
return img
@load_img.register(bytes)
def _img_already_loaded(img: bytes):
return widgets.Image(value=img)
@load_img.register(pathlib.Path)
def _load_img_path(img: pathlib.Path):
"""Read image from file"""
return load_img(img.read_bytes())
@load_img.register(str)
def _load_img_string(img: str):
"""Read image from file or from URL"""
img_path = pathlib.Path(img)
if img_path.is_file():
return load_img(img_path)
img_url = URL(img)
if img_url:
return load_img(img_url)
raise ValueError(f"{img} is neither an existing path nor a valid URL.")
@load_img.register(URL)
def _load_img_url(img: URL):
import requests # noqa: F401
response = requests.get(img.value)
response.raise_for_status()
return load_img(response.content)
@load_img.register(np.ndarray)
def _load_img_ndarray(img: np.ndarray):
"""create image from array"""
img = Image.fromarray(img.astype(np.uint8))
return load_img(img)
@load_img.register(Image.Image)
def _load_img_pillow(img: Image.Image):
"""Encode image as bytes"""
image_io = io.BytesIO()
img.save(image_io, "JPEG")
return load_img(image_io.getvalue())
|
{"hexsha": "a39f30372f5fa5465f5b1a25d9c05f0b8fd2338d", "size": 3127, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/ipyannotations/images/canvases/image_utils.py", "max_stars_repo_name": "tabaspki/ipyannotations", "max_stars_repo_head_hexsha": "8253d3a0abcd5644d6e5a0c5b04557ec7f50ba4c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/ipyannotations/images/canvases/image_utils.py", "max_issues_repo_name": "tabaspki/ipyannotations", "max_issues_repo_head_hexsha": "8253d3a0abcd5644d6e5a0c5b04557ec7f50ba4c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/ipyannotations/images/canvases/image_utils.py", "max_forks_repo_name": "tabaspki/ipyannotations", "max_forks_repo_head_hexsha": "8253d3a0abcd5644d6e5a0c5b04557ec7f50ba4c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.016, "max_line_length": 75, "alphanum_fraction": 0.680204669, "include": true, "reason": "import numpy", "num_tokens": 814}
|
SUBROUTINE WRITCA(LUNXX,MSGT,MSGL)
C$$$ SUBPROGRAM DOCUMENTATION BLOCK
C
C SUBPROGRAM: WRITCA
C PRGMMR: J. ATOR ORG: NP12 DATE: 2004-08-18
C
C ABSTRACT: THIS SUBROUTINE IS CONSIDERED OBSOLETE AND MAY BE REMOVED
C FROM THE BUFR ARCHIVE LIBRARY IN A FUTURE VERSION. IT NOW SIMPLY
C CALLS BUFR ARCHIVE LIBRARY SUBROUTINE CMPMSG TO TOGGLE ON MESSAGE
C COMPRESSION, FOLLOWED BY A CALL TO WRITSA (SEE WRITSA DOCBLOCK).
C THIS SUBROUTINE USES THE SAME INPUT AND OUTPUT PARAMETERS AS WRITSA.
C
C PROGRAM HISTORY LOG:
C 2004-08-18 J. ATOR -- ORIGINAL AUTHOR; BASED UPON WRITSA
C 2005-03-09 J. ATOR -- MARKED AS OBSOLETE AND ADDED PRINT
C NOTIFICATION
C DART $Id$
C
C USAGE: CALL WRITCA (LUNXX, MSGT, MSGL)
C INPUT ARGUMENT LIST:
C LUNXX - INTEGER: ABSOLUTE VALUE IS FORTRAN LOGICAL UNIT NUMBER
C FOR BUFR FILE {IF LUNXX IS LESS THAN ZERO, THEN ANY
C CURRENT MESSAGE IN MEMORY WILL BE FORCIBLY FLUSHED TO
C ABS(LUNXX) AND TO ARRAY MSGT}
C
C OUTPUT ARGUMENT LIST:
C MSGT - INTEGER: *-WORD PACKED BINARY ARRAY CONTAINING BUFR
C MESSAGE (FIRST MSGL WORDS FILLED)
C MSGL - INTEGER: NUMBER OF WORDS FILLED IN MSGT
C 0 = no message was returned
C
C REMARKS:
C THIS ROUTINE CALLS: CMPMSG WRITSA
C THIS ROUTINE IS CALLED BY: None
C Normally called only by application
C programs.
C
C ATTRIBUTES:
C LANGUAGE: FORTRAN 77
C MACHINE: PORTABLE TO ALL PLATFORMS
C
C$$$
COMMON /QUIET / IPRT
DATA IFIRST/0/
SAVE IFIRST
C-----------------------------------------------------------------------
C-----------------------------------------------------------------------
IF(IFIRST.EQ.0) THEN
IF(IPRT.GE.0) THEN
PRINT*
PRINT*,'+++++++++++++++++BUFR ARCHIVE LIBRARY++++++++++++++++++++'
PRINT 101
101 FORMAT(' BUFRLIB: WRITCA - THIS SUBROUTINE IS NOW OBSOLETE; ',
. 'USE SUBROUTINES CMPMSG AND WRITSA INSTEAD')
PRINT*,'+++++++++++++++++BUFR ARCHIVE LIBRARY++++++++++++++++++++'
PRINT*
ENDIF
IFIRST = 1
ENDIF
CALL CMPMSG('Y')
CALL WRITSA(LUNXX,MSGT,MSGL)
RETURN
END
|
{"hexsha": "c205ba9b1f4e31d528c5cde91a468c76d1cfb15b", "size": 2320, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "observations/obs_converters/NCEP/prep_bufr/lib/writca.f", "max_stars_repo_name": "hkershaw-brown/feature-preprocess", "max_stars_repo_head_hexsha": "fe2bd77b38c63fa0566c83ebc4d2fac1623aef66", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 65, "max_stars_repo_stars_event_min_datetime": "2019-10-16T13:31:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-14T11:52:58.000Z", "max_issues_repo_path": "observations/obs_converters/NCEP/prep_bufr/lib/writca.f", "max_issues_repo_name": "hkershaw-brown/feature-preprocess", "max_issues_repo_head_hexsha": "fe2bd77b38c63fa0566c83ebc4d2fac1623aef66", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 283, "max_issues_repo_issues_event_min_datetime": "2019-09-23T15:48:34.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T21:44:41.000Z", "max_forks_repo_path": "observations/obs_converters/NCEP/prep_bufr/lib/writca.f", "max_forks_repo_name": "hkershaw-brown/feature-preprocess", "max_forks_repo_head_hexsha": "fe2bd77b38c63fa0566c83ebc4d2fac1623aef66", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 67, "max_forks_repo_forks_event_min_datetime": "2019-09-19T22:13:24.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T15:58:26.000Z", "avg_line_length": 31.7808219178, "max_line_length": 72, "alphanum_fraction": 0.5672413793, "num_tokens": 664}
|
module AddIntegersF90
using CxxInterface
const libAddIntegersF90 = joinpath(pwd(), "libAddIntegersF90")
eval(f90setup())
eval(f90newfile("AddIntegersF90.f90", ""))
eval(f90function(FnName(:add_int, "add_int", libAddIntegersF90), FnResult(Cint, "integer", Int, expr -> :(convert(Int, $expr))),
[FnArg(:x, Ref{Cint}, "x", "integer", Integer, identity), FnArg(:y, Ref{Cint}, "y", "integer", Integer, identity)],
"add_int = x + y"))
end
################################################################################
# Extract Fortran code
using CxxInterface
using .AddIntegersF90
AddIntegersF90.f90_write_code!()
################################################################################
# Compile Fortran code
# (This fails if there is no Fortran compiler available)
# macOS does not have a Fortran compiler installed by default
if !Sys.isapple()
using Libdl: dlext
run(`gfortran -fPIC -c AddIntegersF90.f90`)
run(`gfortran -shared -o libAddIntegersF90.$dlext AddIntegersF90.o`)
# Please, DO NOT call a Fortran compiler manually in your own Julia
# packages. This works only in very controlled environments such as on
# CI infrastructure. If you do, your package will be fragile, and will
# create lots of headaches for your users in the wild. Instead, use
# [BinaryBuilder](https://binarybuilder.org) and store your build
# recipes on [Yggdrasil](https://github.com/JuliaPackaging/Yggdrasil).
################################################################################
# Call the wrapped function
# (This fails if the Fortran compiler is not compatible with the
# currently running Julia executable)
# Only test cases where the Github CI environment supports this (:i686 is not supported)
if Sys.ARCH ≡ :x86_64
@test AddIntegersF90.add_int(2, 3) == 5
end
end
|
{"hexsha": "34345b5f37256f7e3db685266321aa3be3baea59", "size": 1884, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/test-f90.jl", "max_stars_repo_name": "jw3126/CxxInterface.jl", "max_stars_repo_head_hexsha": "4b69da8d7e3497c10d5029c8f0c13ee81019ea13", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 28, "max_stars_repo_stars_event_min_datetime": "2021-12-20T08:02:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-17T02:43:15.000Z", "max_issues_repo_path": "test/test-f90.jl", "max_issues_repo_name": "jw3126/CxxInterface.jl", "max_issues_repo_head_hexsha": "4b69da8d7e3497c10d5029c8f0c13ee81019ea13", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-12-25T17:12:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-17T17:27:02.000Z", "max_forks_repo_path": "test/test-f90.jl", "max_forks_repo_name": "jw3126/CxxInterface.jl", "max_forks_repo_head_hexsha": "4b69da8d7e3497c10d5029c8f0c13ee81019ea13", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-31T22:18:04.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-31T22:18:04.000Z", "avg_line_length": 40.9565217391, "max_line_length": 132, "alphanum_fraction": 0.6157112527, "num_tokens": 470}
|
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
from datasets import load_dataset, load_metric
import transformers
from transformers.trainer_utils import get_last_checkpoint, is_main_process
from transformers.utils import check_min_version
import copy
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn import Linear, Module, Parameter
from torch.optim import Adam
from torch.utils.data import ConcatDataset, DataLoader
from tqdm.auto import tqdm
import math
import wandb
logger = logging.getLogger(__name__)
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizerFast,
Trainer,
TrainingArguments,
default_data_collator,
is_datasets_available,
is_torch_tpu_available,
set_seed,
)
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_utils import PredictionOutput, is_main_process
from sparseml.pytorch.optim.manager import ScheduledModifierManager
from sparseml.pytorch.optim.optimizer import ScheduledOptimizer
from sparseml.pytorch.utils import ModuleExporter
from distill_trainer import DistillRankingTrainer
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class
into argparse arguments to be able to specify them on
the command line.
"""
max_seq_length: int = field(
default=128,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": "Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
},
)
train_file: Optional[str] = field(
default='data/train.json', metadata={"help": "A csv or a json file containing the training data."}
)
validation_file: Optional[str] = field(
default='data/evaluation.json', metadata={"help": "A json file containing the validation data."}
)
nm_prune_config: Optional[str] = field(
default='recipes/base.yaml', metadata={"help": "The input file name for the Neural Magic pruning config"}
)
max_train_samples: Optional[int] = field(
default=800000,
metadata={
"help": "Since the MSMARCO Dataset is 79551622 items we subsample to ~1% as after that we do not see improvment in MRR"
},
)
    do_onnx_export: bool = field(
        default=False, metadata={"help": "Whether to export the trained student model to ONNX"}
    )
    onnx_export_path: Optional[str] = field(
        default='onnx-export', metadata={"help": "The directory path where the exported ONNX model will be written"}
    )
layers_to_keep: int = field(
        default=12, metadata={"help": "Number of transformer encoder layers to keep in the student model"}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
teacher_model_name_or_path: Optional[str] = field(
default=None, metadata={"help": "Teacher model which needs to be a trained sequence classification model"}
)
student_model_name_or_path: Optional[str] = field(
default="bert-base-uncased", metadata={"help": "Student model"}
)
temperature: Optional[float] = field(
default=1.0, metadata={"help": "Temperature applied to teacher softmax for distillation."}
)
distill_hardness: Optional[float] = field(
default=1.0, metadata={"help": "Proportion of loss coming from teacher model."}
)
config_name: Optional[str] = field(
default='bert-base-uncased', metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default='bert-base-uncased', metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default='cache',
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
def load_ranking(filename, collection, queries):
    """Load a TSV ranking of (qid, docid) pairs into a query -> documents map."""
    qid2documents = {}
    with open(filename, 'r') as f:
        for l in f:
            l = l.strip().split('\t')
            # Cast ids to int to match the integer keys produced by
            # load_qid2query (assumes `collection` is keyed the same way).
            query = queries[int(l[0])]
            document = collection[int(l[1])]
            if query not in qid2documents:
                qid2documents[query] = []
            qid2documents[query].append(document)
    return qid2documents
def load_qid2query(filename):
qid2query = {}
with open(filename,'r') as f:
for l in f:
l = l.strip().split('\t')
qid2query[int(l[0])] = l[1]
return qid2query
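# Expected MS MARCO-style TSV layout (tab-separated ids; the file names and
# the sample row below are illustrative, not from the original repo):
#   queries.tsv:  1048585<TAB>what is paula deen's brother
#   ranking.tsv:  1048585<TAB>7187158
# queries = load_qid2query('queries.tsv')
# collection = load_qid2query('collection.tsv')
# qid2documents = load_ranking('ranking.tsv', collection, queries)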
def load_optimizer(model, args):
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in model.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": args.weight_decay,
},
{
"params": [
p
for n, p in model.named_parameters()
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
optimizer_cls = AdamW
optimizer_kwargs = {
"betas": (args.adam_beta1, args.adam_beta2),
"eps": args.adam_epsilon,
}
optimizer_kwargs["lr"] = args.learning_rate
return optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
def drop_layers(model, layers_to_keep):
layer_drop_matching = {
1:[0],
3:[0,5,11],
6:[0,2,4,6,8,11],
9:[0,2,3,4,5,7,8,9,11],
12:[0,1,2,3,4,5,6,7,8,9,10,11],
}
    encoder_layers = model.bert.encoder.layer  # assumes a BERT-style model; adjust the attribute path for other architectures
assert layers_to_keep <= len(encoder_layers)
assert layers_to_keep in layer_drop_matching.keys()
trimmed_encoder_layers = nn.ModuleList()
for i in layer_drop_matching[layers_to_keep]:
trimmed_encoder_layers.append(encoder_layers[i])
trimmed_model = copy.deepcopy(model)
trimmed_model.bert.encoder.layer = trimmed_encoder_layers
return trimmed_model
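# Minimal usage sketch (model name illustrative): keep 6 of 12 BERT layers.
#   model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
#   small = drop_layers(model, layers_to_keep=6)
#   assert len(small.bert.encoder.layer) == 6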
def main():
wandb.init(project='PruneMSMARCO', entity='spacemanidol')
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bit training: {training_args.fp16}"
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f"Training/evaluation parameters {training_args}")
set_seed(training_args.seed)
data_files = {"train": data_args.train_file, "validation": data_args.validation_file}
for key in data_files.keys():
logger.info(f"load a local file for {key}: {data_files[key]}")
datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
label_list = datasets["train"].unique("label")
label_list.sort()
num_labels = len(label_list)
config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.student_model_name_or_path,
num_labels=num_labels,
finetuning_task='msmarco-triples',
cache_dir=model_args.cache_dir,
)
tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.student_model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
)
student_model = AutoModelForSequenceClassification.from_pretrained(
model_args.student_model_name_or_path,
from_tf=bool(".ckpt" in model_args.student_model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
if data_args.layers_to_keep < len(student_model.bert.encoder.layer):
logger.info("Keeping %s model layers", data_args.layers_to_keep)
student_model = drop_layers(student_model, data_args.layers_to_keep)
    if model_args.teacher_model_name_or_path is not None:
teacher_model = AutoModelForSequenceClassification.from_pretrained(
model_args.teacher_model_name_or_path,
from_tf=bool(".ckpt" in model_args.teacher_model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
teacher_model_parameters = filter(lambda p: p.requires_grad, teacher_model.parameters())
params = sum([np.prod(p.size()) for p in teacher_model_parameters])
logger.info("Teacher Model has %s parameters", params)
else:
teacher_model = None
student_model_parameters = filter(lambda p: p.requires_grad, student_model.parameters())
params = sum([np.prod(p.size()) for p in student_model_parameters])
logger.info("Student Model has %s parameters", params)
# Padding strategy
if data_args.pad_to_max_length:
padding = "max_length"
else:
padding = False
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    def preprocess_function(examples):
        # Tokenize the query/passage pairs
        result = tokenizer(
            examples["query"], examples["passage"],
            padding=padding, max_length=max_seq_length, truncation=True,
        )
        result["label"] = examples["label"]
        return result
    datasets = datasets.map(
        preprocess_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
    )
datasets['train'] = datasets['train'].shuffle(seed=training_args.seed)
if data_args.max_train_samples is not None:
datasets["train"] = datasets["train"].select(range(data_args.max_train_samples))
traindataset = datasets["train"]
for index in random.sample(range(len(traindataset)), 3):
logger.info(f"Sample {index} of the training set: {traindataset[index]}.")
    # NOTE: `metric` was undefined in the original script; assuming a generic
    # accuracy metric here (MS MARCO ranking is usually judged by MRR).
    metric = load_metric("accuracy")
    def compute_metrics(p: EvalPrediction):
        preds = np.argmax(p.predictions, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
data_collator = default_data_collator
elif training_args.fp16:
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
else:
data_collator = None
    optim = load_optimizer(student_model, training_args)
    steps_per_epoch = math.ceil(
        len(datasets["train"]) / (training_args.per_device_train_batch_size * max(1, training_args.n_gpu))
    )
    # Wrap the optimizer so SparseML's pruning modifiers run on schedule, and
    # train for exactly as many epochs as the pruning recipe specifies.
    manager = ScheduledModifierManager.from_yaml(data_args.nm_prune_config)
    optim = ScheduledOptimizer(optim, student_model, manager, steps_per_epoch=steps_per_epoch, loggers=None)
    training_args.num_train_epochs = float(manager.modifiers[0].end_epoch)
trainer = DistillRankingTrainer(
model=student_model,
args=training_args,
train_dataset=datasets["train"],
eval_dataset=datasets["validation"],
data_collator=data_collator,
compute_metrics=compute_metrics,
optimizers=(optim, None),
teacher=teacher_model,
        distill_hardness=model_args.distill_hardness,
        temperature=model_args.temperature,
)
trainer.train()
trainer.save_model()
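    # The script defines --do_onnx_export / --onnx_export_path and imports
    # ModuleExporter but never wires them up; a minimal export sketch,
    # assuming SparseML's ModuleExporter(module, output_dir) /
    # export_onnx(sample_batch) API and a dummy tokenized input:
    if data_args.do_onnx_export:
        student_model = student_model.cpu()
        sample = tokenizer(
            "sample query", "sample passage",
            padding="max_length", max_length=max_seq_length,
            truncation=True, return_tensors="pt",
        )
        exporter = ModuleExporter(student_model, output_dir=data_args.onnx_export_path)
        exporter.export_onnx(sample_batch=dict(sample))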
if __name__ == "__main__":
main()
|
{"hexsha": "837b3344ed03af149d0da7ac919592cece8e698d", "size": 14573, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/train.py", "max_stars_repo_name": "spacemanidol/RankingModelCompression", "max_stars_repo_head_hexsha": "43123fb37d97db3ae4338eb9af28520e2aaf88ea", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/train.py", "max_issues_repo_name": "spacemanidol/RankingModelCompression", "max_issues_repo_head_hexsha": "43123fb37d97db3ae4338eb9af28520e2aaf88ea", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/train.py", "max_forks_repo_name": "spacemanidol/RankingModelCompression", "max_forks_repo_head_hexsha": "43123fb37d97db3ae4338eb9af28520e2aaf88ea", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.6005434783, "max_line_length": 131, "alphanum_fraction": 0.6875729088, "include": true, "reason": "import numpy", "num_tokens": 3245}
|